diff --git a/.gitignore b/.gitignore index 2c19b1b3a2cd5..3dcc328829ab7 100644 --- a/.gitignore +++ b/.gitignore @@ -124,6 +124,7 @@ iotdb-core/tsfile/src/main/antlr4/org/apache/tsfile/parser/gen/ .mvn/.gradle-enterprise/ .mvn/.develocity/ .run/ +*.sevo # Relational Grammar ANTLR iotdb-core/relational-grammar/src/main/antlr4/org/apache/iotdb/db/relational/grammar/sql/.antlr/ diff --git a/dependencies.json b/dependencies.json index 1e88db84e77ed..ac83e0fed9250 100644 --- a/dependencies.json +++ b/dependencies.json @@ -113,7 +113,6 @@ "org.bouncycastle:bcprov-jdk18on", "org.bouncycastle:bcutil-jdk18on", "org.checkerframework:checker-qual", - "org.checkerframework:checker-qual", "org.eclipse.collections:eclipse-collections", "org.eclipse.collections:eclipse-collections-api", "org.eclipse.jetty:jetty-http", diff --git a/integration-test/src/main/java/org/apache/iotdb/itbase/runtime/RequestDelegate.java b/integration-test/src/main/java/org/apache/iotdb/itbase/runtime/RequestDelegate.java index 47f31004e6f28..78e979d5c2565 100644 --- a/integration-test/src/main/java/org/apache/iotdb/itbase/runtime/RequestDelegate.java +++ b/integration-test/src/main/java/org/apache/iotdb/itbase/runtime/RequestDelegate.java @@ -126,6 +126,10 @@ protected void handleExceptions(Exception[] exceptions) throws SQLException { break; } } + + if (!exceptionInconsistent && exceptionMsg[0] != null) { + throw new SQLException(exceptionMsg[0]); + } for (int i = 0; i < businessExceptions.length; i++) { if (businessExceptions[i] != null) { // As each exception has its own stacktrace, in order to display them clearly, we can only @@ -134,9 +138,6 @@ protected void handleExceptions(Exception[] exceptions) throws SQLException { "Exception happens during request to {}", getEndpoints().get(i), businessExceptions[i]); } } - if (!exceptionInconsistent && exceptionMsg[0] != null) { - throw new SQLException(exceptionMsg[0]); - } if (exceptionInconsistent) { throw new 
InconsistentDataException(Arrays.asList(exceptionMsg), getEndpoints()); } diff --git a/integration-test/src/test/java/org/apache/iotdb/db/it/utils/TestUtils.java b/integration-test/src/test/java/org/apache/iotdb/db/it/utils/TestUtils.java index 3d790f12a090f..67672d6f383f2 100644 --- a/integration-test/src/test/java/org/apache/iotdb/db/it/utils/TestUtils.java +++ b/integration-test/src/test/java/org/apache/iotdb/db/it/utils/TestUtils.java @@ -1510,7 +1510,7 @@ public static void restartCluster(BaseEnv env) { public static void assertDataEventuallyOnEnv( BaseEnv env, String sql, String expectedHeader, Set expectedResSet) { - assertDataEventuallyOnEnv(env, sql, expectedHeader, expectedResSet, 600); + assertDataEventuallyOnEnv(env, sql, expectedHeader, expectedResSet, 600, false); } public static void assertDataEventuallyOnEnv( @@ -1528,9 +1528,19 @@ public static void assertDataEventuallyOnEnv( String expectedHeader, Set expectedResSet, long timeoutSeconds) { + assertDataEventuallyOnEnv(env, sql, expectedHeader, expectedResSet, timeoutSeconds, false); + } + + public static void assertDataEventuallyOnEnv( + BaseEnv env, + String sql, + String expectedHeader, + Set expectedResSet, + long timeoutSeconds, + boolean tableModel) { final long startTime = System.currentTimeMillis(); final boolean[] flushed = {false}; - try (Connection connection = env.getConnection(); + try (Connection connection = tableModel ? 
env.getTableConnection() : env.getConnection(); Statement statement = connection.createStatement()) { // Keep retrying if there are execution failures await() diff --git a/integration-test/src/test/java/org/apache/iotdb/pipe/it/dual/tablemodel/manual/AbstractPipeTableModelDualManualIT.java b/integration-test/src/test/java/org/apache/iotdb/pipe/it/dual/tablemodel/manual/AbstractPipeTableModelDualManualIT.java index 3b3fae80902f1..f33b577384bb3 100644 --- a/integration-test/src/test/java/org/apache/iotdb/pipe/it/dual/tablemodel/manual/AbstractPipeTableModelDualManualIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/pipe/it/dual/tablemodel/manual/AbstractPipeTableModelDualManualIT.java @@ -28,8 +28,8 @@ public abstract class AbstractPipeTableModelDualManualIT { - protected BaseEnv senderEnv; - protected BaseEnv receiverEnv; + protected static BaseEnv senderEnv; + protected static BaseEnv receiverEnv; @Before public void setUp() { @@ -71,7 +71,7 @@ protected void setupConfig() { } @After - public final void tearDown() { + public void tearDown() { senderEnv.cleanClusterEnvironment(); receiverEnv.cleanClusterEnvironment(); } diff --git a/integration-test/src/test/java/org/apache/iotdb/pipe/it/dual/tablemodel/manual/enhanced/IoTDBPipeAlterTableColumnNameIT.java b/integration-test/src/test/java/org/apache/iotdb/pipe/it/dual/tablemodel/manual/enhanced/IoTDBPipeAlterTableColumnNameIT.java new file mode 100644 index 0000000000000..9eb9ac653de6e --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/pipe/it/dual/tablemodel/manual/enhanced/IoTDBPipeAlterTableColumnNameIT.java @@ -0,0 +1,177 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.pipe.it.dual.tablemodel.manual.enhanced; + +import org.apache.iotdb.consensus.ConsensusFactory; +import org.apache.iotdb.db.it.utils.TestUtils; +import org.apache.iotdb.isession.ITableSession; +import org.apache.iotdb.it.env.MultiEnvFactory; +import org.apache.iotdb.it.env.cluster.node.DataNodeWrapper; +import org.apache.iotdb.it.framework.IoTDBTestRunner; +import org.apache.iotdb.itbase.category.MultiClusterIT2DualTableManualEnhanced; +import org.apache.iotdb.itbase.env.BaseEnv; +import org.apache.iotdb.rpc.IoTDBConnectionException; +import org.apache.iotdb.rpc.StatementExecutionException; + +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; + +import java.util.Collections; + +@RunWith(IoTDBTestRunner.class) +@Category({MultiClusterIT2DualTableManualEnhanced.class}) +public class IoTDBPipeAlterTableColumnNameIT { + + protected static BaseEnv senderEnv; + protected static BaseEnv receiverEnv; + + @BeforeClass + public static void setUpClass() { + MultiEnvFactory.createEnv(2); + senderEnv = MultiEnvFactory.getEnv(0); + receiverEnv = MultiEnvFactory.getEnv(1); + + senderEnv + .getConfig() + .getCommonConfig() + .setAutoCreateSchemaEnabled(true) + .setConfigNodeConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS) + 
.setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS) + .setDataRegionConsensusProtocolClass(ConsensusFactory.IOT_CONSENSUS) + .setDnConnectionTimeoutMs(600000) + .setPipeMemoryManagementEnabled(false) + .setIsPipeEnableMemoryCheck(false) + .setPipeAutoSplitFullEnabled(false); + + + receiverEnv + .getConfig() + .getCommonConfig() + .setAutoCreateSchemaEnabled(true) + .setDataReplicationFactor(2) + .setSchemaReplicationFactor(3) + .setConfigNodeConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS) + .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS) + .setDataRegionConsensusProtocolClass(ConsensusFactory.IOT_CONSENSUS) + .setDnConnectionTimeoutMs(600000) + .setPipeMemoryManagementEnabled(false) + .setIsPipeEnableMemoryCheck(false) + .setPipeAutoSplitFullEnabled(false); + + + senderEnv.initClusterEnvironment(1, 3, 180); + receiverEnv.initClusterEnvironment(1, 3, 180); + } + + @AfterClass + public static void tearDownClass() { + if (senderEnv != null) { + senderEnv.cleanClusterEnvironment(); + } + if (receiverEnv != null) { + receiverEnv.cleanClusterEnvironment(); + } + } + + @Test + public void testAlterTableName() throws IoTDBConnectionException, StatementExecutionException { + final DataNodeWrapper receiverDataNode = receiverEnv.getDataNodeWrapper(0); + + final String receiverIp = receiverDataNode.getIp(); + final int receiverPort = receiverDataNode.getPort(); + + try (ITableSession senderSession = senderEnv.getTableSessionConnection()) { + try { + // insert some data to t1 and flush + senderSession.executeNonQueryStatement("CREATE DATABASE IF NOT EXISTS test_alter_table"); + senderSession.executeNonQueryStatement("USE test_alter_table"); + senderSession.executeNonQueryStatement("CREATE TABLE t1 (s1 INT32, s2 FLOAT, s3 BOOLEAN)"); + senderSession.executeNonQueryStatement( + "INSERT INTO t1 (time, s1, s2, s3) VALUES (1, 1, 1.0, true)"); + senderSession.executeNonQueryStatement("FLUSH"); + // rename table t1 to t2 + 
senderSession.executeNonQueryStatement("ALTER TABLE t1 RENAME TO t2"); + + // create a pipe + senderSession.executeNonQueryStatement( + String.format( + "CREATE PIPE p1 WITH SINK ('sink'='iotdb-thrift-sink', 'node-urls' = '%s:%s')", + receiverIp, receiverPort)); + + // check that receiver has received data in t2 + + TestUtils.assertDataEventuallyOnEnv( + receiverEnv, + "SELECT * FROM test_alter_table.t2", + "time,s1,s2,s3,", + Collections.singleton("1970-01-01T00:00:00.001Z,1,1.0,true,"), + 20, + true); + } finally { + senderSession.executeNonQueryStatement("DROP PIPE p1"); + senderSession.executeNonQueryStatement("DROP DATABASE test_alter_table"); + } + } + } + + @Test + public void testAlterColumnName() throws IoTDBConnectionException, StatementExecutionException { + final DataNodeWrapper receiverDataNode = receiverEnv.getDataNodeWrapper(0); + + final String receiverIp = receiverDataNode.getIp(); + final int receiverPort = receiverDataNode.getPort(); + + try (ITableSession senderSession = senderEnv.getTableSessionConnection()) { + try { + // insert some data to t1 and flush + senderSession.executeNonQueryStatement("CREATE DATABASE IF NOT EXISTS test_alter_column"); + senderSession.executeNonQueryStatement("USE test_alter_column"); + senderSession.executeNonQueryStatement("CREATE TABLE t1 (s1 INT32, s2 FLOAT, s3 BOOLEAN)"); + senderSession.executeNonQueryStatement( + "INSERT INTO t1 (time, s1, s2, s3) VALUES (1, 1, 1.0, true)"); + senderSession.executeNonQueryStatement("FLUSH"); + // rename column s1 to s4 + senderSession.executeNonQueryStatement("ALTER TABLE t1 RENAME COLUMN s1 TO s4"); + + // create a pipe + senderSession.executeNonQueryStatement( + String.format( + "CREATE PIPE p2 WITH SINK ('sink'='iotdb-thrift-sink', 'node-urls' = '%s:%s')", + receiverIp, receiverPort)); + + // check that receiver has received data in t1 with renamed column s4 + + TestUtils.assertDataEventuallyOnEnv( + receiverEnv, + "SELECT * FROM test_alter_column.t1", + "time,s4,s2,s3,", + 
Collections.singleton("1970-01-01T00:00:00.001Z,1,1.0,true,"), + 20, + true); + } finally { + senderSession.executeNonQueryStatement("DROP PIPE p2"); + senderSession.executeNonQueryStatement("DROP DATABASE test_alter_column"); + } + } + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/relational/it/db/it/IoTDBLoadTsFileIT.java b/integration-test/src/test/java/org/apache/iotdb/relational/it/db/it/IoTDBLoadTsFileIT.java index a58e0633b7425..c65754289a413 100644 --- a/integration-test/src/test/java/org/apache/iotdb/relational/it/db/it/IoTDBLoadTsFileIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/relational/it/db/it/IoTDBLoadTsFileIT.java @@ -19,11 +19,16 @@ package org.apache.iotdb.relational.it.db.it; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.ColumnRename; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.SchemaEvolution; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.SchemaEvolutionFile; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.TableRename; import org.apache.iotdb.it.env.EnvFactory; import org.apache.iotdb.it.framework.IoTDBTestRunner; import org.apache.iotdb.it.utils.TsFileTableGenerator; import org.apache.iotdb.itbase.category.TableClusterIT; import org.apache.iotdb.itbase.category.TableLocalStandaloneIT; +import org.apache.iotdb.itbase.constant.TestConstant; import org.apache.iotdb.itbase.env.BaseEnv; import org.apache.tsfile.enums.ColumnCategory; @@ -31,6 +36,7 @@ import org.apache.tsfile.utils.Pair; import org.apache.tsfile.write.schema.IMeasurementSchema; import org.apache.tsfile.write.schema.MeasurementSchema; +import org.awaitility.Awaitility; import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -45,14 +51,22 @@ import java.nio.file.Files; import java.sql.Connection; import java.sql.ResultSet; +import java.sql.SQLException; import java.sql.Statement; import java.util.ArrayList; import java.util.Arrays; 
+import java.util.Collections; import java.util.List; import java.util.Objects; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +@SuppressWarnings("SqlSourceToSinkFlow") @RunWith(IoTDBTestRunner.class) @Category({TableLocalStandaloneIT.class, TableClusterIT.class}) public class IoTDBLoadTsFileIT { @@ -289,9 +303,223 @@ public void testLoadWithTableMod() throws Exception { try (final ResultSet resultSet = statement.executeQuery("show tables")) { Assert.assertTrue(resultSet.next()); - Assert.assertFalse(resultSet.next()); + assertFalse(resultSet.next()); + } + } + } + + @Test + public void testLoadWithSevoFile() throws Exception { + testLoadWithSevoFile(false); + } + + @Test + public void testAsyncLoadWithSevoFile() throws Exception { + testLoadWithSevoFile(true); + } + + public void testLoadWithSevoFile(boolean async) throws Exception { + final int lineCount = 10000; + + List> measurementSchemas = + generateMeasurementSchemas(); + List columnCategories = + generateTabletColumnCategory(0, measurementSchemas.size(), -1); + + final File file = new File(tmpDir, "1-0-0-0.tsfile"); + + List schemaList1 = + measurementSchemas.stream().map(pair -> pair.left).collect(Collectors.toList()); + + try (final TsFileTableGenerator generator = new TsFileTableGenerator(file)) { + generator.registerTable(SchemaConfig.TABLE_0, new ArrayList<>(schemaList1), columnCategories); + generator.generateData(SchemaConfig.TABLE_0, lineCount, PARTITION_INTERVAL / 10_000); + } + + // rename table0 to table1 + File sevoFile = new File(tmpDir, "0.sevo"); + SchemaEvolutionFile schemaEvolutionFile = new SchemaEvolutionFile(sevoFile.getAbsolutePath()); + SchemaEvolution schemaEvolution = new TableRename(SchemaConfig.TABLE_0, SchemaConfig.TABLE_1); + 
schemaEvolutionFile.append(Collections.singletonList(schemaEvolution)); + // rename INT322INT32 to INT322INT32_NEW + schemaEvolution = new ColumnRename(SchemaConfig.TABLE_1, "INT322INT32", "INT322INT32_NEW"); + schemaEvolutionFile.append(Collections.singletonList(schemaEvolution)); + + try (final Connection connection = + EnvFactory.getEnv().getConnection(BaseEnv.TABLE_SQL_DIALECT); + final Statement statement = connection.createStatement()) { + statement.execute(String.format("create database if not exists %s", SchemaConfig.DATABASE_0)); + statement.execute(String.format("use %s", SchemaConfig.DATABASE_0)); + statement.execute( + String.format( + "load '%s' with ('database'='%s', 'sevo-file-path'='%s', 'on-success'='delete', 'async'='%s')", + file.getAbsolutePath(), + SchemaConfig.DATABASE_0, + schemaEvolutionFile.getFilePath(), + async)); + + if (!async) { + checkSevoResult(statement, lineCount); + } else { + Awaitility.await() + .atMost(20, TimeUnit.SECONDS) + .pollInterval(1, TimeUnit.SECONDS) + .untilAsserted( + () -> { + try { + checkSevoResult(statement, lineCount); + } catch (SQLException e) { + throw new AssertionError(e); + } + }); + } + } + assertFalse(sevoFile.exists()); + } + + private void checkSevoResult(Statement statement, int lineCount) throws SQLException { + statement.execute("use " + SchemaConfig.DATABASE_0); + // cannot query using table0 + try (final ResultSet resultSet = + statement.executeQuery(String.format("select count(*) from %s", SchemaConfig.TABLE_0))) { + fail(); + } catch (SQLException e) { + assertEquals("550: Table 'root.test' does not exist.", e.getMessage()); + } + + // can query with table1 + try (final ResultSet resultSet = + statement.executeQuery(String.format("select count(*) from %s", SchemaConfig.TABLE_1))) { + if (resultSet.next()) { + Assert.assertEquals(lineCount, resultSet.getLong(1)); + } else { + Assert.fail("This ResultSet is empty."); + } + } + + // cannot query using INT322INT32 + try (final ResultSet resultSet = 
+ statement.executeQuery( + String.format("select count(%s) from %s", "INT322INT32", SchemaConfig.TABLE_1))) { + fail(); + } catch (SQLException e) { + assertEquals("616: Column 'int322int32' cannot be resolved", e.getMessage()); + } + + // can query with INT322INT32_NEW + try (final ResultSet resultSet = + statement.executeQuery( + String.format("select count(%s) from %s", "INT322INT32_NEW", SchemaConfig.TABLE_1))) { + if (resultSet.next()) { + Assert.assertEquals(lineCount, resultSet.getLong(1)); + } else { + Assert.fail("This ResultSet is empty."); } } + + try (final ResultSet resultSet = statement.executeQuery("show tables")) { + Assert.assertTrue(resultSet.next()); + assertEquals(SchemaConfig.TABLE_1, resultSet.getString(1)); + assertFalse(resultSet.next()); + } + } + + @Test + public void testLoadSevoWithIoTDBDir() throws Exception { + testLoadSevoWithIoTDBDir(false); + } + + @Test + public void testAsyncLoadSevoWithIoTDBDir() throws Exception { + testLoadSevoWithIoTDBDir(true); + } + + public void testLoadSevoWithIoTDBDir(boolean async) throws Exception { + final int lineCount = 10000; + File datanodeDir = prepareIoTDBDirWithSevo(lineCount); + try (final Connection connection = + EnvFactory.getEnv().getConnection(BaseEnv.TABLE_SQL_DIALECT); + final Statement statement = connection.createStatement()) { + statement.execute(String.format("DROP DATABASE IF EXISTS %s", SchemaConfig.DATABASE_0)); + statement.execute(String.format("create database if not exists %s", "another")); + statement.execute(String.format("use %s", "another")); + + try { + statement.execute( + String.format("load '%s' WITH ('database'='somedb')", datanodeDir.getAbsolutePath())); + fail(); + } catch (SQLException e) { + assertTrue( + e.getMessage() + .contains( + "Database is not supported when loading from datanode directory, if you wish to use specified database and ignore ones in the datanode directory, please rename the datanode directory to any other one.")); + } + + statement.execute( + 
String.format("load '%s' WITH ('async'='%s')", datanodeDir.getAbsolutePath(), async)); + + if (!async) { + checkSevoResult(statement, lineCount); + } else { + Awaitility.await() + .atMost(20, TimeUnit.SECONDS) + .pollInterval(1, TimeUnit.SECONDS) + .untilAsserted( + () -> { + try { + checkSevoResult(statement, lineCount); + } catch (SQLException e) { + throw new AssertionError(e); + } + }); + } + } + } + + @SuppressWarnings({"ResultOfMethodCallIgnored", "SameParameterValue"}) + private File prepareIoTDBDirWithSevo(int lineCount) throws Exception { + File datanodeDir = new File(TestConstant.BASE_OUTPUT_PATH, "datanode"); + File dataDir = new File(datanodeDir, "data"); + File sequenceDir = new File(dataDir, "sequence"); + File databaseDataDir = new File(sequenceDir, SchemaConfig.DATABASE_0); + File regionDataDir = new File(databaseDataDir, "0"); + File partitionDataDir = new File(regionDataDir, "0"); + partitionDataDir.mkdirs(); + + List> measurementSchemas = + generateMeasurementSchemas(); + List columnCategories = + generateTabletColumnCategory(0, measurementSchemas.size(), -1); + + final File file = new File(partitionDataDir, "1-0-0-0.tsfile"); + + List schemaList1 = + measurementSchemas.stream().map(pair -> pair.left).collect(Collectors.toList()); + + try (final TsFileTableGenerator generator = new TsFileTableGenerator(file)) { + generator.registerTable(SchemaConfig.TABLE_0, new ArrayList<>(schemaList1), columnCategories); + generator.generateData(SchemaConfig.TABLE_0, lineCount, PARTITION_INTERVAL / 10_000); + } + + File systemDir = new File(datanodeDir, "system"); + File databasesDir = new File(systemDir, "databases"); + File databaseSystemDir = new File(databasesDir, SchemaConfig.DATABASE_0); + File regionSystemDir = new File(databaseSystemDir, "0"); + File partitionSystemDir = new File(regionSystemDir, "0"); + File fileSetsDir = new File(partitionSystemDir, "filesets"); + File fileSetDir = new File(fileSetsDir, "0"); + fileSetDir.mkdirs(); + + // rename 
table0 to table1 + File sevoFile = new File(fileSetDir, "0.sevo"); + SchemaEvolutionFile schemaEvolutionFile = new SchemaEvolutionFile(sevoFile.getAbsolutePath()); + SchemaEvolution schemaEvolution = new TableRename(SchemaConfig.TABLE_0, SchemaConfig.TABLE_1); + schemaEvolutionFile.append(Collections.singletonList(schemaEvolution)); + // rename INT322INT32 to INT322INT32_NEW + schemaEvolution = new ColumnRename(SchemaConfig.TABLE_1, "INT322INT32", "INT322INT32_NEW"); + schemaEvolutionFile.append(Collections.singletonList(schemaEvolution)); + + return datanodeDir; } @Test diff --git a/integration-test/src/test/java/org/apache/iotdb/relational/it/schema/IoTDBTableIT.java b/integration-test/src/test/java/org/apache/iotdb/relational/it/schema/IoTDBTableIT.java index 57d482dd1b057..57b291d50ce39 100644 --- a/integration-test/src/test/java/org/apache/iotdb/relational/it/schema/IoTDBTableIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/relational/it/schema/IoTDBTableIT.java @@ -20,8 +20,10 @@ package org.apache.iotdb.relational.it.schema; import org.apache.iotdb.db.it.utils.TestUtils; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileID; import org.apache.iotdb.isession.ITableSession; import org.apache.iotdb.it.env.EnvFactory; +import org.apache.iotdb.it.env.cluster.node.DataNodeWrapper; import org.apache.iotdb.it.framework.IoTDBTestRunner; import org.apache.iotdb.itbase.category.TableClusterIT; import org.apache.iotdb.itbase.category.TableLocalStandaloneIT; @@ -29,18 +31,24 @@ import org.apache.iotdb.rpc.IoTDBConnectionException; import org.apache.iotdb.rpc.StatementExecutionException; +import org.apache.tsfile.common.constant.TsFileConstant; import org.apache.tsfile.enums.ColumnCategory; import org.apache.tsfile.enums.TSDataType; +import org.apache.tsfile.file.metadata.ColumnSchema; +import org.apache.tsfile.file.metadata.TableSchema; import org.apache.tsfile.write.record.Tablet; import org.apache.tsfile.write.schema.IMeasurementSchema; 
import org.apache.tsfile.write.schema.MeasurementSchema; +import org.awaitility.Awaitility; import org.junit.AfterClass; import org.junit.Assert; import org.junit.BeforeClass; +import org.junit.Ignore; import org.junit.Test; import org.junit.experimental.categories.Category; import org.junit.runner.RunWith; +import java.io.File; import java.sql.Connection; import java.sql.ResultSet; import java.sql.ResultSetMetaData; @@ -51,7 +59,14 @@ import java.util.Collections; import java.util.HashSet; import java.util.List; +import java.util.Random; import java.util.Set; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; +import java.util.stream.Collectors; import static org.apache.iotdb.commons.schema.column.ColumnHeaderConstant.describeTableColumnHeaders; import static org.apache.iotdb.commons.schema.column.ColumnHeaderConstant.describeTableDetailsColumnHeaders; @@ -60,6 +75,7 @@ import static org.apache.iotdb.commons.schema.column.ColumnHeaderConstant.showTablesDetailsColumnHeaders; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; @@ -71,6 +87,8 @@ public class IoTDBTableIT { public static void setUp() throws Exception { EnvFactory.getEnv().getConfig().getCommonConfig().setEnforceStrongPassword(false); EnvFactory.getEnv().getConfig().getCommonConfig().setRestrictObjectLimit(true); + EnvFactory.getEnv().getConfig().getConfigNodeConfig().setLeaderDistributionPolicy("HASH"); + EnvFactory.getEnv().getConfig().getDataNodeConfig().setCompactionScheduleInterval(100); EnvFactory.getEnv().initClusterEnvironment(); } @@ -151,22 +169,6 @@ public void testManageTable() { assertEquals(tableNames.length, cnt); } - // Test unsupported, to be deleted - try 
{ - statement.execute("alter table test1.table1 rename to tableN"); - } catch (final SQLException e) { - assertEquals("701: The renaming for base table is currently unsupported", e.getMessage()); - } - - // Test unsupported, to be deleted - try { - statement.execute( - "alter table if exists test_db.table1 rename column if exists model to modelType"); - } catch (final SQLException e) { - assertEquals( - "701: The renaming for base table column is currently unsupported", e.getMessage()); - } - // Alter table properties statement.execute("alter table test1.table1 set properties ttl=1000000"); ttls = new String[] {"1000000"}; @@ -614,7 +616,6 @@ public void testManageTable() { statement.execute("create table test100 (time time)"); statement.execute("create table test101 (time timestamp time)"); } catch (final SQLException e) { - e.printStackTrace(); fail(e.getMessage()); } } @@ -686,7 +687,7 @@ public void testConcurrentAutoCreateAndDropColumn() throws Exception { try (final ITableSession session = EnvFactory.getEnv().getTableSessionConnection(); final Connection adminCon = EnvFactory.getEnv().getConnection(BaseEnv.TABLE_SQL_DIALECT); final Statement adminStmt = adminCon.createStatement()) { - adminStmt.execute("create database db1"); + adminStmt.execute("create database if not exists db1"); session.executeNonQueryStatement("USE \"db1\""); final StringBuilder sb = new StringBuilder("CREATE TABLE table8 (tag1 string tag"); @@ -1091,4 +1092,1036 @@ public void testTreeViewTable() throws Exception { } } } + + @Test + public void testAllowAlterTableName() throws Exception { + try (final Connection connection = + EnvFactory.getEnv().getConnection(BaseEnv.TABLE_SQL_DIALECT); + final Statement statement = connection.createStatement()) { + statement.execute("DROP DATABASE IF EXISTS testdb"); + statement.execute("CREATE DATABASE IF NOT EXISTS testdb"); + statement.execute("USE testdb"); + + try { + statement.execute( + "CREATE TABLE IF NOT EXISTS alter_table_name_disabled () 
WITH (allow_alter_name=1)"); + fail("allow_alter_name must be boolean"); + } catch (SQLException e) { + assertEquals( + "701: allow_alter_name value must be a BooleanLiteral, but now is LongLiteral, value: 1", + e.getMessage()); + } + + statement.execute( + "CREATE TABLE IF NOT EXISTS alter_table_name_disabled () WITH (allow_alter_name=false)"); + + try { + statement.execute( + "ALTER TABLE alter_table_name_disabled SET PROPERTIES allow_alter_name=true"); + fail("allow_alter_name cannot be altered"); + } catch (SQLException e) { + assertEquals("701: The property allow_alter_name cannot be altered.", e.getMessage()); + } + + try { + statement.execute("ALTER TABLE alter_table_name_disabled RENAME TO alter_table_named"); + fail("the table cannot be renamed"); + } catch (SQLException e) { + assertEquals( + "701: Table 'testdb.alter_table_name_disabled' is created in a old version and cannot be renamed, please migrate its data to a new table manually", + e.getMessage()); + } + } + } + + @Test + public void testAlterTableName() throws Exception { + try (final Connection connection = + EnvFactory.getEnv().getConnection(BaseEnv.TABLE_SQL_DIALECT); + final Statement statement = connection.createStatement()) { + statement.execute("DROP DATABASE IF EXISTS testdb"); + statement.execute("CREATE DATABASE IF NOT EXISTS testdb"); + statement.execute("USE testdb"); + + // alter non-exist + try { + statement.execute("ALTER TABLE alter_table_name RENAME TO alter_table_named"); + fail(); + } catch (SQLException e) { + assertEquals("550: Table 'testdb.alter_table_name' does not exist", e.getMessage()); + } + + // alter information schema + try { + statement.execute("ALTER TABLE information_schema.tables RENAME TO tables_new"); + fail(); + } catch (SQLException e) { + assertEquals("701: The database 'information_schema' can only be queried", e.getMessage()); + } + + // alter once + statement.execute("CREATE TABLE IF NOT EXISTS alter_table_name (s1 int32)"); + statement.execute("INSERT 
INTO alter_table_name (time, s1) VALUES (1, 1)"); + statement.execute("ALTER TABLE alter_table_name RENAME TO alter_table_named"); + try { + statement.execute("INSERT INTO alter_table_name (time, s1) VALUES (0, 0)"); + fail(); + } catch (SQLException e) { + assertEquals("550: Table 'testdb.alter_table_name' does not exist.", e.getMessage()); + } + statement.execute("INSERT INTO alter_table_named (time, s1) VALUES (2, 2)"); + + ResultSet resultSet = statement.executeQuery("SELECT * FROM alter_table_named"); + for (int i = 1; i <= 2; i++) { + assertTrue(resultSet.next()); + assertEquals(i, resultSet.getLong(1)); + assertEquals(i, resultSet.getLong(2)); + } + assertFalse(resultSet.next()); + resultSet = + statement.executeQuery("SELECT last(time), last_by(s1,time) FROM alter_table_named"); + assertTrue(resultSet.next()); + assertEquals(2, resultSet.getLong(1)); + assertEquals(2, resultSet.getLong(2)); + assertFalse(resultSet.next()); + + // alter twice + statement.execute("ALTER TABLE alter_table_named RENAME TO alter_table_named2"); + try { + statement.execute("INSERT INTO alter_table_named (time, s1) VALUES (0, 0)"); + fail(); + } catch (SQLException e) { + assertEquals("550: Table 'testdb.alter_table_named' does not exist.", e.getMessage()); + } + statement.execute("INSERT INTO alter_table_named2 (time, s1) VALUES (3, 3)"); + + resultSet = statement.executeQuery("SELECT * FROM alter_table_named2"); + for (int i = 1; i <= 3; i++) { + assertTrue(resultSet.next()); + assertEquals(i, resultSet.getLong(1)); + assertEquals(i, resultSet.getLong(2)); + } + assertFalse(resultSet.next()); + resultSet = + statement.executeQuery("SELECT last(time), last_by(s1,time) FROM alter_table_named2"); + assertTrue(resultSet.next()); + assertEquals(3, resultSet.getLong(1)); + assertEquals(3, resultSet.getLong(2)); + assertFalse(resultSet.next()); + + // alter back + statement.execute("ALTER TABLE alter_table_named2 RENAME TO alter_table_name"); + try { + statement.execute("INSERT INTO 
alter_table_named2 (time, s1) VALUES (0, 0)"); + fail(); + } catch (SQLException e) { + assertEquals("550: Table 'testdb.alter_table_named2' does not exist.", e.getMessage()); + } + statement.execute("INSERT INTO alter_table_name (time, s1) VALUES (4, 4)"); + + resultSet = statement.executeQuery("SELECT * FROM alter_table_name"); + for (int i = 1; i <= 4; i++) { + assertTrue(resultSet.next()); + assertEquals(i, resultSet.getLong(1)); + assertEquals(i, resultSet.getLong(2)); + } + assertFalse(resultSet.next()); + resultSet = + statement.executeQuery("SELECT last(time), last_by(s1,time) FROM alter_table_name"); + assertTrue(resultSet.next()); + assertEquals(4, resultSet.getLong(1)); + assertEquals(4, resultSet.getLong(2)); + assertFalse(resultSet.next()); + } + } + + @Test + public void testAlterColumnName() throws Exception { + try (final Connection connection = + EnvFactory.getEnv().getConnection(BaseEnv.TABLE_SQL_DIALECT); + final Statement statement = connection.createStatement()) { + statement.execute("DROP DATABASE IF EXISTS testdb"); + statement.execute("CREATE DATABASE IF NOT EXISTS testdb"); + statement.execute("USE testdb"); + + statement.execute("CREATE TABLE IF NOT EXISTS alter_column_name (s1 int32)"); + statement.execute("INSERT INTO alter_column_name (time, s1) VALUES (1, 1)"); + + // alter once + statement.execute("ALTER TABLE alter_column_name RENAME COLUMN s1 TO s2"); + try { + statement.execute("INSERT INTO alter_column_name (time, s1) VALUES (0, 0)"); + fail(); + } catch (SQLException e) { + assertEquals( + "616: Unknown column category for s1. 
Cannot auto create column.", e.getMessage()); + } + statement.execute("INSERT INTO alter_column_name (time, s2) VALUES (2, 2)"); + + ResultSet resultSet = statement.executeQuery("SELECT * FROM alter_column_name"); + ResultSetMetaData metaData = resultSet.getMetaData(); + assertEquals(2, metaData.getColumnCount()); + assertEquals("s2", metaData.getColumnName(2)); + for (int i = 1; i <= 2; i++) { + assertTrue(resultSet.next()); + assertEquals(i, resultSet.getLong(1)); + assertEquals(i, resultSet.getInt(2)); + } + assertFalse(resultSet.next()); + resultSet = + statement.executeQuery("SELECT last(time), last_by(s2,time) FROM alter_column_name"); + assertTrue(resultSet.next()); + assertEquals(2, resultSet.getLong(1)); + assertEquals(2, resultSet.getLong(2)); + assertFalse(resultSet.next()); + // alter twice + statement.execute("ALTER TABLE alter_column_name RENAME COLUMN s2 TO s3"); + try { + statement.execute("INSERT INTO alter_column_name (time, s2) VALUES (0, 0)"); + fail(); + } catch (SQLException e) { + assertEquals( + "616: Unknown column category for s2. 
Cannot auto create column.", e.getMessage()); + } + statement.execute("INSERT INTO alter_column_name (time, s3) VALUES (3, 3)"); + + resultSet = statement.executeQuery("SELECT * FROM alter_column_name"); + metaData = resultSet.getMetaData(); + assertEquals(2, metaData.getColumnCount()); + assertEquals("s3", metaData.getColumnName(2)); + for (int i = 1; i <= 3; i++) { + assertTrue(resultSet.next()); + assertEquals(i, resultSet.getLong(1)); + assertEquals(i, resultSet.getInt(2)); + } + assertFalse(resultSet.next()); + resultSet = + statement.executeQuery("SELECT last(time), last_by(s3,time) FROM alter_column_name"); + assertTrue(resultSet.next()); + assertEquals(3, resultSet.getLong(1)); + assertEquals(3, resultSet.getLong(2)); + assertFalse(resultSet.next()); + + // alter back + statement.execute("ALTER TABLE alter_column_name RENAME COLUMN s3 TO s1"); + try { + statement.execute("INSERT INTO alter_column_name (time, s3) VALUES (0, 0)"); + fail(); + } catch (SQLException e) { + assertEquals( + "616: Unknown column category for s3. 
Cannot auto create column.", e.getMessage()); + } + statement.execute("INSERT INTO alter_column_name (time, s1) VALUES (4, 4)"); + + resultSet = statement.executeQuery("SELECT * FROM alter_column_name"); + metaData = resultSet.getMetaData(); + assertEquals(2, metaData.getColumnCount()); + assertEquals("s1", metaData.getColumnName(2)); + for (int i = 1; i <= 4; i++) { + assertTrue(resultSet.next()); + assertEquals(i, resultSet.getLong(1)); + assertEquals(i, resultSet.getInt(2)); + } + assertFalse(resultSet.next()); + resultSet = + statement.executeQuery("SELECT last(time), last_by(s1,time) FROM alter_column_name"); + assertTrue(resultSet.next()); + assertEquals(4, resultSet.getLong(1)); + assertEquals(4, resultSet.getLong(2)); + assertFalse(resultSet.next()); + + // alter multi + statement.execute("ALTER TABLE alter_column_name ADD COLUMN s901 INT32"); + statement.execute("ALTER TABLE alter_column_name RENAME COLUMN s1 TO s2 s901 TO s902"); + try { + statement.execute("INSERT INTO alter_column_name (time, s1, s901) VALUES (0, 0, 0)"); + fail(); + } catch (SQLException e) { + assertEquals( + "616: Unknown column category for s1. 
Cannot auto create column.", e.getMessage()); + } + statement.execute("INSERT INTO alter_column_name (time, s2, s902) VALUES (5, 5, 5)"); + + resultSet = statement.executeQuery("SELECT * FROM alter_column_name"); + metaData = resultSet.getMetaData(); + assertEquals(3, metaData.getColumnCount()); + assertEquals("s2", metaData.getColumnName(2)); + assertEquals("s902", metaData.getColumnName(3)); + for (int i = 1; i <= 5; i++) { + assertTrue(resultSet.next()); + assertEquals(i, resultSet.getLong(1)); + assertEquals(i, resultSet.getInt(2)); + if (i <= 4) { + resultSet.getInt(3); + assertTrue(resultSet.wasNull()); + } else { + assertEquals(i, resultSet.getInt(3)); + } + } + assertFalse(resultSet.next()); + resultSet = + statement.executeQuery( + "SELECT last(time), last_by(s2,time), last_by(s902,time) FROM alter_column_name"); + assertTrue(resultSet.next()); + assertEquals(5, resultSet.getLong(1)); + assertEquals(5, resultSet.getLong(2)); + assertEquals(5, resultSet.getLong(3)); + assertFalse(resultSet.next()); + } + } + + @Test + public void testTableRenameConflict() throws Exception { + try (final Connection connection = + EnvFactory.getEnv().getConnection(BaseEnv.TABLE_SQL_DIALECT); + final Statement statement = connection.createStatement()) { + statement.execute("DROP DATABASE IF EXISTS testdb"); + statement.execute("CREATE DATABASE IF NOT EXISTS testdb"); + statement.execute("USE testdb"); + + statement.execute("CREATE TABLE IF NOT EXISTS table_a ()"); + statement.execute("CREATE TABLE IF NOT EXISTS table_b ()"); + + try { + statement.execute("ALTER TABLE table_a RENAME TO table_b"); + fail(); + } catch (final SQLException e) { + // expect table already exists (use code 551) + assertTrue( + e.getMessage().startsWith("551") && e.getMessage().toLowerCase().contains("already")); + } + } + } + + @Test + public void testColumnRenameConflict() throws Exception { + try (final Connection connection = + EnvFactory.getEnv().getConnection(BaseEnv.TABLE_SQL_DIALECT); + final 
Statement statement = connection.createStatement()) { + statement.execute("DROP DATABASE IF EXISTS testdb"); + statement.execute("CREATE DATABASE IF NOT EXISTS testdb"); + statement.execute("USE testdb"); + + statement.execute("CREATE TABLE IF NOT EXISTS tconf (c1 int32, c2 int32)"); + + try { + statement.execute("ALTER TABLE tconf RENAME COLUMN c1 TO c2"); + fail(); + } catch (final SQLException e) { + // expect column already exist error (code 552) + assertTrue( + e.getMessage().startsWith("552") && e.getMessage().toLowerCase().contains("exist")); + } + } + } + + @Test + public void testAlterTableRenameToSameName() throws Exception { + try (final Connection connection = + EnvFactory.getEnv().getConnection(BaseEnv.TABLE_SQL_DIALECT); + final Statement statement = connection.createStatement()) { + statement.execute("DROP DATABASE IF EXISTS testdb"); + statement.execute("CREATE DATABASE IF NOT EXISTS testdb"); + statement.execute("USE testdb"); + + statement.execute("CREATE TABLE IF NOT EXISTS rename_same (s1 int32)"); + statement.execute("INSERT INTO rename_same (time, s1) VALUES (1, 1)"); + + // Renaming to the same name should be a no-op and not lose data + try { + statement.execute("ALTER TABLE rename_same RENAME TO rename_same"); + fail(); + } catch (SQLException e) { + assertEquals( + "701: The table's old name shall not be equal to the new one.", e.getMessage()); + } + } + } + + @Test + public void testAlterTableRenameToQuotedSpecialName() throws Exception { + try (final Connection connection = + EnvFactory.getEnv().getConnection(BaseEnv.TABLE_SQL_DIALECT); + final Statement statement = connection.createStatement()) { + statement.execute("DROP DATABASE IF EXISTS testdb"); + statement.execute("CREATE DATABASE IF NOT EXISTS testdb"); + statement.execute("USE testdb"); + + statement.execute("CREATE TABLE IF NOT EXISTS rename_special (s1 int32)"); + statement.execute("INSERT INTO rename_special (time, s1) VALUES (1, 1)"); + + // rename to a quoted name containing 
hyphen and unicode + statement.execute("ALTER TABLE rename_special RENAME TO \"rename-特殊\""); + + // old name should not exist + try { + statement.execute("INSERT INTO rename_special (time, s1) VALUES (2, 2)"); + fail(); + } catch (final SQLException e) { + assertTrue( + e.getMessage().startsWith("550") + || e.getMessage().toLowerCase().contains("does not exist")); + } + + // insert into new quoted name and verify + statement.execute("INSERT INTO \"rename-特殊\" (time, s1) VALUES (2, 2)"); + ResultSet rs = statement.executeQuery("SELECT * FROM \"rename-特殊\""); + for (int i = 1; i <= 2; i++) { + assertTrue(rs.next()); + assertEquals(i, rs.getLong(1)); + assertEquals(i, rs.getInt(2)); + } + assertFalse(rs.next()); + } + } + + @Test + public void testAlterTableRenameWithDots() throws Exception { + try (final Connection connection = + EnvFactory.getEnv().getConnection(BaseEnv.TABLE_SQL_DIALECT); + final Statement statement = connection.createStatement()) { + statement.execute("DROP DATABASE IF EXISTS db1"); + statement.execute("DROP DATABASE IF EXISTS db2"); + statement.execute("CREATE DATABASE IF NOT EXISTS db1"); + statement.execute("CREATE DATABASE IF NOT EXISTS db2"); + statement.execute("USE db1"); + + statement.execute("CREATE TABLE IF NOT EXISTS t1 (s1 int32)"); + statement.execute("INSERT INTO t1 (time, s1) VALUES (1, 1)"); + + statement.execute("ALTER TABLE t1 RENAME TO \"db2.t1\""); + + ResultSet rs = statement.executeQuery("SELECT * FROM \"db2.t1\""); + assertTrue(rs.next()); + assertEquals(1, rs.getLong(1)); + assertEquals(1, rs.getInt(2)); + assertFalse(rs.next()); + } + } + + @Test + public void testAlterColumnRenameCaseSensitivity() throws Exception { + try (final Connection connection = + EnvFactory.getEnv().getConnection(BaseEnv.TABLE_SQL_DIALECT); + final Statement statement = connection.createStatement()) { + statement.execute("DROP DATABASE IF EXISTS testdb"); + statement.execute("CREATE DATABASE IF NOT EXISTS testdb"); + statement.execute("USE 
testdb"); + + statement.execute("CREATE TABLE IF NOT EXISTS tcase (c1 int32)"); + statement.execute("INSERT INTO tcase (time, c1) VALUES (1, 1)"); + + statement.execute("ALTER TABLE tcase RENAME COLUMN c1 TO C1"); + + ResultSet rs = statement.executeQuery("SELECT * FROM tcase"); + ResultSetMetaData md = rs.getMetaData(); + assertEquals(2, md.getColumnCount()); + // server may normalize column names; accept either exact case or normalized lower-case + String colName = md.getColumnName(2); + assertTrue(colName.equals("C1") || colName.equals("c1")); + + // ensure data still accessible via the new identifier (try using the new name in insert) + try { + statement.execute("INSERT INTO tcase (time, c1) VALUES (2, 2)"); + // if server treats identifiers case-insensitively this may succeed + } catch (final SQLException ignored) { + // ignore - the purpose is to assert existence/behavior, not enforce one model here + } + } + } + + @Test + public void testAlterColumnRenameToQuotedSpecialChars() throws Exception { + try (final Connection connection = + EnvFactory.getEnv().getConnection(BaseEnv.TABLE_SQL_DIALECT); + final Statement statement = connection.createStatement()) { + statement.execute("DROP DATABASE IF EXISTS testdb"); + statement.execute("CREATE DATABASE IF NOT EXISTS testdb"); + statement.execute("USE testdb"); + + statement.execute("CREATE TABLE IF NOT EXISTS tcolspecial (s1 int32)"); + statement.execute("INSERT INTO tcolspecial (time, s1) VALUES (1, 1)"); + + statement.execute("ALTER TABLE tcolspecial RENAME COLUMN s1 TO \"s-特\""); + + try { + statement.execute("INSERT INTO tcolspecial (time, s1) VALUES (2, 2)"); + fail(); + } catch (final SQLException e) { + assertTrue( + e.getMessage().startsWith("616") || e.getMessage().toLowerCase().contains("unknown")); + } + + statement.execute("INSERT INTO tcolspecial (time, \"s-特\") VALUES (2, 2)"); + ResultSet rs = statement.executeQuery("SELECT * FROM tcolspecial"); + ResultSetMetaData md = rs.getMetaData(); + 
assertEquals(2, md.getColumnCount()); + String colName = md.getColumnName(2); + // accept either exact quoted name or normalized variant + assertTrue(colName.equals("s-特") || colName.equals("s特") || colName.equals("s_特")); + } + } + + @Test + public void testAlterColumnMultipleRenamesAndBack() throws Exception { + try (final Connection connection = + EnvFactory.getEnv().getConnection(BaseEnv.TABLE_SQL_DIALECT); + final Statement statement = connection.createStatement()) { + statement.execute("DROP DATABASE IF EXISTS testdb"); + statement.execute("CREATE DATABASE IF NOT EXISTS testdb"); + statement.execute("USE testdb"); + + statement.execute("CREATE TABLE IF NOT EXISTS tmulti (a int32)"); + statement.execute("INSERT INTO tmulti (time, a) VALUES (1, 1)"); + + statement.execute("ALTER TABLE tmulti RENAME COLUMN a TO b"); + statement.execute("INSERT INTO tmulti (time, b) VALUES (2, 2)"); + + statement.execute("ALTER TABLE tmulti RENAME COLUMN b TO c"); + statement.execute("INSERT INTO tmulti (time, c) VALUES (3, 3)"); + + statement.execute("ALTER TABLE tmulti RENAME COLUMN c TO a"); + statement.execute("INSERT INTO tmulti (time, a) VALUES (4, 4)"); + + ResultSet rs = statement.executeQuery("SELECT * FROM tmulti"); + for (int i = 1; i <= 4; i++) { + assertTrue(rs.next()); + assertEquals(i, rs.getLong(1)); + assertEquals(i, rs.getInt(2)); + } + assertFalse(rs.next()); + } + } + + @Test + public void testRenameNonExistentColumn() throws Exception { + try (final Connection connection = + EnvFactory.getEnv().getConnection(BaseEnv.TABLE_SQL_DIALECT); + final Statement statement = connection.createStatement()) { + statement.execute("DROP DATABASE IF EXISTS testdb"); + statement.execute("CREATE DATABASE IF NOT EXISTS testdb"); + statement.execute("USE testdb"); + + statement.execute("CREATE TABLE IF NOT EXISTS tnonexist (x int32)"); + + try { + statement.execute("ALTER TABLE tnonexist RENAME COLUMN y TO z"); + fail(); + } catch (final SQLException e) { + // error should 
indicate column does not exist (use code 616 + contains) + assertTrue(e.getMessage().startsWith("616")); + assertTrue( + e.getMessage().toLowerCase().contains("does not exist") + || e.getMessage().toLowerCase().contains("cannot be resolved")); + } + } + } + + // Helper: recognize SQLExceptions that mean the target table/device cannot be found. + private static boolean isTableNotFound(final SQLException e) { + if (e == null) { + return false; + } + final String msg = e.getMessage(); + if (msg == null) { + return false; + } + final String lm = msg.toLowerCase(); + // code 550 is commonly used for 'does not exist' in this project; also match textual phrases + return msg.startsWith("550") || lm.contains("not exist"); + } + + @Test(timeout = 120000) + @SuppressWarnings("resource") + public void testConcurrentRenameVsQueries() throws Throwable { + if (EnvFactory.getEnv().getDataNodeWrapperList().size() > 1) { + // The RequestDelegate cannot be used in the cluster mode with concurrent operations + // Because the results may vary due to concurrent operations + return; + } + try (final Connection connection = + EnvFactory.getEnv().getConnection(BaseEnv.TABLE_SQL_DIALECT); + final Statement stmt = connection.createStatement()) { + final String db = "concrenamedb"; + final int tableCount = 6; + final int rows = 50; + stmt.execute("DROP DATABASE IF EXISTS " + db); + stmt.execute("CREATE DATABASE IF NOT EXISTS " + db); + stmt.execute("USE " + db); + + final String[] names = new String[tableCount]; + for (int i = 0; i < tableCount; i++) { + names[i] = "crtable" + i; + stmt.execute(String.format("CREATE TABLE IF NOT EXISTS %s (v int32)", names[i])); + for (int r = 1; r <= rows; r++) { + stmt.execute(String.format("INSERT INTO %s (time, v) VALUES (%d, %d)", names[i], r, r)); + } + } + + final AtomicReference err = new AtomicReference<>(); + final CountDownLatch startLatch = new CountDownLatch(1); + final CountDownLatch doneLatch = new CountDownLatch(4); + + ExecutorService exec = 
null; + try { + exec = Executors.newFixedThreadPool(8); + + // Renamer task: rotate rename a subset of tables repeatedly + exec.submit( + () -> { + try (final Connection c = + EnvFactory.getEnv().getConnection(BaseEnv.TABLE_SQL_DIALECT); + final Statement s = c.createStatement()) { + startLatch.await(); + // ensure this thread's connection uses the test database + try { + s.execute("USE " + db); + } catch (final SQLException ignore) { + } + for (int round = 0; round < 20 && err.get() == null; round++) { + for (int i = 0; i < tableCount / 2; i++) { + final String oldName = names[i]; + final String newName = oldName + "_r" + round; + try { + s.execute(String.format("ALTER TABLE %s RENAME TO %s", oldName, newName)); + // reflect change locally so queries target updated names + names[i] = newName; + } catch (final SQLException ex) { + // Only ignore if the failure is due to table not existing; otherwise record + // the error + if (isTableNotFound(ex)) { + // table not found: likely a transient race with concurrent rename — ignore + // and log + System.out.println( + "Ignored table-not-found during rename: " + ex.getMessage()); + } else { + err.compareAndSet(null, ex); + } + } + } + try { + Thread.sleep(50); + } catch (final InterruptedException ie) { + Thread.currentThread().interrupt(); + break; + } + } + } catch (final Throwable t) { + err.compareAndSet(null, t); + } finally { + doneLatch.countDown(); + } + }); + + // Queryer tasks: continuously query random tables + for (int q = 0; q < 2; q++) { + exec.submit( + () -> { + try (final Connection c = + EnvFactory.getEnv().getConnection(BaseEnv.TABLE_SQL_DIALECT); + final Statement s = c.createStatement()) { + final Random rnd = new Random(); + startLatch.await(); + // ensure this thread's connection uses the test database + try { + s.execute("USE " + db); + } catch (final SQLException ignore) { + } + for (int iter = 0; iter < 200 && err.get() == null; iter++) { + final int idx = rnd.nextInt(tableCount); + final String 
tname = names[idx]; + try (final ResultSet rs = s.executeQuery("SELECT count(*) FROM " + tname)) { + if (rs.next()) { + rs.getLong(1); + } + } catch (final SQLException ex) { + // Only ignore table-not-found; otherwise surface the error to fail the test + if (!isTableNotFound(ex)) { + err.compareAndSet(null, ex); + break; + } + } + } + } catch (final Throwable t) { + err.compareAndSet(null, t); + } finally { + doneLatch.countDown(); + } + }); + } + + // Another queryer to trigger more parallel access + exec.submit( + () -> { + try (final Connection c = + EnvFactory.getEnv().getConnection(BaseEnv.TABLE_SQL_DIALECT); + final Statement s = c.createStatement()) { + startLatch.await(); + // ensure this thread's connection uses the test database + try { + s.execute("USE " + db); + } catch (final SQLException ignore) { + } + for (int iter = 0; iter < 200 && err.get() == null; iter++) { + for (int i = 0; i < tableCount; i++) { + try (final ResultSet rs = + s.executeQuery("SELECT * FROM " + names[i] + " LIMIT 1")) { + // consume + while (rs.next()) { + rs.getLong(1); + } + } catch (final SQLException ex) { + if (!isTableNotFound(ex)) { + err.compareAndSet(null, ex); + break; + } + } + } + } + } catch (final Throwable t) { + err.compareAndSet(null, t); + } finally { + doneLatch.countDown(); + } + }); + + // start + startLatch.countDown(); + // wait for tasks + doneLatch.await(); + + if (err.get() != null) { + throw err.get(); + } + } finally { + if (exec != null) { + exec.shutdownNow(); + } + } + } + } + + @Test + public void testMultiTableCrossCheckAfterRenames() throws Exception { + try (final Connection connection = + EnvFactory.getEnv().getConnection(BaseEnv.TABLE_SQL_DIALECT); + final Statement stmt = connection.createStatement()) { + final String db = "multicheckdb"; + stmt.execute("DROP DATABASE IF EXISTS " + db); + stmt.execute("CREATE DATABASE IF NOT EXISTS " + db); + stmt.execute("USE " + db); + + // create two related tables + stmt.execute("CREATE TABLE IF NOT 
EXISTS mta (k int32)"); + stmt.execute("CREATE TABLE IF NOT EXISTS mtb (k int32)"); + + for (int i = 1; i <= 10; i++) { + stmt.execute(String.format("INSERT INTO mta (time, k) VALUES (%d, %d)", i, i)); + stmt.execute(String.format("INSERT INTO mtb (time, k) VALUES (%d, %d)", i, i)); + } + + // baseline: read aggregates + long aCount = 0, bCount = 0; + try (final ResultSet ra = stmt.executeQuery("SELECT count(*) FROM mta")) { + if (ra.next()) { + aCount = ra.getLong(1); + } + } + try (final ResultSet rb = stmt.executeQuery("SELECT count(*) FROM mtb")) { + if (rb.next()) { + bCount = rb.getLong(1); + } + } + + // rename one table and verify cross results remain consistent when queried separately + stmt.execute("ALTER TABLE mtb RENAME TO mtb_renamed"); + + long bCountAfter = 0; + try (final ResultSet rb2 = stmt.executeQuery("SELECT count(*) FROM mtb_renamed")) { + if (rb2.next()) { + bCountAfter = rb2.getLong(1); + } + } + + // assert counts unchanged + assertEquals(bCount, bCountAfter); + assertEquals(10, aCount); + + // rename the other table and verify again + stmt.execute("ALTER TABLE mta RENAME TO mta_renamed"); + long aCountAfter = 0; + try (final ResultSet ra2 = stmt.executeQuery("SELECT count(*) FROM mta_renamed")) { + if (ra2.next()) { + aCountAfter = ra2.getLong(1); + } + } + assertEquals(aCount, aCountAfter); + } + } + + @Ignore("Performance test, not for regular CI") + @Test + public void testPerformanceWithQuotedSpecialNameRenames() throws Exception { + try (final Connection connection = + EnvFactory.getEnv().getConnection(BaseEnv.TABLE_SQL_DIALECT); + final Statement stmt = connection.createStatement(); + final ITableSession session = EnvFactory.getEnv().getTableSessionConnection()) { + final String db = "perfquotedb"; + final int numColsPerTable = 100; + final int numTables = 800; + final int numRowsPerFile = 1000; + final int numFilesPerTable = 5; + final int runs = 10; + final float ratioAlteredTables = 0.5f; + stmt.execute("DROP DATABASE IF EXISTS " 
+ db); + stmt.execute("CREATE DATABASE IF NOT EXISTS " + db); + stmt.execute("USE " + db); + stmt.execute("set configuration enable_seq_space_compaction='false'"); + session.executeNonQueryStatement("USE " + db); + + final String[] names = new String[numTables]; + StringBuilder createTableTemplate = new StringBuilder("CREATE TABLE IF NOT EXISTS %s ("); + for (int c = 0; c < numColsPerTable; c++) { + createTableTemplate.append(String.format("v%d int32,", c)); + } + createTableTemplate = + new StringBuilder( + createTableTemplate.substring(0, createTableTemplate.length() - 1) + ")"); + List columns = new ArrayList<>(); + for (int i = 0; i < numColsPerTable; i++) { + columns.add(new ColumnSchema("v" + i, TSDataType.INT32, ColumnCategory.FIELD)); + } + TableSchema tableSchema = + new TableSchema( + "", // place holder + columns); + + System.out.println("Start data preparation..."); + for (int i = 0; i < numTables; i++) { + names[i] = "qtable" + i; + stmt.execute(String.format(createTableTemplate.toString(), names[i])); + tableSchema.setTableName(names[i]); + Tablet tablet = + new Tablet( + tableSchema.getTableName(), + tableSchema.getColumnSchemas().stream() + .map(IMeasurementSchema::getMeasurementName) + .collect(Collectors.toList()), + tableSchema.getColumnSchemas().stream() + .map(IMeasurementSchema::getType) + .collect(Collectors.toList()), + tableSchema.getColumnTypes(), + numRowsPerFile); + for (int j = 0; j < numFilesPerTable; j++) { + tablet.reset(); + for (int r = 1; r <= numRowsPerFile; r++) { + tablet.addTimestamp(r - 1, r + j * numRowsPerFile); + for (int c = 0; c < numColsPerTable; c++) { + tablet.addValue(r - 1, c, r + j * numRowsPerFile); + } + } + session.insert(tablet); + stmt.execute("FLUSH"); + } + } + System.out.println("Data preparation done."); + + // baseline measurement: simple average over a few runs + double totalMs = 0.0; + for (int run = 0; run < runs; run++) { + final long start = System.nanoTime(); + for (int i = 0; i < numTables; i++) { 
+ try (final ResultSet rs = stmt.executeQuery("SELECT count(*) FROM " + names[i])) { + assertTrue(rs.next()); + assertEquals(numRowsPerFile * numFilesPerTable, rs.getLong(1)); + } + } + final long end = System.nanoTime(); + if (run > runs * 0.1) { + totalMs += (end - start) / 1_000_000.0; + } + } + final double baseline = totalMs / (runs * 0.9); + System.out.println("baseline_total_ms=" + String.format("%.3f", baseline)); + + // rename some of them to quoted special names and measure again + for (int i = 0; i < numTables * ratioAlteredTables; i++) { + final String oldName = names[i]; + final String newName = "\"" + oldName + "-特\""; // quoted name + stmt.execute(String.format("ALTER TABLE %s RENAME TO %s", oldName, newName)); + names[i] = newName; + } + + totalMs = 0.0; + for (int run = 0; run < runs; run++) { + final long start = System.nanoTime(); + for (int i = 0; i < numTables; i++) { + try (final ResultSet rs = stmt.executeQuery("SELECT count(*) FROM " + names[i])) { + assertTrue(rs.next()); + assertEquals(numRowsPerFile * numFilesPerTable, rs.getLong(1)); + } + } + final long end = System.nanoTime(); + if (run > runs * 0.1) { + totalMs += (end - start) / 1_000_000.0; + } + } + final double after = totalMs / (runs * 0.9); + System.out.println("after_quoted_total_ms=" + String.format("%.3f", after)); + + // basic sanity: ensure queries still return counts + for (int i = 0; i < numTables; i++) { + try (final ResultSet rs = stmt.executeQuery("SELECT count(*) FROM " + names[i])) { + assertTrue(rs.next()); + assertEquals(numRowsPerFile * numFilesPerTable, rs.getLong(1)); + } + } + } + } + + @Test + public void testAlterTableAndColumnTogether() throws Exception { + try (final Connection connection = + EnvFactory.getEnv().getConnection(BaseEnv.TABLE_SQL_DIALECT); + final Statement stmt = connection.createStatement()) { + final String db = "dualalterdb"; + stmt.execute("DROP DATABASE IF EXISTS " + db); + stmt.execute("CREATE DATABASE IF NOT EXISTS " + db); + 
stmt.execute("USE " + db); + + stmt.execute("CREATE TABLE IF NOT EXISTS tab1 (c1 int32, c2 int32)"); + stmt.execute("INSERT INTO tab1 (time, c1, c2) VALUES (1, 1, 10)"); + + // rename column first and then rename table + stmt.execute("ALTER TABLE tab1 RENAME COLUMN c1 TO c1_new"); + stmt.execute("ALTER TABLE tab1 RENAME TO tab1_new"); + + // old table name should not exist + try { + stmt.execute("INSERT INTO tab1 (time, c1_new) VALUES (2, 2)"); + fail(); + } catch (final SQLException e) { + assertTrue( + e.getMessage().startsWith("550") + || e.getMessage().toLowerCase().contains("does not exist")); + } + + // inserting using new table and new column names should succeed + stmt.execute("INSERT INTO tab1_new (time, c1_new, c2) VALUES (2, 2, 20)"); + + // verify data + try (final ResultSet rs = stmt.executeQuery("SELECT * FROM tab1_new ORDER BY time")) { + assertTrue(rs.next()); + assertEquals(1, rs.getLong(1)); + assertEquals(1, rs.getInt("c1_new")); + assertEquals(10, rs.getInt("c2")); + + assertTrue(rs.next()); + assertEquals(2, rs.getLong(1)); + assertEquals(2, rs.getInt("c1_new")); + assertEquals(20, rs.getInt("c2")); + + assertFalse(rs.next()); + } + + // rename column again on the renamed table and verify + stmt.execute("ALTER TABLE tab1_new RENAME COLUMN c1_new TO c1_final"); + try { + // old column identifier should fail + stmt.execute("INSERT INTO tab1_new (time, c1_new) VALUES (3, 3)"); + fail(); + } catch (final SQLException e) { + assertTrue( + e.getMessage().startsWith("616") + || e.getMessage().toLowerCase().contains("unknown") + || e.getMessage().toLowerCase().contains("cannot be resolved")); + } + + // use final name + stmt.execute("INSERT INTO tab1_new (time, c1_final, c2) VALUES (3, 3, 30)"); + try (final ResultSet rs = stmt.executeQuery("SELECT count(*) FROM tab1_new")) { + if (rs.next()) { + assertEquals(3L, rs.getLong(1)); + } else { + fail(); + } + } + } + } + + @Test + public void testAlterTableAndColumnWithCompaction() throws Exception { + try 
(final Connection connection = + EnvFactory.getEnv().getConnection(BaseEnv.TABLE_SQL_DIALECT); + final Statement stmt = connection.createStatement()) { + if (EnvFactory.getEnv().getDataNodeWrapperList() != null + && EnvFactory.getEnv().getDataNodeWrapperList().size() > 1) { + // file distribution is not deterministic in cluster mode, so skip this test + return; + } + + final String db = "dualalterdb"; + stmt.execute("DROP DATABASE IF EXISTS " + db); + stmt.execute("CREATE DATABASE IF NOT EXISTS " + db); + stmt.execute("USE " + db); + + stmt.execute("CREATE TABLE IF NOT EXISTS tab1 (c1 int32, c2 int32)"); + stmt.execute("INSERT INTO tab1 (time, c1, c2) VALUES (1, 1, 10)"); + + // rename column first and then rename table + stmt.execute("ALTER TABLE tab1 RENAME COLUMN c1 TO c1_new"); + stmt.execute("ALTER TABLE tab1 RENAME TO tab1_new"); + + // inserting using new table and new column names should succeed + stmt.execute("INSERT INTO tab1_new (time, c1_new, c2) VALUES (2, 2, 20)"); + + // rename column again on the renamed table and verify + stmt.execute("ALTER TABLE tab1_new RENAME COLUMN c1_new TO c1_final"); + // use final name + stmt.execute("INSERT INTO tab1_new (time, c1_final, c2) VALUES (3, 3, 30)"); + + stmt.execute("FLUSH"); + + Awaitility.await() + .atMost(30, TimeUnit.SECONDS) + .until( + () -> { + DataNodeWrapper nodeWrapper = EnvFactory.getEnv().getDataNodeWrapper(0); + String dataDir = + nodeWrapper.getDataNodeDir() + + File.separator + + "data" + + File.separator + + "sequence"; + File dbDir = new File(dataDir, db); + String[] regionList = dbDir.list(); + assertNotNull(regionList); + assertEquals(1, regionList.length); + File regionDir = new File(dbDir, regionList[0]); + String[] partitionList = regionDir.list(); + assertNotNull(partitionList); + assertEquals(1, partitionList.length); + File partitionDir = new File(regionDir, partitionList[0]); + File[] fileList = partitionDir.listFiles(); + assertNotNull(fileList); + for (File file : fileList) { + if 
(file.getName().endsWith(TsFileConstant.TSFILE_SUFFIX)) { + TsFileID tsFileID = new TsFileID(file.getAbsolutePath()); + if (tsFileID.getInnerCompactionCount() == 0) { + return false; + } + } + } + return true; + }); + + try (final ResultSet rs = stmt.executeQuery("SELECT count(*) FROM tab1_new")) { + if (rs.next()) { + assertEquals(3L, rs.getLong(1)); + } else { + fail(); + } + } + } + } } diff --git a/integration-test/src/test/java/org/apache/iotdb/relational/it/schema/IoTDBTablePermissionRenameIT.java b/integration-test/src/test/java/org/apache/iotdb/relational/it/schema/IoTDBTablePermissionRenameIT.java new file mode 100644 index 0000000000000..9f5c2c2c66927 --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/relational/it/schema/IoTDBTablePermissionRenameIT.java @@ -0,0 +1,255 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.relational.it.schema; + +import org.apache.iotdb.it.env.EnvFactory; +import org.apache.iotdb.it.framework.IoTDBTestRunner; +import org.apache.iotdb.itbase.category.TableClusterIT; +import org.apache.iotdb.itbase.category.TableLocalStandaloneIT; +import org.apache.iotdb.itbase.env.BaseEnv; + +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; + +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +@RunWith(IoTDBTestRunner.class) +@Category({TableLocalStandaloneIT.class, TableClusterIT.class}) +public class IoTDBTablePermissionRenameIT { + + @BeforeClass + public static void setUp() throws Exception { + EnvFactory.getEnv().initClusterEnvironment(); + } + + @AfterClass + public static void tearDown() throws Exception { + EnvFactory.getEnv().cleanClusterEnvironment(); + } + + @Test + public void testRenameTablePermissionDenied() throws Exception { + // admin setup + try (final Connection adminCon = EnvFactory.getEnv().getConnection(BaseEnv.TABLE_SQL_DIALECT); + final Statement adminStmt = adminCon.createStatement()) { + adminStmt.execute("DROP DATABASE IF EXISTS permdb"); + adminStmt.execute("CREATE DATABASE IF NOT EXISTS permdb"); + adminStmt.execute("CREATE USER permUser 'permPass123456@'"); + adminStmt.execute("USE permdb"); + adminStmt.execute("CREATE TABLE IF NOT EXISTS t1 (s1 int32)"); + } + + // grant a non-ALTER privilege so the user can USE the database + try (final Connection adminCon = EnvFactory.getEnv().getConnection(BaseEnv.TABLE_SQL_DIALECT); + final Statement adminStmt = adminCon.createStatement()) { + adminStmt.execute("USE permdb"); + adminStmt.execute("GRANT SELECT 
ON TABLE t1 TO USER permUser"); + } + + // user attempts rename without ALTER -> expect access denied + try (final Connection userCon = + EnvFactory.getEnv() + .getConnection("permUser", "permPass123456@", BaseEnv.TABLE_SQL_DIALECT); + final Statement userStmt = userCon.createStatement()) { + // ensure user is using the target database + userStmt.execute("USE permdb"); + try { + userStmt.execute("ALTER TABLE t1 RENAME TO t1_new"); + fail(); + } catch (final SQLException e) { + assertEquals( + "803: Access Denied: No permissions for this operation, please add privilege ALTER ON permdb.t1", + e.getMessage()); + } + } + + // grant ALTER and retry + try (final Connection adminCon = EnvFactory.getEnv().getConnection(BaseEnv.TABLE_SQL_DIALECT); + final Statement adminStmt = adminCon.createStatement()) { + // ensure we're operating in the target database + adminStmt.execute("USE permdb"); + adminStmt.execute("GRANT ALTER,INSERT,SELECT ON TABLE t1 TO USER permUser"); + } + + try (final Connection userCon = + EnvFactory.getEnv() + .getConnection("permUser", "permPass123456@", BaseEnv.TABLE_SQL_DIALECT); + final Statement userStmt = userCon.createStatement()) { + // ensure user is using the target database + userStmt.execute("USE permdb"); + userStmt.execute("ALTER TABLE t1 RENAME TO t1_new"); + userStmt.execute("INSERT INTO t1_new (time, s1) VALUES (1, 1)"); + try (final ResultSet rs = userStmt.executeQuery("SELECT * FROM t1_new")) { + assertTrue(rs.next()); + assertEquals(1L, rs.getLong(1)); + assertEquals(1, rs.getInt(2)); + assertFalse(rs.next()); + } + } + + // cleanup + try (final Connection adminCon = EnvFactory.getEnv().getConnection(BaseEnv.TABLE_SQL_DIALECT); + final Statement adminStmt = adminCon.createStatement()) { + // operate in the database for cleanup + try { + adminStmt.execute("USE permdb"); + } catch (SQLException ignore) { + } + try { + adminStmt.execute("REVOKE ALTER ON TABLE t1_new FROM USER permUser"); + } catch (SQLException ignore) { + } + try { + 
adminStmt.execute("REVOKE SELECT ON TABLE t1 FROM USER permUser"); + } catch (SQLException ignore) { + } + try { + adminStmt.execute("DROP TABLE IF EXISTS t1_new"); + } catch (SQLException ignore) { + } + try { + adminStmt.execute("DROP DATABASE IF EXISTS permdb"); + } catch (SQLException ignore) { + } + try { + adminStmt.execute("DROP USER permUser"); + } catch (SQLException ignore) { + } + } + } + + @Test + public void testRenameColumnPermissionDenied() throws Exception { + // admin setup + try (final Connection adminCon = EnvFactory.getEnv().getConnection(BaseEnv.TABLE_SQL_DIALECT); + final Statement adminStmt = adminCon.createStatement()) { + adminStmt.execute("DROP DATABASE IF EXISTS col_db"); + adminStmt.execute("CREATE DATABASE IF NOT EXISTS col_db"); + adminStmt.execute("CREATE USER colUser 'colPass123456@'"); + adminStmt.execute("USE col_db"); + adminStmt.execute("CREATE TABLE IF NOT EXISTS tc (c1 int32)"); + adminStmt.execute("INSERT INTO tc (time, c1) VALUES (1, 1)"); + } + + // grant a non-ALTER privilege so the user can USE the database + try (final Connection adminCon = EnvFactory.getEnv().getConnection(BaseEnv.TABLE_SQL_DIALECT); + final Statement adminStmt = adminCon.createStatement()) { + adminStmt.execute("USE col_db"); + adminStmt.execute("GRANT SELECT ON TABLE tc TO USER colUser"); + } + + // user attempts rename column without ALTER -> expect access denied + try (final Connection userCon = + EnvFactory.getEnv() + .getConnection("colUser", "colPass123456@", BaseEnv.TABLE_SQL_DIALECT); + final Statement userStmt = userCon.createStatement()) { + // ensure user is using the target database + userStmt.execute("USE col_db"); + try { + userStmt.execute("ALTER TABLE tc RENAME COLUMN c1 TO c2"); + fail(); + } catch (final SQLException e) { + assertEquals( + "803: Access Denied: No permissions for this operation, please add privilege ALTER ON col_db.tc", + e.getMessage()); + } + } + + // grant ALTER and retry + try (final Connection adminCon = 
EnvFactory.getEnv().getConnection(BaseEnv.TABLE_SQL_DIALECT); + final Statement adminStmt = adminCon.createStatement()) { + // ensure we're operating in the target database + adminStmt.execute("USE col_db"); + adminStmt.execute("GRANT ALTER,INSERT,SELECT ON TABLE tc TO USER colUser"); + } + + try (final Connection userCon = + EnvFactory.getEnv() + .getConnection("colUser", "colPass123456@", BaseEnv.TABLE_SQL_DIALECT); + final Statement userStmt = userCon.createStatement()) { + // ensure user is using the target database + userStmt.execute("USE col_db"); + userStmt.execute("ALTER TABLE tc RENAME COLUMN c1 TO c2"); + userStmt.execute("INSERT INTO tc (time, c2) VALUES (2, 2)"); + try (final ResultSet rs = userStmt.executeQuery("SELECT * FROM tc ORDER BY time")) { + assertTrue(rs.next()); + assertEquals(1L, rs.getLong(1)); + assertEquals(1, rs.getInt(2)); + assertTrue(rs.next()); + assertEquals(2L, rs.getLong(1)); + assertEquals(2, rs.getInt(2)); + assertFalse(rs.next()); + } + + // verify metadata contains c2 + try (final ResultSet rs2 = userStmt.executeQuery("DESC tc")) { + boolean found = false; + while (rs2.next()) { + String colName = rs2.getString(1); + if ("c2".equalsIgnoreCase(colName)) { + found = true; + break; + } + } + assertTrue(found); + } + } + + // cleanup + try (final Connection adminCon = EnvFactory.getEnv().getConnection(BaseEnv.TABLE_SQL_DIALECT); + final Statement adminStmt = adminCon.createStatement()) { + try { + adminStmt.execute("USE col_db"); + } catch (SQLException ignore) { + } + try { + adminStmt.execute("REVOKE ALTER ON TABLE tc FROM USER colUser"); + } catch (SQLException ignore) { + } + try { + adminStmt.execute("REVOKE SELECT ON TABLE tc FROM USER colUser"); + } catch (SQLException ignore) { + } + try { + adminStmt.execute("DROP TABLE IF EXISTS tc"); + } catch (SQLException ignore) { + } + try { + adminStmt.execute("DROP DATABASE IF EXISTS col_db"); + } catch (SQLException ignore) { + } + try { + adminStmt.execute("DROP USER colUser"); 
+ } catch (SQLException ignore) { + } + } + } +} diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/CnToDnAsyncRequestType.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/CnToDnAsyncRequestType.java index e5753bf1bd184..676d6b8b487fb 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/CnToDnAsyncRequestType.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/CnToDnAsyncRequestType.java @@ -129,6 +129,8 @@ public enum CnToDnAsyncRequestType { DELETE_DATA_FOR_TABLE_DEVICE, DELETE_TABLE_DEVICE_IN_BLACK_LIST, DETECT_TREE_DEVICE_VIEW_FIELD_TYPE, + EVOLVE_DATA_REGION_SCHEMA, + EVOLVE_SCHEMA_REGION_SCHEMA, // audit log and event write-back INSERT_RECORD, diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/CnToDnInternalServiceAsyncRequestManager.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/CnToDnInternalServiceAsyncRequestManager.java index cd69f8b2c846d..b7c44ab66e40a 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/CnToDnInternalServiceAsyncRequestManager.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/CnToDnInternalServiceAsyncRequestManager.java @@ -64,6 +64,7 @@ import org.apache.iotdb.mpp.rpc.thrift.TCreatePipePluginInstanceReq; import org.apache.iotdb.mpp.rpc.thrift.TCreateSchemaRegionReq; import org.apache.iotdb.mpp.rpc.thrift.TCreateTriggerInstanceReq; +import org.apache.iotdb.mpp.rpc.thrift.TDataRegionEvolveSchemaReq; import org.apache.iotdb.mpp.rpc.thrift.TDeactivateTemplateReq; import org.apache.iotdb.mpp.rpc.thrift.TDeleteColumnDataReq; import org.apache.iotdb.mpp.rpc.thrift.TDeleteDataForDeleteSchemaReq; @@ -97,6 +98,7 @@ import org.apache.iotdb.mpp.rpc.thrift.TRollbackSchemaBlackListReq; import 
org.apache.iotdb.mpp.rpc.thrift.TRollbackSchemaBlackListWithTemplateReq; import org.apache.iotdb.mpp.rpc.thrift.TRollbackViewSchemaBlackListReq; +import org.apache.iotdb.mpp.rpc.thrift.TSchemaRegionEvolveSchemaReq; import org.apache.iotdb.mpp.rpc.thrift.TTableDeviceDeletionWithPatternAndFilterReq; import org.apache.iotdb.mpp.rpc.thrift.TTableDeviceDeletionWithPatternOrModReq; import org.apache.iotdb.mpp.rpc.thrift.TTableDeviceInvalidateCacheReq; @@ -440,6 +442,16 @@ protected void initActionMapBuilder() { (req, client, handler) -> client.deleteColumnData( (TDeleteColumnDataReq) req, (DataNodeTSStatusRPCHandler) handler)); + actionMapBuilder.put( + CnToDnAsyncRequestType.EVOLVE_DATA_REGION_SCHEMA, + (req, client, handler) -> + client.evolveSchemaInDataRegion( + (TDataRegionEvolveSchemaReq) req, (DataNodeTSStatusRPCHandler) handler)); + actionMapBuilder.put( + CnToDnAsyncRequestType.EVOLVE_SCHEMA_REGION_SCHEMA, + (req, client, handler) -> + client.evolveSchemaInSchemaRegion( + (TSchemaRegionEvolveSchemaReq) req, (DataNodeTSStatusRPCHandler) handler)); actionMapBuilder.put( CnToDnAsyncRequestType.CONSTRUCT_TABLE_DEVICE_BLACK_LIST, (req, client, handler) -> diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/DataNodeAsyncRequestRPCHandler.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/DataNodeAsyncRequestRPCHandler.java index b2e2ec3232781..2efabd4cbb902 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/DataNodeAsyncRequestRPCHandler.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/DataNodeAsyncRequestRPCHandler.java @@ -237,6 +237,8 @@ public static DataNodeAsyncRequestRPCHandler buildHandler( case DELETE_DEVICES_FOR_DROP_TABLE: case INVALIDATE_COLUMN_CACHE: case DELETE_COLUMN_DATA: + case EVOLVE_DATA_REGION_SCHEMA: + case EVOLVE_SCHEMA_REGION_SCHEMA: case 
CONSTRUCT_TABLE_DEVICE_BLACK_LIST: case ROLLBACK_TABLE_DEVICE_BLACK_LIST: case INVALIDATE_MATCHED_TABLE_DEVICE_CACHE: diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/table/RenameTableColumnPlan.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/table/RenameTableColumnPlan.java index eadbca3407f6f..861b50056efe7 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/table/RenameTableColumnPlan.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/table/RenameTableColumnPlan.java @@ -19,58 +19,61 @@ package org.apache.iotdb.confignode.consensus.request.write.table; +import org.apache.iotdb.commons.utils.IOUtils; import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlanType; -import org.apache.tsfile.utils.ReadWriteIOUtils; - import java.io.DataOutputStream; import java.io.IOException; import java.nio.ByteBuffer; +import java.util.List; public class RenameTableColumnPlan extends AbstractTablePlan { - private String oldName; - private String newName; + private List oldNames; + private List newNames; public RenameTableColumnPlan(final ConfigPhysicalPlanType type) { super(type); } public RenameTableColumnPlan( - final String database, final String tableName, final String oldName, final String newName) { - this(ConfigPhysicalPlanType.RenameTableColumn, database, tableName, oldName, newName); + final String database, + final String tableName, + final List oldNames, + final List newNames) { + this(ConfigPhysicalPlanType.RenameTableColumn, database, tableName, oldNames, newNames); } protected RenameTableColumnPlan( final ConfigPhysicalPlanType type, final String database, final String tableName, - final String oldName, - final String newName) { + final List oldNames, + final List newNames) { super(type, database, tableName); - this.oldName = oldName; - this.newName = newName; 
+ this.oldNames = oldNames; + this.newNames = newNames; } - public String getOldName() { - return oldName; + public List getOldNames() { + return oldNames; } - public String getNewName() { - return newName; + public List getNewNames() { + return newNames; } @Override protected void serializeImpl(final DataOutputStream stream) throws IOException { super.serializeImpl(stream); - ReadWriteIOUtils.write(oldName, stream); - ReadWriteIOUtils.write(newName, stream); + IOUtils.write(oldNames, stream); + IOUtils.write(newNames, stream); } @Override protected void deserializeImpl(final ByteBuffer buffer) throws IOException { super.deserializeImpl(buffer); - this.oldName = ReadWriteIOUtils.readString(buffer); - this.newName = ReadWriteIOUtils.readString(buffer); + this.oldNames = IOUtils.readStringList(buffer); + this.newNames = IOUtils.readStringList(buffer); } } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/table/view/RenameViewColumnPlan.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/table/view/RenameViewColumnPlan.java index 63857cf97b99b..2860898efee27 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/table/view/RenameViewColumnPlan.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/table/view/RenameViewColumnPlan.java @@ -22,13 +22,18 @@ import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlanType; import org.apache.iotdb.confignode.consensus.request.write.table.RenameTableColumnPlan; +import java.util.List; + public class RenameViewColumnPlan extends RenameTableColumnPlan { public RenameViewColumnPlan() { super(ConfigPhysicalPlanType.RenameViewColumn); } public RenameViewColumnPlan( - final String database, final String tableName, final String oldName, final String newName) { - super(ConfigPhysicalPlanType.RenameViewColumn, database, tableName, oldName, 
newName); + final String database, + final String tableName, + final List oldNames, + final List newNames) { + super(ConfigPhysicalPlanType.RenameViewColumn, database, tableName, oldNames, newNames); } } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/ConfigManager.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/ConfigManager.java index e51d1a43299b5..582ffd16220f3 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/ConfigManager.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/ConfigManager.java @@ -48,7 +48,11 @@ import org.apache.iotdb.commons.conf.IoTDBConstant; import org.apache.iotdb.commons.conf.TrimProperties; import org.apache.iotdb.commons.exception.IllegalPathException; +import org.apache.iotdb.commons.exception.IoTDBRuntimeException; import org.apache.iotdb.commons.exception.MetadataException; +import org.apache.iotdb.commons.partition.executor.SeriesPartitionExecutor.FullDeviceIdKey; +import org.apache.iotdb.commons.partition.executor.SeriesPartitionExecutor.NoTableNameDeviceIdKey; +import org.apache.iotdb.commons.partition.executor.SeriesPartitionExecutor.SeriesPartitionKey; import org.apache.iotdb.commons.path.MeasurementPath; import org.apache.iotdb.commons.path.PartialPath; import org.apache.iotdb.commons.path.PathDeserializeUtil; @@ -108,6 +112,7 @@ import org.apache.iotdb.confignode.consensus.response.template.TemplateSetInfoResp; import org.apache.iotdb.confignode.consensus.response.ttl.ShowTTLResp; import org.apache.iotdb.confignode.consensus.statemachine.ConfigRegionStateMachine; +import org.apache.iotdb.confignode.exception.DatabaseNotExistsException; import org.apache.iotdb.confignode.manager.consensus.ConsensusManager; import org.apache.iotdb.confignode.manager.cq.CQManager; import org.apache.iotdb.confignode.manager.externalservice.ExternalServiceInfo; @@ -259,6 +264,7 @@ import 
org.apache.tsfile.enums.TSDataType; import org.apache.tsfile.file.metadata.IDeviceID; +import org.apache.tsfile.file.metadata.IDeviceID.Factory; import org.apache.tsfile.utils.Pair; import org.apache.tsfile.utils.ReadWriteIOUtils; import org.slf4j.Logger; @@ -278,6 +284,8 @@ import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.NoSuchElementException; +import java.util.Optional; import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; @@ -825,9 +833,43 @@ private List calculateRelatedSlot( return Collections.emptyList(); } } + IDeviceID deviceID = Factory.DEFAULT_FACTORY.create(devicePath); + + SeriesPartitionKey seriesPartitionKey = getSeriesPartitionKey(deviceID, database.getFullPath()); return Collections.singletonList( - getPartitionManager() - .getSeriesPartitionSlot(IDeviceID.Factory.DEFAULT_FACTORY.create(devicePath))); + getPartitionManager().getSeriesPartitionSlot(seriesPartitionKey)); + } + + @SuppressWarnings("OptionalGetWithoutIsPresent") + @Override + public SeriesPartitionKey getSeriesPartitionKey(IDeviceID deviceID, String databaseName) { + SeriesPartitionKey seriesPartitionKey; + boolean isTableModel = false; + try { + TDatabaseSchema databaseSchema = + getClusterSchemaManager().getDatabaseSchemaByName(databaseName); + isTableModel = databaseSchema.isTableModel; + } catch (DatabaseNotExistsException e) { + throw new IoTDBRuntimeException(e, TSStatusCode.TABLE_NOT_EXISTS.getStatusCode()); + } + + if (isTableModel) { + try { + Optional tableOptional = + getClusterSchemaManager().getTableIfExists(databaseName, deviceID.getTableName()); + TsTable tsTable = tableOptional.get(); + boolean canAlterTableName = tsTable.canAlterName(); + seriesPartitionKey = + canAlterTableName + ? 
new NoTableNameDeviceIdKey(deviceID) + : new FullDeviceIdKey(deviceID); + } catch (NoSuchElementException | MetadataException e) { + throw new IoTDBRuntimeException(e, TSStatusCode.TABLE_NOT_EXISTS.getStatusCode()); + } + } else { + seriesPartitionKey = new FullDeviceIdKey(deviceID); + } + return seriesPartitionKey; } @Override @@ -921,9 +963,10 @@ public TSchemaPartitionTableResp getOrCreateSchemaPartition(final PathPatternTre for (final IDeviceID deviceID : devicePaths) { for (final String database : databases) { if (PathUtils.isStartWith(deviceID, database)) { + SeriesPartitionKey seriesPartitionKey = getSeriesPartitionKey(deviceID, database); partitionSlotsMap .computeIfAbsent(database, key -> new HashSet<>()) - .add(getPartitionManager().getSeriesPartitionSlot(deviceID)); + .add(getPartitionManager().getSeriesPartitionSlot(seriesPartitionKey)); break; } } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/IManager.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/IManager.java index fe83b8bd0c189..61748b69f9719 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/IManager.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/IManager.java @@ -33,6 +33,7 @@ import org.apache.iotdb.common.rpc.thrift.TShowConfigurationResp; import org.apache.iotdb.commons.auth.entity.PrivilegeUnion; import org.apache.iotdb.commons.cluster.NodeStatus; +import org.apache.iotdb.commons.partition.executor.SeriesPartitionExecutor.SeriesPartitionKey; import org.apache.iotdb.commons.path.PartialPath; import org.apache.iotdb.commons.path.PathPatternTree; import org.apache.iotdb.confignode.audit.CNAuditLogger; @@ -165,6 +166,8 @@ import org.apache.iotdb.consensus.common.DataSet; import org.apache.iotdb.rpc.TSStatusCode; +import org.apache.tsfile.file.metadata.IDeviceID; + import java.nio.ByteBuffer; import java.util.List; import java.util.Map; @@ -422,6 +425,9 @@ 
public interface IManager { */ TSStatus deleteDatabases(TDeleteDatabasesReq tDeleteReq); + @SuppressWarnings("OptionalGetWithoutIsPresent") + SeriesPartitionKey getSeriesPartitionKey(IDeviceID deviceID, String databaseName); + /** * Get SchemaPartition. * diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/ProcedureManager.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/ProcedureManager.java index 0fe3abc79a72b..ce725ae3a10d3 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/ProcedureManager.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/ProcedureManager.java @@ -42,6 +42,7 @@ import org.apache.iotdb.commons.schema.view.viewExpression.ViewExpression; import org.apache.iotdb.commons.service.metric.MetricService; import org.apache.iotdb.commons.trigger.TriggerInformation; +import org.apache.iotdb.commons.utils.IOUtils; import org.apache.iotdb.commons.utils.StatusUtils; import org.apache.iotdb.commons.utils.TestOnly; import org.apache.iotdb.confignode.conf.ConfigNodeConfig; @@ -1985,6 +1986,8 @@ public TSStatus alterTableSetProperties(final TAlterOrDropTableReq req) { public TSStatus alterTableRenameColumn(final TAlterOrDropTableReq req) { final boolean isView = req.isSetIsView() && req.isIsView(); + List oldNames = IOUtils.readStringList(req.updateInfo); + List newNames = IOUtils.readStringList(req.updateInfo); return executeWithoutDuplicate( req.database, null, @@ -1995,19 +1998,9 @@ public TSStatus alterTableRenameColumn(final TAlterOrDropTableReq req) { : ProcedureType.RENAME_TABLE_COLUMN_PROCEDURE, isView ? 
new RenameViewColumnProcedure( - req.database, - req.tableName, - req.queryId, - ReadWriteIOUtils.readString(req.updateInfo), - ReadWriteIOUtils.readString(req.updateInfo), - false) + req.database, req.tableName, req.queryId, oldNames, newNames, false) : new RenameTableColumnProcedure( - req.database, - req.tableName, - req.queryId, - ReadWriteIOUtils.readString(req.updateInfo), - ReadWriteIOUtils.readString(req.updateInfo), - false)); + req.database, req.tableName, req.queryId, oldNames, newNames, false)); } public TSStatus alterTableDropColumn(final TAlterOrDropTableReq req) { diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/partition/PartitionManager.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/partition/PartitionManager.java index 576d805c78624..f76fb5421322f 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/partition/PartitionManager.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/partition/PartitionManager.java @@ -35,6 +35,7 @@ import org.apache.iotdb.commons.partition.DataPartitionTable; import org.apache.iotdb.commons.partition.SchemaPartitionTable; import org.apache.iotdb.commons.partition.executor.SeriesPartitionExecutor; +import org.apache.iotdb.commons.partition.executor.SeriesPartitionExecutor.SeriesPartitionKey; import org.apache.iotdb.confignode.client.async.CnToDnAsyncRequestType; import org.apache.iotdb.confignode.client.async.CnToDnInternalServiceAsyncRequestManager; import org.apache.iotdb.confignode.client.async.handlers.DataNodeAsyncRequestContext; @@ -1055,11 +1056,11 @@ public boolean isDatabasePreDeleted(final String database) { /** * Get TSeriesPartitionSlot. 
* - * @param deviceID IDeviceID + * @param key IDeviceID * @return SeriesPartitionSlot */ - public TSeriesPartitionSlot getSeriesPartitionSlot(final IDeviceID deviceID) { - return executor.getSeriesPartitionSlot(deviceID); + public TSeriesPartitionSlot getSeriesPartitionSlot(final SeriesPartitionKey key) { + return executor.getSeriesPartitionSlot(key); } public RegionInfoListResp getRegionInfoList(final GetRegionInfoListPlan req) { @@ -1152,8 +1153,11 @@ public GetRegionIdResp getRegionId(final TGetRegionIdReq req) { } else { final IDeviceID deviceID = Deserializer.DEFAULT_DESERIALIZER.deserializeFrom(ByteBuffer.wrap(req.getDevice())); - plan.setDatabase(getClusterSchemaManager().getDatabaseNameByDevice(deviceID)); - plan.setSeriesSlotId(executor.getSeriesPartitionSlot(deviceID)); + String databaseName = getClusterSchemaManager().getDatabaseNameByDevice(deviceID); + plan.setDatabase(databaseName); + SeriesPartitionKey seriesPartitionKey = + configManager.getSeriesPartitionKey(deviceID, databaseName); + plan.setSeriesSlotId(executor.getSeriesPartitionSlot(seriesPartitionKey)); } if (Objects.equals(plan.getDatabase(), "")) { // Return empty result if Database not specified @@ -1189,8 +1193,11 @@ public GetTimeSlotListResp getTimeSlotList(TGetTimeSlotListReq req) { } else if (req.isSetDevice()) { IDeviceID deviceID = Deserializer.DEFAULT_DESERIALIZER.deserializeFrom(ByteBuffer.wrap(req.getDevice())); - plan.setDatabase(getClusterSchemaManager().getDatabaseNameByDevice(deviceID)); - plan.setSeriesSlotId(executor.getSeriesPartitionSlot(deviceID)); + String databaseName = getClusterSchemaManager().getDatabaseNameByDevice(deviceID); + plan.setDatabase(databaseName); + SeriesPartitionKey seriesPartitionKey = + configManager.getSeriesPartitionKey(deviceID, databaseName); + plan.setSeriesSlotId(executor.getSeriesPartitionSlot(seriesPartitionKey)); if (Objects.equals(plan.getDatabase(), "")) { // Return empty result if Database not specified return new 
GetTimeSlotListResp(RpcUtils.SUCCESS_STATUS, new ArrayList<>()); @@ -1218,8 +1225,11 @@ public CountTimeSlotListResp countTimeSlotList(TCountTimeSlotListReq req) { } else if (req.isSetDevice()) { IDeviceID deviceID = Deserializer.DEFAULT_DESERIALIZER.deserializeFrom(ByteBuffer.wrap(req.getDevice())); - plan.setDatabase(getClusterSchemaManager().getDatabaseNameByDevice(deviceID)); - plan.setSeriesSlotId(executor.getSeriesPartitionSlot(deviceID)); + String databaseName = getClusterSchemaManager().getDatabaseNameByDevice(deviceID); + plan.setDatabase(databaseName); + SeriesPartitionKey seriesPartitionKey = + configManager.getSeriesPartitionKey(deviceID, databaseName); + plan.setSeriesSlotId(executor.getSeriesPartitionSlot(seriesPartitionKey)); if (Objects.equals(plan.getDatabase(), "")) { // Return empty result if Database not specified return new CountTimeSlotListResp(RpcUtils.SUCCESS_STATUS, 0); diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/receiver/protocol/IoTDBConfigNodeReceiver.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/receiver/protocol/IoTDBConfigNodeReceiver.java index d9f6e07be2b2b..e408983980f60 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/receiver/protocol/IoTDBConfigNodeReceiver.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/receiver/protocol/IoTDBConfigNodeReceiver.java @@ -972,8 +972,8 @@ private TSStatus executePlan(final ConfigPhysicalPlan plan) throws ConsensusExce ((RenameTableColumnPlan) plan).getDatabase(), ((RenameTableColumnPlan) plan).getTableName(), queryId, - ((RenameTableColumnPlan) plan).getOldName(), - ((RenameTableColumnPlan) plan).getNewName(), + ((RenameTableColumnPlan) plan).getOldNames(), + ((RenameTableColumnPlan) plan).getNewNames(), shouldMarkAsPipeRequest.get())); case RenameViewColumn: return configManager @@ -988,8 +988,8 @@ private TSStatus executePlan(final 
ConfigPhysicalPlan plan) throws ConsensusExce ((RenameViewColumnPlan) plan).getDatabase(), ((RenameViewColumnPlan) plan).getTableName(), queryId, - ((RenameViewColumnPlan) plan).getOldName(), - ((RenameViewColumnPlan) plan).getNewName(), + ((RenameViewColumnPlan) plan).getOldNames(), + ((RenameViewColumnPlan) plan).getNewNames(), shouldMarkAsPipeRequest.get())); case CommitDeleteTable: return configManager diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/schema/ClusterSchemaManager.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/schema/ClusterSchemaManager.java index 490370e38c31a..ddea3f38881ad 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/schema/ClusterSchemaManager.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/schema/ClusterSchemaManager.java @@ -33,7 +33,6 @@ import org.apache.iotdb.commons.schema.table.TsTable; import org.apache.iotdb.commons.schema.table.TsTableInternalRPCUtil; import org.apache.iotdb.commons.schema.table.column.FieldColumnSchema; -import org.apache.iotdb.commons.schema.table.column.TsTableColumnCategory; import org.apache.iotdb.commons.schema.table.column.TsTableColumnSchema; import org.apache.iotdb.commons.schema.template.Template; import org.apache.iotdb.commons.service.metric.MetricService; @@ -1419,8 +1418,8 @@ public synchronized Pair tableColumnCheckForColumnAltering( public synchronized Pair tableColumnCheckForColumnRenaming( final String database, final String tableName, - final String oldName, - final String newName, + final List oldNames, + final List newNames, final boolean isTableView) throws MetadataException { final TsTable originalTable = getTableIfExists(database, tableName).orElse(null); @@ -1439,33 +1438,40 @@ public synchronized Pair tableColumnCheckForColumnRenaming( return result.get(); } - final TsTableColumnSchema schema = originalTable.getColumnSchema(oldName); - if 
(Objects.isNull(schema)) { - return new Pair<>( - RpcUtils.getStatus( - TSStatusCode.COLUMN_NOT_EXISTS, String.format("Column '%s' does not exist", oldName)), - null); - } + for (String oldName : oldNames) { + final TsTableColumnSchema schema = originalTable.getColumnSchema(oldName); + if (Objects.isNull(schema)) { + return new Pair<>( + RpcUtils.getStatus( + TSStatusCode.COLUMN_NOT_EXISTS, + String.format("Column '%s' does not exist", oldName)), + null); + } - if (schema.getColumnCategory() == TsTableColumnCategory.TIME) { - return new Pair<>( - RpcUtils.getStatus( - TSStatusCode.COLUMN_CATEGORY_MISMATCH, - "The renaming for time column is not supported."), - null); + if (schema.getDataType().equals(TSDataType.OBJECT)) { + return new Pair<>( + RpcUtils.getStatus( + TSStatusCode.SEMANTIC_ERROR, + String.format("Renaming Object column %s is currently unsupported", oldName)), + null); + } } - if (Objects.nonNull(originalTable.getColumnSchema(newName))) { - return new Pair<>( - RpcUtils.getStatus( - TSStatusCode.COLUMN_ALREADY_EXISTS, - "The new column name " + newName + " already exists"), - null); + for (String newName : newNames) { + if (Objects.nonNull(originalTable.getColumnSchema(newName))) { + return new Pair<>( + RpcUtils.getStatus( + TSStatusCode.COLUMN_ALREADY_EXISTS, + "The new column name " + newName + " already exists"), + null); + } } final TsTable expandedTable = new TsTable(originalTable); - expandedTable.renameColumnSchema(oldName, newName); + for (int i = 0; i < oldNames.size(); i++) { + expandedTable.renameColumnSchema(oldNames.get(i), newNames.get(i)); + } return new Pair<>(RpcUtils.SUCCESS_STATUS, expandedTable); } @@ -1486,12 +1492,37 @@ public synchronized Pair tableCheckForRenaming( null); } + if (!originalTable.canAlterName()) { + return new Pair<>( + RpcUtils.getStatus( + TSStatusCode.SEMANTIC_ERROR, + String.format( + "Table '%s.%s' is created in a old version and cannot be renamed, " + + "please migrate its data to a new table manually", + 
database, tableName)), + null); + } + final Optional> result = checkTable4View(database, originalTable, isTableView); if (result.isPresent()) { return result.get(); } + for (TsTableColumnSchema tsTableColumnSchema : originalTable.getColumnList()) { + if (tsTableColumnSchema.getDataType().equals(TSDataType.OBJECT)) { + if (!TreeViewSchema.isTreeViewTable(originalTable)) { + return new Pair<>( + RpcUtils.getStatus( + TSStatusCode.SEMANTIC_ERROR, + String.format( + "Table '%s.%s' contains Object column, renaming is currently unsupported", + database, tableName)), + null); + } + } + } + if (getTableIfExists(database, newName).isPresent()) { return new Pair<>( RpcUtils.getStatus( diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/auth/AuthorInfo.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/auth/AuthorInfo.java index 743e0a3f09a56..cc425fd431002 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/auth/AuthorInfo.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/auth/AuthorInfo.java @@ -36,6 +36,7 @@ import org.apache.iotdb.confignode.consensus.request.write.auth.AuthorPlan; import org.apache.iotdb.confignode.consensus.request.write.auth.AuthorRelationalPlan; import org.apache.iotdb.confignode.consensus.request.write.auth.AuthorTreePlan; +import org.apache.iotdb.confignode.consensus.request.write.table.RenameTablePlan; import org.apache.iotdb.confignode.consensus.response.auth.PermissionInfoResp; import org.apache.iotdb.confignode.rpc.thrift.TAuthizedPatternTreeResp; import org.apache.iotdb.confignode.rpc.thrift.TPermissionInfoResp; @@ -273,4 +274,8 @@ public void clear() throws AuthException { } authorizer.reset(); } + + public TSStatus renameTable(RenameTablePlan plan) { + return authorizer.renameTable(plan.getDatabase(), plan.getTableName(), plan.getNewName()); + } } diff --git 
a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/executor/ConfigPlanExecutor.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/executor/ConfigPlanExecutor.java index 12ba1d8840b49..3c657c2fd651a 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/executor/ConfigPlanExecutor.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/executor/ConfigPlanExecutor.java @@ -611,7 +611,12 @@ public TSStatus executeNonQueryPlan(ConfigPhysicalPlan physicalPlan) return clusterSchemaInfo.setTableColumnComment((SetTableColumnCommentPlan) physicalPlan); case RenameTable: case RenameView: - return clusterSchemaInfo.renameTable((RenameTablePlan) physicalPlan); + RenameTablePlan renameTablePlan = (RenameTablePlan) physicalPlan; + TSStatus tsStatus = clusterSchemaInfo.renameTable(renameTablePlan); + if (tsStatus.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { + return tsStatus; + } + return authorInfo.renameTable(renameTablePlan); case CreatePipeV2: return pipeInfo.createPipe((CreatePipePlanV2) physicalPlan); case SetPipeStatusV2: diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/schema/ClusterSchemaInfo.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/schema/ClusterSchemaInfo.java index e3ce7d7c29db1..408bf031a98f8 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/schema/ClusterSchemaInfo.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/schema/ClusterSchemaInfo.java @@ -1503,8 +1503,8 @@ public TSStatus renameTableColumn(final RenameTableColumnPlan plan) { tableModelMTree.renameTableColumn( getQualifiedDatabasePartialPath(plan.getDatabase()), plan.getTableName(), - plan.getOldName(), - plan.getNewName())); + plan.getOldNames(), + plan.getNewNames())); } public TSStatus setTableProperties(final 
SetTablePropertiesPlan plan) { diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/schema/ConfigMTree.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/schema/ConfigMTree.java index 6b47773813e31..e18a34cd47a14 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/schema/ConfigMTree.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/schema/ConfigMTree.java @@ -766,11 +766,13 @@ public void renameTable(final PartialPath database, final String tableName, fina public void renameTableColumn( final PartialPath database, final String tableName, - final String oldName, - final String newName) + final List oldNames, + final List newNames) throws MetadataException { final ConfigTableNode tableNode = getTableNode(database, tableName); - tableNode.getTable().renameColumnSchema(oldName, newName); + for (int i = 0; i < oldNames.size(); i++) { + tableNode.getTable().renameColumnSchema(oldNames.get(i), newNames.get(i)); + } } public void setTableComment( diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/table/CreateTableProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/table/CreateTableProcedure.java index d0fbdb605d12f..e882ffb59733d 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/table/CreateTableProcedure.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/table/CreateTableProcedure.java @@ -129,6 +129,10 @@ protected void checkTableExistence(final ConfigNodeProcedureEnv env) { && schema.getTTL() != Long.MAX_VALUE) { table.addProp(TsTable.TTL_PROPERTY, String.valueOf(schema.getTTL())); } + if (!table.getPropValue(TsTable.ALLOW_ALTER_NAME_PROPERTY).isPresent()) { + table.addProp( + TsTable.ALLOW_ALTER_NAME_PROPERTY, 
String.valueOf(TsTable.ALLOW_ALTER_NAME_DEFAULT)); + } setNextState(CreateTableState.PRE_CREATE); } } catch (final MetadataException | DatabaseNotExistsException e) { diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/table/RenameTableColumnProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/table/RenameTableColumnProcedure.java index da51dad267266..3c6de8fe1f519 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/table/RenameTableColumnProcedure.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/table/RenameTableColumnProcedure.java @@ -19,10 +19,14 @@ package org.apache.iotdb.confignode.procedure.impl.schema.table; +import org.apache.iotdb.common.rpc.thrift.TConsensusGroupId; +import org.apache.iotdb.common.rpc.thrift.TRegionReplicaSet; import org.apache.iotdb.common.rpc.thrift.TSStatus; import org.apache.iotdb.commons.exception.IoTDBException; import org.apache.iotdb.commons.exception.MetadataException; import org.apache.iotdb.commons.schema.table.TsTable; +import org.apache.iotdb.commons.utils.IOUtils; +import org.apache.iotdb.confignode.client.async.CnToDnAsyncRequestType; import org.apache.iotdb.confignode.consensus.request.write.table.RenameTableColumnPlan; import org.apache.iotdb.confignode.consensus.request.write.table.view.RenameViewColumnPlan; import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv; @@ -30,24 +34,30 @@ import org.apache.iotdb.confignode.procedure.impl.schema.table.view.RenameViewColumnProcedure; import org.apache.iotdb.confignode.procedure.state.schema.RenameTableColumnState; import org.apache.iotdb.confignode.procedure.store.ProcedureType; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.ColumnRename; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.SchemaEvolution; +import 
org.apache.iotdb.mpp.rpc.thrift.TDataRegionEvolveSchemaReq; import org.apache.iotdb.rpc.TSStatusCode; import org.apache.tsfile.utils.Pair; -import org.apache.tsfile.utils.ReadWriteIOUtils; +import org.apache.tsfile.utils.PublicBAOS; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.DataOutputStream; import java.io.IOException; import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; import java.util.Objects; public class RenameTableColumnProcedure extends AbstractAlterOrDropTableProcedure { private static final Logger LOGGER = LoggerFactory.getLogger(RenameTableColumnProcedure.class); - private String oldName; - private String newName; + private List oldNames; + private List newNames; public RenameTableColumnProcedure(final boolean isGeneratedByPipe) { super(isGeneratedByPipe); @@ -57,12 +67,12 @@ public RenameTableColumnProcedure( final String database, final String tableName, final String queryId, - final String oldName, - final String newName, + final List oldNames, + final List newNames, final boolean isGeneratedByPipe) { super(database, tableName, queryId, isGeneratedByPipe); - this.oldName = oldName; - this.newName = newName; + this.oldNames = oldNames; + this.newNames = newNames; } @Override @@ -84,6 +94,10 @@ protected Flow executeFromState( LOGGER.info("Rename column to table {}.{} on config node", database, tableName); renameColumn(env); break; + case EXECUTE_ON_REGION: + LOGGER.info("Rename column to table {}.{} on data regions", database, tableName); + executeOnRegions(env); + break; case COMMIT_RELEASE: LOGGER.info( "Commit release info of table {}.{} when renaming column", database, tableName); @@ -110,7 +124,11 @@ private void columnCheck(final ConfigNodeProcedureEnv env) { env.getConfigManager() .getClusterSchemaManager() .tableColumnCheckForColumnRenaming( - database, tableName, oldName, newName, this instanceof RenameViewColumnProcedure); + database, + tableName, + oldNames, + 
newNames, + this instanceof RenameViewColumnProcedure); final TSStatus status = result.getLeft(); if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { setFailure(new ProcedureException(new IoTDBException(status))); @@ -135,14 +153,43 @@ private void renameColumn(final ConfigNodeProcedureEnv env) { .getClusterSchemaManager() .executePlan( this instanceof RenameViewColumnProcedure - ? new RenameViewColumnPlan(database, tableName, oldName, newName) - : new RenameTableColumnPlan(database, tableName, oldName, newName), + ? new RenameViewColumnPlan(database, tableName, oldNames, newNames) + : new RenameTableColumnPlan(database, tableName, oldNames, newNames), isGeneratedByPipe); if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { setFailure(new ProcedureException(new IoTDBException(status))); } else { - setNextState(RenameTableColumnState.COMMIT_RELEASE); + setNextState(RenameTableColumnState.EXECUTE_ON_REGION); + } + } + + private void executeOnRegions(final ConfigNodeProcedureEnv env) { + final Map relatedRegionGroup = + env.getConfigManager().getRelatedDataRegionGroup4TableModel(database); + + if (!relatedRegionGroup.isEmpty()) { + List schemaEvolutions = new ArrayList<>(); + for (int i = 0; i < oldNames.size(); i++) { + schemaEvolutions.add(new ColumnRename(tableName, oldNames.get(i), newNames.get(i))); + } + PublicBAOS publicBAOS = new PublicBAOS(); + try { + SchemaEvolution.serializeList(schemaEvolutions, publicBAOS); + } catch (IOException ignored) { + } + ByteBuffer byteBuffer = ByteBuffer.wrap(publicBAOS.getBuf(), 0, publicBAOS.size()); + new TableRegionTaskExecutor<>( + "evolve data region schema", + env, + relatedRegionGroup, + CnToDnAsyncRequestType.EVOLVE_DATA_REGION_SCHEMA, + ((dataNodeLocation, consensusGroupIdList) -> + new TDataRegionEvolveSchemaReq( + new ArrayList<>(consensusGroupIdList), byteBuffer))) + .execute(); } + + setNextState(RenameTableColumnState.COMMIT_RELEASE); } @Override @@ -180,7 +227,7 @@ private 
void rollbackRenameColumn(final ConfigNodeProcedureEnv env) { env.getConfigManager() .getClusterSchemaManager() .executePlan( - new RenameTableColumnPlan(database, tableName, newName, oldName), + new RenameTableColumnPlan(database, tableName, newNames, oldNames), isGeneratedByPipe); if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { setFailure(new ProcedureException(new IoTDBException(status))); @@ -219,27 +266,27 @@ public void serialize(final DataOutputStream stream) throws IOException { protected void innerSerialize(final DataOutputStream stream) throws IOException { super.serialize(stream); - ReadWriteIOUtils.write(oldName, stream); - ReadWriteIOUtils.write(newName, stream); + IOUtils.write(oldNames, stream); + IOUtils.write(newNames, stream); } @Override public void deserialize(final ByteBuffer byteBuffer) { super.deserialize(byteBuffer); - this.oldName = ReadWriteIOUtils.readString(byteBuffer); - this.newName = ReadWriteIOUtils.readString(byteBuffer); + this.oldNames = IOUtils.readStringList(byteBuffer); + this.newNames = IOUtils.readStringList(byteBuffer); } @Override public boolean equals(final Object o) { return super.equals(o) - && Objects.equals(oldName, ((RenameTableColumnProcedure) o).oldName) - && Objects.equals(newName, ((RenameTableColumnProcedure) o).newName); + && Objects.equals(oldNames, ((RenameTableColumnProcedure) o).oldNames) + && Objects.equals(newNames, ((RenameTableColumnProcedure) o).newNames); } @Override public int hashCode() { - return Objects.hash(super.hashCode(), oldName, newName); + return Objects.hash(super.hashCode(), oldNames, newNames); } } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/table/RenameTableProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/table/RenameTableProcedure.java index 93d1035a7615c..4fe1b9971f5c6 100644 --- 
a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/table/RenameTableProcedure.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/table/RenameTableProcedure.java @@ -19,10 +19,16 @@ package org.apache.iotdb.confignode.procedure.impl.schema.table; +import org.apache.iotdb.common.rpc.thrift.TConsensusGroupId; +import org.apache.iotdb.common.rpc.thrift.TDataNodeConfiguration; +import org.apache.iotdb.common.rpc.thrift.TRegionReplicaSet; import org.apache.iotdb.common.rpc.thrift.TSStatus; import org.apache.iotdb.commons.exception.IoTDBException; import org.apache.iotdb.commons.exception.MetadataException; import org.apache.iotdb.commons.schema.table.TsTable; +import org.apache.iotdb.confignode.client.async.CnToDnAsyncRequestType; +import org.apache.iotdb.confignode.client.sync.CnToDnSyncRequestType; +import org.apache.iotdb.confignode.client.sync.SyncDataNodeClientPool; import org.apache.iotdb.confignode.consensus.request.write.table.RenameTablePlan; import org.apache.iotdb.confignode.consensus.request.write.table.view.RenameViewPlan; import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv; @@ -30,9 +36,15 @@ import org.apache.iotdb.confignode.procedure.impl.schema.table.view.RenameViewProcedure; import org.apache.iotdb.confignode.procedure.state.schema.RenameTableState; import org.apache.iotdb.confignode.procedure.store.ProcedureType; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.SchemaEvolution; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.TableRename; +import org.apache.iotdb.mpp.rpc.thrift.TDataRegionEvolveSchemaReq; +import org.apache.iotdb.mpp.rpc.thrift.TInvalidatePermissionCacheReq; +import org.apache.iotdb.mpp.rpc.thrift.TSchemaRegionEvolveSchemaReq; import org.apache.iotdb.rpc.TSStatusCode; import org.apache.tsfile.utils.Pair; +import org.apache.tsfile.utils.PublicBAOS; import 
org.apache.tsfile.utils.ReadWriteIOUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -40,6 +52,11 @@ import java.io.DataOutputStream; import java.io.IOException; import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; +import java.util.Map; public class RenameTableProcedure extends AbstractAlterOrDropTableProcedure { private static final Logger LOGGER = LoggerFactory.getLogger(RenameTableProcedure.class); @@ -74,9 +91,13 @@ protected Flow executeFromState(final ConfigNodeProcedureEnv env, final RenameTa preRelease(env); break; case RENAME_TABLE: - LOGGER.info("Rename column to table {}.{} on config node", database, tableName); + LOGGER.info("Rename table {}.{} on config node", database, tableName); renameTable(env); break; + case EXECUTE_ON_REGIONS: + LOGGER.info("Rename table {}.{} on regions", database, tableName); + executeOnRegions(env); + break; case COMMIT_RELEASE: LOGGER.info( "Commit release info of table {}.{} when renaming table", database, tableName); @@ -134,7 +155,95 @@ private void renameTable(final ConfigNodeProcedureEnv env) { if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { setFailure(new ProcedureException(new IoTDBException(status))); } else { - setNextState(RenameTableState.COMMIT_RELEASE); + setNextState(RenameTableState.EXECUTE_ON_REGIONS); + } + } + + private void executeOnRegions(final ConfigNodeProcedureEnv env) { + final Map relatedDataRegionGroup = + env.getConfigManager().getRelatedDataRegionGroup4TableModel(database); + + if (!relatedDataRegionGroup.isEmpty()) { + List schemaEvolutions = + Collections.singletonList(new TableRename(tableName, newName)); + PublicBAOS publicBAOS = new PublicBAOS(); + try { + SchemaEvolution.serializeList(schemaEvolutions, publicBAOS); + } catch (IOException ignored) { + } + ByteBuffer byteBuffer = ByteBuffer.wrap(publicBAOS.getBuf(), 0, publicBAOS.size()); + new 
TableRegionTaskExecutor<>( + "evolve data region schema", + env, + relatedDataRegionGroup, + CnToDnAsyncRequestType.EVOLVE_DATA_REGION_SCHEMA, + ((dataNodeLocation, consensusGroupIdList) -> + new TDataRegionEvolveSchemaReq( + new ArrayList<>(consensusGroupIdList), byteBuffer))) + .execute(); + } + + final Map relatedSchemaRegionGroup = + env.getConfigManager().getRelatedSchemaRegionGroup4TableModel(database); + + if (!relatedSchemaRegionGroup.isEmpty()) { + List schemaEvolutions = + Collections.singletonList(new TableRename(tableName, newName)); + PublicBAOS publicBAOS = new PublicBAOS(); + try { + SchemaEvolution.serializeList(schemaEvolutions, publicBAOS); + } catch (IOException ignored) { + } + ByteBuffer byteBuffer = ByteBuffer.wrap(publicBAOS.getBuf(), 0, publicBAOS.size()); + new TableRegionTaskExecutor<>( + "evolve schema region schema", + env, + relatedSchemaRegionGroup, + CnToDnAsyncRequestType.EVOLVE_SCHEMA_REGION_SCHEMA, + ((dataNodeLocation, consensusGroupIdList) -> + new TSchemaRegionEvolveSchemaReq( + new ArrayList<>(consensusGroupIdList), byteBuffer))) + .execute(); + } + + invalidateAuthCache(env); + + setNextState(RenameTableState.COMMIT_RELEASE); + } + + private void invalidateAuthCache(final ConfigNodeProcedureEnv env) { + TInvalidatePermissionCacheReq req = new TInvalidatePermissionCacheReq(); + // use all empty to invalidate all cache + req.setUsername(""); + req.setRoleName(""); + TSStatus status; + List allDataNodes = + env.getConfigManager().getNodeManager().getRegisteredDataNodes(); + List> dataNodesToInvalid = new ArrayList<>(); + for (TDataNodeConfiguration item : allDataNodes) { + dataNodesToInvalid.add(new Pair<>(item, System.currentTimeMillis())); + } + Iterator> it = dataNodesToInvalid.iterator(); + long timeoutMS = 10 * 60 * 1000; // 10 minutes + while (it.hasNext()) { + Pair pair = it.next(); + if (pair.getRight() + timeoutMS < System.currentTimeMillis()) { + LOGGER.error( + "invalidateAuthCache: timeout on {}, may need clear cache 
manually", + pair.getLeft().getLocation()); + it.remove(); + continue; + } + status = + (TSStatus) + SyncDataNodeClientPool.getInstance() + .sendSyncRequestToDataNodeWithRetry( + pair.getLeft().getLocation().getInternalEndPoint(), + req, + CnToDnSyncRequestType.INVALIDATE_PERMISSION_CACHE); + if (status.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) { + it.remove(); + } } } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/table/view/RenameViewColumnProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/table/view/RenameViewColumnProcedure.java index 1981e63976a4b..4adfd31545639 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/table/view/RenameViewColumnProcedure.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/table/view/RenameViewColumnProcedure.java @@ -24,6 +24,7 @@ import java.io.DataOutputStream; import java.io.IOException; +import java.util.List; public class RenameViewColumnProcedure extends RenameTableColumnProcedure { public RenameViewColumnProcedure(final boolean isGeneratedByPipe) { @@ -34,10 +35,10 @@ public RenameViewColumnProcedure( final String database, final String tableName, final String queryId, - final String oldName, - final String newName, + final List oldNames, + final List newNames, final boolean isGeneratedByPipe) { - super(database, tableName, queryId, oldName, newName, isGeneratedByPipe); + super(database, tableName, queryId, oldNames, newNames, isGeneratedByPipe); } @Override diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/state/schema/RenameTableColumnState.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/state/schema/RenameTableColumnState.java index 398ef64222440..83428eadd6d3e 100644 --- 
a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/state/schema/RenameTableColumnState.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/state/schema/RenameTableColumnState.java @@ -23,5 +23,6 @@ public enum RenameTableColumnState { COLUMN_CHECK, PRE_RELEASE, RENAME_COLUMN, + EXECUTE_ON_REGION, COMMIT_RELEASE } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/state/schema/RenameTableState.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/state/schema/RenameTableState.java index 1c71cb50182ed..6f9e2c295588c 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/state/schema/RenameTableState.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/state/schema/RenameTableState.java @@ -23,5 +23,6 @@ public enum RenameTableState { COLUMN_CHECK, PRE_RELEASE, RENAME_TABLE, + EXECUTE_ON_REGIONS, COMMIT_RELEASE } diff --git a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/consensus/request/ConfigPhysicalPlanSerDeTest.java b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/consensus/request/ConfigPhysicalPlanSerDeTest.java index 109e3c0d337d2..f56fd9c8b4b3a 100644 --- a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/consensus/request/ConfigPhysicalPlanSerDeTest.java +++ b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/consensus/request/ConfigPhysicalPlanSerDeTest.java @@ -1338,7 +1338,11 @@ public void RollbackCreateTablePlanTest() throws IOException { @Test public void RenameTableColumnPlanTest() throws IOException { final RenameTableColumnPlan renameTablePropertiesPlan0 = - new RenameTableColumnPlan("database1", "table1", "attr1", "att2"); + new RenameTableColumnPlan( + "database1", + "table1", + Collections.singletonList("attr1"), + Collections.singletonList("att2")); final RenameTableColumnPlan 
renameTablePropertiesPlan1 = (RenameTableColumnPlan) ConfigPhysicalPlan.Factory.create(renameTablePropertiesPlan0.serializeToByteBuffer()); @@ -1347,15 +1351,19 @@ public void RenameTableColumnPlanTest() throws IOException { Assert.assertEquals( renameTablePropertiesPlan0.getTableName(), renameTablePropertiesPlan1.getTableName()); Assert.assertEquals( - renameTablePropertiesPlan0.getOldName(), renameTablePropertiesPlan1.getOldName()); + renameTablePropertiesPlan0.getOldNames(), renameTablePropertiesPlan1.getOldNames()); Assert.assertEquals( - renameTablePropertiesPlan0.getNewName(), renameTablePropertiesPlan1.getNewName()); + renameTablePropertiesPlan0.getNewNames(), renameTablePropertiesPlan1.getNewNames()); } @Test public void RenameViewColumnPlanTest() throws IOException { final RenameViewColumnPlan renameViewPropertiesPlan0 = - new RenameViewColumnPlan("database1", "table1", "attr1", "att2"); + new RenameViewColumnPlan( + "database1", + "table1", + Collections.singletonList("attr1"), + Collections.singletonList("att2")); final RenameViewColumnPlan renameViewPropertiesPlan1 = (RenameViewColumnPlan) ConfigPhysicalPlan.Factory.create(renameViewPropertiesPlan0.serializeToByteBuffer()); @@ -1364,9 +1372,9 @@ public void RenameViewColumnPlanTest() throws IOException { Assert.assertEquals( renameViewPropertiesPlan0.getTableName(), renameViewPropertiesPlan1.getTableName()); Assert.assertEquals( - renameViewPropertiesPlan0.getOldName(), renameViewPropertiesPlan1.getOldName()); + renameViewPropertiesPlan0.getOldNames(), renameViewPropertiesPlan1.getOldNames()); Assert.assertEquals( - renameViewPropertiesPlan0.getNewName(), renameViewPropertiesPlan1.getNewName()); + renameViewPropertiesPlan0.getNewNames(), renameViewPropertiesPlan1.getNewNames()); } @Test diff --git a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/hash/DeviceGroupHashExecutorManualTest.java 
b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/hash/DeviceGroupHashExecutorManualTest.java index 628cc02524c5d..3cd19850cb306 100644 --- a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/hash/DeviceGroupHashExecutorManualTest.java +++ b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/hash/DeviceGroupHashExecutorManualTest.java @@ -18,6 +18,7 @@ */ package org.apache.iotdb.confignode.manager.hash; +import org.apache.iotdb.commons.partition.executor.SeriesPartitionExecutor.FullDeviceIdKey; import org.apache.iotdb.confignode.manager.ConfigManager; import org.apache.iotdb.confignode.manager.partition.PartitionManager; import org.apache.iotdb.confignode.persistence.partition.PartitionInfo; @@ -74,7 +75,7 @@ public void GeneralIndexTest() throws IOException { List devices = genBatchDevices(); totalTime -= System.currentTimeMillis(); for (IDeviceID device : devices) { - bucket[manager.getSeriesPartitionSlot(device).getSlotId()] += 1; + bucket[manager.getSeriesPartitionSlot(new FullDeviceIdKey(device)).getSlotId()] += 1; } totalTime += System.currentTimeMillis(); } diff --git a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/pipe/source/PipeConfigTablePatternParseVisitorTest.java b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/pipe/source/PipeConfigTablePatternParseVisitorTest.java index 229c0452dcc0a..f359426244603 100644 --- a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/pipe/source/PipeConfigTablePatternParseVisitorTest.java +++ b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/pipe/source/PipeConfigTablePatternParseVisitorTest.java @@ -138,17 +138,23 @@ public void testCommitDeleteViewColumn() { @Test public void testRenameTableColumn() { testInput( - new RenameTableColumnPlan("db1", "ab", "old", "new"), - new RenameTableColumnPlan("db1", "ac", "old", "new"), - new RenameTableColumnPlan("da", 
"ac", "old", "new")); + new RenameTableColumnPlan( + "db1", "ab", Collections.singletonList("old"), Collections.singletonList("new")), + new RenameTableColumnPlan( + "db1", "ac", Collections.singletonList("old"), Collections.singletonList("new")), + new RenameTableColumnPlan( + "da", "ac", Collections.singletonList("old"), Collections.singletonList("new"))); } @Test public void testRenameViewColumn() { testInput( - new RenameViewColumnPlan("db1", "ab", "old", "new"), - new RenameViewColumnPlan("db1", "ac", "old", "new"), - new RenameViewColumnPlan("da", "ac", "old", "new")); + new RenameViewColumnPlan( + "db1", "ab", Collections.singletonList("old"), Collections.singletonList("new")), + new RenameViewColumnPlan( + "db1", "ac", Collections.singletonList("old"), Collections.singletonList("new")), + new RenameViewColumnPlan( + "da", "ac", Collections.singletonList("old"), Collections.singletonList("new"))); } @Test diff --git a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/procedure/impl/pipe/receiver/PipeEnrichedProcedureTest.java b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/procedure/impl/pipe/receiver/PipeEnrichedProcedureTest.java index ae61fcfc0cf4b..889cb75f119e5 100644 --- a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/procedure/impl/pipe/receiver/PipeEnrichedProcedureTest.java +++ b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/procedure/impl/pipe/receiver/PipeEnrichedProcedureTest.java @@ -480,7 +480,13 @@ public void setTablePropertiesTest() throws IOException { @Test public void renameTableColumnTest() throws IOException { final RenameTableColumnProcedure renameTableColumnProcedure = - new RenameTableColumnProcedure("database1", "table1", "0", "oldName", "newName", true); + new RenameTableColumnProcedure( + "database1", + "table1", + "0", + Collections.singletonList("oldName"), + Collections.singletonList("newName"), + true); final ByteArrayOutputStream byteArrayOutputStream = new 
ByteArrayOutputStream(); final DataOutputStream dataOutputStream = new DataOutputStream(byteArrayOutputStream); @@ -693,7 +699,13 @@ public void dropViewTest() throws IOException { @Test public void renameViewColumnTest() throws IOException { final RenameViewColumnProcedure renameViewColumnProcedure = - new RenameViewColumnProcedure("database1", "table1", "0", "oldName", "newName", true); + new RenameViewColumnProcedure( + "database1", + "table1", + "0", + Collections.singletonList("oldName"), + Collections.singletonList("newName"), + true); final ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream(); final DataOutputStream dataOutputStream = new DataOutputStream(byteArrayOutputStream); diff --git a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/procedure/impl/schema/table/RenameTableColumnProcedureTest.java b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/procedure/impl/schema/table/RenameTableColumnProcedureTest.java index d510342bd91a4..7f6f1cf4f0873 100644 --- a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/procedure/impl/schema/table/RenameTableColumnProcedureTest.java +++ b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/procedure/impl/schema/table/RenameTableColumnProcedureTest.java @@ -29,12 +29,19 @@ import java.io.DataOutputStream; import java.io.IOException; import java.nio.ByteBuffer; +import java.util.Collections; public class RenameTableColumnProcedureTest { @Test public void serializeDeserializeTest() throws IllegalPathException, IOException { final RenameTableColumnProcedure renameTableColumnProcedure = - new RenameTableColumnProcedure("database1", "table1", "0", "oldName", "newName", false); + new RenameTableColumnProcedure( + "database1", + "table1", + "0", + Collections.singletonList("oldName"), + Collections.singletonList("newName"), + false); final ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream(); final DataOutputStream 
dataOutputStream = new DataOutputStream(byteArrayOutputStream); diff --git a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/procedure/impl/schema/table/view/RenameViewColumnProcedureTest.java b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/procedure/impl/schema/table/view/RenameViewColumnProcedureTest.java index 0c9a0083307bd..da27fb3d5a679 100644 --- a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/procedure/impl/schema/table/view/RenameViewColumnProcedureTest.java +++ b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/procedure/impl/schema/table/view/RenameViewColumnProcedureTest.java @@ -29,12 +29,19 @@ import java.io.DataOutputStream; import java.io.IOException; import java.nio.ByteBuffer; +import java.util.Collections; public class RenameViewColumnProcedureTest { @Test public void serializeDeserializeTest() throws IllegalPathException, IOException { final RenameViewColumnProcedure renameViewColumnProcedure = - new RenameViewColumnProcedure("database1", "table1", "0", "oldName", "newName", false); + new RenameViewColumnProcedure( + "database1", + "table1", + "0", + Collections.singletonList("old"), + Collections.singletonList("new"), + false); final ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream(); final DataOutputStream dataOutputStream = new DataOutputStream(byteArrayOutputStream); diff --git a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/iot/client/DispatchLogHandler.java b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/iot/client/DispatchLogHandler.java index bb0326d7473e7..9bfd48cea9b72 100644 --- a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/iot/client/DispatchLogHandler.java +++ b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/iot/client/DispatchLogHandler.java @@ -106,7 +106,7 @@ public void onError(Exception exception) { ++retryCount; Throwable rootCause = ExceptionUtils.getRootCause(exception); 
logger.warn( - "Can not send {} to peer for {} times {} because {}", + "Can not send {} to peer {} for {} times because {}", batch, thread.getPeer(), retryCount, diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/auth/AuthorityChecker.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/auth/AuthorityChecker.java index 81db578ace16a..9d0447a4a1ad4 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/auth/AuthorityChecker.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/auth/AuthorityChecker.java @@ -524,4 +524,8 @@ private static void appendEntryInfo(String name, TRoleResp resp, TsBlockBuilder } } } + + public static void invalidateAllCache() { + authorityFetcher.get().getAuthorCache().invalidAllCache(); + } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/consensus/statemachine/dataregion/DataExecutionVisitor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/consensus/statemachine/dataregion/DataExecutionVisitor.java index ca81365846794..e82f98f531c8a 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/consensus/statemachine/dataregion/DataExecutionVisitor.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/consensus/statemachine/dataregion/DataExecutionVisitor.java @@ -34,6 +34,7 @@ import org.apache.iotdb.db.queryengine.plan.planner.plan.node.pipe.PipeEnrichedDeleteDataNode; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.pipe.PipeEnrichedInsertNode; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.DeleteDataNode; +import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.EvolveSchemaNode; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertMultiTabletsNode; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertRowNode; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertRowsNode; @@ -287,6 +288,17 @@ public TSStatus visitDeleteData( } } + @Override + 
public TSStatus visitEvolveSchemaNode(EvolveSchemaNode node, DataRegion dataRegion) { + try { + dataRegion.applySchemaEvolution(node.getSchemaEvolutions()); + return StatusUtils.OK; + } catch (final IOException e) { + LOGGER.error("Error in executing plan node: {}", node, e); + return new TSStatus(TSStatusCode.WRITE_PROCESS_ERROR.getStatusCode()); + } + } + @Override public TSStatus visitPipeEnrichedDeleteDataNode( final PipeEnrichedDeleteDataNode node, final DataRegion context) { diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/consensus/statemachine/schemaregion/SchemaExecutionVisitor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/consensus/statemachine/schemaregion/SchemaExecutionVisitor.java index 261b4908a9061..63747b6c922ed 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/consensus/statemachine/schemaregion/SchemaExecutionVisitor.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/consensus/statemachine/schemaregion/SchemaExecutionVisitor.java @@ -61,6 +61,7 @@ import org.apache.iotdb.db.queryengine.plan.planner.plan.node.pipe.PipeEnrichedNonWritePlanNode; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.pipe.PipeEnrichedWritePlanNode; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.pipe.PipeOperateSchemaQueueNode; +import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.EvolveSchemaNode; import org.apache.iotdb.db.queryengine.plan.relational.planner.node.schema.ConstructTableDevicesBlackListNode; import org.apache.iotdb.db.queryengine.plan.relational.planner.node.schema.CreateOrUpdateTableDeviceNode; import org.apache.iotdb.db.queryengine.plan.relational.planner.node.schema.DeleteTableDeviceNode; @@ -854,6 +855,17 @@ public TSStatus visitPipeOperateSchemaQueueNode( return new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()); } + @Override + public TSStatus visitEvolveSchemaNode(EvolveSchemaNode node, ISchemaRegion schemaRegion) { + try { + 
schemaRegion.applySchemaEvolution(node); + } catch (MetadataException e) { + logMetaDataException(e); + return RpcUtils.getStatus(e.getErrorCode(), e.getMessage()); + } + return new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()); + } + @Override public TSStatus visitPlan(final PlanNode node, final ISchemaRegion context) { return null; diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/PipeTsFileInsertionEvent.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/PipeTsFileInsertionEvent.java index 6b3e505d2eafb..ffc02e069e665 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/PipeTsFileInsertionEvent.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/PipeTsFileInsertionEvent.java @@ -907,4 +907,8 @@ protected void finalizeResource() { } } } + + public TsFileResource getResource() { + return resource; + } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/thrift/IoTDBDataNodeReceiver.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/thrift/IoTDBDataNodeReceiver.java index 904b335ec5874..35b34e0e4ba73 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/thrift/IoTDBDataNodeReceiver.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/thrift/IoTDBDataNodeReceiver.java @@ -113,10 +113,12 @@ import org.apache.iotdb.service.rpc.thrift.TPipeTransferResp; import com.google.common.util.concurrent.ListenableFuture; +import org.apache.tsfile.common.constant.TsFileConstant; import org.apache.tsfile.utils.Pair; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.io.File; import java.io.FileNotFoundException; import java.io.IOException; import java.nio.file.Paths; @@ -586,6 +588,10 @@ private TSStatus loadTsFileAsync(final String dataBaseName, final List a 
private TSStatus loadTsFileSync(final String dataBaseName, final String fileAbsolutePath) throws FileNotFoundException { + LOGGER.info( + "DataNode Receiver starts to load TsFile: {} for database {}", + fileAbsolutePath, + dataBaseName); final LoadTsFileStatement statement = new LoadTsFileStatement(fileAbsolutePath); statement.setDeleteAfterLoad(true); statement.setConvertOnTypeMismatch(true); @@ -594,6 +600,16 @@ private TSStatus loadTsFileSync(final String dataBaseName, final String fileAbso IoTDBDescriptor.getInstance().getConfig().isAutoCreateSchemaEnabled()); statement.setDatabase(dataBaseName); + // add associated sevo file path if exists + String sevoFilePath = + fileAbsolutePath.replace( + TsFileConstant.TSFILE_SUFFIX, IoTDBConstant.SCHEMA_EVOLUTION_FILE_SUFFIX); + File sevoFile = new File(sevoFilePath); + if (sevoFile.exists()) { + LOGGER.info("Loading a tsfile with schema evolution, sevo file path: {}", sevoFilePath); + statement.setSchemaEvolutionFile(sevoFile); + } + return executeStatementAndClassifyExceptions(statement); } @@ -865,6 +881,7 @@ private TSStatus executeStatementWithPermissionCheckAndRetryOnDataTypeMismatch( TSStatusCode.PIPE_TRANSFER_EXECUTE_STATEMENT_ERROR, "Execute null statement."); } + LOGGER.info("Receiver id = {}: executing {}", receiverId.get(), statement); // Judge which model the statement belongs to final boolean isTableModelStatement; final String databaseName; diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/evolvable/request/PipeTransferTsFilePieceReq.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/evolvable/request/PipeTransferTsFilePieceReq.java index 0435d9a4c3720..8e621565311e8 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/evolvable/request/PipeTransferTsFilePieceReq.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/evolvable/request/PipeTransferTsFilePieceReq.java @@ -24,6 +24,7 @@ 
import org.apache.iotdb.service.rpc.thrift.TPipeTransferReq; import java.io.IOException; +import java.nio.ByteBuffer; public class PipeTransferTsFilePieceReq extends PipeTransferFilePieceReq { @@ -45,6 +46,13 @@ public static PipeTransferTsFilePieceReq toTPipeTransferReq( .convertToTPipeTransferReq(fileName, startWritingOffset, filePiece); } + public static PipeTransferTsFilePieceReq toTPipeTransferReq( + String fileName, long startWritingOffset, ByteBuffer filePiece) throws IOException { + return (PipeTransferTsFilePieceReq) + new PipeTransferTsFilePieceReq() + .convertToTPipeTransferReq(fileName, startWritingOffset, filePiece); + } + public static PipeTransferTsFilePieceReq fromTPipeTransferReq(TPipeTransferReq transferReq) { return (PipeTransferTsFilePieceReq) new PipeTransferTsFilePieceReq().translateFromTPipeTransferReq(transferReq); diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/evolvable/request/PipeTransferTsFilePieceWithModReq.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/evolvable/request/PipeTransferTsFilePieceWithModReq.java index 7feb339f6d599..86255fab0897e 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/evolvable/request/PipeTransferTsFilePieceWithModReq.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/evolvable/request/PipeTransferTsFilePieceWithModReq.java @@ -24,6 +24,7 @@ import org.apache.iotdb.service.rpc.thrift.TPipeTransferReq; import java.io.IOException; +import java.nio.ByteBuffer; public class PipeTransferTsFilePieceWithModReq extends PipeTransferFilePieceReq { @@ -45,6 +46,13 @@ public static PipeTransferTsFilePieceWithModReq toTPipeTransferReq( .convertToTPipeTransferReq(fileName, startWritingOffset, filePiece); } + public static PipeTransferTsFilePieceWithModReq toTPipeTransferReq( + String fileName, long startWritingOffset, ByteBuffer filePiece) throws IOException { + return 
(PipeTransferTsFilePieceWithModReq) + new PipeTransferTsFilePieceWithModReq() + .convertToTPipeTransferReq(fileName, startWritingOffset, filePiece); + } + public static PipeTransferTsFilePieceWithModReq fromTPipeTransferReq( TPipeTransferReq transferReq) { return (PipeTransferTsFilePieceWithModReq) diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/thrift/async/IoTDBDataRegionAsyncSink.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/thrift/async/IoTDBDataRegionAsyncSink.java index f8d0b104096f1..64bc882ba2ba4 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/thrift/async/IoTDBDataRegionAsyncSink.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/thrift/async/IoTDBDataRegionAsyncSink.java @@ -261,6 +261,7 @@ private void transferInBatchWithoutCheck( eventsHadBeenAddedToRetryQueue, sealedFile.right, null, + null, false, sealedFile.left)); } @@ -417,6 +418,7 @@ private boolean transferWithoutCheck(final TsFileInsertionEvent tsFileInsertionE new AtomicBoolean(false), pipeTsFileInsertionEvent.getTsFile(), pipeTsFileInsertionEvent.getModFile(), + pipeTsFileInsertionEvent.getResource(), pipeTsFileInsertionEvent.isWithMod() && clientManager.supportModsIfIsDataNodeReceiver(), pipeTsFileInsertionEvent.isTableModelEvent() diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/thrift/async/handler/PipeTransferTsFileHandler.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/thrift/async/handler/PipeTransferTsFileHandler.java index 6726c9a67011a..2499b9972cd1d 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/thrift/async/handler/PipeTransferTsFileHandler.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/thrift/async/handler/PipeTransferTsFileHandler.java @@ -37,6 +37,9 @@ import 
org.apache.iotdb.db.pipe.sink.payload.evolvable.request.PipeTransferTsFilePieceWithModReq; import org.apache.iotdb.db.pipe.sink.payload.evolvable.request.PipeTransferTsFileSealWithModReq; import org.apache.iotdb.db.pipe.sink.protocol.thrift.async.IoTDBDataRegionAsyncSink; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.EvolvedSchema; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.SchemaEvolutionFile; import org.apache.iotdb.pipe.api.exception.PipeException; import org.apache.iotdb.rpc.TSStatusCode; import org.apache.iotdb.service.rpc.thrift.TPipeTransferReq; @@ -51,6 +54,7 @@ import java.io.File; import java.io.IOException; import java.io.RandomAccessFile; +import java.nio.ByteBuffer; import java.util.Arrays; import java.util.List; import java.util.Map; @@ -75,6 +79,7 @@ public class PipeTransferTsFileHandler extends PipeTransferTrackableHandler { private final File tsFile; private final File modFile; private File currentFile; + private final TsFileResource resource; private final boolean transferMod; @@ -87,6 +92,8 @@ public class PipeTransferTsFileHandler extends PipeTransferTrackableHandler { private RandomAccessFile reader; + private volatile boolean isSevoTransferStarted = false; + private volatile boolean isSevoTransferred = false; private final AtomicBoolean isSealSignalSent; private IoTDBDataNodeAsyncClientManager clientManager; @@ -99,6 +106,7 @@ public PipeTransferTsFileHandler( final AtomicBoolean eventsHadBeenAddedToRetryQueue, final File tsFile, final File modFile, + final TsFileResource resource, final boolean transferMod, final String dataBaseName) throws InterruptedException { @@ -115,6 +123,7 @@ public PipeTransferTsFileHandler( this.transferMod = transferMod; this.dataBaseName = dataBaseName; currentFile = transferMod ? 
modFile : tsFile; + this.resource = resource; // NOTE: Waiting for resource enough for slicing here may cause deadlock! // TsFile events are producing and consuming at the same time, and the memory of a TsFile @@ -191,6 +200,12 @@ public void transfer( reader = new RandomAccessFile(tsFile, "r"); transfer(clientManager, client); } else if (currentFile == tsFile) { + if (!isSevoTransferred && transferSevo(client)) { + // if the transfer has been initiated, return directly to allow the callback to trigger + // the next transfer + return; + } + isSealSignalSent.set(true); final TPipeTransferReq uncompressedReq = @@ -247,6 +262,43 @@ public void transfer( position += readLength; } + private boolean transferSevo(AsyncPipeDataTransferServiceClient client) + throws IOException, TException { + isSevoTransferStarted = true; + + if (resource == null) { + isSevoTransferred = true; + // transferring tsfile written from tablets, no schema evolution + return false; + } + + EvolvedSchema evolvedSchema = resource.getMergedEvolvedSchema(); + if (evolvedSchema == null) { + isSevoTransferred = true; + return false; + } + + ByteBuffer fileBuffer = evolvedSchema.toSchemaEvolutionFileBuffer(); + final TPipeTransferReq uncompressedReq = + PipeTransferTsFilePieceReq.toTPipeTransferReq( + SchemaEvolutionFile.getTsFileAssociatedSchemaEvolutionFileName(currentFile), + 0, + fileBuffer); + final TPipeTransferReq req = connector.compressIfNeeded(uncompressedReq); + + pipeName2WeightMap.forEach( + (pipePair, weight) -> + connector.rateLimitIfNeeded( + pipePair.getLeft(), + pipePair.getRight(), + client.getEndPoint(), + (long) (req.getBody().length * weight))); + + tryTransfer(client, req); + LOGGER.info("Transferring schema evolution file for tsfile {}.", tsFile); + return true; + } + @Override public void onComplete(final TPipeTransferResp response) { try { @@ -347,6 +399,10 @@ protected boolean onCompleteInternal(final TPipeTransferResp response) { } } + if (isSevoTransferStarted && 
!isSevoTransferred) { + isSevoTransferred = true; + } + transfer(clientManager, client); } catch (final Exception e) { onError(e); diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/thrift/sync/IoTDBDataRegionSyncSink.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/thrift/sync/IoTDBDataRegionSyncSink.java index 016f787afaa2a..eb8afea52d6d7 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/thrift/sync/IoTDBDataRegionSyncSink.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/thrift/sync/IoTDBDataRegionSyncSink.java @@ -48,6 +48,9 @@ import org.apache.iotdb.db.pipe.sink.payload.evolvable.request.PipeTransferTsFileSealWithModReq; import org.apache.iotdb.db.pipe.sink.util.cacher.LeaderCacheUtils; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertNode; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.EvolvedSchema; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.SchemaEvolutionFile; import org.apache.iotdb.metrics.type.Histogram; import org.apache.iotdb.pipe.api.annotation.TableModel; import org.apache.iotdb.pipe.api.annotation.TreeModel; @@ -70,6 +73,7 @@ import java.io.File; import java.io.IOException; +import java.nio.ByteBuffer; import java.nio.file.NoSuchFileException; import java.util.Arrays; import java.util.Collections; @@ -336,7 +340,7 @@ private void doTransfer(final PipeTabletEventTsFileBatch batchToTransfer) final Map, Double> pipe2WeightMap = batchToTransfer.deepCopyPipe2WeightMap(); for (final Pair dbTsFile : dbTsFilePairs) { - doTransfer(pipe2WeightMap, dbTsFile.right, null, dbTsFile.left); + doTransfer(pipe2WeightMap, dbTsFile.right, null, null, dbTsFile.left); try { RetryUtils.retryOnException( () -> { @@ -504,6 +508,7 @@ private void doTransferWrapper(final 
PipeTsFileInsertionEvent pipeTsFileInsertio 1.0), pipeTsFileInsertionEvent.getTsFile(), pipeTsFileInsertionEvent.isWithMod() ? pipeTsFileInsertionEvent.getModFile() : null, + pipeTsFileInsertionEvent.getResource(), pipeTsFileInsertionEvent.isTableModelEvent() ? pipeTsFileInsertionEvent.getTableModelDatabaseName() : null); @@ -517,19 +522,59 @@ private void doTransfer( final Map, Double> pipeName2WeightMap, final File tsFile, final File modFile, + final TsFileResource resource, final String dataBaseName) throws PipeException, IOException { final Pair clientAndStatus = clientManager.getClient(); - final TPipeTransferResp resp; + TPipeTransferResp resp; // 1. Transfer tsFile, and mod file if exists and receiver's version >= 2 if (Objects.nonNull(modFile) && clientManager.supportModsIfIsDataNodeReceiver()) { transferFilePieces(pipeName2WeightMap, modFile, clientAndStatus, true); transferFilePieces(pipeName2WeightMap, tsFile, clientAndStatus, true); - // 2. Transfer file seal signal with mod, which means the file is transferred completely try { + // 2. 
Transfer schema evolution file if exists + if (resource != null) { + EvolvedSchema evolvedSchema = resource.getMergedEvolvedSchema(); + if (evolvedSchema == null) { + return; + } + + ByteBuffer fileBuffer = evolvedSchema.toSchemaEvolutionFileBuffer(); + final byte[] payload = fileBuffer.array(); + final TPipeTransferReq uncompressedReq = + PipeTransferTsFilePieceReq.toTPipeTransferReq( + SchemaEvolutionFile.getTsFileAssociatedSchemaEvolutionFileName(tsFile), + 0, + payload); + final TPipeTransferReq sevoReq = compressIfNeeded(uncompressedReq); + + pipeName2WeightMap.forEach( + (pipePair, weight) -> + rateLimitIfNeeded( + pipePair.getLeft(), + pipePair.getRight(), + clientAndStatus.getLeft().getEndPoint(), + (long) (sevoReq.getBody().length * weight))); + + resp = clientAndStatus.left.pipeTransfer(sevoReq); + + final TSStatus status = resp.getStatus(); + // Only handle the failed statuses to avoid string format performance overhead + if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode() + && status.getCode() != TSStatusCode.REDIRECTION_RECOMMEND.getStatusCode()) { + receiverStatusHandler.handle( + resp.getStatus(), + String.format("Seal file %s error, result status %s.", tsFile, resp.getStatus()), + tsFile.getName()); + return; + } + LOGGER.info("Transferred schema evolution file for tsfile {}.", tsFile); + } + + // 3. 
Transfer file seal signal with mod, which means the file is transferred completely final TPipeTransferReq req = compressIfNeeded( PipeTransferTsFileSealWithModReq.toTPipeTransferReq( @@ -548,6 +593,7 @@ private void doTransfer( (long) (req.getBody().length * weight))); resp = clientAndStatus.getLeft().pipeTransfer(req); + } catch (final Exception e) { clientAndStatus.setRight(false); clientManager.adjustTimeoutIfNecessary(e); diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/thrift/impl/DataNodeInternalRPCServiceImpl.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/thrift/impl/DataNodeInternalRPCServiceImpl.java index 42929be741819..4a57de0ceb39a 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/thrift/impl/DataNodeInternalRPCServiceImpl.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/thrift/impl/DataNodeInternalRPCServiceImpl.java @@ -162,8 +162,10 @@ import org.apache.iotdb.db.queryengine.plan.planner.plan.node.metadata.write.view.DeleteLogicalViewNode; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.metadata.write.view.RollbackLogicalViewBlackListNode; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.pipe.PipeEnrichedDeleteDataNode; +import org.apache.iotdb.db.queryengine.plan.planner.plan.node.pipe.PipeEnrichedEvolveSchemaNode; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.pipe.PipeEnrichedNonWritePlanNode; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.DeleteDataNode; +import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.EvolveSchemaNode; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.RelationalDeleteDataNode; import org.apache.iotdb.db.queryengine.plan.relational.metadata.fetcher.TableDeviceSchemaFetcher; import org.apache.iotdb.db.queryengine.plan.relational.metadata.fetcher.cache.TableDeviceSchemaCache; @@ -199,8 +201,9 @@ import 
org.apache.iotdb.db.storageengine.dataregion.compaction.settle.SettleRequestHandler; import org.apache.iotdb.db.storageengine.dataregion.flush.CompressionRatio; import org.apache.iotdb.db.storageengine.dataregion.modification.DeletionPredicate; -import org.apache.iotdb.db.storageengine.dataregion.modification.IDPredicate; import org.apache.iotdb.db.storageengine.dataregion.modification.TableDeletionEntry; +import org.apache.iotdb.db.storageengine.dataregion.modification.TagPredicate; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.SchemaEvolution; import org.apache.iotdb.db.storageengine.rescon.quotas.DataNodeSpaceQuotaManager; import org.apache.iotdb.db.storageengine.rescon.quotas.DataNodeThrottleQuotaManager; import org.apache.iotdb.db.subscription.agent.SubscriptionAgent; @@ -240,6 +243,7 @@ import org.apache.iotdb.mpp.rpc.thrift.TCreateTriggerInstanceReq; import org.apache.iotdb.mpp.rpc.thrift.TDataNodeHeartbeatReq; import org.apache.iotdb.mpp.rpc.thrift.TDataNodeHeartbeatResp; +import org.apache.iotdb.mpp.rpc.thrift.TDataRegionEvolveSchemaReq; import org.apache.iotdb.mpp.rpc.thrift.TDeactivateTemplateReq; import org.apache.iotdb.mpp.rpc.thrift.TDeleteColumnDataReq; import org.apache.iotdb.mpp.rpc.thrift.TDeleteDataForDeleteSchemaReq; @@ -296,6 +300,7 @@ import org.apache.iotdb.mpp.rpc.thrift.TRollbackViewSchemaBlackListReq; import org.apache.iotdb.mpp.rpc.thrift.TSchemaFetchRequest; import org.apache.iotdb.mpp.rpc.thrift.TSchemaFetchResponse; +import org.apache.iotdb.mpp.rpc.thrift.TSchemaRegionEvolveSchemaReq; import org.apache.iotdb.mpp.rpc.thrift.TSendBatchPlanNodeReq; import org.apache.iotdb.mpp.rpc.thrift.TSendBatchPlanNodeResp; import org.apache.iotdb.mpp.rpc.thrift.TSendFragmentInstanceReq; @@ -795,6 +800,44 @@ public TSStatus deleteDataForDeleteSchema(final TDeleteDataForDeleteSchemaReq re .getStatus()); } + @Override + public TSStatus evolveSchemaInDataRegion(final TDataRegionEvolveSchemaReq req) { + final List schemaEvolutions = 
+ SchemaEvolution.createListFrom(req.schemaEvolutions); + return executeInternalSchemaTask( + req.getDataRegionIdList(), + consensusGroupId -> + new RegionWriteExecutor() + .execute( + new DataRegionId(consensusGroupId.getId()), + // Now the deletion plan may be re-collected here by pipe, resulting multiple + // transfer to delete time series plan. Now just ignore. + req.isSetIsGeneratedByPipe() && req.isIsGeneratedByPipe() + ? new PipeEnrichedEvolveSchemaNode( + new EvolveSchemaNode(new PlanNodeId(""), schemaEvolutions)) + : new EvolveSchemaNode(new PlanNodeId(""), schemaEvolutions)) + .getStatus()); + } + + @Override + public TSStatus evolveSchemaInSchemaRegion(final TSchemaRegionEvolveSchemaReq req) { + final List schemaEvolutions = + SchemaEvolution.createListFrom(req.schemaEvolutions); + return executeInternalSchemaTask( + req.getSchemaRegionIdList(), + consensusGroupId -> + new RegionWriteExecutor() + .execute( + new SchemaRegionId(consensusGroupId.getId()), + // Now the deletion plan may be re-collected here by pipe, resulting multiple + // transfer to delete time series plan. Now just ignore. + req.isSetIsGeneratedByPipe() && req.isIsGeneratedByPipe() + ? 
new PipeEnrichedEvolveSchemaNode( + new EvolveSchemaNode(new PlanNodeId(""), schemaEvolutions)) + : new EvolveSchemaNode(new PlanNodeId(""), schemaEvolutions)) + .getStatus()); + } + @Override public TSStatus deleteTimeSeries(final TDeleteTimeSeriesReq req) throws TException { final PathPatternTree patternTree = @@ -2001,7 +2044,7 @@ public TSStatus deleteColumnData(final TDeleteColumnDataReq req) { new TableDeletionEntry( new DeletionPredicate( req.getTableName(), - new IDPredicate.NOP(), + new TagPredicate.NOP(), Collections.singletonList(req.getColumnName())), new TimeRange(Long.MIN_VALUE, Long.MAX_VALUE)), // the request is only sent to associated region @@ -2355,6 +2398,10 @@ private void sampleDiskLoad(TLoadSample loadSample) { @Override public TSStatus invalidatePermissionCache(TInvalidatePermissionCacheReq req) { + if (req.getUsername().isEmpty() && req.getRoleName().isEmpty()) { + AuthorityChecker.invalidateAllCache(); + return RpcUtils.getStatus(TSStatusCode.SUCCESS_STATUS); + } if (AuthorityChecker.invalidateCache(req.getUsername(), req.getRoleName())) { return RpcUtils.getStatus(TSStatusCode.SUCCESS_STATUS); } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/source/AlignedSeriesScanUtil.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/source/AlignedSeriesScanUtil.java index f7ddaee472ea9..f17c01d2820fd 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/source/AlignedSeriesScanUtil.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/source/AlignedSeriesScanUtil.java @@ -62,6 +62,15 @@ public AlignedSeriesScanUtil( this(seriesPath, scanOrder, scanOptions, context, false, null); } + public AlignedSeriesScanUtil( + AlignedFullPath seriesPath, + Ordering scanOrder, + SeriesScanOptions scanOptions, + FragmentInstanceContext context, + long maxTsFileSetEndVersion) { + 
this(seriesPath, scanOrder, scanOptions, context, false, null, maxTsFileSetEndVersion); + } + public AlignedSeriesScanUtil( AlignedFullPath seriesPath, Ordering scanOrder, @@ -69,7 +78,25 @@ public AlignedSeriesScanUtil( FragmentInstanceContext context, boolean queryAllSensors, List givenDataTypes) { - super(seriesPath, scanOrder, scanOptions, context); + this( + seriesPath, + scanOrder, + scanOptions, + context, + queryAllSensors, + givenDataTypes, + Long.MAX_VALUE); + } + + public AlignedSeriesScanUtil( + AlignedFullPath seriesPath, + Ordering scanOrder, + SeriesScanOptions scanOptions, + FragmentInstanceContext context, + boolean queryAllSensors, + List givenDataTypes, + long maxTsFileSetEndVersion) { + super(seriesPath, scanOrder, scanOptions, context, maxTsFileSetEndVersion); isAligned = true; this.dataTypes = givenDataTypes != null @@ -100,7 +127,8 @@ protected AbstractAlignedTimeSeriesMetadata loadTimeSeriesMetadata( context, scanOptions.getGlobalTimeFilter(), isSeq, - ignoreAllNullRows); + ignoreAllNullRows, + maxTsFileSetEndVersion); } @Override diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/source/FileLoaderUtils.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/source/FileLoaderUtils.java index 3eff3b9a53fbc..2d1f5d3530c9c 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/source/FileLoaderUtils.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/source/FileLoaderUtils.java @@ -35,6 +35,7 @@ import org.apache.iotdb.db.storageengine.dataregion.read.reader.chunk.metadata.MemAlignedChunkMetadataLoader; import org.apache.iotdb.db.storageengine.dataregion.read.reader.chunk.metadata.MemChunkMetadataLoader; import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.EvolvedSchema; import 
org.apache.iotdb.db.storageengine.dataregion.tsfile.timeindex.ITimeIndex; import org.apache.iotdb.db.utils.ModificationUtils; import org.apache.iotdb.db.utils.SchemaUtils; @@ -87,7 +88,8 @@ public static TimeseriesMetadata loadTimeSeriesMetadata( FragmentInstanceContext context, Filter globalTimeFilter, Set allSensors, - boolean isSeq) + boolean isSeq, + long maxTsFileSetEndVersion) throws IOException { long t1 = System.nanoTime(); boolean loadFromMem = false; @@ -98,14 +100,20 @@ public static TimeseriesMetadata loadTimeSeriesMetadata( if (resource.isClosed()) { // when resource.getTimeIndexType() == 1, TsFileResource.timeIndexType is deviceTimeIndex // we should not ignore the non-exist of device in TsFileMetadata + EvolvedSchema evolvedSchema = resource.getMergedEvolvedSchema(maxTsFileSetEndVersion); + IDeviceID deviceId = seriesPath.getDeviceId(); + String measurement = seriesPath.getMeasurement(); + if (evolvedSchema != null) { + measurement = evolvedSchema.getOriginalColumnName(deviceId.getTableName(), measurement); + deviceId = evolvedSchema.rewriteToOriginal(deviceId); + } + timeSeriesMetadata = TimeSeriesMetadataCache.getInstance() .get( resource.getTsFilePath(), new TimeSeriesMetadataCache.TimeSeriesMetadataCacheKey( - resource.getTsFileID(), - seriesPath.getDeviceId(), - seriesPath.getMeasurement()), + resource.getTsFileID(), deviceId, measurement), allSensors, context.ignoreNotExistsDevice() || resource.getTimeIndexType() == ITimeIndex.FILE_TIME_INDEX_TYPE, @@ -114,8 +122,7 @@ public static TimeseriesMetadata loadTimeSeriesMetadata( if (timeSeriesMetadata != null) { long t2 = System.nanoTime(); List pathModifications = - context.getPathModifications( - resource, seriesPath.getDeviceId(), seriesPath.getMeasurement()); + context.getPathModifications(resource, deviceId, measurement); timeSeriesMetadata.setModified(!pathModifications.isEmpty()); timeSeriesMetadata.setChunkMetadataLoader( new DiskChunkMetadataLoader(resource, context, globalTimeFilter, 
pathModifications)); @@ -192,7 +199,8 @@ public static AbstractAlignedTimeSeriesMetadata loadAlignedTimeSeriesMetadata( FragmentInstanceContext context, Filter globalTimeFilter, boolean isSeq, - boolean ignoreAllNullRows) + boolean ignoreAllNullRows, + long maxTsFileSetEndVersion) throws IOException { final long t1 = System.nanoTime(); boolean loadFromMem = false; @@ -206,7 +214,12 @@ public static AbstractAlignedTimeSeriesMetadata loadAlignedTimeSeriesMetadata( if (resource.isClosed()) { alignedTimeSeriesMetadata = loadAlignedTimeSeriesMetadataFromDisk( - resource, alignedPath, context, globalTimeFilter, ignoreAllNullRows); + resource, + alignedPath, + context, + globalTimeFilter, + ignoreAllNullRows, + maxTsFileSetEndVersion); } else { // if the tsfile is unclosed, we just get it directly from TsFileResource loadFromMem = true; alignedTimeSeriesMetadata = @@ -274,7 +287,8 @@ private static AbstractAlignedTimeSeriesMetadata loadAlignedTimeSeriesMetadataFr AlignedFullPath alignedPath, FragmentInstanceContext context, Filter globalTimeFilter, - boolean ignoreAllNullRows) + boolean ignoreAllNullRows, + long maxTsFileSetEndVersion) throws IOException { AbstractAlignedTimeSeriesMetadata alignedTimeSeriesMetadata = null; // load all the TimeseriesMetadata of vector, the first one is for time column and the @@ -287,6 +301,20 @@ private static AbstractAlignedTimeSeriesMetadata loadAlignedTimeSeriesMetadataFr String filePath = resource.getTsFilePath(); IDeviceID deviceId = alignedPath.getDeviceId(); + EvolvedSchema evolvedSchema = resource.getMergedEvolvedSchema(maxTsFileSetEndVersion); + if (evolvedSchema != null) { + IDeviceID finalDeviceId = deviceId; + valueMeasurementList = + valueMeasurementList.stream() + .map(m -> evolvedSchema.getOriginalColumnName(finalDeviceId.getTableName(), m)) + .collect(Collectors.toList()); + allSensors = + allSensors.stream() + .map(m -> evolvedSchema.getOriginalColumnName(finalDeviceId.getTableName(), m)) + .collect(Collectors.toSet()); + 
deviceId = evolvedSchema.rewriteToOriginal(deviceId); + } + // when resource.getTimeIndexType() == 1, TsFileResource.timeIndexType is deviceTimeIndex // we should not ignore the non-exist of device in TsFileMetadata TimeseriesMetadata timeColumn = @@ -308,7 +336,7 @@ private static AbstractAlignedTimeSeriesMetadata loadAlignedTimeSeriesMetadataFr resource, timeColumn, Collections.emptyList(), - alignedPath, + deviceId, context, globalTimeFilter, false); @@ -336,7 +364,7 @@ private static AbstractAlignedTimeSeriesMetadata loadAlignedTimeSeriesMetadataFr resource, timeColumn, valueTimeSeriesMetadataList, - alignedPath, + deviceId, context, globalTimeFilter, ignoreAllNullRows); @@ -350,7 +378,7 @@ private static AbstractAlignedTimeSeriesMetadata setModifications( TsFileResource resource, TimeseriesMetadata timeColumnMetadata, List valueColumnMetadataList, - AlignedFullPath alignedPath, + IDeviceID deviceID, QueryContext context, Filter globalTimeFilter, boolean ignoreAllNullRows) { @@ -358,8 +386,7 @@ private static AbstractAlignedTimeSeriesMetadata setModifications( // deal with time column List timeModifications = - context.getPathModifications( - resource, alignedPath.getDeviceId(), timeColumnMetadata.getMeasurementId()); + context.getPathModifications(resource, deviceID, timeColumnMetadata.getMeasurementId()); // all rows are deleted, just return null to skip device data in this file if (ModificationUtils.isAllDeletedByMods( timeModifications, @@ -382,7 +409,7 @@ private static AbstractAlignedTimeSeriesMetadata setModifications( if (valueColumnMetadata != null) { List modifications = context.getPathModifications( - resource, alignedPath.getDeviceId(), valueColumnMetadata.getMeasurementId()); + resource, deviceID, valueColumnMetadata.getMeasurementId()); valueColumnMetadata.setModified(!modifications.isEmpty()); valueColumnsModifications.add(modifications); modified = (modified || !modifications.isEmpty()); diff --git 
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/source/SeriesScanUtil.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/source/SeriesScanUtil.java index 5e7111bf09cd0..3216e196730f0 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/source/SeriesScanUtil.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/source/SeriesScanUtil.java @@ -144,6 +144,9 @@ public class SeriesScanUtil implements Accountable { protected final int MAX_NUMBER_OF_POINTS_IN_PAGE = TSFileDescriptor.getInstance().getConfig().getMaxNumberOfPointsInPage(); + // to restrict the scope of sevo files for compaction + protected final long maxTsFileSetEndVersion; + private static final long INSTANCE_SIZE = RamUsageEstimator.shallowSizeOfInstance(SeriesScanUtil.class) + RamUsageEstimator.shallowSizeOfInstance(IDeviceID.class) @@ -159,6 +162,15 @@ public SeriesScanUtil( Ordering scanOrder, SeriesScanOptions scanOptions, FragmentInstanceContext context) { + this(seriesPath, scanOrder, scanOptions, context, Long.MAX_VALUE); + } + + public SeriesScanUtil( + IFullPath seriesPath, + Ordering scanOrder, + SeriesScanOptions scanOptions, + FragmentInstanceContext context, + long maxTsFileSetEndVersion) { this.seriesPath = seriesPath; this.deviceID = seriesPath.getDeviceId(); this.dataType = seriesPath.getSeriesType(); @@ -196,6 +208,8 @@ public SeriesScanUtil( new PriorityQueue<>( orderUtils.comparingLong( versionPageReader -> orderUtils.getOrderTime(versionPageReader.getStatistics()))); + + this.maxTsFileSetEndVersion = maxTsFileSetEndVersion; } /** @@ -204,7 +218,7 @@ public SeriesScanUtil( * @param dataSource the query data source */ public void initQueryDataSource(QueryDataSource dataSource) { - dataSource.fillOrderIndexes(deviceID, orderUtils.getAscending()); + dataSource.fillOrderIndexes(deviceID, orderUtils.getAscending(), 
maxTsFileSetEndVersion); this.dataSource = dataSource; // updated filter concerning TTL @@ -1892,7 +1906,8 @@ protected ITimeSeriesMetadata loadTimeSeriesMetadata(TsFileResource resource, bo context, scanOptions.getGlobalTimeFilter(), scanOptions.getAllSensors(), - isSeq); + isSeq, + maxTsFileSetEndVersion); } public List getTsDataTypeList() { @@ -2309,26 +2324,38 @@ public Ordering getScanOrder() { @Override public boolean hasNextSeqResource() { - while (dataSource.hasNextSeqResource(curSeqFileIndex, false, deviceID)) { + while (dataSource.hasNextSeqResource( + curSeqFileIndex, false, deviceID, maxTsFileSetEndVersion)) { if (dataSource.isSeqSatisfied( - deviceID, curSeqFileIndex, scanOptions.getGlobalTimeFilter(), false)) { + deviceID, + curSeqFileIndex, + scanOptions.getGlobalTimeFilter(), + false, + maxTsFileSetEndVersion)) { break; } curSeqFileIndex--; } - return dataSource.hasNextSeqResource(curSeqFileIndex, false, deviceID); + return dataSource.hasNextSeqResource( + curSeqFileIndex, false, deviceID, maxTsFileSetEndVersion); } @Override public boolean hasNextUnseqResource() { - while (dataSource.hasNextUnseqResource(curUnseqFileIndex, false, deviceID)) { + while (dataSource.hasNextUnseqResource( + curUnseqFileIndex, false, deviceID, maxTsFileSetEndVersion)) { if (dataSource.isUnSeqSatisfied( - deviceID, curUnseqFileIndex, scanOptions.getGlobalTimeFilter(), false)) { + deviceID, + curUnseqFileIndex, + scanOptions.getGlobalTimeFilter(), + false, + maxTsFileSetEndVersion)) { break; } curUnseqFileIndex++; } - return dataSource.hasNextUnseqResource(curUnseqFileIndex, false, deviceID); + return dataSource.hasNextUnseqResource( + curUnseqFileIndex, false, deviceID, maxTsFileSetEndVersion); } @Override @@ -2438,26 +2465,37 @@ public Ordering getScanOrder() { @Override public boolean hasNextSeqResource() { - while (dataSource.hasNextSeqResource(curSeqFileIndex, true, deviceID)) { + while (dataSource.hasNextSeqResource( + curSeqFileIndex, true, deviceID, 
maxTsFileSetEndVersion)) { if (dataSource.isSeqSatisfied( - deviceID, curSeqFileIndex, scanOptions.getGlobalTimeFilter(), false)) { + deviceID, + curSeqFileIndex, + scanOptions.getGlobalTimeFilter(), + false, + maxTsFileSetEndVersion)) { break; } curSeqFileIndex++; } - return dataSource.hasNextSeqResource(curSeqFileIndex, true, deviceID); + return dataSource.hasNextSeqResource(curSeqFileIndex, true, deviceID, maxTsFileSetEndVersion); } @Override public boolean hasNextUnseqResource() { - while (dataSource.hasNextUnseqResource(curUnseqFileIndex, true, deviceID)) { + while (dataSource.hasNextUnseqResource( + curUnseqFileIndex, true, deviceID, maxTsFileSetEndVersion)) { if (dataSource.isUnSeqSatisfied( - deviceID, curUnseqFileIndex, scanOptions.getGlobalTimeFilter(), false)) { + deviceID, + curUnseqFileIndex, + scanOptions.getGlobalTimeFilter(), + false, + maxTsFileSetEndVersion)) { break; } curUnseqFileIndex++; } - return dataSource.hasNextUnseqResource(curUnseqFileIndex, true, deviceID); + return dataSource.hasNextUnseqResource( + curUnseqFileIndex, true, deviceID, maxTsFileSetEndVersion); } @Override diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/AnalyzeUtils.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/AnalyzeUtils.java index 2e0b0b52fdc09..41df1928112b8 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/AnalyzeUtils.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/AnalyzeUtils.java @@ -53,10 +53,10 @@ import org.apache.iotdb.db.schemaengine.table.DataNodeTableCache; import org.apache.iotdb.db.schemaengine.table.DataNodeTreeViewSchemaUtils; import org.apache.iotdb.db.storageengine.dataregion.modification.DeletionPredicate; -import org.apache.iotdb.db.storageengine.dataregion.modification.IDPredicate; -import org.apache.iotdb.db.storageengine.dataregion.modification.IDPredicate.And; -import 
org.apache.iotdb.db.storageengine.dataregion.modification.IDPredicate.SegmentExactMatch; import org.apache.iotdb.db.storageengine.dataregion.modification.TableDeletionEntry; +import org.apache.iotdb.db.storageengine.dataregion.modification.TagPredicate; +import org.apache.iotdb.db.storageengine.dataregion.modification.TagPredicate.And; +import org.apache.iotdb.db.storageengine.dataregion.modification.TagPredicate.SegmentExactMatch; import org.apache.iotdb.rpc.RpcUtils; import org.apache.iotdb.rpc.TSStatusCode; @@ -436,23 +436,23 @@ private static TableDeletionEntry parsePredicate(Expression expression, TsTable Queue expressionQueue = new LinkedList<>(); expressionQueue.add(expression); DeletionPredicate predicate = new DeletionPredicate(table.getTableName()); - IDPredicate idPredicate = null; + TagPredicate tagPredicate = null; TimeRange timeRange = new TimeRange(Long.MIN_VALUE, Long.MAX_VALUE, true); while (!expressionQueue.isEmpty()) { Expression currExp = expressionQueue.remove(); if (currExp instanceof LogicalExpression) { parseAndPredicate(((LogicalExpression) currExp), expressionQueue); } else if (currExp instanceof ComparisonExpression) { - idPredicate = - parseComparison(((ComparisonExpression) currExp), timeRange, idPredicate, table); + tagPredicate = + parseComparison(((ComparisonExpression) currExp), timeRange, tagPredicate, table); } else if (currExp instanceof IsNullPredicate) { - idPredicate = parseIsNull((IsNullPredicate) currExp, idPredicate, table); + tagPredicate = parseIsNull((IsNullPredicate) currExp, tagPredicate, table); } else { throw new SemanticException("Unsupported expression: " + currExp + " in " + expression); } } - if (idPredicate != null) { - predicate.setIdPredicate(idPredicate); + if (tagPredicate != null) { + predicate.setIdPredicate(tagPredicate); } if (timeRange.getStartTime() > timeRange.getEndTime()) { throw new SemanticException( @@ -472,8 +472,8 @@ private static void parseAndPredicate( 
expressionQueue.addAll(expression.getTerms()); } - private static IDPredicate parseIsNull( - IsNullPredicate isNullPredicate, IDPredicate oldPredicate, TsTable table) { + private static TagPredicate parseIsNull( + IsNullPredicate isNullPredicate, TagPredicate oldPredicate, TsTable table) { Expression leftHandExp = isNullPredicate.getValue(); if (!(leftHandExp instanceof Identifier)) { throw new SemanticException("Left hand expression is not an identifier: " + leftHandExp); @@ -486,25 +486,26 @@ private static IDPredicate parseIsNull( } // the first segment is the table name, so + 1 - IDPredicate newPredicate = new SegmentExactMatch(null, idColumnOrdinal + 1); + TagPredicate newPredicate = new SegmentExactMatch(null, idColumnOrdinal + 1); return combinePredicates(oldPredicate, newPredicate); } - private static IDPredicate combinePredicates(IDPredicate oldPredicate, IDPredicate newPredicate) { + private static TagPredicate combinePredicates( + TagPredicate oldPredicate, TagPredicate newPredicate) { if (oldPredicate == null) { return newPredicate; } - if (oldPredicate instanceof IDPredicate.And) { + if (oldPredicate instanceof TagPredicate.And) { ((And) oldPredicate).add(newPredicate); return oldPredicate; } - return new IDPredicate.And(oldPredicate, newPredicate); + return new TagPredicate.And(oldPredicate, newPredicate); } - private static IDPredicate parseComparison( + private static TagPredicate parseComparison( ComparisonExpression comparisonExpression, TimeRange timeRange, - IDPredicate oldPredicate, + TagPredicate oldPredicate, TsTable table) { Expression left = comparisonExpression.getLeft(); Expression right = comparisonExpression.getRight(); @@ -556,11 +557,11 @@ private static IDPredicate parseComparison( "The column '" + columnName + "' does not exist or is not a tag column"); } - IDPredicate newPredicate = getIdPredicate(comparisonExpression, right, idColumnOrdinal); + TagPredicate newPredicate = getIdPredicate(comparisonExpression, right, 
idColumnOrdinal); return combinePredicates(oldPredicate, newPredicate); } - private static IDPredicate getIdPredicate( + private static TagPredicate getIdPredicate( ComparisonExpression comparisonExpression, Expression right, int idColumnOrdinal) { if (comparisonExpression.getOperator() != ComparisonExpression.Operator.EQUAL) { throw new SemanticException("The operator of tag predicate must be '=' for " + right); diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/ClusterPartitionFetcher.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/ClusterPartitionFetcher.java index 2274762341b5d..67db363a8f6af 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/ClusterPartitionFetcher.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/ClusterPartitionFetcher.java @@ -47,6 +47,7 @@ import org.apache.iotdb.db.protocol.client.ConfigNodeClientManager; import org.apache.iotdb.db.protocol.client.ConfigNodeInfo; import org.apache.iotdb.db.queryengine.plan.analyze.cache.partition.PartitionCache; +import org.apache.iotdb.db.utils.CommonUtils; import org.apache.iotdb.mpp.rpc.thrift.TRegionRouteReq; import org.apache.iotdb.rpc.TSStatusCode; @@ -347,6 +348,7 @@ private SchemaPartition getOrCreateSchemaPartition( final List partitionSlots = Objects.nonNull(deviceIDs) ? 
deviceIDs.stream() + .map(deviceID -> CommonUtils.getSeriesPartitionKey(deviceID, database, false)) .map(partitionExecutor::getSeriesPartitionSlot) .distinct() .collect(Collectors.toList()) @@ -458,6 +460,7 @@ private TDataPartitionReq constructDataPartitionReq( final Map> partitionSlotsMap = new HashMap<>(); for (final Map.Entry> entry : sgNameToQueryParamsMap.entrySet()) { + String databaseName = entry.getKey(); // for each sg final Map deviceToTimePartitionMap = new HashMap<>(); @@ -467,7 +470,9 @@ private TDataPartitionReq constructDataPartitionReq( for (final DataPartitionQueryParam queryParam : entry.getValue()) { seriesSlotTimePartitionMap .computeIfAbsent( - partitionExecutor.getSeriesPartitionSlot(queryParam.getDeviceID()), + partitionExecutor.getSeriesPartitionSlot( + CommonUtils.getSeriesPartitionKey( + queryParam.getDeviceID(), databaseName, false)), k -> new ComplexTimeSlotList( queryParam.isNeedLeftAll(), queryParam.isNeedRightAll())) @@ -479,7 +484,7 @@ private TDataPartitionReq constructDataPartitionReq( k, new TTimeSlotList( new ArrayList<>(v.timeSlotList), v.needLeftAll, v.needRightAll))); - partitionSlotsMap.put(entry.getKey(), deviceToTimePartitionMap); + partitionSlotsMap.put(databaseName, deviceToTimePartitionMap); } return new TDataPartitionReq(partitionSlotsMap); } @@ -491,6 +496,7 @@ private TDataPartitionReq constructDataPartitionReqForQuery( TTimeSlotList sharedTTimeSlotList = null; for (final Map.Entry> entry : sgNameToQueryParamsMap.entrySet()) { + String databaseName = entry.getKey(); // for each sg final Map deviceToTimePartitionMap = new HashMap<>(); @@ -503,10 +509,11 @@ private TDataPartitionReq constructDataPartitionReqForQuery( queryParam.isNeedRightAll()); } deviceToTimePartitionMap.putIfAbsent( - partitionExecutor.getSeriesPartitionSlot(queryParam.getDeviceID()), + partitionExecutor.getSeriesPartitionSlot( + CommonUtils.getSeriesPartitionKey(queryParam.getDeviceID(), databaseName, false)), sharedTTimeSlotList); } - 
partitionSlotsMap.put(entry.getKey(), deviceToTimePartitionMap); + partitionSlotsMap.put(databaseName, deviceToTimePartitionMap); } return new TDataPartitionReq(partitionSlotsMap); } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/cache/partition/PartitionCache.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/cache/partition/PartitionCache.java index 3c76bd08c1d81..79f0c7d63ea4f 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/cache/partition/PartitionCache.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/cache/partition/PartitionCache.java @@ -60,6 +60,7 @@ import org.apache.iotdb.db.protocol.session.SessionManager; import org.apache.iotdb.db.schemaengine.schemaregion.utils.MetaUtils; import org.apache.iotdb.db.service.metrics.CacheMetrics; +import org.apache.iotdb.db.utils.CommonUtils; import org.apache.iotdb.rpc.TSStatusCode; import com.github.benmanes.caffeine.cache.Cache; @@ -707,7 +708,8 @@ public SchemaPartition getSchemaPartition( List consensusGroupIds = new ArrayList<>(entry.getValue().size()); for (final IDeviceID device : entry.getValue()) { final TSeriesPartitionSlot seriesPartitionSlot = - partitionExecutor.getSeriesPartitionSlot(device); + partitionExecutor.getSeriesPartitionSlot( + CommonUtils.getSeriesPartitionKey(device, databaseName, true)); if (!map.containsKey(seriesPartitionSlot)) { // if one device not find, then return cache miss. 
if (logger.isDebugEnabled()) { @@ -874,7 +876,9 @@ public DataPartition getDataPartition( for (DataPartitionQueryParam param : params) { TSeriesPartitionSlot seriesPartitionSlot; if (null != param.getDeviceID()) { - seriesPartitionSlot = partitionExecutor.getSeriesPartitionSlot(param.getDeviceID()); + seriesPartitionSlot = + partitionExecutor.getSeriesPartitionSlot( + CommonUtils.getSeriesPartitionKey(param.getDeviceID(), databaseName, true)); } else { return null; } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/load/LoadTsFileAnalyzer.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/load/LoadTsFileAnalyzer.java index 9d350d97c82e1..f522f94580e94 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/load/LoadTsFileAnalyzer.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/load/LoadTsFileAnalyzer.java @@ -39,8 +39,13 @@ import org.apache.iotdb.db.queryengine.plan.relational.metadata.Metadata; import org.apache.iotdb.db.queryengine.plan.relational.sql.ast.LoadTsFile; import org.apache.iotdb.db.queryengine.plan.statement.crud.LoadTsFileStatement; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileID; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileManager; import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource; import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResourceStatus; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.EvolvedSchema; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.SchemaEvolutionFile; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.fileset.TsFileSet; import org.apache.iotdb.db.storageengine.dataregion.utils.TsFileResourceUtils; import org.apache.iotdb.db.storageengine.load.active.ActiveLoadPathHelper; import 
org.apache.iotdb.db.storageengine.load.converter.LoadTsFileDataTypeConverter; @@ -69,10 +74,13 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Optional; +import java.util.Set; +import java.util.stream.Collectors; import static org.apache.iotdb.db.queryengine.plan.execution.config.TableConfigTaskVisitor.DATABASE_NOT_SPECIFIED; import static org.apache.iotdb.db.storageengine.load.metrics.LoadTsFileCostMetricsSet.ANALYSIS; @@ -101,16 +109,19 @@ public class LoadTsFileAnalyzer implements AutoCloseable { private final String statementString; private final boolean isGeneratedByPipe; + private final File originalFile; private final List tsFiles; private final List isMiniTsFile; private boolean isMiniTsFileConverted = false; private final List isTableModelTsFile; private int isTableModelTsFileReliableIndex = -1; + private final File schemaEvolutionFile; + private EvolvedSchema evolvedSchema; // User specified configs private final int databaseLevel; private String databaseForTableData; - private final boolean isAsyncLoad; + private boolean isAsyncLoad; private final boolean isVerifySchema; private final boolean isAutoCreateDatabase; private final boolean isDeleteAfterLoad; @@ -121,6 +132,9 @@ public class LoadTsFileAnalyzer implements AutoCloseable { private TreeSchemaAutoCreatorAndVerifier treeSchemaAutoCreatorAndVerifier; private LoadTsFileTableSchemaCache tableSchemaCache; + // for loading iotdb datanode dir + Map> databaseRegionTsFileManagers; + public LoadTsFileAnalyzer( LoadTsFileStatement loadTsFileStatement, boolean isGeneratedByPipe, MPPQueryContext context) { this.context = context; @@ -132,8 +146,10 @@ public LoadTsFileAnalyzer( this.isGeneratedByPipe = isGeneratedByPipe; this.tsFiles = loadTsFileStatement.getTsFiles(); + this.originalFile = loadTsFileStatement.getFile(); this.isMiniTsFile = new 
ArrayList<>(Collections.nCopies(this.tsFiles.size(), false)); this.isTableModelTsFile = new ArrayList<>(Collections.nCopies(this.tsFiles.size(), false)); + this.schemaEvolutionFile = loadTsFileStatement.getSchemaEvolutionFile(); this.databaseLevel = loadTsFileStatement.getDatabaseLevel(); this.databaseForTableData = loadTsFileStatement.getDatabase(); @@ -156,8 +172,10 @@ public LoadTsFileAnalyzer( this.isGeneratedByPipe = isGeneratedByPipe; this.tsFiles = loadTsFileTableStatement.getTsFiles(); + this.originalFile = new File(loadTsFileTableStatement.getFilePath()); this.isMiniTsFile = new ArrayList<>(Collections.nCopies(this.tsFiles.size(), false)); this.isTableModelTsFile = new ArrayList<>(Collections.nCopies(this.tsFiles.size(), false)); + this.schemaEvolutionFile = loadTsFileTableStatement.getSchemaEvolutionFile(); this.databaseLevel = loadTsFileTableStatement.getDatabaseLevel(); this.databaseForTableData = loadTsFileTableStatement.getDatabase(); @@ -200,6 +218,13 @@ public IAnalysis analyzeFileByFile(IAnalysis analysis) { } try { + if (schemaEvolutionFile != null && schemaEvolutionFile.exists()) { + + SchemaEvolutionFile sevoFile = + new SchemaEvolutionFile(schemaEvolutionFile.getAbsolutePath()); + evolvedSchema = sevoFile.readAsSchema(); + } + if (!doAnalyzeFileByFile(analysis)) { return analysis; } @@ -266,9 +291,158 @@ private boolean checkBeforeAnalyzeFileByFile(IAnalysis analysis) { "LoadTsFileAnalyzer: Current datanode is read only, will try to convert to tablets and insert later."); } + File inputFile; + if (isTableModelStatement) { + inputFile = new File(loadTsFileTableStatement.getFilePath()); + } else { + inputFile = loadTsFileTreeStatement.getFile(); + } + + if (LOGGER.isInfoEnabled()) { + LOGGER.info("LoadTsFileAnalyzer: Input file: {}", inputFile.getAbsolutePath()); + } + if (inputFile.getName().equals("datanode")) { + LOGGER.info("LoadTsFileAnalyzer: Load TsFile from datanode file."); + analyzeIoTDBDirectory(inputFile); + Set databases = 
databaseRegionTsFileManagers.keySet(); + for (String database : databases) { + try { + getOrCreateTableSchemaCache().autoCreateTableDatabaseIfAbsent(database); + } catch (LoadAnalyzeException e) { + throw new SemanticException( + "Cannot create database " + database + " when loading datanode directory"); + } + } + } + + if (schemaEvolutionFile != null && isAsyncLoad) { + LOGGER.warn( + "Cannot use schema evolution file when loading from datanode directory asynchronously, switching to sync load"); + isAsyncLoad = false; + } + return true; } + private void analyzeIoTDBDirectory(File dataNodeDirectory) { + if (!dataNodeDirectory.exists() || !dataNodeDirectory.isDirectory()) { + return; + } + File systemDirectory = new File(dataNodeDirectory, "system"); + if (!systemDirectory.exists() || !systemDirectory.isDirectory()) { + LOGGER.info( + "LoadTsFileAnalyzer: No system directory found in datanode directory, treat as normal directory."); + return; + } + + File databasesDirectory = new File(systemDirectory, "databases"); + if (!databasesDirectory.exists() || !databasesDirectory.isDirectory()) { + LOGGER.info( + "LoadTsFileAnalyzer: No databases directory found in datanode directory, treat as normal directory."); + return; + } + + String[] databases = databasesDirectory.list(); + if (databases == null) { + LOGGER.info( + "LoadTsFileAnalyzer: No databases found in datanode directory, treat as normal directory."); + return; + } + + databaseRegionTsFileManagers = new HashMap<>(); + for (String database : databases) { + File databaseDirectory = new File(databasesDirectory, database); + String[] regions = databaseDirectory.list(); + if (regions == null) { + LOGGER.info("LoadTsFileAnalyzer: No region directory found in database {}.", database); + continue; + } + + for (String region : regions) { + File regionDirectory = new File(databaseDirectory, region); + String[] timePartitions = regionDirectory.list(); + if (timePartitions == null) { + LOGGER.info( + "LoadTsFileAnalyzer: No 
partition directory found in region {}-{}.", + database, + region); + continue; + } + + int regionId = Integer.parseInt(region); + + TsFileManager tsFileManager = new TsFileManager(database, region); + databaseRegionTsFileManagers + .computeIfAbsent(database, k -> new HashMap<>()) + .put(regionId, tsFileManager); + for (String timePartition : timePartitions) { + File timePartitionDir = new File(regionDirectory, timePartition); + File filesetsDir = new File(timePartitionDir, "filesets"); + if (!filesetsDir.exists()) { + LOGGER.info( + "LoadTsFileAnalyzer: No filesets directory found in partition {}-{}-{}.", + database, + region, + timePartition); + continue; + } + + String[] filesets = filesetsDir.list(); + if (filesets == null) { + continue; + } + + for (String fileset : filesets) { + File filesetDir = new File(filesetsDir, fileset); + String[] sevos = filesetDir.list(); + if (sevos == null || sevos.length == 0) { + continue; + } + + long endFileVersion = Long.parseLong(fileset); + long partitionId = Long.parseLong(timePartition); + TsFileSet tsFileSet = + new TsFileSet(endFileVersion, filesetsDir.getAbsolutePath(), true); + tsFileManager.addTsFileSet(tsFileSet, partitionId); + } + } + } + } + + LOGGER.info("databaseRegionTsFileManagers: {}", databaseRegionTsFileManagers); + + if (schemaEvolutionFile != null) { + if (databaseRegionTsFileManagers.isEmpty()) { + // no sevo found, treat as normal load + databaseRegionTsFileManagers = null; + } else { + throw new SemanticException( + "Schema evolution file is not supported when loading from datanode directory, if you wish " + + "to use specified schema evolution file and ignore ones in the datanode directory, " + + "please rename the datanode directory to any other one."); + } + } + + String userDatabase; + if (isTableModelStatement) { + userDatabase = loadTsFileTableStatement.getDatabase(); + } else { + userDatabase = loadTsFileTreeStatement.getDatabase(); + } + if (userDatabase != null && isLoadingIoTDBDir()) { + 
throw new SemanticException( + "Database is not supported when loading from datanode directory, if you wish " + + "to use specified database and ignore ones in the datanode directory, " + + "please rename the datanode directory to any other one."); + } + + if (isTableModelStatement) { + loadTsFileTableStatement.setDatabaseRegionTsFileManagers(databaseRegionTsFileManagers); + } else { + loadTsFileTreeStatement.setDatabaseRegionTsFileManagers(databaseRegionTsFileManagers); + } + } + private boolean doAsyncLoad(final IAnalysis analysis) { final long startTime = System.nanoTime(); try { @@ -291,13 +465,24 @@ private boolean doAsyncLoad(final IAnalysis analysis) { tabletConversionThresholdBytes, isGeneratedByPipe); - if (LoadUtil.loadTsFileAsyncToActiveDir(tsFiles, activeLoadAttributes, isDeleteAfterLoad)) { - analysis.setFinishQueryAfterAnalyze(true); - setRealStatement(analysis); - return true; + if (!isLoadingIoTDBDir()) { + if (LoadUtil.loadTsFileAsyncToActiveDir(tsFiles, activeLoadAttributes, isDeleteAfterLoad)) { + analysis.setFinishQueryAfterAnalyze(true); + setRealStatement(analysis); + return true; + } + LOGGER.info("Async Load TsFile has failed, and is now trying to load sync"); + return false; + } else { + if (LoadUtil.loadDatanodeDirAsyncToActiveDir( + originalFile, activeLoadAttributes, isDeleteAfterLoad)) { + analysis.setFinishQueryAfterAnalyze(true); + setRealStatement(analysis); + return true; + } + LOGGER.info("Async Load datanode dir has failed, and is now trying to load sync"); + return false; } - LOGGER.info("Async Load has failed, and is now trying to load sync"); - return false; } finally { LoadTsFileCostMetricsSet.getInstance() .recordPhaseTimeCost(ANALYSIS_ASYNC_MOVE, System.nanoTime() - startTime); @@ -522,11 +707,29 @@ private void doAnalyzeSingleTreeFile( addWritePointCount(writePointCount); } + private EvolvedSchema getEvolvedSchema(File tsFile) { + if (evolvedSchema != null) { + return evolvedSchema; + } + + if (isLoadingIoTDBDir()) { + 
TsFileID tsFileID = new TsFileID(tsFile.getAbsolutePath()); + TsFileManager tsFileManager = + databaseRegionTsFileManagers.get(tsFileID.databaseName).get(tsFileID.regionId); + List tsFileSets = + tsFileManager.getTsFileSet( + tsFileID.timePartitionId, tsFileID.fileVersion, Long.MAX_VALUE); + + return TsFileSet.getMergedEvolvedSchema(tsFileSets); + } + return null; + } + private void doAnalyzeSingleTableFile( final File tsFile, final TsFileSequenceReader reader, final TsFileSequenceReaderTimeseriesMetadataIterator timeseriesMetadataIterator, - final Map tableSchemaMap) + Map tableSchemaMap) throws IOException, LoadAnalyzeException { // construct tsfile resource final TsFileResource tsFileResource = constructTsFileResource(reader, tsFile); @@ -544,19 +747,42 @@ private void doAnalyzeSingleTableFile( } else { loadTsFileTreeStatement.setDatabase(dbName.get()); } - } else { + } else if (!isLoadingIoTDBDir()) { throw new SemanticException(DATABASE_NOT_SPECIFIED); } } - getOrCreateTableSchemaCache().setDatabase(databaseForTableData); - getOrCreateTableSchemaCache().setTableSchemaMap(tableSchemaMap); + String databaseToUse = + isLoadingIoTDBDir() ? 
tsFileResource.getDatabaseName() : databaseForTableData; + + LOGGER.info("Table schemas before rewriting to final: {}", tableSchemaMap); + + EvolvedSchema currEvolvedSchema = getEvolvedSchema(tsFile); + LOGGER.info("Schema evolution {}", currEvolvedSchema); + if (currEvolvedSchema != null) { + LOGGER.info("Rewriting table schemas with {}", currEvolvedSchema); + tableSchemaMap = currEvolvedSchema.rewriteToFinal(tableSchemaMap); + LOGGER.info("Table schemas after rewriting to final: {}", tableSchemaMap); + } + getOrCreateTableSchemaCache().setTableSchemaMap(databaseToUse, tableSchemaMap); getOrCreateTableSchemaCache().setCurrentModificationsAndTimeIndex(tsFileResource, reader); while (timeseriesMetadataIterator.hasNext()) { - final Map> device2TimeseriesMetadata = + Map> device2TimeseriesMetadata = timeseriesMetadataIterator.next(); + if (currEvolvedSchema != null) { + device2TimeseriesMetadata = + device2TimeseriesMetadata.entrySet().stream() + .collect( + Collectors.toMap( + e -> currEvolvedSchema.rewriteToFinal(e.getKey()), + e -> { + currEvolvedSchema.rewriteToFinal(e.getKey().getTableName(), e.getValue()); + return e.getValue(); + })); + } + // Update time index no matter if resource file exists or not, because resource file may be // untrusted TsFileResourceUtils.updateTsFileResource( @@ -566,7 +792,7 @@ private void doAnalyzeSingleTableFile( getOrCreateTableSchemaCache().setCurrentTimeIndex(tsFileResource.getTimeIndex()); for (IDeviceID deviceId : device2TimeseriesMetadata.keySet()) { - getOrCreateTableSchemaCache().autoCreateAndVerify(deviceId); + getOrCreateTableSchemaCache().autoCreateAndVerify(databaseToUse, deviceId); } writePointCount += getWritePointCount(device2TimeseriesMetadata); @@ -772,6 +998,10 @@ private void getFileModelInfoBeforeTabletConversion() throws IOException { } } + private boolean isLoadingIoTDBDir() { + return databaseRegionTsFileManagers != null; + } + @Override public void close() throws Exception { if 
(treeSchemaAutoCreatorAndVerifier != null) { diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/load/LoadTsFileTableSchemaCache.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/load/LoadTsFileTableSchemaCache.java index 79ede0f459e58..a73d785f790e9 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/load/LoadTsFileTableSchemaCache.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/load/LoadTsFileTableSchemaCache.java @@ -66,10 +66,12 @@ import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.Map.Entry; import java.util.Objects; import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; +import java.util.stream.Stream; import static org.apache.iotdb.commons.schema.MemUsageUtil.computeStringMemUsage; import static org.apache.iotdb.db.queryengine.plan.execution.config.TableConfigTaskVisitor.validateDatabaseName; @@ -92,13 +94,13 @@ public class LoadTsFileTableSchemaCache { private final LoadTsFileMemoryBlock block; - private String database; - private boolean needToCreateDatabase; - private Map tableSchemaMap; + // database -> table name -> TableSchema + private final Map> + databaseTableSchemaMap = new HashMap<>(); private final Metadata metadata; private final MPPQueryContext context; - private Map> currentBatchTable2Devices; + private Map>> currentBatchTable2Devices; // tableName -> Pair private Map>> tableTagColumnMapper = new HashMap<>(); @@ -113,6 +115,8 @@ public class LoadTsFileTableSchemaCache { private int currentBatchDevicesCount = 0; private final AtomicBoolean needDecode4DifferentTimeColumn = new AtomicBoolean(false); + private final boolean needToCreateDatabase; + private final Set createdDatabases = new HashSet<>(); public LoadTsFileTableSchemaCache( final Metadata metadata, final MPPQueryContext 
context, final boolean needToCreateDatabase) @@ -127,16 +131,14 @@ public LoadTsFileTableSchemaCache( this.needToCreateDatabase = needToCreateDatabase; } - public void setDatabase(final String database) { - this.database = database; - } - public void setTableSchemaMap( + final String database, final Map tableSchemaMap) { - this.tableSchemaMap = tableSchemaMap; + this.databaseTableSchemaMap.put(database, tableSchemaMap); } - public void autoCreateAndVerify(final IDeviceID device) throws LoadAnalyzeException { + public void autoCreateAndVerify(final String database, final IDeviceID device) + throws LoadAnalyzeException { try { if (ModificationUtils.isDeviceDeletedByMods(currentModifications, currentTimeIndex, device)) { return; @@ -149,7 +151,7 @@ public void autoCreateAndVerify(final IDeviceID device) throws LoadAnalyzeExcept } try { - createTableAndDatabaseIfNecessary(device.getTableName()); + createTableAndDatabaseIfNecessary(database, device.getTableName()); } catch (final Exception e) { if (IoTDBDescriptor.getInstance().getConfig().isSkipFailedTableSchemaCheck()) { LOGGER.info( @@ -161,19 +163,22 @@ public void autoCreateAndVerify(final IDeviceID device) throws LoadAnalyzeExcept } // TODO: add permission check and record auth cost - addDevice(device); + addDevice(database, device); if (shouldFlushDevices()) { flush(); } } - private void addDevice(final IDeviceID device) { + private void addDevice(final String database, final IDeviceID device) { final String tableName = device.getTableName(); long memoryUsageSizeInBytes = 0; if (!currentBatchTable2Devices.containsKey(tableName)) { memoryUsageSizeInBytes += computeStringMemUsage(tableName); } - if (currentBatchTable2Devices.computeIfAbsent(tableName, k -> new HashSet<>()).add(device)) { + if (currentBatchTable2Devices + .computeIfAbsent(database, db -> new HashMap<>()) + .computeIfAbsent(tableName, k -> new HashSet<>()) + .add(device)) { memoryUsageSizeInBytes += device.ramBytesUsed(); currentBatchDevicesCount++; 
} @@ -209,12 +214,19 @@ private void doAutoCreateAndVerify() throws SemanticException { } private Iterator getTableSchemaValidationIterator() { - return currentBatchTable2Devices.keySet().stream() - .map(this::createTableSchemaValidation) - .iterator(); + Stream stream = Stream.empty(); + for (Entry>> entry : currentBatchTable2Devices.entrySet()) { + String database = entry.getKey(); + Map> tableMap = entry.getValue(); + Stream iTableDeviceSchemaValidationStream = + tableMap.keySet().stream().map(s -> createTableSchemaValidation(database, s)); + stream = Stream.concat(stream, iTableDeviceSchemaValidationStream); + } + return stream.iterator(); } - private ITableDeviceSchemaValidation createTableSchemaValidation(String tableName) { + private ITableDeviceSchemaValidation createTableSchemaValidation( + String database, String tableName) { return new ITableDeviceSchemaValidation() { @Override @@ -237,7 +249,7 @@ public List getDeviceIdList() { LOGGER.warn("Failed to find tag column mapping for table {}", tableName); } - for (final IDeviceID device : currentBatchTable2Devices.get(tableName)) { + for (final IDeviceID device : currentBatchTable2Devices.get(database).get(tableName)) { if (Objects.isNull(tagColumnCountAndMapper)) { devices.add(Arrays.copyOfRange(device.getSegments(), 1, device.getSegments().length)); continue; @@ -265,7 +277,8 @@ public List getAttributeColumnNameList() { @Override public List getAttributeValueList() { - return Collections.nCopies(currentBatchTable2Devices.get(tableName).size(), new Object[0]); + return Collections.nCopies( + currentBatchTable2Devices.get(database).get(tableName).size(), new Object[0]); } }; } @@ -278,8 +291,10 @@ private static Object[] truncateNullSuffixesOfDeviceIdSegments(Object[] segments return Arrays.copyOf(segments, lastNonNullIndex + 1); } - public void createTableAndDatabaseIfNecessary(final String tableName) + public void createTableAndDatabaseIfNecessary(final String database, final String tableName) throws 
LoadAnalyzeException { + Map tableSchemaMap = + databaseTableSchemaMap.get(database); final org.apache.tsfile.file.metadata.TableSchema schema = tableSchemaMap.remove(tableName); if (Objects.isNull(schema)) { return; @@ -292,9 +307,9 @@ public void createTableAndDatabaseIfNecessary(final String tableName) new QualifiedObjectName(database, tableName), context); - if (needToCreateDatabase) { + if (needToCreateDatabase && !createdDatabases.contains(database)) { autoCreateTableDatabaseIfAbsent(database); - needToCreateDatabase = false; + createdDatabases.add(database); } final org.apache.iotdb.db.queryengine.plan.relational.metadata.TableSchema fileSchema = org.apache.iotdb.db.queryengine.plan.relational.metadata.TableSchema.fromTsFileTableSchema( @@ -317,7 +332,7 @@ public boolean isNeedDecode4DifferentTimeColumn() { return needDecode4DifferentTimeColumn.get(); } - private void autoCreateTableDatabaseIfAbsent(final String database) throws LoadAnalyzeException { + public void autoCreateTableDatabaseIfAbsent(final String database) throws LoadAnalyzeException { validateDatabaseName(database); if (DataNodeTableCache.getInstance().isDatabaseExist(database)) { return; diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/execution/config/TableConfigTaskVisitor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/execution/config/TableConfigTaskVisitor.java index aa1f43d95ea42..64b2504c55c0f 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/execution/config/TableConfigTaskVisitor.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/execution/config/TableConfigTaskVisitor.java @@ -145,6 +145,7 @@ import org.apache.iotdb.db.queryengine.plan.relational.sql.ast.AlterDB; import org.apache.iotdb.db.queryengine.plan.relational.sql.ast.AlterPipe; import org.apache.iotdb.db.queryengine.plan.relational.sql.ast.AstVisitor; +import 
org.apache.iotdb.db.queryengine.plan.relational.sql.ast.BooleanLiteral; import org.apache.iotdb.db.queryengine.plan.relational.sql.ast.ClearCache; import org.apache.iotdb.db.queryengine.plan.relational.sql.ast.ColumnDefinition; import org.apache.iotdb.db.queryengine.plan.relational.sql.ast.CreateDB; @@ -175,6 +176,7 @@ import org.apache.iotdb.db.queryengine.plan.relational.sql.ast.Expression; import org.apache.iotdb.db.queryengine.plan.relational.sql.ast.ExtendRegion; import org.apache.iotdb.db.queryengine.plan.relational.sql.ast.Flush; +import org.apache.iotdb.db.queryengine.plan.relational.sql.ast.Identifier; import org.apache.iotdb.db.queryengine.plan.relational.sql.ast.KillQuery; import org.apache.iotdb.db.queryengine.plan.relational.sql.ast.Literal; import org.apache.iotdb.db.queryengine.plan.relational.sql.ast.LoadConfiguration; @@ -271,12 +273,13 @@ import java.util.Optional; import java.util.Set; import java.util.function.Predicate; +import java.util.stream.Collectors; import static org.apache.iotdb.commons.conf.IoTDBConstant.MAX_DATABASE_NAME_LENGTH; import static org.apache.iotdb.commons.conf.IoTDBConstant.TTL_INFINITE; import static org.apache.iotdb.commons.executable.ExecutableManager.getUnTrustedUriErrorMsg; import static org.apache.iotdb.commons.executable.ExecutableManager.isUriTrusted; -import static org.apache.iotdb.commons.schema.table.TsTable.TABLE_ALLOWED_PROPERTIES; +import static org.apache.iotdb.commons.schema.table.TsTable.ALLOW_ALTER_NAME_PROPERTY; import static org.apache.iotdb.commons.schema.table.TsTable.TIME_COLUMN_NAME; import static org.apache.iotdb.commons.schema.table.TsTable.TTL_PROPERTY; import static org.apache.iotdb.db.queryengine.plan.execution.config.metadata.relational.CreateDBTask.DATA_REGION_GROUP_NUM_KEY; @@ -575,7 +578,11 @@ private Pair parseTable4CreateTableOrView( final TsTable table = new TsTable(tableName); - table.setProps(convertPropertiesToMap(node.getProperties(), false)); + Map properties = 
convertPropertiesToMap(node.getProperties(), false); + // new tables' names can be altered by default + properties.putIfAbsent( + ALLOW_ALTER_NAME_PROPERTY, String.valueOf(TsTable.ALLOW_ALTER_NAME_DEFAULT)); + table.setProps(properties); if (Objects.nonNull(node.getComment())) { table.addProp(TsTable.COMMENT_KEY, node.getComment()); } @@ -751,17 +758,22 @@ protected IConfigTask visitRenameColumn(final RenameColumn node, final MPPQueryC accessControl.checkCanAlterTable( context.getSession().getUserName(), new QualifiedObjectName(database, tableName), context); - final String oldName = node.getSource().getValue(); - final String newName = node.getTarget().getValue(); - if (oldName.equals(newName)) { + final List oldNames = + node.getSources().stream().map(Identifier::getValue).collect(Collectors.toList()); + final List newNames = + node.getTargets().stream().map(Identifier::getValue).collect(Collectors.toList()); + if (oldNames.equals(newNames)) { throw new SemanticException("The column's old name shall not be equal to the new one."); } + if (!Collections.disjoint(oldNames, newNames)) { + throw new SemanticException("The old names must be disjoint with the new names"); + } return new AlterTableRenameColumnTask( database, tableName, - node.getSource().getValue(), - node.getTarget().getValue(), + oldNames, + newNames, context.getQueryId().getId(), node.tableIfExists(), node.columnIfExists(), @@ -799,10 +811,15 @@ protected IConfigTask visitSetProperties( accessControl.checkCanAlterTable( context.getSession().getUserName(), new QualifiedObjectName(database, tableName), context); + Map properties = convertPropertiesToMap(node.getProperties(), true); + if (properties.containsKey(ALLOW_ALTER_NAME_PROPERTY)) { + throw new SemanticException( + "The property " + ALLOW_ALTER_NAME_PROPERTY + " cannot be altered."); + } return new AlterTableSetPropertiesTask( database, tableName, - convertPropertiesToMap(node.getProperties(), true), + properties, context.getQueryId().getId(), 
node.ifExists(), node.getType() == SetProperties.Type.TREE_VIEW); @@ -879,7 +896,7 @@ private Map convertPropertiesToMap( final Map map = new HashMap<>(); for (final Property property : propertyList) { final String key = property.getName().getValue().toLowerCase(Locale.ENGLISH); - if (TABLE_ALLOWED_PROPERTIES.contains(key)) { + if (TTL_PROPERTY.equals(key)) { if (!property.isSetToDefault()) { final Expression value = property.getNonDefaultValue(); final Optional strValue = parseStringFromLiteralIfBinary(value); @@ -896,6 +913,27 @@ private Map convertPropertiesToMap( } else if (serializeDefault) { map.put(key, null); } + } else if (ALLOW_ALTER_NAME_PROPERTY.equals(key)) { + if (property.isSetToDefault()) { + // no such property, the table is from an older version and its table name + // cannot be altered + map.put(key, "false"); + } else { + Expression value = property.getNonDefaultValue(); + final Optional strValue = parseStringFromLiteralIfBinary(value); + if (strValue.isPresent()) { + try { + boolean ignored = Boolean.parseBoolean(strValue.get()); + } catch (Exception e) { + throw new SemanticException( + ALLOW_ALTER_NAME_PROPERTY + " value must be a boolean, but now is: " + value); + } + map.put(key, strValue.get()); + continue; + } + // TODO: support validation for other properties + map.put(key, String.valueOf(parseBooleanFromLiteral(value, ALLOW_ALTER_NAME_PROPERTY))); + } } else { throw new SemanticException("Table property '" + key + "' is currently not allowed."); } @@ -1093,6 +1131,18 @@ private long parseLongFromLiteral(final Object value, final String name) { return parsedValue; } + private boolean parseBooleanFromLiteral(final Object value, final String name) { + if (!(value instanceof BooleanLiteral)) { + throw new SemanticException( + name + + " value must be a BooleanLiteral, but now is " + + (Objects.nonNull(value) ? 
value.getClass().getSimpleName() : null) + + ", value: " + + value); + } + return ((BooleanLiteral) value).getParsedValue(); + } + private int parseIntFromLiteral(final Object value, final String name) { if (!(value instanceof LongLiteral)) { throw new SemanticException( diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/execution/config/executor/ClusterConfigTaskExecutor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/execution/config/executor/ClusterConfigTaskExecutor.java index cb45757d3be0b..6d1137a8e4009 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/execution/config/executor/ClusterConfigTaskExecutor.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/execution/config/executor/ClusterConfigTaskExecutor.java @@ -92,6 +92,7 @@ import org.apache.iotdb.commons.udf.service.UDFExecutableManager; import org.apache.iotdb.commons.utils.CommonDateTimeUtils; import org.apache.iotdb.commons.utils.FileUtils; +import org.apache.iotdb.commons.utils.IOUtils; import org.apache.iotdb.commons.utils.PathUtils; import org.apache.iotdb.commons.utils.SerializeUtils; import org.apache.iotdb.commons.utils.TimePartitionUtils; @@ -4451,8 +4452,8 @@ public SettableFuture alterColumnDataType( public SettableFuture alterTableRenameColumn( final String database, final String tableName, - final String oldName, - final String newName, + final List oldNames, + final List newNames, final String queryId, final boolean tableIfExists, final boolean columnIfExists, @@ -4463,8 +4464,8 @@ public SettableFuture alterTableRenameColumn( final ByteArrayOutputStream stream = new ByteArrayOutputStream(); try { - ReadWriteIOUtils.write(oldName, stream); - ReadWriteIOUtils.write(newName, stream); + IOUtils.write(oldNames, stream); + IOUtils.write(newNames, stream); } catch (final IOException ignored) { // ByteArrayOutputStream won't throw IOException } diff --git 
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/execution/config/executor/IConfigTaskExecutor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/execution/config/executor/IConfigTaskExecutor.java index 9e02ba6cff7f4..ceaed77894b8c 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/execution/config/executor/IConfigTaskExecutor.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/execution/config/executor/IConfigTaskExecutor.java @@ -386,8 +386,8 @@ SettableFuture alterColumnDataType( SettableFuture alterTableRenameColumn( final String database, final String tableName, - final String oldName, - final String newName, + final List oldNames, + final List newNames, final String queryId, final boolean tableIfExists, final boolean columnIfExists, diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/execution/config/metadata/relational/AlterTableRenameColumnTask.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/execution/config/metadata/relational/AlterTableRenameColumnTask.java index 79cea6c05e08a..1f785d537fada 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/execution/config/metadata/relational/AlterTableRenameColumnTask.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/execution/config/metadata/relational/AlterTableRenameColumnTask.java @@ -24,26 +24,28 @@ import com.google.common.util.concurrent.ListenableFuture; +import java.util.List; + public class AlterTableRenameColumnTask extends AbstractAlterOrDropTableTask { - private final String oldName; + private final List oldNames; - private final String newName; + private final List newNames; private final boolean columnIfExists; public AlterTableRenameColumnTask( final String database, final String tableName, - final String oldName, - final String newName, + final List oldNames, + 
final List newNames, final String queryId, final boolean tableIfExists, final boolean columnIfExists, final boolean view) { super(database, tableName, queryId, tableIfExists, view); - this.oldName = oldName; - this.newName = newName; + this.oldNames = oldNames; + this.newNames = newNames; this.columnIfExists = columnIfExists; } @@ -51,6 +53,6 @@ public AlterTableRenameColumnTask( public ListenableFuture execute(final IConfigTaskExecutor configTaskExecutor) throws InterruptedException { return configTaskExecutor.alterTableRenameColumn( - database, tableName, oldName, newName, queryId, tableIfExists, columnIfExists, view); + database, tableName, oldNames, newNames, queryId, tableIfExists, columnIfExists, view); } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/LogicalPlanVisitor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/LogicalPlanVisitor.java index c3ac21641d181..2215a8ccfca68 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/LogicalPlanVisitor.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/LogicalPlanVisitor.java @@ -554,7 +554,10 @@ public PlanNode visitLoadFile( loadTsFileStatement.getResources(), isTableModel, loadTsFileStatement.getDatabase(), - loadTsFileStatement.isNeedDecode4TimeColumn()); + loadTsFileStatement.isNeedDecode4TimeColumn(), + loadTsFileStatement.getSchemaEvolutionFile(), + loadTsFileStatement.getDatabaseRegionTsFileManagers(), + loadTsFileStatement.getFile()); } @Override diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/distribution/SourceRewriter.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/distribution/SourceRewriter.java index bc6475b23236f..0d3089cad3b7e 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/distribution/SourceRewriter.java +++ 
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/distribution/SourceRewriter.java @@ -24,6 +24,7 @@ import org.apache.iotdb.common.rpc.thrift.TTimePartitionSlot; import org.apache.iotdb.commons.exception.IllegalPathException; import org.apache.iotdb.commons.partition.DataPartition; +import org.apache.iotdb.commons.partition.executor.SeriesPartitionExecutor.SeriesPartitionKey; import org.apache.iotdb.commons.path.PartialPath; import org.apache.iotdb.commons.path.PathPatternTree; import org.apache.iotdb.commons.schema.SchemaConstant; @@ -84,6 +85,7 @@ import org.apache.iotdb.db.queryengine.plan.statement.component.OrderByKey; import org.apache.iotdb.db.queryengine.plan.statement.component.Ordering; import org.apache.iotdb.db.queryengine.plan.statement.component.SortItem; +import org.apache.iotdb.db.utils.CommonUtils; import org.apache.iotdb.db.utils.constant.SqlConstant; import com.google.common.collect.ImmutableList; @@ -1442,7 +1444,10 @@ private List getDeviceReplicaSets( Map> slot2ReplicasMap = cache.computeIfAbsent(db, k -> new HashMap<>()); - TSeriesPartitionSlot tSeriesPartitionSlot = dataPartition.calculateDeviceGroupId(deviceID); + + SeriesPartitionKey seriesPartitionKey = CommonUtils.getSeriesPartitionKey(deviceID, db, true); + TSeriesPartitionSlot tSeriesPartitionSlot = + dataPartition.calculateDeviceGroupId(seriesPartitionKey); Map>> finalSeriesPartitionMap = seriesPartitionMap; diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/PlanNodeType.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/PlanNodeType.java index 55aaefe8be618..c5865ed1e6b84 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/PlanNodeType.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/PlanNodeType.java @@ -60,6 +60,7 @@ import 
org.apache.iotdb.db.queryengine.plan.planner.plan.node.metadata.write.view.DeleteLogicalViewNode; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.metadata.write.view.RollbackLogicalViewBlackListNode; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.pipe.PipeEnrichedDeleteDataNode; +import org.apache.iotdb.db.queryengine.plan.planner.plan.node.pipe.PipeEnrichedEvolveSchemaNode; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.pipe.PipeEnrichedInsertNode; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.pipe.PipeEnrichedNonWritePlanNode; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.pipe.PipeEnrichedWritePlanNode; @@ -107,6 +108,7 @@ import org.apache.iotdb.db.queryengine.plan.planner.plan.node.source.TimeseriesRegionScanNode; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.ContinuousSameSearchIndexSeparatorNode; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.DeleteDataNode; +import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.EvolveSchemaNode; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertMultiTabletsNode; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertRowNode; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertRowsNode; @@ -329,6 +331,8 @@ public enum PlanNodeType { RELATIONAL_INSERT_ROWS((short) 2002), RELATIONAL_DELETE_DATA((short) 2003), OBJECT_FILE_NODE((short) 2004), + EVOLVE_SCHEMA((short) 2005), + PIPE_ENRICHED_EVOLVE_SCHEMA((short) 2006), ; public static final int BYTES = Short.BYTES; @@ -374,6 +378,10 @@ public static PlanNode deserializeFromWAL(DataInputStream stream) throws IOExcep return RelationalDeleteDataNode.deserializeFromWAL(stream); case 2004: return ObjectNode.deserializeFromWAL(stream); + case 2005: + return EvolveSchemaNode.deserializeFromWAL(stream); + case 2006: + return PipeEnrichedEvolveSchemaNode.deserializeFromWAL(stream); default: throw 
new IllegalArgumentException("Invalid node type: " + nodeType); } @@ -402,6 +410,10 @@ public static PlanNode deserializeFromWAL(ByteBuffer buffer) { return RelationalDeleteDataNode.deserializeFromWAL(buffer); case 2004: return ObjectNode.deserialize(buffer); + case 2005: + return EvolveSchemaNode.deserializeFromWAL(buffer); + case 2006: + return PipeEnrichedEvolveSchemaNode.deserializeFromWAL(buffer); default: throw new IllegalArgumentException("Invalid node type: " + nodeType); } @@ -737,6 +749,10 @@ public static PlanNode deserialize(ByteBuffer buffer, short nodeType) { return RelationalDeleteDataNode.deserialize(buffer); case 2004: return ObjectNode.deserialize(buffer); + case 2005: + return EvolveSchemaNode.deserialize(buffer); + case 2006: + return PipeEnrichedEvolveSchemaNode.deserialize(buffer); default: throw new IllegalArgumentException("Invalid node type: " + nodeType); } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/PlanVisitor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/PlanVisitor.java index 44f1cd8bc1f67..bf54fd17c72df 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/PlanVisitor.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/PlanVisitor.java @@ -57,6 +57,7 @@ import org.apache.iotdb.db.queryengine.plan.planner.plan.node.metadata.write.view.DeleteLogicalViewNode; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.metadata.write.view.RollbackLogicalViewBlackListNode; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.pipe.PipeEnrichedDeleteDataNode; +import org.apache.iotdb.db.queryengine.plan.planner.plan.node.pipe.PipeEnrichedEvolveSchemaNode; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.pipe.PipeEnrichedInsertNode; import 
org.apache.iotdb.db.queryengine.plan.planner.plan.node.pipe.PipeEnrichedNonWritePlanNode; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.pipe.PipeEnrichedWritePlanNode; @@ -111,6 +112,7 @@ import org.apache.iotdb.db.queryengine.plan.planner.plan.node.source.SourceNode; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.source.TimeseriesRegionScanNode; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.DeleteDataNode; +import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.EvolveSchemaNode; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertMultiTabletsNode; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertRowNode; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertRowsNode; @@ -645,6 +647,10 @@ public R visitDeleteData(RelationalDeleteDataNode node, C context) { return visitPlan(node, context); } + public R visitEvolveSchemaNode(EvolveSchemaNode node, C context) { + return visitPlan(node, context); + } + public R visitWriteObjectFile(ObjectNode node, C context) { return visitPlan(node, context); } @@ -661,6 +667,10 @@ public R visitPipeEnrichedDeleteDataNode(PipeEnrichedDeleteDataNode node, C cont return visitPlan(node, context); } + public R visitPipeEnrichedEvolveSchemaNode(PipeEnrichedEvolveSchemaNode node, C context) { + return visitPlan(node, context); + } + public R visitPipeEnrichedWritePlanNode(PipeEnrichedWritePlanNode node, C context) { return visitPlan(node, context); } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/load/LoadSingleTsFileNode.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/load/LoadSingleTsFileNode.java index c8170a4880a08..a8c0bd8dfb155 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/load/LoadSingleTsFileNode.java +++ 
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/load/LoadSingleTsFileNode.java @@ -23,6 +23,7 @@ import org.apache.iotdb.common.rpc.thrift.TEndPoint; import org.apache.iotdb.common.rpc.thrift.TRegionReplicaSet; import org.apache.iotdb.common.rpc.thrift.TTimePartitionSlot; +import org.apache.iotdb.commons.utils.FileUtils; import org.apache.iotdb.commons.utils.TimePartitionUtils; import org.apache.iotdb.db.conf.IoTDBDescriptor; import org.apache.iotdb.db.queryengine.plan.analyze.IAnalysis; @@ -30,7 +31,11 @@ import org.apache.iotdb.db.queryengine.plan.planner.plan.node.PlanNodeId; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.WritePlanNode; import org.apache.iotdb.db.storageengine.dataregion.modification.ModificationFile; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileManager; import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.EvolvedSchema; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.SchemaEvolutionFile; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.fileset.TsFileSet; import org.apache.iotdb.db.storageengine.load.util.LoadUtil; import org.apache.tsfile.exception.NotImplementedException; @@ -62,9 +67,15 @@ public class LoadSingleTsFileNode extends WritePlanNode { private final boolean deleteAfterLoad; private final long writePointCount; private boolean needDecodeTsFile; + private final File schemaEvolutionFile; + private final Boolean isLastFile; private TRegionReplicaSet localRegionReplicaSet; + // for loading IoTDB datanode dir + private TsFileManager managerForLoadingIoTDBDir = null; + private final File originalInputFile; + public LoadSingleTsFileNode( final PlanNodeId id, final TsFileResource resource, @@ -72,7 +83,11 @@ public LoadSingleTsFileNode( final String database, final boolean deleteAfterLoad, final long writePointCount, - final boolean 
needDecodeTsFile) { + final boolean needDecodeTsFile, + File schemaEvolutionFile, + boolean isLastFile, + TsFileManager managerForLoadingIoTDBDir, + File originalInputFile) { super(id); this.tsFile = resource.getTsFile(); this.resource = resource; @@ -81,6 +96,10 @@ public LoadSingleTsFileNode( this.deleteAfterLoad = deleteAfterLoad; this.writePointCount = writePointCount; this.needDecodeTsFile = needDecodeTsFile; + this.schemaEvolutionFile = schemaEvolutionFile; + this.isLastFile = isLastFile; + this.managerForLoadingIoTDBDir = managerForLoadingIoTDBDir; + this.originalInputFile = originalInputFile; } public boolean isTsFileEmpty() { @@ -95,6 +114,12 @@ public boolean needDecodeTsFile( return true; } + if (schemaEvolutionFile != null || managerForLoadingIoTDBDir != null) { + // with schema evolution, must split + needDecodeTsFile = true; + return needDecodeTsFile; + } + List> slotList = new ArrayList<>(); resource .getDevices() @@ -158,6 +183,29 @@ public long getWritePointCount() { return writePointCount; } + public EvolvedSchema getSchemaEvolutionFile() { + if (schemaEvolutionFile != null) { + try { + SchemaEvolutionFile evolutionFile = + new SchemaEvolutionFile(schemaEvolutionFile.getAbsolutePath()); + return evolutionFile.readAsSchema(); + } catch (IOException e) { + LOGGER.error("Failed to read schema evolution file {}", schemaEvolutionFile, e); + } + } + + if (managerForLoadingIoTDBDir != null) { + List tsFileSets = + managerForLoadingIoTDBDir.getTsFileSet( + resource.getTsFileID().timePartitionId, + resource.getTsFileID().fileVersion, + Long.MAX_VALUE); + return TsFileSet.getMergedEvolvedSchema(tsFileSets); + } + + return null; + } + /** * only used for load locally. 
* @@ -239,6 +287,19 @@ public void clean() { Files.deleteIfExists(ModificationFile.getExclusiveMods(tsFile).toPath()); Files.deleteIfExists( new File(LoadUtil.getTsFileModsV1Path(tsFile.getAbsolutePath())).toPath()); + Files.deleteIfExists( + SchemaEvolutionFile.getTsFileAssociatedSchemaEvolutionFile(tsFile).toPath()); + if (isLastFile) { + LOGGER.info("Delete After Loading {}.", tsFile); + if (schemaEvolutionFile != null) { + Files.deleteIfExists(schemaEvolutionFile.toPath()); + LOGGER.info("schemaEvolutionFile {} is deleted.", schemaEvolutionFile); + } + if (originalInputFile.isDirectory() && originalInputFile.getName().equals("datanode")) { + FileUtils.deleteDirectoryAndEmptyParent(originalInputFile); + LOGGER.info("datanode dir {} is deleted.", originalInputFile); + } + } } } catch (final IOException e) { LOGGER.warn("Delete After Loading {} error.", tsFile, e); diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/load/LoadTsFileNode.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/load/LoadTsFileNode.java index e70bf08fd52ef..51c752528ee98 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/load/LoadTsFileNode.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/load/LoadTsFileNode.java @@ -29,16 +29,20 @@ import org.apache.iotdb.db.queryengine.plan.relational.sql.ast.PipeEnriched; import org.apache.iotdb.db.queryengine.plan.statement.crud.LoadTsFileStatement; import org.apache.iotdb.db.queryengine.plan.statement.pipe.PipeEnrichedStatement; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileID; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileManager; import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource; import org.apache.tsfile.exception.NotImplementedException; import java.io.DataOutputStream; +import java.io.File; 
import java.io.IOException; import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.Collections; import java.util.List; +import java.util.Map; import java.util.Objects; public class LoadTsFileNode extends WritePlanNode { @@ -46,19 +50,30 @@ public class LoadTsFileNode extends WritePlanNode { private final List resources; private final List isTableModel; private final String database; + private final File schemaEvolutionFile; private final boolean needDecode4TimeColumn; + private final File originalFile; + + // for loading IoTDB datanode dir + private final Map> databaseRegionTsFileManagers; public LoadTsFileNode( final PlanNodeId id, final List resources, final List isTableModel, final String database, - final boolean needDecode4TimeColumn) { + final boolean needDecode4TimeColumn, + final File schemaEvolutionFile, + final Map> databaseRegionTsFileManagers, + final File originalFile) { super(id); this.resources = resources; this.isTableModel = isTableModel; this.database = database; + this.schemaEvolutionFile = schemaEvolutionFile; this.needDecode4TimeColumn = needDecode4TimeColumn; + this.databaseRegionTsFileManagers = databaseRegionTsFileManagers; + this.originalFile = originalFile; } @Override @@ -120,6 +135,16 @@ private List splitByPartitionForTreeModel(Analysis analysis) { : (LoadTsFileStatement) analysis.getTreeStatement(); for (int i = 0; i < resources.size(); i++) { + TsFileManager managerForLoadingIoTDBDir = null; + String database = this.database; + if (databaseRegionTsFileManagers != null) { + TsFileID tsFileID = resources.get(i).getTsFileID(); + Map regionTsFileManagers = + databaseRegionTsFileManagers.get(tsFileID.databaseName); + managerForLoadingIoTDBDir = regionTsFileManagers.get(tsFileID.regionId); + database = tsFileID.databaseName; + } + res.add( new LoadSingleTsFileNode( getPlanNodeId(), @@ -128,7 +153,11 @@ private List splitByPartitionForTreeModel(Analysis analysis) { database, statement.isDeleteAfterLoad(), 
statement.getWritePointCount(i), - needDecode4TimeColumn)); + needDecode4TimeColumn, + schemaEvolutionFile, + i == resources.size() - 1, + managerForLoadingIoTDBDir, + originalFile)); } return res; } @@ -143,6 +172,16 @@ private List splitByPartitionForTableModel( for (int i = 0; i < resources.size(); i++) { if (statement != null) { + TsFileManager managerForLoadingIoTDBDir = null; + String database = this.database; + if (databaseRegionTsFileManagers != null) { + TsFileID tsFileID = resources.get(i).getTsFileID(); + Map regionTsFileManagers = + databaseRegionTsFileManagers.get(tsFileID.databaseName); + managerForLoadingIoTDBDir = regionTsFileManagers.get(tsFileID.regionId); + database = tsFileID.databaseName; + } + res.add( new LoadSingleTsFileNode( getPlanNodeId(), @@ -151,7 +190,11 @@ private List splitByPartitionForTableModel( database, statement.isDeleteAfterLoad(), statement.getWritePointCount(i), - needDecode4TimeColumn)); + needDecode4TimeColumn, + schemaEvolutionFile, + i == resources.size() - 1, + managerForLoadingIoTDBDir, + originalFile)); } } return res; diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/pipe/PipeEnrichedEvolveSchemaNode.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/pipe/PipeEnrichedEvolveSchemaNode.java new file mode 100644 index 0000000000000..794601eeb4e5e --- /dev/null +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/pipe/PipeEnrichedEvolveSchemaNode.java @@ -0,0 +1,194 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.apache.iotdb.db.queryengine.plan.planner.plan.node.pipe;

import org.apache.iotdb.common.rpc.thrift.TRegionReplicaSet;
import org.apache.iotdb.commons.consensus.index.ProgressIndex;
import org.apache.iotdb.db.queryengine.plan.analyze.IAnalysis;
import org.apache.iotdb.db.queryengine.plan.planner.plan.node.PlanNode;
import org.apache.iotdb.db.queryengine.plan.planner.plan.node.PlanNodeId;
import org.apache.iotdb.db.queryengine.plan.planner.plan.node.PlanNodeType;
import org.apache.iotdb.db.queryengine.plan.planner.plan.node.PlanVisitor;
import org.apache.iotdb.db.queryengine.plan.planner.plan.node.WritePlanNode;
import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.EvolveSchemaNode;
import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.SearchNode;
import org.apache.iotdb.db.storageengine.dataregion.wal.buffer.IWALByteBufferView;

import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.List;
import java.util.stream.Collectors;

/**
 * Decorator that wraps an {@link EvolveSchemaNode} to mark it as "pipe-enriched".
 *
 * <p>Every {@link PlanNode} operation is delegated to the wrapped node; the only behavior this
 * class adds is (a) reporting {@link PlanNodeType#PIPE_ENRICHED_EVOLVE_SCHEMA} as its type and
 * serializing that type tag ahead of the wrapped node's payload, and (b) re-wrapping the results
 * of {@code clone}/{@code split}/{@code merge} so the enrichment mark survives plan rewriting.
 */
public class PipeEnrichedEvolveSchemaNode extends EvolveSchemaNode {

  /** The wrapped, un-enriched node that all operations are delegated to. */
  private final EvolveSchemaNode evolveSchemaNode;

  public PipeEnrichedEvolveSchemaNode(final EvolveSchemaNode evolveSchemaNode) {
    // Mirror the wrapped node's id and payload so the superclass state stays consistent
    // even though every accessor below delegates to the wrapped instance.
    super(evolveSchemaNode.getPlanNodeId(), evolveSchemaNode.getSchemaEvolutions());
    this.evolveSchemaNode = evolveSchemaNode;
  }

  /** Returns the wrapped (un-enriched) {@link EvolveSchemaNode}. */
  public EvolveSchemaNode getEvolveSchemaNode() {
    return evolveSchemaNode;
  }

  /**
   * @deprecated the original accessor name does not follow Java method-naming conventions and is
   *     easily confused with a constructor call; use {@link #getEvolveSchemaNode()} instead. Kept
   *     as a delegate for backward compatibility with existing callers.
   */
  @Deprecated
  public PlanNode EvolveSchemaNode() {
    return getEvolveSchemaNode();
  }

  @Override
  public boolean isGeneratedByPipe() {
    return evolveSchemaNode.isGeneratedByPipe();
  }

  @Override
  public void markAsGeneratedByPipe() {
    evolveSchemaNode.markAsGeneratedByPipe();
  }

  @Override
  public PlanNodeId getPlanNodeId() {
    return evolveSchemaNode.getPlanNodeId();
  }

  @Override
  public void setPlanNodeId(final PlanNodeId id) {
    evolveSchemaNode.setPlanNodeId(id);
  }

  @Override
  public ProgressIndex getProgressIndex() {
    return evolveSchemaNode.getProgressIndex();
  }

  @Override
  public void setProgressIndex(final ProgressIndex progressIndex) {
    evolveSchemaNode.setProgressIndex(progressIndex);
  }

  @Override
  public List<PlanNode> getChildren() {
    return evolveSchemaNode.getChildren();
  }

  @Override
  public void addChild(final PlanNode child) {
    evolveSchemaNode.addChild(child);
  }

  @Override
  public PlanNodeType getType() {
    // The enriched type, not the wrapped node's type: this is what deserialization dispatches on.
    return PlanNodeType.PIPE_ENRICHED_EVOLVE_SCHEMA;
  }

  @Override
  public PlanNode clone() {
    return new PipeEnrichedEvolveSchemaNode((EvolveSchemaNode) evolveSchemaNode.clone());
  }

  @Override
  public PlanNode createSubNode(final int subNodeId, final int startIndex, final int endIndex) {
    return new PipeEnrichedEvolveSchemaNode(
        (EvolveSchemaNode) evolveSchemaNode.createSubNode(subNodeId, startIndex, endIndex));
  }

  @Override
  public PlanNode cloneWithChildren(final List<PlanNode> children) {
    return new PipeEnrichedEvolveSchemaNode(
        (EvolveSchemaNode) evolveSchemaNode.cloneWithChildren(children));
  }

  @Override
  public int allowedChildCount() {
    return evolveSchemaNode.allowedChildCount();
  }

  @Override
  public List<String> getOutputColumnNames() {
    return evolveSchemaNode.getOutputColumnNames();
  }

  @Override
  public <R, C> R accept(final PlanVisitor<R, C> visitor, final C context) {
    return visitor.visitPipeEnrichedEvolveSchemaNode(this, context);
  }

  @Override
  protected void serializeAttributes(final ByteBuffer byteBuffer) {
    // Write the enriched type tag, then the wrapped node's full serialization.
    // deserialize(ByteBuffer) below reads back exactly this layout.
    PlanNodeType.PIPE_ENRICHED_EVOLVE_SCHEMA.serialize(byteBuffer);
    evolveSchemaNode.serialize(byteBuffer);
  }

  @Override
  protected void serializeAttributes(final DataOutputStream stream) throws IOException {
    PlanNodeType.PIPE_ENRICHED_EVOLVE_SCHEMA.serialize(stream);
    evolveSchemaNode.serialize(stream);
  }

  /** Reads back the wrapped node written by {@link #serializeAttributes(ByteBuffer)}. */
  public static PipeEnrichedEvolveSchemaNode deserialize(final ByteBuffer buffer) {
    return new PipeEnrichedEvolveSchemaNode((EvolveSchemaNode) PlanNodeType.deserialize(buffer));
  }

  @Override
  public boolean equals(final Object o) {
    // Equality is defined solely by the wrapped node, mirroring hashCode below.
    return o instanceof PipeEnrichedEvolveSchemaNode
        && evolveSchemaNode.equals(((PipeEnrichedEvolveSchemaNode) o).evolveSchemaNode);
  }

  @Override
  public int hashCode() {
    return evolveSchemaNode.hashCode();
  }

  @Override
  public TRegionReplicaSet getRegionReplicaSet() {
    return evolveSchemaNode.getRegionReplicaSet();
  }

  @Override
  public List<WritePlanNode> splitByPartition(final IAnalysis analysis) {
    // Re-wrap each split so the pipe-enriched mark is not lost during distribution planning.
    return evolveSchemaNode.splitByPartition(analysis).stream()
        .map(
            plan ->
                plan instanceof PipeEnrichedEvolveSchemaNode
                    ? plan
                    : new PipeEnrichedEvolveSchemaNode((EvolveSchemaNode) plan))
        .collect(Collectors.toList());
  }

  @Override
  public void serializeToWAL(final IWALByteBufferView buffer) {
    // WAL serialization intentionally records the wrapped node only (no enriched type tag).
    evolveSchemaNode.serializeToWAL(buffer);
  }

  @Override
  public int serializedSize() {
    return evolveSchemaNode.serializedSize();
  }

  @Override
  public SearchNode merge(final List<SearchNode> searchNodes) {
    // Unwrap every enriched node, merge the raw nodes, then re-wrap the result.
    // NOTE(review): this cast assumes every element of searchNodes is a
    // PipeEnrichedEvolveSchemaNode — confirm callers never mix node kinds here.
    final List<SearchNode> unenrichedNodes =
        searchNodes.stream()
            .map(
                searchNode ->
                    (SearchNode)
                        ((PipeEnrichedEvolveSchemaNode) searchNode).getEvolveSchemaNode())
            .collect(Collectors.toList());
    return new PipeEnrichedEvolveSchemaNode(
        (EvolveSchemaNode) evolveSchemaNode.merge(unenrichedNodes));
  }
}
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.apache.iotdb.db.queryengine.plan.planner.plan.node.write;

import org.apache.iotdb.common.rpc.thrift.TRegionReplicaSet;
import org.apache.iotdb.commons.consensus.index.ProgressIndex;
import org.apache.iotdb.db.queryengine.plan.analyze.IAnalysis;
import org.apache.iotdb.db.queryengine.plan.planner.plan.node.PlanNode;
import org.apache.iotdb.db.queryengine.plan.planner.plan.node.PlanNodeId;
import org.apache.iotdb.db.queryengine.plan.planner.plan.node.PlanNodeType;
import org.apache.iotdb.db.queryengine.plan.planner.plan.node.PlanVisitor;
import org.apache.iotdb.db.queryengine.plan.planner.plan.node.WritePlanNode;
import org.apache.iotdb.db.schemaengine.schemaregion.ISchemaRegionPlan;
import org.apache.iotdb.db.schemaengine.schemaregion.SchemaRegionPlanType;
import org.apache.iotdb.db.schemaengine.schemaregion.SchemaRegionPlanVisitor;
import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.SchemaEvolution;
import org.apache.iotdb.db.storageengine.dataregion.wal.buffer.IWALByteBufferView;
import org.apache.iotdb.db.storageengine.dataregion.wal.buffer.WALEntryValue;
import org.apache.iotdb.db.utils.io.IOUtils;

import org.apache.tsfile.utils.ReadWriteForEncodingUtils;
import org.apache.tsfile.utils.ReadWriteIOUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

/**
 * Leaf write-plan node that carries a batch of {@link SchemaEvolution}s to be applied to a
 * region.
 *
 * <p>The node has no children ({@link #allowedChildCount()} is 0), is not split by partition
 * (see {@link #splitByPartition(IAnalysis)}), and is written to / restored from both the plan
 * serialization format ({@link #serializeAttributes}) and the WAL format
 * ({@link #serializeToWAL} / {@link #deserializeFromWAL}). The two formats differ: the WAL
 * layout starts with the search index, the plan layout starts with the evolution list.
 */
public class EvolveSchemaNode extends SearchNode implements WALEntryValue, ISchemaRegionPlan {

  private static final Logger LOGGER = LoggerFactory.getLogger(EvolveSchemaNode.class);

  protected TRegionReplicaSet regionReplicaSet;
  protected ProgressIndex progressIndex;
  // The payload: the ordered list of schema evolutions this node applies.
  private List<SchemaEvolution> schemaEvolutions;

  public EvolveSchemaNode() {
    super(new PlanNodeId(""));
  }

  public EvolveSchemaNode(PlanNodeId id, List<SchemaEvolution> schemaEvolutions) {
    super(id);
    this.schemaEvolutions = schemaEvolutions;
  }

  /**
   * Restores a node from the WAL stream layout: search index (long), then a var-int count
   * followed by that many {@link SchemaEvolution}s. Mirrors {@link #serializeToWAL}.
   */
  public static PlanNode deserializeFromWAL(DataInputStream stream) throws IOException {
    long searchIndex = stream.readLong();
    int size = ReadWriteForEncodingUtils.readVarInt(stream);
    List<SchemaEvolution> evolutions = new ArrayList<>(size);
    for (int i = 0; i < size; i++) {
      evolutions.add(SchemaEvolution.createFrom(stream));
    }

    EvolveSchemaNode evolveSchemaNode = new EvolveSchemaNode(new PlanNodeId(""), evolutions);
    evolveSchemaNode.setSearchIndex(searchIndex);

    return evolveSchemaNode;
  }

  /** Buffer-based counterpart of {@link #deserializeFromWAL(DataInputStream)}. */
  public static PlanNode deserializeFromWAL(ByteBuffer buffer) {
    long searchIndex = buffer.getLong();
    int size = ReadWriteForEncodingUtils.readVarInt(buffer);
    List<SchemaEvolution> evolutions = new ArrayList<>(size);
    for (int i = 0; i < size; i++) {
      evolutions.add(SchemaEvolution.createFrom(buffer));
    }

    EvolveSchemaNode evolveSchemaNode = new EvolveSchemaNode(new PlanNodeId(""), evolutions);
    evolveSchemaNode.setSearchIndex(searchIndex);

    return evolveSchemaNode;
  }

  /**
   * Restores a node from the plan layout written by {@link #serializeAttributes}: evolution
   * list, plan node id, then the (always-zero) children count, which must still be consumed to
   * keep the buffer position correct.
   */
  public static PlanNode deserialize(ByteBuffer buffer) {
    int size = ReadWriteForEncodingUtils.readVarInt(buffer);
    List<SchemaEvolution> evolutions = new ArrayList<>(size);
    for (int i = 0; i < size; i++) {
      evolutions.add(SchemaEvolution.createFrom(buffer));
    }

    PlanNodeId planNodeId = PlanNodeId.deserialize(buffer);

    // EvolveSchemaNode has no child; read the count only to advance the buffer.
    int ignoredChildrenSize = ReadWriteIOUtils.readInt(buffer);
    return new EvolveSchemaNode(planNodeId, evolutions);
  }

  /** Merging is a no-op: this node already carries its full batch. */
  @Override
  public SearchNode merge(List<SearchNode> searchNodes) {
    return this;
  }

  @Override
  public ProgressIndex getProgressIndex() {
    return progressIndex;
  }

  @Override
  public void setProgressIndex(ProgressIndex progressIndex) {
    this.progressIndex = progressIndex;
  }

  /** Schema evolution is applied as a single unit; no partition splitting is performed. */
  @Override
  public List<WritePlanNode> splitByPartition(IAnalysis analysis) {
    return Collections.singletonList(this);
  }

  @Override
  public TRegionReplicaSet getRegionReplicaSet() {
    return regionReplicaSet;
  }

  @Override
  public List<PlanNode> getChildren() {
    return Collections.emptyList();
  }

  @Override
  public void addChild(PlanNode child) {
    throw new UnsupportedOperationException();
  }

  /**
   * Shallow clone: the new node shares the evolution list and does not carry over the search
   * index, progress index, or region replica set of this node.
   */
  @Override
  public PlanNode clone() {
    return new EvolveSchemaNode(id, schemaEvolutions);
  }

  @Override
  public int allowedChildCount() {
    return 0;
  }

  @Override
  public List<String> getOutputColumnNames() {
    return Collections.emptyList();
  }

  @Override
  protected void serializeAttributes(ByteBuffer byteBuffer) {
    PlanNodeType.EVOLVE_SCHEMA.serialize(byteBuffer);
    IOUtils.writeList(schemaEvolutions, byteBuffer);
  }

  @Override
  protected void serializeAttributes(DataOutputStream stream) throws IOException {
    PlanNodeType.EVOLVE_SCHEMA.serialize(stream);
    IOUtils.writeList(schemaEvolutions, stream);
  }

  @Override
  public void serializeToWAL(IWALByteBufferView buffer) {
    buffer.putShort(PlanNodeType.EVOLVE_SCHEMA.getNodeType());
    buffer.putLong(searchIndex);
    try {
      IOUtils.writeList(schemaEvolutions, buffer);
    } catch (IOException e) {
      // NOTE(review): a failure here is only logged, which could leave a truncated WAL entry
      // behind the already-written header — confirm this best-effort behavior is intended.
      LOGGER.warn("Error writing schema evolutions to WAL", e);
    }
  }

  @Override
  public int serializedSize() {
    // NOTE(review): returns 0 even though serializeToWAL writes a header plus the evolution
    // list — confirm the WAL buffer sizing path does not rely on this value being accurate.
    return 0;
  }

  public List<SchemaEvolution> getSchemaEvolutions() {
    return schemaEvolutions;
  }

  @Override
  public <R, C> R accept(PlanVisitor<R, C> visitor, C context) {
    return visitor.visitEvolveSchemaNode(this, context);
  }

  @Override
  public SchemaRegionPlanType getPlanType() {
    return SchemaRegionPlanType.EVOLVE_SCHEMA;
  }

  @Override
  public <R, C> R accept(SchemaRegionPlanVisitor<R, C> visitor, C context) {
    return visitor.visitEvolveSchema(this, context);
  }
}
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/write/InsertRowsNode.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/write/InsertRowsNode.java index 7392b7612705e..7494f62bc87e5 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/write/InsertRowsNode.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/write/InsertRowsNode.java @@ -23,6 +23,7 @@ import org.apache.iotdb.common.rpc.thrift.TRegionReplicaSet; import org.apache.iotdb.common.rpc.thrift.TSStatus; import org.apache.iotdb.commons.consensus.index.ProgressIndex; +import org.apache.iotdb.commons.partition.executor.SeriesPartitionExecutor.FullDeviceIdKey; import org.apache.iotdb.commons.utils.StatusUtils; import org.apache.iotdb.commons.utils.TimePartitionUtils; import org.apache.iotdb.db.exception.DataTypeInconsistentException; @@ -276,7 +277,7 @@ public List splitByPartition(IAnalysis analysis) { analysis .getDataPartitionInfo() .getDataRegionReplicaSetForWriting( - insertRowNode.targetPath.getIDeviceIDAsFullDevice(), + new FullDeviceIdKey(insertRowNode.targetPath.getIDeviceIDAsFullDevice()), TimePartitionUtils.getTimePartitionSlot(insertRowNode.getTime()), null); // Collect redirectInfo diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/write/InsertRowsOfOneDeviceNode.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/write/InsertRowsOfOneDeviceNode.java index f1e28d32b104d..f54d62a7a2b52 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/write/InsertRowsOfOneDeviceNode.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/write/InsertRowsOfOneDeviceNode.java @@ -23,6 +23,7 @@ import 
org.apache.iotdb.common.rpc.thrift.TTimePartitionSlot; import org.apache.iotdb.commons.consensus.index.ProgressIndex; import org.apache.iotdb.commons.exception.IllegalPathException; +import org.apache.iotdb.commons.partition.executor.SeriesPartitionExecutor.FullDeviceIdKey; import org.apache.iotdb.commons.path.PartialPath; import org.apache.iotdb.commons.utils.StatusUtils; import org.apache.iotdb.commons.utils.TimePartitionUtils; @@ -173,7 +174,7 @@ public List splitByPartition(IAnalysis analysis) { analysis .getDataPartitionInfo() .getDataRegionReplicaSetForWriting( - targetPath.getIDeviceIDAsFullDevice(), + new FullDeviceIdKey(targetPath.getIDeviceIDAsFullDevice()), timePartitionSlot, analysis.getDatabaseName()); Map> tmpMap = diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/write/InsertTabletNode.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/write/InsertTabletNode.java index 39683e5d9f94e..c69913164473a 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/write/InsertTabletNode.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/write/InsertTabletNode.java @@ -23,6 +23,7 @@ import org.apache.iotdb.common.rpc.thrift.TSStatus; import org.apache.iotdb.common.rpc.thrift.TTimePartitionSlot; import org.apache.iotdb.commons.exception.IllegalPathException; +import org.apache.iotdb.commons.partition.executor.SeriesPartitionExecutor.FullDeviceIdKey; import org.apache.iotdb.commons.path.PartialPath; import org.apache.iotdb.commons.utils.CommonDateTimeUtils; import org.apache.iotdb.commons.utils.TestOnly; @@ -288,7 +289,9 @@ protected Map> splitByReplicaSet( analysis .getDataPartitionInfo() .getDataRegionReplicaSetForWriting( - deviceID, splitInfo.timePartitionSlots, analysis.getDatabaseName()); + new FullDeviceIdKey(deviceID), + splitInfo.timePartitionSlots, + 
analysis.getDatabaseName()); splitInfo.replicaSets = replicaSets; // collect redirectInfo analysis.addEndPointToRedirectNodeList( diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/write/RelationalInsertRowsNode.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/write/RelationalInsertRowsNode.java index 594ccf50471f9..20f3e10f40293 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/write/RelationalInsertRowsNode.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/write/RelationalInsertRowsNode.java @@ -21,6 +21,7 @@ import org.apache.iotdb.common.rpc.thrift.TEndPoint; import org.apache.iotdb.common.rpc.thrift.TRegionReplicaSet; +import org.apache.iotdb.commons.partition.executor.SeriesPartitionExecutor.SeriesPartitionKey; import org.apache.iotdb.commons.utils.TimePartitionUtils; import org.apache.iotdb.db.exception.DataTypeInconsistentException; import org.apache.iotdb.db.queryengine.plan.analyze.IAnalysis; @@ -29,6 +30,7 @@ import org.apache.iotdb.db.queryengine.plan.planner.plan.node.PlanVisitor; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.WritePlanNode; import org.apache.iotdb.db.storageengine.dataregion.memtable.AbstractMemTable; +import org.apache.iotdb.db.utils.CommonUtils; import org.apache.tsfile.file.metadata.IDeviceID; import org.apache.tsfile.file.metadata.IDeviceID.Factory; @@ -168,11 +170,14 @@ public List splitByPartition(IAnalysis analysis) { InsertRowNode insertRowNode = getInsertRowNodeList().get(i); // Data region for insert row node // each row may belong to different database, pass null for auto-detection + SeriesPartitionKey seriesPartitionKey = + CommonUtils.getSeriesPartitionKey( + insertRowNode.getDeviceID(), analysis.getDatabaseName(), true); TRegionReplicaSet dataRegionReplicaSet = analysis .getDataPartitionInfo() 
.getDataRegionReplicaSetForWriting( - insertRowNode.getDeviceID(), + seriesPartitionKey, TimePartitionUtils.getTimePartitionSlot(insertRowNode.getTime()), analysis.getDatabaseName()); diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/write/RelationalInsertTabletNode.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/write/RelationalInsertTabletNode.java index 257f691e4a785..1a30d9323825f 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/write/RelationalInsertTabletNode.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/write/RelationalInsertTabletNode.java @@ -22,6 +22,7 @@ import org.apache.iotdb.common.rpc.thrift.TEndPoint; import org.apache.iotdb.common.rpc.thrift.TRegionReplicaSet; import org.apache.iotdb.common.rpc.thrift.TSStatus; +import org.apache.iotdb.commons.partition.executor.SeriesPartitionExecutor.SeriesPartitionKey; import org.apache.iotdb.commons.path.PartialPath; import org.apache.iotdb.commons.schema.table.column.TsTableColumnCategory; import org.apache.iotdb.commons.utils.TestOnly; @@ -36,6 +37,7 @@ import org.apache.iotdb.db.storageengine.dataregion.memtable.AbstractMemTable; import org.apache.iotdb.db.storageengine.dataregion.memtable.IWritableMemChunkGroup; import org.apache.iotdb.db.storageengine.dataregion.wal.buffer.IWALByteBufferView; +import org.apache.iotdb.db.utils.CommonUtils; import org.apache.tsfile.enums.TSDataType; import org.apache.tsfile.file.metadata.IDeviceID; @@ -207,11 +209,13 @@ protected Map> splitByReplicaSet( for (final Map.Entry entry : deviceIDSplitInfoMap.entrySet()) { final IDeviceID deviceID = entry.getKey(); final PartitionSplitInfo splitInfo = entry.getValue(); + SeriesPartitionKey seriesPartitionKey = + CommonUtils.getSeriesPartitionKey(deviceID, analysis.getDatabaseName(), true); final List replicaSets = analysis 
.getDataPartitionInfo() .getDataRegionReplicaSetForWriting( - deviceID, splitInfo.timePartitionSlots, analysis.getDatabaseName()); + seriesPartitionKey, splitInfo.timePartitionSlots, analysis.getDatabaseName()); splitInfo.replicaSets = replicaSets; // collect redirectInfo endPointMap.put( diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/planner/RelationPlanner.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/planner/RelationPlanner.java index ded699588c1c2..f6b9c3cb62f69 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/planner/RelationPlanner.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/planner/RelationPlanner.java @@ -138,6 +138,7 @@ import org.apache.tsfile.read.common.type.Type; import org.apache.tsfile.write.schema.MeasurementSchema; +import java.io.File; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; @@ -1392,7 +1393,10 @@ protected RelationPlan visitLoadTsFile(final LoadTsFile node, final Void context node.getResources(), isTableModel, node.getDatabase(), - node.isNeedDecode4TimeColumn()), + node.isNeedDecode4TimeColumn(), + node.getSchemaEvolutionFile(), + node.getDatabaseRegionTsFileManagers(), + new File(node.getFilePath())), analysis.getRootScope(), Collections.emptyList(), outerContext); diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/planner/distribute/TableDistributedPlanGenerator.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/planner/distribute/TableDistributedPlanGenerator.java index 7072b5f519f73..29c55c01187a7 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/planner/distribute/TableDistributedPlanGenerator.java +++ 
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/planner/distribute/TableDistributedPlanGenerator.java @@ -26,6 +26,7 @@ import org.apache.iotdb.commons.exception.IoTDBRuntimeException; import org.apache.iotdb.commons.partition.DataPartition; import org.apache.iotdb.commons.partition.SchemaPartition; +import org.apache.iotdb.commons.partition.executor.SeriesPartitionExecutor.SeriesPartitionKey; import org.apache.iotdb.commons.schema.table.TsTable; import org.apache.iotdb.commons.schema.table.column.TsTableColumnCategory; import org.apache.iotdb.commons.utils.TimePartitionUtils; @@ -104,6 +105,7 @@ import org.apache.iotdb.db.queryengine.plan.statement.component.Ordering; import org.apache.iotdb.db.schemaengine.table.DataNodeTableCache; import org.apache.iotdb.db.schemaengine.table.DataNodeTreeViewSchemaUtils; +import org.apache.iotdb.db.utils.CommonUtils; import org.apache.iotdb.rpc.TSStatusCode; import com.google.common.collect.ImmutableList; @@ -714,7 +716,8 @@ private List constructDeviceTableScanByTags( seriesSlotMap, deviceEntry.getDeviceID(), node.getTimeFilter(), - cachedSeriesSlotWithRegions); + cachedSeriesSlotWithRegions, + dbName); regionReplicaSets.forEach( regionReplicaSet -> regionDeviceCount.put( @@ -805,7 +808,8 @@ private List constructDeviceTableScanByRegionReplicaSet( seriesSlotMap, deviceEntry.getDeviceID(), node.getTimeFilter(), - cachedSeriesSlotWithRegions); + cachedSeriesSlotWithRegions, + dbName); if (regionReplicaSets.size() > 1) { context.deviceCrossRegion = true; } @@ -895,7 +899,8 @@ public List visitTreeDeviceViewScan(TreeDeviceViewScanNode node, PlanC seriesSlotMap, deviceEntry.getDeviceID(), node.getTimeFilter(), - cachedSeriesSlotWithRegions); + cachedSeriesSlotWithRegions, + dbName); if (regionReplicaSets.size() > 1) { context.deviceCrossRegion = true; @@ -1215,7 +1220,8 @@ public List visitAggregationTableScan( seriesSlotMap, deviceEntry.getDeviceID(), node.getTimeFilter(), - 
cachedSeriesSlotWithRegions); + cachedSeriesSlotWithRegions, + dbName); if (regionReplicaSets.size() > 1) { needSplit = true; context.deviceCrossRegion = true; @@ -1298,10 +1304,14 @@ private List getDeviceReplicaSets( Map>> seriesSlotMap, IDeviceID deviceId, Filter timeFilter, - Map> cachedSeriesSlotWithRegions) { + Map> cachedSeriesSlotWithRegions, + String databaseName) { // given seriesPartitionSlot has already been calculated - final TSeriesPartitionSlot seriesPartitionSlot = dataPartition.calculateDeviceGroupId(deviceId); + SeriesPartitionKey seriesPartitionKey = + CommonUtils.getSeriesPartitionKey(deviceId, databaseName, true); + final TSeriesPartitionSlot seriesPartitionSlot = + dataPartition.calculateDeviceGroupId(seriesPartitionKey); List regionReplicaSets = cachedSeriesSlotWithRegions.get(seriesPartitionSlot.getSlotId()); if (regionReplicaSets != null) { @@ -1780,8 +1790,10 @@ public List visitTableDeviceFetch( final List partitionKeyList = node.getPartitionKeyList(); final List deviceIDArray = node.getDeviceIdList(); for (int i = 0; i < node.getPartitionKeyList().size(); ++i) { + SeriesPartitionKey seriesPartitionKey = + CommonUtils.getSeriesPartitionKey(partitionKeyList.get(i), database, true); final TRegionReplicaSet regionReplicaSet = - databaseMap.get(schemaPartition.calculateDeviceGroupId(partitionKeyList.get(i))); + databaseMap.get(schemaPartition.calculateDeviceGroupId(seriesPartitionKey)); if (Objects.nonNull(regionReplicaSet)) { tableDeviceFetchMap .computeIfAbsent( diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/planner/node/schema/CreateOrUpdateTableDeviceNode.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/planner/node/schema/CreateOrUpdateTableDeviceNode.java index be4b1337e7e4c..6b7b78aeb9974 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/planner/node/schema/CreateOrUpdateTableDeviceNode.java +++ 
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/planner/node/schema/CreateOrUpdateTableDeviceNode.java @@ -20,6 +20,7 @@ package org.apache.iotdb.db.queryengine.plan.relational.planner.node.schema; import org.apache.iotdb.common.rpc.thrift.TRegionReplicaSet; +import org.apache.iotdb.commons.partition.executor.SeriesPartitionExecutor.SeriesPartitionKey; import org.apache.iotdb.db.queryengine.plan.analyze.IAnalysis; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.PlanNode; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.PlanNodeId; @@ -29,6 +30,7 @@ import org.apache.iotdb.db.schemaengine.schemaregion.ISchemaRegionPlan; import org.apache.iotdb.db.schemaengine.schemaregion.SchemaRegionPlanType; import org.apache.iotdb.db.schemaengine.schemaregion.SchemaRegionPlanVisitor; +import org.apache.iotdb.db.utils.CommonUtils; import org.apache.tsfile.file.metadata.IDeviceID; import org.apache.tsfile.utils.ReadWriteIOUtils; @@ -260,10 +262,10 @@ public List splitByPartition(final IAnalysis analysis) { final List partitionKeyList = getPartitionKeyList(); for (int i = 0; i < partitionKeyList.size(); i++) { // Use the string literal of deviceId as the partition key + SeriesPartitionKey seriesPartitionKey = + CommonUtils.getSeriesPartitionKey(partitionKeyList.get(i), database, true); final TRegionReplicaSet regionReplicaSet = - analysis - .getSchemaPartitionInfo() - .getSchemaRegionReplicaSet(database, partitionKeyList.get(i)); + analysis.getSchemaPartitionInfo().getSchemaRegionReplicaSet(database, seriesPartitionKey); splitMap.computeIfAbsent(regionReplicaSet, k -> new ArrayList<>()).add(i); } final List result = new ArrayList<>(splitMap.size()); diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/sql/ast/BooleanLiteral.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/sql/ast/BooleanLiteral.java index 42f62559a7286..c8a06a501be4d 100644 
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/sql/ast/BooleanLiteral.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/sql/ast/BooleanLiteral.java @@ -119,4 +119,8 @@ public long ramBytesUsed() { return INSTANCE_SIZE + AstMemoryEstimationHelper.getEstimatedSizeOfNodeLocation(getLocationInternal()); } + + public boolean getParsedValue() { + return value; + } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/sql/ast/LoadTsFile.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/sql/ast/LoadTsFile.java index 8deb97c2e5af6..b0cae8a4f788e 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/sql/ast/LoadTsFile.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/sql/ast/LoadTsFile.java @@ -21,6 +21,7 @@ import org.apache.iotdb.db.conf.IoTDBDescriptor; import org.apache.iotdb.db.exception.sql.SemanticException; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileManager; import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource; import org.apache.iotdb.db.storageengine.load.config.LoadTsFileConfigurator; @@ -64,8 +65,12 @@ public class LoadTsFile extends Statement { private List resources; private List writePointCountList; private List isTableModel; + private File schemaEvolutionFile; private boolean needDecode4TimeColumn; + // for loading iotdb datanode dir + private Map> databaseRegionTsFileManagers; + public LoadTsFile(NodeLocation location, String filePath, Map loadAttributes) { super(location); this.filePath = requireNonNull(filePath, "filePath is null"); @@ -196,6 +201,19 @@ public long getWritePointCount(int resourceIndex) { return writePointCountList.get(resourceIndex); } + public File getSchemaEvolutionFile() { + return schemaEvolutionFile; + } + + public void 
setDatabaseRegionTsFileManagers( + Map> databaseRegionTsFileManagers) { + this.databaseRegionTsFileManagers = databaseRegionTsFileManagers; + } + + public Map> getDatabaseRegionTsFileManagers() { + return databaseRegionTsFileManagers; + } + private void initAttributes() { this.databaseLevel = LoadTsFileConfigurator.parseOrGetDefaultDatabaseLevel(loadAttributes); this.database = LoadTsFileConfigurator.parseDatabaseName(loadAttributes); @@ -206,6 +224,7 @@ private void initAttributes() { LoadTsFileConfigurator.parseOrGetDefaultTabletConversionThresholdBytes(loadAttributes); this.verify = LoadTsFileConfigurator.parseOrGetDefaultVerify(loadAttributes); this.isAsyncLoad = LoadTsFileConfigurator.parseOrGetDefaultAsyncLoad(loadAttributes); + this.schemaEvolutionFile = LoadTsFileConfigurator.parseSevoFile(loadAttributes); } public boolean reconstructStatementIfMiniFileConverted(final List isMiniTsFile) { diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/sql/ast/RenameColumn.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/sql/ast/RenameColumn.java index 5e1277316a439..c42b120e9449f 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/sql/ast/RenameColumn.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/sql/ast/RenameColumn.java @@ -19,8 +19,6 @@ package org.apache.iotdb.db.queryengine.plan.relational.sql.ast; -import org.apache.iotdb.db.exception.sql.SemanticException; - import com.google.common.collect.ImmutableList; import org.apache.tsfile.utils.RamUsageEstimator; @@ -35,8 +33,8 @@ public final class RenameColumn extends Statement { RamUsageEstimator.shallowSizeOfInstance(RenameColumn.class); private final QualifiedName table; - private final Identifier source; - private final Identifier target; + private final List sources; + private final List targets; private final boolean tableIfExists; private 
final boolean columnIfNotExists; @@ -45,33 +43,30 @@ public final class RenameColumn extends Statement { public RenameColumn( final NodeLocation location, final QualifiedName table, - final Identifier source, - final Identifier target, + final List sources, + final List targets, final boolean tableIfExists, final boolean columnIfNotExists, final boolean view) { super(requireNonNull(location, "location is null")); this.table = requireNonNull(table, "table is null"); - this.source = requireNonNull(source, "source is null"); - this.target = requireNonNull(target, "target is null"); + this.sources = requireNonNull(sources, "source is null"); + this.targets = requireNonNull(targets, "target is null"); this.tableIfExists = tableIfExists; this.columnIfNotExists = columnIfNotExists; this.view = view; - if (!view) { - throw new SemanticException("The renaming for base table column is currently unsupported"); - } } public QualifiedName getTable() { return table; } - public Identifier getSource() { - return source; + public List getSources() { + return sources; } - public Identifier getTarget() { - return target; + public List getTargets() { + return targets; } public boolean tableIfExists() { @@ -108,22 +103,22 @@ public boolean equals(final Object o) { return tableIfExists == that.tableIfExists && columnIfNotExists == that.columnIfNotExists && Objects.equals(table, that.table) - && Objects.equals(source, that.source) - && Objects.equals(target, that.target) + && Objects.equals(sources, that.sources) + && Objects.equals(targets, that.targets) && view == that.view; } @Override public int hashCode() { - return Objects.hash(table, source, target, view); + return Objects.hash(table, sources, targets, view); } @Override public String toString() { return toStringHelper(this) .add("table", table) - .add("source", source) - .add("target", target) + .add("source", sources) + .add("target", targets) .add("tableIfExists", tableIfExists) .add("columnIfExists", columnIfNotExists) 
.add("view", view) @@ -135,8 +130,12 @@ public long ramBytesUsed() { long size = INSTANCE_SIZE; size += AstMemoryEstimationHelper.getEstimatedSizeOfNodeLocation(getLocationInternal()); size += table == null ? 0L : table.ramBytesUsed(); - size += AstMemoryEstimationHelper.getEstimatedSizeOfAccountableObject(source); - size += AstMemoryEstimationHelper.getEstimatedSizeOfAccountableObject(target); + for (Identifier source : sources) { + size += AstMemoryEstimationHelper.getEstimatedSizeOfAccountableObject(source); + } + for (Identifier target : targets) { + size += AstMemoryEstimationHelper.getEstimatedSizeOfAccountableObject(target); + } return size; } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/sql/ast/RenameTable.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/sql/ast/RenameTable.java index 77c3296fee00b..5919dec7d03ff 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/sql/ast/RenameTable.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/sql/ast/RenameTable.java @@ -19,8 +19,6 @@ package org.apache.iotdb.db.queryengine.plan.relational.sql.ast; -import org.apache.iotdb.db.exception.sql.SemanticException; - import com.google.common.collect.ImmutableList; import org.apache.tsfile.utils.RamUsageEstimator; @@ -51,9 +49,6 @@ public RenameTable( this.target = requireNonNull(target, "target name is null"); this.tableIfExists = tableIfExists; this.view = view; - if (!view) { - throw new SemanticException("The renaming for base table is currently unsupported"); - } } public QualifiedName getSource() { diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/sql/parser/AstBuilder.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/sql/parser/AstBuilder.java index f46b356423ddc..f31c461c1b1f4 100644 --- 
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/sql/parser/AstBuilder.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/sql/parser/AstBuilder.java @@ -261,6 +261,7 @@ import org.apache.iotdb.db.relational.grammar.sql.RelationalSqlBaseVisitor; import org.apache.iotdb.db.relational.grammar.sql.RelationalSqlLexer; import org.apache.iotdb.db.relational.grammar.sql.RelationalSqlParser; +import org.apache.iotdb.db.relational.grammar.sql.RelationalSqlParser.IdentifierContext; import org.apache.iotdb.db.schemaengine.table.DataNodeTableCache; import org.apache.iotdb.db.storageengine.load.config.LoadTsFileConfigurator; import org.apache.iotdb.db.utils.DateTimeUtils; @@ -505,11 +506,21 @@ public Node visitAddColumn(final RelationalSqlParser.AddColumnContext ctx) { @Override public Node visitRenameColumn(final RelationalSqlParser.RenameColumnContext ctx) { + List identifiers = ctx.identifier(); + List oldNames = new ArrayList<>(identifiers.size() / 2); + List newNames = new ArrayList<>(identifiers.size() / 2); + for (int i = 0; i < identifiers.size(); i++) { + if (i % 2 == 0) { + oldNames.add((Identifier) visit(identifiers.get(i))); + } else { + newNames.add((Identifier) visit(identifiers.get(i))); + } + } return new RenameColumn( getLocation(ctx), getQualifiedName(ctx.tableName), - (Identifier) visit(ctx.from), - (Identifier) visit(ctx.to), + oldNames, + newNames, ctx.EXISTS().stream() .anyMatch( node -> @@ -650,11 +661,21 @@ public Node visitAddViewColumn(final RelationalSqlParser.AddViewColumnContext ct @Override public Node visitRenameViewColumn(final RelationalSqlParser.RenameViewColumnContext ctx) { + List identifiers = ctx.identifier(); + List oldNames = new ArrayList<>(identifiers.size() / 2); + List newNames = new ArrayList<>(identifiers.size() / 2); + for (int i = 0; i < identifiers.size(); i++) { + if (i % 2 == 0) { + oldNames.add((Identifier) visit(identifiers.get(i))); + } else { + 
newNames.add((Identifier) visit(identifiers.get(i))); + } + } return new RenameColumn( getLocation(ctx), getQualifiedName(ctx.viewName), - (Identifier) visit(ctx.from), - (Identifier) visit(ctx.to), + oldNames, + newNames, ctx.EXISTS().stream() .anyMatch( node -> diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/sql/util/SqlFormatter.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/sql/util/SqlFormatter.java index 0c1d862a886cc..5667b3aa85c26 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/sql/util/SqlFormatter.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/sql/util/SqlFormatter.java @@ -969,10 +969,15 @@ protected Void visitRenameColumn(RenameColumn node, Integer indent) { builder.append("IF EXISTS "); } - builder - .append(formatName(node.getSource())) - .append(" TO ") - .append(formatName(node.getTarget())); + for (int i = 0; i < node.getSources().size(); i++) { + builder + .append(formatName(node.getSources().get(i))) + .append(" TO ") + .append(formatName(node.getTargets().get(i))); + if (i != node.getSources().size() - 1) { + builder.append(" "); + } + } return null; } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/scheduler/load/LoadTsFileScheduler.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/scheduler/load/LoadTsFileScheduler.java index 98d683284119c..74b46973d4637 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/scheduler/load/LoadTsFileScheduler.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/scheduler/load/LoadTsFileScheduler.java @@ -34,6 +34,7 @@ import org.apache.iotdb.commons.partition.DataPartition; import org.apache.iotdb.commons.partition.DataPartitionQueryParam; import org.apache.iotdb.commons.partition.StorageExecutor; +import 
org.apache.iotdb.commons.partition.executor.SeriesPartitionExecutor.SeriesPartitionKey; import org.apache.iotdb.commons.service.metric.MetricService; import org.apache.iotdb.commons.service.metric.enums.Metric; import org.apache.iotdb.commons.service.metric.enums.Tag; @@ -72,6 +73,7 @@ import org.apache.iotdb.db.storageengine.load.splitter.DeletionData; import org.apache.iotdb.db.storageengine.load.splitter.TsFileData; import org.apache.iotdb.db.storageengine.load.splitter.TsFileSplitter; +import org.apache.iotdb.db.utils.CommonUtils; import org.apache.iotdb.metrics.utils.MetricLevel; import org.apache.iotdb.mpp.rpc.thrift.TLoadCommandReq; import org.apache.iotdb.rpc.TSStatusCode; @@ -315,7 +317,9 @@ private boolean firstPhase(LoadSingleTsFileNode node) { final TsFileDataManager tsFileDataManager = new TsFileDataManager(this, node, block); try { new TsFileSplitter( - node.getTsFileResource().getTsFile(), tsFileDataManager::addOrSendTsFileData) + node.getTsFileResource().getTsFile(), + tsFileDataManager::addOrSendTsFileData, + node.getSchemaEvolutionFile()) .splitTsFileByDataPartition(); if (!tsFileDataManager.sendAllTsFileData()) { return false; @@ -846,12 +850,15 @@ public List queryDataPartition( subSlotList.stream() .map( pair -> - // (database != null) means this file will be loaded into table-model - database != null - ? dataPartition.getDataRegionReplicaSetForWriting( - pair.left, pair.right, database) - : dataPartition.getDataRegionReplicaSetForWriting( - pair.left, pair.right)) + // (database != null) means this file will be loaded into table-model + { + SeriesPartitionKey seriesPartitionKey = + CommonUtils.getSeriesPartitionKey(pair.left, database, false); + return database != null + ? 
dataPartition.getDataRegionReplicaSetForWriting( + seriesPartitionKey, pair.right, database) + : dataPartition.getDataRegionReplicaSetForWriting(pair.left, pair.right); + }) .collect(Collectors.toList())); } return replicaSets; diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/statement/crud/LoadTsFileStatement.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/statement/crud/LoadTsFileStatement.java index e45603a14f92f..11aede042511f 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/statement/crud/LoadTsFileStatement.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/statement/crud/LoadTsFileStatement.java @@ -26,6 +26,7 @@ import org.apache.iotdb.db.queryengine.plan.statement.Statement; import org.apache.iotdb.db.queryengine.plan.statement.StatementType; import org.apache.iotdb.db.queryengine.plan.statement.StatementVisitor; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileManager; import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource; import org.apache.iotdb.db.storageengine.load.config.LoadTsFileConfigurator; @@ -49,6 +50,7 @@ import static org.apache.iotdb.db.storageengine.load.config.LoadTsFileConfigurator.ON_SUCCESS_KEY; import static org.apache.iotdb.db.storageengine.load.config.LoadTsFileConfigurator.ON_SUCCESS_NONE_VALUE; import static org.apache.iotdb.db.storageengine.load.config.LoadTsFileConfigurator.PIPE_GENERATED_KEY; +import static org.apache.iotdb.db.storageengine.load.config.LoadTsFileConfigurator.SEVO_FILE_PATH_KEY; import static org.apache.iotdb.db.storageengine.load.config.LoadTsFileConfigurator.TABLET_CONVERSION_THRESHOLD_KEY; public class LoadTsFileStatement extends Statement { @@ -68,8 +70,12 @@ public class LoadTsFileStatement extends Statement { private List isTableModel; private List resources; private List writePointCountList; + private File schemaEvolutionFile; private boolean 
needDecode4TimeColumn; + // for loading iotdb datanode dir + private Map> databaseRegionTsFileManagers; + public LoadTsFileStatement(String filePath) throws FileNotFoundException { this.file = new File(filePath).getAbsoluteFile(); this.databaseLevel = IoTDBDescriptor.getInstance().getConfig().getDefaultDatabaseLevel(); @@ -256,10 +262,27 @@ public void setLoadAttributes(final Map loadAttributes) { initAttributes(loadAttributes); } + public File getSchemaEvolutionFile() { + return schemaEvolutionFile; + } + + public void setSchemaEvolutionFile(File schemaEvolutionFile) { + this.schemaEvolutionFile = schemaEvolutionFile; + } + public boolean isAsyncLoad() { return isAsyncLoad; } + public void setDatabaseRegionTsFileManagers( + Map> databaseRegionTsFileManagers) { + this.databaseRegionTsFileManagers = databaseRegionTsFileManagers; + } + + public Map> getDatabaseRegionTsFileManagers() { + return databaseRegionTsFileManagers; + } + private void initAttributes(final Map loadAttributes) { this.databaseLevel = LoadTsFileConfigurator.parseOrGetDefaultDatabaseLevel(loadAttributes); this.database = LoadTsFileConfigurator.parseDatabaseName(loadAttributes); @@ -273,6 +296,7 @@ private void initAttributes(final Map loadAttributes) { if (LoadTsFileConfigurator.parseOrGetDefaultPipeGenerated(loadAttributes)) { markIsGeneratedByPipe(); } + this.schemaEvolutionFile = LoadTsFileConfigurator.parseSevoFile(loadAttributes); } public boolean reconstructStatementIfMiniFileConverted(final List isMiniTsFile) { @@ -363,6 +387,10 @@ public List getSubStatements() { return subStatements; } + public File getFile() { + return file; + } + @Override public List getPaths() { return Collections.emptyList(); @@ -387,6 +415,9 @@ public org.apache.iotdb.db.queryengine.plan.relational.sql.ast.Statement toRelat if (isGeneratedByPipe) { loadAttributes.put(PIPE_GENERATED_KEY, String.valueOf(true)); } + if (schemaEvolutionFile != null) { + loadAttributes.put(SEVO_FILE_PATH_KEY, 
schemaEvolutionFile.getAbsolutePath()); + } return new LoadTsFile(null, file.getAbsolutePath(), loadAttributes); } @@ -415,6 +446,8 @@ public String toString() { + isAsyncLoad + ", tsFiles size=" + tsFiles.size() + + ", database='" + + database + '}'; } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/schemaregion/ISchemaRegion.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/schemaregion/ISchemaRegion.java index 4a11165353cd6..578b0a1e86273 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/schemaregion/ISchemaRegion.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/schemaregion/ISchemaRegion.java @@ -29,6 +29,7 @@ import org.apache.iotdb.db.exception.metadata.SchemaQuotaExceededException; import org.apache.iotdb.db.queryengine.common.schematree.ClusterSchemaTree; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.metadata.write.AlterEncodingCompressorNode; +import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.EvolveSchemaNode; import org.apache.iotdb.db.queryengine.plan.relational.metadata.fetcher.cache.TableId; import org.apache.iotdb.db.queryengine.plan.relational.planner.node.schema.ConstructTableDevicesBlackListNode; import org.apache.iotdb.db.queryengine.plan.relational.planner.node.schema.CreateOrUpdateTableDeviceNode; @@ -418,5 +419,7 @@ void commitUpdateAttribute(final TableDeviceAttributeCommitUpdateNode node) void addNodeLocation(final TableNodeLocationAddNode node) throws MetadataException; + void applySchemaEvolution(EvolveSchemaNode schemaEvolutions) throws MetadataException; + // endregion } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/schemaregion/SchemaRegionPlanType.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/schemaregion/SchemaRegionPlanType.java index b0d41c725eeb0..8b058742ea3c3 100644 --- 
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/schemaregion/SchemaRegionPlanType.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/schemaregion/SchemaRegionPlanType.java @@ -63,6 +63,7 @@ public enum SchemaRegionPlanType { ROLLBACK_TABLE_DEVICES_BLACK_LIST((byte) 106), DELETE_TABLE_DEVICES_IN_BLACK_LIST((byte) 107), DROP_TABLE_ATTRIBUTE((byte) 108), + EVOLVE_SCHEMA((byte) 109), // query plan doesn't need any ser/deSer, thus use one type to represent all READ_SCHEMA(Byte.MAX_VALUE); diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/schemaregion/SchemaRegionPlanVisitor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/schemaregion/SchemaRegionPlanVisitor.java index 0cf087a40e117..e97b094291855 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/schemaregion/SchemaRegionPlanVisitor.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/schemaregion/SchemaRegionPlanVisitor.java @@ -20,6 +20,7 @@ package org.apache.iotdb.db.schemaengine.schemaregion; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.metadata.write.AlterEncodingCompressorNode; +import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.EvolveSchemaNode; import org.apache.iotdb.db.queryengine.plan.relational.planner.node.schema.ConstructTableDevicesBlackListNode; import org.apache.iotdb.db.queryengine.plan.relational.planner.node.schema.CreateOrUpdateTableDeviceNode; import org.apache.iotdb.db.queryengine.plan.relational.planner.node.schema.DeleteTableDeviceNode; @@ -185,4 +186,8 @@ public R visitAlterEncodingCompressor( final AlterEncodingCompressorNode alterEncodingCompressorNode, final C context) { return visitSchemaRegionPlan(alterEncodingCompressorNode, context); } + + public R visitEvolveSchema(final EvolveSchemaNode evolveSchemaNode, final C context) { + return visitSchemaRegionPlan(evolveSchemaNode, context); + } } diff 
--git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/schemaregion/impl/SchemaRegionMemoryImpl.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/schemaregion/impl/SchemaRegionMemoryImpl.java index 0719835fe7a61..26339eae3858d 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/schemaregion/impl/SchemaRegionMemoryImpl.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/schemaregion/impl/SchemaRegionMemoryImpl.java @@ -49,10 +49,12 @@ import org.apache.iotdb.db.queryengine.execution.operator.schema.source.DeviceBlackListConstructor; import org.apache.iotdb.db.queryengine.execution.operator.schema.source.TableDeviceQuerySource; import org.apache.iotdb.db.queryengine.execution.relational.ColumnTransformerBuilder; +import org.apache.iotdb.db.queryengine.execution.relational.ColumnTransformerBuilder.Context; import org.apache.iotdb.db.queryengine.plan.analyze.TypeProvider; import org.apache.iotdb.db.queryengine.plan.planner.LocalExecutionPlanner; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.metadata.write.AlterEncodingCompressorNode; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.metadata.write.CreateTimeSeriesNode; +import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.EvolveSchemaNode; import org.apache.iotdb.db.queryengine.plan.planner.plan.parameter.InputLocation; import org.apache.iotdb.db.queryengine.plan.relational.metadata.Metadata; import org.apache.iotdb.db.queryengine.plan.relational.metadata.fetcher.cache.TableDeviceSchemaCache; @@ -125,6 +127,8 @@ import org.apache.iotdb.db.schemaengine.schemaregion.write.req.view.IPreDeleteLogicalViewPlan; import org.apache.iotdb.db.schemaengine.schemaregion.write.req.view.IRollbackPreDeleteLogicalViewPlan; import org.apache.iotdb.db.schemaengine.table.DataNodeTableCache; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.SchemaEvolution; +import 
org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.TableRename; import org.apache.iotdb.db.storageengine.rescon.memory.SystemInfo; import org.apache.iotdb.db.utils.SchemaUtils; @@ -1563,7 +1567,7 @@ private DeviceAttributeUpdater constructDevicePredicateUpdater( Objects.nonNull(predicate) ? visitor.process( predicate, - new ColumnTransformerBuilder.Context( + new Context( sessionInfo, filterLeafColumnTransformerList, inputLocations, @@ -1590,8 +1594,8 @@ private DeviceAttributeUpdater constructDevicePredicateUpdater( // records common ColumnTransformer between filter and project expressions final List commonTransformerList = new ArrayList<>(); - final ColumnTransformerBuilder.Context projectColumnTransformerContext = - new ColumnTransformerBuilder.Context( + final Context projectColumnTransformerContext = + new Context( sessionInfo, projectLeafColumnTransformerList, inputLocations, @@ -1790,6 +1794,23 @@ public void addNodeLocation(final TableNodeLocationAddNode node) throws Metadata } } + @Override + public void applySchemaEvolution(EvolveSchemaNode node) throws MetadataException { + for (SchemaEvolution schemaEvolution : node.getSchemaEvolutions()) { + if (schemaEvolution instanceof TableRename) { + TableRename tableRename = (TableRename) schemaEvolution; + applyTableRename(tableRename.getNameBefore(), tableRename.getNameAfter()); + } else { + logger.warn("Unsupported schemaEvolution {}, ignore it", schemaEvolution); + } + } + writeToMLog(node); + } + + public void applyTableRename(String oldName, String newName) { + mTree.renameTable(oldName, newName); + } + // endregion private static class RecoverOperationResult { @@ -2123,5 +2144,16 @@ public RecoverOperationResult visitAlterEncodingCompressor( return new RecoverOperationResult(e); } } + + @Override + public RecoverOperationResult visitEvolveSchema( + EvolveSchemaNode evolveSchemaNode, SchemaRegionMemoryImpl context) { + try { + applySchemaEvolution(evolveSchemaNode); + return 
RecoverOperationResult.SUCCESS; + } catch (final MetadataException e) { + return new RecoverOperationResult(e); + } + } } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/schemaregion/impl/SchemaRegionPBTreeImpl.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/schemaregion/impl/SchemaRegionPBTreeImpl.java index 2f4bec896ddf4..5ebc9d52f9d4d 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/schemaregion/impl/SchemaRegionPBTreeImpl.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/schemaregion/impl/SchemaRegionPBTreeImpl.java @@ -41,6 +41,7 @@ import org.apache.iotdb.db.exception.metadata.SchemaQuotaExceededException; import org.apache.iotdb.db.queryengine.common.schematree.ClusterSchemaTree; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.metadata.write.AlterEncodingCompressorNode; +import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.EvolveSchemaNode; import org.apache.iotdb.db.queryengine.plan.relational.metadata.fetcher.cache.TableId; import org.apache.iotdb.db.queryengine.plan.relational.planner.node.schema.ConstructTableDevicesBlackListNode; import org.apache.iotdb.db.queryengine.plan.relational.planner.node.schema.CreateOrUpdateTableDeviceNode; @@ -1583,6 +1584,11 @@ public void addNodeLocation(final TableNodeLocationAddNode node) { throw new UnsupportedOperationException(); } + @Override + public void applySchemaEvolution(EvolveSchemaNode schemaEvolutions) { + throw new UnsupportedOperationException(); + } + // endregion private static class RecoverOperationResult { diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/schemaregion/logfile/visitor/SchemaRegionPlanDeserializer.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/schemaregion/logfile/visitor/SchemaRegionPlanDeserializer.java index 163ccb4e59de8..eb2b07225266c 100644 --- 
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/schemaregion/logfile/visitor/SchemaRegionPlanDeserializer.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/schemaregion/logfile/visitor/SchemaRegionPlanDeserializer.java @@ -26,6 +26,7 @@ import org.apache.iotdb.commons.schema.view.viewExpression.ViewExpression; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.PlanNodeType; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.metadata.write.AlterEncodingCompressorNode; +import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.EvolveSchemaNode; import org.apache.iotdb.db.queryengine.plan.relational.planner.node.schema.ConstructTableDevicesBlackListNode; import org.apache.iotdb.db.queryengine.plan.relational.planner.node.schema.CreateOrUpdateTableDeviceNode; import org.apache.iotdb.db.queryengine.plan.relational.planner.node.schema.DeleteTableDeviceNode; @@ -482,5 +483,11 @@ public ISchemaRegionPlan visitAlterEncodingCompressor( final AlterEncodingCompressorNode alterEncodingCompressorNode, final ByteBuffer buffer) { return (AlterEncodingCompressorNode) PlanNodeType.deserialize(buffer); } + + @Override + public ISchemaRegionPlan visitEvolveSchema( + EvolveSchemaNode evolveSchemaNode, ByteBuffer buffer) { + return (EvolveSchemaNode) PlanNodeType.deserialize(buffer); + } } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/schemaregion/logfile/visitor/SchemaRegionPlanSerializer.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/schemaregion/logfile/visitor/SchemaRegionPlanSerializer.java index b7b7d9758ca4b..d65b18f18af57 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/schemaregion/logfile/visitor/SchemaRegionPlanSerializer.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/schemaregion/logfile/visitor/SchemaRegionPlanSerializer.java @@ -23,6 +23,7 @@ import 
org.apache.iotdb.commons.schema.view.viewExpression.ViewExpression; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.PlanNode; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.metadata.write.AlterEncodingCompressorNode; +import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.EvolveSchemaNode; import org.apache.iotdb.db.queryengine.plan.relational.planner.node.schema.ConstructTableDevicesBlackListNode; import org.apache.iotdb.db.queryengine.plan.relational.planner.node.schema.CreateOrUpdateTableDeviceNode; import org.apache.iotdb.db.queryengine.plan.relational.planner.node.schema.DeleteTableDeviceNode; @@ -553,6 +554,12 @@ public SchemaRegionPlanSerializationResult visitAlterEncodingCompressor( return visitPlanNode(alterEncodingCompressorNode, outputStream); } + @Override + public SchemaRegionPlanSerializationResult visitEvolveSchema( + EvolveSchemaNode evolveSchemaNode, DataOutputStream outputStream) { + return visitPlanNode(evolveSchemaNode, outputStream); + } + private SchemaRegionPlanSerializationResult visitPlanNode( final PlanNode planNode, final DataOutputStream outputStream) { try { diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/schemaregion/mtree/impl/mem/MTreeBelowSGMemoryImpl.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/schemaregion/mtree/impl/mem/MTreeBelowSGMemoryImpl.java index ed519030b630e..eeef42dab7c8d 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/schemaregion/mtree/impl/mem/MTreeBelowSGMemoryImpl.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/schemaregion/mtree/impl/mem/MTreeBelowSGMemoryImpl.java @@ -1884,6 +1884,16 @@ public int getTableDeviceNotExistNum(final String tableName, final List lastTsFileSetMap = new ConcurrentHashMap<>(); + + private DataRegionTaskManager dataRegionTaskManager; + /** * Construct a database processor. 
* @@ -399,8 +413,7 @@ public DataRegion( acquireDirectBufferMemory(); dataRegionSysDir = SystemFileFactory.INSTANCE.getFile(systemDir, dataRegionIdString); - this.tsFileManager = - new TsFileManager(databaseName, dataRegionIdString, dataRegionSysDir.getPath()); + this.tsFileManager = new TsFileManager(databaseName, dataRegionIdString); if (dataRegionSysDir.mkdirs()) { logger.info( "Database system Directory {} doesn't exist, create it", dataRegionSysDir.getPath()); @@ -457,7 +470,7 @@ public DataRegion(String databaseName, String dataRegionIdString) { this.databaseName = databaseName; this.dataRegionIdString = dataRegionIdString; this.dataRegionId = new DataRegionId(Integer.parseInt(this.dataRegionIdString)); - this.tsFileManager = new TsFileManager(databaseName, dataRegionIdString, ""); + this.tsFileManager = new TsFileManager(databaseName, dataRegionIdString); this.partitionMaxFileVersions = new HashMap<>(); partitionMaxFileVersions.put(0L, 0L); upgradeModFileThreadPool = null; @@ -467,7 +480,7 @@ public DataRegion(String databaseName, String dataRegionIdString) { } private void initDiskSelector() { - final ILoadDiskSelector.DiskDirectorySelector selector = + final DiskDirectorySelector selector = (sourceDirectory, fileName, tierLevel) -> { try { return TierManager.getInstance() @@ -669,6 +682,10 @@ private void recover() throws DataRegionException { throw new RuntimeException(e); } } + // ensure that seq and unseq files in the same partition have the same TsFileSet + Map> recoveredPartitionTsFileSetMap = new HashMap<>(); + Map partitionMinimalVersion = new HashMap<>(); + for (Entry> partitionFiles : partitionTmpSeqTsFiles.entrySet()) { Callable asyncRecoverTask = recoverFilesInPartition( @@ -676,7 +693,9 @@ private void recover() throws DataRegionException { dataRegionRecoveryContext, partitionFiles.getValue(), fileTimeIndexMap, - true); + true, + recoveredPartitionTsFileSetMap, + partitionMinimalVersion); if (asyncRecoverTask != null) { 
asyncTsFileResourceRecoverTaskList.add(asyncRecoverTask); } @@ -689,7 +708,9 @@ private void recover() throws DataRegionException { dataRegionRecoveryContext, partitionFiles.getValue(), fileTimeIndexMap, - false); + false, + recoveredPartitionTsFileSetMap, + partitionMinimalVersion); if (asyncRecoverTask != null) { asyncTsFileResourceRecoverTaskList.add(asyncRecoverTask); } @@ -704,6 +725,19 @@ private void recover() throws DataRegionException { Long.MAX_VALUE, lastFlushTimeMap.getMemSize(latestPartitionId))); } + + // remove empty file sets + for (Entry> entry : recoveredPartitionTsFileSetMap.entrySet()) { + long partitionId = entry.getKey(); + // if no file in the partition, all filesets should be cleared + long minimumFileVersion = + partitionMinimalVersion.getOrDefault(partitionId, Long.MAX_VALUE); + for (TsFileSet tsFileSet : entry.getValue()) { + if (tsFileSet.getEndVersion() < minimumFileVersion) { + tsFileSet.remove(); + } + } + } } // wait until all unsealed TsFiles have been recovered for (WALRecoverListener recoverListener : recoverListeners) { @@ -741,6 +775,9 @@ private void recover() throws DataRegionException { throw new DataRegionException(e); } + dataRegionTaskManager = new DataRegionTaskManager(this); + dataRegionTaskManager.recover(); + if (asyncTsFileResourceRecoverTaskList.isEmpty()) { initCompactionSchedule(); } @@ -772,9 +809,13 @@ private void updatePartitionLastFlushTime(TsFileResource resource) { protected void updateDeviceLastFlushTime(TsFileResource resource) { long timePartitionId = resource.getTimePartition(); Map endTimeMap = new HashMap<>(); + EvolvedSchema mergedEvolvedSchema = resource.getMergedEvolvedSchema(); for (IDeviceID deviceId : resource.getDevices()) { @SuppressWarnings("OptionalGetWithoutIsPresent") // checked above long endTime = resource.getEndTime(deviceId).get(); + if (mergedEvolvedSchema != null) { + deviceId = mergedEvolvedSchema.rewriteToOriginal(deviceId); + } endTimeMap.put(deviceId, endTime); } if 
(config.isEnableSeparateData()) { @@ -789,10 +830,14 @@ protected void upgradeAndUpdateDeviceLastFlushTime( long timePartitionId, List resources) { Map endTimeMap = new HashMap<>(); for (TsFileResource resource : resources) { + EvolvedSchema mergedEvolvedSchema = resource.getMergedEvolvedSchema(); for (IDeviceID deviceId : resource.getDevices()) { // checked above //noinspection OptionalGetWithoutIsPresent long endTime = resource.getEndTime(deviceId).get(); + if (mergedEvolvedSchema != null) { + deviceId = mergedEvolvedSchema.rewriteToOriginal(deviceId); + } endTimeMap.put(deviceId, endTime); } } @@ -1010,16 +1055,74 @@ private void recoverSealedTsFiles( } } + private String getFileSetsDir(long partitionId) { + return dataRegionSysDir + + File.separator + + partitionId + + File.separator + + TsFileSet.FILE_SET_DIR_NAME; + } + + public File getDataRegionSysDir() { + return dataRegionSysDir; + } + + private void recoverTsFileSets(long partitionId, Map> tsFileSetMap) { + List tsFileSets = + tsFileSetMap.computeIfAbsent( + partitionId, + pid -> { + File fileSetDir = new File(getFileSetsDir(partitionId)); + File[] fileSets = fileSetDir.listFiles(); + if (fileSets == null || fileSets.length == 0) { + return Collections.emptyList(); + } else { + List results = new ArrayList<>(); + for (File fileSet : fileSets) { + TsFileSet tsFileSet; + try { + tsFileSet = + new TsFileSet( + Long.parseLong(fileSet.getName()), fileSetDir.getAbsolutePath(), true); + tsFileManager.addTsFileSet(tsFileSet, partitionId); + } catch (NumberFormatException e) { + continue; + } + results.add(tsFileSet); + } + return results; + } + }); + if (!tsFileSets.isEmpty()) { + tsFileSets.sort(null); + lastTsFileSetMap.put(partitionId, tsFileSets.get(tsFileSets.size() - 1)); + } + } + private Callable recoverFilesInPartition( long partitionId, DataRegionRecoveryContext context, List resourceList, Map fileTimeIndexMap, - boolean isSeq) { + boolean isSeq, + Map> partitionTsFileSetMap, + Map 
partitionMinimalVersion) { + List resourceListForAsyncRecover = new ArrayList<>(); List resourceListForSyncRecover = new ArrayList<>(); Callable asyncRecoverTask = null; + recoverTsFileSets(partitionId, partitionTsFileSetMap); for (TsFileResource tsFileResource : resourceList) { + long fileVersion = tsFileResource.getTsFileID().fileVersion; + partitionMinimalVersion.compute( + partitionId, + (pid, oldVersion) -> { + if (oldVersion == null) { + return fileVersion; + } + return Math.min(oldVersion, fileVersion); + }); + tsFileManager.add(tsFileResource, isSeq); if (fileTimeIndexMap.containsKey(tsFileResource.getTsFileID()) && tsFileResource.resourceFileExists()) { @@ -1140,6 +1243,87 @@ private int compareFileName(File o1, File o2) { } } + private TsFileSet createNewFileSet(long maxVersion, long partitionId) { + TsFileSet newSet = new TsFileSet(maxVersion, getFileSetsDir(partitionId), false); + tsFileManager.addTsFileSet(newSet, partitionId); + return newSet; + } + + public void applySchemaEvolution(List schemaEvolutions) throws IOException { + long startTime = System.nanoTime(); + writeLock("applySchemaEvolution"); + TableDeviceSchemaCache.getInstance().invalidateLastCache(); + PERFORMANCE_OVERVIEW_METRICS.recordScheduleLockCost(System.nanoTime() - startTime); + try { + if (deleted) { + return; + } + + syncCloseAllWorkingTsFileProcessors(); + + // may update table names in deviceIds + schemaEvolutions.forEach(lastFlushTimeMap::accept); + + SchemaEvolutionTask evolutionTask = new SchemaEvolutionTask(schemaEvolutions, this); + dataRegionTaskManager.submitAndRun(evolutionTask); + } finally { + writeUnlock(); + } + } + + public void recordSchemaEvolution(List schemaEvolutions) { + for (Entry partitionVersionEntry : partitionMaxFileVersions.entrySet()) { + long partitionId = partitionVersionEntry.getKey(); + long maxVersion = partitionVersionEntry.getValue(); + lastTsFileSetMap.compute( + partitionId, + (pid, lastSet) -> { + if (lastSet == null) { + lastSet = 
createNewFileSet(maxVersion, partitionId); + } else if (lastSet.getEndVersion() < maxVersion) { + lastSet = createNewFileSet(maxVersion, partitionId); + } + try { + lastSet.appendSchemaEvolution(schemaEvolutions); + } catch (IOException e) { + logger.error( + "Cannot append schema evolutions to fileSets in partition {}-{}", + dataRegionId, + partitionId, + e); + } + return lastSet; + }); + } + } + + public void applySchemaEvolutionToObjects(List schemaEvolutions) { + for (SchemaEvolution schemaEvolution : schemaEvolutions) { + if (schemaEvolution instanceof TableRename) { + TableRename tableRename = (TableRename) schemaEvolution; + renameTableForObjects(tableRename.getNameBefore(), tableRename.getNameAfter()); + } else if (schemaEvolution instanceof ColumnRename) { + ColumnRename columnRename = (ColumnRename) schemaEvolution; + if (columnRename.getDataType() == TSDataType.OBJECT) { + renameMeasurementForObjects( + columnRename.getTableName(), + columnRename.getNameBefore(), + columnRename.getNameAfter()); + } + } + } + } + + private void renameTableForObjects(String nameBefore, String nameAfter) { + // TODO-SchemaEvolution + // throw new UnsupportedOperationException(); + } + + private void renameMeasurementForObjects(String tableName, String nameBefore, String nameAfter) { + // TODO-SchemaEvolution + // throw new UnsupportedOperationException(); + } + /** * insert one row of data. 
* @@ -1708,7 +1892,7 @@ private List insertToTsFileProcessors( } List executedInsertRowNodeList = new ArrayList<>(); - for (Map.Entry entry : tsFileProcessorMap.entrySet()) { + for (Entry entry : tsFileProcessorMap.entrySet()) { TsFileProcessor tsFileProcessor = entry.getKey(); InsertRowsNode subInsertRowsNode = entry.getValue(); try { @@ -2416,7 +2600,8 @@ private boolean tryGetFLushLock( for (TsFileResource tsFileResource : seqResources) { // only need to acquire flush lock for those unclosed and satisfied tsfile if (!tsFileResource.isClosed() - && tsFileResource.isSatisfied(singleDeviceId, globalTimeFilter, true, isDebug)) { + && tsFileResource.isFinalDeviceIdSatisfied( + singleDeviceId, globalTimeFilter, true, isDebug)) { TsFileProcessor tsFileProcessor = tsFileResource.getProcessor(); try { if (tsFileProcessor == null) { @@ -2463,7 +2648,8 @@ private boolean tryGetFLushLock( // deal with unSeq resources for (TsFileResource tsFileResource : unSeqResources) { if (!tsFileResource.isClosed() - && tsFileResource.isSatisfied(singleDeviceId, globalTimeFilter, false, isDebug)) { + && tsFileResource.isFinalDeviceIdSatisfied( + singleDeviceId, globalTimeFilter, false, isDebug)) { TsFileProcessor tsFileProcessor = tsFileResource.getProcessor(); try { if (tsFileProcessor == null) { @@ -2578,7 +2764,8 @@ private List getFileHandleListForQuery( List fileScanHandles = new ArrayList<>(); for (TsFileResource tsFileResource : tsFileResources) { - if (!tsFileResource.isSatisfied(null, globalTimeFilter, isSeq, context.isDebug())) { + if (!tsFileResource.isFinalDeviceIdSatisfied( + null, globalTimeFilter, isSeq, context.isDebug())) { continue; } if (tsFileResource.isClosed()) { @@ -2656,7 +2843,8 @@ private List getFileHandleListForQuery( List fileScanHandles = new ArrayList<>(); for (TsFileResource tsFileResource : tsFileResources) { - if (!tsFileResource.isSatisfied(null, globalTimeFilter, isSeq, context.isDebug())) { + if (!tsFileResource.isFinalDeviceIdSatisfied( + null, 
globalTimeFilter, isSeq, context.isDebug())) { continue; } if (tsFileResource.isClosed()) { @@ -2737,8 +2925,9 @@ public void writeUnlock() { * @param tsFileResources includes sealed and unsealed tsfile resources * @return fill unsealed tsfile resources with memory data and ChunkMetadataList of data in disk */ + @SuppressWarnings("SuspiciousSystemArraycopy") private List getFileResourceListForQuery( - Collection tsFileResources, + List tsFileResources, List pathList, IDeviceID singleDeviceId, QueryContext context, @@ -2748,8 +2937,56 @@ private List getFileResourceListForQuery( List tsfileResourcesForQuery = new ArrayList<>(); + List tsFileSets = Collections.emptyList(); + int tsFileSetsIndex = 0; + Long currentTimePartitionId = null; + EvolvedSchema currentEvolvedSchema; + IDeviceID originalDeviceId = singleDeviceId; + for (TsFileResource tsFileResource : tsFileResources) { - if (!tsFileResource.isSatisfied(singleDeviceId, globalTimeFilter, isSeq, context.isDebug())) { + long fileTimePartition = tsFileResource.getTimePartition(); + // update TsFileSets if time partition changes + boolean tsFileSetsChanged = false; + if (currentTimePartitionId == null || currentTimePartitionId != fileTimePartition) { + currentTimePartitionId = fileTimePartition; + tsFileSets = tsFileManager.getTsFileSet(fileTimePartition); + tsFileSetsIndex = 0; + tsFileSetsChanged = true; + originalDeviceId = singleDeviceId; + } + // find TsFileSets this file belongs to + while (tsFileSetsIndex < tsFileSets.size()) { + TsFileSet tsFileSet = tsFileSets.get(tsFileSetsIndex); + if (tsFileSet.contains(tsFileResource)) { + break; + } else { + tsFileSetsChanged = true; + tsFileSetsIndex++; + } + } + // if TsFileSets change + if (tsFileSetsChanged) { + // and there are remaining TsFileSets, update EvolvedSchema + if (tsFileSetsIndex < tsFileSets.size()) { + currentEvolvedSchema = + TsFileSet.getMergedEvolvedSchema( + tsFileSets.subList(tsFileSetsIndex, tsFileSets.size())); + // use EvolvedSchema to 
rewrite deviceId to original deviceId + if (currentEvolvedSchema != null) { + originalDeviceId = currentEvolvedSchema.rewriteToOriginal(singleDeviceId); + } else { + // no schema evolution, use the singleDeviceId as originalDeviceId + originalDeviceId = singleDeviceId; + } + } else { + // no remaining TsFileSets, no schema evolution + originalDeviceId = singleDeviceId; + } + } + + // reuse the deviceId to avoid rewriting again or reading EvolvedSchema unnecessarily + if (!tsFileResource.isOriginalDeviceIdSatisfied( + originalDeviceId, globalTimeFilter, isSeq, context.isDebug())) { continue; } try { @@ -3010,12 +3247,12 @@ private List logDeletionInWAL(RelationalDeleteDataNode deleteD for (TableDeletionEntry modEntry : deleteDataNode.getModEntries()) { long startTime = modEntry.getStartTime(); long endTime = modEntry.getEndTime(); - for (Map.Entry entry : workSequenceTsFileProcessors.entrySet()) { + for (Entry entry : workSequenceTsFileProcessors.entrySet()) { if (TimePartitionUtils.satisfyPartitionId(startTime, endTime, entry.getKey())) { involvedProcessors.add(entry.getValue()); } } - for (Map.Entry entry : workUnsequenceTsFileProcessors.entrySet()) { + for (Entry entry : workUnsequenceTsFileProcessors.entrySet()) { if (TimePartitionUtils.satisfyPartitionId(startTime, endTime, entry.getKey())) { involvedProcessors.add(entry.getValue()); } @@ -3051,13 +3288,13 @@ private List logDeletionInWAL( DeleteDataNode deleteDataNode = new DeleteDataNode(new PlanNodeId(""), Collections.singletonList(path), startTime, endTime); deleteDataNode.setSearchIndex(searchIndex); - for (Map.Entry entry : workSequenceTsFileProcessors.entrySet()) { + for (Entry entry : workSequenceTsFileProcessors.entrySet()) { if (TimePartitionUtils.satisfyPartitionId(startTime, endTime, entry.getKey())) { WALFlushListener walFlushListener = entry.getValue().logDeleteDataNodeInWAL(deleteDataNode); walFlushListeners.add(walFlushListener); } } - for (Map.Entry entry : 
workUnsequenceTsFileProcessors.entrySet()) { + for (Entry entry : workUnsequenceTsFileProcessors.entrySet()) { if (TimePartitionUtils.satisfyPartitionId(startTime, endTime, entry.getKey())) { WALFlushListener walFlushListener = entry.getValue().logDeleteDataNodeInWAL(deleteDataNode); walFlushListeners.add(walFlushListener); @@ -3174,6 +3411,11 @@ private boolean canSkipDelete(TsFileResource tsFileResource, ModEntry deletion) return false; } + EvolvedSchema evolvedSchema = tsFileResource.getMergedEvolvedSchema(); + if (evolvedSchema != null) { + deletion = evolvedSchema.rewriteToOriginal(deletion); + } + for (IDeviceID device : tsFileResource.getDevices()) { // we are iterating the time index so the times are definitely present long startTime = tsFileResource.getTimeIndex().getStartTime(device).get(); @@ -3225,69 +3467,127 @@ private void deleteDataInUnsealedFiles( } } + private boolean canBeFullyDeleted( + ArrayDeviceTimeIndex deviceTimeIndex, TableDeletionEntry tableDeletionEntry) { + Set devicesInFile = deviceTimeIndex.getDevices(); + String tableName = tableDeletionEntry.getTableName(); + long matchSize = + devicesInFile.stream() + .filter( + device -> { + if (logger.isDebugEnabled()) { + logger.debug( + "device is {}, deviceTable is {}, tableDeletionEntry.getPredicate().matches(device) is {}", + device, + device.getTableName(), + tableDeletionEntry.getPredicate().matches(device)); + } + return tableName.equals(device.getTableName()) + && tableDeletionEntry.getPredicate().matches(device); + }) + .count(); + boolean onlyOneTable = matchSize == devicesInFile.size(); + if (logger.isDebugEnabled()) { + logger.debug( + "tableName is {}, matchSize is {}, onlyOneTable is {}", + tableName, + matchSize, + onlyOneTable); + } + + if (onlyOneTable) { + matchSize = 0; + for (IDeviceID device : devicesInFile) { + Optional optStart = deviceTimeIndex.getStartTime(device); + Optional optEnd = deviceTimeIndex.getEndTime(device); + if (!optStart.isPresent() || !optEnd.isPresent()) 
{ + continue; + } + + long fileStartTime = optStart.get(); + long fileEndTime = optEnd.get(); + + if (logger.isDebugEnabled()) { + logger.debug( + "tableName is {}, device is {}, deletionStartTime is {}, deletionEndTime is {}, fileStartTime is {}, fileEndTime is {}", + device.getTableName(), + device, + tableDeletionEntry.getStartTime(), + tableDeletionEntry.getEndTime(), + fileStartTime, + fileEndTime); + } + if (isFileFullyMatchedByTime(tableDeletionEntry, fileStartTime, fileEndTime)) { + ++matchSize; + } else { + return false; + } + } + return matchSize == devicesInFile.size(); + } else { + return false; + } + } + private void deleteDataInSealedFiles(Collection sealedTsFiles, ModEntry deletion) throws IOException { - Set involvedModificationFiles = new HashSet<>(); - List deletedByMods = new ArrayList<>(); + Set> involvedModificationFiles = new HashSet<>(); List deletedByFiles = new ArrayList<>(); - boolean isDropMeasurementExist = false; - IDPredicate.IDPredicateType idPredicateType = null; - - if (deletion instanceof TableDeletionEntry) { - TableDeletionEntry tableDeletionEntry = (TableDeletionEntry) deletion; - isDropMeasurementExist = !tableDeletionEntry.getPredicate().getMeasurementNames().isEmpty(); - idPredicateType = tableDeletionEntry.getPredicate().getIdPredicateType(); - } for (TsFileResource sealedTsFile : sealedTsFiles) { if (canSkipDelete(sealedTsFile, deletion)) { continue; } - // the tsfile may not be closed here, it should not be added in deletedByFiles - if (!sealedTsFile.isClosed()) { - deletedByMods.add(sealedTsFile); - continue; - } - ITimeIndex timeIndex = sealedTsFile.getTimeIndex(); + EvolvedSchema evolvedSchema = sealedTsFile.getMergedEvolvedSchema(); + // the tsfile may not be closed here, it should not be added in deletedByFiles if ((timeIndex instanceof ArrayDeviceTimeIndex) - && (deletion.getType() == ModEntry.ModType.TABLE_DELETION)) { + && (deletion.getType() == ModType.TABLE_DELETION) + && sealedTsFile.isClosed()) { 
ArrayDeviceTimeIndex deviceTimeIndex = (ArrayDeviceTimeIndex) timeIndex; + Set devicesInFile = deviceTimeIndex.getDevices(); boolean onlyOneTable = false; - if (deletion instanceof TableDeletionEntry) { - TableDeletionEntry tableDeletionEntry = (TableDeletionEntry) deletion; - String tableName = tableDeletionEntry.getTableName(); - long matchSize = - devicesInFile.stream() - .filter( - device -> { - if (logger.isDebugEnabled()) { - logger.debug( - "device is {}, deviceTable is {}, tableDeletionEntry.getPredicate().matches(device) is {}", - device, - device.getTableName(), - tableDeletionEntry.getPredicate().matches(device)); - } - return tableName.equals(device.getTableName()) - && tableDeletionEntry.getPredicate().matches(device); - }) - .count(); - onlyOneTable = matchSize == devicesInFile.size(); - if (logger.isDebugEnabled()) { - logger.debug( - "tableName is {}, matchSize is {}, onlyOneTable is {}", - tableName, - matchSize, - onlyOneTable); - } + TableDeletionEntry tableDeletionEntry = (TableDeletionEntry) deletion; + tableDeletionEntry = + evolvedSchema != null + ? 
evolvedSchema.rewriteToOriginal(tableDeletionEntry) + : tableDeletionEntry; + boolean isDropMeasurementExist = + !tableDeletionEntry.getPredicate().getMeasurementNames().isEmpty(); + TagPredicateType tagPredicateType = tableDeletionEntry.getPredicate().getTagPredicateType(); + + String tableName = tableDeletionEntry.getTableName(); + TableDeletionEntry finalTableDeletionEntry = tableDeletionEntry; + long matchSize = + devicesInFile.stream() + .filter( + device -> { + if (logger.isDebugEnabled()) { + logger.debug( + "device is {}, deviceTable is {}, tableDeletionEntry.getPredicate().matches(device) is {}", + device, + device.getTableName(), + finalTableDeletionEntry.getPredicate().matches(device)); + } + return tableName.equals(device.getTableName()) + && finalTableDeletionEntry.getPredicate().matches(device); + }) + .count(); + onlyOneTable = matchSize == devicesInFile.size(); + if (logger.isDebugEnabled()) { + logger.debug( + "tableName is {}, matchSize is {}, onlyOneTable is {}", + tableName, + matchSize, + onlyOneTable); } if (onlyOneTable) { - int matchSize = 0; + matchSize = 0; for (IDeviceID device : devicesInFile) { Optional optStart = deviceTimeIndex.getStartTime(device); Optional optEnd = deviceTimeIndex.getEndTime(device); @@ -3309,11 +3609,12 @@ private void deleteDataInSealedFiles(Collection sealedTsFiles, M fileEndTime); } if (isFileFullyMatchedByTime(deletion, fileStartTime, fileEndTime) - && idPredicateType.equals(IDPredicate.IDPredicateType.NOP) + && tagPredicateType.equals(TagPredicateType.NOP) && !isDropMeasurementExist) { ++matchSize; } else { - deletedByMods.add(sealedTsFile); + involvedModificationFiles.add( + new Pair<>(sealedTsFile.getModFileForWrite(), tableDeletionEntry)); break; } } @@ -3329,20 +3630,17 @@ private void deleteDataInSealedFiles(Collection sealedTsFiles, M } } } else { - involvedModificationFiles.add(sealedTsFile.getModFileForWrite()); + involvedModificationFiles.add( + new Pair<>(sealedTsFile.getModFileForWrite(), 
tableDeletionEntry)); } } else { - involvedModificationFiles.add(sealedTsFile.getModFileForWrite()); + involvedModificationFiles.add( + new Pair<>( + sealedTsFile.getModFileForWrite(), + evolvedSchema != null ? evolvedSchema.rewriteToOriginal(deletion) : deletion)); } } - for (TsFileResource tsFileResource : deletedByMods) { - if (tsFileResource.isClosed() - || !tsFileResource.getProcessor().deleteDataInMemory(deletion)) { - involvedModificationFiles.add(tsFileResource.getModFileForWrite()); - } // else do nothing - } - if (!deletedByFiles.isEmpty()) { deleteTsFileCompletely(deletedByFiles); if (logger.isDebugEnabled()) { @@ -3359,10 +3657,10 @@ private void deleteDataInSealedFiles(Collection sealedTsFiles, M List exceptions = involvedModificationFiles.parallelStream() .map( - modFile -> { + modFileEntryPair -> { try { - modFile.write(deletion); - modFile.close(); + modFileEntryPair.getLeft().write(modFileEntryPair.getRight()); + modFileEntryPair.getLeft().close(); } catch (Exception e) { return e; } @@ -4448,7 +4746,7 @@ public void insert(InsertRowsOfOneDeviceNode insertRowsOfOneDeviceNode) // infoForMetrics[2]: ScheduleWalTimeCost // infoForMetrics[3]: ScheduleMemTableTimeCost // infoForMetrics[4]: InsertedPointsNumber - for (Map.Entry entry : tsFileProcessorMap.entrySet()) { + for (Entry entry : tsFileProcessorMap.entrySet()) { TsFileProcessor tsFileProcessor = entry.getKey(); InsertRowsNode subInsertRowsNode = entry.getValue(); try { diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/DeviceLastFlushTime.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/DeviceLastFlushTime.java index f02044b041472..3e72acaa34dbc 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/DeviceLastFlushTime.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/DeviceLastFlushTime.java @@ -19,6 +19,9 @@ package 
org.apache.iotdb.db.storageengine.dataregion; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.SchemaEvolution; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.TableRename; + import org.apache.tsfile.file.metadata.IDeviceID; import java.util.HashMap; @@ -53,4 +56,13 @@ public ILastFlushTime degradeLastFlushTime() { Map getDeviceLastFlushTimeMap() { return deviceLastFlushTimeMap; } + + @Override + public void accept(SchemaEvolution schemaEvolution) { + if (!(schemaEvolution instanceof TableRename)) { + return; + } + TableRename tableRename = (TableRename) schemaEvolution; + tableRename.rewriteMap(deviceLastFlushTimeMap); + } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/HashLastFlushTimeMap.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/HashLastFlushTimeMap.java index 3f0abfd3e2481..d02835015eb58 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/HashLastFlushTimeMap.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/HashLastFlushTimeMap.java @@ -20,12 +20,15 @@ package org.apache.iotdb.db.storageengine.dataregion; import org.apache.iotdb.db.storageengine.StorageEngine; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.SchemaEvolution; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.TableRename; import org.apache.tsfile.file.metadata.IDeviceID; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.util.Map; +import java.util.Map.Entry; import java.util.concurrent.ConcurrentHashMap; public class HashLastFlushTimeMap implements ILastFlushTimeMap { @@ -70,7 +73,7 @@ public void updateMultiDeviceFlushedTime( timePartitionId, id -> new DeviceLastFlushTime()); long memIncr = 0L; - for (Map.Entry entry : flushedTimeMap.entrySet()) { + for (Entry entry : flushedTimeMap.entrySet()) { if 
(flushTimeMapForPartition.getLastFlushTime(entry.getKey()) == Long.MIN_VALUE) { memIncr += HASHMAP_NODE_BASIC_SIZE + entry.getKey().ramBytesUsed(); } @@ -93,7 +96,7 @@ public void upgradeAndUpdateMultiDeviceFlushedTime( long maxFlushTime = flushTimeMapForPartition.getLastFlushTime(null); ILastFlushTime newDeviceLastFlushTime = new DeviceLastFlushTime(); long memIncr = 0; - for (Map.Entry entry : flushedTimeMap.entrySet()) { + for (Entry entry : flushedTimeMap.entrySet()) { memIncr += HASHMAP_NODE_BASIC_SIZE + entry.getKey().ramBytesUsed(); newDeviceLastFlushTime.updateLastFlushTime(entry.getKey(), entry.getValue()); maxFlushTime = Math.max(maxFlushTime, entry.getValue()); @@ -104,7 +107,7 @@ public void upgradeAndUpdateMultiDeviceFlushedTime( } else { // go here when DeviceLastFlushTime was recovered by wal recovery long memIncr = 0; - for (Map.Entry entry : flushedTimeMap.entrySet()) { + for (Entry entry : flushedTimeMap.entrySet()) { if (flushTimeMapForPartition.getLastFlushTime(entry.getKey()) == Long.MIN_VALUE) { memIncr += HASHMAP_NODE_BASIC_SIZE + entry.getKey().ramBytesUsed(); } @@ -131,7 +134,7 @@ public void updatePartitionFlushedTime(long timePartitionId, long maxFlushedTime // go here when DeviceLastFlushTime was recovered by wal recovery DeviceLastFlushTime deviceLastFlushTime = (DeviceLastFlushTime) flushTimeMapForPartition; Map flushedTimeMap = deviceLastFlushTime.getDeviceLastFlushTimeMap(); - for (Map.Entry entry : flushedTimeMap.entrySet()) { + for (Entry entry : flushedTimeMap.entrySet()) { flushTimeMapForPartition.updateLastFlushTime(entry.getKey(), entry.getValue()); } } @@ -139,7 +142,7 @@ public void updatePartitionFlushedTime(long timePartitionId, long maxFlushedTime @Override public void updateMultiDeviceGlobalFlushedTime(Map globalFlushedTimeMap) { - for (Map.Entry entry : globalFlushedTimeMap.entrySet()) { + for (Entry entry : globalFlushedTimeMap.entrySet()) { globalLatestFlushedTimeForEachDevice.merge(entry.getKey(), entry.getValue(), 
Math::max); } } @@ -161,7 +164,7 @@ public boolean checkAndCreateFlushedTimePartition( // For insert @Override public void updateLatestFlushTime(long partitionId, Map updateMap) { - for (Map.Entry entry : updateMap.entrySet()) { + for (Entry entry : updateMap.entrySet()) { partitionLatestFlushedTime .computeIfAbsent(partitionId, id -> new DeviceLastFlushTime()) .updateLastFlushTime(entry.getKey(), entry.getValue()); @@ -212,4 +215,15 @@ public long getMemSize(long partitionId) { } return 0; } + + @Override + public void accept(SchemaEvolution schemaEvolution) { + if (!(schemaEvolution instanceof TableRename)) { + return; + } + + TableRename tableRename = (TableRename) schemaEvolution; + tableRename.rewriteMap(globalLatestFlushedTimeForEachDevice); + partitionLatestFlushedTime.values().forEach(t -> t.accept(schemaEvolution)); + } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/ILastFlushTime.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/ILastFlushTime.java index be68369a42b87..9b685407326f0 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/ILastFlushTime.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/ILastFlushTime.java @@ -19,6 +19,8 @@ package org.apache.iotdb.db.storageengine.dataregion; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.SchemaEvolution; + import org.apache.tsfile.file.metadata.IDeviceID; public interface ILastFlushTime { @@ -28,4 +30,6 @@ public interface ILastFlushTime { void updateLastFlushTime(IDeviceID device, long time); ILastFlushTime degradeLastFlushTime(); + + void accept(SchemaEvolution schemaEvolution); } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/ILastFlushTimeMap.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/ILastFlushTimeMap.java index 
7bdd141bf6b5e..ca3a5e37ced69 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/ILastFlushTimeMap.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/ILastFlushTimeMap.java @@ -19,6 +19,8 @@ package org.apache.iotdb.db.storageengine.dataregion; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.SchemaEvolution; + import org.apache.tsfile.file.metadata.IDeviceID; import java.util.Map; @@ -63,4 +65,6 @@ void upgradeAndUpdateMultiDeviceFlushedTime( void degradeLastFlushTime(long partitionId); long getMemSize(long partitionId); + + void accept(SchemaEvolution schemaEvolution); } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/PartitionLastFlushTime.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/PartitionLastFlushTime.java index a5976861441e7..e37ce43b930a0 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/PartitionLastFlushTime.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/PartitionLastFlushTime.java @@ -19,6 +19,8 @@ package org.apache.iotdb.db.storageengine.dataregion; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.SchemaEvolution; + import org.apache.tsfile.file.metadata.IDeviceID; public class PartitionLastFlushTime implements ILastFlushTime { @@ -43,4 +45,9 @@ public void updateLastFlushTime(IDeviceID device, long time) { public ILastFlushTime degradeLastFlushTime() { return this; } + + @Override + public void accept(SchemaEvolution schemaEvolution) { + // no-op + } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/performer/impl/FastCompactionPerformer.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/performer/impl/FastCompactionPerformer.java index 
54b21ddd382fe..1390fa2bdcfb3 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/performer/impl/FastCompactionPerformer.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/performer/impl/FastCompactionPerformer.java @@ -48,6 +48,7 @@ import org.apache.iotdb.db.storageengine.dataregion.compaction.selector.estimator.FastCrossSpaceCompactionEstimator; import org.apache.iotdb.db.storageengine.dataregion.modification.ModEntry; import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.EvolvedSchema; import org.apache.iotdb.db.utils.datastructure.PatternTreeMapFactory; import org.apache.tsfile.common.conf.TSFileDescriptor; @@ -75,6 +76,8 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ExecutionException; import java.util.concurrent.Future; +import java.util.stream.Collectors; +import java.util.stream.Stream; public class FastCompactionPerformer implements ICrossCompactionPerformer, ISeqCompactionPerformer, IUnseqCompactionPerformer { @@ -103,6 +106,7 @@ public class FastCompactionPerformer private final boolean isCrossCompaction; private EncryptParameter encryptParameter; + private final Pair maxTsFileSetEndVersionAndMinResource; @TestOnly public FastCompactionPerformer( @@ -122,6 +126,7 @@ public FastCompactionPerformer( new EncryptParameter( TSFileDescriptor.getInstance().getConfig().getEncryptType(), TSFileDescriptor.getInstance().getConfig().getEncryptKey()); + this.maxTsFileSetEndVersionAndMinResource = new Pair<>(Long.MIN_VALUE, null); } public FastCompactionPerformer( @@ -139,6 +144,9 @@ public FastCompactionPerformer( isCrossCompaction = true; } this.encryptParameter = encryptParameter; + this.maxTsFileSetEndVersionAndMinResource = + TsFileResource.getMaxTsFileSetEndVersionAndMinResource( + Stream.concat(seqFiles.stream(), 
unseqFiles.stream()).collect(Collectors.toList())); } @TestOnly @@ -148,27 +156,45 @@ public FastCompactionPerformer(boolean isCrossCompaction) { new EncryptParameter( TSFileDescriptor.getInstance().getConfig().getEncryptType(), TSFileDescriptor.getInstance().getConfig().getEncryptKey()); + this.maxTsFileSetEndVersionAndMinResource = new Pair<>(Long.MIN_VALUE, null); } public FastCompactionPerformer(boolean isCrossCompaction, EncryptParameter encryptParameter) { this.isCrossCompaction = isCrossCompaction; this.encryptParameter = encryptParameter; + this.maxTsFileSetEndVersionAndMinResource = new Pair<>(Long.MIN_VALUE, null); } @Override public void perform() throws Exception { this.subTaskSummary.setTemporalFileNum(targetFiles.size()); + List allSourceFiles = + Stream.concat(seqFiles.stream(), unseqFiles.stream()) + .sorted(TsFileResource::compareFileName) + .collect(Collectors.toList()); + Pair maxTsFileSetEndVersionAndMinResource = + TsFileResource.getMaxTsFileSetEndVersionAndMinResource(allSourceFiles); + try (MultiTsFileDeviceIterator deviceIterator = new MultiTsFileDeviceIterator(seqFiles, unseqFiles, readerCacheMap); AbstractCompactionWriter compactionWriter = isCrossCompaction ? 
new FastCrossCompactionWriter( - targetFiles, seqFiles, readerCacheMap, encryptParameter) + targetFiles, + seqFiles, + readerCacheMap, + encryptParameter, + maxTsFileSetEndVersionAndMinResource.left) : new FastInnerCompactionWriter(targetFiles, encryptParameter)) { List schemas = CompactionTableSchemaCollector.collectSchema( - seqFiles, unseqFiles, readerCacheMap, deviceIterator.getDeprecatedTableSchemaMap()); - compactionWriter.setSchemaForAllTargetFile(schemas); + seqFiles, + unseqFiles, + readerCacheMap, + deviceIterator.getDeprecatedTableSchemaMap(), + maxTsFileSetEndVersionAndMinResource); + + compactionWriter.setSchemaForAllTargetFile(schemas, maxTsFileSetEndVersionAndMinResource); readModification(seqFiles); readModification(unseqFiles); while (deviceIterator.hasNextDevice()) { @@ -184,10 +210,23 @@ public void perform() throws Exception { sortedSourceFiles.addAll(unseqFiles); boolean isTreeModel = !isAligned || device.getTableName().startsWith("root."); long ttl = deviceIterator.getTTLForCurrentDevice(); - sortedSourceFiles.removeIf(x -> x.definitelyNotContains(device)); + sortedSourceFiles.removeIf( + x -> { + EvolvedSchema evolvedSchema = + x.getMergedEvolvedSchema(maxTsFileSetEndVersionAndMinResource.left); + IDeviceID originalDevice = device; + if (evolvedSchema != null) { + originalDevice = evolvedSchema.rewriteToOriginal(device); + } + return x.definitelyNotContains(originalDevice); + }); // checked above - //noinspection OptionalGetWithoutIsPresent - sortedSourceFiles.sort(Comparator.comparingLong(x -> x.getStartTime(device).get())); + sortedSourceFiles.sort( + Comparator.comparingLong( + x -> { + //noinspection OptionalGetWithoutIsPresent + return x.getStartTime(device, maxTsFileSetEndVersionAndMinResource.left).get(); + })); ModEntry ttlDeletion = null; if (ttl != Long.MAX_VALUE) { ttlDeletion = @@ -273,7 +312,8 @@ private void compactAlignedSeries( measurementSchemas, deviceId, taskSummary, - ignoreAllNullRows) + ignoreAllNullRows, + 
maxTsFileSetEndVersionAndMinResource) .call(); subTaskSummary.increase(taskSummary); } @@ -333,7 +373,8 @@ private void compactNonAlignedSeries( measurementsForEachSubTask[i], deviceID, taskSummary, - i))); + i, + maxTsFileSetEndVersionAndMinResource))); taskSummaryList.add(taskSummary); } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/performer/impl/ReadChunkCompactionPerformer.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/performer/impl/ReadChunkCompactionPerformer.java index d406286e37f64..7d3cb5fa80151 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/performer/impl/ReadChunkCompactionPerformer.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/performer/impl/ReadChunkCompactionPerformer.java @@ -27,6 +27,7 @@ import org.apache.iotdb.db.exception.StorageEngineException; import org.apache.iotdb.db.storageengine.dataregion.compaction.execute.performer.ISeqCompactionPerformer; import org.apache.iotdb.db.storageengine.dataregion.compaction.execute.task.CompactionTaskSummary; +import org.apache.iotdb.db.storageengine.dataregion.compaction.execute.utils.CompactionTableSchema; import org.apache.iotdb.db.storageengine.dataregion.compaction.execute.utils.CompactionTableSchemaCollector; import org.apache.iotdb.db.storageengine.dataregion.compaction.execute.utils.MultiTsFileDeviceIterator; import org.apache.iotdb.db.storageengine.dataregion.compaction.execute.utils.executor.batch.BatchedReadChunkAlignedSeriesCompactionExecutor; @@ -36,6 +37,7 @@ import org.apache.iotdb.db.storageengine.dataregion.compaction.selector.estimator.AbstractInnerSpaceEstimator; import org.apache.iotdb.db.storageengine.dataregion.compaction.selector.estimator.ReadChunkInnerCompactionEstimator; import 
org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.EvolvedSchema; import org.apache.iotdb.db.storageengine.rescon.memory.SystemInfo; import org.apache.iotdb.db.utils.EncryptDBUtils; @@ -71,7 +73,8 @@ public class ReadChunkCompactionPerformer implements ISeqCompactionPerformer { * IoTDBDescriptor.getInstance().getConfig().getChunkMetadataSizeProportion()); private Schema schema = null; - private EncryptParameter firstEncryptParameter; + private final EncryptParameter firstEncryptParameter; + protected Pair maxTsFileSetEndVersionAndMinResource; @TestOnly public ReadChunkCompactionPerformer(List sourceFiles, TsFileResource targetFile) { @@ -91,6 +94,7 @@ public ReadChunkCompactionPerformer( setSourceFiles(sourceFiles); setTargetFiles(targetFiles); this.firstEncryptParameter = EncryptDBUtils.getDefaultFirstEncryptParam(); + this.maxTsFileSetEndVersionAndMinResource = new Pair<>(Long.MIN_VALUE, null); } public ReadChunkCompactionPerformer( @@ -100,18 +104,23 @@ public ReadChunkCompactionPerformer( setSourceFiles(sourceFiles); setTargetFiles(targetFiles); this.firstEncryptParameter = encryptParameter; + this.maxTsFileSetEndVersionAndMinResource = + TsFileResource.getMaxTsFileSetEndVersionAndMinResource(sourceFiles); } @TestOnly public ReadChunkCompactionPerformer(List sourceFiles) { setSourceFiles(sourceFiles); this.firstEncryptParameter = EncryptDBUtils.getDefaultFirstEncryptParam(); + this.maxTsFileSetEndVersionAndMinResource = new Pair<>(Long.MIN_VALUE, null); } public ReadChunkCompactionPerformer( List sourceFiles, EncryptParameter encryptParameter) { setSourceFiles(sourceFiles); this.firstEncryptParameter = encryptParameter; + this.maxTsFileSetEndVersionAndMinResource = + TsFileResource.getMaxTsFileSetEndVersionAndMinResource(sourceFiles); } @TestOnly @@ -120,6 +129,7 @@ public ReadChunkCompactionPerformer() { new EncryptParameter( 
TSFileDescriptor.getInstance().getConfig().getEncryptType(), TSFileDescriptor.getInstance().getConfig().getEncryptKey()); + this.maxTsFileSetEndVersionAndMinResource = new Pair<>(Long.MIN_VALUE, null); } public ReadChunkCompactionPerformer(EncryptParameter encryptParameter) { @@ -138,7 +148,8 @@ public void perform() CompactionTableSchemaCollector.collectSchema( seqFiles, deviceIterator.getReaderMap(), - deviceIterator.getDeprecatedTableSchemaMap()); + deviceIterator.getDeprecatedTableSchemaMap(), + maxTsFileSetEndVersionAndMinResource); while (deviceIterator.hasNextDevice()) { currentWriter = getAvailableCompactionWriter(); Pair deviceInfo = deviceIterator.nextDevice(); @@ -204,13 +215,26 @@ private void rollCompactionFileWriter() throws IOException { } private void useNewWriter() throws IOException { + TsFileResource tsFileResource = targetResources.get(currentTargetFileIndex); currentWriter = new CompactionTsFileWriter( - targetResources.get(currentTargetFileIndex).getTsFile(), + tsFileResource, memoryBudgetForFileWriter, CompactionType.INNER_SEQ_COMPACTION, - firstEncryptParameter); - currentWriter.setSchema(CompactionTableSchemaCollector.copySchema(schema)); + firstEncryptParameter, + maxTsFileSetEndVersionAndMinResource.getLeft()); + + Schema schema = CompactionTableSchemaCollector.copySchema(this.schema); + TsFileResource minVersionResource = maxTsFileSetEndVersionAndMinResource.getRight(); + // only null during test + tsFileResource.setTsFileManager( + minVersionResource != null ? minVersionResource.getTsFileManager() : null); + EvolvedSchema evolvedSchema = + tsFileResource.getMergedEvolvedSchema(maxTsFileSetEndVersionAndMinResource.getLeft()); + currentWriter.setSchema( + evolvedSchema != null + ? 
evolvedSchema.rewriteToOriginal(schema, CompactionTableSchema::new) + : schema); } @Override @@ -248,8 +272,10 @@ private void compactAlignedSeries( compactionExecutor.execute(); for (ChunkMetadata chunkMetadata : writer.getChunkMetadataListOfCurrentDeviceInMemory()) { if (chunkMetadata.getMeasurementUid().isEmpty()) { - targetResource.updateStartTime(device, chunkMetadata.getStartTime()); - targetResource.updateEndTime(device, chunkMetadata.getEndTime()); + targetResource.updateStartTime( + writer.getCurrentOriginalDeviceId(), chunkMetadata.getStartTime()); + targetResource.updateEndTime( + writer.getCurrentOriginalDeviceId(), chunkMetadata.getEndTime()); } } writer.checkMetadataSizeAndMayFlush(); @@ -352,6 +378,8 @@ private void compactNotAlignedSeries( @Override public void setSourceFiles(List seqFiles) { this.seqFiles = seqFiles; + this.maxTsFileSetEndVersionAndMinResource = + TsFileResource.getMaxTsFileSetEndVersionAndMinResource(seqFiles); } @Override diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/performer/impl/ReadPointCompactionPerformer.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/performer/impl/ReadPointCompactionPerformer.java index c58870357d915..00cbfddfd7926 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/performer/impl/ReadPointCompactionPerformer.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/performer/impl/ReadPointCompactionPerformer.java @@ -19,7 +19,6 @@ package org.apache.iotdb.db.storageengine.dataregion.compaction.execute.performer.impl; import org.apache.iotdb.commons.conf.IoTDBConstant; -import org.apache.iotdb.commons.exception.MetadataException; import org.apache.iotdb.commons.path.AlignedFullPath; import org.apache.iotdb.commons.path.IFullPath; import 
org.apache.iotdb.commons.path.NonAlignedFullPath; @@ -71,6 +70,7 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.Future; import java.util.stream.Collectors; +import java.util.stream.Stream; public class ReadPointCompactionPerformer implements ICrossCompactionPerformer, IUnseqCompactionPerformer { @@ -153,26 +153,45 @@ public void perform() throws Exception { // Do not close device iterator, because tsfile reader is managed by FileReaderManager. MultiTsFileDeviceIterator deviceIterator = new MultiTsFileDeviceIterator(seqFiles, unseqFiles); + List allSourceFiles = + Stream.concat(seqFiles.stream(), unseqFiles.stream()) + .sorted(TsFileResource::compareFileName) + .collect(Collectors.toList()); + Pair maxTsFileSetEndVersionAndMinResource = + TsFileResource.getMaxTsFileSetEndVersionAndMinResource(allSourceFiles); + List schemas = CompactionTableSchemaCollector.collectSchema( seqFiles, unseqFiles, deviceIterator.getReaderMap(), - deviceIterator.getDeprecatedTableSchemaMap()); - compactionWriter.setSchemaForAllTargetFile(schemas); + deviceIterator.getDeprecatedTableSchemaMap(), + maxTsFileSetEndVersionAndMinResource); + + compactionWriter.setSchemaForAllTargetFile(schemas, maxTsFileSetEndVersionAndMinResource); while (deviceIterator.hasNextDevice()) { checkThreadInterrupted(); Pair deviceInfo = deviceIterator.nextDevice(); IDeviceID device = deviceInfo.left; boolean isAligned = deviceInfo.right; - queryDataSource.fillOrderIndexes(device, true); + queryDataSource.fillOrderIndexes(device, true, maxTsFileSetEndVersionAndMinResource.left); if (isAligned) { compactAlignedSeries( - device, deviceIterator, compactionWriter, fragmentInstanceContext, queryDataSource); + device, + deviceIterator, + compactionWriter, + fragmentInstanceContext, + queryDataSource, + maxTsFileSetEndVersionAndMinResource); } else { compactNonAlignedSeries( - device, deviceIterator, compactionWriter, fragmentInstanceContext, queryDataSource); + device, + deviceIterator, + 
compactionWriter, + fragmentInstanceContext, + queryDataSource, + maxTsFileSetEndVersionAndMinResource); } summary.setTemporaryFileSize(compactionWriter.getWriterSize()); } @@ -208,9 +227,11 @@ private void compactAlignedSeries( MultiTsFileDeviceIterator deviceIterator, AbstractCompactionWriter compactionWriter, FragmentInstanceContext fragmentInstanceContext, - QueryDataSource queryDataSource) - throws IOException, MetadataException { - Map schemaMap = deviceIterator.getAllSchemasOfCurrentDevice(); + QueryDataSource queryDataSource, + Pair maxTsFileSetEndVersionAndMinResource) + throws IOException { + Map schemaMap = + deviceIterator.getAllSchemasOfCurrentDevice(maxTsFileSetEndVersionAndMinResource); IMeasurementSchema timeSchema = schemaMap.remove(TsFileConstant.TIME_COLUMN_ID); List measurementSchemas = new ArrayList<>(schemaMap.values()); if (measurementSchemas.isEmpty()) { @@ -230,16 +251,15 @@ private void compactAlignedSeries( new ArrayList<>(schemaMap.keySet()), fragmentInstanceContext, queryDataSource, - true); + true, + maxTsFileSetEndVersionAndMinResource.left); if (dataBlockReader.hasNextBatch()) { - // chunkgroup is serialized only when at least one timeseries under this device has data compactionWriter.startChunkGroup(device, true); - measurementSchemas.add(0, timeSchema); compactionWriter.startMeasurement( TsFileConstant.TIME_COLUMN_ID, new AlignedChunkWriterImpl( - measurementSchemas.remove(0), + timeSchema, measurementSchemas, EncryptUtils.getEncryptParameter(getEncryptParameter())), 0); @@ -256,9 +276,11 @@ private void compactNonAlignedSeries( MultiTsFileDeviceIterator deviceIterator, AbstractCompactionWriter compactionWriter, FragmentInstanceContext fragmentInstanceContext, - QueryDataSource queryDataSource) + QueryDataSource queryDataSource, + Pair maxTsFileSetEndVersionAndMinResource) throws IOException, InterruptedException, ExecutionException { - Map schemaMap = deviceIterator.getAllSchemasOfCurrentDevice(); + Map schemaMap = + 
deviceIterator.getAllSchemasOfCurrentDevice(maxTsFileSetEndVersionAndMinResource); List allMeasurements = new ArrayList<>(schemaMap.keySet()); allMeasurements.sort((String::compareTo)); int subTaskNums = Math.min(allMeasurements.size(), SUB_TASK_NUM); @@ -287,7 +309,8 @@ private void compactNonAlignedSeries( new QueryDataSource(queryDataSource), compactionWriter, schemaMap, - i))); + i, + maxTsFileSetEndVersionAndMinResource.left))); } for (Future future : futures) { future.get(); @@ -311,7 +334,8 @@ public static IDataBlockReader constructReader( List allSensors, FragmentInstanceContext fragmentInstanceContext, QueryDataSource queryDataSource, - boolean isAlign) { + boolean isAlign, + long maxTsFileSetEndVersion) { IFullPath seriesPath; if (isAlign) { seriesPath = new AlignedFullPath(deviceId, measurementIds, measurementSchemas); @@ -320,7 +344,12 @@ public static IDataBlockReader constructReader( } return new SeriesDataBlockReader( - seriesPath, new HashSet<>(allSensors), fragmentInstanceContext, queryDataSource, true); + seriesPath, + new HashSet<>(allSensors), + fragmentInstanceContext, + queryDataSource, + true, + maxTsFileSetEndVersion); } @SuppressWarnings("squid:S1172") @@ -351,8 +380,16 @@ protected AbstractCompactionWriter getCompactionWriter( throws IOException { if (!seqFileResources.isEmpty() && !unseqFileResources.isEmpty()) { // cross space + List allSourceFiles = + Stream.concat(seqFileResources.stream(), unseqFileResources.stream()) + .collect(Collectors.toList()); + Pair maxTsFileSetEndVersionAndMinResource = + TsFileResource.getMaxTsFileSetEndVersionAndMinResource(allSourceFiles); return new ReadPointCrossCompactionWriter( - targetFileResources, seqFileResources, encryptParameter); + targetFileResources, + seqFileResources, + encryptParameter, + maxTsFileSetEndVersionAndMinResource.left); } else { // inner space return new ReadPointInnerCompactionWriter(targetFileResources, encryptParameter); diff --git 
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/task/InnerSpaceCompactionTask.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/task/InnerSpaceCompactionTask.java index c61d2275ac1af..fb431ad874f2c 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/task/InnerSpaceCompactionTask.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/task/InnerSpaceCompactionTask.java @@ -332,19 +332,55 @@ protected void calculateSourceFilesAndTargetFiles() // problem of the same file name. calculateRenamedTargetFiles(needToAdjustSourceFilesPosition); + // If there is a schema evolution and not all files have the same schema evolution + // we should compact all fils to the last position so that the target will have the least + // schema evolution + boolean hasDifferentSchemaEvolution = + filesView.sortedAllSourceFilesInTask.get(0).getMergedEvolvedSchema() != null + && filesView + .sortedAllSourceFilesInTask + .get(0) + .getTsFileSets() + .equals( + filesView + .sourceFilesInCompactionPerformer + .get(filesView.sortedAllSourceFilesInTask.size() - 1) + .getTsFileSets()); if (needToAdjustSourceFilesPosition) { - filesView.targetFilesInPerformer = - TsFileNameGenerator.getNewInnerCompactionTargetFileResources( - filesView.sortedAllSourceFilesInTask.subList( - filesView.renamedTargetFiles.size(), - Math.min( - filesView.renamedTargetFiles.size() + requiredPositionNum, - filesView.sortedAllSourceFilesInTask.size())), - filesView.sequence); + if (!hasDifferentSchemaEvolution) { + filesView.targetFilesInPerformer = + TsFileNameGenerator.getNewInnerCompactionTargetFileResources( + filesView.sortedAllSourceFilesInTask.subList( + filesView.renamedTargetFiles.size(), + Math.min( + filesView.renamedTargetFiles.size() + requiredPositionNum, + 
filesView.sortedAllSourceFilesInTask.size())), + filesView.sequence); + } else { + filesView.targetFilesInPerformer = + Collections.singletonList( + TsFileNameGenerator.getInnerCompactionTargetFileResource( + availablePositionForTargetFiles.subList( + availablePositionForTargetFiles.size() - 1, + availablePositionForTargetFiles.size()), + filesView.sequence)); + } + } else { - filesView.targetFilesInPerformer = - TsFileNameGenerator.getNewInnerCompactionTargetFileResources( - availablePositionForTargetFiles.subList(0, requiredPositionNum), filesView.sequence); + if (!hasDifferentSchemaEvolution) { + filesView.targetFilesInPerformer = + TsFileNameGenerator.getNewInnerCompactionTargetFileResources( + availablePositionForTargetFiles.subList(0, requiredPositionNum), + filesView.sequence); + } else { + filesView.targetFilesInPerformer = + Collections.singletonList( + TsFileNameGenerator.getInnerCompactionTargetFileResource( + availablePositionForTargetFiles.subList( + availablePositionForTargetFiles.size() - 1, + availablePositionForTargetFiles.size()), + filesView.sequence)); + } } filesView.targetFilesInLog = new ArrayList<>( @@ -378,6 +414,7 @@ private void calculateRenamedTargetFiles(boolean needAdjustSourceFilePosition) new File(skippedSourceFile.getParentFile().getPath() + File.separator + newFileName), TsFileResourceStatus.COMPACTING); filesView.renamedTargetFiles.add(renamedTargetFile); + renamedTargetFile.setTsFileManager(tsFileManager); } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/task/RepairUnsortedFileCompactionTask.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/task/RepairUnsortedFileCompactionTask.java index fe1957975cf63..d027957c6e656 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/task/RepairUnsortedFileCompactionTask.java +++ 
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/task/RepairUnsortedFileCompactionTask.java @@ -121,9 +121,10 @@ protected void prepare() throws IOException { @Override protected void calculateSourceFilesAndTargetFiles() throws IOException { filesView.sourceFilesInLog = filesView.sourceFilesInCompactionPerformer; - filesView.targetFilesInLog = - Collections.singletonList( - new TsFileResource(generateTargetFile(), TsFileResourceStatus.COMPACTING)); + TsFileResource targetResource = + new TsFileResource(generateTargetFile(), TsFileResourceStatus.COMPACTING); + targetResource.setTsFileManager(tsFileManager); + filesView.targetFilesInLog = Collections.singletonList(targetResource); filesView.targetFilesInPerformer = filesView.targetFilesInLog; } @@ -137,7 +138,7 @@ private File generateTargetFile() throws IOException { sourceFile.isSeq() ? lastAllocatedFileTimestamp.incrementAndGet() : sourceFileName.getTime(), - sourceFile.isSeq() ? 0 : sourceFileName.getVersion(), + sourceFileName.getVersion(), sourceFileName.getInnerCompactionCnt() + 1, sourceFileName.getCrossCompactionCnt()); // if source file is sequence, the sequence data targetFileDir should be replaced to unsequence diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/task/subtask/FastCompactionPerformerSubTask.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/task/subtask/FastCompactionPerformerSubTask.java index 873993d97df4c..c8bd778c0ffc7 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/task/subtask/FastCompactionPerformerSubTask.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/task/subtask/FastCompactionPerformerSubTask.java @@ -31,6 +31,7 @@ import org.apache.iotdb.db.storageengine.dataregion.modification.ModEntry; import 
org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource; import org.apache.iotdb.db.utils.datastructure.PatternTreeMapFactory; +import org.apache.iotdb.db.utils.datastructure.PatternTreeMapFactory.ModsSerializer; import org.apache.tsfile.exception.write.PageException; import org.apache.tsfile.file.metadata.IDeviceID; @@ -76,6 +77,8 @@ public class FastCompactionPerformerSubTask implements Callable { private List measurementSchemas; + private final Pair maxTsFileSetEndVersionAndMinResource; + private Map compactionSeriesContextMap; /** Used for nonAligned timeseries. */ @@ -90,7 +93,8 @@ public FastCompactionPerformerSubTask( List measurements, IDeviceID deviceId, FastCompactionTaskSummary summary, - int subTaskId) { + int subTaskId, + Pair maxTsFileSetEndVersionAndMinResource) { this.compactionWriter = compactionWriter; this.subTaskId = subTaskId; this.timeseriesMetadataOffsetMap = timeseriesMetadataOffsetMap; @@ -102,6 +106,7 @@ public FastCompactionPerformerSubTask( this.measurements = measurements; this.summary = summary; this.ignoreAllNullRows = true; + this.maxTsFileSetEndVersionAndMinResource = maxTsFileSetEndVersionAndMinResource; } public FastCompactionPerformerSubTask( @@ -114,7 +119,8 @@ public FastCompactionPerformerSubTask( List measurements, IDeviceID deviceId, FastCompactionTaskSummary summary, - int subTaskId) { + int subTaskId, + Pair maxTsFileSetEndVersionAndMinResource) { this.compactionWriter = compactionWriter; this.subTaskId = subTaskId; this.compactionSeriesContextMap = compactionSeriesContextMap; @@ -127,6 +133,7 @@ public FastCompactionPerformerSubTask( this.measurements = measurements; this.summary = summary; this.ignoreAllNullRows = true; + this.maxTsFileSetEndVersionAndMinResource = maxTsFileSetEndVersionAndMinResource; } /** Used for aligned timeseries. 
*/ @@ -134,13 +141,13 @@ public FastCompactionPerformerSubTask( AbstractCompactionWriter compactionWriter, Map>> timeseriesMetadataOffsetMap, Map readerCacheMap, - Map> - modificationCacheMap, + Map> modificationCacheMap, List sortedSourceFiles, List measurementSchemas, IDeviceID deviceId, FastCompactionTaskSummary summary, - boolean ignoreAllNullRows) { + boolean ignoreAllNullRows, + Pair maxTsFileSetEndVersionAndMinResource) { this.compactionWriter = compactionWriter; this.subTaskId = 0; this.timeseriesMetadataOffsetMap = timeseriesMetadataOffsetMap; @@ -152,6 +159,7 @@ public FastCompactionPerformerSubTask( this.measurementSchemas = measurementSchemas; this.summary = summary; this.ignoreAllNullRows = ignoreAllNullRows; + this.maxTsFileSetEndVersionAndMinResource = maxTsFileSetEndVersionAndMinResource; } @Override @@ -166,7 +174,8 @@ public Void call() sortedSourceFiles, deviceId, subTaskId, - summary); + summary, + maxTsFileSetEndVersionAndMinResource); for (String measurement : measurements) { seriesCompactionExecutor.setNewMeasurement( compactionSeriesContextMap.get(measurement).getFileTimeseriesMetdataOffsetMap()); @@ -191,7 +200,8 @@ public Void call() subTaskId, measurementSchemas, summary, - ignoreAllNullRows); + ignoreAllNullRows, + maxTsFileSetEndVersionAndMinResource); } else { seriesCompactionExecutor = new FastAlignedSeriesCompactionExecutor( @@ -204,7 +214,8 @@ public Void call() subTaskId, measurementSchemas, summary, - ignoreAllNullRows); + ignoreAllNullRows, + maxTsFileSetEndVersionAndMinResource); } seriesCompactionExecutor.execute(); } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/task/subtask/ReadPointPerformerSubTask.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/task/subtask/ReadPointPerformerSubTask.java index 74f7259074508..741a6f314cd92 100644 --- 
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/task/subtask/ReadPointPerformerSubTask.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/task/subtask/ReadPointPerformerSubTask.java @@ -57,6 +57,7 @@ public class ReadPointPerformerSubTask implements Callable { private final AbstractCompactionWriter compactionWriter; private final Map schemaMap; private final int taskId; + private final long maxTsFileSetEndVersion; public ReadPointPerformerSubTask( IDeviceID device, @@ -65,7 +66,8 @@ public ReadPointPerformerSubTask( QueryDataSource queryDataSource, AbstractCompactionWriter compactionWriter, Map schemaMap, - int taskId) { + int taskId, + long maxTsFileSetEndVersion) { this.device = device; this.measurementList = measurementList; this.fragmentInstanceContext = fragmentInstanceContext; @@ -73,6 +75,7 @@ public ReadPointPerformerSubTask( this.compactionWriter = compactionWriter; this.schemaMap = schemaMap; this.taskId = taskId; + this.maxTsFileSetEndVersion = maxTsFileSetEndVersion; } @Override @@ -88,7 +91,8 @@ public Void call() throws Exception { new ArrayList<>(schemaMap.keySet()), fragmentInstanceContext, queryDataSource, - false); + false, + maxTsFileSetEndVersion); if (dataBlockReader.hasNextBatch()) { compactionWriter.startMeasurement( diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/CompactionTableSchema.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/CompactionTableSchema.java index 3f6e83cbe96c0..8d43305d94e19 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/CompactionTableSchema.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/CompactionTableSchema.java @@ -33,6 +33,18 @@ public 
CompactionTableSchema(String tableName) { super(tableName); } + public CompactionTableSchema(TableSchema tableSchema) { + this(tableSchema.getTableName(), tableSchema.getColumnSchemas(), tableSchema.getColumnTypes()); + this.updatable = tableSchema.isUpdatable(); + } + + public CompactionTableSchema( + String tableName, + List columnSchemas, + List columnCategories) { + super(tableName, columnSchemas, columnCategories); + } + public boolean merge(TableSchema tableSchema) { if (tableSchema == null) { return true; diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/CompactionTableSchemaCollector.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/CompactionTableSchemaCollector.java index 55640c3fcfa4d..2c2f34e0fca1f 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/CompactionTableSchemaCollector.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/CompactionTableSchemaCollector.java @@ -20,9 +20,11 @@ package org.apache.iotdb.db.storageengine.dataregion.compaction.execute.utils; import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.EvolvedSchema; import org.apache.tsfile.file.metadata.TableSchema; import org.apache.tsfile.read.TsFileSequenceReader; +import org.apache.tsfile.utils.Pair; import org.apache.tsfile.write.schema.Schema; import java.io.IOException; @@ -42,7 +44,8 @@ public static List collectSchema( List seqFiles, List unseqFiles, Map readerMap, - Map> deprecatedTableSchemaMap) + Map> deprecatedTableSchemaMap, + Pair maxTsFileSetEndVersionAndAssociatedResource) throws IOException { List targetSchemas = new ArrayList<>(seqFiles.size()); Schema schema = @@ -51,7 +54,8 @@ public static List collectSchema( 
.sorted(TsFileResource::compareFileName) .collect(Collectors.toList()), readerMap, - deprecatedTableSchemaMap); + deprecatedTableSchemaMap, + maxTsFileSetEndVersionAndAssociatedResource); targetSchemas.add(schema); for (int i = 1; i < seqFiles.size(); i++) { @@ -72,10 +76,12 @@ public static Schema copySchema(Schema source) { public static Schema collectSchema( List sourceFiles, Map readerMap, - Map> deprecatedTableSchemaMap) + Map> deprecatedTableSchemaMap, + Pair maxTsFileSetEndVersionAndAssociatedResource) throws IOException { Schema targetSchema = new Schema(); Map targetTableSchemaMap = new HashMap<>(); + for (int i = 0; i < sourceFiles.size(); i++) { TsFileResource resource = sourceFiles.get(i); TsFileSequenceReader reader = readerMap.get(resource); @@ -84,12 +90,21 @@ public static Schema collectSchema( // v3 tsfile continue; } + + EvolvedSchema evolvedSchema = + resource.getMergedEvolvedSchema(maxTsFileSetEndVersionAndAssociatedResource.getLeft()); + for (Map.Entry entry : tableSchemaMap.entrySet()) { String tableName = entry.getKey(); TableSchema currentTableSchema = entry.getValue(); if (isTreeModel(currentTableSchema)) { continue; } + if (evolvedSchema != null) { + currentTableSchema = evolvedSchema.rewriteToFinal(currentTableSchema); + tableName = currentTableSchema.getTableName(); + } + // merge all id columns, measurement schema will be generated automatically when end chunk // group CompactionTableSchema collectedTableSchema = diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/CompactionUtils.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/CompactionUtils.java index 61943047e541f..41ffd58198244 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/CompactionUtils.java +++ 
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/CompactionUtils.java @@ -34,13 +34,14 @@ import org.apache.iotdb.db.service.metrics.CompactionMetrics; import org.apache.iotdb.db.service.metrics.FileMetrics; import org.apache.iotdb.db.storageengine.dataregion.compaction.constant.CompactionTaskType; +import org.apache.iotdb.db.storageengine.dataregion.compaction.io.CompactionTsFileWriter; import org.apache.iotdb.db.storageengine.dataregion.compaction.schedule.CompactionTaskManager; import org.apache.iotdb.db.storageengine.dataregion.modification.DeletionPredicate; -import org.apache.iotdb.db.storageengine.dataregion.modification.IDPredicate.FullExactMatch; import org.apache.iotdb.db.storageengine.dataregion.modification.ModEntry; import org.apache.iotdb.db.storageengine.dataregion.modification.ModFileManagement; import org.apache.iotdb.db.storageengine.dataregion.modification.ModificationFile; import org.apache.iotdb.db.storageengine.dataregion.modification.TableDeletionEntry; +import org.apache.iotdb.db.storageengine.dataregion.modification.TagPredicate.FullExactMatch; import org.apache.iotdb.db.storageengine.dataregion.modification.TreeDeletionEntry; import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource; import org.apache.iotdb.db.storageengine.dataregion.tsfile.timeindex.ArrayDeviceTimeIndex; @@ -56,7 +57,6 @@ import org.apache.tsfile.file.metadata.IDeviceID; import org.apache.tsfile.fileSystem.FSFactoryProducer; import org.apache.tsfile.read.common.TimeRange; -import org.apache.tsfile.write.writer.TsFileIOWriter; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -324,9 +324,10 @@ public static void deleteModificationForSourceFile( } public static void updateResource( - TsFileResource resource, TsFileIOWriter tsFileIoWriter, IDeviceID deviceId) { + TsFileResource resource, CompactionTsFileWriter tsFileIoWriter) { List chunkMetadatasOfCurrentDevice = 
tsFileIoWriter.getChunkMetadataListOfCurrentDeviceInMemory(); + IDeviceID deviceId = tsFileIoWriter.getCurrentOriginalDeviceId(); if (chunkMetadatasOfCurrentDevice != null) { // this target file contains current device for (ChunkMetadata chunkMetadata : chunkMetadatasOfCurrentDevice) { diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/MultiTsFileDeviceIterator.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/MultiTsFileDeviceIterator.java index 9099586e87e5c..ab50d6740fa91 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/MultiTsFileDeviceIterator.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/MultiTsFileDeviceIterator.java @@ -33,6 +33,8 @@ import org.apache.iotdb.db.storageengine.dataregion.modification.TreeDeletionEntry; import org.apache.iotdb.db.storageengine.dataregion.read.control.FileReaderManager; import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.EvolvedSchema; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.fileset.TsFileSet; import org.apache.iotdb.db.utils.EncryptDBUtils; import org.apache.iotdb.db.utils.ModificationUtils; import org.apache.iotdb.db.utils.datastructure.PatternTreeMapFactory; @@ -81,6 +83,7 @@ public class MultiTsFileDeviceIterator implements AutoCloseable { private long ttlForCurrentDevice; private long timeLowerBoundForCurrentDevice; private final String databaseName; + private final long maxTsFileSetEndVersion; /** * Used for compaction with read chunk performer. 
@@ -96,6 +99,18 @@ public MultiTsFileDeviceIterator(List tsFileResources) throws IO // sort the files from the newest to the oldest Collections.sort( this.tsFileResourcesSortedByDesc, TsFileResource::compareFileCreationOrderByDesc); + maxTsFileSetEndVersion = + this.tsFileResourcesSortedByDesc.stream() + .mapToLong( + // max endVersion of all filesets of a TsFile + resource -> + resource.getTsFileSets().stream() + .mapToLong(TsFileSet::getEndVersion) + .max() + .orElse(Long.MAX_VALUE)) + // overall max endVersion + .max() + .orElse(Long.MAX_VALUE); try { for (TsFileResource tsFileResource : this.tsFileResourcesSortedByDesc) { CompactionTsFileReader reader = @@ -104,7 +119,15 @@ public MultiTsFileDeviceIterator(List tsFileResources) throws IO CompactionType.INNER_SEQ_COMPACTION, EncryptDBUtils.getFirstEncryptParamFromTSFilePath(tsFileResource.getTsFilePath())); readerMap.put(tsFileResource, reader); - deviceIteratorMap.put(tsFileResource, reader.getAllDevicesIteratorWithIsAligned()); + TsFileDeviceIterator tsFileDeviceIterator; + EvolvedSchema evolvedSchema = tsFileResource.getMergedEvolvedSchema(maxTsFileSetEndVersion); + if (evolvedSchema != null) { + tsFileDeviceIterator = + new ReorderedTsFileDeviceIterator(reader, evolvedSchema::rewriteToFinal); + } else { + tsFileDeviceIterator = reader.getAllDevicesIteratorWithIsAligned(); + } + deviceIteratorMap.put(tsFileResource, tsFileDeviceIterator); } } catch (Exception e) { // if there is any exception occurs @@ -129,12 +152,35 @@ public MultiTsFileDeviceIterator( // sort the files from the newest to the oldest Collections.sort( this.tsFileResourcesSortedByDesc, TsFileResource::compareFileCreationOrderByDesc); + + maxTsFileSetEndVersion = + this.tsFileResourcesSortedByDesc.stream() + .mapToLong( + // max endVersion of all filesets of a TsFile + resource -> + resource.getTsFileSets().stream() + .mapToLong(TsFileSet::getEndVersion) + .max() + .orElse(Long.MAX_VALUE)) + // overall max endVersion + .max() + 
.orElse(Long.MAX_VALUE); + for (TsFileResource tsFileResource : tsFileResourcesSortedByDesc) { TsFileSequenceReader reader = FileReaderManager.getInstance() .get(tsFileResource.getTsFilePath(), tsFileResource.getTsFileID(), true); readerMap.put(tsFileResource, reader); - deviceIteratorMap.put(tsFileResource, reader.getAllDevicesIteratorWithIsAligned()); + + TsFileDeviceIterator tsFileDeviceIterator; + EvolvedSchema evolvedSchema = tsFileResource.getMergedEvolvedSchema(maxTsFileSetEndVersion); + if (evolvedSchema != null) { + tsFileDeviceIterator = + new ReorderedTsFileDeviceIterator(reader, evolvedSchema::rewriteToFinal); + } else { + tsFileDeviceIterator = reader.getAllDevicesIteratorWithIsAligned(); + } + deviceIteratorMap.put(tsFileResource, tsFileDeviceIterator); } } @@ -156,6 +202,19 @@ public MultiTsFileDeviceIterator( this.tsFileResourcesSortedByDesc, TsFileResource::compareFileCreationOrderByDesc); this.readerMap = readerMap; + maxTsFileSetEndVersion = + this.tsFileResourcesSortedByDesc.stream() + .mapToLong( + // max endVersion of all filesets of a TsFile + resource -> + resource.getTsFileSets().stream() + .mapToLong(TsFileSet::getEndVersion) + .max() + .orElse(Long.MAX_VALUE)) + // overall max endVersion + .max() + .orElse(Long.MAX_VALUE); + CompactionType type = null; if (!seqResources.isEmpty() && !unseqResources.isEmpty()) { type = CompactionType.CROSS_COMPACTION; @@ -172,7 +231,16 @@ public MultiTsFileDeviceIterator( type, EncryptDBUtils.getFirstEncryptParamFromTSFilePath(tsFileResource.getTsFilePath())); readerMap.put(tsFileResource, reader); - deviceIteratorMap.put(tsFileResource, reader.getAllDevicesIteratorWithIsAligned()); + + TsFileDeviceIterator tsFileDeviceIterator; + EvolvedSchema evolvedSchema = tsFileResource.getMergedEvolvedSchema(maxTsFileSetEndVersion); + if (evolvedSchema != null) { + tsFileDeviceIterator = + new ReorderedTsFileDeviceIterator(reader, evolvedSchema::rewriteToFinal); + } else { + tsFileDeviceIterator = 
reader.getAllDevicesIteratorWithIsAligned(); + } + deviceIteratorMap.put(tsFileResource, tsFileDeviceIterator); } } @@ -260,7 +328,8 @@ public long getTimeLowerBoundForCurrentDevice() { * * @throws IOException if io errors occurred */ - public Map getAllSchemasOfCurrentDevice() throws IOException { + public Map getAllSchemasOfCurrentDevice( + Pair maxTsFileSetEndVersionAndMinResource) throws IOException { Map schemaMap = new ConcurrentHashMap<>(); // get schemas from the newest file to the oldest file for (TsFileResource resource : tsFileResourcesSortedByDesc) { @@ -278,12 +347,23 @@ public Map getAllSchemasOfCurrentDevice() throws IOEx schemaMap.keySet(), true, null); + EvolvedSchema evolvedSchema = + resource.getMergedEvolvedSchema(maxTsFileSetEndVersionAndMinResource.left); + if (evolvedSchema != null) { + // the device has been rewritten, should get the original name for rewriting + evolvedSchema.rewriteToFinal( + evolvedSchema.getOriginalTableName(currentDevice.left.getTableName()), + timeseriesMetadataList); + } + for (TimeseriesMetadata timeseriesMetadata : timeseriesMetadataList) { if (!schemaMap.containsKey(timeseriesMetadata.getMeasurementId()) && !timeseriesMetadata.getChunkMetadataList().isEmpty()) { - schemaMap.put( - timeseriesMetadata.getMeasurementId(), - reader.getMeasurementSchema(timeseriesMetadata.getChunkMetadataList())); + MeasurementSchema measurementSchema = + reader.getMeasurementSchema(timeseriesMetadata.getChunkMetadataList()); + // the column may be renamed + measurementSchema.setMeasurementName(timeseriesMetadata.getMeasurementId()); + schemaMap.put(timeseriesMetadata.getMeasurementId(), measurementSchema); } } } @@ -437,6 +517,12 @@ public Map getCompactionSeriesContextOfCurrentD true) .entrySet()) { String measurementId = entrySet.getKey(); + EvolvedSchema evolvedSchema = resource.getMergedEvolvedSchema(maxTsFileSetEndVersion); + if (evolvedSchema != null) { + String originalTableName = + 
evolvedSchema.getOriginalTableName(currentDevice.left.getTableName()); + measurementId = evolvedSchema.getFinalColumnName(originalTableName, measurementId); + } if (!timeseriesMetadataOffsetMap.containsKey(measurementId)) { MeasurementSchema schema = reader.getMeasurementSchema(entrySet.getValue().left); timeseriesMetadataOffsetMap.put(measurementId, new Pair<>(schema, new HashMap<>())); @@ -497,10 +583,31 @@ public Map getCompactionSeriesContextOfCurrentD MetadataIndexNode firstMeasurementNodeOfCurrentDevice = iterator.getFirstMeasurementNodeOfCurrentDevice(); TsFileSequenceReader reader = readerMap.get(tsFileResource); + EvolvedSchema evolvedSchema = tsFileResource.getMergedEvolvedSchema(maxTsFileSetEndVersion); + IDeviceID originalDeviceId = currentDevice.left; + if (evolvedSchema != null) { + // rewrite the deviceId to the original one so that we can use it to query the file + originalDeviceId = evolvedSchema.rewriteToOriginal(originalDeviceId); + } List alignedChunkMetadataList = reader.getAlignedChunkMetadataByMetadataIndexNode( - currentDevice.left, firstMeasurementNodeOfCurrentDevice, ignoreAllNullRows); + originalDeviceId, firstMeasurementNodeOfCurrentDevice, ignoreAllNullRows); applyModificationForAlignedChunkMetadataList(tsFileResource, alignedChunkMetadataList); + + if (evolvedSchema != null) { + // rewrite the measurementId to the final ones so that they can be aligned with other files + for (AbstractAlignedChunkMetadata abstractAlignedChunkMetadata : alignedChunkMetadataList) { + for (IChunkMetadata chunkMetadata : + abstractAlignedChunkMetadata.getValueChunkMetadataList()) { + if (chunkMetadata != null) { + chunkMetadata.setMeasurementUid( + evolvedSchema.getFinalColumnName( + originalDeviceId.getTableName(), chunkMetadata.getMeasurementUid())); + } + } + } + } + readerAndChunkMetadataList.add(new Pair<>(reader, alignedChunkMetadataList)); } @@ -522,7 +629,7 @@ private void applyModificationForAlignedChunkMetadataList( } IDeviceID device = 
currentDevice.getLeft(); ModEntry ttlDeletion = null; - Optional startTime = tsFileResource.getStartTime(device); + Optional startTime = tsFileResource.getStartTime(device, maxTsFileSetEndVersion); if (startTime.isPresent() && startTime.get() < timeLowerBoundForCurrentDevice) { ttlDeletion = CompactionUtils.convertTtlToDeletion(device, timeLowerBoundForCurrentDevice); } @@ -748,7 +855,7 @@ public String nextSeries() throws IllegalPathException { Map> chunkMetadataListMap = chunkMetadataCacheMap.get(reader); ModEntry ttlDeletion = null; - Optional startTime = resource.getStartTime(device); + Optional startTime = resource.getStartTime(device, maxTsFileSetEndVersion); if (startTime.isPresent() && startTime.get() < timeLowerBoundForCurrentDevice) { ttlDeletion = new TreeDeletionEntry( diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/ReorderedTsFileDeviceIterator.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/ReorderedTsFileDeviceIterator.java new file mode 100644 index 0000000000000..91ac97feb21c8 --- /dev/null +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/ReorderedTsFileDeviceIterator.java @@ -0,0 +1,86 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.db.storageengine.dataregion.compaction.execute.utils; + +import org.apache.tsfile.file.metadata.IDeviceID; +import org.apache.tsfile.file.metadata.MetadataIndexNode; +import org.apache.tsfile.read.TsFileSequenceReader; +import org.apache.tsfile.utils.Pair; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Comparator; +import java.util.Iterator; +import java.util.List; +import java.util.function.Function; + +public class ReorderedTsFileDeviceIterator extends TransformedTsFileDeviceIterator { + + private final List, MetadataIndexNode>> + deviceIDAndFirstMeasurementNodeList = new ArrayList<>(); + private Iterator, MetadataIndexNode>> deviceIDListIterator; + private Pair, MetadataIndexNode> current; + + public ReorderedTsFileDeviceIterator( + TsFileSequenceReader reader, Function transformer) throws IOException { + super(reader, transformer); + collectAndSort(); + } + + public ReorderedTsFileDeviceIterator( + TsFileSequenceReader reader, String tableName, Function transformer) + throws IOException { + super(reader, tableName, transformer); + collectAndSort(); + } + + private void collectAndSort() throws IOException { + while (super.hasNext()) { + Pair next = super.next(); + deviceIDAndFirstMeasurementNodeList.add( + new Pair<>(next, super.getFirstMeasurementNodeOfCurrentDevice())); + } + deviceIDAndFirstMeasurementNodeList.sort(Comparator.comparing(p -> p.getLeft().getLeft())); + deviceIDListIterator = deviceIDAndFirstMeasurementNodeList.iterator(); + } + + @Override + public boolean 
hasNext() { + return deviceIDListIterator.hasNext(); + } + + @Override + public Pair next() { + Pair, MetadataIndexNode> next = deviceIDListIterator.next(); + current = next; + return next.left; + } + + @Override + public Pair current() { + return current == null ? null : current.left; + } + + @Override + public MetadataIndexNode getFirstMeasurementNodeOfCurrentDevice() { + // the devices have been reordered, cannot use the measurementNode + return current == null ? null : current.right; + } +} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/TransformedTsFileDeviceIterator.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/TransformedTsFileDeviceIterator.java new file mode 100644 index 0000000000000..a361adb18e611 --- /dev/null +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/TransformedTsFileDeviceIterator.java @@ -0,0 +1,53 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.db.storageengine.dataregion.compaction.execute.utils; + +import org.apache.tsfile.file.metadata.IDeviceID; +import org.apache.tsfile.read.TsFileDeviceIterator; +import org.apache.tsfile.read.TsFileSequenceReader; +import org.apache.tsfile.utils.Pair; + +import java.io.IOException; +import java.util.function.Function; + +public class TransformedTsFileDeviceIterator extends TsFileDeviceIterator { + + protected Function transformer; + + public TransformedTsFileDeviceIterator( + TsFileSequenceReader reader, Function transformer) throws IOException { + super(reader); + this.transformer = transformer; + } + + public TransformedTsFileDeviceIterator( + TsFileSequenceReader reader, String tableName, Function transformer) + throws IOException { + super(reader, tableName, null); + this.transformer = transformer; + } + + @Override + public Pair next() { + Pair next = super.next(); + next.left = transformer.apply(next.left); + return next; + } +} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/executor/batch/BatchedFastAlignedSeriesCompactionExecutor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/executor/batch/BatchedFastAlignedSeriesCompactionExecutor.java index 0c1f12e9886a7..787452be2dc4c 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/executor/batch/BatchedFastAlignedSeriesCompactionExecutor.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/executor/batch/BatchedFastAlignedSeriesCompactionExecutor.java @@ -89,7 +89,8 @@ public BatchedFastAlignedSeriesCompactionExecutor( int subTaskId, List measurementSchemas, FastCompactionTaskSummary summary, - boolean ignoreAllNullRows) { + boolean ignoreAllNullRows, + Pair maxTsFileSetEndVersionAndMinResource) { super( compactionWriter, 
timeseriesMetadataOffsetMap, @@ -100,7 +101,8 @@ public BatchedFastAlignedSeriesCompactionExecutor( subTaskId, measurementSchemas, summary, - ignoreAllNullRows); + ignoreAllNullRows, + maxTsFileSetEndVersionAndMinResource); timeSchema = measurementSchemas.remove(0); valueMeasurementSchemas = measurementSchemas; this.batchColumnSelection = @@ -171,7 +173,8 @@ private void compactFirstBatch() subTaskId, selectedMeasurementSchemas, summary, - ignoreAllNullRows); + ignoreAllNullRows, + maxTsFileSetEndVersionAndMinResource); executor.execute(); LOGGER.debug( "[Batch Compaction] current device is {}, first batch compacted time chunk is {}", @@ -199,7 +202,8 @@ private void compactLeftBatches() subTaskId, currentBatchMeasurementSchemas, summary, - ignoreAllNullRows); + ignoreAllNullRows, + maxTsFileSetEndVersionAndMinResource); executor.execute(); } } @@ -230,7 +234,8 @@ public FirstBatchFastAlignedSeriesCompactionExecutor( int subTaskId, List measurementSchemas, FastCompactionTaskSummary summary, - boolean ignoreAllNullRows) { + boolean ignoreAllNullRows, + Pair maxTsFileSetEndVersionAndMinResource) { super( compactionWriter, timeseriesMetadataOffsetMap, @@ -241,7 +246,8 @@ public FirstBatchFastAlignedSeriesCompactionExecutor( subTaskId, measurementSchemas, summary, - ignoreAllNullRows); + ignoreAllNullRows, + maxTsFileSetEndVersionAndMinResource); isBatchedCompaction = true; } @@ -340,7 +346,8 @@ public FollowingBatchFastAlignedSeriesCompactionExecutor( int subTaskId, List measurementSchemas, FastCompactionTaskSummary summary, - boolean ignoreAllNullRows) { + boolean ignoreAllNullRows, + Pair maxTsFileSetEndVersionAndMinResource) { super( compactionWriter, timeseriesMetadataOffsetMap, @@ -351,7 +358,8 @@ public FollowingBatchFastAlignedSeriesCompactionExecutor( subTaskId, measurementSchemas, summary, - ignoreAllNullRows); + ignoreAllNullRows, + maxTsFileSetEndVersionAndMinResource); isBatchedCompaction = true; } diff --git 
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/executor/batch/utils/FirstBatchCompactionAlignedChunkWriter.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/executor/batch/utils/FirstBatchCompactionAlignedChunkWriter.java index 0109fcda16ce9..1d38a04d91df2 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/executor/batch/utils/FirstBatchCompactionAlignedChunkWriter.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/executor/batch/utils/FirstBatchCompactionAlignedChunkWriter.java @@ -44,6 +44,7 @@ import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.List; +import java.util.function.Function; public class FirstBatchCompactionAlignedChunkWriter extends AlignedChunkWriterImpl { @@ -158,6 +159,18 @@ public void writeToFileWriter(TsFileIOWriter tsfileWriter) throws IOException { super.writeToFileWriter(tsfileWriter); } + @Override + public void writeToFileWriter( + TsFileIOWriter tsfileWriter, Function measurementNameRemapper) + throws IOException { + if (!isEmpty() && beforeChunkWriterFlushCallback != null) { + // make sure all pages are recorded before this call + sealCurrentPage(); + beforeChunkWriterFlushCallback.call(this); + } + super.writeToFileWriter(tsfileWriter, measurementNameRemapper); + } + public void registerBeforeFlushChunkWriterCallback( ChunkWriterFlushCallback flushChunkWriterCallback) { this.beforeChunkWriterFlushCallback = flushChunkWriterCallback; diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/executor/batch/utils/FollowingBatchCompactionAlignedChunkWriter.java 
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/executor/batch/utils/FollowingBatchCompactionAlignedChunkWriter.java index 85813d2cd4d37..37729391fbde3 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/executor/batch/utils/FollowingBatchCompactionAlignedChunkWriter.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/executor/batch/utils/FollowingBatchCompactionAlignedChunkWriter.java @@ -40,6 +40,7 @@ import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.List; +import java.util.function.Function; public class FollowingBatchCompactionAlignedChunkWriter extends AlignedChunkWriterImpl { private int currentPage = 0; @@ -131,6 +132,21 @@ public void writeToFileWriter(TsFileIOWriter tsfileWriter) throws IOException { } } + @Override + public void writeToFileWriter( + TsFileIOWriter tsfileWriter, Function measurementNameRemapper) + throws IOException { + if (isEmpty()) { + return; + } + for (ValueChunkWriter valueChunkWriter : valueChunkWriterList) { + valueChunkWriter.writeToFileWriter(tsfileWriter, measurementNameRemapper); + } + if (afterChunkWriterFlushCallback != null) { + afterChunkWriterFlushCallback.call(this); + } + } + @Override public boolean checkIsChunkSizeOverThreshold( long size, long pointNum, boolean returnTrueIfChunkEmpty) { diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/executor/fast/FastAlignedSeriesCompactionExecutor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/executor/fast/FastAlignedSeriesCompactionExecutor.java index 121d4ca1d3a9d..3d03b7baf165e 100644 --- 
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/executor/fast/FastAlignedSeriesCompactionExecutor.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/executor/fast/FastAlignedSeriesCompactionExecutor.java @@ -37,6 +37,7 @@ import org.apache.iotdb.db.storageengine.dataregion.compaction.io.CompactionTsFileReader; import org.apache.iotdb.db.storageengine.dataregion.modification.ModEntry; import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.EvolvedSchema; import org.apache.iotdb.db.utils.ModificationUtils; import org.apache.iotdb.db.utils.datastructure.PatternTreeMapFactory; @@ -89,9 +90,17 @@ public FastAlignedSeriesCompactionExecutor( int subTaskId, List measurementSchemas, FastCompactionTaskSummary summary, - boolean ignoreAllNullRows) { + boolean ignoreAllNullRows, + Pair maxTsFileSetEndVersionAndMinResource) { super( - compactionWriter, readerCacheMap, modificationCacheMap, deviceId, true, subTaskId, summary); + compactionWriter, + readerCacheMap, + modificationCacheMap, + deviceId, + true, + subTaskId, + summary, + maxTsFileSetEndVersionAndMinResource); this.timeseriesMetadataOffsetMap = timeseriesMetadataOffsetMap; this.measurementSchemas = measurementSchemas; this.timeColumnMeasurementSchema = measurementSchemas.get(0); @@ -188,6 +197,9 @@ protected List getAlignedChunkMetadataList(TsFileR // read time chunk metadatas and value chunk metadatas in the current file List timeChunkMetadatas = null; List> valueChunkMetadatas = new ArrayList<>(); + EvolvedSchema evolvedSchema = + resource.getMergedEvolvedSchema(maxTsFileSetEndVersionAndMinResource.getLeft()); + for (Map.Entry>> entry : timeseriesMetadataOffsetMap.entrySet()) { String measurementID = entry.getKey(); @@ -216,7 +228,7 @@ protected List getAlignedChunkMetadataList(TsFileR .get(resource) 
.getChunkMetadataListByTimeseriesMetadataOffset( timeseriesOffsetInCurrentFile.left, timeseriesOffsetInCurrentFile.right); - if (isValueChunkDataTypeMatchSchema(valueColumnChunkMetadataList)) { + if (isValueChunkDataTypeMatchSchema(valueColumnChunkMetadataList, evolvedSchema)) { valueChunkMetadatas.add(valueColumnChunkMetadataList); } else { valueChunkMetadatas.add(null); @@ -270,18 +282,29 @@ protected List getAlignedChunkMetadataList(TsFileR // modify aligned chunk metadatas ModificationUtils.modifyAlignedChunkMetaData( alignedChunkMetadataList, timeModifications, valueModifications, ignoreAllNullRows); + + if (evolvedSchema != null) { + String originalTableName = evolvedSchema.getOriginalTableName(deviceId.getTableName()); + for (AbstractAlignedChunkMetadata abstractAlignedChunkMetadata : alignedChunkMetadataList) { + evolvedSchema.rewriteToFinal(abstractAlignedChunkMetadata, originalTableName); + } + } } return alignedChunkMetadataList; } private boolean isValueChunkDataTypeMatchSchema( - List chunkMetadataListOfOneValueColumn) { + List chunkMetadataListOfOneValueColumn, EvolvedSchema evolvedSchema) { boolean isMatch = false; for (IChunkMetadata chunkMetadata : chunkMetadataListOfOneValueColumn) { if (chunkMetadata == null) { continue; } String measurement = chunkMetadata.getMeasurementUid(); + if (evolvedSchema != null) { + String originalTableName = evolvedSchema.getOriginalTableName(deviceId.getTableName()); + measurement = evolvedSchema.getFinalColumnName(originalTableName, measurement); + } IMeasurementSchema schema = measurementSchemaMap.get(measurement); if (MetadataUtils.canAlter(chunkMetadata.getDataType(), schema.getType())) { if (schema.getType() != chunkMetadata.getDataType()) { @@ -371,11 +394,15 @@ void readChunk(ChunkMetadataElement chunkMetadataElement) throws IOException { valueChunks.add(null); continue; } + + Chunk chunk = readChunk(reader, (ChunkMetadata) valueChunkMetadata); + // the column may be renamed, enqueue with the final column name 
+ chunk.getHeader().setMeasurementID(valueChunkMetadata.getMeasurementUid()); + if (valueChunkMetadata.getNewType() != null) { - Chunk chunk = - readChunk(reader, (ChunkMetadata) valueChunkMetadata) - .rewrite( - ((ChunkMetadata) valueChunkMetadata).getNewType(), chunkMetadataElement.chunk); + chunk = + chunk.rewrite( + ((ChunkMetadata) valueChunkMetadata).getNewType(), chunkMetadataElement.chunk); valueChunks.add(chunk); ChunkMetadata chunkMetadata = (ChunkMetadata) valueChunkMetadata; @@ -384,7 +411,7 @@ void readChunk(ChunkMetadataElement chunkMetadataElement) throws IOException { statistics.mergeStatistics(chunk.getChunkStatistic()); chunkMetadata.setStatistics(statistics); } else { - valueChunks.add(readChunk(reader, (ChunkMetadata) valueChunkMetadata)); + valueChunks.add(chunk); } } chunkMetadataElement.valueChunks = valueChunks; diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/executor/fast/FastNonAlignedSeriesCompactionExecutor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/executor/fast/FastNonAlignedSeriesCompactionExecutor.java index 363554fb60672..9b828cd9a6356 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/executor/fast/FastNonAlignedSeriesCompactionExecutor.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/executor/fast/FastNonAlignedSeriesCompactionExecutor.java @@ -85,7 +85,8 @@ public FastNonAlignedSeriesCompactionExecutor( List sortedSourceFiles, IDeviceID deviceId, int subTaskId, - FastCompactionTaskSummary summary) { + FastCompactionTaskSummary summary, + Pair maxTsFileSetEndVersionAndMinResource) { super( compactionWriter, readerCacheMap, @@ -93,7 +94,8 @@ public FastNonAlignedSeriesCompactionExecutor( deviceId, false, subTaskId, - summary); + summary, + 
maxTsFileSetEndVersionAndMinResource); this.sortResources = sortedSourceFiles; } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/executor/fast/SeriesCompactionExecutor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/executor/fast/SeriesCompactionExecutor.java index b3073bd3d258d..ff75a5a867b19 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/executor/fast/SeriesCompactionExecutor.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/executor/fast/SeriesCompactionExecutor.java @@ -41,6 +41,7 @@ import org.apache.tsfile.read.TimeValuePair; import org.apache.tsfile.read.TsFileSequenceReader; import org.apache.tsfile.read.common.TimeRange; +import org.apache.tsfile.utils.Pair; import java.io.IOException; import java.util.ArrayList; @@ -97,6 +98,8 @@ void call(PageElement pageElement) protected boolean isAligned; + protected final Pair maxTsFileSetEndVersionAndMinResource; + protected SeriesCompactionExecutor( AbstractCompactionWriter compactionWriter, Map readerCacheMap, @@ -105,7 +108,8 @@ protected SeriesCompactionExecutor( IDeviceID deviceId, boolean isAligned, int subTaskId, - FastCompactionTaskSummary summary) { + FastCompactionTaskSummary summary, + Pair maxTsFileSetEndVersionAndMinResource) { this.compactionWriter = compactionWriter; this.subTaskId = subTaskId; this.deviceId = deviceId; @@ -128,6 +132,7 @@ protected SeriesCompactionExecutor( int timeCompare = Long.compare(o1.getStartTime(), o2.getStartTime()); return timeCompare != 0 ? 
timeCompare : o2.getPriority().compareTo(o1.getPriority()); }); + this.maxTsFileSetEndVersionAndMinResource = maxTsFileSetEndVersionAndMinResource; } public abstract void execute() @@ -350,12 +355,14 @@ private void checkAndCompactOverlapPage(PageElement nextPageElement, TimeValuePa */ protected List findOverlapFiles(FileElement fileToCheck) { List overlappedFiles = new ArrayList<>(); - Optional endTimeInCheckingFile = fileToCheck.resource.getEndTime(deviceId); + Optional endTimeInCheckingFile = + fileToCheck.resource.getEndTime(deviceId, maxTsFileSetEndVersionAndMinResource.left); for (FileElement otherFile : fileList) { if (!endTimeInCheckingFile.isPresent()) { continue; } - Optional startTimeInOtherFile = otherFile.resource.getStartTime(deviceId); + Optional startTimeInOtherFile = + otherFile.resource.getStartTime(deviceId, maxTsFileSetEndVersionAndMinResource.left); if (startTimeInOtherFile.isPresent() && startTimeInOtherFile.get() <= endTimeInCheckingFile.get()) { if (!otherFile.isSelected) { diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/executor/readchunk/ReadChunkAlignedSeriesCompactionExecutor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/executor/readchunk/ReadChunkAlignedSeriesCompactionExecutor.java index 654f8f770e74f..66b8062f47173 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/executor/readchunk/ReadChunkAlignedSeriesCompactionExecutor.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/executor/readchunk/ReadChunkAlignedSeriesCompactionExecutor.java @@ -165,7 +165,7 @@ private void collectValueColumnSchemaList() throws IOException { ChunkHeader chunkHeader = reader.readChunkHeader(chunkMetadata.getOffsetOfChunkHeader()); IMeasurementSchema schema = new MeasurementSchema( - 
chunkHeader.getMeasurementID(), + chunkMetadata.getMeasurementUid(), chunkHeader.getDataType(), chunkHeader.getEncodingType(), chunkHeader.getCompressionType()); @@ -262,6 +262,8 @@ protected ChunkLoader getChunkLoader(TsFileSequenceReader reader, ChunkMetadata return new InstantChunkLoader(); } Chunk chunk = reader.readMemChunk(chunkMetadata); + // the chunk may be renamed and chunkMetadata contains the final name + chunk.getHeader().setMeasurementID(chunkMetadata.getMeasurementUid()); return new InstantChunkLoader(reader.getFileName(), chunkMetadata, chunk); } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/reader/SeriesDataBlockReader.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/reader/SeriesDataBlockReader.java index a49b97c8f5222..9c78d866852bb 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/reader/SeriesDataBlockReader.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/reader/SeriesDataBlockReader.java @@ -53,7 +53,8 @@ public SeriesDataBlockReader( Set allSensors, FragmentInstanceContext context, QueryDataSource dataSource, - boolean ascending) { + boolean ascending, + long maxTsFileSetEndVersion) { SeriesScanOptions.Builder scanOptionsBuilder = new SeriesScanOptions.Builder(); scanOptionsBuilder.withAllSensors(allSensors); @@ -63,14 +64,16 @@ public SeriesDataBlockReader( (AlignedFullPath) seriesPath, ascending ? Ordering.ASC : Ordering.DESC, scanOptionsBuilder.build(), - context); + context, + maxTsFileSetEndVersion); } else if (seriesPath instanceof NonAlignedFullPath) { this.seriesScanUtil = new SeriesScanUtil( seriesPath, ascending ? 
Ordering.ASC : Ordering.DESC, scanOptionsBuilder.build(), - context); + context, + maxTsFileSetEndVersion); } else { throw new IllegalArgumentException("Should call exact sub class!"); } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/writer/AbstractCompactionWriter.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/writer/AbstractCompactionWriter.java index f3cd5185b58af..61e47d318d859 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/writer/AbstractCompactionWriter.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/writer/AbstractCompactionWriter.java @@ -27,6 +27,7 @@ import org.apache.iotdb.db.storageengine.dataregion.compaction.execute.utils.writer.flushcontroller.AbstractCompactionFlushController; import org.apache.iotdb.db.storageengine.dataregion.compaction.io.CompactionTsFileWriter; import org.apache.iotdb.db.storageengine.dataregion.modification.ModEntry; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource; import org.apache.tsfile.encrypt.EncryptParameter; import org.apache.tsfile.exception.write.PageException; @@ -38,6 +39,7 @@ import org.apache.tsfile.read.TimeValuePair; import org.apache.tsfile.read.common.Chunk; import org.apache.tsfile.read.common.block.TsBlock; +import org.apache.tsfile.utils.Pair; import org.apache.tsfile.utils.TsPrimitiveType; import org.apache.tsfile.write.chunk.AlignedChunkWriterImpl; import org.apache.tsfile.write.chunk.ChunkWriterImpl; @@ -339,5 +341,6 @@ protected void checkPreviousTimestamp(long currentWritingTimestamp, int subTaskI } } - public abstract void setSchemaForAllTargetFile(List schemas); + public abstract void setSchemaForAllTargetFile( + List schemas, Pair maxTsFileSetEndVersionAndAssociatedResource); } diff --git 
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/writer/AbstractCrossCompactionWriter.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/writer/AbstractCrossCompactionWriter.java index f970ad65e56c3..c4a986fc16dbe 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/writer/AbstractCrossCompactionWriter.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/writer/AbstractCrossCompactionWriter.java @@ -21,10 +21,12 @@ import org.apache.iotdb.commons.utils.TestOnly; import org.apache.iotdb.db.conf.IoTDBDescriptor; +import org.apache.iotdb.db.storageengine.dataregion.compaction.execute.utils.CompactionTableSchema; import org.apache.iotdb.db.storageengine.dataregion.compaction.execute.utils.CompactionUtils; import org.apache.iotdb.db.storageengine.dataregion.compaction.io.CompactionTsFileWriter; import org.apache.iotdb.db.storageengine.dataregion.compaction.schedule.constant.CompactionType; import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.EvolvedSchema; import org.apache.iotdb.db.storageengine.dataregion.tsfile.timeindex.ITimeIndex; import org.apache.iotdb.db.storageengine.rescon.memory.SystemInfo; import org.apache.iotdb.db.utils.EncryptDBUtils; @@ -35,6 +37,7 @@ import org.apache.tsfile.read.TimeValuePair; import org.apache.tsfile.read.TsFileSequenceReader; import org.apache.tsfile.read.common.block.TsBlock; +import org.apache.tsfile.utils.Pair; import org.apache.tsfile.utils.TsPrimitiveType; import org.apache.tsfile.write.schema.Schema; @@ -73,17 +76,24 @@ public abstract class AbstractCrossCompactionWriter extends AbstractCompactionWr private final EncryptParameter encryptParameter; + private final long maxTsFileSetEndVersion; + 
@TestOnly protected AbstractCrossCompactionWriter( List targetResources, List seqFileResources) throws IOException { - this(targetResources, seqFileResources, EncryptDBUtils.getDefaultFirstEncryptParam()); + this( + targetResources, + seqFileResources, + EncryptDBUtils.getDefaultFirstEncryptParam(), + Long.MIN_VALUE); } protected AbstractCrossCompactionWriter( List targetResources, List seqFileResources, - EncryptParameter encryptParameter) + EncryptParameter encryptParameter, + long maxTsFileSetEndVersion) throws IOException { currentDeviceEndTime = new long[seqFileResources.size()]; isCurrentDeviceExistedInSourceSeqFiles = new boolean[seqFileResources.size()]; @@ -99,14 +109,16 @@ protected AbstractCrossCompactionWriter( for (int i = 0; i < targetResources.size(); i++) { this.targetFileWriters.add( new CompactionTsFileWriter( - targetResources.get(i).getTsFile(), + targetResources.get(i), memorySizeForEachWriter, CompactionType.CROSS_COMPACTION, - this.encryptParameter)); + this.encryptParameter, + maxTsFileSetEndVersion)); isEmptyFile[i] = true; } this.seqTsFileResources = seqFileResources; this.targetResources = targetResources; + this.maxTsFileSetEndVersion = maxTsFileSetEndVersion; } @Override @@ -126,7 +138,7 @@ public void endChunkGroup() throws IOException { CompactionTsFileWriter targetFileWriter = targetFileWriters.get(i); if (isDeviceExistedInTargetFiles[i]) { // update resource - CompactionUtils.updateResource(targetResources.get(i), targetFileWriter, deviceId); + CompactionUtils.updateResource(targetResources.get(i), targetFileWriter); targetFileWriter.endChunkGroup(); } else { targetFileWriter.truncate(targetFileWriter.getPos() - chunkGroupHeaderSize); @@ -227,10 +239,17 @@ protected void checkTimeAndMayFlushChunkToCurrentFile(long timestamp, int subTas private void checkIsDeviceExistAndGetDeviceEndTime() throws IOException { int fileIndex = 0; while (fileIndex < seqTsFileResources.size()) { - ITimeIndex timeIndex = 
seqTsFileResources.get(fileIndex).getTimeIndex(); + TsFileResource tsFileResource = seqTsFileResources.get(fileIndex); + EvolvedSchema evolvedSchema = tsFileResource.getMergedEvolvedSchema(maxTsFileSetEndVersion); + IDeviceID originalDeviceId = deviceId; + if (evolvedSchema != null) { + originalDeviceId = evolvedSchema.rewriteToOriginal(deviceId); + } + + ITimeIndex timeIndex = tsFileResource.getTimeIndex(); if (timeIndex.getTimeIndexType() != ITimeIndex.FILE_TIME_INDEX_TYPE) { // the timeIndexType of resource is deviceTimeIndex - Optional endTime = timeIndex.getEndTime(deviceId); + Optional endTime = timeIndex.getEndTime(originalDeviceId); currentDeviceEndTime[fileIndex] = endTime.orElse(Long.MIN_VALUE); isCurrentDeviceExistedInSourceSeqFiles[fileIndex] = endTime.isPresent(); } else { @@ -239,7 +258,7 @@ private void checkIsDeviceExistAndGetDeviceEndTime() throws IOException { // Fast compaction get reader from cache map, while read point compaction get reader from // FileReaderManager Map deviceMetadataMap = - getFileReader(seqTsFileResources.get(fileIndex)).readDeviceMetadata(deviceId); + getFileReader(tsFileResource).readDeviceMetadata(originalDeviceId); for (Map.Entry entry : deviceMetadataMap.entrySet()) { long tmpStartTime = entry.getValue().getStatistics().getStartTime(); long tmpEndTime = entry.getValue().getStatistics().getEndTime(); @@ -266,9 +285,25 @@ public long getWriterSize() throws IOException { } @Override - public void setSchemaForAllTargetFile(List schemas) { + public void setSchemaForAllTargetFile( + List schemas, Pair maxTsFileSetEndVersionAndMinResource) { for (int i = 0; i < targetFileWriters.size(); i++) { - targetFileWriters.get(i).setSchema(schemas.get(i)); + CompactionTsFileWriter compactionTsFileWriter = targetFileWriters.get(i); + Schema schema = schemas.get(i); + TsFileResource targetResource = compactionTsFileWriter.getTsFileResource(); + if (maxTsFileSetEndVersionAndMinResource.right != null) { + long maxTsFileSetEndVersion = 
maxTsFileSetEndVersionAndMinResource.left; + TsFileResource minVersionResource = maxTsFileSetEndVersionAndMinResource.getRight(); + targetResource.setTsFileManager(minVersionResource.getTsFileManager()); + EvolvedSchema evolvedSchema = targetResource.getMergedEvolvedSchema(maxTsFileSetEndVersion); + + if (evolvedSchema != null) { + schema = evolvedSchema.rewriteToOriginal(schema, CompactionTableSchema::new); + } + compactionTsFileWriter.setSchema(schema); + } else { + compactionTsFileWriter.setSchema(schema); + } } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/writer/AbstractInnerCompactionWriter.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/writer/AbstractInnerCompactionWriter.java index 6573bb7e96e86..cb0205f08ea07 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/writer/AbstractInnerCompactionWriter.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/writer/AbstractInnerCompactionWriter.java @@ -21,11 +21,13 @@ import org.apache.iotdb.commons.utils.TestOnly; import org.apache.iotdb.db.conf.IoTDBDescriptor; +import org.apache.iotdb.db.storageengine.dataregion.compaction.execute.utils.CompactionTableSchema; import org.apache.iotdb.db.storageengine.dataregion.compaction.execute.utils.CompactionTableSchemaCollector; import org.apache.iotdb.db.storageengine.dataregion.compaction.execute.utils.CompactionUtils; import org.apache.iotdb.db.storageengine.dataregion.compaction.io.CompactionTsFileWriter; import org.apache.iotdb.db.storageengine.dataregion.compaction.schedule.constant.CompactionType; import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.EvolvedSchema; import 
org.apache.iotdb.db.storageengine.rescon.memory.SystemInfo; import org.apache.iotdb.db.utils.EncryptDBUtils; @@ -33,6 +35,7 @@ import org.apache.tsfile.file.metadata.IDeviceID; import org.apache.tsfile.read.TimeValuePair; import org.apache.tsfile.read.common.block.TsBlock; +import org.apache.tsfile.utils.Pair; import org.apache.tsfile.write.schema.Schema; import java.io.IOException; @@ -46,6 +49,7 @@ public abstract class AbstractInnerCompactionWriter extends AbstractCompactionWr protected long endedFileSize = 0; protected List schemas; protected EncryptParameter encryptParameter; + protected Pair maxTsFileSetEndVersionAndMinResource; protected final long memoryBudgetForFileWriter = (long) @@ -113,20 +117,34 @@ private void rollCompactionFileWriter() throws IOException { } private void useNewWriter() throws IOException { + long maxTsFileSetEndVersion = maxTsFileSetEndVersionAndMinResource.left; fileWriter = new CompactionTsFileWriter( - targetResources.get(currentFileIndex).getTsFile(), + targetResources.get(currentFileIndex), memoryBudgetForFileWriter, targetResources.get(currentFileIndex).isSeq() ? CompactionType.INNER_SEQ_COMPACTION : CompactionType.INNER_UNSEQ_COMPACTION, - encryptParameter); - fileWriter.setSchema(CompactionTableSchemaCollector.copySchema(schemas.get(0))); + encryptParameter, + maxTsFileSetEndVersion); + Schema schema = CompactionTableSchemaCollector.copySchema(schemas.get(0)); + TsFileResource minVersionResource = maxTsFileSetEndVersionAndMinResource.getRight(); + // only null during test + fileWriter + .getTsFileResource() + .setTsFileManager( + minVersionResource != null ? minVersionResource.getTsFileManager() : null); + EvolvedSchema evolvedSchema = + fileWriter.getTsFileResource().getMergedEvolvedSchema(maxTsFileSetEndVersion); + fileWriter.setSchema( + evolvedSchema != null + ? 
evolvedSchema.rewriteToOriginal(schema, CompactionTableSchema::new) + : schema); } @Override public void endChunkGroup() throws IOException { - CompactionUtils.updateResource(targetResources.get(currentFileIndex), fileWriter, deviceId); + CompactionUtils.updateResource(targetResources.get(currentFileIndex), fileWriter); fileWriter.endChunkGroup(); } @@ -174,8 +192,10 @@ public void checkAndMayFlushChunkMetadata() throws IOException { } @Override - public void setSchemaForAllTargetFile(List schemas) { + public void setSchemaForAllTargetFile( + List schemas, Pair maxTsFileSetEndVersionAndMinResource) { this.schemas = schemas; + this.maxTsFileSetEndVersionAndMinResource = maxTsFileSetEndVersionAndMinResource; } @Override diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/writer/FastCrossCompactionWriter.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/writer/FastCrossCompactionWriter.java index 59a87b4211c46..f379d02704add 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/writer/FastCrossCompactionWriter.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/writer/FastCrossCompactionWriter.java @@ -53,7 +53,11 @@ public FastCrossCompactionWriter( List seqSourceResources, Map readerMap) throws IOException { - super(targetResources, seqSourceResources, EncryptDBUtils.getDefaultFirstEncryptParam()); + super( + targetResources, + seqSourceResources, + EncryptDBUtils.getDefaultFirstEncryptParam(), + Long.MIN_VALUE); this.readerMap = readerMap; } @@ -61,9 +65,10 @@ public FastCrossCompactionWriter( List targetResources, List seqSourceResources, Map readerMap, - EncryptParameter encryptParameter) + EncryptParameter encryptParameter, + long maxTsFileSetEndVersion) throws IOException { - super(targetResources, 
seqSourceResources, encryptParameter); + super(targetResources, seqSourceResources, encryptParameter, maxTsFileSetEndVersion); this.readerMap = readerMap; } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/writer/ReadPointCrossCompactionWriter.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/writer/ReadPointCrossCompactionWriter.java index 6810df4d1a3b6..b2799c0dfe5e2 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/writer/ReadPointCrossCompactionWriter.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/writer/ReadPointCrossCompactionWriter.java @@ -47,15 +47,20 @@ public class ReadPointCrossCompactionWriter extends AbstractCrossCompactionWrite public ReadPointCrossCompactionWriter( List targetResources, List seqFileResources) throws IOException { - super(targetResources, seqFileResources, EncryptDBUtils.getDefaultFirstEncryptParam()); + super( + targetResources, + seqFileResources, + EncryptDBUtils.getDefaultFirstEncryptParam(), + Long.MIN_VALUE); } public ReadPointCrossCompactionWriter( List targetResources, List seqFileResources, - EncryptParameter encryptParameter) + EncryptParameter encryptParameter, + long maxTsFileSetEndVersion) throws IOException { - super(targetResources, seqFileResources, encryptParameter); + super(targetResources, seqFileResources, encryptParameter, maxTsFileSetEndVersion); } @Override diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/io/CompactionTsFileWriter.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/io/CompactionTsFileWriter.java index 1f822e3b9345d..815d3ca7fe4ae 100644 --- 
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/io/CompactionTsFileWriter.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/io/CompactionTsFileWriter.java @@ -24,6 +24,8 @@ import org.apache.iotdb.db.storageengine.dataregion.compaction.schedule.CompactionTaskManager; import org.apache.iotdb.db.storageengine.dataregion.compaction.schedule.constant.CompactionIoDataType; import org.apache.iotdb.db.storageengine.dataregion.compaction.schedule.constant.CompactionType; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.EvolvedSchema; import org.apache.iotdb.db.utils.EncryptDBUtils; import org.apache.tsfile.encrypt.EncryptParameter; @@ -53,25 +55,39 @@ public class CompactionTsFileWriter extends TsFileIOWriter { private volatile boolean isWritingAligned = false; private boolean isEmptyTargetFile = true; - private IDeviceID currentDeviceId; + private IDeviceID currentOriginalDeviceId; - private EncryptParameter firstEncryptParameter; + private final TsFileResource tsFileResource; + private final EvolvedSchema evolvedSchema; + + private final EncryptParameter firstEncryptParameter; @TestOnly public CompactionTsFileWriter(File file, long maxMetadataSize, CompactionType type) throws IOException { - this(file, maxMetadataSize, type, EncryptDBUtils.getDefaultFirstEncryptParam()); + this( + new TsFileResource(file), + maxMetadataSize, + type, + EncryptDBUtils.getDefaultFirstEncryptParam(), + Long.MIN_VALUE); } public CompactionTsFileWriter( - File file, long maxMetadataSize, CompactionType type, EncryptParameter encryptParameter) + TsFileResource tsFile, + long maxMetadataSize, + CompactionType type, + EncryptParameter encryptParameter, + long maxTsFileSetEndVersion) throws IOException { - super(file, maxMetadataSize, encryptParameter); + super(tsFile.getTsFile(), maxMetadataSize, 
encryptParameter); + this.tsFileResource = tsFile; this.firstEncryptParameter = encryptParameter; this.type = type; super.out = new CompactionTsFileOutput( super.out, CompactionTaskManager.getInstance().getMergeWriteRateLimiter()); + evolvedSchema = tsFileResource.getMergedEvolvedSchema(maxTsFileSetEndVersion); } public EncryptParameter getEncryptParameter() { @@ -92,7 +108,14 @@ public void writeChunk(IChunkWriter chunkWriter) throws IOException { if (!chunkWriter.isEmpty()) { isEmptyTargetFile = false; } - chunkWriter.writeToFileWriter(this); + chunkWriter.writeToFileWriter( + this, + evolvedSchema == null + ? null + : measurementName -> + evolvedSchema.getOriginalColumnName( + evolvedSchema.getFinalTableName(currentOriginalDeviceId.getTableName()), + measurementName)); long writtenDataSize = this.getPos() - beforeOffset; CompactionMetrics.getInstance() .recordWriteInfo( @@ -107,6 +130,15 @@ public void writeChunk(Chunk chunk, ChunkMetadata chunkMetadata) throws IOExcept if (chunkMetadata.getNumOfPoints() != 0) { isEmptyTargetFile = false; } + if (evolvedSchema != null) { + String finalTableName = + evolvedSchema.getFinalTableName(currentOriginalDeviceId.getTableName()); + chunk + .getHeader() + .setMeasurementID( + evolvedSchema.getOriginalColumnName( + finalTableName, chunk.getHeader().getMeasurementID())); + } super.writeChunk(chunk, chunkMetadata); long writtenDataSize = this.getPos() - beforeOffset; CompactionMetrics.getInstance() @@ -124,6 +156,11 @@ public void writeEmptyValueChunk( TSEncoding encodingType, Statistics statistics) throws IOException { + if (evolvedSchema != null) { + measurementId = + evolvedSchema.getOriginalColumnName( + currentOriginalDeviceId.getTableName(), measurementId); + } long beforeOffset = this.getPos(); super.writeEmptyValueChunk( measurementId, compressionType, tsDataType, encodingType, statistics); @@ -141,21 +178,24 @@ public int checkMetadataSizeAndMayFlush() throws IOException { @Override public int 
startChunkGroup(IDeviceID deviceId) throws IOException { - currentDeviceId = deviceId; + if (evolvedSchema != null) { + deviceId = evolvedSchema.rewriteToOriginal(deviceId); + } + currentOriginalDeviceId = deviceId; return super.startChunkGroup(deviceId); } @Override public void endChunkGroup() throws IOException { - if (currentDeviceId == null || chunkMetadataList.isEmpty()) { + if (currentOriginalDeviceId == null || chunkMetadataList.isEmpty()) { return; } - String tableName = currentDeviceId.getTableName(); + String tableName = currentOriginalDeviceId.getTableName(); TableSchema tableSchema = getSchema().getTableSchemaMap().get(tableName); boolean generateTableSchemaForCurrentChunkGroup = tableSchema != null; setGenerateTableSchema(generateTableSchemaForCurrentChunkGroup); super.endChunkGroup(); - currentDeviceId = null; + currentOriginalDeviceId = null; } @Override @@ -192,4 +232,12 @@ private void removeUnusedTableSchema() { iterator.remove(); } } + + public TsFileResource getTsFileResource() { + return tsFileResource; + } + + public IDeviceID getCurrentOriginalDeviceId() { + return currentOriginalDeviceId; + } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/modification/DeletionPredicate.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/modification/DeletionPredicate.java index 877c94f7081ca..83eabc0fc5846 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/modification/DeletionPredicate.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/modification/DeletionPredicate.java @@ -18,7 +18,8 @@ */ package org.apache.iotdb.db.storageengine.dataregion.modification; -import org.apache.iotdb.db.storageengine.dataregion.modification.IDPredicate.NOP; +import org.apache.iotdb.db.storageengine.dataregion.modification.TagPredicate.NOP; +import 
org.apache.iotdb.db.storageengine.dataregion.modification.TagPredicate.TagPredicateType; import org.apache.iotdb.db.utils.io.BufferSerializable; import org.apache.iotdb.db.utils.io.StreamSerializable; @@ -42,7 +43,7 @@ public class DeletionPredicate implements StreamSerializable, BufferSerializable public static final long SHALLOW_SIZE = RamUsageEstimator.shallowSizeOfInstance(DeletionPredicate.class); private String tableName; - private IDPredicate idPredicate = new NOP(); + private TagPredicate tagPredicate = new NOP(); // an empty list means affecting all columns private List measurementNames = Collections.emptyList(); @@ -52,32 +53,28 @@ public DeletionPredicate(String tableName) { this.tableName = tableName; } - public DeletionPredicate(String tableName, IDPredicate idPredicate) { + public DeletionPredicate(String tableName, TagPredicate tagPredicate) { this.tableName = tableName; - this.idPredicate = idPredicate; + this.tagPredicate = tagPredicate; } public DeletionPredicate( - String tableName, IDPredicate idPredicate, List measurementNames) { + String tableName, TagPredicate tagPredicate, List measurementNames) { this.tableName = tableName; - this.idPredicate = idPredicate; + this.tagPredicate = tagPredicate; this.measurementNames = measurementNames; } public boolean matches(IDeviceID deviceID) { - return tableName.equals(deviceID.getTableName()) && idPredicate.matches(deviceID); + return tableName.equals(deviceID.getTableName()) && tagPredicate.matches(deviceID); } - public void setIdPredicate(IDPredicate idPredicate) { - this.idPredicate = idPredicate; + public void setIdPredicate(TagPredicate tagPredicate) { + this.tagPredicate = tagPredicate; } - public IDPredicate getIdPredicate() { - return idPredicate; - } - - public IDPredicate.IDPredicateType getIdPredicateType() { - return this.idPredicate.type; + public TagPredicateType getTagPredicateType() { + return this.tagPredicate.type; } public String getTableName() { @@ -88,6 +85,10 @@ public List 
getMeasurementNames() { return measurementNames; } + public TagPredicate getTagPredicate() { + return tagPredicate; + } + public boolean affects(String measurementName) { return measurementNames.isEmpty() || measurementNames.contains(measurementName); } @@ -95,7 +96,7 @@ public boolean affects(String measurementName) { @Override public long serialize(OutputStream stream) throws IOException { long size = ReadWriteIOUtils.writeVar(tableName, stream); - size += idPredicate.serialize(stream); + size += tagPredicate.serialize(stream); size += ReadWriteForEncodingUtils.writeVarInt(measurementNames.size(), stream); for (String measurementName : measurementNames) { size += ReadWriteIOUtils.writeVar(measurementName, stream); @@ -106,7 +107,7 @@ public long serialize(OutputStream stream) throws IOException { @Override public long serialize(ByteBuffer buffer) { long size = ReadWriteIOUtils.writeVar(tableName, buffer); - size += idPredicate.serialize(buffer); + size += tagPredicate.serialize(buffer); size += ReadWriteForEncodingUtils.writeVarInt(measurementNames.size(), buffer); for (String measurementName : measurementNames) { size += ReadWriteIOUtils.writeVar(measurementName, buffer); @@ -117,7 +118,7 @@ public long serialize(ByteBuffer buffer) { @Override public void deserialize(InputStream stream) throws IOException { tableName = ReadWriteIOUtils.readVarIntString(stream); - idPredicate = IDPredicate.createFrom(stream); + tagPredicate = TagPredicate.createFrom(stream); int measurementLength = ReadWriteForEncodingUtils.readVarInt(stream); if (measurementLength > 0) { @@ -133,7 +134,7 @@ public void deserialize(InputStream stream) throws IOException { @Override public void deserialize(ByteBuffer buffer) { tableName = ReadWriteIOUtils.readVarIntString(buffer); - idPredicate = IDPredicate.createFrom(buffer); + tagPredicate = TagPredicate.createFrom(buffer); int measurementLength = ReadWriteForEncodingUtils.readVarInt(buffer); if (measurementLength > 0) { @@ -151,7 +152,7 @@ 
public int serializedSize() { int size = ReadWriteForEncodingUtils.varIntSize(tableName.length()) + tableName.length() * Character.BYTES - + idPredicate.serializedSize() + + tagPredicate.serializedSize() + ReadWriteForEncodingUtils.varIntSize(measurementNames.size()); for (String measurementName : measurementNames) { size += @@ -171,13 +172,13 @@ public boolean equals(Object o) { } DeletionPredicate that = (DeletionPredicate) o; return Objects.equals(tableName, that.tableName) - && Objects.equals(idPredicate, that.idPredicate) + && Objects.equals(tagPredicate, that.tagPredicate) && Objects.equals(measurementNames, that.measurementNames); } @Override public int hashCode() { - return Objects.hash(tableName, idPredicate, measurementNames); + return Objects.hash(tableName, tagPredicate, measurementNames); } @Override @@ -187,7 +188,7 @@ public String toString() { + tableName + '\'' + ", idPredicate=" - + idPredicate + + tagPredicate + ", measurementNames=" + measurementNames + '}'; @@ -197,7 +198,7 @@ public String toString() { public long ramBytesUsed() { return SHALLOW_SIZE + RamUsageEstimator.sizeOf(tableName) - + RamUsageEstimator.sizeOfObject(idPredicate) + + RamUsageEstimator.sizeOfObject(tagPredicate) + RamUsageEstimator.sizeOfArrayList(measurementNames); } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/modification/TableDeletionEntry.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/modification/TableDeletionEntry.java index 61ec2e0d67854..60aab450bf342 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/modification/TableDeletionEntry.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/modification/TableDeletionEntry.java @@ -21,7 +21,7 @@ import org.apache.iotdb.commons.conf.IoTDBConstant; import org.apache.iotdb.commons.path.PartialPath; import 
org.apache.iotdb.db.queryengine.execution.MemoryEstimationHelper; -import org.apache.iotdb.db.storageengine.dataregion.modification.IDPredicate.IDPredicateType; +import org.apache.iotdb.db.storageengine.dataregion.modification.TagPredicate.TagPredicateType; import org.apache.iotdb.db.utils.ModificationUtils; import org.apache.tsfile.file.metadata.IDeviceID; @@ -138,8 +138,8 @@ public String getTableName() { } public boolean isDroppingTable() { - IDPredicate idPredicate = predicate.getIdPredicate(); - return idPredicate.type == IDPredicateType.NOP + TagPredicate tagPredicate = predicate.getTagPredicate(); + return tagPredicate.type == TagPredicateType.NOP && predicate.getMeasurementNames().isEmpty() && timeRange.getMin() == Long.MIN_VALUE && timeRange.getMax() == Long.MAX_VALUE; diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/modification/IDPredicate.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/modification/TagPredicate.java similarity index 78% rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/modification/IDPredicate.java rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/modification/TagPredicate.java index 44741f9e67940..9b0022aa9551f 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/modification/IDPredicate.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/modification/TagPredicate.java @@ -18,6 +18,7 @@ */ package org.apache.iotdb.db.storageengine.dataregion.modification; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.EvolvedSchema; import org.apache.iotdb.db.utils.io.BufferSerializable; import org.apache.iotdb.db.utils.io.StreamSerializable; @@ -38,7 +39,7 @@ import java.util.List; import java.util.Objects; -public abstract class IDPredicate implements StreamSerializable, BufferSerializable, 
Accountable { +public abstract class TagPredicate implements StreamSerializable, BufferSerializable, Accountable { public int serializedSize() { // type @@ -46,7 +47,7 @@ public int serializedSize() { } @SuppressWarnings("java:S6548") - public enum IDPredicateType { + public enum TagPredicateType { NOP, FULL_EXACT_MATCH, SEGMENT_EXACT_MATCH, @@ -62,23 +63,31 @@ public long serialize(ByteBuffer buffer) { return 1; } - public static IDPredicateType deserialize(InputStream stream) throws IOException { + public static TagPredicateType deserialize(InputStream stream) throws IOException { return values()[stream.read()]; } - public static IDPredicateType deserialize(ByteBuffer buffer) { + public static TagPredicateType deserialize(ByteBuffer buffer) { return values()[buffer.get()]; } } - protected final IDPredicateType type; + protected final TagPredicateType type; - protected IDPredicate(IDPredicateType type) { + protected TagPredicate(TagPredicateType type) { this.type = type; } public abstract boolean matches(IDeviceID deviceID); + public TagPredicate rewriteToOriginal(EvolvedSchema evolvedSchema) { + return this; + } + + public TagPredicate rewriteToFinal(EvolvedSchema evolvedSchema) { + return this; + } + @Override public long serialize(OutputStream stream) throws IOException { return type.serialize(stream); @@ -89,16 +98,16 @@ public long serialize(ByteBuffer buffer) { return type.serialize(buffer); } - public static IDPredicate createFrom(ByteBuffer buffer) { - IDPredicateType type = IDPredicateType.deserialize(buffer); - IDPredicate predicate; - if (Objects.requireNonNull(type) == IDPredicateType.NOP) { + public static TagPredicate createFrom(ByteBuffer buffer) { + TagPredicateType type = TagPredicateType.deserialize(buffer); + TagPredicate predicate; + if (Objects.requireNonNull(type) == TagPredicateType.NOP) { predicate = new NOP(); - } else if (Objects.requireNonNull(type) == IDPredicateType.FULL_EXACT_MATCH) { + } else if (Objects.requireNonNull(type) == 
TagPredicateType.FULL_EXACT_MATCH) { predicate = new FullExactMatch(); - } else if (Objects.requireNonNull(type) == IDPredicateType.SEGMENT_EXACT_MATCH) { + } else if (Objects.requireNonNull(type) == TagPredicateType.SEGMENT_EXACT_MATCH) { predicate = new SegmentExactMatch(); - } else if (Objects.requireNonNull(type) == IDPredicateType.AND) { + } else if (Objects.requireNonNull(type) == TagPredicateType.AND) { predicate = new And(); } else { throw new IllegalArgumentException("Unrecognized predicate type: " + type); @@ -107,16 +116,16 @@ public static IDPredicate createFrom(ByteBuffer buffer) { return predicate; } - public static IDPredicate createFrom(InputStream stream) throws IOException { - IDPredicateType type = IDPredicateType.deserialize(stream); - IDPredicate predicate; - if (Objects.requireNonNull(type) == IDPredicateType.NOP) { + public static TagPredicate createFrom(InputStream stream) throws IOException { + TagPredicateType type = TagPredicateType.deserialize(stream); + TagPredicate predicate; + if (Objects.requireNonNull(type) == TagPredicateType.NOP) { predicate = new NOP(); - } else if (Objects.requireNonNull(type) == IDPredicateType.FULL_EXACT_MATCH) { + } else if (Objects.requireNonNull(type) == TagPredicateType.FULL_EXACT_MATCH) { predicate = new FullExactMatch(); - } else if (Objects.requireNonNull(type) == IDPredicateType.SEGMENT_EXACT_MATCH) { + } else if (Objects.requireNonNull(type) == TagPredicateType.SEGMENT_EXACT_MATCH) { predicate = new SegmentExactMatch(); - } else if (Objects.requireNonNull(type) == IDPredicateType.AND) { + } else if (Objects.requireNonNull(type) == TagPredicateType.AND) { predicate = new And(); } else { throw new IllegalArgumentException("Unrecognized predicate type: " + type); @@ -125,11 +134,11 @@ public static IDPredicate createFrom(InputStream stream) throws IOException { return predicate; } - public static class NOP extends IDPredicate { + public static class NOP extends TagPredicate { public static final long 
SHALLOW_SIZE = RamUsageEstimator.shallowSizeOfInstance(NOP.class); public NOP() { - super(IDPredicateType.NOP); + super(TagPredicateType.NOP); } @Override @@ -168,19 +177,19 @@ public long ramBytesUsed() { } } - public static class FullExactMatch extends IDPredicate { + public static class FullExactMatch extends TagPredicate { public static final long SHALLOW_SIZE = RamUsageEstimator.shallowSizeOfInstance(FullExactMatch.class); private IDeviceID deviceID; public FullExactMatch(IDeviceID deviceID) { - super(IDPredicateType.FULL_EXACT_MATCH); + super(TagPredicateType.FULL_EXACT_MATCH); this.deviceID = deviceID; } public FullExactMatch() { - super(IDPredicateType.FULL_EXACT_MATCH); + super(TagPredicateType.FULL_EXACT_MATCH); } @Override @@ -243,9 +252,19 @@ public String toString() { public long ramBytesUsed() { return SHALLOW_SIZE + RamUsageEstimator.sizeOfObject(deviceID); } + + @Override + public TagPredicate rewriteToOriginal(EvolvedSchema evolvedSchema) { + return new FullExactMatch(evolvedSchema.rewriteToOriginal(deviceID)); + } + + @Override + public TagPredicate rewriteToFinal(EvolvedSchema evolvedSchema) { + return new FullExactMatch(evolvedSchema.rewriteToFinal(deviceID)); + } } - public static class SegmentExactMatch extends IDPredicate { + public static class SegmentExactMatch extends TagPredicate { public static final long SHALLOW_SIZE = RamUsageEstimator.shallowSizeOfInstance(SegmentExactMatch.class); @@ -253,13 +272,13 @@ public static class SegmentExactMatch extends IDPredicate { private int segmentIndex; public SegmentExactMatch(String pattern, int segmentIndex) { - super(IDPredicateType.SEGMENT_EXACT_MATCH); + super(TagPredicateType.SEGMENT_EXACT_MATCH); this.pattern = pattern; this.segmentIndex = segmentIndex; } public SegmentExactMatch() { - super(IDPredicateType.SEGMENT_EXACT_MATCH); + super(TagPredicateType.SEGMENT_EXACT_MATCH); } @Override @@ -342,17 +361,17 @@ public long ramBytesUsed() { } } - public static class And extends IDPredicate { + 
public static class And extends TagPredicate { public static final long SHALLOW_SIZE = RamUsageEstimator.shallowSizeOfInstance(And.class); - private final List predicates = new ArrayList<>(); + private final List predicates = new ArrayList<>(); - public And(IDPredicate... predicates) { - super(IDPredicateType.AND); + public And(TagPredicate... predicates) { + super(TagPredicateType.AND); Collections.addAll(this.predicates, predicates); } - public void add(IDPredicate predicate) { + public void add(TagPredicate predicate) { predicates.add(predicate); } @@ -360,7 +379,7 @@ public void add(IDPredicate predicate) { public int serializedSize() { int serializedSize = super.serializedSize(); serializedSize += ReadWriteForEncodingUtils.varIntSize(predicates.size()); - for (IDPredicate predicate : predicates) { + for (TagPredicate predicate : predicates) { serializedSize += predicate.serializedSize(); } return serializedSize; @@ -370,7 +389,7 @@ public int serializedSize() { public long serialize(OutputStream stream) throws IOException { long size = super.serialize(stream); size += ReadWriteForEncodingUtils.writeVarInt(predicates.size(), stream); - for (IDPredicate predicate : predicates) { + for (TagPredicate predicate : predicates) { size += predicate.serialize(stream); } return size; @@ -380,7 +399,7 @@ public long serialize(OutputStream stream) throws IOException { public long serialize(ByteBuffer buffer) { long size = super.serialize(buffer); size += ReadWriteForEncodingUtils.writeVarInt(predicates.size(), buffer); - for (IDPredicate predicate : predicates) { + for (TagPredicate predicate : predicates) { size += predicate.serialize(buffer); } return size; @@ -390,7 +409,7 @@ public long serialize(ByteBuffer buffer) { public void deserialize(InputStream stream) throws IOException { int size = ReadWriteForEncodingUtils.readVarInt(stream); for (int i = 0; i < size; i++) { - predicates.add(IDPredicate.createFrom(stream)); + predicates.add(TagPredicate.createFrom(stream)); 
} } @@ -398,7 +417,7 @@ public void deserialize(InputStream stream) throws IOException { public void deserialize(ByteBuffer buffer) { int size = ReadWriteForEncodingUtils.readVarInt(buffer); for (int i = 0; i < size; i++) { - predicates.add(IDPredicate.createFrom(buffer)); + predicates.add(TagPredicate.createFrom(buffer)); } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/read/QueryDataSource.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/read/QueryDataSource.java index 2816493401671..8c95847788ee8 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/read/QueryDataSource.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/read/QueryDataSource.java @@ -113,18 +113,26 @@ public IQueryDataSource clone() { return queryDataSource; } - public boolean hasNextSeqResource(int curIndex, boolean ascending, IDeviceID deviceID) { + public boolean hasNextSeqResource( + int curIndex, boolean ascending, IDeviceID deviceID, long maxTsFileSetEndVersion) { boolean res = ascending ? 
curIndex < seqResources.size() : curIndex >= 0; if (res && curIndex != this.curSeqIndex) { this.curSeqIndex = curIndex; - this.curSeqOrderTime = seqResources.get(curIndex).getOrderTimeForSeq(deviceID, ascending); + this.curSeqOrderTime = + seqResources + .get(curIndex) + .getOrderTimeForSeq(deviceID, ascending, maxTsFileSetEndVersion); this.curSeqSatisfied = null; } return res; } public boolean isSeqSatisfied( - IDeviceID deviceID, int curIndex, Filter timeFilter, boolean debug) { + IDeviceID deviceID, + int curIndex, + Filter timeFilter, + boolean debug, + long maxTsFileSetEndVersion) { if (curIndex != this.curSeqIndex) { throw new IllegalArgumentException( String.format("curIndex %d is not equal to curSeqIndex %d", curIndex, this.curSeqIndex)); @@ -133,7 +141,9 @@ public boolean isSeqSatisfied( TsFileResource tsFileResource = seqResources.get(curSeqIndex); curSeqSatisfied = tsFileResource != null - && (isSingleDevice || tsFileResource.isSatisfied(deviceID, timeFilter, true, debug)); + && (isSingleDevice + || tsFileResource.isFinalDeviceIdSatisfied( + deviceID, timeFilter, true, debug, maxTsFileSetEndVersion)); } return curSeqSatisfied; @@ -154,21 +164,26 @@ public TsFileResource getSeqResourceByIndex(int curIndex) { return null; } - public boolean hasNextUnseqResource(int curIndex, boolean ascending, IDeviceID deviceID) { + public boolean hasNextUnseqResource( + int curIndex, boolean ascending, IDeviceID deviceID, long maxTsFileSetEndVersion) { boolean res = curIndex < unseqResources.size(); if (res && curIndex != this.curUnSeqIndex) { this.curUnSeqIndex = curIndex; this.curUnSeqOrderTime = unseqResources .get(unSeqFileOrderIndex[curIndex]) - .getOrderTimeForUnseq(deviceID, ascending); + .getOrderTimeForUnseq(deviceID, ascending, maxTsFileSetEndVersion); this.curUnSeqSatisfied = null; } return res; } public boolean isUnSeqSatisfied( - IDeviceID deviceID, int curIndex, Filter timeFilter, boolean debug) { + IDeviceID deviceID, + int curIndex, + Filter timeFilter, + 
boolean debug, + long maxTsFileSetEndVersion) { if (curIndex != this.curUnSeqIndex) { throw new IllegalArgumentException( String.format( @@ -178,7 +193,9 @@ public boolean isUnSeqSatisfied( TsFileResource tsFileResource = unseqResources.get(unSeqFileOrderIndex[curIndex]); curUnSeqSatisfied = tsFileResource != null - && (isSingleDevice || tsFileResource.isSatisfied(deviceID, timeFilter, false, debug)); + && (isSingleDevice + || tsFileResource.isFinalDeviceIdSatisfied( + deviceID, timeFilter, false, debug, maxTsFileSetEndVersion)); } return curUnSeqSatisfied; @@ -209,7 +226,7 @@ public int getUnseqResourcesSize() { return unseqResources.size(); } - public void fillOrderIndexes(IDeviceID deviceId, boolean ascending) { + public void fillOrderIndexes(IDeviceID deviceId, boolean ascending, long maxTsFileSetEndVersion) { if (unseqResources == null || unseqResources.isEmpty()) { return; } @@ -219,7 +236,8 @@ public void fillOrderIndexes(IDeviceID deviceId, boolean ascending) { for (TsFileResource resource : unseqResources) { orderTimeToIndexMap .computeIfAbsent( - resource.getOrderTimeForUnseq(deviceId, ascending), key -> new ArrayList<>()) + resource.getOrderTimeForUnseq(deviceId, ascending, maxTsFileSetEndVersion), + key -> new ArrayList<>()) .add(index++); } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/read/control/QueryResourceManager.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/read/control/QueryResourceManager.java index 930d68b4e891d..391775c8a8178 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/read/control/QueryResourceManager.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/read/control/QueryResourceManager.java @@ -56,7 +56,12 @@ public long assignQueryId() { * queryId = xx + Long.MIN_VALUE */ public long assignCompactionQueryId() { - long threadNum = 
Long.parseLong((Thread.currentThread().getName().split("-"))[5]); + long threadNum = 0; + try { + threadNum = Long.parseLong((Thread.currentThread().getName().split("-"))[5]); + } catch (ArrayIndexOutOfBoundsException | NumberFormatException e) { + // test environment, ignore it + } long queryId = Long.MIN_VALUE + threadNum; filePathsManager.addQueryId(queryId); return queryId; diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/task/DataRegionTask.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/task/DataRegionTask.java new file mode 100644 index 0000000000000..297518538dd08 --- /dev/null +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/task/DataRegionTask.java @@ -0,0 +1,64 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.db.storageengine.dataregion.task; + +import org.apache.iotdb.db.storageengine.dataregion.DataRegion; +import org.apache.iotdb.db.utils.io.StreamSerializable; + +import org.apache.tsfile.utils.ReadWriteForEncodingUtils; + +import java.io.IOException; +import java.io.InputStream; + +public interface DataRegionTask extends Runnable, StreamSerializable { + + long getTaskId(); + + void setTaskId(long taskId); + + TaskType getTaskType(); + + enum TaskType { + SchemaEvolutionTask + } + + @SuppressWarnings("SwitchStatementWithTooFewBranches") + static DataRegionTask createFrom(InputStream stream, long taskId, DataRegion dataRegion) + throws IOException { + int typeOrdinal = ReadWriteForEncodingUtils.readVarInt(stream); + if (typeOrdinal < 0 || typeOrdinal >= TaskType.values().length) { + throw new IOException("Invalid task type: " + typeOrdinal); + } + + TaskType taskType = TaskType.values()[typeOrdinal]; + + DataRegionTask task; + switch (taskType) { + case SchemaEvolutionTask: + task = new SchemaEvolutionTask(dataRegion); + break; + default: + throw new IOException("Invalid task type: " + taskType); + } + task.deserialize(stream); + task.setTaskId(taskId); + return task; + } +} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/task/DataRegionTaskManager.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/task/DataRegionTaskManager.java new file mode 100644 index 0000000000000..59339aed00795 --- /dev/null +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/task/DataRegionTaskManager.java @@ -0,0 +1,112 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.db.storageengine.dataregion.task; + +import org.apache.iotdb.db.storageengine.dataregion.DataRegion; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.BufferedInputStream; +import java.io.BufferedOutputStream; +import java.io.File; +import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.io.IOException; +import java.util.Arrays; +import java.util.concurrent.atomic.AtomicLong; + +@SuppressWarnings("ResultOfMethodCallIgnored") +public class DataRegionTaskManager { + + private static final Logger LOGGER = LoggerFactory.getLogger(DataRegionTaskManager.class); + private static final String TASKS_DIR_NAME = "tasks"; + private static final String TASK_FILE_SUFFIX = ".tsk"; + + private final DataRegion dataRegion; + private final AtomicLong lastestTaskId = new AtomicLong(0); + private final File tasksDir; + + public DataRegionTaskManager(DataRegion dataRegion) { + this.dataRegion = dataRegion; + this.tasksDir = new File(dataRegion.getDataRegionSysDir() + File.separator + TASKS_DIR_NAME); + } + + public void recover() { + tasksDir.mkdirs(); + File[] files = tasksDir.listFiles((File dir, String name) -> name.endsWith(TASK_FILE_SUFFIX)); + if (files == null) { + return; + } + + Arrays.sort( + files, + (f1, f2) -> { + String fileName1 = f1.getName(); + int suffixIndex1 = fileName1.indexOf("."); + long taskId1 = Long.parseLong(fileName1.substring(0, suffixIndex1)); + + String fileName2 = f2.getName(); + int suffixIndex2 = fileName2.indexOf("."); + long taskId2 = Long.parseLong(fileName2.substring(0, suffixIndex2)); + + return Long.compare(taskId1, taskId2); + }); + + for 
(File file : files) { + String fileName = file.getName(); + int suffixIndex = fileName.indexOf("."); + long taskId = Long.parseLong(fileName.substring(0, suffixIndex)); + lastestTaskId.getAndUpdate(l -> Math.max(l, taskId)); + + try (FileInputStream fis = new FileInputStream(file); + BufferedInputStream bufferedInputStream = new BufferedInputStream(fis)) { + DataRegionTask task = DataRegionTask.createFrom(bufferedInputStream, taskId, dataRegion); + task.run(); + } catch (IOException e) { + if (LOGGER.isWarnEnabled()) { + LOGGER.warn("Cannot recover task from file {}", file.getAbsolutePath(), e); + } + } finally { + file.delete(); + } + } + } + + private void persistTask(DataRegionTask task) throws IOException { + File taskFile = new File(tasksDir, task.getTaskId() + ".tsk"); + try (FileOutputStream fos = new FileOutputStream(taskFile); + BufferedOutputStream bufferedOutputStream = new BufferedOutputStream(fos)) { + task.serialize(bufferedOutputStream); + } + } + + private void removeTask(DataRegionTask task) throws IOException { + File taskFile = new File(tasksDir, task.getTaskId() + ".tsk"); + taskFile.delete(); + } + + public void submitAndRun(DataRegionTask dataRegionTask) throws IOException { + dataRegionTask.setTaskId(lastestTaskId.getAndIncrement()); + persistTask(dataRegionTask); + dataRegionTask.run(); + removeTask(dataRegionTask); + } +} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/task/SchemaEvolutionTask.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/task/SchemaEvolutionTask.java new file mode 100644 index 0000000000000..9a361ca4700e9 --- /dev/null +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/task/SchemaEvolutionTask.java @@ -0,0 +1,87 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.db.storageengine.dataregion.task; + +import org.apache.iotdb.db.storageengine.dataregion.DataRegion; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.SchemaEvolution; + +import org.apache.tsfile.utils.ReadWriteForEncodingUtils; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.util.ArrayList; +import java.util.List; + +public class SchemaEvolutionTask implements DataRegionTask { + + private List schemaEvolutions; + private final DataRegion dataRegion; + private long taskId; + + @Override + public void run() { + dataRegion.recordSchemaEvolution(schemaEvolutions); + dataRegion.applySchemaEvolutionToObjects(schemaEvolutions); + } + + public SchemaEvolutionTask(DataRegion dataRegion) { + this.dataRegion = dataRegion; + } + + public SchemaEvolutionTask(List schemaEvolutions, DataRegion dataRegion) { + this.schemaEvolutions = schemaEvolutions; + this.dataRegion = dataRegion; + } + + @Override + public long serialize(OutputStream stream) throws IOException { + long size = ReadWriteForEncodingUtils.writeVarInt(getTaskType().ordinal(), stream); + size += ReadWriteForEncodingUtils.writeVarInt(schemaEvolutions.size(), stream); + for (SchemaEvolution schemaEvolution : 
schemaEvolutions) { + size += schemaEvolution.serialize(stream); + } + return size; + } + + @Override + public void deserialize(InputStream stream) throws IOException { + int size = ReadWriteForEncodingUtils.readVarInt(stream); + schemaEvolutions = new ArrayList<>(size); + for (int i = 0; i < size; i++) { + schemaEvolutions.add(SchemaEvolution.createFrom(stream)); + } + } + + @Override + public long getTaskId() { + return taskId; + } + + @Override + public void setTaskId(long taskId) { + this.taskId = taskId; + } + + @Override + public TaskType getTaskType() { + return TaskType.SchemaEvolutionTask; + } +} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/TsFileID.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/TsFileID.java index c9656382e3d68..158daf26c8a00 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/TsFileID.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/TsFileID.java @@ -32,6 +32,7 @@ public class TsFileID { public final long fileVersion; // high 32 bit is compaction level, low 32 bit is merge count public final long compactionVersion; + public String databaseName; public TsFileID() { this.regionId = -1; @@ -70,6 +71,10 @@ public TsFileID(String tsFileAbsolutePath) { } catch (Exception e) { // ignore, load will get in here } + + if (pathLength >= 4) { + this.databaseName = pathSegments[pathLength - 4]; + } } this.regionId = tmpRegionId; this.timePartitionId = tmpTimePartitionId; diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/TsFileManager.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/TsFileManager.java index b7c1ba2c14fb4..7063d44c835f0 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/TsFileManager.java +++ 
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/TsFileManager.java @@ -23,6 +23,7 @@ import org.apache.iotdb.db.pipe.resource.PipeDataNodeResourceManager; import org.apache.iotdb.db.storageengine.dataregion.modification.ModFileManagement; import org.apache.iotdb.db.storageengine.dataregion.modification.PartitionLevelModFileManager; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.fileset.TsFileSet; import org.apache.iotdb.db.storageengine.dataregion.tsfile.timeindex.FileTimeIndexCacheRecorder; import org.apache.iotdb.db.storageengine.rescon.memory.TsFileResourceManager; @@ -31,12 +32,15 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Set; import java.util.TreeMap; +import java.util.concurrent.ConcurrentSkipListMap; +import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.locks.ReadWriteLock; @@ -45,7 +49,6 @@ public class TsFileManager { private final String storageGroupName; private String dataRegionId; - private final String dataRegionSysDir; /** Serialize queries, delete resource files, compaction cleanup files */ private final ReadWriteLock resourceListLock = new ReentrantReadWriteLock(); @@ -55,13 +58,13 @@ public class TsFileManager { private final TreeMap sequenceFiles = new TreeMap<>(); private final TreeMap unsequenceFiles = new TreeMap<>(); private final TreeMap modFileManagementMap = new TreeMap<>(); + private final Map> tsfileSets = new ConcurrentSkipListMap<>(); private volatile boolean allowCompaction = true; private final AtomicLong currentCompactionTaskSerialId = new AtomicLong(0); - public TsFileManager(String storageGroupName, String dataRegionId, String dataRegionSysDir) { + public TsFileManager(String storageGroupName, String 
dataRegionId) { this.storageGroupName = storageGroupName; - this.dataRegionSysDir = dataRegionSysDir; this.dataRegionId = dataRegionId; } @@ -236,6 +239,7 @@ public void insertToPartitionFileList( modFileManagementMap.computeIfAbsent( timePartition, t -> new PartitionLevelModFileManager())); } + tsFileResource.setTsFileManager(this); } finally { writeUnlock(); } @@ -254,6 +258,7 @@ public void add(TsFileResource tsFileResource, boolean sequence) { modFileManagementMap.computeIfAbsent( tsFileResource.getTimePartition(), t -> new PartitionLevelModFileManager())); } + tsFileResource.setTsFileManager(this); } finally { writeUnlock(); } @@ -272,6 +277,7 @@ public void keepOrderInsert(TsFileResource tsFileResource, boolean sequence) thr modFileManagementMap.computeIfAbsent( tsFileResource.getTimePartition(), t -> new PartitionLevelModFileManager())); } + tsFileResource.setTsFileManager(this); } finally { writeUnlock(); } @@ -332,6 +338,7 @@ public void replace( modFileManagementMap.computeIfAbsent( resource.getTimePartition(), t -> new PartitionLevelModFileManager())); } + resource.setTsFileManager(this); } } } finally { @@ -427,10 +434,6 @@ public String getStorageGroupName() { return storageGroupName; } - public String getDataRegionSysDir() { - return dataRegionSysDir; - } - public Set getTimePartitions() { readLock(); try { @@ -507,4 +510,39 @@ public long getMaxFileTimestampOfUnSequenceFile() { } return maxFileTimestamp; } + + public void addTsFileSet(TsFileSet newSet, long partitionId) { + List tsFileSetList = + tsfileSets.computeIfAbsent(partitionId, p -> new CopyOnWriteArrayList<>()); + tsFileSetList.add(newSet); + } + + public List getTsFileSet(long partitionId) { + return getTsFileSet(partitionId, Long.MIN_VALUE, Long.MAX_VALUE); + } + + public List getTsFileSet( + long partitionId, long minFileVersionIncluded, long maxFileVersionExcluded) { + List tsFileSetList = tsfileSets.getOrDefault(partitionId, Collections.emptyList()); + int start = 0, end = 
tsFileSetList.size(); + for (int i = 0, tsFileSetListSize = tsFileSetList.size(); i < tsFileSetListSize; i++) { + TsFileSet tsFileSet = tsFileSetList.get(i); + if (tsFileSet.getEndVersion() < minFileVersionIncluded) { + start = i + 1; + } + if (tsFileSet.getEndVersion() >= maxFileVersionExcluded) { + end = i; + break; + } + } + return start < end ? tsFileSetList.subList(start, end) : Collections.emptyList(); + } + + public void deleteTsFileSets() { + for (List value : tsfileSets.values()) { + for (TsFileSet tsFileSet : value) { + tsFileSet.remove(); + } + } + } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/TsFileResource.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/TsFileResource.java index b84cce9e8d21b..d6d2b42c7dd29 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/TsFileResource.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/TsFileResource.java @@ -42,6 +42,8 @@ import org.apache.iotdb.db.storageengine.dataregion.modification.v1.Deletion; import org.apache.iotdb.db.storageengine.dataregion.modification.v1.Modification; import org.apache.iotdb.db.storageengine.dataregion.modification.v1.ModificationFileV1; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.EvolvedSchema; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.fileset.TsFileSet; import org.apache.iotdb.db.storageengine.dataregion.tsfile.generator.TsFileNameGenerator; import org.apache.iotdb.db.storageengine.dataregion.tsfile.timeindex.ArrayDeviceTimeIndex; import org.apache.iotdb.db.storageengine.dataregion.tsfile.timeindex.FileTimeIndex; @@ -209,6 +211,8 @@ public class TsFileResource implements PersistentResource, Cloneable { private Map>> lastValues; + private TsFileManager tsFileManager = null; + @TestOnly public TsFileResource() { this.tsFileID = new TsFileID(); @@ 
-257,6 +261,7 @@ public TsFileResource( this.tsFileID = originTsFileResource.tsFileID; this.isSeq = originTsFileResource.isSeq; this.tierLevel = originTsFileResource.tierLevel; + this.tsFileManager = originTsFileResource.tsFileManager; } public synchronized void serialize(String targetFilePath) throws IOException { @@ -610,8 +615,21 @@ public long getTsFileSize() { } } - public Optional getStartTime(IDeviceID deviceId) { + public IDeviceID toOriginalDeviceID(IDeviceID deviceID) { + return toOriginalDeviceID(Long.MAX_VALUE, deviceID); + } + + public IDeviceID toOriginalDeviceID(long maxTsFileSetEndVersion, IDeviceID deviceID) { + EvolvedSchema evolvedSchema = getMergedEvolvedSchema(maxTsFileSetEndVersion); + if (evolvedSchema != null) { + return evolvedSchema.rewriteToOriginal(deviceID); + } + return deviceID; + } + + public Optional getStartTime(IDeviceID deviceId, long maxTsFileSetEndVersion) { try { + deviceId = toOriginalDeviceID(maxTsFileSetEndVersion, deviceId); return deviceId == null ? Optional.of(getFileStartTime()) : timeIndex.getStartTime(deviceId); } catch (Exception e) { LOGGER.error( @@ -623,9 +641,14 @@ public Optional getStartTime(IDeviceID deviceId) { } } + public Optional getStartTime(IDeviceID deviceId) { + return getStartTime(deviceId, Long.MAX_VALUE); + } + /** open file's end time is Long.MIN_VALUE */ - public Optional getEndTime(IDeviceID deviceId) { + public Optional getEndTime(IDeviceID deviceId, long maxTsFileSetEndVersion) { try { + deviceId = toOriginalDeviceID(maxTsFileSetEndVersion, deviceId); return deviceId == null ? 
Optional.of(getFileEndTime()) : timeIndex.getEndTime(deviceId); } catch (Exception e) { LOGGER.error( @@ -637,9 +660,19 @@ public Optional getEndTime(IDeviceID deviceId) { } } + /** open file's end time is Long.MIN_VALUE */ + public Optional getEndTime(IDeviceID deviceId) { + return getEndTime(deviceId, Long.MAX_VALUE); + } + // cannot use FileTimeIndex - public long getOrderTimeForSeq(IDeviceID deviceId, boolean ascending) { + public long getOrderTimeForSeq( + IDeviceID deviceId, boolean ascending, long maxTsFileSetEndVersion) { if (timeIndex instanceof ArrayDeviceTimeIndex) { + EvolvedSchema evolvedSchema = getMergedEvolvedSchema(maxTsFileSetEndVersion); + if (evolvedSchema != null) { + deviceId = evolvedSchema.rewriteToOriginal(deviceId); + } return ascending ? timeIndex.getStartTime(deviceId).orElse(Long.MIN_VALUE) : timeIndex.getEndTime(deviceId).orElse(Long.MAX_VALUE); @@ -649,8 +682,13 @@ public long getOrderTimeForSeq(IDeviceID deviceId, boolean ascending) { } // can use FileTimeIndex - public long getOrderTimeForUnseq(IDeviceID deviceId, boolean ascending) { + public long getOrderTimeForUnseq( + IDeviceID deviceId, boolean ascending, long maxTsFileSetEndVersion) { if (timeIndex instanceof ArrayDeviceTimeIndex) { + EvolvedSchema evolvedSchema = getMergedEvolvedSchema(maxTsFileSetEndVersion); + if (evolvedSchema != null) { + deviceId = evolvedSchema.rewriteToOriginal(deviceId); + } if (ascending) { return timeIndex.getStartTime(deviceId).orElse(Long.MIN_VALUE); } else { @@ -718,6 +756,8 @@ public ITimeIndex getTimeIndex() { * Whether this TsFile definitely not contains this device, if ture, it must not contain this * device, if false, it may or may not contain this device Notice: using method be CAREFULLY and * you really understand the meaning!!!!! 
+ * + * @param device the IDeviceID before schema evolution */ public boolean definitelyNotContains(IDeviceID device) { return timeIndex.definitelyNotContains(device); @@ -1003,14 +1043,45 @@ public boolean stillLives(long timeLowerBound) { } public boolean isDeviceIdExist(IDeviceID deviceId) { + EvolvedSchema evolvedSchema = getMergedEvolvedSchema(); + if (evolvedSchema != null) { + deviceId = evolvedSchema.rewriteToOriginal(deviceId); + } return timeIndex.checkDeviceIdExist(deviceId); } /** + * @param deviceId IDeviceId after schema evolution + */ + public boolean isFinalDeviceIdSatisfied( + IDeviceID deviceId, Filter timeFilter, boolean isSeq, boolean debug) { + return isFinalDeviceIdSatisfied(deviceId, timeFilter, isSeq, debug, Long.MAX_VALUE); + } + + /** + * @param deviceId the IDeviceID after schema evolution + * @return true if the device is contained in the TsFile + */ + public boolean isFinalDeviceIdSatisfied( + IDeviceID deviceId, + Filter timeFilter, + boolean isSeq, + boolean debug, + long maxTsFileSetEndVersion) { + EvolvedSchema evolvedSchema = getMergedEvolvedSchema(maxTsFileSetEndVersion); + if (evolvedSchema != null) { + deviceId = evolvedSchema.rewriteToOriginal(deviceId); + } + return isOriginalDeviceIdSatisfied(deviceId, timeFilter, isSeq, debug); + } + + /** + * @param deviceId the IDeviceID before schema evolution * @return true if the device is contained in the TsFile */ @SuppressWarnings("OptionalGetWithoutIsPresent") - public boolean isSatisfied(IDeviceID deviceId, Filter timeFilter, boolean isSeq, boolean debug) { + public boolean isOriginalDeviceIdSatisfied( + IDeviceID deviceId, Filter timeFilter, boolean isSeq, boolean debug) { if (deviceId != null && definitelyNotContains(deviceId)) { if (debug) { DEBUG_LOGGER.info( @@ -1057,6 +1128,8 @@ private boolean isAlive(long time, long dataTTL) { /** * Check whether the given device may still alive or not. Return false if the device does not * exist or out of dated. 
+ * + * @param device IDeviceID before schema evolution */ public boolean isDeviceAlive(IDeviceID device, long ttl) { if (definitelyNotContains(device)) { @@ -1635,4 +1708,69 @@ public TsFileResource shallowClone() { public TsFileResource shallowCloneForNative() throws CloneNotSupportedException { return (TsFileResource) clone(); } + + public List getTsFileSets() { + if (tsFileManager == null) { + // loading TsFile, no TsFileSets + return Collections.emptyList(); + } + return tsFileManager.getTsFileSet( + tsFileID.timePartitionId, tsFileID.fileVersion, Long.MAX_VALUE); + } + + public EvolvedSchema getMergedEvolvedSchema() { + return getMergedEvolvedSchema(Long.MAX_VALUE); + } + + public EvolvedSchema getMergedEvolvedSchema(long excludedMaxFileVersion) { + List list = new ArrayList<>(); + List tsFileSets = getTsFileSets(); + for (TsFileSet fileSet : tsFileSets) { + if (fileSet.getEndVersion() >= excludedMaxFileVersion) { + break; + } + + try { + EvolvedSchema readEvolvedSchema = fileSet.readEvolvedSchema(); + list.add(readEvolvedSchema); + } catch (IOException e) { + LOGGER.warn("Cannot read evolved schema from {}, skipping it", fileSet, e); + } + } + + return EvolvedSchema.merge(list.toArray(new EvolvedSchema[0])); + } + + public static Pair getMaxTsFileSetEndVersionAndMinResource( + List tsFileResources) { + long maxTsFileSetEndVersion = Long.MIN_VALUE; + long minResourceVersion = Long.MAX_VALUE; + TsFileResource minTsFileResource = null; + for (TsFileResource tsFileResource : tsFileResources) { + List tsFileSets = tsFileResource.getTsFileSets(); + if (tsFileSets.isEmpty()) { + // include the newest files that do not belong to any file sets, + // should apply all schema evolution + maxTsFileSetEndVersion = Long.MAX_VALUE; + break; + } + TsFileSet lastTsFileSet = tsFileSets.get(tsFileSets.size() - 1); + if (lastTsFileSet.getEndVersion() > maxTsFileSetEndVersion) { + maxTsFileSetEndVersion = lastTsFileSet.getEndVersion(); + } + if 
(tsFileResource.getTsFileID().fileVersion < minResourceVersion) { + minTsFileResource = tsFileResource; + minResourceVersion = tsFileResource.getTsFileID().fileVersion; + } + } + return new Pair<>(maxTsFileSetEndVersion, minTsFileResource); + } + + public void setTsFileManager(TsFileManager tsFileManager) { + this.tsFileManager = tsFileManager; + } + + public TsFileManager getTsFileManager() { + return tsFileManager; + } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/evolution/ColumnRename.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/evolution/ColumnRename.java new file mode 100644 index 0000000000000..adb6e7e935911 --- /dev/null +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/evolution/ColumnRename.java @@ -0,0 +1,125 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution; + +import org.apache.tsfile.enums.TSDataType; +import org.apache.tsfile.utils.ReadWriteForEncodingUtils; +import org.apache.tsfile.utils.ReadWriteIOUtils; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.nio.ByteBuffer; + +/** A schema evolution operation that renames a column in a table schema. */ +public class ColumnRename implements SchemaEvolution { + + private String tableName; + private String nameBefore; + private String nameAfter; + // to judge if the Object directories should be renamed + private TSDataType dataType; + + // for deserialization + public ColumnRename() {} + + public ColumnRename(String tableName, String nameBefore, String nameAfter) { + this.tableName = tableName.toLowerCase(); + this.nameBefore = nameBefore.toLowerCase(); + this.nameAfter = nameAfter.toLowerCase(); + } + + public ColumnRename(String tableName, String nameBefore, String nameAfter, TSDataType dataType) { + this(tableName, nameBefore, nameAfter); + this.dataType = dataType; + } + + @Override + public SchemaEvolutionType getEvolutionType() { + return SchemaEvolutionType.COLUMN_RENAME; + } + + @Override + public void applyTo(EvolvedSchema evolvedSchema) { + evolvedSchema.renameColumn(tableName, nameBefore, nameAfter); + } + + @Override + public long serialize(OutputStream stream) throws IOException { + int size = ReadWriteForEncodingUtils.writeVarInt(getEvolutionType().ordinal(), stream); + size += ReadWriteIOUtils.writeVar(tableName, stream); + size += ReadWriteIOUtils.writeVar(nameBefore, stream); + size += 
ReadWriteIOUtils.writeVar(nameAfter, stream); + size += ReadWriteIOUtils.write(dataType != null ? (byte) dataType.ordinal() : -1, stream); + return size; + } + + @Override + public void deserialize(InputStream stream) throws IOException { + tableName = ReadWriteIOUtils.readVarIntString(stream); + nameBefore = ReadWriteIOUtils.readVarIntString(stream); + nameAfter = ReadWriteIOUtils.readVarIntString(stream); + byte category = ReadWriteIOUtils.readByte(stream); + if (category != -1) { + dataType = TSDataType.values()[category]; + } + } + + @Override + public long serialize(ByteBuffer buffer) { + int size = ReadWriteForEncodingUtils.writeVarInt(getEvolutionType().ordinal(), buffer); + size += ReadWriteIOUtils.writeVar(tableName, buffer); + size += ReadWriteIOUtils.writeVar(nameBefore, buffer); + size += ReadWriteIOUtils.writeVar(nameAfter, buffer); + size += ReadWriteIOUtils.write(dataType != null ? (byte) dataType.ordinal() : -1, buffer); + return size; + } + + @Override + public void deserialize(ByteBuffer buffer) { + tableName = ReadWriteIOUtils.readVarIntString(buffer); + nameBefore = ReadWriteIOUtils.readVarIntString(buffer); + nameAfter = ReadWriteIOUtils.readVarIntString(buffer); + byte category = ReadWriteIOUtils.readByte(buffer); + if (category != -1) { + dataType = TSDataType.values()[category]; + } + } + + public TSDataType getDataType() { + return dataType; + } + + public void setDataType(TSDataType dataType) { + this.dataType = dataType; + } + + public String getTableName() { + return tableName; + } + + public String getNameBefore() { + return nameBefore; + } + + public String getNameAfter() { + return nameAfter; + } +} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/evolution/EvolvedSchema.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/evolution/EvolvedSchema.java new file mode 100644 index 0000000000000..d65b9ddac4d0e --- /dev/null +++ 
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/evolution/EvolvedSchema.java @@ -0,0 +1,463 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution; + +import org.apache.iotdb.db.storageengine.dataregion.modification.DeletionPredicate; +import org.apache.iotdb.db.storageengine.dataregion.modification.ModEntry; +import org.apache.iotdb.db.storageengine.dataregion.modification.ModEntry.ModType; +import org.apache.iotdb.db.storageengine.dataregion.modification.TableDeletionEntry; +import org.apache.iotdb.db.storageengine.dataregion.modification.TagPredicate; + +import org.apache.tsfile.enums.ColumnCategory; +import org.apache.tsfile.file.metadata.AbstractAlignedChunkMetadata; +import org.apache.tsfile.file.metadata.IChunkMetadata; +import org.apache.tsfile.file.metadata.IDeviceID; +import org.apache.tsfile.file.metadata.IDeviceID.Factory; +import org.apache.tsfile.file.metadata.TableSchema; +import org.apache.tsfile.file.metadata.TimeseriesMetadata; +import org.apache.tsfile.utils.Accountable; +import org.apache.tsfile.utils.PublicBAOS; +import org.apache.tsfile.utils.RamUsageEstimator; +import org.apache.tsfile.utils.ReadWriteIOUtils; +import org.apache.tsfile.write.schema.IMeasurementSchema; +import org.apache.tsfile.write.schema.MeasurementSchema; +import org.apache.tsfile.write.schema.Schema; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Objects; +import java.util.function.Function; +import java.util.stream.Collectors; + 
+public class EvolvedSchema implements Accountable { + + private static final Logger LOGGER = LoggerFactory.getLogger(EvolvedSchema.class); + // the evolved table names after applying all schema evolution operations + private Map finalToOriginalTableNames = new LinkedHashMap<>(); + + /** + * the first key is the evolved table name, the second key is the evolved column name, and the + * value is the original column name before any schema evolution. + */ + private Map> finalToOriginalColumnNames = new LinkedHashMap<>(); + + // the reversed version of finalToOriginalTableNames + private Map originalToFinalTableNames = new LinkedHashMap<>(); + + // the reversed version of finalToOriginalColumnNames + private Map> originalToFinalColumnNames = new LinkedHashMap<>(); + + public void renameTable(String oldTableName, String newTableName) { + if (!finalToOriginalTableNames.containsKey(oldTableName) + || finalToOriginalTableNames.get(oldTableName).isEmpty()) { + finalToOriginalTableNames.put(newTableName, oldTableName); + finalToOriginalTableNames.put(oldTableName, ""); + originalToFinalTableNames.put(oldTableName, newTableName); + } else { + // mark the old table name as non-exists (empty) + String originalName = finalToOriginalTableNames.put(oldTableName, ""); + finalToOriginalTableNames.put(newTableName, originalName); + originalToFinalTableNames.put(originalName, newTableName); + } + + if (finalToOriginalColumnNames.containsKey(oldTableName)) { + Map columnMap = finalToOriginalColumnNames.remove(oldTableName); + finalToOriginalColumnNames.put(newTableName, columnMap); + } + } + + public void renameColumn(String newTableName, String oldColumnName, String newColumnName) { + Map finalToOriginalMap = + finalToOriginalColumnNames.computeIfAbsent(newTableName, t -> new LinkedHashMap<>()); + String originalTableName = getOriginalTableName(newTableName); + if (!finalToOriginalMap.containsKey(oldColumnName) + || finalToOriginalMap.get(oldColumnName).isEmpty()) { + 
finalToOriginalMap.put(newColumnName, oldColumnName); + finalToOriginalMap.put(oldColumnName, ""); + originalToFinalColumnNames + .computeIfAbsent(originalTableName, t -> new LinkedHashMap<>()) + .put(oldColumnName, newColumnName); + } else { + // mark the old column name as non-exists + String originalName = finalToOriginalMap.put(oldColumnName, ""); + if (!newColumnName.equals(originalName)) { + finalToOriginalMap.put(newColumnName, originalName); + originalToFinalColumnNames + .computeIfAbsent(originalTableName, t -> new LinkedHashMap<>()) + .put(originalName, newColumnName); + } else { + // the new name is the same as the original name, remove the mapping + finalToOriginalMap.remove(newColumnName); + finalToOriginalMap.remove(oldColumnName); + if (finalToOriginalMap.isEmpty()) { + finalToOriginalColumnNames.remove(newTableName); + } + + Map originalToFinalMap = originalToFinalColumnNames.get(originalTableName); + if (originalToFinalMap != null) { + originalToFinalMap.remove(originalName); + if (originalToFinalMap.isEmpty()) { + originalToFinalColumnNames.remove(originalTableName); + } + } + } + } + } + + public String getOriginalTableName(String finalTableName) { + return finalToOriginalTableNames.getOrDefault(finalTableName, finalTableName); + } + + public String getFinalTableName(String originalTableName) { + return originalToFinalTableNames.getOrDefault(originalTableName, originalTableName); + } + + public String getOriginalColumnName(String tableName, String evolvedColumnName) { + Map columnNameMap = finalToOriginalColumnNames.get(tableName); + if (columnNameMap == null) { + return evolvedColumnName; + } + return columnNameMap.getOrDefault(evolvedColumnName, evolvedColumnName); + } + + public String getFinalColumnName(String originalTableName, String originalColumnName) { + return originalToFinalColumnNames + .getOrDefault(originalTableName, Collections.emptyMap()) + .getOrDefault(originalColumnName, originalColumnName); + } + + @Override + public boolean 
equals(Object o) { + if (o == null || getClass() != o.getClass()) { + return false; + } + EvolvedSchema that = (EvolvedSchema) o; + return Objects.equals(finalToOriginalTableNames, that.finalToOriginalTableNames) + && Objects.equals(finalToOriginalColumnNames, that.finalToOriginalColumnNames); + } + + @Override + public int hashCode() { + return Objects.hash(finalToOriginalTableNames, finalToOriginalColumnNames); + } + + @Override + public String toString() { + return "EvolvedSchema{" + + "finalToOriginalTableNames=" + + finalToOriginalTableNames + + ", finalToOriginalColumnNames=" + + finalToOriginalColumnNames + + ", originalToFinalTableNames=" + + originalToFinalTableNames + + ", originalToFinalColumnNames=" + + originalToFinalColumnNames + + '}'; + } + + public List toSchemaEvolutions() { + List schemaEvolutions = new ArrayList<>(); + finalToOriginalTableNames.forEach( + (finalTableName, originalTableName) -> { + if (!originalTableName.isEmpty()) { + schemaEvolutions.add(new TableRename(originalTableName, finalTableName)); + } + }); + finalToOriginalColumnNames.forEach( + (finalTableName, originalColumnNameMap) -> { + originalColumnNameMap.forEach( + (finalColumnName, originalColumnName) -> { + if (!originalColumnName.isEmpty()) { + schemaEvolutions.add( + new ColumnRename(finalTableName, originalColumnName, finalColumnName, null)); + } + }); + }); + return schemaEvolutions; + } + + public ByteBuffer toSchemaEvolutionFileBuffer() { + PublicBAOS publicBAOS = new PublicBAOS(); + try { + ReadWriteIOUtils.write(0L, publicBAOS); + List schemaEvolutions = toSchemaEvolutions(); + for (SchemaEvolution evolution : schemaEvolutions) { + evolution.serialize(publicBAOS); + } + } catch (IOException e) { + // ignored + } + + ByteBuffer buffer = ByteBuffer.wrap(publicBAOS.getBuf(), 0, publicBAOS.size()); + buffer.putLong(0, buffer.limit()); + buffer.position(0); + return buffer; + } + + public ModEntry rewriteToOriginal(ModEntry entry) { + if (entry.getType() == 
ModType.TABLE_DELETION) { + return rewriteToOriginal(((TableDeletionEntry) entry)); + } + return entry; + } + + public ModEntry rewriteToFinal(ModEntry entry) { + if (entry.getType() == ModType.TABLE_DELETION) { + return rewriteToFinal(((TableDeletionEntry) entry)); + } + return entry; + } + + public TableDeletionEntry rewriteToOriginal(TableDeletionEntry entry) { + DeletionPredicate deletionPredicate = rewriteToOriginal(entry.getPredicate()); + return new TableDeletionEntry(deletionPredicate, entry.getTimeRange()); + } + + public TableDeletionEntry rewriteToFinal(TableDeletionEntry entry) { + DeletionPredicate deletionPredicate = rewriteToFinal(entry.getPredicate()); + return new TableDeletionEntry(deletionPredicate, entry.getTimeRange()); + } + + private DeletionPredicate rewriteToFinal(DeletionPredicate predicate) { + String finalTableName = getFinalTableName(predicate.getTableName()); + TagPredicate tagPredicate = predicate.getTagPredicate(); + tagPredicate = tagPredicate.rewriteToOriginal(this); + List newMeasurements = + predicate.getMeasurementNames().stream() + .map(m -> getFinalColumnName(predicate.getTableName(), m)) + .collect(Collectors.toList()); + return new DeletionPredicate(finalTableName, tagPredicate, newMeasurements); + } + + private DeletionPredicate rewriteToOriginal(DeletionPredicate predicate) { + String originalTableName = getOriginalTableName(predicate.getTableName()); + TagPredicate tagPredicate = predicate.getTagPredicate(); + tagPredicate = tagPredicate.rewriteToOriginal(this); + List newMeasurements = + predicate.getMeasurementNames().stream() + .map(m -> getOriginalColumnName(predicate.getTableName(), m)) + .collect(Collectors.toList()); + return new DeletionPredicate(originalTableName, tagPredicate, newMeasurements); + } + + public IDeviceID rewriteToOriginal(IDeviceID deviceID) { + String tableName = deviceID.getTableName(); + String originalTableName = getOriginalTableName(tableName); + return rewriteTableName(deviceID, 
originalTableName); + } + + public IDeviceID rewriteToFinal(IDeviceID deviceID) { + String tableName = deviceID.getTableName(); + String finalTableName = getFinalTableName(tableName); + return rewriteTableName(deviceID, finalTableName); + } + + public void rewriteToFinal( + String originalTableName, List timeseriesMetadataList) { + timeseriesMetadataList.forEach( + timeseriesMetadata -> { + String finalColumnName = + getFinalColumnName(originalTableName, timeseriesMetadata.getMeasurementId()); + timeseriesMetadata.setMeasurementId(finalColumnName); + }); + } + + public Map rewriteToFinal(Map tableSchemas) { + Map finalTableSchemas = new HashMap<>(tableSchemas.size()); + for (Map.Entry entry : tableSchemas.entrySet()) { + TableSchema tableSchema = entry.getValue(); + tableSchema = rewriteToFinal(tableSchema); + finalTableSchemas.put(tableSchema.getTableName(), tableSchema); + } + return finalTableSchemas; + } + + private TableSchema rewriteToOriginal(TableSchema tableSchema) { + String originalTableName = getOriginalTableName(tableSchema.getTableName()); + + List measurementSchemas = + new ArrayList<>(tableSchema.getColumnSchemas().size()); + List columnCategories = new ArrayList<>(tableSchema.getColumnTypes().size()); + List columnSchemas = tableSchema.getColumnSchemas(); + for (int i = 0, columnSchemasSize = columnSchemas.size(); i < columnSchemasSize; i++) { + IMeasurementSchema measurementSchema = columnSchemas.get(i); + measurementSchemas.add( + new MeasurementSchema( + getOriginalColumnName( + tableSchema.getTableName(), measurementSchema.getMeasurementName()), + measurementSchema.getType(), + measurementSchema.getEncodingType(), + measurementSchema.getCompressor())); + columnCategories.add(tableSchema.getColumnTypes().get(i)); + } + + TableSchema schema = new TableSchema(originalTableName, measurementSchemas, columnCategories); + schema.setUpdatable(tableSchema.isUpdatable()); + return schema; + } + + public TableSchema rewriteToFinal(TableSchema tableSchema) 
{ + String finalTableName = getFinalTableName(tableSchema.getTableName()); + + List measurementSchemas = + new ArrayList<>(tableSchema.getColumnSchemas().size()); + List columnCategories = new ArrayList<>(tableSchema.getColumnTypes().size()); + List columnSchemas = tableSchema.getColumnSchemas(); + for (int i = 0, columnSchemasSize = columnSchemas.size(); i < columnSchemasSize; i++) { + IMeasurementSchema measurementSchema = columnSchemas.get(i); + measurementSchemas.add( + new MeasurementSchema( + getFinalColumnName( + tableSchema.getTableName(), measurementSchema.getMeasurementName()), + measurementSchema.getType(), + measurementSchema.getEncodingType(), + measurementSchema.getCompressor())); + columnCategories.add(tableSchema.getColumnTypes().get(i)); + } + + TableSchema schema = new TableSchema(finalTableName, measurementSchemas, columnCategories); + schema.setUpdatable(tableSchema.isUpdatable()); + return schema; + } + + @SuppressWarnings("SuspiciousSystemArraycopy") + public static IDeviceID rewriteTableName(IDeviceID deviceID, String newTableName) { + String tableName = deviceID.getTableName(); + if (!tableName.equals(newTableName)) { + Object[] segments = deviceID.getSegments(); + String[] newSegments = new String[segments.length]; + newSegments[0] = newTableName; + System.arraycopy(segments, 1, newSegments, 1, segments.length - 1); + return Factory.DEFAULT_FACTORY.create(newSegments); + } + return deviceID; + } + + public static EvolvedSchema deepCopy(EvolvedSchema evolvedSchema) { + EvolvedSchema newEvolvedSchema = new EvolvedSchema(); + newEvolvedSchema.finalToOriginalTableNames = + new LinkedHashMap<>(evolvedSchema.finalToOriginalTableNames); + newEvolvedSchema.finalToOriginalColumnNames = new LinkedHashMap<>(); + for (Entry> entry : + evolvedSchema.finalToOriginalColumnNames.entrySet()) { + newEvolvedSchema.finalToOriginalColumnNames.put( + entry.getKey(), new LinkedHashMap<>(entry.getValue())); + } + newEvolvedSchema.originalToFinalTableNames = + new 
LinkedHashMap<>(evolvedSchema.originalToFinalTableNames); + newEvolvedSchema.originalToFinalColumnNames = new LinkedHashMap<>(); + for (Entry> entry : + evolvedSchema.originalToFinalColumnNames.entrySet()) { + newEvolvedSchema.originalToFinalColumnNames.put( + entry.getKey(), new LinkedHashMap<>(entry.getValue())); + } + return newEvolvedSchema; + } + + public static EvolvedSchema merge(EvolvedSchema... schemas) { + EvolvedSchema firstNotNullSchema = null; + int i = 0; + for (; i < schemas.length; i++) { + if (schemas[i] != null) { + firstNotNullSchema = schemas[i]; + i++; + break; + } + } + if (i == schemas.length) { + return firstNotNullSchema; + } + + if (firstNotNullSchema == null) { + return null; + } + EvolvedSchema mergedSchema = deepCopy(firstNotNullSchema); + + for (; i < schemas.length; i++) { + if (schemas[i] != null) { + EvolvedSchema newSchema = schemas[i]; + for (Entry finalOriginalTableName : + newSchema.finalToOriginalTableNames.entrySet()) { + if (!finalOriginalTableName.getValue().isEmpty()) { + mergedSchema.renameTable( + finalOriginalTableName.getValue(), finalOriginalTableName.getKey()); + } + } + for (Entry> finalTableNameColumnNameMapEntry : + newSchema.finalToOriginalColumnNames.entrySet()) { + for (Entry finalColNameOriginalColNameEntry : + finalTableNameColumnNameMapEntry.getValue().entrySet()) { + if (!finalColNameOriginalColNameEntry.getValue().isEmpty()) { + String finalTableName = finalTableNameColumnNameMapEntry.getKey(); + String finalColName = finalColNameOriginalColNameEntry.getKey(); + String originalColName = finalColNameOriginalColNameEntry.getValue(); + mergedSchema.renameColumn(finalTableName, originalColName, finalColName); + } + } + } + } + } + return mergedSchema; + } + + public Schema rewriteToOriginal(Schema schema) { + return rewriteToOriginal(schema, null); + } + + public Schema rewriteToOriginal( + Schema schema, Function tableSchemaTransformer) { + Schema copySchema = new Schema(); + for (TableSchema tableSchema : 
schema.getTableSchemaMap().values()) { + TableSchema originalSchema = rewriteToOriginal(tableSchema); + if (tableSchemaTransformer != null) { + originalSchema = tableSchemaTransformer.apply(originalSchema); + } + copySchema.registerTableSchema(originalSchema); + } + return copySchema; + } + + public void rewriteToFinal( + AbstractAlignedChunkMetadata abstractAlignedChunkMetadata, String originalTableName) { + for (IChunkMetadata iChunkMetadata : abstractAlignedChunkMetadata.getValueChunkMetadataList()) { + if (iChunkMetadata != null) { + iChunkMetadata.setMeasurementUid( + getFinalColumnName(originalTableName, iChunkMetadata.getMeasurementUid())); + } + } + } + + @Override + public long ramBytesUsed() { + return RamUsageEstimator.sizeOfMap(this.finalToOriginalTableNames) + + RamUsageEstimator.sizeOfMap(this.finalToOriginalColumnNames) + + RamUsageEstimator.sizeOfMap(this.originalToFinalTableNames) + + RamUsageEstimator.sizeOfMap(this.originalToFinalColumnNames); + } +} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/evolution/EvolvedSchemaCache.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/evolution/EvolvedSchemaCache.java new file mode 100644 index 0000000000000..d036528f6de65 --- /dev/null +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/evolution/EvolvedSchemaCache.java @@ -0,0 +1,65 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution; + +import org.apache.iotdb.db.storageengine.dataregion.tsfile.fileset.TsFileSet; + +import com.github.benmanes.caffeine.cache.Cache; +import com.github.benmanes.caffeine.cache.Caffeine; +import com.github.benmanes.caffeine.cache.Weigher; + +import java.util.function.Supplier; + +public class EvolvedSchemaCache { + + private final Cache cache; + + private EvolvedSchemaCache() { + cache = + Caffeine.newBuilder() + .weigher( + (Weigher) + (k, v) -> { + // TsFileSet is always in memory, do not count it + return (int) v.ramBytesUsed(); + }) + .maximumWeight( + // TODO-Sevo configurable + 128 * 1024 * 1024L) + .build(); + } + + public EvolvedSchema computeIfAbsent( + TsFileSet tsFileSet, Supplier schemaSupplier) { + return cache.get(tsFileSet, k -> schemaSupplier.get()); + } + + public void invalidate(TsFileSet tsFileSet) { + cache.invalidate(tsFileSet); + } + + public static EvolvedSchemaCache getInstance() { + return InstanceHolder.INSTANCE; + } + + private static class InstanceHolder { + private static final EvolvedSchemaCache INSTANCE = new EvolvedSchemaCache(); + } +} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/evolution/SchemaEvolution.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/evolution/SchemaEvolution.java new file mode 100644 index 0000000000000..109998dec27ee --- /dev/null +++ 
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/evolution/SchemaEvolution.java @@ -0,0 +1,99 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution; + +import org.apache.iotdb.db.utils.io.BufferSerializable; +import org.apache.iotdb.db.utils.io.StreamSerializable; + +import org.apache.tsfile.utils.ReadWriteForEncodingUtils; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.List; + +/** A schema evolution operation that can be applied to a TableSchemaMap. */ +public interface SchemaEvolution extends StreamSerializable, BufferSerializable { + + /** + * Apply this schema evolution operation to the given metadata. 
+ * + * @param schema the schema to apply the operation to + */ + void applyTo(EvolvedSchema schema); + + SchemaEvolutionType getEvolutionType(); + + enum SchemaEvolutionType { + TABLE_RENAME, + COLUMN_RENAME + } + + static SchemaEvolution createFrom(int type) { + if (type < 0 || type > SchemaEvolutionType.values().length) { + throw new IllegalArgumentException("Invalid evolution type: " + type); + } + SchemaEvolution evolution; + SchemaEvolutionType evolutionType = SchemaEvolutionType.values()[type]; + switch (evolutionType) { + case TABLE_RENAME: + evolution = new TableRename(); + break; + case COLUMN_RENAME: + evolution = new ColumnRename(); + break; + default: + throw new IllegalArgumentException("Invalid evolution type: " + evolutionType); + } + return evolution; + } + + static SchemaEvolution createFrom(InputStream stream) throws IOException { + int type = ReadWriteForEncodingUtils.readVarInt(stream); + SchemaEvolution evolution = createFrom(type); + evolution.deserialize(stream); + return evolution; + } + + static SchemaEvolution createFrom(ByteBuffer buffer) { + int type = ReadWriteForEncodingUtils.readVarInt(buffer); + SchemaEvolution evolution = createFrom(type); + evolution.deserialize(buffer); + return evolution; + } + + static List createListFrom(ByteBuffer buffer) { + int size = ReadWriteForEncodingUtils.readVarInt(buffer); + List list = new ArrayList<>(size); + for (int i = 0; i < size; i++) { + list.add(createFrom(buffer)); + } + return list; + } + + static void serializeList(List list, OutputStream stream) throws IOException { + ReadWriteForEncodingUtils.writeVarInt(list.size(), stream); + for (SchemaEvolution evolution : list) { + evolution.serialize(stream); + } + } +} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/evolution/SchemaEvolutionFile.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/evolution/SchemaEvolutionFile.java new file mode 100644 
index 0000000000000..f45068acfaf62 --- /dev/null +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/evolution/SchemaEvolutionFile.java @@ -0,0 +1,139 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution; + +import org.apache.iotdb.commons.conf.IoTDBConstant; + +import org.apache.tsfile.common.constant.TsFileConstant; + +import java.io.BufferedInputStream; +import java.io.BufferedOutputStream; +import java.io.File; +import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.RandomAccessFile; +import java.nio.channels.FileChannel; +import java.util.Collection; + +/** SchemaEvolutionFile manages schema evolutions related to a TsFileSet. */ +public class SchemaEvolutionFile { + + private String filePath; + + public SchemaEvolutionFile(String filePath) { + this.filePath = filePath; + } + + /** + * Recover the SchemaEvolutionFile if it is broken. 
+ * + * @return true if the file exists false otherwise + * @throws IOException if the file cannot be recovered + */ + private boolean recoverFile(boolean forWrite) throws IOException { + File file = new File(filePath); + if (!file.exists() || file.length() < Long.BYTES) { + if (file.exists()) { + boolean ignored = file.delete(); + } + if (forWrite) { + try (RandomAccessFile randomAccessFile = new RandomAccessFile(filePath, "rw")) { + randomAccessFile.writeLong(8); + } + } + return false; + } + + long length = file.length(); + long validLength = readValidLength(); + if (validLength == -1) { + return true; + } + if (length > validLength) { + try (FileOutputStream fis = new FileOutputStream(file, true); + FileChannel fileChannel = fis.getChannel()) { + fileChannel.truncate(validLength); + } + } + return true; + } + + public long readValidLength() throws IOException { + try (RandomAccessFile randomAccessFile = new RandomAccessFile(filePath, "r")) { + return randomAccessFile.readLong(); + } + } + + public void append(Collection schemaEvolutions) throws IOException { + recoverFile(true); + + try (FileOutputStream fos = new FileOutputStream(filePath, true); + BufferedOutputStream bos = new BufferedOutputStream(fos)) { + for (SchemaEvolution schemaEvolution : schemaEvolutions) { + schemaEvolution.serialize(bos); + } + } + + File file = new File(filePath); + long newLength = file.length(); + try (RandomAccessFile randomAccessFile = new RandomAccessFile(filePath, "rw")) { + randomAccessFile.writeLong(newLength); + } + } + + public EvolvedSchema readAsSchema() throws IOException { + boolean exists = recoverFile(false); + if (!exists) { + return null; + } + + EvolvedSchema evolvedSchema = new EvolvedSchema(); + try (FileInputStream fis = new FileInputStream(filePath); + BufferedInputStream bis = new BufferedInputStream(fis)) { + // skip valid length + long skipped = bis.skip(8); + if (skipped != 8) { + throw new IOException("Cannot skip the length of SchemaEvolutionFile"); + 
} + while (bis.available() > 0) { + SchemaEvolution evolution = SchemaEvolution.createFrom(bis); + evolution.applyTo(evolvedSchema); + } + } + return evolvedSchema; + } + + public String getFilePath() { + return filePath; + } + + // pipe will associate a TsFile with a SchemaEvolutionFile to record the schema evolutions + // this method is used to get the associated SchemaEvolutionFile name for a given TsFile + public static String getTsFileAssociatedSchemaEvolutionFileName(File tsFile) { + return tsFile + .getName() + .replace(TsFileConstant.TSFILE_SUFFIX, IoTDBConstant.SCHEMA_EVOLUTION_FILE_SUFFIX); + } + + public static File getTsFileAssociatedSchemaEvolutionFile(File tsFile) { + return new File(tsFile.getParentFile(), getTsFileAssociatedSchemaEvolutionFileName(tsFile)); + } +} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/evolution/TableRename.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/evolution/TableRename.java new file mode 100644 index 0000000000000..a37557f45aa88 --- /dev/null +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/evolution/TableRename.java @@ -0,0 +1,119 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution; + +import org.apache.tsfile.file.metadata.IDeviceID; +import org.apache.tsfile.file.metadata.IDeviceID.Factory; +import org.apache.tsfile.utils.ReadWriteForEncodingUtils; +import org.apache.tsfile.utils.ReadWriteIOUtils; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.nio.ByteBuffer; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +/** A schema evolution operation that renames a table in a schema map. */ +public class TableRename implements SchemaEvolution { + + private String nameBefore; + private String nameAfter; + + // for deserialization + public TableRename() {} + + public TableRename(String nameBefore, String nameAfter) { + this.nameBefore = nameBefore.toLowerCase(); + this.nameAfter = nameAfter.toLowerCase(); + } + + @Override + public void applyTo(EvolvedSchema evolvedSchema) { + evolvedSchema.renameTable(nameBefore, nameAfter); + } + + @Override + public SchemaEvolutionType getEvolutionType() { + return SchemaEvolutionType.TABLE_RENAME; + } + + @Override + public long serialize(OutputStream stream) throws IOException { + long size = ReadWriteForEncodingUtils.writeVarInt(getEvolutionType().ordinal(), stream); + size += ReadWriteIOUtils.writeVar(nameBefore, stream); + size += ReadWriteIOUtils.writeVar(nameAfter, stream); + return size; + } + + @Override + public void deserialize(InputStream stream) throws IOException { + nameBefore = ReadWriteIOUtils.readVarIntString(stream); + nameAfter = ReadWriteIOUtils.readVarIntString(stream); + } + + @Override + public long 
serialize(ByteBuffer buffer) { + long size = ReadWriteForEncodingUtils.writeVarInt(getEvolutionType().ordinal(), buffer); + size += ReadWriteIOUtils.writeVar(nameBefore, buffer); + size += ReadWriteIOUtils.writeVar(nameAfter, buffer); + return size; + } + + @Override + public void deserialize(ByteBuffer buffer) { + nameBefore = ReadWriteIOUtils.readVarIntString(buffer); + nameAfter = ReadWriteIOUtils.readVarIntString(buffer); + } + + public String getNameBefore() { + return nameBefore; + } + + public String getNameAfter() { + return nameAfter; + } + + @SuppressWarnings("SuspiciousSystemArraycopy") + public IDeviceID rewriteDeviceId(IDeviceID deviceId) { + if (!deviceId.getTableName().equals(nameBefore)) { + return deviceId; + } + + Object[] segments = deviceId.getSegments(); + String[] newSegments = new String[segments.length]; + newSegments[0] = nameAfter; + System.arraycopy(segments, 1, newSegments, 1, segments.length - 1); + return Factory.DEFAULT_FACTORY.create(newSegments); + } + + public void rewriteMap(Map map) { + List affectedDeviceId = + map.keySet().stream() + .filter(k -> k.getTableName().equals(getNameBefore())) + .collect(Collectors.toList()); + for (IDeviceID deviceID : affectedDeviceId) { + IDeviceID newDeviceId = rewriteDeviceId(deviceID); + T removed = map.remove(deviceID); + map.put(newDeviceId, removed); + } + } +} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/fileset/TsFileSet.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/fileset/TsFileSet.java new file mode 100644 index 0000000000000..b320dc831b252 --- /dev/null +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/fileset/TsFileSet.java @@ -0,0 +1,164 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.db.storageengine.dataregion.tsfile.fileset; + +import org.apache.iotdb.commons.conf.IoTDBConstant; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.EvolvedSchema; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.EvolvedSchemaCache; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.SchemaEvolution; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.SchemaEvolutionFile; + +import org.apache.tsfile.external.commons.io.FileUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.File; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.concurrent.locks.ReentrantReadWriteLock; + +/** TsFileSet represents a set of TsFiles in a time partition whose version <= endVersion. 
*/ +public class TsFileSet implements Comparable { + + private static final Logger LOGGER = LoggerFactory.getLogger(TsFileSet.class); + public static final String FILE_SET_DIR_NAME = "filesets"; + + private final long endVersion; + private final File fileSetDir; + private final ReentrantReadWriteLock lock; + private SchemaEvolutionFile schemaEvolutionFile; + + public TsFileSet(long endVersion, String fileSetsDir, boolean recover) { + this.endVersion = endVersion; + this.fileSetDir = new File(fileSetsDir + File.separator + endVersion); + this.lock = new ReentrantReadWriteLock(); + + if (recover) { + recover(); + } else { + //noinspection ResultOfMethodCallIgnored + fileSetDir.mkdirs(); + } + + if (schemaEvolutionFile == null) { + schemaEvolutionFile = + new SchemaEvolutionFile( + fileSetDir + + File.separator + + endVersion + + IoTDBConstant.SCHEMA_EVOLUTION_FILE_SUFFIX); + } + } + + private void recover() { + File[] files = fileSetDir.listFiles(); + if (files != null) { + for (File file : files) { + if (file.getName().endsWith(IoTDBConstant.SCHEMA_EVOLUTION_FILE_SUFFIX)) { + schemaEvolutionFile = new SchemaEvolutionFile(file.getAbsolutePath()); + } + } + } + } + + public void appendSchemaEvolution(Collection schemaEvolutions) + throws IOException { + writeLock(); + try { + schemaEvolutionFile.append(schemaEvolutions); + EvolvedSchemaCache.getInstance().invalidate(this); + } finally { + writeUnlock(); + } + } + + public EvolvedSchema readEvolvedSchema() throws IOException { + readLock(); + try { + return EvolvedSchemaCache.getInstance() + .computeIfAbsent( + this, + () -> { + try { + return schemaEvolutionFile.readAsSchema(); + } catch (IOException e) { + throw new RuntimeException(e); + } + }); + } finally { + readUnlock(); + } + } + + @Override + public int compareTo(TsFileSet o) { + return Long.compare(endVersion, o.endVersion); + } + + public void writeLock() { + lock.writeLock().lock(); + } + + public void readLock() { + lock.readLock().lock(); + } + + public 
void writeUnlock() { + lock.writeLock().unlock(); + } + + public void readUnlock() { + lock.readLock().unlock(); + } + + public long getEndVersion() { + return endVersion; + } + + @Override + public String toString() { + return "TsFileSet{" + "endVersion=" + endVersion + ", fileSetDir=" + fileSetDir + '}'; + } + + public void remove() { + FileUtils.deleteQuietly(fileSetDir); + } + + public boolean contains(TsFileResource tsFileResource) { + return tsFileResource.getVersion() <= endVersion; + } + + public static EvolvedSchema getMergedEvolvedSchema(List tsFileSetList) { + List list = new ArrayList<>(); + for (TsFileSet fileSet : tsFileSetList) { + try { + EvolvedSchema readEvolvedSchema = fileSet.readEvolvedSchema(); + list.add(readEvolvedSchema); + } catch (IOException e) { + LOGGER.warn("Cannot read evolved schema from {}, skipping it", fileSet); + } + } + + return EvolvedSchema.merge(list.toArray(new EvolvedSchema[0])); + } +} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/generator/TsFileNameGenerator.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/generator/TsFileNameGenerator.java index 16be82188e9ca..698b4b95be681 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/generator/TsFileNameGenerator.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/generator/TsFileNameGenerator.java @@ -363,6 +363,7 @@ public static List getNewInnerCompactionTargetFileResources( TsFileResourceStatus.COMPACTING); targetResource.setSeq(sequence); targetResources.add(targetResource); + targetResource.setTsFileManager(resource.getTsFileManager()); } return targetResources; } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/wal/recover/file/UnsealedTsFileRecoverPerformer.java 
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/wal/recover/file/UnsealedTsFileRecoverPerformer.java index f4da410733458..0c222a7dbda75 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/wal/recover/file/UnsealedTsFileRecoverPerformer.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/wal/recover/file/UnsealedTsFileRecoverPerformer.java @@ -34,9 +34,9 @@ import org.apache.iotdb.db.storageengine.dataregion.memtable.IWritableMemChunk; import org.apache.iotdb.db.storageengine.dataregion.memtable.IWritableMemChunkGroup; import org.apache.iotdb.db.storageengine.dataregion.modification.DeletionPredicate; -import org.apache.iotdb.db.storageengine.dataregion.modification.IDPredicate.FullExactMatch; import org.apache.iotdb.db.storageengine.dataregion.modification.ModEntry; import org.apache.iotdb.db.storageengine.dataregion.modification.TableDeletionEntry; +import org.apache.iotdb.db.storageengine.dataregion.modification.TagPredicate.FullExactMatch; import org.apache.iotdb.db.storageengine.dataregion.modification.TreeDeletionEntry; import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource; import org.apache.iotdb.db.storageengine.dataregion.tsfile.timeindex.FileTimeIndexCacheRecorder; diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/load/active/ActiveLoadDirScanner.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/load/active/ActiveLoadDirScanner.java index cedc4d5e29f63..0a567fbf187f1 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/load/active/ActiveLoadDirScanner.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/load/active/ActiveLoadDirScanner.java @@ -102,14 +102,22 @@ private void scan() throws IOException { final boolean isGeneratedByPipe = listeningDir.equals(IOTDB_CONFIG.getLoadActiveListeningPipeDir()); final File 
listeningDirFile = new File(listeningDir); - try (final Stream fileStream = - FileUtils.streamFiles(listeningDirFile, true, (String[]) null)) { + try (Stream pathStream = Files.walk(listeningDirFile.toPath()); + final Stream fileStream = + pathStream + .map(Path::toFile) + .filter( + file1 -> + isTsFileCompleted(file1.getAbsolutePath()) + && !file1 + .getAbsolutePath() + .contains(File.separator + "datanode" + File.separator) + || isDataNodeDir(file1.getAbsolutePath()))) { try { fileStream .filter(file -> !activeLoadTsFileLoader.isFilePendingOrLoading(file)) .filter(File::exists) .map(file -> LoadUtil.getTsFilePath(file.getAbsolutePath())) - .filter(this::isTsFileCompleted) .limit(currentAllowedPendingSize) .forEach( filePath -> { @@ -185,6 +193,10 @@ private boolean isTsFileCompleted(final String file) { } } + private boolean isDataNodeDir(final String file) { + return file.endsWith("datanode"); + } + private void hotReloadActiveLoadDirs() { try { // Hot reload active load listening dirs if active listening is enabled diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/load/active/ActiveLoadPendingQueue.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/load/active/ActiveLoadPendingQueue.java index 7b5f7166197d2..2bd34fd72649a 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/load/active/ActiveLoadPendingQueue.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/load/active/ActiveLoadPendingQueue.java @@ -83,6 +83,7 @@ public static class ActiveLoadEntry { private final String pendingDir; private final boolean isGeneratedByPipe; private final boolean isTableModel; + private final boolean isDataNodeDir; public ActiveLoadEntry( String file, String pendingDir, boolean isGeneratedByPipe, boolean isTableModel) { @@ -90,6 +91,7 @@ public ActiveLoadEntry( this.pendingDir = pendingDir; this.isGeneratedByPipe = isGeneratedByPipe; this.isTableModel = isTableModel; + 
this.isDataNodeDir = file.endsWith("datanode"); } public String getFile() { @@ -107,5 +109,9 @@ public boolean isGeneratedByPipe() { public boolean isTableModel() { return isTableModel; } + + public boolean isDataNodeDir() { + return isDataNodeDir; + } } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/load/active/ActiveLoadTsFileLoader.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/load/active/ActiveLoadTsFileLoader.java index d0be2ead5cb22..44cad7ff287e4 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/load/active/ActiveLoadTsFileLoader.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/load/active/ActiveLoadTsFileLoader.java @@ -240,6 +240,9 @@ private TSStatus loadTsFile( ? null : parentFile.getName()); } + if (entry.isDataNodeDir()) { + statement.setDatabase(null); + } return executeStatement( entry.isGeneratedByPipe() ? new PipeEnrichedStatement(statement) : statement, session); @@ -273,7 +276,11 @@ private void handleLoadFailure( entry.getFile(), entry.isGeneratedByPipe(), status); - removeFileAndResourceAndModsToFailDir(entry.getFile()); + if (!entry.isDataNodeDir()) { + removeFileAndResourceAndModsToFailDir(entry.getFile()); + } else { + removeDirToFailDir(entry.getFile()); + } } } @@ -282,7 +289,11 @@ private void handleFileNotFoundException(final ActiveLoadPendingQueue.ActiveLoad "Failed to auto load tsfile {} (isGeneratedByPipe = {}) due to file not found, will skip this file.", entry.getFile(), entry.isGeneratedByPipe()); - removeFileAndResourceAndModsToFailDir(entry.getFile()); + if (entry.isDataNodeDir()) { + removeDirToFailDir(entry.getFile()); + } else { + removeFileAndResourceAndModsToFailDir(entry.getFile()); + } } private void handleOtherException( @@ -293,18 +304,41 @@ private void handleOtherException( entry.getFile(), entry.isGeneratedByPipe(), e); - removeFileAndResourceAndModsToFailDir(entry.getFile()); + if 
(entry.isDataNodeDir()) { + removeDirToFailDir(entry.getFile()); + } else { + removeFileAndResourceAndModsToFailDir(entry.getFile()); + } } } private void removeFileAndResourceAndModsToFailDir(final String filePath) { - removeToFailDir(filePath); - removeToFailDir(LoadUtil.getTsFileResourcePath(filePath)); - removeToFailDir(LoadUtil.getTsFileModsV1Path(filePath)); - removeToFailDir(LoadUtil.getTsFileModsV2Path(filePath)); + removeFileToFailDir(filePath); + removeFileToFailDir(LoadUtil.getTsFileResourcePath(filePath)); + removeFileToFailDir(LoadUtil.getTsFileModsV1Path(filePath)); + removeFileToFailDir(LoadUtil.getTsFileModsV2Path(filePath)); + } + + private void removeDirToFailDir(final String filePath) { + final File sourceFile = new File(filePath); + // prevent the resource or mods not exist + if (!sourceFile.exists()) { + return; + } + + final File targetDir = new File(failDir.get()); + try { + RetryUtils.retryOnException( + () -> { + org.apache.iotdb.commons.utils.FileUtils.moveDirWithMD5Check(sourceFile, targetDir); + return null; + }); + } catch (final IOException e) { + LOGGER.warn("Error occurred during moving file {} to fail directory.", filePath, e); + } } - private void removeToFailDir(final String filePath) { + private void removeFileToFailDir(final String filePath) { final File sourceFile = new File(filePath); // prevent the resource or mods not exist if (!sourceFile.exists()) { diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/load/config/LoadTsFileConfigurator.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/load/config/LoadTsFileConfigurator.java index 8478486781be5..9bffbfffce46a 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/load/config/LoadTsFileConfigurator.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/load/config/LoadTsFileConfigurator.java @@ -26,6 +26,7 @@ import javax.annotation.Nullable; +import java.io.File; import 
java.util.Arrays; import java.util.Collections; import java.util.HashSet; @@ -60,11 +61,21 @@ public static void validateParameters(final String key, final String value) { case ASYNC_LOAD_KEY: validateAsyncLoadParam(value); break; + case SEVO_FILE_PATH_KEY: + validateSevoFilePathParam(value); + break; default: throw new SemanticException("Invalid parameter '" + key + "' for LOAD TSFILE command."); } } + private static void validateSevoFilePathParam(String value) { + File file = new File(value); + if (!file.exists()) { + throw new SemanticException("The sevo file " + value + " does not exist."); + } + } + public static void validateSynonymParameters(final Map parameters) { if (parameters.containsKey(DATABASE_KEY) && parameters.containsKey(DATABASE_NAME_KEY)) { throw new SemanticException( @@ -115,6 +126,13 @@ public static int parseOrGetDefaultDatabaseLevel(final Map loadA return Objects.nonNull(databaseName) ? databaseName.toLowerCase(Locale.ENGLISH) : null; } + public static final String SEVO_FILE_PATH_KEY = "sevo-file-path"; + + public static @Nullable File parseSevoFile(final Map loadAttributes) { + String sevoFilePath = loadAttributes.get(SEVO_FILE_PATH_KEY); + return sevoFilePath != null ? 
new File(sevoFilePath) : null; + } + public static final String ON_SUCCESS_KEY = "on-success"; public static final String ON_SUCCESS_DELETE_VALUE = "delete"; public static final String ON_SUCCESS_NONE_VALUE = "none"; diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/load/splitter/AlignedChunkData.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/load/splitter/AlignedChunkData.java index 72268168258ee..216a22eaa6f57 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/load/splitter/AlignedChunkData.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/load/splitter/AlignedChunkData.java @@ -22,6 +22,7 @@ import org.apache.iotdb.common.rpc.thrift.TTimePartitionSlot; import org.apache.iotdb.commons.utils.TimePartitionUtils; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.load.LoadTsFilePieceNode; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.EvolvedSchema; import org.apache.tsfile.enums.TSDataType; import org.apache.tsfile.exception.write.PageException; @@ -66,7 +67,7 @@ public class AlignedChunkData implements ChunkData { protected static final Binary DEFAULT_BINARY = null; protected final TTimePartitionSlot timePartitionSlot; - protected final IDeviceID device; + protected IDeviceID device; protected List chunkHeaderList; protected PublicBAOS byteStream; @@ -508,4 +509,14 @@ public String toString() { + needDecodeChunk + '}'; } + + @Override + public void rewriteToFinal(EvolvedSchema evolvedSchema) { + IDeviceID newDevice = evolvedSchema.rewriteToFinal(device); + chunkHeaderList.forEach( + h -> + h.setMeasurementID( + evolvedSchema.getFinalColumnName(device.getTableName(), h.getMeasurementID()))); + device = newDevice; + } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/load/splitter/DeletionData.java 
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/load/splitter/DeletionData.java index 0695c7a84def9..c140b79bc1226 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/load/splitter/DeletionData.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/load/splitter/DeletionData.java @@ -22,6 +22,7 @@ import org.apache.iotdb.commons.exception.IllegalPathException; import org.apache.iotdb.db.storageengine.dataregion.modification.ModEntry; import org.apache.iotdb.db.storageengine.dataregion.modification.ModificationFile; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.EvolvedSchema; import org.apache.tsfile.utils.ReadWriteIOUtils; @@ -31,7 +32,7 @@ import java.io.InputStream; public class DeletionData implements TsFileData { - private final ModEntry deletion; + private ModEntry deletion; public DeletionData(ModEntry deletion) { this.deletion = deletion; @@ -51,6 +52,11 @@ public TsFileDataType getType() { return TsFileDataType.DELETION; } + @Override + public void rewriteToFinal(EvolvedSchema evolvedSchema) { + deletion = evolvedSchema.rewriteToFinal(deletion); + } + @Override public void serialize(DataOutputStream stream) throws IOException { ReadWriteIOUtils.write(getType().ordinal(), stream); diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/load/splitter/NonAlignedChunkData.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/load/splitter/NonAlignedChunkData.java index 2310b9cb95c3e..5ad970c38de65 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/load/splitter/NonAlignedChunkData.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/load/splitter/NonAlignedChunkData.java @@ -21,6 +21,7 @@ import org.apache.iotdb.common.rpc.thrift.TTimePartitionSlot; import org.apache.iotdb.commons.utils.TimePartitionUtils; +import 
org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.EvolvedSchema; import org.apache.tsfile.exception.write.PageException; import org.apache.tsfile.file.header.ChunkHeader; @@ -52,7 +53,7 @@ public class NonAlignedChunkData implements ChunkData { private final TTimePartitionSlot timePartitionSlot; - private final IDeviceID device; + private IDeviceID device; private final ChunkHeader chunkHeader; private final PublicBAOS byteStream; @@ -316,6 +317,14 @@ private void close() throws IOException { stream.close(); } + @Override + public void rewriteToFinal(EvolvedSchema evolvedSchema) { + IDeviceID newDevice = evolvedSchema.rewriteToFinal(device); + chunkHeader.setMeasurementID( + evolvedSchema.getFinalColumnName(device.getTableName(), chunkHeader.getMeasurementID())); + device = newDevice; + } + @Override public String toString() { return "NonAlignedChunkData{" diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/load/splitter/TsFileData.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/load/splitter/TsFileData.java index f24eb45c01bc6..d3c5d150b37e7 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/load/splitter/TsFileData.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/load/splitter/TsFileData.java @@ -20,6 +20,7 @@ package org.apache.iotdb.db.storageengine.load.splitter; import org.apache.iotdb.commons.exception.IllegalPathException; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.EvolvedSchema; import org.apache.tsfile.exception.write.PageException; import org.apache.tsfile.utils.ReadWriteIOUtils; @@ -35,6 +36,8 @@ public interface TsFileData { void serialize(DataOutputStream stream) throws IOException; + void rewriteToFinal(EvolvedSchema evolvedSchema); + static TsFileData deserialize(InputStream stream) throws IOException, PageException, IllegalPathException { final TsFileDataType type = 
TsFileDataType.values()[ReadWriteIOUtils.readInt(stream)]; diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/load/splitter/TsFileSplitter.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/load/splitter/TsFileSplitter.java index 5a75f4fb8e085..25f772b15420f 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/load/splitter/TsFileSplitter.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/load/splitter/TsFileSplitter.java @@ -26,6 +26,7 @@ import org.apache.iotdb.db.exception.load.LoadFileException; import org.apache.iotdb.db.storageengine.dataregion.modification.ModEntry; import org.apache.iotdb.db.storageengine.dataregion.modification.ModificationFile; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.EvolvedSchema; import org.apache.tsfile.common.conf.TSFileConfig; import org.apache.tsfile.common.conf.TSFileDescriptor; @@ -67,7 +68,7 @@ public class TsFileSplitter { private static final IoTDBConfig CONFIG = IoTDBDescriptor.getInstance().getConfig(); private final File tsFile; - private final TsFileDataConsumer consumer; + private TsFileDataConsumer consumer; private Map offset2ChunkMetadata = new HashMap<>(); private List deletions = new ArrayList<>(); private Map> pageIndex2ChunkData = new HashMap<>(); @@ -77,6 +78,7 @@ public class TsFileSplitter { private boolean isAligned; private int timeChunkIndexOfCurrentValueColumn = 0; private Set timePartitionSlots = new HashSet<>(); + private EvolvedSchema evolvedSchema; // Maintain the number of times the chunk of each measurement appears. 
private Map valueColumn2TimeChunkIndex = new HashMap<>(); @@ -87,9 +89,13 @@ public class TsFileSplitter { private List> pageIndex2TimesList = null; private List isTimeChunkNeedDecodeList = new ArrayList<>(); - public TsFileSplitter(File tsFile, TsFileDataConsumer consumer) { + public TsFileSplitter(File tsFile, TsFileDataConsumer consumer, EvolvedSchema evolvedSchema) { this.tsFile = tsFile; this.consumer = consumer; + if (evolvedSchema != null) { + this.evolvedSchema = evolvedSchema; + this.consumer = new SchemaEvolutionTsFileDataConsumer(this.consumer, evolvedSchema); + } } @SuppressWarnings({"squid:S3776", "squid:S6541"}) @@ -588,4 +594,38 @@ private TsPrimitiveType[] decodeValuePage( public interface TsFileDataConsumer { boolean apply(TsFileData tsFileData) throws LoadFileException; } + + public abstract class WrappedTsFileDataConsumer implements TsFileDataConsumer { + + private TsFileDataConsumer delegate; + + public WrappedTsFileDataConsumer(TsFileDataConsumer delegate) { + this.delegate = delegate; + } + + protected abstract TsFileData rewrite(TsFileData tsFileData); + + @Override + public boolean apply(TsFileData tsFileData) throws LoadFileException { + tsFileData = rewrite(tsFileData); + return delegate.apply(tsFileData); + } + } + + private class SchemaEvolutionTsFileDataConsumer extends WrappedTsFileDataConsumer { + + private EvolvedSchema evolvedSchema; + + public SchemaEvolutionTsFileDataConsumer( + TsFileDataConsumer delegate, EvolvedSchema evolvedSchema) { + super(delegate); + this.evolvedSchema = evolvedSchema; + } + + @Override + protected TsFileData rewrite(TsFileData tsFileData) { + tsFileData.rewriteToFinal(evolvedSchema); + return tsFileData; + } + } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/load/util/LoadUtil.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/load/util/LoadUtil.java index a3d29337b865b..65362bade4d4d 100644 --- 
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/load/util/LoadUtil.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/load/util/LoadUtil.java @@ -25,6 +25,7 @@ import org.apache.iotdb.db.storageengine.dataregion.modification.ModificationFile; import org.apache.iotdb.db.storageengine.dataregion.modification.v1.ModificationFileV1; import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.SchemaEvolutionFile; import org.apache.iotdb.db.storageengine.load.active.ActiveLoadPathHelper; import org.apache.iotdb.db.storageengine.load.disk.ILoadDiskSelector; import org.apache.iotdb.db.storageengine.rescon.disk.FolderManager; @@ -41,7 +42,9 @@ import java.util.Map; import java.util.Objects; +import static org.apache.iotdb.commons.utils.FileUtils.copyDirWithMD5Check; import static org.apache.iotdb.commons.utils.FileUtils.copyFileWithMD5Check; +import static org.apache.iotdb.commons.utils.FileUtils.moveDirWithMD5Check; import static org.apache.iotdb.commons.utils.FileUtils.moveFileWithMD5Check; public class LoadUtil { @@ -72,6 +75,26 @@ public static boolean loadTsFileAsyncToActiveDir( return true; } + public static boolean loadDatanodeDirAsyncToActiveDir( + final File datanodeDir, + final Map loadAttributes, + final boolean isDeleteAfterLoad) { + if (datanodeDir == null || !datanodeDir.isDirectory()) { + return true; + } + + try { + if (!loadDatanodeDirToActiveDir(loadAttributes, datanodeDir, isDeleteAfterLoad)) { + return false; + } + } catch (Exception e) { + LOGGER.warn("Fail to load tsfile to Active dir", e); + return false; + } + + return true; + } + public static String getTsFilePath(final String filePathWithResourceOrModsTail) { if (filePathWithResourceOrModsTail.endsWith(TsFileResource.RESOURCE_SUFFIX)) { return filePathWithResourceOrModsTail.substring( @@ -103,6 +126,34 @@ public static String getTsFileResourcePath(final String 
tsFilePath) { return tsFilePath + TsFileResource.RESOURCE_SUFFIX; } + private static boolean loadDatanodeDirToActiveDir( + final Map loadAttributes, final File file, final boolean isDeleteAfterLoad) + throws IOException { + if (file == null) { + return true; + } + + final File targetFilePath; + try { + targetFilePath = + loadDiskSelector.selectTargetDirectory(file.getParentFile(), file.getName(), false, 0); + } catch (Exception e) { + LOGGER.warn("Fail to load disk space of file {}", file.getAbsolutePath(), e); + return false; + } + + if (targetFilePath == null) { + LOGGER.warn("Load active listening dir is not set."); + return false; + } + final Map attributes = + Objects.nonNull(loadAttributes) ? loadAttributes : Collections.emptyMap(); + final File targetDir = ActiveLoadPathHelper.resolveTargetDir(targetFilePath, attributes); + + loadDatanodeDirAsyncToTargetDir(targetDir, file, isDeleteAfterLoad); + return true; + } + private static boolean loadTsFilesToActiveDir( final Map loadAttributes, final File file, final boolean isDeleteAfterLoad) throws IOException { @@ -170,6 +221,27 @@ public static boolean loadFilesToActiveDir( return true; } + private static void loadDatanodeDirAsyncToTargetDir( + final File targetDir, final File file, final boolean isDeleteAfterLoad) throws IOException { + if (!file.exists()) { + return; + } + if (!targetDir.exists() && !targetDir.mkdirs()) { + if (!targetDir.exists()) { + throw new IOException("Failed to create target directory " + targetDir.getAbsolutePath()); + } + } + RetryUtils.retryOnException( + () -> { + if (isDeleteAfterLoad) { + moveDirWithMD5Check(file, targetDir); + } else { + copyDirWithMD5Check(file, targetDir); + } + return null; + }); + } + private static void loadTsFileAsyncToTargetDir( final File targetDir, final File file, final boolean isDeleteAfterLoad) throws IOException { if (!file.exists()) { @@ -187,6 +259,15 @@ private static void loadTsFileAsyncToTargetDir( } else { copyFileWithMD5Check(file, targetDir); 
} + + File sevoFile = SchemaEvolutionFile.getTsFileAssociatedSchemaEvolutionFile(file); + if (sevoFile.exists()) { + if (isDeleteAfterLoad) { + moveFileWithMD5Check(sevoFile, targetDir); + } else { + copyFileWithMD5Check(sevoFile, targetDir); + } + } return null; }); } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/utils/CommonUtils.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/utils/CommonUtils.java index 784312f9f9cd2..6ec11601ad6a5 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/utils/CommonUtils.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/utils/CommonUtils.java @@ -19,17 +19,23 @@ package org.apache.iotdb.db.utils; +import org.apache.iotdb.commons.partition.executor.SeriesPartitionExecutor.FullDeviceIdKey; +import org.apache.iotdb.commons.partition.executor.SeriesPartitionExecutor.NoTableNameDeviceIdKey; +import org.apache.iotdb.commons.partition.executor.SeriesPartitionExecutor.SeriesPartitionKey; +import org.apache.iotdb.commons.schema.table.TsTable; import org.apache.iotdb.commons.schema.table.column.TsTableColumnCategory; import org.apache.iotdb.commons.service.metric.MetricService; import org.apache.iotdb.commons.service.metric.enums.Metric; import org.apache.iotdb.commons.service.metric.enums.Tag; import org.apache.iotdb.commons.utils.CommonDateTimeUtils; +import org.apache.iotdb.commons.utils.PathUtils; import org.apache.iotdb.db.exception.query.QueryProcessException; import org.apache.iotdb.db.exception.sql.SemanticException; import org.apache.iotdb.db.protocol.thrift.OperationType; import org.apache.iotdb.db.queryengine.plan.execution.IQueryExecution; import org.apache.iotdb.db.queryengine.plan.statement.StatementType; import org.apache.iotdb.db.queryengine.plan.statement.literal.BinaryLiteral; +import org.apache.iotdb.db.schemaengine.table.DataNodeTableCache; import org.apache.iotdb.db.utils.constant.SqlConstant; import org.apache.iotdb.metrics.utils.MetricLevel; import 
org.apache.iotdb.service.rpc.thrift.TSAggregationQueryReq; @@ -456,4 +462,27 @@ public static String toString(TsBlock tsBlock) { } return tsBlockBuilder.toString(); } + + public static SeriesPartitionKey getSeriesPartitionKey( + IDeviceID deviceID, String databaseName, boolean tableMustExist) { + if (databaseName != null && PathUtils.isTableModelDatabase(databaseName)) { + TsTable table = + DataNodeTableCache.getInstance() + // in unit test + .getTable(databaseName, deviceID.getTableName(), tableMustExist); + if (table == null) { + // if table does not exist, then we are creating a new table + // use the default setting + return TsTable.ALLOW_ALTER_NAME_DEFAULT + ? new NoTableNameDeviceIdKey(deviceID) + : new FullDeviceIdKey(deviceID); + } + if (table.canAlterName()) { + return new NoTableNameDeviceIdKey(deviceID); + } else { + return new FullDeviceIdKey(deviceID); + } + } + return new FullDeviceIdKey(deviceID); + } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/utils/io/IOUtils.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/utils/io/IOUtils.java new file mode 100644 index 0000000000000..3a1841190cb2b --- /dev/null +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/utils/io/IOUtils.java @@ -0,0 +1,52 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.db.utils.io; + +import org.apache.tsfile.utils.ReadWriteForEncodingUtils; + +import java.io.IOException; +import java.io.OutputStream; +import java.nio.ByteBuffer; +import java.util.List; + +// TODO: move to TsFile +public class IOUtils { + + private IOUtils() { + // util class + } + + public static long writeList(List list, ByteBuffer byteBuffer) { + long size = ReadWriteForEncodingUtils.writeVarInt(list.size(), byteBuffer); + for (BufferSerializable item : list) { + size += item.serialize(byteBuffer); + } + return size; + } + + public static long writeList(List list, OutputStream stream) + throws IOException { + long size = ReadWriteForEncodingUtils.writeVarInt(list.size(), stream); + for (StreamSerializable item : list) { + size += item.serialize(stream); + } + return size; + } +} diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/metadata/path/PatternTreeMapTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/metadata/path/PatternTreeMapTest.java index 7a7d71bef5127..6f14ac8638467 100644 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/metadata/path/PatternTreeMapTest.java +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/metadata/path/PatternTreeMapTest.java @@ -24,9 +24,9 @@ import org.apache.iotdb.commons.path.PartialPath; import org.apache.iotdb.commons.path.PatternTreeMap; import org.apache.iotdb.db.storageengine.dataregion.modification.DeletionPredicate; -import org.apache.iotdb.db.storageengine.dataregion.modification.IDPredicate.NOP; import org.apache.iotdb.db.storageengine.dataregion.modification.ModEntry; import org.apache.iotdb.db.storageengine.dataregion.modification.TableDeletionEntry; +import org.apache.iotdb.db.storageengine.dataregion.modification.TagPredicate.NOP; import org.apache.iotdb.db.storageengine.dataregion.modification.TreeDeletionEntry; import 
org.apache.iotdb.db.utils.datastructure.PatternTreeMapFactory; import org.apache.iotdb.db.utils.datastructure.PatternTreeMapFactory.ModsSerializer; diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/pipe/consensus/DeletionRecoverTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/pipe/consensus/DeletionRecoverTest.java index 06f823c0e23fb..fcb2ffdb79294 100644 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/pipe/consensus/DeletionRecoverTest.java +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/pipe/consensus/DeletionRecoverTest.java @@ -30,8 +30,8 @@ import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.DeleteDataNode; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.RelationalDeleteDataNode; import org.apache.iotdb.db.storageengine.dataregion.modification.DeletionPredicate; -import org.apache.iotdb.db.storageengine.dataregion.modification.IDPredicate.NOP; import org.apache.iotdb.db.storageengine.dataregion.modification.TableDeletionEntry; +import org.apache.iotdb.db.storageengine.dataregion.modification.TagPredicate.NOP; import org.apache.tsfile.read.common.TimeRange; import org.junit.After; diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/pipe/consensus/DeletionResourceTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/pipe/consensus/DeletionResourceTest.java index f94d909f94bd1..05c1f9361c66b 100644 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/pipe/consensus/DeletionResourceTest.java +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/pipe/consensus/DeletionResourceTest.java @@ -43,8 +43,8 @@ import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.DeleteDataNode; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.RelationalDeleteDataNode; import org.apache.iotdb.db.storageengine.dataregion.modification.DeletionPredicate; -import 
org.apache.iotdb.db.storageengine.dataregion.modification.IDPredicate.NOP; import org.apache.iotdb.db.storageengine.dataregion.modification.TableDeletionEntry; +import org.apache.iotdb.db.storageengine.dataregion.modification.TagPredicate.NOP; import org.apache.iotdb.pipe.api.customizer.parameter.PipeParameters; import org.apache.tsfile.read.common.TimeRange; diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/pipe/source/PipePlanTablePatternParseVisitorTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/pipe/source/PipePlanTablePatternParseVisitorTest.java index d01351ce60adc..82aee3b1fba33 100644 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/pipe/source/PipePlanTablePatternParseVisitorTest.java +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/pipe/source/PipePlanTablePatternParseVisitorTest.java @@ -29,8 +29,8 @@ import org.apache.iotdb.db.queryengine.plan.relational.planner.node.schema.TableDeviceAttributeUpdateNode; import org.apache.iotdb.db.storageengine.dataregion.memtable.DeviceIDFactory; import org.apache.iotdb.db.storageengine.dataregion.modification.DeletionPredicate; -import org.apache.iotdb.db.storageengine.dataregion.modification.IDPredicate; import org.apache.iotdb.db.storageengine.dataregion.modification.TableDeletionEntry; +import org.apache.iotdb.db.storageengine.dataregion.modification.TagPredicate; import org.apache.tsfile.read.common.TimeRange; import org.junit.Assert; @@ -115,12 +115,12 @@ public void testDeleteData() { new TableDeletionEntry( new DeletionPredicate( "ac", - new IDPredicate.And( - new IDPredicate.FullExactMatch( + new TagPredicate.And( + new TagPredicate.FullExactMatch( DeviceIDFactory.getInstance() .getDeviceID( new PartialPath(new String[] {"ac", "device1"}))), - new IDPredicate.SegmentExactMatch("device2", 1))), + new TagPredicate.SegmentExactMatch("device2", 1))), new TimeRange(0, 1))), "db1"), tablePattern) diff --git 
a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/planner/node/load/LoadTsFileNodeTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/planner/node/load/LoadTsFileNodeTest.java index a22ab8f6739ae..4ab9a5834ee53 100644 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/planner/node/load/LoadTsFileNodeTest.java +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/planner/node/load/LoadTsFileNodeTest.java @@ -41,7 +41,18 @@ public void testLoadSingleTsFileNode() { TsFileResource resource = new TsFileResource(new File("1")); String database = "root.db"; LoadSingleTsFileNode node = - new LoadSingleTsFileNode(new PlanNodeId(""), resource, false, database, true, 0L, false); + new LoadSingleTsFileNode( + new PlanNodeId(""), + resource, + false, + database, + true, + 0L, + false, + null, + true, + null, + resource.getTsFile()); Assert.assertTrue(node.isDeleteAfterLoad()); Assert.assertEquals(resource, node.getTsFileResource()); Assert.assertEquals(database, node.getDatabase()); diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/write/RelationalDeleteDataNodeTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/write/RelationalDeleteDataNodeTest.java index e51a8b99db91d..a58b4ae7c00da 100644 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/write/RelationalDeleteDataNodeTest.java +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/write/RelationalDeleteDataNodeTest.java @@ -24,11 +24,11 @@ import org.apache.iotdb.db.queryengine.plan.planner.plan.node.PlanNodeId; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.PlanNodeType; import org.apache.iotdb.db.storageengine.dataregion.modification.DeletionPredicate; -import 
org.apache.iotdb.db.storageengine.dataregion.modification.IDPredicate.And; -import org.apache.iotdb.db.storageengine.dataregion.modification.IDPredicate.FullExactMatch; -import org.apache.iotdb.db.storageengine.dataregion.modification.IDPredicate.NOP; -import org.apache.iotdb.db.storageengine.dataregion.modification.IDPredicate.SegmentExactMatch; import org.apache.iotdb.db.storageengine.dataregion.modification.TableDeletionEntry; +import org.apache.iotdb.db.storageengine.dataregion.modification.TagPredicate.And; +import org.apache.iotdb.db.storageengine.dataregion.modification.TagPredicate.FullExactMatch; +import org.apache.iotdb.db.storageengine.dataregion.modification.TagPredicate.NOP; +import org.apache.iotdb.db.storageengine.dataregion.modification.TagPredicate.SegmentExactMatch; import org.apache.iotdb.db.storageengine.dataregion.wal.utils.WALByteBufferForTest; import org.apache.tsfile.file.metadata.IDeviceID.Factory; diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/relational/analyzer/AggregationTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/relational/analyzer/AggregationTest.java index 2227a26f93e18..b453d314dd265 100644 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/relational/analyzer/AggregationTest.java +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/relational/analyzer/AggregationTest.java @@ -57,7 +57,7 @@ import static org.apache.iotdb.db.queryengine.plan.relational.sql.ast.ComparisonExpression.Operator.LESS_THAN; // This test covers the remaining DistributionPlan cases that TSBSTest doesn't cover -public class AggregationTest { +public class AggregationTest extends BaseAnalyzerTest { @Test public void noPushDownTest() { PlanTester planTester = new PlanTester(); diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/relational/analyzer/AnalyzerTest.java 
b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/relational/analyzer/AnalyzerTest.java index 36bed6932a877..746def2d3a319 100644 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/relational/analyzer/AnalyzerTest.java +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/relational/analyzer/AnalyzerTest.java @@ -121,7 +121,7 @@ import static org.junit.Assert.assertTrue; import static org.mockito.ArgumentMatchers.eq; -public class AnalyzerTest { +public class AnalyzerTest extends BaseAnalyzerTest { private static final AccessControl nopAccessControl = new AllowAllAccessControl(); final String database = "db"; diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/relational/analyzer/AsofJoinTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/relational/analyzer/AsofJoinTest.java index 62ec85d27745b..e19583705bf94 100644 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/relational/analyzer/AsofJoinTest.java +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/relational/analyzer/AsofJoinTest.java @@ -48,7 +48,7 @@ import static org.apache.iotdb.db.queryengine.plan.relational.sql.ast.SortItem.Ordering.ASCENDING; import static org.apache.iotdb.db.queryengine.plan.relational.sql.ast.SortItem.Ordering.DESCENDING; -public class AsofJoinTest { +public class AsofJoinTest extends BaseAnalyzerTest { @Test public void simpleTest() { PlanTester planTester = new PlanTester(); diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/relational/analyzer/BaseAnalyzerTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/relational/analyzer/BaseAnalyzerTest.java new file mode 100644 index 0000000000000..3c7f7e920cf0f --- /dev/null +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/relational/analyzer/BaseAnalyzerTest.java 
@@ -0,0 +1,51 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.db.queryengine.plan.relational.analyzer; + +import org.apache.iotdb.commons.schema.table.TsTable; +import org.apache.iotdb.db.schemaengine.table.DataNodeTableCache; + +import org.junit.AfterClass; +import org.junit.BeforeClass; + +import java.util.Collections; +import java.util.Map; + +public abstract class BaseAnalyzerTest { + + protected static final String testDb = "testdb"; + protected static final String testTable = "table1"; + + @BeforeClass + public static void setup() { + TsTable tsTable = new TsTable(testTable); + // disable alter table name to use full deviceId key for compatibility + Map properties = + Collections.singletonMap(TsTable.ALLOW_ALTER_NAME_PROPERTY, "false"); + tsTable.setProps(properties); + DataNodeTableCache.getInstance().preUpdateTable(testDb, tsTable, null); + DataNodeTableCache.getInstance().commitUpdateTable(testDb, testTable, null); + } + + @AfterClass + public static void tearDown() { + DataNodeTableCache.getInstance().invalid(testDb); + } +} diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/relational/analyzer/DistinctTest.java 
b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/relational/analyzer/DistinctTest.java index 6cccf387a66fe..b5e9c9ff44a64 100644 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/relational/analyzer/DistinctTest.java +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/relational/analyzer/DistinctTest.java @@ -45,7 +45,7 @@ import static org.apache.iotdb.db.queryengine.plan.relational.planner.assertions.PlanMatchPattern.tableScan; import static org.apache.iotdb.db.queryengine.plan.relational.planner.node.AggregationNode.Step.SINGLE; -public class DistinctTest { +public class DistinctTest extends BaseAnalyzerTest { // ================================================================== // ===================== Select Distinct Test ======================= // ================================================================== diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/relational/analyzer/InsertIntoQueryTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/relational/analyzer/InsertIntoQueryTest.java index 879356acaee95..bbd25bab02207 100644 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/relational/analyzer/InsertIntoQueryTest.java +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/relational/analyzer/InsertIntoQueryTest.java @@ -51,7 +51,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; -public class InsertIntoQueryTest { +public class InsertIntoQueryTest extends BaseAnalyzerTest { String sql; Analysis analysis; LogicalQueryPlan logicalQueryPlan; diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/relational/analyzer/JoinTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/relational/analyzer/JoinTest.java index 1b178b50449da..78cc6352a4f2a 100644 --- 
a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/relational/analyzer/JoinTest.java +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/relational/analyzer/JoinTest.java @@ -109,7 +109,7 @@ import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; -public class JoinTest { +public class JoinTest extends BaseAnalyzerTest { Analysis analysis; LogicalQueryPlan logicalQueryPlan; PlanNode logicalPlanNode; diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/relational/analyzer/LimitOffsetPushDownTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/relational/analyzer/LimitOffsetPushDownTest.java index fbe814ff25bc0..1f4c545097cf6 100644 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/relational/analyzer/LimitOffsetPushDownTest.java +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/relational/analyzer/LimitOffsetPushDownTest.java @@ -62,7 +62,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; -public class LimitOffsetPushDownTest { +public class LimitOffsetPushDownTest extends BaseAnalyzerTest { QueryId queryId = new QueryId("test_query"); SessionInfo sessionInfo = new SessionInfo( diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/relational/analyzer/PushAggregationThroughUnionTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/relational/analyzer/PushAggregationThroughUnionTest.java index 722c8ce992d47..03b6342890ad2 100644 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/relational/analyzer/PushAggregationThroughUnionTest.java +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/relational/analyzer/PushAggregationThroughUnionTest.java @@ -54,7 +54,7 @@ import static 
org.apache.iotdb.db.queryengine.plan.relational.planner.assertions.PlanMatchPattern.union; import static org.junit.Assert.assertEquals; -public class PushAggregationThroughUnionTest { +public class PushAggregationThroughUnionTest extends BaseAnalyzerTest { @Before public void setUp() { IoTDBDescriptor.getInstance().getConfig().setDataNodeId(1); diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/relational/analyzer/SortTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/relational/analyzer/SortTest.java index 2f6a028ea4a46..ad5098f474378 100644 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/relational/analyzer/SortTest.java +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/relational/analyzer/SortTest.java @@ -64,7 +64,7 @@ import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; -public class SortTest { +public class SortTest extends BaseAnalyzerTest { static Metadata metadata = new TestMetadata(); String sql; diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/relational/analyzer/SubQueryTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/relational/analyzer/SubQueryTest.java index 156123604b5d5..eac502b5fc4b7 100644 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/relational/analyzer/SubQueryTest.java +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/relational/analyzer/SubQueryTest.java @@ -63,7 +63,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; -public class SubQueryTest { +public class SubQueryTest extends BaseAnalyzerTest { String sql; Analysis analysis; LogicalQueryPlan logicalQueryPlan; diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/relational/analyzer/TableFunctionTest.java 
b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/relational/analyzer/TableFunctionTest.java index 344c69cfc1a71..362b65f8edcfe 100644 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/relational/analyzer/TableFunctionTest.java +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/relational/analyzer/TableFunctionTest.java @@ -65,7 +65,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.fail; -public class TableFunctionTest { +public class TableFunctionTest extends BaseAnalyzerTest { @Test public void testSimpleRowSemantic() { diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/relational/planner/CteSubqueryTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/relational/planner/CteSubqueryTest.java index 25b00fd66acdf..935be72198bd8 100644 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/relational/planner/CteSubqueryTest.java +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/relational/planner/CteSubqueryTest.java @@ -31,6 +31,7 @@ import org.apache.iotdb.db.queryengine.plan.execution.ExecutionResult; import org.apache.iotdb.db.queryengine.plan.execution.QueryExecution; import org.apache.iotdb.db.queryengine.plan.planner.plan.LogicalQueryPlan; +import org.apache.iotdb.db.queryengine.plan.relational.analyzer.BaseAnalyzerTest; import org.apache.iotdb.db.queryengine.plan.relational.planner.assertions.PlanMatchPattern; import org.apache.iotdb.db.queryengine.plan.relational.sql.ast.ComparisonExpression; import org.apache.iotdb.db.queryengine.plan.relational.sql.ast.LongLiteral; @@ -65,7 +66,7 @@ @PowerMockIgnore({"com.sun.org.apache.xerces.*", "javax.xml.*", "org.xml.*", "javax.management.*"}) @RunWith(PowerMockRunner.class) @PrepareForTest({Coordinator.class, SessionManager.class}) -public class CteSubqueryTest { +public class CteSubqueryTest extends 
BaseAnalyzerTest { private PlanTester planTester; @Before diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/relational/planner/ExampleTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/relational/planner/ExampleTest.java index f05b7e5e437fc..1d7ac02afa1a4 100644 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/relational/planner/ExampleTest.java +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/relational/planner/ExampleTest.java @@ -20,6 +20,7 @@ package org.apache.iotdb.db.queryengine.plan.relational.planner; import org.apache.iotdb.db.queryengine.plan.planner.plan.LogicalQueryPlan; +import org.apache.iotdb.db.queryengine.plan.relational.analyzer.BaseAnalyzerTest; import org.apache.iotdb.db.queryengine.plan.relational.planner.assertions.PlanMatchPattern; import org.apache.iotdb.db.queryengine.plan.relational.sql.ast.ArithmeticBinaryExpression; import org.apache.iotdb.db.queryengine.plan.relational.sql.ast.Cast; @@ -55,7 +56,7 @@ import static org.apache.iotdb.db.queryengine.plan.relational.sql.ast.SortItem.Ordering.ASCENDING; import static org.apache.iotdb.db.queryengine.plan.relational.sql.ast.SortItem.Ordering.DESCENDING; -public class ExampleTest { +public class ExampleTest extends BaseAnalyzerTest { @Test public void exampleTest() { PlanTester planTester = new PlanTester(); diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/relational/planner/UncorrelatedSubqueryTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/relational/planner/UncorrelatedSubqueryTest.java index 3a0a0acd20364..f4362e825161a 100644 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/relational/planner/UncorrelatedSubqueryTest.java +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/relational/planner/UncorrelatedSubqueryTest.java @@ -20,6 +20,7 @@ package 
org.apache.iotdb.db.queryengine.plan.relational.planner; import org.apache.iotdb.db.queryengine.plan.planner.plan.LogicalQueryPlan; +import org.apache.iotdb.db.queryengine.plan.relational.analyzer.BaseAnalyzerTest; import org.apache.iotdb.db.queryengine.plan.relational.planner.assertions.PlanMatchPattern; import org.apache.iotdb.db.queryengine.plan.relational.planner.ir.PredicateWithUncorrelatedScalarSubqueryReconstructor; import org.apache.iotdb.db.queryengine.plan.relational.planner.node.JoinNode; @@ -61,7 +62,7 @@ import static org.apache.iotdb.db.queryengine.plan.relational.sql.ast.ComparisonExpression.Operator.GREATER_THAN; import static org.apache.iotdb.db.queryengine.plan.relational.sql.ast.ComparisonExpression.Operator.LESS_THAN_OR_EQUAL; -public class UncorrelatedSubqueryTest { +public class UncorrelatedSubqueryTest extends BaseAnalyzerTest { private PlanTester planTester; private PredicateWithUncorrelatedScalarSubqueryReconstructor predicateWithUncorrelatedScalarSubquery; diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/relational/planner/WindowFunctionOptimizationTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/relational/planner/WindowFunctionOptimizationTest.java index e31f2f7e58065..9597bbb4ae57b 100644 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/relational/planner/WindowFunctionOptimizationTest.java +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/relational/planner/WindowFunctionOptimizationTest.java @@ -20,6 +20,7 @@ package org.apache.iotdb.db.queryengine.plan.relational.planner; import org.apache.iotdb.db.queryengine.plan.planner.plan.LogicalQueryPlan; +import org.apache.iotdb.db.queryengine.plan.relational.analyzer.BaseAnalyzerTest; import org.apache.iotdb.db.queryengine.plan.relational.planner.assertions.PlanMatchPattern; import com.google.common.collect.ImmutableList; @@ -39,7 +40,7 @@ import static 
org.apache.iotdb.db.queryengine.plan.relational.planner.assertions.PlanMatchPattern.topKRanking; import static org.apache.iotdb.db.queryengine.plan.relational.planner.assertions.PlanMatchPattern.window; -public class WindowFunctionOptimizationTest { +public class WindowFunctionOptimizationTest extends BaseAnalyzerTest { @Test public void testMergeWindowFunctions() { PlanTester planTester = new PlanTester(); diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/relational/planner/WindowFunctionTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/relational/planner/WindowFunctionTest.java index 4c10a9961dc89..22c36c31c6c47 100644 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/relational/planner/WindowFunctionTest.java +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/relational/planner/WindowFunctionTest.java @@ -20,6 +20,7 @@ package org.apache.iotdb.db.queryengine.plan.relational.planner; import org.apache.iotdb.db.queryengine.plan.planner.plan.LogicalQueryPlan; +import org.apache.iotdb.db.queryengine.plan.relational.analyzer.BaseAnalyzerTest; import org.apache.iotdb.db.queryengine.plan.relational.planner.assertions.PlanMatchPattern; import com.google.common.collect.ImmutableList; @@ -37,7 +38,7 @@ import static org.apache.iotdb.db.queryengine.plan.relational.planner.assertions.PlanMatchPattern.tableScan; import static org.apache.iotdb.db.queryengine.plan.relational.planner.assertions.PlanMatchPattern.window; -public class WindowFunctionTest { +public class WindowFunctionTest extends BaseAnalyzerTest { @Test public void testSimpleWindowFunction() { PlanTester planTester = new PlanTester(); diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/statement/StatementTestUtils.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/statement/StatementTestUtils.java index a80095458a942..2b064a77721b1 
100644 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/statement/StatementTestUtils.java +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/statement/StatementTestUtils.java @@ -55,7 +55,11 @@ private StatementTestUtils() { } public static String tableName() { - return "table1"; + return tableName(1); + } + + public static String tableName(int i) { + return "table" + i; } public static String[] genColumnNames() { @@ -232,7 +236,11 @@ public static InsertRowStatement genInsertRowStatement(boolean writeToTable) { } public static TsTable genTsTable() { - final TsTable tsTable = new TsTable(tableName()); + return genTsTable(1); + } + + public static TsTable genTsTable(int tableId) { + final TsTable tsTable = new TsTable(tableName(tableId)); String[] measurements = genColumnNames(); TSDataType[] dataTypes = genDataTypes(); TsTableColumnCategory[] columnCategories = genColumnCategories(); diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/DataRegionTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/DataRegionTest.java index c3895a058c63c..7eb1fce3457ed 100644 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/DataRegionTest.java +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/DataRegionTest.java @@ -30,6 +30,7 @@ import org.apache.iotdb.commons.path.MeasurementPath; import org.apache.iotdb.commons.path.NonAlignedFullPath; import org.apache.iotdb.commons.path.PartialPath; +import org.apache.iotdb.commons.schema.table.column.TsTableColumnCategory; import org.apache.iotdb.db.conf.IoTDBConfig; import org.apache.iotdb.db.conf.IoTDBDescriptor; import org.apache.iotdb.db.exception.DataRegionException; @@ -44,6 +45,7 @@ import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertRowNode; import 
org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertRowsNode; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertTabletNode; +import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.RelationalDeleteDataNode; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.RelationalInsertRowNode; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.RelationalInsertTabletNode; import org.apache.iotdb.db.queryengine.plan.statement.StatementTestUtils; @@ -58,11 +60,18 @@ import org.apache.iotdb.db.storageengine.dataregion.compaction.selector.constant.InnerUnsequenceCompactionSelector; import org.apache.iotdb.db.storageengine.dataregion.compaction.utils.CompactionConfigRestorer; import org.apache.iotdb.db.storageengine.dataregion.flush.FlushManager; -import org.apache.iotdb.db.storageengine.dataregion.flush.TsFileFlushPolicy; +import org.apache.iotdb.db.storageengine.dataregion.flush.TsFileFlushPolicy.DirectFlushPolicy; import org.apache.iotdb.db.storageengine.dataregion.memtable.ReadOnlyMemChunk; import org.apache.iotdb.db.storageengine.dataregion.memtable.TsFileProcessor; +import org.apache.iotdb.db.storageengine.dataregion.modification.DeletionPredicate; +import org.apache.iotdb.db.storageengine.dataregion.modification.ModEntry; +import org.apache.iotdb.db.storageengine.dataregion.modification.TableDeletionEntry; +import org.apache.iotdb.db.storageengine.dataregion.modification.TagPredicate.NOP; import org.apache.iotdb.db.storageengine.dataregion.read.QueryDataSource; import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource.ModIterator; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.ColumnRename; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.TableRename; import org.apache.iotdb.db.storageengine.dataregion.tsfile.generator.TsFileNameGenerator; import 
org.apache.iotdb.db.storageengine.rescon.memory.MemTableManager; import org.apache.iotdb.db.storageengine.rescon.memory.SystemInfo; @@ -71,10 +80,13 @@ import org.apache.tsfile.enums.TSDataType; import org.apache.tsfile.file.metadata.IDeviceID; +import org.apache.tsfile.file.metadata.IDeviceID.Factory; import org.apache.tsfile.file.metadata.enums.CompressionType; import org.apache.tsfile.file.metadata.enums.TSEncoding; import org.apache.tsfile.read.TimeValuePair; +import org.apache.tsfile.read.common.TimeRange; import org.apache.tsfile.read.reader.IPointReader; +import org.apache.tsfile.utils.Binary; import org.apache.tsfile.utils.BitMap; import org.apache.tsfile.write.record.TSRecord; import org.apache.tsfile.write.record.datapoint.DataPoint; @@ -89,6 +101,7 @@ import java.io.File; import java.io.IOException; +import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -96,6 +109,9 @@ import static org.apache.iotdb.db.queryengine.plan.statement.StatementTestUtils.genInsertRowNode; import static org.apache.iotdb.db.queryengine.plan.statement.StatementTestUtils.genInsertTabletNode; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; public class DataRegionTest { private static final IoTDBConfig config = IoTDBDescriptor.getInstance().getConfig(); @@ -106,7 +122,7 @@ public class DataRegionTest { private String systemDir = TestConstant.OUTPUT_DATA_DIR.concat("info"); private String deviceId = "root.vehicle.d0"; - private IDeviceID device = IDeviceID.Factory.DEFAULT_FACTORY.create(deviceId); + private IDeviceID device = Factory.DEFAULT_FACTORY.create(deviceId); private String measurementId = "s0"; private NonAlignedFullPath nonAlignedFullPath = @@ -134,9 +150,13 @@ public void setUp() throws Exception { config.setInnerUnsequenceCompactionSelector( InnerUnsequenceCompactionSelector.SIZE_TIERED_SINGLE_TARGET); 
DataNodeTableCache.getInstance() - .preUpdateTable(dataRegion.getDatabaseName(), StatementTestUtils.genTsTable(), null); + .preUpdateTable(dataRegion.getDatabaseName(), StatementTestUtils.genTsTable(1), null); DataNodeTableCache.getInstance() - .commitUpdateTable(dataRegion.getDatabaseName(), StatementTestUtils.tableName(), null); + .commitUpdateTable(dataRegion.getDatabaseName(), StatementTestUtils.tableName(1), null); + DataNodeTableCache.getInstance() + .preUpdateTable(dataRegion.getDatabaseName(), StatementTestUtils.genTsTable(2), null); + DataNodeTableCache.getInstance() + .commitUpdateTable(dataRegion.getDatabaseName(), StatementTestUtils.tableName(2), null); } @After @@ -231,7 +251,7 @@ record = new TSRecord(deviceId, j); null); } - Assert.assertEquals(1, tsfileResourcesForQuery.size()); + assertEquals(1, tsfileResourcesForQuery.size()); List memChunks = tsfileResourcesForQuery.get(0).getReadOnlyMemChunk(IFullPath.convertToIFullPath(fullPath)); long time = 16; @@ -239,7 +259,7 @@ record = new TSRecord(deviceId, j); IPointReader iterator = memChunk.getPointReader(); while (iterator.hasNextTimeValuePair()) { TimeValuePair timeValuePair = iterator.nextTimeValuePair(); - Assert.assertEquals(time++, timeValuePair.getTimestamp()); + assertEquals(time++, timeValuePair.getTimestamp()); } } } @@ -254,7 +274,7 @@ public void testSequenceSyncClose() dataRegion.syncCloseAllWorkingTsFileProcessors(); } - IDeviceID device = IDeviceID.Factory.DEFAULT_FACTORY.create(deviceId); + IDeviceID device = Factory.DEFAULT_FACTORY.create(deviceId); QueryDataSource queryDataSource = dataRegion.query( Collections.singletonList( @@ -264,9 +284,9 @@ device, new MeasurementSchema(measurementId, TSDataType.INT32))), context, null, null); - Assert.assertEquals(10, queryDataSource.getSeqResources().size()); + assertEquals(10, queryDataSource.getSeqResources().size()); for (TsFileResource resource : queryDataSource.getSeqResources()) { - Assert.assertTrue(resource.isClosed()); + 
assertTrue(resource.isClosed()); } } @@ -297,8 +317,8 @@ public void testRelationalTabletWriteAndSyncClose() context, null, null); - Assert.assertEquals(1, queryDataSource.getSeqResources().size()); - Assert.assertEquals(0, queryDataSource.getUnseqResources().size()); + assertEquals(1, queryDataSource.getSeqResources().size()); + assertEquals(0, queryDataSource.getUnseqResources().size()); queryDataSource = dataRegion.query( @@ -311,10 +331,10 @@ public void testRelationalTabletWriteAndSyncClose() context, null, null); - Assert.assertEquals(1, queryDataSource.getSeqResources().size()); - Assert.assertEquals(0, queryDataSource.getUnseqResources().size()); + assertEquals(1, queryDataSource.getSeqResources().size()); + assertEquals(0, queryDataSource.getUnseqResources().size()); for (TsFileResource resource : queryDataSource.getSeqResources()) { - Assert.assertTrue(resource.isClosed()); + assertTrue(resource.isClosed()); } } @@ -345,8 +365,8 @@ public void testRelationRowWriteAndSyncClose() context, null, null); - Assert.assertEquals(1, queryDataSource.getSeqResources().size()); - Assert.assertEquals(0, queryDataSource.getUnseqResources().size()); + assertEquals(1, queryDataSource.getSeqResources().size()); + assertEquals(0, queryDataSource.getUnseqResources().size()); queryDataSource = dataRegion.query( @@ -359,10 +379,10 @@ public void testRelationRowWriteAndSyncClose() context, null, null); - Assert.assertEquals(1, queryDataSource.getSeqResources().size()); - Assert.assertEquals(0, queryDataSource.getUnseqResources().size()); + assertEquals(1, queryDataSource.getSeqResources().size()); + assertEquals(0, queryDataSource.getUnseqResources().size()); for (TsFileResource resource : queryDataSource.getSeqResources()) { - Assert.assertTrue(resource.isClosed()); + assertTrue(resource.isClosed()); } } @@ -418,8 +438,8 @@ public void testIoTDBTabletWriteAndSyncClose() int hashCode2 = Arrays.hashCode((long[]) columns[1]); dataRegion.insertTablet(insertTabletNode1); // the 
hashCode should not be changed when insert - Assert.assertEquals(hashCode1, Arrays.hashCode((int[]) columns[0])); - Assert.assertEquals(hashCode2, Arrays.hashCode((long[]) columns[1])); + assertEquals(hashCode1, Arrays.hashCode((int[]) columns[0])); + assertEquals(hashCode2, Arrays.hashCode((long[]) columns[1])); dataRegion.syncCloseAllWorkingTsFileProcessors(); for (int r = 50; r < 149; r++) { @@ -448,10 +468,10 @@ public void testIoTDBTabletWriteAndSyncClose() dataRegion.query( Collections.singletonList(nonAlignedFullPath), device, context, null, null); - Assert.assertEquals(2, queryDataSource.getSeqResources().size()); - Assert.assertEquals(1, queryDataSource.getUnseqResources().size()); + assertEquals(2, queryDataSource.getSeqResources().size()); + assertEquals(1, queryDataSource.getUnseqResources().size()); for (TsFileResource resource : queryDataSource.getSeqResources()) { - Assert.assertTrue(resource.isClosed()); + assertTrue(resource.isClosed()); } } @@ -518,16 +538,16 @@ public void testIoTDBTabletWriteAndDeleteDataRegion() times.length); dataRegion.insertTablet(insertTabletNode2); - Assert.assertTrue(SystemInfo.getInstance().getTotalMemTableSize() > 0); + assertTrue(SystemInfo.getInstance().getTotalMemTableSize() > 0); dataRegion.syncDeleteDataFiles(); - Assert.assertEquals(0, SystemInfo.getInstance().getTotalMemTableSize()); + assertEquals(0, SystemInfo.getInstance().getTotalMemTableSize()); QueryDataSource queryDataSource = dataRegion.query( Collections.singletonList(nonAlignedFullPath), device, context, null, null); - Assert.assertEquals(0, queryDataSource.getSeqResources().size()); - Assert.assertEquals(0, queryDataSource.getUnseqResources().size()); + assertEquals(0, queryDataSource.getSeqResources().size()); + assertEquals(0, queryDataSource.getUnseqResources().size()); } @Test @@ -600,10 +620,10 @@ public void testEmptyTabletWriteAndSyncClose() dataRegion.query( Collections.singletonList(nonAlignedFullPath), device, context, null, null); - 
Assert.assertEquals(0, queryDataSource.getSeqResources().size()); - Assert.assertEquals(0, queryDataSource.getUnseqResources().size()); + assertEquals(0, queryDataSource.getSeqResources().size()); + assertEquals(0, queryDataSource.getUnseqResources().size()); for (TsFileResource resource : queryDataSource.getSeqResources()) { - Assert.assertTrue(resource.isClosed()); + assertTrue(resource.isClosed()); } } @@ -676,10 +696,10 @@ public void testAllMeasurementsFailedTabletWriteAndSyncClose() dataRegion.query( Collections.singletonList(nonAlignedFullPath), device, context, null, null); - Assert.assertEquals(0, queryDataSource.getSeqResources().size()); - Assert.assertEquals(0, queryDataSource.getUnseqResources().size()); + assertEquals(0, queryDataSource.getSeqResources().size()); + assertEquals(0, queryDataSource.getUnseqResources().size()); for (TsFileResource resource : queryDataSource.getSeqResources()) { - Assert.assertTrue(resource.isClosed()); + assertTrue(resource.isClosed()); } } @@ -703,13 +723,13 @@ public void testSeqAndUnSeqSyncClose() QueryDataSource queryDataSource = dataRegion.query( Collections.singletonList(nonAlignedFullPath), device, context, null, null); - Assert.assertEquals(10, queryDataSource.getSeqResources().size()); - Assert.assertEquals(10, queryDataSource.getUnseqResources().size()); + assertEquals(10, queryDataSource.getSeqResources().size()); + assertEquals(10, queryDataSource.getUnseqResources().size()); for (TsFileResource resource : queryDataSource.getSeqResources()) { - Assert.assertTrue(resource.isClosed()); + assertTrue(resource.isClosed()); } for (TsFileResource resource : queryDataSource.getUnseqResources()) { - Assert.assertTrue(resource.isClosed()); + assertTrue(resource.isClosed()); } } @@ -737,13 +757,13 @@ public void testAllMeasurementsFailedRecordSeqAndUnSeqSyncClose() QueryDataSource queryDataSource = dataRegion.query( Collections.singletonList(nonAlignedFullPath), device, context, null, null); - Assert.assertEquals(0, 
queryDataSource.getSeqResources().size()); - Assert.assertEquals(0, queryDataSource.getUnseqResources().size()); + assertEquals(0, queryDataSource.getSeqResources().size()); + assertEquals(0, queryDataSource.getUnseqResources().size()); for (TsFileResource resource : queryDataSource.getSeqResources()) { - Assert.assertTrue(resource.isClosed()); + assertTrue(resource.isClosed()); } for (TsFileResource resource : queryDataSource.getUnseqResources()) { - Assert.assertTrue(resource.isClosed()); + assertTrue(resource.isClosed()); } } @@ -770,13 +790,13 @@ public void testDisableSeparateDataForInsertRowPlan() QueryDataSource queryDataSource = dataRegion.query( Collections.singletonList(nonAlignedFullPath), device, context, null, null); - Assert.assertEquals(0, queryDataSource.getSeqResources().size()); - Assert.assertEquals(20, queryDataSource.getUnseqResources().size()); + assertEquals(0, queryDataSource.getSeqResources().size()); + assertEquals(20, queryDataSource.getUnseqResources().size()); for (TsFileResource resource : queryDataSource.getSeqResources()) { - Assert.assertTrue(resource.isClosed()); + assertTrue(resource.isClosed()); } for (TsFileResource resource : queryDataSource.getUnseqResources()) { - Assert.assertTrue(resource.isClosed()); + assertTrue(resource.isClosed()); } config.setEnableSeparateData(defaultValue); @@ -852,10 +872,10 @@ public void testDisableSeparateDataForInsertTablet1() dataRegion.query( Collections.singletonList(nonAlignedFullPath), device, context, null, null); - Assert.assertEquals(0, queryDataSource.getSeqResources().size()); - Assert.assertEquals(2, queryDataSource.getUnseqResources().size()); + assertEquals(0, queryDataSource.getSeqResources().size()); + assertEquals(2, queryDataSource.getUnseqResources().size()); for (TsFileResource resource : queryDataSource.getSeqResources()) { - Assert.assertTrue(resource.isClosed()); + assertTrue(resource.isClosed()); } config.setEnableSeparateData(defaultEnableDiscard); @@ -932,10 +952,10 @@ 
public void testDisableSeparateDataForInsertTablet2() dataRegion.query( Collections.singletonList(nonAlignedFullPath), device, context, null, null); - Assert.assertEquals(0, queryDataSource.getSeqResources().size()); - Assert.assertEquals(2, queryDataSource.getUnseqResources().size()); + assertEquals(0, queryDataSource.getSeqResources().size()); + assertEquals(2, queryDataSource.getUnseqResources().size()); for (TsFileResource resource : queryDataSource.getSeqResources()) { - Assert.assertTrue(resource.isClosed()); + assertTrue(resource.isClosed()); } config.setEnableSeparateData(defaultEnableDiscard); @@ -1012,10 +1032,10 @@ public void testDisableSeparateDataForInsertTablet3() dataRegion.query( Collections.singletonList(nonAlignedFullPath), device, context, null, null); - Assert.assertEquals(0, queryDataSource.getSeqResources().size()); - Assert.assertEquals(2, queryDataSource.getUnseqResources().size()); + assertEquals(0, queryDataSource.getSeqResources().size()); + assertEquals(2, queryDataSource.getUnseqResources().size()); for (TsFileResource resource : queryDataSource.getSeqResources()) { - Assert.assertTrue(resource.isClosed()); + assertTrue(resource.isClosed()); } config.setEnableSeparateData(defaultEnableDiscard); @@ -1042,7 +1062,7 @@ public void testInsertUnSequenceRows() InsertRowsNode insertRowsNode = new InsertRowsNode(new PlanNodeId(""), indexList, nodes); dataRegion1.insert(insertRowsNode); dataRegion1.syncCloseAllWorkingTsFileProcessors(); - IDeviceID tmpDeviceId = IDeviceID.Factory.DEFAULT_FACTORY.create("root.Rows"); + IDeviceID tmpDeviceId = Factory.DEFAULT_FACTORY.create("root.Rows"); QueryDataSource queryDataSource = dataRegion1.query( Collections.singletonList( @@ -1052,10 +1072,10 @@ tmpDeviceId, new MeasurementSchema(measurementId, TSDataType.INT32))), context, null, null); - Assert.assertEquals(1, queryDataSource.getSeqResources().size()); - Assert.assertEquals(0, queryDataSource.getUnseqResources().size()); + assertEquals(1, 
queryDataSource.getSeqResources().size()); + assertEquals(0, queryDataSource.getUnseqResources().size()); for (TsFileResource resource : queryDataSource.getSeqResources()) { - Assert.assertTrue(resource.isClosed()); + assertTrue(resource.isClosed()); } dataRegion1.syncDeleteDataFiles(); } @@ -1079,7 +1099,7 @@ public void testSmallReportProportionInsertRow() dataRegion1.syncCloseAllWorkingTsFileProcessors(); } dataRegion1.syncCloseAllWorkingTsFileProcessors(); - IDeviceID tmpDeviceId = IDeviceID.Factory.DEFAULT_FACTORY.create("root.ln22"); + IDeviceID tmpDeviceId = Factory.DEFAULT_FACTORY.create("root.ln22"); QueryDataSource queryDataSource = dataRegion1.query( Collections.singletonList( @@ -1089,13 +1109,13 @@ tmpDeviceId, new MeasurementSchema(measurementId, TSDataType.INT32))), context, null, null); - Assert.assertEquals(10, queryDataSource.getSeqResources().size()); - Assert.assertEquals(0, queryDataSource.getUnseqResources().size()); + assertEquals(10, queryDataSource.getSeqResources().size()); + assertEquals(0, queryDataSource.getUnseqResources().size()); for (TsFileResource resource : queryDataSource.getSeqResources()) { - Assert.assertTrue(resource.isClosed()); + assertTrue(resource.isClosed()); } for (TsFileResource resource : queryDataSource.getUnseqResources()) { - Assert.assertTrue(resource.isClosed()); + assertTrue(resource.isClosed()); } dataRegion1.syncDeleteDataFiles(); @@ -1158,12 +1178,12 @@ public void testMerge() QueryDataSource queryDataSource = dataRegion.query( Collections.singletonList(nonAlignedFullPath), device, context, null, null); - Assert.assertEquals(2, queryDataSource.getSeqResources().size()); + assertEquals(2, queryDataSource.getSeqResources().size()); for (TsFileResource resource : queryDataSource.getSeqResources()) { - Assert.assertTrue(resource.isClosed()); + assertTrue(resource.isClosed()); } for (TsFileResource resource : queryDataSource.getUnseqResources()) { - Assert.assertTrue(resource.isClosed()); + 
assertTrue(resource.isClosed()); } IoTDBDescriptor.getInstance() .getConfig() @@ -1232,7 +1252,7 @@ public void testDeleteStorageGroupWhenCompacting() throws Exception { + CompactionLogger.INNER_COMPACTION_LOG_NAME_SUFFIX); Assert.assertFalse(logFile.exists()); Assert.assertFalse(CommonDescriptor.getInstance().getConfig().isReadOnly()); - Assert.assertTrue(dataRegion.getTsFileManager().isAllowCompaction()); + assertTrue(dataRegion.getTsFileManager().isAllowCompaction()); } finally { new CompactionConfigRestorer().restoreCompactionConfig(); } @@ -1245,7 +1265,7 @@ public void testTimedFlushSeqMemTable() TSRecord record = new TSRecord(deviceId, 10000); record.addTuple(DataPoint.getDataPoint(TSDataType.INT32, measurementId, String.valueOf(1000))); dataRegion.insert(buildInsertRowNodeByTSRecord(record)); - Assert.assertEquals(1, MemTableManager.getInstance().getCurrentMemtableNumber()); + assertEquals(1, MemTableManager.getInstance().getCurrentMemtableNumber()); // change config & reboot timed service boolean prevEnableTimedFlushSeqMemtable = config.isEnableTimedFlushSeqMemtable(); @@ -1256,7 +1276,7 @@ public void testTimedFlushSeqMemTable() Thread.sleep(500); - Assert.assertEquals(1, dataRegion.getWorkSequenceTsFileProcessors().size()); + assertEquals(1, dataRegion.getWorkSequenceTsFileProcessors().size()); TsFileProcessor tsFileProcessor = dataRegion.getWorkSequenceTsFileProcessors().iterator().next(); FlushManager flushManager = FlushManager.getInstance(); @@ -1281,7 +1301,7 @@ public void testTimedFlushSeqMemTable() } } - Assert.assertEquals(0, MemTableManager.getInstance().getCurrentMemtableNumber()); + assertEquals(0, MemTableManager.getInstance().getCurrentMemtableNumber()); config.setEnableTimedFlushSeqMemtable(prevEnableTimedFlushSeqMemtable); config.setSeqMemtableFlushInterval(preFLushInterval); @@ -1294,15 +1314,15 @@ public void testTimedFlushUnseqMemTable() TSRecord record = new TSRecord(deviceId, 10000); 
record.addTuple(DataPoint.getDataPoint(TSDataType.INT32, measurementId, String.valueOf(1000))); dataRegion.insert(buildInsertRowNodeByTSRecord(record)); - Assert.assertEquals(1, MemTableManager.getInstance().getCurrentMemtableNumber()); + assertEquals(1, MemTableManager.getInstance().getCurrentMemtableNumber()); dataRegion.syncCloseAllWorkingTsFileProcessors(); - Assert.assertEquals(0, MemTableManager.getInstance().getCurrentMemtableNumber()); + assertEquals(0, MemTableManager.getInstance().getCurrentMemtableNumber()); // create one unsequence memtable record = new TSRecord(deviceId, 1); record.addTuple(DataPoint.getDataPoint(TSDataType.INT32, measurementId, String.valueOf(1000))); dataRegion.insert(buildInsertRowNodeByTSRecord(record)); - Assert.assertEquals(1, MemTableManager.getInstance().getCurrentMemtableNumber()); + assertEquals(1, MemTableManager.getInstance().getCurrentMemtableNumber()); // change config & reboot timed service boolean prevEnableTimedFlushUnseqMemtable = config.isEnableTimedFlushUnseqMemtable(); @@ -1313,7 +1333,7 @@ record = new TSRecord(deviceId, 1); Thread.sleep(500); - Assert.assertEquals(1, dataRegion.getWorkUnsequenceTsFileProcessors().size()); + assertEquals(1, dataRegion.getWorkUnsequenceTsFileProcessors().size()); TsFileProcessor tsFileProcessor = dataRegion.getWorkUnsequenceTsFileProcessors().iterator().next(); FlushManager flushManager = FlushManager.getInstance(); @@ -1338,7 +1358,7 @@ record = new TSRecord(deviceId, 1); } } - Assert.assertEquals(0, MemTableManager.getInstance().getCurrentMemtableNumber()); + assertEquals(0, MemTableManager.getInstance().getCurrentMemtableNumber()); config.setEnableTimedFlushUnseqMemtable(prevEnableTimedFlushUnseqMemtable); config.setUnseqMemtableFlushInterval(preFLushInterval); @@ -1392,11 +1412,11 @@ public void testDeleteDataNotInFile() for (int i = 0; i < dataRegion.getSequenceFileList().size(); i++) { TsFileResource resource = dataRegion.getSequenceFileList().get(i); if (i == 1) { - 
Assert.assertTrue(resource.anyModFileExists()); - Assert.assertEquals(2, resource.getAllModEntries().size()); + assertTrue(resource.anyModFileExists()); + assertEquals(2, resource.getAllModEntries().size()); } else if (i == 3) { - Assert.assertTrue(resource.anyModFileExists()); - Assert.assertEquals(1, resource.getAllModEntries().size()); + assertTrue(resource.anyModFileExists()); + assertEquals(1, resource.getAllModEntries().size()); } else { Assert.assertFalse(resource.anyModFileExists()); } @@ -1489,8 +1509,8 @@ public void testDeleteDataInSeqFlushingMemtable() dataRegion.deleteByDevice(new MeasurementPath("root.vehicle.d0.s0"), deleteDataNode4); dataRegion.syncCloseAllWorkingTsFileProcessors(); - Assert.assertTrue(tsFileResource.anyModFileExists()); - Assert.assertEquals(3, tsFileResource.getAllModEntries().size()); + assertTrue(tsFileResource.anyModFileExists()); + assertEquals(3, tsFileResource.getAllModEntries().size()); } @Test @@ -1584,8 +1604,8 @@ public void testDeleteDataInUnSeqFlushingMemtable() dataRegion.deleteByDevice(new MeasurementPath("root.vehicle.d0.s0"), deleteDataNode12); dataRegion.syncCloseAllWorkingTsFileProcessors(); - Assert.assertTrue(tsFileResource.anyModFileExists()); - Assert.assertEquals(3, tsFileResource.getAllModEntries().size()); + assertTrue(tsFileResource.anyModFileExists()); + assertEquals(3, tsFileResource.getAllModEntries().size()); } @Test @@ -1625,9 +1645,7 @@ public void testDeleteDataInSeqWorkingMemtable() dataRegion.syncCloseAllWorkingTsFileProcessors(); Assert.assertFalse(tsFileResource.anyModFileExists()); Assert.assertFalse( - tsFileResource - .getDevices() - .contains(IDeviceID.Factory.DEFAULT_FACTORY.create("root.vehicle.d199"))); + tsFileResource.getDevices().contains(Factory.DEFAULT_FACTORY.create("root.vehicle.d199"))); } @Test @@ -1659,7 +1677,7 @@ public static class DummyDataRegion extends DataRegion { public DummyDataRegion(String systemInfoDir, String storageGroupName) throws DataRegionException { - 
super(systemInfoDir, "0", new TsFileFlushPolicy.DirectFlushPolicy(), storageGroupName); + super(systemInfoDir, "0", new DirectFlushPolicy(), storageGroupName); } } @@ -1686,7 +1704,7 @@ public void testDeleteDataDirectlySeqWriteModsOrDeleteFiles() new DeleteDataNode(new PlanNodeId("1"), Collections.singletonList(path), 50, 100); deleteDataNode1.setSearchIndex(0); dataRegion.deleteDataDirectly(new MeasurementPath("root.vehicle.d0.**"), deleteDataNode1); - Assert.assertTrue(tsFileResource.getTsFile().exists()); + assertTrue(tsFileResource.getTsFile().exists()); Assert.assertFalse(tsFileResource.anyModFileExists()); dataRegion.syncCloseAllWorkingTsFileProcessors(); @@ -1696,8 +1714,8 @@ public void testDeleteDataDirectlySeqWriteModsOrDeleteFiles() new DeleteDataNode(new PlanNodeId("2"), Collections.singletonList(path), 100, 120); deleteDataNode2.setSearchIndex(0); dataRegion.deleteDataDirectly(new MeasurementPath("root.vehicle.d0.**"), deleteDataNode2); - Assert.assertTrue(tsFileResource.getTsFile().exists()); - Assert.assertTrue(tsFileResource.anyModFileExists()); + assertTrue(tsFileResource.getTsFile().exists()); + assertTrue(tsFileResource.anyModFileExists()); // delete data in closed file, and time all match DeleteDataNode deleteDataNode3 = @@ -1727,8 +1745,8 @@ public void testDeleteDataDirectlyUnseqWriteModsOrDeleteFiles() dataRegion.syncCloseWorkingTsFileProcessors(true); TsFileResource tsFileResourceUnSeq = dataRegion.getTsFileManager().getTsFileList(false).get(0); - Assert.assertTrue(tsFileResourceSeq.getTsFile().exists()); - Assert.assertTrue(tsFileResourceUnSeq.getTsFile().exists()); + assertTrue(tsFileResourceSeq.getTsFile().exists()); + assertTrue(tsFileResourceUnSeq.getTsFile().exists()); // already closed, will have a mods file. 
MeasurementPath path = new MeasurementPath("root.vehicle.d0.**"); @@ -1743,9 +1761,9 @@ public void testDeleteDataDirectlyUnseqWriteModsOrDeleteFiles() dataRegion.deleteDataDirectly(new MeasurementPath("root.vehicle.d0.**"), deleteDataNode2); // delete data in mem table, there is no mods - Assert.assertTrue(tsFileResourceSeq.getTsFile().exists()); - Assert.assertTrue(tsFileResourceUnSeq.getTsFile().exists()); - Assert.assertTrue(tsFileResourceSeq.anyModFileExists()); + assertTrue(tsFileResourceSeq.getTsFile().exists()); + assertTrue(tsFileResourceUnSeq.getTsFile().exists()); + assertTrue(tsFileResourceSeq.anyModFileExists()); Assert.assertFalse(tsFileResourceUnSeq.anyModFileExists()); dataRegion.syncCloseAllWorkingTsFileProcessors(); @@ -1753,8 +1771,8 @@ public void testDeleteDataDirectlyUnseqWriteModsOrDeleteFiles() new DeleteDataNode(new PlanNodeId("3"), Collections.singletonList(path), 40, 80); deleteDataNode3.setSearchIndex(0); dataRegion.deleteDataDirectly(new MeasurementPath("root.vehicle.d0.**"), deleteDataNode3); - Assert.assertTrue(tsFileResourceUnSeq.getTsFile().exists()); - Assert.assertTrue(tsFileResourceUnSeq.anyModFileExists()); + assertTrue(tsFileResourceUnSeq.getTsFile().exists()); + assertTrue(tsFileResourceUnSeq.anyModFileExists()); // seq file and unseq file have data file and mod file now, // this deletion will remove data file and mod file. 
@@ -1772,4 +1790,259 @@ public void testDeleteDataDirectlyUnseqWriteModsOrDeleteFiles() Assert.assertFalse(tsFileResourceSeq.anyModFileExists()); Assert.assertFalse(tsFileResourceUnSeq.anyModFileExists()); } + + @Test + public void testSchemaEvolution() + throws WriteProcessException, QueryProcessException, IOException { + String[] measurements = {"tag1", "s1", "s2"}; + MeasurementSchema[] measurementSchemas = { + new MeasurementSchema("tag1", TSDataType.STRING), + new MeasurementSchema("s1", TSDataType.INT64), + new MeasurementSchema("s2", TSDataType.DOUBLE) + }; + RelationalInsertRowNode insertRowNode = + new RelationalInsertRowNode( + new PlanNodeId(""), + new PartialPath(new String[] {"table1"}), + true, + measurements, + new TSDataType[] {TSDataType.STRING, TSDataType.INT64, TSDataType.DOUBLE}, + measurementSchemas, + 10, + new Object[] {new Binary("tag1".getBytes(StandardCharsets.UTF_8)), 1L, 1.0}, + false, + new TsTableColumnCategory[] { + TsTableColumnCategory.TAG, TsTableColumnCategory.FIELD, TsTableColumnCategory.FIELD + }); + dataRegion.insert(insertRowNode); + + // table1 -> table2 + dataRegion.applySchemaEvolution(Collections.singletonList(new TableRename("table1", "table2"))); + + // cannot query with the old name + IDeviceID deviceID1 = Factory.DEFAULT_FACTORY.create(new String[] {"table1", "tag1"}); + List fullPaths = + Arrays.asList( + new AlignedFullPath( + deviceID1, Arrays.asList(measurements), Arrays.asList(measurementSchemas))); + QueryDataSource dataSource = + dataRegion.query( + fullPaths, + deviceID1, + new QueryContext(false), + null, + Collections.singletonList(0L), + Long.MAX_VALUE); + assertTrue(dataSource.getSeqResources().isEmpty()); + + // can query with the new name + IDeviceID deviceID2 = Factory.DEFAULT_FACTORY.create(new String[] {"table2", "tag1"}); + fullPaths = + Arrays.asList( + new AlignedFullPath( + deviceID2, Arrays.asList(measurements), Arrays.asList(measurementSchemas))); + dataSource = + dataRegion.query( + fullPaths, + 
deviceID2, + new QueryContext(false), + null, + Collections.singletonList(0L), + Long.MAX_VALUE); + assertEquals(1, dataSource.getSeqResources().size()); + + DataNodeTableCache.getInstance() + .preUpdateTable(dataRegion.getDatabaseName(), StatementTestUtils.genTsTable(1), null); + DataNodeTableCache.getInstance() + .commitUpdateTable(dataRegion.getDatabaseName(), StatementTestUtils.tableName(1), null); + + // write again with table1 + insertRowNode = + new RelationalInsertRowNode( + new PlanNodeId(""), + new PartialPath(new String[] {"table1"}), + true, + measurements, + new TSDataType[] {TSDataType.STRING, TSDataType.INT64, TSDataType.DOUBLE}, + measurementSchemas, + 10, + new Object[] {new Binary("tag1".getBytes(StandardCharsets.UTF_8)), 1L, 1.0}, + false, + new TsTableColumnCategory[] { + TsTableColumnCategory.TAG, TsTableColumnCategory.FIELD, TsTableColumnCategory.FIELD + }); + dataRegion.insert(insertRowNode); + + // can query with table1 + fullPaths = + Arrays.asList( + new AlignedFullPath( + deviceID1, Arrays.asList(measurements), Arrays.asList(measurementSchemas))); + dataSource = + dataRegion.query( + fullPaths, + deviceID1, + new QueryContext(false), + null, + Collections.singletonList(0L), + Long.MAX_VALUE); + assertEquals(1, dataSource.getSeqResources().size()); + + // can query with table2 + fullPaths = + Arrays.asList( + new AlignedFullPath( + deviceID2, Arrays.asList(measurements), Arrays.asList(measurementSchemas))); + dataSource = + dataRegion.query( + fullPaths, + deviceID2, + new QueryContext(false), + null, + Collections.singletonList(0L), + Long.MAX_VALUE); + assertEquals(1, dataSource.getSeqResources().size()); + } + + @Test + public void testSchemaEvolutionWithPartialDeletion() throws WriteProcessException, IOException { + String[] measurements = {"tag1", "s1", "s2"}; + MeasurementSchema[] measurementSchemas = { + new MeasurementSchema("tag1", TSDataType.STRING), + new MeasurementSchema("s1", TSDataType.INT64), + new MeasurementSchema("s2", 
TSDataType.DOUBLE) + }; + RelationalInsertRowNode insertRowNode = + new RelationalInsertRowNode( + new PlanNodeId(""), + new PartialPath(new String[] {"table1"}), + true, + measurements, + new TSDataType[] {TSDataType.STRING, TSDataType.INT64, TSDataType.DOUBLE}, + measurementSchemas, + 10, + new Object[] {new Binary("tag1".getBytes(StandardCharsets.UTF_8)), 1L, 1.0}, + false, + new TsTableColumnCategory[] { + TsTableColumnCategory.TAG, TsTableColumnCategory.FIELD, TsTableColumnCategory.FIELD + }); + dataRegion.insert(insertRowNode); + insertRowNode.setTime(20); + dataRegion.insert(insertRowNode); + + // table1 -> table2 + dataRegion.applySchemaEvolution(Collections.singletonList(new TableRename("table1", "table2"))); + // s1 -> s3 + dataRegion.applySchemaEvolution( + Collections.singletonList(new ColumnRename("table2", "s1", "s3", null))); + + // delete with table2 + TableDeletionEntry tableDeletionEntry = + new TableDeletionEntry(new DeletionPredicate("table2"), new TimeRange(0, 15)); + RelationalDeleteDataNode relationalDeleteDataNode = + new RelationalDeleteDataNode( + new PlanNodeId(""), tableDeletionEntry, dataRegion.getDatabaseName()); + dataRegion.deleteByTable(relationalDeleteDataNode); + // delete with s3 + tableDeletionEntry = + new TableDeletionEntry( + new DeletionPredicate("table2", new NOP(), Collections.singletonList("s3")), + new TimeRange(0, 15)); + relationalDeleteDataNode = + new RelationalDeleteDataNode( + new PlanNodeId(""), tableDeletionEntry, dataRegion.getDatabaseName()); + dataRegion.deleteByTable(relationalDeleteDataNode); + // delete with table1 + tableDeletionEntry = + new TableDeletionEntry(new DeletionPredicate("table1"), new TimeRange(0, 15)); + relationalDeleteDataNode = + new RelationalDeleteDataNode( + new PlanNodeId(""), tableDeletionEntry, dataRegion.getDatabaseName()); + dataRegion.deleteByTable(relationalDeleteDataNode); + // delete with s1 + tableDeletionEntry = + new TableDeletionEntry( + new DeletionPredicate("table2", new 
NOP(), Collections.singletonList("s1")), + new TimeRange(0, 15)); + relationalDeleteDataNode = + new RelationalDeleteDataNode( + new PlanNodeId(""), tableDeletionEntry, dataRegion.getDatabaseName()); + dataRegion.deleteByTable(relationalDeleteDataNode); + + List sequenceFileList = dataRegion.getSequenceFileList(); + assertEquals(1, sequenceFileList.size()); + ModIterator modEntryIterator = sequenceFileList.get(0).getModEntryIterator(); + ModEntry next = modEntryIterator.next(); + // the table2 modification should be rewritten to table1 + assertEquals("table1", ((TableDeletionEntry) next).getTableName()); + next = modEntryIterator.next(); + // the s3 modification should be rewritten to s1 + assertEquals( + Collections.singletonList("s1"), + ((TableDeletionEntry) next).getPredicate().getMeasurementNames()); + next = modEntryIterator.next(); + // the table1 modification should be skipped + // the s1 modification should be rewritten to empty + assertEquals( + Collections.singletonList(""), + ((TableDeletionEntry) next).getPredicate().getMeasurementNames()); + assertFalse(modEntryIterator.hasNext()); + } + + @Test + public void testSchemaEvolutionWithFullDeletion() throws WriteProcessException, IOException { + String[] measurements = {"tag1", "s1", "s2"}; + MeasurementSchema[] measurementSchemas = { + new MeasurementSchema("tag1", TSDataType.STRING), + new MeasurementSchema("s1", TSDataType.INT64), + new MeasurementSchema("s2", TSDataType.DOUBLE) + }; + RelationalInsertRowNode insertRowNode = + new RelationalInsertRowNode( + new PlanNodeId(""), + new PartialPath(new String[] {"table1"}), + true, + measurements, + new TSDataType[] {TSDataType.STRING, TSDataType.INT64, TSDataType.DOUBLE}, + measurementSchemas, + 10, + new Object[] {new Binary("tag1".getBytes(StandardCharsets.UTF_8)), 1L, 1.0}, + false, + new TsTableColumnCategory[] { + TsTableColumnCategory.TAG, TsTableColumnCategory.FIELD, TsTableColumnCategory.FIELD + }); + dataRegion.insert(insertRowNode); + 
insertRowNode.setTime(20); + dataRegion.insert(insertRowNode); + + // table1 -> table2 + dataRegion.applySchemaEvolution(Collections.singletonList(new TableRename("table1", "table2"))); + // s1 -> s3 + dataRegion.applySchemaEvolution( + Collections.singletonList(new ColumnRename("table2", "s1", "s3", null))); + + // delete with table1 + TableDeletionEntry tableDeletionEntry = + new TableDeletionEntry(new DeletionPredicate("table1"), new TimeRange(0, 30)); + RelationalDeleteDataNode relationalDeleteDataNode = + new RelationalDeleteDataNode( + new PlanNodeId(""), tableDeletionEntry, dataRegion.getDatabaseName()); + dataRegion.deleteByTable(relationalDeleteDataNode); + // nothing should be deleted + List sequenceFileList = dataRegion.getSequenceFileList(); + assertEquals(1, sequenceFileList.size()); + ModIterator modEntryIterator = sequenceFileList.get(0).getModEntryIterator(); + assertFalse(modEntryIterator.hasNext()); + + // delete with table2 + tableDeletionEntry = + new TableDeletionEntry(new DeletionPredicate("table2"), new TimeRange(0, 30)); + relationalDeleteDataNode = + new RelationalDeleteDataNode( + new PlanNodeId(""), tableDeletionEntry, dataRegion.getDatabaseName()); + dataRegion.deleteByTable(relationalDeleteDataNode); + // the file should be deleted + sequenceFileList = dataRegion.getSequenceFileList(); + assertEquals(0, sequenceFileList.size()); + } } diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/TsFileManagerTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/TsFileManagerTest.java index 07989246ae4d4..daae20e690084 100644 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/TsFileManagerTest.java +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/TsFileManagerTest.java @@ -54,7 +54,7 @@ public class TsFileManagerTest { public void setUp() throws IOException, WriteProcessException, MetadataException { 
tempSGDir = new File(TestConstant.BASE_OUTPUT_PATH.concat("tempSG")); tempSGDir.mkdirs(); - tsFileManager = new TsFileManager("test", "0", tempSGDir.getAbsolutePath()); + tsFileManager = new TsFileManager("test", "0"); seqResources = new ArrayList<>(); for (int i = 0; i < 5; i++) { TsFileResource resource = generateTsFileResource(i); diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/AbstractCompactionTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/AbstractCompactionTest.java index a5503bb9e645c..3f2105e37b2e7 100644 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/AbstractCompactionTest.java +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/AbstractCompactionTest.java @@ -183,8 +183,7 @@ public class AbstractCompactionTest { private int fileCount = 0; - protected TsFileManager tsFileManager = - new TsFileManager(COMPACTION_TEST_SG, "0", STORAGE_GROUP_DIR.getPath()); + protected TsFileManager tsFileManager = new TsFileManager(COMPACTION_TEST_SG, "0"); public void setUp() throws IOException, WriteProcessException, MetadataException, InterruptedException { @@ -850,7 +849,8 @@ protected List getPaths(List resources) Pair iDeviceIDBooleanPair = deviceIterator.nextDevice(); IDeviceID deviceID = iDeviceIDBooleanPair.getLeft(); boolean isAlign = iDeviceIDBooleanPair.getRight(); - Map schemaMap = deviceIterator.getAllSchemasOfCurrentDevice(); + Map schemaMap = + deviceIterator.getAllSchemasOfCurrentDevice(new Pair<>(Long.MIN_VALUE, null)); IMeasurementSchema timeSchema = schemaMap.remove(TsFileConstant.TIME_COLUMN_ID); List measurementSchemas = new ArrayList<>(schemaMap.values()); if (measurementSchemas.isEmpty()) { diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/BatchedCompactionWithTsFileSplitterTest.java 
b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/BatchedCompactionWithTsFileSplitterTest.java index 272d9e6ae5ca7..b64e67a572546 100644 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/BatchedCompactionWithTsFileSplitterTest.java +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/BatchedCompactionWithTsFileSplitterTest.java @@ -301,7 +301,8 @@ private void consumeChunkDataAndValidate(TsFileResource resource) throw new RuntimeException(e); } return true; - }); + }, + null); splitter.splitTsFileByDataPartition(); List splitResources = new ArrayList<>(); for (Map.Entry entry : writerMap.entrySet()) { diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/CompactionSchedulerTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/CompactionSchedulerTest.java index b1377dbbba6bb..4ba44081059ae 100644 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/CompactionSchedulerTest.java +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/CompactionSchedulerTest.java @@ -158,7 +158,7 @@ public void test1() throws IOException, MetadataException, InterruptedException String sgName = COMPACTION_TEST_SG + "test1"; try { CompactionTaskManager.getInstance().restart(); - TsFileManager tsFileManager = new TsFileManager(sgName, "0", "target"); + TsFileManager tsFileManager = new TsFileManager(sgName, "0"); Set fullPath = new HashSet<>(); for (String device : fullPaths) { fullPath.add(sgName + device); @@ -283,7 +283,7 @@ public void test2() throws IOException, MetadataException, InterruptedException String sgName = COMPACTION_TEST_SG + "test2"; try { CompactionTaskManager.getInstance().restart(); - TsFileManager tsFileManager = new TsFileManager(sgName, "0", "target"); + 
TsFileManager tsFileManager = new TsFileManager(sgName, "0"); Set fullPath = new HashSet<>(); for (String device : fullPaths) { fullPath.add(sgName + device); @@ -394,7 +394,7 @@ public void test3() throws IOException, MetadataException, InterruptedException String sgName = COMPACTION_TEST_SG + "test3"; try { CompactionTaskManager.getInstance().restart(); - TsFileManager tsFileManager = new TsFileManager(sgName, "0", "target"); + TsFileManager tsFileManager = new TsFileManager(sgName, "0"); Set fullPath = new HashSet<>(); for (String device : fullPaths) { fullPath.add(sgName + device); @@ -492,7 +492,7 @@ public void test4() throws IOException, MetadataException, InterruptedException String sgName = COMPACTION_TEST_SG + "test4"; try { CompactionTaskManager.getInstance().restart(); - TsFileManager tsFileManager = new TsFileManager(sgName, "0", "target"); + TsFileManager tsFileManager = new TsFileManager(sgName, "0"); Set fullPath = new HashSet<>(); for (String device : fullPaths) { fullPath.add(sgName + device); @@ -576,7 +576,7 @@ public void test5() throws IOException, MetadataException, InterruptedException String sgName = COMPACTION_TEST_SG + "test5"; try { CompactionTaskManager.getInstance().restart(); - TsFileManager tsFileManager = new TsFileManager(sgName, "0", "target"); + TsFileManager tsFileManager = new TsFileManager(sgName, "0"); Set fullPath = new HashSet<>(); for (String device : fullPaths) { fullPath.add(sgName + device); @@ -691,7 +691,7 @@ public void test6() throws IOException, MetadataException, InterruptedException String sgName = COMPACTION_TEST_SG + "test6"; try { CompactionTaskManager.getInstance().restart(); - TsFileManager tsFileManager = new TsFileManager(sgName, "0", "target"); + TsFileManager tsFileManager = new TsFileManager(sgName, "0"); Set fullPath = new HashSet<>(); for (String device : fullPaths) { fullPath.add(sgName + device); @@ -797,7 +797,7 @@ public void test7() throws IOException, MetadataException, InterruptedException 
String sgName = COMPACTION_TEST_SG + "test7"; try { CompactionTaskManager.getInstance().restart(); - TsFileManager tsFileManager = new TsFileManager(sgName, "0", "target"); + TsFileManager tsFileManager = new TsFileManager(sgName, "0"); Set fullPath = new HashSet<>(); for (String device : fullPaths) { fullPath.add(sgName + device); @@ -899,7 +899,7 @@ public void test8() throws IOException, MetadataException, InterruptedException String sgName = COMPACTION_TEST_SG + "test8"; try { CompactionTaskManager.getInstance().restart(); - TsFileManager tsFileManager = new TsFileManager(sgName, "0", "target"); + TsFileManager tsFileManager = new TsFileManager(sgName, "0"); Set fullPath = new HashSet<>(); for (String device : fullPaths) { fullPath.add(sgName + device); @@ -987,7 +987,7 @@ public void test9() throws IOException, MetadataException, InterruptedException String sgName = COMPACTION_TEST_SG + "test9"; try { CompactionTaskManager.getInstance().restart(); - TsFileManager tsFileManager = new TsFileManager(sgName, "0", "target"); + TsFileManager tsFileManager = new TsFileManager(sgName, "0"); Set fullPath = new HashSet<>(); for (String device : fullPaths) { fullPath.add(sgName + device); @@ -1091,7 +1091,7 @@ public void test10() throws IOException, MetadataException, InterruptedException String sgName = COMPACTION_TEST_SG + "test10"; try { CompactionTaskManager.getInstance().restart(); - TsFileManager tsFileManager = new TsFileManager(sgName, "0", "target"); + TsFileManager tsFileManager = new TsFileManager(sgName, "0"); Set fullPath = new HashSet<>(); for (String device : fullPaths) { fullPath.add(sgName + device); @@ -1199,7 +1199,7 @@ public void test11() throws IOException, MetadataException, InterruptedException String sgName = COMPACTION_TEST_SG + "test11"; try { CompactionTaskManager.getInstance().restart(); - TsFileManager tsFileManager = new TsFileManager(sgName, "0", "target"); + TsFileManager tsFileManager = new TsFileManager(sgName, "0"); Set fullPath = new 
HashSet<>(); for (String device : fullPaths) { fullPath.add(sgName + device); @@ -1303,7 +1303,7 @@ public void test12() throws IOException, MetadataException, InterruptedException String sgName = COMPACTION_TEST_SG + "test12"; try { CompactionTaskManager.getInstance().restart(); - TsFileManager tsFileManager = new TsFileManager(sgName, "0", "target"); + TsFileManager tsFileManager = new TsFileManager(sgName, "0"); Set fullPath = new HashSet<>(); for (String device : fullPaths) { fullPath.add(sgName + device); @@ -1412,7 +1412,7 @@ public void test14() throws IOException, MetadataException, InterruptedException String sgName = COMPACTION_TEST_SG + "test13"; try { CompactionTaskManager.getInstance().restart(); - TsFileManager tsFileManager = new TsFileManager(sgName, "0", "target"); + TsFileManager tsFileManager = new TsFileManager(sgName, "0"); Set fullPath = new HashSet<>(); for (String device : fullPaths) { fullPath.add(sgName + device); @@ -1522,7 +1522,7 @@ public void test15() throws IOException, MetadataException, InterruptedException String sgName = COMPACTION_TEST_SG + "test14"; try { CompactionTaskManager.getInstance().restart(); - TsFileManager tsFileManager = new TsFileManager(sgName, "0", "target"); + TsFileManager tsFileManager = new TsFileManager(sgName, "0"); Set fullPath = new HashSet<>(); for (String device : fullPaths) { fullPath.add(sgName + device); @@ -1640,7 +1640,7 @@ public void test16() throws IOException, MetadataException, InterruptedException String sgName = COMPACTION_TEST_SG + "test16"; try { CompactionTaskManager.getInstance().restart(); - TsFileManager tsFileManager = new TsFileManager(sgName, "0", "target"); + TsFileManager tsFileManager = new TsFileManager(sgName, "0"); Set fullPath = new HashSet<>(); for (String device : fullPaths) { fullPath.add(sgName + device); @@ -1734,7 +1734,7 @@ public void testLargeFileInLowerLevel() throws Exception { String sgName = COMPACTION_TEST_SG + "test17"; try { 
CompactionTaskManager.getInstance().restart(); - TsFileManager tsFileManager = new TsFileManager(sgName, "0", "target"); + TsFileManager tsFileManager = new TsFileManager(sgName, "0"); Set fullPath = new HashSet<>(); for (String device : fullPaths) { fullPath.add(sgName + device); diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/CompactionSchedulerWithFastPerformerTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/CompactionSchedulerWithFastPerformerTest.java index 747b2a67ea915..d56df57706be9 100644 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/CompactionSchedulerWithFastPerformerTest.java +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/CompactionSchedulerWithFastPerformerTest.java @@ -155,7 +155,7 @@ public void test1() throws IOException, MetadataException, InterruptedException String sgName = COMPACTION_TEST_SG + "test1"; try { CompactionTaskManager.getInstance().restart(); - TsFileManager tsFileManager = new TsFileManager(sgName, "0", "target"); + TsFileManager tsFileManager = new TsFileManager(sgName, "0"); Set fullPath = new HashSet<>(); for (String device : fullPaths) { fullPath.add(sgName + device); @@ -280,7 +280,7 @@ public void test2() throws IOException, MetadataException, InterruptedException String sgName = COMPACTION_TEST_SG + "test2"; try { CompactionTaskManager.getInstance().restart(); - TsFileManager tsFileManager = new TsFileManager(sgName, "0", "target"); + TsFileManager tsFileManager = new TsFileManager(sgName, "0"); Set fullPath = new HashSet<>(); for (String device : fullPaths) { fullPath.add(sgName + device); @@ -391,7 +391,7 @@ public void test3() throws IOException, MetadataException, InterruptedException String sgName = COMPACTION_TEST_SG + "test3"; try { CompactionTaskManager.getInstance().restart(); - TsFileManager tsFileManager = new 
TsFileManager(sgName, "0", "target"); + TsFileManager tsFileManager = new TsFileManager(sgName, "0"); Set fullPath = new HashSet<>(); for (String device : fullPaths) { fullPath.add(sgName + device); @@ -489,7 +489,7 @@ public void test4() throws IOException, MetadataException, InterruptedException String sgName = COMPACTION_TEST_SG + "test4"; try { CompactionTaskManager.getInstance().restart(); - TsFileManager tsFileManager = new TsFileManager(sgName, "0", "target"); + TsFileManager tsFileManager = new TsFileManager(sgName, "0"); Set fullPath = new HashSet<>(); for (String device : fullPaths) { fullPath.add(sgName + device); @@ -573,7 +573,7 @@ public void test5() throws IOException, MetadataException, InterruptedException String sgName = COMPACTION_TEST_SG + "test5"; try { CompactionTaskManager.getInstance().restart(); - TsFileManager tsFileManager = new TsFileManager(sgName, "0", "target"); + TsFileManager tsFileManager = new TsFileManager(sgName, "0"); Set fullPath = new HashSet<>(); for (String device : fullPaths) { fullPath.add(sgName + device); @@ -688,7 +688,7 @@ public void test6() throws IOException, MetadataException, InterruptedException String sgName = COMPACTION_TEST_SG + "test6"; try { CompactionTaskManager.getInstance().restart(); - TsFileManager tsFileManager = new TsFileManager(sgName, "0", "target"); + TsFileManager tsFileManager = new TsFileManager(sgName, "0"); Set fullPath = new HashSet<>(); for (String device : fullPaths) { fullPath.add(sgName + device); @@ -794,7 +794,7 @@ public void test7() throws IOException, MetadataException, InterruptedException String sgName = COMPACTION_TEST_SG + "test7"; try { CompactionTaskManager.getInstance().restart(); - TsFileManager tsFileManager = new TsFileManager(sgName, "0", "target"); + TsFileManager tsFileManager = new TsFileManager(sgName, "0"); Set fullPath = new HashSet<>(); for (String device : fullPaths) { fullPath.add(sgName + device); @@ -896,7 +896,7 @@ public void test8() throws IOException, 
MetadataException, InterruptedException String sgName = COMPACTION_TEST_SG + "test8"; try { CompactionTaskManager.getInstance().restart(); - TsFileManager tsFileManager = new TsFileManager(sgName, "0", "target"); + TsFileManager tsFileManager = new TsFileManager(sgName, "0"); Set fullPath = new HashSet<>(); for (String device : fullPaths) { fullPath.add(sgName + device); @@ -984,7 +984,7 @@ public void test9() throws IOException, MetadataException, InterruptedException String sgName = COMPACTION_TEST_SG + "test9"; try { CompactionTaskManager.getInstance().restart(); - TsFileManager tsFileManager = new TsFileManager(sgName, "0", "target"); + TsFileManager tsFileManager = new TsFileManager(sgName, "0"); Set fullPath = new HashSet<>(); for (String device : fullPaths) { fullPath.add(sgName + device); @@ -1088,7 +1088,7 @@ public void test10() throws IOException, MetadataException, InterruptedException String sgName = COMPACTION_TEST_SG + "test10"; try { CompactionTaskManager.getInstance().restart(); - TsFileManager tsFileManager = new TsFileManager(sgName, "0", "target"); + TsFileManager tsFileManager = new TsFileManager(sgName, "0"); Set fullPath = new HashSet<>(); for (String device : fullPaths) { fullPath.add(sgName + device); @@ -1196,7 +1196,7 @@ public void test11() throws IOException, MetadataException, InterruptedException String sgName = COMPACTION_TEST_SG + "test11"; try { CompactionTaskManager.getInstance().restart(); - TsFileManager tsFileManager = new TsFileManager(sgName, "0", "target"); + TsFileManager tsFileManager = new TsFileManager(sgName, "0"); Set fullPath = new HashSet<>(); for (String device : fullPaths) { fullPath.add(sgName + device); @@ -1300,7 +1300,7 @@ public void test12() throws IOException, MetadataException, InterruptedException String sgName = COMPACTION_TEST_SG + "test12"; try { CompactionTaskManager.getInstance().restart(); - TsFileManager tsFileManager = new TsFileManager(sgName, "0", "target"); + TsFileManager tsFileManager = new 
TsFileManager(sgName, "0"); Set fullPath = new HashSet<>(); for (String device : fullPaths) { fullPath.add(sgName + device); @@ -1406,7 +1406,7 @@ public void test14() throws IOException, MetadataException, InterruptedException String sgName = COMPACTION_TEST_SG + "test13"; try { CompactionTaskManager.getInstance().restart(); - TsFileManager tsFileManager = new TsFileManager(sgName, "0", "target"); + TsFileManager tsFileManager = new TsFileManager(sgName, "0"); Set fullPath = new HashSet<>(); for (String device : fullPaths) { fullPath.add(sgName + device); @@ -1516,7 +1516,7 @@ public void test15() throws IOException, MetadataException, InterruptedException String sgName = COMPACTION_TEST_SG + "test14"; try { CompactionTaskManager.getInstance().restart(); - TsFileManager tsFileManager = new TsFileManager(sgName, "0", "target"); + TsFileManager tsFileManager = new TsFileManager(sgName, "0"); Set fullPath = new HashSet<>(); for (String device : fullPaths) { fullPath.add(sgName + device); @@ -1634,7 +1634,7 @@ public void test16() throws IOException, MetadataException, InterruptedException String sgName = COMPACTION_TEST_SG + "test16"; try { CompactionTaskManager.getInstance().restart(); - TsFileManager tsFileManager = new TsFileManager(sgName, "0", "target"); + TsFileManager tsFileManager = new TsFileManager(sgName, "0"); Set fullPath = new HashSet<>(); for (String device : fullPaths) { fullPath.add(sgName + device); @@ -1728,7 +1728,7 @@ public void testLargeFileInLowerLevel() throws Exception { String sgName = COMPACTION_TEST_SG + "test17"; try { CompactionTaskManager.getInstance().restart(); - TsFileManager tsFileManager = new TsFileManager(sgName, "0", "target"); + TsFileManager tsFileManager = new TsFileManager(sgName, "0"); Set fullPath = new HashSet<>(); for (String device : fullPaths) { fullPath.add(sgName + device); diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/CompactionTaskComparatorTest.java 
b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/CompactionTaskComparatorTest.java index 4878948ee91fc..5a61935a7fb99 100644 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/CompactionTaskComparatorTest.java +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/CompactionTaskComparatorTest.java @@ -63,7 +63,7 @@ public class CompactionTaskComparatorTest { private final Logger LOGGER = LoggerFactory.getLogger(CompactionTaskComparatorTest.class); private FixedPriorityBlockingQueue compactionTaskQueue = new FixedPriorityBlockingQueue<>(1024, new DefaultCompactionTaskComparatorImpl()); - private TsFileManager tsFileManager = new TsFileManager("fakeSg", "0", "/"); + private TsFileManager tsFileManager = new TsFileManager("fakeSg", "0"); @Before public void setUp() { @@ -314,7 +314,7 @@ public void testSerialId() throws InterruptedException { AbstractCompactionTask[] compactionTasks = new AbstractCompactionTask[100]; TsFileManager[] tsFileManagers = new TsFileManager[10]; for (int i = 0; i < 10; ++i) { - tsFileManagers[i] = new TsFileManager("fakeSg" + i, "0", "/"); + tsFileManagers[i] = new TsFileManager("fakeSg" + i, "0"); for (int j = 0; j < 10; ++j) { List resources = new ArrayList<>(); // the j th compaction task for i th sg diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/CompactionTaskManagerTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/CompactionTaskManagerTest.java index d9721646fa910..b673a46f840c2 100644 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/CompactionTaskManagerTest.java +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/CompactionTaskManagerTest.java @@ -74,8 +74,7 @@ public void tearDown() throws 
StorageEngineException, IOException { @Test public void testRepeatedSubmitBeforeExecution() throws Exception { LOGGER.warn("testRepeatedSubmitBeforeExecution"); - TsFileManager tsFileManager = - new TsFileManager("root.compactionTest", "0", tempSGDir.getAbsolutePath()); + TsFileManager tsFileManager = new TsFileManager("root.compactionTest", "0"); tsFileManager.addAll(seqResources, true); InnerSpaceCompactionTask task1 = new InnerSpaceCompactionTask(0, tsFileManager, seqResources, true, performer, 0); @@ -113,8 +112,7 @@ public void testRepeatedSubmitBeforeExecution() throws Exception { @Test public void testRepeatedSubmitWhenExecuting() throws Exception { LOGGER.warn("testRepeatedSubmitWhenExecuting"); - TsFileManager tsFileManager = - new TsFileManager("root.compactionTest", "0", tempSGDir.getAbsolutePath()); + TsFileManager tsFileManager = new TsFileManager("root.compactionTest", "0"); tsFileManager.addAll(seqResources, true); InnerSpaceCompactionTask task1 = new InnerSpaceCompactionTask(0, tsFileManager, seqResources, true, performer, 0); @@ -153,8 +151,7 @@ public void testRepeatedSubmitWhenExecuting() throws Exception { @Test public void testRepeatedSubmitAfterExecution() throws Exception { LOGGER.warn("testRepeatedSubmitAfterExecution"); - TsFileManager tsFileManager = - new TsFileManager("root.compactionTest", "0", tempSGDir.getAbsolutePath()); + TsFileManager tsFileManager = new TsFileManager("root.compactionTest", "0"); tsFileManager.addAll(seqResources, true); InnerSpaceCompactionTask task1 = new InnerSpaceCompactionTask(0, tsFileManager, seqResources, true, performer, 0); @@ -179,8 +176,7 @@ public void testRepeatedSubmitAfterExecution() throws Exception { @Test public void testRemoveSelfFromRunningList() throws Exception { LOGGER.warn("testRemoveSelfFromRunningList"); - TsFileManager tsFileManager = - new TsFileManager("root.compactionTest", "0", tempSGDir.getAbsolutePath()); + TsFileManager tsFileManager = new TsFileManager("root.compactionTest", 
"0"); tsFileManager.addAll(seqResources, true); InnerSpaceCompactionTask task1 = new InnerSpaceCompactionTask(0, tsFileManager, seqResources, true, performer, 0); @@ -208,8 +204,7 @@ public void testRemoveSelfFromRunningList() throws Exception { @Test public void testSizeTieredCompactionStatus() throws Exception { - TsFileManager tsFileManager = - new TsFileManager("root.compactionTest", "0", tempSGDir.getAbsolutePath()); + TsFileManager tsFileManager = new TsFileManager("root.compactionTest", "0"); tsFileManager.addAll(seqResources, true); InnerSpaceCompactionTask task = new InnerSpaceCompactionTask(0, tsFileManager, seqResources, true, performer, 0); @@ -230,8 +225,7 @@ public void testSizeTieredCompactionStatus() throws Exception { @Test public void testRewriteCrossCompactionFileStatus() throws Exception { - TsFileManager tsFileManager = - new TsFileManager("root.compactionTest", "0", tempSGDir.getAbsolutePath()); + TsFileManager tsFileManager = new TsFileManager("root.compactionTest", "0"); seqResources = seqResources.subList(1, 5); tsFileManager.addAll(seqResources, true); tsFileManager.addAll(unseqResources, false); diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/CompactionWithSevoTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/CompactionWithSevoTest.java new file mode 100644 index 0000000000000..4bc516a8efef6 --- /dev/null +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/CompactionWithSevoTest.java @@ -0,0 +1,768 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.db.storageengine.dataregion.compaction; + +import org.apache.iotdb.db.storageengine.dataregion.compaction.execute.performer.ICompactionPerformer; +import org.apache.iotdb.db.storageengine.dataregion.compaction.execute.performer.impl.FastCompactionPerformer; +import org.apache.iotdb.db.storageengine.dataregion.compaction.execute.performer.impl.ReadChunkCompactionPerformer; +import org.apache.iotdb.db.storageengine.dataregion.compaction.execute.performer.impl.ReadPointCompactionPerformer; +import org.apache.iotdb.db.storageengine.dataregion.compaction.execute.task.CompactionTaskSummary; +import org.apache.iotdb.db.storageengine.dataregion.compaction.execute.task.subtask.FastCompactionTaskSummary; +import org.apache.iotdb.db.storageengine.dataregion.compaction.utils.CompactionFileGeneratorUtils; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.ColumnRename; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.TableRename; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.fileset.TsFileSet; +import org.apache.iotdb.db.utils.EncryptDBUtils; +import org.apache.iotdb.db.utils.constant.TestConstant; + +import org.apache.tsfile.enums.ColumnCategory; +import org.apache.tsfile.enums.TSDataType; +import org.apache.tsfile.exception.write.NoMeasurementException; +import org.apache.tsfile.exception.write.NoTableException; +import org.apache.tsfile.file.metadata.ColumnSchemaBuilder; +import 
org.apache.tsfile.file.metadata.IDeviceID; +import org.apache.tsfile.file.metadata.IDeviceID.Factory; +import org.apache.tsfile.file.metadata.TableSchema; +import org.apache.tsfile.read.query.dataset.ResultSet; +import org.apache.tsfile.read.v4.ITsFileReader; +import org.apache.tsfile.read.v4.TsFileReaderBuilder; +import org.apache.tsfile.write.TsFileWriter; +import org.apache.tsfile.write.record.Tablet; +import org.junit.Test; + +import java.io.File; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.function.Function; +import java.util.function.Supplier; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +public class CompactionWithSevoTest extends AbstractCompactionTest { + + @Test + public void testReadChunkCompactionPerformer() throws Exception { + testInner( + targets -> + new ReadChunkCompactionPerformer( + seqResources, targets, EncryptDBUtils.getDefaultFirstEncryptParam()), + CompactionTaskSummary::new); + } + + @Test + public void testReadPointCompactionPerformerSeq() throws Exception { + testInner( + targets -> new ReadPointCompactionPerformer(seqResources, Collections.emptyList(), targets), + CompactionTaskSummary::new); + } + + @Test + public void testReadPointCompactionPerformerUnseq() throws Exception { + testInner( + targets -> new ReadPointCompactionPerformer(Collections.emptyList(), seqResources, targets), + CompactionTaskSummary::new); + } + + @Test + public void testReadPointCompactionPerformerCross() throws Exception { + testCross( + targets -> new ReadPointCompactionPerformer(seqResources, unseqResources, targets), + CompactionTaskSummary::new); + } + + @Test + public void testFastCompactionPerformerSeq() throws Exception { + testInner( + targets -> + new FastCompactionPerformer( + seqResources, + Collections.emptyList(), + targets, + 
EncryptDBUtils.getDefaultFirstEncryptParam()), + FastCompactionTaskSummary::new); + } + + @Test + public void testFastCompactionPerformerUnseq() throws Exception { + testInner( + targets -> + new FastCompactionPerformer( + Collections.emptyList(), + seqResources, + targets, + EncryptDBUtils.getDefaultFirstEncryptParam()), + FastCompactionTaskSummary::new); + } + + @Test + public void testFastCompactionPerformerCross() throws Exception { + testCross( + targets -> + new FastCompactionPerformer( + seqResources, + unseqResources, + targets, + EncryptDBUtils.getDefaultFirstEncryptParam()), + FastCompactionTaskSummary::new); + } + + private void genSourceFiles() throws Exception { + String fileSetDir = + TestConstant.BASE_OUTPUT_PATH + File.separator + TsFileSet.FILE_SET_DIR_NAME; + // seq-file1: + // table1[s1, s2, s3] + // table2[s1, s2, s3] + File seqf1 = new File(SEQ_DIRS, "0-1-0-0.tsfile"); + TableSchema tableSchema1_1 = + new TableSchema( + "table1", + Arrays.asList( + new ColumnSchemaBuilder() + .name("s1") + .dataType(TSDataType.INT32) + .category(ColumnCategory.FIELD) + .build(), + new ColumnSchemaBuilder() + .name("s2") + .dataType(TSDataType.INT32) + .category(ColumnCategory.FIELD) + .build(), + new ColumnSchemaBuilder() + .name("s3") + .dataType(TSDataType.INT32) + .category(ColumnCategory.FIELD) + .build())); + TableSchema tableSchema1_2 = + new TableSchema( + "table2", + Arrays.asList( + new ColumnSchemaBuilder() + .name("s1") + .dataType(TSDataType.INT32) + .category(ColumnCategory.FIELD) + .build(), + new ColumnSchemaBuilder() + .name("s2") + .dataType(TSDataType.INT32) + .category(ColumnCategory.FIELD) + .build(), + new ColumnSchemaBuilder() + .name("s3") + .dataType(TSDataType.INT32) + .category(ColumnCategory.FIELD) + .build())); + try (TsFileWriter tsFileWriter = new TsFileWriter(seqf1)) { + tsFileWriter.registerTableSchema(tableSchema1_1); + tsFileWriter.registerTableSchema(tableSchema1_2); + + Tablet tablet1 = new 
Tablet(tableSchema1_1.getTableName(), tableSchema1_1.getColumnSchemas()); + tablet1.addTimestamp(0, 0); + tablet1.addValue(0, 0, 1); + tablet1.addValue(0, 1, 2); + tablet1.addValue(0, 2, 3); + + Tablet tablet2 = new Tablet(tableSchema1_2.getTableName(), tableSchema1_2.getColumnSchemas()); + tablet2.addTimestamp(0, 0); + tablet2.addValue(0, 0, 101); + tablet2.addValue(0, 1, 102); + tablet2.addValue(0, 2, 103); + + tsFileWriter.writeTable(tablet1); + tsFileWriter.writeTable(tablet2); + } + TsFileResource resource1 = new TsFileResource(seqf1); + resource1.setTsFileManager(tsFileManager); + resource1.updateStartTime(Factory.DEFAULT_FACTORY.create(new String[] {"table1"}), 0); + resource1.updateEndTime(Factory.DEFAULT_FACTORY.create(new String[] {"table1"}), 0); + resource1.updateStartTime(Factory.DEFAULT_FACTORY.create(new String[] {"table2"}), 0); + resource1.updateEndTime(Factory.DEFAULT_FACTORY.create(new String[] {"table2"}), 0); + resource1.close(); + + // rename table1 -> table0 + TsFileSet tsFileSet1 = new TsFileSet(1, fileSetDir, false); + tsFileSet1.appendSchemaEvolution( + Collections.singletonList(new TableRename("table1", "table0"))); + tsFileManager.addTsFileSet(tsFileSet1, 0); + + // seq-file2: + // table0[s1, s2, s3] + // table2[s1, s2, s3] + File seqf2 = new File(SEQ_DIRS, "0-2-0-0.tsfile"); + TableSchema tableSchema2_1 = + new TableSchema( + "table0", + Arrays.asList( + new ColumnSchemaBuilder() + .name("s1") + .dataType(TSDataType.INT32) + .category(ColumnCategory.FIELD) + .build(), + new ColumnSchemaBuilder() + .name("s2") + .dataType(TSDataType.INT32) + .category(ColumnCategory.FIELD) + .build(), + new ColumnSchemaBuilder() + .name("s3") + .dataType(TSDataType.INT32) + .category(ColumnCategory.FIELD) + .build())); + TableSchema tableSchema2_2 = + new TableSchema( + "table2", + Arrays.asList( + new ColumnSchemaBuilder() + .name("s1") + .dataType(TSDataType.INT32) + .category(ColumnCategory.FIELD) + .build(), + new ColumnSchemaBuilder() + .name("s2") 
+ .dataType(TSDataType.INT32) + .category(ColumnCategory.FIELD) + .build(), + new ColumnSchemaBuilder() + .name("s3") + .dataType(TSDataType.INT32) + .category(ColumnCategory.FIELD) + .build())); + try (TsFileWriter tsFileWriter = new TsFileWriter(seqf2)) { + tsFileWriter.registerTableSchema(tableSchema2_1); + tsFileWriter.registerTableSchema(tableSchema2_2); + + Tablet tablet1 = new Tablet(tableSchema2_1.getTableName(), tableSchema2_1.getColumnSchemas()); + tablet1.addTimestamp(0, 1); + tablet1.addValue(0, 0, 11); + tablet1.addValue(0, 1, 12); + tablet1.addValue(0, 2, 13); + + Tablet tablet2 = new Tablet(tableSchema2_2.getTableName(), tableSchema2_2.getColumnSchemas()); + tablet2.addTimestamp(0, 1); + tablet2.addValue(0, 0, 111); + tablet2.addValue(0, 1, 112); + tablet2.addValue(0, 2, 113); + + tsFileWriter.writeTable(tablet1); + tsFileWriter.writeTable(tablet2); + } + TsFileResource resource2 = new TsFileResource(seqf2); + resource2.setTsFileManager(tsFileManager); + resource2.updateStartTime(Factory.DEFAULT_FACTORY.create(new String[] {"table0"}), 1); + resource2.updateEndTime(Factory.DEFAULT_FACTORY.create(new String[] {"table0"}), 1); + resource2.updateStartTime(Factory.DEFAULT_FACTORY.create(new String[] {"table2"}), 1); + resource2.updateEndTime(Factory.DEFAULT_FACTORY.create(new String[] {"table2"}), 1); + resource2.close(); + + // rename table0.s1 -> table0.s0 + TsFileSet tsFileSet2 = new TsFileSet(2, fileSetDir, false); + tsFileSet2.appendSchemaEvolution( + Collections.singletonList(new ColumnRename("table0", "s1", "s0"))); + tsFileManager.addTsFileSet(tsFileSet2, 0); + + // seq-file3: + // table0[s0, s2, s3] + // table2[s1, s2, s3] + File seqf3 = new File(SEQ_DIRS, "0-3-0-0.tsfile"); + TableSchema tableSchema3_1 = + new TableSchema( + "table0", + Arrays.asList( + new ColumnSchemaBuilder() + .name("s0") + .dataType(TSDataType.INT32) + .category(ColumnCategory.FIELD) + .build(), + new ColumnSchemaBuilder() + .name("s2") + .dataType(TSDataType.INT32) + 
.category(ColumnCategory.FIELD) + .build(), + new ColumnSchemaBuilder() + .name("s3") + .dataType(TSDataType.INT32) + .category(ColumnCategory.FIELD) + .build())); + TableSchema tableSchema3_2 = + new TableSchema( + "table2", + Arrays.asList( + new ColumnSchemaBuilder() + .name("s1") + .dataType(TSDataType.INT32) + .category(ColumnCategory.FIELD) + .build(), + new ColumnSchemaBuilder() + .name("s2") + .dataType(TSDataType.INT32) + .category(ColumnCategory.FIELD) + .build(), + new ColumnSchemaBuilder() + .name("s3") + .dataType(TSDataType.INT32) + .category(ColumnCategory.FIELD) + .build())); + try (TsFileWriter tsFileWriter = new TsFileWriter(seqf3)) { + tsFileWriter.registerTableSchema(tableSchema3_1); + tsFileWriter.registerTableSchema(tableSchema3_2); + + Tablet tablet1 = new Tablet(tableSchema3_1.getTableName(), tableSchema3_1.getColumnSchemas()); + tablet1.addTimestamp(0, 2); + tablet1.addValue(0, 0, 21); + tablet1.addValue(0, 1, 22); + tablet1.addValue(0, 2, 23); + + Tablet tablet2 = new Tablet(tableSchema3_2.getTableName(), tableSchema3_2.getColumnSchemas()); + tablet2.addTimestamp(0, 2); + tablet2.addValue(0, 0, 121); + tablet2.addValue(0, 1, 122); + tablet2.addValue(0, 2, 123); + + tsFileWriter.writeTable(tablet1); + tsFileWriter.writeTable(tablet2); + } + TsFileResource resource3 = new TsFileResource(seqf3); + resource3.setTsFileManager(tsFileManager); + resource3.updateStartTime(Factory.DEFAULT_FACTORY.create(new String[] {"table0"}), 2); + resource3.updateEndTime(Factory.DEFAULT_FACTORY.create(new String[] {"table0"}), 2); + resource3.updateStartTime(Factory.DEFAULT_FACTORY.create(new String[] {"table2"}), 2); + resource3.updateEndTime(Factory.DEFAULT_FACTORY.create(new String[] {"table2"}), 2); + resource3.close(); + + // rename table2 -> table1 + TsFileSet tsFileSet3 = new TsFileSet(3, fileSetDir, false); + tsFileSet3.appendSchemaEvolution( + Collections.singletonList(new TableRename("table2", "table1"))); + tsFileManager.addTsFileSet(tsFileSet3, 0); 
+ + seqResources.add(resource1); + seqResources.add(resource2); + seqResources.add(resource3); + + // unseq-file4: + // table0[s0, s2, s3] + // table1[s1, s2, s3] + File unseqf4 = new File(UNSEQ_DIRS, "0-4-0-0.tsfile"); + TableSchema tableSchema4_1 = + new TableSchema( + "table0", + Arrays.asList( + new ColumnSchemaBuilder() + .name("s0") + .dataType(TSDataType.INT32) + .category(ColumnCategory.FIELD) + .build(), + new ColumnSchemaBuilder() + .name("s2") + .dataType(TSDataType.INT32) + .category(ColumnCategory.FIELD) + .build(), + new ColumnSchemaBuilder() + .name("s3") + .dataType(TSDataType.INT32) + .category(ColumnCategory.FIELD) + .build())); + TableSchema tableSchema4_2 = + new TableSchema( + "table1", + Arrays.asList( + new ColumnSchemaBuilder() + .name("s1") + .dataType(TSDataType.INT32) + .category(ColumnCategory.FIELD) + .build(), + new ColumnSchemaBuilder() + .name("s2") + .dataType(TSDataType.INT32) + .category(ColumnCategory.FIELD) + .build(), + new ColumnSchemaBuilder() + .name("s3") + .dataType(TSDataType.INT32) + .category(ColumnCategory.FIELD) + .build())); + try (TsFileWriter tsFileWriter = new TsFileWriter(unseqf4)) { + tsFileWriter.registerTableSchema(tableSchema4_1); + tsFileWriter.registerTableSchema(tableSchema4_2); + + Tablet tablet1 = new Tablet(tableSchema4_1.getTableName(), tableSchema4_1.getColumnSchemas()); + tablet1.addTimestamp(0, 1); + tablet1.addValue(0, 0, 1011); + tablet1.addValue(0, 1, 1012); + tablet1.addValue(0, 2, 1013); + + Tablet tablet2 = new Tablet(tableSchema4_2.getTableName(), tableSchema4_2.getColumnSchemas()); + tablet2.addTimestamp(0, 1); + tablet2.addValue(0, 0, 1111); + tablet2.addValue(0, 1, 1112); + tablet2.addValue(0, 2, 1113); + + tsFileWriter.writeTable(tablet1); + tsFileWriter.writeTable(tablet2); + } + TsFileResource resource4 = new TsFileResource(unseqf4); + resource4.setTsFileManager(tsFileManager); + resource4.updateStartTime(Factory.DEFAULT_FACTORY.create(new String[] {"table0"}), 1); + 
resource4.updateEndTime(Factory.DEFAULT_FACTORY.create(new String[] {"table0"}), 1); + resource4.updateStartTime(Factory.DEFAULT_FACTORY.create(new String[] {"table1"}), 1); + resource4.updateEndTime(Factory.DEFAULT_FACTORY.create(new String[] {"table1"}), 1); + resource4.close(); + unseqResources.add(resource4); + } + + private void testCross( + Function, ICompactionPerformer> compactionPerformerFunction, + Supplier summarySupplier) + throws Exception { + genSourceFiles(); + List targetResources; + ICompactionPerformer performer; + + targetResources = + CompactionFileGeneratorUtils.getCrossCompactionTargetTsFileResources(seqResources); + targetResources.forEach(s -> s.setTsFileManager(tsFileManager)); + + performer = compactionPerformerFunction.apply(targetResources); + performer.setSummary(summarySupplier.get()); + performer.perform(); + + // target(version=1): + // table1[s1, s2, s3] + // table2[s1, s2, s3] + try (ITsFileReader tsFileReader = + new TsFileReaderBuilder().file(targetResources.get(0).getTsFile()).build()) { + // table0 should not exist + try { + tsFileReader.query( + "table0", Collections.singletonList("s2"), Long.MIN_VALUE, Long.MAX_VALUE); + fail("table0 should not exist"); + } catch (NoTableException e) { + assertEquals("Table table0 not found", e.getMessage()); + } + + // table1.s0 should not exist + try { + tsFileReader.query( + "table1", Collections.singletonList("s0"), Long.MIN_VALUE, Long.MAX_VALUE); + fail("table1.s0 should not exist"); + } catch (NoMeasurementException e) { + assertEquals("No measurement for s0", e.getMessage()); + } + + // check data of table1 + ResultSet resultSet = + tsFileReader.query( + "table1", Arrays.asList("s1", "s2", "s3"), Long.MIN_VALUE, Long.MAX_VALUE); + assertTrue(resultSet.next()); + assertEquals(0, resultSet.getLong(1)); + for (int j = 0; j < 3; j++) { + assertEquals(j + 1, resultSet.getLong(j + 2)); + } + + // check data of table2 + resultSet = + tsFileReader.query( + "table2", Arrays.asList("s1", "s2", 
"s3"), Long.MIN_VALUE, Long.MAX_VALUE); + assertTrue(resultSet.next()); + assertEquals(0, resultSet.getLong(1)); + for (int j = 0; j < 3; j++) { + assertEquals(100 + j + 1, resultSet.getLong(j + 2)); + } + } + + // target(version=2): + // table0[s1, s2, s3] + // table2[s1, s2, s3] + try (ITsFileReader tsFileReader = + new TsFileReaderBuilder().file(targetResources.get(1).getTsFile()).build()) { + // table1 should not exist + try { + tsFileReader.query( + "table1", Collections.singletonList("s2"), Long.MIN_VALUE, Long.MAX_VALUE); + fail("table1 should not exist"); + } catch (NoTableException e) { + assertEquals("Table table1 not found", e.getMessage()); + } + + // table0.s0 should not exist + try { + tsFileReader.query( + "table0", Collections.singletonList("s0"), Long.MIN_VALUE, Long.MAX_VALUE); + fail("table0.s0 should not exist"); + } catch (NoMeasurementException e) { + assertEquals("No measurement for s0", e.getMessage()); + } + + // check data of table0 + ResultSet resultSet = + tsFileReader.query( + "table0", Arrays.asList("s1", "s2", "s3"), Long.MIN_VALUE, Long.MAX_VALUE); + assertTrue(resultSet.next()); + assertEquals(1, resultSet.getLong(1)); + for (int j = 0; j < 3; j++) { + assertEquals(1010 + j + 1, resultSet.getLong(j + 2)); + } + + // check data of table2 + resultSet = + tsFileReader.query( + "table2", Arrays.asList("s1", "s2", "s3"), Long.MIN_VALUE, Long.MAX_VALUE); + assertTrue(resultSet.next()); + assertEquals(1, resultSet.getLong(1)); + for (int j = 0; j < 3; j++) { + assertEquals(1110 + j + 1, resultSet.getLong(j + 2)); + } + } + + // target(version=2): + // table0[s0, s2, s3] + // table2[s1, s2, s3] + try (ITsFileReader tsFileReader = + new TsFileReaderBuilder().file(targetResources.get(2).getTsFile()).build()) { + // table1 should not exist + try { + tsFileReader.query( + "table1", Collections.singletonList("s2"), Long.MIN_VALUE, Long.MAX_VALUE); + fail("table1 should not exist"); + } catch (NoTableException e) { + assertEquals("Table table1 
not found", e.getMessage()); + } + + // table0.s1 should not exist + try { + tsFileReader.query( + "table0", Collections.singletonList("s1"), Long.MIN_VALUE, Long.MAX_VALUE); + fail("table0.s0 should not exist"); + } catch (NoMeasurementException e) { + assertEquals("No measurement for s1", e.getMessage()); + } + + // check data of table0 + ResultSet resultSet = + tsFileReader.query( + "table0", Arrays.asList("s0", "s2", "s3"), Long.MIN_VALUE, Long.MAX_VALUE); + assertTrue(resultSet.next()); + assertEquals(2, resultSet.getLong(1)); + for (int j = 0; j < 3; j++) { + assertEquals(20 + j + 1, resultSet.getLong(j + 2)); + } + + // check data of table2 + resultSet = + tsFileReader.query( + "table2", Arrays.asList("s1", "s2", "s3"), Long.MIN_VALUE, Long.MAX_VALUE); + assertTrue(resultSet.next()); + assertEquals(2, resultSet.getLong(1)); + for (int j = 0; j < 3; j++) { + assertEquals(100 + 20 + j + 1, resultSet.getLong(j + 2)); + } + } + } + + private void testInner( + Function, ICompactionPerformer> compactionPerformerFunction, + Supplier summarySupplier) + throws Exception { + genSourceFiles(); + List targetResources; + ICompactionPerformer performer; + + // target(version=1): + // table1[s1, s2, s3] + // table2[s1, s2, s3] + targetResources = + CompactionFileGeneratorUtils.getInnerCompactionTargetTsFileResources(seqResources, true); + targetResources.forEach(s -> s.setTsFileManager(tsFileManager)); + + performer = compactionPerformerFunction.apply(targetResources); + performer.setSummary(summarySupplier.get()); + performer.perform(); + + Set devices = targetResources.get(0).getDevices(); + Set expectedDevices = new HashSet<>(); + expectedDevices.add(Factory.DEFAULT_FACTORY.create(new String[] {"table1"})); + expectedDevices.add(Factory.DEFAULT_FACTORY.create(new String[] {"table2"})); + assertEquals(expectedDevices, devices); + + try (ITsFileReader tsFileReader = + new TsFileReaderBuilder().file(targetResources.get(0).getTsFile()).build()) { + // table0 should not 
exist + try { + tsFileReader.query( + "table0", Collections.singletonList("s2"), Long.MIN_VALUE, Long.MAX_VALUE); + fail("table0 should not exist"); + } catch (NoTableException e) { + assertEquals("Table table0 not found", e.getMessage()); + } + + // table1.s0 should not exist + try { + tsFileReader.query( + "table1", Collections.singletonList("s0"), Long.MIN_VALUE, Long.MAX_VALUE); + fail("table1.s0 should not exist"); + } catch (NoMeasurementException e) { + assertEquals("No measurement for s0", e.getMessage()); + } + + // check data of table1 + ResultSet resultSet = + tsFileReader.query( + "table1", Arrays.asList("s1", "s2", "s3"), Long.MIN_VALUE, Long.MAX_VALUE); + for (int i = 0; i < 3; i++) { + assertTrue(resultSet.next()); + assertEquals(i, resultSet.getLong(1)); + for (int j = 0; j < 3; j++) { + assertEquals(i * 10 + j + 1, resultSet.getLong(j + 2)); + } + } + + // check data of table2 + resultSet = + tsFileReader.query( + "table2", Arrays.asList("s1", "s2", "s3"), Long.MIN_VALUE, Long.MAX_VALUE); + for (int i = 0; i < 3; i++) { + assertTrue(resultSet.next()); + assertEquals(i, resultSet.getLong(1)); + for (int j = 0; j < 3; j++) { + assertEquals(100 + i * 10 + j + 1, resultSet.getLong(j + 2)); + } + } + } + + // target(version=2): + // table0[s1, s2, s3] + // table2[s1, s2, s3] + targetResources = + CompactionFileGeneratorUtils.getInnerCompactionTargetTsFileResources( + seqResources.subList(1, seqResources.size()), true); + targetResources.forEach(s -> s.setTsFileManager(tsFileManager)); + + performer = compactionPerformerFunction.apply(targetResources); + performer.setSummary(summarySupplier.get()); + performer.perform(); + + try (ITsFileReader tsFileReader = + new TsFileReaderBuilder().file(targetResources.get(0).getTsFile()).build()) { + // table1 should not exist + try { + tsFileReader.query( + "table1", Collections.singletonList("s2"), Long.MIN_VALUE, Long.MAX_VALUE); + fail("table1 should not exist"); + } catch (NoTableException e) { + 
assertEquals("Table table1 not found", e.getMessage()); + } + + // table0.s0 should not exist + try { + tsFileReader.query( + "table0", Collections.singletonList("s0"), Long.MIN_VALUE, Long.MAX_VALUE); + fail("table0.s0 should not exist"); + } catch (NoMeasurementException e) { + assertEquals("No measurement for s0", e.getMessage()); + } + + // check data of table0 + ResultSet resultSet = + tsFileReader.query( + "table0", Arrays.asList("s1", "s2", "s3"), Long.MIN_VALUE, Long.MAX_VALUE); + for (int i = 0; i < 3; i++) { + assertTrue(resultSet.next()); + assertEquals(i, resultSet.getLong(1)); + for (int j = 0; j < 3; j++) { + assertEquals(i * 10 + j + 1, resultSet.getLong(j + 2)); + } + } + + // check data of table2 + resultSet = + tsFileReader.query( + "table2", Arrays.asList("s1", "s2", "s3"), Long.MIN_VALUE, Long.MAX_VALUE); + for (int i = 0; i < 3; i++) { + assertTrue(resultSet.next()); + assertEquals(i, resultSet.getLong(1)); + for (int j = 0; j < 3; j++) { + assertEquals(100 + i * 10 + j + 1, resultSet.getLong(j + 2)); + } + } + } + + // target(version=2): + // table0[s0, s2, s3] + // table2[s1, s2, s3] + targetResources = + CompactionFileGeneratorUtils.getInnerCompactionTargetTsFileResources( + seqResources.subList(2, seqResources.size()), true); + targetResources.forEach(s -> s.setTsFileManager(tsFileManager)); + + performer = compactionPerformerFunction.apply(targetResources); + performer.setSummary(summarySupplier.get()); + performer.perform(); + + try (ITsFileReader tsFileReader = + new TsFileReaderBuilder().file(targetResources.get(0).getTsFile()).build()) { + // table1 should not exist + try { + tsFileReader.query( + "table1", Collections.singletonList("s2"), Long.MIN_VALUE, Long.MAX_VALUE); + fail("table1 should not exist"); + } catch (NoTableException e) { + assertEquals("Table table1 not found", e.getMessage()); + } + + // table0.s1 should not exist + try { + tsFileReader.query( + "table0", Collections.singletonList("s1"), Long.MIN_VALUE, 
Long.MAX_VALUE); + fail("table0.s0 should not exist"); + } catch (NoMeasurementException e) { + assertEquals("No measurement for s1", e.getMessage()); + } + + // check data of table0 + ResultSet resultSet = + tsFileReader.query( + "table0", Arrays.asList("s0", "s2", "s3"), Long.MIN_VALUE, Long.MAX_VALUE); + for (int i = 0; i < 3; i++) { + assertTrue(resultSet.next()); + assertEquals(i, resultSet.getLong(1)); + for (int j = 0; j < 3; j++) { + assertEquals(i * 10 + j + 1, resultSet.getLong(j + 2)); + } + } + + // check data of table2 + resultSet = + tsFileReader.query( + "table2", Arrays.asList("s1", "s2", "s3"), Long.MIN_VALUE, Long.MAX_VALUE); + for (int i = 0; i < 3; i++) { + assertTrue(resultSet.next()); + assertEquals(i, resultSet.getLong(1)); + for (int j = 0; j < 3; j++) { + assertEquals(100 + i * 10 + j + 1, resultSet.getLong(j + 2)); + } + } + } + } +} diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/CompactionWorkerTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/CompactionWorkerTest.java index 11121d5bc015c..47b5fb45f8620 100644 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/CompactionWorkerTest.java +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/CompactionWorkerTest.java @@ -145,7 +145,7 @@ public void testFailedToAllocateFileNumInCrossTask() throws InterruptedException TsFileResourceStatus.COMPACTION_CANDIDATE)); } - TsFileManager tsFileManager = new TsFileManager("root.testsg", "0", ""); + TsFileManager tsFileManager = new TsFileManager("root.testsg", "0"); tsFileManager.addAll(sequenceFiles, true); tsFileManager.addAll(unsequenceFiles, false); CrossSpaceCompactionTask task = diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/ReadPointCompactionPerformerTest.java 
b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/ReadPointCompactionPerformerTest.java index f21571ce4f87d..b74bcec8f964c 100644 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/ReadPointCompactionPerformerTest.java +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/ReadPointCompactionPerformerTest.java @@ -37,18 +37,32 @@ import org.apache.iotdb.db.storageengine.dataregion.read.control.FileReaderManager; import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource; import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResourceStatus; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.ColumnRename; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution.TableRename; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.fileset.TsFileSet; import org.apache.iotdb.db.utils.EnvironmentUtils; +import org.apache.iotdb.db.utils.constant.TestConstant; import org.apache.tsfile.common.conf.TSFileDescriptor; +import org.apache.tsfile.enums.ColumnCategory; import org.apache.tsfile.enums.TSDataType; +import org.apache.tsfile.exception.write.NoMeasurementException; +import org.apache.tsfile.exception.write.NoTableException; import org.apache.tsfile.exception.write.WriteProcessException; +import org.apache.tsfile.file.metadata.ColumnSchemaBuilder; import org.apache.tsfile.file.metadata.IDeviceID; import org.apache.tsfile.file.metadata.IDeviceID.Factory; +import org.apache.tsfile.file.metadata.TableSchema; import org.apache.tsfile.read.common.IBatchDataIterator; import org.apache.tsfile.read.common.block.TsBlock; +import org.apache.tsfile.read.query.dataset.ResultSet; +import org.apache.tsfile.read.v4.ITsFileReader; +import org.apache.tsfile.read.v4.TsFileReaderBuilder; import org.apache.tsfile.utils.Pair; import org.apache.tsfile.utils.TsFileGeneratorUtils; import 
org.apache.tsfile.utils.TsPrimitiveType; +import org.apache.tsfile.write.TsFileWriter; +import org.apache.tsfile.write.record.Tablet; import org.apache.tsfile.write.schema.IMeasurementSchema; import org.apache.tsfile.write.schema.MeasurementSchema; import org.junit.After; @@ -56,8 +70,10 @@ import org.junit.Before; import org.junit.Test; +import java.io.File; import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.List; @@ -66,6 +82,8 @@ import static org.apache.iotdb.commons.conf.IoTDBConstant.PATH_SEPARATOR; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; @SuppressWarnings("OptionalGetWithoutIsPresent") public class ReadPointCompactionPerformerTest extends AbstractCompactionTest { @@ -6993,4 +7011,296 @@ public void testCrossSpaceCompactionWithDeviceMaxTimeLaterInUnseqFile() Assert.fail(); } } + + @Test + public void testWithSevoFile() throws Exception { + String fileSetDir = + TestConstant.BASE_OUTPUT_PATH + File.separator + TsFileSet.FILE_SET_DIR_NAME; + // file1: + // table1[s1, s2, s3] + // table2[s1, s2, s3] + File f1 = new File(SEQ_DIRS, "0-1-0-0.tsfile"); + TableSchema tableSchema1_1 = + new TableSchema( + "table1", + Arrays.asList( + new ColumnSchemaBuilder() + .name("s1") + .dataType(TSDataType.INT32) + .category(ColumnCategory.FIELD) + .build(), + new ColumnSchemaBuilder() + .name("s2") + .dataType(TSDataType.INT32) + .category(ColumnCategory.FIELD) + .build(), + new ColumnSchemaBuilder() + .name("s3") + .dataType(TSDataType.INT32) + .category(ColumnCategory.FIELD) + .build())); + TableSchema tableSchema1_2 = + new TableSchema( + "table2", + Arrays.asList( + new ColumnSchemaBuilder() + .name("s1") + .dataType(TSDataType.INT32) + .category(ColumnCategory.FIELD) + .build(), + new ColumnSchemaBuilder() + .name("s2") + .dataType(TSDataType.INT32) + .category(ColumnCategory.FIELD) + 
.build(), + new ColumnSchemaBuilder() + .name("s3") + .dataType(TSDataType.INT32) + .category(ColumnCategory.FIELD) + .build())); + try (TsFileWriter tsFileWriter = new TsFileWriter(f1)) { + tsFileWriter.registerTableSchema(tableSchema1_1); + tsFileWriter.registerTableSchema(tableSchema1_2); + + Tablet tablet1 = new Tablet(tableSchema1_1.getTableName(), tableSchema1_1.getColumnSchemas()); + tablet1.addTimestamp(0, 0); + tablet1.addValue(0, 0, 1); + tablet1.addValue(0, 1, 2); + tablet1.addValue(0, 2, 3); + + Tablet tablet2 = new Tablet(tableSchema1_2.getTableName(), tableSchema1_2.getColumnSchemas()); + tablet2.addTimestamp(0, 0); + tablet2.addValue(0, 0, 101); + tablet2.addValue(0, 1, 102); + tablet2.addValue(0, 2, 103); + + tsFileWriter.writeTable(tablet1); + tsFileWriter.writeTable(tablet2); + } + TsFileResource resource1 = new TsFileResource(f1); + resource1.setTsFileManager(tsFileManager); + resource1.updateStartTime(Factory.DEFAULT_FACTORY.create(new String[] {"table1"}), 0); + resource1.updateEndTime(Factory.DEFAULT_FACTORY.create(new String[] {"table1"}), 0); + resource1.updateStartTime(Factory.DEFAULT_FACTORY.create(new String[] {"table2"}), 0); + resource1.updateEndTime(Factory.DEFAULT_FACTORY.create(new String[] {"table2"}), 0); + resource1.close(); + + // rename table1 -> table0 + TsFileSet tsFileSet1 = new TsFileSet(1, fileSetDir, false); + tsFileSet1.appendSchemaEvolution( + Collections.singletonList(new TableRename("table1", "table0"))); + tsFileManager.addTsFileSet(tsFileSet1, 0); + + // file2: + // table0[s1, s2, s3] + // table2[s1, s2, s3] + File f2 = new File(SEQ_DIRS, "0-2-0-0.tsfile"); + TableSchema tableSchema2_1 = + new TableSchema( + "table0", + Arrays.asList( + new ColumnSchemaBuilder() + .name("s1") + .dataType(TSDataType.INT32) + .category(ColumnCategory.FIELD) + .build(), + new ColumnSchemaBuilder() + .name("s2") + .dataType(TSDataType.INT32) + .category(ColumnCategory.FIELD) + .build(), + new ColumnSchemaBuilder() + .name("s3") + 
.dataType(TSDataType.INT32) + .category(ColumnCategory.FIELD) + .build())); + TableSchema tableSchema2_2 = + new TableSchema( + "table2", + Arrays.asList( + new ColumnSchemaBuilder() + .name("s1") + .dataType(TSDataType.INT32) + .category(ColumnCategory.FIELD) + .build(), + new ColumnSchemaBuilder() + .name("s2") + .dataType(TSDataType.INT32) + .category(ColumnCategory.FIELD) + .build(), + new ColumnSchemaBuilder() + .name("s3") + .dataType(TSDataType.INT32) + .category(ColumnCategory.FIELD) + .build())); + try (TsFileWriter tsFileWriter = new TsFileWriter(f2)) { + tsFileWriter.registerTableSchema(tableSchema2_1); + tsFileWriter.registerTableSchema(tableSchema2_2); + + Tablet tablet1 = new Tablet(tableSchema2_1.getTableName(), tableSchema2_1.getColumnSchemas()); + tablet1.addTimestamp(0, 1); + tablet1.addValue(0, 0, 11); + tablet1.addValue(0, 1, 12); + tablet1.addValue(0, 2, 13); + + Tablet tablet2 = new Tablet(tableSchema2_2.getTableName(), tableSchema2_2.getColumnSchemas()); + tablet2.addTimestamp(0, 1); + tablet2.addValue(0, 0, 111); + tablet2.addValue(0, 1, 112); + tablet2.addValue(0, 2, 113); + + tsFileWriter.writeTable(tablet1); + tsFileWriter.writeTable(tablet2); + } + TsFileResource resource2 = new TsFileResource(f2); + resource2.setTsFileManager(tsFileManager); + resource2.updateStartTime(Factory.DEFAULT_FACTORY.create(new String[] {"table0"}), 1); + resource2.updateEndTime(Factory.DEFAULT_FACTORY.create(new String[] {"table0"}), 1); + resource2.updateStartTime(Factory.DEFAULT_FACTORY.create(new String[] {"table2"}), 1); + resource2.updateEndTime(Factory.DEFAULT_FACTORY.create(new String[] {"table2"}), 1); + resource2.close(); + + // rename table0.s1 -> table0.s0 + TsFileSet tsFileSet2 = new TsFileSet(2, fileSetDir, false); + tsFileSet2.appendSchemaEvolution( + Collections.singletonList(new ColumnRename("table0", "s1", "s0"))); + tsFileManager.addTsFileSet(tsFileSet2, 0); + + // file3: + // table0[s0, s2, s3] + // table2[s1, s2, s3] + File f3 = new 
File(SEQ_DIRS, "0-3-0-0.tsfile"); + TableSchema tableSchema3_1 = + new TableSchema( + "table0", + Arrays.asList( + new ColumnSchemaBuilder() + .name("s0") + .dataType(TSDataType.INT32) + .category(ColumnCategory.FIELD) + .build(), + new ColumnSchemaBuilder() + .name("s2") + .dataType(TSDataType.INT32) + .category(ColumnCategory.FIELD) + .build(), + new ColumnSchemaBuilder() + .name("s3") + .dataType(TSDataType.INT32) + .category(ColumnCategory.FIELD) + .build())); + TableSchema tableSchema3_2 = + new TableSchema( + "table2", + Arrays.asList( + new ColumnSchemaBuilder() + .name("s1") + .dataType(TSDataType.INT32) + .category(ColumnCategory.FIELD) + .build(), + new ColumnSchemaBuilder() + .name("s2") + .dataType(TSDataType.INT32) + .category(ColumnCategory.FIELD) + .build(), + new ColumnSchemaBuilder() + .name("s3") + .dataType(TSDataType.INT32) + .category(ColumnCategory.FIELD) + .build())); + try (TsFileWriter tsFileWriter = new TsFileWriter(f3)) { + tsFileWriter.registerTableSchema(tableSchema3_1); + tsFileWriter.registerTableSchema(tableSchema3_2); + + Tablet tablet1 = new Tablet(tableSchema3_1.getTableName(), tableSchema3_1.getColumnSchemas()); + tablet1.addTimestamp(0, 2); + tablet1.addValue(0, 0, 21); + tablet1.addValue(0, 1, 22); + tablet1.addValue(0, 2, 23); + + Tablet tablet2 = new Tablet(tableSchema3_2.getTableName(), tableSchema3_2.getColumnSchemas()); + tablet2.addTimestamp(0, 2); + tablet2.addValue(0, 0, 121); + tablet2.addValue(0, 1, 122); + tablet2.addValue(0, 2, 123); + + tsFileWriter.writeTable(tablet1); + tsFileWriter.writeTable(tablet2); + } + TsFileResource resource3 = new TsFileResource(f3); + resource3.setTsFileManager(tsFileManager); + resource3.updateStartTime(Factory.DEFAULT_FACTORY.create(new String[] {"table0"}), 2); + resource3.updateEndTime(Factory.DEFAULT_FACTORY.create(new String[] {"table0"}), 2); + resource3.updateStartTime(Factory.DEFAULT_FACTORY.create(new String[] {"table2"}), 2); + 
resource3.updateEndTime(Factory.DEFAULT_FACTORY.create(new String[] {"table2"}), 2); + resource3.close(); + + // rename table2 -> table1 + TsFileSet tsFileSet3 = new TsFileSet(3, fileSetDir, false); + tsFileSet3.appendSchemaEvolution( + Collections.singletonList(new TableRename("table2", "table1"))); + tsFileManager.addTsFileSet(tsFileSet3, 0); + + // perform compaction + seqResources.add(resource1); + seqResources.add(resource2); + seqResources.add(resource3); + + List targetResources = + CompactionFileGeneratorUtils.getInnerCompactionTargetTsFileResources(seqResources, true); + targetResources.forEach(s -> s.setTsFileManager(tsFileManager)); + + ICompactionPerformer performer = + new ReadPointCompactionPerformer(seqResources, unseqResources, targetResources); + performer.setSummary(new CompactionTaskSummary()); + performer.perform(); + + // target(version=1): + // table1[s1, s2, s3] + // table2[s1, s2, s3] + try (ITsFileReader tsFileReader = + new TsFileReaderBuilder().file(targetResources.get(0).getTsFile()).build()) { + // table1 should not exist + try { + tsFileReader.query( + "table0", Collections.singletonList("s2"), Long.MIN_VALUE, Long.MAX_VALUE); + fail("table0 should not exist"); + } catch (NoTableException e) { + assertEquals("Table table0 not found", e.getMessage()); + } + + // table1.s0 should not exist + try { + tsFileReader.query( + "table1", Collections.singletonList("s0"), Long.MIN_VALUE, Long.MAX_VALUE); + fail("table1.s0 should not exist"); + } catch (NoMeasurementException e) { + assertEquals("No measurement for s0", e.getMessage()); + } + + // check data of table1 + ResultSet resultSet = + tsFileReader.query( + "table1", Arrays.asList("s1", "s2", "s3"), Long.MIN_VALUE, Long.MAX_VALUE); + for (int i = 0; i < 3; i++) { + assertTrue(resultSet.next()); + assertEquals(i, resultSet.getLong(1)); + for (int j = 0; j < 3; j++) { + assertEquals(i * 10 + j + 1, resultSet.getLong(j + 2)); + } + } + + // check data of table2 + resultSet = + 
tsFileReader.query( + "table2", Arrays.asList("s1", "s2", "s3"), Long.MIN_VALUE, Long.MAX_VALUE); + for (int i = 0; i < 3; i++) { + assertTrue(resultSet.next()); + assertEquals(i, resultSet.getLong(1)); + for (int j = 0; j < 3; j++) { + assertEquals(100 + i * 10 + j + 1, resultSet.getLong(j + 2)); + } + } + } + } } diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/cross/CrossSpaceCompactionExceptionTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/cross/CrossSpaceCompactionExceptionTest.java index 6e5ef5aca14ff..34c038d1fb70b 100644 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/cross/CrossSpaceCompactionExceptionTest.java +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/cross/CrossSpaceCompactionExceptionTest.java @@ -94,7 +94,7 @@ public void testHandleWithAllSourceFilesExisted() throws Exception { createFiles(2, 4, 5, 300, 700, 700, 50, 50, false, true); createFiles(3, 3, 4, 200, 20, 10020, 30, 30, false, false); createFiles(2, 1, 5, 100, 450, 20450, 0, 0, false, false); - TsFileManager tsFileManager = new TsFileManager(COMPACTION_TEST_SG, "0", SEQ_DIRS.getPath()); + TsFileManager tsFileManager = new TsFileManager(COMPACTION_TEST_SG, "0"); tsFileManager.addAll(seqResources, true); tsFileManager.addAll(unseqResources, false); @@ -168,7 +168,7 @@ public void testHandleWithAllSourceFilesExistedAndTargetFilesMoved() throws Exce createFiles(2, 4, 5, 300, 700, 700, 50, 50, false, true); createFiles(3, 3, 4, 200, 20, 10020, 30, 30, false, false); createFiles(2, 1, 5, 100, 450, 20450, 0, 0, false, false); - TsFileManager tsFileManager = new TsFileManager(COMPACTION_TEST_SG, "0", SEQ_DIRS.getPath()); + TsFileManager tsFileManager = new TsFileManager(COMPACTION_TEST_SG, "0"); tsFileManager.addAll(seqResources, true); tsFileManager.addAll(unseqResources, false); @@ -243,7 
+243,7 @@ public void testHandleWithSomeSourceFilesExisted() throws Exception { createFiles(2, 4, 5, 300, 700, 700, 50, 50, false, true); createFiles(3, 3, 4, 200, 20, 10020, 30, 30, false, false); createFiles(2, 1, 5, 100, 450, 20450, 0, 0, false, false); - TsFileManager tsFileManager = new TsFileManager(COMPACTION_TEST_SG, "0", SEQ_DIRS.getPath()); + TsFileManager tsFileManager = new TsFileManager(COMPACTION_TEST_SG, "0"); tsFileManager.addAll(seqResources, true); tsFileManager.addAll(unseqResources, false); @@ -328,7 +328,7 @@ public void testHandleWithoutAllSourceFilesAndModFilesExist() throws Exception { createFiles(2, 4, 5, 300, 700, 700, 50, 50, false, true); createFiles(3, 3, 4, 200, 20, 10020, 30, 30, false, false); createFiles(2, 1, 5, 100, 450, 20450, 0, 0, false, false); - TsFileManager tsFileManager = new TsFileManager(COMPACTION_TEST_SG, "0", SEQ_DIRS.getPath()); + TsFileManager tsFileManager = new TsFileManager(COMPACTION_TEST_SG, "0"); tsFileManager.addAll(seqResources, true); tsFileManager.addAll(unseqResources, false); @@ -445,7 +445,7 @@ public void testHandleWithAllSourcesFileAndCompactonModFileExist() throws Except createFiles(2, 4, 5, 300, 700, 700, 50, 50, false, true); createFiles(3, 3, 4, 200, 20, 10020, 30, 30, false, false); createFiles(2, 1, 5, 100, 450, 20450, 0, 0, false, false); - TsFileManager tsFileManager = new TsFileManager(COMPACTION_TEST_SG, "0", SEQ_DIRS.getPath()); + TsFileManager tsFileManager = new TsFileManager(COMPACTION_TEST_SG, "0"); tsFileManager.addAll(seqResources, true); tsFileManager.addAll(unseqResources, false); @@ -563,7 +563,7 @@ public void testWhenTargetFileIsDeletedAfterCompactionAndSomeSourceFilesLost() t createFiles(2, 4, 5, 300, 700, 700, 50, 50, false, true); createFiles(3, 3, 4, 200, 20, 10020, 30, 30, false, false); createFiles(2, 1, 5, 100, 450, 20450, 0, 0, false, false); - TsFileManager tsFileManager = new TsFileManager(COMPACTION_TEST_SG, "0", SEQ_DIRS.getPath()); + TsFileManager tsFileManager = new 
TsFileManager(COMPACTION_TEST_SG, "0"); tsFileManager.addAll(seqResources, true); tsFileManager.addAll(unseqResources, false); @@ -650,7 +650,7 @@ public void testWhenTargetFileIsDeletedAfterCompactionAndAllSourceFilesExisted() createFiles(2, 4, 5, 300, 700, 700, 50, 50, false, true); createFiles(3, 3, 4, 200, 20, 10020, 30, 30, false, false); createFiles(2, 1, 5, 100, 450, 20450, 0, 0, false, false); - TsFileManager tsFileManager = new TsFileManager(COMPACTION_TEST_SG, "0", SEQ_DIRS.getPath()); + TsFileManager tsFileManager = new TsFileManager(COMPACTION_TEST_SG, "0"); tsFileManager.addAll(seqResources, true); tsFileManager.addAll(unseqResources, false); diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/cross/CrossSpaceCompactionWithFastPerformerTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/cross/CrossSpaceCompactionWithFastPerformerTest.java index e327114ca5c99..d0f42e3b9a79f 100644 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/cross/CrossSpaceCompactionWithFastPerformerTest.java +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/cross/CrossSpaceCompactionWithFastPerformerTest.java @@ -1049,11 +1049,7 @@ public void testFiveSeqFileAndOneUnseqFile() throws Exception { } private TsFileManager getTsFileManager() { - TsFileManager tsFileManager = - new TsFileManager( - "root.compactionTest", - "0", - "target\\data\\sequence\\test\\root.compactionTest\\0\\0\\"); + TsFileManager tsFileManager = new TsFileManager("root.compactionTest", "0"); tsFileManager.getOrCreateUnsequenceListByTimePartition(0); tsFileManager.getOrCreateSequenceListByTimePartition(0); return tsFileManager; diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/cross/CrossSpaceCompactionWithFastPerformerValidationTest.java 
b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/cross/CrossSpaceCompactionWithFastPerformerValidationTest.java index 644ddf77e1f24..81b4c71837b60 100644 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/cross/CrossSpaceCompactionWithFastPerformerValidationTest.java +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/cross/CrossSpaceCompactionWithFastPerformerValidationTest.java @@ -78,8 +78,7 @@ @SuppressWarnings("OptionalGetWithoutIsPresent") public class CrossSpaceCompactionWithFastPerformerValidationTest extends AbstractCompactionTest { - TsFileManager tsFileManager = - new TsFileManager(COMPACTION_TEST_SG, "0", STORAGE_GROUP_DIR.getPath()); + TsFileManager tsFileManager = new TsFileManager(COMPACTION_TEST_SG, "0"); private final String oldThreadName = Thread.currentThread().getName(); diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/cross/CrossSpaceCompactionWithReadPointPerformerTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/cross/CrossSpaceCompactionWithReadPointPerformerTest.java index 3c194a4e3d14d..ed4897c4fede2 100644 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/cross/CrossSpaceCompactionWithReadPointPerformerTest.java +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/cross/CrossSpaceCompactionWithReadPointPerformerTest.java @@ -1048,11 +1048,7 @@ public void testFiveSeqFileAndOneUnseqFile() throws Exception { } private TsFileManager getTsFileManager() { - TsFileManager tsFileManager = - new TsFileManager( - "root.compactionTest", - "0", - "target\\data\\sequence\\test\\root.compactionTest\\0\\0\\"); + TsFileManager tsFileManager = new TsFileManager("root.compactionTest", "0"); 
tsFileManager.getOrCreateUnsequenceListByTimePartition(0); tsFileManager.getOrCreateSequenceListByTimePartition(0); return tsFileManager; diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/cross/MergeUpgradeTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/cross/MergeUpgradeTest.java index 326a2c815a8a7..07baab0a996c8 100644 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/cross/MergeUpgradeTest.java +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/cross/MergeUpgradeTest.java @@ -84,7 +84,7 @@ public void tearDown() { @Test public void testMergeUpgradeSelect() throws MergeException { - TsFileManager tsFileManager = new TsFileManager("", "", ""); + TsFileManager tsFileManager = new TsFileManager("", ""); tsFileManager.addAll(seqResources, true); tsFileManager.addAll(unseqResources, true); RewriteCrossSpaceCompactionSelector selector = diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/cross/RewriteCrossSpaceCompactionRecoverTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/cross/RewriteCrossSpaceCompactionRecoverTest.java index 2344b82b9ef2d..3633fa5fdaea8 100644 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/cross/RewriteCrossSpaceCompactionRecoverTest.java +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/cross/RewriteCrossSpaceCompactionRecoverTest.java @@ -81,7 +81,7 @@ public void testRecoverWithAllSourceFilesExisted() throws Exception { createFiles(2, 4, 5, 300, 700, 700, 50, 50, false, true); createFiles(3, 3, 4, 200, 20, 10020, 30, 30, false, false); createFiles(2, 1, 5, 100, 450, 20450, 0, 0, false, false); - TsFileManager tsFileManager = new 
TsFileManager(COMPACTION_TEST_SG, "0", SEQ_DIRS.getPath()); + TsFileManager tsFileManager = new TsFileManager(COMPACTION_TEST_SG, "0"); tsFileManager.addAll(seqResources, true); tsFileManager.addAll(unseqResources, false); @@ -144,7 +144,7 @@ public void testRecoverWithAllSourceFilesExistedAndSomeTargetFilesNotExist() thr createFiles(2, 4, 5, 300, 700, 700, 50, 50, false, true); createFiles(3, 3, 4, 200, 20, 10020, 30, 30, false, false); createFiles(2, 1, 5, 100, 450, 20450, 0, 0, false, false); - TsFileManager tsFileManager = new TsFileManager(COMPACTION_TEST_SG, "0", SEQ_DIRS.getPath()); + TsFileManager tsFileManager = new TsFileManager(COMPACTION_TEST_SG, "0"); tsFileManager.addAll(seqResources, true); tsFileManager.addAll(unseqResources, false); @@ -215,7 +215,7 @@ public void testRecoverWithAllSourceFilesExistedAndTargetFilesMoved() throws Exc createFiles(2, 4, 5, 300, 700, 700, 50, 50, false, true); createFiles(3, 3, 4, 200, 20, 10020, 30, 30, false, false); createFiles(2, 1, 5, 100, 450, 20450, 0, 0, false, false); - TsFileManager tsFileManager = new TsFileManager(COMPACTION_TEST_SG, "0", SEQ_DIRS.getPath()); + TsFileManager tsFileManager = new TsFileManager(COMPACTION_TEST_SG, "0"); tsFileManager.addAll(seqResources, true); tsFileManager.addAll(unseqResources, false); @@ -279,7 +279,7 @@ public void testRecoverWithSomeSourceFilesExisted() throws Exception { createFiles(2, 4, 5, 300, 700, 700, 50, 50, false, true); createFiles(3, 3, 4, 200, 20, 10020, 30, 30, false, false); createFiles(2, 1, 5, 100, 450, 20450, 0, 0, false, false); - TsFileManager tsFileManager = new TsFileManager(COMPACTION_TEST_SG, "0", SEQ_DIRS.getPath()); + TsFileManager tsFileManager = new TsFileManager(COMPACTION_TEST_SG, "0"); tsFileManager.addAll(seqResources, true); tsFileManager.addAll(unseqResources, false); @@ -344,7 +344,7 @@ public void testRecoverWithoutAllSourceFilesAndModFilesExist() throws Exception createFiles(2, 4, 5, 300, 700, 700, 50, 50, false, true); createFiles(3, 3, 
4, 200, 20, 10020, 30, 30, false, false); createFiles(2, 1, 5, 100, 450, 20450, 0, 0, false, false); - TsFileManager tsFileManager = new TsFileManager(COMPACTION_TEST_SG, "0", SEQ_DIRS.getPath()); + TsFileManager tsFileManager = new TsFileManager(COMPACTION_TEST_SG, "0"); tsFileManager.addAll(seqResources, true); tsFileManager.addAll(unseqResources, false); @@ -442,7 +442,7 @@ public void testRecoverWithAllSourcesFileAndCompactonModFileExist() throws Excep createFiles(2, 4, 5, 300, 700, 700, 50, 50, false, true); createFiles(3, 3, 4, 200, 20, 10020, 30, 30, false, false); createFiles(2, 1, 5, 100, 450, 20450, 0, 0, false, false); - TsFileManager tsFileManager = new TsFileManager(COMPACTION_TEST_SG, "0", SEQ_DIRS.getPath()); + TsFileManager tsFileManager = new TsFileManager(COMPACTION_TEST_SG, "0"); tsFileManager.addAll(seqResources, true); tsFileManager.addAll(unseqResources, false); @@ -552,7 +552,7 @@ public void testRecoverWithAllSourcesFileAndCompactonModFileExistAndSomeTargetFi createFiles(2, 4, 5, 300, 700, 700, 50, 50, false, true); createFiles(3, 3, 4, 200, 20, 10020, 30, 30, false, false); createFiles(2, 1, 5, 100, 450, 20450, 0, 0, false, false); - TsFileManager tsFileManager = new TsFileManager(COMPACTION_TEST_SG, "0", SEQ_DIRS.getPath()); + TsFileManager tsFileManager = new TsFileManager(COMPACTION_TEST_SG, "0"); tsFileManager.addAll(seqResources, true); tsFileManager.addAll(unseqResources, false); @@ -667,7 +667,7 @@ public void testWhenTargetFileShouldBeDeletedAfterCompactionAndSomeSourceFilesLo createFiles(2, 4, 5, 300, 700, 700, 50, 50, false, true); createFiles(3, 3, 4, 200, 20, 10020, 30, 30, false, false); createFiles(2, 1, 5, 100, 450, 20450, 0, 0, false, false); - TsFileManager tsFileManager = new TsFileManager(COMPACTION_TEST_SG, "0", SEQ_DIRS.getPath()); + TsFileManager tsFileManager = new TsFileManager(COMPACTION_TEST_SG, "0"); tsFileManager.addAll(seqResources, true); tsFileManager.addAll(unseqResources, false); @@ -744,7 +744,7 @@ public 
void testWhenTargetFileIsDeletedAfterCompactionAndSomeSourceFilesLost() t createFiles(2, 4, 5, 300, 700, 700, 50, 50, false, true); createFiles(3, 3, 4, 200, 20, 10020, 30, 30, false, false); createFiles(2, 1, 5, 100, 450, 20450, 0, 0, false, false); - TsFileManager tsFileManager = new TsFileManager(COMPACTION_TEST_SG, "0", SEQ_DIRS.getPath()); + TsFileManager tsFileManager = new TsFileManager(COMPACTION_TEST_SG, "0"); tsFileManager.addAll(seqResources, true); tsFileManager.addAll(unseqResources, false); @@ -829,7 +829,7 @@ public void testWhenTargetFileIsDeletedAfterCompactionAndAllSourceFilesExisted() createFiles(2, 4, 5, 300, 700, 700, 50, 50, false, true); createFiles(3, 3, 4, 200, 20, 10020, 30, 30, false, false); createFiles(2, 1, 5, 100, 450, 20450, 0, 0, false, false); - TsFileManager tsFileManager = new TsFileManager(COMPACTION_TEST_SG, "0", SEQ_DIRS.getPath()); + TsFileManager tsFileManager = new TsFileManager(COMPACTION_TEST_SG, "0"); tsFileManager.addAll(seqResources, true); tsFileManager.addAll(unseqResources, false); diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/cross/RewriteCrossSpaceCompactionWithFastPerformerTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/cross/RewriteCrossSpaceCompactionWithFastPerformerTest.java index ead500524e550..b452133811a84 100644 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/cross/RewriteCrossSpaceCompactionWithFastPerformerTest.java +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/cross/RewriteCrossSpaceCompactionWithFastPerformerTest.java @@ -225,8 +225,7 @@ public void testAlignedCrossSpaceCompactionWithAllDataDeletedInTimeseries() thro List targetResources = CompactionFileGeneratorUtils.getCrossCompactionTargetTsFileResources(seqResources); - TsFileManager tsFileManager = - new TsFileManager(COMPACTION_TEST_SG, 
"0", STORAGE_GROUP_DIR.getPath()); + TsFileManager tsFileManager = new TsFileManager(COMPACTION_TEST_SG, "0"); tsFileManager.addAll(seqResources, true); tsFileManager.addAll(unseqResources, false); CrossSpaceCompactionTask task = @@ -454,8 +453,7 @@ public void testAlignedCrossSpaceCompactionWithAllDataDeletedInOneTargetFile() t List targetResources = CompactionFileGeneratorUtils.getCrossCompactionTargetTsFileResources(seqResources); - TsFileManager tsFileManager = - new TsFileManager(COMPACTION_TEST_SG, "0", STORAGE_GROUP_DIR.getPath()); + TsFileManager tsFileManager = new TsFileManager(COMPACTION_TEST_SG, "0"); tsFileManager.addAll(seqResources, true); tsFileManager.addAll(unseqResources, false); for (TsFileResource resource : seqResources) { diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/cross/RewriteCrossSpaceCompactionWithReadPointPerformerTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/cross/RewriteCrossSpaceCompactionWithReadPointPerformerTest.java index d65400144137a..bdbee56c20040 100644 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/cross/RewriteCrossSpaceCompactionWithReadPointPerformerTest.java +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/cross/RewriteCrossSpaceCompactionWithReadPointPerformerTest.java @@ -220,8 +220,7 @@ public void testAlignedCrossSpaceCompactionWithAllDataDeletedInTimeseries() thro List targetResources = CompactionFileGeneratorUtils.getCrossCompactionTargetTsFileResources(seqResources); - TsFileManager tsFileManager = - new TsFileManager(COMPACTION_TEST_SG, "0", STORAGE_GROUP_DIR.getPath()); + TsFileManager tsFileManager = new TsFileManager(COMPACTION_TEST_SG, "0"); tsFileManager.addAll(seqResources, true); tsFileManager.addAll(unseqResources, false); CrossSpaceCompactionTask task = @@ -449,8 +448,7 @@ public void 
testAlignedCrossSpaceCompactionWithAllDataDeletedInOneTargetFile() t List targetResources = CompactionFileGeneratorUtils.getCrossCompactionTargetTsFileResources(seqResources); - TsFileManager tsFileManager = - new TsFileManager(COMPACTION_TEST_SG, "0", STORAGE_GROUP_DIR.getPath()); + TsFileManager tsFileManager = new TsFileManager(COMPACTION_TEST_SG, "0"); tsFileManager.addAll(seqResources, true); tsFileManager.addAll(unseqResources, false); CrossSpaceCompactionTask task = diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/inner/AbstractInnerSpaceCompactionTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/inner/AbstractInnerSpaceCompactionTest.java index 7f9a25563d623..ca9d562dd3a9f 100644 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/inner/AbstractInnerSpaceCompactionTest.java +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/inner/AbstractInnerSpaceCompactionTest.java @@ -131,7 +131,7 @@ public void setUp() throws IOException, WriteProcessException, MetadataException EnvironmentUtils.envSetUp(); prepareSeries(); prepareFiles(seqFileNum, unseqFileNum); - tsFileManager = new TsFileManager(COMPACTION_TEST_SG, "0", tempSGDir.getAbsolutePath()); + tsFileManager = new TsFileManager(COMPACTION_TEST_SG, "0"); } void prepareSeries() throws MetadataException { diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/inner/InnerCompactionEmptyTsFileTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/inner/InnerCompactionEmptyTsFileTest.java index 4c37bde9e8a1e..a1226b4489df4 100644 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/inner/InnerCompactionEmptyTsFileTest.java +++ 
b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/inner/InnerCompactionEmptyTsFileTest.java @@ -54,7 +54,7 @@ public void setUp() throws Exception { CompactionTaskManager.getInstance().restart(); tempSGDir = new File(TestConstant.BASE_OUTPUT_PATH.concat("tempSG")); tempSGDir.mkdirs(); - tsFileManager = new TsFileManager(COMPACTION_TEST_SG, "0", tempSGDir.getAbsolutePath()); + tsFileManager = new TsFileManager(COMPACTION_TEST_SG, "0"); } @After diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/inner/InnerCompactionLogTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/inner/InnerCompactionLogTest.java index a51f81cddfb00..199d39079dd5e 100644 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/inner/InnerCompactionLogTest.java +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/inner/InnerCompactionLogTest.java @@ -52,7 +52,7 @@ public void setUp() throws Exception { super.setUp(); tempSGDir = new File(TestConstant.BASE_OUTPUT_PATH.concat("tempSG")); tempSGDir.mkdirs(); - tsFileManager = new TsFileManager(COMPACTION_TEST_SG, "0", tempSGDir.getAbsolutePath()); + tsFileManager = new TsFileManager(COMPACTION_TEST_SG, "0"); } @Override diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/inner/InnerCompactionMoreDataTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/inner/InnerCompactionMoreDataTest.java index 09e593766c86d..9347a9a78d238 100644 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/inner/InnerCompactionMoreDataTest.java +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/inner/InnerCompactionMoreDataTest.java @@ -141,7 +141,7 @@ public void 
setUp() throws Exception { super.setUp(); tempSGDir = new File(TestConstant.BASE_OUTPUT_PATH.concat("tempSG")); tempSGDir.mkdirs(); - tsFileManager = new TsFileManager(COMPACTION_TEST_SG, "0", tempSGDir.getAbsolutePath()); + tsFileManager = new TsFileManager(COMPACTION_TEST_SG, "0"); } @After diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/inner/InnerCompactionSchedulerTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/inner/InnerCompactionSchedulerTest.java index 340270514713f..adeffb5817bce 100644 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/inner/InnerCompactionSchedulerTest.java +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/inner/InnerCompactionSchedulerTest.java @@ -93,7 +93,7 @@ public void testFileSelector1() registerTimeseriesInMManger(3, 5, false); createFiles(2, 5, 5, 50, 600, 800, 50, 50, false, true); registerTimeseriesInMManger(5, 5, false); - TsFileManager tsFileManager = new TsFileManager("testSG", "0", "tmp"); + TsFileManager tsFileManager = new TsFileManager("testSG", "0"); tsFileManager.addAll(seqResources, true); CompactionScheduler.tryToSubmitInnerSpaceCompactionTask( @@ -116,7 +116,7 @@ public void testFileSelector2() createFiles(2, 3, 5, 50, 250, 250, 50, 50, false, true); seqResources.get(0).setStatusForTest(TsFileResourceStatus.COMPACTION_CANDIDATE); seqResources.get(0).setStatusForTest(TsFileResourceStatus.COMPACTING); - TsFileManager tsFileManager = new TsFileManager("testSG", "0", "tmp"); + TsFileManager tsFileManager = new TsFileManager("testSG", "0"); tsFileManager.addAll(seqResources, true); CompactionScheduler.tryToSubmitInnerSpaceCompactionTask( tsFileManager, 0L, true, new CompactionScheduleContext()); @@ -146,7 +146,7 @@ public void testFileSelectorWithUnclosedFile() createFiles(2, 2, 3, 100, 0, 0, 50, 50, false, true); 
createFiles(2, 3, 5, 50, 250, 250, 50, 50, false, true); seqResources.get(3).setStatusForTest(TsFileResourceStatus.UNCLOSED); - TsFileManager tsFileManager = new TsFileManager("testSG", "0", "tmp"); + TsFileManager tsFileManager = new TsFileManager("testSG", "0"); tsFileManager.addAll(seqResources, true); CompactionScheduler.tryToSubmitInnerSpaceCompactionTask( tsFileManager, 0L, true, new CompactionScheduleContext()); diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/inner/sizetiered/SizeTieredCompactionRecoverTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/inner/sizetiered/SizeTieredCompactionRecoverTest.java index cc9e713c94c6d..285733605cd8d 100644 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/inner/sizetiered/SizeTieredCompactionRecoverTest.java +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/inner/sizetiered/SizeTieredCompactionRecoverTest.java @@ -95,8 +95,7 @@ public void tearDown() throws IOException, StorageEngineException { /** Target file uncompleted, source files and log exists */ @Test public void testCompactionRecoverWithUncompletedTargetFileAndLog() throws Exception { - TsFileManager tsFileManager = - new TsFileManager(COMPACTION_TEST_SG, "0", tempSGDir.getAbsolutePath()); + TsFileManager tsFileManager = new TsFileManager(COMPACTION_TEST_SG, "0"); tsFileManager.addAll(seqResources, true); tsFileManager.addAll(unseqResources, false); IFullPath path = @@ -215,8 +214,7 @@ public void testCompactionRecoverWithUncompletedTargetFileAndLog() throws Except @Test public void testRecoverWithAllSourceFilesExisted() throws Exception { - TsFileManager tsFileManager = - new TsFileManager(COMPACTION_TEST_SG, "0", tempSGDir.getAbsolutePath()); + TsFileManager tsFileManager = new TsFileManager(COMPACTION_TEST_SG, "0"); tsFileManager.addAll(seqResources, true); 
tsFileManager.addAll(unseqResources, false); IFullPath path = @@ -323,8 +321,7 @@ public void testRecoverWithAllSourceFilesExisted() throws Exception { @Test public void testRecoverWithAllSourceFilesExistedAndTargetFileNotExist() throws Exception { - TsFileManager tsFileManager = - new TsFileManager(COMPACTION_TEST_SG, "0", tempSGDir.getAbsolutePath()); + TsFileManager tsFileManager = new TsFileManager(COMPACTION_TEST_SG, "0"); tsFileManager.addAll(seqResources, true); tsFileManager.addAll(unseqResources, false); IFullPath path = @@ -433,8 +430,7 @@ public void testRecoverWithAllSourceFilesExistedAndTargetFileNotExist() throws E @Test public void testRecoverWithoutAllSourceFilesExisted() throws Exception { - TsFileManager tsFileManager = - new TsFileManager(COMPACTION_TEST_SG, "0", tempSGDir.getAbsolutePath()); + TsFileManager tsFileManager = new TsFileManager(COMPACTION_TEST_SG, "0"); tsFileManager.addAll(seqResources, true); tsFileManager.addAll(unseqResources, false); IFullPath path = @@ -758,8 +754,7 @@ public void testRecoverWithoutAllSourceFilesExistAndModFiles() throws Exception /** compaction recover merge finished, delete one offset */ @Test public void testRecoverCompleteTargetFileAndCompactionLog() throws Exception { - TsFileManager tsFileManager = - new TsFileManager(COMPACTION_TEST_SG, "0", tempSGDir.getAbsolutePath()); + TsFileManager tsFileManager = new TsFileManager(COMPACTION_TEST_SG, "0"); tsFileManager.addAll(seqResources, true); tsFileManager.addAll(unseqResources, false); IFullPath path = @@ -856,8 +851,7 @@ public void testRecoverCompleteTargetFileAndCompactionLog() throws Exception { @Test public void testCompactionRecoverWithCompletedTargetFileAndLog() throws Exception { - TsFileManager tsFileManager = - new TsFileManager(COMPACTION_TEST_SG, "0", tempSGDir.getAbsolutePath()); + TsFileManager tsFileManager = new TsFileManager(COMPACTION_TEST_SG, "0"); tsFileManager.addAll(seqResources, true); tsFileManager.addAll(unseqResources, false); 
IFullPath path = @@ -948,8 +942,7 @@ public void testCompactionRecoverWithCompletedTargetFileAndLog() throws Exceptio /** compeleted target file, and not resource files, compaction log exists */ @Test public void testCompactionRecoverWithCompletedTargetFile() throws Exception { - TsFileManager tsFileManager = - new TsFileManager(COMPACTION_TEST_SG, "0", tempSGDir.getAbsolutePath()); + TsFileManager tsFileManager = new TsFileManager(COMPACTION_TEST_SG, "0"); tsFileManager.addAll(seqResources, true); tsFileManager.addAll(unseqResources, false); IFullPath path = diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/inner/sizetiered/SizeTieredCompactionSelectorTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/inner/sizetiered/SizeTieredCompactionSelectorTest.java index ec87a22ac24c5..dab288c375fa0 100644 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/inner/sizetiered/SizeTieredCompactionSelectorTest.java +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/inner/sizetiered/SizeTieredCompactionSelectorTest.java @@ -52,7 +52,7 @@ public void testSubmitWhenNextTimePartitionExists() { resources.add(resource); } - TsFileManager manager = new TsFileManager("root.test", "0", ""); + TsFileManager manager = new TsFileManager("root.test", "0"); manager.addAll(resources, true); for (long i = 0; i < 9; ++i) { diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/recover/SizeTieredCompactionRecoverTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/recover/SizeTieredCompactionRecoverTest.java index e4adaccc00355..e8de855c07454 100644 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/recover/SizeTieredCompactionRecoverTest.java +++ 
b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/recover/SizeTieredCompactionRecoverTest.java @@ -102,8 +102,7 @@ public class SizeTieredCompactionRecoverTest { + "0" + File.separator + "0"; - static final TsFileManager tsFileManager = - new TsFileManager(COMPACTION_TEST_SG, "0", TestConstant.BASE_OUTPUT_PATH); + static final TsFileManager tsFileManager = new TsFileManager(COMPACTION_TEST_SG, "0"); static final String[] fullPaths = new String[] { COMPACTION_TEST_SG + ".device0.sensor0", diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/tablemodel/CompactionWithAllNullRowsTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/tablemodel/CompactionWithAllNullRowsTest.java index 9ff8a401150b8..69e83e769a3ce 100644 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/tablemodel/CompactionWithAllNullRowsTest.java +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/tablemodel/CompactionWithAllNullRowsTest.java @@ -35,9 +35,9 @@ import org.apache.iotdb.db.storageengine.dataregion.compaction.execute.performer.impl.ReadPointCompactionPerformer; import org.apache.iotdb.db.storageengine.dataregion.compaction.execute.task.InnerSpaceCompactionTask; import org.apache.iotdb.db.storageengine.dataregion.modification.DeletionPredicate; -import org.apache.iotdb.db.storageengine.dataregion.modification.IDPredicate; -import org.apache.iotdb.db.storageengine.dataregion.modification.IDPredicate.FullExactMatch; import org.apache.iotdb.db.storageengine.dataregion.modification.TableDeletionEntry; +import org.apache.iotdb.db.storageengine.dataregion.modification.TagPredicate; +import org.apache.iotdb.db.storageengine.dataregion.modification.TagPredicate.FullExactMatch; import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource; import 
org.apache.tsfile.exception.write.WriteProcessException; @@ -333,7 +333,7 @@ public void testCompactionWithAllValueColumnDeletion() throws IOException, Illeg new TableDeletionEntry( new DeletionPredicate( "t1", - new IDPredicate.FullExactMatch(deviceID), + new TagPredicate.FullExactMatch(deviceID), Collections.singletonList("s0")), new TimeRange(Long.MIN_VALUE, 11))); resource1 @@ -342,7 +342,7 @@ public void testCompactionWithAllValueColumnDeletion() throws IOException, Illeg new TableDeletionEntry( new DeletionPredicate( "t1", - new IDPredicate.FullExactMatch(deviceID), + new TagPredicate.FullExactMatch(deviceID), Collections.singletonList("s1")), new TimeRange(Long.MIN_VALUE, 11))); resource1 @@ -351,7 +351,7 @@ public void testCompactionWithAllValueColumnDeletion() throws IOException, Illeg new TableDeletionEntry( new DeletionPredicate( "t1", - new IDPredicate.FullExactMatch(deviceID), + new TagPredicate.FullExactMatch(deviceID), Collections.singletonList("s2")), new TimeRange(Long.MIN_VALUE, 11))); resource1 @@ -360,7 +360,7 @@ public void testCompactionWithAllValueColumnDeletion() throws IOException, Illeg new TableDeletionEntry( new DeletionPredicate( "t1", - new IDPredicate.FullExactMatch(deviceID), + new TagPredicate.FullExactMatch(deviceID), Collections.singletonList("s3")), new TimeRange(Long.MIN_VALUE, 11))); resource1.getModFileForWrite().close(); diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/utils/CompactionCheckerUtils.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/utils/CompactionCheckerUtils.java index 6c3e692212a85..8aca64683447f 100644 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/utils/CompactionCheckerUtils.java +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/utils/CompactionCheckerUtils.java @@ -538,7 +538,8 @@ public static List 
getAllPathsOfResources(List resour Pair iDeviceIDBooleanPair = deviceIterator.nextDevice(); IDeviceID deviceID = iDeviceIDBooleanPair.getLeft(); boolean isAlign = iDeviceIDBooleanPair.getRight(); - Map schemaMap = deviceIterator.getAllSchemasOfCurrentDevice(); + Map schemaMap = + deviceIterator.getAllSchemasOfCurrentDevice(new Pair<>(Long.MIN_VALUE, null)); IMeasurementSchema timeSchema = schemaMap.remove(TsFileConstant.TIME_COLUMN_ID); List measurementSchemas = new ArrayList<>(schemaMap.values()); if (measurementSchemas.isEmpty()) { diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/modification/ModificationFileTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/modification/ModificationFileTest.java index a0a9885ecf08e..29609cc9c2740 100644 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/modification/ModificationFileTest.java +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/modification/ModificationFileTest.java @@ -22,9 +22,9 @@ import org.apache.iotdb.commons.exception.IllegalPathException; import org.apache.iotdb.commons.path.MeasurementPath; import org.apache.iotdb.db.storageengine.dataregion.compaction.execute.recover.CompactionRecoverManager; -import org.apache.iotdb.db.storageengine.dataregion.modification.IDPredicate.FullExactMatch; -import org.apache.iotdb.db.storageengine.dataregion.modification.IDPredicate.NOP; -import org.apache.iotdb.db.storageengine.dataregion.modification.IDPredicate.SegmentExactMatch; +import org.apache.iotdb.db.storageengine.dataregion.modification.TagPredicate.FullExactMatch; +import org.apache.iotdb.db.storageengine.dataregion.modification.TagPredicate.NOP; +import org.apache.iotdb.db.storageengine.dataregion.modification.TagPredicate.SegmentExactMatch; import org.apache.iotdb.db.utils.constant.TestConstant; import org.apache.tsfile.file.metadata.IDeviceID.Factory; diff --git 
a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/modification/TableDeletionEntryTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/modification/TableDeletionEntryTest.java index 5c2979a90755c..ee9a4dfa405e4 100644 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/modification/TableDeletionEntryTest.java +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/modification/TableDeletionEntryTest.java @@ -18,10 +18,10 @@ */ package org.apache.iotdb.db.storageengine.dataregion.modification; -import org.apache.iotdb.db.storageengine.dataregion.modification.IDPredicate.And; -import org.apache.iotdb.db.storageengine.dataregion.modification.IDPredicate.FullExactMatch; -import org.apache.iotdb.db.storageengine.dataregion.modification.IDPredicate.NOP; -import org.apache.iotdb.db.storageengine.dataregion.modification.IDPredicate.SegmentExactMatch; +import org.apache.iotdb.db.storageengine.dataregion.modification.TagPredicate.And; +import org.apache.iotdb.db.storageengine.dataregion.modification.TagPredicate.FullExactMatch; +import org.apache.iotdb.db.storageengine.dataregion.modification.TagPredicate.NOP; +import org.apache.iotdb.db.storageengine.dataregion.modification.TagPredicate.SegmentExactMatch; import org.apache.tsfile.file.metadata.IDeviceID; import org.apache.tsfile.file.metadata.IDeviceID.Factory; diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/evolution/EvolvedSchemaTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/evolution/EvolvedSchemaTest.java new file mode 100644 index 0000000000000..e5ab527fa583b --- /dev/null +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/evolution/EvolvedSchemaTest.java @@ -0,0 +1,151 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more 
contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution; + +import org.apache.iotdb.db.utils.constant.TestConstant; + +import org.apache.tsfile.enums.TSDataType; +import org.junit.Test; + +import java.io.File; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.FileChannel; +import java.nio.file.StandardOpenOption; +import java.util.Arrays; +import java.util.List; + +import static org.junit.Assert.assertEquals; + +public class EvolvedSchemaTest { + + @Test + public void testMerge() throws IOException { + // t1 -> t2, t2.s1 -> t2.s2, t3 -> t1 + List schemaEvolutionList = + Arrays.asList( + new TableRename("t1", "t2"), + new ColumnRename("t2", "s1", "s2", TSDataType.INT32), + new TableRename("t3", "t1")); + EvolvedSchema oldSchema = new EvolvedSchema(); + EvolvedSchema allSchema = new EvolvedSchema(); + schemaEvolutionList.forEach(schemaEvolution -> schemaEvolution.applyTo(oldSchema)); + schemaEvolutionList.forEach(schemaEvolution -> schemaEvolution.applyTo(allSchema)); + + // t1 -> t2 -> t3, t2.s1 -> t2.s2 -> t3.s1, t3 -> t1 -> t2 + schemaEvolutionList = + Arrays.asList( + new TableRename("t2", "t3"), + new ColumnRename("t3", "s2", "s1", TSDataType.INT32), + new TableRename("t1", "t2")); + EvolvedSchema newSchema = new EvolvedSchema(); + schemaEvolutionList.forEach(schemaEvolution -> schemaEvolution.applyTo(newSchema)); + schemaEvolutionList.forEach(schemaEvolution -> schemaEvolution.applyTo(allSchema)); + + EvolvedSchema mergedShema = EvolvedSchema.merge(oldSchema, newSchema); + + assertEquals(allSchema, mergedShema); + + ByteBuffer fileBuffer = mergedShema.toSchemaEvolutionFileBuffer(); 
+ File file = new File(TestConstant.BASE_OUTPUT_PATH + File.separator + "0.sevo"); + try (FileChannel fileChannel = + FileChannel.open( + file.toPath(), + StandardOpenOption.WRITE, + StandardOpenOption.CREATE, + StandardOpenOption.TRUNCATE_EXISTING)) { + fileChannel.write(fileBuffer); + } + + SchemaEvolutionFile schemaEvolutionFile = new SchemaEvolutionFile(file.getAbsolutePath()); + EvolvedSchema schemaFromFile = schemaEvolutionFile.readAsSchema(); + assertEquals(allSchema, schemaFromFile); + } + + @Test + public void testCovert() { + // t1 -> t2, t2.s1 -> t2.s2, t3 -> t1 + List schemaEvolutionList = + Arrays.asList( + new TableRename("t1", "t2"), + new ColumnRename("t2", "s1", "s2", TSDataType.INT32), + new TableRename("t3", "t1")); + EvolvedSchema oldSchema = new EvolvedSchema(); + schemaEvolutionList.forEach(schemaEvolution -> schemaEvolution.applyTo(oldSchema)); + + List convertedSchemaEvolutions = oldSchema.toSchemaEvolutions(); + EvolvedSchema newSchema = new EvolvedSchema(); + convertedSchemaEvolutions.forEach(schemaEvolution -> schemaEvolution.applyTo(newSchema)); + + assertEquals(oldSchema, newSchema); + } + + @Test + public void testTableRename() { + EvolvedSchema schema = new EvolvedSchema(); + // t1 -> t2 + SchemaEvolution schemaEvolution = new TableRename("t1", "t2"); + schemaEvolution.applyTo(schema); + assertEquals("t1", schema.getOriginalTableName("t2")); + assertEquals("", schema.getOriginalTableName("t1")); + assertEquals("t2", schema.getFinalTableName("t1")); + assertEquals("t2", schema.getFinalTableName("t2")); + // t1 -> t2 -> t3 + schemaEvolution = new TableRename("t2", "t3"); + schemaEvolution.applyTo(schema); + assertEquals("t1", schema.getOriginalTableName("t3")); + assertEquals("", schema.getOriginalTableName("t2")); + assertEquals("t3", schema.getFinalTableName("t1")); + assertEquals("t2", schema.getFinalTableName("t2")); + // t1 -> t2 -> t3 -> t1 + schemaEvolution = new TableRename("t3", "t1"); + schemaEvolution.applyTo(schema); + 
assertEquals("t1", schema.getOriginalTableName("t1")); + assertEquals("", schema.getOriginalTableName("t3")); + assertEquals("t1", schema.getFinalTableName("t1")); + assertEquals("t3", schema.getFinalTableName("t3")); + } + + @Test + public void testColumnRename() { + EvolvedSchema schema = new EvolvedSchema(); + // s1 -> s2 + SchemaEvolution schemaEvolution = new ColumnRename("t1", "s1", "s2"); + schemaEvolution.applyTo(schema); + assertEquals("s1", schema.getOriginalColumnName("t1", "s2")); + assertEquals("", schema.getOriginalColumnName("t1", "s1")); + assertEquals("s2", schema.getFinalColumnName("t1", "s1")); + assertEquals("s2", schema.getFinalColumnName("t1", "s2")); + // s1 -> s2 -> s3 + schemaEvolution = new ColumnRename("t1", "s2", "s3"); + schemaEvolution.applyTo(schema); + assertEquals("s1", schema.getOriginalColumnName("t1", "s3")); + assertEquals("", schema.getOriginalColumnName("t1", "s2")); + assertEquals("s3", schema.getFinalColumnName("t1", "s1")); + assertEquals("s2", schema.getFinalColumnName("t1", "s2")); + // s1 -> s2 -> s3 -> s1 + schemaEvolution = new ColumnRename("t1", "s3", "s1"); + schemaEvolution.applyTo(schema); + assertEquals("s1", schema.getOriginalColumnName("t1", "s1")); + assertEquals("s3", schema.getOriginalColumnName("t1", "s3")); + assertEquals("s1", schema.getFinalColumnName("t1", "s1")); + assertEquals("s3", schema.getFinalColumnName("t3", "s3")); + } +} diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/evolution/SchemaEvolutionFileTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/evolution/SchemaEvolutionFileTest.java new file mode 100644 index 0000000000000..b6e2d2dbd9a83 --- /dev/null +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/evolution/SchemaEvolutionFileTest.java @@ -0,0 +1,129 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license 
agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.db.storageengine.dataregion.tsfile.evolution; + +import org.apache.iotdb.commons.conf.IoTDBConstant; +import org.apache.iotdb.db.utils.constant.TestConstant; + +import org.apache.tsfile.enums.TSDataType; +import org.junit.After; +import org.junit.Test; + +import java.io.File; +import java.io.FileOutputStream; +import java.io.IOException; +import java.nio.file.Files; +import java.util.Arrays; +import java.util.List; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; + +@SuppressWarnings("ResultOfMethodCallIgnored") +public class SchemaEvolutionFileTest { + + @After + public void tearDown() throws Exception { + clearSchemaEvolutionFile(); + } + + @Test + public void testSchemaEvolutionFile() throws IOException { + String filePath = TestConstant.BASE_OUTPUT_PATH + File.separator + "0.sevo"; + + SchemaEvolutionFile schemaEvolutionFile = new SchemaEvolutionFile(filePath); + + // t1 -> t2, t2.s1 -> t2.s2, t3 -> t1 + List schemaEvolutionList = + Arrays.asList( + new TableRename("t1", "t2"), + new ColumnRename("t2", "s1", "s2", TSDataType.INT32), + new TableRename("t3", "t1")); + schemaEvolutionFile.append(schemaEvolutionList); + + EvolvedSchema evolvedSchema = 
schemaEvolutionFile.readAsSchema(); + assertEquals("t1", evolvedSchema.getOriginalTableName("t2")); + assertEquals("s1", evolvedSchema.getOriginalColumnName("t2", "s2")); + assertEquals("t3", evolvedSchema.getOriginalTableName("t1")); + // not evolved, should remain the same + assertEquals("t4", evolvedSchema.getOriginalTableName("t4")); + assertEquals("s3", evolvedSchema.getOriginalColumnName("t2", "s3")); + + // t1 -> t2 -> t3, t2.s1 -> t2.s2 -> t3.s1, t3 -> t1 -> t2 + schemaEvolutionList = + Arrays.asList( + new TableRename("t2", "t3"), + new ColumnRename("t3", "s2", "s1", TSDataType.INT32), + new TableRename("t1", "t2")); + schemaEvolutionFile.append(schemaEvolutionList); + evolvedSchema = schemaEvolutionFile.readAsSchema(); + assertEquals("t1", evolvedSchema.getOriginalTableName("t3")); + assertEquals("s1", evolvedSchema.getOriginalColumnName("t3", "s1")); + assertEquals("t3", evolvedSchema.getOriginalTableName("t2")); + // not evolved, should remain the same + assertEquals("t4", evolvedSchema.getOriginalTableName("t4")); + assertEquals("s3", evolvedSchema.getOriginalColumnName("t2", "s3")); + } + + private void clearSchemaEvolutionFile() { + File dir = new File(TestConstant.BASE_OUTPUT_PATH); + File[] files = + dir.listFiles(f -> f.getName().endsWith(IoTDBConstant.SCHEMA_EVOLUTION_FILE_SUFFIX)); + if (files != null) { + for (File file : files) { + file.delete(); + } + } + } + + @Test + public void testRecover() throws IOException { + String filePath = TestConstant.BASE_OUTPUT_PATH + File.separator + "0.sevo"; + Files.deleteIfExists(new File(filePath).toPath()); + + SchemaEvolutionFile schemaEvolutionFile = new SchemaEvolutionFile(filePath); + List schemaEvolutionList = + Arrays.asList( + new TableRename("t1", "t2"), + new ColumnRename("t2", "s1", "s2", TSDataType.INT32), + new TableRename("t3", "t1")); + schemaEvolutionFile.append(schemaEvolutionList); + + File dir = new File(TestConstant.BASE_OUTPUT_PATH); + File[] files = + dir.listFiles(f -> 
f.getName().endsWith(IoTDBConstant.SCHEMA_EVOLUTION_FILE_SUFFIX)); + assertNotNull(files); + assertEquals(1, files.length); + assertEquals(33, schemaEvolutionFile.readValidLength()); + + try (FileOutputStream fileOutputStream = new FileOutputStream(files[0], true)) { + fileOutputStream.write(new byte[100]); + } + + schemaEvolutionFile = new SchemaEvolutionFile(files[0].getAbsolutePath()); + EvolvedSchema evolvedSchema = schemaEvolutionFile.readAsSchema(); + assertEquals("t1", evolvedSchema.getOriginalTableName("t2")); + assertEquals("s1", evolvedSchema.getOriginalColumnName("t2", "s2")); + assertEquals("t3", evolvedSchema.getOriginalTableName("t1")); + // not evolved, should remain the same + assertEquals("t4", evolvedSchema.getOriginalTableName("t4")); + assertEquals("s3", evolvedSchema.getOriginalColumnName("t2", "s3")); + } +} diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/auth/authorizer/BasicAuthorizer.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/auth/authorizer/BasicAuthorizer.java index 98f70dc0ee609..5288b1e9a03ce 100644 --- a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/auth/authorizer/BasicAuthorizer.java +++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/auth/authorizer/BasicAuthorizer.java @@ -18,6 +18,7 @@ */ package org.apache.iotdb.commons.auth.authorizer; +import org.apache.iotdb.common.rpc.thrift.TSStatus; import org.apache.iotdb.commons.auth.AuthException; import org.apache.iotdb.commons.auth.entity.PrivilegeType; import org.apache.iotdb.commons.auth.entity.PrivilegeUnion; @@ -33,6 +34,7 @@ import org.apache.iotdb.commons.service.IService; import org.apache.iotdb.commons.service.ServiceType; import org.apache.iotdb.commons.utils.AuthUtils; +import org.apache.iotdb.commons.utils.StatusUtils; import org.apache.iotdb.confignode.rpc.thrift.TListUserInfo; import org.apache.iotdb.rpc.TSStatusCode; @@ -501,4 +503,11 @@ public void processLoadSnapshot(File 
snapshotDir) throws TException, IOException userManager.processLoadSnapshot(snapshotDir); roleManager.processLoadSnapshot(snapshotDir); } + + @Override + public TSStatus renameTable(String databaseName, String tableName, String newName) { + userManager.renameTable(databaseName, tableName, newName); + roleManager.renameTable(databaseName, tableName, newName); + return StatusUtils.OK; + } } diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/auth/authorizer/IAuthorizer.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/auth/authorizer/IAuthorizer.java index 445b29c0790dc..8285d9048e90c 100644 --- a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/auth/authorizer/IAuthorizer.java +++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/auth/authorizer/IAuthorizer.java @@ -19,6 +19,7 @@ package org.apache.iotdb.commons.auth.authorizer; +import org.apache.iotdb.common.rpc.thrift.TSStatus; import org.apache.iotdb.commons.auth.AuthException; import org.apache.iotdb.commons.auth.entity.PrivilegeType; import org.apache.iotdb.commons.auth.entity.PrivilegeUnion; @@ -257,4 +258,6 @@ public interface IAuthorizer extends SnapshotProcessor { void createUserWithoutCheck(String username, String password) throws AuthException; void createUserWithRawPassword(String username, String password) throws AuthException; + + TSStatus renameTable(String databaseName, String tableName, String newName); } diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/auth/entity/DatabasePrivilege.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/auth/entity/DatabasePrivilege.java index d7faad6ed9d43..3f431aabe3420 100644 --- a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/auth/entity/DatabasePrivilege.java +++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/auth/entity/DatabasePrivilege.java @@ -247,4 +247,14 @@ public void deserialize(ByteBuffer buffer) { 
this.tablePrivilegeMap.put(tablePrivilege.getTableName(), tablePrivilege); } } + + public boolean renameTable(String oldTableName, String newTableName) { + TablePrivilege tablePrivilege = tablePrivilegeMap.remove(oldTableName); + if (tablePrivilege != null) { + tablePrivilege.setTableName(newTableName); + tablePrivilegeMap.put(newTableName, tablePrivilege); + return true; + } + return false; + } } diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/auth/entity/Role.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/auth/entity/Role.java index e5296dca60cad..c80937cddd42b 100644 --- a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/auth/entity/Role.java +++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/auth/entity/Role.java @@ -711,4 +711,12 @@ public Set priSetToString(Set privs, Set g } return priSet; } + + public boolean renameTable(String databaseName, String oldTableName, String newTableName) { + DatabasePrivilege databasePrivilege = objectPrivilegeMap.get(databaseName); + if (databasePrivilege != null) { + return databasePrivilege.renameTable(oldTableName, newTableName); + } + return false; + } } diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/auth/entity/TablePrivilege.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/auth/entity/TablePrivilege.java index 5f3488a7596f3..3433def78b330 100644 --- a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/auth/entity/TablePrivilege.java +++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/auth/entity/TablePrivilege.java @@ -166,4 +166,8 @@ public void deserialize(ByteBuffer byteBuffer) { SerializeUtils.deserializePrivilegeTypeSet(this.privileges, byteBuffer); SerializeUtils.deserializePrivilegeTypeSet(this.grantOption, byteBuffer); } + + public void setTableName(String newTableName) { + this.tableName = newTableName; + } } diff --git 
a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/auth/role/BasicRoleManager.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/auth/role/BasicRoleManager.java index f6804d3055c95..58823c60cd3ec 100644 --- a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/auth/role/BasicRoleManager.java +++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/auth/role/BasicRoleManager.java @@ -39,6 +39,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Map.Entry; /** * This class reads roles from local files through LocalFileRoleAccessor and manages them in a hash @@ -251,4 +252,26 @@ public List listAllEntitiesInfo() { rtlist.sort(Comparator.comparingLong(TListUserInfo::getUserId)); return rtlist; } + + public void renameTable(String databaseName, String oldTableName, String newTableName) { + for (Entry entry : entityMap.entrySet()) { + String entityName = entry.getKey(); + Role entity = entry.getValue(); + lock.writeLock(entityName); + try { + if (entity.renameTable(databaseName, oldTableName, newTableName)) { + accessor.saveEntity(entity); + } + } catch (IOException e) { + LOGGER.error( + "Rename table {}.{} failed for entity {}, may need to reset privileges manually", + databaseName, + oldTableName, + entityName, + e); + } finally { + lock.writeUnlock(entityName); + } + } + } } diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/conf/IoTDBConstant.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/conf/IoTDBConstant.java index a94f472b606dd..95ad19be790d3 100644 --- a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/conf/IoTDBConstant.java +++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/conf/IoTDBConstant.java @@ -31,6 +31,8 @@ public class IoTDBConstant { + public static final String SCHEMA_EVOLUTION_FILE_SUFFIX = ".sevo"; + private IoTDBConstant() {} static { diff --git 
a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/partition/DataPartition.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/partition/DataPartition.java index 100c40eddcc23..99507d0ec0dfa 100644 --- a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/partition/DataPartition.java +++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/partition/DataPartition.java @@ -22,9 +22,12 @@ import org.apache.iotdb.common.rpc.thrift.TRegionReplicaSet; import org.apache.iotdb.common.rpc.thrift.TSeriesPartitionSlot; import org.apache.iotdb.common.rpc.thrift.TTimePartitionSlot; +import org.apache.iotdb.commons.partition.executor.SeriesPartitionExecutor.FullDeviceIdKey; +import org.apache.iotdb.commons.partition.executor.SeriesPartitionExecutor.SeriesPartitionKey; import org.apache.iotdb.commons.utils.PathUtils; import org.apache.iotdb.commons.utils.TimePartitionUtils; +import org.apache.tsfile.annotations.TreeModel; import org.apache.tsfile.file.metadata.IDeviceID; import org.apache.tsfile.read.filter.basic.Filter; import org.slf4j.Logger; @@ -95,18 +98,21 @@ public void setDataPartitionMap( this.dataPartitionMap = dataPartitionMap; } + @TreeModel public List> getTimePartitionRange( IDeviceID deviceID, Filter timeFilter) { - String storageGroup = getDatabaseNameByDevice(deviceID); - TSeriesPartitionSlot seriesPartitionSlot = calculateDeviceGroupId(deviceID); - if (!dataPartitionMap.containsKey(storageGroup) - || !dataPartitionMap.get(storageGroup).containsKey(seriesPartitionSlot)) { + String databaseName = getDatabaseNameByDevice(deviceID); + // since this method retrieves database from deviceId, it must only be used by the tree model + TSeriesPartitionSlot seriesPartitionSlot = + calculateDeviceGroupId(new FullDeviceIdKey(deviceID)); + if (!dataPartitionMap.containsKey(databaseName) + || !dataPartitionMap.get(databaseName).containsKey(seriesPartitionSlot)) { return Collections.emptyList(); } List> res = new 
ArrayList<>(); Map> map = - dataPartitionMap.get(storageGroup).get(seriesPartitionSlot); + dataPartitionMap.get(databaseName).get(seriesPartitionSlot); List timePartitionSlotList = map.keySet().stream() .filter(key -> TimePartitionUtils.satisfyPartitionStartTime(timeFilter, key.startTime)) @@ -138,10 +144,13 @@ public List> getTimePartitionRange( return res; } + @TreeModel public List getDataRegionReplicaSetWithTimeFilter( final IDeviceID deviceId, final Filter timeFilter) { + // since this method retrieves database from deviceId, it must only be used by the tree model final String storageGroup = getDatabaseNameByDevice(deviceId); - final TSeriesPartitionSlot seriesPartitionSlot = calculateDeviceGroupId(deviceId); + final TSeriesPartitionSlot seriesPartitionSlot = + calculateDeviceGroupId(new FullDeviceIdKey(deviceId)); Map> regionReplicaSetMap = dataPartitionMap .getOrDefault(storageGroup, Collections.emptyMap()) @@ -166,8 +175,8 @@ public List getDataRegionReplicaSetWithTimeFilter( *

The device id shall be [table, seg1, ....] */ public List getDataRegionReplicaSetWithTimeFilter( - final String database, final IDeviceID deviceId, final Filter timeFilter) { - final TSeriesPartitionSlot seriesPartitionSlot = calculateDeviceGroupId(deviceId); + final String database, final SeriesPartitionKey seriesPartitionKey, final Filter timeFilter) { + final TSeriesPartitionSlot seriesPartitionSlot = calculateDeviceGroupId(seriesPartitionKey); if (!dataPartitionMap.containsKey(database) || !dataPartitionMap.get(database).containsKey(seriesPartitionSlot)) { return Collections.singletonList(NOT_ASSIGNED); @@ -181,15 +190,18 @@ public List getDataRegionReplicaSetWithTimeFilter( .collect(toList()); } + @TreeModel public List getDataRegionReplicaSet( final IDeviceID deviceID, final TTimePartitionSlot tTimePartitionSlot) { + // since this method retrieves database from deviceId, it must only be used by the tree model final String storageGroup = getDatabaseNameByDevice(deviceID); final Map>> dbMap = dataPartitionMap.get(storageGroup); if (dbMap == null) { return Collections.singletonList(NOT_ASSIGNED); } - final TSeriesPartitionSlot seriesPartitionSlot = calculateDeviceGroupId(deviceID); + final TSeriesPartitionSlot seriesPartitionSlot = + calculateDeviceGroupId(new FullDeviceIdKey(deviceID)); final Map> seriesSlotMap = dbMap.get(seriesPartitionSlot); if (seriesSlotMap == null) { @@ -206,16 +218,17 @@ public List getDataRegionReplicaSet( } public List getDataRegionReplicaSetForWriting( - final IDeviceID deviceID, + final SeriesPartitionKey key, final List timePartitionSlotList, String databaseName) { if (databaseName == null) { - databaseName = getDatabaseNameByDevice(deviceID); + // must be the tree model here + databaseName = getDatabaseNameByDevice(((FullDeviceIdKey) key).getDeviceID()); } // A list of data region replica sets will store data in a same time partition. // We will insert data to the last set in the list. 
// TODO return the latest dataRegionReplicaSet for each time partition - final TSeriesPartitionSlot seriesPartitionSlot = calculateDeviceGroupId(deviceID); + final TSeriesPartitionSlot seriesPartitionSlot = calculateDeviceGroupId(key); // IMPORTANT TODO: (xingtanzjr) need to handle the situation for write operation that there are // more than 1 Regions for one timeSlot final List dataRegionReplicaSets = new ArrayList<>(); @@ -228,8 +241,7 @@ public List getDataRegionReplicaSetForWriting( if (targetRegionList == null || targetRegionList.isEmpty()) { throw new RuntimeException( String.format( - "targetRegionList is empty. device: %s, timeSlot: %s", - deviceID, timePartitionSlot)); + "targetRegionList is empty. device: %s, timeSlot: %s", key, timePartitionSlot)); } else { dataRegionReplicaSets.add(targetRegionList.get(targetRegionList.size() - 1)); } @@ -238,13 +250,16 @@ public List getDataRegionReplicaSetForWriting( } public TRegionReplicaSet getDataRegionReplicaSetForWriting( - final IDeviceID deviceID, final TTimePartitionSlot timePartitionSlot, String databaseName) { + final SeriesPartitionKey seriesPartitionKey, + final TTimePartitionSlot timePartitionSlot, + String databaseName) { // A list of data region replica sets will store data in a same time partition. // We will insert data to the last set in the list. 
// TODO return the latest dataRegionReplicaSet for each time partition - final TSeriesPartitionSlot seriesPartitionSlot = calculateDeviceGroupId(deviceID); + final TSeriesPartitionSlot seriesPartitionSlot = calculateDeviceGroupId(seriesPartitionKey); if (databaseName == null) { - databaseName = getDatabaseNameByDevice(deviceID); + // must be the tree model here + databaseName = getDatabaseNameByDevice(((FullDeviceIdKey) seriesPartitionKey).getDeviceID()); } final Map>> databasePartitionMap = dataPartitionMap.get(databaseName); @@ -261,10 +276,11 @@ public TRegionReplicaSet getDataRegionReplicaSetForWriting( return regions.get(0); } + @TreeModel public TRegionReplicaSet getDataRegionReplicaSetForWriting( IDeviceID deviceID, TTimePartitionSlot timePartitionSlot) { return getDataRegionReplicaSetForWriting( - deviceID, timePartitionSlot, getDatabaseNameByDevice(deviceID)); + new FullDeviceIdKey(deviceID), timePartitionSlot, getDatabaseNameByDevice(deviceID)); } public String getDatabaseNameByDevice(IDeviceID deviceID) { @@ -302,15 +318,14 @@ public List getDistributionInfo() { public void upsertDataPartition(DataPartition targetDataPartition) { requireNonNull(this.dataPartitionMap, "dataPartitionMap is null"); - for (Map.Entry< - String, Map>>> + for (Entry>>> targetDbEntry : targetDataPartition.getDataPartitionMap().entrySet()) { String database = targetDbEntry.getKey(); if (dataPartitionMap.containsKey(database)) { Map>> sourceSeriesPartitionMap = dataPartitionMap.get(database); - for (Map.Entry>> + for (Entry>> targetSeriesSlotEntry : targetDbEntry.getValue().entrySet()) { TSeriesPartitionSlot targetSeriesSlot = targetSeriesSlotEntry.getKey(); @@ -319,7 +334,7 @@ public void upsertDataPartition(DataPartition targetDataPartition) { sourceSeriesPartitionMap.get(targetSeriesSlot); Map> targetTimePartionMap = targetSeriesSlotEntry.getValue(); - for (Map.Entry> targetEntry : + for (Entry> targetEntry : targetTimePartionMap.entrySet()) { if 
(!sourceTimePartitionMap.containsKey(targetEntry.getKey())) { sourceTimePartitionMap.put(targetEntry.getKey(), targetEntry.getValue()); diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/partition/Partition.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/partition/Partition.java index b94cf3d005d72..5d32e33db610e 100644 --- a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/partition/Partition.java +++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/partition/Partition.java @@ -21,8 +21,7 @@ import org.apache.iotdb.common.rpc.thrift.TSeriesPartitionSlot; import org.apache.iotdb.commons.partition.executor.SeriesPartitionExecutor; - -import org.apache.tsfile.file.metadata.IDeviceID; +import org.apache.iotdb.commons.partition.executor.SeriesPartitionExecutor.SeriesPartitionKey; import java.util.List; @@ -42,8 +41,8 @@ protected Partition(String seriesSlotExecutorName, int seriesPartitionSlotNum) { seriesSlotExecutorName, seriesPartitionSlotNum); } - public TSeriesPartitionSlot calculateDeviceGroupId(IDeviceID deviceID) { - return executor.getSeriesPartitionSlot(deviceID); + public TSeriesPartitionSlot calculateDeviceGroupId(SeriesPartitionKey key) { + return executor.getSeriesPartitionSlot(key); } public abstract List getDistributionInfo(); diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/partition/SchemaPartition.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/partition/SchemaPartition.java index 96abc7498653d..ff56dee46fed5 100644 --- a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/partition/SchemaPartition.java +++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/partition/SchemaPartition.java @@ -22,10 +22,13 @@ import org.apache.iotdb.common.rpc.thrift.TRegionReplicaSet; import org.apache.iotdb.common.rpc.thrift.TSeriesPartitionSlot; import org.apache.iotdb.commons.exception.IoTDBException; +import 
org.apache.iotdb.commons.partition.executor.SeriesPartitionExecutor.FullDeviceIdKey; +import org.apache.iotdb.commons.partition.executor.SeriesPartitionExecutor.SeriesPartitionKey; import org.apache.iotdb.commons.utils.PathUtils; import org.apache.iotdb.commons.utils.TestOnly; import org.apache.iotdb.rpc.TSStatusCode; +import org.apache.tsfile.annotations.TreeModel; import org.apache.tsfile.file.metadata.IDeviceID; import java.util.ArrayList; @@ -75,18 +78,20 @@ public void setSchemaPartitionMap( * *

The device id shall be [table, seg1, ....] */ - public TRegionReplicaSet getSchemaRegionReplicaSet(String database, IDeviceID deviceID) { - TSeriesPartitionSlot seriesPartitionSlot = calculateDeviceGroupId(deviceID); + public TRegionReplicaSet getSchemaRegionReplicaSet(String database, SeriesPartitionKey key) { + TSeriesPartitionSlot seriesPartitionSlot = calculateDeviceGroupId(key); return schemaPartitionMap.get(database).get(seriesPartitionSlot); } + @TreeModel // [root, db, ....] public TRegionReplicaSet getSchemaRegionReplicaSet(final IDeviceID deviceID) { // A list of data region replica sets will store data in a same time partition. // We will insert data to the last set in the list. // TODO return the latest dataRegionReplicaSet for each time partition final String storageGroup = getStorageGroupByDevice(deviceID); - final TSeriesPartitionSlot seriesPartitionSlot = calculateDeviceGroupId(deviceID); + final TSeriesPartitionSlot seriesPartitionSlot = + calculateDeviceGroupId(new FullDeviceIdKey(deviceID)); if (schemaPartitionMap.get(storageGroup) == null) { throw new RuntimeException( new IoTDBException("Path does not exist. 
", TSStatusCode.PATH_NOT_EXIST.getStatusCode())); diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/partition/executor/SeriesPartitionExecutor.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/partition/executor/SeriesPartitionExecutor.java index d2666446e8e14..8140cb259b194 100644 --- a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/partition/executor/SeriesPartitionExecutor.java +++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/partition/executor/SeriesPartitionExecutor.java @@ -45,7 +45,12 @@ public SeriesPartitionExecutor(int seriesPartitionSlotNum) { @TestOnly public abstract TSeriesPartitionSlot getSeriesPartitionSlot(String device); - public abstract TSeriesPartitionSlot getSeriesPartitionSlot(IDeviceID deviceID); + @TestOnly + public TSeriesPartitionSlot getSeriesPartitionSlot(IDeviceID deviceID) { + return getSeriesPartitionSlot(new FullDeviceIdKey(deviceID)); + } + + public abstract TSeriesPartitionSlot getSeriesPartitionSlot(SeriesPartitionKey deviceID); public static SeriesPartitionExecutor getSeriesPartitionExecutor( String executorName, int seriesPartitionSlotNum) { @@ -73,4 +78,50 @@ private static synchronized void initStaticSeriesPartitionExecutor( } } } + + public interface SeriesPartitionKey { + int segmentNum(); + + Object segment(int index); + } + + public static class FullDeviceIdKey implements SeriesPartitionKey { + private final IDeviceID deviceID; + + public FullDeviceIdKey(IDeviceID deviceID) { + this.deviceID = deviceID; + } + + @Override + public int segmentNum() { + return deviceID.segmentNum(); + } + + @Override + public Object segment(int index) { + return deviceID.segment(index); + } + + public IDeviceID getDeviceID() { + return deviceID; + } + } + + public static class NoTableNameDeviceIdKey implements SeriesPartitionKey { + private final IDeviceID deviceID; + + public NoTableNameDeviceIdKey(IDeviceID deviceID) { + this.deviceID = deviceID; + } + + 
@Override + public int segmentNum() { + return deviceID.segmentNum() - 1; + } + + @Override + public Object segment(int index) { + return deviceID.segment(index + 1); + } + } } diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/partition/executor/hash/APHashExecutor.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/partition/executor/hash/APHashExecutor.java index 9390111d24720..6b6c7976a3531 100644 --- a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/partition/executor/hash/APHashExecutor.java +++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/partition/executor/hash/APHashExecutor.java @@ -22,8 +22,6 @@ import org.apache.iotdb.common.rpc.thrift.TSeriesPartitionSlot; import org.apache.iotdb.commons.partition.executor.SeriesPartitionExecutor; -import org.apache.tsfile.file.metadata.IDeviceID; - import static org.apache.iotdb.commons.conf.IoTDBConstant.PATH_SEPARATOR; public class APHashExecutor extends SeriesPartitionExecutor { @@ -49,13 +47,13 @@ public TSeriesPartitionSlot getSeriesPartitionSlot(String device) { } @Override - public TSeriesPartitionSlot getSeriesPartitionSlot(IDeviceID deviceID) { + public TSeriesPartitionSlot getSeriesPartitionSlot(SeriesPartitionKey key) { int hash = 0; - int segmentNum = deviceID.segmentNum(); + int segmentNum = key.segmentNum(); int index = 0; for (int segmentID = 0; segmentID < segmentNum; segmentID++) { - Object segment = deviceID.segment(segmentID); + Object segment = key.segment(segmentID); if (segment instanceof String) { String segmentStr = (String) segment; for (int i = 0; i < segmentStr.length(); i++) { diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/partition/executor/hash/BKDRHashExecutor.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/partition/executor/hash/BKDRHashExecutor.java index c039e8ddd8335..9502f8007df6f 100644 --- 
a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/partition/executor/hash/BKDRHashExecutor.java +++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/partition/executor/hash/BKDRHashExecutor.java @@ -22,8 +22,6 @@ import org.apache.iotdb.common.rpc.thrift.TSeriesPartitionSlot; import org.apache.iotdb.commons.partition.executor.SeriesPartitionExecutor; -import org.apache.tsfile.file.metadata.IDeviceID; - import static org.apache.iotdb.commons.conf.IoTDBConstant.PATH_SEPARATOR; public class BKDRHashExecutor extends SeriesPartitionExecutor { @@ -47,12 +45,12 @@ public TSeriesPartitionSlot getSeriesPartitionSlot(String device) { } @Override - public TSeriesPartitionSlot getSeriesPartitionSlot(IDeviceID deviceID) { + public TSeriesPartitionSlot getSeriesPartitionSlot(SeriesPartitionKey key) { int hash = 0; - int segmentNum = deviceID.segmentNum(); + int segmentNum = key.segmentNum(); for (int segmentID = 0; segmentID < segmentNum; segmentID++) { - Object segment = deviceID.segment(segmentID); + Object segment = key.segment(segmentID); if (segment instanceof String) { String segmentStr = (String) segment; for (int i = 0; i < segmentStr.length(); i++) { diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/partition/executor/hash/JSHashExecutor.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/partition/executor/hash/JSHashExecutor.java index 1e8c203158378..734da1ee39735 100644 --- a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/partition/executor/hash/JSHashExecutor.java +++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/partition/executor/hash/JSHashExecutor.java @@ -21,8 +21,6 @@ import org.apache.iotdb.common.rpc.thrift.TSeriesPartitionSlot; import org.apache.iotdb.commons.partition.executor.SeriesPartitionExecutor; -import org.apache.tsfile.file.metadata.IDeviceID; - import static org.apache.iotdb.commons.conf.IoTDBConstant.PATH_SEPARATOR; public class 
JSHashExecutor extends SeriesPartitionExecutor { @@ -46,12 +44,12 @@ public TSeriesPartitionSlot getSeriesPartitionSlot(String device) { } @Override - public TSeriesPartitionSlot getSeriesPartitionSlot(IDeviceID deviceID) { + public TSeriesPartitionSlot getSeriesPartitionSlot(SeriesPartitionKey key) { int hash = BASE; - int segmentNum = deviceID.segmentNum(); + int segmentNum = key.segmentNum(); for (int segmentID = 0; segmentID < segmentNum; segmentID++) { - Object segment = deviceID.segment(segmentID); + Object segment = key.segment(segmentID); if (segment instanceof String) { String segmentStr = (String) segment; for (int i = 0; i < segmentStr.length(); i++) { diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/partition/executor/hash/SDBMHashExecutor.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/partition/executor/hash/SDBMHashExecutor.java index e2143c00c0cec..ec68812cac609 100644 --- a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/partition/executor/hash/SDBMHashExecutor.java +++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/partition/executor/hash/SDBMHashExecutor.java @@ -21,8 +21,6 @@ import org.apache.iotdb.common.rpc.thrift.TSeriesPartitionSlot; import org.apache.iotdb.commons.partition.executor.SeriesPartitionExecutor; -import org.apache.tsfile.file.metadata.IDeviceID; - import static org.apache.iotdb.commons.conf.IoTDBConstant.PATH_SEPARATOR; public class SDBMHashExecutor extends SeriesPartitionExecutor { @@ -44,12 +42,12 @@ public TSeriesPartitionSlot getSeriesPartitionSlot(String device) { } @Override - public TSeriesPartitionSlot getSeriesPartitionSlot(IDeviceID deviceID) { + public TSeriesPartitionSlot getSeriesPartitionSlot(SeriesPartitionKey key) { int hash = 0; - int segmentNum = deviceID.segmentNum(); + int segmentNum = key.segmentNum(); for (int segmentID = 0; segmentID < segmentNum; segmentID++) { - Object segment = deviceID.segment(segmentID); + Object 
segment = key.segment(segmentID); if (segment instanceof String) { String segmentStr = (String) segment; for (int i = 0; i < segmentStr.length(); i++) { diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/receiver/IoTDBFileReceiver.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/receiver/IoTDBFileReceiver.java index ff8c8dbd293d8..97745bbf7ed2f 100644 --- a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/receiver/IoTDBFileReceiver.java +++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/receiver/IoTDBFileReceiver.java @@ -23,6 +23,7 @@ import org.apache.iotdb.commons.audit.IAuditEntity; import org.apache.iotdb.commons.audit.UserEntity; import org.apache.iotdb.commons.conf.CommonDescriptor; +import org.apache.iotdb.commons.conf.IoTDBConstant; import org.apache.iotdb.commons.exception.IllegalPathException; import org.apache.iotdb.commons.pipe.config.PipeConfig; import org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant; @@ -406,41 +407,16 @@ protected final TPipeTransferResp handleTransferFilePiece( final PipeTransferFilePieceReq req, final boolean isRequestThroughAirGap, final boolean isSingleFile) { - try { - updateWritingFileIfNeeded(req.getFileName(), isSingleFile); - - // If the request is through air gap, the sender will resend the file piece from the beginning - // of the file. So the receiver should reset the offset of the writing file to the beginning - // of the file. - if (isRequestThroughAirGap && req.getStartWritingOffset() < writingFileWriter.length()) { - writingFileWriter.setLength(req.getStartWritingOffset()); - } - - if (!isWritingFileOffsetCorrect(req.getStartWritingOffset())) { - if (!writingFile.getName().endsWith(TsFileConstant.TSFILE_SUFFIX)) { - // If the file is a tsFile, then the content will not be changed for a specific filename. - // However, for other files (mod, snapshot, etc.) 
the content varies for the same name in - // different times, then we must rewrite the file to apply the newest version. - writingFileWriter.setLength(0); - } + String fileName = req.getFileName(); + String suffix = fileName.substring(fileName.lastIndexOf('.')); - final TSStatus status = - RpcUtils.getStatus( - TSStatusCode.PIPE_TRANSFER_FILE_OFFSET_RESET, - String.format( - "Request sender to reset file reader's offset from %s to %s.", - req.getStartWritingOffset(), writingFileWriter.length())); - PipeLogger.log( - LOGGER::warn, - "Receiver id = %s: File offset reset requested by receiver, response status = %s.", - receiverId.get(), - status); - return PipeTransferFilePieceResp.toTPipeTransferResp(status, writingFileWriter.length()); + try { + if (suffix.equals(IoTDBConstant.SCHEMA_EVOLUTION_FILE_SUFFIX)) { + handleTransferSevoFile(req); + return PipeTransferFilePieceResp.toTPipeTransferResp( + RpcUtils.SUCCESS_STATUS, writingFileWriter.length()); } - - writingFileWriter.write(req.getFilePiece()); - return PipeTransferFilePieceResp.toTPipeTransferResp( - RpcUtils.SUCCESS_STATUS, writingFileWriter.length()); + return handleTsFileOrMods(req, isRequestThroughAirGap, isSingleFile); } catch (final Exception e) { PipeLogger.log( LOGGER::warn, @@ -461,6 +437,58 @@ protected final TPipeTransferResp handleTransferFilePiece( } } + private void handleTransferSevoFile(PipeTransferFilePieceReq req) throws IOException { + File file = new File(receiverFileDirWithIdSuffix.get(), req.getFileName()); + try (RandomAccessFile randomAccessFile = new RandomAccessFile(file, "rw")) { + randomAccessFile.write(req.getFilePiece()); + } + LOGGER.info( + "Receiver id = {}: written schema evolution file {} .", + receiverId.get(), + req.getFileName()); + } + + private PipeTransferFilePieceResp handleTsFileOrMods( + final PipeTransferFilePieceReq req, + final boolean isRequestThroughAirGap, + final boolean isSingleFile) + throws IOException { + updateWritingFileIfNeeded(req.getFileName(), 
isSingleFile); + + // If the request is through air gap, the sender will resend the file piece from the beginning + // of the file. So the receiver should reset the offset of the writing file to the beginning + // of the file. + if (isRequestThroughAirGap && req.getStartWritingOffset() < writingFileWriter.length()) { + writingFileWriter.setLength(req.getStartWritingOffset()); + } + + if (!isWritingFileOffsetCorrect(req.getStartWritingOffset())) { + if (!writingFile.getName().endsWith(TsFileConstant.TSFILE_SUFFIX)) { + // If the file is a tsFile, then the content will not be changed for a specific filename. + // However, for other files (mod, snapshot, etc.) the content varies for the same name in + // different times, then we must rewrite the file to apply the newest version. + writingFileWriter.setLength(0); + } + + final TSStatus status = + RpcUtils.getStatus( + TSStatusCode.PIPE_TRANSFER_FILE_OFFSET_RESET, + String.format( + "Request sender to reset file reader's offset from %s to %s.", + req.getStartWritingOffset(), writingFileWriter.length())); + PipeLogger.log( + LOGGER::warn, + "Receiver id = %s: File offset reset requested by receiver, response status = %s.", + receiverId.get(), + status); + return PipeTransferFilePieceResp.toTPipeTransferResp(status, writingFileWriter.length()); + } + + writingFileWriter.write(req.getFilePiece()); + return PipeTransferFilePieceResp.toTPipeTransferResp( + RpcUtils.SUCCESS_STATUS, writingFileWriter.length()); + } + protected final void updateWritingFileIfNeeded(final String fileName, final boolean isSingleFile) throws IOException { if (isFileExistedAndNameCorrect(fileName)) { @@ -506,7 +534,7 @@ protected final void updateWritingFileIfNeeded(final String fileName, final bool } private boolean isFileExistedAndNameCorrect(final String fileName) { - return writingFile != null && writingFile.exists() && writingFile.getName().equals(fileName); + return writingFile != null && writingFile.exists() && 
(writingFile.getName().equals(fileName)); } private void closeCurrentWritingFileWriter(final boolean fsyncBeforeClose) { diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/sink/payload/thrift/request/PipeTransferFilePieceReq.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/sink/payload/thrift/request/PipeTransferFilePieceReq.java index 550fc16002078..7b87d0a10437f 100644 --- a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/sink/payload/thrift/request/PipeTransferFilePieceReq.java +++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/sink/payload/thrift/request/PipeTransferFilePieceReq.java @@ -73,6 +73,29 @@ protected final PipeTransferFilePieceReq convertToTPipeTransferReq( return this; } + protected final PipeTransferFilePieceReq convertToTPipeTransferReq( + String snapshotName, long startWritingOffset, ByteBuffer snapshotPiece) throws IOException { + + this.fileName = snapshotName; + this.startWritingOffset = startWritingOffset; + this.filePiece = snapshotPiece.array(); + + this.version = IoTDBSinkRequestVersion.VERSION_1.getVersion(); + this.type = getPlanType().getType(); + try (final PublicBAOS byteArrayOutputStream = new PublicBAOS(); + final DataOutputStream outputStream = new DataOutputStream(byteArrayOutputStream)) { + ReadWriteIOUtils.write(snapshotName, outputStream); + ReadWriteIOUtils.write(startWritingOffset, outputStream); + ReadWriteIOUtils.write(snapshotPiece.remaining(), outputStream); + ReadWriteIOUtils.writeWithoutSize( + snapshotPiece, snapshotPiece.position(), snapshotPiece.remaining(), outputStream); + snapshotPiece.position(snapshotPiece.position() + snapshotPiece.remaining()); + body = ByteBuffer.wrap(byteArrayOutputStream.getBuf(), 0, byteArrayOutputStream.size()); + } + + return this; + } + protected final PipeTransferFilePieceReq translateFromTPipeTransferReq( TPipeTransferReq transferReq) { diff --git 
a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/schema/table/TsTable.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/schema/table/TsTable.java index 8f484f4e231d5..fed861ee2ca6c 100644 --- a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/schema/table/TsTable.java +++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/schema/table/TsTable.java @@ -41,8 +41,9 @@ import java.io.OutputStream; import java.nio.ByteBuffer; import java.util.ArrayList; -import java.util.Collections; +import java.util.Arrays; import java.util.HashMap; +import java.util.HashSet; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; @@ -63,7 +64,10 @@ public class TsTable { public static final String TIME_COLUMN_NAME = "time"; public static final String COMMENT_KEY = "__comment"; public static final String TTL_PROPERTY = "ttl"; - public static final Set TABLE_ALLOWED_PROPERTIES = Collections.singleton(TTL_PROPERTY); + public static final String ALLOW_ALTER_NAME_PROPERTY = "allow_alter_name"; + public static final boolean ALLOW_ALTER_NAME_DEFAULT = true; + public static final Set TABLE_ALLOWED_PROPERTIES = + new HashSet<>(Arrays.asList(TTL_PROPERTY, ALLOW_ALTER_NAME_PROPERTY)); private static final String OBJECT_STRING_ERROR = "When there are object fields, the %s %s shall not be '.', '..' 
or contain './', '.\\'."; protected String tableName; @@ -433,4 +437,12 @@ public String toString() { + props + '}'; } + + public boolean canAlterName() { + if (getProps() == null) { + return false; + } + return Boolean.parseBoolean( + getProps().getOrDefault(TsTable.ALLOW_ALTER_NAME_PROPERTY, "false")); + } } diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/utils/FileUtils.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/utils/FileUtils.java index 139d08ef99b0d..e2475c3707c9d 100644 --- a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/utils/FileUtils.java +++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/utils/FileUtils.java @@ -48,6 +48,8 @@ import java.util.List; import java.util.Stack; +import static org.apache.tsfile.external.commons.io.FileUtils.moveDirectory; + public class FileUtils { private static final Logger LOGGER = LoggerFactory.getLogger(FileUtils.class); @@ -428,12 +430,12 @@ public static void moveFileWithMD5Check(final File sourceFile, final File target } private static void moveFile(File sourceFile, File targetDir) throws IOException { - String sourceFileName = sourceFile.getName(); - final File exitsFile = new File(targetDir, sourceFileName); + final String sourceFileName = sourceFile.getName(); + final File existsFile = new File(targetDir, sourceFileName); // First check file sizes long sourceFileSize = sourceFile.length(); - long existsFileSize = exitsFile.length(); + long existsFileSize = existsFile.length(); if (sourceFileSize != existsFileSize) { File file = renameWithSize(sourceFile, sourceFileSize, targetDir); @@ -447,7 +449,7 @@ private static void moveFile(File sourceFile, File targetDir) throws IOException String sourceFileMD5; String existsFileMD5; try (final FileInputStream is1 = new FileInputStream(sourceFile); - final FileInputStream is2 = new FileInputStream(exitsFile); ) { + final FileInputStream is2 = new FileInputStream(existsFile)) { 
sourceFileMD5 = DigestUtils.md5Hex(is1); existsFileMD5 = DigestUtils.md5Hex(is2); } @@ -464,6 +466,126 @@ private static void moveFile(File sourceFile, File targetDir) throws IOException } } + public static void moveDirWithMD5Check(final File sourceDir, final File targetParentDir) + throws IOException { + final String sourceDirName = sourceDir.getName(); + final File targetDir = new File(targetParentDir, sourceDirName); + if (targetDir.exists()) { + moveDir(sourceDir, targetParentDir); + } else { + // Ensure parent exists and move directory + try { + Files.createDirectories(targetParentDir.toPath()); + } catch (IOException e) { + LOGGER.warn( + "failed to create target parent directory: {}", targetParentDir.getAbsolutePath()); + throw e; + } + moveDirectory(sourceDir, targetDir); + } + } + + private static void moveDir(File sourceDir, File targetParentDir) throws IOException { + final String sourceDirName = sourceDir.getName(); + final File existsDir = new File(targetParentDir, sourceDirName); + + // First check directory sizes + long sourceDirSize = getDirSize(sourceDir.getAbsolutePath()); + long existsDirSize = getDirSize(existsDir.getAbsolutePath()); + + if (sourceDirSize != existsDirSize) { + File file = renameDirWithSize(sourceDirName, sourceDirSize, targetParentDir); + if (!file.exists()) { + moveDirRename(sourceDir, file); + } + return; + } + + // If sizes are equal, check deterministic MD5 of directory contents + String sourceDirMD5 = computeDirMD5(sourceDir); + String existsDirMD5 = computeDirMD5(existsDir); + + if (sourceDirMD5.equals(existsDirMD5)) { + // identical directory contents, delete source dir + recursivelyDeleteFolder(sourceDir.getAbsolutePath()); + LOGGER.info( + "Deleted the directory {} because it already exists in the target directory: {}", + sourceDir.getName(), + targetParentDir.getAbsolutePath()); + } else { + File file = renameDirWithMD5(sourceDirName, sourceDirMD5, targetParentDir); + moveDirRename(sourceDir, file); + } + } + + private 
static File renameDirWithMD5( + final String sourceDirName, final String sourceDirMD5, final File targetParentDir) { + final String targetDirName = sourceDirName + "-" + sourceDirMD5.substring(0, 16); + return new File(targetParentDir, targetDirName); + } + + private static File renameDirWithSize( + final String sourceDirName, final long sourceDirSize, final File targetParentDir) { + final String newDirName = + String.format("%s_%s_%s", sourceDirName, sourceDirSize, System.currentTimeMillis()); + return new File(targetParentDir, newDirName); + } + + private static void moveDirRename(File sourceDir, File targetDir) throws IOException { + moveDirectory(sourceDir, targetDir); + + LOGGER.info( + RENAME_FILE_MESSAGE, + sourceDir.getName(), + targetDir.getName(), + targetDir.getParentFile().getAbsolutePath()); + } + + private static String computeDirMD5(final File dir) throws IOException { + // Collect all files (not directories) with relative paths + final List files = new ArrayList<>(); + final int basePathLen = dir.getAbsolutePath().length() + 1; + Stack stack = new Stack<>(); + if (dir.exists()) { + stack.push(dir); + } + while (!stack.isEmpty()) { + File file = stack.pop(); + if (file.isDirectory()) { + File[] subs = file.listFiles(); + if (subs != null) { + for (File f : subs) { + stack.push(f); + } + } + } else { + files.add(file); + } + } + + // Sort by relative path to make deterministic + files.sort( + (f1, f2) -> { + String r1 = f1.getAbsolutePath().substring(basePathLen); + String r2 = f2.getAbsolutePath().substring(basePathLen); + return r1.compareTo(r2); + }); + + // Build a combined hash from each file's relative path, size and md5 + StringBuilder sb = new StringBuilder(); + for (File f : files) { + String relPath = f.getAbsolutePath().substring(basePathLen); + long size = f.length(); + String md5; + try (final FileInputStream is = new FileInputStream(f)) { + md5 = DigestUtils.md5Hex(is); + } + 
sb.append(relPath).append(':').append(size).append(':').append(md5).append(';'); + } + + return DigestUtils.md5Hex(sb.toString().getBytes()); + } + public static void copyFileWithMD5Check(final File sourceFile, final File targetDir) throws IOException { final String sourceFileName = sourceFile.getName(); @@ -486,6 +608,69 @@ public static void copyFileWithMD5Check(final File sourceFile, final File target } } + /** + * Copy an entire directory to targetParentDir with MD5 check semantics similar to + * copyFileWithMD5Check. If the target directory exists, compare sizes and deterministic MD5; if + * identical, do nothing; otherwise copy to a renamed directory (by size or MD5) to avoid + * overwriting. + */ + public static void copyDirWithMD5Check(final File sourceDir, final File targetParentDir) + throws IOException { + final String sourceDirName = sourceDir.getName(); + final File targetDir = new File(targetParentDir, sourceDirName); + if (targetDir.exists()) { + // First check directory sizes + long sourceDirSize = getDirSize(sourceDir.getAbsolutePath()); + long targetDirSize = getDirSize(targetDir.getAbsolutePath()); + + if (sourceDirSize != targetDirSize) { + File file = renameDirWithSize(sourceDirName, sourceDirSize, targetParentDir); + if (!file.exists()) { + copyDirRename(sourceDir, file); + } + return; + } + + // If sizes are equal, check deterministic MD5 + String sourceDirMD5 = computeDirMD5(sourceDir); + String targetDirMD5 = computeDirMD5(targetDir); + + if (sourceDirMD5.equals(targetDirMD5)) { + // identical directory contents, nothing to do + return; + } + + File file = renameDirWithMD5(sourceDirName, sourceDirMD5, targetParentDir); + if (!file.exists()) { + copyDirRename(sourceDir, file); + } + } else { + try { + Files.createDirectories(targetParentDir.toPath()); + } catch (IOException e) { + LOGGER.warn( + "failed to create target parent directory: {}", targetParentDir.getAbsolutePath()); + throw e; + } + + if (!copyDir(sourceDir, targetDir)) { + throw 
new IOException("copy directory failed: " + sourceDir.getAbsolutePath()); + } + } + } + + private static void copyDirRename(File sourceDir, File targetDir) throws IOException { + if (!copyDir(sourceDir, targetDir)) { + throw new IOException("copy directory failed: " + sourceDir.getAbsolutePath()); + } + + LOGGER.info( + COPY_FILE_MESSAGE, + sourceDir.getName(), + targetDir, + targetDir.getParentFile().getAbsolutePath()); + } + private static File renameWithMD5( final File sourceFile, final String sourceFileMD5, final File targetDir) throws IOException { final String sourceFileBaseName = FilenameUtils.getBaseName(sourceFile.getName()); diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/utils/IOUtils.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/utils/IOUtils.java index 8b63d29bd786e..50b9dba3f4b7e 100644 --- a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/utils/IOUtils.java +++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/utils/IOUtils.java @@ -25,12 +25,17 @@ import org.apache.iotdb.commons.path.PartialPath; import com.google.common.base.Supplier; +import org.apache.tsfile.utils.ReadWriteForEncodingUtils; +import org.apache.tsfile.utils.ReadWriteIOUtils; import java.io.DataInputStream; import java.io.File; import java.io.IOException; +import java.io.InputStream; import java.io.OutputStream; import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.List; import java.util.Map; import java.util.Optional; import java.util.function.Function; @@ -289,4 +294,39 @@ public static Optional retryNoException( } return Optional.empty(); } + + public static void write(List strings, OutputStream outputStream) throws IOException { + if (strings == null) { + ReadWriteForEncodingUtils.writeVarInt(-1, outputStream); + return; + } + ReadWriteForEncodingUtils.writeVarInt(strings.size(), outputStream); + for (String string : strings) { + ReadWriteIOUtils.writeVar(string, outputStream); 
+ } + } + + public static List readStringList(InputStream inputStream) throws IOException { + int size = ReadWriteForEncodingUtils.readVarInt(inputStream); + if (size == -1) { + return null; + } + List strings = new ArrayList<>(size); + for (int i = 0; i < size; i++) { + strings.add(ReadWriteIOUtils.readVarIntString(inputStream)); + } + return strings; + } + + public static List readStringList(ByteBuffer buffer) { + int size = ReadWriteForEncodingUtils.readVarInt(buffer); + if (size == -1) { + return null; + } + List strings = new ArrayList<>(size); + for (int i = 0; i < size; i++) { + strings.add(ReadWriteIOUtils.readVarIntString(buffer)); + } + return strings; + } } diff --git a/iotdb-core/relational-grammar/src/main/antlr4/org/apache/iotdb/db/relational/grammar/sql/RelationalSql.g4 b/iotdb-core/relational-grammar/src/main/antlr4/org/apache/iotdb/db/relational/grammar/sql/RelationalSql.g4 index 08a8b4c2e82a1..d26586e4eae2b 100644 --- a/iotdb-core/relational-grammar/src/main/antlr4/org/apache/iotdb/db/relational/grammar/sql/RelationalSql.g4 +++ b/iotdb-core/relational-grammar/src/main/antlr4/org/apache/iotdb/db/relational/grammar/sql/RelationalSql.g4 @@ -258,7 +258,7 @@ descTableStatement alterTableStatement : ALTER TABLE (IF EXISTS)? from=qualifiedName RENAME TO to=identifier #renameTable | ALTER TABLE (IF EXISTS)? tableName=qualifiedName ADD COLUMN (IF NOT EXISTS)? column=columnDefinition #addColumn - | ALTER TABLE (IF EXISTS)? tableName=qualifiedName RENAME COLUMN (IF EXISTS)? from=identifier TO to=identifier #renameColumn + | ALTER TABLE (IF EXISTS)? tableName=qualifiedName RENAME COLUMN (IF EXISTS)? identifier TO identifier (identifier TO identifier)* #renameColumn | ALTER TABLE (IF EXISTS)? tableName=qualifiedName DROP COLUMN (IF EXISTS)? column=identifier #dropColumn // set TTL can use this | ALTER TABLE (IF EXISTS)? 
tableName=qualifiedName SET PROPERTIES propertyAssignments #setTableProperties diff --git a/iotdb-protocol/thrift-datanode/src/main/thrift/datanode.thrift b/iotdb-protocol/thrift-datanode/src/main/thrift/datanode.thrift index cca7110f28d40..6f8b65dd5a4ef 100644 --- a/iotdb-protocol/thrift-datanode/src/main/thrift/datanode.thrift +++ b/iotdb-protocol/thrift-datanode/src/main/thrift/datanode.thrift @@ -459,6 +459,18 @@ struct TDeleteDataForDeleteSchemaReq { 3: optional bool isGeneratedByPipe } +struct TDataRegionEvolveSchemaReq { + 1: required list dataRegionIdList + 2: required binary schemaEvolutions + 3: optional bool isGeneratedByPipe +} + +struct TSchemaRegionEvolveSchemaReq { + 1: required list schemaRegionIdList + 2: required binary schemaEvolutions + 3: optional bool isGeneratedByPipe +} + struct TDeleteTimeSeriesReq { 1: required list schemaRegionIdList 2: required binary pathPatternTree @@ -1091,6 +1103,10 @@ service IDataNodeRPCService { */ common.TSStatus deleteDataForDeleteSchema(TDeleteDataForDeleteSchemaReq req) + common.TSStatus evolveSchemaInDataRegion(TDataRegionEvolveSchemaReq req) + + common.TSStatus evolveSchemaInSchemaRegion(TSchemaRegionEvolveSchemaReq req) + /** * Delete matched timeseries and remove according schema black list in target schemRegion */