diff --git a/.gitmodules b/.gitmodules
index 847abf7d931..f6990fed41f 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -103,3 +103,6 @@
 [submodule "contrib/fastops"]
     path = contrib/fastops
     url = https://github.com/ClickHouse-Extras/fastops
+[submodule "contrib/orc"]
+    path = contrib/orc
+    url = https://github.com/apache/orc
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 607f650deeb..7d6714b6474 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,13 @@
+## ClickHouse release 19.13.3.26, 2019-08-22
+
+### Bug Fix
+* Fix `ALTER TABLE ... UPDATE` query for tables with `enable_mixed_granularity_parts=1`. [#6543](https://github.com/yandex/ClickHouse/pull/6543) ([alesapin](https://github.com/alesapin))
+* Fix NPE when using IN clause with a subquery with a tuple. [#6125](https://github.com/yandex/ClickHouse/issues/6125) [#6550](https://github.com/yandex/ClickHouse/pull/6550) ([tavplubix](https://github.com/tavplubix))
+* Fixed an issue where a stale replica that becomes alive may still have data parts that were removed by DROP PARTITION. [#6522](https://github.com/yandex/ClickHouse/issues/6522) [#6523](https://github.com/yandex/ClickHouse/pull/6523) ([tavplubix](https://github.com/tavplubix))
+* Fixed an issue with parsing CSV. [#6426](https://github.com/yandex/ClickHouse/issues/6426) [#6559](https://github.com/yandex/ClickHouse/pull/6559) ([tavplubix](https://github.com/tavplubix))
+* Fixed data race in the system.parts table and ALTER query. This fixes [#6245](https://github.com/yandex/ClickHouse/issues/6245). [#6513](https://github.com/yandex/ClickHouse/pull/6513) ([alexey-milovidov](https://github.com/alexey-milovidov))
+* Fixed wrong code in mutations that may lead to memory corruption. Fixed a segfault with read of address `0x14c0` that may happen due to concurrent `DROP TABLE` and `SELECT` from `system.parts` or `system.parts_columns`. Fixed a race condition in preparation of mutation queries. Fixed a deadlock caused by `OPTIMIZE` of Replicated tables and concurrent modification operations like ALTERs. [#6514](https://github.com/yandex/ClickHouse/pull/6514) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
 ## ClickHouse release 19.13.2.19, 2019-08-14
 
 ### New Feature
@@ -31,6 +41,16 @@
 * Fix build with external `libcxx` [#6010](https://github.com/yandex/ClickHouse/pull/6010) ([Ivan](https://github.com/abyss7))
 * Fix shared build with `rdkafka` library [#6101](https://github.com/yandex/ClickHouse/pull/6101) ([Ivan](https://github.com/abyss7))
 
+## ClickHouse release 19.11.8.46, 2019-08-22
+
+### Bug Fix
+* Fix `ALTER TABLE ... UPDATE` query for tables with `enable_mixed_granularity_parts=1`. [#6543](https://github.com/yandex/ClickHouse/pull/6543) ([alesapin](https://github.com/alesapin))
+* Fix NPE when using IN clause with a subquery with a tuple. [#6125](https://github.com/yandex/ClickHouse/issues/6125) [#6550](https://github.com/yandex/ClickHouse/pull/6550) ([tavplubix](https://github.com/tavplubix))
+* Fixed an issue where a stale replica that becomes alive may still have data parts that were removed by DROP PARTITION. [#6522](https://github.com/yandex/ClickHouse/issues/6522) [#6523](https://github.com/yandex/ClickHouse/pull/6523) ([tavplubix](https://github.com/tavplubix))
+* Fixed an issue with parsing CSV. [#6426](https://github.com/yandex/ClickHouse/issues/6426) [#6559](https://github.com/yandex/ClickHouse/pull/6559) ([tavplubix](https://github.com/tavplubix))
+* Fixed data race in the system.parts table and ALTER query. This fixes [#6245](https://github.com/yandex/ClickHouse/issues/6245). [#6513](https://github.com/yandex/ClickHouse/pull/6513) ([alexey-milovidov](https://github.com/alexey-milovidov))
+* Fixed wrong code in mutations that may lead to memory corruption. Fixed a segfault with read of address `0x14c0` that may happen due to concurrent `DROP TABLE` and `SELECT` from `system.parts` or `system.parts_columns`. Fixed a race condition in preparation of mutation queries. Fixed a deadlock caused by `OPTIMIZE` of Replicated tables and concurrent modification operations like ALTERs. [#6514](https://github.com/yandex/ClickHouse/pull/6514) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
 ## ClickHouse release 19.11.7.40, 2019-08-14
 
 ### Bug fix
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 8466fa5d33d..6ac4d67f6ae 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -481,6 +481,7 @@ include (cmake/find_mimalloc.cmake)
 include (cmake/find_simdjson.cmake)
 include (cmake/find_rapidjson.cmake)
 include (cmake/find_fastops.cmake)
+include (cmake/find_orc.cmake)
 
 find_contrib_lib(cityhash)
 find_contrib_lib(farmhash)
diff --git a/cmake/find_orc.cmake b/cmake/find_orc.cmake
new file mode 100644
index 00000000000..3676bec1b6b
--- /dev/null
+++ b/cmake/find_orc.cmake
@@ -0,0 +1,8 @@
+##TODO replace hardcode to find procedure
+
+set(USE_ORC 0)
+set(USE_INTERNAL_ORC_LIBRARY ON)
+
+if (ARROW_LIBRARY)
+    set(USE_ORC 1)
+endif()
\ No newline at end of file
diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt
index c478311d77a..e652c393141 100644
--- a/contrib/CMakeLists.txt
+++ b/contrib/CMakeLists.txt
@@ -10,6 +10,18 @@ endif ()
 
 set_property(DIRECTORY PROPERTY EXCLUDE_FROM_ALL 1)
 
+if (USE_INTERNAL_ORC_LIBRARY)
+    set(BUILD_JAVA OFF)
+    set (ANALYZE_JAVA OFF)
+    set (BUILD_CPP_TESTS OFF)
+    set (BUILD_TOOLS OFF)
+    option(BUILD_JAVA OFF)
+    option (ANALYZE_JAVA OFF)
+    option (BUILD_CPP_TESTS OFF)
+    option (BUILD_TOOLS OFF)
+    set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_SOURCE_DIR}/contrib/orc/cmake_modules")
+    add_subdirectory(orc)
+endif()
 
 if (USE_INTERNAL_UNWIND_LIBRARY)
     add_subdirectory (libunwind-cmake)
diff --git a/contrib/arrow-cmake/CMakeLists.txt b/contrib/arrow-cmake/CMakeLists.txt
index a7b6628ea4e..bc229deeced 100644
--- a/contrib/arrow-cmake/CMakeLists.txt
+++ b/contrib/arrow-cmake/CMakeLists.txt
@@ -47,6 +47,71 @@ target_include_directories(${THRIFT_LIBRARY} SYSTEM PUBLIC ${ClickHouse_SOURCE_D
 target_link_libraries(${THRIFT_LIBRARY} PRIVATE Threads::Threads)
 
+# === orc
+
+set(ORC_SOURCE_DIR ${ClickHouse_SOURCE_DIR}/contrib/orc/c++)
+set(ORC_INCLUDE_DIR ${ORC_SOURCE_DIR}/include)
+set(ORC_SOURCE_SRC_DIR ${ORC_SOURCE_DIR}/src)
+set(ORC_SOURCE_WRAP_DIR ${ORC_SOURCE_DIR}/wrap)
+
+set(ORC_BUILD_SRC_DIR ${CMAKE_CURRENT_BINARY_DIR}/../orc/c++/src)
+set(ORC_BUILD_INCLUDE_DIR ${CMAKE_CURRENT_BINARY_DIR}/../orc/c++/include)
+
+set(GOOGLE_PROTOBUF_DIR ${ClickHouse_SOURCE_DIR}/contrib/protobuf/src/)
+set(ORC_ADDITION_SOURCE_DIR ${CMAKE_CURRENT_BINARY_DIR})
+set(ARROW_SRC_DIR ${ClickHouse_SOURCE_DIR}/contrib/arrow/cpp/src)
+
+set(PROTOBUF_EXECUTABLE ${CMAKE_CURRENT_BINARY_DIR}/../protobuf/cmake/protoc)
+set(PROTO_DIR ${ORC_SOURCE_DIR}/../proto)
+
+
+add_custom_command(OUTPUT orc_proto.pb.h orc_proto.pb.cc
+        COMMAND ${PROTOBUF_EXECUTABLE}
+        -I ${PROTO_DIR}
+        --cpp_out="${CMAKE_CURRENT_BINARY_DIR}"
+        "${PROTO_DIR}/orc_proto.proto")
+
+include_directories(SYSTEM ${ORC_INCLUDE_DIR})
+include_directories(SYSTEM ${ORC_SOURCE_SRC_DIR})
+include_directories(SYSTEM ${ORC_SOURCE_WRAP_DIR})
+include_directories(SYSTEM ${GOOGLE_PROTOBUF_DIR})
+include_directories(SYSTEM ${ORC_BUILD_SRC_DIR})
+include_directories(SYSTEM ${ORC_BUILD_INCLUDE_DIR})
+include_directories(SYSTEM ${ORC_ADDITION_SOURCE_DIR})
+include_directories(SYSTEM ${ARROW_SRC_DIR})
+
+
+set(ORC_SRCS
+        ${ARROW_SRC_DIR}/arrow/adapters/orc/adapter.cc
+        ${ORC_SOURCE_SRC_DIR}/Exceptions.cc
+        ${ORC_SOURCE_SRC_DIR}/OrcFile.cc
+        ${ORC_SOURCE_SRC_DIR}/Reader.cc
+        ${ORC_SOURCE_SRC_DIR}/ByteRLE.cc
+        ${ORC_SOURCE_SRC_DIR}/ColumnPrinter.cc
+        ${ORC_SOURCE_SRC_DIR}/ColumnReader.cc
+        ${ORC_SOURCE_SRC_DIR}/ColumnWriter.cc
+        ${ORC_SOURCE_SRC_DIR}/Common.cc
+        ${ORC_SOURCE_SRC_DIR}/Compression.cc
+        ${ORC_SOURCE_SRC_DIR}/Exceptions.cc
+        ${ORC_SOURCE_SRC_DIR}/Int128.cc
+        ${ORC_SOURCE_SRC_DIR}/LzoDecompressor.cc
+        ${ORC_SOURCE_SRC_DIR}/MemoryPool.cc
+        ${ORC_SOURCE_SRC_DIR}/OrcFile.cc
+        ${ORC_SOURCE_SRC_DIR}/Reader.cc
+        ${ORC_SOURCE_SRC_DIR}/RLE.cc
+        ${ORC_SOURCE_SRC_DIR}/RLEv1.cc
+        ${ORC_SOURCE_SRC_DIR}/RLEv2.cc
+        ${ORC_SOURCE_SRC_DIR}/Statistics.cc
+        ${ORC_SOURCE_SRC_DIR}/StripeStream.cc
+        ${ORC_SOURCE_SRC_DIR}/Timezone.cc
+        ${ORC_SOURCE_SRC_DIR}/TypeImpl.cc
+        ${ORC_SOURCE_SRC_DIR}/Vector.cc
+        ${ORC_SOURCE_SRC_DIR}/Writer.cc
+        ${ORC_SOURCE_SRC_DIR}/io/InputStream.cc
+        ${ORC_SOURCE_SRC_DIR}/io/OutputStream.cc
+        ${ORC_ADDITION_SOURCE_DIR}/orc_proto.pb.cc
+        )
+
 
 # === arrow
 
@@ -103,6 +168,7 @@ set(ARROW_SRCS
     ${LIBRARY_DIR}/util/thread-pool.cc
     ${LIBRARY_DIR}/util/trie.cc
     ${LIBRARY_DIR}/util/utf8.cc
+    ${ORC_SRCS}
     )
 
 set(ARROW_SRCS ${ARROW_SRCS}
@@ -151,8 +217,9 @@ endif()
 
 add_library(${ARROW_LIBRARY} ${ARROW_SRCS})
+add_dependencies(${ARROW_LIBRARY} protoc)
 target_include_directories(${ARROW_LIBRARY} SYSTEM PUBLIC ${ClickHouse_SOURCE_DIR}/contrib/arrow/cpp/src PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/cpp/src ${Boost_INCLUDE_DIRS})
-target_link_libraries(${ARROW_LIBRARY} PRIVATE ${DOUBLE_CONVERSION_LIBRARIES} Threads::Threads)
+target_link_libraries(${ARROW_LIBRARY} PRIVATE ${DOUBLE_CONVERSION_LIBRARIES} ${PROTOBUF_LIBRARIES} Threads::Threads)
 if (ARROW_WITH_LZ4)
     target_link_libraries(${ARROW_LIBRARY} PRIVATE ${LZ4_LIBRARY})
 endif()
diff --git a/contrib/orc b/contrib/orc
new file mode 160000
index 00000000000..5981208e394
--- /dev/null
+++ b/contrib/orc
@@ -0,0 +1 @@
+Subproject commit 5981208e39447df84827f6a961d1da76bacb6078
diff --git a/dbms/CMakeLists.txt b/dbms/CMakeLists.txt
index b589c398238..f011cc21103 100644
--- a/dbms/CMakeLists.txt
+++ b/dbms/CMakeLists.txt
@@ -114,6 +114,7 @@ add_headers_and_sources(dbms src/Columns)
 add_headers_and_sources(dbms src/Storages)
 add_headers_and_sources(dbms src/Storages/Distributed)
 add_headers_and_sources(dbms src/Storages/MergeTree)
+add_headers_and_sources(dbms src/Storages/LiveView)
 add_headers_and_sources(dbms src/Client)
 add_headers_and_sources(dbms src/Formats)
 add_headers_and_sources(dbms src/Processors)
diff --git a/dbms/programs/CMakeLists.txt b/dbms/programs/CMakeLists.txt
index 6626d90e5f5..03eba470949 100644
--- a/dbms/programs/CMakeLists.txt
+++ b/dbms/programs/CMakeLists.txt
@@ -98,7 +98,7 @@ endif()
 if (CLICKHOUSE_SPLIT_BINARY)
     set (CLICKHOUSE_ALL_TARGETS clickhouse-server clickhouse-client clickhouse-local clickhouse-benchmark clickhouse-performance-test
-        clickhouse-extract-from-config clickhouse-compressor clickhouse-format clickhouse-copier)
+        clickhouse-extract-from-config clickhouse-compressor clickhouse-format clickhouse-obfuscator clickhouse-copier)
 
     if (ENABLE_CLICKHOUSE_ODBC_BRIDGE)
         list (APPEND CLICKHOUSE_ALL_TARGETS clickhouse-odbc-bridge)
diff --git a/dbms/programs/obfuscator/Obfuscator.cpp b/dbms/programs/obfuscator/Obfuscator.cpp
index a96c10072dc..5149566465c 100644
--- a/dbms/programs/obfuscator/Obfuscator.cpp
+++ b/dbms/programs/obfuscator/Obfuscator.cpp
@@ -22,6 +22,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -263,11 +264,11 @@ public:
 
     ColumnPtr generate(const IColumn & column) override
     {
-        const auto & src_data = static_cast &>(column).getData();
+        const auto & src_data = assert_cast &>(column).getData();
         size_t size = src_data.size();
 
         auto res_column = ColumnVector::create(size);
-        auto & res_data = static_cast &>(*res_column).getData();
+        auto & res_data = assert_cast &>(*res_column).getData();
 
         for (size_t i = 0; i < size; ++i)
         {
@@ -355,7 +356,7 @@ public:
     ColumnPtr generate(const IColumn & column) override
     {
-        const ColumnFixedString & column_fixed_string = static_cast(column);
+        const ColumnFixedString & column_fixed_string = assert_cast(column);
         const size_t string_size = column_fixed_string.getN();
 
         const auto & src_data = column_fixed_string.getChars();
@@ -392,11 +393,11 @@ public:
     ColumnPtr generate(const IColumn & column) override
     {
-        const auto & src_data = static_cast &>(column).getData();
+        const auto & src_data = assert_cast &>(column).getData();
         size_t size = src_data.size();
 
         auto res_column = ColumnVector::create(size);
-        auto & res_data = static_cast &>(*res_column).getData();
+        auto & res_data = assert_cast &>(*res_column).getData();
 
         for (size_t i = 0; i < size; ++i)
         {
@@ -749,7 +750,7 @@ public:
     void train(const IColumn & column) override
     {
-        const ColumnString & column_string = static_cast(column);
+        const ColumnString & column_string = assert_cast(column);
         size_t size = column_string.size();
 
         for (size_t i = 0; i < size; ++i)
@@ -766,7 +767,7 @@ public:
     ColumnPtr generate(const IColumn & column) override
     {
-        const ColumnString & column_string = static_cast(column);
+        const ColumnString & column_string = assert_cast(column);
         size_t size = column_string.size();
 
         auto res_column = ColumnString::create();
@@ -801,7 +802,7 @@ public:
     void train(const IColumn & column) override
     {
-        const ColumnArray & column_array = static_cast(column);
+        const ColumnArray & column_array = assert_cast(column);
         const IColumn & nested_column = column_array.getData();
 
         nested_model->train(nested_column);
@@ -814,7 +815,7 @@ public:
     ColumnPtr generate(const IColumn & column) override
     {
-        const ColumnArray & column_array = static_cast(column);
+        const ColumnArray & column_array = assert_cast(column);
         const IColumn & nested_column = column_array.getData();
 
         ColumnPtr new_nested_column = nested_model->generate(nested_column);
@@ -834,7 +835,7 @@ public:
     void train(const IColumn & column) override
     {
-        const ColumnNullable & column_nullable = static_cast(column);
+        const ColumnNullable & column_nullable = assert_cast(column);
         const IColumn & nested_column = column_nullable.getNestedColumn();
 
         nested_model->train(nested_column);
@@ -847,7 +848,7 @@ public:
     ColumnPtr generate(const IColumn & column) override
     {
-        const ColumnNullable & column_nullable = static_cast(column);
+        const ColumnNullable & column_nullable = assert_cast(column);
         const IColumn & nested_column = column_nullable.getNestedColumn();
 
         ColumnPtr new_nested_column = nested_model->generate(nested_column);
diff --git a/dbms/programs/odbc-bridge/ODBCBlockInputStream.cpp b/dbms/programs/odbc-bridge/ODBCBlockInputStream.cpp
index 70aaba3f137..8aa93c43c2b 100644
--- a/dbms/programs/odbc-bridge/ODBCBlockInputStream.cpp
+++ b/dbms/programs/odbc-bridge/ODBCBlockInputStream.cpp
@@ -3,6 +3,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -44,46 +45,46 @@ namespace
         switch (type)
         {
             case ValueType::vtUInt8:
-                static_cast(column).insertValue(value.convert());
+                assert_cast(column).insertValue(value.convert());
                 break;
             case ValueType::vtUInt16:
-                static_cast(column).insertValue(value.convert());
+                assert_cast(column).insertValue(value.convert());
                 break;
            case ValueType::vtUInt32:
-                static_cast(column).insertValue(value.convert());
+                assert_cast(column).insertValue(value.convert());
                 break;
            case ValueType::vtUInt64:
-                static_cast(column).insertValue(value.convert());
+                assert_cast(column).insertValue(value.convert());
                 break;
            case ValueType::vtInt8:
-                static_cast(column).insertValue(value.convert());
+                assert_cast(column).insertValue(value.convert());
                 break;
            case ValueType::vtInt16:
-                static_cast(column).insertValue(value.convert());
+                assert_cast(column).insertValue(value.convert());
                 break;
            case ValueType::vtInt32:
-                static_cast(column).insertValue(value.convert());
+                assert_cast(column).insertValue(value.convert());
                 break;
            case ValueType::vtInt64:
-                static_cast(column).insertValue(value.convert());
+                assert_cast(column).insertValue(value.convert());
                 break;
            case ValueType::vtFloat32:
-                static_cast(column).insertValue(value.convert());
+                assert_cast(column).insertValue(value.convert());
                 break;
            case ValueType::vtFloat64:
-                static_cast(column).insertValue(value.convert());
+                assert_cast(column).insertValue(value.convert());
                 break;
            case ValueType::vtString:
-                static_cast(column).insert(value.convert());
+                assert_cast(column).insert(value.convert());
                 break;
            case ValueType::vtDate:
-                static_cast(column).insertValue(UInt16{LocalDate{value.convert()}.getDayNum()});
+                assert_cast(column).insertValue(UInt16{LocalDate{value.convert()}.getDayNum()});
                 break;
            case ValueType::vtDateTime:
-                static_cast(column).insertValue(time_t{LocalDateTime{value.convert()}});
+                assert_cast(column).insertValue(time_t{LocalDateTime{value.convert()}});
                 break;
            case ValueType::vtUUID:
-                static_cast(column).insert(parse(value.convert()));
+                assert_cast(column).insert(parse(value.convert()));
                 break;
         }
     }
@@ -114,7 +115,7 @@ Block ODBCBlockInputStream::readImpl()
             {
                 if (description.types[idx].second)
                 {
-                    ColumnNullable & column_nullable = static_cast(*columns[idx]);
+                    ColumnNullable & column_nullable = assert_cast(*columns[idx]);
                     insertValue(column_nullable.getNestedColumn(), description.types[idx].first, value);
                     column_nullable.getNullMapData().emplace_back(0);
                 }
diff --git a/dbms/programs/server/config.xml b/dbms/programs/server/config.xml
index 44b3c25c19d..814b7dded3c 100644
--- a/dbms/programs/server/config.xml
+++ b/dbms/programs/server/config.xml
@@ -217,6 +217,7 @@
         See https://clickhouse.yandex/docs/en/table_engines/replication/
     -->
+
diff --git a/docs/zh/database_engines/mysql.md b/docs/zh/database_engines/mysql.md
index 51ac4126e2d..38dfcb5ef64 120000
--- a/docs/zh/database_engines/mysql.md
+++ b/docs/zh/database_engines/mysql.md
@@ -1 +1,124 @@
-../../en/database_engines/mysql.md
\ No newline at end of file
+# MySQL
+
+MySQL引擎用于将远程的MySQL服务器中的表映射到ClickHouse中,并允许您对表进行`INSERT`和`SELECT`查询,以方便您在ClickHouse与MySQL之间进行数据交换。
+
+`MySQL`数据库引擎会将对其的查询转换为MySQL语法并发送到MySQL服务器中,因此您可以执行诸如`SHOW TABLES`或`SHOW CREATE TABLE`之类的操作。
+
+但您无法对其执行以下操作:
+
+- `ATTACH`/`DETACH`
+- `DROP`
+- `RENAME`
+- `CREATE TABLE`
+- `ALTER`
+
+
+## CREATE DATABASE
+
+``` sql
+CREATE DATABASE [IF NOT EXISTS] db_name [ON CLUSTER cluster]
+ENGINE = MySQL('host:port', 'database', 'user', 'password')
+```
+
+**MySQL数据库引擎参数**
+
+- `host:port` — 链接的MySQL地址。
+- `database` — 链接的MySQL数据库。
+- `user` — 链接的MySQL用户。
+- `password` — 链接的MySQL用户密码。
+
+
+## 支持的类型对应
+
+MySQL | ClickHouse
+------|------------
+UNSIGNED TINYINT | [UInt8](../data_types/int_uint.md)
+TINYINT | [Int8](../data_types/int_uint.md)
+UNSIGNED SMALLINT | [UInt16](../data_types/int_uint.md)
+SMALLINT | [Int16](../data_types/int_uint.md)
+UNSIGNED INT, UNSIGNED MEDIUMINT | [UInt32](../data_types/int_uint.md)
+INT, MEDIUMINT | [Int32](../data_types/int_uint.md)
+UNSIGNED BIGINT | [UInt64](../data_types/int_uint.md)
+BIGINT | [Int64](../data_types/int_uint.md)
+FLOAT | [Float32](../data_types/float.md)
+DOUBLE | [Float64](../data_types/float.md)
+DATE | [Date](../data_types/date.md)
+DATETIME, TIMESTAMP | [DateTime](../data_types/datetime.md)
+BINARY | [FixedString](../data_types/fixedstring.md)
+
+其他的MySQL数据类型将全部都转换为[String](../data_types/string.md)。
+
+同时以上的所有类型都支持[Nullable](../data_types/nullable.md)。
+
+
+## 使用示例
+
+在MySQL中创建表:
+
+```
+mysql> USE test;
+Database changed
+
+mysql> CREATE TABLE `mysql_table` (
+    ->   `int_id` INT NOT NULL AUTO_INCREMENT,
+    ->   `float` FLOAT NOT NULL,
+    ->   PRIMARY KEY (`int_id`));
+Query OK, 0 rows affected (0,09 sec)
+
+mysql> insert into mysql_table (`int_id`, `float`) VALUES (1,2);
+Query OK, 1 row affected (0,00 sec)
+
+mysql> select * from mysql_table;
++--------+-------+
+| int_id | value |
++--------+-------+
+|      1 |     2 |
++--------+-------+
+1 row in set (0,00 sec)
+```
+
+在ClickHouse中创建MySQL类型的数据库,同时与MySQL服务器交换数据:
+
+```sql
+CREATE DATABASE mysql_db ENGINE = MySQL('localhost:3306', 'test', 'my_user', 'user_password')
+```
+```sql
+SHOW DATABASES
+```
+```text
+┌─name─────┐
+│ default  │
+│ mysql_db │
+│ system   │
+└──────────┘
+```
+```sql
+SHOW TABLES FROM mysql_db
+```
+```text
+┌─name─────────┐
+│ mysql_table  │
+└──────────────┘
+```
+```sql
+SELECT * FROM mysql_db.mysql_table
+```
+```text
+┌─int_id─┬─value─┐
+│      1 │     2 │
+└────────┴───────┘
+```
+```sql
+INSERT INTO mysql_db.mysql_table VALUES (3,4)
+```
+```sql
+SELECT * FROM mysql_db.mysql_table
+```
+```text
+┌─int_id─┬─value─┐
+│      1 │     2 │
+│      3 │     4 │
+└────────┴───────┘
+```
+
+[来源文章](https://clickhouse.yandex/docs/en/database_engines/mysql/)
diff --git a/docs/zh/operations/table_engines/mergetree.md b/docs/zh/operations/table_engines/mergetree.md
index 5ddf837708a..5e330164c5a 100644
--- a/docs/zh/operations/table_engines/mergetree.md
+++ b/docs/zh/operations/table_engines/mergetree.md
@@ -48,7 +48,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
 
 **子句**
 
-- `ENGINE` - 引擎名和参数。 `ENGINE = MergeTree()`. `MergeTree` 引擎没有参数。
+- `ENGINE` - 引擎名和参数。 `ENGINE = MergeTree()`。 `MergeTree` 引擎不需要其他参数。
 
 - `PARTITION BY` — [分区键](custom_partitioning_key.md) 。