diff --git a/base/common/arithmeticOverflow.h b/base/common/arithmeticOverflow.h index 175e75a62f4..0957342bbb4 100644 --- a/base/common/arithmeticOverflow.h +++ b/base/common/arithmeticOverflow.h @@ -145,6 +145,19 @@ namespace common return __builtin_mul_overflow(x, y, &res); } + template <typename T, typename U, typename R> + inline bool mulOverflow(T x, U y, R & res) + { + // not built in type, wide integer + if constexpr (is_big_int_v<T> || is_big_int_v<U> || is_big_int_v<R>) + { + res = mulIgnoreOverflow(x, y); + return false; + } + else + return __builtin_mul_overflow(x, y, &res); + } + template <> inline bool mulOverflow(int x, int y, int & res) { diff --git a/base/mysqlxx/Query.cpp b/base/mysqlxx/Query.cpp index c0d5c20fdfd..e7d1e0c1d69 100644 --- a/base/mysqlxx/Query.cpp +++ b/base/mysqlxx/Query.cpp @@ -77,7 +77,9 @@ void Query::executeImpl() case CR_SERVER_LOST: throw ConnectionLost(errorMessage(mysql_driver), err_no); default: - throw BadQuery(errorMessage(mysql_driver), err_no); + /// Add query to the exception message, since it may differ from the user input query. 
+ /// (also you can use this and create query with an error to see what query ClickHouse created) + throw BadQuery(errorMessage(mysql_driver) + " (query: " + query_string + ")", err_no); } } } diff --git a/cmake/find/ccache.cmake b/cmake/find/ccache.cmake index 986c9cb5fe2..43c2de0c921 100644 --- a/cmake/find/ccache.cmake +++ b/cmake/find/ccache.cmake @@ -51,8 +51,8 @@ if (CCACHE_FOUND AND NOT COMPILER_MATCHES_CCACHE) message(STATUS "ccache is 4.2+ no quirks for SOURCE_DATE_EPOCH required") elseif (CCACHE_VERSION VERSION_GREATER_EQUAL "4.0") message(STATUS "Ignore SOURCE_DATE_EPOCH for ccache") - set_property (GLOBAL PROPERTY RULE_LAUNCH_COMPILE "env -u SOURCE_DATE_EPOCH ${CCACHE_FOUND}") - set_property (GLOBAL PROPERTY RULE_LAUNCH_LINK "env -u SOURCE_DATE_EPOCH ${CCACHE_FOUND}") + set_property (GLOBAL PROPERTY RULE_LAUNCH_COMPILE "env -u SOURCE_DATE_EPOCH") + set_property (GLOBAL PROPERTY RULE_LAUNCH_LINK "env -u SOURCE_DATE_EPOCH") endif() else () message(${RECONFIGURE_MESSAGE_LEVEL} "Not using ${CCACHE_FOUND} ${CCACHE_VERSION} bug: https://bugzilla.samba.org/show_bug.cgi?id=8118") diff --git a/docs/en/interfaces/formats.md b/docs/en/interfaces/formats.md index f353f0b1d43..c8b3d690e17 100644 --- a/docs/en/interfaces/formats.md +++ b/docs/en/interfaces/formats.md @@ -23,7 +23,6 @@ The supported formats are: | [CustomSeparated](#format-customseparated) | ✔ | ✔ | | [Values](#data-format-values) | ✔ | ✔ | | [Vertical](#vertical) | ✗ | ✔ | -| [VerticalRaw](#verticalraw) | ✗ | ✔ | | [JSON](#json) | ✗ | ✔ | | [JSONAsString](#jsonasstring) | ✔ | ✗ | | [JSONStrings](#jsonstrings) | ✗ | ✔ | @@ -944,10 +943,6 @@ test: string with 'quotes' and with some special This format is only appropriate for outputting a query result, but not for parsing (retrieving data to insert in a table). -## VerticalRaw {#verticalraw} - -Similar to [Vertical](#vertical), but with escaping disabled. 
This format is only suitable for outputting query results, not for parsing (receiving data and inserting it in the table). - ## XML {#xml} XML format is suitable only for output, not for parsing. Example: @@ -1579,4 +1574,4 @@ Writing to a file ".msgpk": $ clickhouse-client --query="CREATE TABLE msgpack (array Array(UInt8)) ENGINE = Memory;" $ clickhouse-client --query="INSERT INTO msgpack VALUES ([0, 1, 2, 3, 42, 253, 254, 255]), ([255, 254, 253, 42, 3, 2, 1, 0])"; $ clickhouse-client --query="SELECT * FROM msgpack FORMAT MsgPack" > tmp_msgpack.msgpk; -``` \ No newline at end of file +``` diff --git a/docs/en/interfaces/third-party/gui.md b/docs/en/interfaces/third-party/gui.md index ad39adbd653..b6d475eaad0 100644 --- a/docs/en/interfaces/third-party/gui.md +++ b/docs/en/interfaces/third-party/gui.md @@ -113,6 +113,22 @@ Features: [MindsDB](https://mindsdb.com/) is an open-source AI layer for databases including ClickHouse that allows you to effortlessly develop, train and deploy state-of-the-art machine learning models. MindsDB Studio(GUI) allows you to train new models from database, interpret predictions made by the model, identify potential data biases, and evaluate and visualize model accuracy using the Explainable AI function to adapt and tune your Machine Learning models faster. +### DBM {#dbm} + +[DBM](https://dbm.incubator.edurt.io/) DBM is a visual management tool for ClickHouse! + +Features: + +- Support query history (pagination, clear all, etc.) 
+- Support selected sql clauses query +- Support terminating query +- Support table management (metadata, delete, preview) +- Support database management (delete, create) +- Support custom query +- Support multiple data sources management(connection test, monitoring) +- Support monitor (processor, connection, query) +- Support migrate data + ## Commercial {#commercial} ### DataGrip {#datagrip} @@ -190,20 +206,4 @@ SeekTable is [free](https://www.seektable.com/help/cloud-pricing) for personal/i [Chadmin](https://github.com/bun4uk/chadmin) is a simple UI where you can visualize your currently running queries on your ClickHouse cluster and info about them and kill them if you want. -### DBM {#dbm} - -[DBM](https://dbm.incubator.edurt.io/) DBM is a visual management tool for ClickHouse! - -Features: - -- Support query history (pagination, clear all, etc.) -- Support selected sql clauses query -- Support terminating query -- Support table management (metadata, delete, preview) -- Support database management (delete, create) -- Support custom query -- Support multiple data sources management(connection test, monitoring) -- Support monitor (processor, connection, query) -- Support migrate data - [Original article](https://clickhouse.tech/docs/en/interfaces/third-party/gui/) diff --git a/docs/en/sql-reference/functions/geo/h3.md b/docs/en/sql-reference/functions/geo/h3.md index ad7b4657af7..b938ab8ca6e 100644 --- a/docs/en/sql-reference/functions/geo/h3.md +++ b/docs/en/sql-reference/functions/geo/h3.md @@ -229,6 +229,42 @@ Result: └───────────────────────────────────────┘ ``` +## h3ToGeoBoundary {#h3togeoboundary} + +Returns array of pairs `(lon, lat)`, which corresponds to the boundary of the provided H3 index. + +**Syntax** + +``` sql +h3ToGeoBoundary(h3Index) +``` + +**Arguments** + +- `h3Index` — H3 Index. Type: [UInt64](../../../sql-reference/data-types/int-uint.md). + +**Returned values** + +- Array of pairs '(lon, lat)'. 
+Type: [Array](../../../sql-reference/data-types/array.md)([Float64](../../../sql-reference/data-types/float.md), [Float64](../../../sql-reference/data-types/float.md)). + + +**Example** + +Query: + +``` sql +SELECT h3ToGeoBoundary(644325524701193974) AS coordinates; +``` + +Result: + +``` text +┌─h3ToGeoBoundary(599686042433355775)────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐ +│ [(37.2713558667319,-121.91508032705622),(37.353926450852256,-121.8622232890249),(37.42834118609435,-121.92354999630156),(37.42012867767779,-122.03773496427027),(37.33755608435299,-122.090428929044),(37.26319797461824,-122.02910130919001)] │ +└────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ +``` + ## h3kRing {#h3kring} Lists all the [H3](#h3index) hexagons in the raduis of `k` from the given hexagon in random order. diff --git a/docs/en/sql-reference/statements/alter/index.md b/docs/en/sql-reference/statements/alter/index.md index 382306016e6..dbf695edf3b 100644 --- a/docs/en/sql-reference/statements/alter/index.md +++ b/docs/en/sql-reference/statements/alter/index.md @@ -5,7 +5,7 @@ toc_title: ALTER ## ALTER {#query_language_queries_alter} -Most `ALTER` queries modify table settings or data: +Most `ALTER TABLE` queries modify table settings or data: - [COLUMN](../../../sql-reference/statements/alter/column.md) - [PARTITION](../../../sql-reference/statements/alter/partition.md) @@ -17,9 +17,14 @@ Most `ALTER` queries modify table settings or data: - [TTL](../../../sql-reference/statements/alter/ttl.md) !!! 
note "Note" - Most `ALTER` queries are supported only for [\*MergeTree](../../../engines/table-engines/mergetree-family/index.md) tables, as well as [Merge](../../../engines/table-engines/special/merge.md) and [Distributed](../../../engines/table-engines/special/distributed.md). + Most `ALTER TABLE` queries are supported only for [\*MergeTree](../../../engines/table-engines/mergetree-family/index.md) tables, as well as [Merge](../../../engines/table-engines/special/merge.md) and [Distributed](../../../engines/table-engines/special/distributed.md). -While these `ALTER` settings modify entities related to role-based access control: +These `ALTER` statements manipulate views: + +- [ALTER TABLE ... MODIFY QUERY](../../../sql-reference/statements/alter/view.md) — Modifies a [Materialized view](../create/view.md#materialized) structure. +- [ALTER LIVE VIEW](../../../sql-reference/statements/alter/view.md#alter-live-view) — Refreshes a [Live view](../create/view.md#live-view). + +These `ALTER` statements modify entities related to role-based access control: - [USER](../../../sql-reference/statements/alter/user.md) - [ROLE](../../../sql-reference/statements/alter/role.md) diff --git a/docs/en/sql-reference/statements/alter/view.md b/docs/en/sql-reference/statements/alter/view.md new file mode 100644 index 00000000000..0fb1c4be0ff --- /dev/null +++ b/docs/en/sql-reference/statements/alter/view.md @@ -0,0 +1,44 @@ +--- +toc_priority: 50 +toc_title: VIEW +--- + +# ALTER TABLE … MODIFY QUERY Statement {#alter-modify-query} + +You can modify `SELECT` query that was specified when a [materialized view](../create/view.md#materialized) was created with the `ALTER TABLE … MODIFY QUERY` statement. Use it when the materialized view was created without the `TO [db.]name` clause. The `allow_experimental_alter_materialized_view_structure` setting must be enabled. 
+ +If a materialized view uses the `TO [db.]name` construction, you must [DETACH](../detach.md) the view, run [ALTER TABLE](index.md) query for the target table, and then [ATTACH](../attach.md) the previously detached (`DETACH`) view. + +**Example** + +```sql +CREATE TABLE src_table (`a` UInt32) ENGINE = MergeTree ORDER BY a; +CREATE MATERIALIZED VIEW mv (`a` UInt32) ENGINE = MergeTree ORDER BY a AS SELECT a FROM src_table; +INSERT INTO src_table (a) VALUES (1), (2); +SELECT * FROM mv; +``` +```text +┌─a─┐ +│ 1 │ +│ 2 │ +└───┘ +``` +```sql +ALTER TABLE mv MODIFY QUERY SELECT a * 2 as a FROM src_table; +INSERT INTO src_table (a) VALUES (3), (4); +SELECT * FROM mv; +``` +```text +┌─a─┐ +│ 6 │ +│ 8 │ +└───┘ +┌─a─┐ +│ 1 │ +│ 2 │ +└───┘ +``` + +## ALTER LIVE VIEW Statement {#alter-live-view} + +`ALTER LIVE VIEW ... REFRESH` statement refreshes a [Live view](../create/view.md#live-view). See [Force Live View Refresh](../create/view.md#live-view-alter-refresh). diff --git a/docs/en/sql-reference/statements/create/view.md b/docs/en/sql-reference/statements/create/view.md index 110eb2aaaf2..b6a09e25f95 100644 --- a/docs/en/sql-reference/statements/create/view.md +++ b/docs/en/sql-reference/statements/create/view.md @@ -5,9 +5,9 @@ toc_title: VIEW # CREATE VIEW {#create-view} -Creates a new view. There are two types of views: normal and materialized. +Creates a new view. Views can be [normal](#normal), [materialized](#materialized) and [live](#live-view) (the latter is an experimental feature). -## Normal {#normal} +## Normal View {#normal} Syntax: @@ -35,7 +35,7 @@ This query is fully equivalent to using the subquery: SELECT a, b, c FROM (SELECT ...) ``` -## Materialized {#materialized} +## Materialized View {#materialized} ``` sql CREATE MATERIALIZED VIEW [IF NOT EXISTS] [db.]table_name [ON CLUSTER] [TO[db.]name] [ENGINE = engine] [POPULATE] AS SELECT ... 
@@ -59,7 +59,7 @@ If you specify `POPULATE`, the existing table data is inserted in the view when A `SELECT` query can contain `DISTINCT`, `GROUP BY`, `ORDER BY`, `LIMIT`… Note that the corresponding conversions are performed independently on each block of inserted data. For example, if `GROUP BY` is set, data is aggregated during insertion, but only within a single packet of inserted data. The data won’t be further aggregated. The exception is when using an `ENGINE` that independently performs data aggregation, such as `SummingMergeTree`. -The execution of [ALTER](../../../sql-reference/statements/alter/index.md) queries on materialized views has limitations, so they might be inconvenient. If the materialized view uses the construction `TO [db.]name`, you can `DETACH` the view, run `ALTER` for the target table, and then `ATTACH` the previously detached (`DETACH`) view. +The execution of [ALTER](../../../sql-reference/statements/alter/view.md) queries on materialized views has limitations, so they might be inconvenient. If the materialized view uses the construction `TO [db.]name`, you can `DETACH` the view, run `ALTER` for the target table, and then `ATTACH` the previously detached (`DETACH`) view. Note that materialized view is influenced by [optimize_on_insert](../../../operations/settings/settings.md#optimize-on-insert) setting. The data is merged before the insertion into a view. @@ -67,7 +67,7 @@ Views look the same as normal tables. For example, they are listed in the result To delete a view, use [DROP VIEW](../../../sql-reference/statements/drop.md#drop-view). Although `DROP TABLE` works for VIEWs as well. -## Live View (Experimental) {#live-view} +## Live View [Experimental] {#live-view} !!! important "Important" This is an experimental feature that may change in backwards-incompatible ways in the future releases. @@ -93,7 +93,7 @@ Live views work similarly to how a query in a distributed table works. 
But inste See [WITH REFRESH](#live-view-with-refresh) to force periodic updates of a live view that in some cases can be used as a workaround. -### Monitoring Changes {#live-view-monitoring} +### Monitoring Live View Changes {#live-view-monitoring} You can monitor changes in the `LIVE VIEW` query result using [WATCH](../../../sql-reference/statements/watch.md) query. @@ -118,12 +118,11 @@ WATCH lv; │ 1 │ 1 │ └────────┴──────────┘ ┌─sum(x)─┬─_version─┐ -│ 2 │ 2 │ +│ 3 │ 2 │ └────────┴──────────┘ ┌─sum(x)─┬─_version─┐ │ 6 │ 3 │ └────────┴──────────┘ -... ``` ```sql @@ -154,7 +153,6 @@ WATCH lv EVENTS; ┌─version─┐ │ 3 │ └─────────┘ -... ``` You can execute [SELECT](../../../sql-reference/statements/select/index.md) query on a live view in the same way as for any regular view or a table. If the query result is cached it will return the result immediately without running the stored query on the underlying tables. @@ -163,7 +161,7 @@ You can execute [SELECT](../../../sql-reference/statements/select/index.md) quer SELECT * FROM [db.]live_view WHERE ... ``` -### Force Refresh {#live-view-alter-refresh} +### Force Live View Refresh {#live-view-alter-refresh} You can force live view refresh using the `ALTER LIVE VIEW [db.]table_name REFRESH` statement. @@ -235,7 +233,7 @@ WATCH lv Code: 60. DB::Exception: Received from localhost:9000. DB::Exception: Table default.lv does not exist.. ``` -### Usage {#live-view-usage} +### Live View Usage {#live-view-usage} Most common uses of live view tables include: @@ -244,4 +242,5 @@ Most common uses of live view tables include: - Watching for table changes and triggering a follow-up select queries. - Watching metrics from system tables using periodic refresh. 
-[Original article](https://clickhouse.tech/docs/en/sql-reference/statements/create/view/) +**See Also** +- [ALTER LIVE VIEW](../alter/view.md#alter-live-view) diff --git a/docs/en/sql-reference/statements/show.md b/docs/en/sql-reference/statements/show.md index b5df38642ad..8992f9f96b9 100644 --- a/docs/en/sql-reference/statements/show.md +++ b/docs/en/sql-reference/statements/show.md @@ -8,7 +8,7 @@ toc_title: SHOW ## SHOW CREATE TABLE {#show-create-table} ``` sql -SHOW CREATE [TEMPORARY] [TABLE|DICTIONARY] [db.]table [INTO OUTFILE filename] [FORMAT format] +SHOW CREATE [TEMPORARY] [TABLE|DICTIONARY|VIEW] [db.]table|view [INTO OUTFILE filename] [FORMAT format] ``` Returns a single `String`-type ‘statement’ column, which contains a single value – the `CREATE` query used for creating the specified object. diff --git a/docs/ja/interfaces/formats.md b/docs/ja/interfaces/formats.md index 35cfd4f67fb..ce91fe2439f 100644 --- a/docs/ja/interfaces/formats.md +++ b/docs/ja/interfaces/formats.md @@ -26,7 +26,6 @@ aの結果 `SELECT`、および実行する `INSERT`ファイルバックアッ | [カスタム区切り](#format-customseparated) | ✔ | ✔ | | [値](#data-format-values) | ✔ | ✔ | | [垂直](#vertical) | ✗ | ✔ | -| [VerticalRaw](#verticalraw) | ✗ | ✔ | | [JSON](#json) | ✗ | ✔ | | [JSONCompact](#jsoncompact) | ✗ | ✔ | | [JSONEachRow](#jsoneachrow) | ✔ | ✔ | @@ -819,10 +818,6 @@ test: string with 'quotes' and with some special この形式は、クエリ結果の出力にのみ適していますが、解析(テーブルに挿入するデータの取得)には適していません。 -## VerticalRaw {#verticalraw} - -に類似した [垂直](#vertical) しかし、エスケープ無効で。 この形式は、クエリ結果の出力にのみ適しており、解析(データの受信とテーブルへの挿入)には適していません。 - ## XML {#xml} XML形式は出力にのみ適しており、解析には適していません。 例: diff --git a/docs/ru/interfaces/formats.md b/docs/ru/interfaces/formats.md index a02e1436d36..059ddc3ed44 100644 --- a/docs/ru/interfaces/formats.md +++ b/docs/ru/interfaces/formats.md @@ -22,7 +22,6 @@ ClickHouse может принимать (`INSERT`) и отдавать (`SELECT | [CustomSeparated](#format-customseparated) | ✔ | ✔ | | [Values](#data-format-values) | ✔ | ✔ | | [Vertical](#vertical) | 
✗ | ✔ | -| [VerticalRaw](#verticalraw) | ✗ | ✔ | | [JSON](#json) | ✗ | ✔ | | [JSONAsString](#jsonasstring) | ✔ | ✗ | | [JSONStrings](#jsonstrings) | ✗ | ✔ | @@ -916,10 +915,6 @@ test: string with 'quotes' and with some special Этот формат подходит только для вывода результата выполнения запроса, но не для парсинга (приёма данных для вставки в таблицу). -## VerticalRaw {#verticalraw} - -Аналогичен [Vertical](#vertical), но с отключенным выходом. Этот формат подходит только для вывода результата выполнения запроса, но не для парсинга (приёма данных для вставки в таблицу). - ## XML {#xml} Формат XML подходит только для вывода данных, не для парсинга. Пример: @@ -1493,4 +1488,4 @@ ClickHouse поддерживает запись и чтение из файло $ clickhouse-client --query="CREATE TABLE msgpack (array Array(UInt8)) ENGINE = Memory;" $ clickhouse-client --query="INSERT INTO msgpack VALUES ([0, 1, 2, 3, 42, 253, 254, 255]), ([255, 254, 253, 42, 3, 2, 1, 0])"; $ clickhouse-client --query="SELECT * FROM msgpack FORMAT MsgPack" > tmp_msgpack.msgpk; -``` \ No newline at end of file +``` diff --git a/docs/ru/sql-reference/aggregate-functions/reference/stochasticlinearregression.md b/docs/ru/sql-reference/aggregate-functions/reference/stochasticlinearregression.md index 6da0f6caacd..85524c7ede3 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/stochasticlinearregression.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/stochasticlinearregression.md @@ -50,7 +50,7 @@ AS state FROM train_data; После сохранения состояния в таблице мы можем использовать его несколько раз для прогнозирования или смёржить с другими состояниями и создать новые, улучшенные модели. 
-``` sql +```sql WITH (SELECT state FROM your_model) AS model SELECT evalMLMethod(model, param1, param2) FROM test_data ``` @@ -65,9 +65,9 @@ evalMLMethod(model, param1, param2) FROM test_data - ``` sql - SELECT state1 + state2 FROM your_models - ``` +```sql +SELECT state1 + state2 FROM your_models +``` где таблица `your_models` содержит обе модели. Запрос вернёт новый объект `AggregateFunctionState`. @@ -75,9 +75,9 @@ evalMLMethod(model, param1, param2) FROM test_data - ``` sql - SELECT stochasticLinearRegression(0.01)(target, param1, param2) FROM train_data - ``` +```sql +SELECT stochasticLinearRegression(0.01)(target, param1, param2) FROM train_data +``` Подобный запрос строит модель и возвращает её веса, отвечающие параметрам моделей и смещение. Таким образом, в приведенном выше примере запрос вернет столбец с тремя значениями. diff --git a/docs/ru/sql-reference/statements/alter/index.md b/docs/ru/sql-reference/statements/alter/index.md index 2b7caa5ad5b..73ee201b56b 100644 --- a/docs/ru/sql-reference/statements/alter/index.md +++ b/docs/ru/sql-reference/statements/alter/index.md @@ -14,7 +14,7 @@ ALTER TABLE [db].name [ON CLUSTER cluster] ADD|DROP|CLEAR|COMMENT|MODIFY COLUMN В запросе указывается список из одного или более действий через запятую. Каждое действие — операция над столбцом. -Большинство запросов `ALTER` изменяют настройки таблицы или данные: +Большинство запросов `ALTER TABLE` изменяют настройки таблицы или данные: - [COLUMN](../../../sql-reference/statements/alter/column.md) - [PARTITION](../../../sql-reference/statements/alter/partition.md) @@ -26,7 +26,12 @@ ALTER TABLE [db].name [ON CLUSTER cluster] ADD|DROP|CLEAR|COMMENT|MODIFY COLUMN - [TTL](../../../sql-reference/statements/alter/ttl.md) !!! note "Note" - Запрос `ALTER` поддерживается только для таблиц типа `*MergeTree`, а также `Merge` и `Distributed`. Запрос имеет несколько вариантов. 
+ Запрос `ALTER TABLE` поддерживается только для таблиц типа `*MergeTree`, а также `Merge` и `Distributed`. Запрос имеет несколько вариантов. + +Следующие запросы `ALTER` управляют представлениями: + +- [ALTER TABLE ... MODIFY QUERY](../../../sql-reference/statements/alter/view.md) — изменяет структуру [Materialized view](../create/view.md#materialized). +- [ALTER LIVE VIEW](../../../sql-reference/statements/alter/view.md#alter-live-view) — обновляет [Live view](../create/view.md#live-view). Следующие запросы `ALTER` изменяют сущности, связанные с управлением доступом на основе ролей: diff --git a/docs/ru/sql-reference/statements/alter/view.md b/docs/ru/sql-reference/statements/alter/view.md new file mode 100644 index 00000000000..86dd018a947 --- /dev/null +++ b/docs/ru/sql-reference/statements/alter/view.md @@ -0,0 +1,44 @@ +--- +toc_priority: 50 +toc_title: VIEW +--- + +# Выражение ALTER TABLE … MODIFY QUERY {#alter-modify-query} + +Вы можете изменить запрос `SELECT`, который был задан при создании [материализованного представления](../create/view.md#materialized), с помощью запроса `ALTER TABLE … MODIFY QUERY`. Используйте его, если при создании материализованного представления не использовалась секция `TO [db.]name`. Настройка `allow_experimental_alter_materialized_view_structure` должна быть включена. + +Если при создании материализованного представления использовалась конструкция `TO [db.]name`, то для изменения отсоедините представление с помощью [DETACH](../detach.md), измените таблицу с помощью [ALTER TABLE](index.md), а затем снова присоедините запрос с помощью [ATTACH](../attach.md). 
+ +**Пример** + +```sql +CREATE TABLE src_table (`a` UInt32) ENGINE = MergeTree ORDER BY a; +CREATE MATERIALIZED VIEW mv (`a` UInt32) ENGINE = MergeTree ORDER BY a AS SELECT a FROM src_table; +INSERT INTO src_table (a) VALUES (1), (2); +SELECT * FROM mv; +``` +```text +┌─a─┐ +│ 1 │ +│ 2 │ +└───┘ +``` +```sql +ALTER TABLE mv MODIFY QUERY SELECT a * 2 as a FROM src_table; +INSERT INTO src_table (a) VALUES (3), (4); +SELECT * FROM mv; +``` +```text +┌─a─┐ +│ 6 │ +│ 8 │ +└───┘ +┌─a─┐ +│ 1 │ +│ 2 │ +└───┘ +``` + +## Выражение ALTER LIVE VIEW {#alter-live-view} + +Выражение `ALTER LIVE VIEW ... REFRESH` обновляет [Live-представление](../create/view.md#live-view). См. раздел [Force Live View Refresh](../create/view.md#live-view-alter-refresh). diff --git a/docs/ru/sql-reference/statements/create/view.md b/docs/ru/sql-reference/statements/create/view.md index 0be29b12aea..dfbe1207c5b 100644 --- a/docs/ru/sql-reference/statements/create/view.md +++ b/docs/ru/sql-reference/statements/create/view.md @@ -5,7 +5,7 @@ toc_title: "Представление" # CREATE VIEW {#create-view} -Создаёт представление. Представления бывают двух видов - обычные и материализованные (MATERIALIZED). +Создаёт представление. Представления бывают [обычные](#normal), [материализованные](#materialized) (MATERIALIZED) и [LIVE](#live-view). ## Обычные представления {#normal} @@ -54,7 +54,7 @@ CREATE MATERIALIZED VIEW [IF NOT EXISTS] [db.]table_name [ON CLUSTER] [TO[db.]na Запрос `SELECT` может содержать `DISTINCT`, `GROUP BY`, `ORDER BY`, `LIMIT`… Следует иметь ввиду, что соответствующие преобразования будут выполняться независимо, на каждый блок вставляемых данных. Например, при наличии `GROUP BY`, данные будут агрегироваться при вставке, но только в рамках одной пачки вставляемых данных. Далее, данные не будут доагрегированы. Исключение - использование ENGINE, производящего агрегацию данных самостоятельно, например, `SummingMergeTree`. 
-Недоработано выполнение запросов `ALTER` над материализованными представлениями, поэтому они могут быть неудобными для использования. Если материализованное представление использует конструкцию `TO [db.]name`, то можно выполнить `DETACH` представления, `ALTER` для целевой таблицы и последующий `ATTACH` ранее отсоединенного (`DETACH`) представления. +Выполнение запросов [ALTER](../../../sql-reference/statements/alter/view.md) над материализованными представлениями имеет свои особенности, поэтому эти запросы могут быть неудобными для использования. Если материализованное представление использует конструкцию `TO [db.]name`, то можно выполнить `DETACH` представления, `ALTER` для целевой таблицы и последующий `ATTACH` ранее отсоединенного (`DETACH`) представления. Обратите внимание, что работа материализованного представления находится под влиянием настройки [optimize_on_insert](../../../operations/settings/settings.md#optimize-on-insert). Перед вставкой данных в таблицу происходит их слияние. @@ -62,7 +62,7 @@ CREATE MATERIALIZED VIEW [IF NOT EXISTS] [db.]table_name [ON CLUSTER] [TO[db.]na Чтобы удалить представление, следует использовать [DROP VIEW](../../../sql-reference/statements/drop.md#drop-view). Впрочем, `DROP TABLE` тоже работает для представлений. -## LIVE-представления {#live-view} +## LIVE-представления [экспериментальный функционал] {#live-view} !!! important "Важно" Представления `LIVE VIEW` являются экспериментальной возможностью. Их использование может повлечь потерю совместимости в будущих версиях. @@ -86,7 +86,7 @@ LIVE-представления работают по тому же принци В случаях, когда `LIVE VIEW` не обновляется автоматически, чтобы обновлять его принудительно с заданной периодичностью, используйте [WITH REFRESH](#live-view-with-refresh). 
-### Отслеживание изменений {#live-view-monitoring} +### Отслеживание изменений LIVE-представлений {#live-view-monitoring} Для отслеживания изменений LIVE-представления используйте запрос [WATCH](../../../sql-reference/statements/watch.md). @@ -108,12 +108,11 @@ WATCH lv; │ 1 │ 1 │ └────────┴──────────┘ ┌─sum(x)─┬─_version─┐ -│ 2 │ 2 │ +│ 3 │ 2 │ └────────┴──────────┘ ┌─sum(x)─┬─_version─┐ │ 6 │ 3 │ └────────┴──────────┘ -... ``` ```sql @@ -148,7 +147,7 @@ WATCH lv EVENTS; SELECT * FROM [db.]live_view WHERE ... ``` -### Принудительное обновление {#live-view-alter-refresh} +### Принудительное обновление LIVE-представлений {#live-view-alter-refresh} Чтобы принудительно обновить LIVE-представление, используйте запрос `ALTER LIVE VIEW [db.]table_name REFRESH`. @@ -220,9 +219,9 @@ WATCH lv; Code: 60. DB::Exception: Received from localhost:9000. DB::Exception: Table default.lv doesn't exist.. ``` -### Использование {#live-view-usage} +### Использование LIVE-представлений {#live-view-usage} -Наиболее частые случаи использования `LIVE-VIEW`: +Наиболее частые случаи использования `LIVE-представлений`: - Получение push-уведомлений об изменениях данных без дополнительных периодических запросов. - Кеширование результатов часто используемых запросов для получения их без задержки. diff --git a/docs/ru/sql-reference/statements/show.md b/docs/ru/sql-reference/statements/show.md index 7b5296e988e..caeeba7bee0 100644 --- a/docs/ru/sql-reference/statements/show.md +++ b/docs/ru/sql-reference/statements/show.md @@ -8,10 +8,10 @@ toc_title: SHOW ## SHOW CREATE TABLE {#show-create-table} ``` sql -SHOW CREATE [TEMPORARY] [TABLE|DICTIONARY] [db.]table [INTO OUTFILE filename] [FORMAT format] +SHOW CREATE [TEMPORARY] [TABLE|DICTIONARY|VIEW] [db.]table|view [INTO OUTFILE filename] [FORMAT format] ``` -Возвращает один столбец типа `String` с именем statement, содержащий одно значение — запрос `CREATE TABLE`, с помощью которого был создан указанный объект. 
+Возвращает один столбец типа `String` с именем statement, содержащий одно значение — запрос `CREATE`, с помощью которого был создан указанный объект. ## SHOW DATABASES {#show-databases} diff --git a/docs/tools/requirements.txt b/docs/tools/requirements.txt index 9bb4f57e9e2..975e6949476 100644 --- a/docs/tools/requirements.txt +++ b/docs/tools/requirements.txt @@ -12,7 +12,7 @@ htmlmin==0.1.12 idna==2.10 Jinja2>=2.11.3 jinja2-highlight==0.6.1 -jsmin==2.2.2 +jsmin==3.0.0 livereload==2.6.2 Markdown==3.3.2 MarkupSafe==1.1.1 diff --git a/docs/zh/commercial/cloud.md b/docs/zh/commercial/cloud.md index 651a1a15ec4..84863b24c33 100644 --- a/docs/zh/commercial/cloud.md +++ b/docs/zh/commercial/cloud.md @@ -6,13 +6,13 @@ toc_title: 云 # ClickHouse 云服务提供商 {#clickhouse-cloud-service-providers} !!! info "注意" - 如果您已经推出具有托管 ClickHouse 服务的公共云,请随时[提交一个 pull request](https://github.com/ClickHouse/ClickHouse/edit/master/docs/en/commercial/cloud.md) 将其添加到以下列表。 + 如果您已经推出具有托管ClickHouse服务的公共云,请随时[提交一个 pull request](https://github.com/ClickHouse/ClickHouse/edit/master/docs/en/commercial/cloud.md)将其添加到以下列表。 ## Yandex 云 {#yandex-cloud} -[Yandex的 ClickHouse 托管服务](https://cloud.yandex.com/services/managed-clickhouse?utm_source=referrals&utm_medium=clickhouseofficialsite&utm_campaign=link3) 提供以下主要功能: +[Yandex ClickHouse托管服务](https://cloud.yandex.com/services/managed-clickhouse?utm_source=referrals&utm_medium=clickhouseofficialsite&utm_campaign=link3)提供以下主要功能: -- 用于 ClickHouse 复制的完全托管的 ZooKeeper 服务 [ClickHouse复制](../engines/table-engines/mergetree-family/replication.md) +- 用于[ClickHouse replication](../engines/table-engines/mergetree-family/replication.md)的完全托管的ZooKeeper服务 - 多种存储类型选择 - 不同可用区副本 - 加密与隔离 @@ -20,34 +20,43 @@ toc_title: 云 ## Altinity.Cloud {#altinity.cloud} -[Altinity.Cloud](https://altinity.com/cloud-database/) 是针对 Amazon 公共云的完全托管的 ClickHouse-as-a-Service +[Altinity.Cloud](https://altinity.com/cloud-database/)是针对Amazon公共云的完全托管的ClickHouse-as-a-Service -- 在 Amazon 资源上快速部署 ClickHouse 
集群 +- 在Amazon资源上快速部署ClickHouse集群 - 轻松进行横向扩展/纵向扩展以及节点的垂直扩展 - 具有公共端点或VPC对等的租户隔离 - 可配置存储类型以及卷配置 - 跨可用区扩展以实现性能和高可用性 - 内置监控和SQL查询编辑器 -## 阿里云 {#alibaba-cloud} +## 阿里云{#alibaba-cloud} -[阿里云的 ClickHouse 托管服务](https://www.alibabacloud.com/zh/product/clickhouse) 提供以下主要功能: +[阿里云ClickHouse托管服务](https://www.alibabacloud.com/zh/product/clickhouse)提供以下主要功能: -- 基于阿里飞天分布式系统的高可靠云盘存储引擎 -- 按需扩容,无需手动进行数据搬迁 -- 支持单节点、单副本、多节点、多副本多种架构,支持冷热数据分层 -- 支持访问白名单和一键恢复,多层网络安全防护,云盘加密 -- 与云上日志系统、数据库、数据应用工具无缝集成 -- 内置监控和数据库管理平台 -- 专业的数据库专家技术支持和服务 +- 基于阿里飞天分布式系统的高可靠云盘存储引擎 +- 按需扩容,无需手动进行数据搬迁 +- 支持单节点、单副本、多节点、多副本多种架构,支持冷热数据分层 +- 支持访问白名单和一键恢复,多层网络安全防护,云盘加密 +- 与云上日志系统、数据库、数据应用工具无缝集成 +- 内置监控和数据库管理平台 +- 专业的数据库专家技术支持和服务 + +## SberCloud {#sbercloud} + +[SberCloud.Advanced](https://sbercloud.ru/en/advanced)提供[MapReduce Service (MRS)](https://docs.sbercloud.ru/mrs/ug/topics/ug__clickhouse.html), 一个可靠、安全且易于使用的企业级平台,用于存储、处理和分析大数据。MRS允许您快速创建和管理ClickHouse集群。 + +- 一个ClickHouse实例由三个ZooKeeper节点和多个ClickHouse节点组成。 Dedicated Replica模式用于保证双数据副本的高可靠性。 +- MRS提供平滑弹性伸缩能力,快速满足集群存储容量或CPU计算资源不足场景下的业务增长需求。当您扩展集群中ClickHouse节点的容量时,MRS提供一键式数据平衡工具,让您主动进行数据平衡。 您可以根据业务特点确定数据均衡方式和时间,保证业务的可用性,实现平滑扩展。 +- MRS采用弹性负载均衡保障高可用部署架构,自动将用户访问流量分配到多个后端节点,将服务能力扩展到外部系统,提高容错能力。 通过ELB轮询机制,数据写入本地表,从不同节点的分布式表中读取。 这样就保证了数据读写负载和应用访问的高可用。 ## 腾讯云 {#tencent-cloud} -[腾讯云的 ClickHouse 托管服务](https://cloud.tencent.com/product/cdwch)提供以下主要功能: +[腾讯云ClickHouse托管服务](https://cloud.tencent.com/product/cdwch)提供以下主要功能: -- 易于部署和管理, 集成监控与警报服务 -- 高可用高扩展 -- 通过集群级别的 VPC 保证安全可靠 -- 按需定价,无需前期成本或长期承诺 +- 易于在腾讯云上部署和管理 +- 高度可扩展和可用 +- 集成监控和警报服务 +- 每个集群VPC隔离的高安全性 +- 按需定价,无前期成本或长期承诺 {## [原始文章](https://clickhouse.tech/docs/en/commercial/cloud/) ##} diff --git a/docs/zh/commercial/index.md b/docs/zh/commercial/index.md index 047ee817d7b..34641b8f7fc 100644 --- a/docs/zh/commercial/index.md +++ b/docs/zh/commercial/index.md @@ -6,7 +6,7 @@ toc_title: 简介 # ClickHouse 商业服务 {#clickhouse-commercial-services} -本节是专门从事 ClickHouse 的服务提供商的目录,它们是一些独立的公司,不一定与 Yandex 有关系。 +此部分是专门从事ClickHouse的商业服务提供商的目录。 
他们是独立的公司,不一定隶属于Yandex。 服务类别: @@ -14,4 +14,4 @@ toc_title: 简介 - [支持](../commercial/support.md) !!! note "对于服务提供商" -如果您碰巧是其中之一,可以随时提交一个 pull request,将您的公司添加到对应的章节(如果服务不属于现有的任何目录,也可以添加新的章节)。提交关于文档的 pull request 最简单的方式是点击右上角的“铅笔”编辑按钮。如果您的服务在某些本地市场上有售,请确保在本地化的文档界面中也提及它(或至少在 pull request 请求描述中指出)。 + 如果您碰巧代表其中之一,请随时提交一个pull request,将您的公司添加到相应部分(如果服务不适合现有类别,甚至可以添加新部分)。 提交关于文档的pull request最简单的方式是点击右上角的“铅笔”编辑按钮。 如果您的服务在某些本地市场可用,请确保也在本地化文档页面中提及它(或至少在pull request请求描述中指出)。 diff --git a/docs/zh/commercial/support.md b/docs/zh/commercial/support.md index 44b6d1eab8d..3139709e4b8 100644 --- a/docs/zh/commercial/support.md +++ b/docs/zh/commercial/support.md @@ -6,16 +6,20 @@ toc_title: 支持 # ClickHouse 商业支持服务提供商 {#clickhouse-commercial-support-service-providers} !!! info "注意" - 如果您已经推出 ClickHouse 商业支持服务,请随时[提交一个 pull request](https://github.com/ClickHouse/ClickHouse/edit/master/docs/en/commercial/support.md) 将其添加到以下列表。 + 如果您已经推出ClickHouse商业支持服务,请随时[提交一个pull request](https://github.com/ClickHouse/ClickHouse/edit/master/docs/en/commercial/support.md)将其添加到以下列表。 + +## Yandex.Cloud + +来自ClickHouse作者的ClickHouse全球支持。 支持内部部署和云部署。 在clickhouse-support@yandex-team.com上询问详细信息 ## Altinity {#altinity} - Altinity 自从 2017 年开始为企业提供 ClickHouse 支持服务。Altinity 的客户范围包含百强企业到初创企业等。访问 [www.altinity.com](https://www.altinity.com/) 了解更多信息。 +Altinity自2017年以来一直为企业ClickHouse提供支持和服务。 Altinity的客户范围从财富100强企业到初创公司。访问 [www.altinity.com](https://www.altinity.com/)了解更多信息。 ## Mafiree {#mafiree} -[服务说明](http://mafiree.com/clickhouse-analytics-services.php) +[Service description](http://mafiree.com/clickhouse-analytics-services.php) ## MinervaDB {#minervadb} -[服务说明](https://minervadb.com/index.php/clickhouse-consulting-and-support-by-minervadb/) +[Service description](https://minervadb.com/index.php/clickhouse-consulting-and-support-by-minervadb/) diff --git a/docs/zh/development/adding_test_queries.md b/docs/zh/development/adding_test_queries.md new file mode 100644 index 00000000000..ccfed00bfbe --- /dev/null +++ 
b/docs/zh/development/adding_test_queries.md @@ -0,0 +1,150 @@ +# 如何将测试查询添加到 ClickHouse CI + +ClickHouse有数百个(甚至数千个)功能。 每个提交都由包含数千个测试用例的一组复杂测试进行检查。 + +核心功能经过了很多的测试,但是ClickHouse CI可以发现一些极端情况和不同的功能组合。 + +我们看到的大多数错误/回归都发生在测试覆盖率较差的`灰色区域`中。 + +我们非常有兴趣通过测试涵盖实现生活中使用的大多数可能的场景和功能组合。 + +## 为什么要添加测试 + +为什么/何时应将测试用例添加到ClickHouse代码中: +1) 您使用了一些复杂的场景/功能组合/您有一些可能未被广泛使用的情况 +2) 您会看到更改日志中没有通知的版本之间的某些行为发生了变化 +3) 您只是想帮助提高ClickHouse的质量并确保您使用的功能在未来的版本中不会被破坏 +4) 一旦测试被添加/接受,您可以确保您检查的角落案例永远不会被意外损坏。 +5) 你将成为伟大的开源社区的一份子 +6) 您的名字将出现在`system.contributors`表中! +7) 你会让世界变得更好。 + +### 要做的步骤 + +#### 先决条件 + +我假设你运行一些Linux机器(你可以在其他操作系统上使用 docker/虚拟机)和任何现代浏览器/互联网连接,并且你有一些基本的Linux和SQL技能。 + +不需要任何高度专业化的知识(因此您不需要了解 C++ 或了解ClickHouse CI的工作原理)。 + +#### 准备 + +1) [create GitHub account](https://github.com/join) (如果你还没有) +2) [setup git](https://docs.github.com/en/free-pro-team@latest/github/getting-started-with-github/set-up-git) +```bash +# for Ubuntu +sudo apt-get update +sudo apt-get install git + +git config --global user.name "John Doe" # fill with your name +git config --global user.email "email@example.com" # fill with your email + +``` +3) [fork ClickHouse project](https://docs.github.com/en/free-pro-team@latest/github/getting-started-with-github/fork-a-repo) - 打开 [https://github.com/ClickHouse/ClickHouse](https://github.com/ClickHouse/ClickHouse) and press fork button in the top right corner: + ![fork repo](https://github-images.s3.amazonaws.com/help/bootcamp/Bootcamp-Fork.png) + +4) 例如,将代码fork克隆到PC上的某个文件夹, `~/workspace/ClickHouse` +``` +mkdir ~/workspace && cd ~/workspace +git clone https://github.com/< your GitHub username>/ClickHouse +cd ClickHouse +git remote add upstream https://github.com/ClickHouse/ClickHouse +``` + +#### 测试的新分支 + +1) 从最新的clickhouse master创建一个新分支 +``` +cd ~/workspace/ClickHouse +git fetch upstream +git checkout -b name_for_a_branch_with_my_test upstream/master +``` + +#### 安装并运行 clickhouse + +1) 安装`clickhouse-server` 
(参考[官方文档](https://clickhouse.tech/docs/en/getting-started/install/)) +2) 安装测试配置(它将使用Zookeeper模拟实现并调整一些设置) +``` +cd ~/workspace/ClickHouse/tests/config +sudo ./install.sh +``` +3) 运行clickhouse-server +``` +sudo systemctl restart clickhouse-server +``` + +#### 创建测试文件 + + +1) 找到测试的编号 - 在`tests/queries/0_stateless/`中找到编号最大的文件 + +```sh +$ cd ~/workspace/ClickHouse +$ ls tests/queries/0_stateless/[0-9]*.reference | tail -n 1 +tests/queries/0_stateless/01520_client_print_query_id.reference +``` +目前,测试的最后一个数字是`01520`,所以我的测试将有数字`01521` + +2) 使用您测试的功能的下一个编号和名称创建一个SQL文件 + +```sh +touch tests/queries/0_stateless/01521_dummy_test.sql +``` + +3) 使用您最喜欢的编辑器编辑SQL文件(请参阅下面的创建测试提示) +```sh +vim tests/queries/0_stateless/01521_dummy_test.sql +``` + + +4) 运行测试,并将其结果放入参考文件中: +``` +clickhouse-client -nmT < tests/queries/0_stateless/01521_dummy_test.sql | tee tests/queries/0_stateless/01521_dummy_test.reference +``` + +5) 确保一切正确,如果测试输出不正确(例如由于某些错误),请使用文本编辑器调整参考文件。 + +#### 如何创建一个好的测试 + +- 测试应该是 + - 最小 - 仅创建与测试功能相关的表,删除不相关的列和部分查询 + - 快速 - 不应超过几秒钟(亚秒级更佳) + - 正确 - 失败则功能不起作用 + - 确定性的 + - 隔离/无状态 + - 不要依赖一些环境的东西 + - 尽可能不要依赖时间 +- 尝试覆盖极端情况(zeros / Nulls / empty sets / throwing exceptions) +- 要测试该查询返回错误,您可以在查询后添加特殊注释:`-- { serverError 60 }`或`-- { clientError 20 }` +- 不要切换数据库(除非必要) +- 如果需要,您可以在同一节点上创建多个表副本 +- 您可以在需要时使用测试集群定义之一(请参阅 system.clusters) +- 在适用时,使用 `number` / `numbers_mt` / `zeros` / `zeros_mt` 和类似的查询来初始化数据 +- 在测试之后和测试之前清理创建的对象(DROP IF EXISTS) - 在有一些脏状态的情况下 +- 优先选择同步操作模式 (mutations, merges) +- 以`0_stateless`文件夹中的其他SQL文件为例 +- 确保您想要测试的特性/特性组合尚未被现有测试覆盖 + +#### 测试命名规则 + +正确地命名测试非常重要,因此可以在clickhouse-test调用中关闭一些测试子集。 + +| Tester flag| 测试名称中应该包含什么 | 什么时候应该添加标志 | +|---|---|---| +| `--[no-]zookeeper`| "zookeeper"或"replica" | 测试使用来自ReplicatedMergeTree家族的表 | +| `--[no-]shard` | "shard"或"distributed"或"global"| 使用到127.0.0.2或类似的连接进行测试 | +| `--[no-]long` | "long"或"deadlock"或"race" | 测试运行时间超过60秒 | + +#### Commit / push / 创建PR. 
+ +1) commit & push您的修改 +```sh +cd ~/workspace/ClickHouse +git add tests/queries/0_stateless/01521_dummy_test.sql +git add tests/queries/0_stateless/01521_dummy_test.reference +git commit # use some nice commit message when possible +git push origin HEAD +``` +2) 使用一个在推送过程中显示的链接,创建一个到master的PR +3) 调整PR标题和内容,在`Changelog category (leave one)`中保留 + `Build/Testing/Packaging Improvement`,如果需要,请填写其余字段。 diff --git a/docs/zh/interfaces/formats.md b/docs/zh/interfaces/formats.md index 6f95c287068..3db3ee96d6c 100644 --- a/docs/zh/interfaces/formats.md +++ b/docs/zh/interfaces/formats.md @@ -23,7 +23,6 @@ ClickHouse可以接受和返回各种格式的数据。受支持的输入格式 | [CustomSeparated](#format-customseparated) | ✔ | ✔ | | [Values](#data-format-values) | ✔ | ✔ | | [Vertical](#vertical) | ✗ | ✔ | -| [VerticalRaw](#verticalraw) | ✗ | ✔ | | [JSON](#json) | ✗ | ✔ | | [JSONAsString](#jsonasstring) | ✔ | ✗ | | [JSONStrings](#jsonstrings) | ✗ | ✔ | @@ -951,31 +950,6 @@ SELECT * FROM t_null FORMAT Vertical 该格式仅适用于输出查询结果,但不适用于解析输入(将数据插入到表中)。 -## VerticalRaw {#verticalraw} - -和 `Vertical` 格式不同点在于,行是不会被转义的。 -这种格式仅仅适用于输出,但不适用于解析输入(将数据插入到表中)。 - -示例: - - :) SHOW CREATE TABLE geonames FORMAT VerticalRaw; - Row 1: - ────── - statement: CREATE TABLE default.geonames ( geonameid UInt32, date Date DEFAULT CAST('2017-12-08' AS Date)) ENGINE = MergeTree(date, geonameid, 8192) - - :) SELECT 'string with \'quotes\' and \t with some special \n characters' AS test FORMAT VerticalRaw; - Row 1: - ────── - test: string with 'quotes' and with some special - characters - -和 Vertical 格式相比: - - :) SELECT 'string with \'quotes\' and \t with some special \n characters' AS test FORMAT Vertical; - Row 1: - ────── - test: string with \'quotes\' and \t with some special \n characters - ## XML {#xml} 该格式仅适用于输出查询结果,但不适用于解析输入,示例: diff --git a/docs/zh/interfaces/third-party/gui.md b/docs/zh/interfaces/third-party/gui.md index fac6baed6c2..e510d88f7a7 100644 --- a/docs/zh/interfaces/third-party/gui.md +++ b/docs/zh/interfaces/third-party/gui.md 
@@ -72,6 +72,22 @@ ClickHouse Web 界面 [Tabix](https://github.com/tabixio/tabix). [clickhouse-flamegraph](https://github.com/Slach/clickhouse-flamegraph) 是一个可视化的专业工具`system.trace_log`如[flamegraph](http://www.brendangregg.com/flamegraphs.html). +### DBM {#dbm} + +[DBM](https://dbm.incubator.edurt.io/) DBM是一款ClickHouse可视化管理工具! + +特征: + +- 支持查询历史(分页、全部清除等) +- 支持选中的sql子句查询(多窗口等) +- 支持终止查询 +- 支持表管理 +- 支持数据库管理 +- 支持自定义查询 +- 支持多数据源管理(连接测试、监控) +- 支持监控(处理进程、连接、查询) +- 支持迁移数据 + ## 商业 {#shang-ye} ### Holistics {#holistics-software} @@ -99,20 +115,4 @@ ClickHouse Web 界面 [Tabix](https://github.com/tabixio/tabix). - 重构。 - 搜索和导航。 -### DBM {#dbm} - -[DBM](https://dbm.incubator.edurt.io/) DBM是一款ClickHouse可视化管理工具! - -特征: - -- 支持查询历史(分页、全部清除等) -- 支持选中的sql子句查询(多窗口等) -- 支持终止查询 -- 支持表管理 -- 支持数据库管理 -- 支持自定义查询 -- 支持多数据源管理(连接测试、监控) -- 支持监控(处理进程、连接、查询) -- 支持迁移数据 - [来源文章](https://clickhouse.tech/docs/zh/interfaces/third-party/gui/) diff --git a/programs/copier/ClusterCopier.cpp b/programs/copier/ClusterCopier.cpp index cf0b6cc76a4..de26e34bf2e 100644 --- a/programs/copier/ClusterCopier.cpp +++ b/programs/copier/ClusterCopier.cpp @@ -1274,13 +1274,14 @@ TaskStatus ClusterCopier::processPartitionPieceTaskImpl( auto get_select_query = [&] (const DatabaseAndTableName & from_table, const String & fields, bool enable_splitting, String limit = "") { String query; + query += "WITH " + task_partition.name + " AS partition_key "; query += "SELECT " + fields + " FROM " + getQuotedTable(from_table); if (enable_splitting && experimental_use_sample_offset) query += " SAMPLE 1/" + toString(number_of_splits) + " OFFSET " + toString(current_piece_number) + "/" + toString(number_of_splits); /// TODO: Bad, it is better to rewrite with ASTLiteral(partition_key_field) - query += " WHERE (" + queryToString(task_table.engine_push_partition_key_ast) + " = (" + task_partition.name + " AS partition_key))"; + query += " WHERE (" + queryToString(task_table.engine_push_partition_key_ast) + " = partition_key)"; if 
(enable_splitting && !experimental_use_sample_offset) query += " AND ( cityHash64(" + primary_key_comma_separated + ") %" + toString(number_of_splits) + " = " + toString(current_piece_number) + " )"; @@ -1851,9 +1852,9 @@ bool ClusterCopier::checkShardHasPartition(const ConnectionTimeouts & timeouts, TaskTable & task_table = task_shard.task_table; WriteBufferFromOwnString ss; + ss << "WITH " + partition_quoted_name + " AS partition_key "; ss << "SELECT 1 FROM " << getQuotedTable(task_shard.table_read_shard); - ss << " WHERE (" << queryToString(task_table.engine_push_partition_key_ast); - ss << " = (" + partition_quoted_name << " AS partition_key))"; + ss << " WHERE (" << queryToString(task_table.engine_push_partition_key_ast) << " = partition_key)"; if (!task_table.where_condition_str.empty()) ss << " AND (" << task_table.where_condition_str << ")"; ss << " LIMIT 1"; @@ -1882,13 +1883,15 @@ bool ClusterCopier::checkPresentPartitionPiecesOnCurrentShard(const ConnectionTi UNUSED(primary_key_comma_separated); - std::string query = "SELECT 1 FROM " + getQuotedTable(task_shard.table_read_shard); + std::string query; + + query += "WITH " + partition_quoted_name + " AS partition_key "; + query += "SELECT 1 FROM " + getQuotedTable(task_shard.table_read_shard); if (experimental_use_sample_offset) query += " SAMPLE 1/" + toString(number_of_splits) + " OFFSET " + toString(current_piece_number) + "/" + toString(number_of_splits); - query += " WHERE (" + queryToString(task_table.engine_push_partition_key_ast) - + " = (" + partition_quoted_name + " AS partition_key))"; + query += " WHERE (" + queryToString(task_table.engine_push_partition_key_ast) + " = partition_key)"; if (!experimental_use_sample_offset) query += " AND (cityHash64(" + primary_key_comma_separated + ") % " diff --git a/src/Common/MemoryTracker.h b/src/Common/MemoryTracker.h index adba0b42f57..d6833de544c 100644 --- a/src/Common/MemoryTracker.h +++ b/src/Common/MemoryTracker.h @@ -162,18 +162,18 @@ public: struct 
BlockerInThread { private: - BlockerInThread(const BlockerInThread &) = delete; - BlockerInThread & operator=(const BlockerInThread &) = delete; - static thread_local uint64_t counter; static thread_local VariableContext level; VariableContext previous_level; public: /// level_ - block in level and above - BlockerInThread(VariableContext level_ = VariableContext::User); + explicit BlockerInThread(VariableContext level_ = VariableContext::User); ~BlockerInThread(); + BlockerInThread(const BlockerInThread &) = delete; + BlockerInThread & operator=(const BlockerInThread &) = delete; + static bool isBlocked(VariableContext current_level) { return counter > 0 && current_level >= level; @@ -195,9 +195,6 @@ public: struct LockExceptionInThread { private: - LockExceptionInThread(const LockExceptionInThread &) = delete; - LockExceptionInThread & operator=(const LockExceptionInThread &) = delete; - static thread_local uint64_t counter; static thread_local VariableContext level; static thread_local bool block_fault_injections; @@ -207,9 +204,12 @@ public: public: /// level_ - block in level and above /// block_fault_injections_ - block in fault injection too - LockExceptionInThread(VariableContext level_ = VariableContext::User, bool block_fault_injections_ = true); + explicit LockExceptionInThread(VariableContext level_ = VariableContext::User, bool block_fault_injections_ = true); ~LockExceptionInThread(); + LockExceptionInThread(const LockExceptionInThread &) = delete; + LockExceptionInThread & operator=(const LockExceptionInThread &) = delete; + static bool isBlocked(VariableContext current_level, bool fault_injection) { return counter > 0 && current_level >= level && (!fault_injection || block_fault_injections); diff --git a/src/Common/Stopwatch.h b/src/Common/Stopwatch.h index a7f5e76d5be..c307f94b19a 100644 --- a/src/Common/Stopwatch.h +++ b/src/Common/Stopwatch.h @@ -4,6 +4,7 @@ #include #include +#include inline UInt64 clock_gettime_ns(clockid_t clock_type = 
CLOCK_MONOTONIC) @@ -22,7 +23,7 @@ public: /** CLOCK_MONOTONIC works relatively efficient (~15 million calls/sec) and doesn't lead to syscall. * Pass CLOCK_MONOTONIC_COARSE, if you need better performance with acceptable cost of several milliseconds of inaccuracy. */ - Stopwatch(clockid_t clock_type_ = CLOCK_MONOTONIC) : clock_type(clock_type_) { start(); } + explicit Stopwatch(clockid_t clock_type_ = CLOCK_MONOTONIC) : clock_type(clock_type_) { start(); } void start() { start_ns = nanoseconds(); is_running = true; } void stop() { stop_ns = nanoseconds(); is_running = false; } @@ -43,11 +44,13 @@ private: UInt64 nanoseconds() const { return clock_gettime_ns(clock_type); } }; +using StopwatchUniquePtr = std::unique_ptr; + class AtomicStopwatch { public: - AtomicStopwatch(clockid_t clock_type_ = CLOCK_MONOTONIC) : clock_type(clock_type_) { restart(); } + explicit AtomicStopwatch(clockid_t clock_type_ = CLOCK_MONOTONIC) : clock_type(clock_type_) { restart(); } void restart() { start_ns = nanoseconds(); } UInt64 elapsed() const { return nanoseconds() - start_ns; } @@ -78,11 +81,11 @@ public: { AtomicStopwatch * parent = nullptr; - Lock() {} + Lock() = default; - operator bool() const { return parent != nullptr; } + explicit operator bool() const { return parent != nullptr; } - Lock(AtomicStopwatch * parent_) : parent(parent_) {} + explicit Lock(AtomicStopwatch * parent_) : parent(parent_) {} Lock(Lock &&) = default; diff --git a/src/Common/ThreadPool.cpp b/src/Common/ThreadPool.cpp index 4b6b795a5cd..8ad70d85643 100644 --- a/src/Common/ThreadPool.cpp +++ b/src/Common/ThreadPool.cpp @@ -3,6 +3,7 @@ #include #include +#include #include #include diff --git a/src/Coordination/ZooKeeperDataReader.cpp b/src/Coordination/ZooKeeperDataReader.cpp index cf644110786..4d213d760f6 100644 --- a/src/Coordination/ZooKeeperDataReader.cpp +++ b/src/Coordination/ZooKeeperDataReader.cpp @@ -339,6 +339,9 @@ Coordination::ZooKeeperRequestPtr deserializeCheckVersionTxn(ReadBuffer & in) 
Coordination::read(result->path, in); Coordination::read(result->version, in); result->restored_from_zookeeper_log = true; + /// It stores version + 1 (which should be, not for request) + result->version -= 1; + return result; } diff --git a/src/DataTypes/IDataType.h b/src/DataTypes/IDataType.h index 91752f8c7f3..360bf9f16e0 100644 --- a/src/DataTypes/IDataType.h +++ b/src/DataTypes/IDataType.h @@ -387,6 +387,12 @@ inline bool isUInt8(const T & data_type) return WhichDataType(data_type).isUInt8(); } +template +inline bool isUInt64(const T & data_type) +{ + return WhichDataType(data_type).isUInt64(); +} + template inline bool isUnsignedInteger(const T & data_type) { diff --git a/src/Disks/IStoragePolicy.h b/src/Disks/IStoragePolicy.h index 16f3b21a41c..f113cf5f120 100644 --- a/src/Disks/IStoragePolicy.h +++ b/src/Disks/IStoragePolicy.h @@ -17,6 +17,7 @@ class IDisk; using DiskPtr = std::shared_ptr; using Disks = std::vector; class IReservation; +using ReservationSharedPtr = std::shared_ptr; using ReservationPtr = std::unique_ptr; using Reservations = std::vector; diff --git a/src/Functions/FunctionsBitmap.h b/src/Functions/FunctionsBitmap.h index 3d63ea42d28..775a39f4d08 100644 --- a/src/Functions/FunctionsBitmap.h +++ b/src/Functions/FunctionsBitmap.h @@ -1077,6 +1077,11 @@ struct BitmapAndnotImpl } }; +struct NameBitmapAnd +{ + static constexpr auto name = "bitmapAnd"; +}; + template