Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-24 16:42:05 +00:00)

Compare commits: d2fb413079...def40ddcac (49 commits)

def40ddcac, 7eee149487, cc3c7e74ae, e0f8b8d351, da2176d696, 53e0036593,
25bd73ea5e, 72d5af29e0, 44b4bd38b9, 40c7d5fd1a, 9a2a664b04, f45bd58849,
4ccebd9a24, 99177c0daf, ad67608956, 0951991c1d, 19aec5e572, a367de9977,
57db5cf24c, 6894e280b2, 39ebe113d9, 459fa898ed, 239bbaa133, 07fac5808d,
ed95e0781f, 014608fb6b, a29ded4941, d2efae7511, 49589da56e, 6879aa130a,
43f3c886a2, c383a743f7, 0d875ecf5c, 6698212b5a, c787838cb2, eb020f1c4b,
1a40df4d0c, 4380c6035d, 5145281088, 35fa4c43e4, 293e076493, 8b92603c6d,
fb14f6e029, e1f37ec2bb, cc0ef6104f, 46ce65e66e, e048893b85, 3827d90bb0,
bf3a3ad607
@@ -16,16 +16,18 @@ ClickHouse works 100-1000x faster than traditional database management systems,

For more information and documentation see https://clickhouse.com/.

<!-- This is not related to the docker official library, remove it before commit to https://github.com/docker-library/docs -->
## Versions

- The `latest` tag points to the latest release of the latest stable branch.
- Branch tags like `22.2` point to the latest release of the corresponding branch.
- Full version tags like `22.2.3.5` point to the corresponding release.
- Full version tags like `22.2.3` and `22.2.3.5` point to the corresponding release.
<!-- docker-official-library:off -->
<!-- This is not related to the docker official library, remove it before commit to https://github.com/docker-library/docs -->
- The tag `head` is built from the latest commit to the default branch.
- Each tag has an optional `-alpine` suffix to reflect that it's built on top of `alpine`.

<!-- REMOVE UNTIL HERE -->
<!-- docker-official-library:on -->
### Compatibility

- The amd64 image requires support for [SSE3 instructions](https://en.wikipedia.org/wiki/SSE3). Virtually all x86 CPUs after 2005 support SSE3.
@@ -10,16 +10,18 @@ ClickHouse works 100-1000x faster than traditional database management systems,

For more information and documentation see https://clickhouse.com/.

<!-- This is not related to the docker official library, remove it before commit to https://github.com/docker-library/docs -->
## Versions

- The `latest` tag points to the latest release of the latest stable branch.
- Branch tags like `22.2` point to the latest release of the corresponding branch.
- Full version tags like `22.2.3.5` point to the corresponding release.
- Full version tags like `22.2.3` and `22.2.3.5` point to the corresponding release.
<!-- docker-official-library:off -->
<!-- This is not related to the docker official library, remove it before commit to https://github.com/docker-library/docs -->
- The tag `head` is built from the latest commit to the default branch.
- Each tag has an optional `-alpine` suffix to reflect that it's built on top of `alpine`.

<!-- REMOVE UNTIL HERE -->
<!-- docker-official-library:on -->
### Compatibility

- The amd64 image requires support for [SSE3 instructions](https://en.wikipedia.org/wiki/SSE3). Virtually all x86 CPUs after 2005 support SSE3.
@@ -522,4 +522,3 @@ sidebar_label: 2024

* Backported in [#68518](https://github.com/ClickHouse/ClickHouse/issues/68518): Minor update in Dynamic/JSON serializations. [#68459](https://github.com/ClickHouse/ClickHouse/pull/68459) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#68558](https://github.com/ClickHouse/ClickHouse/issues/68558): CI: Minor release workflow fix. [#68536](https://github.com/ClickHouse/ClickHouse/pull/68536) ([Max K.](https://github.com/maxknv)).
* Backported in [#68576](https://github.com/ClickHouse/ClickHouse/issues/68576): CI: Tidy build timeout from 2h to 3h. [#68567](https://github.com/ClickHouse/ClickHouse/pull/68567) ([Max K.](https://github.com/maxknv)).
@@ -497,4 +497,3 @@ sidebar_label: 2024

* Backported in [#69899](https://github.com/ClickHouse/ClickHouse/issues/69899): Revert "Merge pull request [#69032](https://github.com/ClickHouse/ClickHouse/issues/69032) from alexon1234/include_real_time_execution_in_http_header". [#69885](https://github.com/ClickHouse/ClickHouse/pull/69885) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Backported in [#69931](https://github.com/ClickHouse/ClickHouse/issues/69931): RIPE is an acronym and thus should be capital. RIPE stands for **R**ACE **I**ntegrity **P**rimitives **E**valuation, and RACE stands for **R**esearch and Development in **A**dvanced **C**ommunications Technologies in **E**urope. [#69901](https://github.com/ClickHouse/ClickHouse/pull/69901) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Backported in [#70034](https://github.com/ClickHouse/ClickHouse/issues/70034): Revert "Add RIPEMD160 function". [#70005](https://github.com/ClickHouse/ClickHouse/pull/70005) ([Robert Schulze](https://github.com/rschu1ze)).
@@ -936,4 +936,4 @@ SELECT mapPartialReverseSort((k, v) -> v, 2, map('k1', 3, 'k2', 1, 'k3', 2));

┌─mapPartialReverseSort(lambda(tuple(k, v), v), 2, map('k1', 3, 'k2', 1, 'k3', 2))─┐
│ {'k1':3,'k3':2,'k2':1} │
└──────────────────────────────────────────────────────────────────────────────────┘
```
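For contrast with the reverse-sorted result above, a sketch of the ascending counterpart `mapPartialSort` (documented alongside this function) on the same map; the result comment is the expected output, not re-verified here:

```sql
-- Partially sort the first 2 map entries ascending by value
SELECT mapPartialSort((k, v) -> v, 2, map('k1', 3, 'k2', 1, 'k3', 2));
-- expected: {'k2':1,'k3':2,'k1':3}
```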
@@ -49,4 +49,4 @@ LIMIT 2

**See Also**

- [DeltaLake engine](/docs/en/engines/table-engines/integrations/deltalake.md)
- [DeltaLake cluster table function](/docs/en/sql-reference/table-functions/deltalakeCluster.md)
docs/en/sql-reference/table-functions/deltalakeCluster.md (new file, 30 lines)
@@ -0,0 +1,30 @@

---
slug: /en/sql-reference/table-functions/deltalakeCluster
sidebar_position: 46
sidebar_label: deltaLakeCluster
title: "deltaLakeCluster Table Function"
---
This is an extension to the [deltaLake](/docs/en/sql-reference/table-functions/deltalake.md) table function.

Allows processing files from [Delta Lake](https://github.com/delta-io/delta) tables in Amazon S3 in parallel from many nodes in a specified cluster. The initiator creates a connection to every node in the cluster and dispatches each file dynamically. Each worker node asks the initiator for the next task to process and processes it; this repeats until all tasks are finished.

**Syntax**

``` sql
deltaLakeCluster(cluster_name, url [,aws_access_key_id, aws_secret_access_key] [,format] [,structure] [,compression])
```

**Arguments**

- `cluster_name` — Name of a cluster that is used to build a set of addresses and connection parameters to remote and local servers.

- The descriptions of all other arguments match those of the arguments in the equivalent [deltaLake](/docs/en/sql-reference/table-functions/deltalake.md) table function.

**Returned value**

A table with the specified structure for reading data from the specified Delta Lake table in S3, read in parallel across the cluster.
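A usage sketch following the syntax above (the cluster name, bucket URL, and credentials are illustrative placeholders, not values from this patch):

```sql
-- Read a Delta Lake table from S3 in parallel across the cluster
SELECT count()
FROM deltaLakeCluster('cluster_simple', 'https://my-bucket.s3.amazonaws.com/delta_table/', 'ACCESS_KEY', 'SECRET_KEY');
```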
**See Also**

- [deltaLake engine](/docs/en/engines/table-engines/integrations/deltalake.md)
- [deltaLake table function](/docs/en/sql-reference/table-functions/deltalake.md)
@@ -29,4 +29,4 @@ A table with the specified structure for reading data in the specified Hudi table.

**See Also**

- [Hudi engine](/docs/en/engines/table-engines/integrations/hudi.md)
- [Hudi cluster table function](/docs/en/sql-reference/table-functions/hudiCluster.md)
docs/en/sql-reference/table-functions/hudiCluster.md (new file, 30 lines)
@@ -0,0 +1,30 @@

---
slug: /en/sql-reference/table-functions/hudiCluster
sidebar_position: 86
sidebar_label: hudiCluster
title: "hudiCluster Table Function"
---
This is an extension to the [hudi](/docs/en/sql-reference/table-functions/hudi.md) table function.

Allows processing files from Apache [Hudi](https://hudi.apache.org/) tables in Amazon S3 in parallel from many nodes in a specified cluster. The initiator creates a connection to every node in the cluster and dispatches each file dynamically. Each worker node asks the initiator for the next task to process and processes it; this repeats until all tasks are finished.

**Syntax**

``` sql
hudiCluster(cluster_name, url [,aws_access_key_id, aws_secret_access_key] [,format] [,structure] [,compression])
```

**Arguments**

- `cluster_name` — Name of a cluster that is used to build a set of addresses and connection parameters to remote and local servers.

- The descriptions of all other arguments match those of the arguments in the equivalent [hudi](/docs/en/sql-reference/table-functions/hudi.md) table function.

**Returned value**

A table with the specified structure for reading data from the specified Hudi table in S3, read in parallel across the cluster.
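A usage sketch following the syntax above (cluster name, bucket URL, and credentials are illustrative placeholders):

```sql
-- Read a Hudi table from S3 in parallel across the cluster
SELECT count()
FROM hudiCluster('cluster_simple', 'https://my-bucket.s3.amazonaws.com/hudi_table/', 'ACCESS_KEY', 'SECRET_KEY');
```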
**See Also**

- [Hudi engine](/docs/en/engines/table-engines/integrations/hudi.md)
- [Hudi table function](/docs/en/sql-reference/table-functions/hudi.md)
@@ -72,3 +72,4 @@ Table function `iceberg` is an alias to `icebergS3` now.

**See Also**

- [Iceberg engine](/docs/en/engines/table-engines/integrations/iceberg.md)
- [Iceberg cluster table function](/docs/en/sql-reference/table-functions/icebergCluster.md)
docs/en/sql-reference/table-functions/icebergCluster.md (new file, 43 lines)
@@ -0,0 +1,43 @@

---
slug: /en/sql-reference/table-functions/icebergCluster
sidebar_position: 91
sidebar_label: icebergCluster
title: "icebergCluster Table Function"
---
This is an extension to the [iceberg](/docs/en/sql-reference/table-functions/iceberg.md) table function.

Allows processing files from Apache [Iceberg](https://iceberg.apache.org/) tables in parallel from many nodes in a specified cluster. The initiator creates a connection to every node in the cluster and dispatches each file dynamically. Each worker node asks the initiator for the next task to process and processes it; this repeats until all tasks are finished.

**Syntax**

``` sql
icebergS3Cluster(cluster_name, url [, NOSIGN | access_key_id, secret_access_key, [session_token]] [,format] [,compression_method])
icebergS3Cluster(cluster_name, named_collection[, option=value [,..]])

icebergAzureCluster(cluster_name, connection_string|storage_account_url, container_name, blobpath, [,account_name], [,account_key] [,format] [,compression_method])
icebergAzureCluster(cluster_name, named_collection[, option=value [,..]])

icebergHDFSCluster(cluster_name, path_to_table, [,format] [,compression_method])
icebergHDFSCluster(cluster_name, named_collection[, option=value [,..]])
```

**Arguments**

- `cluster_name` — Name of a cluster that is used to build a set of addresses and connection parameters to remote and local servers.

- The descriptions of all other arguments match those of the arguments in the equivalent [iceberg](/docs/en/sql-reference/table-functions/iceberg.md) table function.

**Returned value**

A table with the specified structure for reading data from the specified Iceberg table, read in parallel across the cluster.

**Examples**

```sql
SELECT * FROM icebergS3Cluster('cluster_simple', 'http://test.s3.amazonaws.com/clickhouse-bucket/test_table', 'test', 'test')
```
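The Azure and HDFS variants follow the same shape. A sketch for Azure, matching the positional signature above (account URL, container, path, account name, and key are illustrative placeholders):

```sql
-- Read an Iceberg table from Azure Blob Storage in parallel across the cluster
SELECT count()
FROM icebergAzureCluster('cluster_simple', 'http://azurite1:10000/devstoreaccount1', 'my-container', '/iceberg_data/default/test_table/', 'account_name', 'account_key');
```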
**See Also**

- [Iceberg engine](/docs/en/engines/table-engines/integrations/iceberg.md)
- [Iceberg table function](/docs/en/sql-reference/table-functions/iceberg.md)
@@ -476,7 +476,7 @@

 <input id="edit" type="button" value="✎" style="display: none;">
 <input id="add" type="button" value="Add chart" style="display: none;">
 <input id="reload" type="button" value="Reload">
-<span id="search-span" class="nowrap" style="display: none;"><input id="search" type="button" value="🔎" title="Run query to obtain list of charts from ClickHouse"><input id="search-query" name="search" type="text" spellcheck="false"></span>
+<span id="search-span" class="nowrap" style="display: none;"><input id="search" type="button" value="🔎" title="Run query to obtain list of charts from ClickHouse. Either select dashboard name or write your own query"><input id="search-query" name="search" list="search-options" type="text" spellcheck="false"><datalist id="search-options"></datalist></span>
 <div id="chart-params"></div>
 </div>
 </form>
@@ -532,9 +532,15 @@ const errorMessages = [
     }
 ]

+/// Dashboard selector
+const dashboardSearchQuery = (dashboard_name) => `SELECT title, query FROM system.dashboards WHERE dashboard = '${dashboard_name}'`;
+let dashboard_queries = {
+    "Overview": dashboardSearchQuery("Overview"),
+};
+const default_dashboard = 'Overview';
+
 /// Query to fill `queries` list for the dashboard
-let search_query = `SELECT title, query FROM system.dashboards WHERE dashboard = 'Overview'`;
+let search_query = dashboardSearchQuery(default_dashboard);
 let customized = false;
 let queries = [];
@@ -1439,7 +1445,7 @@ async function reloadAll(do_search) {
     try {
         updateParams();
         if (do_search) {
-            search_query = document.getElementById('search-query').value;
+            search_query = toSearchQuery(document.getElementById('search-query').value);
             queries = [];
             refreshCustomized(false);
         }
@@ -1504,7 +1510,7 @@ function updateFromState() {
     document.getElementById('url').value = host;
     document.getElementById('user').value = user;
     document.getElementById('password').value = password;
-    document.getElementById('search-query').value = search_query;
+    document.getElementById('search-query').value = fromSearchQuery(search_query);
     refreshCustomized();
 }
@@ -1543,6 +1549,44 @@ if (window.location.hash) {
     } catch {}
 }

+function fromSearchQuery(query) {
+    for (const dashboard_name in dashboard_queries) {
+        if (query == dashboard_queries[dashboard_name])
+            return dashboard_name;
+    }
+    return query;
+}
+
+function toSearchQuery(value) {
+    if (value in dashboard_queries)
+        return dashboard_queries[value];
+    else
+        return value;
+}
+
+async function populateSearchOptions() {
+    let {reply, error} = await doFetch("SELECT dashboard FROM system.dashboards GROUP BY dashboard ORDER BY ALL");
+    if (error) {
+        throw new Error(error);
+    }
+    let data = reply.data;
+    if (data.dashboard.length == 0) {
+        console.log("Unable to fetch dashboards list");
+        return;
+    }
+    dashboard_queries = {};
+    for (let i = 0; i < data.dashboard.length; i++) {
+        const dashboard = data.dashboard[i];
+        dashboard_queries[dashboard] = dashboardSearchQuery(dashboard);
+    }
+    const searchOptions = document.getElementById('search-options');
+    for (const dashboard in dashboard_queries) {
+        const opt = document.createElement('option');
+        opt.value = dashboard;
+        searchOptions.appendChild(opt);
+    }
+}
+
 async function start() {
     try {
         updateFromState();
@@ -1558,6 +1602,7 @@ async function start() {
         } else {
             drawAll();
         }
+        await populateSearchOptions();
     } catch (e) {
         showError(e.message);
     }
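Concretely, the selector is driven by two queries against `system.dashboards`, both of which appear verbatim in the diff above (`Overview` is the default dashboard name the code assumes):

```sql
-- Populate the <datalist> of dashboard names
SELECT dashboard FROM system.dashboards GROUP BY dashboard ORDER BY ALL;

-- Fetch the charts of the selected dashboard, as dashboardSearchQuery() builds it
SELECT title, query FROM system.dashboards WHERE dashboard = 'Overview';
```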
@@ -528,7 +528,7 @@ QueryTreeNodePtr IdentifierResolver::tryResolveIdentifierFromCompoundExpression(
  *
  * Resolve strategy:
  * 1. Try to bind identifier to scope argument name to node map.
- * 2. If identifier is binded but expression context and node type are incompatible return nullptr.
+ * 2. If identifier is bound but expression context and node type are incompatible return nullptr.
  *
  * It is important to support edge cases, where we lookup for table or function node, but argument has same name.
  * Example: WITH (x -> x + 1) AS func, (func -> func(1) + func) AS lambda SELECT lambda(1);
@@ -362,7 +362,7 @@ ReplxxLineReader::ReplxxLineReader(
     if (highlighter)
         rx.set_highlighter_callback(highlighter);

-    /// By default C-p/C-n binded to COMPLETE_NEXT/COMPLETE_PREV,
+    /// By default C-p/C-n bound to COMPLETE_NEXT/COMPLETE_PREV,
     /// bind C-p/C-n to history-previous/history-next like readline.
     rx.bind_key(Replxx::KEY::control('N'), [this](char32_t code) { return rx.invoke(Replxx::ACTION::HISTORY_NEXT, code); });
     rx.bind_key(Replxx::KEY::control('P'), [this](char32_t code) { return rx.invoke(Replxx::ACTION::HISTORY_PREVIOUS, code); });
@@ -384,9 +384,9 @@ ReplxxLineReader::ReplxxLineReader(
     rx.bind_key(Replxx::KEY::control('J'), commit_action);
     rx.bind_key(Replxx::KEY::ENTER, commit_action);

-    /// By default COMPLETE_NEXT/COMPLETE_PREV was binded to C-p/C-n, re-bind
+    /// By default COMPLETE_NEXT/COMPLETE_PREV was bound to C-p/C-n, re-bind
     /// to M-P/M-N (that was used for HISTORY_COMMON_PREFIX_SEARCH before, but
-    /// it also binded to M-p/M-n).
+    /// it also bound to M-p/M-n).
     rx.bind_key(Replxx::KEY::meta('N'), [this](char32_t code) { return rx.invoke(Replxx::ACTION::COMPLETE_NEXT, code); });
     rx.bind_key(Replxx::KEY::meta('P'), [this](char32_t code) { return rx.invoke(Replxx::ACTION::COMPLETE_PREVIOUS, code); });
     /// By default M-BACKSPACE is KILL_TO_WHITESPACE_ON_LEFT, while in readline it is backward-kill-word
@@ -341,7 +341,10 @@ Coordination::Error ZooKeeper::tryGetChildren(
     const EventPtr & watch,
     Coordination::ListRequestType list_request_type)
 {
-    return tryGetChildrenWatch(path, res, stat,
+    return tryGetChildrenWatch(
+        path,
+        res,
+        stat,
         watch ? std::make_shared<Coordination::WatchCallback>(callbackForEvent(watch)) : Coordination::WatchCallbackPtr{},
         list_request_type);
 }
@@ -975,11 +978,14 @@ void ZooKeeper::removeRecursive(const std::string & path, uint32_t remove_nodes_limit)

 Coordination::Error ZooKeeper::tryRemoveRecursive(const std::string & path, uint32_t remove_nodes_limit)
 {
-    if (!isFeatureEnabled(DB::KeeperFeatureFlag::REMOVE_RECURSIVE))
+    const auto fallback_method = [&]
     {
         tryRemoveChildrenRecursive(path);
         return tryRemove(path);
-    }
+    };
+
+    if (!isFeatureEnabled(DB::KeeperFeatureFlag::REMOVE_RECURSIVE))
+        return fallback_method();

     auto promise = std::make_shared<std::promise<Coordination::RemoveRecursiveResponse>>();
     auto future = promise->get_future();
@@ -998,6 +1004,10 @@ Coordination::Error ZooKeeper::tryRemoveRecursive(const std::string & path, uint32_t remove_nodes_limit)
     }

     auto response = future.get();

+    if (response.error == Coordination::Error::ZNOTEMPTY) /// limit was too low, try without RemoveRecursive request
+        return fallback_method();
+
     return response.error;
 }
@@ -486,13 +486,13 @@ public:
     /// Remove the node with the subtree.
     /// If Keeper supports the RemoveRecursive operation then it will be performed atomically.
     /// Otherwise if someone concurrently adds or removes a node in the subtree, the result is undefined.
-    void removeRecursive(const std::string & path, uint32_t remove_nodes_limit = 100);
+    void removeRecursive(const std::string & path, uint32_t remove_nodes_limit = 1000);

     /// Same as removeRecursive, but in case Keeper does not support RemoveRecursive and
     /// someone concurrently removes a node in the subtree, this will not cause errors.
     /// For instance, you can call this method twice concurrently for the same node and the end
     /// result would be the same as for the single call.
-    Coordination::Error tryRemoveRecursive(const std::string & path, uint32_t remove_nodes_limit = 100);
+    Coordination::Error tryRemoveRecursive(const std::string & path, uint32_t remove_nodes_limit = 1000);

     /// Similar to removeRecursive(...) and tryRemoveRecursive(...), but does not remove path itself.
     /// Node defined as RemoveException will not be deleted.
@@ -767,6 +767,11 @@ size_t ZooKeeperMultiRequest::sizeImpl() const
 }

 void ZooKeeperMultiRequest::readImpl(ReadBuffer & in)
 {
+    readImpl(in, /*request_validator=*/{});
+}
+
+void ZooKeeperMultiRequest::readImpl(ReadBuffer & in, RequestValidator request_validator)
+{
     while (true)
     {

@@ -788,6 +793,8 @@ void ZooKeeperMultiRequest::readImpl(ReadBuffer & in)

         ZooKeeperRequestPtr request = ZooKeeperRequestFactory::instance().get(op_num);
         request->readImpl(in);
+        if (request_validator)
+            request_validator(*request);
         requests.push_back(request);

         if (in.eof())
@@ -570,6 +570,9 @@ struct ZooKeeperMultiRequest final : MultiRequest<ZooKeeperRequestPtr>, ZooKeeperRequest
     void writeImpl(WriteBuffer & out) const override;
     size_t sizeImpl() const override;
     void readImpl(ReadBuffer & in) override;
+
+    using RequestValidator = std::function<void(const ZooKeeperRequest &)>;
+    void readImpl(ReadBuffer & in, RequestValidator request_validator);
     std::string toStringImpl(bool short_format) const override;

     ZooKeeperResponsePtr makeResponse() const override;
@@ -514,7 +514,13 @@ void KeeperContext::initializeFeatureFlags(const Poco::Util::AbstractConfiguration & config)
        feature_flags.disableFeatureFlag(feature_flag.value());
    }

    if (feature_flags.isEnabled(KeeperFeatureFlag::MULTI_READ))
        feature_flags.enableFeatureFlag(KeeperFeatureFlag::FILTERED_LIST);
    else
        system_nodes_with_data[keeper_api_version_path] = toString(static_cast<uint8_t>(KeeperApiVersion::ZOOKEEPER_COMPATIBLE));

    system_nodes_with_data[keeper_api_feature_flags_path] = feature_flags.getFeatureFlags();
    }

    feature_flags.logFlags(getLogger("KeeperContext"));

@@ -569,6 +575,25 @@ const CoordinationSettings & KeeperContext::getCoordinationSettings() const
     return *coordination_settings;
 }

+bool KeeperContext::isOperationSupported(Coordination::OpNum operation) const
+{
+    switch (operation)
+    {
+        case Coordination::OpNum::FilteredList:
+            return feature_flags.isEnabled(KeeperFeatureFlag::FILTERED_LIST);
+        case Coordination::OpNum::MultiRead:
+            return feature_flags.isEnabled(KeeperFeatureFlag::MULTI_READ);
+        case Coordination::OpNum::CreateIfNotExists:
+            return feature_flags.isEnabled(KeeperFeatureFlag::CREATE_IF_NOT_EXISTS);
+        case Coordination::OpNum::CheckNotExists:
+            return feature_flags.isEnabled(KeeperFeatureFlag::CHECK_NOT_EXISTS);
+        case Coordination::OpNum::RemoveRecursive:
+            return feature_flags.isEnabled(KeeperFeatureFlag::REMOVE_RECURSIVE);
+        default:
+            return true;
+    }
+}
+
 uint64_t KeeperContext::lastCommittedIndex() const
 {
     return last_committed_log_idx.load(std::memory_order_relaxed);
@@ -1,6 +1,7 @@
#pragma once
#include <Coordination/KeeperFeatureFlags.h>
#include <Poco/Util/AbstractConfiguration.h>
#include <Common/ZooKeeper/ZooKeeperConstants.h>
#include <atomic>
#include <condition_variable>
#include <cstdint>

@@ -103,6 +104,7 @@ public:
         return precommit_sleep_probability_for_testing;
     }

+    bool isOperationSupported(Coordination::OpNum operation) const;
 private:
     /// local disk defined using path or disk name
     using Storage = std::variant<DiskPtr, std::string>;
@@ -62,16 +62,17 @@ public:
     for (size_t i = 0; i < num_rows; ++i)
     {
         auto array_size = col_num->getInt(i);
+        auto element_size = col_value->byteSizeAt(i);

         if (unlikely(array_size < 0))
             throw Exception(ErrorCodes::TOO_LARGE_ARRAY_SIZE, "Array size {} cannot be negative: while executing function {}", array_size, getName());

         Int64 estimated_size = 0;
-        if (unlikely(common::mulOverflow(array_size, col_value->byteSize(), estimated_size)))
-            throw Exception(ErrorCodes::TOO_LARGE_ARRAY_SIZE, "Array size {} with element size {} bytes is too large: while executing function {}", array_size, col_value->byteSize(), getName());
+        if (unlikely(common::mulOverflow(array_size, element_size, estimated_size)))
+            throw Exception(ErrorCodes::TOO_LARGE_ARRAY_SIZE, "Array size {} with element size {} bytes is too large: while executing function {}", array_size, element_size, getName());

         if (unlikely(estimated_size > max_array_size_in_columns_bytes))
-            throw Exception(ErrorCodes::TOO_LARGE_ARRAY_SIZE, "Array size {} with element size {} bytes is too large: while executing function {}", array_size, col_value->byteSize(), getName());
+            throw Exception(ErrorCodes::TOO_LARGE_ARRAY_SIZE, "Array size {} with element size {} bytes is too large: while executing function {}", array_size, element_size, getName());

         offset += array_size;
@@ -237,6 +237,7 @@ bool IParserColumnDeclaration<NameParser>::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
         null_modifier.emplace(true);
     }

+    bool is_comment = false;
     /// Collate is also allowed after NULL/NOT NULL
     if (!collation_expression && s_collate.ignore(pos, expected)
         && !collation_parser.parse(pos, collation_expression, expected))

@@ -254,7 +255,9 @@ bool IParserColumnDeclaration<NameParser>::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
     else if (s_ephemeral.ignore(pos, expected))
     {
         default_specifier = s_ephemeral.getName();
-        if (!expr_parser.parse(pos, default_expression, expected) && type)
+        if (s_comment.ignore(pos, expected))
+            is_comment = true;
+        if ((is_comment || !expr_parser.parse(pos, default_expression, expected)) && type)
         {
             ephemeral_default = true;

@@ -289,19 +292,22 @@ bool IParserColumnDeclaration<NameParser>::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
     if (require_type && !type && !default_expression)
         return false; /// reject column name without type

-    if ((type || default_expression) && allow_null_modifiers && !null_modifier.has_value())
+    if (!is_comment)
     {
-        if (s_not.ignore(pos, expected))
+        if ((type || default_expression) && allow_null_modifiers && !null_modifier.has_value())
         {
-            if (!s_null.ignore(pos, expected))
-                return false;
-            null_modifier.emplace(false);
+            if (s_not.ignore(pos, expected))
+            {
+                if (!s_null.ignore(pos, expected))
+                    return false;
+                null_modifier.emplace(false);
+            }
+            else if (s_null.ignore(pos, expected))
+                null_modifier.emplace(true);
         }
-        else if (s_null.ignore(pos, expected))
-            null_modifier.emplace(true);
     }

-    if (s_comment.ignore(pos, expected))
+    if (is_comment || s_comment.ignore(pos, expected))
     {
         /// should be followed by a string literal
         if (!string_literal_parser.parse(pos, comment_expression, expected))
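The net effect of this parser change is that an `EPHEMERAL` column may now be followed directly by a `COMMENT`, without an explicit default expression. A minimal sketch of the now-accepted syntax (table and column names are illustrative):

```sql
CREATE TABLE t
(
    id UInt64,
    staging String EPHEMERAL COMMENT 'input-only column, not stored'
)
ENGINE = MergeTree
ORDER BY id;
```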
@@ -1,5 +1,4 @@
#include <Server/KeeperTCPHandler.h>
-#include "Common/ZooKeeper/ZooKeeperConstants.h"

#if USE_NURAFT

@@ -19,6 +18,8 @@
# include <Common/NetException.h>
# include <Common/PipeFDs.h>
# include <Common/Stopwatch.h>
+# include <Common/ZooKeeper/ZooKeeperCommon.h>
+# include <Common/ZooKeeper/ZooKeeperConstants.h>
# include <Common/ZooKeeper/ZooKeeperIO.h>
# include <Common/logger_useful.h>
# include <Common/setThreadName.h>

@@ -63,6 +64,7 @@ namespace ErrorCodes
     extern const int LOGICAL_ERROR;
     extern const int UNEXPECTED_PACKET_FROM_CLIENT;
     extern const int TIMEOUT_EXCEEDED;
+    extern const int BAD_ARGUMENTS;
 }

 struct PollResult

@@ -637,7 +639,23 @@ std::pair<Coordination::OpNum, Coordination::XID> KeeperTCPHandler::receiveRequest()

     Coordination::ZooKeeperRequestPtr request = Coordination::ZooKeeperRequestFactory::instance().get(opnum);
     request->xid = xid;
-    request->readImpl(read_buffer);
+
+    auto request_validator = [&](const Coordination::ZooKeeperRequest & current_request)
+    {
+        if (!keeper_dispatcher->getKeeperContext()->isOperationSupported(current_request.getOpNum()))
+            throw Exception(ErrorCodes::BAD_ARGUMENTS, "Unsupported operation: {}", current_request.getOpNum());
+    };
+
+    if (auto * multi_request = dynamic_cast<Coordination::ZooKeeperMultiRequest *>(request.get()))
+    {
+        multi_request->readImpl(read_buffer, request_validator);
+    }
+    else
+    {
+        request->readImpl(read_buffer);
+        request_validator(*request);
+    }

     if (!keeper_dispatcher->putRequest(request, session_id, use_xid_64))
         throw Exception(ErrorCodes::TIMEOUT_EXCEEDED, "Session {} already disconnected", session_id);
@@ -226,6 +226,26 @@ template class TableFunctionObjectStorage<HDFSClusterDefinition, StorageHDFSConfiguration>;
#endif
template class TableFunctionObjectStorage<LocalDefinition, StorageLocalConfiguration>;

+#if USE_AVRO && USE_AWS_S3
+template class TableFunctionObjectStorage<IcebergS3ClusterDefinition, StorageS3IcebergConfiguration>;
+#endif
+
+#if USE_AVRO && USE_AZURE_BLOB_STORAGE
+template class TableFunctionObjectStorage<IcebergAzureClusterDefinition, StorageAzureIcebergConfiguration>;
+#endif
+
+#if USE_AVRO && USE_HDFS
+template class TableFunctionObjectStorage<IcebergHDFSClusterDefinition, StorageHDFSIcebergConfiguration>;
+#endif
+
+#if USE_PARQUET && USE_AWS_S3
+template class TableFunctionObjectStorage<DeltaLakeClusterDefinition, StorageS3DeltaLakeConfiguration>;
+#endif
+
+#if USE_AWS_S3
+template class TableFunctionObjectStorage<HudiClusterDefinition, StorageS3HudiConfiguration>;
+#endif
+
#if USE_AVRO
void registerTableFunctionIceberg(TableFunctionFactory & factory)
{
@@ -96,7 +96,7 @@ void registerTableFunctionObjectStorageCluster(TableFunctionFactory & factory)
    {
        .documentation = {
            .description=R"(The table function can be used to read the data stored on HDFS in parallel for many nodes in a specified cluster.)",
-            .examples{{"HDFSCluster", "SELECT * FROM HDFSCluster(cluster_name, uri, format)", ""}}},
+            .examples{{"HDFSCluster", "SELECT * FROM HDFSCluster(cluster, uri, format)", ""}}},
        .allow_readonly = false
    }
    );

@@ -105,15 +105,77 @@ void registerTableFunctionObjectStorageCluster(TableFunctionFactory & factory)
    UNUSED(factory);
}

#if USE_AVRO
void registerTableFunctionIcebergCluster(TableFunctionFactory & factory)
{
    UNUSED(factory);

#if USE_AWS_S3
-template class TableFunctionObjectStorageCluster<S3ClusterDefinition, StorageS3Configuration>;
    factory.registerFunction<TableFunctionIcebergS3Cluster>(
        {.documentation
         = {.description = R"(The table function can be used to read the Iceberg table stored on S3 object store in parallel for many nodes in a specified cluster.)",
            .examples{{"icebergS3Cluster", "SELECT * FROM icebergS3Cluster(cluster, url, [, NOSIGN | access_key_id, secret_access_key, [session_token]], format, [,compression])", ""}},
            .categories{"DataLake"}},
         .allow_readonly = false});
#endif

#if USE_AZURE_BLOB_STORAGE
-template class TableFunctionObjectStorageCluster<AzureClusterDefinition, StorageAzureConfiguration>;
    factory.registerFunction<TableFunctionIcebergAzureCluster>(
        {.documentation
         = {.description = R"(The table function can be used to read the Iceberg table stored on Azure object store in parallel for many nodes in a specified cluster.)",
            .examples{{"icebergAzureCluster", "SELECT * FROM icebergAzureCluster(cluster, connection_string|storage_account_url, container_name, blobpath, [account_name, account_key, format, compression])", ""}},
            .categories{"DataLake"}},
         .allow_readonly = false});
#endif

#if USE_HDFS
-template class TableFunctionObjectStorageCluster<HDFSClusterDefinition, StorageHDFSConfiguration>;
    factory.registerFunction<TableFunctionIcebergHDFSCluster>(
        {.documentation
         = {.description = R"(The table function can be used to read the Iceberg table stored on HDFS virtual filesystem in parallel for many nodes in a specified cluster.)",
            .examples{{"icebergHDFSCluster", "SELECT * FROM icebergHDFSCluster(cluster, uri, [format], [structure], [compression_method])", ""}},
            .categories{"DataLake"}},
         .allow_readonly = false});
#endif
}
#endif

#if USE_AWS_S3
#if USE_PARQUET
void registerTableFunctionDeltaLakeCluster(TableFunctionFactory & factory)
{
    factory.registerFunction<TableFunctionDeltaLakeCluster>(
        {.documentation
         = {.description = R"(The table function can be used to read the DeltaLake table stored on object store in parallel for many nodes in a specified cluster.)",
            .examples{{"deltaLakeCluster", "SELECT * FROM deltaLakeCluster(cluster, url, access_key_id, secret_access_key)", ""}},
            .categories{"DataLake"}},
         .allow_readonly = false});
}
#endif

void registerTableFunctionHudiCluster(TableFunctionFactory & factory)
{
    factory.registerFunction<TableFunctionHudiCluster>(
        {.documentation
         = {.description = R"(The table function can be used to read the Hudi table stored on object store in parallel for many nodes in a specified cluster.)",
            .examples{{"hudiCluster", "SELECT * FROM hudiCluster(cluster, url, access_key_id, secret_access_key)", ""}},
            .categories{"DataLake"}},
         .allow_readonly = false});
}
#endif

void registerDataLakeClusterTableFunctions(TableFunctionFactory & factory)
{
    UNUSED(factory);
#if USE_AVRO
    registerTableFunctionIcebergCluster(factory);
#endif
#if USE_AWS_S3
#if USE_PARQUET
    registerTableFunctionDeltaLakeCluster(factory);
#endif
    registerTableFunctionHudiCluster(factory);
#endif
}

}
@@ -33,6 +33,36 @@ struct HDFSClusterDefinition
    static constexpr auto storage_type_name = "HDFSCluster";
};

+struct IcebergS3ClusterDefinition
+{
+    static constexpr auto name = "icebergS3Cluster";
+    static constexpr auto storage_type_name = "IcebergS3Cluster";
+};
+
+struct IcebergAzureClusterDefinition
+{
+    static constexpr auto name = "icebergAzureCluster";
+    static constexpr auto storage_type_name = "IcebergAzureCluster";
+};
+
+struct IcebergHDFSClusterDefinition
+{
+    static constexpr auto name = "icebergHDFSCluster";
+    static constexpr auto storage_type_name = "IcebergHDFSCluster";
+};
+
+struct DeltaLakeClusterDefinition
+{
+    static constexpr auto name = "deltaLakeCluster";
+    static constexpr auto storage_type_name = "DeltaLakeS3Cluster";
+};
+
+struct HudiClusterDefinition
+{
+    static constexpr auto name = "hudiCluster";
+    static constexpr auto storage_type_name = "HudiS3Cluster";
+};
+
/**
 * Class implementing s3/hdfs/azureBlobStorageCluster(...) table functions,
 * which allow to process many files from S3/HDFS/Azure blob storage on a specific cluster.

@@ -79,4 +109,25 @@ using TableFunctionAzureBlobCluster = TableFunctionObjectStorageCluster<AzureClusterDefinition, StorageAzureConfiguration>;
#if USE_HDFS
using TableFunctionHDFSCluster = TableFunctionObjectStorageCluster<HDFSClusterDefinition, StorageHDFSConfiguration>;
#endif

+#if USE_AVRO && USE_AWS_S3
+using TableFunctionIcebergS3Cluster = TableFunctionObjectStorageCluster<IcebergS3ClusterDefinition, StorageS3IcebergConfiguration>;
+#endif
+
+#if USE_AVRO && USE_AZURE_BLOB_STORAGE
+using TableFunctionIcebergAzureCluster = TableFunctionObjectStorageCluster<IcebergAzureClusterDefinition, StorageAzureIcebergConfiguration>;
+#endif
+
+#if USE_AVRO && USE_HDFS
+using TableFunctionIcebergHDFSCluster = TableFunctionObjectStorageCluster<IcebergHDFSClusterDefinition, StorageHDFSIcebergConfiguration>;
+#endif
+
+#if USE_AWS_S3 && USE_PARQUET
+using TableFunctionDeltaLakeCluster = TableFunctionObjectStorageCluster<DeltaLakeClusterDefinition, StorageS3DeltaLakeConfiguration>;
+#endif
+
+#if USE_AWS_S3
+using TableFunctionHudiCluster = TableFunctionObjectStorageCluster<HudiClusterDefinition, StorageS3HudiConfiguration>;
+#endif
+
}
@@ -66,6 +66,7 @@ void registerTableFunctions(bool use_legacy_mongodb_integration [[maybe_unused]])
    registerTableFunctionObjectStorage(factory);
    registerTableFunctionObjectStorageCluster(factory);
    registerDataLakeTableFunctions(factory);
+    registerDataLakeClusterTableFunctions(factory);
}

}
@@ -70,6 +70,7 @@ void registerTableFunctionExplain(TableFunctionFactory & factory);
void registerTableFunctionObjectStorage(TableFunctionFactory & factory);
void registerTableFunctionObjectStorageCluster(TableFunctionFactory & factory);
void registerDataLakeTableFunctions(TableFunctionFactory & factory);
+void registerDataLakeClusterTableFunctions(TableFunctionFactory & factory);

void registerTableFunctionTimeSeries(TableFunctionFactory & factory);
@@ -299,8 +299,6 @@ class TagAttrs:

    # Only one latest can exist
    latest: ClickHouseVersion
-    # Only one can be a major one (the most fresh per a year)
-    majors: Dict[int, ClickHouseVersion]
    # Only one lts version can exist
    lts: Optional[ClickHouseVersion]

@@ -345,14 +343,6 @@ def ldf_tags(version: ClickHouseVersion, distro: str, tag_attrs: TagAttrs) -> str
        tags.append("lts")
        tags.append(f"lts-{distro}")

-    # If the tag `22`, `23`, `24` etc. should be included in the tags
-    with_major = tag_attrs.majors.get(version.major) in (None, version)
-    if with_major:
-        tag_attrs.majors[version.major] = version
-        if without_distro:
-            tags.append(f"{version.major}")
-        tags.append(f"{version.major}-{distro}")
-
    # Add all normal tags
    for tag in (
        f"{version.major}.{version.minor}",

@@ -384,7 +374,7 @@ def generate_ldf(args: argparse.Namespace) -> None:
        args.directory / git_runner(f"git -C {args.directory} rev-parse --show-cdup")
    ).absolute()
    lines = ldf_header(git, directory)
-    tag_attrs = TagAttrs(versions[-1], {}, None)
+    tag_attrs = TagAttrs(versions[-1], None)

    # We iterate from the most recent to the oldest version
    for version in reversed(versions):
@@ -7,6 +7,7 @@ import os.path as p
import platform
import pprint
import pwd
+import random
import re
import shlex
import shutil

@@ -1650,6 +1651,8 @@ class ClickHouseCluster:
        minio_certs_dir=None,
        minio_data_dir=None,
        use_keeper=True,
+        keeper_randomize_feature_flags=True,
+        keeper_required_feature_flags=[],
        main_config_name="config.xml",
        users_config_name="users.xml",
        copy_common_configs=True,

@@ -1682,6 +1685,8 @@ class ClickHouseCluster:
        if not env_variables:
            env_variables = {}
        self.use_keeper = use_keeper
+        self.keeper_randomize_feature_flags = keeper_randomize_feature_flags
+        self.keeper_required_feature_flags = keeper_required_feature_flags

        # Code coverage files will be placed in database directory
        # (affect only WITH_COVERAGE=1 build)

@@ -2828,15 +2833,51 @@ class ClickHouseCluster:

        if self.use_keeper:  # TODO: remove hardcoded paths from here
            for i in range(1, 4):
+                current_keeper_config_dir = os.path.join(
+                    f"{self.keeper_instance_dir_prefix}{i}", "config"
+                )
                shutil.copy(
                    os.path.join(
                        self.keeper_config_dir, f"keeper_config{i}.xml"
                    ),
-                    os.path.join(
-                        self.keeper_instance_dir_prefix + f"{i}", "config"
-                    ),
+                    current_keeper_config_dir,
                )

+                extra_configs_dir = os.path.join(
+                    current_keeper_config_dir, f"keeper_config{i}.d"
+                )
+                os.mkdir(extra_configs_dir)
+                feature_flags_config = os.path.join(
+                    extra_configs_dir, "feature_flags.yaml"
+                )
+
+                indentation = 4 * " "
+
+                def get_feature_flag_value(feature_flag):
+                    if not self.keeper_randomize_feature_flags:
+                        return 1
+
+                    if feature_flag in self.keeper_required_feature_flags:
+                        return 1
+
+                    return random.randint(0, 1)
+
+                with open(feature_flags_config, "w") as ff_config:
+                    ff_config.write("keeper_server:\n")
+                    ff_config.write(f"{indentation}feature_flags:\n")
+                    indentation *= 2
+
+                    for feature_flag in [
+                        "filtered_list",
+                        "multi_read",
+                        "check_not_exists",
+                        "create_if_not_exists",
+                        "remove_recursive",
+                    ]:
+                        ff_config.write(
+                            f"{indentation}{feature_flag}: {get_feature_flag_value(feature_flag)}\n"
+                        )
+
            run_and_check(self.base_zookeeper_cmd + common_opts, env=self.env)
            self.up_called = True
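For reference, the `feature_flags.yaml` produced by the writer loop above takes the following shape (the 0/1 values shown are illustrative; each flag is randomized unless it is listed in `keeper_required_feature_flags`, in which case it is forced to 1):

```yaml
keeper_server:
    feature_flags:
        filtered_list: 1
        multi_read: 0
        check_not_exists: 1
        create_if_not_exists: 1
        remove_recursive: 0
```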
@@ -13,6 +13,7 @@ node = cluster.add_instance(
    main_configs=["configs/enable_keeper_map.xml"],
    user_configs=["configs/keeper_retries.xml"],
    with_zookeeper=True,
+    keeper_required_feature_flags=["multi_read"],
    stay_alive=True,
)
@@ -20,6 +20,7 @@ node1 = cluster.add_instance(
    main_configs=["configs/config.xml"],
    user_configs=["configs/users.xml"],
    with_zookeeper=True,
+    keeper_required_feature_flags=["multi_read", "create_if_not_exists"],
    macros={"shard": "shard1", "replica": "1"},
    stay_alive=True,
)

@@ -28,6 +29,7 @@ node2 = cluster.add_instance(
    main_configs=["configs/config.xml"],
    user_configs=["configs/users.xml"],
    with_zookeeper=True,
+    keeper_required_feature_flags=["multi_read", "create_if_not_exists"],
    macros={"shard": "shard1", "replica": "2"},
)
nodes = [node1, node2]
@@ -378,7 +378,7 @@ def test_reload_via_client(cluster, zk):
            configure_from_zk(zk)
            break
        except QueryRuntimeException:
-            logging.exception("The new socket is not binded yet")
+            logging.exception("The new socket is not bound yet")
            time.sleep(0.1)

    if exception:
@@ -0,0 +1,20 @@
<clickhouse>
    <remote_servers>
        <cluster_simple>
            <shard>
                <replica>
                    <host>node1</host>
                    <port>9000</port>
                </replica>
                <replica>
                    <host>node2</host>
                    <port>9000</port>
                </replica>
                <replica>
                    <host>node3</host>
                    <port>9000</port>
                </replica>
            </shard>
        </cluster_simple>
    </remote_servers>
</clickhouse>
@@ -0,0 +1,6 @@
<clickhouse>
    <query_log>
        <database>system</database>
        <table>query_log</table>
    </query_log>
</clickhouse>
@@ -73,14 +73,38 @@ def started_cluster():
    cluster.add_instance(
        "node1",
        main_configs=[
            "configs/config.d/query_log.xml",
            "configs/config.d/cluster.xml",
            "configs/config.d/named_collections.xml",
            "configs/config.d/filesystem_caches.xml",
        ],
        user_configs=["configs/users.d/users.xml"],
        with_minio=True,
        with_azurite=True,
        with_hdfs=with_hdfs,
        stay_alive=True,
    )
    cluster.add_instance(
        "node2",
        main_configs=[
            "configs/config.d/query_log.xml",
            "configs/config.d/cluster.xml",
            "configs/config.d/named_collections.xml",
            "configs/config.d/filesystem_caches.xml",
        ],
        user_configs=["configs/users.d/users.xml"],
        stay_alive=True,
    )
    cluster.add_instance(
        "node3",
        main_configs=[
            "configs/config.d/query_log.xml",
            "configs/config.d/cluster.xml",
            "configs/config.d/named_collections.xml",
            "configs/config.d/filesystem_caches.xml",
        ],
        user_configs=["configs/users.d/users.xml"],
        stay_alive=True,
    )

    logging.info("Starting cluster...")

@@ -182,6 +206,7 @@ def get_creation_expression(
    cluster,
    format="Parquet",
    table_function=False,
+    run_on_cluster=False,
    **kwargs,
):
    if storage_type == "s3":

@@ -189,35 +214,56 @@ def get_creation_expression(
        if "bucket" in kwargs:
            bucket = kwargs["bucket"]
        else:
            bucket = cluster.minio_bucket
        print(bucket)

-        if table_function:
-            return f"icebergS3(s3, filename = 'iceberg_data/default/{table_name}/', format={format}, url = 'http://minio1:9001/{bucket}/')"
+        if run_on_cluster:
+            assert table_function
+            return f"icebergS3Cluster('cluster_simple', s3, filename = 'iceberg_data/default/{table_name}/', format={format}, url = 'http://minio1:9001/{bucket}/')"
         else:
-            return f"""
-            DROP TABLE IF EXISTS {table_name};
-            CREATE TABLE {table_name}
-            ENGINE=IcebergS3(s3, filename = 'iceberg_data/default/{table_name}/', format={format}, url = 'http://minio1:9001/{bucket}/')"""
+            if table_function:
+                return f"icebergS3(s3, filename = 'iceberg_data/default/{table_name}/', format={format}, url = 'http://minio1:9001/{bucket}/')"
+            else:
+                return f"""
+                DROP TABLE IF EXISTS {table_name};
+                CREATE TABLE {table_name}
+                ENGINE=IcebergS3(s3, filename = 'iceberg_data/default/{table_name}/', format={format}, url = 'http://minio1:9001/{bucket}/')"""

     elif storage_type == "azure":
-        if table_function:
+        if run_on_cluster:
+            assert table_function
             return f"""
-                icebergAzure(azure, container = '{cluster.azure_container_name}', storage_account_url = '{cluster.env_variables["AZURITE_STORAGE_ACCOUNT_URL"]}', blob_path = '/iceberg_data/default/{table_name}/', format={format})
+                icebergAzureCluster('cluster_simple', azure, container = '{cluster.azure_container_name}', storage_account_url = '{cluster.env_variables["AZURITE_STORAGE_ACCOUNT_URL"]}', blob_path = '/iceberg_data/default/{table_name}/', format={format})
             """
         else:
-            return f"""
-            DROP TABLE IF EXISTS {table_name};
-            CREATE TABLE {table_name}
-            ENGINE=IcebergAzure(azure, container = {cluster.azure_container_name}, storage_account_url = '{cluster.env_variables["AZURITE_STORAGE_ACCOUNT_URL"]}', blob_path = '/iceberg_data/default/{table_name}/', format={format})"""
+            if table_function:
+                return f"""
+                    icebergAzure(azure, container = '{cluster.azure_container_name}', storage_account_url = '{cluster.env_variables["AZURITE_STORAGE_ACCOUNT_URL"]}', blob_path = '/iceberg_data/default/{table_name}/', format={format})
+                """
+            else:
+                return f"""
+                DROP TABLE IF EXISTS {table_name};
+                CREATE TABLE {table_name}
+                ENGINE=IcebergAzure(azure, container = {cluster.azure_container_name}, storage_account_url = '{cluster.env_variables["AZURITE_STORAGE_ACCOUNT_URL"]}', blob_path = '/iceberg_data/default/{table_name}/', format={format})"""

     elif storage_type == "hdfs":
-        if table_function:
+        if run_on_cluster:
+            assert table_function
             return f"""
-                icebergHDFS(hdfs, filename= 'iceberg_data/default/{table_name}/', format={format}, url = 'hdfs://hdfs1:9000/')
+                icebergHDFSCluster('cluster_simple', hdfs, filename= 'iceberg_data/default/{table_name}/', format={format}, url = 'hdfs://hdfs1:9000/')
             """
         else:
-            return f"""
-            DROP TABLE IF EXISTS {table_name};
-            CREATE TABLE {table_name}
-            ENGINE=IcebergHDFS(hdfs, filename = 'iceberg_data/default/{table_name}/', format={format}, url = 'hdfs://hdfs1:9000/');"""
+            if table_function:
+                return f"""
+                    icebergHDFS(hdfs, filename= 'iceberg_data/default/{table_name}/', format={format}, url = 'hdfs://hdfs1:9000/')
+                """
+            else:
+                return f"""
+                DROP TABLE IF EXISTS {table_name};
+                CREATE TABLE {table_name}
+                ENGINE=IcebergHDFS(hdfs, filename = 'iceberg_data/default/{table_name}/', format={format}, url = 'hdfs://hdfs1:9000/');"""

     elif storage_type == "local":
+        assert not run_on_cluster
+
         if table_function:
             return f"""
                 icebergLocal(local, path = '/iceberg_data/default/{table_name}/', format={format})

@@ -227,6 +273,7 @@ def get_creation_expression(
            DROP TABLE IF EXISTS {table_name};
            CREATE TABLE {table_name}
            ENGINE=IcebergLocal(local, path = '/iceberg_data/default/{table_name}/', format={format});"""

    else:
        raise Exception(f"Unknown iceberg storage type: {storage_type}")
@@ -492,6 +539,108 @@ def test_types(started_cluster, format_version, storage_type):
    )


@pytest.mark.parametrize("format_version", ["1", "2"])
@pytest.mark.parametrize("storage_type", ["s3", "azure", "hdfs"])
def test_cluster_table_function(started_cluster, format_version, storage_type):
    if is_arm() and storage_type == "hdfs":
        pytest.skip("Disabled test IcebergHDFS for aarch64")

    instance = started_cluster.instances["node1"]
    spark = started_cluster.spark_session

    TABLE_NAME = (
        "test_iceberg_cluster_"
        + format_version
        + "_"
        + storage_type
        + "_"
        + get_uuid_str()
    )

    def add_df(mode):
        write_iceberg_from_df(
            spark,
            generate_data(spark, 0, 100),
            TABLE_NAME,
            mode=mode,
            format_version=format_version,
        )

        files = default_upload_directory(
            started_cluster,
            storage_type,
            f"/iceberg_data/default/{TABLE_NAME}/",
            f"/iceberg_data/default/{TABLE_NAME}/",
        )

        logging.info(f"Adding another dataframe. result files: {files}")

        return files

    files = add_df(mode="overwrite")
    for i in range(1, len(started_cluster.instances)):
        files = add_df(mode="append")

    logging.info(f"Setup complete. files: {files}")
    assert len(files) == 5 + 4 * (len(started_cluster.instances) - 1)

    clusters = instance.query(f"SELECT * FROM system.clusters")
    logging.info(f"Clusters setup: {clusters}")

    # Regular Query only node1
    table_function_expr = get_creation_expression(
        storage_type, TABLE_NAME, started_cluster, table_function=True
    )
    select_regular = (
        instance.query(f"SELECT * FROM {table_function_expr}").strip().split()
    )

    # Cluster Query with node1 as coordinator
    table_function_expr_cluster = get_creation_expression(
        storage_type,
        TABLE_NAME,
        started_cluster,
        table_function=True,
        run_on_cluster=True,
    )
    select_cluster = (
        instance.query(f"SELECT * FROM {table_function_expr_cluster}").strip().split()
    )

    # Simple size check
    assert len(select_regular) == 600
    assert len(select_cluster) == 600

    # Actual check
    assert select_cluster == select_regular

    # Check query_log
    for replica in started_cluster.instances.values():
        replica.query("SYSTEM FLUSH LOGS")

    for node_name, replica in started_cluster.instances.items():
        cluster_secondary_queries = (
            replica.query(
                f"""
                SELECT query, type, is_initial_query, read_rows, read_bytes FROM system.query_log
                WHERE
                    type = 'QueryStart' AND
                    positionCaseInsensitive(query, '{storage_type}Cluster') != 0 AND
                    position(query, '{TABLE_NAME}') != 0 AND
                    position(query, 'system.query_log') = 0 AND
                    NOT is_initial_query
                """
            )
            .strip()
            .split("\n")
        )

        logging.info(
            f"[{node_name}] cluster_secondary_queries: {cluster_secondary_queries}"
        )
        assert len(cluster_secondary_queries) == 1


@pytest.mark.parametrize("format_version", ["1", "2"])
@pytest.mark.parametrize("storage_type", ["s3", "azure", "hdfs", "local"])
def test_delete_files(started_cluster, format_version, storage_type):
@@ -59,6 +59,9 @@ instance = cluster.add_instance(
    user_configs=["configs/users.xml"],
    with_kafka=True,
    with_zookeeper=True,  # For Replicated Table
+    keeper_required_feature_flags=[
+        "create_if_not_exists"
+    ],  # new Kafka doesn't work without this feature
    macros={
        "kafka_broker": "kafka1",
        "kafka_topic_old": KAFKA_TOPIC_OLD,
@@ -99,6 +99,7 @@ def started_cluster():
        with_minio=True,
        with_azurite=True,
        with_zookeeper=True,
+        keeper_required_feature_flags=["create_if_not_exists"],
        main_configs=[
            "configs/zookeeper.xml",
            "configs/s3queue_log.xml",

@@ -110,6 +111,7 @@ def started_cluster():
        user_configs=["configs/users.xml"],
        with_minio=True,
        with_zookeeper=True,
+        keeper_required_feature_flags=["create_if_not_exists"],
        main_configs=[
            "configs/s3queue_log.xml",
        ],

@@ -118,6 +120,7 @@ def started_cluster():
    cluster.add_instance(
        "old_instance",
        with_zookeeper=True,
+        keeper_required_feature_flags=["create_if_not_exists"],
        image="clickhouse/clickhouse-server",
        tag="23.12",
        stay_alive=True,

@@ -127,6 +130,7 @@ def started_cluster():
    cluster.add_instance(
        "node1",
        with_zookeeper=True,
+        keeper_required_feature_flags=["create_if_not_exists"],
        stay_alive=True,
        main_configs=[
            "configs/zookeeper.xml",

@@ -137,6 +141,7 @@ def started_cluster():
    cluster.add_instance(
        "node2",
        with_zookeeper=True,
+        keeper_required_feature_flags=["create_if_not_exists"],
        stay_alive=True,
        main_configs=[
            "configs/zookeeper.xml",

@@ -149,6 +154,7 @@ def started_cluster():
        user_configs=["configs/users.xml"],
        with_minio=True,
        with_zookeeper=True,
+        keeper_required_feature_flags=["create_if_not_exists"],
        main_configs=[
            "configs/s3queue_log.xml",
            "configs/merge_tree.xml",

@@ -158,6 +164,7 @@ def started_cluster():
    cluster.add_instance(
        "instance_24.5",
        with_zookeeper=True,
+        keeper_required_feature_flags=["create_if_not_exists"],
        image="clickhouse/clickhouse-server",
        tag="24.5",
        stay_alive=True,

@@ -170,6 +177,7 @@ def started_cluster():
    cluster.add_instance(
        "node_cloud_mode",
        with_zookeeper=True,
+        keeper_required_feature_flags=["create_if_not_exists"],
        stay_alive=True,
        main_configs=[
            "configs/zookeeper.xml",
@@ -96,13 +96,15 @@ SELECT 'First JOIN INNER second JOIN INNER';
First JOIN INNER second JOIN INNER
SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value),
t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value), t3.id AS t3_id, toTypeName(t3_id), t3.value AS t3_value, toTypeName(t3_value)
FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) INNER JOIN test_table_join_3 AS t3 USING(id);
FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) INNER JOIN test_table_join_3 AS t3 USING(id)
ORDER BY ALL;
0 UInt64 0 UInt64 Join_1_Value_0 String 0 UInt64 Join_2_Value_0 String 0 UInt64 Join_3_Value_0 String
1 UInt64 1 UInt64 Join_1_Value_1 String 1 UInt64 Join_2_Value_1 String 1 UInt64 Join_3_Value_1 String
SELECT '--';
--
SELECT t1.value AS t1_value, toTypeName(t1_value), t2.value AS t2_value, toTypeName(t2_value), t3.value AS t3_value, toTypeName(t3_value)
FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) INNER JOIN test_table_join_3 AS t3 USING(id);
FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) INNER JOIN test_table_join_3 AS t3 USING(id)
ORDER BY ALL;
Join_1_Value_0 String Join_2_Value_0 String Join_3_Value_0 String
Join_1_Value_1 String Join_2_Value_1 String Join_3_Value_1 String
SELECT '--';
@@ -115,13 +117,15 @@ SELECT 'First JOIN INNER second JOIN LEFT';
First JOIN INNER second JOIN LEFT
SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value),
t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value), t3.id AS t3_id, toTypeName(t3_id), t3.value AS t3_value, toTypeName(t3_value)
FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) LEFT JOIN test_table_join_3 AS t3 USING(id);
FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) LEFT JOIN test_table_join_3 AS t3 USING(id)
ORDER BY ALL;
0 UInt64 0 UInt64 Join_1_Value_0 String 0 UInt64 Join_2_Value_0 String 0 UInt64 Join_3_Value_0 String
1 UInt64 1 UInt64 Join_1_Value_1 String 1 UInt64 Join_2_Value_1 String 1 UInt64 Join_3_Value_1 String
SELECT '--';
--
SELECT t1.value AS t1_value, toTypeName(t1_value), t2.value AS t2_value, toTypeName(t2_value), t3.value AS t3_value, toTypeName(t3_value)
FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) LEFT JOIN test_table_join_3 AS t3 USING(id);
FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) LEFT JOIN test_table_join_3 AS t3 USING(id)
ORDER BY ALL;
Join_1_Value_0 String Join_2_Value_0 String Join_3_Value_0 String
Join_1_Value_1 String Join_2_Value_1 String Join_3_Value_1 String
SELECT '--';
@@ -134,17 +138,19 @@ SELECT 'First JOIN INNER second JOIN RIGHT';
First JOIN INNER second JOIN RIGHT
SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value),
t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value), t3.id AS t3_id, toTypeName(t3_id), t3.value AS t3_value, toTypeName(t3_value)
FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) RIGHT JOIN test_table_join_3 AS t3 USING(id);
FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) RIGHT JOIN test_table_join_3 AS t3 USING(id)
ORDER BY ALL;
0 UInt64 0 UInt64 Join_1_Value_0 String 0 UInt64 Join_2_Value_0 String 0 UInt64 Join_3_Value_0 String
1 UInt64 1 UInt64 Join_1_Value_1 String 1 UInt64 Join_2_Value_1 String 1 UInt64 Join_3_Value_1 String
4 UInt64 0 UInt64 String 0 UInt64 String 4 UInt64 Join_3_Value_4 String
SELECT '--';
--
SELECT t1.value AS t1_value, toTypeName(t1_value), t2.value AS t2_value, toTypeName(t2_value), t3.value AS t3_value, toTypeName(t3_value)
FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) RIGHT JOIN test_table_join_3 AS t3 USING(id);
FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) RIGHT JOIN test_table_join_3 AS t3 USING(id)
ORDER BY ALL;
String String Join_3_Value_4 String
Join_1_Value_0 String Join_2_Value_0 String Join_3_Value_0 String
Join_1_Value_1 String Join_2_Value_1 String Join_3_Value_1 String
String String Join_3_Value_4 String
SELECT '--';
--
SELECT 1 FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) RIGHT JOIN test_table_join_3 AS t3 USING(id);
@@ -156,17 +162,19 @@ SELECT 'First JOIN INNER second JOIN FULL';
First JOIN INNER second JOIN FULL
SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value),
t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value), t3.id AS t3_id, toTypeName(t3_id), t3.value AS t3_value, toTypeName(t3_value)
FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) FULL JOIN test_table_join_3 AS t3 USING(id);
FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) FULL JOIN test_table_join_3 AS t3 USING(id)
ORDER BY ALL;
0 UInt64 0 UInt64 String 0 UInt64 String 4 UInt64 Join_3_Value_4 String
0 UInt64 0 UInt64 Join_1_Value_0 String 0 UInt64 Join_2_Value_0 String 0 UInt64 Join_3_Value_0 String
1 UInt64 1 UInt64 Join_1_Value_1 String 1 UInt64 Join_2_Value_1 String 1 UInt64 Join_3_Value_1 String
0 UInt64 0 UInt64 String 0 UInt64 String 4 UInt64 Join_3_Value_4 String
SELECT '--';
--
SELECT t1.value AS t1_value, toTypeName(t1_value), t2.value AS t2_value, toTypeName(t2_value), t3.value AS t3_value, toTypeName(t3_value)
FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) FULL JOIN test_table_join_3 AS t3 USING(id);
FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) FULL JOIN test_table_join_3 AS t3 USING(id)
ORDER BY ALL;
String String Join_3_Value_4 String
Join_1_Value_0 String Join_2_Value_0 String Join_3_Value_0 String
Join_1_Value_1 String Join_2_Value_1 String Join_3_Value_1 String
String String Join_3_Value_4 String
SELECT '--';
--
SELECT 1 FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) FULL JOIN test_table_join_3 AS t3 USING(id);
@@ -178,13 +186,15 @@ SELECT 'First JOIN LEFT second JOIN INNER';
First JOIN LEFT second JOIN INNER
SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value),
t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value), t3.id AS t3_id, toTypeName(t3_id), t3.value AS t3_value, toTypeName(t3_value)
FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 USING (id) INNER JOIN test_table_join_3 AS t3 USING(id);
FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 USING (id) INNER JOIN test_table_join_3 AS t3 USING(id)
ORDER BY ALL;
0 UInt64 0 UInt64 Join_1_Value_0 String 0 UInt64 Join_2_Value_0 String 0 UInt64 Join_3_Value_0 String
1 UInt64 1 UInt64 Join_1_Value_1 String 1 UInt64 Join_2_Value_1 String 1 UInt64 Join_3_Value_1 String
SELECT '--';
--
SELECT t1.value AS t1_value, toTypeName(t1_value), t2.value AS t2_value, toTypeName(t2_value), t3.value AS t3_value, toTypeName(t3_value)
FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 USING (id) INNER JOIN test_table_join_3 AS t3 USING(id);
FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 USING (id) INNER JOIN test_table_join_3 AS t3 USING(id)
ORDER BY ALL;
Join_1_Value_0 String Join_2_Value_0 String Join_3_Value_0 String
Join_1_Value_1 String Join_2_Value_1 String Join_3_Value_1 String
SELECT '--';
@@ -197,14 +207,16 @@ SELECT 'First JOIN LEFT second JOIN LEFT';
First JOIN LEFT second JOIN LEFT
SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value),
t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value), t3.id AS t3_id, toTypeName(t3_id), t3.value AS t3_value, toTypeName(t3_value)
FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 USING (id) LEFT JOIN test_table_join_3 AS t3 USING(id);
FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 USING (id) LEFT JOIN test_table_join_3 AS t3 USING(id)
ORDER BY ALL;
0 UInt64 0 UInt64 Join_1_Value_0 String 0 UInt64 Join_2_Value_0 String 0 UInt64 Join_3_Value_0 String
1 UInt64 1 UInt64 Join_1_Value_1 String 1 UInt64 Join_2_Value_1 String 1 UInt64 Join_3_Value_1 String
2 UInt64 2 UInt64 Join_1_Value_2 String 0 UInt64 String 0 UInt64 String
SELECT '--';
--
SELECT t1.value AS t1_value, toTypeName(t1_value), t2.value AS t2_value, toTypeName(t2_value), t3.value AS t3_value, toTypeName(t3_value)
FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 USING (id) LEFT JOIN test_table_join_3 AS t3 USING(id);
FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 USING (id) LEFT JOIN test_table_join_3 AS t3 USING(id)
ORDER BY ALL;
Join_1_Value_0 String Join_2_Value_0 String Join_3_Value_0 String
Join_1_Value_1 String Join_2_Value_1 String Join_3_Value_1 String
Join_1_Value_2 String String String
@@ -219,17 +231,19 @@ SELECT 'First JOIN LEFT second JOIN RIGHT';
First JOIN LEFT second JOIN RIGHT
SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value),
t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value), t3.id AS t3_id, toTypeName(t3_id), t3.value AS t3_value, toTypeName(t3_value)
FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 USING (id) RIGHT JOIN test_table_join_3 AS t3 USING(id);
FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 USING (id) RIGHT JOIN test_table_join_3 AS t3 USING(id)
ORDER BY ALL;
0 UInt64 0 UInt64 Join_1_Value_0 String 0 UInt64 Join_2_Value_0 String 0 UInt64 Join_3_Value_0 String
1 UInt64 1 UInt64 Join_1_Value_1 String 1 UInt64 Join_2_Value_1 String 1 UInt64 Join_3_Value_1 String
4 UInt64 0 UInt64 String 0 UInt64 String 4 UInt64 Join_3_Value_4 String
SELECT '--';
--
SELECT t1.value AS t1_value, toTypeName(t1_value), t2.value AS t2_value, toTypeName(t2_value), t3.value AS t3_value, toTypeName(t3_value)
FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 USING (id) RIGHT JOIN test_table_join_3 AS t3 USING(id);
FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 USING (id) RIGHT JOIN test_table_join_3 AS t3 USING(id)
ORDER BY ALL;
String String Join_3_Value_4 String
Join_1_Value_0 String Join_2_Value_0 String Join_3_Value_0 String
Join_1_Value_1 String Join_2_Value_1 String Join_3_Value_1 String
String String Join_3_Value_4 String
SELECT '--';
--
SELECT 1 FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 USING (id) RIGHT JOIN test_table_join_3 AS t3 USING(id);
@@ -241,19 +255,21 @@ SELECT 'First JOIN LEFT second JOIN FULL';
First JOIN LEFT second JOIN FULL
SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value),
t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value), t3.id AS t3_id, toTypeName(t3_id), t3.value AS t3_value, toTypeName(t3_value)
FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 USING (id) FULL JOIN test_table_join_3 AS t3 USING(id);
FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 USING (id) FULL JOIN test_table_join_3 AS t3 USING(id)
ORDER BY ALL;
0 UInt64 0 UInt64 String 0 UInt64 String 4 UInt64 Join_3_Value_4 String
0 UInt64 0 UInt64 Join_1_Value_0 String 0 UInt64 Join_2_Value_0 String 0 UInt64 Join_3_Value_0 String
1 UInt64 1 UInt64 Join_1_Value_1 String 1 UInt64 Join_2_Value_1 String 1 UInt64 Join_3_Value_1 String
2 UInt64 2 UInt64 Join_1_Value_2 String 0 UInt64 String 0 UInt64 String
0 UInt64 0 UInt64 String 0 UInt64 String 4 UInt64 Join_3_Value_4 String
SELECT '--';
--
SELECT t1.value AS t1_value, toTypeName(t1_value), t2.value AS t2_value, toTypeName(t2_value), t3.value AS t3_value, toTypeName(t3_value)
FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 USING (id) FULL JOIN test_table_join_3 AS t3 USING(id);
FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 USING (id) FULL JOIN test_table_join_3 AS t3 USING(id)
ORDER BY ALL;
String String Join_3_Value_4 String
Join_1_Value_0 String Join_2_Value_0 String Join_3_Value_0 String
Join_1_Value_1 String Join_2_Value_1 String Join_3_Value_1 String
Join_1_Value_2 String String String
String String Join_3_Value_4 String
SELECT '--';
--
SELECT 1 FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 USING (id) FULL JOIN test_table_join_3 AS t3 USING(id);
@@ -266,13 +282,15 @@ SELECT 'First JOIN RIGHT second JOIN INNER';
First JOIN RIGHT second JOIN INNER
SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value),
t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value), t3.id AS t3_id, toTypeName(t3_id), t3.value AS t3_value, toTypeName(t3_value)
FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING (id) INNER JOIN test_table_join_3 AS t3 USING(id);
FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING (id) INNER JOIN test_table_join_3 AS t3 USING(id)
ORDER BY ALL;
0 UInt64 0 UInt64 Join_1_Value_0 String 0 UInt64 Join_2_Value_0 String 0 UInt64 Join_3_Value_0 String
1 UInt64 1 UInt64 Join_1_Value_1 String 1 UInt64 Join_2_Value_1 String 1 UInt64 Join_3_Value_1 String
SELECT '--';
--
SELECT t1.value AS t1_value, toTypeName(t1_value), t2.value AS t2_value, toTypeName(t2_value), t3.value AS t3_value, toTypeName(t3_value)
FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING (id) INNER JOIN test_table_join_3 AS t3 USING(id);
FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING (id) INNER JOIN test_table_join_3 AS t3 USING(id)
ORDER BY ALL;
Join_1_Value_0 String Join_2_Value_0 String Join_3_Value_0 String
Join_1_Value_1 String Join_2_Value_1 String Join_3_Value_1 String
SELECT '--';
@@ -285,17 +303,19 @@ SELECT 'First JOIN RIGHT second JOIN LEFT';
First JOIN RIGHT second JOIN LEFT
SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value),
t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value), t3.id AS t3_id, toTypeName(t3_id), t3.value AS t3_value, toTypeName(t3_value)
FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING (id) LEFT JOIN test_table_join_3 AS t3 USING(id);
FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING (id) LEFT JOIN test_table_join_3 AS t3 USING(id)
ORDER BY ALL;
0 UInt64 0 UInt64 Join_1_Value_0 String 0 UInt64 Join_2_Value_0 String 0 UInt64 Join_3_Value_0 String
1 UInt64 1 UInt64 Join_1_Value_1 String 1 UInt64 Join_2_Value_1 String 1 UInt64 Join_3_Value_1 String
3 UInt64 0 UInt64 String 3 UInt64 Join_2_Value_3 String 0 UInt64 String
SELECT '--';
--
SELECT t1.value AS t1_value, toTypeName(t1_value), t2.value AS t2_value, toTypeName(t2_value), t3.value AS t3_value, toTypeName(t3_value)
FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING (id) LEFT JOIN test_table_join_3 AS t3 USING(id);
FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING (id) LEFT JOIN test_table_join_3 AS t3 USING(id)
ORDER BY ALL;
String Join_2_Value_3 String String
Join_1_Value_0 String Join_2_Value_0 String Join_3_Value_0 String
Join_1_Value_1 String Join_2_Value_1 String Join_3_Value_1 String
String Join_2_Value_3 String String
SELECT '--';
--
SELECT 1 FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING (id) LEFT JOIN test_table_join_3 AS t3 USING(id);
@@ -307,17 +327,19 @@ SELECT 'First JOIN RIGHT second JOIN RIGHT';
First JOIN RIGHT second JOIN RIGHT
SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value),
t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value), t3.id AS t3_id, toTypeName(t3_id), t3.value AS t3_value, toTypeName(t3_value)
FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING (id) RIGHT JOIN test_table_join_3 AS t3 USING(id);
FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING (id) RIGHT JOIN test_table_join_3 AS t3 USING(id)
ORDER BY ALL;
0 UInt64 0 UInt64 Join_1_Value_0 String 0 UInt64 Join_2_Value_0 String 0 UInt64 Join_3_Value_0 String
1 UInt64 1 UInt64 Join_1_Value_1 String 1 UInt64 Join_2_Value_1 String 1 UInt64 Join_3_Value_1 String
4 UInt64 0 UInt64 String 0 UInt64 String 4 UInt64 Join_3_Value_4 String
SELECT '--';
--
SELECT t1.value AS t1_value, toTypeName(t1_value), t2.value AS t2_value, toTypeName(t2_value), t3.value AS t3_value, toTypeName(t3_value)
FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING (id) RIGHT JOIN test_table_join_3 AS t3 USING(id);
FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING (id) RIGHT JOIN test_table_join_3 AS t3 USING(id)
ORDER BY ALL;
String String Join_3_Value_4 String
Join_1_Value_0 String Join_2_Value_0 String Join_3_Value_0 String
Join_1_Value_1 String Join_2_Value_1 String Join_3_Value_1 String
String String Join_3_Value_4 String
SELECT '--';
--
SELECT 1 FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING (id) RIGHT JOIN test_table_join_3 AS t3 USING(id);
@@ -329,19 +351,21 @@ SELECT 'First JOIN RIGHT second JOIN FULL';
First JOIN RIGHT second JOIN FULL
SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value),
t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value), t3.id AS t3_id, toTypeName(t3_id), t3.value AS t3_value, toTypeName(t3_value)
FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING (id) FULL JOIN test_table_join_3 AS t3 USING(id);
FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING (id) FULL JOIN test_table_join_3 AS t3 USING(id)
ORDER BY ALL;
0 UInt64 0 UInt64 String 0 UInt64 String 4 UInt64 Join_3_Value_4 String
0 UInt64 0 UInt64 Join_1_Value_0 String 0 UInt64 Join_2_Value_0 String 0 UInt64 Join_3_Value_0 String
1 UInt64 1 UInt64 Join_1_Value_1 String 1 UInt64 Join_2_Value_1 String 1 UInt64 Join_3_Value_1 String
3 UInt64 0 UInt64 String 3 UInt64 Join_2_Value_3 String 0 UInt64 String
0 UInt64 0 UInt64 String 0 UInt64 String 4 UInt64 Join_3_Value_4 String
SELECT '--';
--
SELECT t1.value AS t1_value, toTypeName(t1_value), t2.value AS t2_value, toTypeName(t2_value), t3.value AS t3_value, toTypeName(t3_value)
FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING (id) FULL JOIN test_table_join_3 AS t3 USING(id);
FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING (id) FULL JOIN test_table_join_3 AS t3 USING(id)
ORDER BY ALL;
String String Join_3_Value_4 String
String Join_2_Value_3 String String
Join_1_Value_0 String Join_2_Value_0 String Join_3_Value_0 String
Join_1_Value_1 String Join_2_Value_1 String Join_3_Value_1 String
String Join_2_Value_3 String String
String String Join_3_Value_4 String
SELECT '--';
--
SELECT 1 FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING (id) FULL JOIN test_table_join_3 AS t3 USING(id);
@@ -354,14 +378,16 @@ SELECT 'First JOIN FULL second JOIN INNER';
First JOIN FULL second JOIN INNER
SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value),
t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value), t3.id AS t3_id, toTypeName(t3_id), t3.value AS t3_value, toTypeName(t3_value)
FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (id) INNER JOIN test_table_join_3 AS t3 USING(id);
FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (id) INNER JOIN test_table_join_3 AS t3 USING(id)
ORDER BY ALL;
0 UInt64 0 UInt64 String 3 UInt64 Join_2_Value_3 String 0 UInt64 Join_3_Value_0 String
0 UInt64 0 UInt64 Join_1_Value_0 String 0 UInt64 Join_2_Value_0 String 0 UInt64 Join_3_Value_0 String
1 UInt64 1 UInt64 Join_1_Value_1 String 1 UInt64 Join_2_Value_1 String 1 UInt64 Join_3_Value_1 String
0 UInt64 0 UInt64 String 3 UInt64 Join_2_Value_3 String 0 UInt64 Join_3_Value_0 String
SELECT '--';
--
SELECT t1.value AS t1_value, toTypeName(t1_value), t2.value AS t2_value, toTypeName(t2_value), t3.value AS t3_value, toTypeName(t3_value)
FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (id) INNER JOIN test_table_join_3 AS t3 USING(id);
FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (id) INNER JOIN test_table_join_3 AS t3 USING(id)
ORDER BY ALL;
Join_1_Value_0 String Join_2_Value_0 String Join_3_Value_0 String
Join_1_Value_1 String Join_2_Value_1 String Join_3_Value_1 String
SELECT '--';
@@ -374,19 +400,21 @@ SELECT 'First JOIN FULL second JOIN LEFT';
First JOIN FULL second JOIN LEFT
SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value),
t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value), t3.id AS t3_id, toTypeName(t3_id), t3.value AS t3_value, toTypeName(t3_value)
FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (id) LEFT JOIN test_table_join_3 AS t3 USING(id);
FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (id) LEFT JOIN test_table_join_3 AS t3 USING(id)
ORDER BY ALL;
0 UInt64 0 UInt64 String 3 UInt64 Join_2_Value_3 String 0 UInt64 Join_3_Value_0 String
0 UInt64 0 UInt64 Join_1_Value_0 String 0 UInt64 Join_2_Value_0 String 0 UInt64 Join_3_Value_0 String
1 UInt64 1 UInt64 Join_1_Value_1 String 1 UInt64 Join_2_Value_1 String 1 UInt64 Join_3_Value_1 String
2 UInt64 2 UInt64 Join_1_Value_2 String 0 UInt64 String 0 UInt64 String
0 UInt64 0 UInt64 String 3 UInt64 Join_2_Value_3 String 0 UInt64 Join_3_Value_0 String
SELECT '--';
--
SELECT t1.value AS t1_value, toTypeName(t1_value), t2.value AS t2_value, toTypeName(t2_value), t3.value AS t3_value, toTypeName(t3_value)
FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (id) LEFT JOIN test_table_join_3 AS t3 USING(id);
FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (id) LEFT JOIN test_table_join_3 AS t3 USING(id)
ORDER BY ALL;
String Join_2_Value_3 String String
Join_1_Value_0 String Join_2_Value_0 String Join_3_Value_0 String
Join_1_Value_1 String Join_2_Value_1 String Join_3_Value_1 String
Join_1_Value_2 String String String
String Join_2_Value_3 String String
SELECT '--';
--
SELECT 1 FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (id) LEFT JOIN test_table_join_3 AS t3 USING(id);
@@ -399,18 +427,20 @@ SELECT 'First JOIN FULL second JOIN RIGHT';
First JOIN FULL second JOIN RIGHT
SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value),
t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value), t3.id AS t3_id, toTypeName(t3_id), t3.value AS t3_value, toTypeName(t3_value)
FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (id) RIGHT JOIN test_table_join_3 AS t3 USING(id);
FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (id) RIGHT JOIN test_table_join_3 AS t3 USING(id)
ORDER BY ALL;
0 UInt64 0 UInt64 String 3 UInt64 Join_2_Value_3 String 0 UInt64 Join_3_Value_0 String
0 UInt64 0 UInt64 Join_1_Value_0 String 0 UInt64 Join_2_Value_0 String 0 UInt64 Join_3_Value_0 String
1 UInt64 1 UInt64 Join_1_Value_1 String 1 UInt64 Join_2_Value_1 String 1 UInt64 Join_3_Value_1 String
0 UInt64 0 UInt64 String 3 UInt64 Join_2_Value_3 String 0 UInt64 Join_3_Value_0 String
4 UInt64 0 UInt64 String 0 UInt64 String 4 UInt64 Join_3_Value_4 String
SELECT '--';
--
SELECT t1.value AS t1_value, toTypeName(t1_value), t2.value AS t2_value, toTypeName(t2_value), t3.value AS t3_value, toTypeName(t3_value)
FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (id) RIGHT JOIN test_table_join_3 AS t3 USING(id);
FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (id) RIGHT JOIN test_table_join_3 AS t3 USING(id)
ORDER BY ALL;
String String Join_3_Value_4 String
Join_1_Value_0 String Join_2_Value_0 String Join_3_Value_0 String
Join_1_Value_1 String Join_2_Value_1 String Join_3_Value_1 String
String String Join_3_Value_4 String
SELECT '--';
--
SELECT 1 FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (id) RIGHT JOIN test_table_join_3 AS t3 USING(id);
@@ -422,21 +452,23 @@ SELECT 'First JOIN FULL second JOIN FULL';
First JOIN FULL second JOIN FULL
SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value),
t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value), t3.id AS t3_id, toTypeName(t3_id), t3.value AS t3_value, toTypeName(t3_value)
FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (id) FULL JOIN test_table_join_3 AS t3 USING(id);
FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (id) FULL JOIN test_table_join_3 AS t3 USING(id)
ORDER BY ALL;
0 UInt64 0 UInt64 String 0 UInt64 String 4 UInt64 Join_3_Value_4 String
0 UInt64 0 UInt64 String 3 UInt64 Join_2_Value_3 String 0 UInt64 Join_3_Value_0 String
0 UInt64 0 UInt64 Join_1_Value_0 String 0 UInt64 Join_2_Value_0 String 0 UInt64 Join_3_Value_0 String
1 UInt64 1 UInt64 Join_1_Value_1 String 1 UInt64 Join_2_Value_1 String 1 UInt64 Join_3_Value_1 String
2 UInt64 2 UInt64 Join_1_Value_2 String 0 UInt64 String 0 UInt64 String
0 UInt64 0 UInt64 String 3 UInt64 Join_2_Value_3 String 0 UInt64 Join_3_Value_0 String
0 UInt64 0 UInt64 String 0 UInt64 String 4 UInt64 Join_3_Value_4 String
SELECT '--';
--
SELECT t1.value AS t1_value, toTypeName(t1_value), t2.value AS t2_value, toTypeName(t2_value), t3.value AS t3_value, toTypeName(t3_value)
FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (id) FULL JOIN test_table_join_3 AS t3 USING(id);
FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (id) FULL JOIN test_table_join_3 AS t3 USING(id)
ORDER BY ALL;
String String Join_3_Value_4 String
String Join_2_Value_3 String String
Join_1_Value_0 String Join_2_Value_0 String Join_3_Value_0 String
Join_1_Value_1 String Join_2_Value_1 String Join_3_Value_1 String
Join_1_Value_2 String String String
String Join_2_Value_3 String String
String String Join_3_Value_4 String
SELECT '--';
--
SELECT 1 FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (id) FULL JOIN test_table_join_3 AS t3 USING(id);
@@ -64,12 +64,14 @@ SELECT 'First JOIN {{ first_join_type }} second JOIN {{ second_join_type }}';

SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value),
t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value), t3.id AS t3_id, toTypeName(t3_id), t3.value AS t3_value, toTypeName(t3_value)
FROM test_table_join_1 AS t1 {{ first_join_type }} JOIN test_table_join_2 AS t2 USING (id) {{ second_join_type }} JOIN test_table_join_3 AS t3 USING(id);
FROM test_table_join_1 AS t1 {{ first_join_type }} JOIN test_table_join_2 AS t2 USING (id) {{ second_join_type }} JOIN test_table_join_3 AS t3 USING(id)
ORDER BY ALL;

SELECT '--';

SELECT t1.value AS t1_value, toTypeName(t1_value), t2.value AS t2_value, toTypeName(t2_value), t3.value AS t3_value, toTypeName(t3_value)
FROM test_table_join_1 AS t1 {{ first_join_type }} JOIN test_table_join_2 AS t2 USING (id) {{ second_join_type }} JOIN test_table_join_3 AS t3 USING(id);
FROM test_table_join_1 AS t1 {{ first_join_type }} JOIN test_table_join_2 AS t2 USING (id) {{ second_join_type }} JOIN test_table_join_3 AS t3 USING(id)
ORDER BY ALL;

SELECT '--';
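Every hunk in this pair of files makes the same change: the bare `USING(id)` queries gain an `ORDER BY ALL`, so the reference output no longer depends on the row order the join happens to produce. A minimal illustration of the clause, reusing one of the test tables above:

```sql
-- ORDER BY ALL sorts by every column of the SELECT list, left to right,
-- which makes test output deterministic regardless of the join algorithm.
SELECT id, value
FROM test_table_join_1
ORDER BY ALL; -- equivalent to ORDER BY id, value
```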
@@ -1,3 +1,6 @@
SELECT arrayWithConstant(96142475, ['qMUF']); -- { serverError TOO_LARGE_ARRAY_SIZE }
SELECT arrayWithConstant(100000000, materialize([[[[[[[[[['Hello, world!']]]]]]]]]])); -- { serverError TOO_LARGE_ARRAY_SIZE }
SELECT length(arrayWithConstant(10000000, materialize([[[[[[[[[['Hello world']]]]]]]]]])));

CREATE TEMPORARY TABLE args (value Array(Int)) ENGINE=Memory AS SELECT [1, 1, 1, 1] as value FROM numbers(1, 100);
SELECT length(arrayWithConstant(1000000, value)) FROM args FORMAT NULL;
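For context, `arrayWithConstant(N, x)` builds an array of `N` copies of `x`; the new error cases appear to assert that the size guard accounts for the footprint of a non-trivial constant, not just the element count. A small well-formed call for comparison:

```sql
SELECT arrayWithConstant(3, 'x'); -- returns ['x','x','x']
```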
tests/queries/0_stateless/03250_ephemeral_comment.sql (new file, 11 lines)
@@ -0,0 +1,11 @@
drop table if exists test;
CREATE TABLE test (
    `start_s` UInt32 EPHEMERAL COMMENT 'start UNIX time',
    `start_us` UInt16 EPHEMERAL COMMENT 'start microseconds',
    `finish_s` UInt32 EPHEMERAL COMMENT 'finish UNIX time',
    `finish_us` UInt16 EPHEMERAL COMMENT 'finish microseconds',
    `captured` DateTime MATERIALIZED fromUnixTimestamp(start_s),
    `duration` Decimal32(6) MATERIALIZED finish_s - start_s + (finish_us - start_us)/1000000
)
ENGINE Null;
drop table if exists test;
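The new test exercises `EPHEMERAL` columns combined with a `COMMENT` clause. One way to confirm the comments were stored (run before the final `drop`; a sketch, not part of the test itself):

```sql
SELECT name, comment
FROM system.columns
WHERE database = currentDatabase() AND table = 'test';
```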
@@ -244,7 +244,10 @@ Deduplication
DefaultTableEngine
DelayedInserts
DeliveryTag
Deltalake
DeltaLake
deltalakeCluster
deltaLakeCluster
Denormalize
DestroyAggregatesThreads
DestroyAggregatesThreadsActive
@@ -377,10 +380,15 @@ Homebrew's
HorizontalDivide
Hostname
HouseOps
hudi
Hudi
hudiCluster
HudiCluster
HyperLogLog
Hypot
IANA
icebergCluster
IcebergCluster
IDE
IDEs
IDNA
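These dictionary additions correspond to the new `deltaLakeCluster`, `hudiCluster`, and `icebergCluster` table functions referenced elsewhere in this changeset. A hypothetical usage sketch, assuming the signature mirrors the existing `s3Cluster` function (cluster name first, then the storage arguments; the URL and credentials below are placeholders):

```sql
SELECT count()
FROM deltaLakeCluster('default', 'https://bucket.s3.amazonaws.com/table_root/', 'KEY', 'SECRET');
```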