Merge remote-tracking branch 'origin/master' into tmp

commit 175404338d
@@ -175,7 +175,7 @@ case "$stage" in
         # Lost connection to the server. This probably means that the server died
         # with abort.
         echo "failure" > status.txt
-        if ! grep -ao "Received signal.*\|Logical error.*\|Assertion.*failed\|Failed assertion.*\|.*runtime error: .*" server.log > description.txt
+        if ! grep -ao "Received signal.*\|Logical error.*\|Assertion.*failed\|Failed assertion.*\|.*runtime error: .*\|.*is located.*" server.log > description.txt
         then
             echo "Lost connection to server. See the logs" > description.txt
         fi
@@ -110,17 +110,17 @@ You can pass parameters to `clickhouse-client` (all parameters have a default va

 ### Command Line Options {#command-line-options}

-- `--host, -h` -– The server name, ‘localhost’ by default. You can use either the name or the IPv4 or IPv6 address.
+- `--host, -h` – The server name, ‘localhost’ by default. You can use either the name or the IPv4 or IPv6 address.
 - `--port` – The port to connect to. Default value: 9000. Note that the HTTP interface and the native interface use different ports.
 - `--user, -u` – The username. Default value: default.
 - `--password` – The password. Default value: empty string.
 - `--query, -q` – The query to process when using non-interactive mode. You must specify either `query` or `queries-file` option.
-- `--queries-file, -qf` - file path with queries to execute. You must specify either `query` or `queries-file` option.
+- `--queries-file, -qf` – File path with queries to execute. You must specify either `query` or `queries-file` option.
 - `--database, -d` – Select the current default database. Default value: the current database from the server settings (‘default’ by default).
 - `--multiline, -m` – If specified, allow multiline queries (do not send the query on Enter).
 - `--multiquery, -n` – If specified, allow processing multiple queries separated by semicolons.
 - `--format, -f` – Use the specified default format to output the result.
-- `--vertical, -E` – If specified, use the Vertical format by default to output the result. This is the same as ‘–format=Vertical’. In this format, each value is printed on a separate line, which is helpful when displaying wide tables.
+- `--vertical, -E` – If specified, use the [Vertical format](../interfaces/formats.md#vertical) by default to output the result. This is the same as `--format=Vertical`. In this format, each value is printed on a separate line, which is helpful when displaying wide tables.
 - `--time, -t` – If specified, print the query execution time to ‘stderr’ in non-interactive mode.
 - `--stacktrace` – If specified, also print the stack trace if an exception occurs.
 - `--config-file` – The name of the configuration file.
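For illustration, here is a minimal sketch of how a few of these options combine on the command line. It assumes a local server with the default user and port; `SELECT version()` is only a placeholder query.

```bash
# Run a one-off query in non-interactive mode against a local server.
clickhouse-client --host localhost --port 9000 --user default --query "SELECT version()"

# The same query, printed in the Vertical format, with the execution time reported on stderr.
clickhouse-client --query "SELECT version()" --format Vertical --time
```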
@@ -348,4 +348,77 @@ Returns a [quota](../../operations/quotas.md) consumption for all users or for c
SHOW [CURRENT] QUOTA
```

## SHOW CLUSTER(s) {#show-cluster-statement}

Returns a list of clusters. All available clusters are listed in the [system.clusters](../../operations/system-tables/clusters.md) table.

!!! info "Note"
    The `SHOW CLUSTER name` query displays the contents of the `system.clusters` table for this cluster.

### Syntax {#show-cluster-syntax}

``` sql
SHOW CLUSTER '<name>'
SHOW CLUSTERS [LIKE|NOT LIKE '<pattern>'] [LIMIT <N>]
```

### Examples

Query:

``` sql
SHOW CLUSTERS;
```

Result:

```text
┌─cluster──────────────────────────────────────┐
│ test_cluster_two_shards                      │
│ test_cluster_two_shards_internal_replication │
│ test_cluster_two_shards_localhost            │
│ test_shard_localhost                         │
│ test_shard_localhost_secure                  │
│ test_unavailable_shard                       │
└──────────────────────────────────────────────┘
```

Query:

``` sql
SHOW CLUSTERS LIKE 'test%' LIMIT 1;
```

Result:

```text
┌─cluster─────────────────┐
│ test_cluster_two_shards │
└─────────────────────────┘
```

Query:

``` sql
SHOW CLUSTER 'test_shard_localhost' FORMAT Vertical;
```

Result:

```text
Row 1:
──────
cluster:                 test_shard_localhost
shard_num:               1
shard_weight:            1
replica_num:             1
host_name:               localhost
host_address:            127.0.0.1
port:                    9000
is_local:                1
user:                    default
default_database:
errors_count:            0
estimated_recovery_time: 0
```

[Original article](https://clickhouse.tech/docs/en/query_language/show/) <!--hide-->
@@ -116,18 +116,18 @@ $ clickhouse-client --param_tbl="numbers" --param_db="system" --param_col="numbe

 ### Command Line Parameters {#parametry-komandnoi-stroki}

-- `--host, -h` — the server name, localhost by default. You can use either the name or an IPv4 or IPv6 address.
-- `--port` — the port to connect to, 9000 by default. Note that different ports are used for the HTTP and the native interface.
-- `--user, -u` — the username, default by default.
+- `--host, -h` — the server name, ‘localhost’ by default. You can use either the name or an IPv4 or IPv6 address.
+- `--port` — the port to connect to, 9000 by default. Note that the HTTP interface and the native interface use different ports.
+- `--user, -u` — the username, ‘default’ by default.
 - `--password` — the password, an empty string by default.
 - `--query, -q` — the query to execute when using non-interactive mode.
-- `--database, -d` — select the current default database; by default, the current database from the server settings (the default database by default).
+- `--database, -d` — select the current default database. If not specified, the value is taken from the server settings (the ‘default’ database by default).
 - `--multiline, -m` — if specified, allow multiline queries (do not send the query on Enter).
 - `--multiquery, -n` — if specified, allow running multiple queries separated by semicolons.
 - `--format, -f` — use the specified format as the default output format.
-- `--vertical, -E` — if specified, use the Vertical format by default to output the result. This is the same as –format=Vertical. In this format, each value is printed on a separate line, which is helpful when displaying wide tables.
-- `--time, -t` — if specified, print the query execution time to stderr in non-interactive mode.
-- `--stacktrace` — if specified, also print the stack trace if an exception occurs.
+- `--vertical, -E` — if specified, use the [Vertical](../interfaces/formats.md#vertical) format by default to output the result. This is the same as `--format=Vertical`. In this format, each value is printed on a separate line, which is helpful when displaying wide tables.
+- `--time, -t` — if specified, print the query execution time to ‘stderr’ in non-interactive mode.
+- `--stacktrace` — if specified, also print the stack trace if an exception occurs.
 - `--config-file` — the name of the configuration file.
 - `--secure` — if specified, a secure connection to the server will be used.
 - `--param_<name>` — the value of a parameter for a [query with parameters](#cli-queries-with-parameters).
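To illustrate the last option, here is a minimal sketch of a parameterized query, following the example shown in the hunk context above; the parameter values (`system`, `numbers`, `number`) come from that example.

```bash
# Substitute database, table and column names through --param_<name> placeholders.
clickhouse-client --param_tbl="numbers" --param_db="system" --param_col="number" \
    --query "SELECT {col:Identifier} FROM {db:Identifier}.{tbl:Identifier} LIMIT 10"
```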
@@ -115,6 +115,10 @@ SELECT topLevelDomain('svn+ssh://www.some.svn-hosting.com:80/repo/trunk')

For example, `cutToFirstSignificantSubdomain('https://news.yandex.com.tr/') = 'yandex.com.tr'`.

### port(URL[, default_port = 0]) {#port}

Returns the port, or the value of `default_port` if the URL contains no port (or if an invalid URL is passed).

### path {#path}

Returns the path. Example: `/top/news.html`. The path does not include the query string.
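A minimal sketch of both functions on a made-up URL, run through `clickhouse-client` (any recent server will do):

```bash
clickhouse-client --query "SELECT port('http://example.com:8123/top/news.html?q=1') AS p, path('http://example.com:8123/top/news.html?q=1') AS pth"
# Expected output: 8123    /top/news.html
```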
@@ -65,6 +65,16 @@ static IColumn & extractNestedColumn(IColumn & column)
    return assert_cast<ColumnMap &>(column).getNestedColumn();
}

DataTypePtr DataTypeMap::tryGetSubcolumnType(const String & subcolumn_name) const
{
    return nested->tryGetSubcolumnType(subcolumn_name);
}

ColumnPtr DataTypeMap::getSubcolumn(const String & subcolumn_name, const IColumn & column) const
{
    return nested->getSubcolumn(subcolumn_name, extractNestedColumn(column));
}

void DataTypeMap::serializeBinary(const Field & field, WriteBuffer & ostr) const
{
    const auto & map = get<const Map &>(field);
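Both new methods delegate subcolumn handling to the nested array-of-tuples representation of `Map`, which is what makes the `.keys` and `.values` subcolumns readable; the subcolumns tests updated later in this commit exercise exactly that. Here is a minimal sketch of the same behaviour from the client; the table name `t_map_demo` is made up, and `allow_experimental_map_type` is required, as in the tests.

```bash
clickhouse-client --allow_experimental_map_type 1 --multiquery --query "
    DROP TABLE IF EXISTS t_map_demo;
    CREATE TABLE t_map_demo (m Map(String, UInt32)) ENGINE = Memory;
    INSERT INTO t_map_demo VALUES (map('foo', 1, 'bar', 42));
    SELECT m.keys, m.values FROM t_map_demo;  -- expected: ['foo','bar'] and [1,42]
    DROP TABLE t_map_demo;"
```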
@@ -32,6 +32,9 @@ public:

    bool canBeInsideNullable() const override { return false; }

    DataTypePtr tryGetSubcolumnType(const String & subcolumn_name) const override;
    ColumnPtr getSubcolumn(const String & subcolumn_name, const IColumn & column) const override;

    void serializeBinary(const Field & field, WriteBuffer & ostr) const override;
    void deserializeBinary(Field & field, ReadBuffer & istr) const override;
    void serializeBinary(const IColumn & column, size_t row_num, WriteBuffer & ostr) const override;
@@ -45,7 +48,6 @@ public:
    void serializeTextCSV(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings &) const override;
    void deserializeTextCSV(IColumn & column, ReadBuffer & istr, const FormatSettings &) const override;

    void enumerateStreamsImpl(const StreamCallback & callback, SubstreamPath & path) const override;

    void serializeBinaryBulkStatePrefixImpl(
@@ -10,7 +10,6 @@
#include <Parsers/ASTInsertQuery.h>
#include <Parsers/ASTIdentifier.h>
#include <Parsers/ASTFunction.h>
#include <Parsers/queryToString.h>

namespace DB
{
@@ -89,7 +88,6 @@ void ColumnAliasesMatcher::visit(ASTIdentifier & node, ASTPtr & ast, Data & data
        if (col.default_desc.kind == ColumnDefaultKind::Alias)
        {
            ast = addTypeConversionToAST(col.default_desc.expression->clone(), col.type->getName(), data.columns.getAll(), data.context);
            auto str = queryToString(ast);
            // revisit ast to track recursive alias columns
            Visitor(data).visit(ast);
        }
@@ -467,6 +467,7 @@ std::vector<TableNeededColumns> normalizeColumnNamesExtractNeeded(

     for (ASTIdentifier * ident : identifiers)
     {
         bool got_alias = aliases.count(ident->name());
         bool allow_ambiguous = got_alias; /// allow ambiguous column overridden by an alias

@@ -475,8 +476,19 @@ std::vector<TableNeededColumns> normalizeColumnNamesExtractNeeded(
         if (!ident->isShort())
         {
             if (got_alias)
-                throw Exception("Alias clashes with qualified column '" + ident->name() + "'", ErrorCodes::AMBIGUOUS_COLUMN_NAME);
+            {
+                auto alias = aliases.find(ident->name())->second;
+                auto alias_table = IdentifierSemantic::getTableName(alias->ptr());
+                bool alias_equals_column_name = false;
+                if ((!ident->isShort() && alias->ptr()->getColumnNameWithoutAlias() == ident->getColumnNameWithoutAlias())
+                    || (alias_table == IdentifierSemantic::getTableName(ident->ptr())
+                        && ident->shortName() == alias->as<ASTIdentifier>()->shortName()))
+                {
+                    alias_equals_column_name = true;
+                }
+                if (!alias_equals_column_name)
+                    throw Exception("Alias clashes with qualified column '" + ident->name() + "'", ErrorCodes::AMBIGUOUS_COLUMN_NAME);
+            }
             String short_name = ident->shortName();
             String original_long_name;
             if (public_identifiers.count(ident))
@@ -80,8 +80,12 @@ void ASTIdentifier::setShortName(const String & new_name)
    name_parts = {new_name};

    bool special = semantic->special;
    // Keep the semantic info here, such as the table name.
    auto table = semantic->table;

    *semantic = IdentifierSemanticImpl();
    semantic->special = special;
    semantic->table = table;
}

const String & ASTIdentifier::name() const
@@ -1,5 +1,5 @@
 122

-Table dictdb.dict_invalidate doesn\'t exist
+Table dictdb_01041_01040.dict_invalidate doesn\'t exist

 133
@@ -19,3 +19,12 @@ baz
0
1
2
====map====
['a','b']
['a','c']
['b','c']
[1,2]
[3,4]
[5,6]
4
4
@@ -42,3 +42,22 @@ SELECT ProfileEvents.Values[indexOf(ProfileEvents.Names, 'FileOpen')]
FROM system.query_log
WHERE (type = 'QueryFinish') AND (lower(query) LIKE lower('SELECT n.null FROM %t_nul%'))
    AND event_time > now() - INTERVAL 10 SECOND AND current_database = currentDatabase();

SELECT '====map====';
SET allow_experimental_map_type = 1;
DROP TABLE IF EXISTS t_map;
CREATE TABLE t_map (m Map(String, UInt32)) ENGINE = MergeTree ORDER BY tuple() SETTINGS min_bytes_for_wide_part = 0;
INSERT INTO t_map VALUES (map('a', 1, 'b', 2)) (map('a', 3, 'c', 4)), (map('b', 5, 'c', 6));

--- will read 4 files: keys.bin, keys.mrk2, size0.bin, size0.mrk2
SYSTEM DROP MARK CACHE;
SELECT m.keys FROM t_map;

SYSTEM DROP MARK CACHE;
SELECT m.values FROM t_map;

SYSTEM FLUSH LOGS;
SELECT ProfileEvents.Values[indexOf(ProfileEvents.Names, 'FileOpen')]
FROM system.query_log
WHERE (type = 'QueryFinish') AND (lower(query) LIKE lower('SELECT m.% FROM %t_map%'))
    AND event_time > now() - INTERVAL 10 SECOND AND current_database = currentDatabase();
@@ -1,18 +1,18 @@
 Log
-100 [1,2,3] [[[1,2],[],[4]],[[5,6],[7,8]],[[]]] [1,NULL,2] ('foo',200)
-100 0 [1,2,3] 3 [[[1,2],[],[4]],[[5,6],[7,8]],[[]]] 3 [3,2,1] [[2,0,1],[2,2],[0]] [1,NULL,2] 3 [0,1,0] ('foo',200) foo 200
+100 [1,2,3] [[[1,2],[],[4]],[[5,6],[7,8]],[[]]] [1,NULL,2] ('foo',200) {'foo':1,'bar':42}
+100 0 [1,2,3] 3 [[[1,2],[],[4]],[[5,6],[7,8]],[[]]] 3 [3,2,1] [[2,0,1],[2,2],[0]] [1,NULL,2] 3 [0,1,0] ('foo',200) foo 200 {'foo':1,'bar':42} ['foo','bar'] [1,42]
 TinyLog
-100 [1,2,3] [[[1,2],[],[4]],[[5,6],[7,8]],[[]]] [1,NULL,2] ('foo',200)
-100 0 [1,2,3] 3 [[[1,2],[],[4]],[[5,6],[7,8]],[[]]] 3 [3,2,1] [[2,0,1],[2,2],[0]] [1,NULL,2] 3 [0,1,0] ('foo',200) foo 200
+100 [1,2,3] [[[1,2],[],[4]],[[5,6],[7,8]],[[]]] [1,NULL,2] ('foo',200) {'foo':1,'bar':42}
+100 0 [1,2,3] 3 [[[1,2],[],[4]],[[5,6],[7,8]],[[]]] 3 [3,2,1] [[2,0,1],[2,2],[0]] [1,NULL,2] 3 [0,1,0] ('foo',200) foo 200 {'foo':1,'bar':42} ['foo','bar'] [1,42]
 Memory
-100 [1,2,3] [[[1,2],[],[4]],[[5,6],[7,8]],[[]]] [1,NULL,2] ('foo',200)
-100 0 [1,2,3] 3 [[[1,2],[],[4]],[[5,6],[7,8]],[[]]] 3 [3,2,1] [[2,0,1],[2,2],[0]] [1,NULL,2] 3 [0,1,0] ('foo',200) foo 200
+100 [1,2,3] [[[1,2],[],[4]],[[5,6],[7,8]],[[]]] [1,NULL,2] ('foo',200) {'foo':1,'bar':42}
+100 0 [1,2,3] 3 [[[1,2],[],[4]],[[5,6],[7,8]],[[]]] 3 [3,2,1] [[2,0,1],[2,2],[0]] [1,NULL,2] 3 [0,1,0] ('foo',200) foo 200 {'foo':1,'bar':42} ['foo','bar'] [1,42]
 MergeTree ORDER BY tuple() SETTINGS min_bytes_for_compact_part='10M'
-100 [1,2,3] [[[1,2],[],[4]],[[5,6],[7,8]],[[]]] [1,NULL,2] ('foo',200)
-100 0 [1,2,3] 3 [[[1,2],[],[4]],[[5,6],[7,8]],[[]]] 3 [3,2,1] [[2,0,1],[2,2],[0]] [1,NULL,2] 3 [0,1,0] ('foo',200) foo 200
+100 [1,2,3] [[[1,2],[],[4]],[[5,6],[7,8]],[[]]] [1,NULL,2] ('foo',200) {'foo':1,'bar':42}
+100 0 [1,2,3] 3 [[[1,2],[],[4]],[[5,6],[7,8]],[[]]] 3 [3,2,1] [[2,0,1],[2,2],[0]] [1,NULL,2] 3 [0,1,0] ('foo',200) foo 200 {'foo':1,'bar':42} ['foo','bar'] [1,42]
 MergeTree ORDER BY tuple() SETTINGS min_bytes_for_wide_part='10M'
-100 [1,2,3] [[[1,2],[],[4]],[[5,6],[7,8]],[[]]] [1,NULL,2] ('foo',200)
-100 0 [1,2,3] 3 [[[1,2],[],[4]],[[5,6],[7,8]],[[]]] 3 [3,2,1] [[2,0,1],[2,2],[0]] [1,NULL,2] 3 [0,1,0] ('foo',200) foo 200
+100 [1,2,3] [[[1,2],[],[4]],[[5,6],[7,8]],[[]]] [1,NULL,2] ('foo',200) {'foo':1,'bar':42}
+100 0 [1,2,3] 3 [[[1,2],[],[4]],[[5,6],[7,8]],[[]]] 3 [3,2,1] [[2,0,1],[2,2],[0]] [1,NULL,2] 3 [0,1,0] ('foo',200) foo 200 {'foo':1,'bar':42} ['foo','bar'] [1,42]
 MergeTree ORDER BY tuple() SETTINGS min_bytes_for_wide_part=0
-100 [1,2,3] [[[1,2],[],[4]],[[5,6],[7,8]],[[]]] [1,NULL,2] ('foo',200)
-100 0 [1,2,3] 3 [[[1,2],[],[4]],[[5,6],[7,8]],[[]]] 3 [3,2,1] [[2,0,1],[2,2],[0]] [1,NULL,2] 3 [0,1,0] ('foo',200) foo 200
+100 [1,2,3] [[[1,2],[],[4]],[[5,6],[7,8]],[[]]] [1,NULL,2] ('foo',200) {'foo':1,'bar':42}
+100 0 [1,2,3] 3 [[[1,2],[],[4]],[[5,6],[7,8]],[[]]] 3 [3,2,1] [[2,0,1],[2,2],[0]] [1,NULL,2] 3 [0,1,0] ('foo',200) foo 200 {'foo':1,'bar':42} ['foo','bar'] [1,42]
@@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 set -e

 create_query="CREATE TABLE subcolumns(n Nullable(UInt32), a1 Array(UInt32),\
-    a2 Array(Array(Array(UInt32))), a3 Array(Nullable(UInt32)), t Tuple(s String, v UInt32))"
+    a2 Array(Array(Array(UInt32))), a3 Array(Nullable(UInt32)), t Tuple(s String, v UInt32), m Map(String, UInt32))"

 # "StripeLog"
 declare -a ENGINES=("Log" "TinyLog" "Memory" \
@@ -18,8 +18,8 @@ declare -a ENGINES=("Log" "TinyLog" "Memory" \
 for engine in "${ENGINES[@]}"; do
     echo $engine
     $CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS subcolumns"
-    $CLICKHOUSE_CLIENT --query "$create_query ENGINE = $engine"
-    $CLICKHOUSE_CLIENT --query "INSERT INTO subcolumns VALUES (100, [1, 2, 3], [[[1, 2], [], [4]], [[5, 6], [7, 8]], [[]]], [1, NULL, 2], ('foo', 200))"
+    $CLICKHOUSE_CLIENT --query "$create_query ENGINE = $engine" --allow_experimental_map_type 1
+    $CLICKHOUSE_CLIENT --query "INSERT INTO subcolumns VALUES (100, [1, 2, 3], [[[1, 2], [], [4]], [[5, 6], [7, 8]], [[]]], [1, NULL, 2], ('foo', 200), map('foo', 1, 'bar', 42))"
     $CLICKHOUSE_CLIENT --query "SELECT * FROM subcolumns"
-    $CLICKHOUSE_CLIENT --query "SELECT n, n.null, a1, a1.size0, a2, a2.size0, a2.size1, a2.size2, a3, a3.size0, a3.null, t, t.s, t.v FROM subcolumns"
+    $CLICKHOUSE_CLIENT --query "SELECT n, n.null, a1, a1.size0, a2, a2.size0, a2.size1, a2.size2, a3, a3.size0, a3.null, t, t.s, t.v, m, m.keys, m.values FROM subcolumns"
 done
@@ -0,0 +1,52 @@
drop database if exists test_01600;
create database test_01600;

CREATE TABLE test_01600.base
(
    `id` UInt64,
    `id2` UInt64,
    `d` UInt64,
    `value` UInt64
)
ENGINE=MergeTree()
PARTITION BY d
ORDER BY (id,id2,d);

CREATE TABLE test_01600.derived1
(
    `id1` UInt64,
    `d1` UInt64,
    `value1` UInt64
)
ENGINE = MergeTree()
PARTITION BY d1
ORDER BY (id1, d1)
;

CREATE TABLE test_01600.derived2
(
    `id2` UInt64,
    `d2` UInt64,
    `value2` UInt64
)
ENGINE = MergeTree()
PARTITION BY d2
ORDER BY (id2, d2)
;

select
    base.id as `base.id`,
    derived2.value2 as `derived2.value2`,
    derived1.value1 as `derived1.value1`
from test_01600.base as base
left join test_01600.derived2 as derived2 on base.id2 = derived2.id2
left join test_01600.derived1 as derived1 on base.id = derived1.id1;

SELECT
    base.id AS `base.id`,
    derived1.value1 AS `derived1.value1`
FROM test_01600.base AS base
LEFT JOIN test_01600.derived1 AS derived1 ON base.id = derived1.id1;

drop database test_01600;