mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-27 18:12:02 +00:00)

rename tests

This commit is contained in:
parent 14be2f31f5
commit 6a3cb0d9ea
302 tests/queries/0_stateless/02010_lc_native.python (Executable file)
@@ -0,0 +1,302 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import socket
import os

CLICKHOUSE_HOST = os.environ.get('CLICKHOUSE_HOST', '127.0.0.1')
CLICKHOUSE_PORT = int(os.environ.get('CLICKHOUSE_PORT_TCP', '9000'))
CLICKHOUSE_DATABASE = os.environ.get('CLICKHOUSE_DATABASE', 'default')

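# Lengths, counts, and packet types below are VarUInt-encoded (unsigned
# LEB128): seven payload bits per byte, least-significant group first, with
# the high bit set on every byte except the last. For example, 300
# (0b100101100) is sent as 0xAC 0x02.
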
def writeVarUInt(x, ba):
    for _ in range(0, 9):
        byte = x & 0x7F
        if x > 0x7F:
            byte |= 0x80

        ba.append(byte)

        x >>= 7
        if x == 0:
            return


def writeStringBinary(s, ba):
    b = bytes(s, 'utf-8')
    writeVarUInt(len(b), ba)  # the length prefix counts bytes, not characters
    ba.extend(b)

def readStrict(s, size=1):
    res = bytearray()
    while size:
        cur = s.recv(size)
        if not cur:
            raise RuntimeError("Socket is closed")
        size -= len(cur)
        res.extend(cur)

    return res


def readUInt(s, size=1):
    res = readStrict(s, size)
    val = 0
    for i in range(len(res)):
        val += res[i] << (i * 8)
    return val

def readUInt8(s):
    return readUInt(s)

def readUInt16(s):
    return readUInt(s, 2)

def readUInt32(s):
    return readUInt(s, 4)

def readUInt64(s):
    return readUInt(s, 8)

def readVarUInt(s):
    x = 0
    for i in range(9):
        byte = readStrict(s)[0]
        x |= (byte & 0x7F) << (7 * i)

        if not byte & 0x80:
            return x

    return x


def readStringBinary(s):
    size = readVarUInt(s)
    data = readStrict(s, size)
    return data.decode('utf-8')

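# The session flow exercised by the tests below: send Hello, read the server
# Hello, send a Query packet, send an empty Data block to terminate external
# tables, read the header block describing the target columns, send the data
# block(s), send a final empty block, then expect EndOfStream (5) or an
# Exception (2) packet from the server.
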
def sendHello(s):
    ba = bytearray()
    writeVarUInt(0, ba) # Hello
    writeStringBinary('simple native protocol', ba) # client name
    writeVarUInt(21, ba) # client version major
    writeVarUInt(9, ba) # client version minor
    writeVarUInt(54449, ba) # client revision
    writeStringBinary('default', ba) # database
    writeStringBinary('default', ba) # user
    writeStringBinary('', ba) # pwd
    s.sendall(ba)

def receiveHello(s):
    p_type = readVarUInt(s)
    assert (p_type == 0) # Hello
    server_name = readStringBinary(s)
    # print("Server name: ", server_name)
    server_version_major = readVarUInt(s)
    # print("Major: ", server_version_major)
    server_version_minor = readVarUInt(s)
    # print("Minor: ", server_version_minor)
    server_revision = readVarUInt(s)
    # print("Revision: ", server_revision)
    server_timezone = readStringBinary(s)
    # print("Timezone: ", server_timezone)
    server_display_name = readStringBinary(s)
    # print("Display name: ", server_display_name)
    server_version_patch = readVarUInt(s)
    # print("Version patch: ", server_version_patch)

def serializeClientInfo(ba):
    writeStringBinary('default', ba) # initial_user
    writeStringBinary('123456', ba) # initial_query_id
    writeStringBinary('127.0.0.1:9000', ba) # initial_address
    ba.extend([0] * 8) # initial_query_start_time_microseconds
    ba.append(1) # TCP
    writeStringBinary('os_user', ba) # os_user
    writeStringBinary('client_hostname', ba) # client_hostname
    writeStringBinary('client_name', ba) # client_name
    writeVarUInt(21, ba) # client version major
    writeVarUInt(9, ba) # client version minor
    writeVarUInt(54449, ba) # client revision
    writeStringBinary('', ba) # quota_key
    writeVarUInt(0, ba) # distributed_depth
    writeVarUInt(1, ba) # client_version_patch
    ba.append(0) # No telemetry

def sendQuery(s, query):
    ba = bytearray()
    writeVarUInt(1, ba) # query
    writeStringBinary('123456', ba) # query_id

    ba.append(1) # INITIAL_QUERY

    # client info
    serializeClientInfo(ba)

    writeStringBinary('', ba) # No settings
    writeStringBinary('', ba) # No interserver secret
    writeVarUInt(2, ba) # Stage - Complete
    ba.append(0) # No compression
    writeStringBinary(query + ' settings input_format_defaults_for_omitted_fields=0', ba) # query, finally
    s.sendall(ba)

def serializeBlockInfo(ba):
    writeVarUInt(1, ba) # field 1
    ba.append(0) # is_overflows
    writeVarUInt(2, ba) # field 2
    ba.extend([0] * 4) # bucket_num
    writeVarUInt(0, ba) # end of field list

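# A Data packet is framed as: VarUInt packet type (2), the external table
# name (empty for ordinary data), the BlockInfo fields above, then the column
# count and row count, followed by name, type, and serialized data for each
# column.
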
def sendEmptyBlock(s):
    ba = bytearray()
    writeVarUInt(2, ba) # Data
    writeStringBinary('', ba) # external table name (empty)
    serializeBlockInfo(ba)
    writeVarUInt(0, ba) # columns
    writeVarUInt(0, ba) # rows
    s.sendall(ba)

def readHeader(s):
    readVarUInt(s) # Data
    readStringBinary(s) # external table name
    # BlockInfo
    readVarUInt(s) # field 1
    readUInt8(s) # is_overflows
    readVarUInt(s) # field 2
    readUInt32(s) # bucket_num
    readVarUInt(s) # end of field list
    columns = readVarUInt(s) # the column count comes first on the wire
    rows = readVarUInt(s)
    print("Rows {} Columns {}".format(rows, columns))
    for _ in range(columns):
        col_name = readStringBinary(s)
        type_name = readStringBinary(s)
        print("Column {} type {}".format(col_name, type_name))

def readException(s):
    assert(readVarUInt(s) == 2) # Exception
    code = readUInt32(s)
    name = readStringBinary(s)
    text = readStringBinary(s)
    readStringBinary(s) # trace
    assert(readUInt8(s) == 0) # has_nested
    print("code {}: {}".format(code, text.replace('DB::Exception:', '')))

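# A LowCardinality(String) column body consists of: a UInt64 keys
# serialization version (1 = SharedDictionariesWithAdditionalKeys), a UInt64
# of index serialization flags (low bits: index width, 3 = UInt64; bit 8:
# needs a global dictionary; bit 9: additional keys follow), the dictionary
# (UInt64 key count, then the keys), and the indexes (UInt64 count, then
# fixed-width values). The three tests below send one well-formed column and
# two deliberately malformed ones to check the server-side validation.
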
def insertValidLowCardinalityRow():
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.settimeout(30)
        s.connect((CLICKHOUSE_HOST, CLICKHOUSE_PORT))
        sendHello(s)
        receiveHello(s)
        sendQuery(s, 'insert into {}.tab format TSV'.format(CLICKHOUSE_DATABASE))

        # external tables
        sendEmptyBlock(s)
        readHeader(s)

        # Data
        ba = bytearray()
        writeVarUInt(2, ba) # Data
        writeStringBinary('', ba)
        serializeBlockInfo(ba)
        writeVarUInt(1, ba) # columns
        writeVarUInt(1, ba) # rows
        writeStringBinary('x', ba)
        writeStringBinary('LowCardinality(String)', ba)
        ba.extend([1] + [0] * 7) # SharedDictionariesWithAdditionalKeys
        ba.extend([3, 2] + [0] * 6) # indexes type: UInt64 [3], with additional keys [2]
        ba.extend([1] + [0] * 7) # num_keys in dict
        writeStringBinary('hello', ba) # key
        ba.extend([1] + [0] * 7) # num_indexes
        ba.extend([0] * 8) # UInt64 index (0 for 'hello')
        s.sendall(ba)

        # Fin block
        sendEmptyBlock(s)

        assert(readVarUInt(s) == 5) # End of stream
        s.close()

def insertLowCardinalityRowWithIndexOverflow():
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.settimeout(30)
        s.connect((CLICKHOUSE_HOST, CLICKHOUSE_PORT))
        sendHello(s)
        receiveHello(s)
        sendQuery(s, 'insert into {}.tab format TSV'.format(CLICKHOUSE_DATABASE))

        # external tables
        sendEmptyBlock(s)
        readHeader(s)

        # Data
        ba = bytearray()
        writeVarUInt(2, ba) # Data
        writeStringBinary('', ba)
        serializeBlockInfo(ba)
        writeVarUInt(1, ba) # columns
        writeVarUInt(1, ba) # rows
        writeStringBinary('x', ba)
        writeStringBinary('LowCardinality(String)', ba)
        ba.extend([1] + [0] * 7) # SharedDictionariesWithAdditionalKeys
        ba.extend([3, 2] + [0] * 6) # indexes type: UInt64 [3], with additional keys [2]
        ba.extend([1] + [0] * 7) # num_keys in dict
        writeStringBinary('hello', ba) # key
        ba.extend([1] + [0] * 7) # num_indexes
        ba.extend([0] * 7 + [1]) # UInt64 index (2**56, past the one-key dictionary)
        s.sendall(ba)

        readException(s)
        s.close()

def insertLowCardinalityRowWithIncorrectDictType():
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.settimeout(30)
        s.connect((CLICKHOUSE_HOST, CLICKHOUSE_PORT))
        sendHello(s)
        receiveHello(s)
        sendQuery(s, 'insert into {}.tab format TSV'.format(CLICKHOUSE_DATABASE))

        # external tables
        sendEmptyBlock(s)
        readHeader(s)

        # Data
        ba = bytearray()
        writeVarUInt(2, ba) # Data
        writeStringBinary('', ba)
        serializeBlockInfo(ba)
        writeVarUInt(1, ba) # columns
        writeVarUInt(1, ba) # rows
        writeStringBinary('x', ba)
        writeStringBinary('LowCardinality(String)', ba)
        ba.extend([1] + [0] * 7) # SharedDictionariesWithAdditionalKeys
        ba.extend([3, 3] + [0] * 6) # indexes type: UInt64 [3], with global dict and additional keys [1 + 2]
        ba.extend([1] + [0] * 7) # num_keys in dict
        writeStringBinary('hello', ba) # key
        ba.extend([1] + [0] * 7) # num_indexes
        ba.extend([0] * 8) # UInt64 index (valid; the dict type flags are what is wrong here)
        s.sendall(ba)

        readException(s)
        s.close()

def main():
    insertValidLowCardinalityRow()
    insertLowCardinalityRowWithIndexOverflow()
    insertLowCardinalityRowWithIncorrectDictType()

if __name__ == "__main__":
    main()
8 tests/queries/0_stateless/02010_lc_native.reference (Normal file)
@@ -0,0 +1,8 @@
Rows 0 Columns 1
Column x type LowCardinality(String)
Rows 0 Columns 1
Column x type LowCardinality(String)
code 117: Index for LowCardinality is out of range. Dictionary size is 1, but found index with value 72057594037927936
Rows 0 Columns 1
Column x type LowCardinality(String)
code 117: LowCardinality indexes serialization type for Native format cannot use global dictionary
13 tests/queries/0_stateless/02010_lc_native.sh (Executable file)
@@ -0,0 +1,13 @@
#!/usr/bin/env bash

CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh

$CLICKHOUSE_CLIENT -q "drop table if exists tab;"
$CLICKHOUSE_CLIENT -q "create table tab(x LowCardinality(String)) engine = MergeTree order by tuple();"

# We should have correct env vars from shell_config.sh to run this test
python3 "$CURDIR"/02010_lc_native.python

$CLICKHOUSE_CLIENT -q "drop table if exists tab;"
@@ -0,0 +1,4 @@
0 \N

0 \N
0 \N
@@ -0,0 +1,3 @@
set receive_timeout = '10', receive_data_timeout_ms = '10000', extremes = '1', allow_suspicious_low_cardinality_types = '1', force_primary_key = '1', join_use_nulls = '1', max_rows_to_read = '1', join_algorithm = 'partial_merge';

SELECT * FROM (SELECT dummy AS val FROM system.one) AS s1 ANY LEFT JOIN (SELECT toLowCardinality(dummy) AS rval FROM system.one) AS s2 ON (val + 9223372036854775806) = (rval * 1);
@@ -0,0 +1,13 @@
Non nullable value only null key
\N
Non nullable value nullable key
Test
\N

Nullable value only null key
\N
Nullable value nullable key
Test
\N
\N
\N
29 tests/queries/0_stateless/02014_dict_get_nullable_key.sql (Normal file)
@@ -0,0 +1,29 @@
DROP TABLE IF EXISTS dictionary_non_nullable_source_table;
CREATE TABLE dictionary_non_nullable_source_table (id UInt64, value String) ENGINE=TinyLog;
INSERT INTO dictionary_non_nullable_source_table VALUES (0, 'Test');

DROP DICTIONARY IF EXISTS test_dictionary_non_nullable;
CREATE DICTIONARY test_dictionary_non_nullable (id UInt64, value String) PRIMARY KEY id LAYOUT(DIRECT()) SOURCE(CLICKHOUSE(TABLE 'dictionary_non_nullable_source_table'));

SELECT 'Non nullable value only null key ';
SELECT dictGet('test_dictionary_non_nullable', 'value', NULL);
SELECT 'Non nullable value nullable key';
SELECT dictGet('test_dictionary_non_nullable', 'value', arrayJoin([toUInt64(0), NULL, 1]));

DROP DICTIONARY test_dictionary_non_nullable;
DROP TABLE dictionary_non_nullable_source_table;

DROP TABLE IF EXISTS dictionary_nullable_source_table;
CREATE TABLE dictionary_nullable_source_table (id UInt64, value Nullable(String)) ENGINE=TinyLog;
INSERT INTO dictionary_nullable_source_table VALUES (0, 'Test'), (1, NULL);

DROP DICTIONARY IF EXISTS test_dictionary_nullable;
CREATE DICTIONARY test_dictionary_nullable (id UInt64, value Nullable(String)) PRIMARY KEY id LAYOUT(DIRECT()) SOURCE(CLICKHOUSE(TABLE 'dictionary_nullable_source_table'));

SELECT 'Nullable value only null key ';
SELECT dictGet('test_dictionary_nullable', 'value', NULL);
SELECT 'Nullable value nullable key';
SELECT dictGet('test_dictionary_nullable', 'value', arrayJoin([toUInt64(0), NULL, 1, 2]));

DROP DICTIONARY test_dictionary_nullable;
DROP TABLE dictionary_nullable_source_table;
@@ -0,0 +1 @@
5 0
@@ -0,0 +1,37 @@
DROP TABLE IF EXISTS test_table;
CREATE TABLE test_table
(
    key_column UInt64,
    data_column_1 UInt64,
    data_column_2 UInt8
)
ENGINE = MergeTree
ORDER BY key_column;

INSERT INTO test_table VALUES (0, 0, 0);

DROP DICTIONARY IF EXISTS test_dictionary;
CREATE DICTIONARY test_dictionary
(
    key_column UInt64 DEFAULT 0,
    data_column_1 UInt64 DEFAULT 1,
    data_column_2 UInt8 DEFAULT 1
)
PRIMARY KEY key_column
LAYOUT(DIRECT())
SOURCE(CLICKHOUSE(TABLE 'test_table'));

DROP TABLE IF EXISTS test_table_default;
CREATE TABLE test_table_default
(
    data_1 DEFAULT dictGetUInt64('test_dictionary', 'data_column_1', toUInt64(0)),
    data_2 DEFAULT dictGet(test_dictionary, 'data_column_2', toUInt64(0))
)
ENGINE=TinyLog;

INSERT INTO test_table_default(data_1) VALUES (5);
SELECT * FROM test_table_default;

DROP DICTIONARY test_dictionary;
DROP TABLE test_table;
DROP TABLE test_table_default;
@@ -0,0 +1 @@
4
@@ -0,0 +1 @@
SELECT test_function(toUInt64(2), toUInt64(2));
@@ -0,0 +1,2 @@
10
1
9 tests/queries/0_stateless/02015_global_in_threads.sh (Executable file)
@@ -0,0 +1,9 @@
#!/usr/bin/env bash

CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh

${CLICKHOUSE_CLIENT} --log_queries=1 --max_threads=32 --query_id "2015_${CLICKHOUSE_DATABASE}_query" -q "select count() from remote('127.0.0.{2,3}', numbers(10)) where number global in (select number % 5 from numbers_mt(1000000))"
${CLICKHOUSE_CLIENT} -q "system flush logs"
${CLICKHOUSE_CLIENT} -q "select length(thread_ids) >= 32 from system.query_log where event_date = today() and query_id = '2015_${CLICKHOUSE_DATABASE}_query' and type = 'QueryFinish' and current_database = currentDatabase()"
@@ -0,0 +1,9 @@








Hello
@@ -0,0 +1 @@
SELECT s FROM (SELECT 5 AS x, 'Hello' AS s ORDER BY x WITH FILL FROM 1 TO 10) ORDER BY s;
@@ -0,0 +1,5 @@
0
0
0
0
\N
@@ -0,0 +1,10 @@
SELECT count() AS cnt WHERE 0 HAVING cnt = 0;

select cnt from (select count() cnt where 0) where cnt = 0;

select cnt from (select count() cnt from system.one where 0) where cnt = 0;

select sum from (select sum(dummy) sum from system.one where 0) where sum = 0;

set aggregate_functions_null_for_empty=1;
select sum from (select sum(dummy) sum from system.one where 0) where sum is null;
@@ -0,0 +1,3 @@
2021-07-07 15:21:00
2021-07-07 15:21:05
2021-07-07 15:21:10
@@ -0,0 +1,6 @@
SELECT toStartOfMinute(some_time) AS ts
FROM
(
    SELECT toDateTime('2021-07-07 15:21:05') AS some_time
)
ORDER BY ts ASC WITH FILL FROM toDateTime('2021-07-07 15:21:00') TO toDateTime('2021-07-07 15:21:15') STEP 5;
@@ -0,0 +1,9 @@
1
2
3
4
5
6
7
8
9
@@ -0,0 +1 @@
SELECT x FROM (SELECT 5 AS x) ORDER BY -x, x WITH FILL FROM 1 TO 10;
@@ -0,0 +1 @@
SELECT x, y FROM (SELECT 5 AS x, 'Hello' AS y) ORDER BY x WITH FILL FROM 3 TO 7, y, x WITH FILL FROM 1 TO 10; -- { serverError 475 }
@@ -0,0 +1,45 @@
3 -10
3 -9
3 -8
3 -7
3 -6
3 -5
3 -4
3 -3
3 -2
4 -10
4 -9
4 -8
4 -7
4 -6
4 -5
4 -4
4 -3
4 -2
5 -10
5 -9
5 -8
5 -7
5 -6
5 -5 Hello
5 -4
5 -3
5 -2
6 -10
6 -9
6 -8
6 -7
6 -6
6 -5
6 -4
6 -3
6 -2
7 -10
7 -9
7 -8
7 -7
7 -6
7 -5
7 -4
7 -3
7 -2
14 tests/queries/0_stateless/02019_multiple_weird_with_fill.sql (Normal file)
@@ -0,0 +1,14 @@
SELECT
    x,
    -x,
    y
FROM
(
    SELECT
        5 AS x,
        'Hello' AS y
)
ORDER BY
    x ASC WITH FILL FROM 3 TO 7,
    y ASC,
    -x ASC WITH FILL FROM -10 TO -1;
@@ -0,0 +1,144 @@
engine : Null
initial comment
CREATE TABLE default.comment_test_table\n(\n `k` UInt64,\n `s` String\n)\nENGINE = Null\nCOMMENT \'Test table with comment\'
comment= Test table with comment

change a comment
CREATE TABLE default.comment_test_table\n(\n `k` UInt64,\n `s` String\n)\nENGINE = Null\nCOMMENT \'new comment on a table\'
comment= new comment on a table

remove a comment
CREATE TABLE default.comment_test_table\n(\n `k` UInt64,\n `s` String\n)\nENGINE = Null
comment=

add a comment back
CREATE TABLE default.comment_test_table\n(\n `k` UInt64,\n `s` String\n)\nENGINE = Null\nCOMMENT \'another comment on a table\'
comment= another comment on a table

detach table
CREATE TABLE default.comment_test_table\n(\n `k` UInt64,\n `s` String\n)\nENGINE = Null\nCOMMENT \'another comment on a table\'

re-attach table
CREATE TABLE default.comment_test_table\n(\n `k` UInt64,\n `s` String\n)\nENGINE = Null\nCOMMENT \'another comment on a table\'
comment= another comment on a table

engine : Memory
initial comment
CREATE TABLE default.comment_test_table\n(\n `k` UInt64,\n `s` String\n)\nENGINE = Memory\nCOMMENT \'Test table with comment\'
comment= Test table with comment

change a comment
CREATE TABLE default.comment_test_table\n(\n `k` UInt64,\n `s` String\n)\nENGINE = Memory\nCOMMENT \'new comment on a table\'
comment= new comment on a table

remove a comment
CREATE TABLE default.comment_test_table\n(\n `k` UInt64,\n `s` String\n)\nENGINE = Memory
comment=

add a comment back
CREATE TABLE default.comment_test_table\n(\n `k` UInt64,\n `s` String\n)\nENGINE = Memory\nCOMMENT \'another comment on a table\'
comment= another comment on a table

detach table
CREATE TABLE default.comment_test_table\n(\n `k` UInt64,\n `s` String\n)\nENGINE = Memory\nCOMMENT \'another comment on a table\'

re-attach table
CREATE TABLE default.comment_test_table\n(\n `k` UInt64,\n `s` String\n)\nENGINE = Memory\nCOMMENT \'another comment on a table\'
comment= another comment on a table

engine : MergeTree() ORDER BY k
initial comment
CREATE TABLE default.comment_test_table\n(\n `k` UInt64,\n `s` String\n)\nENGINE = MergeTree\nORDER BY k\nSETTINGS index_granularity = 8192\nCOMMENT \'Test table with comment\'
comment= Test table with comment

change a comment
CREATE TABLE default.comment_test_table\n(\n `k` UInt64,\n `s` String\n)\nENGINE = MergeTree\nORDER BY k\nSETTINGS index_granularity = 8192\nCOMMENT \'new comment on a table\'
comment= new comment on a table

remove a comment
CREATE TABLE default.comment_test_table\n(\n `k` UInt64,\n `s` String\n)\nENGINE = MergeTree\nORDER BY k\nSETTINGS index_granularity = 8192
comment=

add a comment back
CREATE TABLE default.comment_test_table\n(\n `k` UInt64,\n `s` String\n)\nENGINE = MergeTree\nORDER BY k\nSETTINGS index_granularity = 8192\nCOMMENT \'another comment on a table\'
comment= another comment on a table

detach table
CREATE TABLE default.comment_test_table\n(\n `k` UInt64,\n `s` String\n)\nENGINE = MergeTree\nORDER BY k\nSETTINGS index_granularity = 8192\nCOMMENT \'another comment on a table\'

re-attach table
CREATE TABLE default.comment_test_table\n(\n `k` UInt64,\n `s` String\n)\nENGINE = MergeTree\nORDER BY k\nSETTINGS index_granularity = 8192\nCOMMENT \'another comment on a table\'
comment= another comment on a table

engine : Log
initial comment
CREATE TABLE default.comment_test_table\n(\n `k` UInt64,\n `s` String\n)\nENGINE = Log\nCOMMENT \'Test table with comment\'
comment= Test table with comment

change a comment
CREATE TABLE default.comment_test_table\n(\n `k` UInt64,\n `s` String\n)\nENGINE = Log\nCOMMENT \'new comment on a table\'
comment= new comment on a table

remove a comment
CREATE TABLE default.comment_test_table\n(\n `k` UInt64,\n `s` String\n)\nENGINE = Log
comment=

add a comment back
CREATE TABLE default.comment_test_table\n(\n `k` UInt64,\n `s` String\n)\nENGINE = Log\nCOMMENT \'another comment on a table\'
comment= another comment on a table

detach table
CREATE TABLE default.comment_test_table\n(\n `k` UInt64,\n `s` String\n)\nENGINE = Log\nCOMMENT \'another comment on a table\'

re-attach table
CREATE TABLE default.comment_test_table\n(\n `k` UInt64,\n `s` String\n)\nENGINE = Log\nCOMMENT \'another comment on a table\'
comment= another comment on a table

engine : TinyLog
initial comment
CREATE TABLE default.comment_test_table\n(\n `k` UInt64,\n `s` String\n)\nENGINE = TinyLog\nCOMMENT \'Test table with comment\'
comment= Test table with comment

change a comment
CREATE TABLE default.comment_test_table\n(\n `k` UInt64,\n `s` String\n)\nENGINE = TinyLog\nCOMMENT \'new comment on a table\'
comment= new comment on a table

remove a comment
CREATE TABLE default.comment_test_table\n(\n `k` UInt64,\n `s` String\n)\nENGINE = TinyLog
comment=

add a comment back
CREATE TABLE default.comment_test_table\n(\n `k` UInt64,\n `s` String\n)\nENGINE = TinyLog\nCOMMENT \'another comment on a table\'
comment= another comment on a table

detach table
CREATE TABLE default.comment_test_table\n(\n `k` UInt64,\n `s` String\n)\nENGINE = TinyLog\nCOMMENT \'another comment on a table\'

re-attach table
CREATE TABLE default.comment_test_table\n(\n `k` UInt64,\n `s` String\n)\nENGINE = TinyLog\nCOMMENT \'another comment on a table\'
comment= another comment on a table

engine : ReplicatedMergeTree('/clickhouse/2020_alter_table_modify_comment_default', '1') ORDER BY k
initial comment
CREATE TABLE default.comment_test_table\n(\n `k` UInt64,\n `s` String\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/2020_alter_table_modify_comment_default\', \'1\')\nORDER BY k\nSETTINGS index_granularity = 8192\nCOMMENT \'Test table with comment\'
comment= Test table with comment

change a comment
CREATE TABLE default.comment_test_table\n(\n `k` UInt64,\n `s` String\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/2020_alter_table_modify_comment_default\', \'1\')\nORDER BY k\nSETTINGS index_granularity = 8192\nCOMMENT \'Test table with comment\'
comment= Test table with comment

remove a comment
CREATE TABLE default.comment_test_table\n(\n `k` UInt64,\n `s` String\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/2020_alter_table_modify_comment_default\', \'1\')\nORDER BY k\nSETTINGS index_granularity = 8192\nCOMMENT \'Test table with comment\'
comment= Test table with comment

add a comment back
CREATE TABLE default.comment_test_table\n(\n `k` UInt64,\n `s` String\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/2020_alter_table_modify_comment_default\', \'1\')\nORDER BY k\nSETTINGS index_granularity = 8192\nCOMMENT \'Test table with comment\'
comment= Test table with comment

detach table
CREATE TABLE default.comment_test_table\n(\n `k` UInt64,\n `s` String\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/2020_alter_table_modify_comment_default\', \'1\')\nORDER BY k\nSETTINGS index_granularity = 8192\nCOMMENT \'Test table with comment\'

re-attach table
CREATE TABLE default.comment_test_table\n(\n `k` UInt64,\n `s` String\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/2020_alter_table_modify_comment_default\', \'1\')\nORDER BY k\nSETTINGS index_granularity = 8192\nCOMMENT \'Test table with comment\'
comment= Test table with comment

59 tests/queries/0_stateless/02020_alter_table_modify_comment.sh (Executable file)
@@ -0,0 +1,59 @@
#!/usr/bin/env bash

CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh

function get_table_comment_info()
{
    $CLICKHOUSE_CLIENT --query="SHOW CREATE TABLE comment_test_table;"
    $CLICKHOUSE_CLIENT --query="SELECT 'comment=', comment FROM system.tables WHERE database=currentDatabase() and name='comment_test_table'"
    echo # just a newline
}

function test_table_comments()
{
    local ENGINE_NAME="$1"
    echo "engine : ${ENGINE_NAME}"

    $CLICKHOUSE_CLIENT -nm <<EOF
        DROP TABLE IF EXISTS comment_test_table;

        CREATE TABLE comment_test_table
        (
            k UInt64,
            s String
        ) ENGINE = ${ENGINE_NAME}
        COMMENT 'Test table with comment';
EOF

    echo initial comment
    get_table_comment_info

    echo change a comment
    $CLICKHOUSE_CLIENT --query="ALTER TABLE comment_test_table MODIFY COMMENT 'new comment on a table';"
    get_table_comment_info

    echo remove a comment
    $CLICKHOUSE_CLIENT --query="ALTER TABLE comment_test_table MODIFY COMMENT '';"
    get_table_comment_info

    echo add a comment back
    $CLICKHOUSE_CLIENT --query="ALTER TABLE comment_test_table MODIFY COMMENT 'another comment on a table';"
    get_table_comment_info

    echo detach table
    $CLICKHOUSE_CLIENT --query="DETACH TABLE comment_test_table NO DELAY;"
    get_table_comment_info

    echo re-attach table
    $CLICKHOUSE_CLIENT --query="ATTACH TABLE comment_test_table;"
    get_table_comment_info
}

test_table_comments "Null"
test_table_comments "Memory"
test_table_comments "MergeTree() ORDER BY k"
test_table_comments "Log"
test_table_comments "TinyLog"
test_table_comments "ReplicatedMergeTree('/clickhouse/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX', '1') ORDER BY k"
@@ -0,0 +1,2 @@
-2147483648
-2147483648
@@ -0,0 +1,2 @@
SELECT toInt32('-2147483648');
SELECT toInt32OrNull('-2147483648');
130 tests/queries/0_stateless/02020_exponential_smoothing.reference (Normal file)
@@ -0,0 +1,130 @@
1 0 0.5
0 1 0.25
0 2 0.125
0 3 0.0625
0 4 0.03125
0 5 0.015625
0 6 0.0078125
0 7 0.00390625
0 8 0.001953125
0 9 0.0009765625
1 0 0.067
0 1 0.062
0 2 0.058
0 3 0.054
0 4 0.051
0 5 0.047
0 6 0.044
0 7 0.041
0 8 0.038
0 9 0.036
0 0 0
1 1 0.5
2 2 1.25
3 3 2.125
4 4 3.0625
5 5 4.03125
6 6 5.015625
7 7 6.0078125
8 8 7.00390625
9 9 8.001953125
1 0 0.067 ███▎
0 1 0.062 ███
0 2 0.058 ██▊
0 3 0.054 ██▋
0 4 0.051 ██▌
0 5 0.047 ██▎
0 6 0.044 ██▏
0 7 0.041 ██
0 8 0.038 █▊
0 9 0.036 █▋
0 10 0.033 █▋
0 11 0.031 █▌
0 12 0.029 █▍
0 13 0.027 █▎
0 14 0.025 █▎
0 15 0.024 █▏
0 16 0.022 █
0 17 0.021 █
0 18 0.019 ▊
0 19 0.018 ▊
0 20 0.017 ▋
0 21 0.016 ▋
0 22 0.015 ▋
0 23 0.014 ▋
0 24 0.013 ▋
1 25 0.079 ███▊
1 26 0.14 ███████
1 27 0.198 █████████▊
1 28 0.252 ████████████▌
1 29 0.302 ███████████████
1 30 0.349 █████████████████▍
1 31 0.392 ███████████████████▌
1 32 0.433 █████████████████████▋
1 33 0.471 ███████████████████████▌
1 34 0.506 █████████████████████████▎
1 35 0.539 ██████████████████████████▊
1 36 0.57 ████████████████████████████▌
1 37 0.599 █████████████████████████████▊
1 38 0.626 ███████████████████████████████▎
1 39 0.651 ████████████████████████████████▌
1 40 0.674 █████████████████████████████████▋
1 41 0.696 ██████████████████████████████████▋
1 42 0.716 ███████████████████████████████████▋
1 43 0.735 ████████████████████████████████████▋
1 44 0.753 █████████████████████████████████████▋
1 45 0.77 ██████████████████████████████████████▍
1 46 0.785 ███████████████████████████████████████▎
1 47 0.8 ███████████████████████████████████████▊
1 48 0.813 ████████████████████████████████████████▋
1 49 0.825 █████████████████████████████████████████▎
1 0 0.5 █████████████████████████
0 1 0.25 ████████████▌
0 2 0.125 ██████▎
0 3 0.062 ███
0 4 0.031 █▌
1 5 0.516 █████████████████████████▋
0 6 0.258 ████████████▊
0 7 0.129 ██████▍
0 8 0.064 ███▏
0 9 0.032 █▌
1 10 0.516 █████████████████████████▋
0 11 0.258 ████████████▊
0 12 0.129 ██████▍
0 13 0.065 ███▏
0 14 0.032 █▌
1 15 0.516 █████████████████████████▋
0 16 0.258 ████████████▊
0 17 0.129 ██████▍
0 18 0.065 ███▏
0 19 0.032 █▌
1 20 0.516 █████████████████████████▋
0 21 0.258 ████████████▊
0 22 0.129 ██████▍
0 23 0.065 ███▏
0 24 0.032 █▌
1 25 0.516 █████████████████████████▋
0 26 0.258 ████████████▊
0 27 0.129 ██████▍
0 28 0.065 ███▏
0 29 0.032 █▌
1 30 0.516 █████████████████████████▋
0 31 0.258 ████████████▊
0 32 0.129 ██████▍
0 33 0.065 ███▏
0 34 0.032 █▌
1 35 0.516 █████████████████████████▋
0 36 0.258 ████████████▊
0 37 0.129 ██████▍
0 38 0.065 ███▏
0 39 0.032 █▌
1 40 0.516 █████████████████████████▋
0 41 0.258 ████████████▊
0 42 0.129 ██████▍
0 43 0.065 ███▏
0 44 0.032 █▌
1 45 0.516 █████████████████████████▋
0 46 0.258 ████████████▊
0 47 0.129 ██████▍
0 48 0.065 ███▏
0 49 0.032 █▌
32 tests/queries/0_stateless/02020_exponential_smoothing.sql (Normal file)
@@ -0,0 +1,32 @@
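-- exponentialMovingAverage(x)(value, time) weights each point by
-- 2^(-(t_now - t_i) / x), i.e. x is the half-life in time units, with a
-- normalization factor of (1 - 2^(-1/x)): a lone value of 1 yields 0.5 for
-- x = 1 and 1 - 2^(-1/10) ≈ 0.067 for x = 10, matching the first rows of
-- the reference output.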
SELECT number = 0 AS value, number AS time, exponentialMovingAverage(1)(value, time) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS exp_smooth FROM numbers(10);
SELECT value, time, round(exp_smooth, 3) FROM (SELECT number = 0 AS value, number AS time, exponentialMovingAverage(10)(value, time) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS exp_smooth FROM numbers(10));

SELECT number AS value, number AS time, exponentialMovingAverage(1)(value, time) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS exp_smooth FROM numbers(10);

SELECT
    value,
    time,
    round(exp_smooth, 3),
    bar(exp_smooth, 0, 1, 50) AS bar
FROM
(
    SELECT
        (number = 0) OR (number >= 25) AS value,
        number AS time,
        exponentialMovingAverage(10)(value, time) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS exp_smooth
    FROM numbers(50)
);

SELECT
    value,
    time,
    round(exp_smooth, 3),
    bar(exp_smooth, 0, 1, 50) AS bar
FROM
(
    SELECT
        (number % 5) = 0 AS value,
        number AS time,
        exponentialMovingAverage(1)(value, time) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS exp_smooth
    FROM numbers(50)
);
@@ -0,0 +1,8 @@
0.0009765625
0.0009775171065493646
0.0009775171065493646
0.0009775171065493646
0.0009775171065493646
0.0009775171065493646
0.0009775171065493646
0.0009775171065493646
9 tests/queries/0_stateless/02021_exponential_sum.sql (Normal file)
@@ -0,0 +1,9 @@
-- Check that it is deterministic
WITH number % 10 = 0 AS value, number AS time SELECT exponentialMovingAverage(1)(value, time) AS exp_smooth FROM numbers_mt(10);
WITH number % 10 = 0 AS value, number AS time SELECT exponentialMovingAverage(1)(value, time) AS exp_smooth FROM numbers_mt(100);
WITH number % 10 = 0 AS value, number AS time SELECT exponentialMovingAverage(1)(value, time) AS exp_smooth FROM numbers_mt(1000);
WITH number % 10 = 0 AS value, number AS time SELECT exponentialMovingAverage(1)(value, time) AS exp_smooth FROM numbers_mt(10000);
WITH number % 10 = 0 AS value, number AS time SELECT exponentialMovingAverage(1)(value, time) AS exp_smooth FROM numbers_mt(100000);
WITH number % 10 = 0 AS value, number AS time SELECT exponentialMovingAverage(1)(value, time) AS exp_smooth FROM numbers_mt(1000000);
WITH number % 10 = 0 AS value, number AS time SELECT exponentialMovingAverage(1)(value, time) AS exp_smooth FROM numbers_mt(10000000);
WITH number % 10 = 0 AS value, number AS time SELECT exponentialMovingAverage(1)(value, time) AS exp_smooth FROM numbers_mt(100000000);
@@ -0,0 +1,5 @@
0.009775171065493644
0.009775171065493644
0.009775171065493644
0.009775171065493644
0.009775171065493644
@@ -0,0 +1,6 @@
-- Check that it is deterministic
WITH number % 10 = 0 AS value, number AS time SELECT exponentialMovingAverage(1)(value, time) AS exp_smooth FROM remote('127.0.0.{1..10}', numbers_mt(1000));
WITH number % 10 = 0 AS value, number AS time SELECT exponentialMovingAverage(1)(value, time) AS exp_smooth FROM remote('127.0.0.{1..10}', numbers_mt(10000));
WITH number % 10 = 0 AS value, number AS time SELECT exponentialMovingAverage(1)(value, time) AS exp_smooth FROM remote('127.0.0.{1..10}', numbers_mt(100000));
WITH number % 10 = 0 AS value, number AS time SELECT exponentialMovingAverage(1)(value, time) AS exp_smooth FROM remote('127.0.0.{1..10}', numbers_mt(1000000));
WITH number % 10 = 0 AS value, number AS time SELECT exponentialMovingAverage(1)(value, time) AS exp_smooth FROM remote('127.0.0.{1..10}', numbers_mt(10000000));
4 tests/queries/0_stateless/02021_h3_get_faces.reference (Normal file)
@@ -0,0 +1,4 @@
[2,3,7,8,12]
[7,12]
[7]
[7]
16 tests/queries/0_stateless/02021_h3_get_faces.sql (Normal file)
@@ -0,0 +1,16 @@
-- Tags: no-unbundled, no-fasttest

DROP TABLE IF EXISTS h3_indexes;

CREATE TABLE h3_indexes (h3_index UInt64) ENGINE = Memory;

-- test H3 indexes from: https://github.com/uber/h3-java/blob/master/src/test/java/com/uber/h3core/TestInspection.java#L86

INSERT INTO h3_indexes VALUES (stringToH3('0x85283473fffffffL'));
INSERT INTO h3_indexes VALUES (stringToH3('85283473fffffff'));
INSERT INTO h3_indexes VALUES (stringToH3('0x8167bffffffffffL'));
INSERT INTO h3_indexes VALUES (stringToH3('0x804dfffffffffffL'));

SELECT arraySort(h3GetFaces(h3_index)) FROM h3_indexes ORDER BY h3_index;

DROP TABLE h3_indexes;
4 tests/queries/0_stateless/02021_h3_is_pentagon.reference (Normal file)
@@ -0,0 +1,4 @@
1
1
0
0
16 tests/queries/0_stateless/02021_h3_is_pentagon.sql (Normal file)
@@ -0,0 +1,16 @@
-- Tags: no-unbundled, no-fasttest

DROP TABLE IF EXISTS h3_indexes;

CREATE TABLE h3_indexes (h3_index UInt64) ENGINE = Memory;

-- test H3 indexes from: https://github.com/uber/h3-java/blob/master/src/test/java/com/uber/h3core/TestInspection.java#L78

INSERT INTO h3_indexes VALUES (stringToH3('8f28308280f18f2'));
INSERT INTO h3_indexes VALUES (stringToH3('0x8f28308280f18f2L'));
INSERT INTO h3_indexes VALUES (stringToH3('821c07fffffffff'));
INSERT INTO h3_indexes VALUES (stringToH3('0x821c07fffffffffL'));

SELECT h3IsPentagon(h3_index) FROM h3_indexes ORDER BY h3_index;

DROP TABLE h3_indexes;
@@ -0,0 +1,4 @@
0
1
0
1
16 tests/queries/0_stateless/02021_h3_is_res_classIII.sql (Normal file)
@@ -0,0 +1,16 @@
-- Tags: no-unbundled, no-fasttest

DROP TABLE IF EXISTS h3_indexes;

CREATE TABLE h3_indexes (h3_index UInt64) ENGINE = Memory;

-- test H3 indexes from: https://github.com/uber/h3-java/blob/master/src/test/java/com/uber/h3core/TestInspection.java#L57

INSERT INTO h3_indexes VALUES (geoToH3(0.0, 0.0, 0));
INSERT INTO h3_indexes VALUES (geoToH3(10.0, 0.0, 1));
INSERT INTO h3_indexes VALUES (geoToH3(0.0, 10.0, 2));
INSERT INTO h3_indexes VALUES (geoToH3(10.0, 10.0, 3));

SELECT h3IsResClassIII(h3_index) FROM h3_indexes ORDER BY h3_index;

DROP TABLE h3_indexes;
@@ -0,0 +1,58 @@
Map bloom filter mapKeys
Equals with existing key
0 {'K0':'V0'}
Equals with non existing key
Equals with non existing key and default value
0 {'K0':'V0'}
1 {'K1':'V1'}
Not equals with existing key
1 {'K1':'V1'}
Not equals with non existing key
0 {'K0':'V0'}
1 {'K1':'V1'}
Not equals with non existing key and default value
IN with existing key
0 {'K0':'V0'}
IN with non existing key
IN with non existing key and default value
0 {'K0':'V0'}
1 {'K1':'V1'}
NOT IN with existing key
1 {'K1':'V1'}
NOT IN with non existing key
0 {'K0':'V0'}
1 {'K1':'V1'}
NOT IN with non existing key and default value
MapContains with existing key
0 {'K0':'V0'}
MapContains with non existing key
MapContains with non existing key and default value
Has with existing key
0 {'K0':'V0'}
Has with non existing key
Has with non existing key and default value
Map bloom filter mapValues
Equals with existing key
0 {'K0':'V0'}
Equals with non existing key
Equals with non existing key and default value
0 {'K0':'V0'}
1 {'K1':'V1'}
Not equals with existing key
1 {'K1':'V1'}
Not equals with non existing key
0 {'K0':'V0'}
1 {'K1':'V1'}
Not equals with non existing key and default value
IN with existing key
0 {'K0':'V0'}
IN with non existing key
IN with non existing key and default value
0 {'K0':'V0'}
1 {'K1':'V1'}
NOT IN with existing key
1 {'K1':'V1'}
NOT IN with non existing key
0 {'K0':'V0'}
1 {'K1':'V1'}
NOT IN with non existing key and default value
92 tests/queries/0_stateless/02021_map_bloom_filter_index.sql (Normal file)
@@ -0,0 +1,92 @@
DROP TABLE IF EXISTS map_test_index_map_keys;
CREATE TABLE map_test_index_map_keys
(
    row_id UInt32,
    map Map(String, String),
    INDEX map_bloom_filter_keys mapKeys(map) TYPE bloom_filter GRANULARITY 1
) Engine=MergeTree() ORDER BY row_id SETTINGS index_granularity = 1;

INSERT INTO map_test_index_map_keys VALUES (0, {'K0':'V0'}), (1, {'K1':'V1'});

SELECT 'Map bloom filter mapKeys';

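-- Note: the "... and default value" queries below omit
-- force_data_skipping_indices: a missing key returns the type's default
-- value (''), so rows without the key also match and the bloom filter
-- index cannot be used to exclude granules for such predicates.
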
SELECT 'Equals with existing key';
SELECT * FROM map_test_index_map_keys WHERE map['K0'] = 'V0' SETTINGS force_data_skipping_indices='map_bloom_filter_keys';
SELECT 'Equals with non existing key';
SELECT * FROM map_test_index_map_keys WHERE map['K2'] = 'V2' SETTINGS force_data_skipping_indices='map_bloom_filter_keys';
SELECT 'Equals with non existing key and default value';
SELECT * FROM map_test_index_map_keys WHERE map['K3'] = '';
SELECT 'Not equals with existing key';
SELECT * FROM map_test_index_map_keys WHERE map['K0'] != 'V0' SETTINGS force_data_skipping_indices='map_bloom_filter_keys';
SELECT 'Not equals with non existing key';
SELECT * FROM map_test_index_map_keys WHERE map['K2'] != 'V2' SETTINGS force_data_skipping_indices='map_bloom_filter_keys';
SELECT 'Not equals with non existing key and default value';
SELECT * FROM map_test_index_map_keys WHERE map['K3'] != '';

SELECT 'IN with existing key';
SELECT * FROM map_test_index_map_keys WHERE map['K0'] IN 'V0' SETTINGS force_data_skipping_indices='map_bloom_filter_keys';
SELECT 'IN with non existing key';
SELECT * FROM map_test_index_map_keys WHERE map['K2'] IN 'V2' SETTINGS force_data_skipping_indices='map_bloom_filter_keys';
SELECT 'IN with non existing key and default value';
SELECT * FROM map_test_index_map_keys WHERE map['K3'] IN '';
SELECT 'NOT IN with existing key';
SELECT * FROM map_test_index_map_keys WHERE map['K0'] NOT IN 'V0' SETTINGS force_data_skipping_indices='map_bloom_filter_keys';
SELECT 'NOT IN with non existing key';
SELECT * FROM map_test_index_map_keys WHERE map['K2'] NOT IN 'V2' SETTINGS force_data_skipping_indices='map_bloom_filter_keys';
SELECT 'NOT IN with non existing key and default value';
SELECT * FROM map_test_index_map_keys WHERE map['K3'] NOT IN '';

SELECT 'MapContains with existing key';
SELECT * FROM map_test_index_map_keys WHERE mapContains(map, 'K0') SETTINGS force_data_skipping_indices='map_bloom_filter_keys';
SELECT 'MapContains with non existing key';
SELECT * FROM map_test_index_map_keys WHERE mapContains(map, 'K2') SETTINGS force_data_skipping_indices='map_bloom_filter_keys';
SELECT 'MapContains with non existing key and default value';
SELECT * FROM map_test_index_map_keys WHERE mapContains(map, '');

SELECT 'Has with existing key';
SELECT * FROM map_test_index_map_keys WHERE has(map, 'K0') SETTINGS force_data_skipping_indices='map_bloom_filter_keys';
SELECT 'Has with non existing key';
SELECT * FROM map_test_index_map_keys WHERE has(map, 'K2') SETTINGS force_data_skipping_indices='map_bloom_filter_keys';
SELECT 'Has with non existing key and default value';
SELECT * FROM map_test_index_map_keys WHERE has(map, '') SETTINGS force_data_skipping_indices='map_bloom_filter_keys';

DROP TABLE map_test_index_map_keys;

DROP TABLE IF EXISTS map_test_index_map_values;
CREATE TABLE map_test_index_map_values
(
    row_id UInt32,
    map Map(String, String),
    INDEX map_bloom_filter_values mapValues(map) TYPE bloom_filter GRANULARITY 1
) Engine=MergeTree() ORDER BY row_id SETTINGS index_granularity = 1;

INSERT INTO map_test_index_map_values VALUES (0, {'K0':'V0'}), (1, {'K1':'V1'});

SELECT 'Map bloom filter mapValues';

SELECT 'Equals with existing key';
SELECT * FROM map_test_index_map_values WHERE map['K0'] = 'V0' SETTINGS force_data_skipping_indices='map_bloom_filter_values';
SELECT 'Equals with non existing key';
SELECT * FROM map_test_index_map_values WHERE map['K2'] = 'V2' SETTINGS force_data_skipping_indices='map_bloom_filter_values';
SELECT 'Equals with non existing key and default value';
SELECT * FROM map_test_index_map_values WHERE map['K3'] = '';
SELECT 'Not equals with existing key';
SELECT * FROM map_test_index_map_values WHERE map['K0'] != 'V0' SETTINGS force_data_skipping_indices='map_bloom_filter_values';
SELECT 'Not equals with non existing key';
SELECT * FROM map_test_index_map_values WHERE map['K2'] != 'V2' SETTINGS force_data_skipping_indices='map_bloom_filter_values';
SELECT 'Not equals with non existing key and default value';
SELECT * FROM map_test_index_map_values WHERE map['K3'] != '';
SELECT 'IN with existing key';
SELECT * FROM map_test_index_map_values WHERE map['K0'] IN 'V0' SETTINGS force_data_skipping_indices='map_bloom_filter_values';
SELECT 'IN with non existing key';
SELECT * FROM map_test_index_map_values WHERE map['K2'] IN 'V2' SETTINGS force_data_skipping_indices='map_bloom_filter_values';
SELECT 'IN with non existing key and default value';
SELECT * FROM map_test_index_map_values WHERE map['K3'] IN '';
SELECT 'NOT IN with existing key';
SELECT * FROM map_test_index_map_values WHERE map['K0'] NOT IN 'V0' SETTINGS force_data_skipping_indices='map_bloom_filter_values';
SELECT 'NOT IN with non existing key';
SELECT * FROM map_test_index_map_values WHERE map['K2'] NOT IN 'V2' SETTINGS force_data_skipping_indices='map_bloom_filter_values';
SELECT 'NOT IN with non existing key and default value';
SELECT * FROM map_test_index_map_values WHERE map['K3'] NOT IN '';

DROP TABLE map_test_index_map_values;
6 tests/queries/0_stateless/02021_map_has.reference (Normal file)
@@ -0,0 +1,6 @@
Non constant map
1
0
Constant map
1
0
14 tests/queries/0_stateless/02021_map_has.sql (Normal file)
@@ -0,0 +1,14 @@
DROP TABLE IF EXISTS test_map;
CREATE TABLE test_map (value Map(String, String)) ENGINE=TinyLog;

SELECT 'Non constant map';
INSERT INTO test_map VALUES ({'K0':'V0'});
SELECT has(value, 'K0') FROM test_map;
SELECT has(value, 'K1') FROM test_map;

SELECT 'Constant map';

SELECT has(map('K0', 'V0'), 'K0') FROM system.one;
SELECT has(map('K0', 'V0'), 'K1') FROM system.one;

DROP TABLE test_map;
@@ -0,0 +1,8 @@
1 ['K1'] ['K1']
2 ['K2'] ['K2']
1 ['K1'] ['K1']
2 ['K2'] ['K2']
1 ['K1'] ['K1']
2 ['K2'] ['K2']
1 ['K1'] ['K1']
2 ['K2'] ['K2']
@@ -0,0 +1,42 @@
DROP TABLE IF EXISTS bf_tokenbf_array_test;
DROP TABLE IF EXISTS bf_ngram_array_test;

CREATE TABLE bf_tokenbf_array_test
(
    row_id UInt32,
    array Array(String),
    array_fixed Array(FixedString(2)),
    INDEX array_bf_tokenbf array TYPE tokenbf_v1(256,2,0) GRANULARITY 1,
    INDEX array_fixed_bf_tokenbf array_fixed TYPE tokenbf_v1(256,2,0) GRANULARITY 1
) Engine=MergeTree() ORDER BY row_id SETTINGS index_granularity = 1;

CREATE TABLE bf_ngram_array_test
(
    row_id UInt32,
    array Array(String),
    array_fixed Array(FixedString(2)),
    INDEX array_ngram array TYPE ngrambf_v1(4,256,2,0) GRANULARITY 1,
    INDEX array_fixed_ngram array_fixed TYPE ngrambf_v1(4,256,2,0) GRANULARITY 1
) Engine=MergeTree() ORDER BY row_id SETTINGS index_granularity = 1;

INSERT INTO bf_tokenbf_array_test VALUES (1, ['K1'], ['K1']), (2, ['K2'], ['K2']);
INSERT INTO bf_ngram_array_test VALUES (1, ['K1'], ['K1']), (2, ['K2'], ['K2']);

SELECT * FROM bf_tokenbf_array_test WHERE has(array, 'K1') SETTINGS force_data_skipping_indices='array_bf_tokenbf';
SELECT * FROM bf_tokenbf_array_test WHERE has(array, 'K2') SETTINGS force_data_skipping_indices='array_bf_tokenbf';
SELECT * FROM bf_tokenbf_array_test WHERE has(array, 'K3') SETTINGS force_data_skipping_indices='array_bf_tokenbf';

SELECT * FROM bf_tokenbf_array_test WHERE has(array_fixed, 'K1') SETTINGS force_data_skipping_indices='array_fixed_bf_tokenbf';
SELECT * FROM bf_tokenbf_array_test WHERE has(array_fixed, 'K2') SETTINGS force_data_skipping_indices='array_fixed_bf_tokenbf';
SELECT * FROM bf_tokenbf_array_test WHERE has(array_fixed, 'K3') SETTINGS force_data_skipping_indices='array_fixed_bf_tokenbf';

SELECT * FROM bf_ngram_array_test WHERE has(array, 'K1') SETTINGS force_data_skipping_indices='array_ngram';
SELECT * FROM bf_ngram_array_test WHERE has(array, 'K2') SETTINGS force_data_skipping_indices='array_ngram';
SELECT * FROM bf_ngram_array_test WHERE has(array, 'K3') SETTINGS force_data_skipping_indices='array_ngram';

SELECT * FROM bf_ngram_array_test WHERE has(array_fixed, 'K1') SETTINGS force_data_skipping_indices='array_fixed_ngram';
SELECT * FROM bf_ngram_array_test WHERE has(array_fixed, 'K2') SETTINGS force_data_skipping_indices='array_fixed_ngram';
SELECT * FROM bf_ngram_array_test WHERE has(array_fixed, 'K3') SETTINGS force_data_skipping_indices='array_fixed_ngram';

DROP TABLE bf_tokenbf_array_test;
DROP TABLE bf_ngram_array_test;
@@ -0,0 +1,14 @@
1 UInt8
2 UInt8
4 UInt8
8 UInt8
Unsigned numbers
128 UInt8
32768 UInt16
2147483648 UInt32
9223372036854775808 UInt64
Signed numbers
-128 Int8
-32768 Int16
-2147483648 Int32
-9223372036854775808 Int64
@@ -0,0 +1,16 @@
SELECT 0b0001 as number, toTypeName(number);
SELECT 0b0010 as number, toTypeName(number);
SELECT 0b0100 as number, toTypeName(number);
SELECT 0b1000 as number, toTypeName(number);

SELECT 'Unsigned numbers';
SELECT 0b10000000 as number, toTypeName(number);
SELECT 0b1000000000000000 as number, toTypeName(number);
SELECT 0b10000000000000000000000000000000 as number, toTypeName(number);
SELECT 0b1000000000000000000000000000000000000000000000000000000000000000 as number, toTypeName(number);

SELECT 'Signed numbers';
SELECT -0b10000000 as number, toTypeName(number);
SELECT -0b1000000000000000 as number, toTypeName(number);
SELECT -0b10000000000000000000000000000000 as number, toTypeName(number);
SELECT -0b1000000000000000000000000000000000000000000000000000000000000000 as number, toTypeName(number);
@@ -0,0 +1 @@
[{'l':0,'h':10000,'t':0.1},{'l':10001,'h':100000000000000,'t':0.2}]
@@ -0,0 +1,25 @@
CREATE TABLE dict_nested_map_test_table
(
    test_id UInt32,
    type String,
    test_config Array(Map(String, Decimal(28,12))),
    ncp UInt8
)
ENGINE=MergeTree()
ORDER BY test_id;

INSERT INTO dict_nested_map_test_table VALUES (3, 't', [{'l': 0.0, 'h': 10000.0, 't': 0.1}, {'l': 10001.0, 'h': 100000000000000.0, 't': 0.2}], 0);

CREATE DICTIONARY dict_nested_map_dictionary
(
    test_id UInt32,
    type String,
    test_config Array(Map(String, Decimal(28,12))),
    ncp UInt8
)
PRIMARY KEY test_id
SOURCE(CLICKHOUSE(TABLE 'dict_nested_map_test_table'))
LAYOUT(HASHED(PREALLOCATE 1))
LIFETIME(MIN 1 MAX 1000000);

SELECT dictGet('dict_nested_map_dictionary', 'test_config', toUInt64(3));
@@ -0,0 +1 @@
0 ViewValue Value
@@ -0,0 +1,43 @@
-- Tags: no-parallel

DROP DATABASE IF EXISTS 2025_test_db;
CREATE DATABASE 2025_test_db;

DROP TABLE IF EXISTS 2025_test_db.test_table;
CREATE TABLE 2025_test_db.test_table
(
    id UInt64,
    value String
) ENGINE=TinyLog;

INSERT INTO 2025_test_db.test_table VALUES (0, 'Value');

CREATE DICTIONARY 2025_test_db.test_dictionary
(
    id UInt64,
    value String
)
PRIMARY KEY id
LAYOUT(DIRECT())
SOURCE(CLICKHOUSE(TABLE 'test_table' DB '2025_test_db'));

DROP TABLE IF EXISTS 2025_test_db.view_table;
CREATE TABLE 2025_test_db.view_table
(
    id UInt64,
    value String
) ENGINE=TinyLog;

INSERT INTO 2025_test_db.view_table VALUES (0, 'ViewValue');

DROP VIEW IF EXISTS test_view_different_db;
CREATE VIEW test_view_different_db AS SELECT id, value, dictGet('2025_test_db.test_dictionary', 'value', id) FROM 2025_test_db.view_table;
SELECT * FROM test_view_different_db;

DROP TABLE 2025_test_db.test_table;
DROP DICTIONARY 2025_test_db.test_dictionary;
DROP TABLE 2025_test_db.view_table;

DROP VIEW test_view_different_db;

DROP DATABASE 2025_test_db;
40 tests/queries/0_stateless/02025_having_filter_column.sql (Normal file)
@@ -0,0 +1,40 @@
drop table if exists test;

-- #29010
CREATE TABLE test
(
    d DateTime,
    a String,
    b UInt64
)
ENGINE = MergeTree
PARTITION BY toDate(d)
ORDER BY d;

SELECT *
FROM (
    SELECT
        a,
        max((d, b)).2 AS value
    FROM test
    GROUP BY rollup(a)
)
WHERE a <> '';

-- the same query, but after syntax optimization
SELECT
    a,
    value
FROM
(
    SELECT
        a,
        max((d, b)).2 AS value
    FROM test
    GROUP BY a
        WITH ROLLUP
    HAVING a != ''
)
WHERE a != '';

drop table if exists test;