Merge branch 'master' of https://github.com/ClickHouse/ClickHouse into pg-ch-replica

kssenii 2021-02-22 12:39:35 +00:00
commit 4cdb55babe
113 changed files with 827 additions and 413 deletions

.github/codecov.yml vendored
View File

@@ -1,5 +1,5 @@
codecov:
max_report_age: off
max_report_age: "off"
strict_yaml_branch: "master"
ignore:
@@ -14,4 +14,4 @@ ignore:
comment: false
github_checks:
annotations: false
annotations: false

View File

@@ -8,9 +8,9 @@
name: Docker Container Scan (clickhouse-server)
on:
"on":
pull_request:
paths:
paths:
- docker/server/Dockerfile
- .github/workflows/anchore-analysis.yml
schedule:
@@ -20,20 +20,20 @@ jobs:
Anchore-Build-Scan:
runs-on: ubuntu-latest
steps:
- name: Checkout the code
uses: actions/checkout@v2
- name: Build the Docker image
run: |
cd docker/server
perl -pi -e 's|=\$version||g' Dockerfile
docker build . --file Dockerfile --tag localbuild/testimage:latest
- name: Run the local Anchore scan action itself with GitHub Advanced Security code scanning integration enabled
uses: anchore/scan-action@v2
id: scan
with:
image: "localbuild/testimage:latest"
acs-report-enable: true
- name: Upload Anchore Scan Report
uses: github/codeql-action/upload-sarif@v1
with:
sarif_file: ${{ steps.scan.outputs.sarif }}
- name: Checkout the code
uses: actions/checkout@v2
- name: Build the Docker image
run: |
cd docker/server
perl -pi -e 's|=\$version||g' Dockerfile
docker build . --file Dockerfile --tag localbuild/testimage:latest
- name: Run the local Anchore scan action itself with GitHub Advanced Security code scanning integration enabled
uses: anchore/scan-action@v2
id: scan
with:
image: "localbuild/testimage:latest"
acs-report-enable: true
- name: Upload Anchore Scan Report
uses: github/codeql-action/upload-sarif@v1
with:
sarif_file: ${{ steps.scan.outputs.sarif }}

View File

@@ -14,14 +14,14 @@ handlers:
# The trigger for creating the Yandex.Tracker issue. When the specified event occurs, it transfers PR data to Yandex.Tracker.
github:pullRequest:labeled:
data:
# The Yandex.Tracker queue to create the issue in. Each issue in Tracker belongs to one of the project queues.
queue: CLICKHOUSEDOCS
# The issue title.
summary: '[Potato] Pull Request #{{pullRequest.number}}'
# The issue description.
description: >
# The Yandex.Tracker queue to create the issue in. Each issue in Tracker belongs to one of the project queues.
queue: CLICKHOUSEDOCS
# The issue title.
summary: '[Potato] Pull Request #{{pullRequest.number}}'
# The issue description.
description: >
{{pullRequest.description}}
Ссылка на Pull Request: {{pullRequest.webUrl}}
# The condition for creating the Yandex.Tracker issue.
condition: eventPayload.labels.filter(label => ['pr-feature'].includes(label.name)).length
# The condition for creating the Yandex.Tracker issue.
condition: eventPayload.labels.filter(label => ['pr-feature'].includes(label.name)).length

.yamllint Normal file
View File

@@ -0,0 +1,15 @@
# vi: ft=yaml
extends: default
rules:
indentation:
level: warning
indent-sequences: consistent
line-length:
# there are some bash -c "", so this is OK
max: 300
level: warning
comments:
min-spaces-from-content: 1
document-start:
present: false

View File

@@ -7,6 +7,7 @@
#include <ctime>
#include <string>
#define DATE_LUT_MAX (0xFFFFFFFFU - 86400)
#define DATE_LUT_MAX_DAY_NUM (0xFFFFFFFFU / 86400)
/// Table size is bigger than DATE_LUT_MAX_DAY_NUM to fill all indices within UInt16 range: this allows to remove extra check.
@@ -249,7 +250,7 @@ public:
{
DayNum index = findIndex(t);
if (unlikely(index == 0))
if (unlikely(index == 0 || index > DATE_LUT_MAX_DAY_NUM))
return t + offset_at_start_of_epoch;
time_t res = t - lut[index].date;
@@ -264,18 +265,18 @@ public:
{
DayNum index = findIndex(t);
/// If it is not 1970 year (findIndex found nothing appropriate),
/// than limit number of hours to avoid insane results like 1970-01-01 89:28:15
if (unlikely(index == 0))
/// If it is overflow case,
/// then limit number of hours to avoid insane results like 1970-01-01 89:28:15
if (unlikely(index == 0 || index > DATE_LUT_MAX_DAY_NUM))
return static_cast<unsigned>((t + offset_at_start_of_epoch) / 3600) % 24;
time_t res = t - lut[index].date;
time_t time = t - lut[index].date;
/// Data is cleaned to avoid possibility of underflow.
if (res >= lut[index].time_at_offset_change)
res += lut[index].amount_of_offset_change;
if (time >= lut[index].time_at_offset_change)
time += lut[index].amount_of_offset_change;
return res / 3600;
unsigned res = time / 3600;
return res <= 23 ? res : 0;
}
/** Calculating offset from UTC in seconds.
@@ -314,12 +315,12 @@ public:
* each minute, with added or subtracted leap second, spans exactly 60 unix timestamps.
*/
inline unsigned toSecond(time_t t) const { return t % 60; }
inline unsigned toSecond(time_t t) const { return UInt32(t) % 60; }
inline unsigned toMinute(time_t t) const
{
if (offset_is_whole_number_of_hours_everytime)
return (t / 60) % 60;
return (UInt32(t) / 60) % 60;
UInt32 date = find(t).date;
return (UInt32(t) - date) / 60 % 60;
@@ -555,9 +556,7 @@ public:
}
}
/*
* check and change mode to effective
*/
/// Check and change mode to effective.
inline UInt8 check_week_mode(UInt8 mode) const
{
UInt8 week_format = (mode & 7);
@@ -566,10 +565,9 @@ public:
return week_format;
}
/*
* Calc weekday from d
* Returns 0 for monday, 1 for tuesday ...
*/
/** Calculate weekday from d.
* Returns 0 for monday, 1 for tuesday...
*/
inline unsigned calc_weekday(DayNum d, bool sunday_first_day_of_week) const
{
if (!sunday_first_day_of_week)
@@ -578,7 +576,7 @@ public:
return toDayOfWeek(DayNum(d + 1)) - 1;
}
/* Calc days in one year. */
/// Calculate days in one year.
inline unsigned calc_days_in_year(UInt16 year) const
{
return ((year & 3) == 0 && (year % 100 || (year % 400 == 0 && year)) ? 366 : 365);
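
The toHour() change above does two things: lookups whose day index is 0 or beyond DATE_LUT_MAX_DAY_NUM fall back to plain modular arithmetic instead of reading past the table, and the final result is clamped into 0..23. A minimal standalone sketch of that behaviour follows; the LUT lookup and the offset fields are stubbed out as plain parameters, which is an assumption made only for illustration.

#include <cstdint>
#include <iostream>

static constexpr uint32_t DATE_LUT_MAX_DAY_NUM = 0xFFFFFFFFU / 86400;

unsigned toHourSketch(int64_t t, uint32_t index, int64_t day_start, int64_t offset_at_start_of_epoch)
{
    // Overflow guard from the patch: index 0 (before 1970) or an index past
    // the LUT falls back to a plain offset computation.
    if (index == 0 || index > DATE_LUT_MAX_DAY_NUM)
        return static_cast<unsigned>((t + offset_at_start_of_epoch) / 3600) % 24;

    int64_t time = t - day_start; // seconds since the day's start
    unsigned res = static_cast<unsigned>(time / 3600);
    // Final clamp from the patch: never report an hour outside 0..23.
    return res <= 23 ? res : 0;
}

int main()
{
    // Index past the table: falls back to modular arithmetic, prints 2.
    std::cout << toHourSketch(86400LL * 60000 + 7200, DATE_LUT_MAX_DAY_NUM + 1, 0, 0) << '\n';
}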

View File

@@ -6,6 +6,25 @@
namespace common
{
/// Multiply and ignore overflow.
template <typename T1, typename T2>
inline auto NO_SANITIZE_UNDEFINED mulIgnoreOverflow(T1 x, T2 y)
{
return x * y;
}
template <typename T1, typename T2>
inline auto NO_SANITIZE_UNDEFINED addIgnoreOverflow(T1 x, T2 y)
{
return x + y;
}
template <typename T1, typename T2>
inline auto NO_SANITIZE_UNDEFINED subIgnoreOverflow(T1 x, T2 y)
{
return x - y;
}
template <typename T>
inline bool addOverflow(T x, T y, T & res)
{
@@ -35,14 +54,14 @@ namespace common
{
static constexpr __int128 min_int128 = minInt128();
static constexpr __int128 max_int128 = maxInt128();
res = x + y;
res = addIgnoreOverflow(x, y);
return (y > 0 && x > max_int128 - y) || (y < 0 && x < min_int128 - y);
}
template <>
inline bool addOverflow(wInt256 x, wInt256 y, wInt256 & res)
{
res = x + y;
res = addIgnoreOverflow(x, y);
return (y > 0 && x > std::numeric_limits<wInt256>::max() - y) ||
(y < 0 && x < std::numeric_limits<wInt256>::min() - y);
}
@@ -50,7 +69,7 @@ namespace common
template <>
inline bool addOverflow(wUInt256 x, wUInt256 y, wUInt256 & res)
{
res = x + y;
res = addIgnoreOverflow(x, y);
return x > std::numeric_limits<wUInt256>::max() - y;
}
@@ -83,14 +102,14 @@ namespace common
{
static constexpr __int128 min_int128 = minInt128();
static constexpr __int128 max_int128 = maxInt128();
res = x - y;
res = subIgnoreOverflow(x, y);
return (y < 0 && x > max_int128 + y) || (y > 0 && x < min_int128 + y);
}
template <>
inline bool subOverflow(wInt256 x, wInt256 y, wInt256 & res)
{
res = x - y;
res = subIgnoreOverflow(x, y);
return (y < 0 && x > std::numeric_limits<wInt256>::max() + y) ||
(y > 0 && x < std::numeric_limits<wInt256>::min() + y);
}
@@ -98,7 +117,7 @@ namespace common
template <>
inline bool subOverflow(wUInt256 x, wUInt256 y, wUInt256 & res)
{
res = x - y;
res = subIgnoreOverflow(x, y);
return x < y;
}
@@ -129,40 +148,33 @@ namespace common
template <>
inline bool mulOverflow(__int128 x, __int128 y, __int128 & res)
{
res = static_cast<unsigned __int128>(x) * static_cast<unsigned __int128>(y); /// Avoid signed integer overflow.
res = mulIgnoreOverflow(x, y);
if (!x || !y)
return false;
unsigned __int128 a = (x > 0) ? x : -x;
unsigned __int128 b = (y > 0) ? y : -y;
return (a * b) / b != a;
return mulIgnoreOverflow(a, b) / b != a;
}
template <>
inline bool mulOverflow(wInt256 x, wInt256 y, wInt256 & res)
{
res = x * y;
res = mulIgnoreOverflow(x, y);
if (!x || !y)
return false;
wInt256 a = (x > 0) ? x : -x;
wInt256 b = (y > 0) ? y : -y;
return (a * b) / b != a;
return mulIgnoreOverflow(a, b) / b != a;
}
template <>
inline bool mulOverflow(wUInt256 x, wUInt256 y, wUInt256 & res)
{
res = x * y;
res = mulIgnoreOverflow(x, y);
if (!x || !y)
return false;
return (x * y) / y != x;
}
/// Multiply and ignore overflow.
template <typename T1, typename T2>
inline auto NO_SANITIZE_UNDEFINED mulIgnoreOverflow(T1 x, T2 y)
{
return x * y;
return res / y != x;
}
}
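
The pattern above is consistent: each overflow check first computes the possibly wrapped result through an *IgnoreOverflow helper, whose NO_SANITIZE_UNDEFINED attribute tells UBSan that the wrap-around is intentional, then detects overflow by dividing the product back. A condensed sketch of the multiply case, with a local stand-in for the NO_SANITIZE_UNDEFINED macro (an assumption; the real definition lives elsewhere in the codebase):

#include <iostream>

#if defined(__clang__)
#    define NO_SANITIZE_UNDEFINED __attribute__((no_sanitize("undefined")))
#else
#    define NO_SANITIZE_UNDEFINED
#endif

template <typename T1, typename T2>
inline auto NO_SANITIZE_UNDEFINED mulIgnoreOverflowSketch(T1 x, T2 y)
{
    return x * y; // may wrap; deliberately exempt from UBSan
}

inline bool mulOverflowSketch(__int128 x, __int128 y, __int128 & res)
{
    res = mulIgnoreOverflowSketch(x, y);
    if (!x || !y)
        return false;
    unsigned __int128 a = (x > 0) ? x : -x;
    unsigned __int128 b = (y > 0) ? y : -y;
    // If dividing the (possibly wrapped) product does not recover `a`,
    // the multiplication overflowed.
    return mulIgnoreOverflowSketch(a, b) / b != a;
}

int main()
{
    __int128 res;
    // 2^64 * 2^64 wraps in __int128, so overflow is reported: prints 1.
    std::cout << mulOverflowSketch(__int128(1) << 64, __int128(1) << 64, res) << '\n';
}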

View File

@@ -4,4 +4,4 @@ services:
image: cassandra
restart: always
ports:
- 9043:9042
- 9043:9042

View File

@@ -5,6 +5,6 @@ services:
hostname: hdfs1
restart: always
ports:
- 50075:50075
- 50070:50070
- 50075:50075
- 50070:50070
entrypoint: /etc/bootstrap.sh -d

View File

@@ -5,42 +5,42 @@ services:
image: zookeeper:3.4.9
hostname: kafka_zookeeper
environment:
ZOO_MY_ID: 1
ZOO_PORT: 2181
ZOO_SERVERS: server.1=kafka_zookeeper:2888:3888
ZOO_MY_ID: 1
ZOO_PORT: 2181
ZOO_SERVERS: server.1=kafka_zookeeper:2888:3888
security_opt:
- label:disable
- label:disable
kafka1:
image: confluentinc/cp-kafka:5.2.0
hostname: kafka1
ports:
- "9092:9092"
- "9092:9092"
environment:
KAFKA_ADVERTISED_LISTENERS: INSIDE://localhost:9092,OUTSIDE://kafka1:19092
KAFKA_LISTENERS: INSIDE://:9092,OUTSIDE://:19092
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INSIDE:PLAINTEXT,OUTSIDE:PLAINTEXT
KAFKA_INTER_BROKER_LISTENER_NAME: INSIDE
KAFKA_BROKER_ID: 1
KAFKA_ZOOKEEPER_CONNECT: "kafka_zookeeper:2181"
KAFKA_LOG4J_LOGGERS: "kafka.controller=INFO,kafka.producer.async.DefaultEventHandler=INFO,state.change.logger=INFO"
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
KAFKA_ADVERTISED_LISTENERS: INSIDE://localhost:9092,OUTSIDE://kafka1:19092
KAFKA_LISTENERS: INSIDE://:9092,OUTSIDE://:19092
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INSIDE:PLAINTEXT,OUTSIDE:PLAINTEXT
KAFKA_INTER_BROKER_LISTENER_NAME: INSIDE
KAFKA_BROKER_ID: 1
KAFKA_ZOOKEEPER_CONNECT: "kafka_zookeeper:2181"
KAFKA_LOG4J_LOGGERS: "kafka.controller=INFO,kafka.producer.async.DefaultEventHandler=INFO,state.change.logger=INFO"
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
depends_on:
- kafka_zookeeper
- kafka_zookeeper
security_opt:
- label:disable
- label:disable
schema-registry:
image: confluentinc/cp-schema-registry:5.2.0
hostname: schema-registry
ports:
- "8081:8081"
- "8081:8081"
environment:
SCHEMA_REGISTRY_HOST_NAME: schema-registry
SCHEMA_REGISTRY_KAFKASTORE_SECURITY_PROTOCOL: PLAINTEXT
SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: PLAINTEXT://kafka1:19092
SCHEMA_REGISTRY_HOST_NAME: schema-registry
SCHEMA_REGISTRY_KAFKASTORE_SECURITY_PROTOCOL: PLAINTEXT
SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: PLAINTEXT://kafka1:19092
depends_on:
- kafka_zookeeper
- kafka1
- kafka_zookeeper
- kafka1
security_opt:
- label:disable
- label:disable

View File

@@ -8,22 +8,22 @@ services:
hostname: kerberizedhdfs1
restart: always
volumes:
- ${KERBERIZED_HDFS_DIR}/../../hdfs_configs/bootstrap.sh:/etc/bootstrap.sh:ro
- ${KERBERIZED_HDFS_DIR}/secrets:/usr/local/hadoop/etc/hadoop/conf
- ${KERBERIZED_HDFS_DIR}/secrets/krb_long.conf:/etc/krb5.conf:ro
- ${KERBERIZED_HDFS_DIR}/../../hdfs_configs/bootstrap.sh:/etc/bootstrap.sh:ro
- ${KERBERIZED_HDFS_DIR}/secrets:/usr/local/hadoop/etc/hadoop/conf
- ${KERBERIZED_HDFS_DIR}/secrets/krb_long.conf:/etc/krb5.conf:ro
ports:
- 1006:1006
- 50070:50070
- 9010:9010
depends_on:
- hdfskerberos
- hdfskerberos
entrypoint: /etc/bootstrap.sh -d
hdfskerberos:
image: yandex/clickhouse-kerberos-kdc:${DOCKER_KERBEROS_KDC_TAG}
hostname: hdfskerberos
volumes:
- ${KERBERIZED_HDFS_DIR}/secrets:/tmp/keytab
- ${KERBERIZED_HDFS_DIR}/../../kerberos_image_config.sh:/config.sh
- /dev/urandom:/dev/random
- ${KERBERIZED_HDFS_DIR}/secrets:/tmp/keytab
- ${KERBERIZED_HDFS_DIR}/../../kerberos_image_config.sh:/config.sh
- /dev/urandom:/dev/random
ports: [88, 749]

View File

@@ -6,54 +6,54 @@ services:
# restart: always
hostname: kafka_kerberized_zookeeper
environment:
ZOOKEEPER_SERVER_ID: 1
ZOOKEEPER_CLIENT_PORT: 2181
ZOOKEEPER_SERVERS: "kafka_kerberized_zookeeper:2888:3888"
KAFKA_OPTS: "-Djava.security.auth.login.config=/etc/kafka/secrets/zookeeper_jaas.conf -Djava.security.krb5.conf=/etc/kafka/secrets/krb.conf -Dzookeeper.authProvider.1=org.apache.zookeeper.server.auth.SASLAuthenticationProvider -Dsun.security.krb5.debug=true"
ZOOKEEPER_SERVER_ID: 1
ZOOKEEPER_CLIENT_PORT: 2181
ZOOKEEPER_SERVERS: "kafka_kerberized_zookeeper:2888:3888"
KAFKA_OPTS: "-Djava.security.auth.login.config=/etc/kafka/secrets/zookeeper_jaas.conf -Djava.security.krb5.conf=/etc/kafka/secrets/krb.conf -Dzookeeper.authProvider.1=org.apache.zookeeper.server.auth.SASLAuthenticationProvider -Dsun.security.krb5.debug=true"
volumes:
- ${KERBERIZED_KAFKA_DIR}/secrets:/etc/kafka/secrets
- /dev/urandom:/dev/random
- ${KERBERIZED_KAFKA_DIR}/secrets:/etc/kafka/secrets
- /dev/urandom:/dev/random
depends_on:
- kafka_kerberos
- kafka_kerberos
security_opt:
- label:disable
- label:disable
kerberized_kafka1:
image: confluentinc/cp-kafka:5.2.0
# restart: always
hostname: kerberized_kafka1
ports:
- "9092:9092"
- "9093:9093"
- "9092:9092"
- "9093:9093"
environment:
KAFKA_LISTENERS: OUTSIDE://:19092,UNSECURED_OUTSIDE://:19093,UNSECURED_INSIDE://:9093
KAFKA_ADVERTISED_LISTENERS: OUTSIDE://kerberized_kafka1:19092,UNSECURED_OUTSIDE://kerberized_kafka1:19093,UNSECURED_INSIDE://localhost:9093
# KAFKA_LISTENERS: INSIDE://kerberized_kafka1:9092,OUTSIDE://kerberized_kafka1:19092
# KAFKA_ADVERTISED_LISTENERS: INSIDE://localhost:9092,OUTSIDE://kerberized_kafka1:19092
KAFKA_SASL_MECHANISM_INTER_BROKER_PROTOCOL: GSSAPI
KAFKA_SASL_ENABLED_MECHANISMS: GSSAPI
KAFKA_SASL_KERBEROS_SERVICE_NAME: kafka
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: OUTSIDE:SASL_PLAINTEXT,UNSECURED_OUTSIDE:PLAINTEXT,UNSECURED_INSIDE:PLAINTEXT,
KAFKA_INTER_BROKER_LISTENER_NAME: OUTSIDE
KAFKA_BROKER_ID: 1
KAFKA_ZOOKEEPER_CONNECT: "kafka_kerberized_zookeeper:2181"
KAFKA_LOG4J_LOGGERS: "kafka.controller=INFO,kafka.producer.async.DefaultEventHandler=INFO,state.change.logger=INFO"
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
KAFKA_OPTS: "-Djava.security.auth.login.config=/etc/kafka/secrets/broker_jaas.conf -Djava.security.krb5.conf=/etc/kafka/secrets/krb.conf -Dsun.security.krb5.debug=true"
KAFKA_LISTENERS: OUTSIDE://:19092,UNSECURED_OUTSIDE://:19093,UNSECURED_INSIDE://:9093
KAFKA_ADVERTISED_LISTENERS: OUTSIDE://kerberized_kafka1:19092,UNSECURED_OUTSIDE://kerberized_kafka1:19093,UNSECURED_INSIDE://localhost:9093
# KAFKA_LISTENERS: INSIDE://kerberized_kafka1:9092,OUTSIDE://kerberized_kafka1:19092
# KAFKA_ADVERTISED_LISTENERS: INSIDE://localhost:9092,OUTSIDE://kerberized_kafka1:19092
KAFKA_SASL_MECHANISM_INTER_BROKER_PROTOCOL: GSSAPI
KAFKA_SASL_ENABLED_MECHANISMS: GSSAPI
KAFKA_SASL_KERBEROS_SERVICE_NAME: kafka
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: OUTSIDE:SASL_PLAINTEXT,UNSECURED_OUTSIDE:PLAINTEXT,UNSECURED_INSIDE:PLAINTEXT,
KAFKA_INTER_BROKER_LISTENER_NAME: OUTSIDE
KAFKA_BROKER_ID: 1
KAFKA_ZOOKEEPER_CONNECT: "kafka_kerberized_zookeeper:2181"
KAFKA_LOG4J_LOGGERS: "kafka.controller=INFO,kafka.producer.async.DefaultEventHandler=INFO,state.change.logger=INFO"
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
KAFKA_OPTS: "-Djava.security.auth.login.config=/etc/kafka/secrets/broker_jaas.conf -Djava.security.krb5.conf=/etc/kafka/secrets/krb.conf -Dsun.security.krb5.debug=true"
volumes:
- ${KERBERIZED_KAFKA_DIR}/secrets:/etc/kafka/secrets
- /dev/urandom:/dev/random
- ${KERBERIZED_KAFKA_DIR}/secrets:/etc/kafka/secrets
- /dev/urandom:/dev/random
depends_on:
- kafka_kerberized_zookeeper
- kafka_kerberos
- kafka_kerberized_zookeeper
- kafka_kerberos
security_opt:
- label:disable
- label:disable
kafka_kerberos:
image: yandex/clickhouse-kerberos-kdc:${DOCKER_KERBEROS_KDC_TAG:-latest}
hostname: kafka_kerberos
volumes:
- ${KERBERIZED_KAFKA_DIR}/secrets:/tmp/keytab
- ${KERBERIZED_KAFKA_DIR}/../../kerberos_image_config.sh:/config.sh
- /dev/urandom:/dev/random
- ${KERBERIZED_KAFKA_DIR}/secrets:/tmp/keytab
- ${KERBERIZED_KAFKA_DIR}/../../kerberos_image_config.sh:/config.sh
- /dev/urandom:/dev/random
ports: [88, 749]

View File

@@ -7,5 +7,5 @@ services:
MONGO_INITDB_ROOT_USERNAME: root
MONGO_INITDB_ROOT_PASSWORD: clickhouse
ports:
- 27018:27017
- 27018:27017
command: --profile=2 --verbose

View File

@@ -6,5 +6,5 @@ services:
environment:
MYSQL_ROOT_PASSWORD: clickhouse
ports:
- 3308:3306
- 3308:3306
command: --server_id=100 --log-bin='mysql-bin-1.log' --default-time-zone='+3:00' --gtid-mode="ON" --enforce-gtid-consistency

View File

@@ -6,5 +6,9 @@ services:
environment:
MYSQL_ROOT_PASSWORD: clickhouse
ports:
- 3308:3306
command: --server_id=100 --log-bin='mysql-bin-1.log' --default-time-zone='+3:00' --gtid-mode="ON" --enforce-gtid-consistency
- 3308:3306
command: --server_id=100 --log-bin='mysql-bin-1.log'
--default-time-zone='+3:00'
--gtid-mode="ON"
--enforce-gtid-consistency
--log-error-verbosity=3

View File

@@ -6,5 +6,10 @@ services:
environment:
MYSQL_ROOT_PASSWORD: clickhouse
ports:
- 33308:3306
command: --server_id=100 --log-bin='mysql-bin-1.log' --default_authentication_plugin='mysql_native_password' --default-time-zone='+3:00' --gtid-mode="ON" --enforce-gtid-consistency
- 33308:3306
command: --server_id=100 --log-bin='mysql-bin-1.log'
--default_authentication_plugin='mysql_native_password'
--default-time-zone='+3:00'
--gtid-mode="ON"
--enforce-gtid-consistency
--log-error-verbosity=3

View File

@@ -7,7 +7,7 @@ services:
MYSQL_ALLOW_EMPTY_PASSWORD: 1
command: --federated --socket /var/run/mysqld/mysqld.sock
healthcheck:
test: ["CMD", "mysqladmin" ,"ping", "-h", "localhost"]
test: ["CMD", "mysqladmin", "ping", "-h", "localhost"]
interval: 1s
timeout: 2s
retries: 100

View File

@@ -11,4 +11,4 @@ services:
ports:
- "5433:5433"
environment:
POSTGRES_HOST_AUTH_METHOD: "trust"
POSTGRES_HOST_AUTH_METHOD: "trust"

View File

@@ -6,9 +6,9 @@ services:
environment:
POSTGRES_PASSWORD: mysecretpassword
ports:
- 5432:5432
- 5432:5432
command: [ "postgres", "-c", "wal_level=logical", "-c", "max_replication_slots=2"]
networks:
default:
aliases:
- postgre-sql.local
default:
aliases:
- postgre-sql.local

View File

@@ -4,5 +4,5 @@ services:
image: redis
restart: always
ports:
- 6380:6379
- 6380:6379
command: redis-server --requirepass "clickhouse" --databases 32

View File

@@ -97,6 +97,7 @@ function configure
rm -r right/db ||:
rm -r db0/preprocessed_configs ||:
rm -r db0/{data,metadata}/system ||:
rm db0/status ||:
cp -al db0/ left/db/
cp -al db0/ right/db/
}

View File

@@ -1,7 +1,14 @@
# docker build -t yandex/clickhouse-style-test .
FROM ubuntu:20.04
RUN apt-get update && env DEBIAN_FRONTEND=noninteractive apt-get install --yes shellcheck libxml2-utils git python3-pip pylint && pip3 install codespell
RUN apt-get update && env DEBIAN_FRONTEND=noninteractive apt-get install --yes \
shellcheck \
libxml2-utils \
git \
python3-pip \
pylint \
yamllint \
&& pip3 install codespell
# For |& syntax

View File

@@ -139,7 +139,7 @@ You can assign a quotas set for the user. For a detailed description of quotas c
### user_name/databases {#user-namedatabases}
In this section, you can you can limit rows that are returned by ClickHouse for `SELECT` queries made by the current user, thus implementing basic row-level security.
In this section, you can limit rows that are returned by ClickHouse for `SELECT` queries made by the current user, thus implementing basic row-level security.
**Example**

View File

@@ -2659,3 +2659,23 @@ Result:
Note that this setting influences [Materialized view](../../sql-reference/statements/create/view.md#materialized) and [MaterializeMySQL](../../engines/database-engines/materialize-mysql.md) behaviour.
[Original article](https://clickhouse.tech/docs/en/operations/settings/settings/) <!-- hide -->
## engine_file_empty_if_not_exists {#engine-file-empty_if-not-exists}
Allows selecting data from a File engine table when the underlying file does not exist.
Possible values:
- 0 — `SELECT` throws exception.
- 1 — `SELECT` returns empty result.
Default value: `0`.
## engine_file_truncate_on_insert {#engine-file-truncate-on-insert}
Enables or disables truncating the file before `INSERT` in File engine tables.
Possible values:
- 0 — Disabled.
- 1 — Enabled.
Default value: `0`.

View File

@@ -23,7 +23,7 @@ LowCardinality(data_type)
The efficiency of the `LowCardinality` data type depends on the diversity of the data. If a dictionary contains fewer than 10,000 distinct values, ClickHouse mostly shows higher efficiency of reading and storing data. If a dictionary contains more than 100,000 distinct values, ClickHouse can perform worse than with ordinary data types.
When working with strings, the use of `LowCardinality` instead of [Enum](enum.md). `LowCardinality` provides more flexibility in use and often achieves the same or higher efficiency.
When working with strings, use `LowCardinality` instead of [Enum](enum.md). `LowCardinality` provides more flexibility in use and often achieves the same or higher efficiency.
## Example

View File

@@ -30,6 +30,10 @@ namespace ErrorCodes
extern const int LOGICAL_ERROR;
}
const String & getAggregateFunctionCanonicalNameIfAny(const String & name)
{
return AggregateFunctionFactory::instance().getCanonicalNameIfAny(name);
}
void AggregateFunctionFactory::registerFunction(const String & name, Value creator_with_properties, CaseSensitiveness case_sensitiveness)
{
@@ -41,10 +45,14 @@ void AggregateFunctionFactory::registerFunction(const String & name, Value creat
throw Exception("AggregateFunctionFactory: the aggregate function name '" + name + "' is not unique",
ErrorCodes::LOGICAL_ERROR);
if (case_sensitiveness == CaseInsensitive
&& !case_insensitive_aggregate_functions.emplace(Poco::toLower(name), creator_with_properties).second)
throw Exception("AggregateFunctionFactory: the case insensitive aggregate function name '" + name + "' is not unique",
ErrorCodes::LOGICAL_ERROR);
if (case_sensitiveness == CaseInsensitive)
{
auto key = Poco::toLower(name);
if (!case_insensitive_aggregate_functions.emplace(key, creator_with_properties).second)
throw Exception("AggregateFunctionFactory: the case insensitive aggregate function name '" + name + "' is not unique",
ErrorCodes::LOGICAL_ERROR);
case_insensitive_name_mapping[key] = name;
}
}
static DataTypes convertLowCardinalityTypesToNested(const DataTypes & types)

View File

@@ -75,28 +75,8 @@ void ColumnAggregateFunction::set(const AggregateFunctionPtr & func_)
ColumnAggregateFunction::~ColumnAggregateFunction()
{
if (!func->hasTrivialDestructor() && !src)
{
if (copiedDataInfo.empty())
{
for (auto * val : data)
{
func->destroy(val);
}
}
else
{
size_t pos;
for (Map::iterator it = copiedDataInfo.begin(), it_end = copiedDataInfo.end(); it != it_end; ++it)
{
pos = it->getValue().second;
if (data[pos] != nullptr)
{
func->destroy(data[pos]);
data[pos] = nullptr;
}
}
}
}
for (auto * val : data)
func->destroy(val);
}
void ColumnAggregateFunction::addArena(ConstArenaPtr arena_)
@@ -475,37 +455,14 @@ void ColumnAggregateFunction::insertFrom(const IColumn & from, size_t n)
/// (only as a whole, see comment above).
ensureOwnership();
insertDefault();
insertCopyFrom(assert_cast<const ColumnAggregateFunction &>(from).data[n]);
insertMergeFrom(from, n);
}
void ColumnAggregateFunction::insertFrom(ConstAggregateDataPtr place)
{
ensureOwnership();
insertDefault();
insertCopyFrom(place);
}
void ColumnAggregateFunction::insertCopyFrom(ConstAggregateDataPtr place)
{
Map::LookupResult result;
result = copiedDataInfo.find(place);
if (result == nullptr)
{
copiedDataInfo[place] = data.size()-1;
func->merge(data.back(), place, &createOrGetArena());
}
else
{
size_t pos = result->getValue().second;
if (pos != data.size() - 1)
{
data[data.size() - 1] = data[pos];
}
else /// insert same data to same pos, merge them.
{
func->merge(data.back(), place, &createOrGetArena());
}
}
insertMergeFrom(place);
}
void ColumnAggregateFunction::insertMergeFrom(ConstAggregateDataPtr place)
@@ -740,4 +697,5 @@ MutableColumnPtr ColumnAggregateFunction::cloneResized(size_t size) const
return cloned_col;
}
}
}

View File

@@ -13,8 +13,6 @@
#include <Functions/FunctionHelpers.h>
#include <Common/HashTable/HashMap.h>
namespace DB
{
@@ -84,17 +82,6 @@ private:
/// Name of the type to distinguish different aggregation states.
String type_string;
/// MergedData records, used to avoid duplicated data copy.
///key: src pointer, val: pos in current column.
using Map = HashMap<
ConstAggregateDataPtr,
size_t,
DefaultHash<ConstAggregateDataPtr>,
HashTableGrower<3>,
HashTableAllocatorWithStackMemory<sizeof(std::pair<ConstAggregateDataPtr, size_t>) * (1 << 3)>>;
Map copiedDataInfo;
ColumnAggregateFunction() {}
/// Create a new column that has another column as a source.
@@ -153,8 +140,6 @@ public:
void insertFrom(ConstAggregateDataPtr place);
void insertCopyFrom(ConstAggregateDataPtr place);
/// Merge state at last row with specified state in another column.
void insertMergeFrom(ConstAggregateDataPtr place);

View File

@@ -69,16 +69,11 @@ namespace ZeroTraits
{
template <typename T>
inline bool check(const T x) { return x == 0; }
bool check(const T x) { return x == 0; }
template <typename T>
inline void set(T & x) { x = 0; }
void set(T & x) { x = 0; }
template <>
inline bool check(const char * x) { return x == nullptr; }
template <>
inline void set(const char *& x){ x = nullptr; }
}

View File

@@ -35,6 +35,8 @@ protected:
return name;
}
std::unordered_map<String, String> case_insensitive_name_mapping;
public:
/// For compatibility with SQL, it's possible to specify that certain function name is case insensitive.
enum CaseSensitiveness
@@ -68,9 +70,12 @@ public:
factory_name + ": the alias name '" + alias_name + "' is already registered as real name", ErrorCodes::LOGICAL_ERROR);
if (case_sensitiveness == CaseInsensitive)
{
if (!case_insensitive_aliases.emplace(alias_name_lowercase, real_dict_name).second)
throw Exception(
factory_name + ": case insensitive alias name '" + alias_name + "' is not unique", ErrorCodes::LOGICAL_ERROR);
case_insensitive_name_mapping[alias_name_lowercase] = real_name;
}
if (!aliases.emplace(alias_name, real_dict_name).second)
throw Exception(factory_name + ": alias name '" + alias_name + "' is not unique", ErrorCodes::LOGICAL_ERROR);
@@ -111,6 +116,15 @@ public:
return getMap().count(name) || getCaseInsensitiveMap().count(name) || isAlias(name);
}
/// Return the canonical name (the name used in registration) if it's different from `name`.
const String & getCanonicalNameIfAny(const String & name) const
{
auto it = case_insensitive_name_mapping.find(Poco::toLower(name));
if (it != case_insensitive_name_mapping.end())
return it->second;
return name;
}
virtual ~IFactoryWithAliases() override {}
private:
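
getCanonicalNameIfAny() works against the case_insensitive_name_mapping that registerAlias() above, and the factory registerFunction() overloads elsewhere in this commit, now populate: every case-insensitive registration remembers its original spelling under a lowercased key. A toy model of the mechanism, using plain ASCII lowering in place of Poco::toLower (an assumption to keep the sketch self-contained); "round" is used because this commit registers it case-insensitively:

#include <algorithm>
#include <cctype>
#include <iostream>
#include <string>
#include <unordered_map>

static std::string toLowerAscii(std::string s)
{
    std::transform(s.begin(), s.end(), s.begin(),
                   [](unsigned char c) { return std::tolower(c); });
    return s;
}

int main()
{
    std::unordered_map<std::string, std::string> case_insensitive_name_mapping;

    // Registration side: remember the canonical spelling under the lowered key.
    case_insensitive_name_mapping[toLowerAscii("round")] = "round";

    // Lookup side: any casing maps back to the registered name.
    const std::string query_name = "ROUND";
    auto it = case_insensitive_name_mapping.find(toLowerAscii(query_name));
    std::cout << (it != case_insensitive_name_mapping.end() ? it->second : query_name)
              << '\n'; // prints "round"
}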

View File

@@ -78,7 +78,7 @@ public:
static bool compare(A a, B b, UInt32 scale_a, UInt32 scale_b)
{
static const UInt32 max_scale = DecimalUtils::maxPrecision<Decimal256>();
static const UInt32 max_scale = DecimalUtils::max_precision<Decimal256>;
if (scale_a > max_scale || scale_b > max_scale)
throw Exception("Bad scale of decimal field", ErrorCodes::DECIMAL_OVERFLOW);

View File

@@ -24,13 +24,13 @@ namespace ErrorCodes
namespace DecimalUtils
{
static constexpr size_t minPrecision() { return 1; }
template <typename T> static constexpr size_t maxPrecision() { return 0; }
template <> constexpr size_t maxPrecision<Decimal32>() { return 9; }
template <> constexpr size_t maxPrecision<Decimal64>() { return 18; }
template <> constexpr size_t maxPrecision<DateTime64>() { return 18; }
template <> constexpr size_t maxPrecision<Decimal128>() { return 38; }
template <> constexpr size_t maxPrecision<Decimal256>() { return 76; }
inline constexpr size_t min_precision = 1;
template <typename T> inline constexpr size_t max_precision = 0;
template <> inline constexpr size_t max_precision<Decimal32> = 9;
template <> inline constexpr size_t max_precision<Decimal64> = 18;
template <> inline constexpr size_t max_precision<DateTime64> = 18;
template <> inline constexpr size_t max_precision<Decimal128> = 38;
template <> inline constexpr size_t max_precision<Decimal256> = 76;
template <typename T>
inline auto scaleMultiplier(UInt32 scale)
@@ -87,7 +87,7 @@ struct DataTypeDecimalTrait
*
* Sign of `whole` controls sign of result: negative whole => negative result, positive whole => positive result.
* Sign of `fractional` is expected to be positive, otherwise result is undefined.
* If `scale` is too big (scale > maxPrecision<DecimalType::NativeType>), result is undefined.
* If `scale` is too big (scale > max_precision<DecimalType::NativeType>), result is undefined.
*/
template <typename DecimalType>
inline DecimalType decimalFromComponentsWithMultiplier(
@@ -287,21 +287,21 @@ inline auto binaryOpResult(const DecimalType<T> & tx, const DecimalType<U> & ty)
scale = (tx.getScale() > ty.getScale() ? tx.getScale() : ty.getScale());
if constexpr (sizeof(T) < sizeof(U))
return DataTypeDecimalTrait<U>(DecimalUtils::maxPrecision<U>(), scale);
return DataTypeDecimalTrait<U>(DecimalUtils::max_precision<U>, scale);
else
return DataTypeDecimalTrait<T>(DecimalUtils::maxPrecision<T>(), scale);
return DataTypeDecimalTrait<T>(DecimalUtils::max_precision<T>, scale);
}
template <bool, bool, typename T, typename U, template <typename> typename DecimalType>
inline const DataTypeDecimalTrait<T> binaryOpResult(const DecimalType<T> & tx, const DataTypeNumber<U> &)
{
return DataTypeDecimalTrait<T>(DecimalUtils::maxPrecision<T>(), tx.getScale());
return DataTypeDecimalTrait<T>(DecimalUtils::max_precision<T>, tx.getScale());
}
template <bool, bool, typename T, typename U, template <typename> typename DecimalType>
inline const DataTypeDecimalTrait<U> binaryOpResult(const DataTypeNumber<T> &, const DecimalType<U> & ty)
{
return DataTypeDecimalTrait<U>(DecimalUtils::maxPrecision<U>(), ty.getScale());
return DataTypeDecimalTrait<U>(DecimalUtils::max_precision<U>, ty.getScale());
}
}
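
The decimalFromComponentsWithMultiplier contract documented above deserves a worked example: the whole part carries the sign, and the non-negative fractional part is applied in the same direction. A simplified sketch with plain integers; the Decimal plumbing and the overflow-checked multiply are omitted, which is an assumption for illustration only.

#include <cstdint>
#include <iostream>

int64_t decimalFromComponentsSketch(int64_t whole, int64_t fractional, int64_t multiplier)
{
    // The fractional part inherits the sign of the whole part:
    // whole = -3, fractional = 50, multiplier = 100 (scale 2)  ->  -350, i.e. -3.50
    return whole * multiplier + (whole < 0 ? -fractional : fractional);
}

int main()
{
    std::cout << decimalFromComponentsSketch(-3, 50, 100) << '\n'; // -350
}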

View File

@@ -475,11 +475,11 @@ namespace MySQLReplication
{
const auto & dispatch = [](const size_t & precision, const size_t & scale, const auto & function) -> Field
{
if (precision <= DecimalUtils::maxPrecision<Decimal32>())
if (precision <= DecimalUtils::max_precision<Decimal32>)
return Field(function(precision, scale, Decimal32()));
else if (precision <= DecimalUtils::maxPrecision<Decimal64>())
else if (precision <= DecimalUtils::max_precision<Decimal64>)
return Field(function(precision, scale, Decimal64()));
else if (precision <= DecimalUtils::maxPrecision<Decimal128>())
else if (precision <= DecimalUtils::max_precision<Decimal128>)
return Field(function(precision, scale, Decimal128()));
return Field(function(precision, scale, Decimal256()));

View File

@@ -100,7 +100,7 @@ class IColumn;
M(UInt64, min_count_to_compile_expression, 3, "The number of identical expressions before they are JIT-compiled", 0) \
M(UInt64, group_by_two_level_threshold, 100000, "From what number of keys, a two-level aggregation starts. 0 - the threshold is not set.", 0) \
M(UInt64, group_by_two_level_threshold_bytes, 100000000, "From what size of the aggregation state in bytes, a two-level aggregation begins to be used. 0 - the threshold is not set. Two-level aggregation is used when at least one of the thresholds is triggered.", 0) \
M(Bool, distributed_aggregation_memory_efficient, false, "Is the memory-saving mode of distributed aggregation enabled.", 0) \
M(Bool, distributed_aggregation_memory_efficient, true, "Is the memory-saving mode of distributed aggregation enabled.", 0) \
M(UInt64, aggregation_memory_efficient_merge_threads, 0, "Number of threads to use for merge intermediate aggregation results in memory efficient mode. When bigger, then more memory is consumed. 0 means - same as 'max_threads'.", 0) \
\
M(UInt64, max_parallel_replicas, 1, "The maximum number of replicas of each shard used when the query is executed. For consistency (to get different parts of the same partition), this option only works for the specified sampling key. The lag of the replicas is not controlled.", 0) \
@@ -383,6 +383,7 @@ class IColumn;
M(Bool, optimize_if_chain_to_multiif, false, "Replace if(cond1, then1, if(cond2, ...)) chains to multiIf. Currently it's not beneficial for numeric types.", 0) \
M(Bool, optimize_if_transform_strings_to_enum, false, "Replaces string-type arguments in If and Transform to enum. Disabled by default because it could make an inconsistent change in a distributed query, leading to its failure.", 0) \
M(Bool, optimize_monotonous_functions_in_order_by, true, "Replace monotonous function with its argument in ORDER BY", 0) \
M(Bool, normalize_function_names, true, "Normalize function names to their canonical names", 0) \
M(Bool, allow_experimental_alter_materialized_view_structure, false, "Allow atomic alter on Materialized views. Work in progress.", 0) \
M(Bool, enable_early_constant_folding, true, "Enable query optimization where we analyze function and subqueries results and rewrite query if there're constants there", 0) \
M(Bool, deduplicate_blocks_in_dependent_materialized_views, false, "Should deduplicate blocks for materialized views if the block is not a duplicate for the table. Use true to always deduplicate in dependent tables.", 0) \
@@ -422,6 +423,8 @@ class IColumn;
M(Bool, optimize_rewrite_sum_if_to_count_if, true, "Rewrite sumIf() and sum(if()) functions to countIf() when logically equivalent", 0) \
M(UInt64, insert_shard_id, 0, "If non zero, when insert into a distributed table, the data will be inserted into the shard `insert_shard_id` synchronously. Possible values range from 1 to `shards_number` of corresponding distributed table", 0) \
M(Bool, allow_experimental_query_deduplication, false, "Allow sending parts' UUIDs for a query in order to deduplicate data parts if any", 0) \
M(Bool, engine_file_empty_if_not_exists, false, "Allows to select data from a file engine table without file", 0) \
M(Bool, engine_file_truncate_on_insert, false, "Enables or disables truncate before insert in file engine tables", 0) \
M(Bool, allow_experimental_database_replicated, false, "Allow to create databases with Replicated engine", 0) \
M(UInt64, database_replicated_initial_query_timeout_sec, 300, "How long initial DDL query should wait for Replicated database to process previous DDL queue entries", 0) \
M(Bool, database_replicated_ddl_output, true, "Return table with query execution status as a result of DDL query", 0) \

View File

@@ -28,7 +28,7 @@ namespace ErrorCodes
static constexpr UInt32 max_scale = 9;
DataTypeDateTime64::DataTypeDateTime64(UInt32 scale_, const std::string & time_zone_name)
: DataTypeDecimalBase<DateTime64>(DecimalUtils::maxPrecision<DateTime64>(), scale_),
: DataTypeDecimalBase<DateTime64>(DecimalUtils::max_precision<DateTime64>, scale_),
TimezoneMixin(time_zone_name)
{
if (scale > max_scale)
@@ -37,7 +37,7 @@ DataTypeDateTime64::DataTypeDateTime64(UInt32 scale_, const std::string & time_z
}
DataTypeDateTime64::DataTypeDateTime64(UInt32 scale_, const TimezoneMixin & time_zone_info)
: DataTypeDecimalBase<DateTime64>(DecimalUtils::maxPrecision<DateTime64>(), scale_),
: DataTypeDecimalBase<DateTime64>(DecimalUtils::max_precision<DateTime64>, scale_),
TimezoneMixin(time_zone_info)
{
if (scale > max_scale)

View File

@@ -65,7 +65,7 @@ public:
static constexpr bool is_parametric = true;
static constexpr size_t maxPrecision() { return DecimalUtils::maxPrecision<T>(); }
static constexpr size_t maxPrecision() { return DecimalUtils::max_precision<T>; }
DataTypeDecimalBase(UInt32 precision_, UInt32 scale_)
: precision(precision_),
@@ -197,17 +197,17 @@ inline const DecimalType<U> decimalResultType(const DataTypeNumber<T> & tx, cons
template <template <typename> typename DecimalType>
inline DataTypePtr createDecimal(UInt64 precision_value, UInt64 scale_value)
{
if (precision_value < DecimalUtils::minPrecision() || precision_value > DecimalUtils::maxPrecision<Decimal256>())
if (precision_value < DecimalUtils::min_precision || precision_value > DecimalUtils::max_precision<Decimal256>)
throw Exception("Wrong precision", ErrorCodes::ARGUMENT_OUT_OF_BOUND);
if (static_cast<UInt64>(scale_value) > precision_value)
throw Exception("Negative scales and scales larger than precision are not supported", ErrorCodes::ARGUMENT_OUT_OF_BOUND);
if (precision_value <= DecimalUtils::maxPrecision<Decimal32>())
if (precision_value <= DecimalUtils::max_precision<Decimal32>)
return std::make_shared<DecimalType<Decimal32>>(precision_value, scale_value);
else if (precision_value <= DecimalUtils::maxPrecision<Decimal64>())
else if (precision_value <= DecimalUtils::max_precision<Decimal64>)
return std::make_shared<DecimalType<Decimal64>>(precision_value, scale_value);
else if (precision_value <= DecimalUtils::maxPrecision<Decimal128>())
else if (precision_value <= DecimalUtils::max_precision<Decimal128>)
return std::make_shared<DecimalType<Decimal128>>(precision_value, scale_value);
return std::make_shared<DecimalType<Decimal256>>(precision_value, scale_value);
}

View File

@@ -141,7 +141,7 @@ static DataTypePtr createExact(const ASTPtr & arguments)
if (!scale_arg || !(scale_arg->value.getType() == Field::Types::Int64 || scale_arg->value.getType() == Field::Types::UInt64))
throw Exception("Decimal data type family must have a two numbers as its arguments", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
UInt64 precision = DecimalUtils::maxPrecision<T>();
UInt64 precision = DecimalUtils::max_precision<T>;
UInt64 scale = scale_arg->value.get<UInt64>();
return createDecimal<DataTypeDecimal>(precision, scale);

View File

@@ -270,7 +270,7 @@ tryConvertToDecimal(const typename FromDataType::FieldType & value, UInt32 scale
template <typename T>
inline DataTypePtr createDecimalMaxPrecision(UInt64 scale)
{
return std::make_shared<DataTypeDecimal<T>>(DecimalUtils::maxPrecision<T>(), scale);
return std::make_shared<DataTypeDecimal<T>>(DecimalUtils::max_precision<T>, scale);
}
}

View File

@@ -103,11 +103,11 @@ DataTypePtr convertMySQLDataType(MultiEnum<MySQLDataTypesSupport> type_support,
}
else if (type_support.isSet(MySQLDataTypesSupport::DECIMAL) && (type_name == "numeric" || type_name == "decimal"))
{
if (precision <= DecimalUtils::maxPrecision<Decimal32>())
if (precision <= DecimalUtils::max_precision<Decimal32>)
res = std::make_shared<DataTypeDecimal<Decimal32>>(precision, scale);
else if (precision <= DecimalUtils::maxPrecision<Decimal64>())
else if (precision <= DecimalUtils::max_precision<Decimal64>)
res = std::make_shared<DataTypeDecimal<Decimal64>>(precision, scale);
else if (precision <= DecimalUtils::maxPrecision<Decimal128>())
else if (precision <= DecimalUtils::max_precision<Decimal128>)
res = std::make_shared<DataTypeDecimal<Decimal128>>(precision, scale);
}

View File

@@ -73,13 +73,13 @@ static DataTypePtr convertPostgreSQLDataType(std::string & type, bool is_nullabl
uint32_t precision = getDecimalPrecision(*res);
uint32_t scale = getDecimalScale(*res);
if (precision <= DecimalUtils::maxPrecision<Decimal32>())
if (precision <= DecimalUtils::max_precision<Decimal32>)
res = std::make_shared<DataTypeDecimal<Decimal32>>(precision, scale);
else if (precision <= DecimalUtils::maxPrecision<Decimal64>())
else if (precision <= DecimalUtils::max_precision<Decimal64>)
res = std::make_shared<DataTypeDecimal<Decimal64>>(precision, scale);
else if (precision <= DecimalUtils::maxPrecision<Decimal128>())
else if (precision <= DecimalUtils::max_precision<Decimal128>)
res = std::make_shared<DataTypeDecimal<Decimal128>>(precision, scale);
else if (precision <= DecimalUtils::maxPrecision<Decimal256>())
else if (precision <= DecimalUtils::max_precision<Decimal256>)
res = std::make_shared<DataTypeDecimal<Decimal256>>(precision, scale);
}

View File

@@ -21,6 +21,10 @@ namespace ErrorCodes
extern const int LOGICAL_ERROR;
}
const String & getFunctionCanonicalNameIfAny(const String & name)
{
return FunctionFactory::instance().getCanonicalNameIfAny(name);
}
void FunctionFactory::registerFunction(const
std::string & name,
@@ -36,10 +40,13 @@ void FunctionFactory::registerFunction(const
throw Exception("FunctionFactory: the function name '" + name + "' is already registered as alias",
ErrorCodes::LOGICAL_ERROR);
if (case_sensitiveness == CaseInsensitive
&& !case_insensitive_functions.emplace(function_name_lowercase, creator).second)
throw Exception("FunctionFactory: the case insensitive function name '" + name + "' is not unique",
ErrorCodes::LOGICAL_ERROR);
if (case_sensitiveness == CaseInsensitive)
{
if (!case_insensitive_functions.emplace(function_name_lowercase, creator).second)
throw Exception("FunctionFactory: the case insensitive function name '" + name + "' is not unique",
ErrorCodes::LOGICAL_ERROR);
case_insensitive_name_mapping[function_name_lowercase] = name;
}
}

View File

@@ -516,6 +516,25 @@ struct ToDateTime64TransformSigned
return DecimalUtils::decimalFromComponentsWithMultiplier<DateTime64>(from, 0, scale_multiplier);
}
};
template <typename FromDataType, typename FromType>
struct ToDateTime64TransformFloat
{
static constexpr auto name = "toDateTime64";
const UInt32 scale = 1;
ToDateTime64TransformFloat(UInt32 scale_ = 0)
: scale(scale_)
{}
inline NO_SANITIZE_UNDEFINED DateTime64::NativeType execute(FromType from, const DateLUTImpl &) const
{
if (from < 0)
return 0;
from = std::min<FromType>(from, FromType(0xFFFFFFFF));
return convertToDecimal<FromDataType, DataTypeDateTime64>(from, scale);
}
};
template <typename Name> struct ConvertImpl<DataTypeInt8, DataTypeDateTime64, Name>
: DateTimeTransformImpl<DataTypeInt8, DataTypeDateTime64, ToDateTime64TransformSigned<Int8>> {};
@@ -528,9 +547,9 @@ template <typename Name> struct ConvertImpl<DataTypeInt64, DataTypeDateTime64, N
template <typename Name> struct ConvertImpl<DataTypeUInt64, DataTypeDateTime64, Name>
: DateTimeTransformImpl<DataTypeUInt64, DataTypeDateTime64, ToDateTime64TransformUnsigned<UInt64>> {};
template <typename Name> struct ConvertImpl<DataTypeFloat32, DataTypeDateTime64, Name>
: DateTimeTransformImpl<DataTypeFloat32, DataTypeDateTime64, ToDateTime64TransformSigned<Float32>> {};
: DateTimeTransformImpl<DataTypeFloat32, DataTypeDateTime64, ToDateTime64TransformFloat<DataTypeFloat32, Float32>> {};
template <typename Name> struct ConvertImpl<DataTypeFloat64, DataTypeDateTime64, Name>
: DateTimeTransformImpl<DataTypeFloat64, DataTypeDateTime64, ToDateTime64TransformSigned<Float64>> {};
: DateTimeTransformImpl<DataTypeFloat64, DataTypeDateTime64, ToDateTime64TransformFloat<DataTypeFloat64, Float64>> {};
/** Conversion of DateTime64 to Date or DateTime: discards fractional part.
*/
@@ -1313,7 +1332,7 @@ public:
else if constexpr (std::is_same_v<Name, NameToDecimal256>)
return createDecimalMaxPrecision<Decimal256>(scale);
throw Exception("Something wrong with toDecimalNN()", ErrorCodes::LOGICAL_ERROR);
throw Exception("Unexpected branch in code of conversion function: it is a bug.", ErrorCodes::LOGICAL_ERROR);
}
else
{
@@ -1337,7 +1356,7 @@ public:
if constexpr (std::is_same_v<ToDataType, DataTypeDateTime>)
return std::make_shared<DataTypeDateTime>(extractTimeZoneNameFromFunctionArguments(arguments, timezone_arg_position, 0));
else if constexpr (std::is_same_v<ToDataType, DataTypeDateTime64>)
throw Exception("LOGICAL ERROR: It is a bug.", ErrorCodes::LOGICAL_ERROR);
throw Exception("Unexpected branch in code of conversion function: it is a bug.", ErrorCodes::LOGICAL_ERROR);
else
return std::make_shared<ToDataType>();
}
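
The new ToDateTime64TransformFloat makes float-to-DateTime64 conversion saturating: negative inputs collapse to the epoch and values are capped at the largest 32-bit timestamp before scaling. A sketch of that behaviour, with convertToDecimal replaced by a plain multiply by the scale multiplier (an assumption for illustration):

#include <algorithm>
#include <cstdint>
#include <iostream>

int64_t toDateTime64Sketch(double from, uint32_t scale_multiplier /* 10^scale */)
{
    if (from < 0)
        return 0;                              // clamp everything below the epoch
    from = std::min(from, double(0xFFFFFFFF)); // cap at the max UInt32 seconds
    return static_cast<int64_t>(from * scale_multiplier);
}

int main()
{
    std::cout << toDateTime64Sketch(-5.0, 1000) << '\n'; // 0
    std::cout << toDateTime64Sketch(1.25, 1000) << '\n'; // 1250, i.e. 1.250 s
}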

View File

@@ -8,7 +8,7 @@ namespace DB
void registerFunctionsRound(FunctionFactory & factory)
{
factory.registerFunction<FunctionRound>("round", FunctionFactory::CaseInsensitive);
factory.registerFunction<FunctionRoundBankers>("roundBankers", FunctionFactory::CaseInsensitive);
factory.registerFunction<FunctionRoundBankers>("roundBankers", FunctionFactory::CaseSensitive);
factory.registerFunction<FunctionFloor>("floor", FunctionFactory::CaseInsensitive);
factory.registerFunction<FunctionCeil>("ceil", FunctionFactory::CaseInsensitive);
factory.registerFunction<FunctionTrunc>("trunc", FunctionFactory::CaseInsensitive);

View File

@@ -103,7 +103,7 @@ struct ArrayAggregateImpl
{
using DecimalReturnType = ArrayAggregateResult<typename DataType::FieldType, aggregate_operation>;
UInt32 scale = getDecimalScale(*expression_return);
result = std::make_shared<DataTypeDecimal<DecimalReturnType>>(DecimalUtils::maxPrecision<DecimalReturnType>(), scale);
result = std::make_shared<DataTypeDecimal<DecimalReturnType>>(DecimalUtils::max_precision<DecimalReturnType>, scale);
return true;
}

View File

@@ -37,7 +37,7 @@ struct ArrayCumSumImpl
if (which.isDecimal())
{
UInt32 scale = getDecimalScale(*expression_return);
DataTypePtr nested = std::make_shared<DataTypeDecimal<Decimal128>>(DecimalUtils::maxPrecision<Decimal128>(), scale);
DataTypePtr nested = std::make_shared<DataTypeDecimal<Decimal128>>(DecimalUtils::max_precision<Decimal128>, scale);
return std::make_shared<DataTypeArray>(nested);
}

View File

@@ -40,7 +40,7 @@ struct ArrayCumSumNonNegativeImpl
if (which.isDecimal())
{
UInt32 scale = getDecimalScale(*expression_return);
DataTypePtr nested = std::make_shared<DataTypeDecimal<Decimal128>>(DecimalUtils::maxPrecision<Decimal128>(), scale);
DataTypePtr nested = std::make_shared<DataTypeDecimal<Decimal128>>(DecimalUtils::max_precision<Decimal128>, scale);
return std::make_shared<DataTypeArray>(nested);
}

View File

@@ -18,7 +18,7 @@ namespace DB
void registerFunctionExtractAllGroupsVertical(FunctionFactory & factory)
{
factory.registerFunction<FunctionExtractAllGroups<VerticalImpl>>();
factory.registerAlias("extractAllGroups", VerticalImpl::Name, FunctionFactory::CaseInsensitive);
factory.registerAlias("extractAllGroups", VerticalImpl::Name, FunctionFactory::CaseSensitive);
}
}

View File

@@ -133,7 +133,7 @@ private:
static_assert(IsDecimalNumber<T>);
using NativeT = typename T::NativeType;
if (precision > DecimalUtils::maxPrecision<T>())
if (precision > DecimalUtils::max_precision<T>)
return false;
NativeT pow10 = intExp10OfSize<NativeT>(precision);

View File

@@ -1,4 +1,6 @@
#include <mutex>
#include <ext/bit_cast.h>
#include <Common/FieldVisitors.h>
#include <DataTypes/DataTypeArray.h>
#include <Columns/ColumnString.h>
@@ -13,6 +15,7 @@
#include <Functions/FunctionHelpers.h>
#include <Functions/FunctionFactory.h>
#include <DataTypes/getLeastSupertype.h>
#include <Interpreters/convertFieldToType.h>
namespace DB
@@ -491,7 +494,7 @@ private:
dst.resize(size);
for (size_t i = 0; i < size; ++i)
{
auto it = table.find(src[i]);
const auto * it = table.find(ext::bit_cast<UInt64>(src[i]));
if (it)
memcpy(&dst[i], &it->getMapped(), sizeof(dst[i])); /// little endian.
else
@@ -507,7 +510,7 @@ private:
dst.resize(size);
for (size_t i = 0; i < size; ++i)
{
auto it = table.find(src[i]);
const auto * it = table.find(ext::bit_cast<UInt64>(src[i]));
if (it)
memcpy(&dst[i], &it->getMapped(), sizeof(dst[i])); /// little endian.
else
@@ -523,7 +526,7 @@ private:
dst.resize(size);
for (size_t i = 0; i < size; ++i)
{
auto it = table.find(src[i]);
const auto * it = table.find(ext::bit_cast<UInt64>(src[i]));
if (it)
memcpy(&dst[i], &it->getMapped(), sizeof(dst[i]));
else
@@ -541,7 +544,7 @@ private:
ColumnString::Offset current_dst_offset = 0;
for (size_t i = 0; i < size; ++i)
{
auto it = table.find(src[i]);
const auto * it = table.find(ext::bit_cast<UInt64>(src[i]));
StringRef ref = it ? it->getMapped() : dst_default;
dst_data.resize(current_dst_offset + ref.size);
memcpy(&dst_data[current_dst_offset], ref.data, ref.size);
@@ -562,7 +565,8 @@ private:
ColumnString::Offset current_dst_default_offset = 0;
for (size_t i = 0; i < size; ++i)
{
auto it = table.find(src[i]);
Field key = src[i];
const auto * it = table.find(key.reinterpret<UInt64>());
StringRef ref;
if (it)
@@ -778,50 +782,66 @@ private:
/// Note: Doesn't check the duplicates in the `from` array.
if (from[0].getType() != Field::Types::String && to[0].getType() != Field::Types::String)
const IDataType & from_type = *arguments[0].type;
if (from[0].getType() != Field::Types::String)
{
cache.table_num_to_num = std::make_unique<Cache::NumToNum>();
auto & table = *cache.table_num_to_num;
for (size_t i = 0; i < size; ++i)
if (to[0].getType() != Field::Types::String)
{
// Field may be of Float type, but for the purpose of bitwise
// equality we can treat them as UInt64, hence the reinterpret().
table[from[i].reinterpret<UInt64>()] = (*used_to)[i].reinterpret<UInt64>();
cache.table_num_to_num = std::make_unique<Cache::NumToNum>();
auto & table = *cache.table_num_to_num;
for (size_t i = 0; i < size; ++i)
{
Field key = convertFieldToType(from[i], from_type);
if (key.isNull())
continue;
// Field may be of Float type, but for the purpose of bitwise
// equality we can treat them as UInt64, hence the reinterpret().
table[key.reinterpret<UInt64>()] = (*used_to)[i].reinterpret<UInt64>();
}
}
else
{
cache.table_num_to_string = std::make_unique<Cache::NumToString>();
auto & table = *cache.table_num_to_string;
for (size_t i = 0; i < size; ++i)
{
Field key = convertFieldToType(from[i], from_type);
if (key.isNull())
continue;
const String & str_to = to[i].get<const String &>();
StringRef ref{cache.string_pool.insert(str_to.data(), str_to.size() + 1), str_to.size() + 1};
table[key.reinterpret<UInt64>()] = ref;
}
}
}
else if (from[0].getType() != Field::Types::String && to[0].getType() == Field::Types::String)
else
{
cache.table_num_to_string = std::make_unique<Cache::NumToString>();
auto & table = *cache.table_num_to_string;
for (size_t i = 0; i < size; ++i)
if (to[0].getType() != Field::Types::String)
{
const String & str_to = to[i].get<const String &>();
StringRef ref{cache.string_pool.insert(str_to.data(), str_to.size() + 1), str_to.size() + 1};
table[from[i].reinterpret<UInt64>()] = ref;
cache.table_string_to_num = std::make_unique<Cache::StringToNum>();
auto & table = *cache.table_string_to_num;
for (size_t i = 0; i < size; ++i)
{
const String & str_from = from[i].get<const String &>();
StringRef ref{cache.string_pool.insert(str_from.data(), str_from.size() + 1), str_from.size() + 1};
table[ref] = (*used_to)[i].reinterpret<UInt64>();
}
}
}
else if (from[0].getType() == Field::Types::String && to[0].getType() != Field::Types::String)
{
cache.table_string_to_num = std::make_unique<Cache::StringToNum>();
auto & table = *cache.table_string_to_num;
for (size_t i = 0; i < size; ++i)
else
{
const String & str_from = from[i].get<const String &>();
StringRef ref{cache.string_pool.insert(str_from.data(), str_from.size() + 1), str_from.size() + 1};
table[ref] = (*used_to)[i].reinterpret<UInt64>();
}
}
else if (from[0].getType() == Field::Types::String && to[0].getType() == Field::Types::String)
{
cache.table_string_to_string = std::make_unique<Cache::StringToString>();
auto & table = *cache.table_string_to_string;
for (size_t i = 0; i < size; ++i)
{
const String & str_from = from[i].get<const String &>();
const String & str_to = to[i].get<const String &>();
StringRef ref_from{cache.string_pool.insert(str_from.data(), str_from.size() + 1), str_from.size() + 1};
StringRef ref_to{cache.string_pool.insert(str_to.data(), str_to.size() + 1), str_to.size() + 1};
table[ref_from] = ref_to;
cache.table_string_to_string = std::make_unique<Cache::StringToString>();
auto & table = *cache.table_string_to_string;
for (size_t i = 0; i < size; ++i)
{
const String & str_from = from[i].get<const String &>();
const String & str_to = to[i].get<const String &>();
StringRef ref_from{cache.string_pool.insert(str_from.data(), str_from.size() + 1), str_from.size() + 1};
StringRef ref_to{cache.string_pool.insert(str_to.data(), str_to.size() + 1), str_to.size() + 1};
table[ref_from] = ref_to;
}
}
}
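
The transform() changes above hinge on one invariant: the numeric lookup tables are keyed on raw 64-bit bit patterns, so constant `from` values must first be normalized to the source column's type (convertFieldToType) and then reinterpreted, not converted, when probing (ext::bit_cast). A self-contained sketch of bit-pattern keying, using std::memcpy the way ext::bit_cast does:

#include <cstdint>
#include <cstring>
#include <iostream>
#include <unordered_map>

template <typename To, typename From>
To bit_cast_sketch(const From & from)
{
    static_assert(sizeof(To) >= sizeof(From));
    To to{};
    std::memcpy(&to, &from, sizeof(From)); // reinterpret the bits, don't convert
    return to;
}

int main()
{
    std::unordered_map<uint64_t, uint64_t> table;
    table[bit_cast_sketch<uint64_t>(2.5)] = 42; // Float64 key, stored bitwise

    double probe = 2.5; // must have the exact same bit pattern to match
    auto it = table.find(bit_cast_sketch<uint64_t>(probe));
    std::cout << (it != table.end() ? it->second : 0) << '\n'; // 42
}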

View File

@@ -831,14 +831,18 @@ ReturnType readDateTimeTextFallback(time_t & datetime, ReadBuffer & buf, const D
static constexpr auto date_time_broken_down_length = 19;
/// YYYY-MM-DD
static constexpr auto date_broken_down_length = 10;
/// unix timestamp max length
static constexpr auto unix_timestamp_max_length = 10;
char s[date_time_broken_down_length];
char * s_pos = s;
/// A piece similar to unix timestamp.
while (s_pos < s + unix_timestamp_max_length && !buf.eof() && isNumericASCII(*buf.position()))
/** Read characters, that could represent unix timestamp.
* Only unix timestamp of at least 5 characters is supported.
* Then look at 5th character. If it is a number - treat whole as unix timestamp.
* If it is not a number - then parse datetime in YYYY-MM-DD hh:mm:ss or YYYY-MM-DD format.
*/
/// A piece similar to unix timestamp, maybe scaled to subsecond precision.
while (s_pos < s + date_time_broken_down_length && !buf.eof() && isNumericASCII(*buf.position()))
{
*s_pos = *buf.position();
++s_pos;
@@ -846,7 +850,7 @@ ReturnType readDateTimeTextFallback(time_t & datetime, ReadBuffer & buf, const D
}
/// 2015-01-01 01:02:03 or 2015-01-01
if (s_pos == s + 4 && !buf.eof() && (*buf.position() < '0' || *buf.position() > '9'))
if (s_pos == s + 4 && !buf.eof() && !isNumericASCII(*buf.position()))
{
const auto already_read_length = s_pos - s;
const size_t remaining_date_time_size = date_time_broken_down_length - already_read_length;
@@ -885,8 +889,7 @@ ReturnType readDateTimeTextFallback(time_t & datetime, ReadBuffer & buf, const D
}
else
{
/// Only unix timestamp of 5-10 characters is supported. For consistency. See readDateTimeTextImpl.
if (s_pos - s >= 5 && s_pos - s <= 10)
if (s_pos - s >= 5)
{
/// Not very efficient.
datetime = 0;
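
The comment in the hunk above describes the disambiguation rule that replaced the old 10-character limit: read leading digits, and if exactly four of them are followed by a non-digit, parse a date, otherwise treat five or more digits as a unix timestamp. A sketch of just that decision, with the ReadBuffer reduced to a std::string (an assumption for illustration):

#include <cctype>
#include <iostream>
#include <string>

bool looksLikeTimestamp(const std::string & s)
{
    size_t digits = 0;
    while (digits < s.size() && std::isdigit(static_cast<unsigned char>(s[digits])))
        ++digits;
    // Four digits followed by a non-digit (e.g. "2015-") means a date value;
    // otherwise five or more leading digits are treated as a unix timestamp.
    if (digits == 4 && digits < s.size())
        return false;
    return digits >= 5;
}

int main()
{
    std::cout << looksLikeTimestamp("2015-01-01 01:02:03") << '\n'; // 0 (date)
    std::cout << looksLikeTimestamp("1613988000") << '\n';          // 1 (timestamp)
}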

View File

@@ -703,12 +703,6 @@ ReturnType readDateTimeTextFallback(time_t & datetime, ReadBuffer & buf, const D
template <typename ReturnType = void>
inline ReturnType readDateTimeTextImpl(time_t & datetime, ReadBuffer & buf, const DateLUTImpl & date_lut)
{
/** Read 10 characters, that could represent unix timestamp.
* Only unix timestamp of 5-10 characters is supported.
* Then look at 5th character. If it is a number - treat whole as unix timestamp.
* If it is not a number - then parse datetime in YYYY-MM-DD hh:mm:ss or YYYY-MM-DD format.
*/
/// Optimistic path, when whole value is in buffer.
const char * s = buf.position();
@@ -779,6 +773,18 @@ inline ReturnType readDateTimeTextImpl(DateTime64 & datetime64, UInt32 scale, Re
while (!buf.eof() && isNumericASCII(*buf.position()))
++buf.position();
}
else if (scale && (whole >= 1000000000LL * scale))
{
/// Unix timestamp with subsecond precision, already scaled to integer.
/// For disambiguation we support only time since 2001-09-09 01:46:40 UTC and less than 30 000 years in future.
for (size_t i = 0; i < scale; ++i)
{
components.fractional *= 10;
components.fractional += components.whole % 10;
components.whole /= 10;
}
}
datetime64 = DecimalUtils::decimalFromComponents<DateTime64>(components, scale);
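
The loop added above handles a DateTime64 that arrives as a single pre-scaled integer: the `scale` low-order digits are peeled off the whole part into the fractional component. A worked example with scale 3:

#include <cstdint>
#include <iostream>

int main()
{
    int64_t whole = 1612345678111; // pre-scaled unix timestamp, scale = 3
    int64_t fractional = 0;
    const uint32_t scale = 3;

    // Each iteration moves the lowest digit of `whole` into `fractional`.
    for (uint32_t i = 0; i < scale; ++i)
    {
        fractional *= 10;
        fractional += whole % 10;
        whole /= 10;
    }

    std::cout << whole << '.' << fractional << '\n'; // 1612345678.111
}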

View File

@@ -709,7 +709,7 @@ inline void writeUUIDText(const UUID & uuid, WriteBuffer & buf)
template<typename DecimalType>
inline void writeDecimalTypeFractionalText(typename DecimalType::NativeType fractional, UInt32 scale, WriteBuffer & buf)
{
static constexpr UInt32 MaxScale = DecimalUtils::maxPrecision<DecimalType>();
static constexpr UInt32 MaxScale = DecimalUtils::max_precision<DecimalType>;
char data[20] = {'0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0'};
static_assert(sizeof(data) >= MaxScale);
@@ -831,19 +831,19 @@ inline void writeDateTimeText(time_t datetime, WriteBuffer & buf, const DateLUTI
template <char date_delimeter = '-', char time_delimeter = ':', char between_date_time_delimiter = ' ', char fractional_time_delimiter = '.'>
inline void writeDateTimeText(DateTime64 datetime64, UInt32 scale, WriteBuffer & buf, const DateLUTImpl & date_lut = DateLUT::instance())
{
static constexpr UInt32 MaxScale = DecimalUtils::maxPrecision<DateTime64>();
static constexpr UInt32 MaxScale = DecimalUtils::max_precision<DateTime64>;
scale = scale > MaxScale ? MaxScale : scale;
auto c = DecimalUtils::split(datetime64, scale);
const auto & values = date_lut.getValues(c.whole);
auto components = DecimalUtils::split(datetime64, scale);
const auto & values = date_lut.getValues(components.whole);
writeDateTimeText<date_delimeter, time_delimeter, between_date_time_delimiter>(
LocalDateTime(values.year, values.month, values.day_of_month,
date_lut.toHour(c.whole), date_lut.toMinute(c.whole), date_lut.toSecond(c.whole)), buf);
date_lut.toHour(components.whole), date_lut.toMinute(components.whole), date_lut.toSecond(components.whole)), buf);
if (scale > 0)
{
buf.write(fractional_time_delimiter);
writeDecimalTypeFractionalText<DateTime64>(c.fractional, scale, buf);
writeDecimalTypeFractionalText<DateTime64>(components.fractional, scale, buf);
}
}
@@ -887,16 +887,16 @@ inline void writeDateTimeTextISO(DateTime64 datetime64, UInt32 scale, WriteBuffe
inline void writeDateTimeUnixTimestamp(DateTime64 datetime64, UInt32 scale, WriteBuffer & buf)
{
static constexpr UInt32 MaxScale = DecimalUtils::maxPrecision<DateTime64>();
static constexpr UInt32 MaxScale = DecimalUtils::max_precision<DateTime64>;
scale = scale > MaxScale ? MaxScale : scale;
auto c = DecimalUtils::split(datetime64, scale);
writeIntText(c.whole, buf);
auto components = DecimalUtils::split(datetime64, scale);
writeIntText(components.whole, buf);
if (scale > 0)
if (scale > 0) //-V547
{
buf.write('.');
writeDecimalTypeFractionalText<DateTime64>(c.fractional, scale, buf);
writeDecimalTypeFractionalText<DateTime64>(components.fractional, scale, buf);
}
}
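
For reference, a sketch of the whole/fractional split these writers rely on (assumed semantics of DecimalUtils::split — divide the underlying integer by 10^scale; not the actual implementation):

#include <cstdint>
#include <cstdio>

struct Components { int64_t whole; int64_t fractional; };

static Components splitByScale(int64_t value, uint32_t scale)
{
    int64_t multiplier = 1;
    for (uint32_t i = 0; i < scale; ++i)
        multiplier *= 10;
    return {value / multiplier, value % multiplier};
}

int main()
{
    auto components = splitByScale(1111111111222LL, 3); /// DateTime64(3)
    /// Prints 1111111111.222 (fractional width hardcoded for scale = 3 in this sketch).
    printf("%lld.%03lld\n", (long long)components.whole, (long long)components.fractional);
}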

View File

@ -0,0 +1,45 @@
#include <Interpreters/FunctionNameNormalizer.h>
#include <Parsers/ASTColumnDeclaration.h>
#include <Parsers/ASTCreateQuery.h>
namespace DB
{
const String & getFunctionCanonicalNameIfAny(const String & name);
const String & getAggregateFunctionCanonicalNameIfAny(const String & name);
void FunctionNameNormalizer::visit(IAST * ast)
{
if (!ast)
return;
// Normalize only selected children. Avoid normalizing the engine clause because some engine
// might have the same name as a function, e.g. Log.
if (auto * node_storage = ast->as<ASTStorage>())
{
visit(node_storage->partition_by);
visit(node_storage->primary_key);
visit(node_storage->order_by);
visit(node_storage->sample_by);
visit(node_storage->ttl_table);
return;
}
// Normalize only selected children. Avoid normalizing the type clause because some type
// might have the same name as a function, e.g. Date.
if (auto * node_decl = ast->as<ASTColumnDeclaration>())
{
visit(node_decl->default_expression.get());
visit(node_decl->ttl.get());
return;
}
if (auto * node_func = ast->as<ASTFunction>())
node_func->name = getAggregateFunctionCanonicalNameIfAny(getFunctionCanonicalNameIfAny(node_func->name));
for (auto & child : ast->children)
visit(child.get());
}
}
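
A hedged usage sketch of the new visitor; the parser-driver boilerplate below is hypothetical and its exact signatures may differ between versions:

#include <Interpreters/FunctionNameNormalizer.h>
#include <Parsers/ParserQuery.h>
#include <Parsers/parseQuery.h>
#include <Parsers/queryToString.h>
#include <iostream>

using namespace DB;

int main()
{
    /// Hypothetical driver, not part of this commit.
    std::string query = "SELECT cast(1, 'UInt8'), FLATTEN([[1]])";
    ParserQuery parser(query.data() + query.size());
    ASTPtr ast = parseQuery(parser, query.data(), query.data() + query.size(), "", 0, 0);

    FunctionNameNormalizer().visit(ast.get());

    /// Expected rewrite (per the tests below): cast -> CAST, FLATTEN -> arrayFlatten.
    std::cout << queryToString(ast) << std::endl;
}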

View File

@ -0,0 +1,14 @@
#pragma once
#include <Parsers/IAST.h>
#include <Parsers/ASTFunction.h>
namespace DB
{
struct FunctionNameNormalizer
{
static void visit(IAST *);
};
}

View File

@ -58,6 +58,7 @@
#include <Interpreters/InterpreterDropQuery.h>
#include <Interpreters/QueryLog.h>
#include <Interpreters/addTypeConversionToAST.h>
#include <Interpreters/FunctionNameNormalizer.h>
#include <TableFunctions/TableFunctionFactory.h>
#include <common/logger_useful.h>
@ -1182,6 +1183,7 @@ void InterpreterCreateQuery::prepareOnClusterQuery(ASTCreateQuery & create, cons
BlockIO InterpreterCreateQuery::execute()
{
FunctionNameNormalizer().visit(query_ptr.get());
auto & create = query_ptr->as<ASTCreateQuery &>();
if (!create.cluster.empty())
{

View File

@ -1285,8 +1285,11 @@ void InterpreterSelectQuery::executeFetchColumns(
const auto & desc = query_analyzer->aggregates()[0];
const auto & func = desc.function;
std::optional<UInt64> num_rows{};
if (!query.prewhere() && !query.where())
{
num_rows = storage->totalRows(settings);
}
else // It's possible to optimize count() given only partition predicates
{
SelectQueryInfo temp_query_info;
@ -1296,6 +1299,7 @@ void InterpreterSelectQuery::executeFetchColumns(
num_rows = storage->totalRowsByPartitionPredicate(temp_query_info, *context);
}
if (num_rows)
{
AggregateFunctionCount & agg_count = static_cast<AggregateFunctionCount &>(*func);
@ -1790,7 +1794,7 @@ void InterpreterSelectQuery::executeMergeAggregated(QueryPlan & query_plan, bool
auto merging_aggregated = std::make_unique<MergingAggregatedStep>(
query_plan.getCurrentDataStream(),
std::move(transform_params),
settings.distributed_aggregation_memory_efficient,
settings.distributed_aggregation_memory_efficient && storage && storage->isRemote(),
settings.max_threads,
settings.aggregation_memory_efficient_merge_threads);

View File

@ -442,10 +442,10 @@ ASTPtr MutationsInterpreter::prepare(bool dry_run)
auto type_literal = std::make_shared<ASTLiteral>(columns_desc.getPhysical(column).type->getName());
const auto & update_expr = kv.second;
auto updated_column = makeASTFunction("cast",
auto updated_column = makeASTFunction("CAST",
makeASTFunction("if",
getPartitionAndPredicateExpressionForMutationCommand(command),
makeASTFunction("cast",
makeASTFunction("CAST",
update_expr->clone(),
type_literal),
std::make_shared<ASTIdentifier>(column)),

View File

@ -29,7 +29,7 @@ static bool tryExtractConstValueFromCondition(const ASTPtr & condition, bool & v
/// cast of numeric constant in condition to UInt8
if (const auto * function = condition->as<ASTFunction>())
{
if (function->name == "cast")
if (function->name == "CAST")
{
if (const auto * expr_list = function->arguments->as<ASTExpressionList>())
{

View File

@ -8,6 +8,7 @@
#include <Interpreters/ArrayJoinedColumnsVisitor.h>
#include <Interpreters/TranslateQualifiedNamesVisitor.h>
#include <Interpreters/Context.h>
#include <Interpreters/FunctionNameNormalizer.h>
#include <Interpreters/MarkTableIdentifiersVisitor.h>
#include <Interpreters/QueryNormalizer.h>
#include <Interpreters/ExecuteScalarSubqueriesVisitor.h>
@ -934,6 +935,10 @@ void TreeRewriter::normalize(ASTPtr & query, Aliases & aliases, const Settings &
MarkTableIdentifiersVisitor::Data identifiers_data{aliases};
MarkTableIdentifiersVisitor(identifiers_data).visit(query);
/// Rewrite function names to their canonical ones.
if (settings.normalize_function_names)
FunctionNameNormalizer().visit(query.get());
/// Common subexpression elimination. Rewrite rules.
QueryNormalizer::Data normalizer_data(aliases, settings);
QueryNormalizer(normalizer_data).visit(query);
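
A toy illustration of the canonicalization this setting turns on; the alias pairs are taken from tests later in this diff, while the real lookup tables live in the function factories behind getFunctionCanonicalNameIfAny / getAggregateFunctionCanonicalNameIfAny:

#include <string>
#include <unordered_map>
#include <cstdio>

static std::string canonicalName(const std::string & name)
{
    /// Exact-match toy table; the real lookup is factory-backed and case-insensitive.
    static const std::unordered_map<std::string, std::string> aliases =
    {
        {"cast", "CAST"},
        {"FLATTEN", "arrayFlatten"},
        {"LCASE", "lower"},
        {"WEEK", "toWeek"},
    };
    auto it = aliases.find(name);
    return it == aliases.end() ? name : it->second;
}

int main()
{
    printf("%s\n", canonicalName("FLATTEN").c_str()); /// arrayFlatten
}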

View File

@ -20,7 +20,7 @@ namespace ErrorCodes
ASTPtr addTypeConversionToAST(ASTPtr && ast, const String & type_name)
{
auto func = makeASTFunction("cast", ast, std::make_shared<ASTLiteral>(type_name));
auto func = makeASTFunction("CAST", ast, std::make_shared<ASTLiteral>(type_name));
if (ASTWithAlias * ast_with_alias = dynamic_cast<ASTWithAlias *>(ast.get()))
{

View File

@ -15,6 +15,7 @@
#include <Parsers/ExpressionElementParsers.h>
#include <TableFunctions/TableFunctionFactory.h>
#include <Common/typeid_cast.h>
#include <Interpreters/FunctionNameNormalizer.h>
#include <Interpreters/ReplaceQueryParameterVisitor.h>
#include <Poco/Util/AbstractConfiguration.h>
@ -35,6 +36,10 @@ std::pair<Field, std::shared_ptr<const IDataType>> evaluateConstantExpression(co
auto ast = node->clone();
ReplaceQueryParameterVisitor param_visitor(context.getQueryParameters());
param_visitor.visit(ast);
if (context.getSettingsRef().normalize_function_names)
FunctionNameNormalizer().visit(ast.get());
String name = ast->getColumnName();
auto syntax_result = TreeRewriter(context).analyze(ast, source_columns);
ExpressionActionsPtr expr_for_constant_folding = ExpressionAnalyzer(ast, syntax_result, context).getConstActions();

View File

@ -43,7 +43,7 @@ void addDefaultRequiredExpressionsRecursively(const Block & block, const String
RequiredSourceColumnsVisitor(columns_context).visit(column_default_expr);
NameSet required_columns_names = columns_context.requiredColumns();
auto cast_func = makeASTFunction("cast", column_default_expr, std::make_shared<ASTLiteral>(columns.get(required_column).type->getName()));
auto cast_func = makeASTFunction("CAST", column_default_expr, std::make_shared<ASTLiteral>(columns.get(required_column).type->getName()));
default_expr_list_accum->children.emplace_back(setAlias(cast_func, required_column));
added_columns.emplace(required_column);
@ -79,7 +79,7 @@ ASTPtr convertRequiredExpressions(Block & block, const NamesAndTypesList & requi
continue;
auto cast_func = makeASTFunction(
"cast", std::make_shared<ASTIdentifier>(required_column.name), std::make_shared<ASTLiteral>(required_column.type->getName()));
"CAST", std::make_shared<ASTIdentifier>(required_column.name), std::make_shared<ASTLiteral>(required_column.type->getName()));
conversion_expr_list->children.emplace_back(setAlias(cast_func, required_column.name));

View File

@ -59,6 +59,7 @@ SRCS(
ExternalModelsLoader.cpp
ExtractExpressionInfoVisitor.cpp
FillingRow.cpp
FunctionNameNormalizer.cpp
HashJoin.cpp
IExternalLoadable.cpp
IInterpreter.cpp

View File

@ -864,7 +864,7 @@ bool ParserCastExpression::parseImpl(Pos & pos, ASTPtr & node, Expected & expect
expr_list_args->children.push_back(std::move(type_literal));
auto func_node = std::make_shared<ASTFunction>();
func_node->name = "cast";
func_node->name = "CAST";
func_node->arguments = std::move(expr_list_args);
func_node->children.push_back(func_node->arguments);

View File

@ -626,7 +626,7 @@ void ConstantExpressionTemplate::TemplateStructure::addNodesToCastResult(const I
expr = makeASTFunction("assumeNotNull", std::move(expr));
}
expr = makeASTFunction("cast", std::move(expr), std::make_shared<ASTLiteral>(result_column_type.getName()));
expr = makeASTFunction("CAST", std::move(expr), std::make_shared<ASTLiteral>(result_column_type.getName()));
if (null_as_default)
{

View File

@ -1133,6 +1133,14 @@ void TCPHandler::receiveQuery()
}
query_context->applySettingsChanges(settings_changes);
/// Disable function name normalization when it's a secondary query, because queries are either
/// already normalized on the initiator node, or not normalized and should remain unnormalized
/// for compatibility.
if (client_info.query_kind == ClientInfo::QueryKind::SECONDARY_QUERY)
{
query_context->setSetting("normalize_function_names", Field(0));
}
// Use the received query id, or generate a random default. It is convenient
// to also generate the default OpenTelemetry trace id at the same time, and
// set the trace parent.

View File

@ -34,6 +34,7 @@
#include <Storages/Distributed/DirectoryMonitor.h>
#include <Processors/Sources/SourceWithProgress.h>
#include <Processors/Formats/InputStreamFromInputFormat.h>
#include <Processors/Sources/NullSource.h>
#include <Processors/Pipe.h>
namespace fs = std::filesystem;
@ -427,7 +428,12 @@ Pipe StorageFile::read(
paths = {""}; /// when use fd, paths are empty
else
if (paths.size() == 1 && !Poco::File(paths[0]).exists())
throw Exception("File " + paths[0] + " doesn't exist", ErrorCodes::FILE_DOESNT_EXIST);
{
if (context.getSettingsRef().engine_file_empty_if_not_exists)
return Pipe(std::make_shared<NullSource>(metadata_snapshot->getSampleBlockForColumns(column_names, getVirtuals(), getStorageID())));
else
throw Exception("File " + paths[0] + " doesn't exist", ErrorCodes::FILE_DOESNT_EXIST);
}
auto files_info = std::make_shared<StorageFileSource::FilesInfo>();
@ -469,7 +475,8 @@ public:
std::unique_lock<std::shared_timed_mutex> && lock_,
const CompressionMethod compression_method,
const Context & context,
const std::optional<FormatSettings> & format_settings)
const std::optional<FormatSettings> & format_settings,
int & flags)
: storage(storage_)
, metadata_snapshot(metadata_snapshot_)
, lock(std::move(lock_))
@ -485,13 +492,14 @@ public:
* INSERT data; SELECT *; last SELECT returns only insert_data
*/
storage.table_fd_was_used = true;
naked_buffer = std::make_unique<WriteBufferFromFileDescriptor>(storage.table_fd);
naked_buffer = std::make_unique<WriteBufferFromFileDescriptor>(storage.table_fd, DBMS_DEFAULT_BUFFER_SIZE);
}
else
{
if (storage.paths.size() != 1)
throw Exception("Table '" + storage.getStorageID().getNameForLogs() + "' is in readonly mode because of globs in filepath", ErrorCodes::DATABASE_ACCESS_DENIED);
naked_buffer = std::make_unique<WriteBufferFromFile>(storage.paths[0], DBMS_DEFAULT_BUFFER_SIZE, O_WRONLY | O_APPEND | O_CREAT);
flags |= O_WRONLY | O_APPEND | O_CREAT;
naked_buffer = std::make_unique<WriteBufferFromFile>(storage.paths[0], DBMS_DEFAULT_BUFFER_SIZE, flags);
}
/// In case of CSVWithNames we have already written prefix.
@ -546,7 +554,12 @@ BlockOutputStreamPtr StorageFile::write(
if (format_name == "Distributed")
throw Exception("Method write is not implemented for Distributed format", ErrorCodes::NOT_IMPLEMENTED);
int flags = 0;
std::string path;
if (context.getSettingsRef().engine_file_truncate_on_insert)
flags |= O_TRUNC;
if (!paths.empty())
{
path = paths[0];
@ -559,7 +572,8 @@ BlockOutputStreamPtr StorageFile::write(
std::unique_lock{rwlock, getLockTimeout(context)},
chooseCompressionMethod(path, compression_method),
context,
format_settings);
format_settings,
flags);
}
bool StorageFile::storesDataOnDisk() const
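
A sketch of the resulting open(2) flags on the write path above, assuming POSIX semantics: with engine_file_truncate_on_insert set, the file is truncated once at open, and subsequent writes still append:

#include <fcntl.h>
#include <cstdio>

int main()
{
    /// Mirrors the write path above (sketch): the truncate setting contributes O_TRUNC,
    /// and the writer itself always adds O_WRONLY | O_APPEND | O_CREAT.
    bool engine_file_truncate_on_insert = true;

    int flags = 0;
    if (engine_file_truncate_on_insert)
        flags |= O_TRUNC;
    flags |= O_WRONLY | O_APPEND | O_CREAT;

    printf("flags=%#x\n", static_cast<unsigned>(flags));
}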

View File

@ -16,6 +16,7 @@
#include <Common/PipeFDs.h>
#include <Common/CurrentThread.h>
#include <common/getThreadId.h>
#include <common/logger_useful.h>
namespace DB
@ -150,6 +151,7 @@ namespace
StorageSystemStackTrace::StorageSystemStackTrace(const StorageID & table_id_)
: IStorageSystemOneBlock<StorageSystemStackTrace>(table_id_)
, log(&Poco::Logger::get("StorageSystemStackTrace"))
{
notification_pipe.open();
@ -229,6 +231,8 @@ void StorageSystemStackTrace::fillData(MutableColumns & res_columns, const Conte
}
else
{
LOG_DEBUG(log, "Cannot obtain a stack trace for thread {}", tid);
/// Cannot obtain a stack trace. But create a record in result nevertheless.
res_columns[0]->insert(tid);

View File

@ -6,6 +6,10 @@
#include <ext/shared_ptr_helper.h>
#include <Storages/System/IStorageSystemOneBlock.h>
namespace Poco
{
class Logger;
}
namespace DB
{
@ -30,6 +34,8 @@ protected:
void fillData(MutableColumns & res_columns, const Context & context, const SelectQueryInfo & query_info) const override;
mutable std::mutex mutex;
Poco::Logger * log;
};
}

View File

@ -44,8 +44,8 @@ def _create_env_file(path, variables, fname=DEFAULT_ENV_NAME):
f.write("=".join([var, value]) + "\n")
return full_path
def run_and_check(args, env=None, shell=False):
res = subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env, shell=shell)
def run_and_check(args, env=None, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE):
res = subprocess.run(args, stdout=stdout, stderr=stderr, env=env, shell=shell)
if res.returncode != 0:
# check_call(...) from subprocess does not print stderr, so we do it manually
print('Stderr:\n{}\n'.format(res.stderr.decode('utf-8')))

View File

@ -37,6 +37,12 @@ class MySQLNodeInstance:
self.docker_compose = docker_compose
self.project_name = project_name
self.base_dir = p.dirname(__file__)
self.instances_dir = p.join(self.base_dir, '_instances_mysql')
if not os.path.exists(self.instances_dir):
os.mkdir(self.instances_dir)
self.docker_logs_path = p.join(self.instances_dir, 'docker_mysql.log')
def alloc_connection(self):
if self.mysql_connection is None:
@ -71,10 +77,28 @@ class MySQLNodeInstance:
cursor.execute(execution_query)
return cursor.fetchall()
def start_and_wait(self):
run_and_check(['docker-compose',
'-p', cluster.project_name,
'-f', self.docker_compose,
'up', '--no-recreate', '-d',
])
self.wait_mysql_to_start(120)
def close(self):
if self.mysql_connection is not None:
self.mysql_connection.close()
with open(self.docker_logs_path, "w+") as f:
try:
run_and_check([
'docker-compose',
'-p', cluster.project_name,
'-f', self.docker_compose, 'logs',
], stdout=f)
except Exception as e:
print("Unable to get logs from docker mysql.")
def wait_mysql_to_start(self, timeout=60):
start = time.time()
while time.time() - start < timeout:
@ -95,9 +119,7 @@ def started_mysql_5_7():
mysql_node = MySQLNodeInstance('root', 'clickhouse', '127.0.0.1', 3308, docker_compose)
try:
run_and_check(
['docker-compose', '-p', cluster.project_name, '-f', docker_compose, 'up', '--no-recreate', '-d'])
mysql_node.wait_mysql_to_start(120)
mysql_node.start_and_wait()
yield mysql_node
finally:
mysql_node.close()
@ -111,9 +133,7 @@ def started_mysql_8_0():
mysql_node = MySQLNodeInstance('root', 'clickhouse', '127.0.0.1', 33308, docker_compose)
try:
run_and_check(
['docker-compose', '-p', cluster.project_name, '-f', docker_compose, 'up', '--no-recreate', '-d'])
mysql_node.wait_mysql_to_start(120)
mysql_node.start_and_wait()
yield mysql_node
finally:
mysql_node.close()

View File

@ -217,7 +217,7 @@ def test_mysql_replacement_query(mysql_client, server_address):
--password=123 -e "select database();"
'''.format(host=server_address, port=server_port), demux=True)
assert code == 0
assert stdout.decode() == 'database()\ndefault\n'
assert stdout.decode() == 'DATABASE()\ndefault\n'
code, (stdout, stderr) = mysql_client.exec_run('''
mysql --protocol tcp -h {host} -P {port} default -u default

View File

@ -1,24 +0,0 @@
<test max_ignored_relative_change="0.2">
<create_query>drop table if EXISTS test_bm2;</create_query>
<create_query>drop table if EXISTS test_bm_join2;</create_query>
<create_query>create table test_bm2(
dim UInt64,
id UInt64)
ENGINE = MergeTree()
ORDER BY( dim )
SETTINGS index_granularity = 8192;
</create_query>
<create_query>
create table test_bm_join2(
dim UInt64,
ids AggregateFunction(groupBitmap, UInt64) )
ENGINE = MergeTree()
ORDER BY(dim)
SETTINGS index_granularity = 8192;
</create_query>
<fill_query>insert into test_bm2 SELECT 1,number FROM numbers(0, 1000)</fill_query>
<fill_query>insert into test_bm_join2 SELECT 1, bitmapBuild(range(toUInt64(0),toUInt64(11000000)))</fill_query>
<query>select a.dim,bitmapCardinality(b.ids) from test_bm2 a left join test_bm_join2 b using(dim)</query>
<drop_query>drop table if exists test_bm2</drop_query>
<drop_query>drop table if exists test_bm_join2</drop_query>
</test>

View File

@ -19,7 +19,7 @@
toInt256(number) as d,
toString(number) as f,
toFixedString(f, 20) as g
FROM numbers_mt(20000000)
FROM numbers_mt(200000000)
SETTINGS max_threads = 8
FORMAT Null
</query>
@ -38,7 +38,7 @@
toInt256(number) as d,
toString(number) as f,
toFixedString(f, 20) as g
FROM numbers_mt(20000000)
FROM numbers_mt(200000000)
SETTINGS max_threads = 8
FORMAT Null
</query>
@ -76,7 +76,7 @@
toInt256(number) as d,
toString(number) as f,
toFixedString(f, 20) as g
FROM numbers_mt(20000000)
FROM numbers_mt(200000000)
SETTINGS max_threads = 8
FORMAT Null
</query>
@ -115,7 +115,7 @@
toInt256(number) as d,
toString(number) as f,
toFixedString(f, 20) as g
FROM numbers_mt(20000000)
FROM numbers_mt(200000000)
SETTINGS max_threads = 8
FORMAT Null
</query>
@ -134,7 +134,7 @@
toInt256(number) as d,
toString(number) as f,
toFixedString(f, 20) as g
FROM numbers_mt(20000000)
FROM numbers_mt(200000000)
SETTINGS max_threads = 8
FORMAT Null
</query>
@ -153,7 +153,7 @@
toInt256(number) as d,
toString(number) as f,
toFixedString(f, 20) as g
FROM numbers_mt(20000000)
FROM numbers_mt(200000000)
SETTINGS max_threads = 8
FORMAT Null
</query>
@ -172,7 +172,7 @@
toInt256(number) as d,
toString(number) as f,
toFixedString(f, 20) as g
FROM numbers_mt(20000000)
FROM numbers_mt(200000000)
SETTINGS max_threads = 8
FORMAT Null
</query>
@ -191,7 +191,7 @@
toInt256(number) as d,
toString(number) as f,
toFixedString(f, 20) as g
FROM numbers_mt(20000000)
FROM numbers_mt(200000000)
SETTINGS max_threads = 8
FORMAT Null
</query>
@ -230,7 +230,7 @@
toInt256(number) as d,
toString(number) as f,
toFixedString(f, 20) as g
FROM numbers_mt(2000000)
FROM numbers_mt(20000000)
SETTINGS max_threads = 8
FORMAT Null
</query>
@ -249,7 +249,7 @@
toInt256(number) as d,
toString(number) as f,
toFixedString(f, 20) as g
FROM numbers_mt(20000000)
FROM numbers_mt(100000000)
SETTINGS max_threads = 8
FORMAT Null
</query>

View File

@ -114,7 +114,7 @@ FROM
(
SELECT
1 AS id,
identity(cast(1, \'UInt8\')) AS subquery
identity(CAST(1, \'UInt8\')) AS subquery
WHERE subquery = 1
)
WHERE subquery = 1

View File

@ -10,11 +10,11 @@ hello
CREATE TABLE default.cast
(
`x` UInt8,
`e` Enum8('hello' = 1, 'world' = 2) DEFAULT cast(x, 'Enum8(\'hello\' = 1, \'world\' = 2)')
`e` Enum8('hello' = 1, 'world' = 2) DEFAULT CAST(x, 'Enum8(\'hello\' = 1, \'world\' = 2)')
)
ENGINE = MergeTree
ORDER BY e
SETTINGS index_granularity = 8192
x UInt8
e Enum8(\'hello\' = 1, \'world\' = 2) DEFAULT cast(x, \'Enum8(\\\'hello\\\' = 1, \\\'world\\\' = 2)\')
e Enum8(\'hello\' = 1, \'world\' = 2) DEFAULT CAST(x, \'Enum8(\\\'hello\\\' = 1, \\\'world\\\' = 2)\')
1 hello

View File

@ -1,12 +1,12 @@
CREATE TABLE default.cast1
(
`x` UInt8,
`e` Enum8('hello' = 1, 'world' = 2) DEFAULT cast(x, 'Enum8(\'hello\' = 1, \'world\' = 2)')
`e` Enum8('hello' = 1, 'world' = 2) DEFAULT CAST(x, 'Enum8(\'hello\' = 1, \'world\' = 2)')
)
ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_00643/cast', 'r1')
ORDER BY e
SETTINGS index_granularity = 8192
x UInt8
e Enum8(\'hello\' = 1, \'world\' = 2) DEFAULT cast(x, \'Enum8(\\\'hello\\\' = 1, \\\'world\\\' = 2)\')
e Enum8(\'hello\' = 1, \'world\' = 2) DEFAULT CAST(x, \'Enum8(\\\'hello\\\' = 1, \\\'world\\\' = 2)\')
1 hello
1 hello

View File

@ -1,3 +1,5 @@
SET database_atomic_wait_for_drop_and_detach_synchronously=1;
DROP TABLE IF EXISTS cast1;
DROP TABLE IF EXISTS cast2;

View File

@ -2,7 +2,7 @@ SELECT 1
WHERE 0
SELECT 1
SELECT 1
WHERE (1 IN (0, 2)) AND (2 = (identity(cast(2, \'UInt8\')) AS subquery))
WHERE (1 IN (0, 2)) AND (2 = (identity(CAST(2, \'UInt8\')) AS subquery))
SELECT 1
WHERE 1 IN (
(

View File

@ -5,7 +5,7 @@ SELECT (SELECT * FROM system.numbers LIMIT 1 OFFSET 1) AS n, toUInt64(10 / n) FO
1,10
EXPLAIN SYNTAX SELECT (SELECT * FROM system.numbers LIMIT 1 OFFSET 1) AS n, toUInt64(10 / n);
SELECT
identity(cast(0, \'UInt64\')) AS n,
identity(CAST(0, \'UInt64\')) AS n,
toUInt64(10 / n)
SELECT * FROM (WITH (SELECT * FROM system.numbers LIMIT 1 OFFSET 1) AS n, toUInt64(10 / n) as q SELECT * FROM system.one WHERE q > 0);
0

View File

@ -11,7 +11,7 @@ arraySort(used_table_functions)
['numbers']
arraySort(used_functions)
['addDays','array','arrayFlatten','cast','modulo','plus','substring','toDate','toDayOfYear','toTypeName','toWeek']
['CAST','addDays','array','arrayFlatten','modulo','plus','substring','toDate','toDayOfYear','toTypeName','toWeek']
arraySort(used_data_type_families)
['Array','Int32','Nullable','String']

View File

@ -1,3 +1,5 @@
SET database_atomic_wait_for_drop_and_detach_synchronously=1;
SELECT uniqArray([1, 1, 2]),
SUBSTRING('Hello, world', 7, 5),
flatten([[[BIT_AND(123)]], [[mod(3, 2)], [CAST('1' AS INTEGER)]]]),

View File

@ -9,3 +9,11 @@ SELECT CAST('2020-01-01 00:00:00.3' AS DateTime64);
2020-01-01 00:00:00.300
SELECT toDateTime64(bitShiftLeft(toUInt64(1),33), 2);
2106-02-07 09:28:15.00
SELECT toDateTime(-2., 2);
1970-01-01 03:00:00.00
SELECT toDateTime64(-2., 2);
1970-01-01 03:00:00.00
SELECT toDateTime64(toFloat32(bitShiftLeft(toUInt64(1),33)), 2);
2106-02-07 09:28:16.00
SELECT toDateTime64(toFloat64(bitShiftLeft(toUInt64(1),33)), 2);
2106-02-07 09:28:15.00

View File

@ -4,3 +4,7 @@ SELECT toDateTime64(-2, 2);
SELECT CAST(-1 AS DateTime64);
SELECT CAST('2020-01-01 00:00:00.3' AS DateTime64);
SELECT toDateTime64(bitShiftLeft(toUInt64(1),33), 2);
SELECT toDateTime(-2., 2);
SELECT toDateTime64(-2., 2);
SELECT toDateTime64(toFloat32(bitShiftLeft(toUInt64(1),33)), 2);
SELECT toDateTime64(toFloat64(bitShiftLeft(toUInt64(1),33)), 2);

View File

@ -0,0 +1,9 @@
-- { echo }
SELECT toString(toDateTime('-922337203.6854775808', 1));
2106-02-07 15:41:33.6
SELECT toString(toDateTime('9922337203.6854775808', 1));
2104-12-30 00:50:11.6
SELECT toDateTime64(CAST('10000000000.1' AS Decimal64(1)), 1);
2106-02-07 20:50:08.1
SELECT toDateTime64(CAST('-10000000000.1' AS Decimal64(1)), 1);
2011-12-23 00:38:08.1

View File

@ -0,0 +1,5 @@
-- { echo }
SELECT toString(toDateTime('-922337203.6854775808', 1));
SELECT toString(toDateTime('9922337203.6854775808', 1));
SELECT toDateTime64(CAST('10000000000.1' AS Decimal64(1)), 1);
SELECT toDateTime64(CAST('-10000000000.1' AS Decimal64(1)), 1);

View File

@ -0,0 +1,30 @@
-
Hello
-
World
-
-
-
-
-
-
-
-
Hello
-
World
-
-
-
-
-
-
-
Hello
-
World
-
-
-
-
-

View File

@ -0,0 +1,3 @@
SELECT transform(number / 2, [0.5, 1.5], ['Hello', 'World'], '-') FROM numbers(10);
SELECT transform(number / 2, [1.0, 2.0], ['Hello', 'World'], '-') FROM numbers(10);
SELECT transform(number / 2, [1, 2], ['Hello', 'World'], '-') FROM numbers(10);

View File

@ -0,0 +1,66 @@
SELECT
CAST(1, 'INT'),
ceil(1),
ceil(1),
char(49),
CHAR_LENGTH('1'),
CHARACTER_LENGTH('1'),
coalesce(1),
concat('1', '1'),
corr(1, 1),
cos(1),
count(),
covarPop(1, 1),
covarSamp(1, 1),
DATABASE(),
dateDiff('DAY', toDate('2020-10-24'), toDate('2019-10-24')),
exp(1),
arrayFlatten([[1]]),
floor(1),
FQDN(),
greatest(1),
1,
ifNull(1, 1),
lower('A'),
least(1),
length('1'),
log(1),
position('1', '1'),
log(1),
log10(1),
log2(1),
lower('A'),
max(1),
substring('123', 1, 1),
min(1),
1 % 1,
NOT 1,
now(),
now64(),
nullIf(1, 1),
pi(),
position('123', '2'),
pow(1, 1),
pow(1, 1),
rand(),
replaceAll('1', '1', '2'),
reverse('123'),
round(1),
sin(1),
sqrt(1),
stddevPop(1),
stddevSamp(1),
substring('123', 2),
substring('123', 2),
count(),
tan(1),
tanh(1),
trunc(1),
trunc(1),
upper('A'),
upper('A'),
currentUser(),
varPop(1),
varSamp(1),
toWeek(toDate('2020-10-24')),
toYearWeek(toDate('2020-10-24'))

View File

@ -0,0 +1 @@
EXPLAIN SYNTAX SELECT CAST(1 AS INT), CEIL(1), CEILING(1), CHAR(49), CHAR_LENGTH('1'), CHARACTER_LENGTH('1'), COALESCE(1), CONCAT('1', '1'), CORR(1, 1), COS(1), COUNT(1), COVAR_POP(1, 1), COVAR_SAMP(1, 1), DATABASE(), DATEDIFF('DAY', toDate('2020-10-24'), toDate('2019-10-24')), EXP(1), FLATTEN([[1]]), FLOOR(1), FQDN(), GREATEST(1), IF(1, 1, 1), IFNULL(1, 1), LCASE('A'), LEAST(1), LENGTH('1'), LN(1), LOCATE('1', '1'), LOG(1), LOG10(1), LOG2(1), LOWER('A'), MAX(1), MID('123', 1, 1), MIN(1), MOD(1, 1), NOT(1), NOW(), NOW64(), NULLIF(1, 1), PI(), POSITION('123', '2'), POW(1, 1), POWER(1, 1), RAND(), REPLACE('1', '1', '2'), REVERSE('123'), ROUND(1), SIN(1), SQRT(1), STDDEV_POP(1), STDDEV_SAMP(1), SUBSTR('123', 2), SUBSTRING('123', 2), SUM(1), TAN(1), TANH(1), TRUNC(1), TRUNCATE(1), UCASE('A'), UPPER('A'), USER(), VAR_POP(1), VAR_SAMP(1), WEEK(toDate('2020-10-24')), YEARWEEK(toDate('2020-10-24')) format TSVRaw;

View File

@ -0,0 +1,16 @@
DROP TABLE IF EXISTS file_engine_table;
CREATE TABLE file_engine_table (id UInt32) ENGINE=File(TSV);
SELECT * FROM file_engine_table; --{ serverError 107 }
SET engine_file_empty_if_not_exists=0;
SELECT * FROM file_engine_table; --{ serverError 107 }
SET engine_file_empty_if_not_exists=1;
SELECT * FROM file_engine_table;
SET engine_file_empty_if_not_exists=0;
DROP TABLE file_engine_table;

View File

@ -0,0 +1,13 @@
1
2
3
4
1
2
3
4
5
6
0
1
2

View File

@ -0,0 +1,21 @@
DROP TABLE IF EXISTS test;
INSERT INTO TABLE FUNCTION file('01721_file/test/data.TSV', 'TSV', 'id UInt32') VALUES (1);
ATTACH TABLE test FROM '01721_file/test' (id UInt8) ENGINE=File(TSV);
INSERT INTO test VALUES (2), (3);
INSERT INTO test VALUES (4);
SELECT * FROM test;
SET engine_file_truncate_on_insert=0;
INSERT INTO test VALUES (5), (6);
SELECT * FROM test;
SET engine_file_truncate_on_insert=1;
INSERT INTO test VALUES (0), (1), (2);
SELECT * FROM test;
SET engine_file_truncate_on_insert=0;
DROP TABLE test;

View File

@ -0,0 +1,11 @@
CREATE TEMPORARY TABLE decimal
(
f dec(38, 38)
);
INSERT INTO decimal VALUES (0);
INSERT INTO decimal VALUES (0.42);
INSERT INTO decimal VALUES (-0.42);
SELECT f + 1048575, f - 21, f - 84, f * 21, f * -21, f / 21, f / 84 FROM decimal WHERE f > 0; -- { serverError 407 }
SELECT f + -2, f - 21, f - 84, f * 21, f * -21, f / 9223372036854775807, f / 84 FROM decimal WHERE f > 0; -- { serverError 407 }

View File

@ -0,0 +1,8 @@
1 2005-03-18 01:58:31.222
2 2005-03-18 01:58:31.222
3 2005-03-18 01:58:31.222
4 2005-03-18 01:58:31.222
2005-03-18 04:58:31.222
2005-03-18 04:58:31.222
2005-03-18 04:58:31.222
0

View File

@ -0,0 +1,11 @@
CREATE TEMPORARY TABLE t (i UInt8, x DateTime64(3, 'UTC'));
INSERT INTO t VALUES (1, 1111111111222);
INSERT INTO t VALUES (2, 1111111111.222);
INSERT INTO t VALUES (3, '1111111111222');
INSERT INTO t VALUES (4, '1111111111.222');
SELECT * FROM t ORDER BY i;
SELECT toDateTime64(1111111111.222, 3);
SELECT toDateTime64('1111111111.222', 3);
SELECT toDateTime64('1111111111222', 3);
SELECT ignore(toDateTime64(1111111111222, 3)); -- This gives somewhat correct but unexpected result

View File

@ -0,0 +1,4 @@
SELECT arrayStringConcat(arrayMap(x -> transform(x, [1025, -9223372036854775808, 65537, 257, 1048576, 10, 7, 1048575, 65536], ['yandex', 'googlegooglegooglegoogle', 'test', '', '', 'hello', 'world', '', 'xyz'], ''), arrayMap(x -> (x % -inf), range(number))), '')
FROM system.numbers
LIMIT 1025
FORMAT Null;

View File

@ -0,0 +1,7 @@
-- { echo }
SELECT CAST(1111111111.222 AS DateTime64(3));
2005-03-18 04:58:31.222
SELECT toDateTime(1111111111.222, 3);
2005-03-18 04:58:31.222
SELECT toDateTime64(1111111111.222, 3);
2005-03-18 04:58:31.222

Some files were not shown because too many files have changed in this diff.