Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-21 15:12:02 +00:00)

Commit a1d5adf8d5: Merge branch 'master' into new-block-for-functions-4

.gitignore (vendored)

@@ -118,6 +118,7 @@ website/package-lock.json
 # clangd cache
 /.clangd
+/.cache
 /compile_commands.json

@@ -15,6 +15,10 @@ if (COMPILER_GCC)
 elseif (COMPILER_CLANG)
+    # Require minimum version of clang/apple-clang
+    if (CMAKE_CXX_COMPILER_ID MATCHES "AppleClang")
-    # If you are developer you can figure out what exact versions of AppleClang are Ok,
-    # remove the following line and commit changes below.
-    message (FATAL_ERROR "AppleClang is not supported, you should install clang from brew.")
+        # AppleClang 10.0.1 (Xcode 10.2) corresponds to LLVM/Clang upstream version 7.0.0
+        # AppleClang 11.0.0 (Xcode 11.0) corresponds to LLVM/Clang upstream version 8.0.0
+        set (XCODE_MINIMUM_VERSION 10.2)

@@ -56,7 +56,6 @@ RUN apt-get update \
     python3-lxml \
     python3-requests \
     python3-termcolor \
-    qemu-user-static \
     rename \
     software-properties-common \
     tzdata \

@@ -191,63 +191,65 @@ stop_server ||:
start_server

TESTS_TO_SKIP=(
    parquet
    avro
    h3
    odbc
    mysql
    sha256
    _orc_
    arrow
    01098_temporary_and_external_tables
    01083_expressions_in_engine_arguments
    hdfs
    00911_tautological_compare
    protobuf
    capnproto
    java_hash
    hashing
    secure
    00490_special_line_separators_and_characters_outside_of_bmp
    00436_convert_charset
    00105_shard_collations
    01354_order_by_tuple_collate_const
    01292_create_user
    01098_msgpack_format
    00929_multi_match_edit_distance
    00926_multimatch
    00834_cancel_http_readonly_queries_on_client_close
    brotli
    parallel_alter
    00109_shard_totals_after_having
    00110_external_sort
    00302_http_compression
    00417_kill_query
    01294_lazy_database_concurrent
    01193_metadata_loading
    base64
    01031_mutations_interpreter_and_context
    json
    client
    01305_replica_create_drop_zookeeper
    01092_memory_profiler
    01355_ilike
    01281_unsucceeded_insert_select_queries_counter
    live_view
    limit_memory
    memory_limit
    memory_leak
    00110_external_sort
    00436_convert_charset
    00490_special_line_separators_and_characters_outside_of_bmp
    00652_replicated_mutations_zookeeper
    00682_empty_parts_merge
    00701_rollup
    00109_shard_totals_after_having
    ddl_dictionaries
    00834_cancel_http_readonly_queries_on_client_close
    00911_tautological_compare
    00926_multimatch
    00929_multi_match_edit_distance
    01031_mutations_interpreter_and_context
    01053_ssd_dictionary # this test mistakenly requires access to /var/lib/clickhouse -- can't run this locally, disabled
    01083_expressions_in_engine_arguments
    01092_memory_profiler
    01098_msgpack_format
    01098_temporary_and_external_tables
    01103_check_cpu_instructions_at_startup # avoid dependency on qemu -- inconvenient when running locally
    01193_metadata_loading
    01238_http_memory_tracking # max_memory_usage_for_user can interfere with other queries running concurrently
    01251_dict_is_in_infinite_loop
    01259_dictionary_custom_settings_ddl
    01268_dictionary_direct_layout
    01280_ssd_complex_key_dictionary
    00652_replicated_mutations_zookeeper
    01411_bayesian_ab_testing
    01238_http_memory_tracking # max_memory_usage_for_user can interfere with other queries running concurrently
    01281_group_by_limit_memory_tracking # max_memory_usage_for_user can interfere with other queries running concurrently
    01281_unsucceeded_insert_select_queries_counter
    01292_create_user
    01294_lazy_database_concurrent
    01305_replica_create_drop_zookeeper
    01354_order_by_tuple_collate_const
    01355_ilike
    01411_bayesian_ab_testing
    _orc_
    arrow
    avro
    base64
    brotli
    capnproto
    client
    ddl_dictionaries
    h3
    hashing
    hdfs
    java_hash
    json
    limit_memory
    live_view
    memory_leak
    memory_limit
    mysql
    odbc
    parallel_alter
    parquet
    protobuf
    secure
    sha256

    # Not sure why these two fail even in sequential mode. Disabled for now
    # to make some progress.

@@ -258,7 +260,7 @@ TESTS_TO_SKIP=(
    01460_DistributedFilesToInsert
)

-time clickhouse-test -j 8 --no-long --testname --shard --zookeeper --skip "${TESTS_TO_SKIP[@]}" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee "$FASTTEST_OUTPUT/test_log.txt"
+time clickhouse-test -j 8 --order=random --no-long --testname --shard --zookeeper --skip "${TESTS_TO_SKIP[@]}" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee "$FASTTEST_OUTPUT/test_log.txt"

# substr is to remove semicolon after test name
readarray -t FAILED_TESTS < <(awk '/FAIL|TIMEOUT|ERROR/ { print substr($3, 1, length($3)-1) }' "$FASTTEST_OUTPUT/test_log.txt" | tee "$FASTTEST_OUTPUT/failed-parallel-tests.txt")

@@ -281,7 +283,7 @@ then

    echo "Going to run again: ${FAILED_TESTS[*]}"

-    clickhouse-test --no-long --testname --shard --zookeeper "${FAILED_TESTS[@]}" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee -a "$FASTTEST_OUTPUT/test_log.txt"
+    clickhouse-test --order=random --no-long --testname --shard --zookeeper "${FAILED_TESTS[@]}" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee -a "$FASTTEST_OUTPUT/test_log.txt"
else
    echo "No failed tests"
fi

@@ -37,7 +37,28 @@ RUN apt-get update \
ENV TZ=Europe/Moscow
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone

-RUN python3 -m pip install urllib3==1.23 pytest docker-compose==1.22.0 docker dicttoxml kazoo PyMySQL psycopg2==2.7.5 pymongo tzlocal kafka-python protobuf redis aerospike pytest-timeout minio grpcio grpcio-tools cassandra-driver confluent-kafka avro
+RUN python3 -m pip install \
+    PyMySQL \
+    aerospike \
+    avro \
+    cassandra-driver \
+    confluent-kafka \
+    dicttoxml \
+    docker \
+    docker-compose==1.22.0 \
+    grpcio \
+    grpcio-tools \
+    kafka-python \
+    kazoo \
+    minio \
+    protobuf \
+    psycopg2-binary==2.7.5 \
+    pymongo \
+    pytest \
+    pytest-timeout \
+    redis \
+    tzlocal \
+    urllib3

ENV DOCKER_CHANNEL stable
ENV DOCKER_VERSION 17.09.1-ce

@@ -48,4 +48,8 @@ if grep -q -- "--use-skip-list" /usr/bin/clickhouse-test ; then
    SKIP_LIST_OPT="--use-skip-list"
fi

-clickhouse-test --testname --shard --zookeeper --no-stateless "$SKIP_LIST_OPT" "$ADDITIONAL_OPTIONS" "$SKIP_TESTS_OPTION" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee test_output/test_result.txt
+# We can have several additional options, so we pass them as an array because it's
+# more ideologically correct.
+read -ra ADDITIONAL_OPTIONS <<< "${ADDITIONAL_OPTIONS:-}"
+
+clickhouse-test --testname --shard --zookeeper --no-stateless --hung-check --print-time "$SKIP_LIST_OPT" "${ADDITIONAL_OPTIONS[@]}" "$SKIP_TESTS_OPTION" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee test_output/test_result.txt

@@ -105,7 +105,11 @@ if grep -q -- "--use-skip-list" /usr/bin/clickhouse-test; then
    SKIP_LIST_OPT="--use-skip-list"
fi

-LLVM_PROFILE_FILE='client_%h_%p_%m.profraw' clickhouse-test --testname --shard --zookeeper --no-stateless "$SKIP_LIST_OPT" "$ADDITIONAL_OPTIONS" "$SKIP_TESTS_OPTION" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee test_output/test_result.txt
+# We can have several additional options, so we pass them as an array because it's
+# more ideologically correct.
+read -ra ADDITIONAL_OPTIONS <<< "${ADDITIONAL_OPTIONS:-}"
+
+LLVM_PROFILE_FILE='client_%h_%p_%m.profraw' clickhouse-test --testname --shard --zookeeper --no-stateless --hung-check --print-time "$SKIP_LIST_OPT" "${ADDITIONAL_OPTIONS[@]}" "$SKIP_TESTS_OPTION" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee test_output/test_result.txt

kill_clickhouse

@@ -17,4 +17,8 @@ if grep -q -- "--use-skip-list" /usr/bin/clickhouse-test; then
    SKIP_LIST_OPT="--use-skip-list"
fi

-clickhouse-test --testname --shard --zookeeper "$SKIP_LIST_OPT" "$ADDITIONAL_OPTIONS" "$SKIP_TESTS_OPTION" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee test_output/test_result.txt
+# We can have several additional options, so we pass them as an array because it's
+# more ideologically correct.
+read -ra ADDITIONAL_OPTIONS <<< "${ADDITIONAL_OPTIONS:-}"
+
+clickhouse-test --testname --shard --zookeeper --hung-check --print-time "$SKIP_LIST_OPT" "${ADDITIONAL_OPTIONS[@]}" "$SKIP_TESTS_OPTION" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee test_output/test_result.txt

@@ -51,7 +51,11 @@ if grep -q -- "--use-skip-list" /usr/bin/clickhouse-test; then
    SKIP_LIST_OPT="--use-skip-list"
fi

-LLVM_PROFILE_FILE='client_coverage.profraw' clickhouse-test --testname --shard --zookeeper "$SKIP_LIST_OPT" "$ADDITIONAL_OPTIONS" "$SKIP_TESTS_OPTION" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee test_output/test_result.txt
+# We can have several additional options, so we pass them as an array because it's
+# more ideologically correct.
+read -ra ADDITIONAL_OPTIONS <<< "${ADDITIONAL_OPTIONS:-}"
+
+LLVM_PROFILE_FILE='client_coverage.profraw' clickhouse-test --testname --shard --zookeeper --hung-check --print-time "$SKIP_LIST_OPT" "${ADDITIONAL_OPTIONS[@]}" "$SKIP_TESTS_OPTION" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee test_output/test_result.txt

kill_clickhouse

@@ -45,7 +45,7 @@ function start()
# for clickhouse-server (via service)
echo "ASAN_OPTIONS='malloc_context_size=10 verbosity=1 allocator_release_to_os_interval_ms=10000'" >> /etc/environment
# for clickhouse-client
-export ASAN_OPTIONS='malloc_context_size=10 verbosity=1 allocator_release_to_os_interval_ms=10000'
+export ASAN_OPTIONS='malloc_context_size=10 allocator_release_to_os_interval_ms=10000'

start

@@ -28,8 +28,18 @@ def get_options(i):
    options = ""
    if 0 < i:
        options += " --order=random"

    if i % 2 == 1:
        options += " --db-engine=Ordinary"

    # If the database name is not specified, a new database is created for each functional test.
    # Run some threads with one database for all tests.
    if i % 3 == 1:
        options += " --database=test_{}".format(i)

    if i == 13:
        options += " --client-option='memory_tracker_fault_probability=0.00001'"

    return options

@@ -117,7 +117,9 @@ CREATE TABLE table_name

</details>

-As the example shows, these parameters can contain substitutions in curly brackets. The substituted values are taken from the ‘macros’ section of the configuration file. Example:
+As the example shows, these parameters can contain substitutions in curly brackets. The substituted values are taken from the [macros](../../../operations/server-configuration-parameters/settings/#macros) section of the configuration file.
+
+Example:

``` xml
<macros>

@@ -137,6 +139,9 @@ In this case, the path consists of the following parts:

`table_name` is the name of the node for the table in ZooKeeper. It is a good idea to make it the same as the table name. It is defined explicitly, because in contrast to the table name, it doesn’t change after a RENAME query.
*HINT*: you could add a database name in front of `table_name` as well, e.g. `db_name.table_name`.

+The two built-in substitutions `{database}` and `{table}` can be used; they expand into the database name and the table name respectively (unless these macros are defined in the `macros` section). So the ZooKeeper path can be specified as `'/clickhouse/tables/{layer}-{shard}/{database}/{table}'`.
+Be careful with table renames when using these built-in substitutions. The path in ZooKeeper cannot be changed, and when the table is renamed, the macros will expand into a different path: the table will refer to a path that does not exist in ZooKeeper and will go into read-only mode.

The replica name identifies different replicas of the same table. You can use the server name for this, as in the example. The name only needs to be unique within each shard.

You can define the parameters explicitly instead of using substitutions. This might be convenient for testing and for configuring small clusters. However, you can’t use distributed DDL queries (`ON CLUSTER`) in this case.
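For instance, a minimal sketch of a table with the ZooKeeper path and replica name written out explicitly (the column set here is illustrative, not from this page):

``` sql
CREATE TABLE table_name
(
    EventDate DateTime,
    CounterID UInt32
)
-- explicit ZooKeeper path and replica name instead of {substitutions}
ENGINE = ReplicatedMergeTree('/clickhouse/tables/01/table_name', 'replica_1')
PARTITION BY toYYYYMM(EventDate)
ORDER BY (CounterID, EventDate);
```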

@@ -43,6 +43,7 @@ The supported formats are:
 | [PrettyNoEscapes](#prettynoescapes) | ✗ | ✔ |
 | [PrettySpace](#prettyspace) | ✗ | ✔ |
 | [Protobuf](#protobuf) | ✔ | ✔ |
+| [ProtobufSingle](#protobufsingle) | ✔ | ✔ |
 | [Avro](#data-format-avro) | ✔ | ✔ |
 | [AvroConfluent](#data-format-avro-confluent) | ✔ | ✗ |
 | [Parquet](#data-format-parquet) | ✔ | ✔ |

@@ -1076,6 +1077,10 @@ ClickHouse inputs and outputs protobuf messages in the `length-delimited` format.
This means that every message is preceded by its length written as a [varint](https://developers.google.com/protocol-buffers/docs/encoding#varints).
See also [how to read/write length-delimited protobuf messages in popular languages](https://cwiki.apache.org/confluence/display/GEODE/Delimiting+Protobuf+Messages).

+## ProtobufSingle {#protobufsingle}
+
+Same as [Protobuf](#protobuf), but for storing/parsing a single Protobuf message without length delimiters.
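As a rough usage sketch (`test.table` and `schemafile:MessageType` are placeholder names, not from this page), reading a table in this format could look like:

``` sql
-- one message per result, no varint length prefix
SELECT * FROM test.table FORMAT ProtobufSingle SETTINGS format_schema = 'schemafile:MessageType'
```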

## Avro {#data-format-avro}

[Apache Avro](https://avro.apache.org/) is a row-oriented data serialization framework developed within Apache’s Hadoop project.
|
@ -69,6 +69,7 @@ toc_title: Adopters
|
||||
| <a href="https://www.oneapm.com/" class="favicon">OneAPM</a> | Monitorings and Data Analysis | Main product | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/8.%20clickhouse在OneAPM的应用%20杜龙.pdf) |
|
||||
| <a href="https://www.percent.cn/" class="favicon">Percent 百分点</a> | Analytics | Main Product | — | — | [Slides in Chinese, June 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup24/4.%20ClickHouse万亿数据双中心的设计与实践%20.pdf) |
|
||||
| <a href="https://plausible.io/" class="favicon">Plausible</a> | Analytics | Main Product | — | — | [Blog post, June 2020](https://twitter.com/PlausibleHQ/status/1273889629087969280) |
|
||||
| <a href="https://posthog.com/" class="favicon">PostHog</a> | Product Analytics | Main Product | — | — | [Release Notes, Oct 2020](https://posthog.com/blog/the-posthog-array-1-15-0) |
|
||||
| <a href="https://postmates.com/" class="favicon">Postmates</a> | Delivery | — | — | — | [Talk in English, July 2020](https://youtu.be/GMiXCMFDMow?t=188) |
|
||||
| <a href="http://www.pragma-innovation.fr/" class="favicon">Pragma Innovation</a> | Telemetry and Big Data Analysis | Main product | — | — | [Slides in English, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup18/4_pragma_innovation.pdf) |
|
||||
| <a href="https://www.qingcloud.com/" class="favicon">QINGCLOUD</a> | Cloud services | Main product | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/4.%20Cloud%20%2B%20TSDB%20for%20ClickHouse%20张健%20QingCloud.pdf) |
|
||||
@ -87,6 +88,7 @@ toc_title: Adopters
|
||||
| <a href="https://smi2.ru/" class="favicon">SMI2</a> | News | Analytics | — | — | [Blog Post in Russian, November 2017](https://habr.com/ru/company/smi2/blog/314558/) |
|
||||
| <a href="https://www.splunk.com/" class="favicon">Splunk</a> | Business Analytics | Main product | — | — | [Slides in English, January 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup12/splunk.pdf) |
|
||||
| <a href="https://www.spotify.com" class="favicon">Spotify</a> | Music | Experimentation | — | — | [Slides, July 2018](https://www.slideshare.net/glebus/using-clickhouse-for-experimentation-104247173) |
|
||||
| <a href="https://www.staffcop.ru/" class="favicon">Staffcop</a> | Information Security | Main Product | — | — | [Official website, Documentation](https://www.staffcop.ru/sce43) |
|
||||
| <a href="https://www.tencent.com" class="favicon">Tencent</a> | Big Data | Data processing | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/5.%20ClickHouse大数据集群应用_李俊飞腾讯网媒事业部.pdf) |
|
||||
| <a href="https://www.tencent.com" class="favicon">Tencent</a> | Messaging | Logging | — | — | [Talk in Chinese, November 2019](https://youtu.be/T-iVQRuw-QY?t=5050) |
|
||||
| <a href="https://trafficstars.com/" class="favicon">Traffic Stars</a> | AD network | — | — | — | [Slides in Russian, May 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup15/lightning/ninja.pdf) |
|
||||
|

@@ -2034,18 +2034,18 @@ Default value: `120` seconds.

Enables or disables keeping of the `Nullable` data type in [CAST](../../sql-reference/functions/type-conversion-functions.md#type_conversion_function-cast) operations.

-If set, `CAST(something_nullable AS Type)` returns `Nullable(Type)`.
+When the setting is enabled and the argument of the `CAST` function is `Nullable`, the result is also transformed to a `Nullable` type. When the setting is disabled, the result always has the destination type exactly.

Possible values:

-- 0 — The final type of `CAST` exactly the destination data type specified.
-- 1 — The final type of `CAST` becomes `Nullable(DestinationDataType)`.
+- 0 — The `CAST` result has exactly the destination type specified.
+- 1 — If the argument type is `Nullable`, the `CAST` result is transformed to `Nullable(DestinationDataType)`.

Default value: `0`.

**Examples**

-The following query exactly results in the destination data type:
+The following query results in the destination data type exactly:

``` sql
SET cast_keep_nullable = 0;
```

@@ -2077,7 +2077,7 @@ Result:

**See Also**

-- [CAST](../../sql-reference/functions/type-conversion-functions.md#type_conversion_function-cast) operator
+- [CAST](../../sql-reference/functions/type-conversion-functions.md#type_conversion_function-cast) function

## output_format_pretty_max_value_width {#output_format_pretty_max_value_width}

@@ -461,6 +461,66 @@ For other regular expressions, the code is the same as for the ‘match’ function.

The same thing as ‘like’, but negated.

## ilike {#ilike}

Case-insensitive variant of the [like](https://clickhouse.tech/docs/en/sql-reference/functions/string-search-functions/#function-like) function. You can use the `ILIKE` operator instead of the `ilike` function.

**Syntax**

``` sql
ilike(haystack, pattern)
```

**Parameters**

- `haystack` — Input string. [String](../../sql-reference/syntax.md#syntax-string-literal).
- `pattern` — If `pattern` doesn't contain percent signs or underscores, then the `pattern` only represents the string itself. An underscore (`_`) in `pattern` stands for (matches) any single character. A percent sign (`%`) matches any sequence of zero or more characters.

Some `pattern` examples:

``` text
'abc' ILIKE 'abc'    true
'abc' ILIKE 'a%'     true
'abc' ILIKE '_b_'    true
'abc' ILIKE 'c'      false
```

**Returned values**

- True, if the string matches `pattern`.
- False, if the string doesn't match `pattern`.

**Example**

Input table:

``` text
┌─id─┬─name─────┬─days─┐
│  1 │ January  │   31 │
│  2 │ February │   29 │
│  3 │ March    │   31 │
│  4 │ April    │   30 │
└────┴──────────┴──────┘
```

Query:

``` sql
SELECT * FROM Months WHERE ilike(name, '%j%')
```

Result:

``` text
┌─id─┬─name────┬─days─┐
│  1 │ January │   31 │
└────┴─────────┴──────┘
```

**See Also**

- [like](https://clickhouse.tech/docs/en/sql-reference/functions/string-search-functions/#function-like) <!--hide-->

## ngramDistance(haystack, needle) {#ngramdistancehaystack-needle}

Calculates the 4-gram distance between `haystack` and `needle`: counts the symmetric difference between two multisets of 4-grams and normalizes it by the sum of their cardinalities. Returns a float number from 0 to 1 – the closer to zero, the more similar the strings are to each other. If the constant `needle` or `haystack` is larger than 32 KB, an exception is thrown. If any of the non-constant `haystack` or `needle` strings is larger than 32 KB, the distance is always one.

@@ -5,40 +5,68 @@ toc_title: Working with maps

# Functions for maps {#functions-for-working-with-tuple-maps}

-## mapAdd(Tuple(Array, Array), Tuple(Array, Array) [, ...]) {#function-mapadd}
+## mapAdd {#function-mapadd}

Collect all the keys and sum corresponding values.

-Arguments are tuples of two arrays, where items in the first array represent keys, and the second array contains values for each key.
-All key arrays should have the same type, and all value arrays should contain items which are promotable to one type (Int64, UInt64 or Float64).
-The common promoted type is used as a type for the result array.
+**Syntax**

-Returns one tuple, where the first array contains the sorted keys and the second array contains values.
+``` sql
+mapAdd(Tuple(Array, Array), Tuple(Array, Array) [, ...])
+```

-```sql
+**Parameters**
+
+Arguments are [tuples](../../sql-reference/data-types/tuple.md#tuplet1-t2) of two [arrays](../../sql-reference/data-types/array.md#data-type-array), where items in the first array represent keys, and the second array contains values for each key. All key arrays should have the same type, and all value arrays should contain items which are promotable to one type ([Int64](../../sql-reference/data-types/int-uint.md#int-ranges), [UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges) or [Float64](../../sql-reference/data-types/float.md#float32-float64)). The common promoted type is used as a type for the result array.
+
+**Returned value**
+
+- Returns one [tuple](../../sql-reference/data-types/tuple.md#tuplet1-t2), where the first array contains the sorted keys and the second array contains values.
+
+**Example**
+
+Query:
+
+``` sql
SELECT mapAdd(([toUInt8(1), 2], [1, 1]), ([toUInt8(1), 2], [1, 1])) as res, toTypeName(res) as type;
```

-```text
+Result:
+
+``` text
┌─res───────────┬─type───────────────────────────────┐
│ ([1,2],[2,2]) │ Tuple(Array(UInt8), Array(UInt64)) │
└───────────────┴────────────────────────────────────┘
```

-## mapSubtract(Tuple(Array, Array), Tuple(Array, Array) [, ...]) {#function-mapsubtract}
+## mapSubtract {#function-mapsubtract}

Collect all the keys and subtract corresponding values.

-Arguments are tuples of two arrays, where items in the first array represent keys, and the second array contains values for each key.
-All key arrays should have the same type, and all value arrays should contain items which are promotable to one type (Int64, UInt64 or Float64).
-The common promoted type is used as a type for the result array.
+**Syntax**

-Returns one tuple, where the first array contains the sorted keys and the second array contains values.
+``` sql
+mapSubtract(Tuple(Array, Array), Tuple(Array, Array) [, ...])
+```

+**Parameters**
+
+Arguments are [tuples](../../sql-reference/data-types/tuple.md#tuplet1-t2) of two [arrays](../../sql-reference/data-types/array.md#data-type-array), where items in the first array represent keys, and the second array contains values for each key. All key arrays should have the same type, and all value arrays should contain items which are promotable to one type ([Int64](../../sql-reference/data-types/int-uint.md#int-ranges), [UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges) or [Float64](../../sql-reference/data-types/float.md#float32-float64)). The common promoted type is used as a type for the result array.
+
+**Returned value**
+
+- Returns one [tuple](../../sql-reference/data-types/tuple.md#tuplet1-t2), where the first array contains the sorted keys and the second array contains values.
+
+**Example**
+
+Query:
+
```sql
SELECT mapSubtract(([toUInt8(1), 2], [toInt32(1), 1]), ([toUInt8(1), 2], [toInt32(2), 1])) as res, toTypeName(res) as type;
```

+Result:
+
```text
┌─res────────────┬─type──────────────────────────────┐
│ ([1,2],[-1,0]) │ Tuple(Array(UInt8), Array(Int64)) │

@@ -47,21 +75,41 @@ SELECT mapSubtract(([toUInt8(1), 2], [toInt32(1), 1]), ([toUInt8(1), 2], [toInt3

## mapPopulateSeries {#function-mappopulateseries}

-Syntax: `mapPopulateSeries((keys : Array(<IntegerType>), values : Array(<IntegerType>)[, max : <IntegerType>])`
+Fills missing keys in the maps (a key and value array pair), where keys are integers. Also, it supports specifying the max key, which is used to extend the keys array.

-Generates a map, where keys are a series of numbers, from minimum to maximum keys (or `max` argument if it specified) taken from `keys` array with step size of one, and corresponding values taken from `values` array. If the value is not specified for the key, then it uses default value in the resulting map.
-For repeated keys only the first value (in order of appearing) gets associated with the key.
+**Syntax**
+
+``` sql
+mapPopulateSeries(keys, values[, max])
+```
+
+Generates a map, where keys are a series of numbers from the minimum to the maximum key (or the `max` argument, if specified) taken from the `keys` array with a step size of one, and corresponding values taken from the `values` array. If a value is not specified for a key, then the default value is used in the resulting map. For repeated keys, only the first value (in order of appearance) gets associated with the key.

The number of elements in `keys` and `values` must be the same for each row.

-Returns a tuple of two arrays: keys in sorted order, and values the corresponding keys.
+**Parameters**
+
+- `keys` — Array of keys. [Array](../../sql-reference/data-types/array.md#data-type-array)([Int](../../sql-reference/data-types/int-uint.md#uint-ranges)).
+- `values` — Array of values. [Array](../../sql-reference/data-types/array.md#data-type-array)([Int](../../sql-reference/data-types/int-uint.md#uint-ranges)).
+
+**Returned value**
+
+- Returns a [tuple](../../sql-reference/data-types/tuple.md#tuplet1-t2) of two [arrays](../../sql-reference/data-types/array.md#data-type-array): keys in sorted order, and the values for the corresponding keys.
+
+**Example**
+
+Query:
+
```sql
select mapPopulateSeries([1,2,4], [11,22,44], 5) as res, toTypeName(res) as type;
```

+Result:
+
```text
┌─res──────────────────────────┬─type──────────────────────────────┐
│ ([1,2,3,4,5],[11,22,0,44,0]) │ Tuple(Array(UInt8), Array(UInt8)) │
└──────────────────────────────┴───────────────────────────────────┘
```

[Original article](https://clickhouse.tech/docs/en/query_language/functions/tuple-map-functions/) <!--hide-->
|
@ -370,6 +370,10 @@ SELECT toTypeName(CAST(x, 'Nullable(UInt16)')) FROM t_null
|
||||
└─────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
**See also**
|
||||
|
||||
- [cast_keep_nullable](../../operations/settings/settings.md#cast_keep_nullable) setting
|
||||
|
||||
## toInterval(Year\|Quarter\|Month\|Week\|Day\|Hour\|Minute\|Second) {#function-tointerval}
|
||||
|
||||
Converts a Number type argument to an [Interval](../../sql-reference/data-types/special-data-types/interval.md) data type.
|
||||
|

@@ -53,6 +53,8 @@ ClickHouse transforms operators to their corresponding functions at the query parsing stage.

`a NOT LIKE s` – The `notLike(a, b)` function.

+`a ILIKE s` – The `ilike(a, b)` function.

`a BETWEEN b AND c` – The same as `a >= b AND a <= c`.

`a NOT BETWEEN b AND c` – The same as `a < b OR a > c`.

@@ -139,7 +139,7 @@ ENGINE = <Engine>
```

The `Default` codec can be specified to reference the default compression, which may depend on different settings (and properties of data) at runtime.
-Example: `value UInt64 CODEC(Default)` - the same as lack of codec specification.
+Example: `value UInt64 CODEC(Default)` — the same as lack of codec specification.

You can also remove the current CODEC from the column and use the default compression from config.xml:
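A minimal example of such a statement (mirroring the one shown in the Russian version of this page; `codec_example` and `float_value` are illustrative names):

``` sql
-- drop the column's explicit codec and fall back to the server-wide default
ALTER TABLE codec_example MODIFY COLUMN float_value CODEC(Default);
```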

docs/en/sql-reference/table-functions/view.md (new file, 67 lines)

@@ -0,0 +1,67 @@
---
toc_priority: 51
toc_title: view
---

## view {#view}

Turns a subquery into a table. The function implements views (see [CREATE VIEW](https://clickhouse.tech/docs/en/sql-reference/statements/create/view/#create-view)). The resulting table doesn't store data, but only stores the specified `SELECT` query. When reading from the table, ClickHouse executes the query and deletes all unnecessary columns from the result.

**Syntax**

``` sql
view(subquery)
```

**Parameters**

- `subquery` — `SELECT` query.

**Returned value**

- A table.

**Example**

Input table:

``` text
┌─id─┬─name─────┬─days─┐
│  1 │ January  │   31 │
│  2 │ February │   29 │
│  3 │ March    │   31 │
│  4 │ April    │   30 │
└────┴──────────┴──────┘
```

Query:

``` sql
SELECT * FROM view(SELECT name FROM months)
```

Result:

``` text
┌─name─────┐
│ January  │
│ February │
│ March    │
│ April    │
└──────────┘
```

You can use the `view` function as a parameter of the [remote](https://clickhouse.tech/docs/en/sql-reference/table-functions/remote/#remote-remotesecure) and [cluster](https://clickhouse.tech/docs/en/sql-reference/table-functions/cluster/#cluster-clusterallreplicas) table functions:

``` sql
SELECT * FROM remote(`127.0.0.1`, view(SELECT a, b, c FROM table_name))
```

``` sql
SELECT * FROM cluster(`cluster_name`, view(SELECT a, b, c FROM table_name))
```

**See Also**

- [View Table Engine](https://clickhouse.tech/docs/en/engines/table-engines/special/view/)

[Original article](https://clickhouse.tech/docs/en/query_language/table_functions/view/) <!--hide-->

@@ -113,7 +113,9 @@ CREATE TABLE table_name

</details>

-As the example shows, these parameters can contain substitutions in curly brackets. The substituted values are taken from the `macros` section of the configuration file. Example:
+As the example shows, these parameters can contain substitutions in curly brackets. The substituted values are taken from the [macros](../../../operations/server-configuration-parameters/settings/#macros) section of the configuration file.
+
+Example:

``` xml
<macros>

@@ -133,6 +135,9 @@ CREATE TABLE table_name

`table_name` is the name of the node for the table in ZooKeeper. It makes sense to use the same name as the table name. It is specified explicitly because, in contrast to the table name, it doesn't change after a RENAME query.
*Hint*: you could also put a database name in front of `table_name`, e.g. `db_name.table_name`.

+The two built-in substitutions `{database}` and `{table}` can be used; they expand into the database name and the table name respectively (unless these substitutions are redefined in the `macros` section). So the ZooKeeper path can be specified as `'/clickhouse/tables/{layer}-{shard}/{database}/{table}'`.
+Be careful with table renames when using these automatic substitutions. The path in ZooKeeper cannot be changed, and when the table is renamed the substitutions will expand into a different path: the table will refer to a path that does not exist in ZooKeeper and will go into read-only mode.

The replica name identifies different replicas of the same table. You can use the server name for this, as shown in the example. However, the name only needs to be unique within each shard.

You can define the parameters explicitly instead of using substitutions. This may be convenient for testing and for configuring small clusters. However, in this case you cannot use distributed DDL queries (`ON CLUSTER`).

@@ -27,6 +27,7 @@ ClickHouse can accept (`INSERT`) and return (`SELECT`) data in various formats.
 | [PrettyNoEscapes](#prettynoescapes) | ✗ | ✔ |
 | [PrettySpace](#prettyspace) | ✗ | ✔ |
 | [Protobuf](#protobuf) | ✔ | ✔ |
+| [ProtobufSingle](#protobufsingle) | ✔ | ✔ |
 | [Parquet](#data-format-parquet) | ✔ | ✔ |
 | [Arrow](#data-format-arrow) | ✔ | ✔ |
 | [ArrowStream](#data-format-arrow-stream) | ✔ | ✔ |

@@ -948,6 +949,10 @@ message MessageType {
ClickHouse writes and reads `Protocol Buffers` messages in the `length-delimited` format. This means that every message is preceded by its length written as a [varint](https://developers.google.com/protocol-buffers/docs/encoding#varints). See also [how to read and write length-delimited Protocol Buffers messages in various programming languages](https://cwiki.apache.org/confluence/display/GEODE/Delimiting+Protobuf+Messages).

+## ProtobufSingle {#protobufsingle}
+
+Same as [Protobuf](#protobuf), but without delimiters; allows writing/reading at most one message at a time.

## Avro {#data-format-avro}

[Apache Avro](https://avro.apache.org/) is a row-oriented data serialization framework developed within the Apache Hadoop project.

@@ -958,7 +963,7 @@ ClickHouse writes and reads `Protocol Buffers` messages

## AvroConfluent {#data-format-avro-confluent}

For the `AvroConfluent` format, ClickHouse supports decoding single-object `Avro` messages. Such messages are used with [Kafka](http://kafka.apache.org/) and the [Confluent](https://docs.confluent.io/current/schema-registry/index.html) schema registry.

Each `Avro` message contains a schema ID, which can be resolved to the actual schema with the help of the schema registry.

@@ -972,7 +977,7 @@ The schema registry URL is configured with

### Usage {#ispolzovanie}

To quickly check schema resolution, use [kafkacat](https://github.com/edenhill/kafkacat) with [clickhouse-local](../operations/utilities/clickhouse-local.md):

``` bash
$ kafkacat -b kafka-broker -C -t topic1 -o beginning -f '%s' -c 3 | clickhouse-local --input-format AvroConfluent --format_avro_schema_registry_url 'http://schema-registry' -S "field1 Int64, field2 String" -q 'select * from table'
```

@@ -61,7 +61,21 @@ ClickHouse checks the `min_part_size` and `min_part_size_ratio` conditions
</compression>
```

-## default_database {#default-database}
+## custom_settings_prefixes {#custom_settings_prefixes}
+
+The list of prefixes for [custom settings](../../operations/settings/index.md#custom_settings). The prefixes must be comma-separated.
+
+**Example**
+
+```xml
+<custom_settings_prefixes>custom_</custom_settings_prefixes>
+```
+
+**See also**
+
+- [Custom settings](../../operations/settings/index.md#custom_settings)
+
+## default\_database {#default-database}

The default database.

@@ -373,7 +387,7 @@ ClickHouse checks the `min_part_size` and `min_part_size_ratio` conditions

Can be omitted if replicated tables are not used.

-For details, see the section «[Creating replicated tables](../../operations/server-configuration-parameters/settings.md)».
+For details, see the section «[Creating replicated tables](../../engines/table-engines/mergetree-family/replication.md)».

**Example**

@@ -27,4 +27,30 @@ toc_title: "\u041d\u0430\u0441\u0442\u0440\u043e\u0439\u043a\u0438"

Settings that can only be set in the server configuration file are not covered in this section.

+## Custom settings {#custom_settings}
+
+In addition to the common [settings](../../operations/settings/settings.md), users can define their own custom settings.
+
+The name of a custom setting must begin with one of the predefined prefixes. The list of these prefixes must be declared in the [custom_settings_prefixes](../../operations/server-configuration-parameters/settings.md#custom_settings_prefixes) parameter of the server configuration file.
+
+```xml
+<custom_settings_prefixes>custom_</custom_settings_prefixes>
+```
+
+To set a value for a custom setting, use the `SET` command:
+
+```sql
+SET custom_a = 123;
+```
+
+To get the current value of a custom setting, use the `getSetting()` function:
+
+```sql
+SELECT getSetting('custom_a');
+```
+
+**See also**
+
+- [Server configuration parameters](../../operations/server-configuration-parameters/settings.md)
+
[Original article](https://clickhouse.tech/docs/ru/operations/settings/) <!--hide-->

@@ -1164,9 +1164,9 @@ ClickHouse generates an exception

## insert_quorum_timeout {#settings-insert_quorum_timeout}

-Quorum write timeout in seconds. If the timeout has passed and no write has taken place yet, ClickHouse generates an exception and the client must repeat the query to write the same block to the same or any other replica.
+Quorum write timeout in milliseconds. If the timeout has passed and no write has taken place yet, ClickHouse generates an exception and the client must repeat the query to write the same block to the same or any other replica.

-Default value: 60 seconds.
+Default value: 600000 milliseconds (10 minutes).
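As a minimal sketch of setting a quorum write per session (`insert_quorum = 2` is just an illustrative value; the timeout is the default mentioned above):

``` sql
SET insert_quorum = 2;               -- require acknowledgement from 2 replicas
SET insert_quorum_timeout = 600000;  -- wait up to 10 minutes for the quorum write
```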
See also:

@@ -1994,12 +1994,13 @@ SELECT range(number) FROM system.numbers LIMIT 5 FORMAT PrettyCompactNoEscapes;

Enables or disables keeping of the `Nullable` type for an argument of the [CAST](../../sql-reference/functions/type-conversion-functions.md#type_conversion_function-cast) function.

-If the setting is enabled, the `CAST(something_nullable AS Type)` function returns `Nullable(Type)`.
+If the setting is enabled, then when an argument of `Nullable` type is passed to the `CAST` function, the function returns a result that is also converted to the `Nullable` type.
+If the setting is disabled, the `CAST` function always returns a result of exactly the specified type.

Possible values:

- 0 — the `CAST` function converts the argument strictly to the specified type.
-- 1 — the `CAST` function converts the argument to the `Nullable` variant of the specified type.
+- 1 — if the argument is of `Nullable` type, the `CAST` function converts it to the `Nullable` variant of the specified type.

Default value: `0`.

@@ -11,3 +11,80 @@

### Stochastic Logistic Regression {#stochastic-logistic-regression}

The [stochasticLogisticRegression](../../sql-reference/functions/machine-learning-functions.md#agg_functions-stochasticlogisticregression) aggregate function implements stochastic gradient descent for the binary classification problem.

## bayesAB {#bayesab}

Compares test groups (variants) and, for each group, calculates the probability that this group turns out to be the best one. The first of the listed groups is considered the control group.

**Syntax**

``` sql
bayesAB(distribution_name, higher_is_better, variant_names, x, y)
```

**Parameters**

- `distribution_name` — probability distribution. [String](../../sql-reference/data-types/string.md). Possible values:

    - `beta` for the [Beta distribution](https://ru.wikipedia.org/wiki/Бета-распределение)
    - `gamma` for the [Gamma distribution](https://ru.wikipedia.org/wiki/Гамма-распределение)

- `higher_is_better` — how preference is determined. [Boolean](../../sql-reference/data-types/boolean.md). Possible values:

    - `0` — the lower the value, the better
    - `1` — the higher the value, the better

- `variant_names` — an array containing the names of the variants. [Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md)).

- `x` — an array containing the number of tests (trials) performed for each variant. [Array](../../sql-reference/data-types/array.md)([Float64](../../sql-reference/data-types/float.md)).

- `y` — an array containing the number of successful tests (trials) for each variant. [Array](../../sql-reference/data-types/array.md)([Float64](../../sql-reference/data-types/float.md)).

!!! note "Note"
    All three arrays must be the same size. All `x` and `y` values must be non-negative numbers (constants). A `y` value cannot exceed the corresponding `x` value.

**Returned values**

For each variant, the following are calculated:
- `beats_control` — the probability that this variant beats the control variant in the long run
- `to_be_best` — the probability that this variant is the best one in the long run

Type: JSON.

**Example**

Query:

``` sql
SELECT bayesAB('beta', 1, ['Control', 'A', 'B'], [3000., 3000., 3000.], [100., 90., 110.]) FORMAT PrettySpace;
```

Result:

``` text
{
   "data":[
      {
         "variant_name":"Control",
         "x":3000,
         "y":100,
         "beats_control":0,
         "to_be_best":0.22619
      },
      {
         "variant_name":"A",
         "x":3000,
         "y":90,
         "beats_control":0.23469,
         "to_be_best":0.04671
      },
      {
         "variant_name":"B",
         "x":3000,
         "y":110,
         "beats_control":0.7580899999999999,
         "to_be_best":0.7271
      }
   ]
}
```

@@ -1431,6 +1431,41 @@ SELECT randomStringUTF8(13)

```

## getSetting {#getSetting}

Returns the current value of a [custom setting](../../operations/settings/index.md#custom_settings).

**Syntax**

```sql
getSetting('custom_setting');
```

**Parameter**

- `custom_setting` — the name of the setting. [String](../../sql-reference/data-types/string.md).

**Returned value**

- The current value of the custom setting.

**Example**

```sql
SET custom_a = 123;
SELECT getSetting('custom_a');
```

**Result**

```
123
```

**See also**

- [Custom settings](../../operations/settings/index.md#custom_settings)

## isDecimalOverflow {#is-decimal-overflow}

Checks whether a [Decimal](../../sql-reference/data-types/decimal.md#decimalp-s-decimal32s-decimal64s-decimal128s) number is outside its own (or the specified) range of values.
|
||||
|
||||
То же, что like, но с отрицанием.
|
||||
|
||||
## ilike {#ilike}
|
||||
|
||||
Нечувствительный к регистру вариант функции [like](https://clickhouse.tech/docs/ru/sql-reference/functions/string-search-functions/#function-like). Вы можете использовать оператор `ILIKE` вместо функции `ilike`.
|
||||
|
||||
**Синтаксис**
|
||||
|
||||
``` sql
|
||||
ilike(haystack, pattern)
|
||||
```
|
||||
|
||||
**Параметры**
|
||||
|
||||
- `haystack` — Входная строка. [String](../../sql-reference/syntax.md#syntax-string-literal).
|
||||
- `pattern` — Если `pattern` не содержит процента или нижнего подчеркивания, тогда `pattern` представляет саму строку. Нижнее подчеркивание (`_`) в `pattern` обозначает любой отдельный символ. Знак процента (`%`) соответствует последовательности из любого количества символов: от нуля и более.
|
||||
|
||||
Некоторые примеры `pattern`:
|
||||
|
||||
``` text
|
||||
'abc' ILIKE 'abc' true
|
||||
'abc' ILIKE 'a%' true
|
||||
'abc' ILIKE '_b_' true
|
||||
'abc' ILIKE 'c' false
|
||||
```
|
||||
|
||||
**Возвращаемые значения**
|
||||
|
||||
- Правда, если строка соответствует `pattern`.
|
||||
- Ложь, если строка не соответствует `pattern`.
|
||||
|
||||
**Пример**
|
||||
|
||||
Входная таблица:
|
||||
|
||||
``` text
|
||||
┌─id─┬─name─────┬─days─┐
|
||||
│ 1 │ January │ 31 │
|
||||
│ 2 │ February │ 29 │
|
||||
│ 3 │ March │ 31 │
|
||||
│ 4 │ April │ 30 │
|
||||
└────┴──────────┴──────┘
|
||||
```
|
||||
|
||||
Запрос:
|
||||
|
||||
``` sql
|
||||
SELECT * FROM Months WHERE ilike(name, '%j%')
|
||||
```
|
||||
|
||||
Результат:
|
||||
|
||||
``` text
|
||||
┌─id─┬─name────┬─days─┐
|
||||
│ 1 │ January │ 31 │
|
||||
└────┴─────────┴──────┘
|
||||
```
|
||||
|
||||
**Смотрите также**
|
||||
|
||||
- [like](https://clickhouse.tech/docs/ru/sql-reference/functions/string-search-functions/#function-like) <!--hide-->
|
||||
|
||||
## ngramDistance(haystack, needle) {#ngramdistancehaystack-needle}
|
||||
|
||||
Вычисление 4-граммного расстояния между `haystack` и `needle`: считается симметрическая разность между двумя мультимножествами 4-грамм и нормализуется на сумму их мощностей. Возвращает число float от 0 до 1 – чем ближе к нулю, тем больше строки похожи друг на друга. Если константный `needle` или `haystack` больше чем 32КБ, кидается исключение. Если некоторые строки из неконстантного `haystack` или `needle` больше 32КБ, расстояние всегда равно единице.
|
||||
|

docs/ru/sql-reference/functions/tuple-map-functions.md (new file, 119 lines)

@@ -0,0 +1,119 @@
---
toc_priority: 46
toc_title: Working with map containers
---

# Functions for working with map containers {#functions-for-working-with-tuple-maps}

## mapAdd {#function-mapadd}

Collects all the keys and sums the corresponding values.

**Syntax**

``` sql
mapAdd(Tuple(Array, Array), Tuple(Array, Array) [, ...])
```

**Parameters**

The arguments are [tuples](../../sql-reference/data-types/tuple.md#tuplet1-t2) of two [arrays](../../sql-reference/data-types/array.md#data-type-array), where the items in the first array represent keys and the second array contains the values for each key.
All key arrays must have the same type, and all value arrays must contain items that can be promoted to one type ([Int64](../../sql-reference/data-types/int-uint.md#int-ranges), [UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges) or [Float64](../../sql-reference/data-types/float.md#float32-float64)).
The common promoted type is used as the type for the resulting array.

**Returned value**

- Returns one [tuple](../../sql-reference/data-types/tuple.md#tuplet1-t2), in which the first array contains the sorted keys and the second one contains the values.

**Example**

Query:

``` sql
SELECT mapAdd(([toUInt8(1), 2], [1, 1]), ([toUInt8(1), 2], [1, 1])) as res, toTypeName(res) as type;
```

Result:

``` text
┌─res───────────┬─type───────────────────────────────┐
│ ([1,2],[2,2]) │ Tuple(Array(UInt8), Array(UInt64)) │
└───────────────┴────────────────────────────────────┘
```

## mapSubtract {#function-mapsubtract}

Collects all the keys and subtracts the corresponding values.

**Syntax**

``` sql
mapSubtract(Tuple(Array, Array), Tuple(Array, Array) [, ...])
```

**Parameters**

The arguments are [tuples](../../sql-reference/data-types/tuple.md#tuplet1-t2) of two [arrays](../../sql-reference/data-types/array.md#data-type-array), where the items in the first array represent keys and the second array contains the values for each key.
All key arrays must have the same type, and all value arrays must contain items that can be promoted to one type ([Int64](../../sql-reference/data-types/int-uint.md#int-ranges), [UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges) or [Float64](../../sql-reference/data-types/float.md#float32-float64)).
The common promoted type is used as the type for the resulting array.

**Returned value**

- Returns one [tuple](../../sql-reference/data-types/tuple.md#tuplet1-t2), in which the first array contains the sorted keys and the second one contains the values.

**Example**

Query:

```sql
SELECT mapSubtract(([toUInt8(1), 2], [toInt32(1), 1]), ([toUInt8(1), 2], [toInt32(2), 1])) as res, toTypeName(res) as type;
```

Result:

```text
┌─res────────────┬─type──────────────────────────────┐
│ ([1,2],[-1,0]) │ Tuple(Array(UInt8), Array(Int64)) │
└────────────────┴───────────────────────────────────┘
```

## mapPopulateSeries {#function-mappopulateseries}

Fills in missing keys in a map container (a pair of key and value arrays), where the keys are integers. It also supports specifying a maximum key, which is used to extend the keys array.

**Syntax**

``` sql
mapPopulateSeries(keys, values[, max])
```

Generates a map container where the keys are a series of numbers from the minimum to the maximum key (or the `max` argument, if specified) taken from the `keys` array with a step size of one, and the corresponding values taken from the `values` array. If a value is not specified for a key, the default value is used in the resulting container.

The number of elements in `keys` and `values` must be the same for each row.

**Parameters**

- `keys` — array of keys. [Array](../../sql-reference/data-types/array.md#data-type-array)([Int](../../sql-reference/data-types/int-uint.md#int-ranges)).
- `values` — array of values. [Array](../../sql-reference/data-types/array.md#data-type-array)([Int](../../sql-reference/data-types/int-uint.md#int-ranges)).

**Returned value**

- Returns a [tuple](../../sql-reference/data-types/tuple.md#tuplet1-t2) of two [arrays](../../sql-reference/data-types/array.md#data-type-array): the keys in sorted order and the values for the corresponding keys.

**Example**

Query:

```sql
select mapPopulateSeries([1,2,4], [11,22,44], 5) as res, toTypeName(res) as type;
```

Result:

```text
┌─res──────────────────────────┬─type──────────────────────────────┐
│ ([1,2,3,4,5],[11,22,0,44,0]) │ Tuple(Array(UInt8), Array(UInt8)) │
└──────────────────────────────┴───────────────────────────────────┘
```

[Original article](https://clickhouse.tech/docs/en/query_language/functions/tuple-map-functions/) <!--hide-->

@@ -362,6 +362,10 @@ SELECT toTypeName(CAST(x, 'Nullable(UInt16)')) FROM t_null
└─────────────────────────────────────────┘
```

+**See also**
+
+- The [cast_keep_nullable](../../operations/settings/settings.md#cast_keep_nullable) setting

## toInterval(Year\|Quarter\|Month\|Week\|Day\|Hour\|Minute\|Second) {#function-tointerval}

Converts an argument of a numeric type to an [Interval](../../sql-reference/data-types/special-data-types/interval.md) data type.

@@ -49,6 +49,8 @@

`a NOT LIKE s` — the `notLike(a, b)` function

+`a ILIKE s` — the `ilike(a, b)` function

`a BETWEEN b AND c` — the same as `a >= b AND a <= c`

`a NOT BETWEEN b AND c` — the same as `a < b OR a > c`

@@ -119,7 +119,18 @@ ENGINE = <Engine>
...
```

-If a codec is specified for a column, the default codec is not applied. Codecs can be combined in a pipeline, for example, `CODEC(Delta, ZSTD)`. To choose the best codec combination for your project, run benchmarks similar to those described in the Altinity article [New Encodings to Improve ClickHouse Efficiency](https://www.altinity.com/blog/2019/7/new-encodings-to-improve-clickhouse).
+If the `Default` codec is specified for a column, the default compression is used, which may depend on various settings (and properties of the data) at runtime.
+Example: `value UInt64 CODEC(Default)` — the same as not specifying a codec at all.
+
+You can also replace a column's codec with the default compression defined in config.xml:
+
+``` sql
+ALTER TABLE codec_example MODIFY COLUMN float_value CODEC(Default);
+```
+
+Codecs can be combined in a pipeline, for example, `CODEC(Delta, Default)`.
+
+To choose the best codec combination for your project, run benchmarks similar to those described in the Altinity article [New Encodings to Improve ClickHouse Efficiency](https://www.altinity.com/blog/2019/7/new-encodings-to-improve-clickhouse). Codecs are not applied to `ALIAS` columns.

!!! warning "Warning"
    You cannot decompress a ClickHouse database with external utilities such as `lz4`. Use the special utility [clickhouse-compressor](https://github.com/ClickHouse/ClickHouse/tree/master/programs/compressor) instead.

@@ -195,4 +206,4 @@ CREATE TEMPORARY TABLE [IF NOT EXISTS] table_name


[Original article](https://clickhouse.tech/docs/ru/sql-reference/statements/create/table)
<!--hide-->
62
docs/ru/sql-reference/table-functions/view.md
Normal file
62
docs/ru/sql-reference/table-functions/view.md
Normal file
@ -0,0 +1,62 @@
## view {#view}

Turns a subquery into a table. The function implements views (see [CREATE VIEW](https://clickhouse.tech/docs/ru/sql-reference/statements/create/view/#create-view)). The resulting table does not store data, but only stores the specified `SELECT` query. When reading from the table, ClickHouse executes the query and removes all unnecessary columns from the result.

**Syntax**

``` sql
view(subquery)
```

**Input parameters**

- `subquery` — a `SELECT` query.

**Returned value**

- A table.

**Example**

Input table:

``` text
┌─id─┬─name─────┬─days─┐
│  1 │ January  │   31 │
│  2 │ February │   29 │
│  3 │ March    │   31 │
│  4 │ April    │   30 │
└────┴──────────┴──────┘
```

Query:

``` sql
SELECT * FROM view(SELECT name FROM months)
```

Result:

``` text
┌─name─────┐
│ January  │
│ February │
│ March    │
│ April    │
└──────────┘
```

You can use the `view` function as a parameter of the [remote](https://clickhouse.tech/docs/ru/sql-reference/table-functions/remote/#remote-remotesecure) and [cluster](https://clickhouse.tech/docs/ru/sql-reference/table-functions/cluster/#cluster-clusterallreplicas) table functions:

``` sql
SELECT * FROM remote(`127.0.0.1`, view(SELECT a, b, c FROM table_name))
```

``` sql
SELECT * FROM cluster(`cluster_name`, view(SELECT a, b, c FROM table_name))
```

**See also**

- [view](https://clickhouse.tech/docs/ru/engines/table-engines/special/view/#table_engines-view)

[Original article](https://clickhouse.tech/docs/ru/query_language/table_functions/view/) <!--hide-->
@@ -1202,8 +1202,15 @@ private:
        }
        catch (...)
        {
            // Some functions (e.g. protocol parsers) don't throw, but
            // set last_exception instead, so we'll also do it here for
            // uniformity.
            last_exception_received_from_server = std::make_unique<Exception>(getCurrentExceptionMessage(true), getCurrentExceptionCode());
            received_exception_from_server = true;
        }

        if (received_exception_from_server)
        {
            fmt::print(stderr, "Error on processing query '{}': {}\n",
                ast_to_process->formatForErrorMessage(),
                last_exception_received_from_server->message());
@@ -1213,29 +1220,30 @@ private:
        {
            // Probably the server is dead because we found an assertion
            // failure. Fail fast.
            fmt::print(stderr, "Lost connection to the server\n");
            return begin;
        }

        // The server is still alive so we're going to continue fuzzing.
        // Determine what we're going to use as the starting AST.
        if (received_exception_from_server)
        {
            // Query completed with error, ignore it and fuzz again.
            fprintf(stderr, "Got error, will fuzz again\n");

            // Query completed with error, keep the previous starting AST.
            // Also discard the exception that we now know to be non-fatal,
            // so that it doesn't influence the exit code.
            last_exception_received_from_server.reset(nullptr);
            received_exception_from_server = false;
            last_exception_received_from_server.reset();

            continue;
        }
        else if (ast_to_process->formatForErrorMessage().size() > 500)
        {
            // ast too long, start from original ast
            fprintf(stderr, "current ast too long, won't elaborate\n");
            fprintf(stderr, "Current AST is too long, discarding it and using the original AST as a start\n");
            fuzz_base = orig_ast;
        }
        else
        {
            // fuzz starting from this successful query
            fprintf(stderr, "using this ast as etalon\n");
            fprintf(stderr, "Query succeeded, using this AST as a start\n");
            fuzz_base = ast_to_process;
        }
    }
@@ -4,7 +4,7 @@ set(CLICKHOUSE_SERVER_SOURCES
)

if (OS_LINUX)
    set (LINK_CONFIG_LIB INTERFACE "-Wl,${WHOLE_ARCHIVE} $<TARGET_FILE:clickhouse_server_configs> -Wl,${NO_WHOLE_ARCHIVE}")
    set (LINK_RESOURCE_LIB INTERFACE "-Wl,${WHOLE_ARCHIVE} $<TARGET_FILE:clickhouse_server_configs> -Wl,${NO_WHOLE_ARCHIVE}")
endif ()

set (CLICKHOUSE_SERVER_LINK
@@ -20,7 +20,7 @@ set (CLICKHOUSE_SERVER_LINK
    clickhouse_table_functions
    string_utils

    ${LINK_CONFIG_LIB}
    ${LINK_RESOURCE_LIB}

    PUBLIC
    daemon
@@ -37,20 +37,20 @@ if (OS_LINUX)
    # 1. Allow to run the binary without download of any other files.
    # 2. Allow to implement "sudo clickhouse install" tool.

    foreach(CONFIG_FILE config users embedded)
        set(CONFIG_OBJ ${CONFIG_FILE}.o)
        set(CONFIG_OBJS ${CONFIG_OBJS} ${CONFIG_OBJ})
    foreach(RESOURCE_FILE config.xml users.xml embedded.xml play.html)
        set(RESOURCE_OBJ ${RESOURCE_FILE}.o)
        set(RESOURCE_OBJS ${RESOURCE_OBJS} ${RESOURCE_OBJ})

        # https://stackoverflow.com/questions/14776463/compile-and-add-an-object-file-from-a-binary-with-cmake
        add_custom_command(OUTPUT ${CONFIG_OBJ}
            COMMAND cd ${CMAKE_CURRENT_SOURCE_DIR} && ${OBJCOPY_PATH} -I binary ${OBJCOPY_ARCH_OPTIONS} ${CONFIG_FILE}.xml ${CMAKE_CURRENT_BINARY_DIR}/${CONFIG_OBJ}
        add_custom_command(OUTPUT ${RESOURCE_OBJ}
            COMMAND cd ${CMAKE_CURRENT_SOURCE_DIR} && ${OBJCOPY_PATH} -I binary ${OBJCOPY_ARCH_OPTIONS} ${RESOURCE_FILE} ${CMAKE_CURRENT_BINARY_DIR}/${RESOURCE_OBJ}
            COMMAND ${OBJCOPY_PATH} --rename-section .data=.rodata,alloc,load,readonly,data,contents
                ${CMAKE_CURRENT_BINARY_DIR}/${CONFIG_OBJ} ${CMAKE_CURRENT_BINARY_DIR}/${CONFIG_OBJ})
                ${CMAKE_CURRENT_BINARY_DIR}/${RESOURCE_OBJ} ${CMAKE_CURRENT_BINARY_DIR}/${RESOURCE_OBJ})

        set_source_files_properties(${CONFIG_OBJ} PROPERTIES EXTERNAL_OBJECT true GENERATED true)
    endforeach(CONFIG_FILE)
        set_source_files_properties(${RESOURCE_OBJ} PROPERTIES EXTERNAL_OBJECT true GENERATED true)
    endforeach(RESOURCE_FILE)

    add_library(clickhouse_server_configs STATIC ${CONFIG_OBJS})
    add_library(clickhouse_server_configs STATIC ${RESOURCE_OBJS})
    set_target_properties(clickhouse_server_configs PROPERTIES LINKER_LANGUAGE C)

    # whole-archive prevents symbols from being discarded for unknown reason
@@ -212,22 +212,10 @@
    <!-- Directory with user provided files that are accessible by 'file' table function. -->
    <user_files_path>/var/lib/clickhouse/user_files/</user_files_path>

    <!-- Sources to read users, roles, access rights, profiles of settings, quotas. -->
    <user_directories>
        <users_xml>
            <!-- Path to configuration file with predefined users. -->
            <path>users.xml</path>
        </users_xml>
        <local_directory>
            <!-- Path to folder where users created by SQL commands are stored. -->
            <path>/var/lib/clickhouse/access/</path>
        </local_directory>
    </user_directories>

    <!-- External user directories (LDAP). -->
    <!-- LDAP server definitions. -->
    <ldap_servers>
        <!-- List LDAP servers with their connection parameters here to later use them as authenticators for dedicated users,
              who have 'ldap' authentication mechanism specified instead of 'password'.
        <!-- List LDAP servers with their connection parameters here to later 1) use them as authenticators for dedicated local users,
              who have 'ldap' authentication mechanism specified instead of 'password', or to 2) use them as remote user directories.
             Parameters:
                host - LDAP server hostname or IP, this parameter is mandatory and cannot be empty.
                port - LDAP server port, default is 636 if enable_tls is set to true, 389 otherwise.
@@ -246,7 +234,7 @@
                tls_key_file - path to certificate key file.
                tls_ca_cert_file - path to CA certificate file.
                tls_ca_cert_dir - path to the directory containing CA certificates.
                tls_cipher_suite - allowed cipher suite.
                tls_cipher_suite - allowed cipher suite (in OpenSSL notation).
             Example:
                <my_ldap_server>
                    <host>localhost</host>
@@ -265,6 +253,36 @@
        -->
    </ldap_servers>

    <!-- Sources to read users, roles, access rights, profiles of settings, quotas. -->
    <user_directories>
        <users_xml>
            <!-- Path to configuration file with predefined users. -->
            <path>users.xml</path>
        </users_xml>
        <local_directory>
            <!-- Path to folder where users created by SQL commands are stored. -->
            <path>/var/lib/clickhouse/access/</path>
        </local_directory>

        <!-- To add an LDAP server as a remote user directory of users that are not defined locally, define a single 'ldap' section
              with the following parameters:
                server - one of LDAP server names defined in 'ldap_servers' config section above.
                        This parameter is mandatory and cannot be empty.
                roles - section with a list of locally defined roles that will be assigned to each user retrieved from the LDAP server.
                        If no roles are specified, user will not be able to perform any actions after authentication.
                        If any of the listed roles is not defined locally at the time of authentication, the authentication attempt
                        will fail as if the provided password was incorrect.
             Example:
                <ldap>
                    <server>my_ldap_server</server>
                    <roles>
                        <my_local_role1 />
                        <my_local_role2 />
                    </roles>
                </ldap>
        -->
    </user_directories>

    <!-- Default profile of settings. -->
    <default_profile>default</default_profile>
437 programs/server/play.html Normal file
@@ -0,0 +1,437 @@
<html> <!-- TODO If I write DOCTYPE HTML something changes but I don't know what. -->
<head>
    <meta charset="UTF-8">
    <title>ClickHouse Query</title>

    <!-- Code style:

    Do not use any JavaScript or CSS frameworks or preprocessors.
    This HTML page should not require any build systems (node.js, npm, gulp, etc.)
    This HTML page should not be minified, instead it should be reasonably minimalistic by itself.
    This HTML page should not load any external resources
    (CSS and JavaScript must be embedded directly to the page. No external fonts or images should be loaded).
    This UI should look as lightweight, clean and fast as possible.
    All UI elements must be aligned in pixel-perfect way.
    There should not be any animations.
    No unexpected changes in positions of elements while the page is loading.
    Navigation by keyboard should work.
    64-bit numbers must display correctly.

    -->
    <style type="text/css">
    :root {
        --background-color: #DDF8FF; /* Or #FFFBEF; actually many pastel colors look great for light theme. */
        --element-background-color: #FFF;
        --border-color: #EEE;
        --shadow-color: rgba(0, 0, 0, 0.1);
        --button-color: #FFAA00; /* Orange on light-cyan is especially good. */
        --text-color: #000;
        --button-active-color: #F00;
        --button-active-text-color: #FFF;
        --misc-text-color: #888;
        --error-color: #FEE; /* Light-pink on light-cyan is so neat, I even want to trigger errors to see this cool combination of colors. */
        --table-header-color: #F8F8F8;
        --table-hover-color: #FFF8EF;
        --null-color: #A88;
    }

    [data-theme="dark"] {
        --background-color: #000;
        --element-background-color: #102030;
        --border-color: #111;
        --shadow-color: rgba(255, 255, 255, 0.1);
        --text-color: #CCC;
        --button-color: #FFAA00;
        --button-text-color: #000;
        --button-active-color: #F00;
        --button-active-text-color: #FFF;
        --misc-text-color: #888;
        --error-color: #400; /* Light-pink on light-cyan is so neat, I even want to trigger errors to see this cool combination of colors. */
        --table-header-color: #102020;
        --table-hover-color: #003333;
        --null-color: #A88;
    }

    html, body
    {
        /* Personal choice. */
        font-family: Sans-Serif;
        background: var(--background-color);
        color: var(--text-color);
    }

    /* Otherwise Webkit based browsers will display ugly border on focus. */
    textarea, input, button
    {
        outline: none;
        border: none;
        color: var(--text-color);
    }

    /* Otherwise scrollbar may appear dynamically and it will alter viewport height,
       then relative heights of elements will change suddenly, and it will break overall impression. */
    /* html
    {
        overflow-x: scroll;
    }*/

    div
    {
        width: 100%;
    }

    .monospace
    {
        /* Prefer fonts that have full hinting info. This is important for non-retina displays.
           Also I personally dislike "Ubuntu" font due to the similarity of 'r' and 'г' (it looks very ignorant).
        */
        font-family: Liberation Mono, DejaVu Sans Mono, MonoLisa, Consolas, Monospace;
    }

    .shadow
    {
        box-shadow: 0 0 1rem var(--shadow-color);
    }

    input, textarea
    {
        border: 1px solid var(--border-color);
        /* The font must be not too small (to be inclusive) and not too large (it's less practical and makes a general feel of insecurity) */
        font-size: 11pt;
        padding: 0.25rem;
        background-color: var(--element-background-color);
    }

    #query
    {
        /* Make enough space for even huge queries. */
        height: 20%;
        width: 100%;
    }

    #inputs
    {
        white-space: nowrap;
        width: 100%;
    }

    #url
    {
        width: 70%;
    }

    #user
    {
        width: 15%;
    }

    #password
    {
        width: 15%;
    }

    #run_div
    {
        margin-top: 1rem;
    }

    #run
    {
        color: var(--button-text-color);
        background-color: var(--button-color);
        padding: 0.25rem 1rem;
        cursor: pointer;
        font-weight: bold;
        font-size: 100%; /* Otherwise button element will have lower font size. */
    }

    #run:hover, #run:focus
    {
        color: var(--button-active-text-color);
        background-color: var(--button-active-color);
    }

    #stats
    {
        float: right;
        color: var(--misc-text-color);
    }

    #toggle-light, #toggle-dark
    {
        float: right;
        padding-right: 0.5rem;
        cursor: pointer;
    }

    .hint
    {
        color: var(--misc-text-color);
    }

    #data_div
    {
        margin-top: 1rem;
    }

    #data-table
    {
        border-collapse: collapse;
        border-spacing: 0;
        /* I need pixel-perfect alignment but not sure the following is correct, please help */
        min-width: calc(100vw - 2rem);
    }

    /* Will be displayed when user specified custom format. */
    #data-unparsed
    {
        background-color: var(--element-background-color);
        margin-top: 0rem;
        padding: 0.25rem 0.5rem;
        display: none;
    }

    td
    {
        background-color: var(--element-background-color);
        white-space: nowrap;
        /* For wide tables any individual column will be no more than 50% of page width. */
        max-width: 50vw;
        /* The content is cut unless you hover. */
        overflow: hidden;
        padding: 0.25rem 0.5rem;
        border: 1px solid var(--border-color);
        white-space: pre;
    }

    td.right
    {
        text-align: right;
    }

    th
    {
        padding: 0.25rem 0.5rem;
        text-align: middle;
        background-color: var(--table-header-color);
        border: 1px solid var(--border-color);
    }

    /* The row under mouse pointer is highlighted for better legibility. */
    tr:hover, tr:hover td
    {
        background-color: var(--table-hover-color);
    }

    tr:hover
    {
        box-shadow: 0 0 1rem rgba(0, 0, 0, 0.1);
    }

    #error
    {
        background: var(--error-color);
        white-space: pre-wrap;
        padding: 0.5rem 1rem;
        display: none;
    }

    /* When mouse pointer is over table cell, will display full text (with wrap) instead of cut.
       TODO Find a way to make it work on touch devices. */
    td.left:hover
    {
        white-space: pre-wrap;
    }

    /* The style for SQL NULL */
    .null
    {
        color: var(--null-color);
    }
    </style>
</head>

<body>
    <div id="inputs">
        <input class="monospace shadow" id="url" type="text" value="http://localhost:8123/" /><input class="monospace shadow" id="user" type="text" value="default" /><input class="monospace shadow" id="password" type="password" />
    </div>
    <div>
        <textarea autofocus spellcheck="false" class="monospace shadow" id="query"></textarea>
    </div>
    <div id="run_div">
        <button class="shadow" id="run">Run</button>
        <span class="hint"> (Ctrl+Enter)</span>
        <span id="stats"></span>
        <span id="toggle-dark">🌑</span><span id="toggle-light">🌞</span>
    </div>
    <div id="data_div">
        <table class="monospace shadow" id="data-table"></table>
        <pre class="monospace shadow" id="data-unparsed"></pre>
    </div>
    <p id="error" class="monospace shadow">
    </p>
</body>
<script type="text/javascript">

    /// Substitute the address of the server where the page is served.
    if (location.protocol != 'file:') {
        document.getElementById('url').value = location.origin;
    }

    function post()
    {
        var url = document.getElementById('url').value +
            /// Ask server to allow cross-domain requests.
            '?add_http_cors_header=1' +
            '&user=' + encodeURIComponent(document.getElementById('user').value) +
            '&password=' + encodeURIComponent(document.getElementById('password').value) +
            '&default_format=JSONCompact' +
            /// Safety settings to prevent results that browser cannot display.
            '&max_result_rows=1000&max_result_bytes=10000000&result_overflow_mode=break';

        var query = document.getElementById('query').value;
        var xhr = new XMLHttpRequest;

        xhr.open('POST', url, true);
        xhr.send(query);

        xhr.onreadystatechange = function()
        {
            if (this.readyState === XMLHttpRequest.DONE) {
                if (this.status === 200) {
                    var json;
                    try { json = JSON.parse(this.response); } catch (e) {}
                    if (json !== undefined && json.statistics !== undefined) {
                        renderResult(json);
                    } else {
                        renderUnparsedResult(this.response);
                    }
                } else {
                    renderError(this.response);
                }
            } else {
                //console.log(this);
            }
        }
    }

    document.getElementById('run').onclick = function()
    {
        post();
    }

    document.getElementById('query').onkeypress = function(event)
    {
        /// Firefox has code 13 for Enter and Chromium has code 10.
        if (event.ctrlKey && (event.charCode == 13 || event.charCode == 10)) {
            post();
        }
    }

    function clear()
    {
        var table = document.getElementById('data-table');
        while (table.firstChild) {
            table.removeChild(table.lastChild);
        }

        document.getElementById('data-unparsed').innerText = '';
        document.getElementById('data-unparsed').style.display = 'none';

        document.getElementById('error').innerText = '';
        document.getElementById('error').style.display = 'none';

        document.getElementById('stats').innerText = '';
    }

    function renderResult(response)
    {
        //console.log(response);
        clear();

        var stats = document.getElementById('stats');
        stats.innerText = 'Elapsed: ' + response.statistics.elapsed.toFixed(3) + " sec, read " + response.statistics.rows_read + " rows.";

        var thead = document.createElement('thead');
        for (var idx in response.meta) {
            var th = document.createElement('th');
            var name = document.createTextNode(response.meta[idx].name);
            th.appendChild(name);
            thead.appendChild(th);
        }

        /// To prevent hanging the browser, limit the number of cells in a table.
        /// It's important to have the limit on number of cells, not just rows, because tables may be wide or narrow.
        var max_rows = 10000 / response.meta.length;
        var row_num = 0;

        var tbody = document.createElement('tbody');
        for (var row_idx in response.data) {
            var tr = document.createElement('tr');
            for (var col_idx in response.data[row_idx]) {
                var td = document.createElement('td');
                var cell = response.data[row_idx][col_idx];
                var is_null = (cell === null);
                var content = document.createTextNode(is_null ? 'ᴺᵁᴸᴸ' : cell);
                td.appendChild(content);
                td.className = response.meta[col_idx].type.match(/^(U?Int|Decimal|Float)/) ? 'right' : 'left';
                if (is_null) {
                    td.className += ' null';
                }
                tr.appendChild(td);
            }
            tbody.appendChild(tr);

            ++row_num;
            if (row_num >= max_rows) {
                break;
            }
        }

        var table = document.getElementById('data-table');
        table.appendChild(thead);
        table.appendChild(tbody);
    }

    /// A function to render raw data when non-default format is specified.
    function renderUnparsedResult(response)
    {
        clear();
        var data = document.getElementById('data-unparsed')
        data.innerText = response;
        /// inline-block make width adjust to the size of content.
        data.style.display = 'inline-block';
    }

    function renderError(response)
    {
        clear();
        document.getElementById('error').innerText = response;
        document.getElementById('error').style.display = 'block';
    }

    function setColorTheme(theme)
    {
        window.localStorage.setItem('theme', theme);
        document.documentElement.setAttribute('data-theme', theme);
    }

    /// The choice of color theme is saved in browser.
    var theme = window.localStorage.getItem('theme');
    if (theme) {
        setColorTheme(theme);
    }

    document.getElementById('toggle-light').onclick = function()
    {
        setColorTheme('light');
    }

    document.getElementById('toggle-dark').onclick = function()
    {
        setColorTheme('dark');
    }
</script>
</html>
@@ -3,6 +3,7 @@
#include <Access/MemoryAccessStorage.h>
#include <Access/UsersConfigAccessStorage.h>
#include <Access/DiskAccessStorage.h>
#include <Access/LDAPAccessStorage.h>
#include <Access/ContextAccess.h>
#include <Access/RoleCache.h>
#include <Access/RowPolicyCache.h>
@@ -253,6 +254,12 @@ void AccessControlManager::addMemoryStorage(const String & storage_name_)
}


void AccessControlManager::addLDAPStorage(const String & storage_name_, const Poco::Util::AbstractConfiguration & config_, const String & prefix_)
{
    addStorage(std::make_shared<LDAPAccessStorage>(storage_name_, this, config_, prefix_));
}


void AccessControlManager::addStoragesFromUserDirectoriesConfig(
    const Poco::Util::AbstractConfiguration & config,
    const String & key,
@@ -275,6 +282,8 @@ void AccessControlManager::addStoragesFromUserDirectoriesConfig(
            type = UsersConfigAccessStorage::STORAGE_TYPE;
        else if ((type == "local") || (type == "local_directory"))
            type = DiskAccessStorage::STORAGE_TYPE;
        else if (type == "ldap")
            type = LDAPAccessStorage::STORAGE_TYPE;

        String name = config.getString(prefix + ".name", type);

@@ -295,6 +304,10 @@ void AccessControlManager::addStoragesFromUserDirectoriesConfig(
            bool readonly = config.getBool(prefix + ".readonly", false);
            addDiskStorage(name, path, readonly);
        }
        else if (type == LDAPAccessStorage::STORAGE_TYPE)
        {
            addLDAPStorage(name, config, prefix);
        }
        else
            throw Exception("Unknown storage type '" + type + "' at " + prefix + " in config", ErrorCodes::UNKNOWN_ELEMENT_IN_CONFIG);
    }
@@ -346,7 +359,7 @@ UUID AccessControlManager::login(const String & user_name, const String & passwo

void AccessControlManager::setExternalAuthenticatorsConfig(const Poco::Util::AbstractConfiguration & config)
{
    external_authenticators->setConfig(config, getLogger());
    external_authenticators->setConfiguration(config, getLogger());
}


@@ -82,6 +82,9 @@ public:
    void addMemoryStorage();
    void addMemoryStorage(const String & storage_name_);

    /// Adds LDAPAccessStorage which allows querying remote LDAP server for user info.
    void addLDAPStorage(const String & storage_name_, const Poco::Util::AbstractConfiguration & config_, const String & prefix_);

    /// Adds storages from <users_directories> config.
    void addStoragesFromUserDirectoriesConfig(const Poco::Util::AbstractConfiguration & config,
        const String & key,
@@ -156,7 +156,7 @@ void ExternalAuthenticators::reset()
    ldap_server_params.clear();
}

void ExternalAuthenticators::setConfig(const Poco::Util::AbstractConfiguration & config, Poco::Logger * log)
void ExternalAuthenticators::setConfiguration(const Poco::Util::AbstractConfiguration & config, Poco::Logger * log)
{
    std::scoped_lock lock(mutex);
    reset();

@@ -26,7 +26,7 @@ class ExternalAuthenticators
{
public:
    void reset();
    void setConfig(const Poco::Util::AbstractConfiguration & config, Poco::Logger * log);
    void setConfiguration(const Poco::Util::AbstractConfiguration & config, Poco::Logger * log);

    void setLDAPServerParams(const String & server, const LDAPServerParams & params);
    LDAPServerParams getLDAPServerParams(const String & server) const;
@@ -14,6 +14,8 @@ namespace ErrorCodes
    extern const int ACCESS_ENTITY_ALREADY_EXISTS;
    extern const int ACCESS_ENTITY_NOT_FOUND;
    extern const int ACCESS_STORAGE_READONLY;
    extern const int WRONG_PASSWORD;
    extern const int IP_ADDRESS_NOT_ALLOWED;
    extern const int AUTHENTICATION_FAILED;
    extern const int LOGICAL_ERROR;
}
@@ -418,9 +420,21 @@ UUID IAccessStorage::login(
    const String & user_name,
    const String & password,
    const Poco::Net::IPAddress & address,
    const ExternalAuthenticators & external_authenticators) const
    const ExternalAuthenticators & external_authenticators,
    bool replace_exception_with_cannot_authenticate) const
{
    return loginImpl(user_name, password, address, external_authenticators);
    try
    {
        return loginImpl(user_name, password, address, external_authenticators);
    }
    catch (...)
    {
        if (!replace_exception_with_cannot_authenticate)
            throw;

        tryLogCurrentException(getLogger(), user_name + ": Authentication failed");
        throwCannotAuthenticate(user_name);
    }
}


@@ -434,11 +448,16 @@ UUID IAccessStorage::loginImpl(
    {
        if (auto user = tryRead<User>(*id))
        {
            if (isPasswordCorrectImpl(*user, password, external_authenticators) && isAddressAllowedImpl(*user, address))
                return *id;
            if (!isPasswordCorrectImpl(*user, password, external_authenticators))
                throwInvalidPassword();

            if (!isAddressAllowedImpl(*user, address))
                throwAddressNotAllowed(address);

            return *id;
        }
    }
    throwCannotAuthenticate(user_name);
    throwNotFound(EntityType::USER, user_name);
}


@@ -554,6 +573,15 @@ void IAccessStorage::throwReadonlyCannotRemove(EntityType type, const String & n
        ErrorCodes::ACCESS_STORAGE_READONLY);
}

void IAccessStorage::throwAddressNotAllowed(const Poco::Net::IPAddress & address)
{
    throw Exception("Connections from " + address.toString() + " are not allowed", ErrorCodes::IP_ADDRESS_NOT_ALLOWED);
}

void IAccessStorage::throwInvalidPassword()
{
    throw Exception("Invalid password", ErrorCodes::WRONG_PASSWORD);
}

void IAccessStorage::throwCannotAuthenticate(const String & user_name)
{

@@ -144,7 +144,7 @@ public:

    /// Finds a user, checks its password and returns the ID of the user.
    /// Throws an exception if there is no such user or the password is incorrect.
    UUID login(const String & user_name, const String & password, const Poco::Net::IPAddress & address, const ExternalAuthenticators & external_authenticators) const;
    UUID login(const String & user_name, const String & password, const Poco::Net::IPAddress & address, const ExternalAuthenticators & external_authenticators, bool replace_exception_with_cannot_authenticate = true) const;

    /// Returns the ID of a user who has logged in (maybe on another node).
    /// The function assumes that the password has been already checked somehow, so we can skip checking it now.
@@ -182,6 +182,8 @@ protected:
    [[noreturn]] void throwReadonlyCannotInsert(EntityType type, const String & name) const;
    [[noreturn]] void throwReadonlyCannotUpdate(EntityType type, const String & name) const;
    [[noreturn]] void throwReadonlyCannotRemove(EntityType type, const String & name) const;
    [[noreturn]] static void throwAddressNotAllowed(const Poco::Net::IPAddress & address);
    [[noreturn]] static void throwInvalidPassword();
    [[noreturn]] static void throwCannotAuthenticate(const String & user_name);

    using Notification = std::tuple<OnChangedHandler, UUID, AccessEntityPtr>;
313 src/Access/LDAPAccessStorage.cpp Normal file
@@ -0,0 +1,313 @@
#include <Access/LDAPAccessStorage.h>
#include <Access/AccessControlManager.h>
#include <Access/User.h>
#include <Access/Role.h>
#include <Common/Exception.h>
#include <common/logger_useful.h>
#include <ext/scope_guard.h>
#include <Poco/Util/AbstractConfiguration.h>
#include <Poco/JSON/JSON.h>
#include <Poco/JSON/Object.h>
#include <Poco/JSON/Stringifier.h>
#include <boost/range/algorithm/copy.hpp>
#include <iterator>
#include <sstream>


namespace DB
{
namespace ErrorCodes
{
    extern const int BAD_ARGUMENTS;
}


LDAPAccessStorage::LDAPAccessStorage(const String & storage_name_, AccessControlManager * access_control_manager_, const Poco::Util::AbstractConfiguration & config, const String & prefix)
    : IAccessStorage(storage_name_)
{
    setConfiguration(access_control_manager_, config, prefix);
}


void LDAPAccessStorage::setConfiguration(AccessControlManager * access_control_manager_, const Poco::Util::AbstractConfiguration & config, const String & prefix)
{
    std::scoped_lock lock(mutex);

    // TODO: switch to passing config as a ConfigurationView and remove this extra prefix once a version of Poco with proper implementation is available.
    const String prefix_str = (prefix.empty() ? "" : prefix + ".");

    const bool has_server = config.has(prefix_str + "server");
    const bool has_roles = config.has(prefix_str + "roles");

    if (!has_server)
        throw Exception("Missing 'server' field for LDAP user directory.", ErrorCodes::BAD_ARGUMENTS);

    const auto ldap_server_cfg = config.getString(prefix_str + "server");
    if (ldap_server_cfg.empty())
        throw Exception("Empty 'server' field for LDAP user directory.", ErrorCodes::BAD_ARGUMENTS);

    std::set<String> roles_cfg;
    if (has_roles)
    {
        Poco::Util::AbstractConfiguration::Keys role_names;
        config.keys(prefix_str + "roles", role_names);

        // Currently, we only extract names of roles from the section names and assign them directly and unconditionally.
        roles_cfg.insert(role_names.begin(), role_names.end());
    }

    access_control_manager = access_control_manager_;
    ldap_server = ldap_server_cfg;
    default_role_names.swap(roles_cfg);
    roles_of_interest.clear();
    role_change_subscription = access_control_manager->subscribeForChanges<Role>(
        [this] (const UUID & id, const AccessEntityPtr & entity)
        {
            return this->processRoleChange(id, entity);
        }
    );

    /// Update `roles_of_interests` with initial values.
    for (const auto & role_name : default_role_names)
    {
        if (auto role_id = access_control_manager->find<Role>(role_name))
            roles_of_interest.emplace(*role_id, role_name);
    }
}


void LDAPAccessStorage::processRoleChange(const UUID & id, const AccessEntityPtr & entity)
{
    std::scoped_lock lock(mutex);

    /// Update `roles_of_interests`.
    auto role = typeid_cast<std::shared_ptr<const Role>>(entity);
    bool need_to_update_users = false;

    if (role && default_role_names.count(role->getName()))
    {
        /// If a role was created with one of the `default_role_names` or renamed to one of the `default_role_names`,
        /// then set `need_to_update_users`.
        need_to_update_users = roles_of_interest.insert_or_assign(id, role->getName()).second;
    }
    else
    {
        /// If a role was removed or renamed to a name which isn't contained in the `default_role_names`,
        /// then set `need_to_update_users`.
        need_to_update_users = roles_of_interest.erase(id) > 0;
    }

    /// Update users which have been created.
    if (need_to_update_users)
    {
        auto update_func = [this] (const AccessEntityPtr & entity_) -> AccessEntityPtr
        {
            if (auto user = typeid_cast<std::shared_ptr<const User>>(entity_))
            {
                auto changed_user = typeid_cast<std::shared_ptr<User>>(user->clone());
                auto & granted_roles = changed_user->granted_roles.roles;
                granted_roles.clear();
                boost::range::copy(roles_of_interest | boost::adaptors::map_keys, std::inserter(granted_roles, granted_roles.end()));
                return changed_user;
            }
            return entity_;
        };
        memory_storage.update(memory_storage.findAll<User>(), update_func);
    }
}


void LDAPAccessStorage::checkAllDefaultRoleNamesFoundNoLock() const
{
    boost::container::flat_set<std::string_view> role_names_of_interest;
    boost::range::copy(roles_of_interest | boost::adaptors::map_values, std::inserter(role_names_of_interest, role_names_of_interest.end()));

    for (const auto & role_name : default_role_names)
    {
        if (!role_names_of_interest.count(role_name))
            throwDefaultRoleNotFound(role_name);
    }
}


const char * LDAPAccessStorage::getStorageType() const
{
    return STORAGE_TYPE;
}


String LDAPAccessStorage::getStorageParamsJSON() const
{
    std::scoped_lock lock(mutex);
    Poco::JSON::Object params_json;

    params_json.set("server", ldap_server);
    params_json.set("roles", default_role_names);

    std::ostringstream oss;
    Poco::JSON::Stringifier::stringify(params_json, oss);

    return oss.str();
}


std::optional<UUID> LDAPAccessStorage::findImpl(EntityType type, const String & name) const
{
    std::scoped_lock lock(mutex);
    return memory_storage.find(type, name);
}


std::vector<UUID> LDAPAccessStorage::findAllImpl(EntityType type) const
{
    std::scoped_lock lock(mutex);
    return memory_storage.findAll(type);
}


bool LDAPAccessStorage::existsImpl(const UUID & id) const
{
    std::scoped_lock lock(mutex);
    return memory_storage.exists(id);
}


AccessEntityPtr LDAPAccessStorage::readImpl(const UUID & id) const
{
    std::scoped_lock lock(mutex);
    return memory_storage.read(id);
}


String LDAPAccessStorage::readNameImpl(const UUID & id) const
{
    std::scoped_lock lock(mutex);
    return memory_storage.readName(id);
}


bool LDAPAccessStorage::canInsertImpl(const AccessEntityPtr &) const
{
    return false;
}


UUID LDAPAccessStorage::insertImpl(const AccessEntityPtr & entity, bool)
{
    throwReadonlyCannotInsert(entity->getType(), entity->getName());
}


void LDAPAccessStorage::removeImpl(const UUID & id)
{
    std::scoped_lock lock(mutex);
    auto entity = read(id);
    throwReadonlyCannotRemove(entity->getType(), entity->getName());
}


void LDAPAccessStorage::updateImpl(const UUID & id, const UpdateFunc &)
{
    std::scoped_lock lock(mutex);
    auto entity = read(id);
    throwReadonlyCannotUpdate(entity->getType(), entity->getName());
}


ext::scope_guard LDAPAccessStorage::subscribeForChangesImpl(const UUID & id, const OnChangedHandler & handler) const
{
    std::scoped_lock lock(mutex);
    return memory_storage.subscribeForChanges(id, handler);
}


ext::scope_guard LDAPAccessStorage::subscribeForChangesImpl(EntityType type, const OnChangedHandler & handler) const
{
    std::scoped_lock lock(mutex);
    return memory_storage.subscribeForChanges(type, handler);
}


bool LDAPAccessStorage::hasSubscriptionImpl(const UUID & id) const
{
    std::scoped_lock lock(mutex);
    return memory_storage.hasSubscription(id);
}


bool LDAPAccessStorage::hasSubscriptionImpl(EntityType type) const
{
    std::scoped_lock lock(mutex);
    return memory_storage.hasSubscription(type);
}

UUID LDAPAccessStorage::loginImpl(const String & user_name, const String & password, const Poco::Net::IPAddress & address, const ExternalAuthenticators & external_authenticators) const
{
    std::scoped_lock lock(mutex);
    auto id = memory_storage.find<User>(user_name);
    if (id)
    {
        auto user = memory_storage.read<User>(*id);

        if (!isPasswordCorrectImpl(*user, password, external_authenticators))
            throwInvalidPassword();

        if (!isAddressAllowedImpl(*user, address))
            throwAddressNotAllowed(address);

        return *id;
    }
    else
    {
        // User does not exist, so we create one, and will add it if authentication is successful.
        auto user = std::make_shared<User>();
        user->setName(user_name);
        user->authentication = Authentication(Authentication::Type::LDAP_SERVER);
        user->authentication.setServerName(ldap_server);

        if (!isPasswordCorrectImpl(*user, password, external_authenticators))
            throwInvalidPassword();

        if (!isAddressAllowedImpl(*user, address))
            throwAddressNotAllowed(address);

        checkAllDefaultRoleNamesFoundNoLock();

        auto & granted_roles = user->granted_roles.roles;
        boost::range::copy(roles_of_interest | boost::adaptors::map_keys, std::inserter(granted_roles, granted_roles.end()));

        return memory_storage.insert(user);
    }
}

UUID LDAPAccessStorage::getIDOfLoggedUserImpl(const String & user_name) const
{
    std::scoped_lock lock(mutex);
    auto id = memory_storage.find<User>(user_name);
    if (id)
    {
        return *id;
    }
    else
    {
        // User does not exist, so we create one, and add it pretending that the authentication is successful.
        auto user = std::make_shared<User>();
        user->setName(user_name);
        user->authentication = Authentication(Authentication::Type::LDAP_SERVER);
        user->authentication.setServerName(ldap_server);

        checkAllDefaultRoleNamesFoundNoLock();

        auto & granted_roles = user->granted_roles.roles;
        boost::range::copy(roles_of_interest | boost::adaptors::map_keys, std::inserter(granted_roles, granted_roles.end()));

        return memory_storage.insert(user);
    }
}

void LDAPAccessStorage::throwDefaultRoleNotFound(const String & role_name)
{
    throw Exception("One of the default roles, the role '" + role_name + "', is not found", IAccessEntity::TypeInfo::get(IAccessEntity::Type::ROLE).not_found_error_code);
}

}
71 src/Access/LDAPAccessStorage.h Normal file
@@ -0,0 +1,71 @@
#pragma once

#include <Access/MemoryAccessStorage.h>
#include <Core/Types.h>
#include <ext/scope_guard.h>
#include <map>
#include <mutex>
#include <set>


namespace Poco
{
    namespace Util
    {
        class AbstractConfiguration;
    }
}


namespace DB
{
class AccessControlManager;

/// Implementation of IAccessStorage which allows attaching users from a remote LDAP server.
/// Currently, any user name will be treated as a name of an existing remote user,
/// a user info entity will be created, with LDAP_SERVER authentication type.
class LDAPAccessStorage : public IAccessStorage
{
public:
    static constexpr char STORAGE_TYPE[] = "ldap";

    explicit LDAPAccessStorage(const String & storage_name_, AccessControlManager * access_control_manager_, const Poco::Util::AbstractConfiguration & config, const String & prefix);
    virtual ~LDAPAccessStorage() override = default;

public: // IAccessStorage implementations.
    virtual const char * getStorageType() const override;
    virtual String getStorageParamsJSON() const override;

private: // IAccessStorage implementations.
    virtual std::optional<UUID> findImpl(EntityType type, const String & name) const override;
    virtual std::vector<UUID> findAllImpl(EntityType type) const override;
    virtual bool existsImpl(const UUID & id) const override;
    virtual AccessEntityPtr readImpl(const UUID & id) const override;
    virtual String readNameImpl(const UUID & id) const override;
    virtual bool canInsertImpl(const AccessEntityPtr &) const override;
    virtual UUID insertImpl(const AccessEntityPtr & entity, bool replace_if_exists) override;
    virtual void removeImpl(const UUID & id) override;
    virtual void updateImpl(const UUID & id, const UpdateFunc & update_func) override;
    virtual ext::scope_guard subscribeForChangesImpl(const UUID & id, const OnChangedHandler & handler) const override;
    virtual ext::scope_guard subscribeForChangesImpl(EntityType type, const OnChangedHandler & handler) const override;
    virtual bool hasSubscriptionImpl(const UUID & id) const override;
    virtual bool hasSubscriptionImpl(EntityType type) const override;
    virtual UUID loginImpl(const String & user_name, const String & password, const Poco::Net::IPAddress & address, const ExternalAuthenticators & external_authenticators) const override;
    virtual UUID getIDOfLoggedUserImpl(const String & user_name) const override;

private:
    void setConfiguration(AccessControlManager * access_control_manager_, const Poco::Util::AbstractConfiguration & config, const String & prefix);
    void processRoleChange(const UUID & id, const AccessEntityPtr & entity);
    void checkAllDefaultRoleNamesFoundNoLock() const;

    [[noreturn]] static void throwDefaultRoleNotFound(const String & role_name);

    mutable std::recursive_mutex mutex;
    AccessControlManager * access_control_manager = nullptr;
    String ldap_server;
    std::set<String> default_role_names;
    std::map<UUID, String> roles_of_interest;
    ext::scope_guard role_change_subscription;
    mutable MemoryAccessStorage memory_storage;
};
}
@@ -2,6 +2,8 @@
#include <Common/Exception.h>
#include <ext/scope_guard.h>

#include <mutex>

#include <cstring>

#include <sys/time.h>
@@ -27,16 +29,13 @@ LDAPClient::~LDAPClient()
    closeConnection();
}

void LDAPClient::openConnection()
{
    const bool graceful_bind_failure = false;
    diag(openConnection(graceful_bind_failure));
}

#if USE_LDAP

namespace
{

std::recursive_mutex ldap_global_mutex;

auto escapeForLDAP(const String & src)
{
    String dest;
@@ -63,10 +62,13 @@ namespace

    return dest;
}

}

void LDAPClient::diag(const int rc)
{
    std::scoped_lock lock(ldap_global_mutex);

    if (rc != LDAP_SUCCESS)
    {
        String text;
@@ -100,8 +102,10 @@ void LDAPClient::diag(const int rc)
    }
}

int LDAPClient::openConnection(const bool graceful_bind_failure)
void LDAPClient::openConnection()
{
    std::scoped_lock lock(ldap_global_mutex);

    closeConnection();

    {
@@ -232,8 +236,6 @@ int LDAPClient::openConnection(const bool graceful_bind_failure)
    if (params.enable_tls == LDAPServerParams::TLSEnable::YES_STARTTLS)
        diag(ldap_start_tls_s(handle, nullptr, nullptr));

    int rc = LDAP_OTHER;

    switch (params.sasl_mechanism)
    {
        case LDAPServerParams::SASLMechanism::SIMPLE:
@@ -244,20 +246,21 @@ int LDAPClient::openConnection(const bool graceful_bind_failure)
            cred.bv_val = const_cast<char *>(params.password.c_str());
            cred.bv_len = params.password.size();

            rc = ldap_sasl_bind_s(handle, dn.c_str(), LDAP_SASL_SIMPLE, &cred, nullptr, nullptr, nullptr);

            if (!graceful_bind_failure)
                diag(rc);
            diag(ldap_sasl_bind_s(handle, dn.c_str(), LDAP_SASL_SIMPLE, &cred, nullptr, nullptr, nullptr));

            break;
        }
        default:
        {
            throw Exception("Unknown SASL mechanism", ErrorCodes::LDAP_ERROR);
        }
    }

    return rc;
}

void LDAPClient::closeConnection() noexcept
{
    std::scoped_lock lock(ldap_global_mutex);

    if (!handle)
        return;

@@ -267,42 +270,21 @@ void LDAPClient::closeConnection() noexcept

bool LDAPSimpleAuthClient::check()
{
    if (params.user.empty())
        throw Exception("LDAP authentication of a user with an empty name is not allowed", ErrorCodes::BAD_ARGUMENTS);
    std::scoped_lock lock(ldap_global_mutex);

    if (params.user.empty())
        throw Exception("LDAP authentication of a user with empty name is not allowed", ErrorCodes::BAD_ARGUMENTS);

    // Silently reject authentication attempt if the password is empty as if it didn't match.
    if (params.password.empty())
        return false; // Silently reject authentication attempt if the password is empty as if it didn't match.
        return false;

    SCOPE_EXIT({ closeConnection(); });

    const bool graceful_bind_failure = true;
    const auto rc = openConnection(graceful_bind_failure);
    // Will throw on any error, including invalid credentials.
    openConnection();

    bool result = false;

    switch (rc)
    {
        case LDAP_SUCCESS:
        {
            result = true;
            break;
        }

        case LDAP_INVALID_CREDENTIALS:
        {
            result = false;
            break;
        }

        default:
        {
            result = false;
            diag(rc);
            break;
        }
    }

    return result;
    return true;
}

#else // USE_LDAP
@@ -312,7 +294,7 @@ void LDAPClient::diag(const int)
    throw Exception("ClickHouse was built without LDAP support", ErrorCodes::FEATURE_IS_NOT_ENABLED_AT_BUILD_TIME);
}

int LDAPClient::openConnection(const bool)
void LDAPClient::openConnection()
{
    throw Exception("ClickHouse was built without LDAP support", ErrorCodes::FEATURE_IS_NOT_ENABLED_AT_BUILD_TIME);
}
@@ -32,7 +32,6 @@ public:
protected:
    MAYBE_NORETURN void diag(const int rc);
    MAYBE_NORETURN void openConnection();
    int openConnection(const bool graceful_bind_failure = false);
    void closeConnection() noexcept;

protected:

@@ -42,6 +42,7 @@ struct LDAPServerParams

    enum class SASLMechanism
    {
        UNKNOWN,
        SIMPLE
    };
@@ -69,7 +69,7 @@ UUID MemoryAccessStorage::insertImpl(const AccessEntityPtr & new_entity, bool re

    UUID id = generateRandomID();
    std::lock_guard lock{mutex};
    insertNoLock(generateRandomID(), new_entity, replace_if_exists, notifications);
    insertNoLock(id, new_entity, replace_if_exists, notifications);
    return id;
}


@@ -2,6 +2,7 @@
#include <Common/Exception.h>
#include <ext/range.h>
#include <boost/range/adaptor/map.hpp>
#include <boost/range/adaptor/reversed.hpp>
#include <boost/range/algorithm/copy.hpp>
#include <boost/range/algorithm/find.hpp>

@@ -27,6 +28,15 @@ MultipleAccessStorage::MultipleAccessStorage(const String & storage_name_)
{
}

MultipleAccessStorage::~MultipleAccessStorage()
{
    /// It's better to remove the storages in the reverse order because they could depend on each other somehow.
    const auto storages = getStoragesPtr();
    for (const auto & storage : *storages | boost::adaptors::reversed)
    {
        removeStorage(storage);
    }
}

void MultipleAccessStorage::setStorages(const std::vector<StoragePtr> & storages)
{
@@ -400,7 +410,7 @@ UUID MultipleAccessStorage::loginImpl(const String & user_name, const String & p
        {
            try
            {
                auto id = storage->login(user_name, password, address, external_authenticators);
                auto id = storage->login(user_name, password, address, external_authenticators, /* replace_exception_with_cannot_authenticate = */ false);
                std::lock_guard lock{mutex};
                ids_cache.set(id, storage);
                return id;
@@ -416,7 +426,7 @@ UUID MultipleAccessStorage::loginImpl(const String & user_name, const String & p
                throw;
            }
        }
    throwCannotAuthenticate(user_name);
    throwNotFound(EntityType::USER, user_name);
}


@@ -18,6 +18,7 @@ public:
    using ConstStoragePtr = std::shared_ptr<const Storage>;

    MultipleAccessStorage(const String & storage_name_ = STORAGE_TYPE);
    ~MultipleAccessStorage() override;

    const char * getStorageType() const override { return STORAGE_TYPE; }

@@ -24,6 +24,7 @@ SRCS(
    GrantedRoles.cpp
    IAccessEntity.cpp
    IAccessStorage.cpp
    LDAPAccessStorage.cpp
    LDAPClient.cpp
    MemoryAccessStorage.cpp
    MultipleAccessStorage.cpp
@@ -509,6 +509,7 @@ namespace ErrorCodes
    extern const int UNKNOWN_MYSQL_DATATYPES_SUPPORT_LEVEL = 543;
    extern const int ROW_AND_ROWS_TOGETHER = 544;
    extern const int FIRST_AND_NEXT_TOGETHER = 545;
    extern const int NO_ROW_DELIMITER = 546;

    extern const int KEEPER_EXCEPTION = 999;
    extern const int POCO_EXCEPTION = 1000;

@@ -2,6 +2,7 @@

#include <string.h>
#include <cxxabi.h>
#include <cstdlib>
#include <Poco/String.h>
#include <common/logger_useful.h>
#include <IO/WriteHelpers.h>
@@ -36,13 +37,13 @@ namespace ErrorCodes
Exception::Exception(const std::string & msg, int code)
    : Poco::Exception(msg, code)
{
    // In debug builds, treat LOGICAL_ERROR as an assertion failure.
    // In debug builds and builds with sanitizers, treat LOGICAL_ERROR as an assertion failure.
    // Log the message before we fail.
#ifndef NDEBUG
#ifdef ABORT_ON_LOGICAL_ERROR
    if (code == ErrorCodes::LOGICAL_ERROR)
    {
        LOG_ERROR(&Poco::Logger::root(), "Logical error: '{}'.", msg);
        assert(false);
        LOG_FATAL(&Poco::Logger::root(), "Logical error: '{}'.", msg);
        abort();
    }
#endif
}

@@ -10,6 +10,10 @@

#include <fmt/format.h>

#if !defined(NDEBUG) || defined(ADDRESS_SANITIZER) || defined(THREAD_SANITIZER) || defined(MEMORY_SANITIZER) || defined(UNDEFINED_BEHAVIOR_SANITIZER)
#define ABORT_ON_LOGICAL_ERROR
#endif

namespace Poco { class Logger; }
@@ -164,7 +164,8 @@ public:
        func = std::forward<Function>(func),
        args = std::make_tuple(std::forward<Args>(args)...)]() mutable /// mutable is needed to destroy capture
    {
        SCOPE_EXIT(state->set());
        auto event = std::move(state);
        SCOPE_EXIT(event->set());

        /// These moves are needed to destroy function and arguments before exit.
        /// It will guarantee that after ThreadFromGlobalPool::join all captured params are destroyed.

@@ -502,8 +502,8 @@ Float NO_INLINE really_unrolled(const PODArray<UInt8> & keys, const PODArray<Flo

struct State4
{
    Float sum[4] = {0, 0, 0, 0};
    size_t count[4] = {0, 0, 0, 0};
    Float sum[4]{};
    size_t count[4]{};

    template <UInt32 idx>
    void add(Float value)
@@ -522,13 +522,13 @@ Float NO_INLINE another_unrolled_x4(const PODArray<UInt8> & keys, const PODArray
{
    State4 map[256]{};

    size_t size = keys.size() & ~size_t(3);
    for (size_t i = 0; i < size; i+=4)
    size_t size = keys.size() / 4 * 4;
    for (size_t i = 0; i < size; i += 4)
    {
        map[keys[i]].add<0>(values[i]);
        map[keys[i+1]].add<1>(values[i]);
        map[keys[i+2]].add<2>(values[i]);
        map[keys[i+3]].add<3>(values[i]);
        map[keys[i + 1]].add<1>(values[i]);
        map[keys[i + 2]].add<2>(values[i]);
        map[keys[i + 3]].add<3>(values[i]);
    }

    /// tail

@@ -131,7 +131,10 @@ TEST(Common, RWLockRecursive)

        auto lock2 = fifo_lock->getLock(RWLockImpl::Read, "q2");

#ifndef ABORT_ON_LOGICAL_ERROR
        /// It throws LOGICAL_ERROR
        EXPECT_ANY_THROW({fifo_lock->getLock(RWLockImpl::Write, "q2");});
#endif
    }

    fifo_lock->getLock(RWLockImpl::Write, "q2");
@@ -398,7 +398,12 @@ class IColumn;
    M(Bool, force_optimize_skip_unused_shards_no_nested, false, "Obsolete setting, does nothing. Will be removed after 2020-12-01. Use force_optimize_skip_unused_shards_nesting instead.", 0) \
    M(Bool, experimental_use_processors, true, "Obsolete setting, does nothing. Will be removed after 2020-11-29.", 0) \
    M(Bool, optimize_trivial_insert_select, true, "Optimize trivial 'INSERT INTO table SELECT ... FROM TABLES' query", 0) \
    M(Bool, allow_experimental_database_atomic, true, "Obsolete setting, does nothing. Will be removed after 2021-02-12", 0)
    M(Bool, allow_experimental_database_atomic, true, "Obsolete setting, does nothing. Will be removed after 2021-02-12", 0) \
    M(Bool, allow_non_metadata_alters, true, "Allow to execute alters which affects not only tables metadata, but also data on disk", 0) \
    M(Bool, enable_global_with_statement, false, "Propagate WITH statements to UNION queries and all subqueries", 0)

    // End of COMMON_SETTINGS
    // Please add settings related to formats into the FORMAT_FACTORY_SETTINGS below.

#define FORMAT_FACTORY_SETTINGS(M) \
    M(Char, format_csv_delimiter, ',', "The character to be considered as a delimiter in CSV data. If setting with a string, a string has to have a length of 1.", 0) \
@@ -463,9 +468,10 @@ class IColumn;
    \
    M(Bool, output_format_enable_streaming, false, "Enable streaming in output formats that support it.", 0) \
    M(Bool, output_format_write_statistics, true, "Write statistics about read rows, bytes, time elapsed in suitable output formats.", 0) \
    M(Bool, allow_non_metadata_alters, true, "Allow to execute alters which affects not only tables metadata, but also data on disk", 0) \
    M(Bool, output_format_pretty_row_numbers, false, "Add row numbers before each row for pretty output format", 0) \
    M(Bool, enable_global_with_statement, false, "Propagate WITH statements to UNION queries and all subqueries", 0) \
    M(Bool, output_format_pretty_row_numbers, false, "Add row numbers before each row for pretty output format", 0)

    // End of FORMAT_FACTORY_SETTINGS
    // Please add settings non-related to formats into the COMMON_SETTINGS above.

#define LIST_OF_SETTINGS(M) \
    COMMON_SETTINGS(M) \
@@ -100,14 +100,18 @@ void DatabaseWithOwnTablesBase::attachTableUnlocked(const String & table_name, c
         throw Exception(ErrorCodes::UNKNOWN_DATABASE, "Database was renamed to `{}`, cannot create table in `{}`",
                         database_name, table_id.database_name);
 
-    if (!tables.emplace(table_name, table).second)
-        throw Exception(ErrorCodes::TABLE_ALREADY_EXISTS, "Table {} already exists.", table_id.getFullTableName());
-
     if (table_id.hasUUID())
     {
         assert(database_name == DatabaseCatalog::TEMPORARY_DATABASE || getEngineName() == "Atomic");
         DatabaseCatalog::instance().addUUIDMapping(table_id.uuid, shared_from_this(), table);
     }
 
+    if (!tables.emplace(table_name, table).second)
+    {
+        if (table_id.hasUUID())
+            DatabaseCatalog::instance().removeUUIDMapping(table_id.uuid);
+        throw Exception(ErrorCodes::TABLE_ALREADY_EXISTS, "Table {} already exists.", table_id.getFullTableName());
+    }
 }
 
 void DatabaseWithOwnTablesBase::shutdown()

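The reordering above registers the UUID mapping first and rolls it back if the name map rejects the insert, so the two registries cannot drift apart. The same shape with ordinary std::map registries (all names here are illustrative):

#include <map>
#include <stdexcept>
#include <string>

// Hypothetical pair of registries mirroring the hunk above: a global UUID map
// that must be populated first, and a per-database name map whose insertion
// can fail. On failure the first registration is undone before throwing.
std::map<int, std::string> uuid_map;
std::map<std::string, std::string> tables;

void attachTable(int uuid, const std::string & name, const std::string & table)
{
    if (!uuid_map.emplace(uuid, table).second)
        throw std::runtime_error("mapping for UUID already exists");

    if (!tables.emplace(name, table).second)
    {
        uuid_map.erase(uuid);   // roll back the first step, keep state consistent
        throw std::runtime_error("table " + name + " already exists");
    }
}

int main()
{
    attachTable(1, "t", "data");
    try { attachTable(2, "t", "data2"); } catch (const std::exception &) {}
    return uuid_map.count(2);   // 0: rollback removed the stale mapping
}
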
@@ -593,7 +593,7 @@ void registerDictionaryComplexKeyDirect(DictionaryFactory & factory)
 
         return std::make_unique<ComplexKeyDirectDictionary>(dict_id, dict_struct, std::move(source_ptr));
     };
-    factory.registerLayout("complex_key_direct", create_layout, false);
+    factory.registerLayout("complex_key_direct", create_layout, true);
 }
 

@@ -42,6 +42,7 @@ namespace DB
         extern const int UNSUPPORTED_METHOD;
         extern const int INVALID_CONFIG_PARAMETER;
         extern const int INTERNAL_REDIS_ERROR;
+        extern const int LOGICAL_ERROR;
     }
 
@@ -79,7 +80,13 @@ namespace DB
                 throw Exception{"Redis source with storage type \'hash_map\' requires 2 keys",
                                 ErrorCodes::INVALID_CONFIG_PARAMETER};
             // suppose key[0] is primary key, key[1] is secondary key
+
+            for (const auto & key : *dict_struct.key)
+                if (!isInteger(key.type) && !isString(key.type))
+                    throw Exception(ErrorCodes::INVALID_CONFIG_PARAMETER,
+                        "Redis source supports only integer or string key, but key '{}' of type {} given", key.name, key.type->getName());
         }
 
         if (!password.empty())
         {
             RedisCommand command("AUTH");
@@ -207,8 +214,8 @@ namespace DB
         if (!client->isConnected())
             client->connect(host, port);
 
-        if (storage_type != RedisStorageType::SIMPLE)
-            throw Exception{"Cannot use loadIds with \'simple\' storage type", ErrorCodes::UNSUPPORTED_METHOD};
+        if (storage_type == RedisStorageType::HASH_MAP)
+            throw Exception{"Cannot use loadIds with 'hash_map' storage type", ErrorCodes::UNSUPPORTED_METHOD};
 
         if (!dict_struct.id)
             throw Exception{"'id' is required for selective loading", ErrorCodes::UNSUPPORTED_METHOD};
@@ -221,6 +228,36 @@ namespace DB
         return std::make_shared<RedisBlockInputStream>(client, std::move(keys), storage_type, sample_block, max_block_size);
     }
 
+    BlockInputStreamPtr RedisDictionarySource::loadKeys(const Columns & key_columns, const std::vector<size_t> & requested_rows)
+    {
+        if (!client->isConnected())
+            client->connect(host, port);
+
+        if (key_columns.size() != dict_struct.key->size())
+            throw Exception{"The size of key_columns does not equal to the size of dictionary key", ErrorCodes::LOGICAL_ERROR};
+
+        RedisArray keys;
+        for (auto row : requested_rows)
+        {
+            RedisArray key;
+            for (size_t i = 0; i < key_columns.size(); ++i)
+            {
+                const auto & type = dict_struct.key->at(i).type;
+                if (isInteger(type))
+                    key << DB::toString(key_columns[i]->get64(row));
+                else if (isString(type))
+                    key << get<String>((*key_columns[i])[row]);
+                else
+                    throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected type of key in Redis dictionary");
+            }
+
+            keys.add(key);
+        }
+
+        return std::make_shared<RedisBlockInputStream>(client, std::move(keys), storage_type, sample_block, max_block_size);
+    }
+
 
     String RedisDictionarySource::toString() const
     {
         return "Redis: " + host + ':' + DB::toString(port);

@@ -70,11 +70,7 @@ namespace ErrorCodes
     BlockInputStreamPtr loadIds(const std::vector<UInt64> & ids) override;
 
-    BlockInputStreamPtr loadKeys(const Columns & /* key_columns */, const std::vector<size_t> & /* requested_rows */) override
-    {
-        // Redis does not support native indexing
-        throw Exception{"Method loadKeys is unsupported for RedisDictionarySource", ErrorCodes::NOT_IMPLEMENTED};
-    }
+    BlockInputStreamPtr loadKeys(const Columns & key_columns, const std::vector<size_t> & requested_rows) override;
 
     bool isModified() const override { return true; }
 

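The new loadKeys serializes every column of the composite key to a string, row by row, and sends the flattened list to Redis in one request. A rough sketch of that flattening with std::variant columns instead of IColumn, and RedisArray reduced to a vector of strings:

#include <cstdint>
#include <cstdio>
#include <stdexcept>
#include <string>
#include <variant>
#include <vector>

using Column = std::vector<std::variant<std::int64_t, std::string>>;

// Flatten the requested rows of a multi-column key into one flat list of
// strings, in column order per row, as the hunk above does with RedisArray.
std::vector<std::string> buildKeys(const std::vector<Column> & key_columns,
                                   const std::vector<size_t> & requested_rows)
{
    std::vector<std::string> keys;
    for (size_t row : requested_rows)
        for (const Column & column : key_columns)
        {
            const auto & value = column.at(row);
            if (const auto * n = std::get_if<std::int64_t>(&value))
                keys.push_back(std::to_string(*n));
            else if (const auto * s = std::get_if<std::string>(&value))
                keys.push_back(*s);
            else
                throw std::logic_error("unexpected type of key");   // mirrors the hunk
        }
    return keys;
}

int main()
{
    std::vector<Column> key_columns{{std::int64_t{1}, std::int64_t{2}},
                                    {std::string{"a"}, std::string{"b"}}};
    for (const auto & k : buildKeys(key_columns, {1, 0}))
        std::printf("%s\n", k.c_str());   // 2 b 1 a
    return 0;
}
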
@@ -180,4 +180,9 @@ void DiskDecorator::sync(int fd) const
     delegate->sync(fd);
 }
 
+Executor & DiskDecorator::getExecutor()
+{
+    return delegate->getExecutor();
+}
+
 }

@@ -4,6 +4,10 @@
 
 namespace DB
 {
+
+/** Forwards all methods to another disk.
+  * Methods can be overridden by descendants.
+  */
 class DiskDecorator : public IDisk
 {
 public:
@@ -46,6 +50,7 @@ public:
     void close(int fd) const override;
     void sync(int fd) const override;
     const String getType() const override { return delegate->getType(); }
+    Executor & getExecutor() override;
 
 protected:
     DiskPtr delegate;

@@ -195,10 +195,10 @@ public:
     /// Invoked when Global Context is shutdown.
     virtual void shutdown() { }
 
-private:
     /// Returns executor to perform asynchronous operations.
-    Executor & getExecutor() { return *executor; }
+    virtual Executor & getExecutor() { return *executor; }
 
+private:
     std::unique_ptr<Executor> executor;
 };
 

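getExecutor is a textbook decorator hook: the base interface exposes it as virtual, the decorator forwards it to the wrapped object, and subclasses may intercept. A minimal sketch with IDisk reduced to this single method:

#include <cstdio>
#include <memory>

struct Executor { const char * name; };

// Base interface: the hook is virtual so decorators can intercept it.
class IDisk
{
public:
    virtual ~IDisk() = default;
    virtual Executor & getExecutor() { return *executor; }

private:
    std::unique_ptr<Executor> executor = std::make_unique<Executor>(Executor{"sync"});
};

// Decorator: forwards to the wrapped disk instead of owning its own executor,
// so the delegate's (possibly asynchronous) executor keeps being used.
class DiskDecorator : public IDisk
{
public:
    explicit DiskDecorator(std::shared_ptr<IDisk> delegate_) : delegate(std::move(delegate_)) {}
    Executor & getExecutor() override { return delegate->getExecutor(); }

protected:
    std::shared_ptr<IDisk> delegate;
};

int main()
{
    DiskDecorator decorated(std::make_shared<IDisk>());
    std::printf("%s\n", decorated.getExecutor().name);   // "sync", from the delegate
    return 0;
}
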
@@ -8,6 +8,7 @@
 #include <DataStreams/ParallelParsingBlockInputStream.h>
 #include <Formats/FormatSettings.h>
 #include <Processors/Formats/IRowInputFormat.h>
+#include <Processors/Formats/IRowOutputFormat.h>
 #include <Processors/Formats/InputStreamFromInputFormat.h>
 #include <Processors/Formats/OutputStreamToOutputFormat.h>
 #include <DataStreams/NativeBlockInputStream.h>
@@ -203,7 +204,7 @@ BlockInputStreamPtr FormatFactory::getInput(
 
 
 BlockOutputStreamPtr FormatFactory::getOutput(
-    const String & name, WriteBuffer & buf, const Block & sample, const Context & context, WriteCallback callback) const
+    const String & name, WriteBuffer & buf, const Block & sample, const Context & context, WriteCallback callback, const bool ignore_no_row_delimiter) const
 {
     if (!getCreators(name).output_processor_creator)
     {
@@ -221,7 +222,7 @@ BlockOutputStreamPtr FormatFactory::getOutput(
             output_getter(buf, sample, std::move(callback), format_settings), sample);
     }
 
-    auto format = getOutputFormat(name, buf, sample, context, std::move(callback));
+    auto format = getOutputFormat(name, buf, sample, context, std::move(callback), ignore_no_row_delimiter);
     return std::make_shared<MaterializingBlockOutputStream>(std::make_shared<OutputStreamToOutputFormat>(format), sample);
 }
 
@@ -260,7 +261,7 @@ InputFormatPtr FormatFactory::getInputFormat(
 
 
 OutputFormatPtr FormatFactory::getOutputFormat(
-    const String & name, WriteBuffer & buf, const Block & sample, const Context & context, WriteCallback callback) const
+    const String & name, WriteBuffer & buf, const Block & sample, const Context & context, WriteCallback callback, const bool ignore_no_row_delimiter) const
 {
     const auto & output_getter = getCreators(name).output_processor_creator;
     if (!output_getter)
@@ -269,10 +270,14 @@ OutputFormatPtr FormatFactory::getOutputFormat(
     const Settings & settings = context.getSettingsRef();
     FormatSettings format_settings = getOutputFormatSetting(settings, context);
 
+    RowOutputFormatParams params;
+    params.ignore_no_row_delimiter = ignore_no_row_delimiter;
+    params.callback = std::move(callback);
+
     /** TODO: Materialization is needed, because formats can use the functions `IDataType`,
       * which only work with full columns.
       */
-    auto format = output_getter(buf, sample, std::move(callback), format_settings);
+    auto format = output_getter(buf, sample, params, format_settings);
 
     /// Enable auto-flush for streaming mode. Currently it is needed by INSERT WATCH query.
     if (format_settings.enable_streaming)

@@ -27,6 +27,7 @@ class IInputFormat;
 class IOutputFormat;
 
 struct RowInputFormatParams;
+struct RowOutputFormatParams;
 
 using InputFormatPtr = std::shared_ptr<IInputFormat>;
 using OutputFormatPtr = std::shared_ptr<IOutputFormat>;
@@ -80,7 +81,7 @@ private:
     using OutputProcessorCreator = std::function<OutputFormatPtr(
             WriteBuffer & buf,
             const Block & sample,
-            WriteCallback callback,
+            const RowOutputFormatParams & params,
             const FormatSettings & settings)>;
 
     struct Creators
@@ -107,7 +108,7 @@ public:
         ReadCallback callback = {}) const;
 
     BlockOutputStreamPtr getOutput(const String & name, WriteBuffer & buf,
-        const Block & sample, const Context & context, WriteCallback callback = {}) const;
+        const Block & sample, const Context & context, WriteCallback callback = {}, const bool ignore_no_row_delimiter = false) const;
 
     InputFormatPtr getInputFormat(
         const String & name,
@@ -118,7 +119,7 @@ public:
         ReadCallback callback = {}) const;
 
     OutputFormatPtr getOutputFormat(
-        const String & name, WriteBuffer & buf, const Block & sample, const Context & context, WriteCallback callback = {}) const;
+        const String & name, WriteBuffer & buf, const Block & sample, const Context & context, WriteCallback callback = {}, const bool ignore_no_row_delimiter = false) const;
 
     /// Register format by its name.
     void registerInputFormat(const String & name, InputCreator input_creator);

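The thread running through these format hunks is the replacement of a bare WriteCallback parameter with a RowOutputFormatParams struct, so that new knobs such as ignore_no_row_delimiter travel through every creator without another signature change. A small sketch of the pattern (simplified types, not the real FormatFactory API):

#include <cstdio>
#include <functional>
#include <string>

// Before: each creator took a bare callback, so adding any new option meant
// touching every signature. After: a single params struct travels through,
// and new fields (like ignore_no_row_delimiter) are purely additive.
struct RowOutputFormatParams
{
    std::function<void(const std::string & row)> callback;
    bool ignore_no_row_delimiter = false;
};

using Creator = std::function<void(const RowOutputFormatParams & params)>;

int main()
{
    Creator creator = [](const RowOutputFormatParams & params)
    {
        if (params.callback)
            params.callback("row 1");
        std::printf("ignore_no_row_delimiter = %d\n", params.ignore_no_row_delimiter);
    };

    RowOutputFormatParams params;
    params.callback = [](const std::string & row) { std::printf("%s\n", row.c_str()); };
    params.ignore_no_row_delimiter = true;
    creator(params);   // new options reach the format without signature churn
    return 0;
}
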
@@ -38,36 +38,47 @@ namespace
 // Those inequations helps checking conditions in ProtobufReader::SimpleReader.
 constexpr Int64 END_OF_VARINT = -1;
 constexpr Int64 END_OF_GROUP = -2;
+constexpr Int64 END_OF_FILE = -3;
 
 Int64 decodeZigZag(UInt64 n) { return static_cast<Int64>((n >> 1) ^ (~(n & 1) + 1)); }
 
-[[noreturn]] void throwUnknownFormat()
-{
-    throw Exception("Protobuf messages are corrupted or don't match the provided schema. Please note that Protobuf stream is length-delimited: every message is prefixed by its length in varint.", ErrorCodes::UNKNOWN_PROTOBUF_FORMAT);
-}
 }
 
 
 // SimpleReader is an utility class to deserialize protobufs.
 // Knows nothing about protobuf schemas, just provides useful functions to deserialize data.
-ProtobufReader::SimpleReader::SimpleReader(ReadBuffer & in_)
+ProtobufReader::SimpleReader::SimpleReader(ReadBuffer & in_, const bool use_length_delimiters_)
     : in(in_)
     , cursor(0)
    , current_message_level(0)
     , current_message_end(0)
     , field_end(0)
     , last_string_pos(-1)
+    , use_length_delimiters(use_length_delimiters_)
 {
 }
 
+[[noreturn]] void ProtobufReader::SimpleReader::throwUnknownFormat() const
+{
+    throw Exception(std::string("Protobuf messages are corrupted or don't match the provided schema.") + (use_length_delimiters ? " Please note that Protobuf stream is length-delimited: every message is prefixed by its length in varint." : ""), ErrorCodes::UNKNOWN_PROTOBUF_FORMAT);
+}
+
 bool ProtobufReader::SimpleReader::startMessage()
 {
+    // Start reading a root message.
     assert(!current_message_level);
     if (unlikely(in.eof()))
         return false;
-    size_t size_of_message = readVarint();
-    current_message_end = cursor + size_of_message;
+
+    if (use_length_delimiters)
+    {
+        size_t size_of_message = readVarint();
+        current_message_end = cursor + size_of_message;
+    }
+    else
+    {
+        current_message_end = END_OF_FILE;
+    }
     ++current_message_level;
     field_end = cursor;
     return true;
@@ -150,8 +161,23 @@ bool ProtobufReader::SimpleReader::readFieldNumber(UInt32 & field_number)
         throwUnknownFormat();
     }
 
-    if ((cursor >= current_message_end) && (current_message_end != END_OF_GROUP))
-        return false;
+    if (cursor >= current_message_end)
+    {
+        if (current_message_end == END_OF_FILE)
+        {
+            if (unlikely(in.eof()))
+            {
+                current_message_end = cursor;
+                return false;
+            }
+        }
+        else if (current_message_end == END_OF_GROUP)
+        {
+            /// We'll check for the `GROUP_END` marker later.
+        }
+        else
+            return false;
+    }
 
     UInt64 varint = readVarint();
     if (unlikely(varint & (static_cast<UInt64>(0xFFFFFFFF) << 32)))
@@ -1077,8 +1103,8 @@ std::unique_ptr<ProtobufReader::IConverter> ProtobufReader::createConverter<goog
 
 
 ProtobufReader::ProtobufReader(
-    ReadBuffer & in_, const google::protobuf::Descriptor * message_type, const std::vector<String> & column_names)
-    : simple_reader(in_)
+    ReadBuffer & in_, const google::protobuf::Descriptor * message_type, const std::vector<String> & column_names, const bool use_length_delimiters_)
+    : simple_reader(in_, use_length_delimiters_)
 {
     root_message = ProtobufColumnMatcher::matchColumns<ColumnMatcherTraits>(column_names, message_type);
     setTraitsDataAfterMatchingColumns(root_message.get());

@@ -37,7 +37,7 @@ using AggregateFunctionPtr = std::shared_ptr<IAggregateFunction>;
 class ProtobufReader : private boost::noncopyable
 {
 public:
-    ProtobufReader(ReadBuffer & in_, const google::protobuf::Descriptor * message_type, const std::vector<String> & column_names);
+    ProtobufReader(ReadBuffer & in_, const google::protobuf::Descriptor * message_type, const std::vector<String> & column_names, const bool use_length_delimiters_);
     ~ProtobufReader();
 
     /// Should be called when we start reading a new message.
@@ -93,7 +93,7 @@ private:
     class SimpleReader
     {
     public:
-        SimpleReader(ReadBuffer & in_);
+        SimpleReader(ReadBuffer & in_, const bool use_length_delimiters_);
         bool startMessage();
         void endMessage(bool ignore_errors);
         void startNestedMessage();
@@ -126,6 +126,7 @@ private:
         UInt64 continueReadingVarint(UInt64 first_byte);
         void ignoreVarint();
         void ignoreGroup();
+        [[noreturn]] void throwUnknownFormat() const;
 
         ReadBuffer & in;
         Int64 cursor;
@@ -134,6 +135,7 @@ private:
         std::vector<Int64> parent_message_ends;
         Int64 field_end;
         Int64 last_string_pos;
+        const bool use_length_delimiters;
     };
 
     class IConverter

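use_length_delimiters selects between a length-delimited protobuf stream, where every message is prefixed with its size as a base-128 varint, and a single unframed message (the ProtobufSingle case, where END_OF_FILE stands in for the message boundary). A self-contained sketch of the read side of that framing:

#include <cstdint>
#include <cstdio>
#include <optional>
#include <string>
#include <vector>

// Minimal varint length-prefix framing, the convention the reader above
// expects when use_length_delimiters is true: each message is preceded by
// its byte length encoded base-128, low bits first.
std::optional<std::string> readDelimited(const std::vector<uint8_t> & buf, size_t & pos)
{
    uint64_t len = 0;
    int shift = 0;
    while (true)
    {
        if (pos >= buf.size())
            return std::nullopt;              // clean EOF or truncated prefix
        uint8_t byte = buf[pos++];
        len |= uint64_t(byte & 0x7F) << shift;
        if (!(byte & 0x80))
            break;
        shift += 7;
    }
    if (pos + len > buf.size())
        return std::nullopt;                  // corrupted: length overruns buffer
    std::string msg(reinterpret_cast<const char *>(buf.data()) + pos, static_cast<size_t>(len));
    pos += static_cast<size_t>(len);
    return msg;
}

int main()
{
    std::vector<uint8_t> stream{3, 'a', 'b', 'c', 2, 'd', 'e'};   // two framed messages
    size_t pos = 0;
    while (auto msg = readDelimited(stream, pos))
        std::printf("%s\n", msg->c_str());
    return 0;
}
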
@@ -123,7 +123,11 @@ namespace
 
 // SimpleWriter is an utility class to serialize protobufs.
 // Knows nothing about protobuf schemas, just provides useful functions to serialize data.
-ProtobufWriter::SimpleWriter::SimpleWriter(WriteBuffer & out_) : out(out_), current_piece_start(0), num_bytes_skipped(0)
+ProtobufWriter::SimpleWriter::SimpleWriter(WriteBuffer & out_, const bool use_length_delimiters_)
+    : out(out_)
+    , current_piece_start(0)
+    , num_bytes_skipped(0)
+    , use_length_delimiters(use_length_delimiters_)
 {
 }
 
@@ -136,8 +140,11 @@ void ProtobufWriter::SimpleWriter::startMessage()
 void ProtobufWriter::SimpleWriter::endMessage()
 {
     pieces.emplace_back(current_piece_start, buffer.size());
-    size_t size_of_message = buffer.size() - num_bytes_skipped;
-    writeVarint(size_of_message, out);
+    if (use_length_delimiters)
+    {
+        size_t size_of_message = buffer.size() - num_bytes_skipped;
+        writeVarint(size_of_message, out);
+    }
     for (const auto & piece : pieces)
         if (piece.end > piece.start)
             out.write(reinterpret_cast<char *>(&buffer[piece.start]), piece.end - piece.start);
@@ -827,8 +834,8 @@ std::unique_ptr<ProtobufWriter::IConverter> ProtobufWriter::createConverter<goog
 
 
 ProtobufWriter::ProtobufWriter(
-    WriteBuffer & out, const google::protobuf::Descriptor * message_type, const std::vector<String> & column_names)
-    : simple_writer(out)
+    WriteBuffer & out, const google::protobuf::Descriptor * message_type, const std::vector<String> & column_names, const bool use_length_delimiters_)
+    : simple_writer(out, use_length_delimiters_)
 {
     std::vector<const google::protobuf::FieldDescriptor *> field_descriptors_without_match;
     root_message = ProtobufColumnMatcher::matchColumns<ColumnMatcherTraits>(column_names, message_type, field_descriptors_without_match);

@@ -37,7 +37,7 @@ using ConstAggregateDataPtr = const char *;
 class ProtobufWriter : private boost::noncopyable
 {
 public:
-    ProtobufWriter(WriteBuffer & out, const google::protobuf::Descriptor * message_type, const std::vector<String> & column_names);
+    ProtobufWriter(WriteBuffer & out, const google::protobuf::Descriptor * message_type, const std::vector<String> & column_names, const bool use_length_delimiters_);
     ~ProtobufWriter();
 
     /// Should be called at the beginning of writing a message.
@@ -89,7 +89,7 @@ private:
     class SimpleWriter
     {
     public:
-        SimpleWriter(WriteBuffer & out_);
+        SimpleWriter(WriteBuffer & out_, const bool use_length_delimiters_);
         ~SimpleWriter();
 
         void startMessage();
@@ -138,6 +138,7 @@ private:
         size_t current_piece_start;
         size_t num_bytes_skipped;
         std::vector<NestedInfo> nested_infos;
+        const bool use_length_delimiters;
     };
 
     class IConverter

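And the write side: emit the varint prefix only when framing is on, exactly as endMessage above does with writeVarint. A sketch under the same simplifications, with byte vectors instead of WriteBuffer:

#include <cstdint>
#include <cstdio>
#include <string>
#include <vector>

// Write-side counterpart of the framing above: emit the payload length as a
// base-128 varint, then the payload itself. With use_length_delimiters=false
// (the ProtobufSingle case) the prefix is skipped, so exactly one unframed
// message can occupy the stream.
void writeVarint(uint64_t value, std::vector<uint8_t> & out)
{
    while (value >= 0x80)
    {
        out.push_back(uint8_t(value) | 0x80);   // 7 payload bits + continuation bit
        value >>= 7;
    }
    out.push_back(uint8_t(value));
}

void writeMessage(const std::string & payload, bool use_length_delimiters, std::vector<uint8_t> & out)
{
    if (use_length_delimiters)
        writeVarint(payload.size(), out);
    out.insert(out.end(), payload.begin(), payload.end());
}

int main()
{
    std::vector<uint8_t> framed;
    writeMessage("abc", true, framed);
    writeMessage("de", true, framed);
    std::printf("%zu bytes framed\n", framed.size());    // 7: (1+3) + (1+2)

    std::vector<uint8_t> single;
    writeMessage("abc", false, single);                  // no prefix: one message only
    std::printf("%zu bytes unframed\n", single.size());  // 3
    return 0;
}
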
@@ -38,13 +38,14 @@ try
 
     FormatSettings format_settings;
 
-    RowInputFormatParams params{DEFAULT_INSERT_BLOCK_SIZE, 0, 0, []{}};
+    RowInputFormatParams in_params{DEFAULT_INSERT_BLOCK_SIZE, 0, 0, []{}};
+    RowOutputFormatParams out_params{[](const Columns & /* columns */, size_t /* row */){},false};
 
-    InputFormatPtr input_format = std::make_shared<TabSeparatedRowInputFormat>(sample, in_buf, params, false, false, format_settings);
+    InputFormatPtr input_format = std::make_shared<TabSeparatedRowInputFormat>(sample, in_buf, in_params, false, false, format_settings);
     BlockInputStreamPtr block_input = std::make_shared<InputStreamFromInputFormat>(std::move(input_format));
 
     BlockOutputStreamPtr block_output = std::make_shared<OutputStreamToOutputFormat>(
-        std::make_shared<TabSeparatedRowOutputFormat>(out_buf, sample, false, false, [](const Columns & /* columns */, size_t /* row */){}, format_settings));
+        std::make_shared<TabSeparatedRowOutputFormat>(out_buf, sample, false, false, out_params, format_settings));
 
     copyData(*block_input, *block_output);
     return 0;

@@ -4,25 +4,24 @@
 
 #if !defined(ARCADIA_BUILD) && USE_STATS
 
-#include <iostream>
-#include <vector>
-#include <algorithm>
-
-#include <common/types.h>
-#include <Common/PODArray.h>
+# include <common/types.h>
+# include <Common/PODArray.h>
+
+# include <algorithm>
+# include <iostream>
+# include <vector>
 
 
 namespace DB
 {
 
-typedef struct _Variant
+struct Variant
 {
     Float64 x;
     Float64 y;
     Float64 beats_control;
     Float64 best;
-} Variant;
+};
 
 using Variants = PODArray<Variant>;
 

@@ -384,7 +384,7 @@ public:
 
     /**
       * If one or both arguments passed to this function are nullable,
-      * we create a new columns that contains non-nullable arguments:
+      * we create a new column that contains non-nullable arguments:
       *
       * - if the 1st argument is a non-constant array of nullable values,
       * it is turned into a non-constant array of ordinary values + a null
@@ -491,7 +491,7 @@ private:
         NullMaps maps;
         ResultColumnPtr result { ResultColumnType::create() };
 
-        inline void move_result() { result_column = std::move(result); }
+        inline void moveResult() { result_column = std::move(result); }
     };
 
     static inline bool allowNested(const DataTypePtr & left, const DataTypePtr & right)
@@ -518,9 +518,11 @@ private:
             const DataTypePtr array_nullable_nested =
                 checkAndGetDataType<DataTypeNullable>(array_inner_type.get())->getNestedType();
 
+            // We also allow Nullable(T) and LC(U) if the Nullable(T) and U are allowed,
+            // the LC(U) will be converted to U.
             return allowNested(
                 array_nullable_nested,
-                arg_or_arg_nullable_nested);
+                recursiveRemoveLowCardinality(arg_or_arg_nullable_nested));
         }
         else if (arg_is_nullable) // cannot compare Array(T) elem (namely, T) and Nullable(T)
             return false;
@@ -568,6 +570,9 @@ private:
             return allowNested(array_lc_nested_or_lc_nullable_nested, arg);
         }
 
+        if (arg_is_lc) // Allow T and LC(U) if U and T are allowed (the low cardinality column will be converted).
+            return allowNested(array_inner_type, arg_lc_inner_type);
+
         return false;
     }
 
@@ -641,7 +646,8 @@ private:
         if (!left)
             return nullptr;
 
-        const IColumn& right = *arguments[1].column.get();
+        const ColumnPtr right_converted_ptr = arguments[1].column->convertToFullColumnIfLowCardinality();
+        const IColumn& right = *right_converted_ptr.get();
 
         ExecutionData data = {
             left->getData(),
@@ -709,7 +715,7 @@ private:
         else
             return false;
 
-        data.move_result();
+        data.moveResult();
         return true;
     }
 
@@ -862,7 +868,7 @@ private:
             data.result->getData(),
             data.maps.first, data.maps.second);
 
-        data.move_result();
+        data.moveResult();
         return true;
     }
 
@@ -880,7 +886,8 @@ private:
         if (!left)
             return nullptr;
 
-        const IColumn & right = *arguments[1].column.get();
+        const ColumnPtr right_ptr = arguments[1].column->convertToFullColumnIfLowCardinality();
+        const IColumn & right = *right_ptr.get();
 
         ExecutionData data = {
             *left, right, array->getOffsets(),
@@ -952,7 +959,7 @@ private:
         else
             return false;
 
-        data.move_result();
+        data.moveResult();
         return true;
     }
 
@@ -965,7 +972,8 @@ private:
 
         Array arr = col_array->getValue<Array>();
 
-        const IColumn * item_arg = arguments[1].column.get();
+        const ColumnPtr right_ptr = arguments[1].column->convertToFullColumnIfLowCardinality();
+        const IColumn * item_arg = right_ptr.get();
 
         if (isColumnConst(*item_arg))
         {
@@ -1037,7 +1045,9 @@ private:
             return nullptr;
 
         const IColumn & col_nested = col->getData();
-        const IColumn & item_arg = *arguments[1].column;
+
+        const ColumnPtr right_ptr = arguments[1].column->convertToFullColumnIfLowCardinality();
+        const IColumn & item_arg = *right_ptr.get();
 
         auto col_res = ResultColumnType::create();
 

@@ -1,8 +1,8 @@
-#include <gtest/gtest.h>
-
 #include <Functions/abtesting.h>
-#include <iostream>
-#include <stdio.h>
+
+#if !defined(ARCADIA_BUILD) && USE_STATS
+
+# include <gtest/gtest.h>
 
 using namespace DB;
 
@@ -97,3 +97,4 @@ TEST(BayesAB, gamma)
     ASSERT_EQ(0, max);
 }
 
+#endif

@@ -60,7 +60,6 @@ public:
       */
     virtual ~WriteBuffer() {}
 
-
     inline void nextIfAtEnd()
     {
         if (!hasPendingData())

@@ -787,12 +787,6 @@ void DDLWorker::processTask(DDLTask & task)
         storage = DatabaseCatalog::instance().tryGetTable(table_id, context);
     }
 
-    /// For some reason we check consistency of cluster definition only
-    /// in case of ALTER query, but not in case of CREATE/DROP etc.
-    /// It's strange, but this behaviour exits for a long and we cannot change it.
-    if (storage && query_with_table->as<ASTAlterQuery>())
-        checkShardConfig(query_with_table->table, task, storage);
-
     if (storage && taskShouldBeExecutedOnLeader(rewritten_ast, storage) && !is_circular_replicated)
         tryExecuteQueryOnLeaderReplica(task, storage, rewritten_query, task.entry_path, zookeeper);
     else
@@ -837,35 +831,6 @@ bool DDLWorker::taskShouldBeExecutedOnLeader(const ASTPtr ast_ddl, const Storage
     return storage->supportsReplication();
 }
 
-
-void DDLWorker::checkShardConfig(const String & table, const DDLTask & task, StoragePtr storage) const
-{
-    const auto & shard_info = task.cluster->getShardsInfo().at(task.host_shard_num);
-    bool config_is_replicated_shard = shard_info.hasInternalReplication();
-
-    if (dynamic_cast<const StorageDistributed *>(storage.get()))
-    {
-        LOG_TRACE(log, "Table {} is distributed, skip checking config.", backQuote(table));
-        return;
-    }
-
-    if (storage->supportsReplication() && !config_is_replicated_shard)
-    {
-        throw Exception(ErrorCodes::INCONSISTENT_CLUSTER_DEFINITION,
-            "Table {} is replicated, but shard #{} isn't replicated according to its cluster definition. "
-            "Possibly <internal_replication>true</internal_replication> is forgotten in the cluster config.",
-            backQuote(table), task.host_shard_num + 1);
-    }
-
-    if (!storage->supportsReplication() && config_is_replicated_shard)
-    {
-        throw Exception(ErrorCodes::INCONSISTENT_CLUSTER_DEFINITION,
-            "Table {} isn't replicated, but shard #{} is replicated according to its cluster definition",
-            backQuote(table), task.host_shard_num + 1);
-    }
-}
-
 
 bool DDLWorker::tryExecuteQueryOnLeaderReplica(
     DDLTask & task,
     StoragePtr storage,

@@ -75,9 +75,6 @@ private:
     /// Check that query should be executed on leader replica only
     static bool taskShouldBeExecutedOnLeader(const ASTPtr ast_ddl, StoragePtr storage);
 
-    /// Check that shard has consistent config with table
-    void checkShardConfig(const String & table, const DDLTask & task, StoragePtr storage) const;
-
     /// Executes query only on leader replica in case of replicated table.
     /// Queries like TRUNCATE/ALTER .../OPTIMIZE have to be executed only on one node of shard.
     /// Most of these queries can be executed on non-leader replica, but actually they still send

@@ -417,8 +417,10 @@ void DatabaseCatalog::addUUIDMapping(const UUID & uuid, DatabasePtr database, St
     UUIDToStorageMapPart & map_part = uuid_map[getFirstLevelIdx(uuid)];
     std::lock_guard lock{map_part.mutex};
     auto [_, inserted] = map_part.map.try_emplace(uuid, std::move(database), std::move(table));
+    /// Normally this should never happen, but it's possible when the same UUIDs are explicitly specified in different CREATE queries,
+    /// so it's not LOGICAL_ERROR
     if (!inserted)
-        throw Exception("Mapping for table with UUID=" + toString(uuid) + " already exists", ErrorCodes::LOGICAL_ERROR);
+        throw Exception("Mapping for table with UUID=" + toString(uuid) + " already exists", ErrorCodes::TABLE_ALREADY_EXISTS);
 }
 
 void DatabaseCatalog::removeUUIDMapping(const UUID & uuid)

@@ -117,11 +117,14 @@ ExpressionAnalyzer::ExpressionAnalyzer(
     const TreeRewriterResultPtr & syntax_analyzer_result_,
     const Context & context_,
     size_t subquery_depth_,
-    bool do_global)
+    bool do_global,
+    SubqueriesForSets subqueries_for_sets_)
     : query(query_), context(context_), settings(context.getSettings())
     , subquery_depth(subquery_depth_)
     , syntax(syntax_analyzer_result_)
 {
+    subqueries_for_sets = std::move(subqueries_for_sets_);
+
     /// external_tables, subqueries_for_sets for global subqueries.
     /// Replaces global subqueries with the generated names of temporary tables that will be sent to remote servers.
     initGlobalSubqueriesAndExternalTables(do_global);
@@ -421,11 +424,17 @@ bool ExpressionAnalyzer::makeAggregateDescriptions(ActionsDAGPtr & actions)
         aggregate.argument_names.resize(arguments.size());
         DataTypes types(arguments.size());
 
+        const auto & index = actions->getIndex();
         for (size_t i = 0; i < arguments.size(); ++i)
         {
             getRootActionsNoMakeSet(arguments[i], true, actions);
             const std::string & name = arguments[i]->getColumnName();
-            types[i] = actions->getIndex().find(name)->second->result_type;
+
+            auto it = index.find(name);
+            if (it == index.end())
+                throw Exception(ErrorCodes::UNKNOWN_IDENTIFIER, "Unknown identifier (in aggregate function '{}'): {}", node->name, name);
+
+            types[i] = it->second->result_type;
             aggregate.argument_names[i] = name;
         }
 

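The aggregate-description hunk replaces an unchecked index.find(name)->second, which is undefined behaviour when the identifier is missing, with a lookup that names the offending column. The pattern in miniature:

#include <cstdio>
#include <stdexcept>
#include <string>
#include <unordered_map>

// Checked lookup: a missing key yields a clear diagnostic instead of
// dereferencing end(), which is what the old code risked.
const std::string & resultType(const std::unordered_map<std::string, std::string> & index,
                               const std::string & name)
{
    auto it = index.find(name);
    if (it == index.end())
        throw std::runtime_error("Unknown identifier: " + name);
    return it->second;
}

int main()
{
    std::unordered_map<std::string, std::string> index{{"x", "UInt64"}};
    std::printf("%s\n", resultType(index, "x").c_str());
    try
    {
        resultType(index, "y");                      // missing: clear error, no UB
    }
    catch (const std::exception & e)
    {
        std::printf("%s\n", e.what());
    }
    return 0;
}
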
@@ -93,7 +93,7 @@ public:
         const ASTPtr & query_,
         const TreeRewriterResultPtr & syntax_analyzer_result_,
         const Context & context_)
-        : ExpressionAnalyzer(query_, syntax_analyzer_result_, context_, 0, false)
+        : ExpressionAnalyzer(query_, syntax_analyzer_result_, context_, 0, false, {})
     {}
 
     void appendExpression(ExpressionActionsChain & chain, const ASTPtr & expr, bool only_types);
@@ -124,7 +124,8 @@ protected:
         const TreeRewriterResultPtr & syntax_analyzer_result_,
         const Context & context_,
         size_t subquery_depth_,
-        bool do_global_);
+        bool do_global_,
+        SubqueriesForSets subqueries_for_sets_);
 
     ASTPtr query;
     const Context & context;
@@ -244,8 +245,9 @@ public:
         const StorageMetadataPtr & metadata_snapshot_,
         const NameSet & required_result_columns_ = {},
         bool do_global_ = false,
-        const SelectQueryOptions & options_ = {})
-        : ExpressionAnalyzer(query_, syntax_analyzer_result_, context_, options_.subquery_depth, do_global_)
+        const SelectQueryOptions & options_ = {},
+        SubqueriesForSets subqueries_for_sets_ = {})
+        : ExpressionAnalyzer(query_, syntax_analyzer_result_, context_, options_.subquery_depth, do_global_, std::move(subqueries_for_sets_))
         , metadata_snapshot(metadata_snapshot_)
         , required_result_columns(required_result_columns_)
         , query_options(options_)

@@ -304,6 +304,8 @@ InterpreterSelectQuery::InterpreterSelectQuery(
     if (storage)
         view = dynamic_cast<StorageView *>(storage.get());
 
+    SubqueriesForSets subquery_for_sets;
+
     auto analyze = [&] (bool try_move_to_prewhere)
     {
         /// Allow push down and other optimizations for VIEW: replace with subquery and rewrite it.
@@ -344,7 +346,7 @@ InterpreterSelectQuery::InterpreterSelectQuery(
         query_analyzer = std::make_unique<SelectQueryExpressionAnalyzer>(
             query_ptr, syntax_analyzer_result, *context, metadata_snapshot,
             NameSet(required_result_column_names.begin(), required_result_column_names.end()),
-            !options.only_analyze, options);
+            !options.only_analyze, options, std::move(subquery_for_sets));
 
         if (!options.only_analyze)
         {
@@ -430,6 +432,7 @@ InterpreterSelectQuery::InterpreterSelectQuery(
 
     if (need_analyze_again)
     {
+        subquery_for_sets = std::move(query_analyzer->getSubqueriesForSets());
         /// Do not try move conditions to PREWHERE for the second time.
         /// Otherwise, we won't be able to fallback from inefficient PREWHERE to WHERE later.
         analyze(/* try_move_to_prewhere = */ false);

@@ -124,6 +124,8 @@ ASTPtr ASTColumns::clone() const
         res->set(res->indices, indices->clone());
     if (constraints)
         res->set(res->constraints, constraints->clone());
+    if (primary_key)
+        res->set(res->primary_key, primary_key->clone());
 
     return res;
 }

@@ -41,6 +41,7 @@ public:
     ASTExpressionList * columns = nullptr;
     ASTExpressionList * indices = nullptr;
     ASTExpressionList * constraints = nullptr;
+    IAST * primary_key = nullptr;
 
     String getID(char) const override { return "Columns definition"; }
 
@@ -19,6 +19,11 @@
 namespace DB
 {
 
+namespace ErrorCodes
+{
+    extern const int BAD_ARGUMENTS;
+}
+
 bool ParserNestedTable::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
 {
     ParserToken open(TokenType::OpeningRoundBracket);
@@ -150,10 +155,12 @@ bool ParserTablePropertyDeclaration::parseImpl(Pos & pos, ASTPtr & node, Expecte
 {
     ParserKeyword s_index("INDEX");
     ParserKeyword s_constraint("CONSTRAINT");
+    ParserKeyword s_primary_key("PRIMARY KEY");
 
     ParserIndexDeclaration index_p;
     ParserConstraintDeclaration constraint_p;
     ParserColumnDeclaration column_p{true, true};
+    ParserExpression primary_key_p;
 
     ASTPtr new_node = nullptr;
 
@@ -167,6 +174,11 @@ bool ParserTablePropertyDeclaration::parseImpl(Pos & pos, ASTPtr & node, Expecte
         if (!constraint_p.parse(pos, new_node, expected))
             return false;
     }
+    else if (s_primary_key.ignore(pos, expected))
+    {
+        if (!primary_key_p.parse(pos, new_node, expected))
+            return false;
+    }
     else
     {
         if (!column_p.parse(pos, new_node, expected))
@@ -201,6 +213,7 @@ bool ParserTablePropertiesDeclarationList::parseImpl(Pos & pos, ASTPtr & node, E
     ASTPtr columns = std::make_shared<ASTExpressionList>();
     ASTPtr indices = std::make_shared<ASTExpressionList>();
     ASTPtr constraints = std::make_shared<ASTExpressionList>();
+    ASTPtr primary_key;
 
     for (const auto & elem : list->children)
     {
@@ -210,6 +223,14 @@ bool ParserTablePropertiesDeclarationList::parseImpl(Pos & pos, ASTPtr & node, E
             indices->children.push_back(elem);
         else if (elem->as<ASTConstraintDeclaration>())
             constraints->children.push_back(elem);
+        else if (elem->as<ASTIdentifier>() || elem->as<ASTFunction>())
+        {
+            if (primary_key)
+            {
+                throw Exception("Multiple primary keys are not allowed.", ErrorCodes::BAD_ARGUMENTS);
+            }
+            primary_key = elem;
+        }
         else
             return false;
     }
@@ -222,6 +243,8 @@ bool ParserTablePropertiesDeclarationList::parseImpl(Pos & pos, ASTPtr & node, E
         res->set(res->indices, indices);
     if (!constraints->children.empty())
         res->set(res->constraints, constraints);
+    if (primary_key)
+        res->set(res->primary_key, primary_key);
 
     node = res;
 
@@ -472,6 +495,15 @@ bool ParserCreateTableQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expe
     query->set(query->columns_list, columns_list);
     query->set(query->storage, storage);
 
+    if (query->storage && query->columns_list && query->columns_list->primary_key)
+    {
+        if (query->storage->primary_key)
+        {
+            throw Exception("Multiple primary keys are not allowed.", ErrorCodes::BAD_ARGUMENTS);
+        }
+        query->storage->primary_key = query->columns_list->primary_key;
+    }
+
     tryGetIdentifierNameInto(as_database, query->as_database);
     tryGetIdentifierNameInto(as_table, query->as_table);
     query->set(query->select, select);

@@ -391,6 +391,7 @@ protected:
   * ...
   * INDEX name1 expr TYPE type1(args) GRANULARITY value,
   * ...
+  * PRIMARY KEY expr
   * ) ENGINE = engine
   *
   * Or:

@@ -21,12 +21,13 @@ void IRowOutputFormat::consume(DB::Chunk chunk)
     {
         if (!first_row)
             writeRowBetweenDelimiter();
-        first_row = false;
 
         write(columns, row);
 
-        if (write_single_row_callback)
-            write_single_row_callback(columns, row);
+        if (params.callback)
+            params.callback(columns, row);
+
+        first_row = false;
     }
 }
 
@@ -9,6 +9,22 @@
 namespace DB
 {
 
+struct RowOutputFormatParams
+{
+    using WriteCallback = std::function<void(const Columns & columns,size_t row)>;
+
+    // Callback used to indicate that another row is written.
+    WriteCallback callback;
+
+    /**
+     * some buffers (kafka / rabbit) split the rows internally using callback
+     * so we can push there formats without framing / delimiters
+     * (like ProtobufSingle). In other cases you can't write more than single row
+     * in unframed format.
+     */
+    bool ignore_no_row_delimiter = false;
+};
+
 class WriteBuffer;
 
 /** Output format that writes data row by row.
@@ -17,6 +33,7 @@ class IRowOutputFormat : public IOutputFormat
 {
 protected:
     DataTypes types;
+    bool first_row = true;
 
     void consume(Chunk chunk) override;
     void consumeTotals(Chunk chunk) override;
@@ -24,8 +41,10 @@ protected:
     void finalize() override;
 
 public:
-    IRowOutputFormat(const Block & header, WriteBuffer & out_, FormatFactory::WriteCallback callback)
-        : IOutputFormat(header, out_), types(header.getDataTypes()), write_single_row_callback(callback)
+    using Params = RowOutputFormatParams;
+
+    IRowOutputFormat(const Block & header, WriteBuffer & out_, const Params & params_)
+        : IOutputFormat(header, out_), types(header.getDataTypes()), params(params_)
     {
     }
 
@@ -55,12 +74,10 @@ public:
     virtual void writeLastSuffix() {} /// Write something after resultset, totals end extremes.
 
 private:
-    bool first_row = true;
     bool prefix_written = false;
     bool suffix_written = false;
 
-    // Callback used to indicate that another row is written.
-    FormatFactory::WriteCallback write_single_row_callback;
+    Params params;
 
     void writePrefixIfNot()
     {

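The consume hunk also delays flipping first_row until after the row and its callback have completed, which keeps the between-row delimiter logic correct even if a write throws partway. A tiny sketch of that delimiter discipline:

#include <cstdio>
#include <string>
#include <vector>

// The separator is written *before* every row except the first, and
// first_row flips only after the row (and its callback) completed, so a
// row that throws is not counted as written.
class RowWriter
{
public:
    void writeRow(const std::string & row)
    {
        if (!first_row)
            std::printf(",");          // writeRowBetweenDelimiter()
        std::printf("%s", row.c_str());
        first_row = false;             // only after a successful write
    }

private:
    bool first_row = true;
};

int main()
{
    RowWriter writer;
    for (const std::string & row : std::vector<std::string>{"a", "b", "c"})
        writer.writeRow(row);
    std::printf("\n");                 // prints "a,b,c": no trailing delimiter
    return 0;
}
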
@@ -79,7 +79,7 @@ void registerOutputFormatProcessorArrow(FormatFactory & factory)
         "Arrow",
         [](WriteBuffer & buf,
            const Block & sample,
-           FormatFactory::WriteCallback,
+           const RowOutputFormatParams &,
            const FormatSettings & format_settings)
         {
             return std::make_shared<ArrowBlockOutputFormat>(buf, sample, false, format_settings);
@@ -89,7 +89,7 @@ void registerOutputFormatProcessorArrow(FormatFactory & factory)
         "ArrowStream",
         [](WriteBuffer & buf,
            const Block & sample,
-           FormatFactory::WriteCallback,
+           const RowOutputFormatParams &,
            const FormatSettings & format_settings)
         {
             return std::make_shared<ArrowBlockOutputFormat>(buf, sample, true, format_settings);

@@ -347,8 +347,8 @@ static avro::Codec getCodec(const std::string & codec_name)
 }
 
 AvroRowOutputFormat::AvroRowOutputFormat(
-    WriteBuffer & out_, const Block & header_, FormatFactory::WriteCallback callback, const FormatSettings & settings_)
-    : IRowOutputFormat(header_, out_, callback)
+    WriteBuffer & out_, const Block & header_, const RowOutputFormatParams & params_, const FormatSettings & settings_)
+    : IRowOutputFormat(header_, out_, params_)
     , settings(settings_)
     , serializer(header_.getColumnsWithTypeAndName())
     , file_writer(
@@ -383,10 +383,10 @@ void registerOutputFormatProcessorAvro(FormatFactory & factory)
     factory.registerOutputFormatProcessor("Avro", [](
         WriteBuffer & buf,
         const Block & sample,
-        FormatFactory::WriteCallback callback,
+        const RowOutputFormatParams & params,
         const FormatSettings & settings)
     {
-        return std::make_shared<AvroRowOutputFormat>(buf, sample, callback, settings);
+        return std::make_shared<AvroRowOutputFormat>(buf, sample, params, settings);
     });
 }
 
@@ -43,7 +43,7 @@ private:
 class AvroRowOutputFormat : public IRowOutputFormat
 {
 public:
-    AvroRowOutputFormat(WriteBuffer & out_, const Block & header_, FormatFactory::WriteCallback callback, const FormatSettings & settings_);
+    AvroRowOutputFormat(WriteBuffer & out_, const Block & header_, const RowOutputFormatParams & params_, const FormatSettings & settings_);
     virtual ~AvroRowOutputFormat() override;
 
     String getName() const override { return "AvroRowOutputFormat"; }

@@ -9,8 +9,8 @@
 namespace DB
 {
 
-BinaryRowOutputFormat::BinaryRowOutputFormat(WriteBuffer & out_, const Block & header, bool with_names_, bool with_types_, FormatFactory::WriteCallback callback)
-    : IRowOutputFormat(header, out_, callback), with_names(with_names_), with_types(with_types_)
+BinaryRowOutputFormat::BinaryRowOutputFormat(WriteBuffer & out_, const Block & header, bool with_names_, bool with_types_, const RowOutputFormatParams & params_)
+    : IRowOutputFormat(header, out_, params_), with_names(with_names_), with_types(with_types_)
 {
 }
 
@@ -52,19 +52,19 @@ void registerOutputFormatProcessorRowBinary(FormatFactory & factory)
     factory.registerOutputFormatProcessor("RowBinary", [](
         WriteBuffer & buf,
         const Block & sample,
-        FormatFactory::WriteCallback callback,
+        const RowOutputFormatParams & params,
         const FormatSettings &)
     {
-        return std::make_shared<BinaryRowOutputFormat>(buf, sample, false, false, callback);
+        return std::make_shared<BinaryRowOutputFormat>(buf, sample, false, false, params);
     });
 
     factory.registerOutputFormatProcessor("RowBinaryWithNamesAndTypes", [](
         WriteBuffer & buf,
         const Block & sample,
-        FormatFactory::WriteCallback callback,
+        const RowOutputFormatParams & params,
        const FormatSettings &)
     {
-        return std::make_shared<BinaryRowOutputFormat>(buf, sample, true, true, callback);
+        return std::make_shared<BinaryRowOutputFormat>(buf, sample, true, true, params);
     });
 }
 
@@ -17,7 +17,7 @@ class WriteBuffer;
 class BinaryRowOutputFormat: public IRowOutputFormat
 {
 public:
-    BinaryRowOutputFormat(WriteBuffer & out_, const Block & header, bool with_names_, bool with_types_, FormatFactory::WriteCallback callback);
+    BinaryRowOutputFormat(WriteBuffer & out_, const Block & header, bool with_names_, bool with_types_, const RowOutputFormatParams & params_);
 
     String getName() const override { return "BinaryRowOutputFormat"; }
 
@@ -8,8 +8,8 @@ namespace DB
 {
 
 
-CSVRowOutputFormat::CSVRowOutputFormat(WriteBuffer & out_, const Block & header_, bool with_names_, FormatFactory::WriteCallback callback, const FormatSettings & format_settings_)
-    : IRowOutputFormat(header_, out_, callback), with_names(with_names_), format_settings(format_settings_)
+CSVRowOutputFormat::CSVRowOutputFormat(WriteBuffer & out_, const Block & header_, bool with_names_, const RowOutputFormatParams & params_, const FormatSettings & format_settings_)
+    : IRowOutputFormat(header_, out_, params_), with_names(with_names_), format_settings(format_settings_)
 {
     const auto & sample = getPort(PortKind::Main).getHeader();
     size_t columns = sample.columns();
@@ -77,10 +77,10 @@ void registerOutputFormatProcessorCSV(FormatFactory & factory)
         factory.registerOutputFormatProcessor(with_names ? "CSVWithNames" : "CSV", [=](
             WriteBuffer & buf,
             const Block & sample,
-            FormatFactory::WriteCallback callback,
+            const RowOutputFormatParams & params,
             const FormatSettings & format_settings)
         {
-            return std::make_shared<CSVRowOutputFormat>(buf, sample, with_names, callback, format_settings);
+            return std::make_shared<CSVRowOutputFormat>(buf, sample, with_names, params, format_settings);
         });
     }
 }

@@ -20,7 +20,7 @@ public:
     /** with_names - output in the first line a header with column names
       * with_types - output in the next line header with the names of the types
       */
-    CSVRowOutputFormat(WriteBuffer & out_, const Block & header_, bool with_names_, FormatFactory::WriteCallback callback, const FormatSettings & format_settings_);
+    CSVRowOutputFormat(WriteBuffer & out_, const Block & header_, bool with_names_, const RowOutputFormatParams & params_, const FormatSettings & format_settings_);
 
     String getName() const override { return "CSVRowOutputFormat"; }
 
@@ -10,11 +10,11 @@ namespace DB
 
 JSONCompactEachRowRowOutputFormat::JSONCompactEachRowRowOutputFormat(WriteBuffer & out_,
         const Block & header_,
-        FormatFactory::WriteCallback callback,
+        const RowOutputFormatParams & params_,
         const FormatSettings & settings_,
         bool with_names_,
         bool yield_strings_)
-        : IRowOutputFormat(header_, out_, callback), settings(settings_), with_names(with_names_), yield_strings(yield_strings_)
+        : IRowOutputFormat(header_, out_, params_), settings(settings_), with_names(with_names_), yield_strings(yield_strings_)
 {
     const auto & sample = getPort(PortKind::Main).getHeader();
     NamesAndTypesList columns(sample.getNamesAndTypesList());
@@ -103,37 +103,37 @@ void registerOutputFormatProcessorJSONCompactEachRow(FormatFactory & factory)
     factory.registerOutputFormatProcessor("JSONCompactEachRow", [](
         WriteBuffer & buf,
         const Block & sample,
-        FormatFactory::WriteCallback callback,
+        const RowOutputFormatParams & params,
         const FormatSettings & format_settings)
     {
-        return std::make_shared<JSONCompactEachRowRowOutputFormat>(buf, sample, callback, format_settings, false, false);
+        return std::make_shared<JSONCompactEachRowRowOutputFormat>(buf, sample, params, format_settings, false, false);
     });
 
     factory.registerOutputFormatProcessor("JSONCompactEachRowWithNamesAndTypes", [](
         WriteBuffer &buf,
         const Block &sample,
-        FormatFactory::WriteCallback callback,
+        const RowOutputFormatParams & params,
         const FormatSettings &format_settings)
     {
-        return std::make_shared<JSONCompactEachRowRowOutputFormat>(buf, sample, callback, format_settings, true, false);
+        return std::make_shared<JSONCompactEachRowRowOutputFormat>(buf, sample, params, format_settings, true, false);
     });
 
     factory.registerOutputFormatProcessor("JSONCompactStringsEachRow", [](
         WriteBuffer & buf,
         const Block & sample,
-        FormatFactory::WriteCallback callback,
+        const RowOutputFormatParams & params,
         const FormatSettings & format_settings)
     {
-        return std::make_shared<JSONCompactEachRowRowOutputFormat>(buf, sample, callback, format_settings, false, true);
+        return std::make_shared<JSONCompactEachRowRowOutputFormat>(buf, sample, params, format_settings, false, true);
    });
 
     factory.registerOutputFormatProcessor("JSONCompactStringsEachRowWithNamesAndTypes", [](
         WriteBuffer &buf,
         const Block &sample,
-        FormatFactory::WriteCallback callback,
+        const RowOutputFormatParams & params,
         const FormatSettings &format_settings)
     {
-        return std::make_shared<JSONCompactEachRowRowOutputFormat>(buf, sample, callback, format_settings, true, true);
+        return std::make_shared<JSONCompactEachRowRowOutputFormat>(buf, sample, params, format_settings, true, true);
     });
 }
 
@@ -18,7 +18,7 @@ public:
     JSONCompactEachRowRowOutputFormat(
         WriteBuffer & out_,
         const Block & header_,
-        FormatFactory::WriteCallback callback,
+        const RowOutputFormatParams & params_,
         const FormatSettings & settings_,
         bool with_names_,
         bool yield_strings_);

@@ -10,10 +10,10 @@ namespace DB
 JSONCompactRowOutputFormat::JSONCompactRowOutputFormat(
     WriteBuffer & out_,
     const Block & header,
-    FormatFactory::WriteCallback callback,
+    const RowOutputFormatParams & params_,
     const FormatSettings & settings_,
     bool yield_strings_)
-    : JSONRowOutputFormat(out_, header, callback, settings_, yield_strings_)
+    : JSONRowOutputFormat(out_, header, params_, settings_, yield_strings_)
 {
 }
 
@@ -93,19 +93,19 @@ void registerOutputFormatProcessorJSONCompact(FormatFactory & factory)
     factory.registerOutputFormatProcessor("JSONCompact", [](
         WriteBuffer & buf,
         const Block & sample,
-        FormatFactory::WriteCallback callback,
+        const RowOutputFormatParams & params,
         const FormatSettings & format_settings)
     {
-        return std::make_shared<JSONCompactRowOutputFormat>(buf, sample, callback, format_settings, false);
+        return std::make_shared<JSONCompactRowOutputFormat>(buf, sample, params, format_settings, false);
     });
 
     factory.registerOutputFormatProcessor("JSONCompactStrings", [](
         WriteBuffer & buf,
         const Block & sample,
-        FormatFactory::WriteCallback callback,
+        const RowOutputFormatParams & params,
         const FormatSettings & format_settings)
     {
-        return std::make_shared<JSONCompactRowOutputFormat>(buf, sample, callback, format_settings, true);
+        return std::make_shared<JSONCompactRowOutputFormat>(buf, sample, params, format_settings, true);
     });
 }