Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-22 07:31:57 +00:00)

Merge remote-tracking branch 'rschu1ze/master' into order-by-all-ambiguities

Commit 6f6e74ad8b
.github/workflows/master.yml (vendored) — 17 changed lines

@@ -327,6 +327,7 @@ jobs:
run_command: |
python3 build_report_check.py "$CHECK_NAME"
MarkReleaseReady:
if: ${{ ! (contains(needs.*.result, 'skipped') || contains(needs.*.result, 'failure')) }}
needs:
- BuilderBinDarwin
- BuilderBinDarwinAarch64

@@ -374,14 +375,6 @@ jobs:
test_name: Stateless tests (release)
runner_type: func-tester
data: ${{ needs.RunConfig.outputs.data }}
FunctionalStatelessTestCoverage:
needs: [RunConfig, BuilderDebReleaseCoverage]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateless tests (coverage)
runner_type: func-tester
data: ${{ needs.RunConfig.outputs.data }}
FunctionalStatelessTestReleaseDatabaseReplicated:
needs: [RunConfig, BuilderDebRelease]
if: ${{ !failure() && !cancelled() }}

@@ -482,14 +475,6 @@ jobs:
test_name: Stateful tests (release)
runner_type: func-tester
data: ${{ needs.RunConfig.outputs.data }}
FunctionalStatefulTestCoverage:
needs: [RunConfig, BuilderDebReleaseCoverage]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateful tests (coverage)
runner_type: func-tester
data: ${{ needs.RunConfig.outputs.data }}
FunctionalStatefulTestAarch64:
needs: [RunConfig, BuilderDebAarch64]
if: ${{ !failure() && !cancelled() }}

.github/workflows/pull_request.yml (vendored) — 31 changed lines

@@ -391,14 +391,6 @@ jobs:
test_name: Stateless tests (release)
runner_type: func-tester
data: ${{ needs.RunConfig.outputs.data }}
FunctionalStatelessTestCoverage:
needs: [RunConfig, BuilderDebReleaseCoverage]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateless tests (coverage)
runner_type: func-tester
data: ${{ needs.RunConfig.outputs.data }}
FunctionalStatelessTestReleaseDatabaseReplicated:
needs: [RunConfig, BuilderDebRelease]
if: ${{ !failure() && !cancelled() }}

@@ -500,21 +492,9 @@ jobs:
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: tests bugfix validate check
test_name: Bugfix validation
runner_type: func-tester
data: ${{ needs.RunConfig.outputs.data }}
additional_envs: |
KILL_TIMEOUT=3600
run_command: |
TEMP_PATH="${TEMP_PATH}/integration" \
python3 integration_test_check.py "Integration $CHECK_NAME" \
--validate-bugfix --post-commit-status=file || echo "ignore exit code"

TEMP_PATH="${TEMP_PATH}/stateless" \
python3 functional_test_check.py "Stateless $CHECK_NAME" "$KILL_TIMEOUT" \
--validate-bugfix --post-commit-status=file || echo "ignore exit code"

python3 bugfix_validate_check.py "${TEMP_PATH}/stateless/functional_commit_status.tsv" "${TEMP_PATH}/integration/integration_commit_status.tsv"
##############################################################################################
############################ FUNCTIONAl STATEFUL TESTS #######################################
##############################################################################################

@@ -526,14 +506,6 @@ jobs:
test_name: Stateful tests (release)
runner_type: func-tester
data: ${{ needs.RunConfig.outputs.data }}
FunctionalStatefulTestCoverage:
needs: [RunConfig, BuilderDebReleaseCoverage]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateful tests (coverage)
runner_type: func-tester
data: ${{ needs.RunConfig.outputs.data }}
FunctionalStatefulTestAarch64:
needs: [RunConfig, BuilderDebAarch64]
if: ${{ !failure() && !cancelled() }}

@@ -915,6 +887,7 @@ jobs:
- BuilderSpecialReport
- DocsCheck
- FastTest
- TestsBugfixCheck
- FunctionalStatelessTestDebug
- FunctionalStatelessTestRelease
- FunctionalStatelessTestReleaseDatabaseReplicated

.github/workflows/release_branches.yml (vendored) — 1 changed line

@@ -228,6 +228,7 @@ jobs:
run_command: |
python3 build_report_check.py "$CHECK_NAME"
MarkReleaseReady:
if: ${{ ! (contains(needs.*.result, 'skipped') || contains(needs.*.result, 'failure')) }}
needs:
- BuilderBinDarwin
- BuilderBinDarwinAarch64

.gitmessage — 10 changed lines

@@ -1,6 +1,6 @@


### CI modificators (add a leading space to apply):
### CI modificators (add a leading space to apply) ###

## To avoid a merge commit in CI:
#no_merge_commit

@@ -8,13 +8,21 @@
## To discard CI cache:
#no_ci_cache

## To not test (only style check):
#do_not_test

## To run specified set of tests in CI:
#ci_set_<SET_NAME>
#ci_set_reduced
#ci_set_arm
#ci_set_integration

## To run specified job in CI:
#job_<JOB NAME>
#job_stateless_tests_release
#job_package_debug
#job_integration_tests_asan

## To run only specified batches for multi-batch job(s)
#batch_2
#btach_1_2_3

contrib/curl (vendored) — 2 changed lines

@@ -1 +1 @@
Subproject commit 7161cb17c01dcff1dc5bf89a18437d9d729f1ecd
Subproject commit 5ce164e0e9290c96eb7d502173426c0a135ec008

contrib/libssh (vendored) — 2 changed lines

@@ -1 +1 @@
Subproject commit 2c76332ef56d90f55965ab24da6b6dbcbef29c4c
Subproject commit ed4011b91873836713576475a98cd132cd834539

@@ -8,24 +8,12 @@ endif()
set(LIB_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/libssh")
set(LIB_BINARY_DIR "${ClickHouse_BINARY_DIR}/contrib/libssh")

project(libssh VERSION 0.9.7 LANGUAGES C)
# Set CMake variables which are used in libssh_version.h.cmake
project(libssh VERSION 0.9.8 LANGUAGES C)

# global needed variable
set(APPLICATION_NAME ${PROJECT_NAME})

# SOVERSION scheme: CURRENT.AGE.REVISION
# If there was an incompatible interface change:
# Increment CURRENT. Set AGE and REVISION to 0
# If there was a compatible interface change:
# Increment AGE. Set REVISION to 0
# If the source code was changed, but there were no interface changes:
# Increment REVISION.
set(LIBRARY_VERSION "4.8.7")
set(LIBRARY_VERSION "4.8.8")
set(LIBRARY_SOVERSION "4")

# Copy library files to a lib sub-directory
set(CMAKE_LIBRARY_OUTPUT_DIRECTORY "${LIB_BINARY_DIR}/lib")

set(CMAKE_THREAD_PREFER_PTHREADS ON)
set(THREADS_PREFER_PTHREAD_FLAG ON)

@@ -33,7 +21,87 @@ set(WITH_ZLIB OFF)
set(WITH_SYMBOL_VERSIONING OFF)
set(WITH_SERVER ON)

include(IncludeSources.cmake)
set(libssh_SRCS
${LIB_SOURCE_DIR}/src/agent.c
${LIB_SOURCE_DIR}/src/auth.c
${LIB_SOURCE_DIR}/src/base64.c
${LIB_SOURCE_DIR}/src/bignum.c
${LIB_SOURCE_DIR}/src/buffer.c
${LIB_SOURCE_DIR}/src/callbacks.c
${LIB_SOURCE_DIR}/src/channels.c
${LIB_SOURCE_DIR}/src/client.c
${LIB_SOURCE_DIR}/src/config.c
${LIB_SOURCE_DIR}/src/connect.c
${LIB_SOURCE_DIR}/src/connector.c
${LIB_SOURCE_DIR}/src/curve25519.c
${LIB_SOURCE_DIR}/src/dh.c
${LIB_SOURCE_DIR}/src/ecdh.c
${LIB_SOURCE_DIR}/src/error.c
${LIB_SOURCE_DIR}/src/getpass.c
${LIB_SOURCE_DIR}/src/init.c
${LIB_SOURCE_DIR}/src/kdf.c
${LIB_SOURCE_DIR}/src/kex.c
${LIB_SOURCE_DIR}/src/known_hosts.c
${LIB_SOURCE_DIR}/src/knownhosts.c
${LIB_SOURCE_DIR}/src/legacy.c
${LIB_SOURCE_DIR}/src/log.c
${LIB_SOURCE_DIR}/src/match.c
${LIB_SOURCE_DIR}/src/messages.c
${LIB_SOURCE_DIR}/src/misc.c
${LIB_SOURCE_DIR}/src/options.c
${LIB_SOURCE_DIR}/src/packet.c
${LIB_SOURCE_DIR}/src/packet_cb.c
${LIB_SOURCE_DIR}/src/packet_crypt.c
${LIB_SOURCE_DIR}/src/pcap.c
${LIB_SOURCE_DIR}/src/pki.c
${LIB_SOURCE_DIR}/src/pki_container_openssh.c
${LIB_SOURCE_DIR}/src/poll.c
${LIB_SOURCE_DIR}/src/session.c
${LIB_SOURCE_DIR}/src/scp.c
${LIB_SOURCE_DIR}/src/socket.c
${LIB_SOURCE_DIR}/src/string.c
${LIB_SOURCE_DIR}/src/threads.c
${LIB_SOURCE_DIR}/src/wrapper.c
${LIB_SOURCE_DIR}/src/external/bcrypt_pbkdf.c
${LIB_SOURCE_DIR}/src/external/blowfish.c
${LIB_SOURCE_DIR}/src/external/chacha.c
${LIB_SOURCE_DIR}/src/external/poly1305.c
${LIB_SOURCE_DIR}/src/chachapoly.c
${LIB_SOURCE_DIR}/src/config_parser.c
${LIB_SOURCE_DIR}/src/token.c
${LIB_SOURCE_DIR}/src/pki_ed25519_common.c

${LIB_SOURCE_DIR}/src/threads/noop.c
${LIB_SOURCE_DIR}/src/threads/pthread.c

# LIBCRYPT specific
${libssh_SRCS}
${LIB_SOURCE_DIR}/src/threads/libcrypto.c
${LIB_SOURCE_DIR}/src/pki_crypto.c
${LIB_SOURCE_DIR}/src/ecdh_crypto.c
${LIB_SOURCE_DIR}/src/libcrypto.c
${LIB_SOURCE_DIR}/src/dh_crypto.c

${LIB_SOURCE_DIR}/src/options.c
${LIB_SOURCE_DIR}/src/server.c
${LIB_SOURCE_DIR}/src/bind.c
${LIB_SOURCE_DIR}/src/bind_config.c
)

if (NOT (ENABLE_OPENSSL OR ENABLE_OPENSSL_DYNAMIC))
add_compile_definitions(USE_BORINGSSL=1)
endif()

configure_file(${LIB_SOURCE_DIR}/include/libssh/libssh_version.h.cmake ${LIB_BINARY_DIR}/include/libssh/libssh_version.h @ONLY)

add_library(_ssh STATIC ${libssh_SRCS})
add_library(ch_contrib::ssh ALIAS _ssh)

target_link_libraries(_ssh PRIVATE OpenSSL::Crypto)

target_include_directories(_ssh PUBLIC "${LIB_SOURCE_DIR}/include" "${LIB_BINARY_DIR}/include")

# These headers need to be generated using the native build system on each platform.
if (OS_LINUX)
if (ARCH_AMD64)
if (USE_MUSL)

@@ -63,7 +131,3 @@ elseif (OS_FREEBSD)
else ()
message(FATAL_ERROR "Platform is not supported")
endif()

configure_file(${LIB_SOURCE_DIR}/include/libssh/libssh_version.h.cmake
${LIB_BINARY_DIR}/include/libssh/libssh_version.h
@ONLY)

@@ -1,98 +0,0 @@
set(LIBSSH_LINK_LIBRARIES
${LIBSSH_LINK_LIBRARIES}
OpenSSL::Crypto
)

set(libssh_SRCS
${LIB_SOURCE_DIR}/src/agent.c
${LIB_SOURCE_DIR}/src/auth.c
${LIB_SOURCE_DIR}/src/base64.c
${LIB_SOURCE_DIR}/src/bignum.c
${LIB_SOURCE_DIR}/src/buffer.c
${LIB_SOURCE_DIR}/src/callbacks.c
${LIB_SOURCE_DIR}/src/channels.c
${LIB_SOURCE_DIR}/src/client.c
${LIB_SOURCE_DIR}/src/config.c
${LIB_SOURCE_DIR}/src/connect.c
${LIB_SOURCE_DIR}/src/connector.c
${LIB_SOURCE_DIR}/src/curve25519.c
${LIB_SOURCE_DIR}/src/dh.c
${LIB_SOURCE_DIR}/src/ecdh.c
${LIB_SOURCE_DIR}/src/error.c
${LIB_SOURCE_DIR}/src/getpass.c
${LIB_SOURCE_DIR}/src/init.c
${LIB_SOURCE_DIR}/src/kdf.c
${LIB_SOURCE_DIR}/src/kex.c
${LIB_SOURCE_DIR}/src/known_hosts.c
${LIB_SOURCE_DIR}/src/knownhosts.c
${LIB_SOURCE_DIR}/src/legacy.c
${LIB_SOURCE_DIR}/src/log.c
${LIB_SOURCE_DIR}/src/match.c
${LIB_SOURCE_DIR}/src/messages.c
${LIB_SOURCE_DIR}/src/misc.c
${LIB_SOURCE_DIR}/src/options.c
${LIB_SOURCE_DIR}/src/packet.c
${LIB_SOURCE_DIR}/src/packet_cb.c
${LIB_SOURCE_DIR}/src/packet_crypt.c
${LIB_SOURCE_DIR}/src/pcap.c
${LIB_SOURCE_DIR}/src/pki.c
${LIB_SOURCE_DIR}/src/pki_container_openssh.c
${LIB_SOURCE_DIR}/src/poll.c
${LIB_SOURCE_DIR}/src/session.c
${LIB_SOURCE_DIR}/src/scp.c
${LIB_SOURCE_DIR}/src/socket.c
${LIB_SOURCE_DIR}/src/string.c
${LIB_SOURCE_DIR}/src/threads.c
${LIB_SOURCE_DIR}/src/wrapper.c
${LIB_SOURCE_DIR}/src/external/bcrypt_pbkdf.c
${LIB_SOURCE_DIR}/src/external/blowfish.c
${LIB_SOURCE_DIR}/src/external/chacha.c
${LIB_SOURCE_DIR}/src/external/poly1305.c
${LIB_SOURCE_DIR}/src/chachapoly.c
${LIB_SOURCE_DIR}/src/config_parser.c
${LIB_SOURCE_DIR}/src/token.c
${LIB_SOURCE_DIR}/src/pki_ed25519_common.c
)

set(libssh_SRCS
${libssh_SRCS}
${LIB_SOURCE_DIR}/src/threads/noop.c
${LIB_SOURCE_DIR}/src/threads/pthread.c
)

# LIBCRYPT specific
set(libssh_SRCS
${libssh_SRCS}
${LIB_SOURCE_DIR}/src/threads/libcrypto.c
${LIB_SOURCE_DIR}/src/pki_crypto.c
${LIB_SOURCE_DIR}/src/ecdh_crypto.c
${LIB_SOURCE_DIR}/src/libcrypto.c
${LIB_SOURCE_DIR}/src/dh_crypto.c
)

if (NOT (ENABLE_OPENSSL OR ENABLE_OPENSSL_DYNAMIC))
add_compile_definitions(USE_BORINGSSL=1)
endif()

set(libssh_SRCS
${libssh_SRCS}
${LIB_SOURCE_DIR}/src/options.c
${LIB_SOURCE_DIR}/src/server.c
${LIB_SOURCE_DIR}/src/bind.c
${LIB_SOURCE_DIR}/src/bind_config.c
)

add_library(_ssh STATIC ${libssh_SRCS})

target_include_directories(_ssh PRIVATE ${LIB_BINARY_DIR})
target_include_directories(_ssh PUBLIC "${LIB_SOURCE_DIR}/include" "${LIB_BINARY_DIR}/include")
target_link_libraries(_ssh
PRIVATE ${LIBSSH_LINK_LIBRARIES})

add_library(ch_contrib::ssh ALIAS _ssh)

target_compile_options(_ssh
PRIVATE
${DEFAULT_C_COMPILE_FLAGS}
-D_GNU_SOURCE)

contrib/lz4 (vendored) — 2 changed lines

@@ -1 +1 @@
Subproject commit 92ebf1870b9acbefc0e7970409a181954a10ff40
Subproject commit ce45a9dbdb059511a3e9576b19db3e7f1a4f172e

@@ -190,7 +190,7 @@ function setup_logs_replication
echo -e "Creating remote destination table ${table}_${hash} with statement:\n${statement}" >&2

echo "$statement" | clickhouse-client --database_replicated_initial_query_timeout_sec=10 \
--distributed_ddl_task_timeout=30 \
--distributed_ddl_task_timeout=30 --distributed_ddl_output_mode=throw_only_active \
"${CONNECTION_ARGS[@]}" || continue

echo "Creating table system.${table}_sender" >&2

@@ -166,11 +166,11 @@ For most external applications, we recommend using the HTTP interface because it

## Configuration {#configuration}

ClickHouse Server is based on POCO C++ Libraries and uses `Poco::Util::AbstractConfiguration` to represent it's configuration. Configuration is held by `Poco::Util::ServerApplication` class inherited by `DaemonBase` class, which in turn is inherited by `DB::Server` class, implementing clickhouse-server itself. So config can be accessed by `ServerApplication::config()` method.
ClickHouse Server is based on POCO C++ Libraries and uses `Poco::Util::AbstractConfiguration` to represent its configuration. Configuration is held by `Poco::Util::ServerApplication` class inherited by `DaemonBase` class, which in turn is inherited by `DB::Server` class, implementing clickhouse-server itself. So config can be accessed by `ServerApplication::config()` method.

Config is read from multiple files (in XML or YAML format) and merged into single `AbstractConfiguration` by `ConfigProcessor` class. Configuration is loaded at server startup and can be reloaded later if one of config files is updated, removed or added. `ConfigReloader` class is responsible for periodic monitoring of these changes and reload procedure as well. `SYSTEM RELOAD CONFIG` query also triggers config to be reloaded.

For queries and subsystems other than `Server` config is accessible using `Context::getConfigRef()` method. Every subsystem that is capable of reloading it's config without server restart should register itself in reload callback in `Server::main()` method. Note that if newer config has an error, most subsystems will ignore new config, log warning messages and keep working with previously loaded config. Due to the nature of `AbstractConfiguration` it is not possible to pass reference to specific section, so `String config_prefix` is usually used instead.
For queries and subsystems other than `Server` config is accessible using `Context::getConfigRef()` method. Every subsystem that is capable of reloading its config without server restart should register itself in reload callback in `Server::main()` method. Note that if newer config has an error, most subsystems will ignore new config, log warning messages and keep working with previously loaded config. Due to the nature of `AbstractConfiguration` it is not possible to pass reference to specific section, so `String config_prefix` is usually used instead.

## Threads and jobs {#threads-and-jobs}

@@ -255,7 +255,7 @@ When we are going to read something from a part in `MergeTree`, we look at `prim

When you `INSERT` a bunch of data into `MergeTree`, that bunch is sorted by primary key order and forms a new part. There are background threads that periodically select some parts and merge them into a single sorted part to keep the number of parts relatively low. That’s why it is called `MergeTree`. Of course, merging leads to “write amplification”. All parts are immutable: they are only created and deleted, but not modified. When SELECT is executed, it holds a snapshot of the table (a set of parts). After merging, we also keep old parts for some time to make a recovery after failure easier, so if we see that some merged part is probably broken, we can replace it with its source parts.

`MergeTree` is not an LSM tree because it does not contain MEMTABLE and LOG: inserted data is written directly to the filesystem. This behavior makes MergeTree much more suitable to insert data in batches. Therefore frequently inserting small amounts of rows is not ideal for MergeTree. For example, a couple of rows per second is OK, but doing it a thousand times a second is not optimal for MergeTree. However, there is an async insert mode for small inserts to overcome this limitation. We did it this way for simplicity’s sake, and because we are already inserting data in batches in our applications
`MergeTree` is not an LSM tree because it does not contain MEMTABLE and LOG: inserted data is written directly to the filesystem. This behavior makes MergeTree much more suitable to insert data in batches. Therefore, frequently inserting small amounts of rows is not ideal for MergeTree. For example, a couple of rows per second is OK, but doing it a thousand times a second is not optimal for MergeTree. However, there is an async insert mode for small inserts to overcome this limitation. We did it this way for simplicity’s sake, and because we are already inserting data in batches in our applications
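
A rough sketch of that async insert mode (the table name below is hypothetical; `async_insert` and `wait_for_async_insert` are the relevant settings):

```sql
-- Buffer small, frequent inserts server-side instead of creating a new part per INSERT.
SET async_insert = 1, wait_for_async_insert = 1;
INSERT INTO events VALUES (1, 'page_view'), (2, 'click');
```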

There are MergeTree engines that are doing additional work during background merges. Examples are `CollapsingMergeTree` and `AggregatingMergeTree`. This could be treated as special support for updates. Keep in mind that these are not real updates because users usually have no control over the time when background merges are executed, and data in a `MergeTree` table is almost always stored in more than one part, not in completely merged form.

@@ -38,7 +38,7 @@ ninja

## Running

Once built, the binary can be run with, eg.:
Once built, the binary can be run with, e.g.:

```bash
qemu-s390x-static -L /usr/s390x-linux-gnu ./clickhouse

@@ -95,7 +95,7 @@ Complete below three steps mentioned in [Star Schema Benchmark](https://clickhou
- Inserting data. Here should use `./benchmark_sample/rawdata_dir/ssb-dbgen/*.tbl` as input data.
- Converting “star schema” to de-normalized “flat schema”

Set up database with with IAA Deflate codec
Set up database with IAA Deflate codec

``` bash
$ cd ./database_dir/deflate

@@ -104,7 +104,7 @@ $ [CLICKHOUSE_EXE] client
```
Complete three steps same as lz4 above

Set up database with with ZSTD codec
Set up database with ZSTD codec

``` bash
$ cd ./database_dir/zstd

@@ -13,7 +13,7 @@ ClickHouse utilizes third-party libraries for different purposes, e.g., to conne
SELECT library_name, license_type, license_path FROM system.licenses ORDER BY library_name COLLATE 'en';
```

(Note that the listed libraries are the ones located in the `contrib/` directory of the ClickHouse repository. Depending on the build options, some of of the libraries may have not been compiled, and as a result, their functionality may not be available at runtime.
Note that the listed libraries are the ones located in the `contrib/` directory of the ClickHouse repository. Depending on the build options, some of the libraries may have not been compiled, and as a result, their functionality may not be available at runtime.

[Example](https://play.clickhouse.com/play?user=play#U0VMRUNUIGxpYnJhcnlfbmFtZSwgbGljZW5zZV90eXBlLCBsaWNlbnNlX3BhdGggRlJPTSBzeXN0ZW0ubGljZW5zZXMgT1JERVIgQlkgbGlicmFyeV9uYW1lIENPTExBVEUgJ2VuJw==)

@@ -7,13 +7,13 @@ description: Prerequisites and an overview of how to build ClickHouse

# Getting Started Guide for Building ClickHouse

ClickHouse can be build on Linux, FreeBSD and macOS. If you use Windows, you can still build ClickHouse in a virtual machine running Linux, e.g. [VirtualBox](https://www.virtualbox.org/) with Ubuntu.
ClickHouse can be built on Linux, FreeBSD and macOS. If you use Windows, you can still build ClickHouse in a virtual machine running Linux, e.g. [VirtualBox](https://www.virtualbox.org/) with Ubuntu.

ClickHouse requires a 64-bit system to compile and run, 32-bit systems do not work.

## Creating a Repository on GitHub {#creating-a-repository-on-github}

To start developing for ClickHouse you will need a [GitHub](https://www.virtualbox.org/) account. Please also generate a SSH key locally (if you don't have one already) and upload the public key to GitHub as this is a prerequisite for contributing patches.
To start developing for ClickHouse you will need a [GitHub](https://www.virtualbox.org/) account. Please also generate an SSH key locally (if you don't have one already) and upload the public key to GitHub as this is a prerequisite for contributing patches.

Next, create a fork of the [ClickHouse repository](https://github.com/ClickHouse/ClickHouse/) in your personal account by clicking the "fork" button in the upper right corner.

@@ -37,7 +37,7 @@ git clone git@github.com:your_github_username/ClickHouse.git # replace placehol
cd ClickHouse
```

This command creates a directory `ClickHouse/` containing the source code of ClickHouse. If you specify a custom checkout directory after the URL but it is important that this path does not contain whitespaces as it may lead to problems with the build later on.
This command creates a directory `ClickHouse/` containing the source code of ClickHouse. If you specify a custom checkout directory after the URL, but it is important that this path does not contain whitespaces as it may lead to problems with the build later on.

The ClickHouse repository uses Git submodules, i.e. references to external repositories (usually 3rd party libraries used by ClickHouse). These are not checked out by default. To do so, you can either

@@ -45,7 +45,7 @@ The ClickHouse repository uses Git submodules, i.e. references to external repos

- if `git clone` did not check out submodules, run `git submodule update --init --jobs <N>` (e.g. `<N> = 12` to parallelize the checkout) to achieve the same as the previous alternative, or

- if `git clone` did not check out submodules and you like to use [sparse](https://github.blog/2020-01-17-bring-your-monorepo-down-to-size-with-sparse-checkout/) and [shallow](https://github.blog/2020-12-21-get-up-to-speed-with-partial-clone-and-shallow-clone/) submodule checkout to omit unneeded files and history in submodules to save space (ca. 5 GB instead of ca. 15 GB), run `./contrib/update-submodules.sh`. Not really recommended as it generally makes working with submodules less convenient and slower.
- if `git clone` did not check out submodules, and you like to use [sparse](https://github.blog/2020-01-17-bring-your-monorepo-down-to-size-with-sparse-checkout/) and [shallow](https://github.blog/2020-12-21-get-up-to-speed-with-partial-clone-and-shallow-clone/) submodule checkout to omit unneeded files and history in submodules to save space (ca. 5 GB instead of ca. 15 GB), run `./contrib/update-submodules.sh`. Not really recommended as it generally makes working with submodules less convenient and slower.

You can check the Git status with the command: `git submodule status`.

@@ -143,7 +143,7 @@ When a large amount of RAM is available on build machine you should limit the nu

On machines with 4GB of RAM, it is recommended to specify 1, for 8GB of RAM `-j 2` is recommended.

If you get the message: `ninja: error: loading 'build.ninja': No such file or directory`, it means that generating a build configuration has failed and you need to inspect the message above.
If you get the message: `ninja: error: loading 'build.ninja': No such file or directory`, it means that generating a build configuration has failed, and you need to inspect the message above.

Upon the successful start of the building process, you’ll see the build progress - the number of processed tasks and the total number of tasks.

@@ -184,7 +184,7 @@ You can also run your custom-built ClickHouse binary with the config file from t

**CLion (recommended)**

If you do not know which IDE to use, we recommend that you use [CLion](https://www.jetbrains.com/clion/). CLion is commercial software but it offers a 30 day free trial. It is also free of charge for students. CLion can be used on both Linux and macOS.
If you do not know which IDE to use, we recommend that you use [CLion](https://www.jetbrains.com/clion/). CLion is commercial software, but it offers a 30 day free trial. It is also free of charge for students. CLion can be used on both Linux and macOS.

A few things to know when using CLion to develop ClickHouse:

@@ -2,7 +2,7 @@

Nearest neighborhood search is the problem of finding the M closest points for a given point in an N-dimensional vector space. The most
straightforward approach to solve this problem is a brute force search where the distance between all points in the vector space and the
reference point is computed. This method guarantees perfect accuracy but it is usually too slow for practical applications. Thus, nearest
reference point is computed. This method guarantees perfect accuracy, but it is usually too slow for practical applications. Thus, nearest
neighborhood search problems are often solved with [approximative algorithms](https://github.com/erikbern/ann-benchmarks). Approximative
nearest neighborhood search techniques, in conjunction with [embedding
methods](https://cloud.google.com/architecture/overview-extracting-and-serving-feature-embeddings-for-machine-learning) allow to search huge

@@ -24,7 +24,7 @@ LIMIT N

`vectors` contains N-dimensional values of type [Array](../../../sql-reference/data-types/array.md) or
[Tuple](../../../sql-reference/data-types/tuple.md), for example embeddings. Function `Distance` computes the distance between two vectors.
Often, the the Euclidean (L2) distance is chosen as distance function but [other
Often, the Euclidean (L2) distance is chosen as distance function but [other
distance functions](/docs/en/sql-reference/functions/distance-functions.md) are also possible. `Point` is the reference point, e.g. `(0.17,
0.33, ...)`, and `N` limits the number of search results.
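
A minimal sketch of such a query, assuming a hypothetical table `images` with an `Array(Float32)` column `vectors`, and using the built-in `L2Distance` function as `Distance`:

```sql
-- Return the 10 rows whose stored vector is closest to the reference point.
SELECT id
FROM images
ORDER BY L2Distance(vectors, [0.17, 0.33, 0.25])
LIMIT 10;
```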

@@ -109,7 +109,7 @@ clickhouse-client --param_vec='hello' --query="SELECT * FROM table_with_ann_inde

**Restrictions**: Queries that contain both a `WHERE Distance(vectors, Point) < MaxDistance` and an `ORDER BY Distance(vectors, Point)`
clause cannot use ANN indexes. Also, the approximate algorithms used to determine the nearest neighbors require a limit, hence queries
without `LIMIT` clause cannot utilize ANN indexes. Also ANN indexes are only used if the query has a `LIMIT` value smaller than setting
without `LIMIT` clause cannot utilize ANN indexes. Also, ANN indexes are only used if the query has a `LIMIT` value smaller than setting
`max_limit_for_ann_queries` (default: 1 million rows). This is a safeguard to prevent large memory allocations by external libraries for
approximate neighbor search.

@@ -120,9 +120,9 @@ then each indexed block will contain 16384 rows. However, data structures and al
provided by external libraries) are inherently row-oriented. They store a compact representation of a set of rows and also return rows for
ANN queries. This causes some rather unintuitive differences in the way ANN indexes behave compared to normal skip indexes.

When a user defines a ANN index on a column, ClickHouse internally creates a ANN "sub-index" for each index block. The sub-index is "local"
When a user defines an ANN index on a column, ClickHouse internally creates an ANN "sub-index" for each index block. The sub-index is "local"
in the sense that it only knows about the rows of its containing index block. In the previous example and assuming that a column has 65536
rows, we obtain four index blocks (spanning eight granules) and a ANN sub-index for each index block. A sub-index is theoretically able to
rows, we obtain four index blocks (spanning eight granules) and an ANN sub-index for each index block. A sub-index is theoretically able to
return the rows with the N closest points within its index block directly. However, since ClickHouse loads data from disk to memory at the
granularity of granules, sub-indexes extrapolate matching rows to granule granularity. This is different from regular skip indexes which
skip data at the granularity of index blocks.

@@ -231,7 +231,7 @@ The Annoy index currently does not work with per-table, non-default `index_granu

## USearch {#usearch}

This type of ANN index is based on the [the USearch library](https://github.com/unum-cloud/usearch), which implements the [HNSW
This type of ANN index is based on the [USearch library](https://github.com/unum-cloud/usearch), which implements the [HNSW
algorithm](https://arxiv.org/abs/1603.09320), i.e., builds a hierarchical graph where each point represents a vector and the edges represent
similarity. Such hierarchical structures can be very efficient on large collections. They may often fetch 0.05% or less data from the
overall dataset, while still providing 99% recall. This is especially useful when working with high-dimensional vectors,

@@ -125,7 +125,7 @@ For each resulting data part ClickHouse saves:
3. The first “cancel” row, if there are more “cancel” rows than “state” rows.
4. None of the rows, in all other cases.

Also when there are at least 2 more “state” rows than “cancel” rows, or at least 2 more “cancel” rows then “state” rows, the merge continues, but ClickHouse treats this situation as a logical error and records it in the server log. This error can occur if the same data were inserted more than once.
Also, when there are at least 2 more “state” rows than “cancel” rows, or at least 2 more “cancel” rows then “state” rows, the merge continues, but ClickHouse treats this situation as a logical error and records it in the server log. This error can occur if the same data were inserted more than once.

Thus, collapsing should not change the results of calculating statistics.
Changes gradually collapsed so that in the end only the last state of almost every object left.

@@ -196,7 +196,7 @@ What do we see and where is collapsing?

With two `INSERT` queries, we created 2 data parts. The `SELECT` query was performed in 2 threads, and we got a random order of rows. Collapsing not occurred because there was no merge of the data parts yet. ClickHouse merges data part in an unknown moment which we can not predict.

Thus we need aggregation:
Thus, we need aggregation:

``` sql
SELECT

@@ -72,7 +72,7 @@ Specifying the `sharding_key` is necessary for the following:

#### fsync_directories

`fsync_directories` - do the `fsync` for directories. Guarantees that the OS refreshed directory metadata after operations related to background inserts on Distributed table (after insert, after sending the data to shard, etc).
`fsync_directories` - do the `fsync` for directories. Guarantees that the OS refreshed directory metadata after operations related to background inserts on Distributed table (after insert, after sending the data to shard, etc.).

#### bytes_to_throw_insert

@@ -220,7 +220,7 @@ Second, you can perform `INSERT` statements on a `Distributed` table. In this ca

Each shard can have a `<weight>` defined in the config file. By default, the weight is `1`. Data is distributed across shards in the amount proportional to the shard weight. All shard weights are summed up, then each shard's weight is divided by the total to determine each shard's proportion. For example, if there are two shards and the first has a weight of 1 while the second has a weight of 2, the first will be sent one third (1 / 3) of inserted rows and the second will be sent two thirds (2 / 3).

Each shard can have the `internal_replication` parameter defined in the config file. If this parameter is set to `true`, the write operation selects the first healthy replica and writes data to it. Use this if the tables underlying the `Distributed` table are replicated tables (e.g. any of the `Replicated*MergeTree` table engines). One of the table replicas will receive the write and it will be replicated to the other replicas automatically.
Each shard can have the `internal_replication` parameter defined in the config file. If this parameter is set to `true`, the write operation selects the first healthy replica and writes data to it. Use this if the tables underlying the `Distributed` table are replicated tables (e.g. any of the `Replicated*MergeTree` table engines). One of the table replicas will receive the write, and it will be replicated to the other replicas automatically.

If `internal_replication` is set to `false` (the default), data is written to all replicas. In this case, the `Distributed` table replicates data itself. This is worse than using replicated tables because the consistency of replicas is not checked and, over time, they will contain slightly different data.

@@ -12,7 +12,7 @@ The queries below were executed on a **Production** instance of [ClickHouse Clou
:::

1. Without inserting the data into ClickHouse, we can query it in place. Let's grab some rows so we can see what they look like:
1. Without inserting the data into ClickHouse, we can query it in place. Let's grab some rows, so we can see what they look like:

```sql
SELECT *

@@ -29,7 +29,7 @@ Here is a preview of the dashboard created in this guide:

This dataset is from [OpenCelliD](https://www.opencellid.org/) - The world's largest Open Database of Cell Towers.

As of 2021, it contains more than 40 million records about cell towers (GSM, LTE, UMTS, etc.) around the world with their geographical coordinates and metadata (country code, network, etc).
As of 2021, it contains more than 40 million records about cell towers (GSM, LTE, UMTS, etc.) around the world with their geographical coordinates and metadata (country code, network, etc.).

OpenCelliD Project is licensed under a Creative Commons Attribution-ShareAlike 4.0 International License, and we redistribute a snapshot of this dataset under the terms of the same license. The up-to-date version of the dataset is available to download after sign in.

@@ -355,7 +355,7 @@ Click on **UPDATE CHART** to render the visualization.

### Add the charts to a **dashboard**

This screenshot shows cell tower locations with LTE, UMTS, and GSM radios. The charts are all created in the same way and they are added to a dashboard.
This screenshot shows cell tower locations with LTE, UMTS, and GSM radios. The charts are all created in the same way, and they are added to a dashboard.

![Dashboard of cell towers by radio type in mcc 204](@site/docs/en/getting-started/example-datasets/images/superset-cell-tower-dashboard.png)

@@ -132,7 +132,7 @@ FROM covid19;
└────────────────────────────────────────────┘
```

7. You will notice the data has a lot of 0's for dates - either weekends or days where numbers were not reported each day. We can use a window function to smooth out the daily averages of new cases:
7. You will notice the data has a lot of 0's for dates - either weekends or days when numbers were not reported each day. We can use a window function to smooth out the daily averages of new cases:

```sql
SELECT

@@ -262,4 +262,4 @@ The results look like

:::note
As mentioned in the [GitHub repo](https://github.com/GoogleCloudPlatform/covid-19-open-data), the dataset is no longer updated as of September 15, 2022.
:::
:::

@@ -243,7 +243,7 @@ If no database is specified, the `default` database will be used.

If the user name, password or database was specified in the connection string, it cannot be specified using `--user`, `--password` or `--database` (and vice versa).

The host component can either be an a host name and IP address. Put an IPv6 address in square brackets to specify it:
The host component can either be a host name and IP address. Put an IPv6 address in square brackets to specify it:

```text
clickhouse://[2001:db8::1234]

@@ -33,7 +33,7 @@ The supported formats are:
| [JSONAsString](#jsonasstring) | ✔ | ✗ |
| [JSONStrings](#jsonstrings) | ✔ | ✔ |
| [JSONColumns](#jsoncolumns) | ✔ | ✔ |
| [JSONColumnsWithMetadata](#jsoncolumnsmonoblock)) | ✔ | ✔ |
| [JSONColumnsWithMetadata](#jsoncolumnsmonoblock) | ✔ | ✔ |
| [JSONCompact](#jsoncompact) | ✔ | ✔ |
| [JSONCompactStrings](#jsoncompactstrings) | ✗ | ✔ |
| [JSONCompactColumns](#jsoncompactcolumns) | ✔ | ✔ |

@@ -13,7 +13,7 @@ can control it.

Schema inference is used when ClickHouse needs to read the data in a specific data format and the structure is unknown.

## Table functions [file](../sql-reference/table-functions/file.md), [s3](../sql-reference/table-functions/s3.md), [url](../sql-reference/table-functions/url.md), [hdfs](../sql-reference/table-functions/hdfs.md).
## Table functions [file](../sql-reference/table-functions/file.md), [s3](../sql-reference/table-functions/s3.md), [url](../sql-reference/table-functions/url.md), [hdfs](../sql-reference/table-functions/hdfs.md), [azureBlobStorage](../sql-reference/table-functions/azureBlobStorage.md).

These table functions have the optional argument `structure` with the structure of input data. If this argument is not specified or set to `auto`, the structure will be inferred from the data.
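
A small sketch of both variants, reusing the `hobbies.jsonl` file from the surrounding examples (the explicit column list here is an assumption):

```sql
-- Structure passed explicitly through the optional third argument.
SELECT * FROM file('hobbies.jsonl', 'JSONEachRow', 'id UInt32, hobby String');

-- Structure omitted (equivalent to 'auto'): it is inferred from the data.
SELECT * FROM file('hobbies.jsonl');
```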

@@ -55,7 +55,7 @@ DESCRIBE file('hobbies.jsonl')
└─────────┴─────────────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘
```

## Table engines [File](../engines/table-engines/special/file.md), [S3](../engines/table-engines/integrations/s3.md), [URL](../engines/table-engines/special/url.md), [HDFS](../engines/table-engines/integrations/hdfs.md)
## Table engines [File](../engines/table-engines/special/file.md), [S3](../engines/table-engines/integrations/s3.md), [URL](../engines/table-engines/special/url.md), [HDFS](../engines/table-engines/integrations/hdfs.md), [azureBlobStorage](../engines/table-engines/integrations/azureBlobStorage.md)

If the list of columns is not specified in `CREATE TABLE` query, the structure of the table will be inferred automatically from the data.

@@ -1061,7 +1061,7 @@ $$)
└──────────────┴───────────────┘
```

## Values {#values}
### Values {#values}

In Values format ClickHouse extracts column value from the row and then parses it using
the recursive parser similar to how literals are parsed.
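
A minimal sketch with a hypothetical table; each literal in the tuples below is parsed by that recursive parser:

```sql
-- Numbers, date-time strings and array literals are all parsed as in a regular query.
INSERT INTO visits VALUES (1, '2024-01-01 00:00:00', ['a', 'b']), (2, '2024-01-02 00:00:00', []);
```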

@@ -1986,3 +1986,46 @@ Note:
- As some of the files may not contain some columns from the resulting schema, union mode is supported only for formats that support reading subset of columns (like JSONEachRow, Parquet, TSVWithNames, etc) and won't work for other formats (like CSV, TSV, JSONCompactEachRow, etc).
- If ClickHouse cannot infer the schema from one of the files, the exception will be thrown.
- If you have a lot of files, reading schema from all of them can take a lot of time.

## Automatic format detection {#automatic-format-detection}

If data format is not specified and cannot be determined by the file extension, ClickHouse will try to detect the file format by its content.

**Examples:**

Let's say we have `data` with the following content:
```
"a","b"
1,"Data1"
2,"Data2"
3,"Data3"
```

We can inspect and query this file without specifying format or structure:
```sql
:) desc file(data);
```

```text
┌─name─┬─type─────────────┐
│ a │ Nullable(Int64) │
│ b │ Nullable(String) │
└──────┴──────────────────┘
```

```sql
:) select * from file(data);
```

```text
┌─a─┬─b─────┐
│ 1 │ Data1 │
│ 2 │ Data2 │
│ 3 │ Data3 │
└───┴───────┘
```

:::note
ClickHouse can detect only some subset of formats and this detection takes some time, it's always better to specify the format explicitly.
:::

@@ -6,7 +6,7 @@ sidebar_label: Configuration Files

# Configuration Files

The ClickHouse server can be configured with configuration files in XML or YAML syntax. In most installation types, the ClickHouse server runs with `/etc/clickhouse-server/config.xml` as default configuration file but it is also possible to specify the location of the configuration file manually at server startup using command line option `--config-file=` or `-C`. Additional configuration files may be placed into directory `config.d/` relative to the main configuration file, for example into directory `/etc/clickhouse-server/config.d/`. Files in this directory and the main configuration are merged in a preprocessing step before the configuration is applied in ClickHouse server. Configuration files are merged in alphabetical order. To simplify updates and improve modularization, it is best practice to keep the default `config.xml` file unmodified and place additional customization into `config.d/`.
The ClickHouse server can be configured with configuration files in XML or YAML syntax. In most installation types, the ClickHouse server runs with `/etc/clickhouse-server/config.xml` as default configuration file, but it is also possible to specify the location of the configuration file manually at server startup using command line option `--config-file=` or `-C`. Additional configuration files may be placed into directory `config.d/` relative to the main configuration file, for example into directory `/etc/clickhouse-server/config.d/`. Files in this directory and the main configuration are merged in a preprocessing step before the configuration is applied in ClickHouse server. Configuration files are merged in alphabetical order. To simplify updates and improve modularization, it is best practice to keep the default `config.xml` file unmodified and place additional customization into `config.d/`.

It is possible to mix XML and YAML configuration files, for example you could have a main configuration file `config.xml` and additional configuration files `config.d/network.xml`, `config.d/timezone.yaml` and `config.d/keeper.yaml`. Mixing XML and YAML within a single configuration file is not supported. XML configuration files should use `<clickhouse>...</clickhouse>` as top-level tag. In YAML configuration files, `clickhouse:` is optional, the parser inserts it implicitly if absent.

@@ -36,7 +36,7 @@ which is equal to
<clickhouse>
<profiles>
<default>
<max_query_size/>150000</max_query_size>
<max_query_size>150000</max_query_size>
</default>
</profiles>
</clickhouse>

@@ -63,7 +63,7 @@ XML substitution example:
</clickhouse>
```

Substitutions can also be performed from ZooKeeper. To do this, specify the attribute `from_zk = "/path/to/node"`. The element value is replaced with the contents of the node at `/path/to/node` in ZooKeeper. You can also put an entire XML subtree on the ZooKeeper node and it will be fully inserted into the source element.
Substitutions can also be performed from ZooKeeper. To do this, specify the attribute `from_zk = "/path/to/node"`. The element value is replaced with the contents of the node at `/path/to/node` in ZooKeeper. You can also put an entire XML subtree on the ZooKeeper node, and it will be fully inserted into the source element.

## Encrypting and Hiding Configuration {#encryption}

@@ -1775,6 +1775,10 @@ Default value: 0 (no restriction).

## insert_quorum {#insert_quorum}

:::note
`insert_quorum` does not apply when using the [`SharedMergeTree` table engine](/en/cloud/reference/shared-merge-tree) in ClickHouse Cloud as all inserts are quorum inserted.
:::

Enables the quorum writes.

- If `insert_quorum < 2`, the quorum writes are disabled.

@@ -1814,6 +1818,10 @@ See also:

## insert_quorum_parallel {#insert_quorum_parallel}

:::note
`insert_quorum_parallel` does not apply when using the [`SharedMergeTree` table engine](/en/cloud/reference/shared-merge-tree) in ClickHouse Cloud as all inserts are quorum inserted.
:::

Enables or disables parallelism for quorum `INSERT` queries. If enabled, additional `INSERT` queries can be sent while previous queries have not yet finished. If disabled, additional writes to the same table will be rejected.

Possible values:

@@ -49,6 +49,6 @@ Every job has a pool associated with it and is started in this pool. Each pool h

Time instants during job lifetime:
- `schedule_time` (`DateTime64`) - Time when job was created and scheduled to be executed (usually with all its dependencies).
- `enqueue_time` (`Nullable(DateTime64)`) - Time when job became ready and was enqueued into a ready queue of it's pool. Null if the job is not ready yet.
- `enqueue_time` (`Nullable(DateTime64)`) - Time when job became ready and was enqueued into a ready queue of its pool. Null if the job is not ready yet.
- `start_time` (`Nullable(DateTime64)`) - Time when worker dequeues the job from ready queue and start its execution. Null if the job is not started yet.
- `finish_time` (`Nullable(DateTime64)`) - Time when job execution is finished. Null if the job is not finished yet.

@@ -297,11 +297,11 @@ Total number of databases on the server.

### NumberOfDetachedByUserParts

The total number of parts detached from MergeTree tables by users with the `ALTER TABLE DETACH` query (as opposed to unexpected, broken or ignored parts). The server does not care about detached parts and they can be removed.
The total number of parts detached from MergeTree tables by users with the `ALTER TABLE DETACH` query (as opposed to unexpected, broken or ignored parts). The server does not care about detached parts, and they can be removed.

### NumberOfDetachedParts

The total number of parts detached from MergeTree tables. A part can be detached by a user with the `ALTER TABLE DETACH` query or by the server itself it the part is broken, unexpected or unneeded. The server does not care about detached parts and they can be removed.
The total number of parts detached from MergeTree tables. A part can be detached by a user with the `ALTER TABLE DETACH` query or by the server itself it the part is broken, unexpected or unneeded. The server does not care about detached parts, and they can be removed.

### NumberOfTables

@@ -393,7 +393,7 @@ The amount of free memory plus OS page cache memory on the host system, in bytes

### OSMemoryFreeWithoutCached

The amount of free memory on the host system, in bytes. This does not include the memory used by the OS page cache memory, in bytes. The page cache memory is also available for usage by programs, so the value of this metric can be confusing. See the `OSMemoryAvailable` metric instead. For convenience we also provide the `OSMemoryFreePlusCached` metric, that should be somewhat similar to OSMemoryAvailable. See also https://www.linuxatemyram.com/. This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server.
The amount of free memory on the host system, in bytes. This does not include the memory used by the OS page cache memory, in bytes. The page cache memory is also available for usage by programs, so the value of this metric can be confusing. See the `OSMemoryAvailable` metric instead. For convenience, we also provide the `OSMemoryFreePlusCached` metric, that should be somewhat similar to OSMemoryAvailable. See also https://www.linuxatemyram.com/. This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server.

### OSMemoryTotal

@@ -493,7 +493,7 @@ Number of threads in the server of the PostgreSQL compatibility protocol.

### QueryCacheBytes

Total size of the query cache cache in bytes.
Total size of the query cache in bytes.

### QueryCacheEntries

@@ -549,7 +549,7 @@ Total amount of bytes (compressed, including data and indices) stored in all tab

### TotalPartsOfMergeTreeTables

Total amount of data parts in all tables of MergeTree family. Numbers larger than 10 000 will negatively affect the server startup time and it may indicate unreasonable choice of the partition key.
Total amount of data parts in all tables of MergeTree family. Numbers larger than 10 000 will negatively affect the server startup time, and it may indicate unreasonable choice of the partition key.

### TotalPrimaryKeyBytesInMemory

@@ -19,7 +19,7 @@ Columns:
- `default_database` ([String](../../sql-reference/data-types/string.md)) — The default database name.
- `errors_count` ([UInt32](../../sql-reference/data-types/int-uint.md)) — The number of times this host failed to reach replica.
- `slowdowns_count` ([UInt32](../../sql-reference/data-types/int-uint.md)) — The number of slowdowns that led to changing replica when establishing a connection with hedged requests.
- `estimated_recovery_time` ([UInt32](../../sql-reference/data-types/int-uint.md)) — Seconds remaining until the replica error count is zeroed and it is considered to be back to normal.
- `estimated_recovery_time` ([UInt32](../../sql-reference/data-types/int-uint.md)) — Seconds remaining until the replica error count is zeroed, and it is considered to be back to normal.
- `database_shard_name` ([String](../../sql-reference/data-types/string.md)) — The name of the `Replicated` database shard (for clusters that belong to a `Replicated` database).
- `database_replica_name` ([String](../../sql-reference/data-types/string.md)) — The name of the `Replicated` database replica (for clusters that belong to a `Replicated` database).
- `is_active` ([Nullable(UInt8)](../../sql-reference/data-types/int-uint.md)) — The status of the `Replicated` database replica (for clusters that belong to a `Replicated` database): 1 means "replica is online", 0 means "replica is offline", `NULL` means "unknown".

@@ -18,7 +18,7 @@ Columns:
- `LOADED_AND_RELOADING` — Dictionary is loaded successfully, and is being reloaded right now (frequent reasons: [SYSTEM RELOAD DICTIONARY](../../sql-reference/statements/system.md#query_language-system-reload-dictionary) query, timeout, dictionary config has changed).
- `FAILED_AND_RELOADING` — Could not load the dictionary as a result of an error and is loading now.
- `origin` ([String](../../sql-reference/data-types/string.md)) — Path to the configuration file that describes the dictionary.
- `type` ([String](../../sql-reference/data-types/string.md)) — Type of a dictionary allocation. [Storing Dictionaries in Memory](../../sql-reference/dictionaries/index.md#storig-dictionaries-in-memory).
- `type` ([String](../../sql-reference/data-types/string.md)) — Type of dictionary allocation. [Storing Dictionaries in Memory](../../sql-reference/dictionaries/index.md#storig-dictionaries-in-memory).
- `key.names` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) — Array of [key names](../../sql-reference/dictionaries/index.md#dictionary-key-and-fields#ext_dict_structure-key) provided by the dictionary.
- `key.types` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) — Corresponding array of [key types](../../sql-reference/dictionaries/index.md#dictionary-key-and-fields#ext_dict_structure-key) provided by the dictionary.
- `attribute.names` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) — Array of [attribute names](../../sql-reference/dictionaries/index.md#dictionary-key-and-fields#ext_dict_structure-attributes) provided by the dictionary.

@ -27,6 +27,8 @@ Columns:
|
||||
|
||||
- `metadata_modification_time` ([DateTime](../../sql-reference/data-types/datetime.md)) - Time of latest modification of the table metadata.
|
||||
|
||||
- `metadata_version` ([Int32](../../sql-reference/data-types/int-uint.md)) - Metadata version for ReplicatedMergeTree table, 0 for non ReplicatedMergeTree table.
|
||||
|
||||
- `dependencies_database` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) - Database dependencies.
|
||||
|
||||
- `dependencies_table` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) - Table dependencies ([materialized views](../../sql-reference/statements/create/view.md#materialized-view) the current table).
|
||||
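For example, a query against `system.tables` that surfaces the columns described above (a sketch; `metadata_version` is only meaningful for `ReplicatedMergeTree` tables and requires a server version that exposes it):

```sql
SELECT name, engine, metadata_modification_time, metadata_version
FROM system.tables
WHERE database = currentDatabase();
```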
|
@ -34,7 +34,7 @@ The binary you just downloaded can run all sorts of ClickHouse tools and utiliti
|
||||
|
||||
A common use of `clickhouse-local` is to run ad-hoc queries on files, without having to insert the data into a table. `clickhouse-local` can stream the data from a file into a temporary table and execute your SQL.
|
||||
|
||||
If the file is sitting on the same machine as `clickhouse-local`, you can simple specify the file to load. The following `reviews.tsv` file contains a sampling of Amazon product reviews:
|
||||
If the file is sitting on the same machine as `clickhouse-local`, you can simply specify the file to load. The following `reviews.tsv` file contains a sampling of Amazon product reviews:
|
||||
|
||||
```bash
|
||||
./clickhouse local -q "SELECT * FROM 'reviews.tsv'"
|
||||
@ -220,7 +220,7 @@ Arguments:
|
||||
- `--help` — arguments references for `clickhouse-local`.
|
||||
- `-V`, `--version` — print version information and exit.
|
||||
|
||||
Also there are arguments for each ClickHouse configuration variable which are more commonly used instead of `--config-file`.
|
||||
Also, there are arguments for each ClickHouse configuration variable which are more commonly used instead of `--config-file`.
|
||||
|
||||
|
||||
## Examples {#examples}
|
||||
|
@ -38,7 +38,7 @@ For example, you have a column `IsMobile` in your table with values 0 and 1. In
|
||||
|
||||
So, the user will be able to count the exact ratio of mobile traffic.
|
||||
|
||||
Let's give another example. When you have some private data in your table, like user email and you don't want to publish any single email address.
|
||||
Let's give another example. When you have some private data in your table, like user email, and you don't want to publish any single email address.
|
||||
If your table is large enough and contains multiple different emails, and no single email has a much higher frequency than the others, it will anonymize all data. But if you have a small number of different values in a column, it can reproduce some of them.
|
||||
You should look at how the algorithm of this tool works, and fine-tune its command line parameters.
|
||||
|
||||
|
@ -9,7 +9,7 @@ Selects the first encountered value of a column.
|
||||
|
||||
By default, it ignores NULL values and returns the first non-NULL value found in the column. Like [`first_value`](../../../sql-reference/aggregate-functions/reference/first_value.md), it supports `RESPECT NULLS`, in which case it will select the first value passed, regardless of whether it is NULL or not.
|
||||
|
||||
The return type of the function is the same as the input, except for LowCardinality which is discarded). This means that given no rows as input it will return the default value of that type (0 for integers, or Null for a Nullable() column). You might use the `-OrNull` [combinator](../../../sql-reference/aggregate-functions/combinators.md) ) to modify this behaviour.
|
||||
The return type of the function is the same as the input, except for LowCardinality which is discarded. This means that given no rows as input it will return the default value of that type (0 for integers, or Null for a Nullable() column). You might use the `-OrNull` [combinator](../../../sql-reference/aggregate-functions/combinators.md) to modify this behaviour.
|
||||
|
||||
The query can be executed in any order and even in a different order each time, so the result of this function is indeterminate.
|
||||
To get a determinate result, you can use the `min` or `max` function instead of `any`.
|
||||
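A small illustration of the behaviour described above (a sketch; the `vals` table is hypothetical, and `RESPECT NULLS` requires a server version that supports the modifier):

```sql
CREATE TABLE vals (x Nullable(Int32)) ENGINE = Memory;
INSERT INTO vals VALUES (NULL), (42), (7);

SELECT any(x) FROM vals;                  -- typically 42: the first non-NULL value (order is formally indeterminate)
SELECT any(x) RESPECT NULLS FROM vals;    -- typically NULL: the first value passed, whether NULL or not
SELECT anyOrNull(x) FROM vals WHERE 0;    -- NULL: no input rows, -OrNull avoids the type's default value
```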
|
@ -20,7 +20,7 @@ contingency(column1, column2)
|
||||
|
||||
**Returned value**
|
||||
|
||||
- a value between 0 to 1. The larger the result, the closer the association of the two columns.
|
||||
- a value between 0 and 1. The larger the result, the closer the association of the two columns.
|
||||
|
||||
**Return type** is always [Float64](../../../sql-reference/data-types/float.md).
|
||||
|
||||
@ -48,4 +48,4 @@ Result:
|
||||
┌──────cramersV(a, b)─┬───contingency(a, b)─┐
|
||||
│ 0.41171788506213564 │ 0.05812725261759165 │
|
||||
└─────────────────────┴─────────────────────┘
|
||||
```
|
||||
```
|
||||
|
@ -9,7 +9,7 @@ sidebar_label: DateTime64
|
||||
Allows storing an instant in time that can be expressed as a calendar date and a time of day, with defined sub-second precision.
|
||||
|
||||
Tick size (precision): 10<sup>-precision</sup> seconds. Valid range: [ 0 : 9 ].
|
||||
Typically are used - 3 (milliseconds), 6 (microseconds), 9 (nanoseconds).
|
||||
Typically used values are 3 (milliseconds), 6 (microseconds), and 9 (nanoseconds).
|
||||
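For instance, a column with millisecond precision (a minimal sketch; the table and values are illustrative only, and the formal syntax follows below):

```sql
CREATE TABLE events
(
    ts DateTime64(3, 'UTC'),   -- tick size 10^-3 s, i.e. milliseconds
    message String
)
ENGINE = MergeTree
ORDER BY ts;

INSERT INTO events VALUES (toDateTime64('2024-01-01 12:00:00.123', 3, 'UTC'), 'hello');
```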
|
||||
**Syntax:**
|
||||
|
||||
|
@ -10,7 +10,7 @@ Signed fixed-point numbers that keep precision during add, subtract and multiply
|
||||
|
||||
## Parameters
|
||||
|
||||
- P - precision. Valid range: \[ 1 : 76 \]. Determines how many decimal digits number can have (including fraction). By default the precision is 10.
|
||||
- P - precision. Valid range: \[ 1 : 76 \]. Determines how many decimal digits the number can have (including the fraction). By default, the precision is 10.
|
||||
- S - scale. Valid range: \[ 0 : P \]. Determines how many decimal digits the fraction can have.
|
||||
|
||||
Decimal(P) is equivalent to Decimal(P, 0). Similarly, the syntax Decimal is equivalent to Decimal(10, 0).
|
||||
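As a quick illustration of why the scale matters (a sketch with a hypothetical `money` table):

```sql
CREATE TABLE money (amount Decimal(18, 4)) ENGINE = Memory;
INSERT INTO money VALUES (42.4242), (0.0001);

SELECT sum(amount) FROM money;   -- 42.4243, exact to 4 decimal digits, no floating-point drift
```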
|
@ -6,7 +6,7 @@ sidebar_label: Distributed DDL
|
||||
|
||||
# Distributed DDL Queries (ON CLUSTER Clause)
|
||||
|
||||
By default the `CREATE`, `DROP`, `ALTER`, and `RENAME` queries affect only the current server where they are executed. In a cluster setup, it is possible to run such queries in a distributed manner with the `ON CLUSTER` clause.
|
||||
By default, the `CREATE`, `DROP`, `ALTER`, and `RENAME` queries affect only the current server where they are executed. In a cluster setup, it is possible to run such queries in a distributed manner with the `ON CLUSTER` clause.
|
||||
|
||||
For example, the following query creates the `all_hits` `Distributed` table on each host in `cluster`:
|
||||
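The query itself is not shown in this excerpt; a plausible sketch, assuming a local `default.hits` table already exists on every host of `cluster`, would be:

```sql
CREATE TABLE IF NOT EXISTS all_hits ON CLUSTER cluster (p Date, i Int32)
ENGINE = Distributed(cluster, default, hits);
```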
|
||||
|
@ -372,7 +372,7 @@ Result:
|
||||
|
||||
## bitmapAnd
|
||||
|
||||
Computes the logical conjunction of two two bitmaps.
|
||||
Computes the logical conjunction of two bitmaps.
|
||||
|
||||
**Syntax**
|
||||
|
||||
|
@ -1564,7 +1564,7 @@ Alias: `TO_DAYS`
|
||||
**Arguments**
|
||||
|
||||
- `date` — The date from which to calculate the number of days passed since year zero. [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md).
|
||||
- `time_zone` — A String type const value or a expression represent the time zone. [String types](../../sql-reference/data-types/string.md)
|
||||
- `time_zone` — A String type const value or an expression representing the time zone. [String types](../../sql-reference/data-types/string.md)
|
||||
|
||||
**Returned value**
|
||||
|
||||
@ -2218,7 +2218,7 @@ now64([scale], [timezone])
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `scale` - Tick size (precision): 10<sup>-precision</sup> seconds. Valid range: [ 0 : 9 ]. Typically are used - 3 (default) (milliseconds), 6 (microseconds), 9 (nanoseconds).
|
||||
- `scale` - Tick size (precision): 10<sup>-precision</sup> seconds. Valid range: [ 0 : 9 ]. Typically used values are 3 (default, milliseconds), 6 (microseconds), and 9 (nanoseconds).
|
||||
- `timezone` — [Timezone name](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) for the returned value (optional). [String](../../sql-reference/data-types/string.md).
|
||||
|
||||
**Returned value**
|
||||
@ -2305,7 +2305,7 @@ Rounds the time to the half hour.
|
||||
|
||||
Converts a date or date with time to a UInt32 number containing the year and month number (YYYY \* 100 + MM). Accepts a second optional timezone argument. If provided, the timezone must be a string constant.
|
||||
|
||||
This functions is the opposite of function `YYYYMMDDToDate()`.
|
||||
This function is the opposite of function `YYYYMMDDToDate()`.
|
||||
|
||||
**Example**
|
||||
|
||||
@ -2362,7 +2362,7 @@ Result:
|
||||
|
||||
Converts a number containing the year, month and day number to a [Date](../../sql-reference/data-types/date.md).
|
||||
|
||||
This functions is the opposite of function `toYYYYMMDD()`.
|
||||
This function is the opposite of function `toYYYYMMDD()`.
|
||||
|
||||
The output is undefined if the input does not encode a valid Date value.
|
||||
|
||||
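A short example of the round trip with `toYYYYMMDD` (a sketch, assuming a server version that ships `YYYYMMDDToDate`):

```sql
SELECT
    toYYYYMMDD(toDate('2024-02-14')) AS encoded,   -- 20240214
    YYYYMMDDToDate(20240214)         AS decoded;   -- 2024-02-14
```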
@ -2406,7 +2406,7 @@ Converts a number containing the year, month, day, hours, minute and second numb
|
||||
|
||||
The output is undefined if the input does not encode a valid DateTime value.
|
||||
|
||||
This functions is the opposite of function `toYYYYMMDDhhmmss()`.
|
||||
This function is the opposite of function `toYYYYMMDDhhmmss()`.
|
||||
|
||||
**Syntax**
|
||||
|
||||
@ -2981,8 +2981,8 @@ toUTCTimestamp(time_val, time_zone)
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `time_val` — A DateTime/DateTime64 type const value or a expression . [DateTime/DateTime64 types](../../sql-reference/data-types/datetime.md)
|
||||
- `time_zone` — A String type const value or a expression represent the time zone. [String types](../../sql-reference/data-types/string.md)
|
||||
- `time_val` — A DateTime/DateTime64 type const value or an expression. [DateTime/DateTime64 types](../../sql-reference/data-types/datetime.md)
|
||||
- `time_zone` — A String type const value or an expression representing the time zone. [String types](../../sql-reference/data-types/string.md)
|
||||
|
||||
**Returned value**
|
||||
|
||||
@ -3014,8 +3014,8 @@ fromUTCTimestamp(time_val, time_zone)
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `time_val` — A DateTime/DateTime64 type const value or a expression . [DateTime/DateTime64 types](../../sql-reference/data-types/datetime.md)
|
||||
- `time_zone` — A String type const value or a expression represent the time zone. [String types](../../sql-reference/data-types/string.md)
|
||||
- `time_val` — A DateTime/DateTime64 type const value or an expression. [DateTime/DateTime64 types](../../sql-reference/data-types/datetime.md)
|
||||
- `time_zone` — A String type const value or an expression representing the time zone. [String types](../../sql-reference/data-types/string.md)
|
||||
|
||||
**Returned value**
|
||||
|
||||
|
@ -4,6 +4,8 @@ sidebar_position: 170
|
||||
sidebar_label: Strings
|
||||
---
|
||||
|
||||
import VersionBadge from '@theme/badges/VersionBadge';
|
||||
|
||||
# Functions for Working with Strings
|
||||
|
||||
Functions for [searching](string-search-functions.md) in strings and for [replacing](string-replace-functions.md) in strings are described separately.
|
||||
@ -783,6 +785,8 @@ SELECT startsWith('Spider-Man', 'Spi');
|
||||
|
||||
## startsWithUTF8
|
||||
|
||||
<VersionBadge minVersion='23.8' />
|
||||
|
||||
Returns whether string `str` starts with `prefix`. The difference between `startsWithUTF8` and `startsWith` is that `startsWithUTF8` matches `str` and `prefix` by UTF-8 characters.
|
||||
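A brief comparison of the two functions (a sketch; results assume ClickHouse 23.8 or later for `startsWithUTF8`):

```sql
SELECT
    startsWithUTF8('中国人民', '中国') AS by_code_points,  -- 1
    startsWith('中国人民', '中国')     AS by_bytes;        -- also 1, but the comparison is byte-wise
```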
|
||||
|
||||
|
@ -10,7 +10,7 @@ sidebar_label: APPLY DELETED MASK
|
||||
ALTER TABLE [db].name [ON CLUSTER cluster] APPLY DELETED MASK [IN PARTITION partition_id]
|
||||
```
|
||||
|
||||
The command applies mask created by [lightweight delete](/docs/en/sql-reference/statements/delete) and forcefully removes rows marked as deleted from disk. This command is a heavyweight mutation and it semantically equals to query ```ALTER TABLE [db].name DELETE WHERE _row_exists = 0```.
|
||||
The command applies the mask created by [lightweight delete](/docs/en/sql-reference/statements/delete) and forcefully removes rows marked as deleted from disk. This command is a heavyweight mutation, and it is semantically equivalent to the query ```ALTER TABLE [db].name DELETE WHERE _row_exists = 0```.
|
||||
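A typical sequence, sketched with a hypothetical `hits` table: mark rows with a lightweight delete, then force their physical removal:

```sql
DELETE FROM hits WHERE Title LIKE '%hello%';   -- lightweight delete: rows are only masked
ALTER TABLE hits APPLY DELETED MASK;           -- heavyweight mutation: masked rows are removed from disk
```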
|
||||
:::note
|
||||
It only works for tables in the [`MergeTree`](../../../engines/table-engines/mergetree-family/mergetree.md) family (including [replicated](../../../engines/table-engines/mergetree-family/replication.md) tables).
|
||||
|
@ -15,7 +15,7 @@ ALTER TABLE [db].name [ON CLUSTER cluster] DROP CONSTRAINT constraint_name;
|
||||
|
||||
See more on [constraints](../../../sql-reference/statements/create/table.md#constraints).
|
||||
|
||||
Queries will add or remove metadata about constraints from table so they are processed immediately.
|
||||
Queries will add or remove metadata about constraints from table, so they are processed immediately.
|
||||
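For example, adding and then dropping a constraint on a hypothetical `users` table (a sketch; note that existing rows are not rechecked):

```sql
ALTER TABLE users ADD CONSTRAINT c_non_negative_age CHECK age >= 0;
ALTER TABLE users DROP CONSTRAINT c_non_negative_age;
```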
|
||||
:::tip
|
||||
Constraint check **will not be executed** on existing data if it was added.
|
||||
|
@ -16,13 +16,13 @@ DETACH TABLE|VIEW|DICTIONARY|DATABASE [IF EXISTS] [db.]name [ON CLUSTER cluster]
|
||||
Detaching does not delete the data or metadata of a table, a materialized view, a dictionary or a database. If an entity was not detached `PERMANENTLY`, on the next server launch the server will read the metadata and recall the table/view/dictionary/database again. If an entity was detached `PERMANENTLY`, there will be no automatic recall.
|
||||
|
||||
Whether a table, a dictionary or a database was detached permanently or not, in both cases you can reattach them using the [ATTACH](../../sql-reference/statements/attach.md) query.
|
||||
System log tables can be also attached back (e.g. `query_log`, `text_log`, etc). Other system tables can't be reattached. On the next server launch the server will recall those tables again.
|
||||
System log tables can also be attached back (e.g. `query_log`, `text_log`, etc.). Other system tables can't be reattached. On the next server launch, the server will recall those tables again.
|
||||
|
||||
`ATTACH MATERIALIZED VIEW` does not work with short syntax (without `SELECT`), but you can attach it using the `ATTACH TABLE` query.
|
||||
|
||||
Note that you can not permanently detach a table which is already detached (temporarily). But you can attach it back and then detach it permanently again.
|
||||
|
||||
Also you can not [DROP](../../sql-reference/statements/drop.md#drop-table) the detached table, or [CREATE TABLE](../../sql-reference/statements/create/table.md) with the same name as detached permanently, or replace it with the other table with [RENAME TABLE](../../sql-reference/statements/rename.md) query.
|
||||
Also, you can not [DROP](../../sql-reference/statements/drop.md#drop-table) the detached table, or [CREATE TABLE](../../sql-reference/statements/create/table.md) with the same name as detached permanently, or replace it with the other table with [RENAME TABLE](../../sql-reference/statements/rename.md) query.
|
||||
|
||||
The `SYNC` modifier executes the action without delay.
|
||||
|
||||
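A minimal illustration with a hypothetical `hits` table:

```sql
DETACH TABLE hits PERMANENTLY;   -- the table will not be recalled on the next server launch
ATTACH TABLE hits;               -- reattach it explicitly when needed
```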
|
@ -204,6 +204,20 @@ Result:
|
||||
└─────┴───────────────────────┘
|
||||
```
|
||||
|
||||
## Inserts into ClickHouse Cloud
|
||||
|
||||
By default, services on ClickHouse Cloud provide multiple replicas for high availability. When you connect to a service, a connection is established to one of these replicas.
|
||||
|
||||
After an `INSERT` succeeds, data is written to the underlying storage. However, it may take some time for replicas to receive these updates. Therefore, if you use a different connection that executes a `SELECT` query on one of these other replicas, the updated data may not yet be reflected.
|
||||
|
||||
It is possible to use the `select_sequential_consistency` setting to force the replica to receive the latest updates. Here is an example of a `SELECT` query using this setting:
|
||||
|
||||
```sql
|
||||
SELECT .... SETTINGS select_sequential_consistency = 1;
|
||||
```
|
||||
|
||||
Note that using `select_sequential_consistency` will increase the load on ClickHouse Keeper (used by ClickHouse Cloud internally) and may result in slower performance depending on the load on the service. We recommend against enabling this setting unless necessary. The recommended approach is to execute read/writes in the same session or to use a client driver that uses the native protocol (and thus supports sticky connections).
|
||||
|
||||
## Performance Considerations
|
||||
|
||||
`INSERT` sorts the input data by primary key and splits it into partitions by a partition key. If you insert data into several partitions at once, it can significantly reduce the performance of the `INSERT` query. To avoid this:
|
||||
|
@ -5,7 +5,7 @@ sidebar_label: DISTINCT
|
||||
|
||||
# DISTINCT Clause
|
||||
|
||||
If `SELECT DISTINCT` is specified, only unique rows will remain in a query result. Thus only a single row will remain out of all the sets of fully matching rows in the result.
|
||||
If `SELECT DISTINCT` is specified, only unique rows will remain in a query result. Thus, only a single row will remain out of all the sets of fully matching rows in the result.
|
||||
|
||||
You can specify the list of columns that must have unique values: `SELECT DISTINCT ON (column1, column2,...)`. If the columns are not specified, all of them are taken into consideration.
|
||||
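For example, keeping one arbitrary row per `(a, b)` pair from a hypothetical table `t` (a sketch):

```sql
SELECT DISTINCT ON (a, b) a, b, c
FROM t;
```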
|
||||
|
@ -63,7 +63,7 @@ ClickHouse — полноценная столбцовая СУБД. Данны
|
||||
|
||||
Для байт-ориентированного ввода-вывода существуют абстрактные классы `ReadBuffer` и `WriteBuffer`. Они используются вместо `iostream`. Не волнуйтесь: каждый зрелый проект C++ использует что-то другое вместо `iostream` по уважительным причинам.
|
||||
|
||||
`ReadBuffer` и `WriteBuffer` — это просто непрерывный буфер и курсор, указывающий на позицию в этом буфере. Реализации могут как владеть так и не владеть памятью буфера. Существует виртуальный метод заполнения буфера следующими данными (для `ReadBuffer`) или сброса буфера куда-нибудь (например `WriteBuffer`). Виртуальные методы редко вызываются.
|
||||
`ReadBuffer` и `WriteBuffer` — это просто непрерывный буфер и курсор, указывающий на позицию в этом буфере. Реализации могут как владеть, так и не владеть памятью буфера. Существует виртуальный метод заполнения буфера следующими данными (для `ReadBuffer`) или сброса буфера куда-нибудь (например `WriteBuffer`). Виртуальные методы редко вызываются.
|
||||
|
||||
Реализации `ReadBuffer`/`WriteBuffer` используются для работы с файлами и файловыми дескрипторами, а также сетевыми сокетами, для реализации сжатия (`CompressedWriteBuffer` инициализируется вместе с другим `WriteBuffer` и осуществляет сжатие данных перед записью в него), и для других целей – названия `ConcatReadBuffer`, `LimitReadBuffer`, и `HashingWriteBuffer` говорят сами за себя.
|
||||
|
||||
|
@ -71,7 +71,7 @@ ClickHouse не работает и не собирается на 32-битны
|
||||
Please make sure you have the correct access rights
|
||||
and the repository exists.
|
||||
|
||||
Как правило это означает, что отсутствуют ssh ключи для соединения с GitHub. Ключи расположены в директории `~/.ssh`. В интерфейсе GitHub, в настройках, необходимо загрузить публичные ключи, чтобы он их понимал.
|
||||
Как правило, это означает, что отсутствуют ssh ключи для соединения с GitHub. Ключи расположены в директории `~/.ssh`. В интерфейсе GitHub, в настройках, необходимо загрузить публичные ключи, чтобы он их понимал.
|
||||
|
||||
Вы также можете клонировать репозиторий по протоколу https:
|
||||
|
||||
@ -199,7 +199,7 @@ sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)"
|
||||
|
||||
В случае успешного запуска, вы увидите прогресс сборки - количество обработанных задач и общее количество задач.
|
||||
|
||||
В процессе сборки могут появится сообщения `libprotobuf WARNING` про protobuf файлы в библиотеке libhdfs2. Это не имеет значения.
|
||||
В процессе сборки могут появиться сообщения `libprotobuf WARNING` про protobuf файлы в библиотеке libhdfs2. Это не имеет значения.
|
||||
|
||||
При успешной сборке, вы получите готовый исполняемый файл `ClickHouse/build/programs/clickhouse`:
|
||||
|
||||
@ -207,7 +207,7 @@ sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)"
|
||||
|
||||
## Запуск собранной версии ClickHouse {#zapusk-sobrannoi-versii-clickhouse}
|
||||
|
||||
Для запуска сервера из под текущего пользователя, с выводом логов в терминал и с использованием примеров конфигурационных файлов, расположенных в исходниках, перейдите в директорию `ClickHouse/programs/server/` (эта директория находится не в директории build) и выполните:
|
||||
Для запуска сервера из-под текущего пользователя, с выводом логов в терминал и с использованием примеров конфигурационных файлов, расположенных в исходниках, перейдите в директорию `ClickHouse/programs/server/` (эта директория находится не в директории build) и выполните:
|
||||
|
||||
../../build/programs/clickhouse server
|
||||
|
||||
|
@ -37,7 +37,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
|
||||
|
||||
**Секции запроса**
|
||||
|
||||
При создании таблицы с движком `CollapsingMergeTree` используются те же [секции запроса](mergetree.md#table_engine-mergetree-creating-a-table) что и при создании таблицы с движком `MergeTree`.
|
||||
При создании таблицы с движком `CollapsingMergeTree` используются те же [секции запроса](mergetree.md#table_engine-mergetree-creating-a-table), что и при создании таблицы с движком `MergeTree`.
|
||||
|
||||
<details markdown="1">
|
||||
|
||||
|
@ -42,7 +42,7 @@ CREATE TABLE merge.hits_buffer AS merge.hits ENGINE = Buffer(merge, hits, 16, 10
|
||||
В качестве имени базы данных и имени таблицы можно указать пустые строки в одинарных кавычках. Это обозначает отсутствие таблицы назначения. В таком случае, при достижении условий на сброс данных, буфер будет просто очищаться. Это может быть полезным, чтобы хранить в оперативке некоторое окно данных.
|
||||
|
||||
При чтении из таблицы типа Buffer, будут обработаны данные, как находящиеся в буфере, так и данные из таблицы назначения (если такая есть).
|
||||
Но следует иметь ввиду, что таблица Buffer не поддерживает индекс. То есть, данные в буфере будут просканированы полностью, что может быть медленно для буферов большого размера. (Для данных в подчинённой таблице, будет использоваться тот индекс, который она поддерживает.)
|
||||
Но следует иметь в виду, что таблица Buffer не поддерживает индекс. То есть, данные в буфере будут просканированы полностью, что может быть медленно для буферов большого размера. (Для данных в подчинённой таблице, будет использоваться тот индекс, который она поддерживает.)
|
||||
|
||||
Если множество столбцов таблицы Buffer не совпадает с множеством столбцов подчинённой таблицы, то будет вставлено подмножество столбцов, которое присутствует в обеих таблицах.
|
||||
|
||||
@ -66,4 +66,4 @@ CREATE TABLE merge.hits_buffer AS merge.hits ENGINE = Buffer(merge, hits, 16, 10
|
||||
|
||||
Таблицы типа Buffer используются в тех случаях, когда от большого количества серверов поступает слишком много INSERT-ов в единицу времени, и нет возможности заранее самостоятельно буферизовать данные перед вставкой, в результате чего, INSERT-ы не успевают выполняться.
|
||||
|
||||
Заметим, что даже для таблиц типа Buffer не имеет смысла вставлять данные по одной строке, так как таким образом будет достигнута скорость всего лишь в несколько тысяч строк в секунду, тогда как при вставке более крупными блоками, достижимо более миллиона строк в секунду (смотрите раздел [«Производительность»](../../../introduction/performance.md).
|
||||
Заметим, что даже для таблиц типа Buffer не имеет смысла вставлять данные по одной строке, так как таким образом будет достигнута скорость всего лишь в несколько тысяч строк в секунду, тогда как при вставке более крупными блоками, достижимо более миллиона строк в секунду (смотрите раздел [«Производительность»](../../../introduction/performance.md)).
|
||||
|
@ -177,11 +177,11 @@ URI позволяет подключаться к нескольким хост
|
||||
|
||||
|
||||
|
||||
Строка подключения должна быть указана в первом аргументе clickhouse-client. Строка подключения может комбинироваться с другими [параметрами командной строки] (#command-line-options) кроме `--host/-h` и `--port`.
|
||||
Строка подключения должна быть указана в первом аргументе clickhouse-client. Строка подключения может комбинироваться с другими [параметрами командной строки](#command-line-options) кроме `--host/-h` и `--port`.
|
||||
|
||||
Для компонента `query_parameter` разрешены следующие ключи:
|
||||
|
||||
- `secure` или сокращенно `s` - без значение. Если параметр указан, то соединение с сервером будет осуществляться по защищенному каналу (TLS). См. `secure` в [command-line-options](#command-line-options).
|
||||
- `secure` или сокращенно `s` - без значения. Если параметр указан, то соединение с сервером будет осуществляться по защищенному каналу (TLS). См. `secure` в [command-line-options](#command-line-options).
|
||||
|
||||
### Кодирование URI {#connection_string_uri_percent_encoding}
|
||||
|
||||
@ -206,7 +206,7 @@ clickhouse-client clickhouse://john:secret@127.0.0.1:9000
|
||||
clickhouse-client clickhouse://[::1]:9000
|
||||
```
|
||||
|
||||
Подключиться к localhost через порт 9000 многострочном режиме.
|
||||
Подключиться к localhost через порт 9000 в многострочном режиме.
|
||||
|
||||
``` bash
|
||||
clickhouse-client clickhouse://localhost:9000 '-m'
|
||||
|
@ -69,7 +69,7 @@ ClickHouse Keeper может использоваться как равноце
|
||||
|
||||
|
||||
:::note
|
||||
В случае изменения топологии кластера ClickHouse Keeper(например, замены сервера), удостоверьтесь, что вы сохраняеете отношение `server_id` - `hostname`, не переиспользуете существующие `server_id` для для новых серверов и не перемешиваете идентификаторы. Подобные ошибки могут случаться, если вы используете автоматизацию при разворачивании кластера без логики сохранения идентификаторов.
|
||||
В случае изменения топологии кластера ClickHouse Keeper (например, замены сервера), удостоверьтесь, что вы сохраняете отношение `server_id` - `hostname`, не переиспользуете существующие `server_id` для новых серверов и не перемешиваете идентификаторы. Подобные ошибки могут случаться, если вы используете автоматизацию при разворачивании кластера без логики сохранения идентификаторов.
|
||||
:::
|
||||
|
||||
Примеры конфигурации кворума с тремя узлами можно найти в [интеграционных тестах](https://github.com/ClickHouse/ClickHouse/tree/master/tests/integration) с префиксом `test_keeper_`. Пример конфигурации для сервера №1:
|
||||
@ -337,7 +337,7 @@ clickhouse-keeper-converter --zookeeper-logs-dir /var/lib/zookeeper/version-2 --
|
||||
|
||||
После того, как выполнили действия выше выполните следующие шаги.
|
||||
1. Выберите одну ноду Keeper, которая станет новым лидером. Учтите, что данные с этой ноды будут использованы всем кластером, поэтому рекомендуется выбрать ноду с наиболее актуальным состоянием.
|
||||
2. Перед дальнейшими действиям сделайте резервную копию данных из директорий `log_storage_path` и `snapshot_storage_path`.
|
||||
2. Перед дальнейшими действиями сделайте резервную копию данных из директорий `log_storage_path` и `snapshot_storage_path`.
|
||||
3. Измените настройки на всех нодах кластера, которые вы собираетесь использовать.
|
||||
4. Отправьте команду `rcvr` на ноду, которую вы выбрали, или остановите ее и запустите заново с аргументом `--force-recovery`. Это переведет ноду в режим восстановления.
|
||||
5. Запускайте остальные ноды кластера по одной и проверяйте, что команда `mntr` возвращает `follower` в выводе состояния `zk_server_state` перед тем, как запустить следующую ноду.
|
||||
|
@ -89,7 +89,7 @@ $ cat /etc/clickhouse-server/users.d/alice.xml
|
||||
|
||||
Вы можете использовать симметричное шифрование для шифрования элемента конфигурации, например, поля password. Чтобы это сделать, сначала настройте [кодек шифрования](../sql-reference/statements/create/table.md#encryption-codecs), затем добавьте атрибут `encrypted_by` с именем кодека шифрования как значение к элементу, который надо зашифровать.
|
||||
|
||||
В отличии от аттрибутов `from_zk`, `from_env` и `incl` (или элемента `include`), подстановка, т.е. расшифровка зашифрованного значения, не выподняется в файле предобработки. Расшифровка происходит только во время исполнения в серверном процессе.
|
||||
В отличие от атрибутов `from_zk`, `from_env` и `incl` (или элемента `include`), подстановка, т.е. расшифровка зашифрованного значения, не выполняется в файле предобработки. Расшифровка происходит только во время исполнения в серверном процессе.
|
||||
|
||||
Пример:
|
||||
|
||||
@ -110,7 +110,7 @@ $ cat /etc/clickhouse-server/users.d/alice.xml
|
||||
</clickhouse>
|
||||
```
|
||||
|
||||
Чтобы получить зашифрованное значение может быть использовано приложение-пример `encrypt_decrypt` .
|
||||
Чтобы получить зашифрованное значение, может быть использовано приложение-пример `encrypt_decrypt`.
|
||||
|
||||
Пример:
|
||||
|
||||
|
@ -50,7 +50,7 @@ clickhouse-benchmark [keys] < queries_file;
|
||||
- `-r`, `--randomize` — использовать случайный порядок выполнения запросов при наличии более одного входного запроса.
|
||||
- `-s`, `--secure` — используется `TLS` соединение.
|
||||
- `-t N`, `--timelimit=N` — лимит по времени в секундах. `clickhouse-benchmark` перестает отправлять запросы при достижении лимита по времени. Значение по умолчанию: 0 (лимит отключен).
|
||||
- `--confidence=N` — уровень доверия для T-критерия. Возможные значения: 0 (80%), 1 (90%), 2 (95%), 3 (98%), 4 (99%), 5 (99.5%). Значение по умолчанию: 5. В [режиме сравнения](#clickhouse-benchmark-comparison-mode) `clickhouse-benchmark` проверяет [двухвыборочный t-критерий Стьюдента для независимых выборок](https://en.wikipedia.org/wiki/Student%27s_t-test#Independent_two-sample_t-test) чтобы определить, различны ли две выборки при выбранном уровне доверия.
|
||||
- `--confidence=N` — уровень доверия для T-критерия. Возможные значения: 0 (80%), 1 (90%), 2 (95%), 3 (98%), 4 (99%), 5 (99.5%). Значение по умолчанию: 5. В [режиме сравнения](#clickhouse-benchmark-comparison-mode) `clickhouse-benchmark` проверяет [двухвыборочный t-критерий Стьюдента для независимых выборок](https://en.wikipedia.org/wiki/Student%27s_t-test#Independent_two-sample_t-test), чтобы определить, различны ли две выборки при выбранном уровне доверия.
|
||||
- `--cumulative` — выводить статистику за все время работы, а не за последний временной интервал.
|
||||
- `--database=DATABASE_NAME` — имя базы данных ClickHouse. Значение по умолчанию: `default`.
|
||||
- `--json=FILEPATH` — дополнительный вывод в формате `JSON`. Когда этот ключ указан, `clickhouse-benchmark` выводит отчет в указанный JSON-файл.
|
||||
|
@ -33,7 +33,7 @@ ClickHouse отображает значения в зависимости от
|
||||
|
||||
## Примеры {#primery}
|
||||
|
||||
**1.** Создание таблицы с столбцом типа `DateTime` и вставка данных в неё:
|
||||
**1.** Создание таблицы со столбцом типа `DateTime` и вставка данных в неё:
|
||||
|
||||
``` sql
|
||||
CREATE TABLE dt
|
||||
|
@ -172,7 +172,7 @@ multiplyDecimal(a, b[, result_scale])
|
||||
```
|
||||
|
||||
:::note
|
||||
Эта функция работают гораздо медленнее обычной `multiply`.
|
||||
Эта функция работает гораздо медленнее обычной `multiply`.
|
||||
В случае, если нет необходимости иметь фиксированную точность и/или нужны быстрые вычисления, следует использовать [multiply](#multiply).
|
||||
:::
|
||||
|
||||
|
@ -488,7 +488,7 @@ arrayPushBack(array, single_value)
|
||||
**Аргументы**
|
||||
|
||||
- `array` – массив.
|
||||
- `single_value` – значение добавляемого элемента. В массив с числам можно добавить только числа, в массив со строками только строки. При добавлении чисел ClickHouse автоматически приводит тип `single_value` к типу данных массива. Подробнее о типах данных в ClickHouse читайте в разделе «[Типы данных](../../sql-reference/functions/array-functions.md#data_types)». Может быть равно `NULL`, в этом случае функция добавит элемент `NULL` в массив, а тип элементов массива преобразует в `Nullable`.
|
||||
- `single_value` – значение добавляемого элемента. В массив с числами можно добавить только числа, в массив со строками только строки. При добавлении чисел ClickHouse автоматически приводит тип `single_value` к типу данных массива. Подробнее о типах данных в ClickHouse читайте в разделе «[Типы данных](../../sql-reference/functions/array-functions.md#data_types)». Может быть равно `NULL`, в этом случае функция добавит элемент `NULL` в массив, а тип элементов массива преобразует в `Nullable`.
|
||||
|
||||
**Пример**
|
||||
|
||||
@ -513,7 +513,7 @@ arrayPushFront(array, single_value)
|
||||
**Аргументы**
|
||||
|
||||
- `array` – массив.
|
||||
- `single_value` – значение добавляемого элемента. В массив с числам можно добавить только числа, в массив со строками только строки. При добавлении чисел ClickHouse автоматически приводит тип `single_value` к типу данных массива. Подробнее о типах данных в ClickHouse читайте в разделе «[Типы данных](../../sql-reference/functions/array-functions.md#data_types)». Может быть равно `NULL`, в этом случае функция добавит элемент `NULL` в массив, а тип элементов массива преобразует в `Nullable`.
|
||||
- `single_value` – значение добавляемого элемента. В массив с числами можно добавить только числа, в массив со строками только строки. При добавлении чисел ClickHouse автоматически приводит тип `single_value` к типу данных массива. Подробнее о типах данных в ClickHouse читайте в разделе «[Типы данных](../../sql-reference/functions/array-functions.md#data_types)». Может быть равно `NULL`, в этом случае функция добавит элемент `NULL` в массив, а тип элементов массива преобразует в `Nullable`.
|
||||
|
||||
**Пример**
|
||||
|
||||
|
@ -92,7 +92,7 @@ ClickHouse поддерживает использование секций `DIS
|
||||
|
||||
## Обработка NULL {#null-processing}
|
||||
|
||||
`DISTINCT` работает с [NULL](../../syntax.md#null-literal) как-будто `NULL` — обычное значение и `NULL==NULL`. Другими словами, в результате `DISTINCT`, различные комбинации с `NULL` встретятся только один раз. Это отличается от обработки `NULL` в большинстве других контекстов.
|
||||
`DISTINCT` работает с [NULL](../../syntax.md#null-literal) как будто `NULL` — обычное значение и `NULL==NULL`. Другими словами, в результате `DISTINCT`, различные комбинации с `NULL` встретятся только один раз. Это отличается от обработки `NULL` в большинстве других контекстов.
|
||||
|
||||
## Альтернативы {#alternatives}
|
||||
|
||||
|
@ -33,7 +33,7 @@ clusterAllReplicas('cluster_name', db, table[, sharding_key])
|
||||
|
||||
**Использование макросов**
|
||||
|
||||
`cluster_name` может содержать макрос — подстановку в фигурных скобках. Эта подстановка заменяется на соответствующее значение из секции [macros](../../operations/server-configuration-parameters/settings.md#macros) конфигурационного файла .
|
||||
`cluster_name` может содержать макрос — подстановку в фигурных скобках. Эта подстановка заменяется на соответствующее значение из секции [macros](../../operations/server-configuration-parameters/settings.md#macros) конфигурационного файла.
|
||||
|
||||
Пример:
|
||||
|
||||
|
@ -20,7 +20,7 @@ machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3
|
||||
- `LOADED_AND_RELOADING` — Dictionary is loaded successfully, and is being reloaded right now (frequent reasons: [SYSTEM RELOAD DICTIONARY](../../sql-reference/statements/system.md#query_language-system-reload-dictionary) 查询,超时,字典配置已更改)。
|
||||
- `FAILED_AND_RELOADING` — Could not load the dictionary as a result of an error and is loading now.
|
||||
- `origin` ([字符串](../../sql-reference/data-types/string.md)) — Path to the configuration file that describes the dictionary.
|
||||
- `type` ([字符串](../../sql-reference/data-types/string.md)) — Type of a dictionary allocation. [在内存中存储字典](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md).
|
||||
- `type` ([字符串](../../sql-reference/data-types/string.md)) — Type of dictionary allocation. [在内存中存储字典](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md).
|
||||
- `key` — [密钥类型](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md#ext_dict_structure-key):数字键 ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) or Сomposite key ([字符串](../../sql-reference/data-types/string.md)) — form “(type 1, type 2, …, type n)”.
|
||||
- `attribute.names` ([阵列](../../sql-reference/data-types/array.md)([字符串](../../sql-reference/data-types/string.md))) — Array of [属性名称](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md#ext_dict_structure-attributes) 由字典提供。
|
||||
- `attribute.types` ([阵列](../../sql-reference/data-types/array.md)([字符串](../../sql-reference/data-types/string.md))) — Corresponding array of [属性类型](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md#ext_dict_structure-attributes) 这是由字典提供。
|
||||
|
@ -845,83 +845,7 @@ bool Client::processWithFuzzing(const String & full_query)
|
||||
have_error = true;
|
||||
}
|
||||
|
||||
// Check that after the query is formatted, we can parse it back,
|
||||
// format again and get the same result. Unfortunately, we can't
|
||||
// compare the ASTs, which would be more sensitive to errors. This
|
||||
// double formatting check doesn't catch all errors, e.g. we can
|
||||
// format query incorrectly, but to a valid SQL that we can then
|
||||
// parse and format into the same SQL.
|
||||
// There are some complicated cases where we can generate the SQL
|
||||
// which we can't parse:
|
||||
// * first argument of lambda() replaced by fuzzer with
|
||||
// something else, leading to constructs such as
|
||||
// arrayMap((min(x) + 3) -> x + 1, ....)
|
||||
// * internals of Enum replaced, leading to:
|
||||
// Enum(equals(someFunction(y), 3)).
|
||||
// And there are even the cases when we can parse the query, but
|
||||
// it's logically incorrect and its formatting is a mess, such as
|
||||
// when `lambda()` function gets substituted into a wrong place.
|
||||
// To avoid dealing with these cases, run the check only for the
|
||||
// queries we were able to successfully execute.
|
||||
// Another caveat is that sometimes WITH queries are not executed,
|
||||
// if they are not referenced by the main SELECT, so they can still
|
||||
// have the aforementioned problems. Disable this check for such
|
||||
// queries, for lack of a better solution.
|
||||
// There is also a problem that fuzzer substitutes positive Int64
|
||||
// literals or Decimal literals, which are then parsed back as
|
||||
// UInt64, and suddenly duplicate alias substitution starts or stops
|
||||
// working (ASTWithAlias::formatImpl) or something like that.
|
||||
// So we compare not even the first and second formatting of the
|
||||
// query, but second and third.
|
||||
// If you have to add any more workarounds to this check, just remove
|
||||
// it altogether, it's not so useful.
|
||||
if (ast_to_process && !have_error && !queryHasWithClause(*ast_to_process))
|
||||
{
|
||||
ASTPtr ast_2;
|
||||
try
|
||||
{
|
||||
const auto * tmp_pos = query_to_execute.c_str();
|
||||
ast_2 = parseQuery(tmp_pos, tmp_pos + query_to_execute.size(), false /* allow_multi_statements */);
|
||||
}
|
||||
catch (Exception & e)
|
||||
{
|
||||
if (e.code() != ErrorCodes::SYNTAX_ERROR &&
|
||||
e.code() != ErrorCodes::TOO_DEEP_RECURSION)
|
||||
throw;
|
||||
}
|
||||
|
||||
if (ast_2)
|
||||
{
|
||||
const auto text_2 = ast_2->formatForErrorMessage();
|
||||
const auto * tmp_pos = text_2.c_str();
|
||||
const auto ast_3 = parseQuery(tmp_pos, tmp_pos + text_2.size(),
|
||||
false /* allow_multi_statements */);
|
||||
const auto text_3 = ast_3 ? ast_3->formatForErrorMessage() : "";
|
||||
|
||||
if (text_3 != text_2)
|
||||
{
|
||||
fmt::print(stderr, "Found error: The query formatting is broken.\n");
|
||||
|
||||
printChangedSettings();
|
||||
|
||||
fmt::print(stderr,
|
||||
"Got the following (different) text after formatting the fuzzed query and parsing it back:\n'{}'\n, expected:\n'{}'\n",
|
||||
text_3, text_2);
|
||||
fmt::print(stderr, "In more detail:\n");
|
||||
fmt::print(stderr, "AST-1 (generated by fuzzer):\n'{}'\n", ast_to_process->dumpTree());
|
||||
fmt::print(stderr, "Text-1 (AST-1 formatted):\n'{}'\n", query_to_execute);
|
||||
fmt::print(stderr, "AST-2 (Text-1 parsed):\n'{}'\n", ast_2->dumpTree());
|
||||
fmt::print(stderr, "Text-2 (AST-2 formatted):\n'{}'\n", text_2);
|
||||
fmt::print(stderr, "AST-3 (Text-2 parsed):\n'{}'\n", ast_3 ? ast_3->dumpTree() : "");
|
||||
fmt::print(stderr, "Text-3 (AST-3 formatted):\n'{}'\n", text_3);
|
||||
fmt::print(stderr, "Text-3 must be equal to Text-2, but it is not.\n");
|
||||
|
||||
_exit(1);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// The server is still alive so we're going to continue fuzzing.
|
||||
// The server is still alive, so we're going to continue fuzzing.
|
||||
// Determine what we're going to use as the starting AST.
|
||||
if (have_error)
|
||||
{
|
||||
|
@ -39,6 +39,7 @@ if (BUILD_STANDALONE_KEEPER)
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Coordination/KeeperContext.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Coordination/KeeperStateManager.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Coordination/KeeperStorage.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Coordination/KeeperConstants.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Coordination/KeeperAsynchronousMetrics.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Coordination/pathUtils.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Coordination/SessionExpiryQueue.cpp
|
||||
@ -69,6 +70,7 @@ if (BUILD_STANDALONE_KEEPER)
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Server/ServerType.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Server/HTTPRequestHandlerFactoryMain.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Server/KeeperReadinessHandler.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Server/CloudPlacementInfo.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Server/HTTP/HTTPServer.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Server/HTTP/ReadHeaders.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Server/HTTP/HTTPServerConnection.cpp
|
||||
|
@ -14,6 +14,7 @@
|
||||
#include <Common/assertProcessUserMatchesDataOwner.h>
|
||||
#include <Common/makeSocketAddress.h>
|
||||
#include <Server/waitServersToFinish.h>
|
||||
#include <Server/CloudPlacementInfo.h>
|
||||
#include <base/getMemoryAmount.h>
|
||||
#include <base/scope_guard.h>
|
||||
#include <base/safeExit.h>
|
||||
@ -31,9 +32,10 @@
|
||||
#include <Coordination/KeeperAsynchronousMetrics.h>
|
||||
|
||||
#include <Server/HTTP/HTTPServer.h>
|
||||
#include <Server/TCPServer.h>
|
||||
#include <Server/HTTPHandlerFactory.h>
|
||||
#include <Server/KeeperReadinessHandler.h>
|
||||
#include <Server/PrometheusMetricsWriter.h>
|
||||
#include <Server/TCPServer.h>
|
||||
|
||||
#include "Core/Defines.h"
|
||||
#include "config.h"
|
||||
@ -352,6 +354,11 @@ try
|
||||
|
||||
std::string include_from_path = config().getString("include_from", "/etc/metrika.xml");
|
||||
|
||||
if (config().has(DB::PlacementInfo::PLACEMENT_CONFIG_PREFIX))
|
||||
{
|
||||
PlacementInfo::PlacementInfo::instance().initialize(config());
|
||||
}
|
||||
|
||||
GlobalThreadPool::initialize(
|
||||
config().getUInt("max_thread_pool_size", 100),
|
||||
config().getUInt("max_thread_pool_free_size", 1000),
|
||||
@ -482,19 +489,28 @@ try
|
||||
|
||||
/// Prometheus (if defined and not setup yet with http_port)
|
||||
port_name = "prometheus.port";
|
||||
createServer(listen_host, port_name, listen_try, [&, my_http_context = std::move(http_context)](UInt16 port) mutable
|
||||
{
|
||||
Poco::Net::ServerSocket socket;
|
||||
auto address = socketBindListen(socket, listen_host, port);
|
||||
socket.setReceiveTimeout(my_http_context->getReceiveTimeout());
|
||||
socket.setSendTimeout(my_http_context->getSendTimeout());
|
||||
servers->emplace_back(
|
||||
listen_host,
|
||||
port_name,
|
||||
"Prometheus: http://" + address.toString(),
|
||||
std::make_unique<HTTPServer>(
|
||||
std::move(my_http_context), createPrometheusMainHandlerFactory(*this, config_getter(), async_metrics, "PrometheusHandler-factory"), server_pool, socket, http_params));
|
||||
});
|
||||
createServer(
|
||||
listen_host,
|
||||
port_name,
|
||||
listen_try,
|
||||
[&, my_http_context = std::move(http_context)](UInt16 port) mutable
|
||||
{
|
||||
Poco::Net::ServerSocket socket;
|
||||
auto address = socketBindListen(socket, listen_host, port);
|
||||
socket.setReceiveTimeout(my_http_context->getReceiveTimeout());
|
||||
socket.setSendTimeout(my_http_context->getSendTimeout());
|
||||
auto metrics_writer = std::make_shared<KeeperPrometheusMetricsWriter>(config, "prometheus", async_metrics);
|
||||
servers->emplace_back(
|
||||
listen_host,
|
||||
port_name,
|
||||
"Prometheus: http://" + address.toString(),
|
||||
std::make_unique<HTTPServer>(
|
||||
std::move(my_http_context),
|
||||
createPrometheusMainHandlerFactory(*this, config_getter(), metrics_writer, "PrometheusHandler-factory"),
|
||||
server_pool,
|
||||
socket,
|
||||
http_params));
|
||||
});
|
||||
|
||||
/// HTTP control endpoints
|
||||
port_name = "keeper_server.http_control.port";
|
||||
|
@ -336,23 +336,23 @@ std::string LocalServer::getInitialCreateTableQuery()
|
||||
auto table_structure = config().getString("table-structure", "auto");
|
||||
|
||||
String table_file;
|
||||
String format_from_file_name;
|
||||
std::optional<String> format_from_file_name;
|
||||
if (!config().has("table-file") || config().getString("table-file") == "-")
|
||||
{
|
||||
/// Use Unix tools stdin naming convention
|
||||
table_file = "stdin";
|
||||
format_from_file_name = FormatFactory::instance().getFormatFromFileDescriptor(STDIN_FILENO);
|
||||
format_from_file_name = FormatFactory::instance().tryGetFormatFromFileDescriptor(STDIN_FILENO);
|
||||
}
|
||||
else
|
||||
{
|
||||
/// Use regular file
|
||||
auto file_name = config().getString("table-file");
|
||||
table_file = quoteString(file_name);
|
||||
format_from_file_name = FormatFactory::instance().getFormatFromFileName(file_name, false);
|
||||
format_from_file_name = FormatFactory::instance().tryGetFormatFromFileName(file_name);
|
||||
}
|
||||
|
||||
auto data_format = backQuoteIfNeed(
|
||||
config().getString("table-data-format", config().getString("format", format_from_file_name.empty() ? "TSV" : format_from_file_name)));
|
||||
config().getString("table-data-format", config().getString("format", format_from_file_name ? *format_from_file_name : "TSV")));
|
||||
|
||||
|
||||
if (table_structure == "auto")
|
||||
|
@ -1310,7 +1310,7 @@ try
|
||||
throw ErrnoException(ErrorCodes::CANNOT_SEEK_THROUGH_FILE, "Input must be seekable file (it will be read twice)");
|
||||
|
||||
SingleReadBufferIterator read_buffer_iterator(std::move(file));
|
||||
schema_columns = readSchemaFromFormat(input_format, {}, read_buffer_iterator, false, context_const);
|
||||
schema_columns = readSchemaFromFormat(input_format, {}, read_buffer_iterator, context_const);
|
||||
}
|
||||
else
|
||||
{
|
||||
|
@ -44,6 +44,7 @@
|
||||
#include <Common/assertProcessUserMatchesDataOwner.h>
|
||||
#include <Common/makeSocketAddress.h>
|
||||
#include <Common/FailPoint.h>
|
||||
#include <Common/CPUID.h>
|
||||
#include <Server/waitServersToFinish.h>
|
||||
#include <Interpreters/Cache/FileCacheFactory.h>
|
||||
#include <Core/ServerUUID.h>
|
||||
@ -97,6 +98,7 @@
|
||||
#include <Server/ProtocolServerAdapter.h>
|
||||
#include <Server/KeeperReadinessHandler.h>
|
||||
#include <Server/HTTP/HTTPServer.h>
|
||||
#include <Server/CloudPlacementInfo.h>
|
||||
#include <Interpreters/AsynchronousInsertQueue.h>
|
||||
#include <Core/ServerSettings.h>
|
||||
#include <filesystem>
|
||||
@ -711,6 +713,22 @@ try
|
||||
getNumberOfPhysicalCPUCores(), // on ARM processors it can show only enabled at current moment cores
|
||||
std::thread::hardware_concurrency());
|
||||
|
||||
#if defined(__x86_64__)
|
||||
String cpu_info;
|
||||
#define COLLECT_FLAG(X) \
|
||||
if (CPU::have##X()) \
|
||||
{ \
|
||||
if (!cpu_info.empty()) \
|
||||
cpu_info += ", "; \
|
||||
cpu_info += #X; \
|
||||
}
|
||||
|
||||
CPU_ID_ENUMERATE(COLLECT_FLAG)
|
||||
#undef COLLECT_FLAG
|
||||
|
||||
LOG_INFO(log, "Available CPU instruction sets: {}", cpu_info);
|
||||
#endif
|
||||
|
||||
sanityChecks(*this);
|
||||
|
||||
// Initialize global thread pool. Do it before we fetch configs from zookeeper
|
||||
@ -1960,6 +1978,11 @@ try
|
||||
load_metadata_tasks);
|
||||
}
|
||||
|
||||
if (config().has(DB::PlacementInfo::PLACEMENT_CONFIG_PREFIX))
|
||||
{
|
||||
PlacementInfo::PlacementInfo::instance().initialize(config());
|
||||
}
|
||||
|
||||
/// Do not keep tasks in server, they should be kept inside databases. Used here to make dependent tasks only.
|
||||
load_metadata_tasks.clear();
|
||||
load_metadata_tasks.shrink_to_fit();
|
||||
|
@ -7,6 +7,7 @@
|
||||
#include <Analyzer/ConstantNode.h>
|
||||
#include <Analyzer/FunctionNode.h>
|
||||
#include <Interpreters/Context.h>
|
||||
#include <DataTypes/DataTypesNumber.h>
|
||||
|
||||
namespace DB
|
||||
{
|
||||
@ -32,6 +33,11 @@ public:
|
||||
if (function_node->getArguments().getNodes().size() != 1)
|
||||
return;
|
||||
|
||||
/// forbid the optimization if return value of sum() and count() differs:
|
||||
/// count() returns only UInt64 type, while sum() could return Nullable().
|
||||
if (!function_node->getResultType()->equals(DataTypeUInt64()))
|
||||
return;
|
||||
|
||||
auto & first_argument = function_node->getArguments().getNodes()[0];
|
||||
auto * first_argument_constant_node = first_argument->as<ConstantNode>();
|
||||
if (!first_argument_constant_node)
|
||||
|
@ -79,8 +79,6 @@
|
||||
#include <Analyzer/QueryTreeBuilder.h>
|
||||
#include <Analyzer/IQueryTreeNode.h>
|
||||
#include <Analyzer/Identifier.h>
|
||||
#include <Poco/Logger.h>
|
||||
#include <Common/logger_useful.h>
|
||||
|
||||
namespace ProfileEvents
|
||||
{
|
||||
@ -6733,6 +6731,28 @@ void QueryAnalyzer::resolveTableFunction(QueryTreeNodePtr & table_function_node,
|
||||
TableFunctionPtr table_function_ptr = TableFunctionFactory::instance().tryGet(table_function_name, scope_context);
|
||||
if (!table_function_ptr)
|
||||
{
|
||||
String database_name = scope_context->getCurrentDatabase();
|
||||
String table_name;
|
||||
|
||||
auto function_ast = table_function_node->toAST();
|
||||
Identifier table_identifier{table_function_name};
|
||||
if (table_identifier.getPartsSize() == 1)
|
||||
{
|
||||
table_name = table_identifier[0];
|
||||
}
|
||||
else if (table_identifier.getPartsSize() == 2)
|
||||
{
|
||||
database_name = table_identifier[0];
|
||||
table_name = table_identifier[1];
|
||||
}
|
||||
|
||||
auto parametrized_view_storage = scope_context->getQueryContext()->buildParametrizedViewStorage(function_ast, database_name, table_name);
|
||||
if (parametrized_view_storage)
|
||||
{
|
||||
table_function_node = std::make_shared<TableNode>(parametrized_view_storage, scope_context);
|
||||
return;
|
||||
}
|
||||
|
||||
auto hints = TableFunctionFactory::instance().getHints(table_function_name);
|
||||
if (!hints.empty())
|
||||
throw Exception(ErrorCodes::UNKNOWN_FUNCTION,
|
||||
|
@ -421,11 +421,8 @@ ASTPtr QueryNode::toASTImpl(const ConvertToASTOptions & options) const
|
||||
|
||||
if (is_subquery)
|
||||
{
|
||||
auto subquery = std::make_shared<ASTSubquery>();
|
||||
|
||||
auto subquery = std::make_shared<ASTSubquery>(std::move(result_select_query));
|
||||
subquery->cte_name = cte_name;
|
||||
subquery->children.push_back(std::move(result_select_query));
|
||||
|
||||
return subquery;
|
||||
}
|
||||
|
||||
|
@ -61,7 +61,7 @@ namespace ErrorCodes
|
||||
namespace
|
||||
{
|
||||
|
||||
#ifndef NDEBUG
|
||||
#if defined(ABORT_ON_LOGICAL_ERROR)
|
||||
|
||||
/** This visitor checks if Query Tree structure is valid after each pass
|
||||
* in debug build.
|
||||
@ -184,7 +184,7 @@ void QueryTreePassManager::run(QueryTreeNodePtr query_tree_node)
|
||||
for (size_t i = 0; i < passes_size; ++i)
|
||||
{
|
||||
passes[i]->run(query_tree_node, current_context);
|
||||
#ifndef NDEBUG
|
||||
#if defined(ABORT_ON_LOGICAL_ERROR)
|
||||
ValidationChecker(passes[i]->getName()).visit(query_tree_node);
|
||||
#endif
|
||||
}
|
||||
@ -209,7 +209,7 @@ void QueryTreePassManager::run(QueryTreeNodePtr query_tree_node, size_t up_to_pa
|
||||
for (size_t i = 0; i < up_to_pass_index; ++i)
|
||||
{
|
||||
passes[i]->run(query_tree_node, current_context);
|
||||
#ifndef NDEBUG
|
||||
#if defined(ABORT_ON_LOGICAL_ERROR)
|
||||
ValidationChecker(passes[i]->getName()).visit(query_tree_node);
|
||||
#endif
|
||||
}
|
||||
|
@ -185,11 +185,8 @@ ASTPtr UnionNode::toASTImpl(const ConvertToASTOptions & options) const
|
||||
|
||||
if (is_subquery)
|
||||
{
|
||||
auto subquery = std::make_shared<ASTSubquery>();
|
||||
|
||||
auto subquery = std::make_shared<ASTSubquery>(std::move(select_with_union_query));
|
||||
subquery->cte_name = cte_name;
|
||||
subquery->children.push_back(std::move(select_with_union_query));
|
||||
|
||||
return subquery;
|
||||
}
|
||||
|
||||
|
@ -632,9 +632,9 @@ try
|
||||
}
|
||||
else if (query_with_output->out_file)
|
||||
{
|
||||
const auto & format_name = FormatFactory::instance().getFormatFromFileName(out_file);
|
||||
if (!format_name.empty())
|
||||
current_format = format_name;
|
||||
auto format_name = FormatFactory::instance().tryGetFormatFromFileName(out_file);
|
||||
if (format_name)
|
||||
current_format = *format_name;
|
||||
}
|
||||
}
|
||||
|
||||
@ -1508,7 +1508,7 @@ void ClientBase::sendData(Block & sample, const ColumnsDescription & columns_des
|
||||
|
||||
String current_format = parsed_insert_query->format;
|
||||
if (current_format.empty())
|
||||
current_format = FormatFactory::instance().getFormatFromFileName(in_file, true);
|
||||
current_format = FormatFactory::instance().getFormatFromFileName(in_file);
|
||||
|
||||
/// Create temporary storage file, to support globs and parallel reading
|
||||
/// StorageFile doesn't support ephemeral/materialized/alias columns.
|
||||
|
@ -125,7 +125,7 @@ ConnectionParameters::ConnectionParameters(const Poco::Util::AbstractConfigurati
|
||||
Poco::Timespan(config.getInt("send_timeout", DBMS_DEFAULT_SEND_TIMEOUT_SEC), 0))
|
||||
.withReceiveTimeout(
|
||||
Poco::Timespan(config.getInt("receive_timeout", DBMS_DEFAULT_RECEIVE_TIMEOUT_SEC), 0))
|
||||
.withTcpKeepAliveTimeout(
|
||||
.withTCPKeepAliveTimeout(
|
||||
Poco::Timespan(config.getInt("tcp_keep_alive_timeout", DEFAULT_TCP_KEEP_ALIVE_TIMEOUT), 0))
|
||||
.withHandshakeTimeout(
|
||||
Poco::Timespan(config.getInt("handshake_timeout_ms", DBMS_DEFAULT_RECEIVE_TIMEOUT_SEC * 1000) * 1000))
|
||||
|
@ -1168,23 +1168,13 @@ void QueryFuzzer::fuzz(ASTPtr & ast)
|
||||
|
||||
fuzz(select->children);
|
||||
}
|
||||
/*
|
||||
* The time to fuzz the settings has not yet come.
|
||||
* Apparently we don't have any infrastructure to validate the values of
|
||||
* the settings, and the first query with max_block_size = -1 breaks
|
||||
* because of overflows here and there.
|
||||
*//*
|
||||
* else if (auto * set = typeid_cast<ASTSetQuery *>(ast.get()))
|
||||
* {
|
||||
* for (auto & c : set->changes)
|
||||
* {
|
||||
* if (fuzz_rand() % 50 == 0)
|
||||
* {
|
||||
* c.value = fuzzField(c.value);
|
||||
* }
|
||||
* }
|
||||
* }
|
||||
*/
|
||||
else if (auto * set = typeid_cast<ASTSetQuery *>(ast.get()))
|
||||
{
|
||||
/// Fuzz settings
|
||||
for (auto & c : set->changes)
|
||||
if (fuzz_rand() % 50 == 0)
|
||||
c.value = fuzzField(c.value);
|
||||
}
|
||||
else if (auto * literal = typeid_cast<ASTLiteral *>(ast.get()))
|
||||
{
|
||||
// There is a caveat with fuzzing the children: many ASTs also keep the
|
||||
|
@ -2,7 +2,7 @@
|
||||
|
||||
#include <base/types.h>
|
||||
|
||||
#if defined(__x86_64__) || defined(__i386__)
|
||||
#if defined(__x86_64__)
|
||||
#include <cpuid.h>
|
||||
#endif
|
||||
|
||||
@ -11,10 +11,10 @@
|
||||
|
||||
namespace DB
|
||||
{
|
||||
namespace Cpu
|
||||
namespace CPU
|
||||
{
|
||||
|
||||
#if (defined(__x86_64__) || defined(__i386__))
|
||||
#if (defined(__x86_64__))
|
||||
/// Our version is independent of -mxsave option, because we do dynamic dispatch.
|
||||
inline UInt64 our_xgetbv(UInt32 xcr) noexcept
|
||||
{
|
||||
@ -30,7 +30,7 @@ inline UInt64 our_xgetbv(UInt32 xcr) noexcept
|
||||
|
||||
inline bool cpuid(UInt32 op, UInt32 sub_op, UInt32 * res) noexcept /// NOLINT
|
||||
{
|
||||
#if defined(__x86_64__) || defined(__i386__)
|
||||
#if defined(__x86_64__)
|
||||
__cpuid_count(op, sub_op, res[0], res[1], res[2], res[3]);
|
||||
return true;
|
||||
#else
|
||||
@ -45,7 +45,7 @@ inline bool cpuid(UInt32 op, UInt32 sub_op, UInt32 * res) noexcept /// NOLINT
|
||||
|
||||
inline bool cpuid(UInt32 op, UInt32 * res) noexcept /// NOLINT
|
||||
{
|
||||
#if defined(__x86_64__) || defined(__i386__)
|
||||
#if defined(__x86_64__)
|
||||
__cpuid(op, res[0], res[1], res[2], res[3]);
|
||||
return true;
|
||||
#else
|
||||
@ -98,7 +98,7 @@ inline bool cpuid(UInt32 op, UInt32 * res) noexcept /// NOLINT
|
||||
OP(AMXTILE) \
|
||||
OP(AMXINT8)
|
||||
|
||||
union CpuInfo
|
||||
union CPUInfo
|
||||
{
|
||||
UInt32 info[4];
|
||||
|
||||
@ -110,9 +110,9 @@ union CpuInfo
|
||||
UInt32 edx;
|
||||
} registers;
|
||||
|
||||
inline explicit CpuInfo(UInt32 op) noexcept { cpuid(op, info); }
|
||||
inline explicit CPUInfo(UInt32 op) noexcept { cpuid(op, info); }
|
||||
|
||||
inline CpuInfo(UInt32 op, UInt32 sub_op) noexcept { cpuid(op, sub_op, info); }
|
||||
inline CPUInfo(UInt32 op, UInt32 sub_op) noexcept { cpuid(op, sub_op, info); }
|
||||
};
|
||||
|
||||
#define DEF_NAME(X) inline bool have##X() noexcept;
|
||||
@ -121,77 +121,77 @@ union CpuInfo
|
||||
|
||||
bool haveRDTSCP() noexcept
|
||||
{
|
||||
return (CpuInfo(0x80000001).registers.edx >> 27) & 1u;
|
||||
return (CPUInfo(0x80000001).registers.edx >> 27) & 1u;
|
||||
}
|
||||
|
||||
bool haveSSE() noexcept
|
||||
{
|
||||
return (CpuInfo(0x1).registers.edx >> 25) & 1u;
|
||||
return (CPUInfo(0x1).registers.edx >> 25) & 1u;
|
||||
}
|
||||
|
||||
bool haveSSE2() noexcept
|
||||
{
|
||||
return (CpuInfo(0x1).registers.edx >> 26) & 1u;
|
||||
return (CPUInfo(0x1).registers.edx >> 26) & 1u;
|
||||
}
|
||||
|
||||
bool haveSSE3() noexcept
|
||||
{
|
||||
return CpuInfo(0x1).registers.ecx & 1u;
|
||||
return CPUInfo(0x1).registers.ecx & 1u;
|
||||
}
|
||||
|
||||
bool havePCLMUL() noexcept
|
||||
{
|
||||
return (CpuInfo(0x1).registers.ecx >> 1) & 1u;
|
||||
return (CPUInfo(0x1).registers.ecx >> 1) & 1u;
|
||||
}
|
||||
|
||||
bool haveSSSE3() noexcept
|
||||
{
|
||||
return (CpuInfo(0x1).registers.ecx >> 9) & 1u;
|
||||
return (CPUInfo(0x1).registers.ecx >> 9) & 1u;
|
||||
}
|
||||
|
||||
bool haveSSE41() noexcept
|
||||
{
|
||||
return (CpuInfo(0x1).registers.ecx >> 19) & 1u;
|
||||
return (CPUInfo(0x1).registers.ecx >> 19) & 1u;
|
||||
}
|
||||
|
||||
bool haveSSE42() noexcept
|
||||
{
|
||||
return (CpuInfo(0x1).registers.ecx >> 20) & 1u;
|
||||
return (CPUInfo(0x1).registers.ecx >> 20) & 1u;
|
||||
}
|
||||
|
||||
bool haveF16C() noexcept
|
||||
{
|
||||
return (CpuInfo(0x1).registers.ecx >> 29) & 1u;
|
||||
return (CPUInfo(0x1).registers.ecx >> 29) & 1u;
|
||||
}
|
||||
|
||||
bool havePOPCNT() noexcept
|
||||
{
|
||||
return (CpuInfo(0x1).registers.ecx >> 23) & 1u;
|
||||
return (CPUInfo(0x1).registers.ecx >> 23) & 1u;
|
||||
}
|
||||
|
||||
bool haveAES() noexcept
|
||||
{
|
||||
return (CpuInfo(0x1).registers.ecx >> 25) & 1u;
|
||||
return (CPUInfo(0x1).registers.ecx >> 25) & 1u;
|
||||
}
|
||||
|
||||
bool haveXSAVE() noexcept
|
||||
{
|
||||
return (CpuInfo(0x1).registers.ecx >> 26) & 1u;
|
||||
return (CPUInfo(0x1).registers.ecx >> 26) & 1u;
|
||||
}
|
||||
|
||||
bool haveOSXSAVE() noexcept
|
||||
{
|
||||
return (CpuInfo(0x1).registers.ecx >> 27) & 1u;
|
||||
return (CPUInfo(0x1).registers.ecx >> 27) & 1u;
|
||||
}
|
||||
|
||||
bool haveAVX() noexcept
|
||||
{
|
||||
#if defined(__x86_64__) || defined(__i386__)
|
||||
#if defined(__x86_64__)
|
||||
// http://www.intel.com/content/dam/www/public/us/en/documents/manuals/64-ia-32-architectures-optimization-manual.pdf
|
||||
// https://bugs.chromium.org/p/chromium/issues/detail?id=375968
|
||||
return haveOSXSAVE() // implies haveXSAVE()
|
||||
&& (our_xgetbv(0) & 6u) == 6u // XMM state and YMM state are enabled by OS
|
||||
&& ((CpuInfo(0x1).registers.ecx >> 28) & 1u); // AVX bit
|
||||
&& ((CPUInfo(0x1).registers.ecx >> 28) & 1u); // AVX bit
|
||||
#else
|
||||
return false;
|
||||
#endif
|
||||
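
Aside, not part of this commit: the have*() helpers above test CPUID leaf/bit pairs. A minimal standalone equivalent of the haveSSE42() check shown earlier (leaf 0x1, ECX bit 20), using GCC/Clang's <cpuid.h> and assuming an x86-64 target, could look like this:

#include <cpuid.h>
#include <cstdio>

// Query CPUID leaf 1 and test ECX bit 20 (SSE4.2), mirroring haveSSE42() above.
int main()
{
    unsigned eax = 0, ebx = 0, ecx = 0, edx = 0;
    if (!__get_cpuid(0x1, &eax, &ebx, &ecx, &edx))
        return 1; /// CPUID leaf 1 not supported
    std::printf("SSE4.2: %s\n", ((ecx >> 20) & 1u) ? "yes" : "no");
    return 0;
}
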
@ -199,33 +199,33 @@ bool haveAVX() noexcept
|
||||
|
||||
bool haveFMA() noexcept
|
||||
{
|
||||
return haveAVX() && ((CpuInfo(0x1).registers.ecx >> 12) & 1u);
|
||||
return haveAVX() && ((CPUInfo(0x1).registers.ecx >> 12) & 1u);
|
||||
}
|
||||
|
||||
bool haveAVX2() noexcept
|
||||
{
|
||||
return haveAVX() && ((CpuInfo(0x7, 0).registers.ebx >> 5) & 1u);
|
||||
return haveAVX() && ((CPUInfo(0x7, 0).registers.ebx >> 5) & 1u);
|
||||
}
|
||||
|
||||
bool haveBMI1() noexcept
|
||||
{
|
||||
return (CpuInfo(0x7, 0).registers.ebx >> 3) & 1u;
|
||||
return (CPUInfo(0x7, 0).registers.ebx >> 3) & 1u;
|
||||
}
|
||||
|
||||
bool haveBMI2() noexcept
|
||||
{
|
||||
return (CpuInfo(0x7, 0).registers.ebx >> 8) & 1u;
|
||||
return (CPUInfo(0x7, 0).registers.ebx >> 8) & 1u;
|
||||
}
|
||||
|
||||
bool haveAVX512F() noexcept
|
||||
{
|
||||
#if defined(__x86_64__) || defined(__i386__)
|
||||
#if defined(__x86_64__)
|
||||
// https://software.intel.com/en-us/articles/how-to-detect-knl-instruction-support
|
||||
return haveOSXSAVE() // implies haveXSAVE()
|
||||
&& (our_xgetbv(0) & 6u) == 6u // XMM state and YMM state are enabled by OS
|
||||
&& ((our_xgetbv(0) >> 5) & 7u) == 7u // ZMM state is enabled by OS
|
||||
&& CpuInfo(0x0).registers.eax >= 0x7 // leaf 7 is present
|
||||
&& ((CpuInfo(0x7, 0).registers.ebx >> 16) & 1u); // AVX512F bit
|
||||
&& CPUInfo(0x0).registers.eax >= 0x7 // leaf 7 is present
|
||||
&& ((CPUInfo(0x7, 0).registers.ebx >> 16) & 1u); // AVX512F bit
|
||||
#else
|
||||
return false;
|
||||
#endif
|
||||
@ -233,92 +233,92 @@ bool haveAVX512F() noexcept
|
||||
|
||||
bool haveAVX512DQ() noexcept
|
||||
{
|
||||
return haveAVX512F() && ((CpuInfo(0x7, 0).registers.ebx >> 17) & 1u);
|
||||
return haveAVX512F() && ((CPUInfo(0x7, 0).registers.ebx >> 17) & 1u);
|
||||
}
|
||||
|
||||
bool haveRDSEED() noexcept
|
||||
{
|
||||
return CpuInfo(0x0).registers.eax >= 0x7 && ((CpuInfo(0x7, 0).registers.ebx >> 18) & 1u);
|
||||
return CPUInfo(0x0).registers.eax >= 0x7 && ((CPUInfo(0x7, 0).registers.ebx >> 18) & 1u);
|
||||
}
|
||||
|
||||
bool haveADX() noexcept
|
||||
{
|
||||
return CpuInfo(0x0).registers.eax >= 0x7 && ((CpuInfo(0x7, 0).registers.ebx >> 19) & 1u);
|
||||
return CPUInfo(0x0).registers.eax >= 0x7 && ((CPUInfo(0x7, 0).registers.ebx >> 19) & 1u);
|
||||
}
|
||||
|
||||
bool haveAVX512IFMA() noexcept
|
||||
{
|
||||
return haveAVX512F() && ((CpuInfo(0x7, 0).registers.ebx >> 21) & 1u);
|
||||
return haveAVX512F() && ((CPUInfo(0x7, 0).registers.ebx >> 21) & 1u);
|
||||
}
|
||||
|
||||
bool havePCOMMIT() noexcept
|
||||
{
|
||||
return CpuInfo(0x0).registers.eax >= 0x7 && ((CpuInfo(0x7, 0).registers.ebx >> 22) & 1u);
|
||||
return CPUInfo(0x0).registers.eax >= 0x7 && ((CPUInfo(0x7, 0).registers.ebx >> 22) & 1u);
|
||||
}
|
||||
|
||||
bool haveCLFLUSHOPT() noexcept
|
||||
{
|
||||
return CpuInfo(0x0).registers.eax >= 0x7 && ((CpuInfo(0x7, 0).registers.ebx >> 23) & 1u);
|
||||
return CPUInfo(0x0).registers.eax >= 0x7 && ((CPUInfo(0x7, 0).registers.ebx >> 23) & 1u);
|
||||
}
|
||||
|
||||
bool haveCLWB() noexcept
|
||||
{
|
||||
return CpuInfo(0x0).registers.eax >= 0x7 && ((CpuInfo(0x7, 0).registers.ebx >> 24) & 1u);
|
||||
return CPUInfo(0x0).registers.eax >= 0x7 && ((CPUInfo(0x7, 0).registers.ebx >> 24) & 1u);
|
||||
}
|
||||
|
||||
bool haveAVX512PF() noexcept
|
||||
{
|
||||
return haveAVX512F() && ((CpuInfo(0x7, 0).registers.ebx >> 26) & 1u);
|
||||
return haveAVX512F() && ((CPUInfo(0x7, 0).registers.ebx >> 26) & 1u);
|
||||
}
|
||||
|
||||
bool haveAVX512ER() noexcept
|
||||
{
|
||||
return haveAVX512F() && ((CpuInfo(0x7, 0).registers.ebx >> 27) & 1u);
|
||||
return haveAVX512F() && ((CPUInfo(0x7, 0).registers.ebx >> 27) & 1u);
|
||||
}
|
||||
|
||||
bool haveAVX512CD() noexcept
|
||||
{
|
||||
return haveAVX512F() && ((CpuInfo(0x7, 0).registers.ebx >> 28) & 1u);
|
||||
return haveAVX512F() && ((CPUInfo(0x7, 0).registers.ebx >> 28) & 1u);
|
||||
}
|
||||
|
||||
bool haveSHA() noexcept
|
||||
{
|
||||
return CpuInfo(0x0).registers.eax >= 0x7 && ((CpuInfo(0x7, 0).registers.ebx >> 29) & 1u);
|
||||
return CPUInfo(0x0).registers.eax >= 0x7 && ((CPUInfo(0x7, 0).registers.ebx >> 29) & 1u);
|
||||
}
|
||||
|
||||
bool haveAVX512BW() noexcept
|
||||
{
|
||||
return haveAVX512F() && ((CpuInfo(0x7, 0).registers.ebx >> 30) & 1u);
|
||||
return haveAVX512F() && ((CPUInfo(0x7, 0).registers.ebx >> 30) & 1u);
|
||||
}
|
||||
|
||||
bool haveAVX512VL() noexcept
|
||||
{
|
||||
return haveAVX512F() && ((CpuInfo(0x7, 0).registers.ebx >> 31) & 1u);
|
||||
return haveAVX512F() && ((CPUInfo(0x7, 0).registers.ebx >> 31) & 1u);
|
||||
}
|
||||
|
||||
bool havePREFETCHWT1() noexcept
|
||||
{
|
||||
return CpuInfo(0x0).registers.eax >= 0x7 && ((CpuInfo(0x7, 0).registers.ecx >> 0) & 1u);
|
||||
return CPUInfo(0x0).registers.eax >= 0x7 && ((CPUInfo(0x7, 0).registers.ecx >> 0) & 1u);
|
||||
}
|
||||
|
||||
bool haveAVX512VBMI() noexcept
|
||||
{
|
||||
return haveAVX512F() && ((CpuInfo(0x7, 0).registers.ecx >> 1) & 1u);
|
||||
return haveAVX512F() && ((CPUInfo(0x7, 0).registers.ecx >> 1) & 1u);
|
||||
}
|
||||
|
||||
bool haveAVX512VBMI2() noexcept
|
||||
{
|
||||
return haveAVX512F() && ((CpuInfo(0x7, 0).registers.ecx >> 6) & 1u);
|
||||
return haveAVX512F() && ((CPUInfo(0x7, 0).registers.ecx >> 6) & 1u);
|
||||
}
|
||||
|
||||
bool haveRDRAND() noexcept
|
||||
{
|
||||
return CpuInfo(0x0).registers.eax >= 0x7 && ((CpuInfo(0x1).registers.ecx >> 30) & 1u);
|
||||
return CPUInfo(0x0).registers.eax >= 0x7 && ((CPUInfo(0x1).registers.ecx >> 30) & 1u);
|
||||
}
|
||||
|
||||
inline bool haveAMX() noexcept
|
||||
{
|
||||
#if defined(__x86_64__) || defined(__i386__)
|
||||
#if defined(__x86_64__)
|
||||
// http://www.intel.com/content/dam/www/public/us/en/documents/manuals/64-ia-32-architectures-optimization-manual.pdf
|
||||
return haveOSXSAVE() // implies haveXSAVE()
|
||||
&& ((our_xgetbv(0) >> 17) & 0x3) == 0x3; // AMX state are enabled by OS
|
||||
@ -330,22 +330,22 @@ inline bool haveAMX() noexcept
|
||||
bool haveAMXBF16() noexcept
|
||||
{
|
||||
return haveAMX()
|
||||
&& ((CpuInfo(0x7, 0).registers.edx >> 22) & 1u); // AMX-BF16 bit
|
||||
&& ((CPUInfo(0x7, 0).registers.edx >> 22) & 1u); // AMX-BF16 bit
|
||||
}
|
||||
|
||||
bool haveAMXTILE() noexcept
|
||||
{
|
||||
return haveAMX()
|
||||
&& ((CpuInfo(0x7, 0).registers.edx >> 24) & 1u); // AMX-TILE bit
|
||||
&& ((CPUInfo(0x7, 0).registers.edx >> 24) & 1u); // AMX-TILE bit
|
||||
}
|
||||
|
||||
bool haveAMXINT8() noexcept
|
||||
{
|
||||
return haveAMX()
|
||||
&& ((CpuInfo(0x7, 0).registers.edx >> 25) & 1u); // AMX-INT8 bit
|
||||
&& ((CPUInfo(0x7, 0).registers.edx >> 25) & 1u); // AMX-INT8 bit
|
||||
}
|
||||
|
||||
struct CpuFlagsCache
|
||||
struct CPUFlagsCache
|
||||
{
|
||||
#define DEF_NAME(X) static inline bool have_##X = have##X();
|
||||
CPU_ID_ENUMERATE(DEF_NAME)
|
||||
@ -354,4 +354,3 @@ struct CpuFlagsCache
|
||||
|
||||
}
|
||||
}
|
||||
|
@ -2,6 +2,8 @@


/// Available metrics. Add something here as you wish.
/// If the metric is generic (i.e. not server specific)
/// it should be also added to src/Coordination/KeeperConstant.cpp
#define APPLY_FOR_BUILTIN_METRICS(M) \
M(Query, "Number of executing queries") \
M(Merge, "Number of executing background merges") \

@ -2067,8 +2067,8 @@ Dwarf::LineNumberVM::StepResult Dwarf::LineNumberVM::step(std::string_view & pro
if (opcode != 0)
{ // standard opcode
// Only interpret opcodes that are recognized by the version we're parsing;
// the others are vendor extensions and we should ignore them.
switch (opcode) // NOLINT(bugprone-switch-missing-default-case)
// the others are vendor extensions, and we should ignore them.
switch (opcode)
{
case DW_LNS_copy:
basicBlock_ = false;
@ -2121,6 +2121,7 @@ Dwarf::LineNumberVM::StepResult Dwarf::LineNumberVM::step(std::string_view & pro
}
isa_ = readULEB(program);
return CONTINUE;
default:
}

// Unrecognized standard opcode, slurp the appropriate number of LEB

@ -594,6 +594,7 @@
M(712, TOO_MANY_MATERIALIZED_VIEWS) \
M(713, BROKEN_PROJECTION) \
M(714, UNEXPECTED_CLUSTER) \
M(715, CANNOT_DETECT_FORMAT) \
\
M(999, KEEPER_EXCEPTION) \
M(1000, POCO_EXCEPTION) \

@ -91,7 +91,7 @@ public:
if constexpr (std::is_floating_point_v<T>)
return x.getValue().template convertTo<T>() / x.getScaleMultiplier().template convertTo<T>();
else
return (x.getValue() / x.getScaleMultiplier()). template convertTo<T>();
return (x.getValue() / x.getScaleMultiplier()).template convertTo<T>();
}

T operator() (const AggregateFunctionStateData &) const

@ -4,6 +4,8 @@
|
||||
|
||||
|
||||
/// Available events. Add something here as you wish.
|
||||
/// If the event is generic (i.e. not server specific)
|
||||
/// it should be also added to src/Coordination/KeeperConstant.cpp
|
||||
#define APPLY_FOR_BUILTIN_EVENTS(M) \
|
||||
M(Query, "Number of queries to be interpreted and potentially executed. Does not include queries that failed to parse or were rejected due to AST size limits, quota limits or limits on the number of simultaneously running queries. May include internal queries initiated by ClickHouse itself. Does not count subqueries.") \
|
||||
M(SelectQuery, "Same as Query, but only for SELECT queries.") \
|
||||
@ -310,7 +312,7 @@ The server successfully detected this situation and will download merged part fr
|
||||
M(ParallelReplicasStealingLeftoversMicroseconds, "Time spent collecting orphaned segments") \
|
||||
M(ParallelReplicasCollectingOwnedSegmentsMicroseconds, "Time spent collecting segments meant by hash") \
|
||||
\
|
||||
M(PerfCpuCycles, "Total cycles. Be wary of what happens during CPU frequency scaling.") \
|
||||
M(PerfCPUCycles, "Total cycles. Be wary of what happens during CPU frequency scaling.") \
|
||||
M(PerfInstructions, "Retired instructions. Be careful, these can be affected by various issues, most notably hardware interrupt counts.") \
|
||||
M(PerfCacheReferences, "Cache accesses. Usually, this indicates Last Level Cache accesses, but this may vary depending on your CPU. This may include prefetches and coherency messages; again this depends on the design of your CPU.") \
|
||||
M(PerfCacheMisses, "Cache misses. Usually this indicates Last Level Cache misses; this is intended to be used in conjunction with the PERFCOUNTHWCACHEREFERENCES event to calculate cache miss rates.") \
|
||||
@ -319,12 +321,12 @@ The server successfully detected this situation and will download merged part fr
|
||||
M(PerfBusCycles, "Bus cycles, which can be different from total cycles.") \
|
||||
M(PerfStalledCyclesFrontend, "Stalled cycles during issue.") \
|
||||
M(PerfStalledCyclesBackend, "Stalled cycles during retirement.") \
|
||||
M(PerfRefCpuCycles, "Total cycles; not affected by CPU frequency scaling.") \
|
||||
M(PerfRefCPUCycles, "Total cycles; not affected by CPU frequency scaling.") \
|
||||
\
|
||||
M(PerfCpuClock, "The CPU clock, a high-resolution per-CPU timer") \
|
||||
M(PerfCPUClock, "The CPU clock, a high-resolution per-CPU timer") \
|
||||
M(PerfTaskClock, "A clock count specific to the task that is running") \
|
||||
M(PerfContextSwitches, "Number of context switches") \
|
||||
M(PerfCpuMigrations, "Number of times the process has migrated to a new CPU") \
|
||||
M(PerfCPUMigrations, "Number of times the process has migrated to a new CPU") \
|
||||
M(PerfAlignmentFaults, "Number of alignment faults. These happen when unaligned memory accesses happen; the kernel can handle these but it reduces performance. This happens only on some architectures (never on x86).") \
|
||||
M(PerfEmulationFaults, "Number of emulation faults. The kernel sometimes traps on unimplemented instructions and emulates them for user space. This can negatively impact performance.") \
|
||||
M(PerfMinEnabledTime, "For all events, minimum time that an event was enabled. Used to track event multiplexing influence") \
|
||||
|
@ -1,7 +1,7 @@
|
||||
#include <base/defines.h>
|
||||
#include <Common/TargetSpecific.h>
|
||||
|
||||
#include <Common/CpuId.h>
|
||||
#include <Common/CPUID.h>
|
||||
|
||||
namespace DB
|
||||
{
|
||||
@ -9,25 +9,25 @@ namespace DB
|
||||
UInt32 getSupportedArchs()
|
||||
{
|
||||
UInt32 result = 0;
|
||||
if (Cpu::CpuFlagsCache::have_SSE42)
|
||||
if (CPU::CPUFlagsCache::have_SSE42)
|
||||
result |= static_cast<UInt32>(TargetArch::SSE42);
|
||||
if (Cpu::CpuFlagsCache::have_AVX)
|
||||
if (CPU::CPUFlagsCache::have_AVX)
|
||||
result |= static_cast<UInt32>(TargetArch::AVX);
|
||||
if (Cpu::CpuFlagsCache::have_AVX2)
|
||||
if (CPU::CPUFlagsCache::have_AVX2)
|
||||
result |= static_cast<UInt32>(TargetArch::AVX2);
|
||||
if (Cpu::CpuFlagsCache::have_AVX512F)
|
||||
if (CPU::CPUFlagsCache::have_AVX512F)
|
||||
result |= static_cast<UInt32>(TargetArch::AVX512F);
|
||||
if (Cpu::CpuFlagsCache::have_AVX512BW)
|
||||
if (CPU::CPUFlagsCache::have_AVX512BW)
|
||||
result |= static_cast<UInt32>(TargetArch::AVX512BW);
|
||||
if (Cpu::CpuFlagsCache::have_AVX512VBMI)
|
||||
if (CPU::CPUFlagsCache::have_AVX512VBMI)
|
||||
result |= static_cast<UInt32>(TargetArch::AVX512VBMI);
|
||||
if (Cpu::CpuFlagsCache::have_AVX512VBMI2)
|
||||
if (CPU::CPUFlagsCache::have_AVX512VBMI2)
|
||||
result |= static_cast<UInt32>(TargetArch::AVX512VBMI2);
|
||||
if (Cpu::CpuFlagsCache::have_AMXBF16)
|
||||
if (CPU::CPUFlagsCache::have_AMXBF16)
|
||||
result |= static_cast<UInt32>(TargetArch::AMXBF16);
|
||||
if (Cpu::CpuFlagsCache::have_AMXTILE)
|
||||
if (CPU::CPUFlagsCache::have_AMXTILE)
|
||||
result |= static_cast<UInt32>(TargetArch::AMXTILE);
|
||||
if (Cpu::CpuFlagsCache::have_AMXINT8)
|
||||
if (CPU::CPUFlagsCache::have_AMXINT8)
|
||||
result |= static_cast<UInt32>(TargetArch::AMXINT8);
|
||||
return result;
|
||||
}
|
||||
|
@ -6,10 +6,8 @@
|
||||
#include "ProcfsMetricsProvider.h"
|
||||
#include "hasLinuxCapability.h"
|
||||
|
||||
#include <filesystem>
|
||||
#include <fstream>
|
||||
#include <optional>
|
||||
#include <unordered_set>
|
||||
|
||||
#include <fcntl.h>
|
||||
#include <unistd.h>
|
||||
@ -36,7 +34,7 @@ namespace ProfileEvents
|
||||
extern const Event OSReadBytes;
|
||||
extern const Event OSWriteBytes;
|
||||
|
||||
extern const Event PerfCpuCycles;
|
||||
extern const Event PerfCPUCycles;
|
||||
extern const Event PerfInstructions;
|
||||
extern const Event PerfCacheReferences;
|
||||
extern const Event PerfCacheMisses;
|
||||
@ -45,12 +43,12 @@ namespace ProfileEvents
|
||||
extern const Event PerfBusCycles;
|
||||
extern const Event PerfStalledCyclesFrontend;
|
||||
extern const Event PerfStalledCyclesBackend;
|
||||
extern const Event PerfRefCpuCycles;
|
||||
extern const Event PerfRefCPUCycles;
|
||||
|
||||
extern const Event PerfCpuClock;
|
||||
extern const Event PerfCPUClock;
|
||||
extern const Event PerfTaskClock;
|
||||
extern const Event PerfContextSwitches;
|
||||
extern const Event PerfCpuMigrations;
|
||||
extern const Event PerfCPUMigrations;
|
||||
extern const Event PerfAlignmentFaults;
|
||||
extern const Event PerfEmulationFaults;
|
||||
extern const Event PerfMinEnabledTime;
|
||||
@ -218,7 +216,7 @@ thread_local PerfEventsCounters current_thread_counters;
|
||||
|
||||
// descriptions' source: http://man7.org/linux/man-pages/man2/perf_event_open.2.html
|
||||
static const PerfEventInfo raw_events_info[] = {
|
||||
HARDWARE_EVENT(PERF_COUNT_HW_CPU_CYCLES, PerfCpuCycles),
|
||||
HARDWARE_EVENT(PERF_COUNT_HW_CPU_CYCLES, PerfCPUCycles),
|
||||
HARDWARE_EVENT(PERF_COUNT_HW_INSTRUCTIONS, PerfInstructions),
|
||||
HARDWARE_EVENT(PERF_COUNT_HW_CACHE_REFERENCES, PerfCacheReferences),
|
||||
HARDWARE_EVENT(PERF_COUNT_HW_CACHE_MISSES, PerfCacheMisses),
|
||||
@ -227,13 +225,13 @@ static const PerfEventInfo raw_events_info[] = {
|
||||
HARDWARE_EVENT(PERF_COUNT_HW_BUS_CYCLES, PerfBusCycles),
|
||||
HARDWARE_EVENT(PERF_COUNT_HW_STALLED_CYCLES_FRONTEND, PerfStalledCyclesFrontend),
|
||||
HARDWARE_EVENT(PERF_COUNT_HW_STALLED_CYCLES_BACKEND, PerfStalledCyclesBackend),
|
||||
HARDWARE_EVENT(PERF_COUNT_HW_REF_CPU_CYCLES, PerfRefCpuCycles),
|
||||
HARDWARE_EVENT(PERF_COUNT_HW_REF_CPU_CYCLES, PerfRefCPUCycles),
|
||||
|
||||
// `cpu-clock` is a bit broken according to this: https://stackoverflow.com/a/56967896
|
||||
SOFTWARE_EVENT(PERF_COUNT_SW_CPU_CLOCK, PerfCpuClock),
|
||||
SOFTWARE_EVENT(PERF_COUNT_SW_CPU_CLOCK, PerfCPUClock),
|
||||
SOFTWARE_EVENT(PERF_COUNT_SW_TASK_CLOCK, PerfTaskClock),
|
||||
SOFTWARE_EVENT(PERF_COUNT_SW_CONTEXT_SWITCHES, PerfContextSwitches),
|
||||
SOFTWARE_EVENT(PERF_COUNT_SW_CPU_MIGRATIONS, PerfCpuMigrations),
|
||||
SOFTWARE_EVENT(PERF_COUNT_SW_CPU_MIGRATIONS, PerfCPUMigrations),
|
||||
SOFTWARE_EVENT(PERF_COUNT_SW_ALIGNMENT_FAULTS, PerfAlignmentFaults),
|
||||
SOFTWARE_EVENT(PERF_COUNT_SW_EMULATION_FAULTS, PerfEmulationFaults),
|
||||
|
||||
|
376  src/Coordination/KeeperConstants.cpp  Normal file
@ -0,0 +1,376 @@
|
||||
#include <Common/ProfileEvents.h>
|
||||
#include <Common/CurrentMetrics.h>
|
||||
|
||||
/// Events which are useful for Keeper.
|
||||
/// New events should be added manually.
|
||||
#define APPLY_FOR_KEEPER_PROFILE_EVENTS(M) \
|
||||
M(FileOpen) \
|
||||
M(Seek) \
|
||||
M(ReadBufferFromFileDescriptorRead) \
|
||||
M(ReadBufferFromFileDescriptorReadFailed) \
|
||||
M(ReadBufferFromFileDescriptorReadBytes) \
|
||||
M(WriteBufferFromFileDescriptorWrite) \
|
||||
M(WriteBufferFromFileDescriptorWriteFailed) \
|
||||
M(WriteBufferFromFileDescriptorWriteBytes) \
|
||||
M(FileSync) \
|
||||
M(DirectorySync) \
|
||||
M(FileSyncElapsedMicroseconds) \
|
||||
M(DirectorySyncElapsedMicroseconds) \
|
||||
M(ReadCompressedBytes) \
|
||||
M(CompressedReadBufferBlocks) \
|
||||
M(CompressedReadBufferBytes) \
|
||||
M(AIOWrite) \
|
||||
M(AIOWriteBytes) \
|
||||
M(AIORead) \
|
||||
M(AIOReadBytes) \
|
||||
M(IOBufferAllocs) \
|
||||
M(IOBufferAllocBytes) \
|
||||
M(ArenaAllocChunks) \
|
||||
M(ArenaAllocBytes) \
|
||||
M(CreatedReadBufferOrdinary) \
|
||||
M(CreatedReadBufferDirectIO) \
|
||||
M(CreatedReadBufferDirectIOFailed) \
|
||||
M(CreatedReadBufferMMap) \
|
||||
M(CreatedReadBufferMMapFailed) \
|
||||
M(DiskReadElapsedMicroseconds) \
|
||||
M(DiskWriteElapsedMicroseconds) \
|
||||
M(NetworkReceiveElapsedMicroseconds) \
|
||||
M(NetworkSendElapsedMicroseconds) \
|
||||
M(NetworkReceiveBytes) \
|
||||
M(NetworkSendBytes) \
|
||||
\
|
||||
M(DiskS3GetRequestThrottlerCount) \
|
||||
M(DiskS3GetRequestThrottlerSleepMicroseconds) \
|
||||
M(DiskS3PutRequestThrottlerCount) \
|
||||
M(DiskS3PutRequestThrottlerSleepMicroseconds) \
|
||||
M(S3GetRequestThrottlerCount) \
|
||||
M(S3GetRequestThrottlerSleepMicroseconds) \
|
||||
M(S3PutRequestThrottlerCount) \
|
||||
M(S3PutRequestThrottlerSleepMicroseconds) \
|
||||
M(RemoteReadThrottlerBytes) \
|
||||
M(RemoteReadThrottlerSleepMicroseconds) \
|
||||
M(RemoteWriteThrottlerBytes) \
|
||||
M(RemoteWriteThrottlerSleepMicroseconds) \
|
||||
M(LocalReadThrottlerBytes) \
|
||||
M(LocalReadThrottlerSleepMicroseconds) \
|
||||
M(LocalWriteThrottlerBytes) \
|
||||
M(LocalWriteThrottlerSleepMicroseconds) \
|
||||
M(ThrottlerSleepMicroseconds) \
|
||||
\
|
||||
M(SlowRead) \
|
||||
M(ReadBackoff) \
|
||||
\
|
||||
M(ContextLock) \
|
||||
M(ContextLockWaitMicroseconds) \
|
||||
\
|
||||
M(RWLockAcquiredReadLocks) \
|
||||
M(RWLockAcquiredWriteLocks) \
|
||||
M(RWLockReadersWaitMilliseconds) \
|
||||
M(RWLockWritersWaitMilliseconds) \
|
||||
M(DNSError) \
|
||||
M(RealTimeMicroseconds) \
|
||||
M(UserTimeMicroseconds) \
|
||||
M(SystemTimeMicroseconds) \
|
||||
M(MemoryOvercommitWaitTimeMicroseconds) \
|
||||
M(MemoryAllocatorPurge) \
|
||||
M(MemoryAllocatorPurgeTimeMicroseconds) \
|
||||
M(SoftPageFaults) \
|
||||
M(HardPageFaults) \
|
||||
\
|
||||
M(OSIOWaitMicroseconds) \
|
||||
M(OSCPUWaitMicroseconds) \
|
||||
M(OSCPUVirtualTimeMicroseconds) \
|
||||
M(OSReadBytes) \
|
||||
M(OSWriteBytes) \
|
||||
M(OSReadChars) \
|
||||
M(OSWriteChars) \
|
||||
\
|
||||
M(PerfCPUCycles) \
|
||||
M(PerfInstructions) \
|
||||
M(PerfCacheReferences) \
|
||||
M(PerfCacheMisses) \
|
||||
M(PerfBranchInstructions) \
|
||||
M(PerfBranchMisses) \
|
||||
M(PerfBusCycles) \
|
||||
M(PerfStalledCyclesFrontend) \
|
||||
M(PerfStalledCyclesBackend) \
|
||||
M(PerfRefCPUCycles) \
|
||||
\
|
||||
M(PerfCPUClock) \
|
||||
M(PerfTaskClock) \
|
||||
M(PerfContextSwitches) \
|
||||
M(PerfCPUMigrations) \
|
||||
M(PerfAlignmentFaults) \
|
||||
M(PerfEmulationFaults) \
|
||||
M(PerfMinEnabledTime) \
|
||||
M(PerfMinEnabledRunningTime) \
|
||||
M(PerfDataTLBReferences) \
|
||||
M(PerfDataTLBMisses) \
|
||||
M(PerfInstructionTLBReferences) \
|
||||
M(PerfInstructionTLBMisses) \
|
||||
M(PerfLocalMemoryReferences) \
|
||||
M(PerfLocalMemoryMisses) \
|
||||
\
|
||||
M(CreatedHTTPConnections) \
|
||||
M(CannotWriteToWriteBufferDiscard) \
|
||||
\
|
||||
M(S3ReadMicroseconds) \
|
||||
M(S3ReadRequestsCount) \
|
||||
M(S3ReadRequestsErrors) \
|
||||
M(S3ReadRequestsThrottling) \
|
||||
M(S3ReadRequestsRedirects) \
|
||||
\
|
||||
M(S3WriteMicroseconds) \
|
||||
M(S3WriteRequestsCount) \
|
||||
M(S3WriteRequestsErrors) \
|
||||
M(S3WriteRequestsThrottling) \
|
||||
M(S3WriteRequestsRedirects) \
|
||||
\
|
||||
M(DiskS3ReadMicroseconds) \
|
||||
M(DiskS3ReadRequestsCount) \
|
||||
M(DiskS3ReadRequestsErrors) \
|
||||
M(DiskS3ReadRequestsThrottling) \
|
||||
M(DiskS3ReadRequestsRedirects) \
|
||||
\
|
||||
M(DiskS3WriteMicroseconds) \
|
||||
M(DiskS3WriteRequestsCount) \
|
||||
M(DiskS3WriteRequestsErrors) \
|
||||
M(DiskS3WriteRequestsThrottling) \
|
||||
M(DiskS3WriteRequestsRedirects) \
|
||||
\
|
||||
M(S3DeleteObjects) \
|
||||
M(S3CopyObject) \
|
||||
M(S3ListObjects) \
|
||||
M(S3HeadObject) \
|
||||
M(S3GetObjectAttributes) \
|
||||
M(S3CreateMultipartUpload) \
|
||||
M(S3UploadPartCopy) \
|
||||
M(S3UploadPart) \
|
||||
M(S3AbortMultipartUpload) \
|
||||
M(S3CompleteMultipartUpload) \
|
||||
M(S3PutObject) \
|
||||
M(S3GetObject) \
|
||||
\
|
||||
M(AzureUploadPart) \
|
||||
M(DiskAzureUploadPart) \
|
||||
M(AzureCopyObject) \
|
||||
M(DiskAzureCopyObject) \
|
||||
M(AzureDeleteObjects) \
|
||||
M(AzureListObjects) \
|
||||
\
|
||||
M(DiskS3DeleteObjects) \
|
||||
M(DiskS3CopyObject) \
|
||||
M(DiskS3ListObjects) \
|
||||
M(DiskS3HeadObject) \
|
||||
M(DiskS3GetObjectAttributes) \
|
||||
M(DiskS3CreateMultipartUpload) \
|
||||
M(DiskS3UploadPartCopy) \
|
||||
M(DiskS3UploadPart) \
|
||||
M(DiskS3AbortMultipartUpload) \
|
||||
M(DiskS3CompleteMultipartUpload) \
|
||||
M(DiskS3PutObject) \
|
||||
M(DiskS3GetObject) \
|
||||
\
|
||||
M(S3Clients) \
|
||||
M(TinyS3Clients) \
|
||||
\
|
||||
M(ReadBufferFromS3Microseconds) \
|
||||
M(ReadBufferFromS3InitMicroseconds) \
|
||||
M(ReadBufferFromS3Bytes) \
|
||||
M(ReadBufferFromS3RequestsErrors) \
|
||||
M(ReadBufferFromS3ResetSessions) \
|
||||
M(ReadBufferFromS3PreservedSessions) \
|
||||
\
|
||||
M(ReadWriteBufferFromHTTPPreservedSessions) \
|
||||
\
|
||||
M(WriteBufferFromS3Microseconds) \
|
||||
M(WriteBufferFromS3Bytes) \
|
||||
M(WriteBufferFromS3RequestsErrors) \
|
||||
M(WriteBufferFromS3WaitInflightLimitMicroseconds) \
|
||||
M(RemoteFSSeeks) \
|
||||
M(RemoteFSPrefetches) \
|
||||
M(RemoteFSCancelledPrefetches) \
|
||||
M(RemoteFSUnusedPrefetches) \
|
||||
M(RemoteFSPrefetchedReads) \
|
||||
M(RemoteFSPrefetchedBytes) \
|
||||
M(RemoteFSUnprefetchedReads) \
|
||||
M(RemoteFSUnprefetchedBytes) \
|
||||
M(RemoteFSLazySeeks) \
|
||||
M(RemoteFSSeeksWithReset) \
|
||||
M(RemoteFSBuffers) \
|
||||
\
|
||||
M(ThreadpoolReaderTaskMicroseconds) \
|
||||
M(ThreadpoolReaderPrepareMicroseconds) \
|
||||
M(ThreadpoolReaderReadBytes) \
|
||||
M(ThreadpoolReaderSubmit) \
|
||||
M(ThreadpoolReaderSubmitReadSynchronously) \
|
||||
M(ThreadpoolReaderSubmitReadSynchronouslyBytes) \
|
||||
M(ThreadpoolReaderSubmitReadSynchronouslyMicroseconds) \
|
||||
M(ThreadpoolReaderSubmitLookupInCacheMicroseconds) \
|
||||
M(AsynchronousReaderIgnoredBytes) \
|
||||
\
|
||||
M(FileSegmentWaitReadBufferMicroseconds) \
|
||||
M(FileSegmentReadMicroseconds) \
|
||||
M(FileSegmentCacheWriteMicroseconds) \
|
||||
M(FileSegmentPredownloadMicroseconds) \
|
||||
M(FileSegmentUsedBytes) \
|
||||
\
|
||||
M(ReadBufferSeekCancelConnection) \
|
||||
\
|
||||
M(SleepFunctionCalls) \
|
||||
M(SleepFunctionMicroseconds) \
|
||||
M(SleepFunctionElapsedMicroseconds) \
|
||||
\
|
||||
M(ThreadPoolReaderPageCacheHit) \
|
||||
M(ThreadPoolReaderPageCacheHitBytes) \
|
||||
M(ThreadPoolReaderPageCacheHitElapsedMicroseconds) \
|
||||
M(ThreadPoolReaderPageCacheMiss) \
|
||||
M(ThreadPoolReaderPageCacheMissBytes) \
|
||||
M(ThreadPoolReaderPageCacheMissElapsedMicroseconds) \
|
||||
\
|
||||
M(AsynchronousReadWaitMicroseconds) \
|
||||
M(SynchronousReadWaitMicroseconds) \
|
||||
M(AsynchronousRemoteReadWaitMicroseconds) \
|
||||
M(SynchronousRemoteReadWaitMicroseconds) \
|
||||
\
|
||||
M(ExternalDataSourceLocalCacheReadBytes) \
|
||||
\
|
||||
M(MainConfigLoads) \
|
||||
\
|
||||
M(KeeperPacketsSent) \
|
||||
M(KeeperPacketsReceived) \
|
||||
M(KeeperRequestTotal) \
|
||||
M(KeeperLatency) \
|
||||
M(KeeperCommits) \
|
||||
M(KeeperCommitsFailed) \
|
||||
M(KeeperSnapshotCreations) \
|
||||
M(KeeperSnapshotCreationsFailed) \
|
||||
M(KeeperSnapshotApplys) \
|
||||
M(KeeperSnapshotApplysFailed) \
|
||||
M(KeeperReadSnapshot) \
|
||||
M(KeeperSaveSnapshot) \
|
||||
M(KeeperCreateRequest) \
|
||||
M(KeeperRemoveRequest) \
|
||||
M(KeeperSetRequest) \
|
||||
M(KeeperReconfigRequest) \
|
||||
M(KeeperCheckRequest) \
|
||||
M(KeeperMultiRequest) \
|
||||
M(KeeperMultiReadRequest) \
|
||||
M(KeeperGetRequest) \
|
||||
M(KeeperListRequest) \
|
||||
M(KeeperExistsRequest) \
|
||||
\
|
||||
M(IOUringSQEsSubmitted) \
|
||||
M(IOUringSQEsResubmits) \
|
||||
M(IOUringCQEsCompleted) \
|
||||
M(IOUringCQEsFailed) \
|
||||
\
|
||||
M(LogTest) \
|
||||
M(LogTrace) \
|
||||
M(LogDebug) \
|
||||
M(LogInfo) \
|
||||
M(LogWarning) \
|
||||
M(LogError) \
|
||||
M(LogFatal) \
|
||||
\
|
||||
M(InterfaceHTTPSendBytes) \
|
||||
M(InterfaceHTTPReceiveBytes) \
|
||||
M(InterfaceNativeSendBytes) \
|
||||
M(InterfaceNativeReceiveBytes) \
|
||||
M(InterfacePrometheusSendBytes) \
|
||||
M(InterfacePrometheusReceiveBytes) \
|
||||
M(InterfaceInterserverSendBytes) \
|
||||
M(InterfaceInterserverReceiveBytes) \
|
||||
M(InterfaceMySQLSendBytes) \
|
||||
M(InterfaceMySQLReceiveBytes) \
|
||||
M(InterfacePostgreSQLSendBytes) \
|
||||
M(InterfacePostgreSQLReceiveBytes)
|
||||
|
||||
namespace ProfileEvents
|
||||
{
|
||||
#define M(NAME) extern const Event NAME;
|
||||
APPLY_FOR_KEEPER_PROFILE_EVENTS(M)
|
||||
#undef M
|
||||
|
||||
#define M(NAME) NAME,
|
||||
extern const std::vector<Event> keeper_profile_events
|
||||
{
|
||||
APPLY_FOR_KEEPER_PROFILE_EVENTS(M)
|
||||
};
|
||||
#undef M
|
||||
}
|
||||
|
||||
/// Metrics which are useful for Keeper.
|
||||
/// New metrics should be added manually.
|
||||
#define APPLY_FOR_KEEPER_METRICS(M) \
|
||||
M(BackgroundCommonPoolTask) \
|
||||
M(BackgroundCommonPoolSize) \
|
||||
M(TCPConnection) \
|
||||
M(HTTPConnection) \
|
||||
M(OpenFileForRead) \
|
||||
M(OpenFileForWrite) \
|
||||
M(Read) \
|
||||
M(RemoteRead) \
|
||||
M(Write) \
|
||||
M(NetworkReceive) \
|
||||
M(NetworkSend) \
|
||||
M(MemoryTracking) \
|
||||
M(ContextLockWait) \
|
||||
M(Revision) \
|
||||
M(VersionInteger) \
|
||||
M(RWLockWaitingReaders) \
|
||||
M(RWLockWaitingWriters) \
|
||||
M(RWLockActiveReaders) \
|
||||
M(RWLockActiveWriters) \
|
||||
M(GlobalThread) \
|
||||
M(GlobalThreadActive) \
|
||||
M(GlobalThreadScheduled) \
|
||||
M(LocalThread) \
|
||||
M(LocalThreadActive) \
|
||||
M(LocalThreadScheduled) \
|
||||
M(IOPrefetchThreads) \
|
||||
M(IOPrefetchThreadsActive) \
|
||||
M(IOPrefetchThreadsScheduled) \
|
||||
M(IOWriterThreads) \
|
||||
M(IOWriterThreadsActive) \
|
||||
M(IOWriterThreadsScheduled) \
|
||||
M(IOThreads) \
|
||||
M(IOThreadsActive) \
|
||||
M(IOThreadsScheduled) \
|
||||
M(ThreadPoolRemoteFSReaderThreads) \
|
||||
M(ThreadPoolRemoteFSReaderThreadsActive) \
|
||||
M(ThreadPoolRemoteFSReaderThreadsScheduled) \
|
||||
M(ThreadPoolFSReaderThreads) \
|
||||
M(ThreadPoolFSReaderThreadsActive) \
|
||||
M(ThreadPoolFSReaderThreadsScheduled) \
|
||||
M(DiskObjectStorageAsyncThreads) \
|
||||
M(DiskObjectStorageAsyncThreadsActive) \
|
||||
M(ObjectStorageS3Threads) \
|
||||
M(ObjectStorageS3ThreadsActive) \
|
||||
M(ObjectStorageS3ThreadsScheduled) \
|
||||
M(ObjectStorageAzureThreads) \
|
||||
M(ObjectStorageAzureThreadsActive) \
|
||||
M(ObjectStorageAzureThreadsScheduled) \
|
||||
M(MMappedFiles) \
|
||||
M(MMappedFileBytes) \
|
||||
M(AsynchronousReadWait) \
|
||||
M(S3Requests) \
|
||||
M(KeeperAliveConnections) \
|
||||
M(KeeperOutstandingRequets) \
|
||||
M(ThreadsInOvercommitTracker) \
|
||||
M(IOUringPendingEvents) \
|
||||
M(IOUringInFlightEvents) \
|
||||
|
||||
namespace CurrentMetrics
|
||||
{
|
||||
#define M(NAME) extern const Metric NAME;
|
||||
APPLY_FOR_KEEPER_METRICS(M)
|
||||
#undef M
|
||||
|
||||
#define M(NAME) NAME,
|
||||
extern const std::vector<Metric> keeper_metrics
|
||||
{
|
||||
APPLY_FOR_KEEPER_METRICS(M)
|
||||
};
|
||||
#undef M
|
||||
}
|
@ -7,6 +7,7 @@
|
||||
#include <Poco/Util/AbstractConfiguration.h>
|
||||
#include <Coordination/KeeperConstants.h>
|
||||
#include <Common/logger_useful.h>
|
||||
#include <Server/CloudPlacementInfo.h>
|
||||
#include <Coordination/KeeperFeatureFlags.h>
|
||||
#include <boost/algorithm/string.hpp>
|
||||
|
||||
@ -37,26 +38,11 @@ void KeeperContext::initialize(const Poco::Util::AbstractConfiguration & config,
|
||||
{
|
||||
dispatcher = dispatcher_;
|
||||
|
||||
if (config.hasProperty("keeper_server.availability_zone"))
|
||||
const auto keeper_az = PlacementInfo::PlacementInfo::instance().getAvailabilityZone();
|
||||
if (!keeper_az.empty())
|
||||
{
|
||||
auto keeper_az = config.getString("keeper_server.availability_zone.value", "");
|
||||
const auto auto_detect_for_cloud = config.getBool("keeper_server.availability_zone.enable_auto_detection_on_cloud", false);
|
||||
if (keeper_az.empty() && auto_detect_for_cloud)
|
||||
{
|
||||
try
|
||||
{
|
||||
keeper_az = DB::S3::getRunningAvailabilityZone();
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
tryLogCurrentException(__PRETTY_FUNCTION__);
|
||||
}
|
||||
}
|
||||
if (!keeper_az.empty())
|
||||
{
|
||||
system_nodes_with_data[keeper_availability_zone_path] = keeper_az;
|
||||
LOG_INFO(getLogger("KeeperContext"), "Initialize the KeeperContext with availability zone: '{}'", keeper_az);
|
||||
}
|
||||
system_nodes_with_data[keeper_availability_zone_path] = keeper_az;
|
||||
LOG_INFO(getLogger("KeeperContext"), "Initialize the KeeperContext with availability zone: '{}'", keeper_az);
|
||||
}
|
||||
|
||||
updateKeeperMemorySoftLimit(config);
|
||||
|
@ -152,7 +152,7 @@ bool notEqualsOp(A a, B b)
}

/// Converts numeric to an equal numeric of other type.
/// When `strict` is `true` check that result exactly same as input, otherwise just check overflow
/// When `strict` is `true` check that result exactly the same as input, otherwise just check overflow
template <typename From, typename To, bool strict = true>
inline bool NO_SANITIZE_UNDEFINED convertNumeric(From value, To & result)
{
|
||||
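
Aside, not part of this commit: a minimal standalone sketch of the round-trip idea behind such strict conversions — convert, then verify the value maps back unchanged (the helper name is illustrative, not the accurate:: API used in the diff):

#include <cstdint>
#include <iostream>

/// Illustrative only: convert `value` to To and report failure when converting
/// back does not reproduce the original (overflow or precision loss).
template <typename To, typename From>
bool convertExact(From value, To & result)
{
    result = static_cast<To>(value);
    return static_cast<From>(result) == value;
}

int main()
{
    int32_t small = 0;
    std::cout << convertExact(int64_t{5'000'000'000}, small) << '\n'; /// 0 - does not fit into int32_t
    std::cout << convertExact(int64_t{42}, small) << '\n';            /// 1
}
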
|
@ -1146,6 +1146,8 @@ class IColumn;
M(Bool, output_format_sql_insert_use_replace, false, "Use REPLACE statement instead of INSERT", 0) \
M(Bool, output_format_sql_insert_quote_names, true, "Quote column names with '`' characters", 0) \
\
M(Bool, output_format_values_escape_quote_with_quote, false, "If true escape ' with '', otherwise quoted with \\'", 0) \
\
M(Bool, output_format_bson_string_as_string, false, "Use BSON String type instead of Binary for String columns.", 0) \
M(Bool, input_format_bson_skip_fields_with_unsupported_types_in_schema_inference, false, "Skip fields with unsupported types while schema inference for format BSON.", 0) \
\

@ -1,8 +1,7 @@
#include <Core/SettingsFields.h>

#include <Core/Field.h>
#include <Core/AccurateComparison.h>
#include <Common/getNumberOfPhysicalCPUCores.h>
#include <Common/FieldVisitorConvertToNumber.h>
#include <Common/logger_useful.h>
#include <DataTypes/DataTypeMap.h>
#include <DataTypes/DataTypeString.h>
@ -13,6 +12,7 @@

#include <cmath>


namespace DB
{
namespace ErrorCodes
@ -20,6 +20,7 @@ namespace ErrorCodes
extern const int SIZE_OF_FIXED_STRING_DOESNT_MATCH;
extern const int CANNOT_PARSE_BOOL;
extern const int CANNOT_PARSE_NUMBER;
extern const int CANNOT_CONVERT_TYPE;
}

@ -48,9 +49,51 @@ namespace
|
||||
T fieldToNumber(const Field & f)
|
||||
{
|
||||
if (f.getType() == Field::Types::String)
|
||||
{
|
||||
return stringToNumber<T>(f.get<const String &>());
|
||||
}
|
||||
else if (f.getType() == Field::Types::UInt64)
|
||||
{
|
||||
T result;
|
||||
if (!accurate::convertNumeric(f.get<UInt64>(), result))
|
||||
throw Exception(ErrorCodes::CANNOT_CONVERT_TYPE, "Field value {} is out of range of {} type", f, demangle(typeid(T).name()));
|
||||
return result;
|
||||
}
|
||||
else if (f.getType() == Field::Types::Int64)
|
||||
{
|
||||
T result;
|
||||
if (!accurate::convertNumeric(f.get<Int64>(), result))
|
||||
throw Exception(ErrorCodes::CANNOT_CONVERT_TYPE, "Field value {} is out of range of {} type", f, demangle(typeid(T).name()));
|
||||
return result;
|
||||
}
|
||||
else if (f.getType() == Field::Types::Bool)
|
||||
{
|
||||
return T(f.get<bool>());
|
||||
}
|
||||
else if (f.getType() == Field::Types::Float64)
|
||||
{
|
||||
Float64 x = f.get<Float64>();
|
||||
if constexpr (std::is_floating_point_v<T>)
|
||||
{
|
||||
return T(x);
|
||||
}
|
||||
else
|
||||
{
|
||||
if (!isFinite(x))
|
||||
{
|
||||
/// Conversion of infinite values to integer is undefined.
|
||||
throw Exception(ErrorCodes::CANNOT_CONVERT_TYPE, "Cannot convert infinite value to integer type");
|
||||
}
|
||||
else if (x > Float64(std::numeric_limits<T>::max()) || x < Float64(std::numeric_limits<T>::lowest()))
|
||||
{
|
||||
throw Exception(ErrorCodes::CANNOT_CONVERT_TYPE, "Cannot convert out of range floating point value to integer type");
|
||||
}
|
||||
else
|
||||
return T(x);
|
||||
}
|
||||
}
|
||||
else
|
||||
return applyVisitor(FieldVisitorConvertToNumber<T>(), f);
|
||||
throw Exception(ErrorCodes::CANNOT_CONVERT_TYPE, "Invalid value {} of the setting, which needs {}", f, demangle(typeid(T).name()));
|
||||
}
|
||||
|
||||
Map stringToMap(const String & str)
|
||||
@ -174,7 +217,7 @@ namespace
if (f.getType() == Field::Types::String)
return stringToMaxThreads(f.get<const String &>());
else
return applyVisitor(FieldVisitorConvertToNumber<UInt64>(), f);
return fieldToNumber<UInt64>(f);
}
}

@ -334,9 +334,12 @@ bool SerializationString::tryDeserializeTextEscaped(IColumn & column, ReadBuffer
return read<bool>(column, [&](ColumnString::Chars & data) { readEscapedStringInto(data, istr); return true; });
}

void SerializationString::serializeTextQuoted(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings &) const
void SerializationString::serializeTextQuoted(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const
{
writeQuotedString(assert_cast<const ColumnString &>(column).getDataAt(row_num), ostr);
if (settings.values.escape_quote_with_quote)
writeQuotedStringPostgreSQL(assert_cast<const ColumnString &>(column).getDataAt(row_num).toView(), ostr);
else
writeQuotedString(assert_cast<const ColumnString &>(column).getDataAt(row_num), ostr);
}
|
||||
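
Aside, not part of this commit: a standalone sketch of the two quoting conventions that output_format_values_escape_quote_with_quote toggles (the helper names are illustrative, not the ClickHouse writeQuotedString* implementations):

#include <string>
#include <iostream>

/// Default style: escape ' and \ with a backslash.
std::string quoteWithBackslash(const std::string & s)
{
    std::string out = "'";
    for (char c : s)
    {
        if (c == '\'' || c == '\\')
            out += '\\';
        out += c;
    }
    return out + "'";
}

/// PostgreSQL/SQL-standard style: escape ' by doubling it.
std::string quoteWithDoubledQuote(const std::string & s)
{
    std::string out = "'";
    for (char c : s)
    {
        out += c;
        if (c == '\'')
            out += '\'';
    }
    return out + "'";
}

int main()
{
    std::cout << quoteWithBackslash("it's") << '\n';     /// 'it\'s'
    std::cout << quoteWithDoubledQuote("it's") << '\n';  /// 'it''s'
}
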
|
||||
|
||||
|
@ -62,7 +62,7 @@ void SerializationTuple::serializeBinary(const IColumn & column, size_t row_num,
|
||||
}
|
||||
|
||||
|
||||
template <typename ReturnType = void, typename F>
|
||||
template <typename ReturnType, typename F>
|
||||
static ReturnType addElementSafe(size_t num_elems, IColumn & column, F && impl)
|
||||
{
|
||||
static constexpr bool throw_exception = std::is_same_v<ReturnType, void>;
|
||||
@ -85,11 +85,7 @@ static ReturnType addElementSafe(size_t num_elems, IColumn & column, F && impl)
|
||||
|
||||
try
|
||||
{
|
||||
if constexpr (throw_exception)
|
||||
{
|
||||
impl();
|
||||
}
|
||||
else if (!impl())
|
||||
if (!impl())
|
||||
{
|
||||
restore_elements();
|
||||
return ReturnType(false);
|
||||
@ -125,10 +121,11 @@ static ReturnType addElementSafe(size_t num_elems, IColumn & column, F && impl)
|
||||
|
||||
void SerializationTuple::deserializeBinary(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const
|
||||
{
|
||||
addElementSafe(elems.size(), column, [&]
|
||||
addElementSafe<void>(elems.size(), column, [&]
|
||||
{
|
||||
for (size_t i = 0; i < elems.size(); ++i)
|
||||
elems[i]->deserializeBinary(extractElementColumn(column, i), istr, settings);
|
||||
return true;
|
||||
});
|
||||
}
|
||||
|
||||
@ -165,7 +162,7 @@ ReturnType SerializationTuple::deserializeTextImpl(IColumn & column, ReadBuffer
|
||||
if constexpr (throw_exception)
|
||||
assertChar(',', istr);
|
||||
else if (!checkChar(',', istr))
|
||||
return ReturnType(false);
|
||||
return false;
|
||||
|
||||
skipWhitespaceIfAny(istr);
|
||||
}
|
||||
@ -203,16 +200,16 @@ ReturnType SerializationTuple::deserializeTextImpl(IColumn & column, ReadBuffer
|
||||
if constexpr (throw_exception)
|
||||
assertChar(')', istr);
|
||||
else if (!checkChar(')', istr))
|
||||
return ReturnType(false);
|
||||
return false;
|
||||
|
||||
if (whole && !istr.eof())
|
||||
{
|
||||
if constexpr (throw_exception)
|
||||
throwUnexpectedDataAfterParsedValue(column, istr, settings, "Tuple");
|
||||
return ReturnType(false);
|
||||
return false;
|
||||
}
|
||||
|
||||
return ReturnType(true);
|
||||
return true;
|
||||
};
|
||||
|
||||
return addElementSafe<ReturnType>(elems.size(), column, impl);
|
||||
@ -323,6 +320,7 @@ ReturnType SerializationTuple::deserializeTextJSONImpl(IColumn & column, ReadBuf
|
||||
SerializationNullable::deserializeNullAsDefaultOrNestedTextJSON(element_column, istr, settings, elems[element_pos]);
|
||||
else
|
||||
elems[element_pos]->deserializeTextJSON(element_column, istr, settings);
|
||||
return true;
|
||||
}
|
||||
else
|
||||
{
|
||||
@ -353,7 +351,7 @@ ReturnType SerializationTuple::deserializeTextJSONImpl(IColumn & column, ReadBuf
|
||||
{
|
||||
if constexpr (throw_exception)
|
||||
throw Exception(ErrorCodes::INCORRECT_DATA, "Unexpected number of elements in named tuple. Expected no more than {} (consider enabling input_format_json_ignore_unknown_keys_in_named_tuple setting)", elems.size());
|
||||
return ReturnType(false);
|
||||
return false;
|
||||
}
|
||||
|
||||
if (processed + skipped > 0)
|
||||
@ -361,7 +359,7 @@ ReturnType SerializationTuple::deserializeTextJSONImpl(IColumn & column, ReadBuf
|
||||
if constexpr (throw_exception)
|
||||
assertChar(',', istr);
|
||||
else if (!checkChar(',', istr))
|
||||
return ReturnType(false);
|
||||
return false;
|
||||
skipWhitespaceIfAny(istr);
|
||||
}
|
||||
|
||||
@ -369,13 +367,13 @@ ReturnType SerializationTuple::deserializeTextJSONImpl(IColumn & column, ReadBuf
|
||||
if constexpr (throw_exception)
|
||||
readDoubleQuotedString(name, istr);
|
||||
else if (!tryReadDoubleQuotedString(name, istr))
|
||||
return ReturnType(false);
|
||||
return false;
|
||||
|
||||
skipWhitespaceIfAny(istr);
|
||||
if constexpr (throw_exception)
|
||||
assertChar(':', istr);
|
||||
else if (!checkChar(':', istr))
|
||||
return ReturnType(false);
|
||||
return false;
|
||||
skipWhitespaceIfAny(istr);
|
||||
|
||||
const size_t element_pos = getPositionByName(name);
|
||||
@ -386,7 +384,7 @@ ReturnType SerializationTuple::deserializeTextJSONImpl(IColumn & column, ReadBuf
|
||||
if constexpr (throw_exception)
|
||||
skipJSONField(istr, name);
|
||||
else if (!trySkipJSONField(istr, name))
|
||||
return ReturnType(false);
|
||||
return false;
|
||||
|
||||
skipWhitespaceIfAny(istr);
|
||||
++skipped;
|
||||
@ -396,7 +394,7 @@ ReturnType SerializationTuple::deserializeTextJSONImpl(IColumn & column, ReadBuf
|
||||
{
|
||||
if constexpr (throw_exception)
|
||||
throw Exception(ErrorCodes::NOT_FOUND_COLUMN_IN_BLOCK, "Tuple doesn't have element with name '{}', enable setting input_format_json_ignore_unknown_keys_in_named_tuple", name);
|
||||
return ReturnType(false);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
@ -418,7 +416,7 @@ ReturnType SerializationTuple::deserializeTextJSONImpl(IColumn & column, ReadBuf
|
||||
else
|
||||
{
|
||||
if (!deserialize_element(element_column, element_pos))
|
||||
return ReturnType(false);
|
||||
return false;
|
||||
}
|
||||
|
||||
skipWhitespaceIfAny(istr);
|
||||
@ -428,7 +426,7 @@ ReturnType SerializationTuple::deserializeTextJSONImpl(IColumn & column, ReadBuf
|
||||
if constexpr (throw_exception)
|
||||
assertChar('}', istr);
|
||||
else if (!checkChar('}', istr))
|
||||
return ReturnType(false);
|
||||
return false;
|
||||
|
||||
/// Check if we have missing elements.
|
||||
if (processed != elems.size())
|
||||
@ -446,7 +444,7 @@ ReturnType SerializationTuple::deserializeTextJSONImpl(IColumn & column, ReadBuf
|
||||
"JSON object doesn't contain tuple element {}. If you want to insert defaults in case of missing elements, "
|
||||
"enable setting input_format_json_defaults_for_missing_elements_in_named_tuple",
|
||||
elems[element_pos]->getElementName());
|
||||
return ReturnType(false);
|
||||
return false;
|
||||
}
|
||||
|
||||
auto & element_column = extractElementColumn(column, element_pos);
|
||||
@ -454,7 +452,7 @@ ReturnType SerializationTuple::deserializeTextJSONImpl(IColumn & column, ReadBuf
|
||||
}
|
||||
}
|
||||
|
||||
return ReturnType(true);
|
||||
return true;
|
||||
};
|
||||
|
||||
return addElementSafe<ReturnType>(elems.size(), column, impl);
|
||||
@ -465,7 +463,7 @@ ReturnType SerializationTuple::deserializeTextJSONImpl(IColumn & column, ReadBuf
|
||||
if constexpr (throw_exception)
|
||||
assertChar('[', istr);
|
||||
else if (!checkChar('[', istr))
|
||||
return ReturnType(false);
|
||||
return false;
|
||||
skipWhitespaceIfAny(istr);
|
||||
|
||||
auto impl = [&]()
|
||||
@ -478,7 +476,7 @@ ReturnType SerializationTuple::deserializeTextJSONImpl(IColumn & column, ReadBuf
|
||||
if constexpr (throw_exception)
|
||||
assertChar(',', istr);
|
||||
else if (!checkChar(',', istr))
|
||||
return ReturnType(false);
|
||||
return false;
|
||||
skipWhitespaceIfAny(istr);
|
||||
}
|
||||
|
||||
@ -487,16 +485,16 @@ ReturnType SerializationTuple::deserializeTextJSONImpl(IColumn & column, ReadBuf
|
||||
if constexpr (throw_exception)
|
||||
deserialize_element(element_column, i);
|
||||
else if (!deserialize_element(element_column, i))
|
||||
return ReturnType(false);
|
||||
return false;
|
||||
}
|
||||
|
||||
skipWhitespaceIfAny(istr);
|
||||
if constexpr (throw_exception)
|
||||
assertChar(']', istr);
|
||||
else if (!checkChar(']', istr))
|
||||
return ReturnType(false);
|
||||
return false;
|
||||
|
||||
return ReturnType(true);
|
||||
return true;
|
||||
};
|
||||
|
||||
return addElementSafe<ReturnType>(elems.size(), column, impl);
|
||||
@ -538,7 +536,7 @@ void SerializationTuple::serializeTextCSV(const IColumn & column, size_t row_num
|
||||
|
||||
void SerializationTuple::deserializeTextCSV(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const
|
||||
{
|
||||
addElementSafe(elems.size(), column, [&]
|
||||
addElementSafe<void>(elems.size(), column, [&]
|
||||
{
|
||||
const size_t size = elems.size();
|
||||
for (size_t i = 0; i < size; ++i)
|
||||
@ -556,6 +554,7 @@ void SerializationTuple::deserializeTextCSV(IColumn & column, ReadBuffer & istr,
|
||||
else
|
||||
elems[i]->deserializeTextCSV(element_column, istr, settings);
|
||||
}
|
||||
return true;
|
||||
});
|
||||
}
|
||||
|
||||
|
@ -146,9 +146,18 @@ StoragePtr DatabaseFilesystem::getTableImpl(const String & name, ContextPtr cont
if (!checkTableFilePath(table_path, context_, throw_on_error))
return {};

auto format = FormatFactory::instance().getFormatFromFileName(table_path, throw_on_error);
if (format.empty())
return {};
String format;
if (throw_on_error)
{
format = FormatFactory::instance().getFormatFromFileName(table_path);
}
else
{
auto format_maybe = FormatFactory::instance().tryGetFormatFromFileName(table_path);
if (!format_maybe)
return {};
format = *format_maybe;
}

auto ast_function_ptr = makeASTFunction("file", std::make_shared<ASTLiteral>(table_path), std::make_shared<ASTLiteral>(format));
|
||||
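
Aside, not part of this commit: the lookup above boils down to mapping a file extension to a format name and signalling "unknown" instead of throwing. A rough standalone sketch (the extension/format pairs and the helper name are illustrative, not FormatFactory's actual table):

#include <map>
#include <optional>
#include <string>
#include <iostream>

/// Illustrative only: resolve a format name from a file extension,
/// returning std::nullopt when the extension is unknown.
std::optional<std::string> formatFromFileName(const std::string & path)
{
    static const std::map<std::string, std::string> by_extension
        = {{"csv", "CSV"}, {"tsv", "TabSeparated"}, {"parquet", "Parquet"}, {"ndjson", "JSONEachRow"}};

    auto dot = path.find_last_of('.');
    if (dot == std::string::npos)
        return std::nullopt;

    auto it = by_extension.find(path.substr(dot + 1));
    if (it == by_extension.end())
        return std::nullopt;
    return it->second;
}

int main()
{
    std::cout << formatFromFileName("data.parquet").value_or("<unknown>") << '\n';
    std::cout << formatFromFileName("data.bin").value_or("<unknown>") << '\n';
}
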
|
||||
|
@ -227,7 +227,7 @@ void parseMatchNode(UInt64 parent_id, UInt64 & id, const YAML::Node & node, Resu

if (!match.contains(key_name))
{
throw Exception(ErrorCodes::INVALID_CONFIG_PARAMETER, "Yaml match rule must contain key {}", key_name);
throw Exception(ErrorCodes::INVALID_CONFIG_PARAMETER, "YAML match rule must contain key {}", key_name);
}
for (const auto & [key, node_] : match)
{

@ -69,12 +69,10 @@ bool AsynchronousBoundedReadBuffer::hasPendingDataToRead()
|
||||
return false;
|
||||
|
||||
if (file_offset_of_buffer_end > *read_until_position)
|
||||
{
|
||||
throw Exception(
|
||||
ErrorCodes::LOGICAL_ERROR,
|
||||
"Read beyond last offset ({} > {}, info: {})",
|
||||
file_offset_of_buffer_end, *read_until_position, impl->getInfoForLog());
|
||||
}
|
||||
"Read beyond last offset ({} > {}): file size = {}, info: {}",
|
||||
file_offset_of_buffer_end, *read_until_position, impl->getFileSize(), impl->getInfoForLog());
|
||||
}
|
||||
|
||||
return true;
|
||||
@ -126,14 +124,15 @@ void AsynchronousBoundedReadBuffer::setReadUntilPosition(size_t position)
|
||||
if (position < file_offset_of_buffer_end)
|
||||
{
|
||||
/// file has been read beyond new read until position already
|
||||
if (working_buffer.size() >= file_offset_of_buffer_end - position)
|
||||
if (available() >= file_offset_of_buffer_end - position)
|
||||
{
|
||||
/// new read until position is inside working buffer
|
||||
/// new read until position is after the current position in the working buffer
|
||||
file_offset_of_buffer_end = position;
|
||||
working_buffer.resize(working_buffer.size() - (file_offset_of_buffer_end - position));
|
||||
}
|
||||
else
|
||||
{
|
||||
/// new read until position is before working buffer begin
|
||||
/// new read until position is before the current position in the working buffer
|
||||
throw Exception(
|
||||
ErrorCodes::LOGICAL_ERROR,
|
||||
"Attempt to set read until position before already read data ({} > {}, info: {})",
|
||||
@ -186,6 +185,7 @@ bool AsynchronousBoundedReadBuffer::nextImpl()
|
||||
return false;
|
||||
|
||||
chassert(file_offset_of_buffer_end <= impl->getFileSize());
|
||||
size_t old_file_offset_of_buffer_end = file_offset_of_buffer_end;
|
||||
|
||||
IAsynchronousReader::Result result;
|
||||
if (prefetch_future.valid())
|
||||
@ -221,6 +221,9 @@ bool AsynchronousBoundedReadBuffer::nextImpl()
|
||||
ProfileEvents::increment(ProfileEvents::RemoteFSUnprefetchedBytes, result.size);
|
||||
}
|
||||
|
||||
bytes_to_ignore = 0;
|
||||
resetWorkingBuffer();
|
||||
|
||||
size_t bytes_read = result.size - result.offset;
|
||||
if (bytes_read)
|
||||
{
|
||||
@ -231,14 +234,26 @@ bool AsynchronousBoundedReadBuffer::nextImpl()
|
||||
}
|
||||
|
||||
file_offset_of_buffer_end = impl->getFileOffsetOfBufferEnd();
|
||||
bytes_to_ignore = 0;
|
||||
|
||||
/// In case of multiple files for the same file in clickhouse (i.e. log family)
|
||||
/// file_offset_of_buffer_end will not match getImplementationBufferOffset()
|
||||
/// so we use [impl->getImplementationBufferOffset(), impl->getFileSize()]
|
||||
chassert(file_offset_of_buffer_end <= impl->getFileSize());
|
||||
|
||||
return bytes_read;
|
||||
if (read_until_position && (file_offset_of_buffer_end > *read_until_position))
|
||||
{
|
||||
size_t excessive_bytes_read = file_offset_of_buffer_end - *read_until_position;
|
||||
|
||||
if (excessive_bytes_read > working_buffer.size())
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR,
|
||||
"File offset moved too far: old_file_offset = {}, new_file_offset = {}, read_until_position = {}, bytes_read = {}",
|
||||
old_file_offset_of_buffer_end, file_offset_of_buffer_end, *read_until_position, bytes_read);
|
||||
|
||||
working_buffer.resize(working_buffer.size() - excessive_bytes_read);
|
||||
file_offset_of_buffer_end = *read_until_position;
|
||||
}
|
||||
|
||||
return !working_buffer.empty();
|
||||
}
|
||||
|
||||
|
||||
|
@ -95,7 +95,6 @@ private:
|
||||
IAsynchronousReader::Result readSync(char * data, size_t size);
|
||||
|
||||
void resetPrefetch(FilesystemPrefetchState state);
|
||||
|
||||
};
|
||||
|
||||
}
|
||||
|
82  src/Disks/tests/gtest_asynchronous_bounded_read_buffer.cpp  Normal file
@ -0,0 +1,82 @@
|
||||
#include <gtest/gtest.h>
|
||||
|
||||
#include <Disks/IO/AsynchronousBoundedReadBuffer.h>
|
||||
#include <Disks/IO/ThreadPoolRemoteFSReader.h>
|
||||
#include <Disks/IO/createReadBufferFromFileBase.h>
|
||||
#include <IO/WriteBufferFromFile.h>
|
||||
#include <Poco/TemporaryFile.h>
|
||||
#include <filesystem>
|
||||
|
||||
|
||||
using namespace DB;
|
||||
namespace fs = std::filesystem;
|
||||
|
||||
class AsynchronousBoundedReadBufferTest : public ::testing::TestWithParam<const char *>
|
||||
{
|
||||
public:
|
||||
AsynchronousBoundedReadBufferTest() { fs::create_directories(temp_folder.path()); }
|
||||
|
||||
String makeTempFile(const String & contents)
|
||||
{
|
||||
String path = fmt::format("{}/{}", temp_folder.path(), counter);
|
||||
++counter;
|
||||
|
||||
WriteBufferFromFile out{path};
|
||||
out.write(contents.data(), contents.size());
|
||||
out.finalize();
|
||||
|
||||
return path;
|
||||
}
|
||||
|
||||
private:
|
||||
Poco::TemporaryFile temp_folder;
|
||||
size_t counter = 0;
|
||||
};
|
||||
|
||||
String getAlphabetWithDigits()
|
||||
{
|
||||
String contents;
|
||||
for (char c = 'a'; c <= 'z'; ++c)
|
||||
contents += c;
|
||||
for (char c = '0'; c <= '9'; ++c)
|
||||
contents += c;
|
||||
return contents;
|
||||
}
|
||||
|
||||
|
||||
TEST_F(AsynchronousBoundedReadBufferTest, setReadUntilPosition)
|
||||
{
|
||||
String file_path = makeTempFile(getAlphabetWithDigits());
|
||||
ThreadPoolRemoteFSReader remote_fs_reader(4, 0);
|
||||
|
||||
for (bool with_prefetch : {false, true})
|
||||
{
|
||||
AsynchronousBoundedReadBuffer read_buffer(createReadBufferFromFileBase(file_path, {}), remote_fs_reader, {});
|
||||
read_buffer.setReadUntilPosition(20);
|
||||
|
||||
auto try_read = [&](size_t count)
|
||||
{
|
||||
if (with_prefetch)
|
||||
read_buffer.prefetch(Priority{0});
|
||||
|
||||
String str;
|
||||
str.resize(count);
|
||||
str.resize(read_buffer.read(str.data(), str.size()));
|
||||
return str;
|
||||
};
|
||||
|
||||
EXPECT_EQ(try_read(15), "abcdefghijklmno");
|
||||
EXPECT_EQ(try_read(15), "pqrst");
|
||||
EXPECT_EQ(try_read(15), "");
|
||||
|
||||
read_buffer.setReadUntilPosition(25);
|
||||
|
||||
EXPECT_EQ(try_read(15), "uvwxy");
|
||||
EXPECT_EQ(try_read(15), "");
|
||||
|
||||
read_buffer.setReadUntilEnd();
|
||||
|
||||
EXPECT_EQ(try_read(15), "z0123456789");
|
||||
EXPECT_EQ(try_read(15), "");
|
||||
}
|
||||
}
|
@ -39,7 +39,7 @@ const FormatFactory::Creators & FormatFactory::getCreators(const String & name)
throw Exception(ErrorCodes::UNKNOWN_FORMAT, "Unknown format {}", name);
}

FormatSettings getFormatSettings(ContextPtr context)
FormatSettings getFormatSettings(const ContextPtr & context)
{
const auto & settings = context->getSettingsRef();

@ -47,7 +47,7 @@ FormatSettings getFormatSettings(ContextPtr context)
}

template <typename Settings>
FormatSettings getFormatSettings(ContextPtr context, const Settings & settings)
FormatSettings getFormatSettings(const ContextPtr & context, const Settings & settings)
{
FormatSettings format_settings;
|
||||
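
Aside, not part of this commit: taking ContextPtr (a shared_ptr alias) by const reference instead of by value avoids copying the pointer and bumping its atomic reference count on every call. A standalone sketch of the difference, with illustrative names:

#include <memory>
#include <iostream>

struct Context { int x = 0; };
using ContextPtr = std::shared_ptr<const Context>;

/// Pass by value: the shared_ptr is copied, the atomic use count is incremented.
void byValue(ContextPtr ctx) { std::cout << ctx.use_count() << '\n'; }

/// Pass by const reference: no copy, the use count stays unchanged.
void byRef(const ContextPtr & ctx) { std::cout << ctx.use_count() << '\n'; }

int main()
{
    ContextPtr ctx = std::make_shared<const Context>();
    byValue(ctx); /// prints 2
    byRef(ctx);   /// prints 1
}
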
|
||||
@ -181,6 +181,7 @@ FormatSettings getFormatSettings(ContextPtr context, const Settings & settings)
|
||||
format_settings.values.allow_data_after_semicolon = settings.input_format_values_allow_data_after_semicolon;
|
||||
format_settings.values.deduce_templates_of_expressions = settings.input_format_values_deduce_templates_of_expressions;
|
||||
format_settings.values.interpret_expressions = settings.input_format_values_interpret_expressions;
|
||||
format_settings.values.escape_quote_with_quote = settings.output_format_values_escape_quote_with_quote;
|
||||
format_settings.with_names_use_header = settings.input_format_with_names_use_header;
|
||||
format_settings.with_types_use_header = settings.input_format_with_types_use_header;
|
||||
format_settings.write_statistics = settings.output_format_write_statistics;
|
||||
@ -255,16 +256,16 @@ FormatSettings getFormatSettings(ContextPtr context, const Settings & settings)
|
||||
return format_settings;
|
||||
}
|
||||
|
||||
template FormatSettings getFormatSettings<FormatFactorySettings>(ContextPtr context, const FormatFactorySettings & settings);
|
||||
template FormatSettings getFormatSettings<FormatFactorySettings>(const ContextPtr & context, const FormatFactorySettings & settings);
|
||||
|
||||
template FormatSettings getFormatSettings<Settings>(ContextPtr context, const Settings & settings);
|
||||
template FormatSettings getFormatSettings<Settings>(const ContextPtr & context, const Settings & settings);
|
||||
|
||||
|
||||
InputFormatPtr FormatFactory::getInput(
|
||||
const String & name,
|
||||
ReadBuffer & _buf,
|
||||
const Block & sample,
|
||||
ContextPtr context,
|
||||
const ContextPtr & context,
|
||||
UInt64 max_block_size,
|
||||
const std::optional<FormatSettings> & _format_settings,
|
||||
std::optional<size_t> _max_parsing_threads,
|
||||
@ -427,7 +428,7 @@ std::unique_ptr<ReadBuffer> FormatFactory::wrapReadBufferIfNeeded(
|
||||
return res;
|
||||
}
|
||||
|
||||
static void addExistingProgressToOutputFormat(OutputFormatPtr format, ContextPtr context)
|
||||
static void addExistingProgressToOutputFormat(OutputFormatPtr format, const ContextPtr & context)
|
||||
{
|
||||
auto element_id = context->getProcessListElementSafe();
|
||||
if (element_id)
|
||||
@ -446,7 +447,7 @@ OutputFormatPtr FormatFactory::getOutputFormatParallelIfPossible(
|
||||
const String & name,
|
||||
WriteBuffer & buf,
|
||||
const Block & sample,
|
||||
ContextPtr context,
|
||||
const ContextPtr & context,
|
||||
const std::optional<FormatSettings> & _format_settings) const
|
||||
{
|
||||
const auto & output_getter = getCreators(name).output_creator;
|
||||
@ -484,7 +485,7 @@ OutputFormatPtr FormatFactory::getOutputFormat(
|
||||
const String & name,
|
||||
WriteBuffer & buf,
|
||||
const Block & sample,
|
||||
ContextPtr context,
|
||||
const ContextPtr & context,
|
||||
const std::optional<FormatSettings> & _format_settings) const
|
||||
{
|
||||
const auto & output_getter = getCreators(name).output_creator;
|
||||
@ -518,7 +519,7 @@ OutputFormatPtr FormatFactory::getOutputFormat(
|
||||
|
||||
String FormatFactory::getContentType(
|
||||
const String & name,
|
||||
ContextPtr context,
|
||||
const ContextPtr & context,
|
||||
const std::optional<FormatSettings> & _format_settings) const
|
||||
{
|
||||
const auto & output_getter = getCreators(name).output_creator;
|
||||
@ -537,7 +538,7 @@ String FormatFactory::getContentType(
|
||||
SchemaReaderPtr FormatFactory::getSchemaReader(
|
||||
const String & name,
|
||||
ReadBuffer & buf,
|
||||
ContextPtr & context,
|
||||
const ContextPtr & context,
|
||||
const std::optional<FormatSettings> & _format_settings) const
|
||||
{
|
||||
const auto & schema_reader_creator = dict.at(name).schema_reader_creator;
|
||||
@ -553,7 +554,7 @@ SchemaReaderPtr FormatFactory::getSchemaReader(
|
||||
|
||||
ExternalSchemaReaderPtr FormatFactory::getExternalSchemaReader(
|
||||
const String & name,
|
||||
ContextPtr & context,
|
||||
const ContextPtr & context,
|
||||
const std::optional<FormatSettings> & _format_settings) const
|
||||
{
|
||||
const auto & external_schema_reader_creator = dict.at(name).external_schema_reader_creator;
|
||||
@ -607,7 +608,7 @@ void FormatFactory::markFormatHasNoAppendSupport(const String & name)
|
||||
registerAppendSupportChecker(name, [](const FormatSettings &){ return false; });
|
||||
}
|
||||
|
||||
bool FormatFactory::checkIfFormatSupportAppend(const String & name, ContextPtr context, const std::optional<FormatSettings> & format_settings_)
|
||||
bool FormatFactory::checkIfFormatSupportAppend(const String & name, const ContextPtr & context, const std::optional<FormatSettings> & format_settings_)
|
||||
{
|
||||
auto format_settings = format_settings_ ? *format_settings_ : getFormatSettings(context);
|
||||
auto & append_support_checker = dict[name].append_support_checker;
|
||||
@ -630,10 +631,10 @@ void FormatFactory::registerFileExtension(const String & extension, const String
|
||||
file_extension_formats[boost::to_lower_copy(extension)] = format_name;
|
||||
}
|
||||
|
||||
String FormatFactory::getFormatFromFileName(String file_name, bool throw_if_not_found)
|
||||
std::optional<String> FormatFactory::tryGetFormatFromFileName(String file_name)
|
||||
{
|
||||
if (file_name == "stdin")
|
||||
return getFormatFromFileDescriptor(STDIN_FILENO);
|
||||
return tryGetFormatFromFileDescriptor(STDIN_FILENO);
|
||||
|
||||
CompressionMethod compression_method = chooseCompressionMethod(file_name, "");
|
||||
if (CompressionMethod::None != compression_method)
|
||||
@ -645,43 +646,53 @@ String FormatFactory::getFormatFromFileName(String file_name, bool throw_if_not_
|
||||
|
||||
auto pos = file_name.find_last_of('.');
|
||||
if (pos == String::npos)
|
||||
{
|
||||
if (throw_if_not_found)
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Cannot determine the file format by it's extension");
|
||||
return "";
|
||||
}
|
||||
return std::nullopt;
|
||||
|
||||
String file_extension = file_name.substr(pos + 1, String::npos);
|
||||
boost::algorithm::to_lower(file_extension);
|
||||
auto it = file_extension_formats.find(file_extension);
|
||||
if (it == file_extension_formats.end())
|
||||
{
|
||||
if (throw_if_not_found)
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Cannot determine the file format by it's extension");
|
||||
return "";
|
||||
}
|
||||
return std::nullopt;
|
||||
|
||||
return it->second;
|
||||
}
|
||||
|
||||
String FormatFactory::getFormatFromFileDescriptor(int fd)
|
||||
String FormatFactory::getFormatFromFileName(String file_name)
|
||||
{
|
||||
if (auto format = tryGetFormatFromFileName(file_name))
|
||||
return *format;
|
||||
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Cannot determine the format of the file {} by it's extension", file_name);
|
||||
}
|
||||
|
||||
std::optional<String> FormatFactory::tryGetFormatFromFileDescriptor(int fd)
|
||||
{
|
||||
#ifdef OS_LINUX
|
||||
std::string proc_path = fmt::format("/proc/self/fd/{}", fd);
|
||||
char file_path[PATH_MAX] = {'\0'};
|
||||
if (readlink(proc_path.c_str(), file_path, sizeof(file_path) - 1) != -1)
|
||||
return getFormatFromFileName(file_path, false);
|
||||
return "";
|
||||
return tryGetFormatFromFileName(file_path);
|
||||
return std::nullopt;
|
||||
#elif defined(OS_DARWIN)
|
||||
char file_path[PATH_MAX] = {'\0'};
|
||||
if (fcntl(fd, F_GETPATH, file_path) != -1)
|
||||
return getFormatFromFileName(file_path, false);
|
||||
return "";
|
||||
return tryGetFormatFromFileName(file_path);
|
||||
return std::nullopt;
|
||||
#else
|
||||
(void)fd;
|
||||
return "";
|
||||
return std::nullopt;
|
||||
#endif
|
||||
}
|
||||
|
||||
String FormatFactory::getFormatFromFileDescriptor(int fd)
|
||||
{
|
||||
if (auto format = tryGetFormatFromFileDescriptor(fd))
|
||||
return *format;
|
||||
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Cannot determine the format of the data by the file descriptor {}", fd);
|
||||
}
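/// --- Illustrative caller (not part of this commit) ----------------------------
/// The lookup is now split into a non-throwing tryGet* probe and a throwing get*
/// wrapper. A hypothetical caller that prefers a fallback format over an exception
/// (the function name and the fallback value are assumptions for illustration):
///
///     String resolveFormatOrDefault(const String & file_name, const String & fallback)
///     {
///         if (auto format = FormatFactory::instance().tryGetFormatFromFileName(file_name))
///             return *format;
///         return fallback; /// e.g. "TSV" when the extension is not recognized
///     }
/// -------------------------------------------------------------------------------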
|
||||
|
||||
|
||||
void FormatFactory::registerFileSegmentationEngine(const String & name, FileSegmentationEngine file_segmentation_engine)
|
||||
{
|
||||
auto & target = dict[name].file_segmentation_engine_creator;
|
||||
@ -767,7 +778,7 @@ void FormatFactory::registerAdditionalInfoForSchemaCacheGetter(
|
||||
target = std::move(additional_info_for_schema_cache_getter);
|
||||
}
|
||||
|
||||
String FormatFactory::getAdditionalInfoForSchemaCache(const String & name, ContextPtr context, const std::optional<FormatSettings> & format_settings_)
|
||||
String FormatFactory::getAdditionalInfoForSchemaCache(const String & name, const ContextPtr & context, const std::optional<FormatSettings> & format_settings_)
|
||||
{
|
||||
const auto & additional_info_getter = getCreators(name).additional_info_for_schema_cache_getter;
|
||||
if (!additional_info_getter)
|
||||
@ -812,7 +823,7 @@ bool FormatFactory::checkIfOutputFormatPrefersLargeBlocks(const String & name) c
|
||||
return target.prefers_large_blocks;
|
||||
}
|
||||
|
||||
bool FormatFactory::checkParallelizeOutputAfterReading(const String & name, ContextPtr context) const
|
||||
bool FormatFactory::checkParallelizeOutputAfterReading(const String & name, const ContextPtr & context) const
|
||||
{
|
||||
if (name == "Parquet" && context->getSettingsRef().input_format_parquet_preserve_order)
|
||||
return false;
|
||||
@ -827,6 +838,18 @@ void FormatFactory::checkFormatName(const String & name) const
|
||||
throw Exception(ErrorCodes::UNKNOWN_FORMAT, "Unknown format {}", name);
|
||||
}
|
||||
|
||||
std::vector<String> FormatFactory::getAllInputFormats() const
|
||||
{
|
||||
std::vector<String> input_formats;
|
||||
for (const auto & [format_name, creators] : dict)
|
||||
{
|
||||
if (creators.input_creator || creators.random_access_input_creator)
|
||||
input_formats.push_back(format_name);
|
||||
}
|
||||
|
||||
return input_formats;
|
||||
}
|
||||
|
||||
FormatFactory & FormatFactory::instance()
|
||||
{
|
||||
static FormatFactory ret;
|
||||
|
@ -48,10 +48,10 @@ using RowOutputFormatPtr = std::shared_ptr<IRowOutputFormat>;
|
||||
template <typename Allocator>
|
||||
struct Memory;
|
||||
|
||||
FormatSettings getFormatSettings(ContextPtr context);
|
||||
FormatSettings getFormatSettings(const ContextPtr & context);
|
||||
|
||||
template <typename T>
|
||||
FormatSettings getFormatSettings(ContextPtr context, const T & settings);
|
||||
FormatSettings getFormatSettings(const ContextPtr & context, const T & settings);
|
||||
|
||||
/** Allows to create an IInputFormat or IOutputFormat by the name of the format.
|
||||
* Note: format and compression are independent things.
|
||||
@ -161,7 +161,7 @@ public:
|
||||
const String & name,
|
||||
ReadBuffer & buf,
|
||||
const Block & sample,
|
||||
ContextPtr context,
|
||||
const ContextPtr & context,
|
||||
UInt64 max_block_size,
|
||||
const std::optional<FormatSettings> & format_settings = std::nullopt,
|
||||
std::optional<size_t> max_parsing_threads = std::nullopt,
|
||||
@ -178,30 +178,30 @@ public:
|
||||
const String & name,
|
||||
WriteBuffer & buf,
|
||||
const Block & sample,
|
||||
ContextPtr context,
|
||||
const ContextPtr & context,
|
||||
const std::optional<FormatSettings> & format_settings = std::nullopt) const;
|
||||
|
||||
OutputFormatPtr getOutputFormat(
|
||||
const String & name,
|
||||
WriteBuffer & buf,
|
||||
const Block & sample,
|
||||
ContextPtr context,
|
||||
const ContextPtr & context,
|
||||
const std::optional<FormatSettings> & _format_settings = std::nullopt) const;
|
||||
|
||||
String getContentType(
|
||||
const String & name,
|
||||
ContextPtr context,
|
||||
const ContextPtr & context,
|
||||
const std::optional<FormatSettings> & format_settings = std::nullopt) const;
|
||||
|
||||
SchemaReaderPtr getSchemaReader(
|
||||
const String & name,
|
||||
ReadBuffer & buf,
|
||||
ContextPtr & context,
|
||||
const ContextPtr & context,
|
||||
const std::optional<FormatSettings> & format_settings = std::nullopt) const;
|
||||
|
||||
ExternalSchemaReaderPtr getExternalSchemaReader(
|
||||
const String & name,
|
||||
ContextPtr & context,
|
||||
const ContextPtr & context,
|
||||
const std::optional<FormatSettings> & format_settings = std::nullopt) const;
|
||||
|
||||
void registerFileSegmentationEngine(const String & name, FileSegmentationEngine file_segmentation_engine);
|
||||
@ -216,7 +216,7 @@ public:
|
||||
/// registerAppendSupportChecker with append_support_checker that always returns true.
|
||||
void markFormatHasNoAppendSupport(const String & name);
|
||||
|
||||
bool checkIfFormatSupportAppend(const String & name, ContextPtr context, const std::optional<FormatSettings> & format_settings_ = std::nullopt);
|
||||
bool checkIfFormatSupportAppend(const String & name, const ContextPtr & context, const std::optional<FormatSettings> & format_settings_ = std::nullopt);
|
||||
|
||||
/// Register format by its name.
|
||||
void registerInputFormat(const String & name, InputCreator input_creator);
|
||||
@ -225,8 +225,10 @@ public:
|
||||
|
||||
/// Register file extension for format
|
||||
void registerFileExtension(const String & extension, const String & format_name);
|
||||
String getFormatFromFileName(String file_name, bool throw_if_not_found = false);
|
||||
String getFormatFromFileName(String file_name);
|
||||
std::optional<String> tryGetFormatFromFileName(String file_name);
|
||||
String getFormatFromFileDescriptor(int fd);
|
||||
std::optional<String> tryGetFormatFromFileDescriptor(int fd);
|
||||
|
||||
/// Register schema readers for format its name.
|
||||
void registerSchemaReader(const String & name, SchemaReaderCreator schema_reader_creator);
|
||||
@ -244,16 +246,18 @@ public:
|
||||
bool checkIfFormatHasAnySchemaReader(const String & name) const;
|
||||
bool checkIfOutputFormatPrefersLargeBlocks(const String & name) const;
|
||||
|
||||
bool checkParallelizeOutputAfterReading(const String & name, ContextPtr context) const;
|
||||
bool checkParallelizeOutputAfterReading(const String & name, const ContextPtr & context) const;
|
||||
|
||||
void registerAdditionalInfoForSchemaCacheGetter(const String & name, AdditionalInfoForSchemaCacheGetter additional_info_for_schema_cache_getter);
|
||||
String getAdditionalInfoForSchemaCache(const String & name, ContextPtr context, const std::optional<FormatSettings> & format_settings_ = std::nullopt);
|
||||
String getAdditionalInfoForSchemaCache(const String & name, const ContextPtr & context, const std::optional<FormatSettings> & format_settings_ = std::nullopt);
|
||||
|
||||
const FormatsDictionary & getAllFormats() const
|
||||
{
|
||||
return dict;
|
||||
}
|
||||
|
||||
std::vector<String> getAllInputFormats() const;
|
||||
|
||||
bool isInputFormat(const String & name) const;
|
||||
bool isOutputFormat(const String & name) const;
|
||||
|
||||
|
@ -361,6 +361,7 @@ struct FormatSettings
        bool deduce_templates_of_expressions = true;
        bool accurate_types_of_literals = true;
        bool allow_data_after_semicolon = false;
        bool escape_quote_with_quote = false;
    } values;

    enum class ORCCompression
@ -4,6 +4,7 @@
|
||||
#include <Common/assert_cast.h>
|
||||
#include <IO/WithFileSize.h>
|
||||
#include <IO/EmptyReadBuffer.h>
|
||||
#include <IO/PeekableReadBuffer.h>
|
||||
|
||||
namespace DB
|
||||
{
|
||||
@ -14,7 +15,9 @@ namespace ErrorCodes
|
||||
extern const int BAD_ARGUMENTS;
|
||||
extern const int ONLY_NULLS_WHILE_READING_SCHEMA;
|
||||
extern const int CANNOT_EXTRACT_TABLE_STRUCTURE;
|
||||
extern const int CANNOT_DETECT_FORMAT;
|
||||
extern const int TYPE_MISMATCH;
|
||||
extern const int LOGICAL_ERROR;
|
||||
}
|
||||
|
||||
static std::optional<NamesAndTypesList> getOrderedColumnsList(const NamesAndTypesList & columns_list, const Names & columns_order_hint)
|
||||
@ -43,50 +46,87 @@ bool isRetryableSchemaInferenceError(int code)
|
||||
return code == ErrorCodes::EMPTY_DATA_PASSED || code == ErrorCodes::ONLY_NULLS_WHILE_READING_SCHEMA;
|
||||
}
|
||||
|
||||
ColumnsDescription readSchemaFromFormat(
|
||||
const String & format_name,
|
||||
/// Order of formats to try in automatic format detection.
|
||||
/// If we can successfully detect some format, we won't try next ones.
|
||||
static const std::vector<String> & getFormatsOrderForDetection()
|
||||
{
|
||||
static const std::vector<String> formats_order =
|
||||
{
|
||||
"Parquet",
|
||||
"ORC",
|
||||
"Arrow",
|
||||
"ArrowStream",
|
||||
"Avro",
|
||||
"AvroConfluent",
|
||||
"Npy",
|
||||
"Native",
|
||||
"BSONEachRow",
|
||||
"JSONCompact",
|
||||
"Values",
|
||||
"TSKV",
|
||||
"JSONObjectEachRow",
|
||||
"JSONColumns",
|
||||
"JSONCompactColumns",
|
||||
"JSONCompact",
|
||||
"JSON",
|
||||
};
|
||||
|
||||
return formats_order;
|
||||
}
|
||||
|
||||
/// The set of similar formats to try in automatic format detection.
|
||||
/// We will try all formats from this set and then choose the best one
|
||||
/// according to inferred schema.
|
||||
static const std::vector<String> & getSimilarFormatsSetForDetection()
|
||||
{
|
||||
static const std::vector<String> formats_order =
|
||||
{
|
||||
"TSV",
|
||||
"CSV",
|
||||
};
|
||||
|
||||
return formats_order;
|
||||
}
|
||||
|
||||
std::pair<ColumnsDescription, String> readSchemaFromFormatImpl(
|
||||
std::optional<String> format_name,
|
||||
const std::optional<FormatSettings> & format_settings,
|
||||
IReadBufferIterator & read_buffer_iterator,
|
||||
bool retry,
|
||||
ContextPtr & context,
|
||||
std::unique_ptr<ReadBuffer> & buf)
|
||||
const ContextPtr & context)
|
||||
try
|
||||
{
|
||||
NamesAndTypesList names_and_types;
|
||||
SchemaInferenceMode mode = context->getSettingsRef().schema_inference_mode;
|
||||
if (mode == SchemaInferenceMode::UNION && !FormatFactory::instance().checkIfFormatSupportsSubsetOfColumns(format_name, context, format_settings))
|
||||
if (format_name && mode == SchemaInferenceMode::UNION && !FormatFactory::instance().checkIfFormatSupportsSubsetOfColumns(*format_name, context, format_settings))
|
||||
{
|
||||
String additional_message;
|
||||
/// Better exception message for WithNames(AndTypes) formats.
|
||||
if (format_name.ends_with("WithNames") || format_name.ends_with("WithNamesAndTypes"))
|
||||
if (format_name->ends_with("WithNames") || format_name->ends_with("WithNamesAndTypes"))
|
||||
additional_message = " (formats -WithNames(AndTypes) support reading subset of columns only when setting input_format_with_names_use_header is enabled)";
|
||||
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "UNION schema inference mode is not supported for format {}, because it doesn't support reading subset of columns{}", format_name, additional_message);
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "UNION schema inference mode is not supported for format {}, because it doesn't support reading subset of columns{}", *format_name, additional_message);
|
||||
}
|
||||
|
||||
if (FormatFactory::instance().checkIfFormatHasExternalSchemaReader(format_name))
|
||||
if (format_name && FormatFactory::instance().checkIfFormatHasExternalSchemaReader(*format_name))
|
||||
{
|
||||
auto external_schema_reader = FormatFactory::instance().getExternalSchemaReader(format_name, context, format_settings);
|
||||
auto external_schema_reader = FormatFactory::instance().getExternalSchemaReader(*format_name, context, format_settings);
|
||||
try
|
||||
{
|
||||
names_and_types = external_schema_reader->readSchema();
|
||||
return {ColumnsDescription(external_schema_reader->readSchema()), *format_name};
|
||||
}
|
||||
catch (Exception & e)
|
||||
{
|
||||
e.addMessage(
|
||||
fmt::format("Cannot extract table structure from {} format file. You can specify the structure manually", format_name));
|
||||
fmt::format("The table structure cannot be extracted from a {} format file. You can specify the structure manually", *format_name));
|
||||
throw;
|
||||
}
|
||||
}
|
||||
else if (FormatFactory::instance().checkIfFormatHasSchemaReader(format_name))
|
||||
{
|
||||
if (mode == SchemaInferenceMode::UNION)
|
||||
retry = false;
|
||||
|
||||
if (!format_name || FormatFactory::instance().checkIfFormatHasSchemaReader(*format_name))
|
||||
{
|
||||
IReadBufferIterator::Data iterator_data;
|
||||
std::vector<std::pair<NamesAndTypesList, String>> schemas_for_union_mode;
|
||||
std::optional<ColumnsDescription> cached_columns;
|
||||
std::string exception_messages;
|
||||
SchemaReaderPtr schema_reader;
|
||||
size_t max_rows_to_read = format_settings ? format_settings->max_rows_to_read_for_schema_inference
|
||||
: context->getSettingsRef().input_format_max_rows_to_read_for_schema_inference;
|
||||
size_t max_bytes_to_read = format_settings ? format_settings->max_bytes_to_read_for_schema_inference
|
||||
@ -94,45 +134,71 @@ try
|
||||
size_t iterations = 0;
|
||||
while (true)
|
||||
{
|
||||
/// When we finish working with current buffer we should put it back to iterator.
|
||||
SCOPE_EXIT(if (iterator_data.buf) read_buffer_iterator.setPreviousReadBuffer(std::move(iterator_data.buf)));
|
||||
bool is_eof = false;
|
||||
try
|
||||
{
|
||||
read_buffer_iterator.setPreviousReadBuffer(std::move(buf));
|
||||
std::tie(buf, cached_columns) = read_buffer_iterator.next();
|
||||
if (cached_columns)
|
||||
iterator_data = read_buffer_iterator.next();
|
||||
|
||||
/// Read buffer iterator can determine the data format if it's unknown.
|
||||
/// For example by scanning schema cache or by finding new file with format extension.
|
||||
if (!format_name && iterator_data.format_name)
|
||||
{
|
||||
format_name = *iterator_data.format_name;
|
||||
read_buffer_iterator.setFormatName(*iterator_data.format_name);
|
||||
}
|
||||
|
||||
if (iterator_data.cached_columns)
|
||||
{
|
||||
/// If we have schema in cache, we must also know the format.
|
||||
if (!format_name)
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "Schema from cache was returned, but format name is unknown");
|
||||
|
||||
if (mode == SchemaInferenceMode::DEFAULT)
|
||||
return *cached_columns;
|
||||
schemas_for_union_mode.emplace_back(cached_columns->getAll(), read_buffer_iterator.getLastFileName());
|
||||
{
|
||||
read_buffer_iterator.setResultingSchema(*iterator_data.cached_columns);
|
||||
return {*iterator_data.cached_columns, *format_name};
|
||||
}
|
||||
|
||||
schemas_for_union_mode.emplace_back(iterator_data.cached_columns->getAll(), read_buffer_iterator.getLastFileName());
|
||||
continue;
|
||||
}
|
||||
|
||||
if (!buf)
|
||||
if (!iterator_data.buf)
|
||||
break;
|
||||
|
||||
/// We just want to check for eof, but eof() can be pretty expensive.
|
||||
/// So we use getFileSize() when available, which has better worst case.
|
||||
/// (For remote files, typically eof() would read 1 MB from S3, which may be much
|
||||
/// more than what the schema reader and even data reader will read).
|
||||
auto size = tryGetFileSizeFromReadBuffer(*buf);
|
||||
auto size = tryGetFileSizeFromReadBuffer(*iterator_data.buf);
|
||||
if (size.has_value())
|
||||
is_eof = *size == 0;
|
||||
else
|
||||
is_eof = buf->eof();
|
||||
is_eof = iterator_data.buf->eof();
|
||||
}
|
||||
catch (Exception & e)
|
||||
{
|
||||
e.addMessage(
|
||||
fmt::format("Cannot extract table structure from {} format file. You can specify the structure manually", format_name));
|
||||
if (format_name)
|
||||
e.addMessage(fmt::format("The table structure cannot be extracted from a {} format file. You can specify the structure manually", *format_name));
|
||||
else
|
||||
e.addMessage("The data format cannot be detected by the contents of the files. You can specify the format manually");
|
||||
throw;
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
auto exception_message = getCurrentExceptionMessage(false);
|
||||
if (format_name)
|
||||
throw Exception(
|
||||
ErrorCodes::CANNOT_EXTRACT_TABLE_STRUCTURE,
|
||||
"The table structure cannot be extracted from a {} format file:\n{}.\nYou can specify the structure manually",
|
||||
*format_name,
|
||||
exception_message);
|
||||
|
||||
throw Exception(
|
||||
ErrorCodes::CANNOT_EXTRACT_TABLE_STRUCTURE,
|
||||
"Cannot extract table structure from {} format file:\n{}\nYou can specify the structure manually",
|
||||
format_name,
|
||||
ErrorCodes::CANNOT_DETECT_FORMAT,
|
||||
"The data format cannot be detected by the contents of the files:\n{}.\nYou can specify the format manually",
|
||||
exception_message);
|
||||
}
|
||||
|
||||
@ -140,91 +206,224 @@ try
|
||||
|
||||
if (is_eof)
|
||||
{
|
||||
auto exception_message = fmt::format("Cannot extract table structure from {} format file, file is empty", format_name);
|
||||
String exception_message;
|
||||
if (format_name)
|
||||
exception_message = fmt::format("The table structure cannot be extracted from a {} format file: the file is empty", *format_name);
|
||||
else
|
||||
exception_message = fmt::format("The data format cannot be detected by the contents of the files: the file is empty");
|
||||
|
||||
if (!retry)
|
||||
throw Exception(
|
||||
ErrorCodes::CANNOT_EXTRACT_TABLE_STRUCTURE, "{}. You can specify the structure manually", exception_message);
|
||||
if (mode == SchemaInferenceMode::UNION)
|
||||
{
|
||||
if (!format_name)
|
||||
throw Exception(ErrorCodes::CANNOT_DETECT_FORMAT, "The data format cannot be detected by the contents of the files: the file is empty. You can specify the format manually");
|
||||
|
||||
exception_messages += "\n" + exception_message;
|
||||
throw Exception(ErrorCodes::CANNOT_EXTRACT_TABLE_STRUCTURE, "{}. You can specify the structure manually", exception_message);
|
||||
}
|
||||
|
||||
if (!exception_messages.empty())
|
||||
exception_messages += "\n";
|
||||
exception_messages += exception_message;
|
||||
continue;
|
||||
}
|
||||
|
||||
try
|
||||
{
|
||||
schema_reader = FormatFactory::instance().getSchemaReader(format_name, *buf, context, format_settings);
|
||||
schema_reader->setMaxRowsAndBytesToRead(max_rows_to_read, max_bytes_to_read);
|
||||
names_and_types = schema_reader->readSchema();
|
||||
auto num_rows = schema_reader->readNumberOrRows();
|
||||
if (num_rows)
|
||||
read_buffer_iterator.setNumRowsToLastFile(*num_rows);
|
||||
std::unique_ptr<PeekableReadBuffer> peekable_buf; /// Can be used in format detection. Should be destroyed after schema reader.
|
||||
|
||||
/// In default mode, we finish when schema is inferred successfully from any file.
|
||||
if (mode == SchemaInferenceMode::DEFAULT)
|
||||
break;
|
||||
|
||||
if (!names_and_types.empty())
|
||||
read_buffer_iterator.setSchemaToLastFile(ColumnsDescription(names_and_types));
|
||||
schemas_for_union_mode.emplace_back(names_and_types, read_buffer_iterator.getLastFileName());
|
||||
}
|
||||
catch (...)
|
||||
if (format_name)
|
||||
{
|
||||
auto exception_message = getCurrentExceptionMessage(false);
|
||||
if (schema_reader && mode == SchemaInferenceMode::DEFAULT)
|
||||
SchemaReaderPtr schema_reader;
|
||||
|
||||
try
|
||||
{
|
||||
size_t rows_read = schema_reader->getNumRowsRead();
|
||||
assert(rows_read <= max_rows_to_read);
|
||||
max_rows_to_read -= schema_reader->getNumRowsRead();
|
||||
size_t bytes_read = buf->count();
|
||||
/// We could exceed max_bytes_to_read a bit to complete row parsing.
|
||||
max_bytes_to_read -= std::min(bytes_read, max_bytes_to_read);
|
||||
if (rows_read != 0 && (max_rows_to_read == 0 || max_bytes_to_read == 0))
|
||||
{
|
||||
exception_message += "\nTo increase the maximum number of rows/bytes to read for structure determination, use setting "
|
||||
"input_format_max_rows_to_read_for_schema_inference/input_format_max_bytes_to_read_for_schema_inference";
|
||||
schema_reader = FormatFactory::instance().getSchemaReader(*format_name, *iterator_data.buf, context, format_settings);
|
||||
schema_reader->setMaxRowsAndBytesToRead(max_rows_to_read, max_bytes_to_read);
|
||||
names_and_types = schema_reader->readSchema();
|
||||
auto num_rows = schema_reader->readNumberOrRows();
|
||||
if (num_rows)
|
||||
read_buffer_iterator.setNumRowsToLastFile(*num_rows);
|
||||
|
||||
if (iterations > 1)
|
||||
/// In default mode, we finish when schema is inferred successfully from any file.
|
||||
if (mode == SchemaInferenceMode::DEFAULT)
|
||||
break;
|
||||
|
||||
if (!names_and_types.empty())
|
||||
read_buffer_iterator.setSchemaToLastFile(ColumnsDescription(names_and_types));
|
||||
schemas_for_union_mode.emplace_back(names_and_types, read_buffer_iterator.getLastFileName());
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
auto exception_message = getCurrentExceptionMessage(false);
|
||||
if (schema_reader && mode == SchemaInferenceMode::DEFAULT)
|
||||
{
|
||||
size_t rows_read = schema_reader->getNumRowsRead();
|
||||
assert(rows_read <= max_rows_to_read);
|
||||
max_rows_to_read -= schema_reader->getNumRowsRead();
|
||||
size_t bytes_read = iterator_data.buf->count();
|
||||
/// We could exceed max_bytes_to_read a bit to complete row parsing.
|
||||
max_bytes_to_read -= std::min(bytes_read, max_bytes_to_read);
|
||||
if (rows_read != 0 && (max_rows_to_read == 0 || max_bytes_to_read == 0))
|
||||
{
|
||||
exception_messages += "\n" + exception_message;
|
||||
exception_message
|
||||
+= "\nTo increase the maximum number of rows/bytes to read for structure determination, use setting "
|
||||
"input_format_max_rows_to_read_for_schema_inference/input_format_max_bytes_to_read_for_schema_inference";
|
||||
if (!exception_messages.empty())
|
||||
exception_messages += "\n";
|
||||
exception_messages += exception_message;
|
||||
break;
|
||||
}
|
||||
retry = false;
|
||||
}
|
||||
}
|
||||
|
||||
if (!retry || !isRetryableSchemaInferenceError(getCurrentExceptionCode()))
|
||||
{
|
||||
try
|
||||
{
|
||||
throw;
|
||||
}
|
||||
catch (Exception & e)
|
||||
{
|
||||
e.addMessage(fmt::format(
|
||||
"Cannot extract table structure from {} format file. You can specify the structure manually", format_name));
|
||||
throw;
|
||||
}
|
||||
catch (...)
|
||||
if (mode == SchemaInferenceMode::UNION || !isRetryableSchemaInferenceError(getCurrentExceptionCode()))
|
||||
{
|
||||
throw Exception(
|
||||
ErrorCodes::CANNOT_EXTRACT_TABLE_STRUCTURE,
|
||||
"Cannot extract table structure from {} format file. "
|
||||
"Error: {}. You can specify the structure manually",
|
||||
format_name,
|
||||
"The table structure cannot be extracted from a {} format file. "
|
||||
"Error:\n{}.\nYou can specify the structure manually",
|
||||
*format_name,
|
||||
exception_message);
|
||||
}
|
||||
|
||||
if (!exception_messages.empty())
|
||||
exception_messages += "\n";
|
||||
exception_messages += exception_message;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
/// If the format is unknown we try some formats in order and try to apply their schema readers.
|
||||
/// If we can successfully infer the schema in some format, most likely we can use this format to read this data.
|
||||
|
||||
/// If read_buffer_iterator supports recreation of last buffer, we will recreate it for
|
||||
/// each format. Otherwise we will use PeekableReadBuffer and will rollback to the
|
||||
/// beginning of the file before each format. Using PeekableReadBuffer can lead
|
||||
/// to high memory usage as it will save all the read data from the beginning of the file,
|
||||
/// especially it will be noticeable for formats like Parquet/ORC/Arrow that do seeks to the
|
||||
/// end of file.
|
||||
bool support_buf_recreation = read_buffer_iterator.supportsLastReadBufferRecreation();
|
||||
if (!support_buf_recreation)
|
||||
{
|
||||
peekable_buf = std::make_unique<PeekableReadBuffer>(*iterator_data.buf);
|
||||
peekable_buf->setCheckpoint();
|
||||
}
|
||||
|
||||
/// First, try some formats in order. If we successfully inferred the schema for any format,
|
||||
/// we will use this format.
|
||||
for (const auto & format_to_detect : getFormatsOrderForDetection())
|
||||
{
|
||||
try
|
||||
{
|
||||
SchemaReaderPtr schema_reader = FormatFactory::instance().getSchemaReader(format_to_detect, support_buf_recreation ? *iterator_data.buf : *peekable_buf, context, format_settings);
|
||||
schema_reader->setMaxRowsAndBytesToRead(max_rows_to_read, max_bytes_to_read);
|
||||
names_and_types = schema_reader->readSchema();
|
||||
if (names_and_types.empty())
|
||||
continue;
|
||||
|
||||
/// We successfully inferred schema from this file using current format.
|
||||
format_name = format_to_detect;
|
||||
read_buffer_iterator.setFormatName(format_to_detect);
|
||||
|
||||
auto num_rows = schema_reader->readNumberOrRows();
|
||||
if (num_rows)
|
||||
read_buffer_iterator.setNumRowsToLastFile(*num_rows);
|
||||
|
||||
break;
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
/// We failed to infer the schema for this format.
|
||||
/// Recreate read buffer or rollback to the beginning of the data
|
||||
/// before trying next format.
|
||||
if (support_buf_recreation)
|
||||
{
|
||||
read_buffer_iterator.setPreviousReadBuffer(std::move(iterator_data.buf));
|
||||
iterator_data.buf = read_buffer_iterator.recreateLastReadBuffer();
|
||||
}
|
||||
else
|
||||
{
|
||||
peekable_buf->rollbackToCheckpoint();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
exception_messages += "\n" + exception_message;
|
||||
/// If no format was detected from first set of formats, we try second set.
|
||||
/// In this set formats are similar and it can happen that data matches some of them.
|
||||
/// We try to infer schema for all of the formats from this set and then choose the best
|
||||
/// one according to the inferred schema.
|
||||
if (!format_name)
|
||||
{
|
||||
std::unordered_map<String, NamesAndTypesList> format_to_schema;
|
||||
const auto & formats_set_to_detect = getSimilarFormatsSetForDetection();
|
||||
for (size_t i = 0; i != formats_set_to_detect.size(); ++i)
|
||||
{
|
||||
try
|
||||
{
|
||||
SchemaReaderPtr schema_reader = FormatFactory::instance().getSchemaReader(
|
||||
formats_set_to_detect[i], support_buf_recreation ? *iterator_data.buf : *peekable_buf, context, format_settings);
|
||||
schema_reader->setMaxRowsAndBytesToRead(max_rows_to_read, max_bytes_to_read);
|
||||
auto tmp_names_and_types = schema_reader->readSchema();
|
||||
/// If schema was inferred successfully for this format, remember it and try next format.
|
||||
if (!tmp_names_and_types.empty())
|
||||
format_to_schema[formats_set_to_detect[i]] = tmp_names_and_types;
|
||||
}
|
||||
catch (...) // NOLINT(bugprone-empty-catch)
|
||||
{
|
||||
/// Try next format.
|
||||
}
|
||||
|
||||
if (i != formats_set_to_detect.size() - 1)
|
||||
{
|
||||
if (support_buf_recreation)
|
||||
{
|
||||
read_buffer_iterator.setPreviousReadBuffer(std::move(iterator_data.buf));
|
||||
iterator_data.buf = read_buffer_iterator.recreateLastReadBuffer();
|
||||
}
|
||||
else
|
||||
{
|
||||
peekable_buf->rollbackToCheckpoint();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// We choose the format with larger number of columns in inferred schema.
|
||||
size_t max_number_of_columns = 0;
|
||||
for (const auto & [format_to_detect, schema] : format_to_schema)
|
||||
{
|
||||
if (schema.size() > max_number_of_columns)
|
||||
{
|
||||
names_and_types = schema;
|
||||
format_name = format_to_detect;
|
||||
max_number_of_columns = schema.size();
|
||||
}
|
||||
}
|
||||
|
||||
if (format_name)
|
||||
read_buffer_iterator.setFormatName(*format_name);
|
||||
}
|
||||
|
||||
if (mode == SchemaInferenceMode::UNION)
|
||||
{
|
||||
/// For UNION mode we need to know the schema of each file,
|
||||
/// if we failed to detect the format, we failed to detect the schema of this file
|
||||
/// in any format. It doesn't make sense to continue.
|
||||
if (!format_name)
|
||||
throw Exception(ErrorCodes::CANNOT_DETECT_FORMAT, "The data format cannot be detected by the contents of the files. You can specify the format manually");
|
||||
|
||||
read_buffer_iterator.setSchemaToLastFile(ColumnsDescription(names_and_types));
|
||||
schemas_for_union_mode.emplace_back(names_and_types, read_buffer_iterator.getLastFileName());
|
||||
}
|
||||
|
||||
if (format_name && mode == SchemaInferenceMode::DEFAULT)
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
/// If we got all schemas from cache, schema_reader can be uninitialized.
|
||||
/// But we still need some stateless methods of ISchemaReader,
|
||||
/// let's initialize it with empty buffer.
|
||||
if (!format_name)
|
||||
throw Exception(ErrorCodes::CANNOT_DETECT_FORMAT, "The data format cannot be detected by the contents of the files. You can specify the format manually");
|
||||
|
||||
/// We need some stateless methods of ISchemaReader, but during reading schema we
|
||||
/// could not even create a schema reader (for example when we got schema from cache).
|
||||
/// Let's create stateless schema reader from empty read buffer.
|
||||
EmptyReadBuffer empty;
|
||||
if (!schema_reader)
|
||||
schema_reader = FormatFactory::instance().getSchemaReader(format_name, empty, context, format_settings);
|
||||
SchemaReaderPtr stateless_schema_reader = FormatFactory::instance().getSchemaReader(*format_name, empty, context, format_settings);
|
||||
|
||||
if (mode == SchemaInferenceMode::UNION)
|
||||
{
|
||||
@ -251,7 +450,7 @@ try
|
||||
/// If types are not the same, try to transform them according
|
||||
/// to the format to find common type.
|
||||
auto new_type_copy = type;
|
||||
schema_reader->transformTypesFromDifferentFilesIfNeeded(it->second, new_type_copy);
|
||||
stateless_schema_reader->transformTypesFromDifferentFilesIfNeeded(it->second, new_type_copy);
|
||||
|
||||
/// If types are not the same after transform, we cannot do anything, throw an exception.
|
||||
if (!it->second->equals(*new_type_copy))
|
||||
@ -273,11 +472,23 @@ try
|
||||
}
|
||||
|
||||
if (names_and_types.empty())
|
||||
{
|
||||
if (iterations <= 1)
|
||||
{
|
||||
throw Exception(
|
||||
ErrorCodes::CANNOT_EXTRACT_TABLE_STRUCTURE,
|
||||
"The table structure cannot be extracted from a {} format file. "
|
||||
"Error:\n{}.\nYou can specify the structure manually",
|
||||
*format_name,
|
||||
exception_messages);
|
||||
}
|
||||
|
||||
throw Exception(
|
||||
ErrorCodes::CANNOT_EXTRACT_TABLE_STRUCTURE,
|
||||
"All attempts to extract table structure from files failed. "
|
||||
"Errors:{}\nYou can specify the structure manually",
|
||||
"Errors:\n{}\nYou can specify the structure manually",
|
||||
exception_messages);
|
||||
}
|
||||
|
||||
/// If we have "INSERT SELECT" query then try to order
|
||||
/// columns as they are ordered in table schema for formats
|
||||
@ -285,7 +496,7 @@ try
|
||||
/// It will allow to execute simple data loading with query
|
||||
/// "INSERT INTO table SELECT * FROM ..."
|
||||
const auto & insertion_table = context->getInsertionTable();
|
||||
if (schema_reader && !schema_reader->hasStrictOrderOfColumns() && !insertion_table.empty())
|
||||
if (!stateless_schema_reader->hasStrictOrderOfColumns() && !insertion_table.empty())
|
||||
{
|
||||
auto storage = DatabaseCatalog::instance().getTable(insertion_table, context);
|
||||
auto metadata = storage->getInMemoryMetadataPtr();
|
||||
@ -294,22 +505,22 @@ try
|
||||
if (ordered_list)
|
||||
names_and_types = *ordered_list;
|
||||
}
|
||||
|
||||
/// Some formats like CSVWithNames can contain empty column names. We don't support empty column names and further processing can fail with an exception. Let's just remove columns with empty names from the structure.
|
||||
names_and_types.erase(
|
||||
std::remove_if(names_and_types.begin(), names_and_types.end(), [](const NameAndTypePair & pair) { return pair.name.empty(); }),
|
||||
names_and_types.end());
|
||||
|
||||
auto columns = ColumnsDescription(names_and_types);
|
||||
if (mode == SchemaInferenceMode::DEFAULT)
|
||||
read_buffer_iterator.setResultingSchema(columns);
|
||||
return {columns, *format_name};
|
||||
}
|
||||
else
|
||||
throw Exception(
|
||||
ErrorCodes::BAD_ARGUMENTS,
|
||||
"{} file format doesn't support schema inference. You must specify the structure manually",
|
||||
format_name);
|
||||
|
||||
/// Some formats like CSVWithNames can contain empty column names. We don't support empty column names and further processing can fail with an exception. Let's just remove columns with empty names from the structure.
|
||||
names_and_types.erase(
|
||||
std::remove_if(names_and_types.begin(), names_and_types.end(), [](const NameAndTypePair & pair) { return pair.name.empty(); }),
|
||||
names_and_types.end());
|
||||
|
||||
auto columns = ColumnsDescription(names_and_types);
|
||||
if (mode == SchemaInferenceMode::DEFAULT)
|
||||
read_buffer_iterator.setResultingSchema(columns);
|
||||
return columns;
|
||||
throw Exception(
|
||||
ErrorCodes::BAD_ARGUMENTS,
|
||||
"{} file format doesn't support schema inference. You must specify the structure manually",
|
||||
*format_name);
|
||||
}
|
||||
catch (Exception & e)
|
||||
{
|
||||
@ -319,16 +530,21 @@ catch (Exception & e)
|
||||
throw;
|
||||
}
|
||||
|
||||
|
||||
ColumnsDescription readSchemaFromFormat(
|
||||
const String & format_name,
|
||||
const std::optional<FormatSettings> & format_settings,
|
||||
IReadBufferIterator & read_buffer_iterator,
|
||||
bool retry,
|
||||
ContextPtr & context)
|
||||
const ContextPtr & context)
|
||||
{
|
||||
std::unique_ptr<ReadBuffer> buf_out;
|
||||
return readSchemaFromFormat(format_name, format_settings, read_buffer_iterator, retry, context, buf_out);
|
||||
return readSchemaFromFormatImpl(format_name, format_settings, read_buffer_iterator, context).first;
|
||||
}
|
||||
|
||||
std::pair<ColumnsDescription, String> detectFormatAndReadSchema(
|
||||
const std::optional<FormatSettings> & format_settings,
|
||||
IReadBufferIterator & read_buffer_iterator,
|
||||
const ContextPtr & context)
|
||||
{
|
||||
return readSchemaFromFormatImpl(std::nullopt, format_settings, read_buffer_iterator, context);
|
||||
}
|
||||
|
||||
SchemaCache::Key getKeyForSchemaCache(
|
||||
|
@ -7,29 +7,68 @@
namespace DB
{

namespace ErrorCodes
{
    extern const int NOT_IMPLEMENTED;
}

struct IReadBufferIterator
{
    virtual ~IReadBufferIterator() = default;

    virtual void setPreviousReadBuffer(std::unique_ptr<ReadBuffer> /* buffer */) {}

    /// Return read buffer of the next file or cached schema.
    /// In DEFAULT schema inference mode cached schema can be from any file.
    /// In UNION mode cached schema can be only from current file.
    /// When there is no files to process, return pair (nullptr, nullopt)
    virtual std::pair<std::unique_ptr<ReadBuffer>, std::optional<ColumnsDescription>> next() = 0;

    struct Data
    {
        /// Read buffer of the next file. Can be nullptr if there are no more files
        /// or when schema was found in cache.
        std::unique_ptr<ReadBuffer> buf;

        /// Schema from cache.
        /// In DEFAULT schema inference mode cached schema can be from any file.
        /// In UNION mode cached schema can be only from current file.
        std::optional<ColumnsDescription> cached_columns;

        /// Format of the file if known.
        std::optional<String> format_name;
    };

    virtual Data next() = 0;

    /// Set read buffer returned in previous iteration.
    virtual void setPreviousReadBuffer(std::unique_ptr<ReadBuffer> /* buffer */) {}

    /// Set number of rows to last file extracted during schema inference.
    /// Used for caching number of rows from files metadata during schema inference.
    virtual void setNumRowsToLastFile(size_t /*num_rows*/) {}

    /// Set schema inferred from last file. Used for UNION mode to cache schema
    /// per file.
    virtual void setSchemaToLastFile(const ColumnsDescription & /*columns*/) {}

    /// Set resulting inferred schema. Used for DEFAULT mode to cache schema
    /// for all files.
    virtual void setResultingSchema(const ColumnsDescription & /*columns*/) {}

    /// Set auto detected format name.
    virtual void setFormatName(const String & /*format_name*/) {}

    /// Get last processed file name for better exception messages.
    virtual String getLastFileName() const { return ""; }

    /// Return true if method recreateLastReadBuffer is implemented.
    virtual bool supportsLastReadBufferRecreation() const { return false; }

    /// Recreate last read buffer to read data from the same file again.
    /// Used to detect format from the file content to avoid
    /// copying data.
    virtual std::unique_ptr<ReadBuffer> recreateLastReadBuffer()
    {
        throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method recreateLastReadBuffer is not implemented");
    }
};
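/// --- Illustrative sketch (not part of this commit) -----------------------------
/// A possible out-of-tree implementation of the extended iterator interface for a
/// fixed list of local paths. The class name, member names and the use of
/// ReadBufferFromFile are assumptions for illustration; only the virtual methods
/// mirror the interface above.
class LocalFilesReadBufferIterator : public IReadBufferIterator
{
public:
    explicit LocalFilesReadBufferIterator(Strings paths_) : paths(std::move(paths_)) {}

    Data next() override
    {
        /// No cached schema and no known format in this sketch, only the next buffer.
        if (index >= paths.size())
            return {nullptr, std::nullopt, std::nullopt};
        return {std::make_unique<ReadBufferFromFile>(paths[index++]), std::nullopt, std::nullopt};
    }

    String getLastFileName() const override { return index ? paths[index - 1] : String{}; }

    /// Opting in here lets the format-detection loop re-open the same file for each
    /// candidate format instead of buffering everything in a PeekableReadBuffer.
    bool supportsLastReadBufferRecreation() const override { return true; }

    std::unique_ptr<ReadBuffer> recreateLastReadBuffer() override
    {
        return std::make_unique<ReadBufferFromFile>(paths[index - 1]);
    }

private:
    Strings paths;
    size_t index = 0;
};
/// --------------------------------------------------------------------------------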

struct SingleReadBufferIterator : public IReadBufferIterator
@ -39,12 +78,22 @@ public:
    {
    }

    std::pair<std::unique_ptr<ReadBuffer>, std::optional<ColumnsDescription>> next() override
    Data next() override
    {
        if (done)
            return {nullptr, {}};
            return {nullptr, {}, std::nullopt};
        done = true;
        return {std::move(buf), {}};
        return Data{std::move(buf), {}, std::nullopt};
    }

    void setPreviousReadBuffer(std::unique_ptr<ReadBuffer> buf_) override
    {
        buf = std::move(buf_);
    }

    std::unique_ptr<ReadBuffer> releaseBuffer()
    {
        return std::move(buf);
    }

private:
@ -73,17 +122,16 @@ ColumnsDescription readSchemaFromFormat(
    const String & format_name,
    const std::optional<FormatSettings> & format_settings,
    IReadBufferIterator & read_buffer_iterator,
    bool retry,
    ContextPtr & context);
    const ContextPtr & context);

/// If ReadBuffer is created, it will be written to buf_out.
ColumnsDescription readSchemaFromFormat(
    const String & format_name,
/// Try to detect the format of the data and it's schema.
/// It runs schema inference for some set of formats on the same file.
/// If schema reader of some format successfully inferred the schema from
/// some file, we consider that the data is in this format.
std::pair<ColumnsDescription, String> detectFormatAndReadSchema(
    const std::optional<FormatSettings> & format_settings,
    IReadBufferIterator & read_buffer_iterator,
    bool retry,
    ContextPtr & context,
    std::unique_ptr<ReadBuffer> & buf_out);
    const ContextPtr & context);

SchemaCache::Key getKeyForSchemaCache(const String & source, const String & format, const std::optional<FormatSettings> & format_settings, const ContextPtr & context);
SchemaCache::Keys getKeysForSchemaCache(const Strings & sources, const String & format, const std::optional<FormatSettings> & format_settings, const ContextPtr & context);
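/// --- Illustrative usage (not part of this commit) -------------------------------
/// How a caller might drive the new entry point when the format is unknown. It
/// assumes a ReadBufferFromString over in-memory data and an available ContextPtr;
/// variable names are placeholders for illustration.
///
///     auto buf = std::make_unique<ReadBufferFromString>(data);
///     SingleReadBufferIterator iterator(std::move(buf));
///     auto [columns, format] = detectFormatAndReadSchema(/*format_settings*/ std::nullopt, iterator, context);
///     /// 'columns' is the inferred ColumnsDescription, 'format' the detected format name.
/// ---------------------------------------------------------------------------------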