Commit 3034229c37: Merge branch 'master' of github.com:ClickHouse/ClickHouse into 40907_Parameterized_views_as_table_functions
.clang-tidy
@@ -1,7 +1,7 @@
 # To run clang-tidy from CMake, build ClickHouse with -DENABLE_CLANG_TIDY=1. To show all warnings, it is
 # recommended to pass "-k0" to Ninja.
 
-# Enable all checks + disale selected checks. Feel free to remove disabled checks from below list if
+# Enable all checks + disable selected checks. Feel free to remove disabled checks from below list if
 # a) the new check is not controversial (this includes many checks in readability-* and google-*) or
 # b) too noisy (checks with > 100 new warnings are considered noisy, this includes e.g. cppcoreguidelines-*).
.gitattributes (vendored): 1 change
@@ -1,3 +1,4 @@
 contrib/* linguist-vendored
 *.h linguist-language=C++
 tests/queries/0_stateless/data_json/* binary
+tests/queries/0_stateless/*.reference -crlf
CMakeLists.txt
@@ -495,6 +495,14 @@ endif ()
 
 enable_testing() # Enable for tests without binary
 
+option(ENABLE_EXTERNAL_OPENSSL "This option is insecure and not recommended for any occasions. If it is enabled, it allows building with alternative OpenSSL library. By default, ClickHouse is using BoringSSL, which is better. Do not use this option." OFF)
+
+if (ENABLE_EXTERNAL_OPENSSL)
+    message (STATUS "Build and uses OpenSSL library instead of BoringSSL. This is strongly discouraged. Your build of ClickHouse will be unsupported.")
+    set(ENABLE_SSL 1)
+    target_compile_options(global-group INTERFACE "-Wno-deprecated-declarations")
+endif ()
+
 # when installing to /usr - place configs to /etc but for /usr/local place to /usr/local/etc
 if (CMAKE_INSTALL_PREFIX STREQUAL "/usr")
     set (CLICKHOUSE_ETC_DIR "/etc")
@@ -567,8 +575,8 @@ function (add_native_target)
     set_property (GLOBAL APPEND PROPERTY NATIVE_BUILD_TARGETS ${ARGV})
 endfunction (add_native_target)
 
-set(ConfigIncludePath ${CMAKE_CURRENT_BINARY_DIR}/includes/configs CACHE INTERNAL "Path to generated configuration files.")
-include_directories(${ConfigIncludePath})
+set(CONFIG_INCLUDE_PATH ${CMAKE_CURRENT_BINARY_DIR}/includes/configs CACHE INTERNAL "Path to generated configuration files.")
+include_directories(${CONFIG_INCLUDE_PATH})
 
 # Add as many warnings as possible for our own code.
 include (cmake/warnings.cmake)
README.md
@@ -16,5 +16,5 @@ ClickHouse® is an open-source column-oriented database management system that a
 * [Contacts](https://clickhouse.com/company/contact) can help to get your questions answered if there are any.
 
 ## Upcoming events
-* [**v22.9 Release Webinar**](https://clickhouse.com/company/events/v22-9-release-webinar) Original creator, co-founder, and CTO of ClickHouse Alexey Milovidov will walk us through the highlights of the release, provide live demos, and share vision into what is coming in the roadmap.
-* [**ClickHouse for Analytics @ Barracuda Networks**](https://www.meetup.com/clickhouse-silicon-valley-meetup-group/events/288140358/) Join us for this in person meetup hosted by our friends at Barracuda in Bay Area.
+* [**v22.10 Release Webinar**](https://clickhouse.com/company/events/v22-10-release-webinar) Original creator, co-founder, and CTO of ClickHouse Alexey Milovidov will walk us through the highlights of the release, provide live demos, and share vision into what is coming in the roadmap.
+* [**Introducing ClickHouse Cloud**](https://clickhouse.com/company/events/cloud-beta) Introducing ClickHouse as a service, built by creators and maintainers of the fastest OLAP database on earth. Join Tanya Bragin for a detailed walkthrough of ClickHouse Cloud capabilities, as well as a peek behind the curtain to understand the unique architecture that makes our service tick.
base/base/ReplxxLineReader.cpp
@@ -23,7 +23,7 @@ namespace
 {
 
 /// Trim ending whitespace inplace
-void trim(String & s)
+void rightTrim(String & s)
 {
     s.erase(std::find_if(s.rbegin(), s.rend(), [](unsigned char ch) { return !std::isspace(ch); }).base(), s.end());
 }
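The erase/find_if line above is the standard C++ idiom for trimming trailing whitespace in place: `find_if` over the reverse iterators finds the last non-whitespace character, and `.base()` turns that position back into a forward iterator, so `erase` removes exactly the trailing run. A minimal standalone sketch of the same idiom, using plain `std::string` in place of ClickHouse's `String` alias:

```cpp
#include <algorithm>
#include <cctype>
#include <iostream>
#include <string>

// Erase everything after the last non-whitespace character.
// find_if scans from the back; .base() converts the reverse iterator
// to the forward iterator just past that character.
void rightTrim(std::string & s)
{
    s.erase(std::find_if(s.rbegin(), s.rend(),
                         [](unsigned char ch) { return !std::isspace(ch); }).base(),
            s.end());
}

int main()
{
    std::string line = "SELECT 1;  \t\n";
    rightTrim(line);
    std::cout << '[' << line << "]\n"; // prints [SELECT 1;]
}
```

The rename to `rightTrim` makes explicit that only the trailing side of the string is touched.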
@@ -441,7 +441,7 @@ LineReader::InputStatus ReplxxLineReader::readOneLine(const String & prompt)
         return (errno != EAGAIN) ? ABORT : RESET_LINE;
     input = cinput;
 
-    trim(input);
+    rightTrim(input);
     return INPUT_LINE;
 }
 
@@ -512,6 +512,9 @@ void ReplxxLineReader::openInteractiveHistorySearch()
     /// NOTE: You can use one of the following to configure the behaviour additionally:
     /// - SKIM_DEFAULT_OPTIONS
     /// - FZF_DEFAULT_OPTS
+    ///
+    /// And also note, that fzf and skim is 95% compatible (at least option
+    /// that is used here)
     std::string fuzzy_finder_command = fmt::format(
         "{} --read0 --tac --no-sort --tiebreak=index --bind=ctrl-r:toggle-sort --height=30% < {} > {}",
         fuzzy_finder, history_file.getPath(), output_file.getPath());
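For context, here is a sketch of how such a command string is assembled and run, assuming the {fmt} library. The paths are hypothetical stand-ins for the temporary files ClickHouse creates, and the real code executes the command via its own `executeCommand()`/`execvp` helper rather than `std::system`:

```cpp
#include <fmt/format.h>
#include <cstdlib>
#include <iostream>
#include <string>

int main()
{
    // Hypothetical paths standing in for ClickHouse's TemporaryFile objects.
    std::string fuzzy_finder = "fzf"; // or "sk"; the options used are compatible
    std::string history_path = "/tmp/ch_history.txt";
    std::string output_path = "/tmp/ch_choice.txt";

    // --read0 reads NUL-separated entries; --tac/--no-sort/--tiebreak=index
    // preserve history order; the selected entry lands in output_path.
    std::string command = fmt::format(
        "{} --read0 --tac --no-sort --tiebreak=index --bind=ctrl-r:toggle-sort --height=30% < {} > {}",
        fuzzy_finder, history_path, output_path);

    std::cout << command << '\n';
    return std::system(command.c_str());
}
```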
@@ -521,7 +524,8 @@ void ReplxxLineReader::openInteractiveHistorySearch()
     {
         if (executeCommand(argv) == 0)
         {
-            const std::string & new_query = readFile(output_file.getPath());
+            std::string new_query = readFile(output_file.getPath());
+            rightTrim(new_query);
             rx.set_state(replxx::Replxx::State(new_query.c_str(), new_query.size()));
         }
     }
base/base/defines.h
@@ -123,11 +123,15 @@
 /// - tries to print failed assertion into server log
 /// It can be used for all assertions except heavy ones.
 /// Heavy assertions (that run loops or call complex functions) are allowed in debug builds only.
+/// Also it makes sense to call abort() instead of __builtin_unreachable() in debug builds,
+/// because SIGABRT is easier to debug than SIGTRAP (the second one makes gdb crazy)
 #if !defined(chassert)
     #if defined(ABORT_ON_LOGICAL_ERROR)
         #define chassert(x) static_cast<bool>(x) ? void(0) : abortOnFailedAssertion(#x)
+        #define UNREACHABLE() abort()
     #else
         #define chassert(x) ((void)0)
+        #define UNREACHABLE() __builtin_unreachable()
    #endif
 #endif
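A minimal sketch of the semantics of the two macros. Here `abortOnFailedAssertion` is a stub (in ClickHouse it also tries to write the failed assertion to the server log), `ABORT_ON_LOGICAL_ERROR` is set by the build configuration rather than by hand, and `__builtin_unreachable()` is a GCC/Clang intrinsic:

```cpp
#include <cstdio>
#include <cstdlib>

// Stand-in for ClickHouse's abortOnFailedAssertion(), which also logs.
[[noreturn]] void abortOnFailedAssertion(const char * description)
{
    std::fprintf(stderr, "Assertion failed: %s\n", description);
    std::abort();
}

#if defined(ABORT_ON_LOGICAL_ERROR)
/// Checked build: a failed chassert() aborts, and UNREACHABLE() raises SIGABRT,
/// which is easier to inspect in gdb than the SIGTRAP from __builtin_unreachable().
#define chassert(x) static_cast<bool>(x) ? void(0) : abortOnFailedAssertion(#x)
#define UNREACHABLE() abort()
#else
/// Release build: assertions compile to nothing and unreachable paths
/// become pure optimizer hints.
#define chassert(x) ((void)0)
#define UNREACHABLE() __builtin_unreachable()
#endif

int divide(int a, int b)
{
    chassert(b != 0); // cheap invariant check, free in release builds
    return a / b;
}

int main() { return divide(10, 2) == 5 ? 0 : 1; }
```

Build with `-DABORT_ON_LOGICAL_ERROR` to get the checked behaviour, or without it to see the assertions compile away.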
@@ -142,7 +146,9 @@
 # define TSA_NO_THREAD_SAFETY_ANALYSIS __attribute__((no_thread_safety_analysis)) /// disable TSA for a function
 
 /// Macros for suppressing TSA warnings for specific reads/writes (instead of suppressing it for the whole function)
-/// Consider adding a comment before using these macros.
+/// They use a lambda function to apply function attribute to a single statement. This enable us to suppress warnings locally instead of
+/// suppressing them in the whole function
+/// Consider adding a comment when using these macros.
 # define TSA_SUPPRESS_WARNING_FOR_READ(x) ([&]() TSA_NO_THREAD_SAFETY_ANALYSIS -> const auto & { return (x); }())
 # define TSA_SUPPRESS_WARNING_FOR_WRITE(x) ([&]() TSA_NO_THREAD_SAFETY_ANALYSIS -> auto & { return (x); }())
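The reason for the lambda is that Clang's `no_thread_safety_analysis` attribute can only be attached to a function, so wrapping the expression in an immediately invoked lambda disables the analysis for exactly one statement. A sketch of the pattern under stated assumptions: the `Counter` struct is illustrative and not from the patch, and the attribute is Clang-specific (it only has an effect under `-Wthread-safety`):

```cpp
#include <mutex>

// Clang-only attribute; expands to nothing elsewhere so the sketch stays portable.
#if defined(__clang__)
#    define TSA_NO_THREAD_SAFETY_ANALYSIS __attribute__((no_thread_safety_analysis))
#else
#    define TSA_NO_THREAD_SAFETY_ANALYSIS
#endif

// The attribute can only go on a function, so the macro wraps the expression
// in an immediately invoked lambda: the analysis is disabled for the lambda's
// body only, i.e. for this single read.
#define TSA_SUPPRESS_WARNING_FOR_READ(x) \
    ([&]() TSA_NO_THREAD_SAFETY_ANALYSIS -> const auto & { return (x); }())

struct Counter
{
    mutable std::mutex mutex;
    int value = 0; // in ClickHouse this member would carry TSA_GUARDED_BY(mutex)

    // Deliberately lock-free read for logging: a stale value is acceptable,
    // so the thread-safety warning is suppressed for this one statement.
    int approximateValue() const { return TSA_SUPPRESS_WARNING_FOR_READ(value); }
};

int main()
{
    Counter c;
    return c.approximateValue();
}
```

Note that the next hunk changes the fallback (no-TSA) definitions to expand to `(x)` instead of nothing, so the wrapped expression is still evaluated when the analysis is unavailable.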
@@ -159,9 +165,9 @@
 # define TSA_REQUIRES_SHARED(...)
 # define TSA_NO_THREAD_SAFETY_ANALYSIS
 
-# define TSA_SUPPRESS_WARNING_FOR_READ(x)
-# define TSA_SUPPRESS_WARNING_FOR_WRITE(x)
-# define TSA_READ_ONE_THREAD(x)
+# define TSA_SUPPRESS_WARNING_FOR_READ(x) (x)
+# define TSA_SUPPRESS_WARNING_FOR_WRITE(x) (x)
+# define TSA_READ_ONE_THREAD(x) TSA_SUPPRESS_WARNING_FOR_READ(x)
 #endif
 
 /// A template function for suppressing warnings about unused variables or function results.
base/base/safeExit.cpp
@@ -1,6 +1,7 @@
 #if defined(OS_LINUX)
 # include <sys/syscall.h>
 #endif
 #include <cstdlib>
 #include <unistd.h>
 #include <base/safeExit.h>
+#include <base/defines.h>
@@ -11,7 +12,7 @@
     /// Thread sanitizer tries to do something on exit that we don't need if we want to exit immediately,
     /// while connection handling threads are still run.
     (void)syscall(SYS_exit_group, code);
-    __builtin_unreachable();
+    UNREACHABLE();
 #else
     _exit(code);
 #endif
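Putting the hunk together, a sketch of the resulting function under stated assumptions: it keys on GCC's `__SANITIZE_THREAD__` macro and uses the raw intrinsic so it compiles standalone, whereas ClickHouse checks its own sanitizer/OS defines and now spells the intrinsic `UNREACHABLE()` (hence the added `#include <base/defines.h>`):

```cpp
#include <cstdlib>
#include <unistd.h>
#if defined(__linux__)
#    include <sys/syscall.h>
#endif

// Under ThreadSanitizer, exit the whole thread group directly: this skips
// TSan's at-exit machinery, which would otherwise run while connection
// handling threads are still alive.
[[noreturn]] void safeExit(int code)
{
#if defined(__SANITIZE_THREAD__) && defined(__linux__)
    (void)syscall(SYS_exit_group, code);
    __builtin_unreachable(); // never reached; exit_group does not return
#else
    _exit(code);
#endif
}

int main()
{
    safeExit(0);
}
```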
contrib/AMQP-CPP (vendored): 2 changes
@@ -1 +1 @@
-Subproject commit 1a6c51f4ac51ac56610fa95081bd2f349911375a
+Subproject commit 818c2d8ad96a08a5d20fece7d1e1e8855a2b0860
contrib/CMakeLists.txt (vendored): 6 changes
@@ -74,7 +74,11 @@ add_contrib (re2-cmake re2)
 add_contrib (xz-cmake xz)
 add_contrib (brotli-cmake brotli)
 add_contrib (double-conversion-cmake double-conversion)
-add_contrib (boringssl-cmake boringssl)
+if (NOT ENABLE_EXTERNAL_OPENSSL)
+    add_contrib (boringssl-cmake boringssl)
+else ()
+    add_contrib (openssl-cmake openssl)
+endif ()
 add_contrib (poco-cmake poco)
 add_contrib (croaring-cmake croaring)
 add_contrib (zstd-cmake zstd)
contrib/amqpcpp-cmake/CMakeLists.txt
@@ -4,6 +4,11 @@ if (NOT ENABLE_AMQPCPP)
     message(STATUS "Not using AMQP-CPP")
     return()
 endif()
+if (OS_FREEBSD)
+    message(STATUS "Not using AMQP-CPP because libuv is disabled")
+    return()
+endif()
+
 
 # can be removed once libuv build on MacOS with GCC is possible
 if (NOT TARGET ch_contrib::uv)
contrib/cctz (vendored): 2 changes
@@ -1 +1 @@
-Subproject commit 49c656c62fbd36a1bc20d64c476853bdb7cf7bb9
+Subproject commit 7a454c25c7d16053bcd327cdd16329212a08fa4a
contrib/krb5-cmake/CMakeLists.txt
@@ -578,6 +578,12 @@ if(CMAKE_SYSTEM_NAME MATCHES "Darwin")
     list(APPEND ALL_SRCS "${CMAKE_CURRENT_BINARY_DIR}/include_private/kcmrpc.c")
 endif()
 
+if (ENABLE_EXTERNAL_OPENSSL)
+    list(REMOVE_ITEM ALL_SRCS "${KRB5_SOURCE_DIR}/lib/crypto/openssl/enc_provider/aes.c")
+    list(APPEND ALL_SRCS "${CMAKE_CURRENT_SOURCE_DIR}/aes.c")
+endif ()
+
+
 target_sources(_krb5 PRIVATE
     ${ALL_SRCS}
 )
contrib/libpq-cmake/CMakeLists.txt
@@ -59,6 +59,12 @@ set(SRCS
 
 add_library(_libpq ${SRCS})
 
+if (ENABLE_EXTERNAL_OPENSSL)
+    add_definitions(-DHAVE_BIO_METH_NEW)
+    add_definitions(-DHAVE_HMAC_CTX_NEW)
+    add_definitions(-DHAVE_HMAC_CTX_FREE)
+endif ()
+
 target_include_directories (_libpq SYSTEM PUBLIC ${LIBPQ_SOURCE_DIR})
 target_include_directories (_libpq SYSTEM PUBLIC "${LIBPQ_SOURCE_DIR}/include")
 target_include_directories (_libpq SYSTEM PRIVATE "${LIBPQ_SOURCE_DIR}/configs")
contrib/llvm-project (vendored): 2 changes
@@ -1 +1 @@
-Subproject commit 328e4602120ddd6b2c1fb91bf2d50bd7bc249711
+Subproject commit 3a39038345a400e7e767811b142a94355d511215
contrib/llvm-project-cmake/CMakeLists.txt
@@ -1,4 +1,4 @@
-if (APPLE OR NOT ARCH_AMD64 OR SANITIZE STREQUAL "undefined" OR NOT USE_STATIC_LIBRARIES)
+if (APPLE OR NOT ARCH_AMD64 OR SANITIZE STREQUAL "undefined")
     set (ENABLE_EMBEDDED_COMPILER_DEFAULT OFF)
 else()
     set (ENABLE_EMBEDDED_COMPILER_DEFAULT ON)
@@ -6,15 +6,16 @@ endif()
 
 option (ENABLE_EMBEDDED_COMPILER "Enable support for 'compile_expressions' option for query execution" ${ENABLE_EMBEDDED_COMPILER_DEFAULT})
 
+# If USE_STATIC_LIBRARIES=0 was passed to CMake, we'll still build LLVM statically to keep complexity minimal.
+
 if (NOT ENABLE_EMBEDDED_COMPILER)
     message(STATUS "Not using LLVM")
     return()
 endif()
 
 # TODO: Enable shared library build
 # TODO: Enable compilation on AArch64
 
-set (LLVM_VERSION "14.0.0bundled")
+set (LLVM_VERSION "15.0.0bundled")
 set (LLVM_INCLUDE_DIRS
     "${ClickHouse_SOURCE_DIR}/contrib/llvm-project/llvm/include"
     "${ClickHouse_BINARY_DIR}/contrib/llvm-project/llvm/include"
@@ -62,9 +63,6 @@ set (REQUIRED_LLVM_LIBRARIES
 # list(APPEND REQUIRED_LLVM_LIBRARIES LLVMAArch64Info LLVMAArch64Desc LLVMAArch64CodeGen)
 # endif ()
 
-# ld: unknown option: --color-diagnostics
-# set (LINKER_SUPPORTS_COLOR_DIAGNOSTICS 0 CACHE INTERNAL "")
-
 set (CMAKE_INSTALL_RPATH "ON") # Do not adjust RPATH in llvm, since then it will not be able to find libcxx/libcxxabi/libunwind
 set (LLVM_COMPILER_CHECKED 1 CACHE INTERNAL "") # Skip internal compiler selection
 set (LLVM_ENABLE_EH 1 CACHE INTERNAL "") # With exception handling
@@ -80,6 +78,7 @@ set(LLVM_ENABLE_LIBXML2 0 CACHE INTERNAL "")
 set(LLVM_ENABLE_LIBEDIT 0 CACHE INTERNAL "")
 set(LLVM_ENABLE_LIBPFM 0 CACHE INTERNAL "")
 set(LLVM_ENABLE_ZLIB 0 CACHE INTERNAL "")
+set(LLVM_ENABLE_ZSTD 0 CACHE INTERNAL "")
 set(LLVM_ENABLE_Z3_SOLVER 0 CACHE INTERNAL "")
 set(LLVM_INCLUDE_TOOLS 0 CACHE INTERNAL "")
 set(LLVM_BUILD_TOOLS 0 CACHE INTERNAL "")
@@ -96,9 +95,6 @@ set(LLVM_INCLUDE_DOCS 0 CACHE INTERNAL "")
 set(LLVM_ENABLE_OCAMLDOC 0 CACHE INTERNAL "")
 set(LLVM_ENABLE_BINDINGS 0 CACHE INTERNAL "")
 
-# C++20 is currently not supported due to ambiguous operator != etc.
-set (CMAKE_CXX_STANDARD 17)
-
 set (LLVM_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/llvm-project/llvm")
 set (LLVM_BINARY_DIR "${ClickHouse_BINARY_DIR}/contrib/llvm-project/llvm")
 add_subdirectory ("${LLVM_SOURCE_DIR}" "${LLVM_BINARY_DIR}")
docker/test/fuzzer/allow-nullable-key.xml (new file): 6 lines
@@ -0,0 +1,6 @@
+<clickhouse>
+    <!-- Allow nullable key to avoid errors while fuzzing definitions of tables -->
+    <merge_tree>
+        <allow_nullable_key>1</allow_nullable_key>
+    </merge_tree>
+</clickhouse>
docker/test/fuzzer/run-fuzzer.sh
@@ -94,6 +94,7 @@ function configure
     # TODO figure out which ones are needed
     cp -av --dereference "$repo_dir"/tests/config/config.d/listen.xml db/config.d
     cp -av --dereference "$script_dir"/query-fuzzer-tweaks-users.xml db/users.d
+    cp -av --dereference "$script_dir"/allow-nullable-key.xml db/config.d
 
     cat > db/config.d/core.xml <<EOL
 <clickhouse>
@@ -240,6 +241,7 @@ quit
         --receive_data_timeout_ms=10000 \
         --stacktrace \
         --query-fuzzer-runs=1000 \
+        --create-query-fuzzer-runs=50 \
         --queries-file $(ls -1 ch/tests/queries/0_stateless/*.sql | sort -R) \
         $NEW_TESTS_OPT \
         > >(tail -n 100000 > fuzzer.log) \
@@ -11,6 +11,7 @@ RUN apt-get update -y \
     apt-get install --yes --no-install-recommends \
         awscli \
         brotli \
+        lz4 \
         expect \
         golang \
         lsof \
docs/changelogs/v22.6.9.11-stable.md (new file): 23 lines
@@ -0,0 +1,23 @@
+---
+sidebar_position: 1
+sidebar_label: 2022
+---
+
+# 2022 Changelog
+
+### ClickHouse release v22.6.9.11-stable (9ec61dcac49) FIXME as compared to v22.6.8.35-stable (b91dc59a565)
+
+#### Improvement
+* Backported in [#42089](https://github.com/ClickHouse/ClickHouse/issues/42089): Replace back `clickhouse su` command with `sudo -u` in start in order to respect limits in `/etc/security/limits.conf`. [#41847](https://github.com/ClickHouse/ClickHouse/pull/41847) ([Eugene Konkov](https://github.com/ekonkov)).
+
+#### Build/Testing/Packaging Improvement
+* Backported in [#41558](https://github.com/ClickHouse/ClickHouse/issues/41558): Add `source` field to deb packages, update `nfpm`. [#41531](https://github.com/ClickHouse/ClickHouse/pull/41531) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
+
+#### Bug Fix (user-visible misbehavior in official stable or prestable release)
+
+* Backported in [#41504](https://github.com/ClickHouse/ClickHouse/issues/41504): Writing data in Apache `ORC` format might lead to a buffer overrun. [#41458](https://github.com/ClickHouse/ClickHouse/pull/41458) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+
+#### NOT FOR CHANGELOG / INSIGNIFICANT
+
+* Build latest tags ONLY from master branch [#41567](https://github.com/ClickHouse/ClickHouse/pull/41567) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
docs/en/development/build.md
@@ -38,13 +38,13 @@ For other Linux distribution - check the availability of the [prebuild packages]
 #### Use the latest clang for Builds
 
 ``` bash
-export CC=clang-14
-export CXX=clang++-14
+export CC=clang-15
+export CXX=clang++-15
 ```
 
-In this example we use version 14 that is the latest as of Feb 2022.
+In this example we use version 15 that is the latest as of Sept 2022.
 
-Gcc can also be used though it is discouraged.
+Gcc cannot be used.
 
 ### Checkout ClickHouse Sources {#checkout-clickhouse-sources}
docs/en/development/developer-instruction.md
@@ -122,7 +122,7 @@ If you use Arch or Gentoo, you probably know it yourself how to install CMake.
 
 ## C++ Compiler {#c-compiler}
 
-Compilers Clang starting from version 12 is supported for building ClickHouse.
+Compilers Clang starting from version 15 is supported for building ClickHouse.
 
 Clang should be used instead of gcc. Though, our continuous integration (CI) platform runs checks for about a dozen of build combinations.
 
@@ -146,7 +146,7 @@ While inside the `build` directory, configure your build by running CMake. Befor
     export CC=clang CXX=clang++
     cmake ..
 
-If you installed clang using the automatic installation script above, also specify the version of clang installed in the first command, e.g. `export CC=clang-14 CXX=clang++-14`. The clang version will be in the script output.
+If you installed clang using the automatic installation script above, also specify the version of clang installed in the first command, e.g. `export CC=clang-15 CXX=clang++-15`. The clang version will be in the script output.
 
 The `CC` variable specifies the compiler for C (short for C Compiler), and `CXX` variable instructs which C++ compiler is to be used for building.
 
@@ -178,7 +178,7 @@ If you get the message: `ninja: error: loading 'build.ninja': No such file or di
 
 Upon the successful start of the building process, you’ll see the build progress - the number of processed tasks and the total number of tasks.
 
-While building messages about protobuf files in libhdfs2 library like `libprotobuf WARNING` may show up. They affect nothing and are safe to be ignored.
+While building messages about LLVM library may show up. They affect nothing and are safe to be ignored.
 
 Upon successful build you get an executable file `ClickHouse/<build_dir>/programs/clickhouse`:
 
@@ -272,15 +272,10 @@ Most probably some of the builds will fail at first times. This is due to the fa
 
 You can use the **Woboq** online code browser available [here](https://clickhouse.com/codebrowser/ClickHouse/src/index.html). It provides code navigation, semantic highlighting, search and indexing. The code snapshot is updated daily.
 
+You can use GitHub integrated code browser [here](https://github.dev/ClickHouse/ClickHouse).
+
 Also, you can browse sources on [GitHub](https://github.com/ClickHouse/ClickHouse) as usual.
 
-## Faster builds for development: Split build configuration {#split-build}
-
-ClickHouse is normally statically linked into a single static `clickhouse` binary with minimal dependencies. This is convenient for distribution, but it means that for every change the entire binary needs to be re-linked, which is slow and inconvenient for development. As an alternative, you can instead build dynamically linked shared libraries, allowing for faster incremental builds. To use it, add the following flags to your `cmake` invocation:
-```
--DUSE_STATIC_LIBRARIES=0 -DSPLIT_SHARED_LIBRARIES=1
-```
-
 If you are not interested in functionality provided by third-party libraries, you can further speed up the build using `cmake` options
 ```
 -DENABLE_LIBRARIES=0 -DENABLE_EMBEDDED_COMPILER=0
docs/en/getting-started/install.md
@@ -1,13 +1,34 @@
 ---
-sidebar_label: Installation
-sidebar_position: 1
-keywords: [clickhouse, install, installation, docs]
-description: ClickHouse can run on any Linux, FreeBSD, or Mac OS X with x86_64, AArch64, or PowerPC64LE CPU architecture.
-slug: /en/getting-started/install
+title: Installation
+sidebar_label: Install
+keywords: [clickhouse, install, getting started, quick start]
+slug: /en/install
 ---
 
-## System Requirements {#system-requirements}
+# Installing ClickHouse
+
+You have two options for getting up and running with ClickHouse:
+
+- **[ClickHouse Cloud](https://clickhouse.cloud/):** the official ClickHouse as a service, - built by, maintained, and supported by the creators of ClickHouse
+- **Self-managed ClickHouse:** ClickHouse can run on any Linux, FreeBSD, or Mac OS X with x86_64, AArch64, or PowerPC64LE CPU architecture
+
+## ClickHouse Cloud
+
+The quickest and easiest way to get up and running with ClickHouse is to create a new service in [ClickHouse Cloud](https://clickhouse.cloud/):
+
+<div class="eighty-percent">
+
+![Create a ClickHouse Cloud service](@site/docs/en/_snippets/images/createservice1.png)
+</div>
+
+Once your Cloud service is provisioned, you will be able to [connect to it](/docs/en/integrations/connect-a-client.md) and start [inserting data](/docs/en/integrations/data-ingestion.md).
+
+:::note
+The [Quick Start](/docs/en/quick-start.mdx) walks through the steps to get a ClickHouse Cloud service up and running, connecting to it, and inserting data.
+:::
+
+## Self-Managed Requirements
+
+### CPU Architecture
 
 ClickHouse can run on any Linux, FreeBSD, or Mac OS X with x86_64, AArch64, or PowerPC64LE CPU architecture.
@@ -19,6 +40,55 @@ $ grep -q sse4_2 /proc/cpuinfo && echo "SSE 4.2 supported" || echo "SSE 4.2 not
 
 To run ClickHouse on processors that do not support SSE 4.2 or have AArch64 or PowerPC64LE architecture, you should [build ClickHouse from sources](#from-sources) with proper configuration adjustments.
 
+ClickHouse implements parallel data processing and uses all the hardware resources available. When choosing a processor, take into account that ClickHouse works more efficiently at configurations with a large number of cores but a lower clock rate than at configurations with fewer cores and a higher clock rate. For example, 16 cores with 2600 MHz is preferable to 8 cores with 3600 MHz.
+
+It is recommended to use **Turbo Boost** and **hyper-threading** technologies. It significantly improves performance with a typical workload.
+
+### RAM {#ram}
+
+We recommend using a minimum of 4GB of RAM to perform non-trivial queries. The ClickHouse server can run with a much smaller amount of RAM, but it requires memory for processing queries.
+
+The required volume of RAM depends on:
+
+- The complexity of queries.
+- The amount of data that is processed in queries.
+
+To calculate the required volume of RAM, you should estimate the size of temporary data for [GROUP BY](/docs/en/sql-reference/statements/select/group-by.md#select-group-by-clause), [DISTINCT](/docs/en/sql-reference/statements/select/distinct.md#select-distinct), [JOIN](/docs/en/sql-reference/statements/select/join.md#select-join) and other operations you use.
+
+ClickHouse can use external memory for temporary data. See [GROUP BY in External Memory](/docs/en/sql-reference/statements/select/group-by.md#select-group-by-in-external-memory) for details.
+
+### Swap File {#swap-file}
+
+Disable the swap file for production environments.
+
+### Storage Subsystem {#storage-subsystem}
+
+You need to have 2GB of free disk space to install ClickHouse.
+
+The volume of storage required for your data should be calculated separately. Assessment should include:
+
+- Estimation of the data volume.
+
+    You can take a sample of the data and get the average size of a row from it. Then multiply the value by the number of rows you plan to store.
+
+- The data compression coefficient.
+
+    To estimate the data compression coefficient, load a sample of your data into ClickHouse, and compare the actual size of the data with the size of the table stored. For example, clickstream data is usually compressed by 6-10 times.
+
+To calculate the final volume of data to be stored, apply the compression coefficient to the estimated data volume. If you plan to store data in several replicas, then multiply the estimated volume by the number of replicas.
+
+### Network {#network}
+
+If possible, use networks of 10G or higher class.
+
+The network bandwidth is critical for processing distributed queries with a large amount of intermediate data. Besides, network speed affects replication processes.
+
+### Software {#software}
+
+ClickHouse is developed primarily for the Linux family of operating systems. The recommended Linux distribution is Ubuntu. The `tzdata` package should be installed in the system.
+
+## Self-Managed Install
+
 ## Available Installation Options {#available-installation-options}
 
 ### From DEB Packages {#install-from-deb-packages}
@@ -58,9 +128,9 @@ clickhouse-client # or "clickhouse-client --password" if you set up a password.
 
 </details>
 
-You can replace `stable` with `lts` to use different [release kinds](../faq/operations/production.md) based on your needs.
+You can replace `stable` with `lts` to use different [release kinds](/docs/en/faq/operations/production.md) based on your needs.
 
-You can also download and install packages manually from [here](https://packages.clickhouse.com/deb/pool/stable).
+You can also download and install packages manually from [here](https://packages.clickhouse.com/deb/pool/main/c/).
 
 #### Packages {#packages}
 
@@ -105,7 +175,7 @@ clickhouse-client # or "clickhouse-client --password" if you set up a password.
 
 </details>
 
-You can replace `stable` with `lts` to use different [release kinds](../faq/operations/production.md) based on your needs.
+You can replace `stable` with `lts` to use different [release kinds](/docs/en/faq/operations/production.md) based on your needs.
 
 Then run these commands to install packages:
 
@@ -226,7 +296,7 @@ Use the `clickhouse client` to connect to the server, or `clickhouse local` to p
 
 ### From Sources {#from-sources}
 
-To manually compile ClickHouse, follow the instructions for [Linux](../development/build.md) or [Mac OS X](../development/build-osx.md).
+To manually compile ClickHouse, follow the instructions for [Linux](/docs/en/development/build.md) or [Mac OS X](/docs/en/development/build-osx.md).
 
 You can compile packages and install them or use programs without installing packages. Also by building manually you can disable SSE 4.2 requirement or build for AArch64 CPUs.
 
@@ -281,7 +351,7 @@ If the configuration file is in the current directory, you do not need to specif
 
 ClickHouse supports access restriction settings. They are located in the `users.xml` file (next to `config.xml`).
 By default, access is allowed from anywhere for the `default` user, without a password. See `user/default/networks`.
-For more information, see the section [“Configuration Files”](../operations/configuration-files.md).
+For more information, see the section [“Configuration Files”](/docs/en/operations/configuration-files.md).
 
 After launching server, you can use the command-line client to connect to it:
 
@@ -292,7 +362,7 @@ $ clickhouse-client
 By default, it connects to `localhost:9000` on behalf of the user `default` without a password. It can also be used to connect to a remote server using `--host` argument.
 
 The terminal must use UTF-8 encoding.
-For more information, see the section [“Command-line client”](../interfaces/cli.md).
+For more information, see the section [“Command-line client”](/docs/en/interfaces/cli.md).
 
 Example:
 
@@ -317,6 +387,5 @@ SELECT 1
 
 **Congratulations, the system works!**
 
-To continue experimenting, you can download one of the test data sets or go through [tutorial](./../tutorial.md).
+To continue experimenting, you can download one of the test data sets or go through [tutorial](/docs/en/tutorial.md).
 
 [Original article](https://clickhouse.com/docs/en/getting_started/install/) <!--hide-->
docs/en/interfaces/formats.md
@@ -1020,6 +1020,62 @@ Example:
 }
 ```
 
+To use object name as column value you can use special setting [format_json_object_each_row_column_for_object_name](../operations/settings/settings.md#format_json_object_each_row_column_for_object_name). Value of this setting is set to the name of a column, that is used as JSON key for a row in resulting object.
+Examples:
+
+For output:
+
+Let's say we have table `test` with two columns:
+```
+┌─object_name─┬─number─┐
+│ first_obj   │      1 │
+│ second_obj  │      2 │
+│ third_obj   │      3 │
+└─────────────┴────────┘
+```
+Let's output it in `JSONObjectEachRow` format and use `format_json_object_each_row_column_for_object_name` setting:
+
+```sql
+select * from test settings format_json_object_each_row_column_for_object_name='object_name'
+```
+
+The output:
+```json
+{
+    "first_obj": {"number": 1},
+    "second_obj": {"number": 2},
+    "third_obj": {"number": 3}
+}
+```
+
+For input:
+
+Let's say we stored output from previous example in a file with name `data.json`:
+```sql
+select * from file('data.json', JSONObjectEachRow, 'object_name String, number UInt64') settings format_json_object_each_row_column_for_object_name='object_name'
+```
+
+```
+┌─object_name─┬─number─┐
+│ first_obj   │      1 │
+│ second_obj  │      2 │
+│ third_obj   │      3 │
+└─────────────┴────────┘
+```
+
+It also works in schema inference:
+
+```sql
+desc file('data.json', JSONObjectEachRow) settings format_json_object_each_row_column_for_object_name='object_name'
+```
+
+```
+┌─name────────┬─type────────────┐
+│ object_name │ String          │
+│ number      │ Nullable(Int64) │
+└─────────────┴─────────────────┘
+```
+
+
 ### Inserting Data {#json-inserting-data}
docs/en/interfaces/third-party/client-libraries.md
@@ -41,6 +41,7 @@ ClickHouse Inc does **not** maintain the libraries listed below and hasn’t don
     - [node-clickhouse](https://github.com/apla/node-clickhouse)
     - [nestjs-clickhouse](https://github.com/depyronick/nestjs-clickhouse)
     - [clickhouse-client](https://github.com/depyronick/clickhouse-client)
+    - [node-clickhouse-orm](https://github.com/zimv/node-clickhouse-orm)
 - Perl
     - [perl-DBD-ClickHouse](https://github.com/elcamlost/perl-DBD-ClickHouse)
     - [HTTP-ClickHouse](https://metacpan.org/release/HTTP-ClickHouse)
docs/en/operations/requirements.md (file deleted)
@@ -1,60 +0,0 @@
----
-slug: /en/operations/requirements
-sidebar_position: 44
-sidebar_label: Requirements
----
-
-# Requirements
-
-## CPU
-
-For installation from prebuilt deb packages, use a CPU with x86_64 architecture and support for SSE 4.2 instructions. To run ClickHouse with processors that do not support SSE 4.2 or have AArch64 or PowerPC64LE architecture, you should build ClickHouse from sources.
-
-ClickHouse implements parallel data processing and uses all the hardware resources available. When choosing a processor, take into account that ClickHouse works more efficiently at configurations with a large number of cores but a lower clock rate than at configurations with fewer cores and a higher clock rate. For example, 16 cores with 2600 MHz is preferable to 8 cores with 3600 MHz.
-
-It is recommended to use **Turbo Boost** and **hyper-threading** technologies. It significantly improves performance with a typical workload.
-
-## RAM {#ram}
-
-We recommend using a minimum of 4GB of RAM to perform non-trivial queries. The ClickHouse server can run with a much smaller amount of RAM, but it requires memory for processing queries.
-
-The required volume of RAM depends on:
-
-- The complexity of queries.
-- The amount of data that is processed in queries.
-
-To calculate the required volume of RAM, you should estimate the size of temporary data for [GROUP BY](../sql-reference/statements/select/group-by.md#select-group-by-clause), [DISTINCT](../sql-reference/statements/select/distinct.md#select-distinct), [JOIN](../sql-reference/statements/select/join.md#select-join) and other operations you use.
-
-ClickHouse can use external memory for temporary data. See [GROUP BY in External Memory](../sql-reference/statements/select/group-by.md#select-group-by-in-external-memory) for details.
-
-## Swap File {#swap-file}
-
-Disable the swap file for production environments.
-
-## Storage Subsystem {#storage-subsystem}
-
-You need to have 2GB of free disk space to install ClickHouse.
-
-The volume of storage required for your data should be calculated separately. Assessment should include:
-
-- Estimation of the data volume.
-
-    You can take a sample of the data and get the average size of a row from it. Then multiply the value by the number of rows you plan to store.
-
-- The data compression coefficient.
-
-    To estimate the data compression coefficient, load a sample of your data into ClickHouse, and compare the actual size of the data with the size of the table stored. For example, clickstream data is usually compressed by 6-10 times.
-
-To calculate the final volume of data to be stored, apply the compression coefficient to the estimated data volume. If you plan to store data in several replicas, then multiply the estimated volume by the number of replicas.
-
-## Network {#network}
-
-If possible, use networks of 10G or higher class.
-
-The network bandwidth is critical for processing distributed queries with a large amount of intermediate data. Besides, network speed affects replication processes.
-
-## Software {#software}
-
-ClickHouse is developed primarily for the Linux family of operating systems. The recommended Linux distribution is Ubuntu. The `tzdata` package should be installed in the system.
-
-ClickHouse can also work in other operating system families. See details in the [install guide](../getting-started/install.md) section of the documentation.
docs/en/operations/server-configuration-parameters/settings.md
@@ -1502,6 +1502,21 @@ If not set, [tmp_path](#tmp-path) is used, otherwise it is ignored.
 - Policy should have exactly one volume with local disks.
 :::
 
+## max_temporary_data_on_disk_size {#max_temporary_data_on_disk_size}
+
+Limit the amount of disk space consumed by temporary files in `tmp_path` for the server.
+Queries that exceed this limit will fail with an exception.
+
+Default value: `0`.
+
+**See also**
+
+- [max_temporary_data_on_disk_size_for_user](../../operations/settings/query-complexity.md#settings_max_temporary_data_on_disk_size_for_user)
+- [max_temporary_data_on_disk_size_for_query](../../operations/settings/query-complexity.md#settings_max_temporary_data_on_disk_size_for_query)
+- [tmp_path](#tmp-path)
+- [tmp_policy](#tmp-policy)
+- [max_server_memory_usage](#max_server_memory_usage)
+
 ## uncompressed_cache_size {#server-settings-uncompressed_cache_size}
 
 Cache size (in bytes) for uncompressed data used by table engines from the [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md).
docs/en/operations/settings/query-complexity.md
@@ -313,4 +313,19 @@ When inserting data, ClickHouse calculates the number of partitions in the inser
 
 > “Too many partitions for single INSERT block (more than” + toString(max_parts) + “). The limit is controlled by ‘max_partitions_per_insert_block’ setting. A large number of partitions is a common misconception. It will lead to severe negative performance impact, including slow server startup, slow INSERT queries and slow SELECT queries. Recommended total number of partitions for a table is under 1000..10000. Please note, that partitioning is not intended to speed up SELECT queries (ORDER BY key is sufficient to make range queries fast). Partitions are intended for data manipulation (DROP PARTITION, etc).”
 
+## max_temporary_data_on_disk_size_for_user {#settings_max_temporary_data_on_disk_size_for_user}
+
+The maximum amount of data consumed by temporary files on disk in bytes for all concurrently running user queries.
+Zero means unlimited.
+
+Default value: 0.
+
+
+## max_temporary_data_on_disk_size_for_query {#settings_max_temporary_data_on_disk_size_for_query}
+
+The maximum amount of data consumed by temporary files on disk in bytes for all concurrently running queries.
+Zero means unlimited.
+
+Default value: 0.
+
 [Original article](https://clickhouse.com/docs/en/operations/settings/query_complexity/) <!--hide-->
docs/en/operations/settings/settings-users.md
@@ -35,7 +35,7 @@ Structure of the `users` section:
         <database_name>
             <table_name>
                 <filter>expression</filter>
-            <table_name>
+            </table_name>
         </database_name>
     </databases>
 </user_name>
docs/en/operations/settings/settings.md
@@ -668,7 +668,7 @@ log_query_views=1
 
 ## log_formatted_queries {#settings-log-formatted-queries}
 
-Allows to log formatted queries to the [system.query_log](../../operations/system-tables/query_log.md) system table.
+Allows to log formatted queries to the [system.query_log](../../operations/system-tables/query_log.md) system table (populates `formatted_query` column in the [system.query_log](../../operations/system-tables/query_log.md)).
 
 Possible values:
 
@@ -1599,7 +1599,7 @@ Right now it requires `optimize_skip_unused_shards` (the reason behind this is t
 
 ## optimize_throw_if_noop {#setting-optimize_throw_if_noop}
 
-Enables or disables throwing an exception if an [OPTIMIZE](../../sql-reference/statements/misc.md#misc_operations-optimize) query didn’t perform a merge.
+Enables or disables throwing an exception if an [OPTIMIZE](../../sql-reference/statements/optimize.md) query didn’t perform a merge.
 
 By default, `OPTIMIZE` returns successfully even if it didn’t do anything. This setting lets you differentiate these situations and get the reason in an exception message.
 
@@ -2629,12 +2629,6 @@ Sets the maximum number of inserted blocks after which mergeable blocks are drop
 
 Default value: `64`.
 
-## temporary_live_view_timeout {#temporary-live-view-timeout}
-
-Sets the interval in seconds after which [live view](../../sql-reference/statements/create/view.md#live-view) with timeout is deleted.
-
-Default value: `5`.
-
 ## periodic_live_view_refresh {#periodic-live-view-refresh}
 
 Sets the interval in seconds after which periodically refreshed [live view](../../sql-reference/statements/create/view.md#live-view) is forced to refresh.
@@ -3908,6 +3902,13 @@ Controls validation of UTF-8 sequences in JSON output formats, doesn't impact fo
 
 Disabled by default.
 
+### format_json_object_each_row_column_for_object_name {#format_json_object_each_row_column_for_object_name}
+
+The name of column that will be used for storing/writing object names in [JSONObjectEachRow](../../interfaces/formats.md#jsonobjecteachrow) format.
+Column type should be String. If value is empty, default names `row_{i}` will be used for object names.
+
+Default value: ''.
+
 ## TSV format settings {#tsv-format-settings}
 
 ### input_format_tsv_empty_as_default {#input_format_tsv_empty_as_default}
docs/en/operations/system-tables/columns.md
@@ -5,7 +5,7 @@ slug: /en/operations/system-tables/columns
 
 Contains information about columns in all the tables.
 
-You can use this table to get information similar to the [DESCRIBE TABLE](../../sql-reference/statements/misc.md#misc-describe-table) query, but for multiple tables at once.
+You can use this table to get information similar to the [DESCRIBE TABLE](../../sql-reference/statements/describe-table.md) query, but for multiple tables at once.
 
 Columns from [temporary tables](../../sql-reference/statements/create/table.md#temporary-tables) are visible in the `system.columns` only in those session where they have been created. They are shown with the empty `database` field.
docs/en/operations/system-tables/disks.md
@@ -11,6 +11,7 @@ Columns:
 - `path` ([String](../../sql-reference/data-types/string.md)) — Path to the mount point in the file system.
 - `free_space` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Free space on disk in bytes.
 - `total_space` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Disk volume in bytes.
+- `unreserved_space` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Free space which is not taken by reservations (`free_space` minus the size of reservations taken by merges, inserts, and other disk write operations currently running).
 - `keep_free_space` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Amount of disk space that should stay free on disk in bytes. Defined in the `keep_free_space_bytes` parameter of disk configuration.
 
 **Example**
docs/en/sql-reference/functions/encryption-functions.md
@@ -294,6 +294,53 @@ Result:
 
 Notice how only a portion of the data was properly decrypted, and the rest is gibberish since either `mode`, `key`, or `iv` were different upon encryption.
 
+## tryDecrypt
+
+Similar to `decrypt`, but returns NULL if decryption fails because of using the wrong key.
+
+**Examples**
+
+Let's create a table where `user_id` is the unique user id, `encrypted` is an encrypted string field, `iv` is an initial vector for decrypt/encrypt. Assume that users know their id and the key to decrypt the encrypted field:
+
+```sql
+CREATE TABLE decrypt_null (
+  dt DateTime,
+  user_id UInt32,
+  encrypted String,
+  iv String
+) ENGINE = Memory;
+```
+
+Insert some data:
+
+```sql
+INSERT INTO decrypt_null VALUES
+  ('2022-08-02 00:00:00', 1, encrypt('aes-256-gcm', 'value1', 'keykeykeykeykeykeykeykeykeykey01', 'iv1'), 'iv1'),
+  ('2022-09-02 00:00:00', 2, encrypt('aes-256-gcm', 'value2', 'keykeykeykeykeykeykeykeykeykey02', 'iv2'), 'iv2'),
+  ('2022-09-02 00:00:01', 3, encrypt('aes-256-gcm', 'value3', 'keykeykeykeykeykeykeykeykeykey03', 'iv3'), 'iv3');
+```
+
+Query:
+
+```sql
+SELECT
+    dt,
+    user_id,
+    tryDecrypt('aes-256-gcm', encrypted, 'keykeykeykeykeykeykeykeykeykey02', iv) AS value
+FROM decrypt_null
+ORDER BY user_id ASC
+```
+
+Result:
+
+```
+┌──────────────────dt─┬─user_id─┬─value──┐
+│ 2022-08-02 00:00:00 │       1 │ ᴺᵁᴸᴸ   │
+│ 2022-09-02 00:00:00 │       2 │ value2 │
+│ 2022-09-02 00:00:01 │       3 │ ᴺᵁᴸᴸ   │
+└─────────────────────┴─────────┴────────┘
+```
+
 ## aes_decrypt_mysql
 
 Compatible with mysql encryption and decrypts data encrypted with [AES_ENCRYPT](https://dev.mysql.com/doc/refman/8.0/en/encryption-functions.html#function_aes-encrypt) function.
docs/en/sql-reference/statements/alter/column.md
@@ -127,7 +127,7 @@ Adds a comment to the column. If the `IF EXISTS` clause is specified, the query
 
 Each column can have one comment. If a comment already exists for the column, a new comment overwrites the previous comment.
 
-Comments are stored in the `comment_expression` column returned by the [DESCRIBE TABLE](../../../sql-reference/statements/misc.md#misc-describe-table) query.
+Comments are stored in the `comment_expression` column returned by the [DESCRIBE TABLE](../../../sql-reference/statements/describe-table.md) query.
 
 Example:
 
@@ -253,7 +253,7 @@ The `ALTER` query lets you create and delete separate elements (columns) in nest
 
 There is no support for deleting columns in the primary key or the sampling key (columns that are used in the `ENGINE` expression). Changing the type for columns that are included in the primary key is only possible if this change does not cause the data to be modified (for example, you are allowed to add values to an Enum or to change a type from `DateTime` to `UInt32`).
 
-If the `ALTER` query is not sufficient to make the table changes you need, you can create a new table, copy the data to it using the [INSERT SELECT](../../../sql-reference/statements/insert-into.md#insert_query_insert-select) query, then switch the tables using the [RENAME](../../../sql-reference/statements/misc.md#misc_operations-rename) query and delete the old table. You can use the [clickhouse-copier](../../../operations/utilities/clickhouse-copier.md) as an alternative to the `INSERT SELECT` query.
+If the `ALTER` query is not sufficient to make the table changes you need, you can create a new table, copy the data to it using the [INSERT SELECT](../../../sql-reference/statements/insert-into.md#insert_query_insert-select) query, then switch the tables using the [RENAME](../../../sql-reference/statements/rename.md#rename-table) query and delete the old table. You can use the [clickhouse-copier](../../../operations/utilities/clickhouse-copier.md) as an alternative to the `INSERT SELECT` query.
 
 The `ALTER` query blocks all reads and writes for the table. In other words, if a long `SELECT` is running at the time of the `ALTER` query, the `ALTER` query will wait for it to complete. At the same time, all new queries to the same table will wait while this `ALTER` is running.
docs/en/sql-reference/statements/alter/index.md
@@ -44,7 +44,7 @@ For `*MergeTree` tables mutations execute by **rewriting whole data parts**. The
 
 Mutations are totally ordered by their creation order and are applied to each part in that order. Mutations are also partially ordered with `INSERT INTO` queries: data that was inserted into the table before the mutation was submitted will be mutated and data that was inserted after that will not be mutated. Note that mutations do not block inserts in any way.
 
-A mutation query returns immediately after the mutation entry is added (in case of replicated tables to ZooKeeper, for non-replicated tables - to the filesystem). The mutation itself executes asynchronously using the system profile settings. To track the progress of mutations you can use the [`system.mutations`](../../../operations/system-tables/mutations.md#system_tables-mutations) table. A mutation that was successfully submitted will continue to execute even if ClickHouse servers are restarted. There is no way to roll back the mutation once it is submitted, but if the mutation is stuck for some reason it can be cancelled with the [`KILL MUTATION`](../../../sql-reference/statements/misc.md#kill-mutation) query.
+A mutation query returns immediately after the mutation entry is added (in case of replicated tables to ZooKeeper, for non-replicated tables - to the filesystem). The mutation itself executes asynchronously using the system profile settings. To track the progress of mutations you can use the [`system.mutations`](../../../operations/system-tables/mutations.md#system_tables-mutations) table. A mutation that was successfully submitted will continue to execute even if ClickHouse servers are restarted. There is no way to roll back the mutation once it is submitted, but if the mutation is stuck for some reason it can be cancelled with the [`KILL MUTATION`](../../../sql-reference/statements/kill.md#kill-mutation) query.
 
 Entries for finished mutations are not deleted right away (the number of preserved entries is determined by the `finished_mutations_to_keep` storage engine parameter). Older mutation entries are deleted.
docs/en/sql-reference/statements/alter/partition.md
@@ -319,7 +319,7 @@ You can specify the partition expression in `ALTER ... PARTITION` queries in dif
 
 Usage of quotes when specifying the partition depends on the type of partition expression. For example, for the `String` type, you have to specify its name in quotes (`'`). For the `Date` and `Int*` types no quotes are needed.
 
-All the rules above are also true for the [OPTIMIZE](../../../sql-reference/statements/misc.md#misc_operations-optimize) query. If you need to specify the only partition when optimizing a non-partitioned table, set the expression `PARTITION tuple()`. For example:
+All the rules above are also true for the [OPTIMIZE](../../../sql-reference/statements/optimize.md) query. If you need to specify the only partition when optimizing a non-partitioned table, set the expression `PARTITION tuple()`. For example:
 
 ``` sql
 OPTIMIZE TABLE table_not_partitioned PARTITION tuple() FINAL;
docs/en/sql-reference/statements/check-table.md
@@ -1,7 +1,7 @@
 ---
 slug: /en/sql-reference/statements/check-table
 sidebar_position: 41
-sidebar_label: CHECK
+sidebar_label: CHECK TABLE
 title: "CHECK TABLE Statement"
 ---
 
docs/en/sql-reference/statements/create/view.md
@@ -178,23 +178,6 @@ SELECT * FROM [db.]live_view WHERE ...
 
 You can force live view refresh using the `ALTER LIVE VIEW [db.]table_name REFRESH` statement.
 
-### WITH TIMEOUT Clause
-
-When a live view is created with a `WITH TIMEOUT` clause then the live view will be dropped automatically after the specified number of seconds elapse since the end of the last [WATCH](../../../sql-reference/statements/watch.md) query that was watching the live view.
-
-```sql
-CREATE LIVE VIEW [db.]table_name WITH TIMEOUT [value_in_sec] AS SELECT ...
-```
-
-If the timeout value is not specified then the value specified by the [temporary_live_view_timeout](../../../operations/settings/settings.md#temporary-live-view-timeout) setting is used.
-
-**Example:**
-
-```sql
-CREATE TABLE mt (x Int8) Engine = MergeTree ORDER BY x;
-CREATE LIVE VIEW lv WITH TIMEOUT 15 AS SELECT sum(x) FROM mt;
-```
-
 ### WITH REFRESH Clause
 
 When a live view is created with a `WITH REFRESH` clause then it will be automatically refreshed after the specified number of seconds elapse since the last refresh or trigger.
@@ -224,20 +207,6 @@ WATCH lv
 └─────────────────────┴──────────┘
 ```
 
-You can combine `WITH TIMEOUT` and `WITH REFRESH` clauses using an `AND` clause.
-
-```sql
-CREATE LIVE VIEW [db.]table_name WITH TIMEOUT [value_in_sec] AND REFRESH [value_in_sec] AS SELECT ...
-```
-
-**Example:**
-
-```sql
-CREATE LIVE VIEW lv WITH TIMEOUT 15 AND REFRESH 5 AS SELECT now();
-```
-
-After 15 sec the live view will be automatically dropped if there are no active `WATCH` queries.
-
 ```sql
 WATCH lv
 ```
docs/en/sql-reference/statements/describe-table.md
@@ -1,7 +1,7 @@
 ---
 slug: /en/sql-reference/statements/describe-table
 sidebar_position: 42
-sidebar_label: DESCRIBE
+sidebar_label: DESCRIBE TABLE
 title: "DESCRIBE TABLE"
 ---
 
docs/en/sql-reference/statements/grant.md
@@ -221,7 +221,7 @@ By default, a user account or a role has no privileges.
 
 If a user or a role has no privileges, it is displayed as [NONE](#grant-none) privilege.
 
-Some queries by their implementation require a set of privileges. For example, to execute the [RENAME](../../sql-reference/statements/misc.md#misc_operations-rename) query you need the following privileges: `SELECT`, `CREATE TABLE`, `INSERT` and `DROP TABLE`.
+Some queries by their implementation require a set of privileges. For example, to execute the [RENAME](../../sql-reference/statements/optimize.md) query you need the following privileges: `SELECT`, `CREATE TABLE`, `INSERT` and `DROP TABLE`.
 
 ### SELECT
 
@@ -304,11 +304,11 @@ Examples of how this hierarchy is treated:
 - The `MODIFY SETTING` privilege allows modifying table engine settings. It does not affect settings or server configuration parameters.
 - The `ATTACH` operation needs the [CREATE](#grant-create) privilege.
 - The `DETACH` operation needs the [DROP](#grant-drop) privilege.
-- To stop mutation by the [KILL MUTATION](../../sql-reference/statements/misc.md#kill-mutation) query, you need to have a privilege to start this mutation. For example, if you want to stop the `ALTER UPDATE` query, you need the `ALTER UPDATE`, `ALTER TABLE`, or `ALTER` privilege.
+- To stop mutation by the [KILL MUTATION](../../sql-reference/statements/kill.md#kill-mutation) query, you need to have a privilege to start this mutation. For example, if you want to stop the `ALTER UPDATE` query, you need the `ALTER UPDATE`, `ALTER TABLE`, or `ALTER` privilege.
 
 ### CREATE
 
-Allows executing [CREATE](../../sql-reference/statements/create/index.md) and [ATTACH](../../sql-reference/statements/misc.md#attach) DDL-queries according to the following hierarchy of privileges:
+Allows executing [CREATE](../../sql-reference/statements/create/index.md) and [ATTACH](../../sql-reference/statements/attach.md) DDL-queries according to the following hierarchy of privileges:
 
 - `CREATE`. Level: `GROUP`
 - `CREATE DATABASE`. Level: `DATABASE`
@@ -323,7 +323,7 @@ Allows executing [CREATE](../../sql-reference/statements/create/index.md) and [A
 
 ### DROP
 
-Allows executing [DROP](../../sql-reference/statements/misc.md#drop) and [DETACH](../../sql-reference/statements/misc.md#detach) queries according to the following hierarchy of privileges:
+Allows executing [DROP](../../sql-reference/statements/drop.md) and [DETACH](../../sql-reference/statements/detach.md) queries according to the following hierarchy of privileges:
 
 - `DROP`. Level: `GROUP`
 - `DROP DATABASE`. Level: `DATABASE`
@@ -333,13 +333,13 @@ Allows executing [DROP](../../sql-reference/statements/misc.md#drop) and [DETACH
 
 ### TRUNCATE
 
-Allows executing [TRUNCATE](../../sql-reference/statements/misc.md#truncate-statement) queries.
+Allows executing [TRUNCATE](../../sql-reference/statements/truncate.md) queries.
 
 Privilege level: `TABLE`.
 
 ### OPTIMIZE
 
-Allows executing [OPTIMIZE TABLE](../../sql-reference/statements/misc.md#misc_operations-optimize) queries.
+Allows executing [OPTIMIZE TABLE](../../sql-reference/statements/optimize.md) queries.
 
 Privilege level: `TABLE`.
 
@@ -359,7 +359,7 @@ A user has the `SHOW` privilege if it has any other privilege concerning the spe
 
 ### KILL QUERY
 
-Allows executing [KILL](../../sql-reference/statements/misc.md#kill-query-statement) queries according to the following hierarchy of privileges:
+Allows executing [KILL](../../sql-reference/statements/kill.md#kill-query) queries according to the following hierarchy of privileges:
 
 Privilege level: `GLOBAL`.
 
docs/en/tutorial.md
@@ -488,7 +488,7 @@ FORMAT TSV
 max_insert_block_size    1048576    0    "The maximum block size for insertion, if we control the creation of blocks for insertion."
 ```
 
-Optionally you can [OPTIMIZE](../sql-reference/statements/misc.md#misc_operations-optimize) the tables after import. Tables that are configured with an engine from MergeTree-family always do merges of data parts in the background to optimize data storage (or at least check if it makes sense). These queries force the table engine to do storage optimization right now instead of some time later:
+Optionally you can [OPTIMIZE](../sql-reference/statements/optimize.md) the tables after import. Tables that are configured with an engine from MergeTree-family always do merges of data parts in the background to optimize data storage (or at least check if it makes sense). These queries force the table engine to do storage optimization right now instead of some time later:
 
 ``` bash
 clickhouse-client --query "OPTIMIZE TABLE tutorial.hits_v1 FINAL"
@ -34,6 +34,7 @@ sidebar_label: "Клиентские библиотеки от сторонни
|
||||
- [node-clickhouse](https://github.com/apla/node-clickhouse)
|
||||
- [nestjs-clickhouse](https://github.com/depyronick/nestjs-clickhouse)
|
||||
- [clickhouse-client](https://github.com/depyronick/clickhouse-client)
|
||||
- [node-clickhouse-orm](https://github.com/zimv/node-clickhouse-orm)
|
||||
- Perl
|
||||
- [perl-DBD-ClickHouse](https://github.com/elcamlost/perl-DBD-ClickHouse)
|
||||
- [HTTP-ClickHouse](https://metacpan.org/release/HTTP-ClickHouse)
|
||||
|
@ -64,7 +64,7 @@ ClickHouse поддерживает управление доступом на
|
||||
|
||||
- [CREATE USER](../sql-reference/statements/create/user.md#create-user-statement)
|
||||
- [ALTER USER](../sql-reference/statements/alter/user.md)
|
||||
- [DROP USER](../sql-reference/statements/misc.md#drop-user-statement)
|
||||
- [DROP USER](../sql-reference/statements/drop.md#drop-user)
|
||||
- [SHOW CREATE USER](../sql-reference/statements/show.md#show-create-user-statement)
|
||||
|
||||
### Applying Settings {#access-control-settings-applying}
|
||||
@ -91,9 +91,9 @@ ClickHouse поддерживает управление доступом на
|
||||
|
||||
- [CREATE ROLE](../sql-reference/statements/create/index.md#create-role-statement)
|
||||
- [ALTER ROLE](../sql-reference/statements/alter/role.md)
|
||||
- [DROP ROLE](../sql-reference/statements/misc.md#drop-role-statement)
|
||||
- [SET ROLE](../sql-reference/statements/misc.md#set-role-statement)
|
||||
- [SET DEFAULT ROLE](../sql-reference/statements/misc.md#set-default-role-statement)
|
||||
- [DROP ROLE](../sql-reference/statements/drop.md#drop-role)
|
||||
- [SET ROLE](../sql-reference/statements/set-role.md)
|
||||
- [SET DEFAULT ROLE](../sql-reference/statements/set-role.md#set-default-role)
|
||||
- [SHOW CREATE ROLE](../sql-reference/statements/show.md#show-create-role-statement)
|
||||
|
||||
Privileges can be assigned to a role with the [GRANT](../sql-reference/statements/grant.md) query. To revoke privileges from a role, ClickHouse provides the [REVOKE](../sql-reference/statements/revoke.md) query.
|
||||
@ -106,7 +106,7 @@ ClickHouse поддерживает управление доступом на
|
||||
|
||||
- [CREATE ROW POLICY](../sql-reference/statements/create/index.md#create-row-policy-statement)
|
||||
- [ALTER ROW POLICY](../sql-reference/statements/alter/row-policy.md)
|
||||
- [DROP ROW POLICY](../sql-reference/statements/misc.md#drop-row-policy-statement)
|
||||
- [DROP ROW POLICY](../sql-reference/statements/drop.md#drop-row-policy)
|
||||
- [SHOW CREATE ROW POLICY](../sql-reference/statements/show.md#show-create-row-policy-statement)
|
||||
|
||||
|
||||
@ -118,7 +118,7 @@ ClickHouse поддерживает управление доступом на
|
||||
|
||||
- [CREATE SETTINGS PROFILE](../sql-reference/statements/create/index.md#create-settings-profile-statement)
|
||||
- [ALTER SETTINGS PROFILE](../sql-reference/statements/alter/settings-profile.md)
|
||||
- [DROP SETTINGS PROFILE](../sql-reference/statements/misc.md#drop-settings-profile-statement)
|
||||
- [DROP SETTINGS PROFILE](../sql-reference/statements/drop.md#drop-settings-profile)
|
||||
- [SHOW CREATE SETTINGS PROFILE](../sql-reference/statements/show.md#show-create-settings-profile-statement)
|
||||
|
||||
|
||||
@ -132,7 +132,7 @@ ClickHouse поддерживает управление доступом на
|
||||
|
||||
- [CREATE QUOTA](../sql-reference/statements/create/index.md#create-quota-statement)
|
||||
- [ALTER QUOTA](../sql-reference/statements/alter/quota.md)
|
||||
- [DROP QUOTA](../sql-reference/statements/misc.md#drop-quota-statement)
|
||||
- [DROP QUOTA](../sql-reference/statements/drop.md#drop-quota)
|
||||
- [SHOW CREATE QUOTA](../sql-reference/statements/show.md#show-create-quota-statement)
|
||||
|
||||
|
||||
|
@ -1986,7 +1986,7 @@ SELECT * FROM test_table
|
||||
|
||||
## optimize_throw_if_noop {#setting-optimize_throw_if_noop}
|
||||
|
||||
Enables or disables throwing an exception when the [OPTIMIZE](../../sql-reference/statements/misc.md#misc_operations-optimize) query does not perform a merge.
|
||||
Enables or disables throwing an exception when the [OPTIMIZE](../../sql-reference/statements/optimize.md) query does not perform a merge.
|
||||
|
||||
By default, `OPTIMIZE` returns successfully even when it did nothing. This setting lets you distinguish such cases and raises an exception with an explanatory message instead.
|
||||
|
||||
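A minimal sketch of the setting in action, assuming a MergeTree table `t` that is already fully merged:

```sql
SET optimize_throw_if_noop = 1;
OPTIMIZE TABLE t; -- now raises an exception instead of silently doing nothing
```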
@ -3258,12 +3258,6 @@ SELECT * FROM test2;
|
||||
|
||||
Default value: `64`.
|
||||
|
||||
## temporary_live_view_timeout {#temporary-live-view-timeout}
|
||||
|
||||
Sets the time in seconds after which a [LIVE VIEW](../../sql-reference/statements/create/view.md#live-view) is deleted.
|
||||
|
||||
Default value: `5`.
|
||||
|
||||
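For example, a hedged sketch of overriding it for the current session (whether it applies depends on when the live view is created):

```sql
SET temporary_live_view_timeout = 60; -- newly created temporary LIVE VIEWs live 60 s
```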
## periodic_live_view_refresh {#periodic-live-view-refresh}
|
||||
|
||||
Sets the time in seconds after which a [LIVE VIEW](../../sql-reference/statements/create/view.md#live-view) with auto-refresh enabled is refreshed.
|
||||
|
@ -5,7 +5,7 @@ slug: /ru/operations/system-tables/columns
|
||||
|
||||
Contains information about the columns of all tables.
|
||||
|
||||
Using this table, you can get information similar to the [DESCRIBE TABLE](../../sql-reference/statements/misc.md#misc-describe-table) query, but for many tables at once.
|
||||
Using this table, you can get information similar to the [DESCRIBE TABLE](../../sql-reference/statements/describe-table.md) query, but for many tables at once.
|
||||
|
||||
Columns of [temporary tables](../../sql-reference/statements/create/table.md#temporary-tables) are visible in `system.columns` only in the sessions where those tables were created. The `database` field for such columns is empty.
|
||||
|
||||
|
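A quick illustrative query (the database and table names are hypothetical):

```sql
SELECT name, type, default_expression
FROM system.columns
WHERE database = 'default' AND table = 'hits'
LIMIT 10;
```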
@ -11,5 +11,6 @@ Cодержит информацию о дисках, заданных в [ко
|
||||
- `path` ([String](../../sql-reference/data-types/string.md)) — the path to the mount point in the file system.
|
||||
- `free_space` ([UInt64](../../sql-reference/data-types/int-uint.md)) — free disk space in bytes.
|
||||
- `total_space` ([UInt64](../../sql-reference/data-types/int-uint.md)) — disk size in bytes.
|
||||
- `unreserved_space` ([UInt64](../../sql-reference/data-types/int-uint.md)) — free space not taken by reservations, in bytes (`free_space` minus the space reserved for merges, inserts, and other disk write operations currently running).
|
||||
- `keep_free_space` ([UInt64](../../sql-reference/data-types/int-uint.md)) — the amount of disk space that must stay free, in bytes. Defined by the `keep_free_space_bytes` parameter of the disk configuration.
|
||||
|
||||
|
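For instance, a small sketch that reads these columns:

```sql
SELECT name, path, formatReadableSize(free_space) AS free, formatReadableSize(total_space) AS total
FROM system.disks;
```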
@ -1053,6 +1053,7 @@ formatDateTime(Time, Format[, Timezone])
|
||||
| %w | weekday number, starting from Sunday (0-6) | 2 |
|
||||
| %y | year, last 2 digits (00-99) | 18 |
|
||||
| %Y | year, 4 digits | 2018 |
|
||||
| %z | time offset from UTC as +HHMM or -HHMM | -0500 |
|
||||
| %% | the % character | % |
|
||||
|
||||
**Example**
|
||||
|
@ -10,5 +10,4 @@ sidebar_position: 28
|
||||
- [INSERT INTO](statements/insert-into.md)
|
||||
- [CREATE](statements/create/index.md)
|
||||
- [ALTER](statements/alter/index.md#query_language_queries_alter)
|
||||
- [Other types of queries](statements/misc.md)
|
||||
|
||||
|
@ -128,7 +128,7 @@ COMMENT COLUMN [IF EXISTS] name 'Text comment'
|
||||
|
||||
Each column can have only one comment. When the query is executed, the existing comment is replaced with the new one.
|
||||
|
||||
You can view comments in the `comment_expression` column of the [DESCRIBE TABLE](../misc.md#misc-describe-table) query.
|
||||
You can view comments in the `comment_expression` column of the [DESCRIBE TABLE](../describe-table.md) query.
|
||||
|
||||
Example:
|
||||
|
||||
@ -254,7 +254,7 @@ SELECT groupArray(x), groupArray(s) FROM tmp;
|
||||
|
||||
Columns that are part of the primary key or the sampling key (in general, anything that appears in the `ENGINE` expression) cannot be dropped. Changing the type of a primary key column is allowed only if the change does not alter the data (for example, adding a value to an Enum or changing the type from `DateTime` to `UInt32` is allowed).
|
||||
|
||||
If the `ALTER` query is not enough for the table change you need, you can create a new table, copy the data into it with an [INSERT SELECT](../insert-into.md#insert_query_insert-select) query, swap the tables with a [RENAME](../misc.md#misc_operations-rename) query, and drop the old table. As an alternative to `INSERT SELECT`, you can use the [clickhouse-copier](../../../sql-reference/statements/alter/index.md) tool.
|
||||
If the `ALTER` query is not enough for the table change you need, you can create a new table, copy the data into it with an [INSERT SELECT](../insert-into.md#insert_query_insert-select) query, swap the tables with a [RENAME](../rename.md#rename-table) query, and drop the old table. As an alternative to `INSERT SELECT`, you can use the [clickhouse-copier](../../../sql-reference/statements/alter/index.md) tool.
|
||||
|
||||
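A hedged sketch of that copy-and-swap workflow (the schema and the names `t`, `t_new`, `t_old` are placeholders):

```sql
CREATE TABLE t_new (id UInt64, s String) ENGINE = MergeTree ORDER BY id;
INSERT INTO t_new SELECT id, s FROM t;      -- copy the data
RENAME TABLE t TO t_old, t_new TO t;        -- atomically swap the tables
DROP TABLE t_old;                           -- drop the old table
```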
The `ALTER` query blocks all reads and writes for the table. In other words, if a long `SELECT` was running when the `ALTER` query was issued, the `ALTER` query waits for it to finish, and all new queries to the same table wait until this `ALTER` completes.
|
||||
|
||||
|
@ -1,7 +1,7 @@
|
||||
---
|
||||
slug: /ru/sql-reference/statements/check-table
|
||||
sidebar_position: 41
|
||||
sidebar_label: CHECK
|
||||
sidebar_label: CHECK TABLE
|
||||
---
|
||||
|
||||
# CHECK TABLE Statement {#check-table}
|
||||
|
@ -17,13 +17,13 @@ CREATE ROLE [IF NOT EXISTS | OR REPLACE] name1 [ON CLUSTER cluster_name1] [, nam
|
||||
|
||||
## Managing Roles {#managing-roles}
|
||||
|
||||
A user can be assigned multiple roles. Users can apply the assigned roles in arbitrary combinations with the [SET ROLE](../misc.md#set-role-statement) statement. The final scope of privileges is the combination of all privileges of all applied roles. If a user also has privileges granted directly to their account, these are added to the privileges granted through roles.
|
||||
A user can be assigned multiple roles. Users can apply the assigned roles in arbitrary combinations with the [SET ROLE](../set-role.md) statement. The final scope of privileges is the combination of all privileges of all applied roles. If a user also has privileges granted directly to their account, these are added to the privileges granted through roles.
|
||||
|
||||
Default roles are applied when the user logs in. You can set default roles with the [SET DEFAULT ROLE](../misc.md#set-default-role-statement) or [ALTER USER](../alter/index.md#alter-user-statement) statements.
|
||||
Default roles are applied when the user logs in. You can set default roles with the [SET DEFAULT ROLE](../set-role.md#set-default-role) or [ALTER USER](../alter/index.md#alter-user-statement) statements.
|
||||
|
||||
To revoke a role, use the [REVOKE](../../../sql-reference/statements/revoke.md) statement.
|
||||
|
||||
To delete a role, use the [DROP ROLE](../misc.md#drop-role-statement) statement. A deleted role is automatically revoked from all users it was assigned to.
|
||||
To delete a role, use the [DROP ROLE](../drop.md#drop-role) statement. A deleted role is automatically revoked from all users it was assigned to.
|
||||
|
||||
## Examples {#create-role-examples}
|
||||
|
||||
|
@ -156,23 +156,6 @@ SELECT * FROM [db.]live_view WHERE ...
|
||||
|
||||
To force-refresh a live view, use the `ALTER LIVE VIEW [db.]table_name REFRESH` query.
|
||||
|
||||
### WITH TIMEOUT Clause {#live-view-with-timeout}
|
||||
|
||||
A live view created with the `WITH TIMEOUT` parameter is deleted automatically after the specified number of seconds have passed since the last [WATCH](../../../sql-reference/statements/watch.md) query was run against it.
|
||||
|
||||
```sql
|
||||
CREATE LIVE VIEW [db.]table_name WITH TIMEOUT [value_in_sec] AS SELECT ...
|
||||
```
|
||||
|
||||
If the time interval is not specified, the value of the [temporary_live_view_timeout](../../../operations/settings/settings.md#temporary-live-view-timeout) setting is used.
|
||||
|
||||
**Example:**
|
||||
|
||||
```sql
|
||||
CREATE TABLE mt (x Int8) Engine = MergeTree ORDER BY x;
|
||||
CREATE LIVE VIEW lv WITH TIMEOUT 15 AS SELECT sum(x) FROM mt;
|
||||
```
|
||||
|
||||
### WITH REFRESH Clause {#live-view-with-refresh}
|
||||
|
||||
A live view created with the `WITH REFRESH` parameter is refreshed automatically at the specified interval, counting from the last refresh.
|
||||
@ -202,20 +185,6 @@ WATCH lv;
|
||||
└─────────────────────┴──────────┘
|
||||
```
|
||||
|
||||
The `WITH TIMEOUT` and `WITH REFRESH` parameters can be combined using `AND`.
|
||||
|
||||
```sql
|
||||
CREATE LIVE VIEW [db.]table_name WITH TIMEOUT [value_in_sec] AND REFRESH [value_in_sec] AS SELECT ...
|
||||
```
|
||||
|
||||
**Example:**
|
||||
|
||||
```sql
|
||||
CREATE LIVE VIEW lv WITH TIMEOUT 15 AND REFRESH 5 AS SELECT now();
|
||||
```
|
||||
|
||||
After 15 seconds, the view is deleted automatically if there is no active `WATCH` query.
|
||||
|
||||
```sql
|
||||
WATCH lv;
|
||||
```
|
||||
|
@ -1,7 +1,7 @@
|
||||
---
|
||||
slug: /ru/sql-reference/statements/describe-table
|
||||
sidebar_position: 42
|
||||
sidebar_label: DESCRIBE
|
||||
sidebar_label: DESCRIBE TABLE
|
||||
---
|
||||
|
||||
# DESCRIBE TABLE {#misc-describe-table}
|
||||
|
@ -221,7 +221,7 @@ GRANT SELECT(x,y) ON db.table TO john WITH GRANT OPTION
|
||||
|
||||
The absence of privileges for a user or role is displayed as the [NONE](#grant-none) privilege.
|
||||
|
||||
Some queries require a certain set of privileges by their implementation. For example, to run a [RENAME](misc.md#misc_operations-rename) query you need the following privileges: `SELECT`, `CREATE TABLE`, `INSERT` and `DROP TABLE`.
|
||||
Some queries require a certain set of privileges by their implementation. For example, to run a [RENAME](rename.md#rename-table) query you need the following privileges: `SELECT`, `CREATE TABLE`, `INSERT` and `DROP TABLE`.
|
||||
|
||||
|
||||
### SELECT {#grant-select}
|
||||
@ -309,7 +309,7 @@ GRANT INSERT(x,y) ON db.table TO john
|
||||
|
||||
### CREATE {#grant-create}
|
||||
|
||||
Allows executing [CREATE](../../sql-reference/statements/create/index.md) and [ATTACH](misc.md#attach) DDL queries according to the following hierarchy of privileges:
|
||||
Allows executing [CREATE](../../sql-reference/statements/create/index.md) and [ATTACH](attach.md) DDL queries according to the following hierarchy of privileges:
|
||||
|
||||
- `CREATE`. Level: `GROUP`
|
||||
- `CREATE DATABASE`. Level: `DATABASE`
|
||||
@ -324,7 +324,7 @@ GRANT INSERT(x,y) ON db.table TO john
|
||||
|
||||
### DROP {#grant-drop}
|
||||
|
||||
Allows executing [DROP](misc.md#drop) and [DETACH](misc.md#detach-statement) queries according to the following hierarchy of privileges:
|
||||
Allows executing [DROP](drop.md) and [DETACH](detach.md) queries according to the following hierarchy of privileges:
|
||||
|
||||
- `DROP`. Level: `GROUP`
|
||||
- `DROP DATABASE`. Level: `DATABASE`
|
||||
@ -340,7 +340,7 @@ GRANT INSERT(x,y) ON db.table TO john
|
||||
|
||||
### OPTIMIZE {#grant-optimize}
|
||||
|
||||
Allows executing [OPTIMIZE TABLE](misc.md#misc_operations-optimize) queries.
|
||||
Allows executing [OPTIMIZE TABLE](optimize.md) queries.
|
||||
|
||||
Level: `TABLE`.
|
||||
|
||||
|
@ -1,10 +1,460 @@
|
||||
---
|
||||
slug: /zh/getting-started/example-datasets/brown-benchmark
|
||||
sidebar_label: Brown University Benchmark
|
||||
description: A new analytical benchmark for machine-generated log data
|
||||
title: "Brown University Benchmark"
|
||||
sidebar_label: Brown University Benchmark
|
||||
description: A new analytical benchmark for machine-generated log data
|
||||
title: "布朗大学基准"
|
||||
---
|
||||
|
||||
import Content from '@site/docs/en/getting-started/example-datasets/brown-benchmark.md';
|
||||
`MgBench` is a new analytical benchmark for machine-generated log data, created by [Andrew Crotty](http://cs.brown.edu/people/acrotty/).
|
||||
|
||||
<Content />
|
||||
Download the data:
|
||||
|
||||
```bash
|
||||
wget https://datasets.clickhouse.com/mgbench{1..3}.csv.xz
|
||||
```
|
||||
|
||||
Unpack the data:
|
||||
|
||||
```bash
|
||||
xz -v -d mgbench{1..3}.csv.xz
|
||||
```
|
||||
|
||||
Create the database and tables:
|
||||
|
||||
```sql
|
||||
CREATE DATABASE mgbench;
|
||||
```
|
||||
|
||||
```sql
|
||||
USE mgbench;
|
||||
```
|
||||
|
||||
```sql
|
||||
CREATE TABLE mgbench.logs1 (
|
||||
log_time DateTime,
|
||||
machine_name LowCardinality(String),
|
||||
machine_group LowCardinality(String),
|
||||
cpu_idle Nullable(Float32),
|
||||
cpu_nice Nullable(Float32),
|
||||
cpu_system Nullable(Float32),
|
||||
cpu_user Nullable(Float32),
|
||||
cpu_wio Nullable(Float32),
|
||||
disk_free Nullable(Float32),
|
||||
disk_total Nullable(Float32),
|
||||
part_max_used Nullable(Float32),
|
||||
load_fifteen Nullable(Float32),
|
||||
load_five Nullable(Float32),
|
||||
load_one Nullable(Float32),
|
||||
mem_buffers Nullable(Float32),
|
||||
mem_cached Nullable(Float32),
|
||||
mem_free Nullable(Float32),
|
||||
mem_shared Nullable(Float32),
|
||||
swap_free Nullable(Float32),
|
||||
bytes_in Nullable(Float32),
|
||||
bytes_out Nullable(Float32)
|
||||
)
|
||||
ENGINE = MergeTree()
|
||||
ORDER BY (machine_group, machine_name, log_time);
|
||||
```
|
||||
|
||||
|
||||
```sql
|
||||
CREATE TABLE mgbench.logs2 (
|
||||
log_time DateTime,
|
||||
client_ip IPv4,
|
||||
request String,
|
||||
status_code UInt16,
|
||||
object_size UInt64
|
||||
)
|
||||
ENGINE = MergeTree()
|
||||
ORDER BY log_time;
|
||||
```
|
||||
|
||||
|
||||
```sql
|
||||
CREATE TABLE mgbench.logs3 (
|
||||
log_time DateTime64,
|
||||
device_id FixedString(15),
|
||||
device_name LowCardinality(String),
|
||||
device_type LowCardinality(String),
|
||||
device_floor UInt8,
|
||||
event_type LowCardinality(String),
|
||||
event_unit FixedString(1),
|
||||
event_value Nullable(Float32)
|
||||
)
|
||||
ENGINE = MergeTree()
|
||||
ORDER BY (event_type, log_time);
|
||||
```
|
||||
|
||||
Insert the data:
|
||||
|
||||
```bash
|
||||
clickhouse-client --query "INSERT INTO mgbench.logs1 FORMAT CSVWithNames" < mgbench1.csv
|
||||
clickhouse-client --query "INSERT INTO mgbench.logs2 FORMAT CSVWithNames" < mgbench2.csv
|
||||
clickhouse-client --query "INSERT INTO mgbench.logs3 FORMAT CSVWithNames" < mgbench3.csv
|
||||
```
|
||||
|
||||
## Run benchmark queries
|
||||
|
||||
```sql
|
||||
USE mgbench;
|
||||
```
|
||||
|
||||
```sql
|
||||
-- Q1.1: What is the CPU/network utilization for each web server since midnight?
|
||||
|
||||
SELECT machine_name,
|
||||
MIN(cpu) AS cpu_min,
|
||||
MAX(cpu) AS cpu_max,
|
||||
AVG(cpu) AS cpu_avg,
|
||||
MIN(net_in) AS net_in_min,
|
||||
MAX(net_in) AS net_in_max,
|
||||
AVG(net_in) AS net_in_avg,
|
||||
MIN(net_out) AS net_out_min,
|
||||
MAX(net_out) AS net_out_max,
|
||||
AVG(net_out) AS net_out_avg
|
||||
FROM (
|
||||
SELECT machine_name,
|
||||
COALESCE(cpu_user, 0.0) AS cpu,
|
||||
COALESCE(bytes_in, 0.0) AS net_in,
|
||||
COALESCE(bytes_out, 0.0) AS net_out
|
||||
FROM logs1
|
||||
WHERE machine_name IN ('anansi','aragog','urd')
|
||||
AND log_time >= TIMESTAMP '2017-01-11 00:00:00'
|
||||
) AS r
|
||||
GROUP BY machine_name;
|
||||
```
|
||||
|
||||
|
||||
```sql
|
||||
-- Q1.2: Which computer lab machines have been offline in the past day?
|
||||
|
||||
SELECT machine_name,
|
||||
log_time
|
||||
FROM logs1
|
||||
WHERE (machine_name LIKE 'cslab%' OR
|
||||
machine_name LIKE 'mslab%')
|
||||
AND load_one IS NULL
|
||||
AND log_time >= TIMESTAMP '2017-01-10 00:00:00'
|
||||
ORDER BY machine_name,
|
||||
log_time;
|
||||
```
|
||||
|
||||
```sql
|
||||
-- Q1.3: What are the hourly average metrics during the past 10 days for a specific workstation?
|
||||
|
||||
SELECT dt,
|
||||
hr,
|
||||
AVG(load_fifteen) AS load_fifteen_avg,
|
||||
AVG(load_five) AS load_five_avg,
|
||||
AVG(load_one) AS load_one_avg,
|
||||
AVG(mem_free) AS mem_free_avg,
|
||||
AVG(swap_free) AS swap_free_avg
|
||||
FROM (
|
||||
SELECT CAST(log_time AS DATE) AS dt,
|
||||
EXTRACT(HOUR FROM log_time) AS hr,
|
||||
load_fifteen,
|
||||
load_five,
|
||||
load_one,
|
||||
mem_free,
|
||||
swap_free
|
||||
FROM logs1
|
||||
WHERE machine_name = 'babbage'
|
||||
AND load_fifteen IS NOT NULL
|
||||
AND load_five IS NOT NULL
|
||||
AND load_one IS NOT NULL
|
||||
AND mem_free IS NOT NULL
|
||||
AND swap_free IS NOT NULL
|
||||
AND log_time >= TIMESTAMP '2017-01-01 00:00:00'
|
||||
) AS r
|
||||
GROUP BY dt,
|
||||
hr
|
||||
ORDER BY dt,
|
||||
hr;
|
||||
```
|
||||
|
||||
```sql
|
||||
-- Q1.4: Over one month, how often was each server blocked on disk I/O?
|
||||
|
||||
SELECT machine_name,
|
||||
COUNT(*) AS spikes
|
||||
FROM logs1
|
||||
WHERE machine_group = 'Servers'
|
||||
AND cpu_wio > 0.99
|
||||
AND log_time >= TIMESTAMP '2016-12-01 00:00:00'
|
||||
AND log_time < TIMESTAMP '2017-01-01 00:00:00'
|
||||
GROUP BY machine_name
|
||||
ORDER BY spikes DESC
|
||||
LIMIT 10;
|
||||
```
|
||||
|
||||
```sql
|
||||
-- Q1.5: Which externally reachable VMs have run low on memory?
|
||||
|
||||
SELECT machine_name,
|
||||
dt,
|
||||
MIN(mem_free) AS mem_free_min
|
||||
FROM (
|
||||
SELECT machine_name,
|
||||
CAST(log_time AS DATE) AS dt,
|
||||
mem_free
|
||||
FROM logs1
|
||||
WHERE machine_group = 'DMZ'
|
||||
AND mem_free IS NOT NULL
|
||||
) AS r
|
||||
GROUP BY machine_name,
|
||||
dt
|
||||
HAVING MIN(mem_free) < 10000
|
||||
ORDER BY machine_name,
|
||||
dt;
|
||||
```
|
||||
|
||||
```sql
|
||||
-- Q1.6: What is the total hourly network traffic across all file servers?
|
||||
|
||||
SELECT dt,
|
||||
hr,
|
||||
SUM(net_in) AS net_in_sum,
|
||||
SUM(net_out) AS net_out_sum,
|
||||
SUM(net_in) + SUM(net_out) AS both_sum
|
||||
FROM (
|
||||
SELECT CAST(log_time AS DATE) AS dt,
|
||||
EXTRACT(HOUR FROM log_time) AS hr,
|
||||
COALESCE(bytes_in, 0.0) / 1000000000.0 AS net_in,
|
||||
COALESCE(bytes_out, 0.0) / 1000000000.0 AS net_out
|
||||
FROM logs1
|
||||
WHERE machine_name IN ('allsorts','andes','bigred','blackjack','bonbon',
|
||||
'cadbury','chiclets','cotton','crows','dove','fireball','hearts','huey',
|
||||
'lindt','milkduds','milkyway','mnm','necco','nerds','orbit','peeps',
|
||||
'poprocks','razzles','runts','smarties','smuggler','spree','stride',
|
||||
'tootsie','trident','wrigley','york')
|
||||
) AS r
|
||||
GROUP BY dt,
|
||||
hr
|
||||
ORDER BY both_sum DESC
|
||||
LIMIT 10;
|
||||
```
|
||||
|
||||
```sql
|
||||
-- Q2.1: Which requests have caused server errors within the past two weeks?
|
||||
|
||||
SELECT *
|
||||
FROM logs2
|
||||
WHERE status_code >= 500
|
||||
AND log_time >= TIMESTAMP '2012-12-18 00:00:00'
|
||||
ORDER BY log_time;
|
||||
```
|
||||
|
||||
```sql
|
||||
-- Q2.2: During a specific two-week period, was the user password file leaked?
|
||||
|
||||
SELECT *
|
||||
FROM logs2
|
||||
WHERE status_code >= 200
|
||||
AND status_code < 300
|
||||
AND request LIKE '%/etc/passwd%'
|
||||
AND log_time >= TIMESTAMP '2012-05-06 00:00:00'
|
||||
AND log_time < TIMESTAMP '2012-05-20 00:00:00';
|
||||
```
|
||||
|
||||
|
||||
```sql
|
||||
-- Q2.3: What was the average path depth for top-level requests in the past month?
|
||||
|
||||
SELECT top_level,
|
||||
AVG(LENGTH(request) - LENGTH(REPLACE(request, '/', ''))) AS depth_avg
|
||||
FROM (
|
||||
SELECT SUBSTRING(request FROM 1 FOR len) AS top_level,
|
||||
request
|
||||
FROM (
|
||||
SELECT POSITION(SUBSTRING(request FROM 2), '/') AS len,
|
||||
request
|
||||
FROM logs2
|
||||
WHERE status_code >= 200
|
||||
AND status_code < 300
|
||||
AND log_time >= TIMESTAMP '2012-12-01 00:00:00'
|
||||
) AS r
|
||||
WHERE len > 0
|
||||
) AS s
|
||||
WHERE top_level IN ('/about','/courses','/degrees','/events',
|
||||
'/grad','/industry','/news','/people',
|
||||
'/publications','/research','/teaching','/ugrad')
|
||||
GROUP BY top_level
|
||||
ORDER BY top_level;
|
||||
```
|
||||
|
||||
|
||||
```sql
|
||||
-- Q2.4: During the last 3 months, which clients have made an excessive number of requests?
|
||||
|
||||
SELECT client_ip,
|
||||
COUNT(*) AS num_requests
|
||||
FROM logs2
|
||||
WHERE log_time >= TIMESTAMP '2012-10-01 00:00:00'
|
||||
GROUP BY client_ip
|
||||
HAVING COUNT(*) >= 100000
|
||||
ORDER BY num_requests DESC;
|
||||
```
|
||||
|
||||
|
||||
```sql
|
||||
-- Q2.5: What are the daily unique visitor counts?
|
||||
|
||||
SELECT dt,
|
||||
COUNT(DISTINCT client_ip)
|
||||
FROM (
|
||||
SELECT CAST(log_time AS DATE) AS dt,
|
||||
client_ip
|
||||
FROM logs2
|
||||
) AS r
|
||||
GROUP BY dt
|
||||
ORDER BY dt;
|
||||
```
|
||||
|
||||
|
||||
```sql
|
||||
-- Q2.6: What are the average and maximum data transfer rates (Gbps)?
|
||||
|
||||
SELECT AVG(transfer) / 125000000.0 AS transfer_avg,
|
||||
MAX(transfer) / 125000000.0 AS transfer_max
|
||||
FROM (
|
||||
SELECT log_time,
|
||||
SUM(object_size) AS transfer
|
||||
FROM logs2
|
||||
GROUP BY log_time
|
||||
) AS r;
|
||||
```
|
||||
|
||||
|
||||
```sql
|
||||
-- Q3.1: Has the room temperature reached freezing at any point since 2019/11/29 17:00?
|
||||
|
||||
SELECT *
|
||||
FROM logs3
|
||||
WHERE event_type = 'temperature'
|
||||
AND event_value <= 32.0
|
||||
AND log_time >= '2019-11-29 17:00:00.000';
|
||||
```
|
||||
|
||||
|
||||
```sql
|
||||
-- Q3.4: Over the past 6 months, how frequently was each door opened?
|
||||
|
||||
SELECT device_name,
|
||||
device_floor,
|
||||
COUNT(*) AS ct
|
||||
FROM logs3
|
||||
WHERE event_type = 'door_open'
|
||||
AND log_time >= '2019-06-01 00:00:00.000'
|
||||
GROUP BY device_name,
|
||||
device_floor
|
||||
ORDER BY ct DESC;
|
||||
```
|
||||
|
||||
Query 3.5 below uses the UNION keyword. Set this mode to define how SELECT query results are combined. The setting is used only when UNION appears without explicitly specifying UNION ALL or UNION DISTINCT.
|
||||
|
||||
```sql
|
||||
SET union_default_mode = 'DISTINCT'
|
||||
```
|
||||
|
||||
```sql
|
||||
-- Q3.5: Where in the building do large temperature variations occur in winter and summer?
|
||||
|
||||
WITH temperature AS (
|
||||
SELECT dt,
|
||||
device_name,
|
||||
device_type,
|
||||
device_floor
|
||||
FROM (
|
||||
SELECT dt,
|
||||
hr,
|
||||
device_name,
|
||||
device_type,
|
||||
device_floor,
|
||||
AVG(event_value) AS temperature_hourly_avg
|
||||
FROM (
|
||||
SELECT CAST(log_time AS DATE) AS dt,
|
||||
EXTRACT(HOUR FROM log_time) AS hr,
|
||||
device_name,
|
||||
device_type,
|
||||
device_floor,
|
||||
event_value
|
||||
FROM logs3
|
||||
WHERE event_type = 'temperature'
|
||||
) AS r
|
||||
GROUP BY dt,
|
||||
hr,
|
||||
device_name,
|
||||
device_type,
|
||||
device_floor
|
||||
) AS s
|
||||
GROUP BY dt,
|
||||
device_name,
|
||||
device_type,
|
||||
device_floor
|
||||
HAVING MAX(temperature_hourly_avg) - MIN(temperature_hourly_avg) >= 25.0
|
||||
)
|
||||
SELECT DISTINCT device_name,
|
||||
device_type,
|
||||
device_floor,
|
||||
'WINTER'
|
||||
FROM temperature
|
||||
WHERE dt >= DATE '2018-12-01'
|
||||
AND dt < DATE '2019-03-01'
|
||||
UNION
|
||||
SELECT DISTINCT device_name,
|
||||
device_type,
|
||||
device_floor,
|
||||
'SUMMER'
|
||||
FROM temperature
|
||||
WHERE dt >= DATE '2019-06-01'
|
||||
AND dt < DATE '2019-09-01';
|
||||
```
|
||||
|
||||
|
||||
```sql
|
||||
-- Q3.6: For each device category, what are the monthly power consumption metrics?
|
||||
|
||||
SELECT yr,
|
||||
mo,
|
||||
SUM(coffee_hourly_avg) AS coffee_monthly_sum,
|
||||
AVG(coffee_hourly_avg) AS coffee_monthly_avg,
|
||||
SUM(printer_hourly_avg) AS printer_monthly_sum,
|
||||
AVG(printer_hourly_avg) AS printer_monthly_avg,
|
||||
SUM(projector_hourly_avg) AS projector_monthly_sum,
|
||||
AVG(projector_hourly_avg) AS projector_monthly_avg,
|
||||
SUM(vending_hourly_avg) AS vending_monthly_sum,
|
||||
AVG(vending_hourly_avg) AS vending_monthly_avg
|
||||
FROM (
|
||||
SELECT dt,
|
||||
yr,
|
||||
mo,
|
||||
hr,
|
||||
AVG(coffee) AS coffee_hourly_avg,
|
||||
AVG(printer) AS printer_hourly_avg,
|
||||
AVG(projector) AS projector_hourly_avg,
|
||||
AVG(vending) AS vending_hourly_avg
|
||||
FROM (
|
||||
SELECT CAST(log_time AS DATE) AS dt,
|
||||
EXTRACT(YEAR FROM log_time) AS yr,
|
||||
EXTRACT(MONTH FROM log_time) AS mo,
|
||||
EXTRACT(HOUR FROM log_time) AS hr,
|
||||
CASE WHEN device_name LIKE 'coffee%' THEN event_value END AS coffee,
|
||||
CASE WHEN device_name LIKE 'printer%' THEN event_value END AS printer,
|
||||
CASE WHEN device_name LIKE 'projector%' THEN event_value END AS projector,
|
||||
CASE WHEN device_name LIKE 'vending%' THEN event_value END AS vending
|
||||
FROM logs3
|
||||
WHERE device_type = 'meter'
|
||||
) AS r
|
||||
GROUP BY dt,
|
||||
yr,
|
||||
mo,
|
||||
hr
|
||||
) AS s
|
||||
GROUP BY yr,
|
||||
mo
|
||||
ORDER BY yr,
|
||||
mo;
|
||||
```
|
||||
|
||||
This dataset can be queried interactively in the [Playground](https://play.clickhouse.com/play?user=play); here is an [example](https://play.clickhouse.com/play?user=play#U0VMRUNUIG1hY2hpbmVfbmFtZSwKICAgICAgIE1JTihjcHUpIEFTIGNwdV9taW4sCiAgICAgICBNQVgoY3B1KSBBUyBjcHVfbWF4LAogICAgICAgQVZHKGNwdSkgQVMgY3B1X2F2ZywKICAgICAgIE1JTihuZXRfaW4pIEFTIG5ldF9pbl9taW4sCiAgICAgICBNQVgobmV0X2luKSBBUyBuZXRfaW5fbWF4LAogICAgICAgQVZHKG5ldF9pbikgQVMgbmV0X2luX2F2ZywKICAgICAgIE1JTihuZXRfb3V0KSBBUyBuZXRfb3V0X21pbiwKICAgICAgIE1BWChuZXRfb3V0KSBBUyBuZXRfb3V0X21heCwKICAgICAgIEFWRyhuZXRfb3V0KSBBUyBuZXRfb3V0X2F2ZwpGUk9NICgKICBTRUxFQ1QgbWFjaGluZV9uYW1lLAogICAgICAgICBDT0FMRVNDRShjcHVfdXNlciwgMC4wKSBBUyBjcHUsCiAgICAgICAgIENPQUxFU0NFKGJ5dGVzX2luLCAwLjApIEFTIG5ldF9pbiwKICAgICAgICAgQ09BTEVTQ0UoYnl0ZXNfb3V0LCAwLjApIEFTIG5ldF9vdXQKICBGUk9NIG1nYmVuY2gubG9nczEKICBXSEVSRSBtYWNoaW5lX25hbWUgSU4gKCdhbmFuc2knLCdhcmFnb2cnLCd1cmQnKQogICAgQU5EIGxvZ190aW1lID49IFRJTUVTVEFNUCAnMjAxNy0wMS0xMSAwMDowMDowMCcKKSBBUyByCkdST1VQIEJZIG1hY2hpbmVfbmFtZQ==).
|
||||
|
@ -1,10 +1,450 @@
|
||||
---
|
||||
slug: /zh/getting-started/example-datasets/uk-price-paid
|
||||
sidebar_label: UK Property Price Paid
|
||||
sidebar_label: UK Property Price Paid
|
||||
sidebar_position: 1
|
||||
title: "UK Property Price Paid"
|
||||
title: "英国房地产支付价格"
|
||||
---
|
||||
|
||||
import Content from '@site/docs/en/getting-started/example-datasets/uk-price-paid.md';
|
||||
The dataset contains data about prices paid for real estate in England and Wales since 1995. The uncompressed size is about 4 GiB, and it takes about 278 MiB in ClickHouse.
|
||||
|
||||
<Content />
|
||||
Source: https://www.gov.uk/government/statistical-data-sets/price-paid-data-downloads
|
||||
Description of the fields: https://www.gov.uk/guidance/about-the-price-data
|
||||
|
||||
Contains HM Land Registry data © Crown copyright and database right 2021. This dataset is licensed under the Open Government Licence v3.0.
|
||||
|
||||
## Create the Table {#create-table}
|
||||
|
||||
```sql
|
||||
CREATE TABLE uk_price_paid
|
||||
(
|
||||
price UInt32,
|
||||
date Date,
|
||||
postcode1 LowCardinality(String),
|
||||
postcode2 LowCardinality(String),
|
||||
type Enum8('terraced' = 1, 'semi-detached' = 2, 'detached' = 3, 'flat' = 4, 'other' = 0),
|
||||
is_new UInt8,
|
||||
duration Enum8('freehold' = 1, 'leasehold' = 2, 'unknown' = 0),
|
||||
addr1 String,
|
||||
addr2 String,
|
||||
street LowCardinality(String),
|
||||
locality LowCardinality(String),
|
||||
town LowCardinality(String),
|
||||
district LowCardinality(String),
|
||||
county LowCardinality(String)
|
||||
)
|
||||
ENGINE = MergeTree
|
||||
ORDER BY (postcode1, postcode2, addr1, addr2);
|
||||
```
|
||||
|
||||
## Preprocess and Insert the Data {#preprocess-import-data}
|
||||
|
||||
We will use the `url` function to stream the data into ClickHouse. Some of the incoming data needs to be preprocessed first, which includes:
|
||||
|
||||
- splitting `postcode` into two different columns - `postcode1` and `postcode2`, which is better for storage and queries
|
||||
- converting the `time` field to a date, since it only contains 00:00 time
|
||||
- ignoring the [UUID](../../sql-reference/data-types/uuid.md) field, because we do not need it for analysis
|
||||
- converting the `type` and `duration` fields to more readable `Enum` fields using the [transform](../../sql-reference/functions/other-functions.md#transform) function
|
||||
- converting the `is_new` field from a single-character string (`Y`/`N`) to a [UInt8](../../sql-reference/data-types/int-uint.md#uint8-uint16-uint32-uint64-uint256-int8-int16-int32-int64-int128-int256) field with the value 0 or 1
|
||||
- dropping the last two columns, since they all have the same value (namely 0)
|
||||
|
||||
The `url` function streams data from a web server into a ClickHouse table. The following command inserts 5 million rows into the `uk_price_paid` table:
|
||||
|
||||
```sql
|
||||
INSERT INTO uk_price_paid
|
||||
WITH
|
||||
splitByChar(' ', postcode) AS p
|
||||
SELECT
|
||||
toUInt32(price_string) AS price,
|
||||
parseDateTimeBestEffortUS(time) AS date,
|
||||
p[1] AS postcode1,
|
||||
p[2] AS postcode2,
|
||||
transform(a, ['T', 'S', 'D', 'F', 'O'], ['terraced', 'semi-detached', 'detached', 'flat', 'other']) AS type,
|
||||
b = 'Y' AS is_new,
|
||||
transform(c, ['F', 'L', 'U'], ['freehold', 'leasehold', 'unknown']) AS duration,
|
||||
addr1,
|
||||
addr2,
|
||||
street,
|
||||
locality,
|
||||
town,
|
||||
district,
|
||||
county
|
||||
FROM url(
|
||||
'http://prod.publicdata.landregistry.gov.uk.s3-website-eu-west-1.amazonaws.com/pp-complete.csv',
|
||||
'CSV',
|
||||
'uuid_string String,
|
||||
price_string String,
|
||||
time String,
|
||||
postcode String,
|
||||
a String,
|
||||
b String,
|
||||
c String,
|
||||
addr1 String,
|
||||
addr2 String,
|
||||
street String,
|
||||
locality String,
|
||||
town String,
|
||||
district String,
|
||||
county String,
|
||||
d String,
|
||||
e String'
|
||||
) SETTINGS max_http_get_redirects=10;
|
||||
```
|
||||
|
||||
Wait a minute or two for the data to be inserted; the exact time depends on network speed.
|
||||
|
||||
## Validate the Data {#validate-data}
|
||||
|
||||
Let's verify it worked by checking how many rows were inserted:
|
||||
|
||||
```sql
|
||||
SELECT count()
|
||||
FROM uk_price_paid
|
||||
```
|
||||
|
||||
At the time this query was executed, the dataset had 27,450,499 rows. Let's see how much storage the table takes in ClickHouse:
|
||||
|
||||
```sql
|
||||
SELECT formatReadableSize(total_bytes)
|
||||
FROM system.tables
|
||||
WHERE name = 'uk_price_paid'
|
||||
```
|
||||
|
||||
Notice that the table takes up only 221.43 MiB!
|
||||
|
||||
## Run Some Queries {#run-queries}
|
||||
|
||||
Let's run some queries to analyze the data:
|
||||
|
||||
### Query 1. Average Price Per Year {#average-price}
|
||||
|
||||
```sql
|
||||
SELECT
|
||||
toYear(date) AS year,
|
||||
round(avg(price)) AS price,
|
||||
    bar(price, 0, 1000000, 80)
|
||||
FROM uk_price_paid
|
||||
GROUP BY year
|
||||
ORDER BY year
|
||||
```
|
||||
|
||||
The result looks like:
|
||||
|
||||
```response
|
||||
┌─year─┬──price─┬─bar(round(avg(price)), 0, 1000000, 80)─┐
|
||||
│ 1995 │ 67934 │ █████▍ │
|
||||
│ 1996 │ 71508 │ █████▋ │
|
||||
│ 1997 │ 78536 │ ██████▎ │
|
||||
│ 1998 │ 85441 │ ██████▋ │
|
||||
│ 1999 │ 96038 │ ███████▋ │
|
||||
│ 2000 │ 107487 │ ████████▌ │
|
||||
│ 2001 │ 118888 │ █████████▌ │
|
||||
│ 2002 │ 137948 │ ███████████ │
|
||||
│ 2003 │ 155893 │ ████████████▍ │
|
||||
│ 2004 │ 178888 │ ██████████████▎ │
|
||||
│ 2005 │ 189359 │ ███████████████▏ │
|
||||
│ 2006 │ 203532 │ ████████████████▎ │
|
||||
│ 2007 │ 219375 │ █████████████████▌ │
|
||||
│ 2008 │ 217056 │ █████████████████▎ │
|
||||
│ 2009 │ 213419 │ █████████████████ │
|
||||
│ 2010 │ 236110 │ ██████████████████▊ │
|
||||
│ 2011 │ 232805 │ ██████████████████▌ │
|
||||
│ 2012 │ 238381 │ ███████████████████ │
|
||||
│ 2013 │ 256927 │ ████████████████████▌ │
|
||||
│ 2014 │ 280008 │ ██████████████████████▍ │
|
||||
│ 2015 │ 297263 │ ███████████████████████▋ │
|
||||
│ 2016 │ 313518 │ █████████████████████████ │
|
||||
│ 2017 │ 346371 │ ███████████████████████████▋ │
|
||||
│ 2018 │ 350556 │ ████████████████████████████ │
|
||||
│ 2019 │ 352184 │ ████████████████████████████▏ │
|
||||
│ 2020 │ 375808 │ ██████████████████████████████ │
|
||||
│ 2021 │ 381105 │ ██████████████████████████████▍ │
|
||||
│ 2022 │ 362572 │ █████████████████████████████ │
|
||||
└──────┴────────┴────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
### Query 2. Average Price Per Year in London {#average-price-london}
|
||||
|
||||
```sql
|
||||
SELECT
|
||||
toYear(date) AS year,
|
||||
round(avg(price)) AS price,
|
||||
    bar(price, 0, 2000000, 100)
|
||||
FROM uk_price_paid
|
||||
WHERE town = 'LONDON'
|
||||
GROUP BY year
|
||||
ORDER BY year
|
||||
```
|
||||
|
||||
The result looks like:
|
||||
|
||||
```response
|
||||
┌─year─┬───price─┬─bar(round(avg(price)), 0, 2000000, 100)───────────────┐
|
||||
│ 1995 │ 109110 │ █████▍ │
|
||||
│ 1996 │ 118659 │ █████▊ │
|
||||
│ 1997 │ 136526 │ ██████▋ │
|
||||
│ 1998 │ 153002 │ ███████▋ │
|
||||
│ 1999 │ 180633 │ █████████ │
|
||||
│ 2000 │ 215849 │ ██████████▋ │
|
||||
│ 2001 │ 232987 │ ███████████▋ │
|
||||
│ 2002 │ 263668 │ █████████████▏ │
|
||||
│ 2003 │ 278424 │ █████████████▊ │
|
||||
│ 2004 │ 304664 │ ███████████████▏ │
|
||||
│ 2005 │ 322887 │ ████████████████▏ │
|
||||
│ 2006 │ 356195 │ █████████████████▋ │
|
||||
│ 2007 │ 404062 │ ████████████████████▏ │
|
||||
│ 2008 │ 420741 │ █████████████████████ │
|
||||
│ 2009 │ 427754 │ █████████████████████▍ │
|
||||
│ 2010 │ 480322 │ ████████████████████████ │
|
||||
│ 2011 │ 496278 │ ████████████████████████▋ │
|
||||
│ 2012 │ 519482 │ █████████████████████████▊ │
|
||||
│ 2013 │ 616195 │ ██████████████████████████████▋ │
|
||||
│ 2014 │ 724121 │ ████████████████████████████████████▏ │
|
||||
│ 2015 │ 792101 │ ███████████████████████████████████████▌ │
|
||||
│ 2016 │ 843589 │ ██████████████████████████████████████████▏ │
|
||||
│ 2017 │ 983523 │ █████████████████████████████████████████████████▏ │
|
||||
│ 2018 │ 1016753 │ ██████████████████████████████████████████████████▋ │
|
||||
│ 2019 │ 1041673 │ ████████████████████████████████████████████████████ │
|
||||
│ 2020 │ 1060027 │ █████████████████████████████████████████████████████ │
|
||||
│ 2021 │ 958249 │ ███████████████████████████████████████████████▊ │
|
||||
│ 2022 │ 902596 │ █████████████████████████████████████████████▏ │
|
||||
└──────┴─────────┴───────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
Something happened to housing prices in 2020! But that is probably no surprise...
|
||||
|
||||
### Query 3. The Most Expensive Neighborhoods {#most-expensive-neighborhoods}
|
||||
|
||||
```sql
|
||||
SELECT
|
||||
town,
|
||||
district,
|
||||
count() AS c,
|
||||
round(avg(price)) AS price,
|
||||
bar(price, 0, 5000000, 100)
|
||||
FROM uk_price_paid
|
||||
WHERE date >= '2020-01-01'
|
||||
GROUP BY
|
||||
town,
|
||||
district
|
||||
HAVING c >= 100
|
||||
ORDER BY price DESC
|
||||
LIMIT 100
|
||||
```
|
||||
|
||||
The result looks like:
|
||||
|
||||
```response
|
||||
┌─town─────────────────┬─district───────────────┬─────c─┬───price─┬─bar(round(avg(price)), 0, 5000000, 100)─────────────────────────┐
|
||||
│ LONDON │ CITY OF LONDON │ 578 │ 3149590 │ ██████████████████████████████████████████████████████████████▊ │
|
||||
│ LONDON │ CITY OF WESTMINSTER │ 7083 │ 2903794 │ ██████████████████████████████████████████████████████████ │
|
||||
│ LONDON │ KENSINGTON AND CHELSEA │ 4986 │ 2333782 │ ██████████████████████████████████████████████▋ │
|
||||
│ LEATHERHEAD │ ELMBRIDGE │ 203 │ 2071595 │ █████████████████████████████████████████▍ │
|
||||
│ VIRGINIA WATER │ RUNNYMEDE │ 308 │ 1939465 │ ██████████████████████████████████████▋ │
|
||||
│ LONDON │ CAMDEN │ 5750 │ 1673687 │ █████████████████████████████████▍ │
|
||||
│ WINDLESHAM │ SURREY HEATH │ 182 │ 1428358 │ ████████████████████████████▌ │
|
||||
│ NORTHWOOD │ THREE RIVERS │ 112 │ 1404170 │ ████████████████████████████ │
|
||||
│ BARNET │ ENFIELD │ 259 │ 1338299 │ ██████████████████████████▋ │
|
||||
│ LONDON │ ISLINGTON │ 5504 │ 1275520 │ █████████████████████████▌ │
|
||||
│ LONDON │ RICHMOND UPON THAMES │ 1345 │ 1261935 │ █████████████████████████▏ │
|
||||
│ COBHAM │ ELMBRIDGE │ 727 │ 1251403 │ █████████████████████████ │
|
||||
│ BEACONSFIELD │ BUCKINGHAMSHIRE │ 680 │ 1199970 │ ███████████████████████▊ │
|
||||
│ LONDON │ TOWER HAMLETS │ 10012 │ 1157827 │ ███████████████████████▏ │
|
||||
│ LONDON │ HOUNSLOW │ 1278 │ 1144389 │ ██████████████████████▊ │
|
||||
│ BURFORD │ WEST OXFORDSHIRE │ 182 │ 1139393 │ ██████████████████████▋ │
|
||||
│ RICHMOND │ RICHMOND UPON THAMES │ 1649 │ 1130076 │ ██████████████████████▌ │
|
||||
│ KINGSTON UPON THAMES │ RICHMOND UPON THAMES │ 147 │ 1126111 │ ██████████████████████▌ │
|
||||
│ ASCOT │ WINDSOR AND MAIDENHEAD │ 773 │ 1106109 │ ██████████████████████ │
|
||||
│ LONDON │ HAMMERSMITH AND FULHAM │ 6162 │ 1056198 │ █████████████████████ │
|
||||
│ RADLETT │ HERTSMERE │ 513 │ 1045758 │ ████████████████████▊ │
|
||||
│ LEATHERHEAD │ GUILDFORD │ 354 │ 1045175 │ ████████████████████▊ │
|
||||
│ WEYBRIDGE │ ELMBRIDGE │ 1275 │ 1036702 │ ████████████████████▋ │
|
||||
│ FARNHAM │ EAST HAMPSHIRE │ 107 │ 1033682 │ ████████████████████▋ │
|
||||
│ ESHER │ ELMBRIDGE │ 915 │ 1032753 │ ████████████████████▋ │
|
||||
│ FARNHAM │ HART │ 102 │ 1002692 │ ████████████████████ │
|
||||
│ GERRARDS CROSS │ BUCKINGHAMSHIRE │ 845 │ 983639 │ ███████████████████▋ │
|
||||
│ CHALFONT ST GILES │ BUCKINGHAMSHIRE │ 286 │ 973993 │ ███████████████████▍ │
|
||||
│ SALCOMBE │ SOUTH HAMS │ 215 │ 965724 │ ███████████████████▎ │
|
||||
│ SURBITON │ ELMBRIDGE │ 181 │ 960346 │ ███████████████████▏ │
|
||||
│ BROCKENHURST │ NEW FOREST │ 226 │ 951278 │ ███████████████████ │
|
||||
│ SUTTON COLDFIELD │ LICHFIELD │ 110 │ 930757 │ ██████████████████▌ │
|
||||
│ EAST MOLESEY │ ELMBRIDGE │ 372 │ 927026 │ ██████████████████▌ │
|
||||
│ LLANGOLLEN │ WREXHAM │ 127 │ 925681 │ ██████████████████▌ │
|
||||
│ OXFORD │ SOUTH OXFORDSHIRE │ 638 │ 923830 │ ██████████████████▍ │
|
||||
│ LONDON │ MERTON │ 4383 │ 923194 │ ██████████████████▍ │
|
||||
│ GUILDFORD │ WAVERLEY │ 261 │ 905733 │ ██████████████████ │
|
||||
│ TEDDINGTON │ RICHMOND UPON THAMES │ 1147 │ 894856 │ █████████████████▊ │
|
||||
│ HARPENDEN │ ST ALBANS │ 1271 │ 893079 │ █████████████████▋ │
|
||||
│ HENLEY-ON-THAMES │ SOUTH OXFORDSHIRE │ 1042 │ 887557 │ █████████████████▋ │
|
||||
│ POTTERS BAR │ WELWYN HATFIELD │ 314 │ 863037 │ █████████████████▎ │
|
||||
│ LONDON │ WANDSWORTH │ 13210 │ 857318 │ █████████████████▏ │
|
||||
│ BILLINGSHURST │ CHICHESTER │ 255 │ 856508 │ █████████████████▏ │
|
||||
│ LONDON │ SOUTHWARK │ 7742 │ 843145 │ ████████████████▋ │
|
||||
│ LONDON │ HACKNEY │ 6656 │ 839716 │ ████████████████▋ │
|
||||
│ LUTTERWORTH │ HARBOROUGH │ 1096 │ 836546 │ ████████████████▋ │
|
||||
│ KINGSTON UPON THAMES │ KINGSTON UPON THAMES │ 1846 │ 828990 │ ████████████████▌ │
|
||||
│ LONDON │ EALING │ 5583 │ 820135 │ ████████████████▍ │
|
||||
│ INGATESTONE │ CHELMSFORD │ 120 │ 815379 │ ████████████████▎ │
|
||||
│ MARLOW │ BUCKINGHAMSHIRE │ 718 │ 809943 │ ████████████████▏ │
|
||||
│ EAST GRINSTEAD │ TANDRIDGE │ 105 │ 809461 │ ████████████████▏ │
|
||||
│ CHIGWELL │ EPPING FOREST │ 484 │ 809338 │ ████████████████▏ │
|
||||
│ EGHAM │ RUNNYMEDE │ 989 │ 807858 │ ████████████████▏ │
|
||||
│ HASLEMERE │ CHICHESTER │ 223 │ 804173 │ ████████████████ │
|
||||
│ PETWORTH │ CHICHESTER │ 288 │ 803206 │ ████████████████ │
|
||||
│ TWICKENHAM │ RICHMOND UPON THAMES │ 2194 │ 802616 │ ████████████████ │
|
||||
│ WEMBLEY │ BRENT │ 1698 │ 801733 │ ████████████████ │
|
||||
│ HINDHEAD │ WAVERLEY │ 233 │ 801482 │ ████████████████ │
|
||||
│ LONDON │ BARNET │ 8083 │ 792066 │ ███████████████▋ │
|
||||
│ WOKING │ GUILDFORD │ 343 │ 789360 │ ███████████████▋ │
|
||||
│ STOCKBRIDGE │ TEST VALLEY │ 318 │ 777909 │ ███████████████▌ │
|
||||
│ BERKHAMSTED │ DACORUM │ 1049 │ 776138 │ ███████████████▌ │
|
||||
│ MAIDENHEAD │ BUCKINGHAMSHIRE │ 236 │ 775572 │ ███████████████▌ │
|
||||
│ SOLIHULL │ STRATFORD-ON-AVON │ 142 │ 770727 │ ███████████████▍ │
|
||||
│ GREAT MISSENDEN │ BUCKINGHAMSHIRE │ 431 │ 764493 │ ███████████████▎ │
|
||||
│ TADWORTH │ REIGATE AND BANSTEAD │ 920 │ 757511 │ ███████████████▏ │
|
||||
│ LONDON │ BRENT │ 4124 │ 757194 │ ███████████████▏ │
|
||||
│ THAMES DITTON │ ELMBRIDGE │ 470 │ 750828 │ ███████████████ │
|
||||
│ LONDON │ LAMBETH │ 10431 │ 750532 │ ███████████████ │
|
||||
│ RICKMANSWORTH │ THREE RIVERS │ 1500 │ 747029 │ ██████████████▊ │
|
||||
│ KINGS LANGLEY │ DACORUM │ 281 │ 746536 │ ██████████████▊ │
|
||||
│ HARLOW │ EPPING FOREST │ 172 │ 739423 │ ██████████████▋ │
|
||||
│ TONBRIDGE │ SEVENOAKS │ 103 │ 738740 │ ██████████████▋ │
|
||||
│ BELVEDERE │ BEXLEY │ 686 │ 736385 │ ██████████████▋ │
|
||||
│ CRANBROOK │ TUNBRIDGE WELLS │ 769 │ 734328 │ ██████████████▋ │
|
||||
│ SOLIHULL │ WARWICK │ 116 │ 733286 │ ██████████████▋ │
|
||||
│ ALDERLEY EDGE │ CHESHIRE EAST │ 357 │ 732882 │ ██████████████▋ │
|
||||
│ WELWYN │ WELWYN HATFIELD │ 404 │ 730281 │ ██████████████▌ │
|
||||
│ CHISLEHURST │ BROMLEY │ 870 │ 730279 │ ██████████████▌ │
|
||||
│ LONDON │ HARINGEY │ 6488 │ 726715 │ ██████████████▌ │
|
||||
│ AMERSHAM │ BUCKINGHAMSHIRE │ 965 │ 725426 │ ██████████████▌ │
|
||||
│ SEVENOAKS │ SEVENOAKS │ 2183 │ 725102 │ ██████████████▌ │
|
||||
│ BOURNE END │ BUCKINGHAMSHIRE │ 269 │ 724595 │ ██████████████▍ │
|
||||
│ NORTHWOOD │ HILLINGDON │ 568 │ 722436 │ ██████████████▍ │
|
||||
│ PURFLEET │ THURROCK │ 143 │ 722205 │ ██████████████▍ │
|
||||
│ SLOUGH │ BUCKINGHAMSHIRE │ 832 │ 721529 │ ██████████████▍ │
|
||||
│ INGATESTONE │ BRENTWOOD │ 301 │ 718292 │ ██████████████▎ │
|
||||
│ EPSOM │ REIGATE AND BANSTEAD │ 315 │ 709264 │ ██████████████▏ │
|
||||
│ ASHTEAD │ MOLE VALLEY │ 524 │ 708646 │ ██████████████▏ │
|
||||
│ BETCHWORTH │ MOLE VALLEY │ 155 │ 708525 │ ██████████████▏ │
|
||||
│ OXTED │ TANDRIDGE │ 645 │ 706946 │ ██████████████▏ │
|
||||
│ READING │ SOUTH OXFORDSHIRE │ 593 │ 705466 │ ██████████████ │
|
||||
│ FELTHAM │ HOUNSLOW │ 1536 │ 703815 │ ██████████████ │
|
||||
│ TUNBRIDGE WELLS │ WEALDEN │ 207 │ 703296 │ ██████████████ │
|
||||
│ LEWES │ WEALDEN │ 116 │ 701349 │ ██████████████ │
|
||||
│ OXFORD │ OXFORD │ 3656 │ 700813 │ ██████████████ │
|
||||
│ MAYFIELD │ WEALDEN │ 177 │ 698158 │ █████████████▊ │
|
||||
│ PINNER │ HARROW │ 997 │ 697876 │ █████████████▊ │
|
||||
│ LECHLADE │ COTSWOLD │ 155 │ 696262 │ █████████████▊ │
|
||||
│ WALTON-ON-THAMES │ ELMBRIDGE │ 1850 │ 690102 │ █████████████▋ │
|
||||
└──────────────────────┴────────────────────────┴───────┴─────────┴─────────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## Speed Up Queries with Projections {#speedup-with-projections}
|
||||
|
||||
[Projections](../../sql-reference/statements/alter/projection.md) let us speed up queries by storing pre-aggregated data in an arbitrary layout. In this example, we create a projection that keeps the average price, total price, and count of properties grouped by year, district, and town. At execution time, ClickHouse uses the projection if it decides the projection can improve query performance (when to use it is up to ClickHouse).
|
||||
|
||||
### Build a Projection {#build-projection}
|
||||
|
||||
Let's create an aggregate projection by the dimensions `toYear(date)`, `district`, and `town`:
|
||||
|
||||
```sql
|
||||
ALTER TABLE uk_price_paid
|
||||
ADD PROJECTION projection_by_year_district_town
|
||||
(
|
||||
SELECT
|
||||
toYear(date),
|
||||
district,
|
||||
town,
|
||||
avg(price),
|
||||
sum(price),
|
||||
count()
|
||||
GROUP BY
|
||||
toYear(date),
|
||||
district,
|
||||
town
|
||||
)
|
||||
```
|
||||
|
||||
Populate the projection for existing data (without materializing it, ClickHouse creates the projection only for newly inserted data):
|
||||
|
||||
```sql
|
||||
ALTER TABLE uk_price_paid
|
||||
MATERIALIZE PROJECTION projection_by_year_district_town
|
||||
SETTINGS mutations_sync = 1
|
||||
```
|
||||
|
||||
## Test Performance {#test-performance}
|
||||
|
||||
Let's run the same 3 queries again:
|
||||
|
||||
### Query 1. Average Price Per Year {#average-price-projections}
|
||||
|
||||
```sql
|
||||
SELECT
|
||||
toYear(date) AS year,
|
||||
round(avg(price)) AS price,
|
||||
bar(price, 0, 1000000, 80)
|
||||
FROM uk_price_paid
|
||||
GROUP BY year
|
||||
ORDER BY year ASC
|
||||
```
|
||||
|
||||
The result is the same, but the performance is better!
|
||||
```response
|
||||
No projection: 28 rows in set. Elapsed: 1.775 sec. Processed 27.45 million rows, 164.70 MB (15.47 million rows/s., 92.79 MB/s.)
|
||||
With projection: 28 rows in set. Elapsed: 0.665 sec. Processed 87.51 thousand rows, 3.21 MB (131.51 thousand rows/s., 4.82 MB/s.)
|
||||
```
|
||||
|
||||
|
||||
### Query 2. Average Price Per Year in London {#average-price-london-projections}
|
||||
|
||||
```sql
|
||||
SELECT
|
||||
toYear(date) AS year,
|
||||
round(avg(price)) AS price,
|
||||
bar(price, 0, 2000000, 100)
|
||||
FROM uk_price_paid
|
||||
WHERE town = 'LONDON'
|
||||
GROUP BY year
|
||||
ORDER BY year ASC
|
||||
```
|
||||
|
||||
Same result, but notice the improvement in query performance:
|
||||
|
||||
```response
|
||||
No projection: 28 rows in set. Elapsed: 0.720 sec. Processed 27.45 million rows, 46.61 MB (38.13 million rows/s., 64.74 MB/s.)
|
||||
With projection: 28 rows in set. Elapsed: 0.015 sec. Processed 87.51 thousand rows, 3.51 MB (5.74 million rows/s., 230.24 MB/s.)
|
||||
```
|
||||
|
||||
### Query 3. The Most Expensive Neighborhoods {#most-expensive-neighborhoods-projections}
|
||||
|
||||
Note: the condition `(date >= '2020-01-01')` needs to be modified to match the projection dimension `(toYear(date) >= 2020)`:
|
||||
|
||||
```sql
|
||||
SELECT
|
||||
town,
|
||||
district,
|
||||
count() AS c,
|
||||
round(avg(price)) AS price,
|
||||
bar(price, 0, 5000000, 100)
|
||||
FROM uk_price_paid
|
||||
WHERE toYear(date) >= 2020
|
||||
GROUP BY
|
||||
town,
|
||||
district
|
||||
HAVING c >= 100
|
||||
ORDER BY price DESC
|
||||
LIMIT 100
|
||||
```
|
||||
|
||||
Again, the result is the same, but notice the improvement in query performance:
|
||||
|
||||
```response
|
||||
No projection: 100 rows in set. Elapsed: 0.928 sec. Processed 27.45 million rows, 103.80 MB (29.56 million rows/s., 111.80 MB/s.)
|
||||
With projection: 100 rows in set. Elapsed: 0.336 sec. Processed 17.32 thousand rows, 1.23 MB (51.61 thousand rows/s., 3.65 MB/s.)
|
||||
```
|
||||
|
||||
### Test It in the Playground {#playground}
|
||||
|
||||
The dataset is also available in the [Online Playground](https://play.clickhouse.com/play?user=play#U0VMRUNUIHRvd24sIGRpc3RyaWN0LCBjb3VudCgpIEFTIGMsIHJvdW5kKGF2ZyhwcmljZSkpIEFTIHByaWNlLCBiYXIocHJpY2UsIDAsIDUwMDAwMDAsIDEwMCkgRlJPTSB1a19wcmljZV9wYWlkIFdIRVJFIGRhdGUgPj0gJzIwMjAtMDEtMDEnIEdST1VQIEJZIHRvd24sIGRpc3RyaWN0IEhBVklORyBjID49IDEwMCBPUkRFUiBCWSBwcmljZSBERVNDIExJTUlUIDEwMA==).
|
||||
|
@ -35,6 +35,9 @@ Yandex **does not** maintain the libraries listed below and has not done any extensive testing
|
||||
- NodeJs
|
||||
- [clickhouse (NodeJs)](https://github.com/TimonKK/clickhouse)
|
||||
- [node-clickhouse](https://github.com/apla/node-clickhouse)
|
||||
- [nestjs-clickhouse](https://github.com/depyronick/nestjs-clickhouse)
|
||||
- [clickhouse-client](https://github.com/depyronick/clickhouse-client)
|
||||
- [node-clickhouse-orm](https://github.com/zimv/node-clickhouse-orm)
|
||||
- Perl
|
||||
- [perl-DBD-ClickHouse](https://github.com/elcamlost/perl-DBD-ClickHouse)
|
||||
- [HTTP-ClickHouse](https://metacpan.org/release/HTTP-ClickHouse)
|
||||
|
@ -164,23 +164,6 @@ SELECT * FROM [db.]live_view WHERE ...
|
||||
|
||||
You can force-refresh a live view using the `ALTER LIVE VIEW [db.]table_name REFRESH` syntax.
|
||||
|
||||
### WITH TIMEOUT Clause {#live-view-with-timeout}
|
||||
|
||||
When a live view is created with a `WITH TIMEOUT` clause, it is dropped automatically after the specified number of seconds have passed since the last [WATCH](../../../sql-reference/statements/watch.md) query watching the live view.
|
||||
|
||||
```sql
|
||||
CREATE LIVE VIEW [db.]table_name WITH TIMEOUT [value_in_sec] AS SELECT ...
|
||||
```
|
||||
|
||||
If the timeout value is not specified, the value of the [temporary_live_view_timeout](../../../operations/settings/settings.md#temporary-live-view-timeout) setting is used.
|
||||
|
||||
**Example:**
|
||||
|
||||
```sql
|
||||
CREATE TABLE mt (x Int8) Engine = MergeTree ORDER BY x;
|
||||
CREATE LIVE VIEW lv WITH TIMEOUT 15 AS SELECT sum(x) FROM mt;
|
||||
```
|
||||
|
||||
### WITH REFRESH Clause {#live-view-with-refresh}
|
||||
|
||||
When a live view is created with a `WITH REFRESH` clause, it is refreshed automatically after the specified number of seconds have passed since the last refresh or trigger.
|
||||
@ -210,20 +193,6 @@ WATCH lv
|
||||
└─────────────────────┴──────────┘
|
||||
```
|
||||
|
||||
You can combine the `WITH TIMEOUT` and `WITH REFRESH` clauses using `AND`.
|
||||
|
||||
```sql
|
||||
CREATE LIVE VIEW [db.]table_name WITH TIMEOUT [value_in_sec] AND REFRESH [value_in_sec] AS SELECT ...
|
||||
```
|
||||
|
||||
**Example:**
|
||||
|
||||
```sql
|
||||
CREATE LIVE VIEW lv WITH TIMEOUT 15 AND REFRESH 5 AS SELECT now();
|
||||
```
|
||||
|
||||
After 15 seconds, the live view is dropped automatically if there is no active `WATCH` query.
|
||||
|
||||
```sql
|
||||
WATCH lv
|
||||
```
|
||||
|
@ -120,7 +120,11 @@ use_cron()
|
||||
if [ -x "/bin/systemctl" ] && [ -f /etc/systemd/system/clickhouse-server.service ] && [ -d /run/systemd/system ]; then
|
||||
return 1
|
||||
fi
|
||||
# 2. disabled by config
|
||||
# 2. check whether the config file exists
|
||||
if [ ! -f "$CLICKHOUSE_CRONFILE" ]; then
|
||||
return 1
|
||||
fi
|
||||
# 3. disabled by config
|
||||
if [ -z "$CLICKHOUSE_CRONFILE" ]; then
|
||||
return 2
|
||||
fi
|
||||
|
@ -189,7 +189,7 @@ else()
|
||||
message(STATUS "ClickHouse su: OFF")
|
||||
endif()
|
||||
|
||||
configure_file (config_tools.h.in ${ConfigIncludePath}/config_tools.h)
|
||||
configure_file (config_tools.h.in ${CONFIG_INCLUDE_PATH}/config_tools.h)
|
||||
|
||||
macro(clickhouse_target_link_split_lib target name)
|
||||
if(NOT CLICKHOUSE_ONE_SHARED)
|
||||
|
@ -12,10 +12,11 @@
|
||||
#include <string>
|
||||
#include "Client.h"
|
||||
#include "Core/Protocol.h"
|
||||
#include "Parsers/formatAST.h"
|
||||
|
||||
#include <base/find_symbols.h>
|
||||
|
||||
#include <Common/config_version.h>
|
||||
#include "config_version.h"
|
||||
#include <Common/Exception.h>
|
||||
#include <Common/formatReadable.h>
|
||||
#include <Common/TerminalSize.h>
|
||||
@ -514,6 +515,66 @@ static bool queryHasWithClause(const IAST & ast)
|
||||
return false;
|
||||
}
|
||||
|
||||
std::optional<bool> Client::processFuzzingStep(const String & query_to_execute, const ASTPtr & parsed_query)
|
||||
{
|
||||
processParsedSingleQuery(query_to_execute, query_to_execute, parsed_query);
|
||||
|
||||
const auto * exception = server_exception ? server_exception.get() : client_exception.get();
|
||||
// Sometimes you may get TOO_DEEP_RECURSION from the server,
|
||||
// and TOO_DEEP_RECURSION should not fail the fuzzer check.
|
||||
if (have_error && exception->code() == ErrorCodes::TOO_DEEP_RECURSION)
|
||||
{
|
||||
have_error = false;
|
||||
server_exception.reset();
|
||||
client_exception.reset();
|
||||
return true;
|
||||
}
|
||||
|
||||
if (have_error)
|
||||
{
|
||||
fmt::print(stderr, "Error on processing query '{}': {}\n", parsed_query->formatForErrorMessage(), exception->message());
|
||||
|
||||
// Try to reconnect after errors, for two reasons:
|
||||
// 1. We might not have realized that the server died, e.g. if
|
||||
// it sent us a <Fatal> trace and closed connection properly.
|
||||
// 2. The connection might have gotten into a wrong state and
|
||||
// the next query will get false positive about
|
||||
// "Unknown packet from server".
|
||||
try
|
||||
{
|
||||
connection->forceConnected(connection_parameters.timeouts);
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
// Just report it, we'll terminate below.
|
||||
fmt::print(stderr,
|
||||
"Error while reconnecting to the server: {}\n",
|
||||
getCurrentExceptionMessage(true));
|
||||
|
||||
// The reconnection might fail, but we'll still be connected
|
||||
// in the sense of `connection->isConnected() = true`,
|
||||
// in case when the requested database doesn't exist.
|
||||
// Disconnect manually now, so that the following code doesn't
|
||||
// have any doubts, and the connection state is predictable.
|
||||
connection->disconnect();
|
||||
}
|
||||
}
|
||||
|
||||
if (!connection->isConnected())
|
||||
{
|
||||
// Probably the server is dead because we found an assertion
|
||||
// failure. Fail fast.
|
||||
fmt::print(stderr, "Lost connection to the server.\n");
|
||||
|
||||
// Print the changed settings because they might be needed to
|
||||
// reproduce the error.
|
||||
printChangedSettings();
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
return std::nullopt;
|
||||
}

/// Returns false when server is not available.
bool Client::processWithFuzzing(const String & full_query)

@ -558,18 +619,33 @@ bool Client::processWithFuzzing(const String & full_query)
    // - SET -- The time to fuzz the settings has not yet come
    //   (see comments in Client/QueryFuzzer.cpp)
    size_t this_query_runs = query_fuzzer_runs;
    if (orig_ast->as<ASTInsertQuery>() ||
        orig_ast->as<ASTCreateQuery>() ||
        orig_ast->as<ASTDropQuery>() ||
        orig_ast->as<ASTSetQuery>())
    ASTs queries_for_fuzzed_tables;

    if (orig_ast->as<ASTSetQuery>())
    {
        this_query_runs = 1;
    }
    else if (const auto * create = orig_ast->as<ASTCreateQuery>())
    {
        if (QueryFuzzer::isSuitableForFuzzing(*create))
            this_query_runs = create_query_fuzzer_runs;
        else
            this_query_runs = 1;
    }
    else if (const auto * insert = orig_ast->as<ASTInsertQuery>())
    {
        this_query_runs = 1;
        queries_for_fuzzed_tables = fuzzer.getInsertQueriesForFuzzedTables(full_query);
    }
    else if (const auto * drop = orig_ast->as<ASTDropQuery>())
    {
        this_query_runs = 1;
        queries_for_fuzzed_tables = fuzzer.getDropQueriesForFuzzedTables(*drop);
    }

    String query_to_execute;
    ASTPtr parsed_query;

    ASTPtr fuzz_base = orig_ast;

    for (size_t fuzz_step = 0; fuzz_step < this_query_runs; ++fuzz_step)
    {
        fmt::print(stderr, "Fuzzing step {} out of {}\n", fuzz_step, this_query_runs);

@ -630,9 +706,9 @@ bool Client::processWithFuzzing(const String & full_query)
                continue;
            }

            parsed_query = ast_to_process;
            query_to_execute = parsed_query->formatForErrorMessage();
            processParsedSingleQuery(full_query, query_to_execute, parsed_query);
            query_to_execute = ast_to_process->formatForErrorMessage();
            if (auto res = processFuzzingStep(query_to_execute, ast_to_process))
                return *res;
        }
        catch (...)
        {
@ -645,60 +721,6 @@ bool Client::processWithFuzzing(const String & full_query)
            have_error = true;
        }

        const auto * exception = server_exception ? server_exception.get() : client_exception.get();
        // Sometimes you may get TOO_DEEP_RECURSION from the server,
        // and TOO_DEEP_RECURSION should not fail the fuzzer check.
        if (have_error && exception->code() == ErrorCodes::TOO_DEEP_RECURSION)
        {
            have_error = false;
            server_exception.reset();
            client_exception.reset();
            return true;
        }

        if (have_error)
        {
            fmt::print(stderr, "Error on processing query '{}': {}\n", ast_to_process->formatForErrorMessage(), exception->message());

            // Try to reconnect after errors, for two reasons:
            // 1. We might not have realized that the server died, e.g. if
            //    it sent us a <Fatal> trace and closed connection properly.
            // 2. The connection might have gotten into a wrong state and
            //    the next query will get false positive about
            //    "Unknown packet from server".
            try
            {
                connection->forceConnected(connection_parameters.timeouts);
            }
            catch (...)
            {
                // Just report it, we'll terminate below.
                fmt::print(stderr,
                    "Error while reconnecting to the server: {}\n",
                    getCurrentExceptionMessage(true));

                // The reconnection might fail, but we'll still be connected
                // in the sense of `connection->isConnected() = true`,
                // in case when the requested database doesn't exist.
                // Disconnect manually now, so that the following code doesn't
                // have any doubts, and the connection state is predictable.
                connection->disconnect();
            }
        }

        if (!connection->isConnected())
        {
            // Probably the server is dead because we found an assertion
            // failure. Fail fast.
            fmt::print(stderr, "Lost connection to the server.\n");

            // Print the changed settings because they might be needed to
            // reproduce the error.
            printChangedSettings();

            return false;
        }

        // Check that after the query is formatted, we can parse it back,
        // format again and get the same result. Unfortunately, we can't
        // compare the ASTs, which would be more sensitive to errors. This
@ -729,13 +751,12 @@ bool Client::processWithFuzzing(const String & full_query)
        // query, but second and third.
        // If you have to add any more workarounds to this check, just remove
        // it altogether, it's not so useful.
        if (parsed_query && !have_error && !queryHasWithClause(*parsed_query))
        if (ast_to_process && !have_error && !queryHasWithClause(*ast_to_process))
        {
            ASTPtr ast_2;
            try
            {
                const auto * tmp_pos = query_to_execute.c_str();

                ast_2 = parseQuery(tmp_pos, tmp_pos + query_to_execute.size(), false /* allow_multi_statements */);
            }
            catch (Exception & e)
@ -762,7 +783,7 @@ bool Client::processWithFuzzing(const String & full_query)
                    "Got the following (different) text after formatting the fuzzed query and parsing it back:\n'{}'\n, expected:\n'{}'\n",
                    text_3, text_2);
                fmt::print(stderr, "In more detail:\n");
                fmt::print(stderr, "AST-1 (generated by fuzzer):\n'{}'\n", parsed_query->dumpTree());
                fmt::print(stderr, "AST-1 (generated by fuzzer):\n'{}'\n", ast_to_process->dumpTree());
                fmt::print(stderr, "Text-1 (AST-1 formatted):\n'{}'\n", query_to_execute);
                fmt::print(stderr, "AST-2 (Text-1 parsed):\n'{}'\n", ast_2->dumpTree());
                fmt::print(stderr, "Text-2 (AST-2 formatted):\n'{}'\n", text_2);
@ -784,6 +805,7 @@ bool Client::processWithFuzzing(const String & full_query)
            // so that it doesn't influence the exit code.
            server_exception.reset();
            client_exception.reset();
            fuzzer.notifyQueryFailed(ast_to_process);
            have_error = false;
        }
        else if (ast_to_process->formatForErrorMessage().size() > 500)
@ -800,6 +822,35 @@ bool Client::processWithFuzzing(const String & full_query)
        }
    }

    for (const auto & query : queries_for_fuzzed_tables)
    {
        std::cout << std::endl;
        WriteBufferFromOStream ast_buf(std::cout, 4096);
        formatAST(*query, ast_buf, false /*highlight*/);
        ast_buf.next();
        std::cout << std::endl << std::endl;

        try
        {
            query_to_execute = query->formatForErrorMessage();
            if (auto res = processFuzzingStep(query_to_execute, query))
                return *res;
        }
        catch (...)
        {
            client_exception = std::make_unique<Exception>(getCurrentExceptionMessage(print_stack_trace), getCurrentExceptionCode());
            have_error = true;
        }

        if (have_error)
        {
            server_exception.reset();
            client_exception.reset();
            fuzzer.notifyQueryFailed(query);
            have_error = false;
        }
    }

    return true;
}

@ -834,6 +885,7 @@ void Client::addOptions(OptionsDescription & options_description)
        ("compression", po::value<bool>(), "enable or disable compression (enabled by default for remote communication and disabled for localhost communication).")

        ("query-fuzzer-runs", po::value<int>()->default_value(0), "After executing every SELECT query, do random mutations in it and run again specified number of times. This is used for testing to discover unexpected corner cases.")
        ("create-query-fuzzer-runs", po::value<int>()->default_value(0), "")
        ("interleave-queries-file", po::value<std::vector<std::string>>()->multitoken(),
            "file path with queries to execute before every file from 'queries-file'; multiple files can be specified (--queries-file file1 file2...); this is needed to enable more aggressive fuzzing of newly added tests (see 'query-fuzzer-runs' option)")

@ -994,6 +1046,17 @@ void Client::processOptions(const OptionsDescription & options_description,
        ignore_error = true;
    }

    if ((create_query_fuzzer_runs = options["create-query-fuzzer-runs"].as<int>()))
    {
        // Fuzzer implies multiquery.
        config().setBool("multiquery", true);
        // Ignore errors in parsing queries.
        config().setBool("ignore-error", true);

        global_context->setSetting("allow_suspicious_low_cardinality_types", true);
        ignore_error = true;
    }

    if (options.count("opentelemetry-traceparent"))
    {
        String traceparent = options["opentelemetry-traceparent"].as<std::string>();

@ -17,6 +17,7 @@ public:

protected:
    bool processWithFuzzing(const String & full_query) override;
    std::optional<bool> processFuzzingStep(const String & query_to_execute, const ASTPtr & parsed_query);

    void connect() override;

@ -1,6 +1,6 @@
#pragma once
/// This file was autogenerated by CMake

// .h autogenerated by cmake !
#pragma once

#cmakedefine01 ENABLE_CLICKHOUSE_SERVER
#cmakedefine01 ENABLE_CLICKHOUSE_CLIENT

@ -58,7 +58,7 @@ void DisksApp::addOptions(
        ("disk", po::value<String>(), "Set disk name")
        ("command_name", po::value<String>(), "Name for command to do")
        ("send-logs", "Send logs")
        ("log-level", "Logging level")
        ("log-level", po::value<String>(), "Logging level")
        ;

    positional_options_description.add("command_name", 1);

@ -24,8 +24,8 @@
#include <pwd.h>
#include <Coordination/FourLetterCommand.h>

#include "config_core.h"
#include "Common/config_version.h"
#include "config.h"
#include "config_version.h"

#if USE_SSL
# include <Poco/Net/Context.h>

@ -13,7 +13,6 @@
#include <Interpreters/DatabaseCatalog.h>
#include <base/getFQDNOrHostName.h>
#include <Common/scope_guard_safe.h>
#include <Interpreters/UserDefinedSQLObjectsLoader.h>
#include <Interpreters/Session.h>
#include <Access/AccessControl.h>
#include <Common/Exception.h>

@ -32,6 +31,7 @@
#include <Parsers/IAST.h>
#include <Parsers/ASTInsertQuery.h>
#include <Common/ErrorHandlers.h>
#include <Functions/UserDefined/IUserDefinedSQLObjectsLoader.h>
#include <Functions/registerFunctions.h>
#include <AggregateFunctions/registerAggregateFunctions.h>
#include <TableFunctions/registerTableFunctions.h>

@ -602,8 +602,6 @@ void LocalServer::processConfig()
    global_context->setCurrentDatabase(default_database);
    applyCmdOptions(global_context);

    bool enable_objects_loader = false;

    if (config().has("path"))
    {
        String path = global_context->getPath();

@ -611,12 +609,6 @@ void LocalServer::processConfig()
        /// Lock path directory before read
        status.emplace(fs::path(path) / "status", StatusFile::write_full_info);

        LOG_DEBUG(log, "Loading user defined objects from {}", path);
        Poco::File(path + "user_defined/").createDirectories();
        UserDefinedSQLObjectsLoader::instance().loadObjects(global_context);
        enable_objects_loader = true;
        LOG_DEBUG(log, "Loaded user defined objects.");

        LOG_DEBUG(log, "Loading metadata from {}", path);
        loadMetadataSystem(global_context);
        attachSystemTablesLocal(global_context, *createMemoryDatabaseIfNotExists(global_context, DatabaseCatalog::SYSTEM_DATABASE));

@ -630,6 +622,9 @@ void LocalServer::processConfig()
            DatabaseCatalog::instance().loadDatabases();
        }

        /// For ClickHouse local if path is not set the loader will be disabled.
        global_context->getUserDefinedSQLObjectsLoader().loadObjects();

        LOG_DEBUG(log, "Loaded metadata.");
    }
    else if (!config().has("no-system-tables"))

@ -639,9 +634,6 @@ void LocalServer::processConfig()
        attachInformationSchema(global_context, *createMemoryDatabaseIfNotExists(global_context, DatabaseCatalog::INFORMATION_SCHEMA_UPPERCASE));
    }

    /// Persist SQL user defined objects only if user_defined folder was created
    UserDefinedSQLObjectsLoader::instance().enable(enable_objects_loader);

    server_display_name = config().getString("display_name", getFQDNOrHostName());
    prompt_by_server_display_name = config().getRawString("prompt_by_server_display_name.default", "{display_name} :) ");
    std::map<String, String> prompt_substitutions{{"display_name", server_display_name}};

@ -219,7 +219,7 @@ auto instructionFailToString(InstructionFail fail)
        case InstructionFail::AVX512:
            ret("AVX512");
    }
    __builtin_unreachable();
    UNREACHABLE();
}
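This hunk, and the identical ones later in the commit, swap the raw compiler builtin for a project-wide UNREACHABLE() macro. A plausible shape for such a macro (an illustrative sketch only; the real definition lives elsewhere in the ClickHouse tree) is:

#include <cstdlib>

// Sketch: abort loudly in debug builds instead of invoking
// __builtin_unreachable(), whose behaviour is undefined if the
// "unreachable" path is ever actually taken.
#ifdef NDEBUG
#    define UNREACHABLE() __builtin_unreachable()
#else
#    define UNREACHABLE() std::abort()
#endif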

@ -1,6 +1,6 @@
#pragma once

#include <Common/config.h>
#include "config.h"

#if USE_ODBC

@ -2,7 +2,7 @@

#include <Interpreters/Context.h>
#include <Server/HTTP/HTTPRequestHandler.h>
#include <Common/config.h>
#include "config.h"
#include <Poco/Logger.h>

#if USE_ODBC

@ -20,7 +20,7 @@
#include <Common/BridgeProtocolVersion.h>
#include <Common/logger_useful.h>
#include <Server/HTTP/HTMLForm.h>
#include <Common/config.h>
#include "config.h"

#include <mutex>
#include <memory>

@ -1,7 +1,7 @@
#include "ODBCHandlerFactory.h"
#include "PingHandler.h"
#include "ColumnInfoHandler.h"
#include <Common/config.h>
#include "config.h"
#include <Poco/URI.h>
#include <Poco/Net/HTTPServerRequest.h>
#include <Common/logger_useful.h>

@ -2,7 +2,7 @@

#include <Interpreters/Context.h>
#include <Server/HTTP/HTTPRequestHandler.h>
#include <Common/config.h>
#include "config.h"
#include <Poco/Logger.h>

#if USE_ODBC

@ -1,6 +1,6 @@
#pragma once

#include <Common/config.h>
#include "config.h"

#if USE_ODBC

@ -53,7 +53,6 @@
#include <Interpreters/ExternalDictionariesLoader.h>
#include <Interpreters/ProcessList.h>
#include <Interpreters/loadMetadata.h>
#include <Interpreters/UserDefinedSQLObjectsLoader.h>
#include <Interpreters/JIT/CompiledExpressionCache.h>
#include <Access/AccessControl.h>
#include <Storages/StorageReplicatedMergeTree.h>

@ -62,6 +61,7 @@
#include <Storages/Cache/ExternalDataSourceCache.h>
#include <Storages/Cache/registerRemoteFileMetadatas.h>
#include <AggregateFunctions/registerAggregateFunctions.h>
#include <Functions/UserDefined/IUserDefinedSQLObjectsLoader.h>
#include <Functions/registerFunctions.h>
#include <TableFunctions/registerTableFunctions.h>
#include <Formats/registerFormats.h>

@ -79,17 +79,23 @@
#include <Common/ThreadFuzzer.h>
#include <Common/getHashOfLoadedBinary.h>
#include <Common/filesystemHelpers.h>
#if USE_BORINGSSL
#include <Compression/CompressionCodecEncrypted.h>
#endif
#include <Server/HTTP/HTTPServerConnectionFactory.h>
#include <Server/MySQLHandlerFactory.h>
#include <Server/PostgreSQLHandlerFactory.h>
#include <Server/ProxyV1HandlerFactory.h>
#include <Server/TLSHandlerFactory.h>
#include <Server/CertificateReloader.h>
#include <Server/ProtocolServerAdapter.h>
#include <Server/HTTP/HTTPServer.h>
#include <Interpreters/AsynchronousInsertQueue.h>
#include <filesystem>
#include <unordered_set>

#include "config_core.h"
#include "Common/config_version.h"
#include "config.h"
#include "config_version.h"

#if defined(OS_LINUX)
# include <sys/mman.h>

@ -385,7 +391,16 @@ bool getListenTry(const Poco::Util::AbstractConfiguration & config)
{
    bool listen_try = config.getBool("listen_try", false);
    if (!listen_try)
        listen_try = DB::getMultipleValuesFromConfig(config, "", "listen_host").empty();
    {
        Poco::Util::AbstractConfiguration::Keys protocols;
        config.keys("protocols", protocols);
        listen_try =
            DB::getMultipleValuesFromConfig(config, "", "listen_host").empty() &&
            std::none_of(protocols.begin(), protocols.end(), [&](const auto & protocol)
            {
                return config.has("protocols." + protocol + ".host") && config.has("protocols." + protocol + ".port");
            });
    }
    return listen_try;
}
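In words: listen_try now stays enabled by default only while nothing pins the server to explicit endpoints, i.e. no listen_host entries and no protocols entry that sets both host and port. A condensed restatement of the predicate (plain standalone C++ with illustrative inputs, not the Poco-based original):

#include <algorithm>
#include <string>
#include <vector>

// True when neither listen_host nor any fully-specified per-protocol
// endpoint (host + port) is present in the configuration.
bool defaultListenTry(const std::vector<std::string> & listen_hosts,
                      const std::vector<bool> & protocol_has_host_and_port)
{
    return listen_hosts.empty()
        && std::none_of(protocol_has_host_and_port.begin(),
                        protocol_has_host_and_port.end(),
                        [](bool fully_specified) { return fully_specified; });
}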

@ -969,10 +984,10 @@ int Server::main(const std::vector<std::string> & /*args*/)

    /// Storage with temporary data for processing of heavy queries.
    {
        std::string tmp_path = config().getString("tmp_path", path / "tmp/");
        std::string tmp_policy = config().getString("tmp_policy", "");
        size_t tmp_max_size = config().getUInt64("tmp_max_size", 0);
        const VolumePtr & volume = global_context->setTemporaryStorage(tmp_path, tmp_policy, tmp_max_size);
        std::string temporary_path = config().getString("tmp_path", path / "tmp/");
        std::string temporary_policy = config().getString("tmp_policy", "");
        size_t max_size = config().getUInt64("max_temporary_data_on_disk_size", 0);
        const VolumePtr & volume = global_context->setTemporaryStorage(temporary_path, temporary_policy, max_size);
        for (const DiskPtr & disk : volume->getDisks())
            setupTmpPath(log, disk->getPath());
    }

@ -1008,12 +1023,6 @@ int Server::main(const std::vector<std::string> & /*args*/)
        fs::create_directories(user_scripts_path);
    }

    {
        std::string user_defined_path = config().getString("user_defined_path", path / "user_defined/");
        global_context->setUserDefinedPath(user_defined_path);
        fs::create_directories(user_defined_path);
    }

    /// top_level_domains_lists
    {
        const std::string & top_level_domains_path = config().getString("top_level_domains_path", path / "top_level_domains/");

@ -1264,8 +1273,9 @@ int Server::main(const std::vector<std::string> & /*args*/)

            global_context->updateStorageConfiguration(*config);
            global_context->updateInterserverCredentials(*config);

#if USE_BORINGSSL
            CompressionCodecEncrypted::Configuration::instance().tryLoad(*config, "encryption_codecs");
#endif
#if USE_SSL
            CertificateReloader::instance().tryLoad(*config);
#endif

@ -1418,8 +1428,7 @@ int Server::main(const std::vector<std::string> & /*args*/)
        global_context->setAsynchronousInsertQueue(std::make_shared<AsynchronousInsertQueue>(
            global_context,
            settings.async_insert_threads,
            settings.async_insert_max_data_size,
            AsynchronousInsertQueue::Timeout{.busy = settings.async_insert_busy_timeout_ms, .stale = settings.async_insert_stale_timeout_ms}));
            settings.async_insert_cleanup_timeout_ms));

    /// Size of cache for marks (index of MergeTree family of tables).
    size_t mark_cache_size = config().getUInt64("mark_cache_size", 5368709120);

@ -1471,9 +1480,10 @@ int Server::main(const std::vector<std::string> & /*args*/)
        global_context->getMergeTreeSettings().sanityCheck(background_pool_tasks);
        global_context->getReplicatedMergeTreeSettings().sanityCheck(background_pool_tasks);
    }

#if USE_BORINGSSL
    /// Try to set up encryption. If there are errors in the config, they will be printed and the server won't start.
    CompressionCodecEncrypted::Configuration::instance().load(config(), "encryption_codecs");
#endif

    SCOPE_EXIT({
        /// Stop reloading of the main config. This must be done before `global_context->shutdown()` because

@ -1556,18 +1566,6 @@ int Server::main(const std::vector<std::string> & /*args*/)
    /// system logs may copy global context.
    global_context->setCurrentDatabaseNameInGlobalContext(default_database);

    LOG_INFO(log, "Loading user defined objects from {}", path_str);
    try
    {
        UserDefinedSQLObjectsLoader::instance().loadObjects(global_context);
    }
    catch (...)
    {
        tryLogCurrentException(log, "Caught exception while loading user defined objects");
        throw;
    }
    LOG_DEBUG(log, "Loaded user defined objects");

    LOG_INFO(log, "Loading metadata from {}", path_str);

    try

@ -1595,6 +1593,8 @@ int Server::main(const std::vector<std::string> & /*args*/)
        database_catalog.loadDatabases();
        /// After loading validate that default database exists
        database_catalog.assertDatabaseExists(default_database);
        /// Load user-defined SQL functions.
        global_context->getUserDefinedSQLObjectsLoader().loadObjects();
    }
    catch (...)
    {

@ -1850,6 +1850,82 @@ int Server::main(const std::vector<std::string> & /*args*/)
    return Application::EXIT_OK;
}

std::unique_ptr<TCPProtocolStackFactory> Server::buildProtocolStackFromConfig(
    const Poco::Util::AbstractConfiguration & config,
    const std::string & protocol,
    Poco::Net::HTTPServerParams::Ptr http_params,
    AsynchronousMetrics & async_metrics,
    bool & is_secure)
{
    auto create_factory = [&](const std::string & type, const std::string & conf_name) -> TCPServerConnectionFactory::Ptr
    {
        if (type == "tcp")
            return TCPServerConnectionFactory::Ptr(new TCPHandlerFactory(*this, false, false));

        if (type == "tls")
#if USE_SSL
            return TCPServerConnectionFactory::Ptr(new TLSHandlerFactory(*this, conf_name));
#else
            throw Exception{"SSL support for TCP protocol is disabled because Poco library was built without NetSSL support.",
                            ErrorCodes::SUPPORT_IS_DISABLED};
#endif

        if (type == "proxy1")
            return TCPServerConnectionFactory::Ptr(new ProxyV1HandlerFactory(*this, conf_name));
        if (type == "mysql")
            return TCPServerConnectionFactory::Ptr(new MySQLHandlerFactory(*this));
        if (type == "postgres")
            return TCPServerConnectionFactory::Ptr(new PostgreSQLHandlerFactory(*this));
        if (type == "http")
            return TCPServerConnectionFactory::Ptr(
                new HTTPServerConnectionFactory(context(), http_params, createHandlerFactory(*this, config, async_metrics, "HTTPHandler-factory"))
            );
        if (type == "prometheus")
            return TCPServerConnectionFactory::Ptr(
                new HTTPServerConnectionFactory(context(), http_params, createHandlerFactory(*this, config, async_metrics, "PrometheusHandler-factory"))
            );
        if (type == "interserver")
            return TCPServerConnectionFactory::Ptr(
                new HTTPServerConnectionFactory(context(), http_params, createHandlerFactory(*this, config, async_metrics, "InterserverIOHTTPHandler-factory"))
            );

        throw Exception(ErrorCodes::INVALID_CONFIG_PARAMETER, "Protocol configuration error, unknown protocol name '{}'", type);
    };

    std::string conf_name = "protocols." + protocol;
    std::string prefix = conf_name + ".";
    std::unordered_set<std::string> pset {conf_name};

    auto stack = std::make_unique<TCPProtocolStackFactory>(*this, conf_name);

    while (true)
    {
        // if there is no "type" - it's a reference to another protocol and this is just an endpoint
        if (config.has(prefix + "type"))
        {
            std::string type = config.getString(prefix + "type");
            if (type == "tls")
            {
                if (is_secure)
                    throw Exception(ErrorCodes::INVALID_CONFIG_PARAMETER, "Protocol '{}' contains more than one TLS layer", protocol);
                is_secure = true;
            }

            stack->append(create_factory(type, conf_name));
        }

        if (!config.has(prefix + "impl"))
            break;

        conf_name = "protocols." + config.getString(prefix + "impl");
        prefix = conf_name + ".";

        if (!pset.insert(conf_name).second)
            throw Exception(ErrorCodes::INVALID_CONFIG_PARAMETER, "Protocol '{}' configuration contains a loop on '{}'", protocol, conf_name);
    }

    return stack;
}
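For context, this walk follows "impl" references from the outermost layer inward, appending one connection factory per "type" and rejecting cycles. A minimal standalone sketch of the same chain-following over a plain map (hypothetical entry names; the real code reads Poco configuration keys under the protocols section):

#include <iostream>
#include <map>
#include <stdexcept>
#include <string>
#include <unordered_set>
#include <utility>

int main()
{
    // Each entry: {layer type, name of the wrapped entry, or "" for the
    // innermost layer}, e.g. TLS over PROXYv1 over plain TCP.
    std::map<std::string, std::pair<std::string, std::string>> protocols{
        {"secure_tcp", {"tls", "proxied_tcp"}},
        {"proxied_tcp", {"proxy1", "plain_tcp"}},
        {"plain_tcp", {"tcp", ""}},
    };

    std::string conf_name = "secure_tcp";
    std::unordered_set<std::string> seen{conf_name};
    while (true)
    {
        const auto & [type, impl] = protocols.at(conf_name);
        std::cout << "append factory for layer: " << type << '\n';
        if (impl.empty())
            break; // innermost layer reached
        conf_name = impl;
        if (!seen.insert(conf_name).second) // mirrors the INVALID_CONFIG_PARAMETER loop check
            throw std::runtime_error("loop in protocol chain");
    }
}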

void Server::createServers(
    Poco::Util::AbstractConfiguration & config,

@ -1868,6 +1944,55 @@ void Server::createServers(
    http_params->setTimeout(settings.http_receive_timeout);
    http_params->setKeepAliveTimeout(keep_alive_timeout);

    Poco::Util::AbstractConfiguration::Keys protocols;
    config.keys("protocols", protocols);

    for (const auto & protocol : protocols)
    {
        std::vector<std::string> hosts;
        if (config.has("protocols." + protocol + ".host"))
            hosts.push_back(config.getString("protocols." + protocol + ".host"));
        else
            hosts = listen_hosts;

        for (const auto & host : hosts)
        {
            std::string conf_name = "protocols." + protocol;
            std::string prefix = conf_name + ".";

            if (!config.has(prefix + "port"))
                continue;

            std::string description {"<undefined> protocol"};
            if (config.has(prefix + "description"))
                description = config.getString(prefix + "description");
            std::string port_name = prefix + "port";
            bool is_secure = false;
            auto stack = buildProtocolStackFromConfig(config, protocol, http_params, async_metrics, is_secure);

            if (stack->empty())
                throw Exception(ErrorCodes::INVALID_CONFIG_PARAMETER, "Protocol '{}' stack empty", protocol);

            createServer(config, host, port_name.c_str(), listen_try, start_servers, servers, [&](UInt16 port) -> ProtocolServerAdapter
            {
                Poco::Net::ServerSocket socket;
                auto address = socketBindListen(config, socket, host, port, is_secure);
                socket.setReceiveTimeout(settings.receive_timeout);
                socket.setSendTimeout(settings.send_timeout);

                return ProtocolServerAdapter(
                    host,
                    port_name.c_str(),
                    description + ": " + address.toString(),
                    std::make_unique<TCPServer>(
                        stack.release(),
                        server_pool,
                        socket,
                        new Poco::Net::TCPServerParams));
            });
        }
    }

    for (const auto & listen_host : listen_hosts)
    {
        /// HTTP

@ -2115,13 +2240,50 @@ void Server::updateServers(
    {
        if (!server.isStopping())
        {
            bool has_host = std::find(listen_hosts.begin(), listen_hosts.end(), server.getListenHost()) != listen_hosts.end();
            bool has_port = !config.getString(server.getPortName(), "").empty();
            std::string port_name = server.getPortName();
            bool has_host = false;
            bool is_http = false;
            if (port_name.starts_with("protocols."))
            {
                std::string protocol = port_name.substr(0, port_name.find_last_of('.'));
                has_host = config.has(protocol + ".host");

            /// NOTE: better to compare using getPortName() over using
            /// dynamic_cast<> since HTTPServer is also used for prometheus and
            /// internal replication communications.
            bool is_http = server.getPortName() == "http_port" || server.getPortName() == "https_port";
                std::string conf_name = protocol;
                std::string prefix = protocol + ".";
                std::unordered_set<std::string> pset {conf_name};
                while (true)
                {
                    if (config.has(prefix + "type"))
                    {
                        std::string type = config.getString(prefix + "type");
                        if (type == "http")
                        {
                            is_http = true;
                            break;
                        }
                    }

                    if (!config.has(prefix + "impl"))
                        break;

                    conf_name = "protocols." + config.getString(prefix + "impl");
                    prefix = conf_name + ".";

                    if (!pset.insert(conf_name).second)
                        throw Exception(ErrorCodes::INVALID_CONFIG_PARAMETER, "Protocol '{}' configuration contains a loop on '{}'", protocol, conf_name);
                }
            }
            else
            {
                /// NOTE: better to compare using getPortName() over using
                /// dynamic_cast<> since HTTPServer is also used for prometheus and
                /// internal replication communications.
                is_http = server.getPortName() == "http_port" || server.getPortName() == "https_port";
            }

            if (!has_host)
                has_host = std::find(listen_hosts.begin(), listen_hosts.end(), server.getListenHost()) != listen_hosts.end();
            bool has_port = !config.getString(port_name, "").empty();
            bool force_restart = is_http && !isSameConfiguration(previous_config, config, "http_handlers");
            if (force_restart)
                LOG_TRACE(log, "<http_handlers> had been changed, will reload {}", server.getDescription());

@ -3,6 +3,8 @@
#include <Server/IServer.h>

#include <Daemon/BaseDaemon.h>
#include <Server/TCPProtocolStackFactory.h>
#include <Poco/Net/HTTPServerParams.h>

/** Server provides three interfaces:
 * 1. HTTP - simple interface for any applications.

@ -77,6 +79,13 @@ private:
        UInt16 port,
        [[maybe_unused]] bool secure = false) const;

    std::unique_ptr<TCPProtocolStackFactory> buildProtocolStackFromConfig(
        const Poco::Util::AbstractConfiguration & config,
        const std::string & protocol,
        Poco::Net::HTTPServerParams::Ptr http_params,
        AsynchronousMetrics & async_metrics,
        bool & is_secure);

    using CreateServerFunc = std::function<ProtocolServerAdapter(UInt16)>;
    void createServer(
        Poco::Util::AbstractConfiguration & config,

@ -1173,6 +1173,18 @@
        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
    </processors_profile_log>

    <!-- Log of asynchronous inserts. It allows checking the status
         of insert queries in fire-and-forget mode.
    -->
    <asynchronous_insert_log>
        <database>system</database>
        <table>asynchronous_insert_log</table>

        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
        <partition_by>event_date</partition_by>
        <ttl>event_date + INTERVAL 3 DAY</ttl>
    </asynchronous_insert_log>

    <!-- <top_level_domains_path>/var/lib/clickhouse/top_level_domains/</top_level_domains_path> -->
    <!-- Custom TLD lists.
         Format: <name>/path/to/file</name>

@ -145,7 +145,7 @@ AccessEntityPtr deserializeAccessEntity(const String & definition, const String
    {
        e.addMessage("Could not parse " + file_path);
        e.rethrow();
        __builtin_unreachable();
        UNREACHABLE();
    }
}

@ -209,7 +209,7 @@ namespace
            case TABLE_LEVEL: return AccessFlags::allFlagsGrantableOnTableLevel();
            case COLUMN_LEVEL: return AccessFlags::allFlagsGrantableOnColumnLevel();
        }
        __builtin_unreachable();
        UNREACHABLE();
    }
}

@ -1,6 +1,6 @@
#pragma once

#include "config_core.h"
#include "config.h"

#include <Access/Credentials.h>
#include <base/types.h>

@ -215,7 +215,7 @@ std::vector<UUID> IAccessStorage::insert(const std::vector<AccessEntityPtr> & mu
            e.addMessage("After successfully inserting {}/{}: {}", successfully_inserted.size(), multiple_entities.size(), successfully_inserted_str);
        }
        e.rethrow();
        __builtin_unreachable();
        UNREACHABLE();
    }
}

@ -319,7 +319,7 @@ std::vector<UUID> IAccessStorage::remove(const std::vector<UUID> & ids, bool thr
            e.addMessage("After successfully removing {}/{}: {}", removed_names.size(), ids.size(), removed_names_str);
        }
        e.rethrow();
        __builtin_unreachable();
        UNREACHABLE();
    }
}

@ -416,7 +416,7 @@ std::vector<UUID> IAccessStorage::update(const std::vector<UUID> & ids, const Up
            e.addMessage("After successfully updating {}/{}: {}", names_of_updated.size(), ids.size(), names_of_updated_str);
        }
        e.rethrow();
        __builtin_unreachable();
        UNREACHABLE();
    }
}

@ -1,6 +1,6 @@
#pragma once

#include "config_core.h"
#include "config.h"

#include <base/types.h>

@ -1,6 +1,6 @@
#pragma once

#include "config_core.h"
#include "config.h"

#include <base/types.h>

@ -11,7 +11,7 @@
#include <AggregateFunctions/AggregateFunctionSum.h>
#include <Core/DecimalFunctions.h>

#include <Common/config.h>
#include "config.h"

#if USE_EMBEDDED_COMPILER
# include <llvm/IR/IRBuilder.h>

@ -153,10 +153,10 @@ public:

        auto * numerator_type = toNativeType<Numerator>(b);

        auto * numerator_dst_ptr = b.CreatePointerCast(aggregate_data_dst_ptr, numerator_type->getPointerTo());
        auto * numerator_dst_ptr = aggregate_data_dst_ptr;
        auto * numerator_dst_value = b.CreateLoad(numerator_type, numerator_dst_ptr);

        auto * numerator_src_ptr = b.CreatePointerCast(aggregate_data_src_ptr, numerator_type->getPointerTo());
        auto * numerator_src_ptr = aggregate_data_src_ptr;
        auto * numerator_src_value = b.CreateLoad(numerator_type, numerator_src_ptr);

        auto * numerator_result_value = numerator_type->isIntegerTy() ? b.CreateAdd(numerator_dst_value, numerator_src_value) : b.CreateFAdd(numerator_dst_value, numerator_src_value);

@ -164,10 +164,8 @@ public:

        auto * denominator_type = toNativeType<Denominator>(b);
        static constexpr size_t denominator_offset = offsetof(Fraction, denominator);
        auto * ty_aggregate_data_dst_ptr = llvm::cast<llvm::PointerType>(aggregate_data_dst_ptr->getType()->getScalarType())->getElementType();
        auto * denominator_dst_ptr = b.CreatePointerCast(b.CreateConstInBoundsGEP1_64(ty_aggregate_data_dst_ptr, aggregate_data_dst_ptr, denominator_offset), denominator_type->getPointerTo());
        auto * ty_aggregate_data_src_ptr = llvm::cast<llvm::PointerType>(aggregate_data_src_ptr->getType()->getScalarType())->getElementType();
        auto * denominator_src_ptr = b.CreatePointerCast(b.CreateConstInBoundsGEP1_64(ty_aggregate_data_src_ptr, aggregate_data_src_ptr, denominator_offset), denominator_type->getPointerTo());
        auto * denominator_dst_ptr = b.CreateConstInBoundsGEP1_64(b.getInt8Ty(), aggregate_data_dst_ptr, denominator_offset);
        auto * denominator_src_ptr = b.CreateConstInBoundsGEP1_64(b.getInt8Ty(), aggregate_data_src_ptr, denominator_offset);

        auto * denominator_dst_value = b.CreateLoad(denominator_type, denominator_dst_ptr);
        auto * denominator_src_value = b.CreateLoad(denominator_type, denominator_src_ptr);

@ -181,13 +179,12 @@ public:
        llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);

        auto * numerator_type = toNativeType<Numerator>(b);
        auto * numerator_ptr = b.CreatePointerCast(aggregate_data_ptr, numerator_type->getPointerTo());
        auto * numerator_ptr = aggregate_data_ptr;
        auto * numerator_value = b.CreateLoad(numerator_type, numerator_ptr);

        auto * denominator_type = toNativeType<Denominator>(b);
        static constexpr size_t denominator_offset = offsetof(Fraction, denominator);
        auto * ty_aggregate_data_ptr = llvm::cast<llvm::PointerType>(aggregate_data_ptr->getType()->getScalarType())->getElementType();
        auto * denominator_ptr = b.CreatePointerCast(b.CreateConstGEP1_32(ty_aggregate_data_ptr, aggregate_data_ptr, denominator_offset), denominator_type->getPointerTo());
        auto * denominator_ptr = b.CreateConstGEP1_32(b.getInt8Ty(), aggregate_data_ptr, denominator_offset);
        auto * denominator_value = b.CreateLoad(denominator_type, denominator_ptr);

        auto * double_numerator = nativeCast<Numerator>(b, numerator_value, b.getDoubleTy());

@ -306,7 +303,7 @@ public:

        auto * numerator_type = toNativeType<Numerator>(b);

        auto * numerator_ptr = b.CreatePointerCast(aggregate_data_ptr, numerator_type->getPointerTo());
        auto * numerator_ptr = aggregate_data_ptr;
        auto * numerator_value = b.CreateLoad(numerator_type, numerator_ptr);
        auto * value_cast_to_numerator = nativeCast(b, arguments_types[0], argument_values[0], numerator_type);
        auto * numerator_result_value = numerator_type->isIntegerTy() ? b.CreateAdd(numerator_value, value_cast_to_numerator) : b.CreateFAdd(numerator_value, value_cast_to_numerator);

@ -314,8 +311,7 @@ public:

        auto * denominator_type = toNativeType<Denominator>(b);
        static constexpr size_t denominator_offset = offsetof(Fraction, denominator);
        auto * ty_aggregate_data_ptr = llvm::cast<llvm::PointerType>(aggregate_data_ptr->getType()->getScalarType())->getElementType();
        auto * denominator_ptr = b.CreatePointerCast(b.CreateConstGEP1_32(ty_aggregate_data_ptr, aggregate_data_ptr, denominator_offset), denominator_type->getPointerTo());
        auto * denominator_ptr = b.CreateConstGEP1_32(b.getInt8Ty(), aggregate_data_ptr, denominator_offset);
        auto * denominator_value_updated = b.CreateAdd(b.CreateLoad(denominator_type, denominator_ptr), llvm::ConstantInt::get(denominator_type, 1));
        b.CreateStore(denominator_value_updated, denominator_ptr);
    }
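The JIT hunks above and below all make the same mechanical change for LLVM's opaque-pointer transition: pointer casts and getElementType() on pointer types go away, and member offsets are taken as plain byte GEPs over i8. A compileable sketch of the new idiom (assumes LLVM development headers are available; the helper name is illustrative, not from this commit):

#include <llvm/IR/IRBuilder.h>

// With opaque pointers every pointer is just "ptr": to reach a struct
// member at a known byte offset, index an i8 GEP from the base pointer
// and let the load name the element type explicitly.
llvm::Value * loadAtByteOffset(llvm::IRBuilder<> & b, llvm::Value * base, size_t offset, llvm::Type * elem_type)
{
    llvm::Value * member_ptr = b.CreateConstInBoundsGEP1_64(b.getInt8Ty(), base, offset);
    return b.CreateLoad(elem_type, member_ptr);
}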

@ -60,8 +60,7 @@ public:
        llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);

        auto * numerator_type = toNativeType<Numerator>(b);

        auto * numerator_ptr = b.CreatePointerCast(aggregate_data_ptr, numerator_type->getPointerTo());
        auto * numerator_ptr = aggregate_data_ptr;
        auto * numerator_value = b.CreateLoad(numerator_type, numerator_ptr);

        auto * argument = nativeCast(b, arguments_types[0], argument_values[0], numerator_type);

@ -74,9 +73,7 @@ public:
        auto * denominator_type = toNativeType<Denominator>(b);

        static constexpr size_t denominator_offset = offsetof(Fraction, denominator);
        auto * ty_aggregate_data_ptr = llvm::cast<llvm::PointerType>(aggregate_data_ptr->getType()->getScalarType())->getElementType();
        auto * denominator_offset_ptr = b.CreateConstInBoundsGEP1_64(ty_aggregate_data_ptr, aggregate_data_ptr, denominator_offset);
        auto * denominator_ptr = b.CreatePointerCast(denominator_offset_ptr, denominator_type->getPointerTo());
        auto * denominator_ptr = b.CreateConstInBoundsGEP1_64(b.getInt8Ty(), aggregate_data_ptr, denominator_offset);

        auto * weight_cast_to_denominator = nativeCast(b, arguments_types[1], argument_values[1], denominator_type);

@ -9,7 +9,7 @@

#include <AggregateFunctions/IAggregateFunction.h>

#include <Common/config.h>
#include "config.h"

#if USE_EMBEDDED_COMPILER
# include <llvm/IR/IRBuilder.h>

@ -143,10 +143,7 @@ public:

    void compileCreate(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr) const override
    {
        llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);

        auto * return_type = toNativeType(b, getReturnType());
        auto * value_ptr = b.CreatePointerCast(aggregate_data_ptr, return_type->getPointerTo());
        auto * value_ptr = aggregate_data_ptr;
        Data::compileCreate(builder, value_ptr);
    }

@ -156,7 +153,7 @@ public:

        auto * return_type = toNativeType(b, getReturnType());

        auto * value_ptr = b.CreatePointerCast(aggregate_data_ptr, return_type->getPointerTo());
        auto * value_ptr = aggregate_data_ptr;
        auto * value = b.CreateLoad(return_type, value_ptr);

        const auto & argument_value = argument_values[0];

@ -171,10 +168,10 @@ public:

        auto * return_type = toNativeType(b, getReturnType());

        auto * value_dst_ptr = b.CreatePointerCast(aggregate_data_dst_ptr, return_type->getPointerTo());
        auto * value_dst_ptr = aggregate_data_dst_ptr;
        auto * value_dst = b.CreateLoad(return_type, value_dst_ptr);

        auto * value_src_ptr = b.CreatePointerCast(aggregate_data_src_ptr, return_type->getPointerTo());
        auto * value_src_ptr = aggregate_data_src_ptr;
        auto * value_src = b.CreateLoad(return_type, value_src_ptr);

        auto * result_value = Data::compileUpdate(builder, value_dst, value_src);

@ -187,7 +184,7 @@ public:
        llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);

        auto * return_type = toNativeType(b, getReturnType());
        auto * value_ptr = b.CreatePointerCast(aggregate_data_ptr, return_type->getPointerTo());
        auto * value_ptr = aggregate_data_ptr;

        return b.CreateLoad(return_type, value_ptr);
    }

@ -12,7 +12,7 @@
#include <AggregateFunctions/AggregateFunctionFactory.h>
#include <Common/assert_cast.h>

#include <Common/config.h>
#include "config.h"

#if USE_EMBEDDED_COMPILER
# include <llvm/IR/IRBuilder.h>

@ -169,7 +169,7 @@ public:

        auto * return_type = toNativeType(b, getReturnType());

        auto * count_value_ptr = b.CreatePointerCast(aggregate_data_ptr, return_type->getPointerTo());
        auto * count_value_ptr = aggregate_data_ptr;
        auto * count_value = b.CreateLoad(return_type, count_value_ptr);
        auto * updated_count_value = b.CreateAdd(count_value, llvm::ConstantInt::get(return_type, 1));

@ -182,10 +182,10 @@ public:

        auto * return_type = toNativeType(b, getReturnType());

        auto * count_value_dst_ptr = b.CreatePointerCast(aggregate_data_dst_ptr, return_type->getPointerTo());
        auto * count_value_dst_ptr = aggregate_data_dst_ptr;
        auto * count_value_dst = b.CreateLoad(return_type, count_value_dst_ptr);

        auto * count_value_src_ptr = b.CreatePointerCast(aggregate_data_src_ptr, return_type->getPointerTo());
        auto * count_value_src_ptr = aggregate_data_src_ptr;
        auto * count_value_src = b.CreateLoad(return_type, count_value_src_ptr);

        auto * count_value_dst_updated = b.CreateAdd(count_value_dst, count_value_src);

@ -198,7 +198,7 @@ public:
        llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);

        auto * return_type = toNativeType(b, getReturnType());
        auto * count_value_ptr = b.CreatePointerCast(aggregate_data_ptr, return_type->getPointerTo());
        auto * count_value_ptr = aggregate_data_ptr;

        return b.CreateLoad(return_type, count_value_ptr);
    }

@ -316,7 +316,7 @@ public:
        auto * is_null_value = b.CreateExtractValue(values[0], {1});
        auto * increment_value = b.CreateSelect(is_null_value, llvm::ConstantInt::get(return_type, 0), llvm::ConstantInt::get(return_type, 1));

        auto * count_value_ptr = b.CreatePointerCast(aggregate_data_ptr, return_type->getPointerTo());
        auto * count_value_ptr = aggregate_data_ptr;
        auto * count_value = b.CreateLoad(return_type, count_value_ptr);
        auto * updated_count_value = b.CreateAdd(count_value, increment_value);

@ -329,10 +329,10 @@ public:

        auto * return_type = toNativeType(b, getReturnType());

        auto * count_value_dst_ptr = b.CreatePointerCast(aggregate_data_dst_ptr, return_type->getPointerTo());
        auto * count_value_dst_ptr = aggregate_data_dst_ptr;
        auto * count_value_dst = b.CreateLoad(return_type, count_value_dst_ptr);

        auto * count_value_src_ptr = b.CreatePointerCast(aggregate_data_src_ptr, return_type->getPointerTo());
        auto * count_value_src_ptr = aggregate_data_src_ptr;
        auto * count_value_src = b.CreateLoad(return_type, count_value_src_ptr);

        auto * count_value_dst_updated = b.CreateAdd(count_value_dst, count_value_src);

@ -345,7 +345,7 @@ public:
        llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);

        auto * return_type = toNativeType(b, getReturnType());
        auto * count_value_ptr = b.CreatePointerCast(aggregate_data_ptr, return_type->getPointerTo());
        auto * count_value_ptr = aggregate_data_ptr;

        return b.CreateLoad(return_type, count_value_ptr);
    }
@ -114,4 +114,6 @@ struct AggregateUtils
    }
};

const String & getAggregateFunctionCanonicalNameIfAny(const String & name);

}

@ -56,7 +56,7 @@ static constexpr const char * getNameByTrait()
        return "groupArraySample";
    // else if (Trait::sampler == Sampler::DETERMINATOR) // TODO

    __builtin_unreachable();
    UNREACHABLE();
}

template <typename T>

@ -207,8 +207,7 @@ public:
        if constexpr (result_is_nullable)
            b.CreateStore(llvm::ConstantInt::get(b.getInt8Ty(), 1), aggregate_data_ptr);

        auto * ty_aggregate_data_ptr = llvm::cast<llvm::PointerType>(aggregate_data_ptr->getType()->getScalarType())->getElementType();
        auto * aggregate_data_ptr_with_prefix_size_offset = b.CreateConstInBoundsGEP1_64(ty_aggregate_data_ptr, aggregate_data_ptr, this->prefix_size);
        auto * aggregate_data_ptr_with_prefix_size_offset = b.CreateConstInBoundsGEP1_64(b.getInt8Ty(), aggregate_data_ptr, this->prefix_size);
        this->nested_function->compileAdd(b, aggregate_data_ptr_with_prefix_size_offset, { removeNullable(nullable_type) }, { wrapped_value });
        b.CreateBr(join_block);

@ -420,8 +419,7 @@ public:
        if constexpr (result_is_nullable)
            b.CreateStore(llvm::ConstantInt::get(b.getInt8Ty(), 1), aggregate_data_ptr);

        auto * ty_aggregate_data_ptr = llvm::cast<llvm::PointerType>(aggregate_data_ptr->getType()->getScalarType())->getElementType();
        auto * aggregate_data_ptr_with_prefix_size_offset = b.CreateConstInBoundsGEP1_64(ty_aggregate_data_ptr, aggregate_data_ptr, this->prefix_size);
        auto * aggregate_data_ptr_with_prefix_size_offset = b.CreateConstInBoundsGEP1_64(b.getInt8Ty(), aggregate_data_ptr, this->prefix_size);
        this->nested_function->compileAdd(b, aggregate_data_ptr_with_prefix_size_offset, non_nullable_types, wrapped_values);
        b.CreateBr(join_block);

@ -5,7 +5,7 @@
#include <Common/assert_cast.h>
#include <AggregateFunctions/IAggregateFunction.h>

#include <Common/config.h>
#include "config.h"

#if USE_EMBEDDED_COMPILER
# include <llvm/IR/IRBuilder.h>

@ -14,7 +14,7 @@
#include <DataTypes/DataTypeNullable.h>
#include <AggregateFunctions/IAggregateFunction.h>

#include <Common/config.h>
#include "config.h"

#if USE_EMBEDDED_COMPILER
# include <llvm/IR/IRBuilder.h>

@ -199,11 +199,7 @@ public:
        llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);

        static constexpr size_t value_offset_from_structure = offsetof(SingleValueDataFixed<T>, value);

        auto * type = toNativeType<T>(builder);
        auto * ty_aggregate_data_ptr = llvm::cast<llvm::PointerType>(aggregate_data_ptr->getType()->getScalarType())->getElementType();
        auto * value_ptr_with_offset = b.CreateConstInBoundsGEP1_64(ty_aggregate_data_ptr, aggregate_data_ptr, value_offset_from_structure);
        auto * value_ptr = b.CreatePointerCast(value_ptr_with_offset, type->getPointerTo());
        auto * value_ptr = b.CreateConstInBoundsGEP1_64(b.getInt8Ty(), aggregate_data_ptr, value_offset_from_structure);

        return value_ptr;
    }

@ -222,7 +218,7 @@ public:
    {
        llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);

        auto * has_value_ptr = b.CreatePointerCast(aggregate_data_ptr, b.getInt1Ty()->getPointerTo());
        auto * has_value_ptr = aggregate_data_ptr;
        b.CreateStore(b.getInt1(true), has_value_ptr);

        auto * value_ptr = getValuePtrFromAggregateDataPtr(b, aggregate_data_ptr);

@ -240,7 +236,7 @@ public:
    {
        llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);

        auto * has_value_ptr = b.CreatePointerCast(aggregate_data_ptr, b.getInt1Ty()->getPointerTo());
        auto * has_value_ptr = aggregate_data_ptr;
        auto * has_value_value = b.CreateLoad(b.getInt1Ty(), has_value_ptr);

        auto * head = b.GetInsertBlock();

@ -265,10 +261,10 @@ public:
    {
        llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);

        auto * has_value_dst_ptr = b.CreatePointerCast(aggregate_data_dst_ptr, b.getInt1Ty()->getPointerTo());
        auto * has_value_dst_ptr = aggregate_data_dst_ptr;
        auto * has_value_dst = b.CreateLoad(b.getInt1Ty(), has_value_dst_ptr);

        auto * has_value_src_ptr = b.CreatePointerCast(aggregate_data_src_ptr, b.getInt1Ty()->getPointerTo());
        auto * has_value_src_ptr = aggregate_data_src_ptr;
        auto * has_value_src = b.CreateLoad(b.getInt1Ty(), has_value_src_ptr);

        auto * head = b.GetInsertBlock();

@ -298,7 +294,7 @@ public:
    {
        llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);

        auto * has_value_src_ptr = b.CreatePointerCast(aggregate_data_src_ptr, b.getInt1Ty()->getPointerTo());
        auto * has_value_src_ptr = aggregate_data_src_ptr;
        auto * has_value_src = b.CreateLoad(b.getInt1Ty(), has_value_src_ptr);

        auto * head = b.GetInsertBlock();

@ -324,7 +320,7 @@ public:
    {
        llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);

        auto * has_value_ptr = b.CreatePointerCast(aggregate_data_ptr, b.getInt1Ty()->getPointerTo());
        auto * has_value_ptr = aggregate_data_ptr;
        auto * has_value_value = b.CreateLoad(b.getInt1Ty(), has_value_ptr);

        auto * value = getValueFromAggregateDataPtr(b, aggregate_data_ptr);

@ -371,12 +367,12 @@ public:
    {
        llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);

        auto * has_value_dst_ptr = b.CreatePointerCast(aggregate_data_dst_ptr, b.getInt1Ty()->getPointerTo());
        auto * has_value_dst_ptr = aggregate_data_dst_ptr;
        auto * has_value_dst = b.CreateLoad(b.getInt1Ty(), has_value_dst_ptr);

        auto * value_dst = getValueFromAggregateDataPtr(b, aggregate_data_dst_ptr);

        auto * has_value_src_ptr = b.CreatePointerCast(aggregate_data_src_ptr, b.getInt1Ty()->getPointerTo());
        auto * has_value_src_ptr = aggregate_data_src_ptr;
        auto * has_value_src = b.CreateLoad(b.getInt1Ty(), has_value_src_ptr);

        auto * value_src = getValueFromAggregateDataPtr(b, aggregate_data_src_ptr);

@ -10,7 +10,7 @@
#include <IO/ReadHelpers.h>
#include <IO/WriteHelpers.h>

#include <Common/config.h>
#include "config.h"

#if USE_EMBEDDED_COMPILER
# include <llvm/IR/IRBuilder.h>

@ -225,8 +225,7 @@ public:
        if constexpr (result_is_nullable)
            b.CreateMemSet(aggregate_data_ptr, llvm::ConstantInt::get(b.getInt8Ty(), 0), this->prefix_size, llvm::assumeAligned(this->alignOfData()));

        auto * ty_aggregate_data_ptr = llvm::cast<llvm::PointerType>(aggregate_data_ptr->getType()->getScalarType())->getElementType();
        auto * aggregate_data_ptr_with_prefix_size_offset = b.CreateConstInBoundsGEP1_64(ty_aggregate_data_ptr, aggregate_data_ptr, this->prefix_size);
        auto * aggregate_data_ptr_with_prefix_size_offset = b.CreateConstInBoundsGEP1_64(b.getInt8Ty(), aggregate_data_ptr, this->prefix_size);
        this->nested_function->compileCreate(b, aggregate_data_ptr_with_prefix_size_offset);
    }

@ -236,18 +235,16 @@ public:

        if constexpr (result_is_nullable)
        {
            auto * aggregate_data_is_null_dst_value = b.CreateLoad(aggregate_data_dst_ptr->getType()->getPointerElementType(), aggregate_data_dst_ptr);
            auto * aggregate_data_is_null_src_value = b.CreateLoad(aggregate_data_src_ptr->getType()->getPointerElementType(), aggregate_data_src_ptr);
            auto * aggregate_data_is_null_dst_value = b.CreateLoad(b.getInt8Ty(), aggregate_data_dst_ptr);
            auto * aggregate_data_is_null_src_value = b.CreateLoad(b.getInt8Ty(), aggregate_data_src_ptr);

            auto * is_src_null = nativeBoolCast(b, std::make_shared<DataTypeUInt8>(), aggregate_data_is_null_src_value);
            auto * is_null_result_value = b.CreateSelect(is_src_null, llvm::ConstantInt::get(b.getInt8Ty(), 1), aggregate_data_is_null_dst_value);
            b.CreateStore(is_null_result_value, aggregate_data_dst_ptr);
        }

        auto * ty_aggregate_data_dst_ptr = llvm::cast<llvm::PointerType>(aggregate_data_dst_ptr->getType()->getScalarType())->getElementType();
        auto * aggregate_data_dst_ptr_with_prefix_size_offset = b.CreateConstInBoundsGEP1_64(ty_aggregate_data_dst_ptr, aggregate_data_dst_ptr, this->prefix_size);
        auto * ty_aggregate_data_src_ptr = llvm::cast<llvm::PointerType>(aggregate_data_src_ptr->getType()->getScalarType())->getElementType();
        auto * aggregate_data_src_ptr_with_prefix_size_offset = b.CreateConstInBoundsGEP1_64(ty_aggregate_data_src_ptr, aggregate_data_src_ptr, this->prefix_size);
        auto * aggregate_data_dst_ptr_with_prefix_size_offset = b.CreateConstInBoundsGEP1_64(b.getInt8Ty(), aggregate_data_dst_ptr, this->prefix_size);
        auto * aggregate_data_src_ptr_with_prefix_size_offset = b.CreateConstInBoundsGEP1_64(b.getInt8Ty(), aggregate_data_src_ptr, this->prefix_size);

        this->nested_function->compileMerge(b, aggregate_data_dst_ptr_with_prefix_size_offset, aggregate_data_src_ptr_with_prefix_size_offset);
    }

@ -281,8 +278,7 @@ public:
        b.CreateBr(join_block);

        b.SetInsertPoint(if_not_null);
        auto * ty_aggregate_data_ptr = llvm::cast<llvm::PointerType>(aggregate_data_ptr->getType()->getScalarType())->getElementType();
        auto * aggregate_data_ptr_with_prefix_size_offset = b.CreateConstInBoundsGEP1_64(ty_aggregate_data_ptr, aggregate_data_ptr, this->prefix_size);
        auto * aggregate_data_ptr_with_prefix_size_offset = b.CreateConstInBoundsGEP1_64(b.getInt8Ty(), aggregate_data_ptr, this->prefix_size);
        auto * nested_result = this->nested_function->compileGetResult(builder, aggregate_data_ptr_with_prefix_size_offset);
        b.CreateStore(b.CreateInsertValue(nullable_value, nested_result, {0}), nullable_value_ptr);
        b.CreateBr(join_block);

@ -378,8 +374,7 @@ public:
        if constexpr (result_is_nullable)
            b.CreateStore(llvm::ConstantInt::get(b.getInt8Ty(), 1), aggregate_data_ptr);

        auto * ty_aggregate_data_ptr = llvm::cast<llvm::PointerType>(aggregate_data_ptr->getType()->getScalarType())->getElementType();
        auto * aggregate_data_ptr_with_prefix_size_offset = b.CreateConstInBoundsGEP1_64(ty_aggregate_data_ptr, aggregate_data_ptr, this->prefix_size);
        auto * aggregate_data_ptr_with_prefix_size_offset = b.CreateConstInBoundsGEP1_64(b.getInt8Ty(), aggregate_data_ptr, this->prefix_size);
        this->nested_function->compileAdd(b, aggregate_data_ptr_with_prefix_size_offset, { removeNullable(nullable_type) }, { wrapped_value });
        b.CreateBr(join_block);

@ -603,8 +598,7 @@ public:
        if constexpr (result_is_nullable)
            b.CreateStore(llvm::ConstantInt::get(b.getInt8Ty(), 1), aggregate_data_ptr);

        auto * ty_aggregate_data_ptr = llvm::cast<llvm::PointerType>(aggregate_data_ptr->getType()->getScalarType())->getElementType();
        auto * aggregate_data_ptr_with_prefix_size_offset = b.CreateConstInBoundsGEP1_64(ty_aggregate_data_ptr, aggregate_data_ptr, this->prefix_size);
        auto * aggregate_data_ptr_with_prefix_size_offset = b.CreateConstInBoundsGEP1_64(b.getInt8Ty(), aggregate_data_ptr, this->prefix_size);
        this->nested_function->compileAdd(b, aggregate_data_ptr_with_prefix_size_offset, arguments_types, wrapped_values);
        b.CreateBr(join_block);

@ -32,7 +32,7 @@ struct RankCorrelationData : public StatisticalSample<Float64, Float64>
        std::tie(ranks_y, std::ignore) = computeRanksAndTieCorrection(this->y);

        /// Sizes can be non-equal due to skipped NaNs.
        const auto size = std::min(this->size_x, this->size_y);
        const Float64 size = static_cast<Float64>(std::min(this->size_x, this->size_y));

        /// Count the sum of d^2.
        Float64 answer = 0;

@ -395,7 +395,7 @@ public:
|
||||
break;
|
||||
return (i == events_size) ? base - i : unmatched_idx;
|
||||
}
|
||||
__builtin_unreachable();
|
||||
UNREACHABLE();
|
||||
}
|
||||
|
||||
void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override
|
||||
|
@ -114,7 +114,7 @@ public:
|
||||
return "covarSamp";
|
||||
if constexpr (StatFunc::kind == StatisticsFunctionKind::corr)
|
||||
return "corr";
|
||||
__builtin_unreachable();
|
||||
UNREACHABLE();
|
||||
}
|
||||
|
||||
DataTypePtr getReturnType() const override
|
||||
|
@ -13,7 +13,7 @@
|
||||
|
||||
#include <AggregateFunctions/IAggregateFunction.h>
|
||||
|
||||
#include <Common/config.h>
|
||||
#include "config.h"
|
||||
#include <Common/TargetSpecific.h>
|
||||
|
||||
#if USE_EMBEDDED_COMPILER
|
||||
@@ -407,7 +407,7 @@ public:
             return "sumWithOverflow";
         else if constexpr (Type == AggregateFunctionTypeSumKahan)
             return "sumKahan";
-        __builtin_unreachable();
+        UNREACHABLE();
     }

     explicit AggregateFunctionSum(const DataTypes & argument_types_)
@@ -559,7 +559,7 @@ public:
         llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);

         auto * return_type = toNativeType(b, getReturnType());
-        auto * aggregate_sum_ptr = b.CreatePointerCast(aggregate_data_ptr, return_type->getPointerTo());
+        auto * aggregate_sum_ptr = aggregate_data_ptr;

         b.CreateStore(llvm::Constant::getNullValue(return_type), aggregate_sum_ptr);
     }
@@ -570,7 +570,7 @@ public:

         auto * return_type = toNativeType(b, getReturnType());

-        auto * sum_value_ptr = b.CreatePointerCast(aggregate_data_ptr, return_type->getPointerTo());
+        auto * sum_value_ptr = aggregate_data_ptr;
         auto * sum_value = b.CreateLoad(return_type, sum_value_ptr);

         const auto & argument_type = arguments_types[0];
@@ -588,10 +588,10 @@ public:

         auto * return_type = toNativeType(b, getReturnType());

-        auto * sum_value_dst_ptr = b.CreatePointerCast(aggregate_data_dst_ptr, return_type->getPointerTo());
+        auto * sum_value_dst_ptr = aggregate_data_dst_ptr;
         auto * sum_value_dst = b.CreateLoad(return_type, sum_value_dst_ptr);

-        auto * sum_value_src_ptr = b.CreatePointerCast(aggregate_data_src_ptr, return_type->getPointerTo());
+        auto * sum_value_src_ptr = aggregate_data_src_ptr;
         auto * sum_value_src = b.CreateLoad(return_type, sum_value_src_ptr);

         auto * sum_return_value = sum_value_dst->getType()->isIntegerTy() ? b.CreateAdd(sum_value_dst, sum_value_src) : b.CreateFAdd(sum_value_dst, sum_value_src);
@@ -603,7 +603,7 @@ public:
         llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);

         auto * return_type = toNativeType(b, getReturnType());
-        auto * sum_value_ptr = b.CreatePointerCast(aggregate_data_ptr, return_type->getPointerTo());
+        auto * sum_value_ptr = aggregate_data_ptr;

         return b.CreateLoad(return_type, sum_value_ptr);
     }
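The four AggregateFunctionSum hunks above all drop CreatePointerCast for the same reason as the earlier GEP changes: under opaque pointers every pointer has one LLVM type, so a cast between pointee types is a no-op, and loads/stores already name the value type explicitly. A sketch under the same LLVM >= 14 assumption (loadSum is a hypothetical helper):

```cpp
#include <llvm/IR/IRBuilder.h>

// Read the accumulated sum back out of the untyped aggregate state.
llvm::Value * loadSum(llvm::IRBuilder<> & b, llvm::Type * return_type, llvm::Value * aggregate_data_ptr)
{
    // Pre-opaque-pointers this required:
    //   b.CreatePointerCast(aggregate_data_ptr, return_type->getPointerTo())
    // Now the pointer is used directly; the load instruction carries the type.
    return b.CreateLoad(return_type, aggregate_data_ptr);
}
```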
@@ -131,7 +131,7 @@ namespace
         return createAggregateFunctionWithHashType<20>(use_64_bit_hash, argument_types, params);
     }

-    __builtin_unreachable();
+    UNREACHABLE();
 }

 }
@@ -10,7 +10,7 @@
 #include <Common/Exception.h>
 #include <base/types.h>

-#include "config_core.h"
+#include "config.h"

 #include <cstddef>
 #include <memory>
@@ -1,6 +1,6 @@
 #pragma once

-#include <Common/config.h>
+#include "config.h"

 #if USE_DATASKETCHES

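The include hunks in this diff replace the old generated headers (<Common/config.h>, "config_core.h") with a single consolidated "config.h". As a rough illustration, such a CMake-generated header boils down to plain 0/1 feature macros that code can test with #if; the flags and values below are made up:

```cpp
// Illustrative sketch only; the real generated header carries many more flags.
#pragma once

#define USE_EMBEDDED_COMPILER 1 /* hypothetical value */
#define USE_DATASKETCHES 0      /* hypothetical value */
```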
@@ -27,7 +27,6 @@ namespace ErrorCodes
 {
     extern const int INCONSISTENT_METADATA_FOR_BACKUP;
     extern const int CANNOT_BACKUP_TABLE;
-    extern const int TABLE_IS_DROPPED;
     extern const int UNKNOWN_TABLE;
     extern const int LOGICAL_ERROR;
 }
@@ -526,14 +525,10 @@ void BackupEntriesCollector::lockTablesForReading()
         auto storage = table_info.storage;
         if (storage)
         {
-            try
-            {
-                table_info.table_lock = storage->lockForShare(context->getInitialQueryId(), context->getSettingsRef().lock_acquire_timeout);
-            }
-            catch (Exception & e)
-            {
-                if (e.code() != ErrorCodes::TABLE_IS_DROPPED)
-                    throw;
+            table_info.table_lock = storage->tryLockForShare(context->getInitialQueryId(), context->getSettingsRef().lock_acquire_timeout);
+            if (table_info.table_lock == nullptr)
+            {
+                // Table was dropped while acquiring the lock
                 throw Exception(ErrorCodes::INCONSISTENT_METADATA_FOR_BACKUP, "{} was dropped during scanning", tableNameWithTypeToString(table_name.database, table_name.table, true));
             }
         }
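The control-flow change above swaps exception-driven handling (catching TABLE_IS_DROPPED) for a tryLockForShare() that returns an empty lock holder, which is cheaper and keeps the "table vanished" case on an explicit branch. A self-contained sketch of that pattern with stand-in types (all names hypothetical, not the real IStorage API):

```cpp
#include <memory>
#include <stdexcept>
#include <string>

struct TableLock {};
using TableLockHolder = std::shared_ptr<TableLock>;

// Hypothetical stand-in for IStorage::tryLockForShare(): returns nullptr
// instead of throwing when the table was dropped concurrently.
TableLockHolder tryLockForShare(bool table_still_exists)
{
    return table_still_exists ? std::make_shared<TableLock>() : nullptr;
}

void lockTableForBackup(bool table_still_exists, const std::string & table_name)
{
    // The diff replaces a try/catch on TABLE_IS_DROPPED with this check:
    auto lock = tryLockForShare(table_still_exists);
    if (lock == nullptr)
        throw std::runtime_error(table_name + " was dropped during scanning");
}
```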
@@ -43,8 +43,8 @@ namespace ErrorCodes

 namespace
 {
-    const UInt64 INITIAL_BACKUP_VERSION = 1;
-    const UInt64 CURRENT_BACKUP_VERSION = 1;
+    const int INITIAL_BACKUP_VERSION = 1;
+    const int CURRENT_BACKUP_VERSION = 1;

     using SizeAndChecksum = IBackup::SizeAndChecksum;
     using FileInfo = IBackupCoordination::FileInfo;
@@ -275,7 +275,7 @@ void BackupImpl::writeBackupMetadata()
     assert(!is_internal_backup);

     Poco::AutoPtr<Poco::Util::XMLConfiguration> config{new Poco::Util::XMLConfiguration()};
-    config->setUInt("version", CURRENT_BACKUP_VERSION);
+    config->setInt("version", CURRENT_BACKUP_VERSION);
     config->setString("timestamp", toString(LocalDateTime{timestamp}));
     config->setString("uuid", toString(*uuid));
@@ -302,7 +302,7 @@ void BackupImpl::writeBackupMetadata()
     {
         String prefix = index ? "contents.file[" + std::to_string(index) + "]." : "contents.file.";
         config->setString(prefix + "name", info.file_name);
-        config->setUInt(prefix + "size", info.size);
+        config->setUInt64(prefix + "size", info.size);
         if (info.size)
         {
             config->setString(prefix + "checksum", hexChecksum(info.checksum));
@@ -311,7 +311,7 @@ void BackupImpl::writeBackupMetadata()
             config->setBool(prefix + "use_base", true);
             if (info.base_size != info.size)
             {
-                config->setUInt(prefix + "base_size", info.base_size);
+                config->setUInt64(prefix + "base_size", info.base_size);
                 config->setString(prefix + "base_checksum", hexChecksum(info.base_checksum));
             }
         }
@@ -367,7 +367,7 @@ void BackupImpl::readBackupMetadata()
     Poco::AutoPtr<Poco::Util::XMLConfiguration> config{new Poco::Util::XMLConfiguration()};
     config->load(stream);

-    version = config->getUInt("version");
+    version = config->getInt("version");
     if ((version < INITIAL_BACKUP_VERSION) || (version > CURRENT_BACKUP_VERSION))
         throw Exception(ErrorCodes::BACKUP_VERSION_NOT_SUPPORTED, "Backup {}: Version {} is not supported", backup_name, version);

@@ -389,13 +389,13 @@ void BackupImpl::readBackupMetadata()
         String prefix = "contents." + key + ".";
         FileInfo info;
         info.file_name = config->getString(prefix + "name");
-        info.size = config->getUInt(prefix + "size");
+        info.size = config->getUInt64(prefix + "size");
         if (info.size)
         {
             info.checksum = unhexChecksum(config->getString(prefix + "checksum"));

             bool use_base = config->getBool(prefix + "use_base", false);
-            info.base_size = config->getUInt(prefix + "base_size", use_base ? info.size : 0);
+            info.base_size = config->getUInt64(prefix + "base_size", use_base ? info.size : 0);
             if (info.base_size)
                 use_base = true;

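The theme of the metadata hunks is integer-width consistency: the backup version is a small integer written with setInt() and read with getInt(), while file sizes are 64-bit and would be truncated by the 32-bit getUInt() on files larger than 4 GiB, hence the move to setUInt64()/getUInt64(). A small round-trip demo with Poco's XMLConfiguration (illustrative key and value):

```cpp
#include <Poco/AutoPtr.h>
#include <Poco/Types.h>
#include <Poco/Util/XMLConfiguration.h>

int main()
{
    Poco::AutoPtr<Poco::Util::XMLConfiguration> config(new Poco::Util::XMLConfiguration);
    config->loadEmpty("config"); // start from an empty <config/> document

    // 5 GiB does not fit in 32 bits: the setUInt()/getUInt() pair would
    // truncate it, which is why the diff moves to the 64-bit accessors.
    const Poco::UInt64 size = 5ULL * 1024 * 1024 * 1024;
    config->setUInt64("contents.file.size", size);

    return config->getUInt64("contents.file.size") == size ? 0 : 1;
}
```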
@@ -691,7 +691,7 @@ void BackupImpl::writeFile(const String & file_name, BackupEntryPtr entry)
     std::string from_file_name = "memory buffer";
     if (auto fname = entry->getFilePath(); !fname.empty())
         from_file_name = "file " + fname;
-    LOG_TRACE(log, "Writing backup for file {} from file {}", file_name, from_file_name);
+    LOG_TRACE(log, "Writing backup for file {} from {}", file_name, from_file_name);

     auto adjusted_path = removeLeadingSlash(file_name);
     if (coordination->getFileInfo(adjusted_path))