mirror of
https://github.com/ClickHouse/ClickHouse.git
synced 2024-11-29 02:52:13 +00:00
Merge branch 'master' into addressToLineWithInlines
This commit is contained in:
commit
9f9fce07e2
3
.github/workflows/master.yml
vendored
3
.github/workflows/master.yml
vendored
@ -86,6 +86,7 @@ jobs:
|
|||||||
StyleCheck:
|
StyleCheck:
|
||||||
needs: DockerHubPush
|
needs: DockerHubPush
|
||||||
runs-on: [self-hosted, style-checker]
|
runs-on: [self-hosted, style-checker]
|
||||||
|
if: ${{ success() || failure() }}
|
||||||
steps:
|
steps:
|
||||||
- name: Set envs
|
- name: Set envs
|
||||||
run: |
|
run: |
|
||||||
@ -93,6 +94,8 @@ jobs:
|
|||||||
TEMP_PATH=${{ runner.temp }}/style_check
|
TEMP_PATH=${{ runner.temp }}/style_check
|
||||||
EOF
|
EOF
|
||||||
- name: Download changed images
|
- name: Download changed images
|
||||||
|
# even if artifact does not exist, e.g. on `do not test` label or failed Docker job
|
||||||
|
continue-on-error: true
|
||||||
uses: actions/download-artifact@v2
|
uses: actions/download-artifact@v2
|
||||||
with:
|
with:
|
||||||
name: changed_images
|
name: changed_images
|
||||||
|
3
.github/workflows/pull_request.yml
vendored
3
.github/workflows/pull_request.yml
vendored
@ -111,6 +111,7 @@ jobs:
|
|||||||
StyleCheck:
|
StyleCheck:
|
||||||
needs: DockerHubPush
|
needs: DockerHubPush
|
||||||
runs-on: [self-hosted, style-checker]
|
runs-on: [self-hosted, style-checker]
|
||||||
|
if: ${{ success() || failure() }}
|
||||||
steps:
|
steps:
|
||||||
- name: Set envs
|
- name: Set envs
|
||||||
run: |
|
run: |
|
||||||
@ -118,6 +119,8 @@ jobs:
|
|||||||
TEMP_PATH=${{ runner.temp }}/style_check
|
TEMP_PATH=${{ runner.temp }}/style_check
|
||||||
EOF
|
EOF
|
||||||
- name: Download changed images
|
- name: Download changed images
|
||||||
|
# even if artifact does not exist, e.g. on `do not test` label or failed Docker job
|
||||||
|
continue-on-error: true
|
||||||
uses: actions/download-artifact@v2
|
uses: actions/download-artifact@v2
|
||||||
with:
|
with:
|
||||||
name: changed_images
|
name: changed_images
|
||||||
|
1
.github/workflows/release.yml
vendored
1
.github/workflows/release.yml
vendored
@ -22,7 +22,6 @@ jobs:
|
|||||||
- name: Check out repository code
|
- name: Check out repository code
|
||||||
uses: actions/checkout@v2
|
uses: actions/checkout@v2
|
||||||
- name: Download packages and push to Artifactory
|
- name: Download packages and push to Artifactory
|
||||||
env:
|
|
||||||
run: |
|
run: |
|
||||||
rm -rf "$TEMP_PATH" && mkdir -p "$REPO_COPY"
|
rm -rf "$TEMP_PATH" && mkdir -p "$REPO_COPY"
|
||||||
cp -r "$GITHUB_WORKSPACE" "$REPO_COPY"
|
cp -r "$GITHUB_WORKSPACE" "$REPO_COPY"
|
||||||
|
38
.github/workflows/tags_stable.yml
vendored
Normal file
38
.github/workflows/tags_stable.yml
vendored
Normal file
@ -0,0 +1,38 @@
|
|||||||
|
name: TagsStableWorkflow
|
||||||
|
# - Gets artifacts from S3
|
||||||
|
# - Sends it to JFROG Artifactory
|
||||||
|
# - Adds them to the release assets
|
||||||
|
|
||||||
|
on: # yamllint disable-line rule:truthy
|
||||||
|
push:
|
||||||
|
tags:
|
||||||
|
- 'v*-stable'
|
||||||
|
- 'v*-lts'
|
||||||
|
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
UpdateVersions:
|
||||||
|
runs-on: [self-hosted, style-checker]
|
||||||
|
steps:
|
||||||
|
- name: Get tag name
|
||||||
|
run: echo "GITHUB_TAG=${GITHUB_REF#refs/tags/}" >> "$GITHUB_ENV"
|
||||||
|
- name: Check out repository code
|
||||||
|
uses: actions/checkout@v2
|
||||||
|
with:
|
||||||
|
ref: master
|
||||||
|
- name: Generate versions
|
||||||
|
run: |
|
||||||
|
git fetch --tags
|
||||||
|
./utils/list-versions/list-versions.sh > ./utils/list-versions/version_date.tsv
|
||||||
|
- name: Create Pull Request
|
||||||
|
uses: peter-evans/create-pull-request@v3
|
||||||
|
with:
|
||||||
|
commit-message: Update version_date.tsv after ${{ env.GITHUB_TAG }}
|
||||||
|
branch: auto/${{ env.GITHUB_TAG }}
|
||||||
|
delete-branch: true
|
||||||
|
title: Update version_date.tsv after ${{ env.GITHUB_TAG }}
|
||||||
|
body: |
|
||||||
|
Update version_date.tsv after ${{ env.GITHUB_TAG }}
|
||||||
|
|
||||||
|
Changelog category (leave one):
|
||||||
|
- Not for changelog (changelog entry is not required)
|
@ -67,7 +67,7 @@ if (EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/.git" AND NOT EXISTS "${ClickHouse_SOURC
|
|||||||
message (FATAL_ERROR "Submodules are not initialized. Run\n\tgit submodule update --init --recursive")
|
message (FATAL_ERROR "Submodules are not initialized. Run\n\tgit submodule update --init --recursive")
|
||||||
endif ()
|
endif ()
|
||||||
|
|
||||||
include (cmake/find/ccache.cmake)
|
include (cmake/ccache.cmake)
|
||||||
|
|
||||||
# Take care to add prlimit in command line before ccache, or else ccache thinks that
|
# Take care to add prlimit in command line before ccache, or else ccache thinks that
|
||||||
# prlimit is compiler, and clang++ is its input file, and refuses to work with
|
# prlimit is compiler, and clang++ is its input file, and refuses to work with
|
||||||
|
@ -22,9 +22,10 @@ The following versions of ClickHouse server are currently being supported with s
|
|||||||
| 21.7 | :x: |
|
| 21.7 | :x: |
|
||||||
| 21.8 | ✅ |
|
| 21.8 | ✅ |
|
||||||
| 21.9 | :x: |
|
| 21.9 | :x: |
|
||||||
| 21.10 | ✅ |
|
| 21.10 | :x: |
|
||||||
| 21.11 | ✅ |
|
| 21.11 | ✅ |
|
||||||
| 21.12 | ✅ |
|
| 21.12 | ✅ |
|
||||||
|
| 22.1 | ✅ |
|
||||||
|
|
||||||
## Reporting a Vulnerability
|
## Reporting a Vulnerability
|
||||||
|
|
||||||
|
@ -81,7 +81,7 @@ replxx::Replxx::completions_t LineReader::Suggest::getCompletions(const String &
|
|||||||
std::lock_guard lock(mutex);
|
std::lock_guard lock(mutex);
|
||||||
|
|
||||||
/// Only perform case sensitive completion when the prefix string contains any uppercase characters
|
/// Only perform case sensitive completion when the prefix string contains any uppercase characters
|
||||||
if (std::none_of(prefix.begin(), prefix.end(), [&](auto c) { return c >= 'A' && c <= 'Z'; }))
|
if (std::none_of(prefix.begin(), prefix.end(), [](char32_t x) { return iswupper(static_cast<wint_t>(x)); }))
|
||||||
range = std::equal_range(
|
range = std::equal_range(
|
||||||
words_no_case.begin(), words_no_case.end(), last_word, [prefix_length](std::string_view s, std::string_view prefix_searched)
|
words_no_case.begin(), words_no_case.end(), last_word, [prefix_length](std::string_view s, std::string_view prefix_searched)
|
||||||
{
|
{
|
||||||
|
@ -25,13 +25,6 @@ void trim(String & s)
|
|||||||
s.erase(std::find_if(s.rbegin(), s.rend(), [](unsigned char ch) { return !std::isspace(ch); }).base(), s.end());
|
s.erase(std::find_if(s.rbegin(), s.rend(), [](unsigned char ch) { return !std::isspace(ch); }).base(), s.end());
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Check if string ends with given character after skipping whitespaces.
|
|
||||||
bool ends_with(const std::string_view & s, const std::string_view & p)
|
|
||||||
{
|
|
||||||
auto ss = std::string_view(s.data(), s.rend() - std::find_if(s.rbegin(), s.rend(), [](unsigned char ch) { return !std::isspace(ch); }));
|
|
||||||
return ss.ends_with(p);
|
|
||||||
}
|
|
||||||
|
|
||||||
std::string getEditor()
|
std::string getEditor()
|
||||||
{
|
{
|
||||||
const char * editor = std::getenv("EDITOR");
|
const char * editor = std::getenv("EDITOR");
|
||||||
@ -132,6 +125,12 @@ void convertHistoryFile(const std::string & path, replxx::Replxx & rx)
|
|||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static bool replxx_last_is_delimiter = false;
|
||||||
|
void ReplxxLineReader::setLastIsDelimiter(bool flag)
|
||||||
|
{
|
||||||
|
replxx_last_is_delimiter = flag;
|
||||||
|
}
|
||||||
|
|
||||||
ReplxxLineReader::ReplxxLineReader(
|
ReplxxLineReader::ReplxxLineReader(
|
||||||
Suggest & suggest,
|
Suggest & suggest,
|
||||||
const String & history_file_path_,
|
const String & history_file_path_,
|
||||||
@ -185,6 +184,7 @@ ReplxxLineReader::ReplxxLineReader(
|
|||||||
rx.set_completion_callback(callback);
|
rx.set_completion_callback(callback);
|
||||||
rx.set_complete_on_empty(false);
|
rx.set_complete_on_empty(false);
|
||||||
rx.set_word_break_characters(word_break_characters);
|
rx.set_word_break_characters(word_break_characters);
|
||||||
|
rx.set_ignore_case(true);
|
||||||
|
|
||||||
if (highlighter)
|
if (highlighter)
|
||||||
rx.set_highlighter_callback(highlighter);
|
rx.set_highlighter_callback(highlighter);
|
||||||
@ -196,21 +196,11 @@ ReplxxLineReader::ReplxxLineReader(
|
|||||||
|
|
||||||
auto commit_action = [this](char32_t code)
|
auto commit_action = [this](char32_t code)
|
||||||
{
|
{
|
||||||
std::string_view str = rx.get_state().text();
|
|
||||||
|
|
||||||
/// Always commit line when we see extender at the end. It will start a new prompt.
|
|
||||||
for (const auto * extender : extenders)
|
|
||||||
if (ends_with(str, extender))
|
|
||||||
return rx.invoke(Replxx::ACTION::COMMIT_LINE, code);
|
|
||||||
|
|
||||||
/// If we see an delimiter at the end, commit right away.
|
|
||||||
for (const auto * delimiter : delimiters)
|
|
||||||
if (ends_with(str, delimiter))
|
|
||||||
return rx.invoke(Replxx::ACTION::COMMIT_LINE, code);
|
|
||||||
|
|
||||||
/// If we allow multiline and there is already something in the input, start a newline.
|
/// If we allow multiline and there is already something in the input, start a newline.
|
||||||
if (multiline && !input.empty())
|
/// NOTE: Lexer is only available if we use highlighter.
|
||||||
|
if (highlighter && multiline && !replxx_last_is_delimiter)
|
||||||
return rx.invoke(Replxx::ACTION::NEW_LINE, code);
|
return rx.invoke(Replxx::ACTION::NEW_LINE, code);
|
||||||
|
replxx_last_is_delimiter = false;
|
||||||
return rx.invoke(Replxx::ACTION::COMMIT_LINE, code);
|
return rx.invoke(Replxx::ACTION::COMMIT_LINE, code);
|
||||||
};
|
};
|
||||||
/// bind C-j to ENTER action.
|
/// bind C-j to ENTER action.
|
||||||
|
@ -19,6 +19,9 @@ public:
|
|||||||
|
|
||||||
void enableBracketedPaste() override;
|
void enableBracketedPaste() override;
|
||||||
|
|
||||||
|
/// If highlight is on, we will set a flag to denote whether the last token is a delimiter.
|
||||||
|
/// This is useful to determine the behavior of <ENTER> key when multiline is enabled.
|
||||||
|
static void setLastIsDelimiter(bool flag);
|
||||||
private:
|
private:
|
||||||
InputStatus readOneLine(const String & prompt) override;
|
InputStatus readOneLine(const String & prompt) override;
|
||||||
void addToHistory(const String & line) override;
|
void addToHistory(const String & line) override;
|
||||||
|
@ -12,6 +12,8 @@ namespace
|
|||||||
{
|
{
|
||||||
template <typename... Ts> constexpr size_t numArgs(Ts &&...) { return sizeof...(Ts); }
|
template <typename... Ts> constexpr size_t numArgs(Ts &&...) { return sizeof...(Ts); }
|
||||||
template <typename T, typename... Ts> constexpr auto firstArg(T && x, Ts &&...) { return std::forward<T>(x); }
|
template <typename T, typename... Ts> constexpr auto firstArg(T && x, Ts &&...) { return std::forward<T>(x); }
|
||||||
|
/// For implicit conversion of fmt::basic_runtime<> to char* for std::string ctor
|
||||||
|
template <typename T, typename... Ts> constexpr auto firstArg(fmt::basic_runtime<T> && data, Ts &&...) { return data.str.data(); }
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@ -317,7 +317,7 @@ private:
|
|||||||
else
|
else
|
||||||
error_message = "Sanitizer trap.";
|
error_message = "Sanitizer trap.";
|
||||||
|
|
||||||
LOG_FATAL(log, error_message);
|
LOG_FATAL(log, fmt::runtime(error_message));
|
||||||
|
|
||||||
if (stack_trace.getSize())
|
if (stack_trace.getSize())
|
||||||
{
|
{
|
||||||
@ -330,11 +330,11 @@ private:
|
|||||||
for (size_t i = stack_trace.getOffset(); i < stack_trace.getSize(); ++i)
|
for (size_t i = stack_trace.getOffset(); i < stack_trace.getSize(); ++i)
|
||||||
bare_stacktrace << ' ' << stack_trace.getFramePointers()[i];
|
bare_stacktrace << ' ' << stack_trace.getFramePointers()[i];
|
||||||
|
|
||||||
LOG_FATAL(log, bare_stacktrace.str());
|
LOG_FATAL(log, fmt::runtime(bare_stacktrace.str()));
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Write symbolized stack trace line by line for better grep-ability.
|
/// Write symbolized stack trace line by line for better grep-ability.
|
||||||
stack_trace.toStringEveryLine([&](const std::string & s) { LOG_FATAL(log, s); });
|
stack_trace.toStringEveryLine([&](const std::string & s) { LOG_FATAL(log, fmt::runtime(s)); });
|
||||||
|
|
||||||
#if defined(OS_LINUX)
|
#if defined(OS_LINUX)
|
||||||
/// Write information about binary checksum. It can be difficult to calculate, so do it only after printing stack trace.
|
/// Write information about binary checksum. It can be difficult to calculate, so do it only after printing stack trace.
|
||||||
|
@ -22,7 +22,7 @@ set(CMAKE_OSX_DEPLOYMENT_TARGET 10.15)
|
|||||||
set(THREADS_PREFER_PTHREAD_FLAG ON)
|
set(THREADS_PREFER_PTHREAD_FLAG ON)
|
||||||
find_package(Threads REQUIRED)
|
find_package(Threads REQUIRED)
|
||||||
|
|
||||||
include (cmake/find/cxx.cmake)
|
include (cmake/cxx.cmake)
|
||||||
|
|
||||||
target_link_libraries(global-group INTERFACE
|
target_link_libraries(global-group INTERFACE
|
||||||
$<TARGET_PROPERTY:global-libs,INTERFACE_LINK_LIBRARIES>
|
$<TARGET_PROPERTY:global-libs,INTERFACE_LINK_LIBRARIES>
|
||||||
|
@ -22,8 +22,8 @@ set(CMAKE_C_STANDARD_LIBRARIES ${DEFAULT_LIBS})
|
|||||||
set(THREADS_PREFER_PTHREAD_FLAG ON)
|
set(THREADS_PREFER_PTHREAD_FLAG ON)
|
||||||
find_package(Threads REQUIRED)
|
find_package(Threads REQUIRED)
|
||||||
|
|
||||||
include (cmake/find/unwind.cmake)
|
include (cmake/unwind.cmake)
|
||||||
include (cmake/find/cxx.cmake)
|
include (cmake/cxx.cmake)
|
||||||
|
|
||||||
target_link_libraries(global-group INTERFACE
|
target_link_libraries(global-group INTERFACE
|
||||||
$<TARGET_PROPERTY:global-libs,INTERFACE_LINK_LIBRARIES>
|
$<TARGET_PROPERTY:global-libs,INTERFACE_LINK_LIBRARIES>
|
||||||
|
@ -42,8 +42,8 @@ if (NOT OS_ANDROID)
|
|||||||
add_subdirectory(base/harmful)
|
add_subdirectory(base/harmful)
|
||||||
endif ()
|
endif ()
|
||||||
|
|
||||||
include (cmake/find/unwind.cmake)
|
include (cmake/unwind.cmake)
|
||||||
include (cmake/find/cxx.cmake)
|
include (cmake/cxx.cmake)
|
||||||
|
|
||||||
target_link_libraries(global-group INTERFACE
|
target_link_libraries(global-group INTERFACE
|
||||||
-Wl,--start-group
|
-Wl,--start-group
|
||||||
|
@ -29,12 +29,6 @@ if (OS_FREEBSD)
|
|||||||
message (FATAL_ERROR "Using internal parquet library on FreeBSD is not supported")
|
message (FATAL_ERROR "Using internal parquet library on FreeBSD is not supported")
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
if(USE_STATIC_LIBRARIES)
|
|
||||||
set(FLATBUFFERS_LIBRARY flatbuffers)
|
|
||||||
else()
|
|
||||||
set(FLATBUFFERS_LIBRARY flatbuffers_shared)
|
|
||||||
endif()
|
|
||||||
|
|
||||||
set (CMAKE_CXX_STANDARD 17)
|
set (CMAKE_CXX_STANDARD 17)
|
||||||
|
|
||||||
set(ARROW_VERSION "6.0.1")
|
set(ARROW_VERSION "6.0.1")
|
||||||
@ -95,9 +89,16 @@ set(FLATBUFFERS_BUILD_TESTS OFF CACHE BOOL "Skip flatbuffers tests")
|
|||||||
|
|
||||||
add_subdirectory(${FLATBUFFERS_SRC_DIR} "${FLATBUFFERS_BINARY_DIR}")
|
add_subdirectory(${FLATBUFFERS_SRC_DIR} "${FLATBUFFERS_BINARY_DIR}")
|
||||||
|
|
||||||
message(STATUS "FLATBUFFERS_LIBRARY: ${FLATBUFFERS_LIBRARY}")
|
add_library(_flatbuffers INTERFACE)
|
||||||
|
if(USE_STATIC_LIBRARIES)
|
||||||
|
target_link_libraries(_flatbuffers INTERFACE flatbuffers)
|
||||||
|
else()
|
||||||
|
target_link_libraries(_flatbuffers INTERFACE flatbuffers_shared)
|
||||||
|
endif()
|
||||||
|
target_include_directories(_flatbuffers INTERFACE ${FLATBUFFERS_INCLUDE_DIR})
|
||||||
|
|
||||||
# === hdfs
|
# === hdfs
|
||||||
|
# NOTE: cannot use ch_contrib::hdfs since it's INCLUDE_DIRECTORIES does not includes trailing "hdfs/"
|
||||||
set(HDFS_INCLUDE_DIR "${ClickHouse_SOURCE_DIR}/contrib/libhdfs3/include/hdfs/")
|
set(HDFS_INCLUDE_DIR "${ClickHouse_SOURCE_DIR}/contrib/libhdfs3/include/hdfs/")
|
||||||
|
|
||||||
# arrow-cmake cmake file calling orc cmake subroutine which detects certain compiler features.
|
# arrow-cmake cmake file calling orc cmake subroutine which detects certain compiler features.
|
||||||
@ -123,8 +124,6 @@ configure_file("${ORC_SOURCE_SRC_DIR}/Adaptor.hh.in" "${ORC_BUILD_INCLUDE_DIR}/A
|
|||||||
|
|
||||||
# ARROW_ORC + adapters/orc/CMakefiles
|
# ARROW_ORC + adapters/orc/CMakefiles
|
||||||
set(ORC_SRCS
|
set(ORC_SRCS
|
||||||
"${ARROW_SRC_DIR}/arrow/adapters/orc/adapter.cc"
|
|
||||||
"${ARROW_SRC_DIR}/arrow/adapters/orc/adapter_util.cc"
|
|
||||||
"${ORC_SOURCE_SRC_DIR}/Exceptions.cc"
|
"${ORC_SOURCE_SRC_DIR}/Exceptions.cc"
|
||||||
"${ORC_SOURCE_SRC_DIR}/OrcFile.cc"
|
"${ORC_SOURCE_SRC_DIR}/OrcFile.cc"
|
||||||
"${ORC_SOURCE_SRC_DIR}/Reader.cc"
|
"${ORC_SOURCE_SRC_DIR}/Reader.cc"
|
||||||
@ -151,6 +150,22 @@ set(ORC_SRCS
|
|||||||
"${ORC_ADDITION_SOURCE_DIR}/orc_proto.pb.cc"
|
"${ORC_ADDITION_SOURCE_DIR}/orc_proto.pb.cc"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
add_library(_orc ${ORC_SRCS})
|
||||||
|
target_link_libraries(_orc PRIVATE
|
||||||
|
ch_contrib::protobuf
|
||||||
|
ch_contrib::lz4
|
||||||
|
ch_contrib::snappy
|
||||||
|
ch_contrib::zlib
|
||||||
|
ch_contrib::zstd)
|
||||||
|
target_include_directories(_orc SYSTEM BEFORE PUBLIC ${ORC_INCLUDE_DIR})
|
||||||
|
target_include_directories(_orc SYSTEM BEFORE PUBLIC ${ORC_BUILD_INCLUDE_DIR})
|
||||||
|
target_include_directories(_orc SYSTEM PRIVATE
|
||||||
|
${ORC_SOURCE_SRC_DIR}
|
||||||
|
${ORC_SOURCE_WRAP_DIR}
|
||||||
|
${ORC_BUILD_SRC_DIR}
|
||||||
|
${ORC_ADDITION_SOURCE_DIR}
|
||||||
|
${ARROW_SRC_DIR})
|
||||||
|
|
||||||
|
|
||||||
# === arrow
|
# === arrow
|
||||||
|
|
||||||
@ -336,7 +351,8 @@ set(ARROW_SRCS
|
|||||||
"${LIBRARY_DIR}/ipc/reader.cc"
|
"${LIBRARY_DIR}/ipc/reader.cc"
|
||||||
"${LIBRARY_DIR}/ipc/writer.cc"
|
"${LIBRARY_DIR}/ipc/writer.cc"
|
||||||
|
|
||||||
${ORC_SRCS}
|
"${ARROW_SRC_DIR}/arrow/adapters/orc/adapter.cc"
|
||||||
|
"${ARROW_SRC_DIR}/arrow/adapters/orc/adapter_util.cc"
|
||||||
)
|
)
|
||||||
|
|
||||||
add_definitions(-DARROW_WITH_LZ4)
|
add_definitions(-DARROW_WITH_LZ4)
|
||||||
@ -356,30 +372,27 @@ endif ()
|
|||||||
|
|
||||||
add_library(_arrow ${ARROW_SRCS})
|
add_library(_arrow ${ARROW_SRCS})
|
||||||
|
|
||||||
# Arrow dependencies
|
target_link_libraries(_arrow PRIVATE
|
||||||
add_dependencies(_arrow ${FLATBUFFERS_LIBRARY})
|
boost::filesystem
|
||||||
|
|
||||||
target_link_libraries(_arrow PRIVATE ${FLATBUFFERS_LIBRARY} boost::filesystem)
|
_flatbuffers
|
||||||
|
|
||||||
|
ch_contrib::double_conversion
|
||||||
|
|
||||||
|
ch_contrib::lz4
|
||||||
|
ch_contrib::snappy
|
||||||
|
ch_contrib::zlib
|
||||||
|
ch_contrib::zstd
|
||||||
|
ch_contrib::zstd
|
||||||
|
)
|
||||||
|
target_link_libraries(_arrow PUBLIC _orc)
|
||||||
|
|
||||||
add_dependencies(_arrow protoc)
|
add_dependencies(_arrow protoc)
|
||||||
|
|
||||||
target_include_directories(_arrow SYSTEM BEFORE PUBLIC ${ARROW_SRC_DIR})
|
target_include_directories(_arrow SYSTEM BEFORE PUBLIC ${ARROW_SRC_DIR})
|
||||||
target_include_directories(_arrow SYSTEM BEFORE PUBLIC "${CMAKE_CURRENT_BINARY_DIR}/cpp/src")
|
target_include_directories(_arrow SYSTEM BEFORE PUBLIC "${CMAKE_CURRENT_BINARY_DIR}/cpp/src")
|
||||||
target_link_libraries(_arrow PRIVATE ch_contrib::double_conversion)
|
|
||||||
target_link_libraries(_arrow PRIVATE ch_contrib::protobuf)
|
|
||||||
target_link_libraries(_arrow PRIVATE ch_contrib::lz4)
|
|
||||||
target_link_libraries(_arrow PRIVATE ch_contrib::snappy)
|
|
||||||
target_link_libraries(_arrow PRIVATE ch_contrib::zlib)
|
|
||||||
target_link_libraries(_arrow PRIVATE ch_contrib::zstd)
|
|
||||||
|
|
||||||
target_include_directories(_arrow SYSTEM BEFORE PUBLIC ${ORC_INCLUDE_DIR})
|
|
||||||
target_include_directories(_arrow SYSTEM BEFORE PUBLIC ${ORC_BUILD_INCLUDE_DIR})
|
|
||||||
target_include_directories(_arrow SYSTEM PRIVATE ${ORC_SOURCE_SRC_DIR})
|
|
||||||
target_include_directories(_arrow SYSTEM PRIVATE ${ORC_SOURCE_WRAP_DIR})
|
|
||||||
target_include_directories(_arrow SYSTEM PRIVATE ${ORC_BUILD_SRC_DIR})
|
|
||||||
target_include_directories(_arrow SYSTEM PRIVATE ${ORC_ADDITION_SOURCE_DIR})
|
|
||||||
target_include_directories(_arrow SYSTEM PRIVATE ${ARROW_SRC_DIR})
|
target_include_directories(_arrow SYSTEM PRIVATE ${ARROW_SRC_DIR})
|
||||||
target_include_directories(_arrow SYSTEM PRIVATE ${FLATBUFFERS_INCLUDE_DIR})
|
|
||||||
target_include_directories(_arrow SYSTEM PRIVATE ${HDFS_INCLUDE_DIR})
|
target_include_directories(_arrow SYSTEM PRIVATE ${HDFS_INCLUDE_DIR})
|
||||||
|
|
||||||
# === parquet
|
# === parquet
|
||||||
|
2
contrib/fmtlib
vendored
2
contrib/fmtlib
vendored
@ -1 +1 @@
|
|||||||
Subproject commit c108ee1d590089ccf642fc85652b845924067af2
|
Subproject commit b6f4ceaed0a0a24ccf575fab6c56dd50ccf6f1a9
|
@ -1,7 +1,10 @@
|
|||||||
set (SRCS
|
set (SRCS
|
||||||
|
# NOTE: do not build module for now:
|
||||||
|
# ../fmtlib/src/fmt.cc
|
||||||
../fmtlib/src/format.cc
|
../fmtlib/src/format.cc
|
||||||
../fmtlib/src/os.cc
|
../fmtlib/src/os.cc
|
||||||
|
|
||||||
|
../fmtlib/include/fmt/args.h
|
||||||
../fmtlib/include/fmt/chrono.h
|
../fmtlib/include/fmt/chrono.h
|
||||||
../fmtlib/include/fmt/color.h
|
../fmtlib/include/fmt/color.h
|
||||||
../fmtlib/include/fmt/compile.h
|
../fmtlib/include/fmt/compile.h
|
||||||
@ -11,9 +14,9 @@ set (SRCS
|
|||||||
../fmtlib/include/fmt/locale.h
|
../fmtlib/include/fmt/locale.h
|
||||||
../fmtlib/include/fmt/os.h
|
../fmtlib/include/fmt/os.h
|
||||||
../fmtlib/include/fmt/ostream.h
|
../fmtlib/include/fmt/ostream.h
|
||||||
../fmtlib/include/fmt/posix.h
|
|
||||||
../fmtlib/include/fmt/printf.h
|
../fmtlib/include/fmt/printf.h
|
||||||
../fmtlib/include/fmt/ranges.h
|
../fmtlib/include/fmt/ranges.h
|
||||||
|
../fmtlib/include/fmt/xchar.h
|
||||||
)
|
)
|
||||||
|
|
||||||
add_library(_fmt ${SRCS})
|
add_library(_fmt ${SRCS})
|
||||||
|
2
contrib/replxx
vendored
2
contrib/replxx
vendored
@ -1 +1 @@
|
|||||||
Subproject commit f019cba7ea1bcd1b4feb7826f28ed57fb581b04c
|
Subproject commit c745b3fb012ee5ae762fbc8cd7a40c4dc3fe15df
|
@ -72,11 +72,6 @@ else()
|
|||||||
|
|
||||||
if(WITH_ZSTD)
|
if(WITH_ZSTD)
|
||||||
add_definitions(-DZSTD)
|
add_definitions(-DZSTD)
|
||||||
include_directories(${ZSTD_INCLUDE_DIR})
|
|
||||||
include_directories("${ZSTD_INCLUDE_DIR}/common")
|
|
||||||
include_directories("${ZSTD_INCLUDE_DIR}/dictBuilder")
|
|
||||||
include_directories("${ZSTD_INCLUDE_DIR}/deprecated")
|
|
||||||
|
|
||||||
list(APPEND THIRDPARTY_LIBS ch_contrib::zstd)
|
list(APPEND THIRDPARTY_LIBS ch_contrib::zstd)
|
||||||
endif()
|
endif()
|
||||||
endif()
|
endif()
|
||||||
|
@ -12,7 +12,11 @@ dpkg -i package_folder/clickhouse-common-static_*.deb
|
|||||||
dpkg -i package_folder/clickhouse-common-static-dbg_*.deb
|
dpkg -i package_folder/clickhouse-common-static-dbg_*.deb
|
||||||
dpkg -i package_folder/clickhouse-server_*.deb
|
dpkg -i package_folder/clickhouse-server_*.deb
|
||||||
dpkg -i package_folder/clickhouse-client_*.deb
|
dpkg -i package_folder/clickhouse-client_*.deb
|
||||||
dpkg -i package_folder/clickhouse-test_*.deb
|
if [[ -n "$TEST_CASES_FROM_DEB" ]] && [[ "$TEST_CASES_FROM_DEB" -eq 1 ]]; then
|
||||||
|
dpkg -i package_folder/clickhouse-test_*.deb
|
||||||
|
else
|
||||||
|
ln -s /usr/share/clickhouse-test/clickhouse-test /usr/bin/clickhouse-test
|
||||||
|
fi
|
||||||
|
|
||||||
# install test configs
|
# install test configs
|
||||||
/usr/share/clickhouse-test/config/install.sh
|
/usr/share/clickhouse-test/config/install.sh
|
||||||
|
@ -11,6 +11,7 @@ RUN apt-get update && env DEBIAN_FRONTEND=noninteractive apt-get install --yes \
|
|||||||
curl \
|
curl \
|
||||||
git \
|
git \
|
||||||
libxml2-utils \
|
libxml2-utils \
|
||||||
|
moreutils \
|
||||||
pylint \
|
pylint \
|
||||||
python3-pip \
|
python3-pip \
|
||||||
shellcheck \
|
shellcheck \
|
||||||
|
@ -10,72 +10,26 @@ def process_result(result_folder):
|
|||||||
status = "success"
|
status = "success"
|
||||||
description = ""
|
description = ""
|
||||||
test_results = []
|
test_results = []
|
||||||
|
checks = (
|
||||||
|
("header duplicates", "duplicate_output.txt"),
|
||||||
|
("shellcheck", "shellcheck_output.txt"),
|
||||||
|
("style", "style_output.txt"),
|
||||||
|
("typos", "typos_output.txt"),
|
||||||
|
("whitespaces", "whitespaces_output.txt"),
|
||||||
|
("workflows", "workflows_output.txt"),
|
||||||
|
)
|
||||||
|
|
||||||
duplicate_log_path = "{}/duplicate_output.txt".format(result_folder)
|
for name, out_file in checks:
|
||||||
if not os.path.exists(duplicate_log_path):
|
full_path = os.path.join(result_folder, out_file)
|
||||||
logging.info("No header duplicates check log on path %s", duplicate_log_path)
|
if not os.path.exists(full_path):
|
||||||
return "exception", "No header duplicates check log", []
|
logging.info("No %s check log on path %s", name, full_path)
|
||||||
elif os.stat(duplicate_log_path).st_size != 0:
|
return "exception", f"No {name} check log", []
|
||||||
description += " Header duplicates check failed. "
|
elif os.stat(full_path).st_size != 0:
|
||||||
test_results.append(("Header duplicates check", "FAIL"))
|
description += f"Check {name} failed. "
|
||||||
|
test_results.append((f"Check {name}", "FAIL"))
|
||||||
status = "failure"
|
status = "failure"
|
||||||
else:
|
else:
|
||||||
test_results.append(("Header duplicates check", "OK"))
|
test_results.append((f"Check {name}", "OK"))
|
||||||
|
|
||||||
shellcheck_log_path = "{}/shellcheck_output.txt".format(result_folder)
|
|
||||||
if not os.path.exists(shellcheck_log_path):
|
|
||||||
logging.info("No shellcheck log on path %s", shellcheck_log_path)
|
|
||||||
return "exception", "No shellcheck log", []
|
|
||||||
elif os.stat(shellcheck_log_path).st_size != 0:
|
|
||||||
description += " Shellcheck check failed. "
|
|
||||||
test_results.append(("Shellcheck ", "FAIL"))
|
|
||||||
status = "failure"
|
|
||||||
else:
|
|
||||||
test_results.append(("Shellcheck", "OK"))
|
|
||||||
|
|
||||||
style_log_path = "{}/style_output.txt".format(result_folder)
|
|
||||||
if not os.path.exists(style_log_path):
|
|
||||||
logging.info("No style check log on path %s", style_log_path)
|
|
||||||
return "exception", "No style check log", []
|
|
||||||
elif os.stat(style_log_path).st_size != 0:
|
|
||||||
description += "Style check failed. "
|
|
||||||
test_results.append(("Style check", "FAIL"))
|
|
||||||
status = "failure"
|
|
||||||
else:
|
|
||||||
test_results.append(("Style check", "OK"))
|
|
||||||
|
|
||||||
typos_log_path = "{}/typos_output.txt".format(result_folder)
|
|
||||||
if not os.path.exists(typos_log_path):
|
|
||||||
logging.info("No typos check log on path %s", typos_log_path)
|
|
||||||
return "exception", "No typos check log", []
|
|
||||||
elif os.stat(typos_log_path).st_size != 0:
|
|
||||||
description += "Typos check failed. "
|
|
||||||
test_results.append(("Typos check", "FAIL"))
|
|
||||||
status = "failure"
|
|
||||||
else:
|
|
||||||
test_results.append(("Typos check", "OK"))
|
|
||||||
|
|
||||||
whitespaces_log_path = "{}/whitespaces_output.txt".format(result_folder)
|
|
||||||
if not os.path.exists(whitespaces_log_path):
|
|
||||||
logging.info("No whitespaces check log on path %s", whitespaces_log_path)
|
|
||||||
return "exception", "No whitespaces check log", []
|
|
||||||
elif os.stat(whitespaces_log_path).st_size != 0:
|
|
||||||
description += "Whitespaces check failed. "
|
|
||||||
test_results.append(("Whitespaces check", "FAIL"))
|
|
||||||
status = "failure"
|
|
||||||
else:
|
|
||||||
test_results.append(("Whitespaces check", "OK"))
|
|
||||||
|
|
||||||
workflows_log_path = "{}/workflows_output.txt".format(result_folder)
|
|
||||||
if not os.path.exists(workflows_log_path):
|
|
||||||
logging.info("No workflows check log on path %s", style_log_path)
|
|
||||||
return "exception", "No workflows check log", []
|
|
||||||
elif os.stat(whitespaces_log_path).st_size != 0:
|
|
||||||
description += "Workflows check failed. "
|
|
||||||
test_results.append(("Workflows check", "FAIL"))
|
|
||||||
status = "failure"
|
|
||||||
else:
|
|
||||||
test_results.append(("Workflows check", "OK"))
|
|
||||||
|
|
||||||
if not description:
|
if not description:
|
||||||
description += "Style check success"
|
description += "Style check success"
|
||||||
|
@ -3,10 +3,16 @@
|
|||||||
# yaml check is not the best one
|
# yaml check is not the best one
|
||||||
|
|
||||||
cd /ClickHouse/utils/check-style || echo -e "failure\tRepo not found" > /test_output/check_status.tsv
|
cd /ClickHouse/utils/check-style || echo -e "failure\tRepo not found" > /test_output/check_status.tsv
|
||||||
|
echo "Check duplicates" | ts
|
||||||
./check-duplicate-includes.sh |& tee /test_output/duplicate_output.txt
|
./check-duplicate-includes.sh |& tee /test_output/duplicate_output.txt
|
||||||
|
echo "Check style" | ts
|
||||||
./check-style -n |& tee /test_output/style_output.txt
|
./check-style -n |& tee /test_output/style_output.txt
|
||||||
|
echo "Check typos" | ts
|
||||||
./check-typos |& tee /test_output/typos_output.txt
|
./check-typos |& tee /test_output/typos_output.txt
|
||||||
|
echo "Check whitespaces" | ts
|
||||||
./check-whitespaces -n |& tee /test_output/whitespaces_output.txt
|
./check-whitespaces -n |& tee /test_output/whitespaces_output.txt
|
||||||
|
echo "Check sorkflows" | ts
|
||||||
./check-workflows |& tee /test_output/workflows_output.txt
|
./check-workflows |& tee /test_output/workflows_output.txt
|
||||||
|
echo "Check shell scripts with shellcheck" | ts
|
||||||
./shellcheck-run.sh |& tee /test_output/shellcheck_output.txt
|
./shellcheck-run.sh |& tee /test_output/shellcheck_output.txt
|
||||||
/process_style_check_result.py || echo -e "failure\tCannot parse results" > /test_output/check_status.tsv
|
/process_style_check_result.py || echo -e "failure\tCannot parse results" > /test_output/check_status.tsv
|
||||||
|
@ -22,7 +22,7 @@ cmake .. \
|
|||||||
|
|
||||||
1. ClickHouse's source CMake files (located in the root directory and in `/src`).
|
1. ClickHouse's source CMake files (located in the root directory and in `/src`).
|
||||||
2. Arch-dependent CMake files (located in `/cmake/*os_name*`).
|
2. Arch-dependent CMake files (located in `/cmake/*os_name*`).
|
||||||
3. Libraries finders (search for contrib libraries, located in `/cmake/find`).
|
3. Libraries finders (search for contrib libraries, located in `/contrib/*/CMakeLists.txt`).
|
||||||
3. Contrib build CMake files (used instead of libraries' own CMake files, located in `/cmake/modules`)
|
3. Contrib build CMake files (used instead of libraries' own CMake files, located in `/cmake/modules`)
|
||||||
|
|
||||||
## List of CMake flags
|
## List of CMake flags
|
||||||
|
@ -8,4 +8,4 @@ sudo apt-get update
|
|||||||
sudo apt-get install -y clickhouse-server clickhouse-client
|
sudo apt-get install -y clickhouse-server clickhouse-client
|
||||||
|
|
||||||
sudo service clickhouse-server start
|
sudo service clickhouse-server start
|
||||||
clickhouse-client
|
clickhouse-client # or "clickhouse-client --password" if you set up a password.
|
||||||
|
@ -4,4 +4,4 @@ sudo yum-config-manager --add-repo https://repo.clickhouse.com/rpm/clickhouse.re
|
|||||||
sudo yum install clickhouse-server clickhouse-client
|
sudo yum install clickhouse-server clickhouse-client
|
||||||
|
|
||||||
sudo /etc/init.d/clickhouse-server start
|
sudo /etc/init.d/clickhouse-server start
|
||||||
clickhouse-client
|
clickhouse-client # or "clickhouse-client --password" if you set up a password.
|
||||||
|
@ -10,7 +10,7 @@ Applies Student's t-test to samples from two populations.
|
|||||||
**Syntax**
|
**Syntax**
|
||||||
|
|
||||||
``` sql
|
``` sql
|
||||||
studentTTest(sample_data, sample_index)
|
studentTTest([confidence_level])(sample_data, sample_index)
|
||||||
```
|
```
|
||||||
|
|
||||||
Values of both samples are in the `sample_data` column. If `sample_index` equals to 0 then the value in that row belongs to the sample from the first population. Otherwise it belongs to the sample from the second population.
|
Values of both samples are in the `sample_data` column. If `sample_index` equals to 0 then the value in that row belongs to the sample from the first population. Otherwise it belongs to the sample from the second population.
|
||||||
@ -21,12 +21,19 @@ The null hypothesis is that means of populations are equal. Normal distribution
|
|||||||
- `sample_data` — Sample data. [Integer](../../../sql-reference/data-types/int-uint.md), [Float](../../../sql-reference/data-types/float.md) or [Decimal](../../../sql-reference/data-types/decimal.md).
|
- `sample_data` — Sample data. [Integer](../../../sql-reference/data-types/int-uint.md), [Float](../../../sql-reference/data-types/float.md) or [Decimal](../../../sql-reference/data-types/decimal.md).
|
||||||
- `sample_index` — Sample index. [Integer](../../../sql-reference/data-types/int-uint.md).
|
- `sample_index` — Sample index. [Integer](../../../sql-reference/data-types/int-uint.md).
|
||||||
|
|
||||||
|
**Parameters**
|
||||||
|
|
||||||
|
- `confidence_level` — Confidence level in order to calculate confidence intervals. [Float](../../../sql-reference/data-types/float.md).
|
||||||
|
|
||||||
|
|
||||||
**Returned values**
|
**Returned values**
|
||||||
|
|
||||||
[Tuple](../../../sql-reference/data-types/tuple.md) with two elements:
|
[Tuple](../../../sql-reference/data-types/tuple.md) with two or four elements (if the optional `confidence_level` is specified):
|
||||||
|
|
||||||
- calculated t-statistic. [Float64](../../../sql-reference/data-types/float.md).
|
- calculated t-statistic. [Float64](../../../sql-reference/data-types/float.md).
|
||||||
- calculated p-value. [Float64](../../../sql-reference/data-types/float.md).
|
- calculated p-value. [Float64](../../../sql-reference/data-types/float.md).
|
||||||
|
- calculated confidence-interval-low. [Float64](../../../sql-reference/data-types/float.md).
|
||||||
|
- calculated confidence-interval-high. [Float64](../../../sql-reference/data-types/float.md).
|
||||||
|
|
||||||
|
|
||||||
**Example**
|
**Example**
|
||||||
|
@ -10,7 +10,7 @@ Applies Welch's t-test to samples from two populations.
|
|||||||
**Syntax**
|
**Syntax**
|
||||||
|
|
||||||
``` sql
|
``` sql
|
||||||
welchTTest(sample_data, sample_index)
|
welchTTest([confidence_level])(sample_data, sample_index)
|
||||||
```
|
```
|
||||||
|
|
||||||
Values of both samples are in the `sample_data` column. If `sample_index` equals to 0 then the value in that row belongs to the sample from the first population. Otherwise it belongs to the sample from the second population.
|
Values of both samples are in the `sample_data` column. If `sample_index` equals to 0 then the value in that row belongs to the sample from the first population. Otherwise it belongs to the sample from the second population.
|
||||||
@ -21,12 +21,18 @@ The null hypothesis is that means of populations are equal. Normal distribution
|
|||||||
- `sample_data` — Sample data. [Integer](../../../sql-reference/data-types/int-uint.md), [Float](../../../sql-reference/data-types/float.md) or [Decimal](../../../sql-reference/data-types/decimal.md).
|
- `sample_data` — Sample data. [Integer](../../../sql-reference/data-types/int-uint.md), [Float](../../../sql-reference/data-types/float.md) or [Decimal](../../../sql-reference/data-types/decimal.md).
|
||||||
- `sample_index` — Sample index. [Integer](../../../sql-reference/data-types/int-uint.md).
|
- `sample_index` — Sample index. [Integer](../../../sql-reference/data-types/int-uint.md).
|
||||||
|
|
||||||
|
**Parameters**
|
||||||
|
|
||||||
|
- `confidence_level` — Confidence level in order to calculate confidence intervals. [Float](../../../sql-reference/data-types/float.md).
|
||||||
|
|
||||||
**Returned values**
|
**Returned values**
|
||||||
|
|
||||||
[Tuple](../../../sql-reference/data-types/tuple.md) with two elements:
|
[Tuple](../../../sql-reference/data-types/tuple.md) with two or four elements (if the optional `confidence_level` is specified):
|
||||||
|
|
||||||
- calculated t-statistic. [Float64](../../../sql-reference/data-types/float.md).
|
- calculated t-statistic. [Float64](../../../sql-reference/data-types/float.md).
|
||||||
- calculated p-value. [Float64](../../../sql-reference/data-types/float.md).
|
- calculated p-value. [Float64](../../../sql-reference/data-types/float.md).
|
||||||
|
- calculated confidence-interval-low. [Float64](../../../sql-reference/data-types/float.md).
|
||||||
|
- calculated confidence-interval-high. [Float64](../../../sql-reference/data-types/float.md).
|
||||||
|
|
||||||
|
|
||||||
**Example**
|
**Example**
|
||||||
|
@ -30,7 +30,7 @@ There may be any number of space symbols between syntactical constructions (incl
|
|||||||
|
|
||||||
ClickHouse supports both SQL-style and C-style comments:
|
ClickHouse supports both SQL-style and C-style comments:
|
||||||
|
|
||||||
- SQL-style comments start with `--` and continue to the end of the line, a space after `--` can be omitted.
|
- SQL-style comments start with `--`, `#!` or `# ` and continue to the end of the line, a space after `--` and `#!` can be omitted.
|
||||||
- C-style are from `/*` to `*/` and can be multiline, spaces are not required either.
|
- C-style are from `/*` to `*/` and can be multiline, spaces are not required either.
|
||||||
|
|
||||||
## Keywords {#syntax-keywords}
|
## Keywords {#syntax-keywords}
|
||||||
|
@ -2,8 +2,13 @@
|
|||||||
toc_priority: 65
|
toc_priority: 65
|
||||||
toc_title: Сборка на Mac OS X
|
toc_title: Сборка на Mac OS X
|
||||||
---
|
---
|
||||||
|
|
||||||
# Как собрать ClickHouse на Mac OS X {#how-to-build-clickhouse-on-mac-os-x}
|
# Как собрать ClickHouse на Mac OS X {#how-to-build-clickhouse-on-mac-os-x}
|
||||||
|
|
||||||
|
!!! info "Вам не нужно собирать ClickHouse самостоятельно"
|
||||||
|
Вы можете установить предварительно собранный ClickHouse, как описано в [Быстром старте](https://clickhouse.com/#quick-start).
|
||||||
|
Следуйте инструкциям по установке для `macOS (Intel)` или `macOS (Apple Silicon)`.
|
||||||
|
|
||||||
Сборка должна запускаться с x86_64 (Intel) на macOS версии 10.15 (Catalina) и выше в последней версии компилятора Xcode's native AppleClang, Homebrew's vanilla Clang или в GCC-компиляторах.
|
Сборка должна запускаться с x86_64 (Intel) на macOS версии 10.15 (Catalina) и выше в последней версии компилятора Xcode's native AppleClang, Homebrew's vanilla Clang или в GCC-компиляторах.
|
||||||
|
|
||||||
## Установка Homebrew {#install-homebrew}
|
## Установка Homebrew {#install-homebrew}
|
||||||
|
@ -28,7 +28,7 @@ INSERT INTO t VALUES (1, 'Hello, world'), (2, 'abc'), (3, 'def')
|
|||||||
## Комментарии {#comments}
|
## Комментарии {#comments}
|
||||||
|
|
||||||
Поддерживаются комментарии в SQL-стиле и C-стиле.
|
Поддерживаются комментарии в SQL-стиле и C-стиле.
|
||||||
Комментарии в SQL-стиле: от `--` до конца строки. Пробел после `--` может не ставиться.
|
Комментарии в SQL-стиле: от `--`, `#!` или `# ` до конца строки. Пробел после `--` и `#!` может не ставиться.
|
||||||
Комментарии в C-стиле: от `/*` до `*/`. Такие комментарии могут быть многострочными. Пробелы тоже не обязательны.
|
Комментарии в C-стиле: от `/*` до `*/`. Такие комментарии могут быть многострочными. Пробелы тоже не обязательны.
|
||||||
|
|
||||||
## Ключевые слова {#syntax-keywords}
|
## Ключевые слова {#syntax-keywords}
|
||||||
|
@ -90,7 +90,10 @@ def concatenate(lang, docs_path, single_page_file, nav):
|
|||||||
line)
|
line)
|
||||||
|
|
||||||
# If failed to replace the relative link, print to log
|
# If failed to replace the relative link, print to log
|
||||||
if '../' in line:
|
# But with some exceptions:
|
||||||
|
# - "../src/" -- for cmake-in-clickhouse.md (link to sources)
|
||||||
|
# - "../usr/share" -- changelog entry that has "../usr/share/zoneinfo"
|
||||||
|
if '../' in line and (not '../usr/share' in line) and (not '../src/' in line):
|
||||||
logging.info('Failed to resolve relative link:')
|
logging.info('Failed to resolve relative link:')
|
||||||
logging.info(path)
|
logging.info(path)
|
||||||
logging.info(line)
|
logging.info(line)
|
||||||
|
@ -1 +0,0 @@
|
|||||||
../../../en/faq/general/how-do-i-contribute-code-to-clickhouse.md
|
|
@ -0,0 +1,17 @@
|
|||||||
|
---
|
||||||
|
title: 我如何为ClickHouse贡献代码?
|
||||||
|
toc_hidden: true
|
||||||
|
toc_priority: 120
|
||||||
|
---
|
||||||
|
|
||||||
|
# 我如何为ClickHouse贡献代码? {#how-do-i-contribute-code-to-clickhouse}
|
||||||
|
|
||||||
|
ClickHouse是一个开源项目[在GitHub上开发](https://github.com/ClickHouse/ClickHouse)。
|
||||||
|
|
||||||
|
按照惯例,贡献指南发布在源代码库根目录的 [CONTRIBUTING.md](https://github.com/ClickHouse/ClickHouse/blob/master/CONTRIBUTING.md)文件中。
|
||||||
|
|
||||||
|
如果你想对ClickHouse提出实质性的改变建议,可以考虑[在GitHub上发布一个问题](https://github.com/ClickHouse/ClickHouse/issues/new/choose),解释一下你想做什么,先与维护人员和社区讨论一下。[此类RFC问题的例子](https://github.com/ClickHouse/ClickHouse/issues?q=is%3Aissue+is%3Aopen+rfc)。
|
||||||
|
|
||||||
|
如果您的贡献与安全相关,也请查看[我们的安全政策](https://github.com/ClickHouse/ClickHouse/security/policy/)。
|
||||||
|
|
||||||
|
|
@ -26,6 +26,7 @@ toc_priority: 76
|
|||||||
- **[运维操作](../faq/operations/index.md)**
|
- **[运维操作](../faq/operations/index.md)**
|
||||||
- [如果想在生产环境部署,需要用哪个版本的 ClickHouse 呢?](../faq/operations/production.md)
|
- [如果想在生产环境部署,需要用哪个版本的 ClickHouse 呢?](../faq/operations/production.md)
|
||||||
- [是否可能从 ClickHouse 数据表中删除所有旧的数据记录?](../faq/operations/delete-old-data.md)
|
- [是否可能从 ClickHouse 数据表中删除所有旧的数据记录?](../faq/operations/delete-old-data.md)
|
||||||
|
- [ClickHouse支持多区域复制吗?](../faq/operations/multi-region-replication.md)
|
||||||
- **[集成开发](../faq/integration/index.md)**
|
- **[集成开发](../faq/integration/index.md)**
|
||||||
- [如何从 ClickHouse 导出数据到一个文件?](../faq/integration/file-export.md)
|
- [如何从 ClickHouse 导出数据到一个文件?](../faq/integration/file-export.md)
|
||||||
- [如果我用ODBC链接Oracle数据库出现编码问题该怎么办?](../faq/integration/oracle-odbc.md)
|
- [如果我用ODBC链接Oracle数据库出现编码问题该怎么办?](../faq/integration/oracle-odbc.md)
|
||||||
|
@ -1 +0,0 @@
|
|||||||
../../../en/faq/integration/index.md
|
|
21
docs/zh/faq/integration/index.md
Normal file
21
docs/zh/faq/integration/index.md
Normal file
@ -0,0 +1,21 @@
|
|||||||
|
---
|
||||||
|
title: 关于集成ClickHouse和其他系统的问题
|
||||||
|
toc_hidden_folder: true
|
||||||
|
toc_priority: 4
|
||||||
|
toc_title: Integration
|
||||||
|
---
|
||||||
|
|
||||||
|
# 关于集成ClickHouse和其他系统的问题 {#question-about-integrating-clickhouse-and-other-systems}
|
||||||
|
|
||||||
|
问题:
|
||||||
|
|
||||||
|
- [如何从 ClickHouse 导出数据到一个文件?](../../faq/integration/file-export.md)
|
||||||
|
- [如何导入JSON到ClickHouse?](../../faq/integration/json-import.md)
|
||||||
|
- [如果我用ODBC链接Oracle数据库出现编码问题该怎么办?](../../faq/integration/oracle-odbc.md)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
!!! info "没看到你要找的东西吗?"
|
||||||
|
查看[其他faq类别](../../faq/index.md)或浏览左边栏中的主要文档文章。
|
||||||
|
|
||||||
|
{## [原文](https://clickhouse.com/docs/en/faq/integration/) ##}
|
@ -1 +0,0 @@
|
|||||||
../../../en/faq/operations/index.md
|
|
20
docs/zh/faq/operations/index.md
Normal file
20
docs/zh/faq/operations/index.md
Normal file
@ -0,0 +1,20 @@
|
|||||||
|
---
|
||||||
|
title: 关于操作ClickHouse服务器和集群的问题
|
||||||
|
toc_hidden_folder: true
|
||||||
|
toc_priority: 3
|
||||||
|
toc_title: Operations
|
||||||
|
---
|
||||||
|
|
||||||
|
# 关于操作ClickHouse服务器和集群的问题 {#question-about-operating-clickhouse-servers-and-clusters}
|
||||||
|
|
||||||
|
问题:
|
||||||
|
|
||||||
|
- [如果想在生产环境部署,需要用哪个版本的 ClickHouse 呢?](../../faq/operations/production.md)
|
||||||
|
- [是否可能从 ClickHouse 数据表中删除所有旧的数据记录?](../../faq/operations/delete-old-data.md)
|
||||||
|
- [ClickHouse支持多区域复制吗?](../../faq/operations/multi-region-replication.md)
|
||||||
|
|
||||||
|
|
||||||
|
!!! info "没看到你要找的东西吗?"
|
||||||
|
查看[其他faq类别](../../faq/index.md)或浏览左边栏中的主要文档文章。
|
||||||
|
|
||||||
|
{## [原文](https://clickhouse.com/docs/en/faq/production/) ##}
|
@ -1 +0,0 @@
|
|||||||
../../../en/faq/operations/multi-region-replication.md
|
|
14
docs/zh/faq/operations/multi-region-replication.md
Normal file
14
docs/zh/faq/operations/multi-region-replication.md
Normal file
@ -0,0 +1,14 @@
|
|||||||
|
---
|
||||||
|
title: ClickHouse支持多区域复制吗?
|
||||||
|
toc_hidden: true
|
||||||
|
toc_priority: 30
|
||||||
|
---
|
||||||
|
|
||||||
|
# ClickHouse支持多区域复制吗? {#does-clickhouse-support-multi-region-replication}
|
||||||
|
|
||||||
|
简短的回答是“是的”。然而,我们建议将所有区域/数据中心之间的延迟保持在两位数字范围内,否则,在通过分布式共识协议时,写性能将受到影响。例如,美国海岸之间的复制可能会很好,但美国和欧洲之间就不行。
|
||||||
|
|
||||||
|
在配置方面,这与单区域复制没有区别,只是使用位于不同位置的主机作为副本。
|
||||||
|
|
||||||
|
更多信息,请参见[关于数据复制的完整文章](../../engines/table-engines/mergetree-family/replication.md)。
|
||||||
|
|
@ -1,180 +1,189 @@
|
|||||||
---
|
---
|
||||||
machine_translated: true
|
|
||||||
machine_translated_rev: ad252bbb4f7e2899c448eb42ecc39ff195c8faa1
|
|
||||||
toc_priority: 40
|
toc_priority: 40
|
||||||
toc_title: "ANSI\u517C\u5BB9\u6027"
|
toc_title: "ANSI\u517C\u5BB9\u6027"
|
||||||
---
|
---
|
||||||
|
|
||||||
# Ansi Sql兼容性的ClickHouse SQL方言 {#ansi-sql-compatibility-of-clickhouse-sql-dialect}
|
# ClickHouse SQL方言与ANSI SQL的兼容性 {#ansi-sql-compatibility-of-clickhouse-sql-dialect}
|
||||||
|
|
||||||
!!! note "注"
|
!!! note "注"
|
||||||
本文依赖于表38, “Feature taxonomy and definition for mandatory features”, Annex F of ISO/IEC CD 9075-2:2013.
|
本文参考Annex G所著的[ISO/IEC CD 9075-2:2011](https://www.iso.org/obp/ui/#iso:std:iso-iec:9075:-2:ed-4:v1:en:sec:8)标准.
|
||||||
|
|
||||||
## 行为差异 {#differences-in-behaviour}
|
## 行为差异 {#differences-in-behaviour}
|
||||||
|
|
||||||
下表列出了查询功能在ClickHouse中有效但不符合ANSI SQL标准的情况。
|
下表列出了ClickHouse能够使用,但与ANSI SQL规定有差异的查询特性。
|
||||||
|
|
||||||
| Feature ID | 功能名称 | 差异 |
|
| 功能ID | 功能名称 | 差异 |
|
||||||
|------------|--------------------|---------------------------------------------------------------------|
|
| ------- | --------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||||
| E011 | 数值(Numeric)数据类型 | 带小数点的数值文字被解释为近似值 (`Float64`)而不是精确值 (`Decimal`) |
|
| E011 | 数值型数据类型 | 带小数点的数字被视为近似值 (`Float64`)而不是精确值 (`Decimal`) |
|
||||||
| E051-05 | SELECT字段可以重命名 | 字段不仅仅在SELECT结果中可被重命名 |
|
| E051-05 | SELECT 的列可以重命名 | 字段重命名的作用范围不限于进行重命名的SELECT子查询(参考[表达式别名](https://clickhouse.com/docs/zh/sql-reference/syntax/#notes-on-usage)) |
|
||||||
| E141-01 | 非空约束 | 表中每一列默认为`NOT NULL` |
|
| E141-01 | NOT NULL(非空)约束 | ClickHouse表中每一列默认为`NOT NULL` |
|
||||||
| E011-04 | 算术运算符 | ClickHouse不会检查算法,并根据自定义规则更改结果数据类型,而是会溢出 |
|
| E011-04 | 算术运算符 | ClickHouse在运算时会进行溢出,而不是四舍五入。此外会根据自定义规则修改结果数据类型(参考[溢出检查](https://clickhouse.com/docs/zh/sql-reference/data-types/decimal/#yi-chu-jian-cha)) |
|
||||||
|
|
||||||
## 功能匹配 {#feature-status}
|
## 功能状态 {#feature-status}
|
||||||
|
|
||||||
| Feature ID | 功能名称 | 匹配 | 评论 |
|
| 功能ID | 功能名称 | 状态 | 注释 |
|
||||||
|------------|----------------------------------------------------------------|--------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
| -------- | ---------------------------------------------------------------------------------------- | ----------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
|
||||||
| **E011** | **数字数据类型** | **部分**{.text-warning} | |
|
| **E011** | **数值型数据类型** | **部分**{.text-warning} | |
|
||||||
| E011-01 | 整型和小型数据类型 | 是 {.text-success} | |
|
| E011-01 | INTEGER (整型)和SMALLINT (小整型)数据类型 | 是 {.text-success} | |
|
||||||
| E011-02 | 真实、双精度和浮点数据类型数据类型 | 部分 {.text-warning} | `FLOAT(<binary_precision>)`, `REAL` 和 `DOUBLE PRECISION` 不支持 |
|
| E011-02 | REAL (实数)、DOUBLE PRECISION (双精度浮点数)和FLOAT(单精度浮点数)数据类型数据类型 | 是 {.text-success} | |
|
||||||
| E011-03 | 十进制和数值数据类型 | 部分 {.text-warning} | 只有 `DECIMAL(p,s)` 支持,而不是 `NUMERIC` |
|
| E011-03 | DECIMAL (精确数字)和NUMERIC (精确数字)数据类型 | 是 {.text-success} | |
|
||||||
| E011-04 | 算术运算符 | 是 {.text-success} | |
|
| E011-04 | 算术运算符 | 是 {.text-success} | |
|
||||||
| E011-05 | 数字比较 | 是 {.text-success} | |
|
| E011-05 | 数值比较 | 是 {.text-success} | |
|
||||||
| E011-06 | 数字数据类型之间的隐式转换 | 否。 {.text-danger} | ANSI SQL允许在数值类型之间进行任意隐式转换,而ClickHouse依赖于具有多个重载的函数而不是隐式转换 |
|
| E011-06 | 数值数据类型之间的隐式转换 | 否 {.text-danger} | ANSI SQL允许在数值类型之间进行任意隐式转换,而ClickHouse针对不同数据类型有对应的比较函数和类型转换函数 |
|
||||||
| **E021** | **字符串类型** | **部分**{.text-warning} | |
|
| **E021** | **字符串类型** | **部分**{.text-warning} | |
|
||||||
| E021-01 | 字符数据类型 | 否。 {.text-danger} | |
|
| E021-01 | CHARACTER (字符串)数据类型 | 是 {.text-success} | |
|
||||||
| E021-02 | 字符变化数据类型 | 否。 {.text-danger} | `String` 行为类似,但括号中没有长度限制 |
|
| E021-02 | CHARACTER VARYING (可变字符串)数据类型 | 是 {.text-success} | |
|
||||||
| E021-03 | 字符文字 | 部分 {.text-warning} | 不自动连接连续文字和字符集支持 |
|
| E021-03 | 字符字面量 | 是 {.text-success} | |
|
||||||
| E021-04 | 字符长度函数 | 部分 {.text-warning} | 非也。 `USING` 条款 |
|
| E021-04 | CHARACTER_LENGTH 函数 | 部分 {.text-warning} | 不支持 `using` 从句 |
|
||||||
| E021-05 | OCTET_LENGTH函数 | 非也。 {.text-danger} | `LENGTH` 表现类似 |
|
| E021-05 | OCTET_LENGTH 函数 | 否 {.text-danger} | 使用 `LENGTH` 函数代替 |
|
||||||
| E021-06 | SUBSTRING | 部分 {.text-warning} | 不支持 `SIMILAR` 和 `ESCAPE` 条款,否 `SUBSTRING_REGEX` 备选案文 |
|
| E021-06 | SUBSTRING | 部分 {.text-warning} | 不支持 `SIMILAR` 和 `ESCAPE` 从句,没有`SUBSTRING_REGEX` 函数 |
|
||||||
| E021-07 | 字符串联 | 部分 {.text-warning} | 非也。 `COLLATE` 条款 |
|
| E021-07 | 字符串拼接 | 部分 {.text-warning} | 不支持 `COLLATE` 从句 |
|
||||||
| E021-08 | 上下功能 | 是 {.text-success} | |
|
| E021-08 | 大小写转换 | 是 {.text-success} | |
|
||||||
| E021-09 | 修剪功能 | 是 {.text-success} | |
|
| E021-09 | 裁剪字符串 | 是 {.text-success} | |
|
||||||
| E021-10 | 固定长度和可变长度字符串类型之间的隐式转换 | 否。 {.text-danger} | ANSI SQL允许在字符串类型之间进行任意隐式转换,而ClickHouse依赖于具有多个重载的函数而不是隐式转换 |
|
| E021-10 | 固定长度和可变长度字符串类型之间的隐式转换 | 部分 {.text-warning} | ANSI SQL允许在数据类型之间进行任意隐式转换,而ClickHouse针对不同数据类型有对应的比较函数和类型转换函数 |
|
||||||
| E021-11 | 职位功能 | 部分 {.text-warning} | 不支持 `IN` 和 `USING` 条款,否 `POSITION_REGEX` 备选案文 |
|
| E021-11 | POSITION 函数 | 部分 {.text-warning} | 不支持 `IN` 和 `USING` 从句,不支持`POSITION_REGEX`函数 |
|
||||||
| E021-12 | 字符比较 | 是 {.text-success} | |
|
| E021-12 | 字符串比较 | 是 {.text-success} | |
|
||||||
| **E031** | **标识符** | **部分**{.text-warning} | |
|
| **E031** | **标识符** | **部分**{.text-warning} | |
|
||||||
| E031-01 | 分隔标识符 | 部分 {.text-warning} | Unicode文字支持有限 |
|
| E031-01 | 分隔标识符 | 部分 {.text-warning} | Unicode文字支持有限 |
|
||||||
| E031-02 | 小写标识符 | 是 {.text-success} | |
|
| E031-02 | 小写标识符 | 是 {.text-success} | |
|
||||||
| E031-03 | 尾部下划线 | 是 {.text-success} | |
|
| E031-03 | 标识符最后加下划线 | 是 {.text-success} | |
|
||||||
| **E051** | **基本查询规范** | **部分**{.text-warning} | |
|
| **E051** | **基本查询规范** | **部分**{.text-warning} | |
|
||||||
| E051-01 | SELECT DISTINCT | 是 {.text-success} | |
|
| E051-01 | SELECT DISTINCT | 是 {.text-success} | |
|
||||||
| E051-02 | GROUP BY子句 | 是 {.text-success} | |
|
| E051-02 | GROUP BY 从句 | 是 {.text-success} | |
|
||||||
| E051-04 | 分组依据可以包含不在列 `<select list>` | 是 {.text-success} | |
|
| E051-04 | GROUP BY 从句中的列可以包含不在 `<select list>`中出现的列 | 是 {.text-success} | |
|
||||||
| E051-05 | 选择项目可以重命名 | 是 {.text-success} | |
|
| E051-05 | SELECT 的列可以重命名 | 是 {.text-success} | |
|
||||||
| E051-06 | 有条款 | 是 {.text-success} | |
|
| E051-06 | HAVING 从句 | 是 {.text-success} | |
|
||||||
| E051-07 | 合格\*在选择列表中 | 是 {.text-success} | |
|
| E051-07 | SELECT 选择的列中允许出现\* | 是 {.text-success} | |
|
||||||
| E051-08 | FROM子句中的关联名称 | 是 {.text-success} | |
|
| E051-08 | FROM 从句中的关联名称 | 是 {.text-success} | |
|
||||||
| E051-09 | 重命名FROM子句中的列 | 否。 {.text-danger} | |
|
| E051-09 | 重命名 FROM 从句中的列 | 否 {.text-danger} | |
|
||||||
| **E061** | **基本谓词和搜索条件** | **部分**{.text-warning} | |
|
| **E061** | **基本谓词和搜索条件** | **部分**{.text-warning} | |
|
||||||
| E061-01 | 比较谓词 | 是 {.text-success} | |
|
| E061-01 | 比较谓词 | 是 {.text-success} | |
|
||||||
| E061-02 | 谓词之间 | 部分 {.text-warning} | 非也。 `SYMMETRIC` 和 `ASYMMETRIC` 条款 |
|
| E061-02 | BETWEEN 谓词 | 部分 {.text-warning} | 不支持 `SYMMETRIC` 和 `ASYMMETRIC` 从句 |
|
||||||
| E061-03 | 在具有值列表的谓词中 | 是 {.text-success} | |
|
| E061-03 | IN 谓词后可接值列表 | 是 {.text-success} | |
|
||||||
| E061-04 | 像谓词 | 是 {.text-success} | |
|
| E061-04 | LIKE 谓词 | 是 {.text-success} | |
|
||||||
| E061-05 | LIKE谓词:逃避条款 | 否。 {.text-danger} | |
|
| E061-05 | LIKE 谓词后接 ESCAPE 从句 | 否 {.text-danger} | |
|
||||||
| E061-06 | 空谓词 | 是 {.text-success} | |
|
| E061-06 | NULL 谓词 | 是 {.text-success} | |
|
||||||
| E061-07 | 量化比较谓词 | 非也。 {.text-danger} | |
|
| E061-07 | 量化比较谓词(ALL、SOME、ANY) | 否 {.text-danger} | |
|
||||||
| E061-08 | 存在谓词 | 非也。 {.text-danger} | |
|
| E061-08 | EXISTS 谓词 | 否 {.text-danger} | |
|
||||||
| E061-09 | 比较谓词中的子查询 | 是 {.text-success} | |
|
| E061-09 | 比较谓词中的子查询 | 是 {.text-success} | |
|
||||||
| E061-11 | 谓词中的子查询 | 是 {.text-success} | |
|
| E061-11 | IN 谓词中的子查询 | 是 {.text-success} | |
|
||||||
| E061-12 | 量化比较谓词中的子查询 | 否。 {.text-danger} | |
|
| E061-12 | 量化比较谓词(BETWEEN、IN、LIKE)中的子查询 | 否 {.text-danger} | |
|
||||||
| E061-13 | 相关子查询 | 否。 {.text-danger} | |
|
| E061-13 | 相关子查询 | 否 {.text-danger} | |
|
||||||
| E061-14 | 搜索条件 | 是 {.text-success} | |
|
| E061-14 | 搜索条件 | 是 {.text-success} | |
|
||||||
| **E071** | **基本查询表达式** | **部分**{.text-warning} | |
|
| **E071** | **基本查询表达式** | **部分**{.text-warning} | |
|
||||||
| E071-01 | UNION DISTINCT table运算符 | 否。 {.text-danger} | |
|
| E071-01 | UNION DISTINCT 表运算符 | 是 {.text-success} | |
|
||||||
| E071-02 | 联合所有表运算符 | 是 {.text-success} | |
|
| E071-02 | UNION ALL 表运算符 | 是 {.text-success} | |
|
||||||
| E071-03 | 除了不同的表运算符 | 非也。 {.text-danger} | |
|
| E071-03 | EXCEPT DISTINCT 表运算符 | 否 {.text-danger} | |
|
||||||
| E071-05 | 通过表运算符组合的列不必具有完全相同的数据类型 | 是 {.text-success} | |
|
| E071-05 | 通过表运算符组合的列不必具有完全相同的数据类型 | 是 {.text-success} | |
|
||||||
| E071-06 | 子查询中的表运算符 | 是 {.text-success} | |
|
| E071-06 | 子查询中的表运算符 | 是 {.text-success} | |
|
||||||
| **E081** | **基本特权** | **部分**{.text-warning} | 正在进行的工作 |
|
| **E081** | **基本权限** | **是**{.text-success} | |
|
||||||
| **E091** | **设置函数** | **是**{.text-success} | |
|
| E081-01 | 表级别的SELECT(查询)权限 | 是 {.text-success} | |
|
||||||
|
| E081-02 | DELETE(删除)权限 | 是 {.text-success} | |
|
||||||
|
| E081-03 | 表级别的INSERT(插入)权限 | 是 {.text-success} | |
|
||||||
|
| E081-04 | 表级别的UPDATE(更新)权限 | 是 {.text-success} | |
|
||||||
|
| E081-05 | 列级别的UPDATE(更新)权限 | 是 {.text-success} | |
|
||||||
|
| E081-06 | 表级别的REFERENCES(引用)权限 | 是 {.text-success} | |
|
||||||
|
| E081-07 | 列级别的REFERENCES(引用)权限 | 是 {.text-success} | |
|
||||||
|
| E081-08 | WITH GRANT OPTION | 是 {.text-success} | |
|
||||||
|
| E081-09 | USAGE(使用)权限 | 是 {.text-success} | |
|
||||||
|
| E081-10 | EXECUTE(执行)权限 | 是 {.text-success} | |
|
||||||
|
| **E091** | **集合函数** | **是**{.text-success} | |
|
||||||
| E091-01 | AVG | 是 {.text-success} | |
|
| E091-01 | AVG | 是 {.text-success} | |
|
||||||
| E091-02 | COUNT | 是 {.text-success} | |
|
| E091-02 | COUNT | 是 {.text-success} | |
|
||||||
| E091-03 | MAX | 是 {.text-success} | |
|
| E091-03 | MAX | 是 {.text-success} | |
|
||||||
| E091-04 | MIN | 是 {.text-success} | |
|
| E091-04 | MIN | 是 {.text-success} | |
|
||||||
| E091-05 | SUM | 是 {.text-success} | |
|
| E091-05 | SUM | 是 {.text-success} | |
|
||||||
| E091-06 | 全部量词 | 否。 {.text-danger} | |
|
| E091-06 | ALL修饰词 | 否 {.text-danger} | |
|
||||||
| E091-07 | 不同的量词 | 部分 {.text-warning} | 并非所有聚合函数都受支持 |
|
| E091-07 | DISTINCT修饰词 | 是 {.text-success} | 并非所有聚合函数都支持该修饰词 |
|
||||||
| **E101** | **基本数据操作** | **部分**{.text-warning} | |
|
| **E101** | **基本数据操作** | **部分**{.text-warning} | |
|
||||||
| E101-01 | 插入语句 | 是 {.text-success} | 注:ClickHouse中的主键并不意味着 `UNIQUE` 约束 |
|
| E101-01 | INSERT(插入)语句 | 是 {.text-success} | 注:ClickHouse中的主键并不隐含`UNIQUE` 约束 |
|
||||||
| E101-03 | 搜索更新语句 | 否。 {.text-danger} | 有一个 `ALTER UPDATE` 批量数据修改语句 |
|
| E101-03 | 可指定范围的UPDATE(更新)语句 | 部分 {.text-warning} | `ALTER UPDATE` 语句用来批量更新数据 |
|
||||||
| E101-04 | 搜索的删除语句 | 否。 {.text-danger} | 有一个 `ALTER DELETE` 批量数据删除声明 |
|
| E101-04 | 可指定范围的DELETE(删除)语句 | 部分 {.text-warning} | `ALTER DELETE` 语句用来批量删除数据 |
|
||||||
| **E111** | **单行SELECT语句** | **否。**{.text-danger} | |
|
| **E111** | **返回一行的SELECT语句** | **否**{.text-danger} | |
|
||||||
| **E121** | **基本光标支持** | **否。**{.text-danger} | |
|
| **E121** | **基本游标支持** | **否**{.text-danger} | |
|
||||||
| E121-01 | DECLARE CURSOR | 否。 {.text-danger} | |
|
| E121-01 | DECLARE CURSOR | 否 {.text-danger} | |
|
||||||
| E121-02 | 按列排序不需要在选择列表中 | 否。 {.text-danger} | |
|
| E121-02 | ORDER BY 涉及的列不需要出现在SELECT的列中 | 是 {.text-success} | |
|
||||||
| E121-03 | 按顺序排列的值表达式 | 否。 {.text-danger} | |
|
| E121-03 | ORDER BY 从句中的表达式 | 是 {.text-success} | |
|
||||||
| E121-04 | 公开声明 | 否。 {.text-danger} | |
|
| E121-04 | OPEN 语句 | 否 {.text-danger} | |
|
||||||
| E121-06 | 定位更新语句 | 否。 {.text-danger} | |
|
| E121-06 | 受游标位置控制的 UPDATE 语句 | 否 {.text-danger} | |
|
||||||
| E121-07 | 定位删除语句 | 否。 {.text-danger} | |
|
| E121-07 | 受游标位置控制的 DELETE 语句 | 否 {.text-danger} | |
|
||||||
| E121-08 | 关闭声明 | 否。 {.text-danger} | |
|
| E121-08 | CLOSE 语句 | 否 {.text-danger} | |
|
||||||
| E121-10 | FETCH语句:隐式NEXT | 否。 {.text-danger} | |
|
| E121-10 | FETCH 语句中包含隐式NEXT | 否 {.text-danger} | |
|
||||||
| E121-17 | 使用保持游标 | 否。 {.text-danger} | |
|
| E121-17 | WITH HOLD 游标 | 否 {.text-danger} | |
|
||||||
| **E131** | **空值支持(空值代替值)** | **部分**{.text-warning} | 一些限制适用 |
|
| **E131** | **空值支持** | **是**{.text-success} | 有部分限制 |
|
||||||
| **E141** | **基本完整性约束** | **部分**{.text-warning} | |
|
| **E141** | **基本完整性约束** | **部分**{.text-warning} | |
|
||||||
| E141-01 | 非空约束 | 是 {.text-success} | 注: `NOT NULL` 默认情况下,表列隐含 |
|
| E141-01 | NOT NULL(非空)约束 | 是 {.text-success} | 注: 默认情况下ClickHouse表中的列隐含`NOT NULL`约束 |
|
||||||
| E141-02 | 非空列的唯一约束 | 否。 {.text-danger} | |
|
| E141-02 | NOT NULL(非空)列的UNIQUE(唯一)约束 | 否 {.text-danger} | |
|
||||||
| E141-03 | 主键约束 | 否。 {.text-danger} | |
|
| E141-03 | 主键约束 | 部分 {.text-warning} | |
|
||||||
| E141-04 | 对于引用删除操作和引用更新操作,具有默认无操作的基本外键约束 | 否。 {.text-danger} | |
|
| E141-04 | 对于引用删除和引用更新操作,基本的FOREIGN KEY(外键)约束默认不进行任何操作(NO ACTION) | 否 {.text-danger} | |
|
||||||
| E141-06 | 检查约束 | 是 {.text-success} | |
|
| E141-06 | CHECK(检查)约束 | 是 {.text-success} | |
|
||||||
| E141-07 | 列默认值 | 是 {.text-success} | |
|
| E141-07 | 列默认值 | 是 {.text-success} | |
|
||||||
| E141-08 | 在主键上推断为非NULL | 是 {.text-success} | |
|
| E141-08 | 在主键上推断非空 | 是 {.text-success} | |
|
||||||
| E141-10 | 可以按任何顺序指定外键中的名称 | 否。 {.text-danger} | |
|
| E141-10 | 可以按任何顺序指定外键中的名称 | 否 {.text-danger} | |
|
||||||
| **E151** | **交易支持** | **否。**{.text-danger} | |
|
| **E151** | **事务支持** | **否**{.text-danger} | |
|
||||||
| E151-01 | 提交语句 | 否。 {.text-danger} | |
|
| E151-01 | COMMIT(提交)语句 | 否 {.text-danger} | |
|
||||||
| E151-02 | 回滚语句 | 否。 {.text-danger} | |
|
| E151-02 | ROLLBACK(回滚)语句 | 否 {.text-danger} | |
|
||||||
| **E152** | **基本设置事务语句** | **否。**{.text-danger} | |
|
| **E152** | **基本的SET TRANSACTION(设置事务隔离级别)语句** | **否**{.text-danger} | |
|
||||||
| E152-01 | SET TRANSACTION语句:隔离级别SERIALIZABLE子句 | 否。 {.text-danger} | |
|
| E152-01 | SET TRANSACTION语句:ISOLATION LEVEL SERIALIZABLE(隔离级别为串行化)从句 | 否 {.text-danger} | |
|
||||||
| E152-02 | SET TRANSACTION语句:只读和读写子句 | 否。 {.text-danger} | |
|
| E152-02 | SET TRANSACTION语句:READ ONLY(只读)和READ WRITE(读写)从句 | 否 {.text-danger} | |
|
||||||
| **E153** | **具有子查询的可更新查询** | **否。**{.text-danger} | |
|
| **E153** | **具有子查询的可更新查询** | **是**{.text-success} | |
|
||||||
| **E161** | **SQL注释使用前导双减** | **是**{.text-success} | |
|
| **E161** | **使用“--”符号作为SQL注释** | **是**{.text-success} | |
|
||||||
| **E171** | **SQLSTATE支持** | **否。**{.text-danger} | |
|
| **E171** | **SQLSTATE支持** | **否**{.text-danger} | |
|
||||||
| **E182** | **主机语言绑定** | **否。**{.text-danger} | |
|
| **E182** | **主机语言绑定** | **否**{.text-danger} | |
|
||||||
| **F031** | **基本架构操作** | **部分**{.text-warning} | |
|
| **F031** | **基本架构操作** | **部分**{.text-warning} | |
|
||||||
| F031-01 | CREATE TABLE语句创建持久基表 | 部分 {.text-warning} | 否。 `SYSTEM VERSIONING`, `ON COMMIT`, `GLOBAL`, `LOCAL`, `PRESERVE`, `DELETE`, `REF IS`, `WITH OPTIONS`, `UNDER`, `LIKE`, `PERIOD FOR` 子句,不支持用户解析的数据类型 |
|
| F031-01 | 使用 CREATE TABLE 语句创建持久表 | 部分 {.text-warning} | 不支持 `SYSTEM VERSIONING`, `ON COMMIT`, `GLOBAL`, `LOCAL`, `PRESERVE`, `DELETE`, `REF IS`, `WITH OPTIONS`, `UNDER`, `LIKE`, `PERIOD FOR` 从句,不支持用户解析的数据类型 |
|
||||||
| F031-02 | 创建视图语句 | 部分 {.text-warning} | 否。 `RECURSIVE`, `CHECK`, `UNDER`, `WITH OPTIONS` 子句,不支持用户解析的数据类型 |
|
| F031-02 | CREATE VIEW(创建视图)语句 | 部分 {.text-warning} | 不支持 `RECURSIVE`, `CHECK`, `UNDER`, `WITH OPTIONS` 从句,不支持用户解析的数据类型 |
|
||||||
| F031-03 | 赠款声明 | 是 {.text-success} | |
|
| F031-03 | GRANT(授权)语句 | 是 {.text-success} | |
|
||||||
| F031-04 | ALTER TABLE语句:ADD COLUMN子句 | 部分 {.text-warning} | 不支持 `GENERATED` 条款和系统时间段 |
|
| F031-04 | ALTER TABLE语句:ADD COLUMN从句 | 是 {.text-success} | 不支持 `GENERATED` 从句和以系统时间做参数 |
|
||||||
| F031-13 | DROP TABLE语句:RESTRICT子句 | 否。 {.text-danger} | |
|
| F031-13 | DROP TABLE语句:RESTRICT从句 | 否 {.text-danger} | |
|
||||||
| F031-16 | DROP VIEW语句:RESTRICT子句 | 否。 {.text-danger} | |
|
| F031-16 | DROP VIEW语句:RESTRICT子句 | 否 {.text-danger} | |
|
||||||
| F031-19 | REVOKE语句:RESTRICT子句 | 否。 {.text-danger} | |
|
| F031-19 | REVOKE语句:RESTRICT子句 | 否 {.text-danger} | |
|
||||||
| **F041** | **基本连接表** | **部分**{.text-warning} | |
|
| **F041** | **基本连接关系** | **部分**{.text-warning} | |
|
||||||
| F041-01 | Inner join(但不一定是INNER关键字) | 是 {.text-success} | |
|
| F041-01 | Inner join(但不一定是INNER关键字) | 是 {.text-success} | |
|
||||||
| F041-02 | 内部关键字 | 是 {.text-success} | |
|
| F041-02 | INNER 关键字 | 是 {.text-success} | |
|
||||||
| F041-03 | LEFT OUTER JOIN | 是 {.text-success} | |
|
| F041-03 | LEFT OUTER JOIN | 是 {.text-success} | |
|
||||||
| F041-04 | RIGHT OUTER JOIN | 是 {.text-success} | |
|
| F041-04 | RIGHT OUTER JOIN | 是 {.text-success} | |
|
||||||
| F041-05 | 可以嵌套外部连接 | 是 {.text-success} | |
|
| F041-05 | 外连接可嵌套 | 是 {.text-success} | |
|
||||||
| F041-07 | 左侧或右侧外部联接中的内部表也可用于内部联接 | 是 {.text-success} | |
|
| F041-07 | 左外部连接或右外连接中的内部表也可用于内部联接 | 是 {.text-success} | |
|
||||||
| F041-08 | 支持所有比较运算符(而不仅仅是=) | 否。 {.text-danger} | |
|
| F041-08 | 支持所有比较运算符(而不仅仅是=) | 否 {.text-danger} | |
|
||||||
| **F051** | **基本日期和时间** | **部分**{.text-warning} | |
|
| **F051** | **基本日期和时间** | **部分**{.text-warning} | |
|
||||||
| F051-01 | 日期数据类型(包括对日期文字的支持) | 部分 {.text-warning} | 没有文字 |
|
| F051-01 | DATE(日期)数据类型(并支持用于表达日期的字面量) | 是 {.text-success} | |
|
||||||
| F051-02 | 时间数据类型(包括对时间文字的支持),秒小数精度至少为0 | 否。 {.text-danger} | |
|
| F051-02 | TIME(时间)数据类型(并支持用于表达时间的字面量),小数秒精度至少为0 | 否 {.text-danger} | |
|
||||||
| F051-03 | 时间戳数据类型(包括对时间戳文字的支持),小数秒精度至少为0和6 | 否。 {.text-danger} | `DateTime64` 时间提供了类似的功能 |
|
| F051-03 | 时间戳数据类型(并支持用于表达时间戳的字面量),小数秒精度至少为0和6 | 是 {.text-success} | |
|
||||||
| F051-04 | 日期、时间和时间戳数据类型的比较谓词 | 部分 {.text-warning} | 只有一种数据类型可用 |
|
| F051-04 | 日期、时间和时间戳数据类型的比较谓词 | 是 {.text-success} | |
|
||||||
| F051-05 | Datetime类型和字符串类型之间的显式转换 | 是 {.text-success} | |
|
| F051-05 | Datetime 类型和字符串形式表达的时间之间的显式转换 | 是 {.text-success} | |
|
||||||
| F051-06 | CURRENT_DATE | 否。 {.text-danger} | `today()` 是相似的 |
|
| F051-06 | CURRENT_DATE | 否 {.text-danger} | 使用`today()`替代 |
|
||||||
| F051-07 | LOCALTIME | 否。 {.text-danger} | `now()` 是相似的 |
|
| F051-07 | LOCALTIME | 否 {.text-danger} | 使用`now()`替代 |
|
||||||
| F051-08 | LOCALTIMESTAMP | 否。 {.text-danger} | |
|
| F051-08 | LOCALTIMESTAMP | 否 {.text-danger} | |
|
||||||
| **F081** | **联盟和视图除外** | **部分**{.text-warning} | |
|
| **F081** | **视图的UNION和EXCEPT操作** | **部分**{.text-warning} | |
|
||||||
| **F131** | **分组操作** | **部分**{.text-warning} | |
|
| **F131** | **分组操作** | **部分**{.text-warning} | |
|
||||||
| F131-01 | WHERE、GROUP BY和HAVING子句在具有分组视图的查询中受支持 | 是 {.text-success} | |
|
| F131-01 | 在具有分组视图的查询中支持 WHERE、GROUP BY 和 HAVING 子句 | 是 {.text-success} | |
|
||||||
| F131-02 | 具有分组视图的查询中支持的多个表 | 是 {.text-success} | |
|
| F131-02 | 在分组视图中支持多张表 | 是 {.text-success} | |
|
||||||
| F131-03 | 设置具有分组视图的查询中支持的函数 | 是 {.text-success} | |
|
| F131-03 | 分组视图的查询中支持集合函数 | 是 {.text-success} | |
|
||||||
| F131-04 | 具有分组依据和具有子句和分组视图的子查询 | 是 {.text-success} | |
|
| F131-04 | 带有 `GROUP BY` 和 `HAVING` 从句,以及分组视图的子查询 | 是 {.text-success} | |
|
||||||
| F131-05 | 单行选择具有GROUP BY和具有子句和分组视图 | 非也。 {.text-danger} | |
|
| F131-05 | 带有 `GROUP BY` 和 `HAVING` 从句,以及分组视图的仅返回1条记录的SELECT查询 | 否 {.text-danger} | |
|
||||||
| **F181** | **多模块支持** | **否。**{.text-danger} | |
|
| **F181** | **多模块支持** | **否**{.text-danger} | |
|
||||||
| **F201** | **投函数** | **是**{.text-success} | |
|
| **F201** | **CAST 函数** | **是**{.text-success} | |
|
||||||
| **F221** | **显式默认值** | **否。**{.text-danger} | |
|
| **F221** | **显式默认值** | **否**{.text-danger} | |
|
||||||
| **F261** | **案例表达式** | **是**{.text-success} | |
|
| **F261** | **CASE 表达式** | **是**{.text-success} | |
|
||||||
| F261-01 | 简单案例 | 是 {.text-success} | |
|
| F261-01 | 简单 CASE 表达式 | 是 {.text-success} | |
|
||||||
| F261-02 | 检索案例 | 是 {.text-success} | |
|
| F261-02 | 搜索型 CASE 表达式 | 是 {.text-success} | |
|
||||||
| F261-03 | NULLIF | 是 {.text-success} | |
|
| F261-03 | NULLIF | 是 {.text-success} | |
|
||||||
| F261-04 | COALESCE | 是 {.text-success} | |
|
| F261-04 | COALESCE | 是 {.text-success} | |
|
||||||
| **F311** | **架构定义语句** | **部分**{.text-warning} | |
|
| **F311** | **架构定义语句** | **部分**{.text-warning} | |
|
||||||
| F311-01 | CREATE SCHEMA | 否。 {.text-danger} | |
|
| F311-01 | CREATE SCHEMA | 部分 {.text-warning} | 见`CREATE DATABASE` |
|
||||||
| F311-02 | 为持久基表创建表 | 是 {.text-success} | |
|
| F311-02 | 用于创建持久表的 CREATE TABLE | 是 {.text-success} | |
|
||||||
| F311-03 | CREATE VIEW | 是 {.text-success} | |
|
| F311-03 | CREATE VIEW | 是 {.text-success} | |
|
||||||
| F311-04 | CREATE VIEW: WITH CHECK OPTION | 否。 {.text-danger} | |
|
| F311-04 | CREATE VIEW: WITH CHECK OPTION | 否 {.text-danger} | |
|
||||||
| F311-05 | 赠款声明 | 是 {.text-success} | |
|
| F311-05 | GRANT 语句 | 是 {.text-success} | |
|
||||||
| **F471** | **标量子查询值** | **是**{.text-success} | |
|
| **F471** | **标量子查询** | **是**{.text-success} | |
|
||||||
| **F481** | **扩展空谓词** | **是**{.text-success} | |
|
| **F481** | **扩展 NULL 谓词** | **是**{.text-success} | |
|
||||||
| **F812** | **基本标记** | **否。**{.text-danger} | |
|
| **F812** | **基本标志位** | **否**{.text-danger} | |
|
||||||
| **T321** | **基本的SQL调用例程** | **否。**{.text-danger} | |
|
| **S011** | **用于不重复数据的数据类型** | **否**{.text-danger} | |
|
||||||
| T321-01 | 无重载的用户定义函数 | 否。 {.text-danger} | |
|
| **T321** | **基本的SQL调用例程** | **否**{.text-danger} | |
|
||||||
| T321-02 | 无重载的用户定义存储过程 | 否。 {.text-danger} | |
|
| T321-01 | 没有重载的用户定义函数 | 否 {.text-danger} | |
|
||||||
| T321-03 | 函数调用 | 否。 {.text-danger} | |
|
| T321-02 | 没有重载的用户定义存储过程 | 否 {.text-danger} | |
|
||||||
| T321-04 | 电话声明 | 否。 {.text-danger} | |
|
| T321-03 | 函数调用 | 否 {.text-danger} | |
|
||||||
| T321-05 | 退货声明 | 否。 {.text-danger} | |
|
| T321-04 | CALL 语句 | 否 {.text-danger} | |
|
||||||
| **T631** | **在一个列表元素的谓词中** | **是**{.text-success} | |
|
| T321-05 | RETURN 语句 | 否 {.text-danger} | |
|
||||||
|
| **T631** | **IN 谓词后接一个列表** | **是**{.text-success} | |
|
||||||
|
@ -286,7 +286,7 @@ bool Client::executeMultiQuery(const String & all_queries_text)
|
|||||||
// , where the inline data is delimited by semicolon and not by a
|
// , where the inline data is delimited by semicolon and not by a
|
||||||
// newline.
|
// newline.
|
||||||
auto * insert_ast = parsed_query->as<ASTInsertQuery>();
|
auto * insert_ast = parsed_query->as<ASTInsertQuery>();
|
||||||
if (insert_ast && insert_ast->data)
|
if (insert_ast && isSyncInsertWithData(*insert_ast, global_context))
|
||||||
{
|
{
|
||||||
this_query_end = insert_ast->end;
|
this_query_end = insert_ast->end;
|
||||||
adjustQueryEnd(this_query_end, all_queries_end, global_context->getSettingsRef().max_parser_depth);
|
adjustQueryEnd(this_query_end, all_queries_end, global_context->getSettingsRef().max_parser_depth);
|
||||||
|
@ -324,7 +324,7 @@ int Keeper::main(const std::vector<std::string> & /*args*/)
|
|||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
LOG_WARNING(log, message);
|
LOG_WARNING(log, fmt::runtime(message));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -37,7 +37,7 @@ namespace
|
|||||||
if (!response.sent())
|
if (!response.sent())
|
||||||
*response.send() << message << std::endl;
|
*response.send() << message << std::endl;
|
||||||
|
|
||||||
LOG_WARNING(&Poco::Logger::get("LibraryBridge"), message);
|
LOG_WARNING(&Poco::Logger::get("LibraryBridge"), fmt::runtime(message));
|
||||||
}
|
}
|
||||||
|
|
||||||
std::shared_ptr<Block> parseColumns(std::string && column_string)
|
std::shared_ptr<Block> parseColumns(std::string && column_string)
|
||||||
@ -123,7 +123,7 @@ void LibraryRequestHandler::handleRequest(HTTPServerRequest & request, HTTPServe
|
|||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
LOG_TRACE(log, "Cannot clone from dictionary with id: {}, will call libNew instead");
|
LOG_TRACE(log, "Cannot clone from dictionary with id: {}, will call libNew instead", from_dictionary_id);
|
||||||
lib_new = true;
|
lib_new = true;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -178,7 +178,7 @@ void LibraryRequestHandler::handleRequest(HTTPServerRequest & request, HTTPServe
|
|||||||
catch (const Exception & ex)
|
catch (const Exception & ex)
|
||||||
{
|
{
|
||||||
processError(response, "Invalid 'sample_block' parameter in request body '" + ex.message() + "'");
|
processError(response, "Invalid 'sample_block' parameter in request body '" + ex.message() + "'");
|
||||||
LOG_WARNING(log, ex.getStackTraceString());
|
LOG_WARNING(log, fmt::runtime(ex.getStackTraceString()));
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -278,7 +278,7 @@ void LibraryRequestHandler::handleRequest(HTTPServerRequest & request, HTTPServe
|
|||||||
catch (const Exception & ex)
|
catch (const Exception & ex)
|
||||||
{
|
{
|
||||||
processError(response, "Invalid 'requested_block' parameter in request body '" + ex.message() + "'");
|
processError(response, "Invalid 'requested_block' parameter in request body '" + ex.message() + "'");
|
||||||
LOG_WARNING(log, ex.getStackTraceString());
|
LOG_WARNING(log, fmt::runtime(ex.getStackTraceString()));
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -77,7 +77,7 @@ void ODBCColumnsInfoHandler::handleRequest(HTTPServerRequest & request, HTTPServ
|
|||||||
response.setStatusAndReason(Poco::Net::HTTPResponse::HTTP_INTERNAL_SERVER_ERROR);
|
response.setStatusAndReason(Poco::Net::HTTPResponse::HTTP_INTERNAL_SERVER_ERROR);
|
||||||
if (!response.sent())
|
if (!response.sent())
|
||||||
*response.send() << message << std::endl;
|
*response.send() << message << std::endl;
|
||||||
LOG_WARNING(log, message);
|
LOG_WARNING(log, fmt::runtime(message));
|
||||||
};
|
};
|
||||||
|
|
||||||
if (!params.has("table"))
|
if (!params.has("table"))
|
||||||
|
@ -29,7 +29,7 @@ void IdentifierQuoteHandler::handleRequest(HTTPServerRequest & request, HTTPServ
|
|||||||
response.setStatusAndReason(Poco::Net::HTTPResponse::HTTP_INTERNAL_SERVER_ERROR);
|
response.setStatusAndReason(Poco::Net::HTTPResponse::HTTP_INTERNAL_SERVER_ERROR);
|
||||||
if (!response.sent())
|
if (!response.sent())
|
||||||
*response.send() << message << std::endl;
|
*response.send() << message << std::endl;
|
||||||
LOG_WARNING(log, message);
|
LOG_WARNING(log, fmt::runtime(message));
|
||||||
};
|
};
|
||||||
|
|
||||||
if (!params.has("connection_string"))
|
if (!params.has("connection_string"))
|
||||||
|
@ -46,7 +46,7 @@ void ODBCHandler::processError(HTTPServerResponse & response, const std::string
|
|||||||
response.setStatusAndReason(HTTPResponse::HTTP_INTERNAL_SERVER_ERROR);
|
response.setStatusAndReason(HTTPResponse::HTTP_INTERNAL_SERVER_ERROR);
|
||||||
if (!response.sent())
|
if (!response.sent())
|
||||||
*response.send() << message << std::endl;
|
*response.send() << message << std::endl;
|
||||||
LOG_WARNING(log, message);
|
LOG_WARNING(log, fmt::runtime(message));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@ -102,7 +102,7 @@ void ODBCHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse
|
|||||||
catch (const Exception & ex)
|
catch (const Exception & ex)
|
||||||
{
|
{
|
||||||
processError(response, "Invalid 'sample_block' parameter in request body '" + ex.message() + "'");
|
processError(response, "Invalid 'sample_block' parameter in request body '" + ex.message() + "'");
|
||||||
LOG_ERROR(log, ex.getStackTraceString());
|
LOG_ERROR(log, fmt::runtime(ex.getStackTraceString()));
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -37,7 +37,7 @@ void SchemaAllowedHandler::handleRequest(HTTPServerRequest & request, HTTPServer
|
|||||||
response.setStatusAndReason(Poco::Net::HTTPResponse::HTTP_INTERNAL_SERVER_ERROR);
|
response.setStatusAndReason(Poco::Net::HTTPResponse::HTTP_INTERNAL_SERVER_ERROR);
|
||||||
if (!response.sent())
|
if (!response.sent())
|
||||||
*response.send() << message << std::endl;
|
*response.send() << message << std::endl;
|
||||||
LOG_WARNING(log, message);
|
LOG_WARNING(log, fmt::runtime(message));
|
||||||
};
|
};
|
||||||
|
|
||||||
if (!params.has("connection_string"))
|
if (!params.has("connection_string"))
|
||||||
|
@ -80,6 +80,7 @@
|
|||||||
#include <Common/Elf.h>
|
#include <Common/Elf.h>
|
||||||
#include <Server/MySQLHandlerFactory.h>
|
#include <Server/MySQLHandlerFactory.h>
|
||||||
#include <Server/PostgreSQLHandlerFactory.h>
|
#include <Server/PostgreSQLHandlerFactory.h>
|
||||||
|
#include <Server/CertificateReloader.h>
|
||||||
#include <Server/ProtocolServerAdapter.h>
|
#include <Server/ProtocolServerAdapter.h>
|
||||||
#include <Server/HTTP/HTTPServer.h>
|
#include <Server/HTTP/HTTPServer.h>
|
||||||
#include <Interpreters/AsynchronousInsertQueue.h>
|
#include <Interpreters/AsynchronousInsertQueue.h>
|
||||||
@ -972,7 +973,9 @@ if (ThreadFuzzer::instance().isEffective())
|
|||||||
global_context->updateInterserverCredentials(*config);
|
global_context->updateInterserverCredentials(*config);
|
||||||
|
|
||||||
CompressionCodecEncrypted::Configuration::instance().tryLoad(*config, "encryption_codecs");
|
CompressionCodecEncrypted::Configuration::instance().tryLoad(*config, "encryption_codecs");
|
||||||
|
#if USE_SSL
|
||||||
|
CertificateReloader::instance().tryLoad(*config);
|
||||||
|
#endif
|
||||||
ProfileEvents::increment(ProfileEvents::MainConfigLoads);
|
ProfileEvents::increment(ProfileEvents::MainConfigLoads);
|
||||||
},
|
},
|
||||||
/* already_loaded = */ false); /// Reload it right now (initial loading)
|
/* already_loaded = */ false); /// Reload it right now (initial loading)
|
||||||
@ -1362,6 +1365,16 @@ if (ThreadFuzzer::instance().isEffective())
|
|||||||
ErrorCodes::NO_ELEMENTS_IN_CONFIG);
|
ErrorCodes::NO_ELEMENTS_IN_CONFIG);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (servers.empty())
|
||||||
|
throw Exception("No servers started (add valid listen_host and 'tcp_port' or 'http_port' to configuration file.)",
|
||||||
|
ErrorCodes::NO_ELEMENTS_IN_CONFIG);
|
||||||
|
|
||||||
|
#if USE_SSL
|
||||||
|
CertificateReloader::instance().tryLoad(config());
|
||||||
|
#endif
|
||||||
|
|
||||||
|
/// Must be done after initialization of `servers`, because async_metrics will access `servers` variable from its thread.
|
||||||
|
|
||||||
async_metrics.start();
|
async_metrics.start();
|
||||||
|
|
||||||
{
|
{
|
||||||
|
@ -244,7 +244,7 @@
|
|||||||
openssl dhparam -out /etc/clickhouse-server/dhparam.pem 4096
|
openssl dhparam -out /etc/clickhouse-server/dhparam.pem 4096
|
||||||
Only file format with BEGIN DH PARAMETERS is supported.
|
Only file format with BEGIN DH PARAMETERS is supported.
|
||||||
-->
|
-->
|
||||||
<dhParamsFile>/etc/clickhouse-server/dhparam.pem</dhParamsFile>
|
<!-- <dhParamsFile>/etc/clickhouse-server/dhparam.pem</dhParamsFile> -->
|
||||||
<verificationMode>none</verificationMode>
|
<verificationMode>none</verificationMode>
|
||||||
<loadDefaultCAFile>true</loadDefaultCAFile>
|
<loadDefaultCAFile>true</loadDefaultCAFile>
|
||||||
<cacheSessions>true</cacheSessions>
|
<cacheSessions>true</cacheSessions>
|
||||||
|
44
src/Access/CachedAccessChecking.cpp
Normal file
44
src/Access/CachedAccessChecking.cpp
Normal file
@ -0,0 +1,44 @@
|
|||||||
|
#include <Access/CachedAccessChecking.h>
|
||||||
|
#include <Access/ContextAccess.h>
|
||||||
|
|
||||||
|
|
||||||
|
namespace DB
|
||||||
|
{
|
||||||
|
CachedAccessChecking::CachedAccessChecking(const std::shared_ptr<const ContextAccess> & access_, AccessFlags access_flags_)
|
||||||
|
: CachedAccessChecking(access_, AccessRightsElement{access_flags_})
|
||||||
|
{
|
||||||
|
}
|
||||||
|
|
||||||
|
CachedAccessChecking::CachedAccessChecking(const std::shared_ptr<const ContextAccess> & access_, const AccessRightsElement & element_)
|
||||||
|
: access(access_), element(element_)
|
||||||
|
{
|
||||||
|
}
|
||||||
|
|
||||||
|
CachedAccessChecking::~CachedAccessChecking() = default;
|
||||||
|
|
||||||
|
bool CachedAccessChecking::checkAccess(bool throw_if_denied)
|
||||||
|
{
|
||||||
|
if (checked)
|
||||||
|
return result;
|
||||||
|
if (throw_if_denied)
|
||||||
|
{
|
||||||
|
try
|
||||||
|
{
|
||||||
|
access->checkAccess(element);
|
||||||
|
result = true;
|
||||||
|
}
|
||||||
|
catch (...)
|
||||||
|
{
|
||||||
|
result = false;
|
||||||
|
throw;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
result = access->isGranted(element);
|
||||||
|
}
|
||||||
|
checked = true;
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
29
src/Access/CachedAccessChecking.h
Normal file
29
src/Access/CachedAccessChecking.h
Normal file
@ -0,0 +1,29 @@
|
|||||||
|
#pragma once
|
||||||
|
|
||||||
|
#include <Access/Common/AccessRightsElement.h>
|
||||||
|
#include <memory>
|
||||||
|
|
||||||
|
|
||||||
|
namespace DB
|
||||||
|
{
|
||||||
|
class ContextAccess;
|
||||||
|
|
||||||
|
/// Checks if the current user has a specified access type granted,
|
||||||
|
/// and if it's checked another time later, it will just return the first result.
|
||||||
|
class CachedAccessChecking
|
||||||
|
{
|
||||||
|
public:
|
||||||
|
CachedAccessChecking(const std::shared_ptr<const ContextAccess> & access_, AccessFlags access_flags_);
|
||||||
|
CachedAccessChecking(const std::shared_ptr<const ContextAccess> & access_, const AccessRightsElement & element_);
|
||||||
|
~CachedAccessChecking();
|
||||||
|
|
||||||
|
bool checkAccess(bool throw_if_denied = true);
|
||||||
|
|
||||||
|
private:
|
||||||
|
const std::shared_ptr<const ContextAccess> access;
|
||||||
|
const AccessRightsElement element;
|
||||||
|
bool checked = false;
|
||||||
|
bool result = false;
|
||||||
|
};
|
||||||
|
|
||||||
|
}
|
@ -7,6 +7,7 @@
|
|||||||
namespace ErrorCodes
|
namespace ErrorCodes
|
||||||
{
|
{
|
||||||
extern const int BAD_ARGUMENTS;
|
extern const int BAD_ARGUMENTS;
|
||||||
|
extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@ -27,14 +28,24 @@ struct StudentTTestData : public TTestMoments<Float64>
|
|||||||
{
|
{
|
||||||
static constexpr auto name = "studentTTest";
|
static constexpr auto name = "studentTTest";
|
||||||
|
|
||||||
std::pair<Float64, Float64> getResult() const
|
bool hasEnoughObservations() const
|
||||||
{
|
{
|
||||||
Float64 mean_x = x1 / nx;
|
return nx > 0 && ny > 0 && nx + ny > 2;
|
||||||
Float64 mean_y = y1 / ny;
|
}
|
||||||
|
|
||||||
|
Float64 getDegreesOfFreedom() const
|
||||||
|
{
|
||||||
|
return nx + ny - 2;
|
||||||
|
}
|
||||||
|
|
||||||
|
std::tuple<Float64, Float64> getResult() const
|
||||||
|
{
|
||||||
|
Float64 mean_x = getMeanX();
|
||||||
|
Float64 mean_y = getMeanY();
|
||||||
|
|
||||||
/// To estimate the variance we first estimate two means.
|
/// To estimate the variance we first estimate two means.
|
||||||
/// That's why the number of degrees of freedom is the total number of values of both samples minus 2.
|
/// That's why the number of degrees of freedom is the total number of values of both samples minus 2.
|
||||||
Float64 degrees_of_freedom = nx + ny - 2;
|
Float64 degrees_of_freedom = getDegreesOfFreedom();
|
||||||
|
|
||||||
/// Calculate s^2
|
/// Calculate s^2
|
||||||
/// The original formulae looks like
|
/// The original formulae looks like
|
||||||
@ -59,12 +70,14 @@ AggregateFunctionPtr createAggregateFunctionStudentTTest(
|
|||||||
const std::string & name, const DataTypes & argument_types, const Array & parameters, const Settings *)
|
const std::string & name, const DataTypes & argument_types, const Array & parameters, const Settings *)
|
||||||
{
|
{
|
||||||
assertBinary(name, argument_types);
|
assertBinary(name, argument_types);
|
||||||
assertNoParameters(name, parameters);
|
|
||||||
|
if (parameters.size() > 1)
|
||||||
|
throw Exception("Aggregate function " + name + " requires zero or one parameter.", ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);
|
||||||
|
|
||||||
if (!isNumber(argument_types[0]) || !isNumber(argument_types[1]))
|
if (!isNumber(argument_types[0]) || !isNumber(argument_types[1]))
|
||||||
throw Exception("Aggregate function " + name + " only supports numerical types", ErrorCodes::BAD_ARGUMENTS);
|
throw Exception("Aggregate function " + name + " only supports numerical types", ErrorCodes::BAD_ARGUMENTS);
|
||||||
|
|
||||||
return std::make_shared<AggregateFunctionTTest<StudentTTestData>>(argument_types);
|
return std::make_shared<AggregateFunctionTTest<StudentTTestData>>(argument_types, parameters);
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
@ -9,6 +9,7 @@
|
|||||||
#include <DataTypes/DataTypesNumber.h>
|
#include <DataTypes/DataTypesNumber.h>
|
||||||
#include <DataTypes/DataTypeTuple.h>
|
#include <DataTypes/DataTypeTuple.h>
|
||||||
#include <cmath>
|
#include <cmath>
|
||||||
|
#include <cfloat>
|
||||||
|
|
||||||
|
|
||||||
/// This function is used in implementations of different T-Tests.
|
/// This function is used in implementations of different T-Tests.
|
||||||
@ -28,6 +29,11 @@ struct Settings;
|
|||||||
class ReadBuffer;
|
class ReadBuffer;
|
||||||
class WriteBuffer;
|
class WriteBuffer;
|
||||||
|
|
||||||
|
namespace ErrorCodes
|
||||||
|
{
|
||||||
|
extern const int BAD_ARGUMENTS;
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* If you have a cumulative distribution function F, then calculating the p-value for given statistic T is simply 1−F(T)
|
* If you have a cumulative distribution function F, then calculating the p-value for given statistic T is simply 1−F(T)
|
||||||
* In our case p-value is two-sided, so we multiply it by 2.
|
* In our case p-value is two-sided, so we multiply it by 2.
|
||||||
@ -79,10 +85,29 @@ template <typename Data>
|
|||||||
class AggregateFunctionTTest :
|
class AggregateFunctionTTest :
|
||||||
public IAggregateFunctionDataHelper<Data, AggregateFunctionTTest<Data>>
|
public IAggregateFunctionDataHelper<Data, AggregateFunctionTTest<Data>>
|
||||||
{
|
{
|
||||||
|
private:
|
||||||
|
bool need_confidence_interval = false;
|
||||||
|
Float64 confidence_level;
|
||||||
public:
|
public:
|
||||||
AggregateFunctionTTest(const DataTypes & arguments)
|
AggregateFunctionTTest(const DataTypes & arguments, const Array & params)
|
||||||
: IAggregateFunctionDataHelper<Data, AggregateFunctionTTest<Data>>({arguments}, {})
|
: IAggregateFunctionDataHelper<Data, AggregateFunctionTTest<Data>>({arguments}, params)
|
||||||
{
|
{
|
||||||
|
if (params.size() > 0)
|
||||||
|
{
|
||||||
|
need_confidence_interval = true;
|
||||||
|
confidence_level = params.at(0).safeGet<Float64>();
|
||||||
|
|
||||||
|
if (!std::isfinite(confidence_level))
|
||||||
|
{
|
||||||
|
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Aggregate function {} requires finite parameter values.", Data::name);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (confidence_level <= 0.0 || confidence_level >= 1.0 || fabs(confidence_level - 0.0) < DBL_EPSILON || fabs(confidence_level - 1.0) < DBL_EPSILON)
|
||||||
|
{
|
||||||
|
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Confidence level parameter must be between 0 and 1 in aggregate function {}.", Data::name);
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
String getName() const override
|
String getName() const override
|
||||||
@ -91,6 +116,31 @@ public:
|
|||||||
}
|
}
|
||||||
|
|
||||||
DataTypePtr getReturnType() const override
|
DataTypePtr getReturnType() const override
|
||||||
|
{
|
||||||
|
if (need_confidence_interval)
|
||||||
|
{
|
||||||
|
DataTypes types
|
||||||
|
{
|
||||||
|
std::make_shared<DataTypeNumber<Float64>>(),
|
||||||
|
std::make_shared<DataTypeNumber<Float64>>(),
|
||||||
|
std::make_shared<DataTypeNumber<Float64>>(),
|
||||||
|
std::make_shared<DataTypeNumber<Float64>>(),
|
||||||
|
};
|
||||||
|
|
||||||
|
Strings names
|
||||||
|
{
|
||||||
|
"t_statistic",
|
||||||
|
"p_value",
|
||||||
|
"confidence_interval_low",
|
||||||
|
"confidence_interval_high",
|
||||||
|
};
|
||||||
|
|
||||||
|
return std::make_shared<DataTypeTuple>(
|
||||||
|
std::move(types),
|
||||||
|
std::move(names)
|
||||||
|
);
|
||||||
|
}
|
||||||
|
else
|
||||||
{
|
{
|
||||||
DataTypes types
|
DataTypes types
|
||||||
{
|
{
|
||||||
@ -101,7 +151,7 @@ public:
|
|||||||
Strings names
|
Strings names
|
||||||
{
|
{
|
||||||
"t_statistic",
|
"t_statistic",
|
||||||
"p_value"
|
"p_value",
|
||||||
};
|
};
|
||||||
|
|
||||||
return std::make_shared<DataTypeTuple>(
|
return std::make_shared<DataTypeTuple>(
|
||||||
@ -109,6 +159,7 @@ public:
|
|||||||
std::move(names)
|
std::move(names)
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
bool allocatesMemoryInArena() const override { return false; }
|
bool allocatesMemoryInArena() const override { return false; }
|
||||||
|
|
||||||
@ -140,17 +191,46 @@ public:
|
|||||||
|
|
||||||
void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override
|
void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override
|
||||||
{
|
{
|
||||||
auto [t_statistic, p_value] = this->data(place).getResult();
|
auto & data = this->data(place);
|
||||||
|
auto & column_tuple = assert_cast<ColumnTuple &>(to);
|
||||||
|
|
||||||
|
if (!data.hasEnoughObservations() || data.isEssentiallyConstant())
|
||||||
|
{
|
||||||
|
auto & column_stat = assert_cast<ColumnVector<Float64> &>(column_tuple.getColumn(0));
|
||||||
|
auto & column_value = assert_cast<ColumnVector<Float64> &>(column_tuple.getColumn(1));
|
||||||
|
column_stat.getData().push_back(std::numeric_limits<Float64>::quiet_NaN());
|
||||||
|
column_value.getData().push_back(std::numeric_limits<Float64>::quiet_NaN());
|
||||||
|
|
||||||
|
if (need_confidence_interval)
|
||||||
|
{
|
||||||
|
auto & column_ci_low = assert_cast<ColumnVector<Float64> &>(column_tuple.getColumn(2));
|
||||||
|
auto & column_ci_high = assert_cast<ColumnVector<Float64> &>(column_tuple.getColumn(3));
|
||||||
|
column_ci_low.getData().push_back(std::numeric_limits<Float64>::quiet_NaN());
|
||||||
|
column_ci_high.getData().push_back(std::numeric_limits<Float64>::quiet_NaN());
|
||||||
|
}
|
||||||
|
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
auto [t_statistic, p_value] = data.getResult();
|
||||||
|
|
||||||
/// Because p-value is a probability.
|
/// Because p-value is a probability.
|
||||||
p_value = std::min(1.0, std::max(0.0, p_value));
|
p_value = std::min(1.0, std::max(0.0, p_value));
|
||||||
|
|
||||||
auto & column_tuple = assert_cast<ColumnTuple &>(to);
|
|
||||||
auto & column_stat = assert_cast<ColumnVector<Float64> &>(column_tuple.getColumn(0));
|
auto & column_stat = assert_cast<ColumnVector<Float64> &>(column_tuple.getColumn(0));
|
||||||
auto & column_value = assert_cast<ColumnVector<Float64> &>(column_tuple.getColumn(1));
|
auto & column_value = assert_cast<ColumnVector<Float64> &>(column_tuple.getColumn(1));
|
||||||
|
|
||||||
column_stat.getData().push_back(t_statistic);
|
column_stat.getData().push_back(t_statistic);
|
||||||
column_value.getData().push_back(p_value);
|
column_value.getData().push_back(p_value);
|
||||||
|
|
||||||
|
if (need_confidence_interval)
|
||||||
|
{
|
||||||
|
auto [ci_low, ci_high] = data.getConfidenceIntervals(confidence_level, data.getDegreesOfFreedom());
|
||||||
|
auto & column_ci_low = assert_cast<ColumnVector<Float64> &>(column_tuple.getColumn(2));
|
||||||
|
auto & column_ci_high = assert_cast<ColumnVector<Float64> &>(column_tuple.getColumn(3));
|
||||||
|
column_ci_low.getData().push_back(ci_low);
|
||||||
|
column_ci_high.getData().push_back(ci_high);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -7,6 +7,7 @@
|
|||||||
namespace ErrorCodes
|
namespace ErrorCodes
|
||||||
{
|
{
|
||||||
extern const int BAD_ARGUMENTS;
|
extern const int BAD_ARGUMENTS;
|
||||||
|
extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@ -21,34 +22,38 @@ struct WelchTTestData : public TTestMoments<Float64>
|
|||||||
{
|
{
|
||||||
static constexpr auto name = "welchTTest";
|
static constexpr auto name = "welchTTest";
|
||||||
|
|
||||||
std::pair<Float64, Float64> getResult() const
|
bool hasEnoughObservations() const
|
||||||
{
|
{
|
||||||
Float64 mean_x = x1 / nx;
|
return nx > 1 && ny > 1;
|
||||||
Float64 mean_y = y1 / ny;
|
}
|
||||||
|
|
||||||
/// s_x^2, s_y^2
|
Float64 getDegreesOfFreedom() const
|
||||||
|
{
|
||||||
/// The original formulae looks like \frac{1}{size_x - 1} \sum_{i = 1}^{size_x}{(x_i - \bar{x}) ^ 2}
|
Float64 mean_x = getMeanX();
|
||||||
/// But we made some mathematical transformations not to store original sequences.
|
Float64 mean_y = getMeanY();
|
||||||
/// Also we dropped sqrt, because later it will be squared later.
|
|
||||||
|
|
||||||
Float64 sx2 = (x2 + nx * mean_x * mean_x - 2 * mean_x * x1) / (nx - 1);
|
Float64 sx2 = (x2 + nx * mean_x * mean_x - 2 * mean_x * x1) / (nx - 1);
|
||||||
Float64 sy2 = (y2 + ny * mean_y * mean_y - 2 * mean_y * y1) / (ny - 1);
|
Float64 sy2 = (y2 + ny * mean_y * mean_y - 2 * mean_y * y1) / (ny - 1);
|
||||||
|
|
||||||
/// t-statistic
|
|
||||||
Float64 t_stat = (mean_x - mean_y) / sqrt(sx2 / nx + sy2 / ny);
|
|
||||||
|
|
||||||
/// degrees of freedom
|
|
||||||
|
|
||||||
Float64 numerator_sqrt = sx2 / nx + sy2 / ny;
|
Float64 numerator_sqrt = sx2 / nx + sy2 / ny;
|
||||||
Float64 numerator = numerator_sqrt * numerator_sqrt;
|
Float64 numerator = numerator_sqrt * numerator_sqrt;
|
||||||
|
|
||||||
Float64 denominator_x = sx2 * sx2 / (nx * nx * (nx - 1));
|
Float64 denominator_x = sx2 * sx2 / (nx * nx * (nx - 1));
|
||||||
Float64 denominator_y = sy2 * sy2 / (ny * ny * (ny - 1));
|
Float64 denominator_y = sy2 * sy2 / (ny * ny * (ny - 1));
|
||||||
|
|
||||||
Float64 degrees_of_freedom = numerator / (denominator_x + denominator_y);
|
return numerator / (denominator_x + denominator_y);
|
||||||
|
}
|
||||||
|
|
||||||
return {t_stat, getPValue(degrees_of_freedom, t_stat * t_stat)};
|
std::tuple<Float64, Float64> getResult() const
|
||||||
|
{
|
||||||
|
Float64 mean_x = getMeanX();
|
||||||
|
Float64 mean_y = getMeanY();
|
||||||
|
|
||||||
|
/// t-statistic
|
||||||
|
Float64 se = getStandardError();
|
||||||
|
Float64 t_stat = (mean_x - mean_y) / se;
|
||||||
|
|
||||||
|
return {t_stat, getPValue(getDegreesOfFreedom(), t_stat * t_stat)};
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -56,12 +61,14 @@ AggregateFunctionPtr createAggregateFunctionWelchTTest(
|
|||||||
const std::string & name, const DataTypes & argument_types, const Array & parameters, const Settings *)
|
const std::string & name, const DataTypes & argument_types, const Array & parameters, const Settings *)
|
||||||
{
|
{
|
||||||
assertBinary(name, argument_types);
|
assertBinary(name, argument_types);
|
||||||
assertNoParameters(name, parameters);
|
|
||||||
|
if (parameters.size() > 1)
|
||||||
|
throw Exception("Aggregate function " + name + " requires zero or one parameter.", ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);
|
||||||
|
|
||||||
if (!isNumber(argument_types[0]) || !isNumber(argument_types[1]))
|
if (!isNumber(argument_types[0]) || !isNumber(argument_types[1]))
|
||||||
throw Exception("Aggregate function " + name + " only supports numerical types", ErrorCodes::BAD_ARGUMENTS);
|
throw Exception("Aggregate function " + name + " only supports numerical types", ErrorCodes::BAD_ARGUMENTS);
|
||||||
|
|
||||||
return std::make_shared<AggregateFunctionTTest<WelchTTestData>>(argument_types);
|
return std::make_shared<AggregateFunctionTTest<WelchTTestData>>(argument_types, parameters);
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
@ -2,7 +2,9 @@
|
|||||||
|
|
||||||
#include <IO/WriteHelpers.h>
|
#include <IO/WriteHelpers.h>
|
||||||
#include <IO/ReadHelpers.h>
|
#include <IO/ReadHelpers.h>
|
||||||
|
#include <boost/math/distributions/students_t.hpp>
|
||||||
#include <boost/math/distributions/normal.hpp>
|
#include <boost/math/distributions/normal.hpp>
|
||||||
|
#include <cfloat>
|
||||||
|
|
||||||
|
|
||||||
namespace DB
|
namespace DB
|
||||||
@ -358,6 +360,50 @@ struct TTestMoments
|
|||||||
{
|
{
|
||||||
readPODBinary(*this, buf);
|
readPODBinary(*this, buf);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
Float64 getMeanX() const
|
||||||
|
{
|
||||||
|
return x1 / nx;
|
||||||
|
}
|
||||||
|
|
||||||
|
Float64 getMeanY() const
|
||||||
|
{
|
||||||
|
return y1 / ny;
|
||||||
|
}
|
||||||
|
|
||||||
|
Float64 getStandardError() const
|
||||||
|
{
|
||||||
|
/// The original formulae looks like \frac{1}{size_x - 1} \sum_{i = 1}^{size_x}{(x_i - \bar{x}) ^ 2}
|
||||||
|
/// But we made some mathematical transformations not to store original sequences.
|
||||||
|
/// Also we dropped sqrt, because later it will be squared later.
|
||||||
|
Float64 mean_x = getMeanX();
|
||||||
|
Float64 mean_y = getMeanY();
|
||||||
|
|
||||||
|
Float64 sx2 = (x2 + nx * mean_x * mean_x - 2 * mean_x * x1) / (nx - 1);
|
||||||
|
Float64 sy2 = (y2 + ny * mean_y * mean_y - 2 * mean_y * y1) / (ny - 1);
|
||||||
|
|
||||||
|
return sqrt(sx2 / nx + sy2 / ny);
|
||||||
|
}
|
||||||
|
|
||||||
|
std::pair<Float64, Float64> getConfidenceIntervals(Float64 confidence_level, Float64 degrees_of_freedom) const
|
||||||
|
{
|
||||||
|
Float64 mean_x = getMeanX();
|
||||||
|
Float64 mean_y = getMeanY();
|
||||||
|
Float64 se = getStandardError();
|
||||||
|
|
||||||
|
boost::math::students_t dist(degrees_of_freedom);
|
||||||
|
Float64 t = boost::math::quantile(boost::math::complement(dist, (1.0 - confidence_level) / 2.0));
|
||||||
|
Float64 mean_diff = mean_x - mean_y;
|
||||||
|
Float64 ci_low = mean_diff - t * se;
|
||||||
|
Float64 ci_high = mean_diff + t * se;
|
||||||
|
|
||||||
|
return {ci_low, ci_high};
|
||||||
|
}
|
||||||
|
|
||||||
|
bool isEssentiallyConstant() const
|
||||||
|
{
|
||||||
|
return getStandardError() < 10 * DBL_EPSILON * std::max(std::abs(getMeanX()), std::abs(getMeanY()));
|
||||||
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
template <typename T>
|
template <typename T>
|
||||||
|
@ -262,9 +262,7 @@ struct QuantileExactLow : public QuantileExactBase<Value, QuantileExactLow<Value
|
|||||||
{
|
{
|
||||||
if (!array.empty())
|
if (!array.empty())
|
||||||
{
|
{
|
||||||
// sort inputs in ascending order
|
size_t n = 0;
|
||||||
::sort(array.begin(), array.end());
|
|
||||||
|
|
||||||
// if level is 0.5 then compute the "low" median of the sorted array
|
// if level is 0.5 then compute the "low" median of the sorted array
|
||||||
// by the method of rounding.
|
// by the method of rounding.
|
||||||
if (level == 0.5)
|
if (level == 0.5)
|
||||||
@ -272,11 +270,11 @@ struct QuantileExactLow : public QuantileExactBase<Value, QuantileExactLow<Value
|
|||||||
auto s = array.size();
|
auto s = array.size();
|
||||||
if (s % 2 == 1)
|
if (s % 2 == 1)
|
||||||
{
|
{
|
||||||
return array[static_cast<size_t>(floor(s / 2))];
|
n = static_cast<size_t>(floor(s / 2));
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
return array[static_cast<size_t>((floor(s / 2)) - 1)];
|
n = static_cast<size_t>((floor(s / 2)) - 1);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
@ -284,9 +282,10 @@ struct QuantileExactLow : public QuantileExactBase<Value, QuantileExactLow<Value
|
|||||||
// else quantile is the nth index of the sorted array obtained by multiplying
|
// else quantile is the nth index of the sorted array obtained by multiplying
|
||||||
// level and size of array. Example if level = 0.1 and size of array is 10,
|
// level and size of array. Example if level = 0.1 and size of array is 10,
|
||||||
// then return array[1].
|
// then return array[1].
|
||||||
size_t n = level < 1 ? level * array.size() : (array.size() - 1);
|
n = level < 1 ? level * array.size() : (array.size() - 1);
|
||||||
return array[n];
|
|
||||||
}
|
}
|
||||||
|
::nth_element(array.begin(), array.begin() + n, array.end());
|
||||||
|
return array[n];
|
||||||
}
|
}
|
||||||
return std::numeric_limits<Value>::quiet_NaN();
|
return std::numeric_limits<Value>::quiet_NaN();
|
||||||
}
|
}
|
||||||
@ -295,12 +294,11 @@ struct QuantileExactLow : public QuantileExactBase<Value, QuantileExactLow<Value
|
|||||||
{
|
{
|
||||||
if (!array.empty())
|
if (!array.empty())
|
||||||
{
|
{
|
||||||
// sort inputs in ascending order
|
size_t prev_n = 0;
|
||||||
::sort(array.begin(), array.end());
|
|
||||||
for (size_t i = 0; i < size; ++i)
|
for (size_t i = 0; i < size; ++i)
|
||||||
{
|
{
|
||||||
auto level = levels[indices[i]];
|
auto level = levels[indices[i]];
|
||||||
|
size_t n = 0;
|
||||||
// if level is 0.5 then compute the "low" median of the sorted array
|
// if level is 0.5 then compute the "low" median of the sorted array
|
||||||
// by the method of rounding.
|
// by the method of rounding.
|
||||||
if (level == 0.5)
|
if (level == 0.5)
|
||||||
@ -308,20 +306,22 @@ struct QuantileExactLow : public QuantileExactBase<Value, QuantileExactLow<Value
|
|||||||
auto s = array.size();
|
auto s = array.size();
|
||||||
if (s % 2 == 1)
|
if (s % 2 == 1)
|
||||||
{
|
{
|
||||||
result[indices[i]] = array[static_cast<size_t>(floor(s / 2))];
|
n = static_cast<size_t>(floor(s / 2));
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
result[indices[i]] = array[static_cast<size_t>(floor((s / 2) - 1))];
|
n = static_cast<size_t>(floor((s / 2) - 1));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
// else quantile is the nth index of the sorted array obtained by multiplying
|
// else quantile is the nth index of the sorted array obtained by multiplying
|
||||||
// level and size of array. Example if level = 0.1 and size of array is 10.
|
// level and size of array. Example if level = 0.1 and size of array is 10.
|
||||||
size_t n = level < 1 ? level * array.size() : (array.size() - 1);
|
n = level < 1 ? level * array.size() : (array.size() - 1);
|
||||||
result[indices[i]] = array[n];
|
|
||||||
}
|
}
|
||||||
|
::nth_element(array.begin() + prev_n, array.begin() + n, array.end());
|
||||||
|
result[indices[i]] = array[n];
|
||||||
|
prev_n = n;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
@ -344,23 +344,22 @@ struct QuantileExactHigh : public QuantileExactBase<Value, QuantileExactHigh<Val
|
|||||||
{
|
{
|
||||||
if (!array.empty())
|
if (!array.empty())
|
||||||
{
|
{
|
||||||
// sort inputs in ascending order
|
size_t n = 0;
|
||||||
::sort(array.begin(), array.end());
|
|
||||||
|
|
||||||
// if level is 0.5 then compute the "high" median of the sorted array
|
// if level is 0.5 then compute the "high" median of the sorted array
|
||||||
// by the method of rounding.
|
// by the method of rounding.
|
||||||
if (level == 0.5)
|
if (level == 0.5)
|
||||||
{
|
{
|
||||||
auto s = array.size();
|
auto s = array.size();
|
||||||
return array[static_cast<size_t>(floor(s / 2))];
|
n = static_cast<size_t>(floor(s / 2));
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
// else quantile is the nth index of the sorted array obtained by multiplying
|
// else quantile is the nth index of the sorted array obtained by multiplying
|
||||||
// level and size of array. Example if level = 0.1 and size of array is 10.
|
// level and size of array. Example if level = 0.1 and size of array is 10.
|
||||||
size_t n = level < 1 ? level * array.size() : (array.size() - 1);
|
n = level < 1 ? level * array.size() : (array.size() - 1);
|
||||||
return array[n];
|
|
||||||
}
|
}
|
||||||
|
::nth_element(array.begin(), array.begin() + n, array.end());
|
||||||
|
return array[n];
|
||||||
}
|
}
|
||||||
return std::numeric_limits<Value>::quiet_NaN();
|
return std::numeric_limits<Value>::quiet_NaN();
|
||||||
}
|
}
|
||||||
@ -369,26 +368,27 @@ struct QuantileExactHigh : public QuantileExactBase<Value, QuantileExactHigh<Val
|
|||||||
{
|
{
|
||||||
if (!array.empty())
|
if (!array.empty())
|
||||||
{
|
{
|
||||||
// sort inputs in ascending order
|
size_t prev_n = 0;
|
||||||
::sort(array.begin(), array.end());
|
|
||||||
for (size_t i = 0; i < size; ++i)
|
for (size_t i = 0; i < size; ++i)
|
||||||
{
|
{
|
||||||
auto level = levels[indices[i]];
|
auto level = levels[indices[i]];
|
||||||
|
size_t n = 0;
|
||||||
// if level is 0.5 then compute the "high" median of the sorted array
|
// if level is 0.5 then compute the "high" median of the sorted array
|
||||||
// by the method of rounding.
|
// by the method of rounding.
|
||||||
if (level == 0.5)
|
if (level == 0.5)
|
||||||
{
|
{
|
||||||
auto s = array.size();
|
auto s = array.size();
|
||||||
result[indices[i]] = array[static_cast<size_t>(floor(s / 2))];
|
n = static_cast<size_t>(floor(s / 2));
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
// else quantile is the nth index of the sorted array obtained by multiplying
|
// else quantile is the nth index of the sorted array obtained by multiplying
|
||||||
// level and size of array. Example if level = 0.1 and size of array is 10.
|
// level and size of array. Example if level = 0.1 and size of array is 10.
|
||||||
size_t n = level < 1 ? level * array.size() : (array.size() - 1);
|
n = level < 1 ? level * array.size() : (array.size() - 1);
|
||||||
result[indices[i]] = array[n];
|
|
||||||
}
|
}
|
||||||
|
::nth_element(array.begin() + prev_n, array.begin() + n, array.end());
|
||||||
|
result[indices[i]] = array[n];
|
||||||
|
prev_n = n;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
|
@ -475,11 +475,6 @@ if (TARGET ch_contrib::sqlite)
|
|||||||
dbms_target_link_libraries(PUBLIC ch_contrib::sqlite)
|
dbms_target_link_libraries(PUBLIC ch_contrib::sqlite)
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
if (USE_CASSANDRA)
|
|
||||||
dbms_target_link_libraries(PUBLIC ${CASSANDRA_LIBRARY})
|
|
||||||
dbms_target_include_directories (SYSTEM BEFORE PUBLIC ${CASS_INCLUDE_DIR})
|
|
||||||
endif()
|
|
||||||
|
|
||||||
if (TARGET ch_contrib::msgpack)
|
if (TARGET ch_contrib::msgpack)
|
||||||
target_link_libraries (clickhouse_common_io PUBLIC ch_contrib::msgpack)
|
target_link_libraries (clickhouse_common_io PUBLIC ch_contrib::msgpack)
|
||||||
endif()
|
endif()
|
||||||
|
@ -573,6 +573,18 @@ void ClientBase::updateSuggest(const ASTCreateQuery & ast_create)
|
|||||||
suggest->addWords(std::move(new_words));
|
suggest->addWords(std::move(new_words));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
bool ClientBase::isSyncInsertWithData(const ASTInsertQuery & insert_query, const ContextPtr & context)
|
||||||
|
{
|
||||||
|
if (!insert_query.data)
|
||||||
|
return false;
|
||||||
|
|
||||||
|
auto settings = context->getSettings();
|
||||||
|
if (insert_query.settings_ast)
|
||||||
|
settings.applyChanges(insert_query.settings_ast->as<ASTSetQuery>()->changes);
|
||||||
|
|
||||||
|
return !settings.async_insert;
|
||||||
|
}
|
||||||
|
|
||||||
void ClientBase::processTextAsSingleQuery(const String & full_query)
|
void ClientBase::processTextAsSingleQuery(const String & full_query)
|
||||||
{
|
{
|
||||||
/// Some parts of a query (result output and formatting) are executed
|
/// Some parts of a query (result output and formatting) are executed
|
||||||
@ -597,10 +609,12 @@ void ClientBase::processTextAsSingleQuery(const String & full_query)
|
|||||||
updateSuggest(*create);
|
updateSuggest(*create);
|
||||||
}
|
}
|
||||||
|
|
||||||
// An INSERT query may have the data that follow query text. Remove the
|
/// An INSERT query may have the data that follows query text.
|
||||||
/// Send part of query without data, because data will be sent separately.
|
/// Send part of the query without data, because data will be sent separately.
|
||||||
auto * insert = parsed_query->as<ASTInsertQuery>();
|
/// But for asynchronous inserts we don't extract data, because it's needed
|
||||||
if (insert && insert->data)
|
/// to be done on server side in that case (for coalescing the data from multiple inserts on server side).
|
||||||
|
const auto * insert = parsed_query->as<ASTInsertQuery>();
|
||||||
|
if (insert && isSyncInsertWithData(*insert, global_context))
|
||||||
query_to_execute = full_query.substr(0, insert->data - full_query.data());
|
query_to_execute = full_query.substr(0, insert->data - full_query.data());
|
||||||
else
|
else
|
||||||
query_to_execute = full_query;
|
query_to_execute = full_query;
|
||||||
@ -1261,7 +1275,7 @@ void ClientBase::processParsedSingleQuery(const String & full_query, const Strin
|
|||||||
for (const auto & query_id_format : query_id_formats)
|
for (const auto & query_id_format : query_id_formats)
|
||||||
{
|
{
|
||||||
writeString(query_id_format.first, std_out);
|
writeString(query_id_format.first, std_out);
|
||||||
writeString(fmt::format(query_id_format.second, fmt::arg("query_id", global_context->getCurrentQueryId())), std_out);
|
writeString(fmt::format(fmt::runtime(query_id_format.second), fmt::arg("query_id", global_context->getCurrentQueryId())), std_out);
|
||||||
writeChar('\n', std_out);
|
writeChar('\n', std_out);
|
||||||
std_out.next();
|
std_out.next();
|
||||||
}
|
}
|
||||||
@ -1303,8 +1317,10 @@ void ClientBase::processParsedSingleQuery(const String & full_query, const Strin
|
|||||||
if (insert && insert->select)
|
if (insert && insert->select)
|
||||||
insert->tryFindInputFunction(input_function);
|
insert->tryFindInputFunction(input_function);
|
||||||
|
|
||||||
|
bool is_async_insert = global_context->getSettings().async_insert && insert && insert->hasInlinedData();
|
||||||
|
|
||||||
/// INSERT query for which data transfer is needed (not an INSERT SELECT or input()) is processed separately.
|
/// INSERT query for which data transfer is needed (not an INSERT SELECT or input()) is processed separately.
|
||||||
if (insert && (!insert->select || input_function) && !insert->watch)
|
if (insert && (!insert->select || input_function) && !insert->watch && !is_async_insert)
|
||||||
{
|
{
|
||||||
if (input_function && insert->format.empty())
|
if (input_function && insert->format.empty())
|
||||||
throw Exception("FORMAT must be specified for function input()", ErrorCodes::INVALID_USAGE_OF_INPUT);
|
throw Exception("FORMAT must be specified for function input()", ErrorCodes::INVALID_USAGE_OF_INPUT);
|
||||||
@ -1434,17 +1450,17 @@ MultiQueryProcessingStage ClientBase::analyzeMultiQueryText(
|
|||||||
// row input formats (e.g. TSV) can't tell when the input stops,
|
// row input formats (e.g. TSV) can't tell when the input stops,
|
||||||
// unlike VALUES.
|
// unlike VALUES.
|
||||||
auto * insert_ast = parsed_query->as<ASTInsertQuery>();
|
auto * insert_ast = parsed_query->as<ASTInsertQuery>();
|
||||||
|
const char * query_to_execute_end = this_query_end;
|
||||||
|
|
||||||
if (insert_ast && insert_ast->data)
|
if (insert_ast && insert_ast->data)
|
||||||
{
|
{
|
||||||
this_query_end = find_first_symbols<'\n'>(insert_ast->data, all_queries_end);
|
this_query_end = find_first_symbols<'\n'>(insert_ast->data, all_queries_end);
|
||||||
insert_ast->end = this_query_end;
|
insert_ast->end = this_query_end;
|
||||||
query_to_execute = all_queries_text.substr(this_query_begin - all_queries_text.data(), insert_ast->data - this_query_begin);
|
query_to_execute_end = isSyncInsertWithData(*insert_ast, global_context) ? insert_ast->data : this_query_end;
|
||||||
}
|
|
||||||
else
|
|
||||||
{
|
|
||||||
query_to_execute = all_queries_text.substr(this_query_begin - all_queries_text.data(), this_query_end - this_query_begin);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
query_to_execute = all_queries_text.substr(this_query_begin - all_queries_text.data(), query_to_execute_end - this_query_begin);
|
||||||
|
|
||||||
// Try to include the trailing comment with test hints. It is just
|
// Try to include the trailing comment with test hints. It is just
|
||||||
// a guess for now, because we don't yet know where the query ends
|
// a guess for now, because we don't yet know where the query ends
|
||||||
// if it is an INSERT query with inline data. We will do it again
|
// if it is an INSERT query with inline data. We will do it again
|
||||||
|
@ -139,6 +139,8 @@ private:
|
|||||||
void updateSuggest(const ASTCreateQuery & ast_create);
|
void updateSuggest(const ASTCreateQuery & ast_create);
|
||||||
|
|
||||||
protected:
|
protected:
|
||||||
|
static bool isSyncInsertWithData(const ASTInsertQuery & insert_query, const ContextPtr & context);
|
||||||
|
|
||||||
bool is_interactive = false; /// Use either interactive line editing interface or batch mode.
|
bool is_interactive = false; /// Use either interactive line editing interface or batch mode.
|
||||||
bool is_multiquery = false;
|
bool is_multiquery = false;
|
||||||
bool delayed_interactive = false;
|
bool delayed_interactive = false;
|
||||||
|
@ -6,7 +6,6 @@
|
|||||||
#include <Parsers/Lexer.h>
|
#include <Parsers/Lexer.h>
|
||||||
#include <Common/UTF8Helpers.h>
|
#include <Common/UTF8Helpers.h>
|
||||||
|
|
||||||
|
|
||||||
namespace DB
|
namespace DB
|
||||||
{
|
{
|
||||||
|
|
||||||
@ -114,6 +113,7 @@ void highlight(const String & query, std::vector<replxx::Replxx::Color> & colors
|
|||||||
|
|
||||||
{TokenType::Comma, replxx::color::bold(Replxx::Color::DEFAULT)},
|
{TokenType::Comma, replxx::color::bold(Replxx::Color::DEFAULT)},
|
||||||
{TokenType::Semicolon, replxx::color::bold(Replxx::Color::DEFAULT)},
|
{TokenType::Semicolon, replxx::color::bold(Replxx::Color::DEFAULT)},
|
||||||
|
{TokenType::VerticalDelimiter, replxx::color::bold(Replxx::Color::DEFAULT)},
|
||||||
{TokenType::Dot, replxx::color::bold(Replxx::Color::DEFAULT)},
|
{TokenType::Dot, replxx::color::bold(Replxx::Color::DEFAULT)},
|
||||||
{TokenType::Asterisk, replxx::color::bold(Replxx::Color::DEFAULT)},
|
{TokenType::Asterisk, replxx::color::bold(Replxx::Color::DEFAULT)},
|
||||||
{TokenType::HereDoc, Replxx::Color::CYAN},
|
{TokenType::HereDoc, Replxx::Color::CYAN},
|
||||||
@ -151,6 +151,11 @@ void highlight(const String & query, std::vector<replxx::Replxx::Color> & colors
|
|||||||
|
|
||||||
for (Token token = lexer.nextToken(); !token.isEnd(); token = lexer.nextToken())
|
for (Token token = lexer.nextToken(); !token.isEnd(); token = lexer.nextToken())
|
||||||
{
|
{
|
||||||
|
if (token.type == TokenType::Semicolon || token.type == TokenType::VerticalDelimiter)
|
||||||
|
ReplxxLineReader::setLastIsDelimiter(true);
|
||||||
|
else if (token.type != TokenType::Whitespace)
|
||||||
|
ReplxxLineReader::setLastIsDelimiter(false);
|
||||||
|
|
||||||
size_t utf8_len = UTF8::countCodePoints(reinterpret_cast<const UInt8 *>(token.begin), token.size());
|
size_t utf8_len = UTF8::countCodePoints(reinterpret_cast<const UInt8 *>(token.begin), token.size());
|
||||||
for (size_t code_point_index = 0; code_point_index < utf8_len; ++code_point_index)
|
for (size_t code_point_index = 0; code_point_index < utf8_len; ++code_point_index)
|
||||||
{
|
{
|
||||||
|
@ -405,7 +405,7 @@ bool Connection::ping()
|
|||||||
}
|
}
|
||||||
catch (const Poco::Exception & e)
|
catch (const Poco::Exception & e)
|
||||||
{
|
{
|
||||||
LOG_TRACE(log_wrapper.get(), e.displayText());
|
LOG_TRACE(log_wrapper.get(), fmt::runtime(e.displayText()));
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -58,9 +58,9 @@ void ConnectionEstablisher::run(ConnectionEstablisher::TryResult & result, std::
|
|||||||
auto table_status_it = status_response.table_states_by_id.find(*table_to_check);
|
auto table_status_it = status_response.table_states_by_id.find(*table_to_check);
|
||||||
if (table_status_it == status_response.table_states_by_id.end())
|
if (table_status_it == status_response.table_states_by_id.end())
|
||||||
{
|
{
|
||||||
const char * message_pattern = "There is no table {}.{} on server: {}";
|
fail_message = fmt::format("There is no table {}.{} on server: {}",
|
||||||
fail_message = fmt::format(message_pattern, backQuote(table_to_check->database), backQuote(table_to_check->table), result.entry->getDescription());
|
backQuote(table_to_check->database), backQuote(table_to_check->table), result.entry->getDescription());
|
||||||
LOG_WARNING(log, fail_message);
|
LOG_WARNING(log, fmt::runtime(fail_message));
|
||||||
ProfileEvents::increment(ProfileEvents::DistributedConnectionMissingTable);
|
ProfileEvents::increment(ProfileEvents::DistributedConnectionMissingTable);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
@ -80,6 +80,7 @@
|
|||||||
M(SyncDrainedConnections, "Number of connections drained synchronously.") \
|
M(SyncDrainedConnections, "Number of connections drained synchronously.") \
|
||||||
M(ActiveSyncDrainedConnections, "Number of active connections drained synchronously.") \
|
M(ActiveSyncDrainedConnections, "Number of active connections drained synchronously.") \
|
||||||
M(AsynchronousReadWait, "Number of threads waiting for asynchronous read.") \
|
M(AsynchronousReadWait, "Number of threads waiting for asynchronous read.") \
|
||||||
|
M(PendingAsyncInsert, "Number of asynchronous inserts that are waiting for flush.") \
|
||||||
|
|
||||||
namespace CurrentMetrics
|
namespace CurrentMetrics
|
||||||
{
|
{
|
||||||
|
@ -272,7 +272,7 @@ bool DNSResolver::updateCacheImpl(UpdateF && update_func, ElemsT && elems, const
|
|||||||
}
|
}
|
||||||
|
|
||||||
if (!lost_elems.empty())
|
if (!lost_elems.empty())
|
||||||
LOG_INFO(log, log_msg, lost_elems);
|
LOG_INFO(log, fmt::runtime(log_msg), lost_elems);
|
||||||
|
|
||||||
return updated;
|
return updated;
|
||||||
}
|
}
|
||||||
|
@ -37,7 +37,7 @@ public:
|
|||||||
// Format message with fmt::format, like the logging functions.
|
// Format message with fmt::format, like the logging functions.
|
||||||
template <typename ...Args>
|
template <typename ...Args>
|
||||||
Exception(int code, const std::string & fmt, Args&&... args)
|
Exception(int code, const std::string & fmt, Args&&... args)
|
||||||
: Exception(fmt::format(fmt, std::forward<Args>(args)...), code)
|
: Exception(fmt::format(fmt::runtime(fmt), std::forward<Args>(args)...), code)
|
||||||
{}
|
{}
|
||||||
|
|
||||||
struct CreateFromPocoTag {};
|
struct CreateFromPocoTag {};
|
||||||
@ -55,7 +55,7 @@ public:
|
|||||||
template <typename ...Args>
|
template <typename ...Args>
|
||||||
void addMessage(const std::string& format, Args&&... args)
|
void addMessage(const std::string& format, Args&&... args)
|
||||||
{
|
{
|
||||||
extendedMessage(fmt::format(format, std::forward<Args>(args)...));
|
extendedMessage(fmt::format(fmt::runtime(format), std::forward<Args>(args)...));
|
||||||
}
|
}
|
||||||
|
|
||||||
void addMessage(const std::string& message)
|
void addMessage(const std::string& message)
|
||||||
@ -119,7 +119,7 @@ public:
|
|||||||
// Format message with fmt::format, like the logging functions.
|
// Format message with fmt::format, like the logging functions.
|
||||||
template <typename ...Args>
|
template <typename ...Args>
|
||||||
ParsingException(int code, const std::string & fmt, Args&&... args)
|
ParsingException(int code, const std::string & fmt, Args&&... args)
|
||||||
: Exception(fmt::format(fmt, std::forward<Args>(args)...), code)
|
: Exception(fmt::format(fmt::runtime(fmt), std::forward<Args>(args)...), code)
|
||||||
{}
|
{}
|
||||||
|
|
||||||
|
|
||||||
|
@ -8,6 +8,7 @@
|
|||||||
M(Query, "Number of queries to be interpreted and potentially executed. Does not include queries that failed to parse or were rejected due to AST size limits, quota limits or limits on the number of simultaneously running queries. May include internal queries initiated by ClickHouse itself. Does not count subqueries.") \
|
M(Query, "Number of queries to be interpreted and potentially executed. Does not include queries that failed to parse or were rejected due to AST size limits, quota limits or limits on the number of simultaneously running queries. May include internal queries initiated by ClickHouse itself. Does not count subqueries.") \
|
||||||
M(SelectQuery, "Same as Query, but only for SELECT queries.") \
|
M(SelectQuery, "Same as Query, but only for SELECT queries.") \
|
||||||
M(InsertQuery, "Same as Query, but only for INSERT queries.") \
|
M(InsertQuery, "Same as Query, but only for INSERT queries.") \
|
||||||
|
M(AsyncInsertQuery, "Same as InsertQuery, but only for asynchronous INSERT queries.") \
|
||||||
M(FailedQuery, "Number of failed queries.") \
|
M(FailedQuery, "Number of failed queries.") \
|
||||||
M(FailedSelectQuery, "Same as FailedQuery, but only for SELECT queries.") \
|
M(FailedSelectQuery, "Same as FailedQuery, but only for SELECT queries.") \
|
||||||
M(FailedInsertQuery, "Same as FailedQuery, but only for INSERT queries.") \
|
M(FailedInsertQuery, "Same as FailedQuery, but only for INSERT queries.") \
|
||||||
|
@ -243,7 +243,7 @@ void ProgressIndication::writeProgress()
|
|||||||
|
|
||||||
if (width_of_progress_bar > 0)
|
if (width_of_progress_bar > 0)
|
||||||
{
|
{
|
||||||
size_t bar_width = UnicodeBar::getWidth(current_count, 0, max_count, width_of_progress_bar);
|
double bar_width = UnicodeBar::getWidth(current_count, 0, max_count, width_of_progress_bar);
|
||||||
std::string bar = UnicodeBar::render(bar_width);
|
std::string bar = UnicodeBar::render(bar_width);
|
||||||
|
|
||||||
/// Render profiling_msg at left on top of the progress bar.
|
/// Render profiling_msg at left on top of the progress bar.
|
||||||
|
@ -54,6 +54,9 @@ void ThreadPoolImpl<Thread>::setMaxThreads(size_t value)
|
|||||||
{
|
{
|
||||||
std::lock_guard lock(mutex);
|
std::lock_guard lock(mutex);
|
||||||
max_threads = value;
|
max_threads = value;
|
||||||
|
/// We have to also adjust queue size, because it limits the number of scheduled and already running jobs in total.
|
||||||
|
queue_size = std::max(queue_size, max_threads);
|
||||||
|
jobs.reserve(queue_size);
|
||||||
}
|
}
|
||||||
|
|
||||||
template <typename Thread>
|
template <typename Thread>
|
||||||
|
@ -1145,7 +1145,7 @@ std::string normalizeZooKeeperPath(std::string zookeeper_path, bool check_starts
|
|||||||
if (check_starts_with_slash)
|
if (check_starts_with_slash)
|
||||||
throw DB::Exception(DB::ErrorCodes::BAD_ARGUMENTS, "ZooKeeper path must starts with '/', got '{}'", zookeeper_path);
|
throw DB::Exception(DB::ErrorCodes::BAD_ARGUMENTS, "ZooKeeper path must starts with '/', got '{}'", zookeeper_path);
|
||||||
if (log)
|
if (log)
|
||||||
LOG_WARNING(log, "ZooKeeper path ('{}') does not start with '/'. It will not be supported in future releases");
|
LOG_WARNING(log, "ZooKeeper path ('{}') does not start with '/'. It will not be supported in future releases", zookeeper_path);
|
||||||
zookeeper_path = "/" + zookeeper_path;
|
zookeeper_path = "/" + zookeeper_path;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -17,7 +17,7 @@ TEST(Logger, Log)
|
|||||||
Poco::Logger * log = &Poco::Logger::get("Log");
|
Poco::Logger * log = &Poco::Logger::get("Log");
|
||||||
|
|
||||||
/// This test checks that we don't pass this string to fmtlib, because it is the only argument.
|
/// This test checks that we don't pass this string to fmtlib, because it is the only argument.
|
||||||
EXPECT_NO_THROW(LOG_INFO(log, "Hello {} World"));
|
EXPECT_NO_THROW(LOG_INFO(log, fmt::runtime("Hello {} World")));
|
||||||
}
|
}
|
||||||
|
|
||||||
TEST(Logger, TestLog)
|
TEST(Logger, TestLog)
|
||||||
|
@ -39,7 +39,7 @@ public:
|
|||||||
const std::string & msg) override
|
const std::string & msg) override
|
||||||
{
|
{
|
||||||
LogsLevel db_level = static_cast<LogsLevel>(level_);
|
LogsLevel db_level = static_cast<LogsLevel>(level_);
|
||||||
LOG_IMPL(log, db_level, LEVELS.at(db_level), msg);
|
LOG_IMPL(log, db_level, LEVELS.at(db_level), fmt::runtime(msg));
|
||||||
}
|
}
|
||||||
|
|
||||||
void set_level(int level_) override
|
void set_level(int level_) override
|
||||||
|
@ -170,6 +170,7 @@ class IColumn;
|
|||||||
M(Bool, force_index_by_date, false, "Throw an exception if there is a partition key in a table, and it is not used.", 0) \
|
M(Bool, force_index_by_date, false, "Throw an exception if there is a partition key in a table, and it is not used.", 0) \
|
||||||
M(Bool, force_primary_key, false, "Throw an exception if there is primary key in a table, and it is not used.", 0) \
|
M(Bool, force_primary_key, false, "Throw an exception if there is primary key in a table, and it is not used.", 0) \
|
||||||
M(Bool, use_skip_indexes, true, "Use data skipping indexes during query execution.", 0) \
|
M(Bool, use_skip_indexes, true, "Use data skipping indexes during query execution.", 0) \
|
||||||
|
M(Bool, use_skip_indexes_if_final, false, "If query has FINAL, then skipping data based on indexes may produce incorrect result, hence disabled by default.", 0) \
|
||||||
M(String, force_data_skipping_indices, "", "Comma separated list of strings or literals with the name of the data skipping indices that should be used during query execution, otherwise an exception will be thrown.", 0) \
|
M(String, force_data_skipping_indices, "", "Comma separated list of strings or literals with the name of the data skipping indices that should be used during query execution, otherwise an exception will be thrown.", 0) \
|
||||||
\
|
\
|
||||||
M(Float, max_streams_to_max_threads_ratio, 1, "Allows you to use more sources than the number of threads - to more evenly distribute work across threads. It is assumed that this is a temporary solution, since it will be possible in the future to make the number of sources equal to the number of threads, but for each source to dynamically select available work for itself.", 0) \
|
M(Float, max_streams_to_max_threads_ratio, 1, "Allows you to use more sources than the number of threads - to more evenly distribute work across threads. It is assumed that this is a temporary solution, since it will be possible in the future to make the number of sources equal to the number of threads, but for each source to dynamically select available work for itself.", 0) \
|
||||||
|
@ -80,7 +80,7 @@ void DatabaseAtomic::drop(ContextPtr)
|
|||||||
}
|
}
|
||||||
catch (...)
|
catch (...)
|
||||||
{
|
{
|
||||||
LOG_WARNING(log, getCurrentExceptionMessage(true));
|
LOG_WARNING(log, fmt::runtime(getCurrentExceptionMessage(true)));
|
||||||
}
|
}
|
||||||
fs::remove_all(getMetadataPath());
|
fs::remove_all(getMetadataPath());
|
||||||
}
|
}
|
||||||
@ -469,7 +469,7 @@ void DatabaseAtomic::tryCreateSymlink(const String & table_name, const String &
|
|||||||
}
|
}
|
||||||
catch (...)
|
catch (...)
|
||||||
{
|
{
|
||||||
LOG_WARNING(log, getCurrentExceptionMessage(true));
|
LOG_WARNING(log, fmt::runtime(getCurrentExceptionMessage(true)));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -482,7 +482,7 @@ void DatabaseAtomic::tryRemoveSymlink(const String & table_name)
|
|||||||
}
|
}
|
||||||
catch (...)
|
catch (...)
|
||||||
{
|
{
|
||||||
LOG_WARNING(log, getCurrentExceptionMessage(true));
|
LOG_WARNING(log, fmt::runtime(getCurrentExceptionMessage(true)));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -527,7 +527,7 @@ void DatabaseAtomic::renameDatabase(ContextPtr query_context, const String & new
|
|||||||
}
|
}
|
||||||
catch (...)
|
catch (...)
|
||||||
{
|
{
|
||||||
LOG_WARNING(log, getCurrentExceptionMessage(true));
|
LOG_WARNING(log, fmt::runtime(getCurrentExceptionMessage(true)));
|
||||||
}
|
}
|
||||||
|
|
||||||
auto new_name_escaped = escapeForFileName(new_name);
|
auto new_name_escaped = escapeForFileName(new_name);
|
||||||
|
@ -316,7 +316,7 @@ void DatabaseOnDisk::dropTable(ContextPtr local_context, const String & table_na
|
|||||||
}
|
}
|
||||||
catch (...)
|
catch (...)
|
||||||
{
|
{
|
||||||
LOG_WARNING(log, getCurrentExceptionMessage(__PRETTY_FUNCTION__));
|
LOG_WARNING(log, fmt::runtime(getCurrentExceptionMessage(__PRETTY_FUNCTION__)));
|
||||||
attachTable(local_context, table_name, table, table_data_path_relative);
|
attachTable(local_context, table_name, table, table_data_path_relative);
|
||||||
if (renamed)
|
if (renamed)
|
||||||
fs::rename(table_metadata_path_drop, table_metadata_path);
|
fs::rename(table_metadata_path_drop, table_metadata_path);
|
||||||
|
@ -94,7 +94,7 @@ bool DatabaseSQLite::checkSQLiteTable(const String & table_name) const
|
|||||||
if (!sqlite_db)
|
if (!sqlite_db)
|
||||||
sqlite_db = openSQLiteDB(database_path, getContext(), /* throw_on_error */true);
|
sqlite_db = openSQLiteDB(database_path, getContext(), /* throw_on_error */true);
|
||||||
|
|
||||||
const String query = fmt::format("SELECT name FROM sqlite_master WHERE type='table' AND name='{table_name}';", table_name);
|
const String query = fmt::format("SELECT name FROM sqlite_master WHERE type='table' AND name='{}';", table_name);
|
||||||
|
|
||||||
auto callback_get_data = [](void * res, int, char **, char **) -> int
|
auto callback_get_data = [](void * res, int, char **, char **) -> int
|
||||||
{
|
{
|
||||||
|
@ -20,7 +20,7 @@ void processSQLiteError(const String & message, bool throw_on_error)
|
|||||||
if (throw_on_error)
|
if (throw_on_error)
|
||||||
throw Exception(ErrorCodes::PATH_ACCESS_DENIED, message);
|
throw Exception(ErrorCodes::PATH_ACCESS_DENIED, message);
|
||||||
else
|
else
|
||||||
LOG_ERROR(&Poco::Logger::get("SQLiteEngine"), message);
|
LOG_ERROR(&Poco::Logger::get("SQLiteEngine"), fmt::runtime(message));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@ -58,15 +58,15 @@ void cassandraLogCallback(const CassLogMessage * message, void * data)
|
|||||||
{
|
{
|
||||||
Poco::Logger * logger = static_cast<Poco::Logger *>(data);
|
Poco::Logger * logger = static_cast<Poco::Logger *>(data);
|
||||||
if (message->severity == CASS_LOG_CRITICAL || message->severity == CASS_LOG_ERROR)
|
if (message->severity == CASS_LOG_CRITICAL || message->severity == CASS_LOG_ERROR)
|
||||||
LOG_ERROR(logger, message->message);
|
LOG_ERROR(logger, fmt::runtime(message->message));
|
||||||
else if (message->severity == CASS_LOG_WARN)
|
else if (message->severity == CASS_LOG_WARN)
|
||||||
LOG_WARNING(logger, message->message);
|
LOG_WARNING(logger, fmt::runtime(message->message));
|
||||||
else if (message->severity == CASS_LOG_INFO)
|
else if (message->severity == CASS_LOG_INFO)
|
||||||
LOG_INFO(logger, message->message);
|
LOG_INFO(logger, fmt::runtime(message->message));
|
||||||
else if (message->severity == CASS_LOG_DEBUG)
|
else if (message->severity == CASS_LOG_DEBUG)
|
||||||
LOG_DEBUG(logger, message->message);
|
LOG_DEBUG(logger, fmt::runtime(message->message));
|
||||||
else if (message->severity == CASS_LOG_TRACE)
|
else if (message->severity == CASS_LOG_TRACE)
|
||||||
LOG_TRACE(logger, message->message);
|
LOG_TRACE(logger, fmt::runtime(message->message));
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
@ -193,7 +193,7 @@ Pipe MySQLDictionarySource::loadAll()
|
|||||||
auto connection = pool->get();
|
auto connection = pool->get();
|
||||||
last_modification = getLastModification(connection, false);
|
last_modification = getLastModification(connection, false);
|
||||||
|
|
||||||
LOG_TRACE(log, load_all_query);
|
LOG_TRACE(log, fmt::runtime(load_all_query));
|
||||||
return loadFromQuery(load_all_query);
|
return loadFromQuery(load_all_query);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -203,7 +203,7 @@ Pipe MySQLDictionarySource::loadUpdatedAll()
|
|||||||
last_modification = getLastModification(connection, false);
|
last_modification = getLastModification(connection, false);
|
||||||
|
|
||||||
std::string load_update_query = getUpdateFieldAndDate();
|
std::string load_update_query = getUpdateFieldAndDate();
|
||||||
LOG_TRACE(log, load_update_query);
|
LOG_TRACE(log, fmt::runtime(load_update_query));
|
||||||
return loadFromQuery(load_update_query);
|
return loadFromQuery(load_update_query);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -289,7 +289,7 @@ LocalDateTime MySQLDictionarySource::getLastModification(mysqlxx::Pool::Entry &
|
|||||||
{
|
{
|
||||||
auto query = connection->query("SHOW TABLE STATUS LIKE " + quoteForLike(configuration.table));
|
auto query = connection->query("SHOW TABLE STATUS LIKE " + quoteForLike(configuration.table));
|
||||||
|
|
||||||
LOG_TRACE(log, query.str());
|
LOG_TRACE(log, fmt::runtime(query.str()));
|
||||||
|
|
||||||
auto result = query.use();
|
auto result = query.use();
|
||||||
|
|
||||||
|
@ -80,7 +80,7 @@ PostgreSQLDictionarySource::PostgreSQLDictionarySource(const PostgreSQLDictionar
|
|||||||
|
|
||||||
Pipe PostgreSQLDictionarySource::loadAll()
|
Pipe PostgreSQLDictionarySource::loadAll()
|
||||||
{
|
{
|
||||||
LOG_TRACE(log, load_all_query);
|
LOG_TRACE(log, fmt::runtime(load_all_query));
|
||||||
return loadBase(load_all_query);
|
return loadBase(load_all_query);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -88,7 +88,7 @@ Pipe PostgreSQLDictionarySource::loadAll()
|
|||||||
Pipe PostgreSQLDictionarySource::loadUpdatedAll()
|
Pipe PostgreSQLDictionarySource::loadUpdatedAll()
|
||||||
{
|
{
|
||||||
auto load_update_query = getUpdateFieldAndDate();
|
auto load_update_query = getUpdateFieldAndDate();
|
||||||
LOG_TRACE(log, load_update_query);
|
LOG_TRACE(log, fmt::runtime(load_update_query));
|
||||||
return loadBase(load_update_query);
|
return loadBase(load_update_query);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -121,7 +121,7 @@ std::string XDBCDictionarySource::getUpdateFieldAndDate()
|
|||||||
|
|
||||||
Pipe XDBCDictionarySource::loadAll()
|
Pipe XDBCDictionarySource::loadAll()
|
||||||
{
|
{
|
||||||
LOG_TRACE(log, load_all_query);
|
LOG_TRACE(log, fmt::runtime(load_all_query));
|
||||||
return loadFromQuery(bridge_url, sample_block, load_all_query);
|
return loadFromQuery(bridge_url, sample_block, load_all_query);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -130,7 +130,7 @@ Pipe XDBCDictionarySource::loadUpdatedAll()
|
|||||||
{
|
{
|
||||||
std::string load_query_update = getUpdateFieldAndDate();
|
std::string load_query_update = getUpdateFieldAndDate();
|
||||||
|
|
||||||
LOG_TRACE(log, load_query_update);
|
LOG_TRACE(log, fmt::runtime(load_query_update));
|
||||||
return loadFromQuery(bridge_url, sample_block, load_query_update);
|
return loadFromQuery(bridge_url, sample_block, load_query_update);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -8,15 +8,22 @@
|
|||||||
namespace DB
|
namespace DB
|
||||||
{
|
{
|
||||||
/**
|
/**
|
||||||
* Write buffer with possibility to set and invoke callback after 'finalize' call.
|
* This buffer writes to cache, but after finalize() copy written file from cache to disk.
|
||||||
*/
|
*/
|
||||||
class CompletionAwareWriteBuffer : public WriteBufferFromFileDecorator
|
class WritingToCacheWriteBuffer final : public WriteBufferFromFileDecorator
|
||||||
{
|
{
|
||||||
public:
|
public:
|
||||||
CompletionAwareWriteBuffer(std::unique_ptr<WriteBufferFromFileBase> impl_, std::function<void()> completion_callback_)
|
WritingToCacheWriteBuffer(
|
||||||
: WriteBufferFromFileDecorator(std::move(impl_)), completion_callback(completion_callback_) { }
|
std::unique_ptr<WriteBufferFromFileBase> impl_,
|
||||||
|
std::function<std::unique_ptr<ReadBuffer>()> create_read_buffer_,
|
||||||
|
std::function<std::unique_ptr<WriteBuffer>()> create_write_buffer_)
|
||||||
|
: WriteBufferFromFileDecorator(std::move(impl_))
|
||||||
|
, create_read_buffer(std::move(create_read_buffer_))
|
||||||
|
, create_write_buffer(std::move(create_write_buffer_))
|
||||||
|
{
|
||||||
|
}
|
||||||
|
|
||||||
virtual ~CompletionAwareWriteBuffer() override
|
virtual ~WritingToCacheWriteBuffer() override
|
||||||
{
|
{
|
||||||
try
|
try
|
||||||
{
|
{
|
||||||
@ -28,15 +35,36 @@ public:
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void preFinalize() override
|
||||||
|
{
|
||||||
|
impl->next();
|
||||||
|
impl->preFinalize();
|
||||||
|
impl->finalize();
|
||||||
|
|
||||||
|
read_buffer = create_read_buffer();
|
||||||
|
write_buffer = create_write_buffer();
|
||||||
|
copyData(*read_buffer, *write_buffer);
|
||||||
|
write_buffer->next();
|
||||||
|
write_buffer->preFinalize();
|
||||||
|
|
||||||
|
is_prefinalized = true;
|
||||||
|
}
|
||||||
|
|
||||||
void finalizeImpl() override
|
void finalizeImpl() override
|
||||||
{
|
{
|
||||||
WriteBufferFromFileDecorator::finalizeImpl();
|
if (!is_prefinalized)
|
||||||
|
preFinalize();
|
||||||
|
|
||||||
completion_callback();
|
write_buffer->finalize();
|
||||||
}
|
}
|
||||||
|
|
||||||
private:
|
private:
|
||||||
const std::function<void()> completion_callback;
|
std::function<std::unique_ptr<ReadBuffer>()> create_read_buffer;
|
||||||
|
std::function<std::unique_ptr<WriteBuffer>()> create_write_buffer;
|
||||||
|
std::unique_ptr<ReadBuffer> read_buffer;
|
||||||
|
std::unique_ptr<WriteBuffer> write_buffer;
|
||||||
|
|
||||||
|
bool is_prefinalized = false;
|
||||||
};
|
};
|
||||||
|
|
||||||
enum FileDownloadStatus
|
enum FileDownloadStatus
|
||||||
@ -165,21 +193,22 @@ DiskCacheWrapper::writeFile(const String & path, size_t buf_size, WriteMode mode
|
|||||||
if (!cache_file_predicate(path))
|
if (!cache_file_predicate(path))
|
||||||
return DiskDecorator::writeFile(path, buf_size, mode);
|
return DiskDecorator::writeFile(path, buf_size, mode);
|
||||||
|
|
||||||
LOG_TRACE(log, "Write file {} to cache", backQuote(path));
|
LOG_TEST(log, "Write file {} to cache", backQuote(path));
|
||||||
|
|
||||||
auto dir_path = directoryPath(path);
|
auto dir_path = directoryPath(path);
|
||||||
if (!cache_disk->exists(dir_path))
|
if (!cache_disk->exists(dir_path))
|
||||||
cache_disk->createDirectories(dir_path);
|
cache_disk->createDirectories(dir_path);
|
||||||
|
|
||||||
return std::make_unique<CompletionAwareWriteBuffer>(
|
return std::make_unique<WritingToCacheWriteBuffer>(
|
||||||
cache_disk->writeFile(path, buf_size, mode),
|
cache_disk->writeFile(path, buf_size, mode),
|
||||||
[this, path, buf_size, mode]()
|
[this, path]()
|
||||||
{
|
{
|
||||||
/// Copy file from cache to actual disk when cached buffer is finalized.
|
/// Copy file from cache to actual disk when cached buffer is finalized.
|
||||||
auto src_buffer = cache_disk->readFile(path, ReadSettings(), /* read_hint= */ {}, /* file_size= */ {});
|
return cache_disk->readFile(path, ReadSettings(), /* read_hint= */ {}, /* file_size= */ {});
|
||||||
auto dst_buffer = DiskDecorator::writeFile(path, buf_size, mode);
|
},
|
||||||
copyData(*src_buffer, *dst_buffer);
|
[this, path, buf_size, mode]()
|
||||||
dst_buffer->finalize();
|
{
|
||||||
|
return DiskDecorator::writeFile(path, buf_size, mode);
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -151,6 +151,11 @@ void DiskDecorator::removeSharedFile(const String & path, bool keep_s3)
|
|||||||
delegate->removeSharedFile(path, keep_s3);
|
delegate->removeSharedFile(path, keep_s3);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void DiskDecorator::removeSharedFiles(const RemoveBatchRequest & files, bool keep_in_remote_fs)
|
||||||
|
{
|
||||||
|
delegate->removeSharedFiles(files, keep_in_remote_fs);
|
||||||
|
}
|
||||||
|
|
||||||
void DiskDecorator::removeSharedRecursive(const String & path, bool keep_s3)
|
void DiskDecorator::removeSharedRecursive(const String & path, bool keep_s3)
|
||||||
{
|
{
|
||||||
delegate->removeSharedRecursive(path, keep_s3);
|
delegate->removeSharedRecursive(path, keep_s3);
|
||||||
|
@ -52,6 +52,7 @@ public:
|
|||||||
void removeRecursive(const String & path) override;
|
void removeRecursive(const String & path) override;
|
||||||
void removeSharedFile(const String & path, bool keep_s3) override;
|
void removeSharedFile(const String & path, bool keep_s3) override;
|
||||||
void removeSharedRecursive(const String & path, bool keep_s3) override;
|
void removeSharedRecursive(const String & path, bool keep_s3) override;
|
||||||
|
void removeSharedFiles(const RemoveBatchRequest & files, bool keep_in_remote_fs) override;
|
||||||
void setLastModified(const String & path, const Poco::Timestamp & timestamp) override;
|
void setLastModified(const String & path, const Poco::Timestamp & timestamp) override;
|
||||||
Poco::Timestamp getLastModified(const String & path) override;
|
Poco::Timestamp getLastModified(const String & path) override;
|
||||||
void setReadOnly(const String & path) override;
|
void setReadOnly(const String & path) override;
|
||||||
|
@ -234,6 +234,12 @@ void DiskRestartProxy::removeSharedFile(const String & path, bool keep_s3)
|
|||||||
DiskDecorator::removeSharedFile(path, keep_s3);
|
DiskDecorator::removeSharedFile(path, keep_s3);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void DiskRestartProxy::removeSharedFiles(const RemoveBatchRequest & files, bool keep_in_remote_fs)
|
||||||
|
{
|
||||||
|
ReadLock lock (mutex);
|
||||||
|
DiskDecorator::removeSharedFiles(files, keep_in_remote_fs);
|
||||||
|
}
|
||||||
|
|
||||||
void DiskRestartProxy::removeSharedRecursive(const String & path, bool keep_s3)
|
void DiskRestartProxy::removeSharedRecursive(const String & path, bool keep_s3)
|
||||||
{
|
{
|
||||||
ReadLock lock (mutex);
|
ReadLock lock (mutex);
|
||||||
|
@ -54,6 +54,7 @@ public:
|
|||||||
void removeDirectory(const String & path) override;
|
void removeDirectory(const String & path) override;
|
||||||
void removeRecursive(const String & path) override;
|
void removeRecursive(const String & path) override;
|
||||||
void removeSharedFile(const String & path, bool keep_s3) override;
|
void removeSharedFile(const String & path, bool keep_s3) override;
|
||||||
|
void removeSharedFiles(const RemoveBatchRequest & files, bool keep_in_remote_fs) override;
|
||||||
void removeSharedRecursive(const String & path, bool keep_s3) override;
|
void removeSharedRecursive(const String & path, bool keep_s3) override;
|
||||||
void setLastModified(const String & path, const Poco::Timestamp & timestamp) override;
|
void setLastModified(const String & path, const Poco::Timestamp & timestamp) override;
|
||||||
Poco::Timestamp getLastModified(const String & path) override;
|
Poco::Timestamp getLastModified(const String & path) override;
|
||||||
|
@ -101,7 +101,7 @@ DiskSelectorPtr DiskSelector::updateFromConfig(
|
|||||||
}
|
}
|
||||||
|
|
||||||
writeString(" disappeared from configuration, this change will be applied after restart of ClickHouse", warning);
|
writeString(" disappeared from configuration, this change will be applied after restart of ClickHouse", warning);
|
||||||
LOG_WARNING(&Poco::Logger::get("DiskSelector"), warning.str());
|
LOG_WARNING(&Poco::Logger::get("DiskSelector"), fmt::runtime(warning.str()));
|
||||||
}
|
}
|
||||||
|
|
||||||
return result;
|
return result;
|
||||||
|
@ -197,6 +197,32 @@ public:
|
|||||||
/// Second bool param is a flag to remove (true) or keep (false) shared data on S3
|
/// Second bool param is a flag to remove (true) or keep (false) shared data on S3
|
||||||
virtual void removeSharedFileIfExists(const String & path, bool) { removeFileIfExists(path); }
|
virtual void removeSharedFileIfExists(const String & path, bool) { removeFileIfExists(path); }
|
||||||
|
|
||||||
|
struct RemoveRequest
|
||||||
|
{
|
||||||
|
String path;
|
||||||
|
bool if_exists = false;
|
||||||
|
|
||||||
|
explicit RemoveRequest(String path_, bool if_exists_ = false)
|
||||||
|
: path(std::move(path_)), if_exists(std::move(if_exists_))
|
||||||
|
{
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
using RemoveBatchRequest = std::vector<RemoveRequest>;
|
||||||
|
|
||||||
|
/// Batch request to remove multiple files.
|
||||||
|
/// May be much faster for blob storage.
|
||||||
|
virtual void removeSharedFiles(const RemoveBatchRequest & files, bool keep_in_remote_fs)
|
||||||
|
{
|
||||||
|
for (const auto & file : files)
|
||||||
|
{
|
||||||
|
if (file.if_exists)
|
||||||
|
removeSharedFileIfExists(file.path, keep_in_remote_fs);
|
||||||
|
else
|
||||||
|
removeSharedFile(file.path, keep_in_remote_fs);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
/// Set last modified time to file or directory at `path`.
|
/// Set last modified time to file or directory at `path`.
|
||||||
virtual void setLastModified(const String & path, const Poco::Timestamp & timestamp) = 0;
|
virtual void setLastModified(const String & path, const Poco::Timestamp & timestamp) = 0;
|
||||||
|
|
||||||
|
@ -361,6 +361,19 @@ void IDiskRemote::removeSharedFileIfExists(const String & path, bool keep_in_rem
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void IDiskRemote::removeSharedFiles(const RemoveBatchRequest & files, bool keep_in_remote_fs)
|
||||||
|
{
|
||||||
|
RemoteFSPathKeeperPtr fs_paths_keeper = createFSPathKeeper();
|
||||||
|
for (const auto & file : files)
|
||||||
|
{
|
||||||
|
bool skip = file.if_exists && !metadata_disk->exists(file.path);
|
||||||
|
if (!skip)
|
||||||
|
removeMeta(file.path, fs_paths_keeper);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!keep_in_remote_fs)
|
||||||
|
removeFromRemoteFS(fs_paths_keeper);
|
||||||
|
}
|
||||||
|
|
||||||
void IDiskRemote::removeSharedRecursive(const String & path, bool keep_in_remote_fs)
|
void IDiskRemote::removeSharedRecursive(const String & path, bool keep_in_remote_fs)
|
||||||
{
|
{
|
||||||
@ -531,4 +544,12 @@ UInt32 IDiskRemote::getRefCount(const String & path) const
|
|||||||
return meta.ref_count;
|
return meta.ref_count;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
ThreadPool & IDiskRemote::getThreadPoolWriter()
|
||||||
|
{
|
||||||
|
constexpr size_t pool_size = 100;
|
||||||
|
constexpr size_t queue_size = 1000000;
|
||||||
|
static ThreadPool writer(pool_size, pool_size, queue_size);
|
||||||
|
return writer;
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
@ -98,6 +98,8 @@ public:
|
|||||||
|
|
||||||
void removeSharedFileIfExists(const String & path, bool keep_in_remote_fs) override;
|
void removeSharedFileIfExists(const String & path, bool keep_in_remote_fs) override;
|
||||||
|
|
||||||
|
void removeSharedFiles(const RemoveBatchRequest & files, bool keep_in_remote_fs) override;
|
||||||
|
|
||||||
void removeSharedRecursive(const String & path, bool keep_in_remote_fs) override;
|
void removeSharedRecursive(const String & path, bool keep_in_remote_fs) override;
|
||||||
|
|
||||||
void listFiles(const String & path, std::vector<String> & file_names) override;
|
void listFiles(const String & path, std::vector<String> & file_names) override;
|
||||||
@ -135,6 +137,7 @@ public:
|
|||||||
virtual RemoteFSPathKeeperPtr createFSPathKeeper() const = 0;
|
virtual RemoteFSPathKeeperPtr createFSPathKeeper() const = 0;
|
||||||
|
|
||||||
static AsynchronousReaderPtr getThreadPoolReader();
|
static AsynchronousReaderPtr getThreadPoolReader();
|
||||||
|
static ThreadPool & getThreadPoolWriter();
|
||||||
|
|
||||||
virtual std::unique_ptr<ReadBufferFromFileBase> readMetaFile(
|
virtual std::unique_ptr<ReadBufferFromFileBase> readMetaFile(
|
||||||
const String & path,
|
const String & path,
|
||||||
|
@ -9,6 +9,7 @@
|
|||||||
|
|
||||||
#include <boost/algorithm/string.hpp>
|
#include <boost/algorithm/string.hpp>
|
||||||
|
|
||||||
|
#include <base/scope_guard_safe.h>
|
||||||
#include <base/unit.h>
|
#include <base/unit.h>
|
||||||
#include <base/FnTraits.h>
|
#include <base/FnTraits.h>
|
||||||
|
|
||||||
@ -262,6 +263,21 @@ std::unique_ptr<WriteBufferFromFileBase> DiskS3::writeFile(const String & path,
|
|||||||
LOG_TRACE(log, "{} to file by path: {}. S3 path: {}",
|
LOG_TRACE(log, "{} to file by path: {}. S3 path: {}",
|
||||||
mode == WriteMode::Rewrite ? "Write" : "Append", backQuote(metadata_disk->getPath() + path), remote_fs_root_path + s3_path);
|
mode == WriteMode::Rewrite ? "Write" : "Append", backQuote(metadata_disk->getPath() + path), remote_fs_root_path + s3_path);
|
||||||
|
|
||||||
|
ScheduleFunc schedule = [pool = &getThreadPoolWriter()](auto callback)
|
||||||
|
{
|
||||||
|
pool->scheduleOrThrow([callback = std::move(callback), thread_group = CurrentThread::getGroup()]()
|
||||||
|
{
|
||||||
|
if (thread_group)
|
||||||
|
CurrentThread::attachTo(thread_group);
|
||||||
|
|
||||||
|
SCOPE_EXIT_SAFE(
|
||||||
|
if (thread_group)
|
||||||
|
CurrentThread::detachQueryIfNotDetached();
|
||||||
|
);
|
||||||
|
callback();
|
||||||
|
});
|
||||||
|
};
|
||||||
|
|
||||||
auto s3_buffer = std::make_unique<WriteBufferFromS3>(
|
auto s3_buffer = std::make_unique<WriteBufferFromS3>(
|
||||||
settings->client,
|
settings->client,
|
||||||
bucket,
|
bucket,
|
||||||
@ -269,7 +285,8 @@ std::unique_ptr<WriteBufferFromFileBase> DiskS3::writeFile(const String & path,
|
|||||||
settings->s3_min_upload_part_size,
|
settings->s3_min_upload_part_size,
|
||||||
settings->s3_max_single_part_upload_size,
|
settings->s3_max_single_part_upload_size,
|
||||||
std::move(object_metadata),
|
std::move(object_metadata),
|
||||||
buf_size);
|
buf_size,
|
||||||
|
std::move(schedule));
|
||||||
|
|
||||||
return std::make_unique<WriteIndirectBufferFromRemoteFS<WriteBufferFromS3>>(std::move(s3_buffer), std::move(metadata), s3_path);
|
return std::make_unique<WriteIndirectBufferFromRemoteFS<WriteBufferFromS3>>(std::move(s3_buffer), std::move(metadata), s3_path);
|
||||||
}
|
}
|
||||||
|
@ -48,7 +48,7 @@ namespace
|
|||||||
"First argument for function " + getName() + " must be Constant string", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
|
"First argument for function " + getName() + " must be Constant string", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
|
||||||
|
|
||||||
static auto * log = &Poco::Logger::get("FunctionLogTrace");
|
static auto * log = &Poco::Logger::get("FunctionLogTrace");
|
||||||
LOG_TRACE(log, message);
|
LOG_TRACE(log, fmt::runtime(message));
|
||||||
|
|
||||||
return DataTypeUInt8().createColumnConst(input_rows_count, 0);
|
return DataTypeUInt8().createColumnConst(input_rows_count, 0);
|
||||||
}
|
}
|
||||||
|
@ -216,6 +216,15 @@ void readStringUntilWhitespaceInto(Vector & s, ReadBuffer & buf)
|
|||||||
readStringUntilCharsInto<' '>(s, buf);
|
readStringUntilCharsInto<' '>(s, buf);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
template <typename Vector>
|
||||||
|
void readStringUntilNewlineInto(Vector & s, ReadBuffer & buf)
|
||||||
|
{
|
||||||
|
readStringUntilCharsInto<'\n'>(s, buf);
|
||||||
|
}
|
||||||
|
|
||||||
|
template void readStringUntilNewlineInto<PaddedPODArray<UInt8>>(PaddedPODArray<UInt8> & s, ReadBuffer & buf);
|
||||||
|
template void readStringUntilNewlineInto<String>(String & s, ReadBuffer & buf);
|
||||||
|
|
||||||
template <typename Vector>
|
template <typename Vector>
|
||||||
void readNullTerminated(Vector & s, ReadBuffer & buf)
|
void readNullTerminated(Vector & s, ReadBuffer & buf)
|
||||||
{
|
{
|
||||||
|
@ -604,6 +604,9 @@ bool tryReadJSONStringInto(Vector & s, ReadBuffer & buf)
|
|||||||
template <typename Vector>
|
template <typename Vector>
|
||||||
void readStringUntilWhitespaceInto(Vector & s, ReadBuffer & buf);
|
void readStringUntilWhitespaceInto(Vector & s, ReadBuffer & buf);
|
||||||
|
|
||||||
|
template <typename Vector>
|
||||||
|
void readStringUntilNewlineInto(Vector & s, ReadBuffer & buf);
|
||||||
|
|
||||||
/// This could be used as template parameter for functions above, if you want to just skip data.
|
/// This could be used as template parameter for functions above, if you want to just skip data.
|
||||||
struct NullOutput
|
struct NullOutput
|
||||||
{
|
{
|
||||||
@ -1387,4 +1390,3 @@ void readQuotedFieldIntoString(String & s, ReadBuffer & buf);
|
|||||||
void readJSONFieldIntoString(String & s, ReadBuffer & buf);
|
void readJSONFieldIntoString(String & s, ReadBuffer & buf);
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -317,7 +317,7 @@ public:
|
|||||||
, load_frequency_ms(Aws::Auth::REFRESH_THRESHOLD)
|
, load_frequency_ms(Aws::Auth::REFRESH_THRESHOLD)
|
||||||
, logger(&Poco::Logger::get("AWSInstanceProfileCredentialsProvider"))
|
, logger(&Poco::Logger::get("AWSInstanceProfileCredentialsProvider"))
|
||||||
{
|
{
|
||||||
LOG_INFO(logger, "Creating Instance with injected EC2MetadataClient and refresh rate {}.");
|
LOG_INFO(logger, "Creating Instance with injected EC2MetadataClient and refresh rate.");
|
||||||
}
|
}
|
||||||
|
|
||||||
Aws::Auth::AWSCredentials GetAWSCredentials() override
|
Aws::Auth::AWSCredentials GetAWSCredentials() override
|
||||||
|
@ -104,10 +104,14 @@ public:
|
|||||||
++pos;
|
++pos;
|
||||||
}
|
}
|
||||||
|
|
||||||
virtual void sync()
|
/// This method may be called before finalize() to tell there would not be any more data written.
|
||||||
{
|
/// Used does not have to call it, implementation should check it itself if needed.
|
||||||
next();
|
///
|
||||||
}
|
/// The idea is similar to prefetch. In case if all data is written, we can flush the buffer
|
||||||
|
/// and start sending data asynchronously. It may improve writing performance in case you have
|
||||||
|
/// multiple files to finalize. Mainly, for blob storage, finalization has high latency,
|
||||||
|
/// and calling preFinalize in a loop may parallelize it.
|
||||||
|
virtual void preFinalize() { next(); }
|
||||||
|
|
||||||
/// Write the last data.
|
/// Write the last data.
|
||||||
void finalize()
|
void finalize()
|
||||||
@ -130,6 +134,13 @@ public:
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Wait for data to be reliably written. Mainly, call fsync for fd.
|
||||||
|
/// May be called after finalize() if needed.
|
||||||
|
virtual void sync()
|
||||||
|
{
|
||||||
|
next();
|
||||||
|
}
|
||||||
|
|
||||||
protected:
|
protected:
|
||||||
virtual void finalizeImpl()
|
virtual void finalizeImpl()
|
||||||
{
|
{
|
||||||
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user