Merge branch 'master' into enable-coverage-for-debug-build

This commit is contained in:
Alexey Milovidov 2024-01-28 22:24:26 +01:00
commit 4a94ae5a1b
963 changed files with 12444 additions and 4981 deletions

View File

@ -14,6 +14,7 @@ jobs:
with:
test_name: Jepsen keeper check
runner_type: style-checker
report_required: true
run_command: |
python3 jepsen_check.py keeper
# ServerJepsenRelease:

View File

@ -15,6 +15,8 @@ jobs:
outputs:
data: ${{ steps.runconfig.outputs.CI_DATA }}
steps:
- name: DebugInfo
uses: hmarr/debug-action@a701ed95a46e6f2fb0df25e1a558c16356fae35a
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
@ -33,11 +35,9 @@ jobs:
- name: PrepareRunConfig
id: runconfig
run: |
echo "::group::configure CI run"
python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --configure --rebuild-all-binaries --outfile ${{ runner.temp }}/ci_run_data.json
echo "::endgroup::"
echo "::group::CI run configure results"
echo "::group::CI configuration"
python3 -m json.tool ${{ runner.temp }}/ci_run_data.json
echo "::endgroup::"
@ -263,9 +263,9 @@ jobs:
run_command: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_server.py --release-type head \
--image-repo clickhouse/clickhouse-server --image-path docker/server
--image-repo clickhouse/clickhouse-server --image-path docker/server --allow-build-reuse
python3 docker_server.py --release-type head \
--image-repo clickhouse/clickhouse-keeper --image-path docker/keeper
--image-repo clickhouse/clickhouse-keeper --image-path docker/keeper --allow-build-reuse
############################################################################################
##################################### BUILD REPORTER #######################################
############################################################################################

View File

@ -22,6 +22,8 @@ jobs:
outputs:
data: ${{ steps.runconfig.outputs.CI_DATA }}
steps:
- name: DebugInfo
uses: hmarr/debug-action@a701ed95a46e6f2fb0df25e1a558c16356fae35a
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
@ -44,11 +46,9 @@ jobs:
- name: PrepareRunConfig
id: runconfig
run: |
echo "::group::configure CI run"
python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --configure --outfile ${{ runner.temp }}/ci_run_data.json
echo "::endgroup::"
echo "::group::CI run configure results"
echo "::group::CI configuration"
python3 -m json.tool ${{ runner.temp }}/ci_run_data.json
echo "::endgroup::"
@ -67,6 +67,7 @@ jobs:
DOCKER_TAG=$(echo '${{ toJson(fromJson(steps.runconfig.outputs.CI_DATA).docker_data.images) }}' | tr -d '\n')
export DOCKER_TAG=$DOCKER_TAG
python3 ./tests/ci/style_check.py --no-push
python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ runner.temp }}/ci_run_data.json --post --job-name 'Style check'
BuildDockers:
needs: [RunConfig]
if: ${{ !failure() && !cancelled() }}
@ -820,7 +821,7 @@ jobs:
test_name: Unit tests (asan)
runner_type: fuzzer-unit-tester
data: ${{ needs.RunConfig.outputs.data }}
UnitTestsReleaseClang:
UnitTestsRelease:
needs: [RunConfig, BuilderBinRelease]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
@ -947,7 +948,7 @@ jobs:
- UnitTestsTsan
- UnitTestsMsan
- UnitTestsUBsan
- UnitTestsReleaseClang
- UnitTestsRelease
- CompatibilityCheckX86
- CompatibilityCheckAarch64
- SQLancerTestRelease

View File

@ -73,12 +73,15 @@ jobs:
- name: Pre
run: |
python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ toJson(inputs.data) }} --pre --job-name '${{inputs.build_name}}'
- name: Build
- name: Run
run: |
python3 "$GITHUB_WORKSPACE/tests/ci/build_check.py" "$BUILD_NAME"
python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" \
--infile ${{ toJson(inputs.data) }} \
--job-name "$BUILD_NAME" \
--run
- name: Post
# there may still be a build report to upload for a failed build job
if: always()
if: ${{ !cancelled() }}
run: |
python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ toJson(inputs.data) }} --post --job-name '${{inputs.build_name}}'
- name: Mark as done

View File

@ -34,12 +34,16 @@ name: Simple job
working-directory:
description: sets custom working directory
type: string
default: ""
default: "$GITHUB_WORKSPACE/tests/ci"
git_ref:
description: commit to use, merge commit for pr or head
required: false
type: string
default: ${{ github.event.after }} # no merge commit
report_required:
description: set to true if a job report with the commit status is required
type: boolean
default: false
secrets:
secret_envs:
description: if given, it's passed to the environments
@ -81,12 +85,12 @@ jobs:
job_type: test
- name: Run
run: |
if [ -n '${{ inputs.working-directory }}' ]; then
cd "${{ inputs.working-directory }}"
else
cd "$GITHUB_WORKSPACE/tests/ci"
fi
cd "${{ inputs.working-directory }}"
${{ inputs.run_command }}
- name: Post
if: ${{ inputs.report_required }}
run: |
python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --post --job-name '${{inputs.test_name}}'
- name: Clean
if: always()
uses: ./.github/actions/clean

View File

@ -38,7 +38,7 @@ name: Testing workflow
working-directory:
description: sets custom working directory
type: string
default: ""
default: "$GITHUB_WORKSPACE/tests/ci"
secrets:
secret_envs:
description: if given, it's passed to the environments
@ -96,19 +96,14 @@ jobs:
python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ toJson(inputs.data) }} --pre --job-name '${{inputs.test_name}}'
- name: Run
run: |
if [ -n "${{ inputs.working-directory }}" ]; then
cd "${{ inputs.working-directory }}"
else
cd "$GITHUB_WORKSPACE/tests/ci"
fi
if [ -n "$(echo '${{ inputs.run_command }}' | tr -d '\n')" ]; then
echo "Running command from workflow input"
${{ inputs.run_command }}
else
echo "Running command from job config"
python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ toJson(inputs.data) }} --run --job-name '${{inputs.test_name}}'
fi
cd "${{ inputs.working-directory }}"
python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" \
--infile ${{ toJson(inputs.data) }} \
--job-name '${{inputs.test_name}}' \
--run \
--run-command '''${{inputs.run_command}}'''
- name: Post run
if: ${{ !cancelled() }}
run: |
python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ toJson(inputs.data) }} --post --job-name '${{inputs.test_name}}'
- name: Mark as done

View File

@ -1,9 +1,18 @@
## To avoid merge commit in CI run (add a leading space to apply):
#no-merge-commit
### CI modificators (add a leading space to apply):
## Running specified job (add a leading space to apply):
## To avoid a merge commit in CI:
#no_merge_commit
## To discard CI cache:
#no_ci_cache
## To run specified set of tests in CI:
#ci_set_<SET_NAME>
#ci_set_reduced
## To run specified job in CI:
#job_<JOB NAME>
#job_stateless_tests_release
#job_package_debug

View File

@ -26,6 +26,11 @@
#include "Poco/StreamUtil.h"
namespace DB
{
class ReadBufferFromIStream;
}
namespace Poco
{
@ -120,6 +125,8 @@ protected:
openmode getMode() const { return _mode; }
private:
friend class DB::ReadBufferFromIStream;
virtual int readFromDevice(char_type * /*buffer*/, std::streamsize /*length*/) { return 0; }
virtual int writeToDevice(const char_type * /*buffer*/, std::streamsize /*length*/) { return 0; }

View File

@ -33,7 +33,8 @@ namespace Poco
class Exception;
class Logger;
using LoggerPtr = std::shared_ptr<Logger>;
class Foundation_API Logger : public Channel
/// Logger is a special Channel that acts as the main
@ -870,6 +871,11 @@ public:
/// If the Logger does not yet exist, it is created, based
/// on its parent logger.
static LoggerPtr getShared(const std::string & name);
/// Returns a shared pointer to the Logger with the given name.
/// If the Logger does not yet exist, it is created, based
/// on its parent logger.
static Logger & unsafeGet(const std::string & name);
/// Returns a reference to the Logger with the given name.
/// If the Logger does not yet exist, it is created, based
@ -885,6 +891,11 @@ public:
/// given name. The Logger's Channel and log level are set as
/// specified.
static LoggerPtr createShared(const std::string & name, Channel * pChannel, int level = Message::PRIO_INFORMATION);
/// Creates and returns a shared pointer to a Logger with the
/// given name. The Logger's Channel and log level are set as
/// specified.
static Logger & root();
/// Returns a reference to the root logger, which is the ultimate
/// ancestor of all Loggers.
@ -893,7 +904,7 @@ public:
/// Returns a pointer to the Logger with the given name if it
/// exists, or a null pointer otherwise.
static void destroy(const std::string & name);
static bool destroy(const std::string & name);
/// Destroys the logger with the specified name. Does nothing
/// if the logger is not found.
///
@ -938,6 +949,7 @@ protected:
void log(const std::string & text, Message::Priority prio, const char * file, int line);
static std::string format(const std::string & fmt, int argc, std::string argv[]);
static Logger & unsafeCreate(const std::string & name, Channel * pChannel, int level = Message::PRIO_INFORMATION);
static Logger & parent(const std::string & name);
static void add(Logger * pLogger);
static Logger * find(const std::string & name);
@ -952,7 +964,6 @@ private:
std::atomic_int _level;
static LoggerMap * _pLoggerMap;
static Mutex _mapMtx;
};

View File

@ -38,15 +38,15 @@ public:
/// Creates the RefCountedObject.
/// The initial reference count is one.
void duplicate() const;
/// Increments the object's reference count.
size_t duplicate() const;
/// Increments the object's reference count, returns reference count before call.
void release() const throw();
size_t release() const throw();
/// Decrements the object's reference count
/// and deletes the object if the count
/// reaches zero.
/// reaches zero, returns reference count before call.
int referenceCount() const;
size_t referenceCount() const;
/// Returns the reference count.
protected:
@ -57,36 +57,40 @@ private:
RefCountedObject(const RefCountedObject &);
RefCountedObject & operator=(const RefCountedObject &);
mutable AtomicCounter _counter;
mutable std::atomic<size_t> _counter;
};
//
// inlines
//
inline int RefCountedObject::referenceCount() const
inline size_t RefCountedObject::referenceCount() const
{
return _counter.value();
return _counter.load(std::memory_order_acquire);
}
inline void RefCountedObject::duplicate() const
inline size_t RefCountedObject::duplicate() const
{
++_counter;
return _counter.fetch_add(1, std::memory_order_acq_rel);
}
inline void RefCountedObject::release() const throw()
inline size_t RefCountedObject::release() const throw()
{
size_t reference_count_before = _counter.fetch_sub(1, std::memory_order_acq_rel);
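// fetch_sub returns the value before the decrement, so a result of 1 means this call released the last reference.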
try
{
if (--_counter == 0)
if (reference_count_before == 1)
delete this;
}
catch (...)
{
poco_unexpected();
}
return reference_count_before;
}

View File

@ -20,12 +20,29 @@
#include "Poco/NumberParser.h"
#include "Poco/String.h"
#include <mutex>
namespace
{
std::mutex & getLoggerMutex()
{
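/// The mutex is constructed with placement new into a static buffer and is never destroyed,
/// so it stays valid even for code that logs during static deinitialization.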
auto get_logger_mutex_placeholder_memory = []()
{
static char buffer[sizeof(std::mutex)]{};
return buffer;
};
static std::mutex * logger_mutex = new (get_logger_mutex_placeholder_memory()) std::mutex();
return *logger_mutex;
}
}
namespace Poco {
Logger::LoggerMap* Logger::_pLoggerMap = 0;
Mutex Logger::_mapMtx;
const std::string Logger::ROOT;
@ -112,7 +129,7 @@ void Logger::dump(const std::string& msg, const void* buffer, std::size_t length
void Logger::setLevel(const std::string& name, int level)
{
Mutex::ScopedLock lock(_mapMtx);
std::lock_guard<std::mutex> lock(getLoggerMutex());
if (_pLoggerMap)
{
@ -131,7 +148,7 @@ void Logger::setLevel(const std::string& name, int level)
void Logger::setChannel(const std::string& name, Channel* pChannel)
{
Mutex::ScopedLock lock(_mapMtx);
std::lock_guard<std::mutex> lock(getLoggerMutex());
if (_pLoggerMap)
{
@ -150,7 +167,7 @@ void Logger::setChannel(const std::string& name, Channel* pChannel)
void Logger::setProperty(const std::string& loggerName, const std::string& propertyName, const std::string& value)
{
Mutex::ScopedLock lock(_mapMtx);
std::lock_guard<std::mutex> lock(getLoggerMutex());
if (_pLoggerMap)
{
@ -280,13 +297,41 @@ void Logger::formatDump(std::string& message, const void* buffer, std::size_t le
}
namespace
{
struct LoggerDeleter
{
void operator()(Poco::Logger * logger)
{
if (Logger::destroy(logger->name()))
return;
logger->release();
}
};
inline LoggerPtr makeLoggerPtr(Logger & logger)
{
logger.duplicate();
return std::shared_ptr<Logger>(&logger, LoggerDeleter());
}
}
Logger& Logger::get(const std::string& name)
{
Mutex::ScopedLock lock(_mapMtx);
std::lock_guard<std::mutex> lock(getLoggerMutex());
return unsafeGet(name);
}
LoggerPtr Logger::getShared(const std::string & name)
{
std::lock_guard<std::mutex> lock(getLoggerMutex());
return makeLoggerPtr(unsafeGet(name));
}
Logger& Logger::unsafeGet(const std::string& name)
{
@ -310,18 +355,21 @@ Logger& Logger::unsafeGet(const std::string& name)
Logger& Logger::create(const std::string& name, Channel* pChannel, int level)
{
Mutex::ScopedLock lock(_mapMtx);
std::lock_guard<std::mutex> lock(getLoggerMutex());
if (find(name)) throw ExistsException();
Logger* pLogger = new Logger(name, pChannel, level);
add(pLogger);
return *pLogger;
return unsafeCreate(name, pChannel, level);
}
LoggerPtr Logger::createShared(const std::string & name, Channel * pChannel, int level)
{
std::lock_guard<std::mutex> lock(getLoggerMutex());
return makeLoggerPtr(unsafeCreate(name, pChannel, level));
}
Logger& Logger::root()
{
Mutex::ScopedLock lock(_mapMtx);
std::lock_guard<std::mutex> lock(getLoggerMutex());
return unsafeGet(ROOT);
}
@ -329,7 +377,7 @@ Logger& Logger::root()
Logger* Logger::has(const std::string& name)
{
Mutex::ScopedLock lock(_mapMtx);
std::lock_guard<std::mutex> lock(getLoggerMutex());
return find(name);
}
@ -337,7 +385,7 @@ Logger* Logger::has(const std::string& name)
void Logger::shutdown()
{
Mutex::ScopedLock lock(_mapMtx);
std::lock_guard<std::mutex> lock(getLoggerMutex());
if (_pLoggerMap)
{
@ -363,25 +411,29 @@ Logger* Logger::find(const std::string& name)
}
void Logger::destroy(const std::string& name)
bool Logger::destroy(const std::string& name)
{
Mutex::ScopedLock lock(_mapMtx);
std::lock_guard<std::mutex> lock(getLoggerMutex());
if (_pLoggerMap)
{
LoggerMap::iterator it = _pLoggerMap->find(name);
if (it != _pLoggerMap->end())
{
it->second->release();
_pLoggerMap->erase(it);
if (it->second->release() == 1)
_pLoggerMap->erase(it);
return true;
}
}
return false;
}
void Logger::names(std::vector<std::string>& names)
{
Mutex::ScopedLock lock(_mapMtx);
std::lock_guard<std::mutex> lock(getLoggerMutex());
names.clear();
if (_pLoggerMap)
@ -393,6 +445,14 @@ void Logger::names(std::vector<std::string>& names)
}
}
Logger& Logger::unsafeCreate(const std::string & name, Channel * pChannel, int level)
{
if (find(name)) throw ExistsException();
Logger* pLogger = new Logger(name, pChannel, level);
add(pLogger);
return *pLogger;
}
Logger& Logger::parent(const std::string& name)
{

View File

@ -1,8 +1,5 @@
if (NOT ENABLE_LIBRARIES)
set(DEFAULT_ENABLE_RUST FALSE)
elseif((CMAKE_TOOLCHAIN_FILE MATCHES "darwin") AND (CMAKE_TOOLCHAIN_FILE MATCHES "aarch64"))
message(STATUS "Rust is not available on aarch64-apple-darwin")
set(DEFAULT_ENABLE_RUST FALSE)
else()
list (APPEND CMAKE_MODULE_PATH "${ClickHouse_SOURCE_DIR}/contrib/corrosion/cmake")
find_package(Rust)
@ -19,7 +16,9 @@ message(STATUS "Checking Rust toolchain for current target")
# See https://doc.rust-lang.org/nightly/rustc/platform-support.html
if((CMAKE_TOOLCHAIN_FILE MATCHES "linux/toolchain-x86_64") AND (CMAKE_TOOLCHAIN_FILE MATCHES "musl"))
if(CMAKE_TOOLCHAIN_FILE MATCHES "ppc64le")
set(Rust_CARGO_TARGET "powerpc64le-unknown-linux-gnu")
elseif((CMAKE_TOOLCHAIN_FILE MATCHES "linux/toolchain-x86_64") AND (CMAKE_TOOLCHAIN_FILE MATCHES "musl"))
set(Rust_CARGO_TARGET "x86_64-unknown-linux-musl")
elseif(CMAKE_TOOLCHAIN_FILE MATCHES "linux/toolchain-x86_64")
set(Rust_CARGO_TARGET "x86_64-unknown-linux-gnu")
@ -29,14 +28,14 @@ elseif(CMAKE_TOOLCHAIN_FILE MATCHES "linux/toolchain-aarch64")
set(Rust_CARGO_TARGET "aarch64-unknown-linux-gnu")
elseif((CMAKE_TOOLCHAIN_FILE MATCHES "darwin") AND (CMAKE_TOOLCHAIN_FILE MATCHES "x86_64"))
set(Rust_CARGO_TARGET "x86_64-apple-darwin")
elseif((CMAKE_TOOLCHAIN_FILE MATCHES "darwin") AND (CMAKE_TOOLCHAIN_FILE MATCHES "aarch64"))
set(Rust_CARGO_TARGET "aarch64-apple-darwin")
elseif((CMAKE_TOOLCHAIN_FILE MATCHES "freebsd") AND (CMAKE_TOOLCHAIN_FILE MATCHES "x86_64"))
set(Rust_CARGO_TARGET "x86_64-unknown-freebsd")
elseif(CMAKE_TOOLCHAIN_FILE MATCHES "linux/toolchain-riscv64")
set(Rust_CARGO_TARGET "riscv64gc-unknown-linux-gnu")
endif()
if(CMAKE_TOOLCHAIN_FILE MATCHES "ppc64le")
set(Rust_CARGO_TARGET "powerpc64le-unknown-linux-gnu")
else()
message(FATAL_ERROR "Unsupported rust target")
endif()
message(STATUS "Switched Rust target to ${Rust_CARGO_TARGET}")

View File

@ -49,17 +49,10 @@ CLICKHOUSE_PASSWORD="${CLICKHOUSE_PASSWORD:-}"
CLICKHOUSE_DB="${CLICKHOUSE_DB:-}"
CLICKHOUSE_ACCESS_MANAGEMENT="${CLICKHOUSE_DEFAULT_ACCESS_MANAGEMENT:-0}"
for dir in "$DATA_DIR" \
"$ERROR_LOG_DIR" \
"$LOG_DIR" \
"$TMP_DIR" \
"$USER_PATH" \
"$FORMAT_SCHEMA_PATH" \
"${DISKS_PATHS[@]}" \
"${DISKS_METADATA_PATHS[@]}"
do
function create_directory_and_do_chown() {
local dir=$1
# check if variable not empty
[ -z "$dir" ] && continue
[ -z "$dir" ] && return
# ensure directories exist
if [ "$DO_CHOWN" = "1" ]; then
mkdir="mkdir"
@ -81,6 +74,23 @@ do
chown -R "$USER:$GROUP" "$dir"
fi
fi
}
create_directory_and_do_chown "$DATA_DIR"
# Change the working directory to $DATA_DIR in case there are paths relative to $DATA_DIR; this also avoids
# running clickhouse-server in the root directory.
cd "$DATA_DIR"
for dir in "$ERROR_LOG_DIR" \
"$LOG_DIR" \
"$TMP_DIR" \
"$USER_PATH" \
"$FORMAT_SCHEMA_PATH" \
"${DISKS_PATHS[@]}" \
"${DISKS_METADATA_PATHS[@]}"
do
create_directory_and_do_chown "$dir"
done
# if clickhouse user is defined - create it (user "default" already exists out of box)

View File

@ -22,7 +22,7 @@ RUN apt-get update \
zstd \
--yes --no-install-recommends
RUN pip3 install numpy scipy pandas Jinja2
RUN pip3 install numpy==1.26.3 scipy==1.12.0 pandas==1.5.3 Jinja2==3.1.3
ARG odbc_driver_url="https://github.com/ClickHouse/clickhouse-odbc/releases/download/v1.1.4.20200302/clickhouse-odbc-1.1.4-Linux.tar.gz"

View File

@ -247,12 +247,15 @@ stop_logs_replication
# Try to get logs while server is running
successfuly_saved=0
for table in query_log zookeeper_log trace_log transactions_info_log
for table in query_log zookeeper_log trace_log transactions_info_log metric_log
do
clickhouse-client -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.tsv.zst || successfuly_saved=$((successfuly_saved+$?))
clickhouse-client -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.tsv.zst
successfuly_saved=$?
if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
clickhouse-client -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.1.tsv.zst || successfuly_saved=$((successfuly_saved+$?))
clickhouse-client -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.2.tsv.zst || successfuly_saved=$((successfuly_saved+$?))
clickhouse-client -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.1.tsv.zst
successfuly_saved=$((successfuly_saved | $?))
clickhouse-client -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.2.tsv.zst
successfuly_saved=$((successfuly_saved | $?))
fi
done
@ -285,7 +288,7 @@ if [ $successfuly_saved -ne 0 ]; then
# directly
# - even though ci auto-compresses some files (but not *.tsv) it does this only
# for files >64MB, we want these files to be compressed explicitly
for table in query_log zookeeper_log trace_log transactions_info_log
for table in query_log zookeeper_log trace_log transactions_info_log metric_log
do
clickhouse-local "$data_path_config" --only-system-tables -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.tsv.zst ||:
if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then

View File

@ -11,6 +11,7 @@ sidebar_label: 2023
* Remove the `status_info` configuration option and dictionaries status from the default Prometheus handler. [#54090](https://github.com/ClickHouse/ClickHouse/pull/54090) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* The experimental parts metadata cache is removed from the codebase. [#54215](https://github.com/ClickHouse/ClickHouse/pull/54215) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Disable setting `input_format_json_try_infer_numbers_from_strings` by default, so we don't try to infer numbers from strings in JSON formats by default to avoid possible parsing errors when sample data contains strings that look like a number. [#55099](https://github.com/ClickHouse/ClickHouse/pull/55099) ([Kruglov Pavel](https://github.com/Avogar)).
* IPv6 bloom filter indexes created prior to March 2023 are not compatible with the current version and have to be rebuilt. [#54200](https://github.com/ClickHouse/ClickHouse/pull/54200) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
#### New Feature
* Added new type of authentication based on SSH keys. It works only for Native TCP protocol. [#41109](https://github.com/ClickHouse/ClickHouse/pull/41109) ([George Gamezardashvili](https://github.com/InfJoker)).

View File

@ -16,7 +16,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1] [TTL expr1],
name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2] [TTL expr2],
...
) ENGINE = MySQL('host:port', 'database', 'table', 'user', 'password'[, replace_query, 'on_duplicate_clause'])
) ENGINE = MySQL({host:port, database, table, user, password[, replace_query, on_duplicate_clause] | named_collection[, option=value [,..]]})
SETTINGS
[ connection_pool_size=16, ]
[ connection_max_tries=3, ]
@ -42,23 +42,17 @@ The MySQL Table Engine is currently not available on the ClickHouse builds for M
**Engine Parameters**
- `host:port` — MySQL server address.
- `database` — Remote database name.
- `table` — Remote table name.
- `user` — MySQL user.
- `password` — User password.
- `replace_query` — Flag that converts `INSERT INTO` queries to `REPLACE INTO`. If `replace_query=1`, the query is substituted.
- `on_duplicate_clause` — The `ON DUPLICATE KEY on_duplicate_clause` expression that is added to the `INSERT` query.
Example: `INSERT INTO t (c1,c2) VALUES ('a', 2) ON DUPLICATE KEY UPDATE c2 = c2 + 1`, where `on_duplicate_clause` is `UPDATE c2 = c2 + 1`. See the [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/insert-on-duplicate.html) to find which `on_duplicate_clause` you can use with the `ON DUPLICATE KEY` clause.
To specify `on_duplicate_clause` you need to pass `0` to the `replace_query` parameter. If you simultaneously pass `replace_query = 1` and `on_duplicate_clause`, ClickHouse generates an exception.
Arguments can also be passed using [named collections](/docs/en/operations/named-collections.md). In this case, `host` and `port` should be specified separately. This approach is recommended for production environments.
Simple `WHERE` clauses such as `=, !=, >, >=, <, <=` are executed on the MySQL server.
The rest of the conditions and the `LIMIT` sampling constraint are executed in ClickHouse only after the query to MySQL finishes.
@ -71,7 +65,7 @@ CREATE TABLE test_replicas (id UInt32, name String, age UInt32, money UInt32) EN
## Usage Example {#usage-example}
Table in MySQL:
Create a table in MySQL:
``` text
mysql> CREATE TABLE `test`.`test` (
@ -94,7 +88,7 @@ mysql> select * from test;
1 row in set (0,00 sec)
```
Table in ClickHouse, retrieving data from the MySQL table created above:
Create a table in ClickHouse using plain arguments:
``` sql
CREATE TABLE mysql_table
@ -105,6 +99,25 @@ CREATE TABLE mysql_table
ENGINE = MySQL('localhost:3306', 'test', 'test', 'bayonet', '123')
```
Or using [named collections](/docs/en/operations/named-collections.md):
```sql
CREATE NAMED COLLECTION creds AS
host = 'localhost',
port = 3306,
database = 'test',
user = 'bayonet',
password = '123';
CREATE TABLE mysql_table
(
`float_nullable` Nullable(Float32),
`int_id` Int32
)
ENGINE = MySQL(creds, table='test')
```
Retrieving data from the MySQL table:
``` sql
SELECT * FROM mysql_table
```

View File

@ -16,7 +16,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
name1 type1 [DEFAULT|MATERIALIZED|ALIAS expr1] [TTL expr1],
name2 type2 [DEFAULT|MATERIALIZED|ALIAS expr2] [TTL expr2],
...
) ENGINE = PostgreSQL('host:port', 'database', 'table', 'user', 'password'[, `schema`]);
) ENGINE = PostgreSQL({host:port, database, table, user, password[, schema, [, on_conflict]] | named_collection[, option=value [,..]]})
```
See a detailed description of the [CREATE TABLE](../../../sql-reference/statements/create/table.md#create-table-query) query.
@ -35,31 +35,25 @@ The table structure can differ from the original PostgreSQL table structure:
- `user` — PostgreSQL user.
- `password` — User password.
- `schema` — Non-default table schema. Optional.
- `on conflict ...` — example: `ON CONFLICT DO NOTHING`. Optional. Note: adding this option will make insertion less efficient.
- `on_conflict` — Conflict resolution strategy. Example: `ON CONFLICT DO NOTHING`. Optional. Note: adding this option will make insertion less efficient.
or via config (since version 21.11):
[Named collections](/docs/en/operations/named-collections.md) (available since version 21.11) are recommended for production environments. Here is an example:
```
<named_collections>
<postgres1>
<host></host>
<port></port>
<user></user>
<password></password>
<table></table>
</postgres1>
<postgres2>
<host></host>
<port></port>
<user></user>
<password></password>
</postgres2>
<postgres_creds>
<host>localhost</host>
<port>5432</port>
<user>postgres</user>
<password>****</password>
<schema>schema1</schema>
</postgres_creds>
</named_collections>
```
Some parameters can be overridden by key-value arguments:
``` sql
SELECT * FROM postgresql(postgres1, schema='schema1', table='table1');
SELECT * FROM postgresql(postgres_creds, table='table1');
```
## Implementation Details {#implementation-details}

View File

@ -16,30 +16,32 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name
name1 [type1],
name2 [type2],
...
) ENGINE = Redis(host:port[, db_index[, password[, pool_size]]]) PRIMARY KEY(primary_key_name);
) ENGINE = Redis({host:port[, db_index[, password[, pool_size]]] | named_collection[, option=value [,..]] })
PRIMARY KEY(primary_key_name);
```
**Engine Parameters**
- `host:port` — Redis server address. You can omit the port; the default Redis port 6379 will be used.
- `db_index` — Redis db index range from 0 to 15, default is 0.
- `password` — User password, default is blank string.
- `pool_size` — Redis max connection pool size, default is 16.
- `primary_key_name` - any column name in the column list.
- `primary` must be specified, it supports only one column in the primary key. The primary key will be serialized in binary as a Redis key.
:::note Serialization
`PRIMARY KEY` supports only one column. The primary key will be serialized in binary as a Redis key.
Columns other than the primary key will be serialized in binary as Redis value in corresponding order.
:::
- columns other than the primary key will be serialized in binary as Redis value in corresponding order.
Arguments can also be passed using [named collections](/docs/en/operations/named-collections.md). In this case, `host` and `port` should be specified separately. This approach is recommended for production environments. At the moment, all parameters passed to Redis using named collections are required.
- queries with key equals or in filtering will be optimized to multi keys lookup from Redis. If queries without filtering key full table scan will happen which is a heavy operation.
:::note Filtering
Queries with key equality or `IN` filtering will be optimized into a multi-key lookup from Redis. Queries without a filter on the key result in a full table scan, which is a heavy operation.
:::
## Usage Example {#usage-example}
Create a table in ClickHouse which allows to read data from Redis:
Create a table in ClickHouse using the `Redis` engine with plain arguments:
``` sql
CREATE TABLE redis_table
@ -52,6 +54,31 @@ CREATE TABLE redis_table
ENGINE = Redis('redis1:6379') PRIMARY KEY(key);
```
Or using [named collections](/docs/en/operations/named-collections.md):
```
<named_collections>
<redis_creds>
<host>localhost</host>
<port>6379</port>
<password>****</password>
<pool_size>16</pool_size>
<db_index>0</db_index>
</redis_creds>
</named_collections>
```
```sql
CREATE TABLE redis_table
(
`key` String,
`v1` UInt32,
`v2` String,
`v3` Float32
)
ENGINE = Redis(redis_creds) PRIMARY KEY(key);
```
Insert:
```sql

View File

@ -39,8 +39,8 @@ If you need to update rows frequently, we recommend using the [`ReplacingMergeTr
``` sql
CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
(
name1 [type1] [[NOT] NULL] [DEFAULT|MATERIALIZED|ALIAS|EPHEMERAL expr1] [COMMENT ...] [CODEC(codec1)] [STATISTIC(stat1)] [TTL expr1] [PRIMARY KEY],
name2 [type2] [[NOT] NULL] [DEFAULT|MATERIALIZED|ALIAS|EPHEMERAL expr2] [COMMENT ...] [CODEC(codec2)] [STATISTIC(stat2)] [TTL expr2] [PRIMARY KEY],
name1 [type1] [[NOT] NULL] [DEFAULT|MATERIALIZED|ALIAS|EPHEMERAL expr1] [COMMENT ...] [CODEC(codec1)] [STATISTIC(stat1)] [TTL expr1] [PRIMARY KEY] [SETTINGS (name = value, ...)],
name2 [type2] [[NOT] NULL] [DEFAULT|MATERIALIZED|ALIAS|EPHEMERAL expr2] [COMMENT ...] [CODEC(codec2)] [STATISTIC(stat2)] [TTL expr2] [PRIMARY KEY] [SETTINGS (name = value, ...)],
...
INDEX index_name1 expr1 TYPE type1(...) [GRANULARITY value1],
INDEX index_name2 expr2 TYPE type2(...) [GRANULARITY value2],
@ -56,7 +56,7 @@ ORDER BY expr
[DELETE|TO DISK 'xxx'|TO VOLUME 'xxx' [, ...] ]
[WHERE conditions]
[GROUP BY key_expr [SET v1 = aggr_func(v1) [, v2 = aggr_func(v2) ...]] ] ]
[SETTINGS name=value, ...]
[SETTINGS name = value, ...]
```
For a description of parameters, see the [CREATE query description](/docs/en/sql-reference/statements/create/table.md).
@ -620,7 +620,7 @@ The `TTL` clause can't be used for key columns.
#### Creating a table with `TTL`:
``` sql
CREATE TABLE example_table
CREATE TABLE tab
(
d DateTime,
a Int TTL d + INTERVAL 1 MONTH,
@ -635,7 +635,7 @@ ORDER BY d;
#### Adding TTL to a column of an existing table
``` sql
ALTER TABLE example_table
ALTER TABLE tab
MODIFY COLUMN
c String TTL d + INTERVAL 1 DAY;
```
@ -643,7 +643,7 @@ ALTER TABLE example_table
#### Altering TTL of the column
``` sql
ALTER TABLE example_table
ALTER TABLE tab
MODIFY COLUMN
c String TTL d + INTERVAL 1 MONTH;
```
@ -681,7 +681,7 @@ If a column is not part of the `GROUP BY` expression and is not set explicitly i
#### Creating a table with `TTL`:
``` sql
CREATE TABLE example_table
CREATE TABLE tab
(
d DateTime,
a Int
@ -697,7 +697,7 @@ TTL d + INTERVAL 1 MONTH DELETE,
#### Altering `TTL` of the table:
``` sql
ALTER TABLE example_table
ALTER TABLE tab
MODIFY TTL d + INTERVAL 1 DAY;
```
@ -1366,7 +1366,7 @@ In this sample configuration:
The statistic declaration is in the columns section of the `CREATE` query for tables from the `*MergeTree*` Family when we enable `set allow_experimental_statistic = 1`.
``` sql
CREATE TABLE example_table
CREATE TABLE tab
(
a Int64 STATISTIC(tdigest),
b Float64
@ -1378,8 +1378,8 @@ ORDER BY a
We can also manipulate statistics with `ALTER` statements.
```sql
ALTER TABLE example_table ADD STATISTIC b TYPE tdigest;
ALTER TABLE example_table DROP STATISTIC a TYPE tdigest;
ALTER TABLE tab ADD STATISTIC b TYPE tdigest;
ALTER TABLE tab DROP STATISTIC a TYPE tdigest;
```
These lightweight statistics aggregate information about distribution of values in columns.
@ -1390,3 +1390,42 @@ They can be used for query optimization when we enable `set allow_statistic_opti
- `tdigest`
Stores distribution of values from numeric columns in [TDigest](https://github.com/tdunning/t-digest) sketch.
## Column-level Settings {#column-level-settings}
Certain MergeTree settings can be overridden at the column level:
- `max_compress_block_size` — Maximum size of blocks of uncompressed data before compressing for writing to a table.
- `min_compress_block_size` — Minimum size of blocks of uncompressed data required for compression when writing the next mark.
Example:
```sql
CREATE TABLE tab
(
id Int64,
document String SETTINGS (min_compress_block_size = 16777216, max_compress_block_size = 16777216)
)
ENGINE = MergeTree
ORDER BY id
```
Column-level settings can be modified or removed using [ALTER MODIFY COLUMN](/docs/en/sql-reference/statements/alter/column.md), for example:
- Remove `SETTINGS` from column declaration:
```sql
ALTER TABLE tab MODIFY COLUMN document REMOVE SETTINGS;
```
- Modify a setting:
```sql
ALTER TABLE tab MODIFY COLUMN document MODIFY SETTING min_compress_block_size = 8192;
```
- Reset one or more settings; this also removes the setting declaration in the column expression of the table's CREATE query.
```sql
ALTER TABLE tab MODIFY COLUMN document RESET SETTING min_compress_block_size;
```

View File

@ -296,7 +296,6 @@ host = '127.0.0.1',
port = 5432,
database = 'test',
schema = 'test_schema',
connection_pool_size = 8
```
Example of configuration:
@ -310,7 +309,6 @@ Example of configuration:
<port>5432</port>
<database>test</database>
<schema>test_schema</schema>
<connection_pool_size>8</connection_pool_size>
</mypg>
</named_collections>
</clickhouse>
@ -445,4 +443,3 @@ SELECT dictGet('dict', 'b', 1);
│ a │
└─────────────────────────┘
```

View File

@ -2866,3 +2866,10 @@ This also allows a mix of resolver types to be used.
### disable_tunneling_for_https_requests_over_http_proxy {#disable_tunneling_for_https_requests_over_http_proxy}
By default, tunneling (i.e, `HTTP CONNECT`) is used to make `HTTPS` requests over `HTTP` proxy. This setting can be used to disable it.
## max_materialized_views_count_for_table {#max_materialized_views_count_for_table}
A limit on the number of materialized views attached to a table.
Note that only directly dependent views are considered here, and the creation of one view on top of another view is not considered.
Default value: `0`.

View File

@ -172,7 +172,7 @@ If you set `timeout_before_checking_execution_speed` to 0, ClickHouse will use c
## timeout_overflow_mode {#timeout-overflow-mode}
What to do if the query is run longer than `max_execution_time`: `throw` or `break`. By default, `throw`.
What to do if the query runs longer than `max_execution_time`, or if the estimated running time is longer than `max_estimated_execution_time`: `throw` or `break`. By default, `throw`.
# max_execution_time_leaf
@ -214,6 +214,10 @@ A maximum number of execution bytes per second. Checked on every data block when
Checks that execution speed is not too slow (no less than min_execution_speed), after the specified time in seconds has expired.
## max_estimated_execution_time {#max_estimated_execution_time}
Maximum estimated query execution time in seconds. Checked on every data block when `timeout_before_checking_execution_speed` expires.
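As a rough sketch (the table name `big_table` is hypothetical), the two settings can be combined per query so that a query whose estimated running time exceeds the limit is stopped with partial results instead of an exception:

```sql
-- Hypothetical example: return partial results instead of throwing once the
-- estimated running time exceeds 60 seconds; the estimate is checked after
-- timeout_before_checking_execution_speed expires.
SELECT count()
FROM big_table
SETTINGS max_estimated_execution_time = 60, timeout_overflow_mode = 'break';
```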
## max_columns_to_read {#max-columns-to-read}
A maximum number of columns that can be read from a table in a single query. If a query requires reading a greater number of columns, it throws an exception.

View File

@ -5197,3 +5197,13 @@ The value 0 means that you can delete all tables without any restrictions.
:::note
This query setting overwrites its server setting equivalent, see [max_table_size_to_drop](/docs/en/operations/server-configuration-parameters/settings.md/#max-table-size-to-drop)
:::
## iceberg_engine_ignore_schema_evolution {#iceberg_engine_ignore_schema_evolution}
Allows ignoring schema evolution in the Iceberg table engine and reading all data using the schema specified by the user on table creation, or the latest schema parsed from the metadata at table creation.
:::note
Enabling this setting can lead to incorrect results because, if the schema has evolved, all data files will be read using the same schema.
:::
Default value: 'false'.
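A minimal sketch (the table name `iceberg_table` is hypothetical) of enabling the setting for a session before querying an Iceberg table whose schema has evolved:

```sql
-- Hypothetical example: read all data files with one schema,
-- ignoring schema evolution recorded in the Iceberg metadata.
SET iceberg_engine_ignore_schema_evolution = 1;

SELECT count()
FROM iceberg_table;
```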

View File

@ -10,7 +10,7 @@ Columns:
- `hostname` ([LowCardinality(String)](../../sql-reference/data-types/string.md)) — Hostname of the server executing the query.
- `event_date` ([Date](../../sql-reference/data-types/date.md)) — Event date.
- `event_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Event time.
- `name` ([String](../../sql-reference/data-types/string.md)) — Metric name.
- `metric` ([String](../../sql-reference/data-types/string.md)) — Metric name.
- `value` ([Float64](../../sql-reference/data-types/float.md)) — Metric value.
**Example**

View File

@ -49,7 +49,7 @@ Columns:
- `last_attempt_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Date and time when the task was last attempted.
- `num_postponed` ([UInt32](../../sql-reference/data-types/int-uint.md)) — The number of postponed tasks.
- `num_postponed` ([UInt32](../../sql-reference/data-types/int-uint.md)) — The number of times the action was postponed.
- `postpone_reason` ([String](../../sql-reference/data-types/string.md)) — The reason why the task was postponed.

View File

@ -35,8 +35,8 @@ Types are equivalent to types of C:
Aliases:
- `Float32``FLOAT`.
- `Float64``DOUBLE`.
- `Float32``FLOAT`, `REAL`, `SINGLE`.
- `Float64``DOUBLE`, `DOUBLE PRECISION`.
When creating tables, numeric parameters for floating point numbers can be set (e.g. `FLOAT(12)`, `FLOAT(15, 22)`, `DOUBLE(12)`, `DOUBLE(4, 18)`), but ClickHouse ignores them.
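As a quick illustration (the table name is hypothetical), the aliases are accepted in `CREATE TABLE` and resolve to the native `Float32`/`Float64` types, which `DESCRIBE` makes visible:

```sql
-- Hypothetical example: SQL-style aliases map to Float32 and Float64.
CREATE TABLE float_alias_demo
(
    r REAL,             -- stored as Float32
    d DOUBLE PRECISION  -- stored as Float64
)
ENGINE = Memory;

DESCRIBE TABLE float_alias_demo;
```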

View File

@ -21,10 +21,10 @@ When creating tables, numeric parameters for integer numbers can be set (e.g. `T
Aliases:
- `Int8``TINYINT`, `BOOL`, `BOOLEAN`, `INT1`.
- `Int16``SMALLINT`, `INT2`.
- `Int32``INT`, `INT4`, `INTEGER`.
- `Int64``BIGINT`.
- `Int8``TINYINT`, `INT1`, `BYTE`, `TINYINT SIGNED`, `INT1 SIGNED`.
- `Int16``SMALLINT`, `SMALLINT SIGNED`.
- `Int32``INT`, `INTEGER`, `MEDIUMINT`, `MEDIUMINT SIGNED`, `INT SIGNED`, `INTEGER SIGNED`.
- `Int64``BIGINT`, `SIGNED`, `BIGINT SIGNED`, `TIME`.
## UInt Ranges
@ -34,3 +34,11 @@ Aliases:
- `UInt64` — \[0 : 18446744073709551615\]
- `UInt128` — \[0 : 340282366920938463463374607431768211455\]
- `UInt256` — \[0 : 115792089237316195423570985008687907853269984665640564039457584007913129639935\]
Aliases:
- `UInt8``TINYINT UNSIGNED`, `INT1 UNSIGNED`.
- `UInt16``SMALLINT UNSIGNED`.
- `UInt32``MEDIUMINT UNSIGNED`, `INT UNSIGNED`, `INTEGER UNSIGNED`
- `UInt64``UNSIGNED`, `BIGINT UNSIGNED`, `BIT`, `SET`

View File

@ -1805,6 +1805,7 @@ Example of settings:
``` xml
<source>
<postgresql>
<host>postgresql-hostname</host>
<port>5432</port>
<user>clickhouse</user>
<password>qwerty</password>

View File

@ -23,10 +23,11 @@ The following actions are supported:
- [RENAME COLUMN](#rename-column) — Renames an existing column.
- [CLEAR COLUMN](#clear-column) — Resets column values.
- [COMMENT COLUMN](#comment-column) — Adds a text comment to the column.
- [MODIFY COLUMN](#modify-column) — Changes columns type, default expression and TTL.
- [MODIFY COLUMN](#modify-column) — Changes columns type, default expression, TTL, and column settings.
- [MODIFY COLUMN REMOVE](#modify-column-remove) — Removes one of the column properties.
- [MODIFY COLUMN MODIFY SETTING](#modify-column-modify-setting) — Changes column settings.
- [MODIFY COLUMN RESET SETTING](#modify-column-reset-setting) — Resets column settings.
- [MATERIALIZE COLUMN](#materialize-column) — Materializes the column in the parts where the column is missing.
These actions are described in detail below.
## ADD COLUMN
@ -208,7 +209,7 @@ The `ALTER` query for changing columns is replicated. The instructions are saved
## MODIFY COLUMN REMOVE
Removes one of the column properties: `DEFAULT`, `ALIAS`, `MATERIALIZED`, `CODEC`, `COMMENT`, `TTL`.
Removes one of the column properties: `DEFAULT`, `ALIAS`, `MATERIALIZED`, `CODEC`, `COMMENT`, `TTL`, `SETTING`.
Syntax:
@ -228,6 +229,43 @@ ALTER TABLE table_with_ttl MODIFY COLUMN column_ttl REMOVE TTL;
- [REMOVE TTL](ttl.md).
## MODIFY COLUMN MODIFY SETTING
Modify a column setting.
Syntax:
```sql
ALTER TABLE table_name MODIFY COLUMN MODIFY SETTING name=value,...;
```
**Example**
Modify the column's `max_compress_block_size` to `1MB`:
```sql
ALTER TABLE table_name MODIFY COLUMN MODIFY SETTING max_compress_block_size = 1048576;
```
## MODIFY COLUMN RESET SETTING
Reset a column setting; this also removes the setting declaration in the column expression of the table's CREATE query.
Syntax:
```sql
ALTER TABLE table_name MODIFY COLUMN RESET SETTING name,...;
```
**Example**
Reset the column setting `max_compress_block_size` to its default value:
```sql
ALTER TABLE table_name MODIFY COLUMN RESET SETTING max_compress_block_size;
```
## MATERIALIZE COLUMN
Materializes or updates a column with an expression for a default value (`DEFAULT` or `MATERIALIZED`).

View File

@ -8,8 +8,6 @@ sidebar_label: VIEW
You can modify `SELECT` query that was specified when a [materialized view](../create/view.md#materialized) was created with the `ALTER TABLE … MODIFY QUERY` statement without interrupting ingestion process.
The `allow_experimental_alter_materialized_view_structure` setting must be enabled.
This command is intended to change a materialized view created with the `TO [db.]name` clause. It does not change the structure of the underlying storage table, and it does not change the column definitions of the materialized view; because of this, the application of this command is very limited for materialized views created without the `TO [db.]name` clause.
**Example with TO table**

View File

@ -97,7 +97,7 @@ This feature is deprecated and will be removed in the future.
For your convenience, the old documentation is located [here](https://pastila.nl/?00f32652/fdf07272a7b54bda7e13b919264e449f.md)
## Refreshable Materialized View {#refreshable-materialized-view}
## Refreshable Materialized View [Experimental] {#refreshable-materialized-view}
```sql
CREATE MATERIALIZED VIEW [IF NOT EXISTS] [db.]table_name
@ -120,7 +120,8 @@ Differences from regular non-refreshable materialized views:
:::note
Refreshable materialized views are a work in progress. Setting `allow_experimental_refreshable_materialized_view = 1` is required for creating one. Current limitations:
* not compatible with Replicated database or table engines,
* not compatible with Replicated database or table engines,
* not supported in ClickHouse Cloud,
* require [Atomic database engine](../../../engines/database-engines/atomic.md),
* no retries for failed refresh - we just skip to the next scheduled refresh time,
* no limit on number of concurrent refreshes.
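A minimal sketch under these constraints (the source table `events`, its `event_time` column, and the view name are hypothetical):

```sql
-- Hypothetical sketch: recompute hourly event counts once per hour.
SET allow_experimental_refreshable_materialized_view = 1;

CREATE MATERIALIZED VIEW hourly_event_counts
REFRESH EVERY 1 HOUR
ENGINE = MergeTree
ORDER BY hour
AS SELECT
    toStartOfHour(event_time) AS hour,
    count() AS events
FROM events
GROUP BY hour;
```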

View File

@ -9,10 +9,6 @@ sidebar_label: RENAME
Renames databases, tables, or dictionaries. Several entities can be renamed in a single query.
Note that the `RENAME` query with several entities is non-atomic operation. To swap entities names atomically, use the [EXCHANGE](./exchange.md) statement.
:::note
The `RENAME` query is supported by the [Atomic](../../engines/database-engines/atomic.md) database engine only.
:::
**Syntax**
```sql

View File

@ -7,7 +7,7 @@ keywords: [udf, user defined function, clickhouse, executable, table, function]
# executable Table Function for UDFs
The `executable` table function creates a table based on the output of a user-defined function (UDF) that you define in a script that outputs rows to **stdout**. The executable script is stored in the `users_scripts` directory and can read data from any source.
The `executable` table function creates a table based on the output of a user-defined function (UDF) that you define in a script that outputs rows to **stdout**. The executable script is stored in the `users_scripts` directory and can read data from any source. Make sure your ClickHouse server has all the required packages to run the executable script. For example, if it is a Python script, ensure that the server has the necessary Python packages installed.
You can optionally include one or more input queries that stream their results to **stdin** for the script to read.
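As a rough sketch (the script name `my_script.py` and its output columns are hypothetical), a call looks like this:

```sql
-- Hypothetical example: read the rows that my_script.py writes to stdout
-- as TabSeparated data with the given structure.
SELECT *
FROM executable('my_script.py', TabSeparated, 'id UInt32, value String');
```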

View File

@ -9,7 +9,7 @@ sidebar_label: fuzzJSON
Perturbs a JSON string with random variations.
``` sql
fuzzJSON({ named_collection [option=value [,..]] | json_str[, random_seed] })
fuzzJSON({ named_collection [, option=value [,..]] | json_str[, random_seed] })
```
**Arguments**

View File

@ -16,7 +16,8 @@ If you have multiple replicas in your cluster, you can use the [s3Cluster functi
**Syntax**
``` sql
gcs(path [,hmac_key, hmac_secret] [,format] [,structure] [,compression])
gcs(url [, NOSIGN | hmac_key, hmac_secret] [,format] [,structure] [,compression_method])
gcs(named_collection[, option=value [,..]])
```
:::tip GCS
@ -24,10 +25,9 @@ The GCS Table Function integrates with Google Cloud Storage by using the GCS XML
:::
**Arguments**
- `path` — Bucket url with path to file. Supports following wildcards in readonly mode: `*`, `**`, `?`, `{abc,def}` and `{N..M}` where `N`, `M` — numbers, `'abc'`, `'def'` — strings.
**Parameters**
- `url` — Bucket path to file. Supports following wildcards in readonly mode: `*`, `**`, `?`, `{abc,def}` and `{N..M}` where `N`, `M` — numbers, `'abc'`, `'def'` — strings.
:::note GCS
The GCS path is in this format as the endpoint for the Google XML API is different than the JSON API:
```
@ -35,10 +35,21 @@ The GCS Table Function integrates with Google Cloud Storage by using the GCS XML
```
and not ~~https://storage.cloud.google.com~~.
:::
- `NOSIGN` — If this keyword is provided in place of credentials, none of the requests will be signed.
- `hmac_key` and `hmac_secret` — Keys that specify credentials to use with given endpoint. Optional.
- `format` — The [format](../../interfaces/formats.md#formats) of the file.
- `structure` — Structure of the table. Format `'column1_name column1_type, column2_name column2_type, ...'`.
- `compression_method` — Parameter is optional. Supported values: `none`, `gzip/gz`, `brotli/br`, `xz/LZMA`, `zstd/zst`. By default, it will autodetect compression method by file extension.
Arguments can also be passed using [named collections](/docs/en/operations/named-collections.md). In this case `url`, `format`, `structure`, `compression_method` work in the same way, and some extra parameters are supported:
- `access_key_id``hmac_key`, optional.
- `secret_access_key``hmac_secret`, optional.
- `filename` — appended to the url if specified.
- `use_environment_credentials` — enabled by default, allows passing extra parameters using environment variables `AWS_CONTAINER_CREDENTIALS_RELATIVE_URI`, `AWS_CONTAINER_CREDENTIALS_FULL_URI`, `AWS_CONTAINER_AUTHORIZATION_TOKEN`, `AWS_EC2_METADATA_DISABLED`.
- `no_sign_request` — disabled by default.
- `expiration_window_seconds` — default value is 120.
- `format` — The [format](../../interfaces/formats.md#formats) of the file.
- `structure` — Structure of the table. Format `'column1_name column1_type, column2_name column2_type, ...'`.
- `compression` — Parameter is optional. Supported values: `none`, `gzip/gz`, `brotli/br`, `xz/LZMA`, `zstd/zst`. By default, it will autodetect compression by file extension.
**Returned value**
@ -61,7 +72,7 @@ LIMIT 2;
└─────────┴─────────┴─────────┘
```
The similar but from file with `gzip` compression:
The same, but from a file with the `gzip` compression method:
``` sql
SELECT *
@ -158,6 +169,16 @@ The below get data from all `test-data.csv.gz` files from any folder inside `my-
SELECT * FROM gcs('https://storage.googleapis.com/my-test-bucket-768/**/test-data.csv.gz', 'CSV', 'name String, value UInt32', 'gzip');
```
For production use cases it is recommended to use [named collections](/docs/en/operations/named-collections.md). Here is an example:
``` sql
CREATE NAMED COLLECTION creds AS
access_key_id = '***',
secret_access_key = '***';
SELECT count(*)
FROM gcs(creds, url='https://s3-object-url.csv')
```
## Partitioned Write
If you specify `PARTITION BY` expression when inserting data into `GCS` table, a separate file is created for each partition value. Splitting the data into separate files helps to improve reading operations efficiency.

View File

@ -11,31 +11,25 @@ Allows `SELECT` and `INSERT` queries to be performed on data that is stored on a
**Syntax**
``` sql
mysql('host:port', 'database', 'table', 'user', 'password'[, replace_query, 'on_duplicate_clause'])
mysql({host:port, database, table, user, password[, replace_query, on_duplicate_clause] | named_collection[, option=value [,..]]})
```
**Arguments**
**Parameters**
- `host:port` — MySQL server address.
- `database` — Remote database name.
- `table` — Remote table name.
- `user` — MySQL user.
- `password` — User password.
- `replace_query` — Flag that converts `INSERT INTO` queries to `REPLACE INTO`. Possible values:
- `0` - The query is executed as `INSERT INTO`.
- `1` - The query is executed as `REPLACE INTO`.
- `on_duplicate_clause` — The `ON DUPLICATE KEY on_duplicate_clause` expression that is added to the `INSERT` query. Can be specified only with `replace_query = 0` (if you simultaneously pass `replace_query = 1` and `on_duplicate_clause`, ClickHouse generates an exception).
Example: `INSERT INTO t (c1,c2) VALUES ('a', 2) ON DUPLICATE KEY UPDATE c2 = c2 + 1;`
`on_duplicate_clause` here is `UPDATE c2 = c2 + 1`. See the MySQL documentation to find which `on_duplicate_clause` you can use with the `ON DUPLICATE KEY` clause.
Arguments can also be passed using [named collections](/docs/en/operations/named-collections.md). In this case, `host` and `port` should be specified separately. This approach is recommended for production environments.
Simple `WHERE` clauses such as `=, !=, >, >=, <, <=` are currently executed on the MySQL server.
The rest of the conditions and the `LIMIT` sampling constraint are executed in ClickHouse only after the query to MySQL finishes.
@ -86,6 +80,18 @@ Selecting data from ClickHouse:
SELECT * FROM mysql('localhost:3306', 'test', 'test', 'bayonet', '123');
```
Or using [named collections](/docs/en/operations/named-collections.md):
```sql
CREATE NAMED COLLECTION creds AS
host = 'localhost',
port = 3306,
database = 'test',
user = 'bayonet',
password = '123';
SELECT * FROM mysql(creds, table='test');
```
``` text
┌─int_id─┬─float─┐
│ 1 │ 2 │

View File

@ -11,10 +11,10 @@ Allows `SELECT` and `INSERT` queries to be performed on data that is stored on a
**Syntax**
``` sql
postgresql('host:port', 'database', 'table', 'user', 'password'[, `schema`])
postgresql({host:port, database, table, user, password[, schema, [, on_conflict]] | named_collection[, option=value [,..]]})
```
**Arguments**
**Parameters**
- `host:port` — PostgreSQL server address.
- `database` — Remote database name.
@ -22,6 +22,9 @@ postgresql('host:port', 'database', 'table', 'user', 'password'[, `schema`])
- `user` — PostgreSQL user.
- `password` — User password.
- `schema` — Non-default table schema. Optional.
- `on_conflict` — Conflict resolution strategy. Example: `ON CONFLICT DO NOTHING`. Optional.
Arguments can also be passed using [named collections](/docs/en/operations/named-collections.md). In this case, `host` and `port` should be specified separately. This approach is recommended for production environments.
**Returned Value**
@ -86,12 +89,24 @@ postgresql> SELECT * FROM test;
(1 row)
```
Selecting data from ClickHouse:
Selecting data from ClickHouse using plain arguments:
```sql
SELECT * FROM postgresql('localhost:5432', 'test', 'test', 'postgresql_user', 'password') WHERE str IN ('test');
```
Or using [named collections](/docs/en/operations/named-collections.md):
```sql
CREATE NAMED COLLECTION mypg AS
host = 'localhost',
port = 5432,
database = 'test',
user = 'postgresql_user',
password = 'password';
SELECT * FROM postgresql(mypg, table='test') WHERE str IN ('test');
```
``` text
┌─int_id─┬─int_nullable─┬─float─┬─str──┬─float_nullable─┐
│ 1 │ ᴺᵁᴸᴸ │ 2 │ test │ ᴺᵁᴸᴸ │

View File

@ -34,6 +34,7 @@ redis(host:port, key, structure[, db_index[, password[, pool_size]]])
- queries with key equality or `IN` filtering will be optimized into a multi-key lookup from Redis; queries without a filter on the key result in a full table scan, which is a heavy operation.
[Named collections](/docs/en/operations/named-collections.md) are not supported for the `redis` table function at the moment.
**Returned Value**
@ -41,17 +42,7 @@ A table object with key as Redis key, other columns packaged together as Redis v
## Usage Example {#usage-example}
Create a table in ClickHouse which allows to read data from Redis:
``` sql
CREATE TABLE redis_table
(
`k` String,
`m` String,
`n` UInt32
)
ENGINE = Redis('redis1:6379') PRIMARY KEY(k);
```
Read from Redis:
```sql
SELECT * FROM redis(
@ -61,6 +52,15 @@ SELECT * FROM redis(
)
```
Insert into Redis:
```sql
INSERT INTO TABLE FUNCTION redis(
'redis1:6379',
'key',
'key String, v1 String, v2 UInt32') values ('1', '1', 1);
```
**See Also**
- [The `Redis` table engine](/docs/en/engines/table-engines/integrations/redis.md)

View File

@ -13,10 +13,12 @@ Both functions can be used in `SELECT` and `INSERT` queries.
## Syntax
``` sql
remote('addresses_expr', [db, table, 'user'[, 'password'], sharding_key])
remote('addresses_expr', [db.table, 'user'[, 'password'], sharding_key])
remoteSecure('addresses_expr', [db, table, 'user'[, 'password'], sharding_key])
remoteSecure('addresses_expr', [db.table, 'user'[, 'password'], sharding_key])
remote(addresses_expr, [db, table, user [, password], sharding_key])
remote(addresses_expr, [db.table, user [, password], sharding_key])
remote(named_collection[, option=value [,..]])
remoteSecure(addresses_expr, [db, table, user [, password], sharding_key])
remoteSecure(addresses_expr, [db.table, user [, password], sharding_key])
remoteSecure(named_collection[, option=value [,..]])
```
## Parameters
@ -39,6 +41,8 @@ remoteSecure('addresses_expr', [db.table, 'user'[, 'password'], sharding_key])
- `password` — User password. If not specified, an empty password is used. Type: [String](../../sql-reference/data-types/string.md).
- `sharding_key` — Sharding key to support distributing data across nodes. For example: `insert into remote('127.0.0.1:9000,127.0.0.2', db, table, 'default', rand())`. Type: [UInt32](../../sql-reference/data-types/int-uint.md).
Arguments can also be passed using [named collections](/docs/en/operations/named-collections.md).
## Returned value
A table located on a remote server.
@ -82,7 +86,16 @@ example01-01-1,example01-02-1
SELECT * FROM remote('127.0.0.1', db.remote_engine_table) LIMIT 3;
```
### Inserting data from a remote server into a table:
Or using [named collections](/docs/en/operations/named-collections.md):
```sql
CREATE NAMED COLLECTION creds AS
host = '127.0.0.1',
database = 'db';
SELECT * FROM remote(creds, table='remote_engine_table') LIMIT 3;
```
### Inserting data into a table on a remote server:
``` sql
CREATE TABLE remote_table (name String, value UInt32) ENGINE=Memory;

View File

@ -16,33 +16,41 @@ When using the `s3 table function` with [`INSERT INTO...SELECT`](../../sql-refer
**Syntax**
``` sql
s3(path [, NOSIGN | aws_access_key_id, aws_secret_access_key [,session_token]] [,format] [,structure] [,compression])
s3(url [, NOSIGN | access_key_id, secret_access_key, [session_token]] [,format] [,structure] [,compression_method])
s3(named_collection[, option=value [,..]])
```
:::tip GCS
The S3 Table Function integrates with Google Cloud Storage by using the GCS XML API and HMAC keys. See the [Google interoperability docs]( https://cloud.google.com/storage/docs/interoperability) for more details about the endpoint and HMAC.
For GCS, substitute your HMAC key and HMAC secret where you see `aws_access_key_id` and `aws_secret_access_key`.
For GCS, substitute your HMAC key and HMAC secret where you see `access_key_id` and `secret_access_key`.
:::
**Arguments**
**Parameters**
- `path` — Bucket url with path to file. Supports following wildcards in readonly mode: `*`, `**`, `?`, `{abc,def}` and `{N..M}` where `N`, `M` — numbers, `'abc'`, `'def'` — strings. For more information see [here](../../engines/table-engines/integrations/s3.md#wildcards-in-path).
The `s3` table function supports the following plain parameters:
- `url` — Bucket url with path to file. Supports following wildcards in readonly mode: `*`, `**`, `?`, `{abc,def}` and `{N..M}` where `N`, `M` — numbers, `'abc'`, `'def'` — strings. For more information see [here](../../engines/table-engines/integrations/s3.md#wildcards-in-path).
:::note GCS
The GCS path is in this format as the endpoint for the Google XML API is different than the JSON API:
The GCS url is in this format as the endpoint for the Google XML API is different than the JSON API:
```
https://storage.googleapis.com/<bucket>/<folder>/<filename(s)>
```
and not ~~https://storage.cloud.google.com~~.
:::
- `NOSIGN` - If this keyword is provided in place of credentials, all the requests will not be signed.
- `access_key_id`, `secret_access_key` — Keys that specify credentials to use with given endpoint. Optional.
- `NOSIGN` — If this keyword is provided in place of credentials, none of the requests will be signed.
- `access_key_id` and `secret_access_key` — Keys that specify credentials to use with given endpoint. Optional.
- `session_token` — Session token to use with the given keys. Optional when passing keys.
- `format` — The [format](../../interfaces/formats.md#formats) of the file.
- `structure` — Structure of the table. Format `'column1_name column1_type, column2_name column2_type, ...'`.
- `compression` — Parameter is optional. Supported values: `none`, `gzip/gz`, `brotli/br`, `xz/LZMA`, `zstd/zst`. By default, it will autodetect compression by file extension.
- `compression_method` — Parameter is optional. Supported values: `none`, `gzip/gz`, `brotli/br`, `xz/LZMA`, `zstd/zst`. By default, it will autodetect compression method by file extension.
Arguments can also be passed using [named collections](/docs/en/operations/named-collections.md). In this case `url`, `access_key_id`, `secret_access_key`, `format`, `structure` and `compression_method` work in the same way, and some extra parameters are supported (see the sketch after this list):
- `filename` — appended to the url if specified.
- `use_environment_credentials` — enabled by default, allows passing extra parameters using environment variables `AWS_CONTAINER_CREDENTIALS_RELATIVE_URI`, `AWS_CONTAINER_CREDENTIALS_FULL_URI`, `AWS_CONTAINER_AUTHORIZATION_TOKEN`, `AWS_EC2_METADATA_DISABLED`.
- `no_sign_request` — disabled by default.
- `expiration_window_seconds` — default value is 120.
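As a sketch of how these extra parameters can be combined, the following assumes a hypothetical public bucket and collection name; `no_sign_request` is set to `1` so requests are sent unsigned, and `filename` is appended to the collection's `url`:

``` sql
CREATE NAMED COLLECTION public_bucket AS
    url = 'https://my-public-bucket.s3.amazonaws.com/', -- hypothetical bucket
    no_sign_request = 1,
    format = 'CSVWithNames';

SELECT count(*)
FROM s3(public_bucket, filename = 'events.csv');
```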
**Returned value**
@ -82,7 +90,7 @@ FROM s3(
LIMIT 5;
```
ClickHouse also can determine the compression of the file. For example, if the file was zipped up with a `.csv.gz` extension, ClickHouse would decompress the file automatically.
ClickHouse can also determine the compression method of the file. For example, if the file was compressed with gzip and has a `.csv.gz` extension, ClickHouse decompresses it automatically.
:::
@ -190,6 +198,16 @@ Custom mapper can be added into `config.xml`:
</url_scheme_mappers>
```
For production use cases, it is recommended to use [named collections](/docs/en/operations/named-collections.md). Here is an example:
``` sql
CREATE NAMED COLLECTION creds AS
access_key_id = '***',
secret_access_key = '***';
SELECT count(*)
FROM s3(creds, url='https://s3-object-url.csv')
```
## Partitioned Write
If you specify `PARTITION BY` expression when inserting data into `S3` table, a separate file is created for each partition value. Splitting the data into separate files helps to improve reading operations efficiency.
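A minimal sketch of a partitioned write (the bucket URL and credentials are hypothetical); the `{_partition_id}` macro in the object name is replaced with the partition value, so each partition is written to its own file:

``` sql
INSERT INTO TABLE FUNCTION
    s3('https://my-bucket.s3.amazonaws.com/file_{_partition_id}.csv',
       'EXAMPLE_KEY_ID', 'EXAMPLE_SECRET', 'CSV', 'a String, b UInt32')
PARTITION BY a
VALUES ('x', 1), ('x', 2), ('y', 3);
```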

View File

@ -4,23 +4,34 @@ sidebar_position: 181
sidebar_label: s3Cluster
title: "s3Cluster Table Function"
---
This is an extension to the [s3](/docs/en/sql-reference/table-functions/s3.md) table function.
Allows processing files from [Amazon S3](https://aws.amazon.com/s3/) and [Google Cloud Storage](https://cloud.google.com/storage/) in parallel from many nodes in a specified cluster. On the initiator it creates a connection to all nodes in the cluster, expands asterisks in the S3 file path, and dispatches each file dynamically. On a worker node it asks the initiator for the next task to process and processes it. This is repeated until all tasks are finished.
**Syntax**
``` sql
s3Cluster(cluster_name, source, [,access_key_id, secret_access_key, [session_token]] [,format] [,structure])
s3Cluster(cluster_name, url [, NOSIGN | access_key_id, secret_access_key, [session_token]] [,format] [,structure] [,compression_method])
s3Cluster(cluster_name, named_collection[, option=value [,..]])
```
**Arguments**
- `cluster_name` — Name of a cluster that is used to build a set of addresses and connection parameters to remote and local servers.
- `source` — URL to a file or a bunch of files. Supports following wildcards in readonly mode: `*`, `**`, `?`, `{'abc','def'}` and `{N..M}` where `N`, `M` — numbers, `abc`, `def` — strings. For more information see [Wildcards In Path](../../engines/table-engines/integrations/s3.md#wildcards-in-path).
- `access_key_id`, `secret_access_key` — Keys that specify credentials to use with given endpoint. Optional.
- `url` — path to a file or a bunch of files. Supports the following wildcards in readonly mode: `*`, `**`, `?`, `{'abc','def'}` and `{N..M}` where `N`, `M` — numbers, `abc`, `def` — strings. For more information see [Wildcards In Path](../../engines/table-engines/integrations/s3.md#wildcards-in-path).
- `NOSIGN` — If this keyword is provided in place of credentials, none of the requests will be signed (see the example after this list).
- `access_key_id` and `secret_access_key` — Keys that specify credentials to use with given endpoint. Optional.
- `session_token` — Session token to use with the given keys. Optional when passing keys.
- `format` — The [format](../../interfaces/formats.md#formats) of the file.
- `structure` — Structure of the table. Format `'column1_name column1_type, column2_name column2_type, ...'`.
- `compression_method` — Parameter is optional. Supported values: `none`, `gzip/gz`, `brotli/br`, `xz/LZMA`, `zstd/zst`. By default, it will autodetect compression method by file extension.
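Putting the plain parameters together, a hedged sketch of counting rows in a public bucket without signing requests; the cluster name and URL are hypothetical:

``` sql
SELECT count(*)
FROM s3Cluster(
    'cluster_simple',                                       -- hypothetical cluster name
    'https://my-public-bucket.s3.amazonaws.com/data/*.csv', -- hypothetical public objects
    NOSIGN,
    'CSVWithNames'
);
```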
Arguments can also be passed using [named collections](/docs/en/operations/named-collections.md). In this case `url`, `access_key_id`, `secret_access_key`, `format`, `structure` and `compression_method` work in the same way, and some extra parameters are supported:
- `filename` — appended to the url if specified.
- `use_environment_credentials` — enabled by default, allows passing extra parameters using environment variables `AWS_CONTAINER_CREDENTIALS_RELATIVE_URI`, `AWS_CONTAINER_CREDENTIALS_FULL_URI`, `AWS_CONTAINER_AUTHORIZATION_TOKEN`, `AWS_EC2_METADATA_DISABLED`.
- `no_sign_request` — disabled by default.
- `expiration_window_seconds` — default value is 120.
**Returned value**
@ -47,6 +58,18 @@ Count the total amount of rows in all files in the cluster `cluster_simple`:
If your listing of files contains number ranges with leading zeros, use the construction with braces for each digit separately or use `?`.
:::
For production use cases, it is recommended to use [named collections](/docs/en/operations/named-collections.md). Here is an example:
``` sql
CREATE NAMED COLLECTION creds AS
access_key_id = 'minio',
secret_access_key = 'minio123';
SELECT count(*) FROM s3Cluster(
'cluster_simple', creds, url='https://s3-object-url.csv',
format='CSV', structure='name String, value UInt32, polygon Array(Array(Tuple(Float64, Float64)))'
)
```
**See Also**
- [S3 engine](../../engines/table-engines/integrations/s3.md)

View File

@ -49,7 +49,7 @@ slug: /ru/operations/system-tables/replication_queue
- `last_attempt_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — дата и время последней попытки выполнить задачу.
- `num_postponed` ([UInt32](../../sql-reference/data-types/int-uint.md)) — количество отложенных задач.
- `num_postponed` ([UInt32](../../sql-reference/data-types/int-uint.md)) — количество откладываний запуска задачи.
- `postpone_reason` ([String](../../sql-reference/data-types/string.md)) — причина, по которой была отложена задача.

View File

@ -1,27 +1,27 @@
---
slug: /zh/faq/general/ne-tormozit
title: "What does \u201C\u043D\u0435 \u0442\u043E\u0440\u043C\u043E\u0437\u0438\u0442\
\u201D mean?"
title: "\u201C\u043D\u0435 \u0442\u043E\u0440\u043C\u043E\u0437\u0438\u0442\
\u201D 是什么意思?"
toc_hidden: true
sidebar_position: 11
---
# What Does “Не тормозит” Mean? {#what-does-ne-tormozit-mean}
# “Не тормозит” 是什么意思? {#what-does-ne-tormozit-mean}
This question usually arises when people see official ClickHouse t-shirts. They have large words **“ClickHouse не тормозит”** on the front.
这个问题通常出现在人们看到官方 ClickHouse T恤时。它们的正面印有大字**“ClickHouse не тормозит”**。
Before ClickHouse became open-source, it has been developed as an in-house storage system by the largest Russian IT company, [Yandex](https://yandex.com/company/). Thats why it initially got its slogan in Russian, which is “не тормозит” (pronounced as “ne tormozit”). After the open-source release we first produced some of those t-shirts for events in Russia and it was a no-brainer to use the slogan as-is.
在 ClickHouse 开源之前,它作为俄罗斯最大的 IT 公司 [Yandex](https://yandex.com/company/) 的内部存储系统而开发。这就是为什么它最初获得了俄文口号“не тормозит”发音为“ne tormozit”。在开源发布后我们首先为俄罗斯的活动制作了一些这样的T恤使用原汁原味的口号是理所当然的。
One of the following batches of those t-shirts was supposed to be given away on events outside of Russia and we tried to make the English version of the slogan. Unfortunately, the Russian language is kind of elegant in terms of expressing stuff and there was a restriction of limited space on a t-shirt, so we failed to come up with good enough translation (most options appeared to be either long or inaccurate) and decided to keep the slogan in Russian even on t-shirts produced for international events. It appeared to be a great decision because people all over the world get positively surprised and curious when they see it.
其中一批这样的T恤原本打算在俄罗斯之外的活动中赠送我们尝试制作口号的英文版本。不幸的是俄语在表达方面有些优雅而且T恤上的空间有限所以我们未能提出足够好的翻译大多数选项要么太长要么不够准确并决定即使在为国际活动制作的T恤上也保留俄文口号。这被证明是一个绝妙的决定因为全世界的人们看到它时都会感到惊喜和好奇。
So, what does it mean? Here are some ways to translate *“не тормозит”*:
那么,它是什么意思呢?以下是翻译“не тормозит”的一些方式:
- If you translate it literally, itd be something like *“ClickHouse does not press the brake pedal”*.
- If youd want to express it as close to how it sounds to a Russian person with IT background, itd be something like *“If your larger system lags, its not because it uses ClickHouse”*.
- Shorter, but not so precise versions could be *“ClickHouse is not slow”*, *“ClickHouse does not lag”* or just *“ClickHouse is fast”*.
- 如果你直译那就是“ClickHouse 不踩刹车”。
- 如果你想尽可能接近一个有 IT 背景的俄罗斯人的听觉感受,那就是“如果你的大型系统延迟,不是因为它使用了 ClickHouse”。
- 更短但不那么精确的版本可能是“ClickHouse 不慢”“ClickHouse 不卡顿”或仅仅“ClickHouse 很快”。
If you havent seen one of those t-shirts in person, you can check them out online in many ClickHouse-related videos. For example, this one:
如果您还没有亲眼见过这些 T恤可以在许多与 ClickHouse 相关的视频中在线查看。例如,这个:
![iframe](https://www.youtube.com/embed/bSyQahMVZ7w)
P.S. These t-shirts are not for sale, they are given away for free on most [ClickHouse Meetups](https://clickhouse.com/#meet), usually for best questions or other forms of active participation.
附言:这些 T恤不出售它们在大多数 [ClickHouse 聚会](https://clickhouse.com/#meet)上免费赠送,通常是给出最佳问题或其他形式的积极参与者。

View File

@ -1,63 +1,63 @@
---
slug: /zh/faq/general/why-clickhouse-is-so-fast
title: Why is ClickHouse so fast?
title: 为什么 ClickHouse 如此快速?
toc_hidden: true
sidebar_position: 8
---
# Why ClickHouse Is So Fast? {#why-clickhouse-is-so-fast}
# 为什么 ClickHouse 如此快速? {#why-clickhouse-is-so-fast}
It was designed to be fast. Query execution performance has always been a top priority during the development process, but other important characteristics like user-friendliness, scalability, and security were also considered so ClickHouse could become a real production system.
它被设计成一个快速的系统。在开发过程中,查询执行性能一直是首要考虑的优先级,但也考虑了其他重要特性,如用户友好性、可扩展性和安全性,使 ClickHouse 成为一个真正的生产系统。
ClickHouse was initially built as a prototype to do just a single task well: to filter and aggregate data as fast as possible. Thats what needs to be done to build a typical analytical report and thats what a typical [GROUP BY](../../sql-reference/statements/select/group-by.md) query does. ClickHouse team has made several high-level decisions that combined made achieving this task possible:
ClickHouse 最初是作为一个原型构建的,它的单一任务就是尽可能快速地过滤和聚合数据。这正是构建典型分析报告所需做的,也是典型 [GROUP BY](../../sql-reference/statements/select/group-by.md) 查询所做的。ClickHouse 团队做出了几个高层次的决策,这些决策组合在一起使得实现这一任务成为可能:
Column-oriented storage
: Source data often contain hundreds or even thousands of columns, while a report can use just a few of them. The system needs to avoid reading unnecessary columns, or most expensive disk read operations would be wasted.
列式存储
: 源数据通常包含数百甚至数千列,而报告可能只使用其中的几列。系统需要避免读取不必要的列,否则大部分昂贵的磁盘读取操作将被浪费。
Indexes
: ClickHouse keeps data structures in memory that allows reading not only used columns but only necessary row ranges of those columns.
索引
: ClickHouse 在内存中保留数据结构,允许不仅读取使用的列,而且只读取这些列的必要行范围。
Data compression
: Storing different values of the same column together often leads to better compression ratios (compared to row-oriented systems) because in real data column often has the same or not so many different values for neighboring rows. In addition to general-purpose compression, ClickHouse supports [specialized codecs](../../sql-reference/statements/create/table.mdx/#create-query-specialized-codecs) that can make data even more compact.
数据压缩
: 将同一列的不同值存储在一起通常会导致更好的压缩比与行式系统相比因为在实际数据中列通常对相邻行有相同或不太多的不同值。除了通用压缩之外ClickHouse 还支持 [专用编解码器](../../sql-reference/statements/create/table.mdx/#create-query-specialized-codecs),可以使数据更加紧凑。
Vectorized query execution
: ClickHouse not only stores data in columns but also processes data in columns. It leads to better CPU cache utilization and allows for [SIMD](https://en.wikipedia.org/wiki/SIMD) CPU instructions usage.
向量化查询执行
: ClickHouse 不仅以列的形式存储数据,而且以列的形式处理数据。这导致更好的 CPU 缓存利用率,并允许使用 [SIMD](https://en.wikipedia.org/wiki/SIMD) CPU 指令。
Scalability
: ClickHouse can leverage all available CPU cores and disks to execute even a single query. Not only on a single server but all CPU cores and disks of a cluster as well.
可扩展性
: ClickHouse 可以利用所有可用的 CPU 核心和磁盘来执行甚至是单个查询。不仅在单个服务器上,而且在集群的所有 CPU 核心和磁盘上。
But many other database management systems use similar techniques. What really makes ClickHouse stand out is **attention to low-level details**. Most programming languages provide implementations for most common algorithms and data structures, but they tend to be too generic to be effective. Every task can be considered as a landscape with various characteristics, instead of just throwing in random implementation. For example, if you need a hash table, here are some key questions to consider:
但许多其他数据库管理系统也使用类似的技术。真正使 ClickHouse 脱颖而出的是 **对底层细节的关注**。大多数编程语言为最常见的算法和数据结构提供了实现,但它们往往过于通用而无法高效。每个任务都可以被视为具有各种特征的景观,而不是仅仅随意投入某个实现。例如,如果您需要一个哈希表,这里有一些关键问题需要考虑:
- Which hash function to choose?
- Collision resolution algorithm: [open addressing](https://en.wikipedia.org/wiki/Open_addressing) vs [chaining](https://en.wikipedia.org/wiki/Hash_table#Separate_chaining)?
- Memory layout: one array for keys and values or separate arrays? Will it store small or large values?
- Fill factor: when and how to resize? How to move values around on resize?
- Will values be removed and which algorithm will work better if they will?
- Will we need fast probing with bitmaps, inline placement of string keys, support for non-movable values, prefetch, and batching?
- 选择哪种哈希函数?
- 冲突解决算法:[开放寻址](https://en.wikipedia.org/wiki/Open_addressing)还是[链接](https://en.wikipedia.org/wiki/Hash_table#Separate_chaining)
- 内存布局:一个数组用于键和值还是分开的数组?它会存储小值还是大值?
- 填充因子:何时以及如何调整大小?在调整大小时如何移动值?
- 是否会移除值,如果会,哪种算法会更好?
- 我们是否需要使用位图进行快速探测,字符串键的内联放置,对不可移动值的支持,预取和批处理?
Hash table is a key data structure for `GROUP BY` implementation and ClickHouse automatically chooses one of [30+ variations](https://github.com/ClickHouse/ClickHouse/blob/master/src/Interpreters/Aggregator.h) for each specific query.
哈希表是 `GROUP BY` 实现的关键数据结构ClickHouse 会根据每个特定查询自动选择 [30 多种变体](https://github.com/ClickHouse/ClickHouse/blob/master/src/Interpreters/Aggregator.h) 中的一种。
The same goes for algorithms, for example, in sorting you might consider:
算法也是如此,例如,在排序中,您可能会考虑:
- What will be sorted: an array of numbers, tuples, strings, or structures?
- Is all data available completely in RAM?
- Do we need a stable sort?
- Do we need a full sort? Maybe partial sort or n-th element will suffice?
- How to implement comparisons?
- Are we sorting data that has already been partially sorted?
- 将要排序的是数字数组、元组、字符串还是结构?
- 所有数据是否完全可用于 RAM
- 我们需要稳定排序吗?
- 我们需要完全排序吗?也许部分排序或第 n 个元素就足够了?
- 如何实现比较?
- 我们正在对已经部分排序的数据进行排序吗?
Algorithms that they rely on characteristics of data they are working with can often do better than their generic counterparts. If it is not really known in advance, the system can try various implementations and choose the one that works best in runtime. For example, see an [article on how LZ4 decompression is implemented in ClickHouse](https://habr.com/en/company/yandex/blog/457612/).
他们所依赖的算法根据其所处理的数据特性,往往可以比通用算法做得更好。如果事先真的不知道,系统可以尝试各种实现,并在运行时选择最佳的一种。例如,看一篇关于 [ClickHouse 中 LZ4 解压缩是如何实现的文章](https://habr.com/en/company/yandex/blog/457612/)。
Last but not least, the ClickHouse team always monitors the Internet on people claiming that they came up with the best implementation, algorithm, or data structure to do something and tries it out. Those claims mostly appear to be false, but from time to time youll indeed find a gem.
最后但同样重要的是ClickHouse 团队始终关注互联网上人们声称他们提出了最佳的实现、算法或数据结构来做某事,并尝试它。这些声称大多是虚假的,但有时你确实会找到一颗宝石。
:::info Tips for building your own high-performance software
- Keep in mind low-level details when designing your system.
- Design based on hardware capabilities.
- Choose data structures and abstractions based on the needs of the task.
- Provide specializations for special cases.
- Try new, “best” algorithms, that you read about yesterday.
- Choose an algorithm in runtime based on statistics.
- Benchmark on real datasets.
- Test for performance regressions in CI.
- Measure and observe everything.
:::info 构建高性能软件的提示
- 设计系统时要考虑到底层细节。
- 基于硬件能力进行设计。
- 根据任务的需求选择数据结构和抽象。
- 为特殊情况提供专门化。
- 尝试您昨天阅读的关于新的“最佳”算法。
- 根据统计数据在运行时选择算法。
- 在真实数据集上进行基准测试。
- 在 CI 中测试性能回归。
- 测量并观察一切。
:::

View File

@ -1,35 +1,35 @@
---
slug: /zh/faq/integration/json-import
title: How to import JSON into ClickHouse?
title: 如何将 JSON 导入到 ClickHouse
toc_hidden: true
sidebar_position: 11
---
# How to Import JSON Into ClickHouse? {#how-to-import-json-into-clickhouse}
# 如何将 JSON 导入到 ClickHouse {#how-to-import-json-into-clickhouse}
ClickHouse supports a wide range of [data formats for input and output](../../interfaces/formats.md). There are multiple JSON variations among them, but the most commonly used for data ingestion is [JSONEachRow](../../interfaces/formats.md#jsoneachrow). It expects one JSON object per row, each object separated by a newline.
ClickHouse 支持多种[输入和输出的数据格式](../../interfaces/formats.md)。其中包括多种 JSON 变体,但最常用于数据导入的是 [JSONEachRow](../../interfaces/formats.md#jsoneachrow)。它期望每行一个 JSON 对象,每个对象由一个新行分隔。
## Examples {#examples}
## 示例 {#examples}
Using [HTTP interface](../../interfaces/http.md):
使用 [HTTP 接口](../../interfaces/http.md)
``` bash
$ echo '{"foo":"bar"}' | curl 'http://localhost:8123/?query=INSERT%20INTO%20test%20FORMAT%20JSONEachRow' --data-binary @-
```
Using [CLI interface](../../interfaces/cli.md):
使用 [CLI接口](../../interfaces/cli.md):
``` bash
$ echo '{"foo":"bar"}' | clickhouse-client --query="INSERT INTO test FORMAT JSONEachRow"
```
Instead of inserting data manually, you might consider to use one of [client libraries](../../interfaces/index.md) instead.
除了手动插入数据外,您可能会考虑使用 [客户端库](../../interfaces/index.md) 之一。
## Useful Settings {#useful-settings}
## 实用设置 {#useful-settings}
- `input_format_skip_unknown_fields` allows to insert JSON even if there were additional fields not present in table schema (by discarding them).
- `input_format_import_nested_json` allows to insert nested JSON objects into columns of [Nested](../../sql-reference/data-types/nested-data-structures/nested.md) type.
- `input_format_skip_unknown_fields` 允许插入 JSON即使存在表格架构中未出现的额外字段通过丢弃它们
- `input_format_import_nested_json` 允许将嵌套 JSON 对象插入到 [Nested](../../sql-reference/data-types/nested-data-structures/nested.md) 类型的列中。
:::note
Settings are specified as `GET` parameters for the HTTP interface or as additional command-line arguments prefixed with `--` for the `CLI` interface.
对于 HTTP 接口,设置作为 `GET` 参数指定;对于 `CLI` 接口,则作为前缀为 `--` 的附加命令行参数。
:::
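For illustration, a minimal sketch of using the first setting from an interactive `clickhouse-client` session, assuming the `test` table from the examples above; the extra `"unknown"` field is hypothetical and is discarded because of the setting:

``` sql
SET input_format_skip_unknown_fields = 1;

INSERT INTO test FORMAT JSONEachRow {"foo": "bar", "unknown": 1}
```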

View File

@ -1,15 +1,15 @@
---
slug: /zh/faq/integration/oracle-odbc
title: What if I have a problem with encodings when using Oracle via ODBC?
title: 使用 Oracle ODBC 时遇到编码问题怎么办?
toc_hidden: true
sidebar_position: 20
---
# What If I Have a Problem with Encodings When Using Oracle Via ODBC? {#oracle-odbc-encodings}
# 使用 Oracle ODBC 时遇到编码问题怎么办? {#oracle-odbc-encodings}
If you use Oracle as a source of ClickHouse external dictionaries via Oracle ODBC driver, you need to set the correct value for the `NLS_LANG` environment variable in `/etc/default/clickhouse`. For more information, see the [Oracle NLS_LANG FAQ](https://www.oracle.com/technetwork/products/globalization/nls-lang-099431.html).
如果您使用 Oracle 作为 ClickHouse 外部字典的数据源,并通过 Oracle ODBC 驱动程序,您需要在 `/etc/default/clickhouse` 中为 `NLS_LANG` 环境变量设置正确的值。更多信息,请参阅 [Oracle NLS_LANG FAQ](https://www.oracle.com/technetwork/products/globalization/nls-lang-099431.html)
**Example**
**示例**
``` sql
NLS_LANG=RUSSIAN_RUSSIA.UTF8

View File

@ -1,44 +1,44 @@
---
slug: /zh/faq/operations/delete-old-data
title: Is it possible to delete old records from a ClickHouse table?
title: 是否可以从ClickHouse表中删除旧记录
toc_hidden: true
sidebar_position: 20
---
# Is It Possible to Delete Old Records from a ClickHouse Table? {#is-it-possible-to-delete-old-records-from-a-clickhouse-table}
# 是否可以从ClickHouse表中删除旧记录 {#is-it-possible-to-delete-old-records-from-a-clickhouse-table}
The short answer is “yes”. ClickHouse has multiple mechanisms that allow freeing up disk space by removing old data. Each mechanism is aimed for different scenarios.
简短的答案是“可以”。ClickHouse具有多种机制允许通过删除旧数据来释放磁盘空间。每种机制都针对不同的场景。
## TTL {#ttl}
ClickHouse allows to automatically drop values when some condition happens. This condition is configured as an expression based on any columns, usually just static offset for any timestamp column.
ClickHouse 允许在某些条件发生时自动删除值。这个条件被配置为基于任何列的表达式,通常只是针对任何时间戳列的静态偏移量。
The key advantage of this approach is that it does not need any external system to trigger, once TTL is configured, data removal happens automatically in background.
这种方法的主要优势是它不需要任何外部系统来触发,一旦配置了 TTL数据删除就会自动在后台发生。
:::note
TTL can also be used to move data not only to [/dev/null](https://en.wikipedia.org/wiki/Null_device), but also between different storage systems, like from SSD to HDD.
TTL 也可以用来将数据移动到非 [/dev/null](https://en.wikipedia.org/wiki/Null_device) 的不同存储系统,例如从 SSD 到 HDD。
:::
More details on [configuring TTL](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-ttl).
有关 [配置 TTL](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-ttl) 的更多详细信息。
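A minimal sketch of a table whose rows are dropped one month after their timestamp (the table and column names are hypothetical):

``` sql
CREATE TABLE events
(
    event_time DateTime,
    message String
)
ENGINE = MergeTree
ORDER BY event_time
TTL event_time + INTERVAL 1 MONTH;
```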
## ALTER DELETE {#alter-delete}
ClickHouse does not have real-time point deletes like in [OLTP](https://en.wikipedia.org/wiki/Online_transaction_processing) databases. The closest thing to them are mutations. They are issued as `ALTER ... DELETE` or `ALTER ... UPDATE` queries to distinguish from normal `DELETE` or `UPDATE` as they are asynchronous batch operations, not immediate modifications. The rest of syntax after `ALTER TABLE` prefix is similar.
ClickHouse没有像[OLTP](https://en.wikipedia.org/wiki/Online_transaction_processing)数据库那样的实时点删除。最接近的东西是 `Mutation`,执行 `ALTER ... DELETE``ALTER ... UPDATE` 查询,以区别于普通的`DELETE`或`UPDATE`。因为它们是异步批处理操作,而不是立即修改。`ALTER TABLE`前缀后的其余语法相似。
`ALTER DELETE` can be issued to flexibly remove old data. If you need to do it regularly, the main downside will be the need to have an external system to submit the query. There are also some performance considerations since mutation rewrite complete parts even theres only a single row to be deleted.
`ALTER DELETE`可以灵活地用来删除旧数据。如果你需要定期这样做,主要缺点将是需要有一个外部系统来提交查询。还有一些性能方面的考虑,因为即使只有一行要被删除,突变也会重写完整部分。
This is the most common approach to make your system based on ClickHouse [GDPR](https://gdpr-info.eu)-compliant.
这是使基于ClickHouse的系统符合[GDPR](https://gdpr-info.eu)的最常见方法。
More details on [mutations](../../sql-reference/statements/alter.md/#alter-mutations).
有关[mutations](../../sql-reference/statements/alter.md/#alter-mutations)的更多详细信息。
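A hedged sketch of such a mutation, reusing the hypothetical `events` table from the TTL example above:

``` sql
-- Asynchronous mutation: affected parts are rewritten in the background.
ALTER TABLE events DELETE WHERE event_time < now() - INTERVAL 90 DAY;
```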
## DROP PARTITION {#drop-partition}
`ALTER TABLE ... DROP PARTITION` provides a cost-efficient way to drop a whole partition. Its not that flexible and needs proper partitioning scheme configured on table creation, but still covers most common cases. Like mutations need to be executed from an external system for regular use.
`ALTER TABLE ... DROP PARTITION`提供了一种成本效率高的方式来删除整个分区。它不是那么灵活,需要在创建表时配置适当的分区方案,但仍然涵盖了大多数常见情况。像 mutations 一样,需要从外部系统执行以进行常规使用。
More details on [manipulating partitions](../../sql-reference/statements/alter/partition.mdx/#alter_drop-partition).
有关[操作分区](../../sql-reference/statements/alter/partition.mdx/#alter_drop-partition)的更多详细信息。
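A minimal sketch, assuming the hypothetical `events` table was created with `PARTITION BY toYYYYMM(event_time)`:

``` sql
ALTER TABLE events DROP PARTITION 202301;
```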
## TRUNCATE {#truncate}
Its rather radical to drop all data from a table, but in some cases it might be exactly what you need.
从表中删除所有数据是相当激进的,但在某些情况下可能正是您所需要的。
More details on [table truncation](../../sql-reference/statements/truncate.md).
有关[truncate](../../sql-reference/statements/truncate.md)的更多详细信息。

View File

@ -1010,6 +1010,7 @@ void Client::addOptions(OptionsDescription & options_description)
("config,c", po::value<std::string>(), "config-file path (another shorthand)")
("connection", po::value<std::string>(), "connection to use (from the client config), by default connection name is hostname")
("secure,s", "Use TLS connection")
("no-secure,s", "Don't use TLS connection")
("user,u", po::value<std::string>()->default_value("default"), "user")
("password", po::value<std::string>(), "password")
("ask-password", "ask-password")
@ -1151,6 +1152,8 @@ void Client::processOptions(const OptionsDescription & options_description,
interleave_queries_files = options["interleave-queries-file"].as<std::vector<std::string>>();
if (options.count("secure"))
config().setBool("secure", true);
if (options.count("no-secure"))
config().setBool("no-secure", true);
if (options.count("user") && !options["user"].defaulted())
config().setString("user", options["user"].as<std::string>());
if (options.count("password"))

View File

@ -20,7 +20,7 @@ public:
const String & host_id_,
const String & proxy_database_name_,
ContextMutablePtr context_,
Poco::Logger * log_)
LoggerRawPtr log_)
: WithMutableContext(context_),
task_zookeeper_path(task_path_),
host_id(host_id_),
@ -230,7 +230,7 @@ private:
bool experimental_use_sample_offset{false};
Poco::Logger * log;
LoggerRawPtr log;
UInt64 max_table_tries = 3;
UInt64 max_shard_partition_tries = 3;

View File

@ -177,7 +177,7 @@ public:
auto watch_callback =
[my_stale = stale] (const Coordination::WatchResponse & rsp)
{
auto logger = &Poco::Logger::get("ClusterCopier");
auto logger = getLogger("ClusterCopier");
if (rsp.error == Coordination::Error::ZOK)
{
switch (rsp.type)

View File

@ -160,7 +160,7 @@ int DisksApp::main(const std::vector<String> & /*args*/)
}
else
{
throw Exception(ErrorCodes::BAD_ARGUMENTS, "No config-file specifiged");
throw Exception(ErrorCodes::BAD_ARGUMENTS, "No config-file specified");
}
if (config().has("save-logs"))

View File

@ -375,7 +375,7 @@ int KeeperClient::main(const std::vector<String> & /* args */)
if (!config().has("host") && !config().has("port") && !keys.empty())
{
LOG_INFO(&Poco::Logger::get("KeeperClient"), "Found keeper node in the config.xml, will use it for connection");
LOG_INFO(getLogger("KeeperClient"), "Found keeper node in the config.xml, will use it for connection");
for (const auto & key : keys)
{

View File

@ -28,7 +28,7 @@ int mainEntryClickHouseKeeperConverter(int argc, char ** argv)
po::store(po::command_line_parser(argc, argv).options(desc).run(), options);
Poco::AutoPtr<Poco::ConsoleChannel> console_channel(new Poco::ConsoleChannel);
Poco::Logger * logger = &Poco::Logger::get("KeeperConverter");
LoggerPtr logger = getLogger("KeeperConverter");
logger->setChannel(console_channel);
if (options.count("help"))

View File

@ -624,7 +624,7 @@ catch (...)
void Keeper::logRevision() const
{
LOG_INFO(&Poco::Logger::get("Application"),
LOG_INFO(getLogger("Application"),
"Starting ClickHouse Keeper {} (revision: {}, git hash: {}, build id: {}), PID {}",
VERSION_STRING,
ClickHouseRevision::getVersionRevision(),

View File

@ -13,7 +13,7 @@ CatBoostLibraryHandlerFactory & CatBoostLibraryHandlerFactory::instance()
}
CatBoostLibraryHandlerFactory::CatBoostLibraryHandlerFactory()
: log(&Poco::Logger::get("CatBoostLibraryHandlerFactory"))
: log(getLogger("CatBoostLibraryHandlerFactory"))
{
}

View File

@ -31,7 +31,7 @@ private:
/// map: model path --> catboost library handler
std::unordered_map<String, CatBoostLibraryHandlerPtr> library_handlers TSA_GUARDED_BY(mutex);
std::mutex mutex;
Poco::Logger * log;
LoggerPtr log;
};
}

View File

@ -9,40 +9,40 @@ const char DICT_LOGGER_NAME[] = "LibraryDictionarySourceExternal";
void ExternalDictionaryLibraryAPI::log(LogLevel level, CString msg)
{
auto & logger = Poco::Logger::get(DICT_LOGGER_NAME);
auto logger = getLogger(DICT_LOGGER_NAME);
switch (level)
{
case LogLevel::TRACE:
if (logger.trace())
logger.trace(msg);
if (logger->trace())
logger->trace(msg);
break;
case LogLevel::DEBUG:
if (logger.debug())
logger.debug(msg);
if (logger->debug())
logger->debug(msg);
break;
case LogLevel::INFORMATION:
if (logger.information())
logger.information(msg);
if (logger->information())
logger->information(msg);
break;
case LogLevel::NOTICE:
if (logger.notice())
logger.notice(msg);
if (logger->notice())
logger->notice(msg);
break;
case LogLevel::WARNING:
if (logger.warning())
logger.warning(msg);
if (logger->warning())
logger->warning(msg);
break;
case LogLevel::ERROR:
if (logger.error())
logger.error(msg);
if (logger->error())
logger->error(msg);
break;
case LogLevel::CRITICAL:
if (logger.critical())
logger.critical(msg);
if (logger->critical())
logger->critical(msg);
break;
case LogLevel::FATAL:
if (logger.fatal())
logger.fatal(msg);
if (logger->fatal())
logger->fatal(msg);
break;
}
}

View File

@ -26,7 +26,7 @@ void ExternalDictionaryLibraryHandlerFactory::create(
if (library_handlers.contains(dictionary_id))
{
LOG_WARNING(&Poco::Logger::get("ExternalDictionaryLibraryHandlerFactory"), "Library handler with dictionary id {} already exists", dictionary_id);
LOG_WARNING(getLogger("ExternalDictionaryLibraryHandlerFactory"), "Library handler with dictionary id {} already exists", dictionary_id);
return;
}

View File

@ -12,7 +12,7 @@ LibraryBridgeHandlerFactory::LibraryBridgeHandlerFactory(
size_t keep_alive_timeout_,
ContextPtr context_)
: WithContext(context_)
, log(&Poco::Logger::get(name_))
, log(getLogger(name_))
, name(name_)
, keep_alive_timeout(keep_alive_timeout_)
{

View File

@ -19,7 +19,7 @@ public:
std::unique_ptr<HTTPRequestHandler> createRequestHandler(const HTTPServerRequest & request) override;
private:
Poco::Logger * log;
LoggerPtr log;
const std::string name;
const size_t keep_alive_timeout;
};

View File

@ -47,7 +47,7 @@ namespace
if (!response.sent())
*response.send() << message << '\n';
LOG_WARNING(&Poco::Logger::get("LibraryBridge"), fmt::runtime(message));
LOG_WARNING(getLogger("LibraryBridge"), fmt::runtime(message));
}
std::shared_ptr<Block> parseColumns(String && column_string)
@ -92,7 +92,7 @@ static void writeData(Block data, OutputFormatPtr format)
ExternalDictionaryLibraryBridgeRequestHandler::ExternalDictionaryLibraryBridgeRequestHandler(size_t keep_alive_timeout_, ContextPtr context_)
: WithContext(context_)
, keep_alive_timeout(keep_alive_timeout_)
, log(&Poco::Logger::get("ExternalDictionaryLibraryBridgeRequestHandler"))
, log(getLogger("ExternalDictionaryLibraryBridgeRequestHandler"))
{
}
@ -380,7 +380,7 @@ void ExternalDictionaryLibraryBridgeRequestHandler::handleRequest(HTTPServerRequ
ExternalDictionaryLibraryBridgeExistsHandler::ExternalDictionaryLibraryBridgeExistsHandler(size_t keep_alive_timeout_, ContextPtr context_)
: WithContext(context_)
, keep_alive_timeout(keep_alive_timeout_)
, log(&Poco::Logger::get("ExternalDictionaryLibraryBridgeExistsHandler"))
, log(getLogger("ExternalDictionaryLibraryBridgeExistsHandler"))
{
}
@ -419,7 +419,7 @@ CatBoostLibraryBridgeRequestHandler::CatBoostLibraryBridgeRequestHandler(
size_t keep_alive_timeout_, ContextPtr context_)
: WithContext(context_)
, keep_alive_timeout(keep_alive_timeout_)
, log(&Poco::Logger::get("CatBoostLibraryBridgeRequestHandler"))
, log(getLogger("CatBoostLibraryBridgeRequestHandler"))
{
}
@ -623,7 +623,7 @@ void CatBoostLibraryBridgeRequestHandler::handleRequest(HTTPServerRequest & requ
CatBoostLibraryBridgeExistsHandler::CatBoostLibraryBridgeExistsHandler(size_t keep_alive_timeout_, ContextPtr context_)
: WithContext(context_)
, keep_alive_timeout(keep_alive_timeout_)
, log(&Poco::Logger::get("CatBoostLibraryBridgeExistsHandler"))
, log(getLogger("CatBoostLibraryBridgeExistsHandler"))
{
}

View File

@ -26,7 +26,7 @@ private:
static constexpr inline auto FORMAT = "RowBinary";
const size_t keep_alive_timeout;
Poco::Logger * log;
LoggerPtr log;
};
@ -40,7 +40,7 @@ public:
private:
const size_t keep_alive_timeout;
Poco::Logger * log;
LoggerPtr log;
};
@ -69,7 +69,7 @@ public:
private:
const size_t keep_alive_timeout;
Poco::Logger * log;
LoggerPtr log;
};
@ -83,7 +83,7 @@ public:
private:
const size_t keep_alive_timeout;
Poco::Logger * log;
LoggerPtr log;
};
}

View File

@ -221,7 +221,7 @@ void LocalServer::tryInitPath()
{
// The path is not provided explicitly - use a unique path in the system temporary directory
// (or in the current dir if temporary don't exist)
Poco::Logger * log = &logger();
LoggerRawPtr log = &logger();
std::filesystem::path parent_folder;
std::filesystem::path default_path;
@ -631,7 +631,7 @@ void LocalServer::processConfig()
tryInitPath();
Poco::Logger * log = &logger();
LoggerRawPtr log = &logger();
/// Maybe useless
if (config().has("macros"))

View File

@ -18,7 +18,7 @@ class ODBCColumnsInfoHandler : public HTTPRequestHandler, WithContext
public:
ODBCColumnsInfoHandler(size_t keep_alive_timeout_, ContextPtr context_)
: WithContext(context_)
, log(&Poco::Logger::get("ODBCColumnsInfoHandler"))
, log(getLogger("ODBCColumnsInfoHandler"))
, keep_alive_timeout(keep_alive_timeout_)
{
}
@ -26,7 +26,7 @@ public:
void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & write_event) override;
private:
Poco::Logger * log;
LoggerPtr log;
size_t keep_alive_timeout;
};

View File

@ -16,7 +16,7 @@ class IdentifierQuoteHandler : public HTTPRequestHandler, WithContext
public:
IdentifierQuoteHandler(size_t keep_alive_timeout_, ContextPtr context_)
: WithContext(context_)
, log(&Poco::Logger::get("IdentifierQuoteHandler"))
, log(getLogger("IdentifierQuoteHandler"))
, keep_alive_timeout(keep_alive_timeout_)
{
}
@ -24,7 +24,7 @@ public:
void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & write_event) override;
private:
Poco::Logger * log;
LoggerPtr log;
size_t keep_alive_timeout;
};

View File

@ -24,7 +24,7 @@ public:
ContextPtr context_,
const String & mode_)
: WithContext(context_)
, log(&Poco::Logger::get("ODBCHandler"))
, log(getLogger("ODBCHandler"))
, keep_alive_timeout(keep_alive_timeout_)
, mode(mode_)
{
@ -33,7 +33,7 @@ public:
void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & write_event) override;
private:
Poco::Logger * log;
LoggerPtr log;
size_t keep_alive_timeout;
String mode;

View File

@ -23,7 +23,7 @@ namespace ErrorCodes
ODBCSource::ODBCSource(
nanodbc::ConnectionHolderPtr connection_holder, const std::string & query_str, const Block & sample_block, const UInt64 max_block_size_)
: ISource(sample_block)
, log(&Poco::Logger::get("ODBCSource"))
, log(getLogger("ODBCSource"))
, max_block_size{max_block_size_}
, query(query_str)
{

View File

@ -30,7 +30,7 @@ private:
column.insertFrom(sample_column, 0);
}
Poco::Logger * log;
LoggerPtr log;
const UInt64 max_block_size;
ExternalResultDescription description;

View File

@ -19,7 +19,7 @@ ODBCSink::ODBCSink(
ContextPtr local_context_,
IdentifierQuotingStyle quoting_)
: ISink(sample_block_)
, log(&Poco::Logger::get("ODBCSink"))
, log(getLogger("ODBCSink"))
, connection_holder(std::move(connection_holder_))
, db_name(remote_database_name_)
, table_name(remote_table_name_)

View File

@ -30,7 +30,7 @@ protected:
void consume(Chunk chunk) override;
private:
Poco::Logger * log;
LoggerPtr log;
nanodbc::ConnectionHolderPtr connection_holder;
std::string db_name;

View File

@ -11,7 +11,7 @@ namespace DB
ODBCBridgeHandlerFactory::ODBCBridgeHandlerFactory(const std::string & name_, size_t keep_alive_timeout_, ContextPtr context_)
: WithContext(context_)
, log(&Poco::Logger::get(name_))
, log(getLogger(name_))
, name(name_)
, keep_alive_timeout(keep_alive_timeout_)
{

View File

@ -22,7 +22,7 @@ public:
std::unique_ptr<HTTPRequestHandler> createRequestHandler(const HTTPServerRequest & request) override;
private:
Poco::Logger * log;
LoggerPtr log;
std::string name;
size_t keep_alive_timeout;
};

View File

@ -97,7 +97,7 @@ T execute(nanodbc::ConnectionHolderPtr connection_holder, std::function<T(nanodb
/// https://docs.microsoft.com/ru-ru/sql/odbc/reference/appendixes/appendix-a-odbc-error-codes?view=sql-server-ver15
bool is_retriable = e.state().starts_with("08") || e.state().starts_with("24") || e.state().starts_with("25");
LOG_ERROR(
&Poco::Logger::get("ODBCConnection"),
getLogger("ODBCConnection"),
"ODBC query failed with error: {}, state: {}, native code: {}{}",
e.what(), e.state(), e.native(), is_retriable ? ", will retry" : "");

View File

@ -19,7 +19,7 @@ class SchemaAllowedHandler : public HTTPRequestHandler, WithContext
public:
SchemaAllowedHandler(size_t keep_alive_timeout_, ContextPtr context_)
: WithContext(context_)
, log(&Poco::Logger::get("SchemaAllowedHandler"))
, log(getLogger("SchemaAllowedHandler"))
, keep_alive_timeout(keep_alive_timeout_)
{
}
@ -27,7 +27,7 @@ public:
void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & write_event) override;
private:
Poco::Logger * log;
LoggerPtr log;
size_t keep_alive_timeout;
};

View File

@ -26,7 +26,7 @@ std::string getIdentifierQuote(nanodbc::ConnectionHolderPtr connection_holder)
}
catch (...)
{
LOG_WARNING(&Poco::Logger::get("ODBCGetIdentifierQuote"), "Cannot fetch identifier quote. Default double quote is used. Reason: {}", getCurrentExceptionMessage(false));
LOG_WARNING(getLogger("ODBCGetIdentifierQuote"), "Cannot fetch identifier quote. Default double quote is used. Reason: {}", getCurrentExceptionMessage(false));
return "\"";
}

View File

@ -365,7 +365,7 @@ void Server::createServer(
namespace
{
void setOOMScore(int value, Poco::Logger * log)
void setOOMScore(int value, LoggerRawPtr log)
{
try
{
@ -450,7 +450,7 @@ void checkForUsersNotInMainConfig(
const Poco::Util::AbstractConfiguration & config,
const std::string & config_path,
const std::string & users_config_path,
Poco::Logger * log)
LoggerPtr log)
{
if (config.getBool("skip_check_for_incorrect_settings", false))
return;
@ -1740,6 +1740,17 @@ try
LOG_INFO(log, "Loading metadata from {}", path_str);
LoadTaskPtrs load_metadata_tasks;
// Make sure that if exception is thrown during startup async, new async loading jobs are not going to be called.
// This is important for the case when exception is thrown from loading of metadata with `async_load_databases = false`
// to avoid simultaneously running table startups and destructing databases.
SCOPE_EXIT_SAFE(
LOG_INFO(log, "Stopping AsyncLoader.");
// Waits for all currently running jobs to finish and do not run any other pending jobs.
global_context->getAsyncLoader().stop();
);
try
{
auto & database_catalog = DatabaseCatalog::instance();
@ -1888,6 +1899,7 @@ try
/// Must be done after initialization of `servers`, because async_metrics will access `servers` variable from its thread.
async_metrics.start();
global_context->setAsynchronousMetrics(&async_metrics);
main_config_reloader->start();
access_control.startPeriodicReloading();
@ -2004,6 +2016,12 @@ try
else
LOG_INFO(log, "Closed all listening sockets.");
/// Wait for unfinished backups and restores.
/// This must be done after closing listening sockets (no more backups/restores) but before ProcessList::killAllQueries
/// (because killAllQueries() will cancel all running backups/restores).
if (server_settings.shutdown_wait_backups_and_restores)
global_context->waitAllBackupsAndRestores();
/// Killing remaining queries.
if (!server_settings.shutdown_wait_unfinished_queries)
global_context->getProcessList().killAllQueries();
@ -2472,7 +2490,7 @@ void Server::stopServers(
const ServerType & server_type
) const
{
Poco::Logger * log = &logger();
LoggerRawPtr log = &logger();
/// Remove servers once all their connections are closed
auto check_server = [&log](const char prefix[], auto & server)
@ -2511,7 +2529,7 @@ void Server::updateServers(
std::vector<ProtocolServerAdapter> & servers,
std::vector<ProtocolServerAdapter> & servers_to_start_before_tables)
{
Poco::Logger * log = &logger();
LoggerRawPtr log = &logger();
const auto listen_hosts = getListenHosts(config);
const auto interserver_listen_hosts = getInterserverListenHosts(config);

View File

@ -993,7 +993,16 @@
function renderError(response)
{
clear();
document.getElementById('error').innerText = response ? response : "No response.";
let message = response;
try {
let json = JSON.parse(response);
if (json.exception) {
message = json.exception;
}
} catch (e) {}
document.getElementById('error').innerText = message ? message : "No response.";
document.getElementById('error').style.display = 'block';
document.getElementById('logo-container').style.display = 'none';
}

View File

@ -13,7 +13,7 @@ serde_json = "1.0"
crate-type = ["staticlib"]
[profile.release]
debug = true
debug = false
[profile.release-thinlto]
inherits = "release"

View File

@ -17,7 +17,7 @@ cxx-build = "1.0.83"
crate-type = ["staticlib"]
[profile.release]
debug = true
debug = false
[profile.release-thinlto]
inherits = "release"

View File

@ -47,7 +47,7 @@ namespace
const Poco::Util::AbstractConfiguration & config,
const std::string & config_path,
const std::string & users_config_path,
Poco::Logger * log)
LoggerPtr log)
{
if (config.getBool("skip_check_for_incorrect_settings", false))
return;

View File

@ -443,7 +443,7 @@ public:
optimizeTree();
}
void logTree(Poco::Logger * log, const String & title) const
void logTree(LoggerPtr log, const String & title) const
{
LOG_TRACE(log, "Tree({}): level={}, name={}, flags={}, min_flags={}, max_flags={}, num_children={}",
title, level, node_name ? *node_name : "NULL", flags.toString(),
@ -1158,7 +1158,7 @@ AccessRights AccessRights::getFullAccess()
void AccessRights::logTree() const
{
auto * log = &Poco::Logger::get("AccessRights");
auto log = getLogger("AccessRights");
if (root)
{
root->logTree(log, "");

View File

@ -170,6 +170,7 @@ enum class AccessType
M(SYSTEM_RELOAD_MODEL, "SYSTEM RELOAD MODELS, RELOAD MODEL, RELOAD MODELS", GLOBAL, SYSTEM_RELOAD) \
M(SYSTEM_RELOAD_FUNCTION, "SYSTEM RELOAD FUNCTIONS, RELOAD FUNCTION, RELOAD FUNCTIONS", GLOBAL, SYSTEM_RELOAD) \
M(SYSTEM_RELOAD_EMBEDDED_DICTIONARIES, "RELOAD EMBEDDED DICTIONARIES", GLOBAL, SYSTEM_RELOAD) /* implicitly enabled by the grant SYSTEM_RELOAD_DICTIONARY ON *.* */\
M(SYSTEM_RELOAD_ASYNCHRONOUS_METRICS, "RELOAD ASYNCHRONOUS METRICS", GLOBAL, SYSTEM_RELOAD) \
M(SYSTEM_RELOAD, "", GROUP, SYSTEM) \
M(SYSTEM_RESTART_DISK, "SYSTEM RESTART DISK", GLOBAL, SYSTEM) \
M(SYSTEM_MERGES, "SYSTEM STOP MERGES, SYSTEM START MERGES, STOP MERGES, START MERGES", TABLE, SYSTEM) \

View File

@ -514,7 +514,7 @@ bool AllowedClientHosts::contains(const IPAddress & client_address) const
throw;
/// Try to ignore DNS errors: if host cannot be resolved, skip it and try next.
LOG_WARNING(
&Poco::Logger::get("AddressPatterns"),
getLogger("AddressPatterns"),
"Failed to check if the allowed client hosts contain address {}. {}, code = {}",
client_address.toString(), e.displayText(), e.code());
return false;
@ -556,7 +556,7 @@ bool AllowedClientHosts::contains(const IPAddress & client_address) const
throw;
/// Try to ignore DNS errors: if host cannot be resolved, skip it and try next.
LOG_WARNING(
&Poco::Logger::get("AddressPatterns"),
getLogger("AddressPatterns"),
"Failed to check if the allowed client hosts contain address {}. {}, code = {}",
client_address.toString(), e.displayText(), e.code());
return false;

View File

@ -298,7 +298,7 @@ void ContextAccess::setUser(const UserPtr & user_) const
}
user_name = user->getName();
trace_log = &Poco::Logger::get("ContextAccess (" + user_name + ")");
trace_log = getLogger("ContextAccess (" + user_name + ")");
std::vector<UUID> current_roles, current_roles_with_admin_option;
if (params.use_default_roles)

View File

@ -185,9 +185,10 @@ private:
mutable std::atomic<bool> initialized = false; // can be removed after Bug 5504 is resolved
mutable std::atomic<bool> user_was_dropped = false;
mutable std::atomic<Poco::Logger *> trace_log = nullptr;
mutable std::mutex mutex;
/// TODO: Fix race
mutable LoggerPtr trace_log;
mutable UserPtr user TSA_GUARDED_BY(mutex);
mutable String user_name TSA_GUARDED_BY(mutex);
mutable scope_guard subscription_for_user_change TSA_GUARDED_BY(mutex);

View File

@ -47,7 +47,7 @@ namespace
}
AccessEntityPtr tryReadEntityFile(const String & file_path, Poco::Logger & log)
AccessEntityPtr tryReadEntityFile(const String & file_path, LoggerPtr log)
{
try
{
@ -55,7 +55,7 @@ namespace
}
catch (...)
{
tryLogCurrentException(&log);
tryLogCurrentException(log);
return nullptr;
}
}
@ -378,7 +378,7 @@ void DiskAccessStorage::reloadAllAndRebuildLists()
continue;
const auto access_entity_file_path = getEntityFilePath(directory_path, id);
auto entity = tryReadEntityFile(access_entity_file_path, *getLogger());
auto entity = tryReadEntityFile(access_entity_file_path, getLogger());
if (!entity)
continue;

View File

@ -242,7 +242,10 @@ HTTPAuthClientParams parseHTTPAuthParams(const Poco::Util::AbstractConfiguration
size_t connection_timeout_ms = config.getInt(prefix + ".connection_timeout_ms", 1000);
size_t receive_timeout_ms = config.getInt(prefix + ".receive_timeout_ms", 1000);
size_t send_timeout_ms = config.getInt(prefix + ".send_timeout_ms", 1000);
http_auth_params.timeouts = ConnectionTimeouts{connection_timeout_ms, receive_timeout_ms, send_timeout_ms};
http_auth_params.timeouts = ConnectionTimeouts()
.withConnectionTimeout(Poco::Timespan(connection_timeout_ms * 1000))
.withReceiveTimeout(Poco::Timespan(receive_timeout_ms * 1000))
.withSendTimeout(Poco::Timespan(send_timeout_ms * 1000));
http_auth_params.max_tries = config.getInt(prefix + ".max_tries", 3);
http_auth_params.retry_initial_backoff_ms = config.getInt(prefix + ".retry_initial_backoff_ms", 50);
@ -276,7 +279,7 @@ void ExternalAuthenticators::reset()
resetImpl();
}
void ExternalAuthenticators::setConfiguration(const Poco::Util::AbstractConfiguration & config, Poco::Logger * log)
void ExternalAuthenticators::setConfiguration(const Poco::Util::AbstractConfiguration & config, LoggerPtr log)
{
std::lock_guard lock(mutex);
resetImpl();

View File

@ -36,7 +36,7 @@ class ExternalAuthenticators
{
public:
void reset();
void setConfiguration(const Poco::Util::AbstractConfiguration & config, Poco::Logger * log);
void setConfiguration(const Poco::Util::AbstractConfiguration & config, LoggerPtr log);
// The name and readiness of the credentials must be verified before calling these.
bool checkLDAPCredentials(const String & server, const BasicCredentials & credentials,

View File

@ -328,7 +328,7 @@ void GSSAcceptorContext::initHandles()
}
}
String GSSAcceptorContext::processToken(const String & input_token, Poco::Logger * log)
String GSSAcceptorContext::processToken(const String & input_token, LoggerPtr log)
{
std::lock_guard lock(gss_global_mutex);
@ -455,7 +455,7 @@ void GSSAcceptorContext::initHandles()
{
}
String GSSAcceptorContext::processToken(const String &, Poco::Logger *)
String GSSAcceptorContext::processToken(const String &, LoggerPtr)
{
throw Exception(ErrorCodes::FEATURE_IS_NOT_ENABLED_AT_BUILD_TIME, "ClickHouse was built without GSS-API/Kerberos support");
}

View File

@ -3,6 +3,7 @@
#include "config.h"
#include <Access/Credentials.h>
#include <Common/Logger.h>
#include <base/types.h>
#include <memory>
@ -42,7 +43,7 @@ public:
const String & getRealm() const;
bool isFailed() const;
MAYBE_NORETURN String processToken(const String & input_token, Poco::Logger * log);
MAYBE_NORETURN String processToken(const String & input_token, LoggerPtr log);
private:
void reset();

View File

@ -6,6 +6,7 @@
#include <Backups/BackupEntriesCollector.h>
#include <Common/Exception.h>
#include <Common/quoteString.h>
#include <Common/callOnce.h>
#include <IO/WriteHelpers.h>
#include <Interpreters/Context.h>
#include <Poco/UUIDGenerator.h>
@ -615,7 +616,7 @@ UUID IAccessStorage::generateRandomID()
}
void IAccessStorage::clearConflictsInEntitiesList(std::vector<std::pair<UUID, AccessEntityPtr>> & entities, const Poco::Logger * log_)
void IAccessStorage::clearConflictsInEntitiesList(std::vector<std::pair<UUID, AccessEntityPtr>> & entities, const LoggerPtr log_)
{
std::unordered_map<UUID, size_t> positions_by_id;
std::unordered_map<std::string_view, size_t> positions_by_type_and_name[static_cast<size_t>(AccessEntityType::MAX)];
@ -671,12 +672,13 @@ void IAccessStorage::clearConflictsInEntitiesList(std::vector<std::pair<UUID, Ac
}
Poco::Logger * IAccessStorage::getLogger() const
LoggerPtr IAccessStorage::getLogger() const
{
Poco::Logger * ptr = log.load();
if (!ptr)
log.store(ptr = &Poco::Logger::get("Access(" + storage_name + ")"), std::memory_order_relaxed);
return ptr;
callOnce(log_initialized, [&] {
log = ::getLogger("Access(" + storage_name + ")");
});
return log;
}

View File

@ -6,6 +6,7 @@
#include <Parsers/IParser.h>
#include <Parsers/parseIdentifierOrStringLiteral.h>
#include <Common/SettingsChanges.h>
#include <Common/callOnce.h>
#include <atomic>
#include <functional>
@ -225,9 +226,9 @@ protected:
SettingsChanges & settings) const;
virtual bool isAddressAllowed(const User & user, const Poco::Net::IPAddress & address) const;
static UUID generateRandomID();
Poco::Logger * getLogger() const;
LoggerPtr getLogger() const;
static String formatEntityTypeWithName(AccessEntityType type, const String & name) { return AccessEntityTypeInfo::get(type).formatEntityNameWithType(name); }
static void clearConflictsInEntitiesList(std::vector<std::pair<UUID, AccessEntityPtr>> & entities, const Poco::Logger * log_);
static void clearConflictsInEntitiesList(std::vector<std::pair<UUID, AccessEntityPtr>> & entities, const LoggerPtr log_);
[[noreturn]] void throwNotFound(const UUID & id) const;
[[noreturn]] void throwNotFound(AccessEntityType type, const String & name) const;
[[noreturn]] static void throwBadCast(const UUID & id, AccessEntityType type, const String & name, AccessEntityType required_type);
@ -246,7 +247,9 @@ protected:
private:
const String storage_name;
mutable std::atomic<Poco::Logger *> log = nullptr;
mutable OnceFlag log_initialized;
mutable LoggerPtr log = nullptr;
};

View File

@ -63,7 +63,7 @@ String KerberosInit::fmtError(krb5_error_code code) const
void KerberosInit::init(const String & keytab_file, const String & principal, const String & cache_name)
{
auto * log = &Poco::Logger::get("KerberosInit");
auto log = getLogger("KerberosInit");
LOG_TRACE(log,"Trying to authenticate with Kerberos v5");
krb5_error_code ret;

View File

@ -532,7 +532,7 @@ LDAPClient::SearchResults LDAPClient::search(const SearchParams & search_params)
for (size_t i = 0; referrals[i]; ++i)
{
LOG_WARNING(&Poco::Logger::get("LDAPClient"), "Received reference during LDAP search but not following it: {}", referrals[i]);
LOG_WARNING(getLogger("LDAPClient"), "Received reference during LDAP search but not following it: {}", referrals[i]);
}
}

View File

@ -91,7 +91,7 @@ void RowPolicyCache::PolicyInfo::setPolicy(const RowPolicyPtr & policy_)
catch (...)
{
tryLogCurrentException(
&Poco::Logger::get("RowPolicy"),
getLogger("RowPolicy"),
String("Could not parse the condition ") + toString(filter_type) + " of row policy "
+ backQuote(policy->getName()));
}

View File

@ -37,7 +37,7 @@ SettingsAuthResponseParser::parse(const Poco::Net::HTTPResponse & response, std:
}
catch (...)
{
LOG_INFO(&Poco::Logger::get("HTTPAuthentication"), "Failed to parse settings from authentication response. Skip it.");
LOG_INFO(getLogger("HTTPAuthentication"), "Failed to parse settings from authentication response. Skip it.");
}
return result;
}

View File

@ -440,7 +440,7 @@ struct GroupArrayNodeGeneral : public GroupArrayNodeBase<GroupArrayNodeGeneral>
return node;
}
void insertInto(IColumn & column) { column.deserializeAndInsertFromArena(data()); }
void insertInto(IColumn & column) { std::ignore = column.deserializeAndInsertFromArena(data()); }
};
template <typename Node, bool has_sampler>

View File

@ -10,12 +10,28 @@ struct Settings;
void registerAggregateFunctionNothing(AggregateFunctionFactory & factory)
{
factory.registerFunction("nothing", [](const std::string & name, const DataTypes & argument_types, const Array & parameters, const Settings *)
{
assertNoParameters(name, parameters);
factory.registerFunction(NameAggregateFunctionNothing::name,
[](const std::string & name, const DataTypes & argument_types, const Array & parameters, const Settings *)
{
assertNoParameters(name, parameters);
return std::make_shared<AggregateFunctionNothing>(argument_types, parameters);
});
auto result_type = argument_types.empty() ? std::make_shared<DataTypeNullable>(std::make_shared<DataTypeNothing>()) : argument_types.front();
return std::make_shared<AggregateFunctionNothing>(argument_types, parameters, result_type);
factory.registerFunction(NameAggregateFunctionNothingNull::name,
[](const std::string & name, const DataTypes & argument_types, const Array & parameters, const Settings *)
{
assertNoParameters(name, parameters);
return std::make_shared<AggregateFunctionNothingNull>(argument_types, parameters);
});
factory.registerFunction(NameAggregateFunctionNothingUInt64::name, {
[](const std::string & name, const DataTypes & argument_types, const Array & parameters, const Settings *)
{
assertNoParameters(name, parameters);
return std::make_shared<AggregateFunctionNothingUInt64>(argument_types, parameters);
},
AggregateFunctionProperties{ .returns_default_when_only_null = true }
});
}

View File

@ -6,7 +6,8 @@
#include <AggregateFunctions/IAggregateFunction.h>
#include <IO/ReadHelpers.h>
#include <IO/WriteHelpers.h>
#include "DataTypes/IDataType.h"
#include <DataTypes/IDataType.h>
#include <DataTypes/DataTypesNumber.h>
namespace DB
@ -18,20 +19,42 @@ namespace ErrorCodes
extern const int INCORRECT_DATA;
}
/// Returns the same type as the first argument
struct NameAggregateFunctionNothing { static constexpr auto name = "nothing"; };
/// Always returns Nullable(Nothing)
struct NameAggregateFunctionNothingNull { static constexpr auto name = "nothingNull"; };
/// Always returns UInt64
struct NameAggregateFunctionNothingUInt64 { static constexpr auto name = "nothingUInt64"; };
template <typename Name> class AggregateFunctionNothingImpl;
using AggregateFunctionNothing = AggregateFunctionNothingImpl<NameAggregateFunctionNothing>;
using AggregateFunctionNothingNull = AggregateFunctionNothingImpl<NameAggregateFunctionNothingNull>;
using AggregateFunctionNothingUInt64 = AggregateFunctionNothingImpl<NameAggregateFunctionNothingUInt64>;
/** Aggregate function that takes arbitrary number of arbitrary arguments and does nothing.
*/
class AggregateFunctionNothing final : public IAggregateFunctionHelper<AggregateFunctionNothing>
template <typename Name>
class AggregateFunctionNothingImpl final : public IAggregateFunctionHelper<AggregateFunctionNothingImpl<Name>>
{
public:
AggregateFunctionNothing(const DataTypes & arguments, const Array & params, const DataTypePtr & result_type_)
: IAggregateFunctionHelper<AggregateFunctionNothing>(arguments, params, result_type_) {}
String getName() const override
static DataTypePtr getReturnType(const DataTypes & arguments [[maybe_unused]])
{
return "nothing";
if constexpr (std::is_same_v<Name, NameAggregateFunctionNothingUInt64>)
return std::make_shared<DataTypeUInt64>();
else if constexpr (std::is_same_v<Name, NameAggregateFunctionNothingNull>)
return std::make_shared<DataTypeNullable>(std::make_shared<DataTypeNothing>());
return arguments.empty() ? std::make_shared<DataTypeNullable>(std::make_shared<DataTypeNothing>()) : arguments.front();
}
public:
AggregateFunctionNothingImpl(const DataTypes & arguments, const Array & params)
: IAggregateFunctionHelper<AggregateFunctionNothingImpl<Name>>(arguments, params, getReturnType(arguments))
{
}
String getName() const override { return Name::name; }
bool allocatesMemoryInArena() const override { return false; }
void create(AggregateDataPtr __restrict) const override
@ -75,7 +98,8 @@ public:
[[maybe_unused]] char symbol;
readChar(symbol, buf);
if (symbol != '\0')
throw Exception(ErrorCodes::INCORRECT_DATA, "Incorrect state of aggregate function 'nothing', it should contain exactly one zero byte, while it is {}.", static_cast<UInt32>(symbol));
throw Exception(ErrorCodes::INCORRECT_DATA, "Incorrect state of aggregate function '{}', it should contain exactly one zero byte, while it is {}",
getName(), static_cast<UInt32>(symbol));
}
void insertResultInto(AggregateDataPtr __restrict, IColumn & to, Arena *) const override

Some files were not shown because too many files have changed in this diff.