Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-28 10:31:57 +00:00)

Merge branch 'master' into protobuf-3.17.3

This commit is contained in: 534b117ee1
.github/PULL_REQUEST_TEMPLATE.md (vendored): 6 changed lines
@@ -19,9 +19,9 @@ Detailed description / Documentation draft:

-By adding documentation, you'll allow users to try your new feature immediately, not when someone else will have time to document it later. Documentation is necessary for all features that affect user experience in any way. You can add brief documentation draft above, or add documentation right into your patch as Markdown files in [docs](https://github.com/ClickHouse/ClickHouse/tree/master/docs) folder.
+> By adding documentation, you'll allow users to try your new feature immediately, not when someone else will have time to document it later. Documentation is necessary for all features that affect user experience in any way. You can add brief documentation draft above, or add documentation right into your patch as Markdown files in [docs](https://github.com/ClickHouse/ClickHouse/tree/master/docs) folder.

-If you are doing this for the first time, it's recommended to read the lightweight [Contributing to ClickHouse Documentation](https://github.com/ClickHouse/ClickHouse/tree/master/docs/README.md) guide first.
+> If you are doing this for the first time, it's recommended to read the lightweight [Contributing to ClickHouse Documentation](https://github.com/ClickHouse/ClickHouse/tree/master/docs/README.md) guide first.

-Information about CI checks: https://clickhouse.tech/docs/en/development/continuous-integration/
+> Information about CI checks: https://clickhouse.tech/docs/en/development/continuous-integration/
@@ -259,10 +259,25 @@ private:
     Poco::Logger * log;
     BaseDaemon & daemon;

-    void onTerminate(const std::string & message, UInt32 thread_num) const
+    void onTerminate(std::string_view message, UInt32 thread_num) const
     {
+        size_t pos = message.find('\n');
+
         LOG_FATAL(log, "(version {}{}, {}) (from thread {}) {}",
-            VERSION_STRING, VERSION_OFFICIAL, daemon.build_id_info, thread_num, message);
+            VERSION_STRING, VERSION_OFFICIAL, daemon.build_id_info, thread_num, message.substr(0, pos));
+
+        /// Print trace from std::terminate exception line-by-line to make it easy for grep.
+        while (pos != std::string_view::npos)
+        {
+            ++pos;
+            size_t next_pos = message.find('\n', pos);
+            size_t size = next_pos;
+            if (next_pos != std::string_view::npos)
+                size = next_pos - pos;
+
+            LOG_FATAL(log, "{}", message.substr(pos, size));
+            pos = next_pos;
+        }
     }

     void onFault(
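The reworked `onTerminate` above splits the message on newlines so that each stack frame lands in its own log record, which keeps `grep` useful on multi-line traces. A minimal standalone sketch of the same splitting loop, using only the standard library (our illustration, not ClickHouse code):

```cpp
#include <iostream>
#include <string_view>

// Print the first line as a header, then each following line separately,
// mirroring the splitting loop in the new onTerminate().
void printLineByLine(std::string_view message)
{
    size_t pos = message.find('\n');
    std::cout << "header: " << message.substr(0, pos) << '\n';

    while (pos != std::string_view::npos)
    {
        ++pos;                                      // step past the '\n'
        size_t next_pos = message.find('\n', pos);
        size_t size = next_pos;                     // npos means "rest of the string"
        if (next_pos != std::string_view::npos)
            size = next_pos - pos;

        std::cout << "line: " << message.substr(pos, size) << '\n';
        pos = next_pos;
    }
}

int main()
{
    printLineByLine("std::terminate called\nframe #0 foo()\nframe #1 main()");
}
```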
@@ -4,13 +4,24 @@ QUERIES_FILE="queries.sql"
 TABLE=$1
 TRIES=3

+if [ -x ./clickhouse ]
+then
+    CLICKHOUSE_CLIENT="./clickhouse client"
+elif command -v clickhouse-client >/dev/null 2>&1
+then
+    CLICKHOUSE_CLIENT="clickhouse-client"
+else
+    echo "clickhouse-client is not found"
+    exit 1
+fi
+
 cat "$QUERIES_FILE" | sed "s/{table}/${TABLE}/g" | while read query; do
     sync
     echo 3 | sudo tee /proc/sys/vm/drop_caches >/dev/null

     echo -n "["
     for i in $(seq 1 $TRIES); do
-        RES=$(clickhouse-client --time --format=Null --query="$query" 2>&1)
+        RES=$(${CLICKHOUSE_CLIENT} --time --format=Null --max_memory_usage=100G --query="$query" 2>&1)
         [[ "$?" == "0" ]] && echo -n "${RES}" || echo -n "null"
         [[ "$i" != $TRIES ]] && echo -n ", "
     done
@@ -11,8 +11,8 @@ DATASET="${TABLE}_v1.tar.xz"
 QUERIES_FILE="queries.sql"
 TRIES=3

-AMD64_BIN_URL="https://clickhouse-builds.s3.yandex.net/0/e29c4c3cc47ab2a6c4516486c1b77d57e7d42643/clickhouse_build_check/gcc-10_relwithdebuginfo_none_bundled_unsplitted_disable_False_binary/clickhouse"
+AMD64_BIN_URL="https://builds.clickhouse.tech/master/amd64/clickhouse"
-AARCH64_BIN_URL="https://clickhouse-builds.s3.yandex.net/0/e29c4c3cc47ab2a6c4516486c1b77d57e7d42643/clickhouse_special_build_check/clang-10-aarch64_relwithdebuginfo_none_bundled_unsplitted_disable_False_binary/clickhouse"
+AARCH64_BIN_URL="https://builds.clickhouse.tech/master/aarch64/clickhouse"

 # Note: on older Ubuntu versions, 'axel' does not support IPv6. If you are using IPv6-only servers on very old Ubuntu, just don't install 'axel'.

@@ -89,7 +89,7 @@ cat "$QUERIES_FILE" | sed "s/{table}/${TABLE}/g" | while read query; do

     echo -n "["
     for i in $(seq 1 $TRIES); do
-        RES=$(./clickhouse client --max_memory_usage 100000000000 --time --format=Null --query="$query" 2>&1 ||:)
+        RES=$(./clickhouse client --max_memory_usage 100G --time --format=Null --query="$query" 2>&1 ||:)
         [[ "$?" == "0" ]] && echo -n "${RES}" || echo -n "null"
         [[ "$i" != $TRIES ]] && echo -n ", "
     done
contrib/NuRaft (vendored): 2 changed lines
@@ -1 +1 @@
-Subproject commit 976874b7aa7f422bf4ea595bb7d1166c617b1c26
+Subproject commit 0ce9490093021c63564cca159571a8b27772ad48
@@ -22,6 +22,7 @@ set(SRCS
     "${LIBRARY_DIR}/src/launcher.cxx"
     "${LIBRARY_DIR}/src/srv_config.cxx"
     "${LIBRARY_DIR}/src/snapshot_sync_req.cxx"
+    "${LIBRARY_DIR}/src/snapshot_sync_ctx.cxx"
     "${LIBRARY_DIR}/src/handle_timeout.cxx"
     "${LIBRARY_DIR}/src/handle_append_entries.cxx"
     "${LIBRARY_DIR}/src/cluster_config.cxx"
@@ -61,4 +61,7 @@ ENV TSAN_OPTIONS='halt_on_error=1 history_size=7'
 ENV UBSAN_OPTIONS='print_stacktrace=1'
 ENV MSAN_OPTIONS='abort_on_error=1 poison_in_dtor=1'

+ENV TZ=Europe/Moscow
+RUN ln -snf "/usr/share/zoneinfo/$TZ" /etc/localtime && echo "$TZ" > /etc/timezone
+
 CMD sleep 1
@@ -299,6 +299,7 @@ function run_tests
     01318_decrypt # Depends on OpenSSL
     01663_aes_msan # Depends on OpenSSL
     01667_aes_args_check # Depends on OpenSSL
+    01683_codec_encrypted # Depends on OpenSSL
     01776_decrypt_aead_size_check # Depends on OpenSSL
     01811_filter_by_null # Depends on OpenSSL
     01281_unsucceeded_insert_select_queries_counter
@@ -14,10 +14,14 @@ services:
             }
             EOF
             ./docker-entrypoint.sh'
-        ports:
-            - 9020:9019
+        expose:
+            - 9019
         healthcheck:
             test: ["CMD", "curl", "-s", "localhost:9019/ping"]
             interval: 5s
             timeout: 3s
             retries: 30
+        volumes:
+            - type: ${JDBC_BRIDGE_FS:-tmpfs}
+              source: ${JDBC_BRIDGE_LOGS:-}
+              target: /app/logs
@@ -2,7 +2,7 @@ version: '2.3'

 services:
     rabbitmq1:
-        image: rabbitmq:3-management-alpine
+        image: rabbitmq:3.8-management-alpine
         hostname: rabbitmq1
         expose:
            - ${RABBITMQ_PORT}
@@ -2,6 +2,11 @@

 set -e -x

+# Choose a random timezone for this test run
+TZ="$(grep -v '#' /usr/share/zoneinfo/zone.tab | awk '{print $3}' | shuf | head -n1)"
+echo "Chosen random timezone $TZ"
+ln -snf "/usr/share/zoneinfo/$TZ" /etc/localtime && echo "$TZ" > /etc/timezone
+
 dpkg -i package_folder/clickhouse-common-static_*.deb;
 dpkg -i package_folder/clickhouse-common-static-dbg_*.deb
 dpkg -i package_folder/clickhouse-server_*.deb
@@ -3,6 +3,11 @@
 # fail on errors, verbose and export all env variables
 set -e -x -a

+# Choose a random timezone for this test run.
+TZ="$(grep -v '#' /usr/share/zoneinfo/zone.tab | awk '{print $3}' | shuf | head -n1)"
+echo "Chosen random timezone $TZ"
+ln -snf "/usr/share/zoneinfo/$TZ" /etc/localtime && echo "$TZ" > /etc/timezone
+
 dpkg -i package_folder/clickhouse-common-static_*.deb
 dpkg -i package_folder/clickhouse-common-static-dbg_*.deb
 dpkg -i package_folder/clickhouse-server_*.deb
@@ -138,15 +143,18 @@ if [[ -n "$WITH_COVERAGE" ]] && [[ "$WITH_COVERAGE" -eq 1 ]]; then
 fi
 tar -chf /test_output/text_log_dump.tar /var/lib/clickhouse/data/system/text_log ||:
 tar -chf /test_output/query_log_dump.tar /var/lib/clickhouse/data/system/query_log ||:
+tar -chf /test_output/zookeeper_log_dump.tar /var/lib/clickhouse/data/system/zookeeper_log ||:
 tar -chf /test_output/coordination.tar /var/lib/clickhouse/coordination ||:

 if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
     grep -Fa "Fatal" /var/log/clickhouse-server/clickhouse-server1.log ||:
     grep -Fa "Fatal" /var/log/clickhouse-server/clickhouse-server2.log ||:
     pigz < /var/log/clickhouse-server/clickhouse-server1.log > /test_output/clickhouse-server1.log.gz ||:
     pigz < /var/log/clickhouse-server/clickhouse-server2.log > /test_output/clickhouse-server2.log.gz ||:
     mv /var/log/clickhouse-server/stderr1.log /test_output/ ||:
     mv /var/log/clickhouse-server/stderr2.log /test_output/ ||:
+    tar -chf /test_output/zookeeper_log_dump1.tar /var/lib/clickhouse1/data/system/zookeeper_log ||:
+    tar -chf /test_output/zookeeper_log_dump2.tar /var/lib/clickhouse2/data/system/zookeeper_log ||:
     tar -chf /test_output/coordination1.tar /var/lib/clickhouse1/coordination ||:
     tar -chf /test_output/coordination2.tar /var/lib/clickhouse2/coordination ||:
 fi
@@ -77,9 +77,6 @@ RUN mkdir -p /tmp/clickhouse-odbc-tmp \
     && odbcinst -i -s -l -f /tmp/clickhouse-odbc-tmp/share/doc/clickhouse-odbc/config/odbc.ini.sample \
     && rm -rf /tmp/clickhouse-odbc-tmp

-ENV TZ=Europe/Moscow
-RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
-
 COPY run.sh /
 CMD ["/bin/bash", "/run.sh"]
@@ -20,6 +20,7 @@ def get_skip_list_cmd(path):

 def get_options(i):
     options = []
+    client_options = []
     if 0 < i:
         options.append("--order=random")

@@ -27,25 +28,29 @@ def get_options(i):
         options.append("--db-engine=Ordinary")

     if i % 3 == 2:
-        options.append('''--client-option='allow_experimental_database_replicated=1' --db-engine="Replicated('/test/db/test_{}', 's1', 'r1')"'''.format(i))
+        options.append('''--db-engine="Replicated('/test/db/test_{}', 's1', 'r1')"'''.format(i))
+        client_options.append('allow_experimental_database_replicated=1')

     # If database name is not specified, new database is created for each functional test.
     # Run some threads with one database for all tests.
     if i % 2 == 1:
         options.append(" --database=test_{}".format(i))

-    if i % 7 == 0:
-        options.append(" --client-option='join_use_nulls=1'")
+    if i % 5 == 1:
+        client_options.append("join_use_nulls=1")

-    if i % 14 == 0:
-        options.append(' --client-option="join_algorithm=\'partial_merge\'"')
+    if i % 15 == 6:
+        client_options.append("join_algorithm='partial_merge'")

-    if i % 21 == 0:
-        options.append(' --client-option="join_algorithm=\'auto\'"')
-        options.append(' --client-option="max_rows_in_join=1000"')
+    if i % 15 == 11:
+        client_options.append("join_algorithm='auto'")
+        client_options.append('max_rows_in_join=1000')

     if i == 13:
-        options.append(" --client-option='memory_tracker_fault_probability=0.00001'")
+        client_options.append('memory_tracker_fault_probability=0.001')

+    if client_options:
+        options.append(" --client-option " + ' '.join(client_options))
+
     return ' '.join(options)
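With the options batched, a single `--client-option` flag now carries all client settings instead of one flag per setting. Reading the reconstructed code above literally, a run with `i == 13` yields `--order=random --database=test_13 --client-option memory_tracker_fault_probability=0.001`.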
@@ -1,8 +1,6 @@
 # docker build -t yandex/clickhouse-unit-test .
 FROM yandex/clickhouse-stateless-test

-ENV TZ=Europe/Moscow
-RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
 RUN apt-get install gdb

 COPY run.sh /
@@ -8,7 +8,7 @@ toc_title: Third-Party Libraries Used
 The list of third-party libraries can be obtained by the following query:

 ``` sql
-SELECT library_name, license_type, license_path FROM system.licenses ORDER BY library_name COLLATE 'en'
+SELECT library_name, license_type, license_path FROM system.licenses ORDER BY library_name COLLATE 'en';
 ```

 [Example](https://gh-api.clickhouse.tech/play?user=play#U0VMRUNUIGxpYnJhcnlfbmFtZSwgbGljZW5zZV90eXBlLCBsaWNlbnNlX3BhdGggRlJPTSBzeXN0ZW0ubGljZW5zZXMgT1JERVIgQlkgbGlicmFyeV9uYW1lIENPTExBVEUgJ2VuJw==)
@@ -749,7 +749,7 @@ If your code in the `master` branch is not buildable yet, exclude it from the build

 **1.** The C++20 standard library is used (experimental extensions are allowed), as well as `boost` and `Poco` frameworks.

-**2.** It is not allowed to use libraries from OS packages. It is also not allowed to use pre-installed libraries. All libraries should be placed in form of source code in `contrib` directory and built with ClickHouse.
+**2.** It is not allowed to use libraries from OS packages. It is also not allowed to use pre-installed libraries. All libraries should be placed in form of source code in `contrib` directory and built with ClickHouse. See [Guidelines for adding new third-party libraries](contrib.md#adding-third-party-libraries) for details.

 **3.** Preference is always given to libraries that are already in use.
@@ -70,7 +70,13 @@ Note that integration of ClickHouse with third-party drivers is not tested. Also

 Unit tests are useful when you want to test not ClickHouse as a whole, but a single isolated library or class. You can enable or disable the build of tests with the `ENABLE_TESTS` CMake option. Unit tests (and other test programs) are located in `tests` subdirectories across the code. To run unit tests, type `ninja test`. Some tests use `gtest`, but some are just programs that return a non-zero exit code on test failure.

-It’s not necessarily to have unit tests if the code is already covered by functional tests (and functional tests are usually much more simple to use).
+It’s not necessary to have unit tests if the code is already covered by functional tests (and functional tests are usually much simpler to use).
+
+You can run individual gtest checks by calling the executable directly, for example:
+
+```bash
+$ ./src/unit_tests_dbms --gtest_filter=LocalAddress*
+```

 ## Performance Tests {#performance-tests}
@@ -14,7 +14,7 @@ You can also use the following database engines:

 - [MySQL](../../engines/database-engines/mysql.md)

-- [MaterializeMySQL](../../engines/database-engines/materialize-mysql.md)
+- [MaterializedMySQL](../../engines/database-engines/materialized-mysql.md)

 - [Lazy](../../engines/database-engines/lazy.md)
@@ -1,9 +1,9 @@
 ---
 toc_priority: 29
-toc_title: MaterializeMySQL
+toc_title: MaterializedMySQL
 ---

-# MaterializeMySQL {#materialize-mysql}
+# MaterializedMySQL {#materialized-mysql}

 **This is an experimental feature that should not be used in production.**
@@ -17,7 +17,7 @@ This feature is experimental.

 ``` sql
 CREATE DATABASE [IF NOT EXISTS] db_name [ON CLUSTER cluster]
-ENGINE = MaterializeMySQL('host:port', ['database' | database], 'user', 'password') [SETTINGS ...]
+ENGINE = MaterializedMySQL('host:port', ['database' | database], 'user', 'password') [SETTINGS ...]
 ```

 **Engine Parameters**
@@ -36,15 +36,22 @@ ENGINE = MaterializedMySQL('host:port', ['database' | database], 'user', 'password') [SETTINGS ...]
 - `max_wait_time_when_mysql_unavailable` — Retry interval when MySQL is not available (milliseconds). A negative value disables retries. Default: `1000`.
 - `allows_query_when_mysql_lost` — Allow querying a materialized table when MySQL is lost. Default: `0` (`false`).
 ```
-CREATE DATABASE mysql ENGINE = MaterializeMySQL('localhost:3306', 'db', 'user', '***')
+CREATE DATABASE mysql ENGINE = MaterializedMySQL('localhost:3306', 'db', 'user', '***')
 SETTINGS
     allows_query_when_mysql_lost=true,
     max_wait_time_when_mysql_unavailable=10000;
 ```

+**Settings on the MySQL-server side**
+
+For `MaterializedMySQL` to work correctly, a few mandatory MySQL-side configuration settings must be set:
+
+- `default_authentication_plugin = mysql_native_password`, since `MaterializedMySQL` can only authorize with this method.
+- `gtid_mode = on`, since GTID-based logging is mandatory for correct `MaterializedMySQL` replication. Note that while turning this mode `ON` you should also specify `enforce_gtid_consistency = on`.
+
 ## Virtual columns {#virtual-columns}

-When working with the `MaterializeMySQL` database engine, [ReplacingMergeTree](../../engines/table-engines/mergetree-family/replacingmergetree.md) tables are used with virtual `_sign` and `_version` columns.
+When working with the `MaterializedMySQL` database engine, [ReplacingMergeTree](../../engines/table-engines/mergetree-family/replacingmergetree.md) tables are used with virtual `_sign` and `_version` columns.

 - `_version` — Transaction counter. Type [UInt64](../../sql-reference/data-types/int-uint.md).
 - `_sign` — Deletion mark. Type [Int8](../../sql-reference/data-types/int-uint.md). Possible values:
@@ -70,6 +77,7 @@ When working with the `MaterializedMySQL` database engine
 | STRING | [String](../../sql-reference/data-types/string.md) |
 | VARCHAR, VAR_STRING | [String](../../sql-reference/data-types/string.md) |
 | BLOB | [String](../../sql-reference/data-types/string.md) |
+| BINARY | [FixedString](../../sql-reference/data-types/fixedstring.md) |

 Other types are not supported. If a MySQL table contains a column of such a type, ClickHouse throws the exception "Unhandled data type" and stops replication.
@@ -77,13 +85,21 @@ Other types are not supported.

 ## Specifics and Recommendations {#specifics-and-recommendations}

+### Compatibility restrictions
+
+Apart from the data type limitations, there are a few restrictions compared to `MySQL` databases that should be resolved before replication is possible:
+
+- Each table in `MySQL` should contain a `PRIMARY KEY`.
+- Replication will not work for tables containing rows with `ENUM` field values outside the range specified in the `ENUM` signature.
+
 ### DDL Queries {#ddl-queries}

 MySQL DDL queries are converted into the corresponding ClickHouse DDL queries ([ALTER](../../sql-reference/statements/alter/index.md), [CREATE](../../sql-reference/statements/create/index.md), [DROP](../../sql-reference/statements/drop.md), [RENAME](../../sql-reference/statements/rename.md)). If ClickHouse cannot parse some DDL query, the query is ignored.

 ### Data Replication {#data-replication}

-`MaterializeMySQL` does not support direct `INSERT`, `DELETE` and `UPDATE` queries. However, they are supported in terms of data replication:
+`MaterializedMySQL` does not support direct `INSERT`, `DELETE` and `UPDATE` queries. However, they are supported in terms of data replication:

 - MySQL `INSERT` query is converted into `INSERT` with `_sign=1`.
|
|||||||
|
|
||||||
- MySQL `UPDATE` query is converted into `INSERT` with `_sign=-1` and `INSERT` with `_sign=1`.
|
- MySQL `UPDATE` query is converted into `INSERT` with `_sign=-1` and `INSERT` with `_sign=1`.
|
||||||
|
|
||||||
### Selecting from MaterializeMySQL Tables {#select}
|
### Selecting from MaterializedMySQL Tables {#select}
|
||||||
|
|
||||||
`SELECT` query from `MaterializeMySQL` tables has some specifics:
|
`SELECT` query from `MaterializedMySQL` tables has some specifics:
|
||||||
|
|
||||||
- If `_version` is not specified in the `SELECT` query, [FINAL](../../sql-reference/statements/select/from.md#select-from-final) modifier is used. So only rows with `MAX(_version)` are selected.
|
- If `_version` is not specified in the `SELECT` query, [FINAL](../../sql-reference/statements/select/from.md#select-from-final) modifier is used. So only rows with `MAX(_version)` are selected.
|
||||||
|
|
||||||
@ -110,10 +126,10 @@ ClickHouse has only one physical order, which is determined by `ORDER BY` clause
|
|||||||
**Notes**
|
**Notes**
|
||||||
|
|
||||||
- Rows with `_sign=-1` are not deleted physically from the tables.
|
- Rows with `_sign=-1` are not deleted physically from the tables.
|
||||||
- Cascade `UPDATE/DELETE` queries are not supported by the `MaterializeMySQL` engine.
|
- Cascade `UPDATE/DELETE` queries are not supported by the `MaterializedMySQL` engine.
|
||||||
- Replication can be easily broken.
|
- Replication can be easily broken.
|
||||||
- Manual operations on database and tables are forbidden.
|
- Manual operations on database and tables are forbidden.
|
||||||
- `MaterializeMySQL` is influenced by [optimize_on_insert](../../operations/settings/settings.md#optimize-on-insert) setting. The data is merged in the corresponding table in the `MaterializeMySQL` database when a table in the MySQL server changes.
|
- `MaterializedMySQL` is influenced by [optimize_on_insert](../../operations/settings/settings.md#optimize-on-insert) setting. The data is merged in the corresponding table in the `MaterializedMySQL` database when a table in the MySQL server changes.
|
||||||
|
|
||||||
## Examples of Use {#examples-of-use}
|
## Examples of Use {#examples-of-use}
|
||||||
|
|
||||||
@ -142,7 +158,7 @@ Database in ClickHouse, exchanging data with the MySQL server:
|
|||||||
The database and the table created:
|
The database and the table created:
|
||||||
|
|
||||||
``` sql
|
``` sql
|
||||||
CREATE DATABASE mysql ENGINE = MaterializeMySQL('localhost:3306', 'db', 'user', '***');
|
CREATE DATABASE mysql ENGINE = MaterializedMySQL('localhost:3306', 'db', 'user', '***');
|
||||||
SHOW TABLES FROM mysql;
|
SHOW TABLES FROM mysql;
|
||||||
```
|
```
|
||||||
|
|
||||||
@ -177,4 +193,4 @@ SELECT * FROM mysql.test;
|
|||||||
└───┴─────┴──────┘
|
└───┴─────┴──────┘
|
||||||
```
|
```
|
||||||
|
|
||||||
[Original article](https://clickhouse.tech/docs/en/engines/database-engines/materialize-mysql/) <!--hide-->
|
[Original article](https://clickhouse.tech/docs/en/engines/database-engines/materialized-mysql/) <!--hide-->
|
@@ -20,7 +20,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]

 Required parameters:

 - `primary_key_name` – any column name in the column list.
 - `primary key` must be specified; it supports only one column in the primary key. The primary key will be serialized in binary as a `rocksdb key`.
 - columns other than the primary key will be serialized in binary as the `rocksdb` value in corresponding order.
 - queries with key `equals` or `in` filtering will be optimized to a multi-key lookup from `rocksdb`.

@@ -39,4 +39,46 @@ ENGINE = EmbeddedRocksDB
 PRIMARY KEY key
 ```

+## Metrics
+
+There is also a `system.rocksdb` table that exposes rocksdb statistics:
+
+```sql
+SELECT
+    name,
+    value
+FROM system.rocksdb
+
+┌─name──────────────────────┬─value─┐
+│ no.file.opens             │     1 │
+│ number.block.decompressed │     1 │
+└───────────────────────────┴───────┘
+```
+
+## Configuration
+
+You can also change any [rocksdb options](https://github.com/facebook/rocksdb/wiki/Option-String-and-Option-Map) using config:
+
+```xml
+<rocksdb>
+    <options>
+        <max_background_jobs>8</max_background_jobs>
+    </options>
+    <column_family_options>
+        <num_levels>2</num_levels>
+    </column_family_options>
+    <tables>
+        <table>
+            <name>TABLE</name>
+            <options>
+                <max_background_jobs>8</max_background_jobs>
+            </options>
+            <column_family_options>
+                <num_levels>2</num_levels>
+            </column_family_options>
+        </table>
+    </tables>
+</rocksdb>
+```
+
 [Original article](https://clickhouse.tech/docs/en/engines/table-engines/integrations/embedded-rocksdb/) <!--hide-->
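The "multi-key lookup" mentioned for `equals`/`in` filters corresponds to a batched point read rather than a scan. A sketch of what that looks like against the stock RocksDB C++ API (assuming `rocksdb::DB::MultiGet`; error handling omitted):

```cpp
#include <rocksdb/db.h>
#include <string>
#include <vector>

// Resolve `WHERE key IN (...)` with one batched MultiGet instead of a scan.
std::vector<std::string> lookupKeys(rocksdb::DB & db, const std::vector<std::string> & keys)
{
    std::vector<rocksdb::Slice> slices(keys.begin(), keys.end());
    std::vector<std::string> values;
    // Statuses are per key; a missing key comes back as IsNotFound().
    std::vector<rocksdb::Status> statuses = db.MultiGet(rocksdb::ReadOptions(), slices, &values);
    (void)statuses;
    return values;
}
```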
@@ -14,6 +14,8 @@ Engines of the family:
 - [Log](../../../engines/table-engines/log-family/log.md)
 - [TinyLog](../../../engines/table-engines/log-family/tinylog.md)

+`Log` family table engines can store data to [HDFS](../../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-hdfs) or [S3](../../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-s3) distributed file systems.
+
 ## Common Properties {#common-properties}

 Engines:
@@ -5,10 +5,8 @@ toc_title: Log

 # Log {#log}

-Engine belongs to the family of log engines. See the common properties of log engines and their differences in the [Log Engine Family](../../../engines/table-engines/log-family/index.md) article.
+The engine belongs to the family of `Log` engines. See the common properties of `Log` engines and their differences in the [Log Engine Family](../../../engines/table-engines/log-family/index.md) article.

-Log differs from [TinyLog](../../../engines/table-engines/log-family/tinylog.md) in that a small file of “marks” resides with the column files. These marks are written on every data block and contain offsets that indicate where to start reading the file in order to skip the specified number of rows. This makes it possible to read table data in multiple threads.
+`Log` differs from [TinyLog](../../../engines/table-engines/log-family/tinylog.md) in that a small file of "marks" resides with the column files. These marks are written on every data block and contain offsets that indicate where to start reading the file in order to skip the specified number of rows. This makes it possible to read table data in multiple threads.
 For concurrent data access, the read operations can be performed simultaneously, while write operations block reads and each other.
-The Log engine does not support indexes. Similarly, if writing to a table failed, the table is broken, and reading from it returns an error. The Log engine is appropriate for temporary data, write-once tables, and for testing or demonstration purposes.
+The `Log` engine does not support indexes. Similarly, if writing to a table failed, the table is broken, and reading from it returns an error. The `Log` engine is appropriate for temporary data, write-once tables, and for testing or demonstration purposes.

-[Original article](https://clickhouse.tech/docs/en/operations/table_engines/log/) <!--hide-->
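A toy model of the "marks" idea described above, with our own structs rather than the actual on-disk format: each mark pairs a file offset with the number of rows written before it, so a reader thread can jump straight to its slice of the table.

```cpp
#include <cstdint>
#include <vector>

struct Mark
{
    uint64_t offset_in_file;  // where the data block starts in the column file
    uint64_t rows_before;     // total rows written before this block
};

// Find the offset to start reading from in order to skip `rows_to_skip` rows.
// Marks are assumed sorted by rows_before, ascending.
uint64_t seekOffset(const std::vector<Mark> & marks, uint64_t rows_to_skip)
{
    uint64_t offset = 0;
    for (const auto & mark : marks)
        if (mark.rows_before <= rows_to_skip)
            offset = mark.offset_in_file;
    return offset;  // the reader skips the remaining rows inside that block
}
```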
@@ -728,7 +728,7 @@ During this time, they are not moved to other volumes or disks. Therefore, until

 ## Using S3 for Data Storage {#table_engine-mergetree-s3}

-`MergeTree` family table engines is able to store data to [S3](https://aws.amazon.com/s3/) using a disk with type `s3`.
+`MergeTree` family table engines can store data to [S3](https://aws.amazon.com/s3/) using a disk with type `s3`.

 This feature is under development and not ready for production. There are known drawbacks such as very low performance.

@@ -764,11 +764,13 @@ Configuration markup:
 ```

 Required parameters:
-- `endpoint` — S3 endpoint url in `path` or `virtual hosted` [styles](https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html). Endpoint url should contain bucket and root path to store data.
+
+- `endpoint` — S3 endpoint URL in `path` or `virtual hosted` [styles](https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html). Endpoint URL should contain a bucket and root path to store data.
 - `access_key_id` — S3 access key id.
 - `secret_access_key` — S3 secret access key.

 Optional parameters:

 - `region` — S3 region name.
 - `use_environment_credentials` — Reads AWS credentials from the environment variables AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY and AWS_SESSION_TOKEN if they exist. Default value is `false`.
 - `use_insecure_imds_request` — If set to `true`, the S3 client will use an insecure IMDS request while obtaining credentials from Amazon EC2 metadata. Default value is `false`.

@@ -784,7 +786,6 @@ Optional parameters:
 - `skip_access_check` — If true, disk access checks will not be performed on disk start-up. Default value is `false`.
 - `server_side_encryption_customer_key_base64` — If specified, required headers for accessing S3 objects with SSE-C encryption will be set.
-

 S3 disk can be configured as `main` or `cold` storage:
 ``` xml
 <storage_configuration>

@@ -823,4 +824,43 @@ S3 disk can be configured as `main` or `cold` storage:

 In case of the `cold` option, data can be moved to S3 if the local disk free size becomes smaller than `move_factor * disk_size`, or by a TTL move rule.

-[Original article](https://clickhouse.tech/docs/ru/operations/table_engines/mergetree/) <!--hide-->
+## Using HDFS for Data Storage {#table_engine-mergetree-hdfs}
+
+[HDFS](https://hadoop.apache.org/docs/r1.2.1/hdfs_design.html) is a distributed file system for remote data storage.
+
+`MergeTree` family table engines can store data to HDFS using a disk with type `HDFS`.
+
+Configuration markup:
+``` xml
+<yandex>
+    <storage_configuration>
+        <disks>
+            <hdfs>
+                <type>hdfs</type>
+                <endpoint>hdfs://hdfs1:9000/clickhouse/</endpoint>
+            </hdfs>
+        </disks>
+        <policies>
+            <hdfs>
+                <volumes>
+                    <main>
+                        <disk>hdfs</disk>
+                    </main>
+                </volumes>
+            </hdfs>
+        </policies>
+    </storage_configuration>
+
+    <merge_tree>
+        <min_bytes_for_wide_part>0</min_bytes_for_wide_part>
+    </merge_tree>
+</yandex>
+```
+
+Required parameters:
+
+- `endpoint` — HDFS endpoint URL in `path` format. Endpoint URL should contain a root path to store data.
+
+Optional parameters:
+
+- `min_bytes_for_seek` — The minimal number of bytes to use a seek operation instead of sequential read. Default value: `1 Mb`.
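The `cold` move rule quoted above reduces to a single comparison. As a sketch (the function name is ours):

```cpp
#include <cstdint>

// Move data to remote storage once local free space drops below
// move_factor * disk_size (the TTL move rule is a separate trigger).
bool shouldMoveToRemote(uint64_t free_bytes, uint64_t disk_size_bytes, double move_factor)
{
    return static_cast<double>(free_bytes) < move_factor * static_cast<double>(disk_size_bytes);
}
```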
@@ -43,7 +43,7 @@ toc_title: Integrations
 - Monitoring
     - [Graphite](https://graphiteapp.org)
         - [graphouse](https://github.com/yandex/graphouse)
-        - [carbon-clickhouse](https://github.com/lomik/carbon-clickhouse) +
+        - [carbon-clickhouse](https://github.com/lomik/carbon-clickhouse)
         - [graphite-clickhouse](https://github.com/lomik/graphite-clickhouse)
         - [graphite-ch-optimizer](https://github.com/innogames/graphite-ch-optimizer) - optimizes staled partitions in [\*GraphiteMergeTree](../../engines/table-engines/mergetree-family/graphitemergetree.md#graphitemergetree) if rules from [rollup configuration](../../engines/table-engines/mergetree-family/graphitemergetree.md#rollup-configuration) could be applied
     - [Grafana](https://grafana.com/)

@@ -5,50 +5,67 @@ toc_title: Testing Hardware

 # How to Test Your Hardware with ClickHouse {#how-to-test-your-hardware-with-clickhouse}

-With this instruction you can run basic ClickHouse performance test on any server without installation of ClickHouse packages.
+You can run a basic ClickHouse performance test on any server without installing ClickHouse packages.
+
+## Automated Run
+
+You can run the benchmark with a single script.
+
+1. Download the script.
+```
+wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/benchmark/hardware.sh
+```
+
+2. Run the script.
+```
+chmod a+x ./hardware.sh
+./hardware.sh
+```
+
+3. Copy the output and send it to clickhouse-feedback@yandex-team.com
+
+All the results are published here: https://clickhouse.tech/benchmark/hardware/
+
+## Manual Run
+
+Alternatively, you can perform the benchmark with the following steps.

-1. Go to “commits” page: https://github.com/ClickHouse/ClickHouse/commits/master
-2. Click on the first green check mark or red cross with green “ClickHouse Build Check” and click on the “Details” link near “ClickHouse Build Check”. There is no such link in some commits, for example commits with documentation. In this case, choose the nearest commit having this link.
-3. Copy the link to `clickhouse` binary for amd64 or aarch64.
-4. ssh to the server and download it with wget:
+1. ssh to the server and download the binary with wget:
 ```bash
-# These links are outdated, please obtain the fresh link from the "commits" page.
 # For amd64:
-wget https://clickhouse-builds.s3.yandex.net/0/e29c4c3cc47ab2a6c4516486c1b77d57e7d42643/clickhouse_build_check/gcc-10_relwithdebuginfo_none_bundled_unsplitted_disable_False_binary/clickhouse
+wget https://builds.clickhouse.tech/master/amd64/clickhouse
 # For aarch64:
-wget https://clickhouse-builds.s3.yandex.net/0/e29c4c3cc47ab2a6c4516486c1b77d57e7d42643/clickhouse_special_build_check/clang-10-aarch64_relwithdebuginfo_none_bundled_unsplitted_disable_False_binary/clickhouse
+wget https://builds.clickhouse.tech/master/aarch64/clickhouse
 # Then do:
 chmod a+x clickhouse
 ```
-5. Download benchmark files:
+2. Download benchmark files:
 ```bash
 wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/benchmark/clickhouse/benchmark-new.sh
 chmod a+x benchmark-new.sh
 wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/benchmark/clickhouse/queries.sql
 ```
-6. Download test data according to the [Yandex.Metrica dataset](../getting-started/example-datasets/metrica.md) instruction (“hits” table containing 100 million rows).
+3. Download test data according to the [Yandex.Metrica dataset](../getting-started/example-datasets/metrica.md) instruction (“hits” table containing 100 million rows).
 ```bash
 wget https://datasets.clickhouse.tech/hits/partitions/hits_100m_obfuscated_v1.tar.xz
 tar xvf hits_100m_obfuscated_v1.tar.xz -C .
 mv hits_100m_obfuscated_v1/* .
 ```
-7. Run the server:
+4. Run the server:
 ```bash
 ./clickhouse server
 ```
-8. Check the data: ssh to the server in another terminal
+5. Check the data: ssh to the server in another terminal
 ```bash
 ./clickhouse client --query "SELECT count() FROM hits_100m_obfuscated"
 100000000
 ```
-9. Edit the benchmark-new.sh, change `clickhouse-client` to `./clickhouse client` and add `--max_memory_usage 100000000000` parameter.
-```bash
-mcedit benchmark-new.sh
-```
-10. Run the benchmark:
+6. Run the benchmark:
 ```bash
 ./benchmark-new.sh hits_100m_obfuscated
 ```
-11. Send the numbers and the info about your hardware configuration to clickhouse-feedback@yandex-team.com
+7. Send the numbers and the info about your hardware configuration to clickhouse-feedback@yandex-team.com

 All the results are published here: https://clickhouse.tech/benchmark/hardware/
@@ -69,6 +69,28 @@ If no conditions are met for a data part, ClickHouse uses the `lz4` compression.
 </compression>
 ```

+## encryption {#server-settings-encryption}
+
+Configures a command to obtain a key to be used by [encryption codecs](../../sql-reference/statements/create/table.md#create-query-encryption-codecs). The command, or a shell script, is expected to write a Base64-encoded key of any length to stdout.
+
+**Example**
+
+For Linux with systemd:
+
+```xml
+<encryption>
+    <key_command>/usr/bin/systemd-ask-password --id="clickhouse-server" --timeout=0 "Enter the ClickHouse encryption passphrase:" | base64</key_command>
+</encryption>
+```
+
+For other systems:
+
+```xml
+<encryption>
+    <key_command><![CDATA[IFS=; echo -n >/dev/tty "Enter the ClickHouse encryption passphrase: "; stty=`stty -F /dev/tty -g`; stty -F /dev/tty -echo; read k </dev/tty; stty -F /dev/tty "$stty"; echo -n $k | base64]]></key_command>
+</encryption>
+```
+
 ## custom_settings_prefixes {#custom_settings_prefixes}

 List of prefixes for [custom settings](../../operations/settings/index.md#custom_settings). The prefixes must be separated with commas.
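The `key_command` contract is simple: run the configured command and capture the Base64-encoded key it writes to stdout. A minimal sketch using POSIX `popen` (ours, not the server's implementation; Base64 decoding is left out):

```cpp
#include <cstdio>
#include <string>

// Run `command` in a shell and return everything it writes to stdout.
// The result here is still Base64 text; the server decodes it to raw key bytes.
std::string readKeyBase64(const std::string & command)
{
    std::string output;
    if (FILE * pipe = popen(command.c_str(), "r"))
    {
        char buf[256];
        while (size_t n = fread(buf, 1, sizeof(buf), pipe))
            output.append(buf, n);
        pclose(pipe);
    }
    return output;
}
```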
@@ -280,14 +280,13 @@ Default value: `0`.

 ## check_sample_column_is_correct {#check_sample_column_is_correct}

-Enables to check column for sampling or sampling expression is correct at table creation.
+Enables a check at table creation that the data type of the column for sampling, or of the sampling expression, is correct. The data type must be one of the unsigned [integer types](../../sql-reference/data-types/int-uint.md): `UInt8`, `UInt16`, `UInt32`, `UInt64`.

 Possible values:

-- true — Check column or sampling expression is correct at table creation.
-- false — Do not check column or sampling expression is correct at table creation.
+- true — The check is enabled.
+- false — The check is disabled at table creation.

 Default value: `true`.

-By default, the ClickHouse server check column for sampling or sampling expression at table creation. If you already had tables with incorrect sampling expression, set value `false` to make ClickHouse server do not raise exception when ClickHouse server is starting.
-[Original article](https://clickhouse.tech/docs/en/operations/settings/merge_tree_settings/) <!--hide-->
+By default, the ClickHouse server checks the data type of the column for sampling or the sampling expression at table creation. If you already have tables with an incorrect sampling expression and do not want the server to raise an exception during startup, set `check_sample_column_is_correct` to `false`.
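The check itself reduces to a type test. A sketch, assuming type names are available as strings (the server naturally works on its own type objects rather than strings):

```cpp
#include <string_view>

// Sampling columns and expressions must be one of the unsigned integer types.
bool isValidSamplingType(std::string_view type_name)
{
    return type_name == "UInt8" || type_name == "UInt16"
        || type_name == "UInt32" || type_name == "UInt64";
}
```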
@@ -2927,7 +2927,7 @@ Result:
 └─────────────┘
 ```

-Note that this setting influences [Materialized view](../../sql-reference/statements/create/view.md#materialized) and [MaterializeMySQL](../../engines/database-engines/materialize-mysql.md) behaviour.
+Note that this setting influences [Materialized view](../../sql-reference/statements/create/view.md#materialized) and [MaterializedMySQL](../../engines/database-engines/materialized-mysql.md) behaviour.

 ## engine_file_empty_if_not_exists {#engine-file-empty_if-not-exists}
@@ -62,4 +62,3 @@ exception_code: ZOK
 ```
-
 [Original article](https://clickhouse.tech/docs/en/operations/system_tables/distributed_ddl_queuedistributed_ddl_queue.md) <!--hide-->
@@ -51,6 +51,7 @@ Columns:
 - `databases` ([Array](../../sql-reference/data-types/array.md)([LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md))) — Names of the databases present in the query.
 - `tables` ([Array](../../sql-reference/data-types/array.md)([LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md))) — Names of the tables present in the query.
 - `columns` ([Array](../../sql-reference/data-types/array.md)([LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md))) — Names of the columns present in the query.
+- `projections` ([String](../../sql-reference/data-types/string.md)) — Names of the projections used during the query execution.
 - `exception_code` ([Int32](../../sql-reference/data-types/int-uint.md)) — Code of an exception.
 - `exception` ([String](../../sql-reference/data-types/string.md)) — Exception message.
 - `stack_trace` ([String](../../sql-reference/data-types/string.md)) — [Stack trace](https://en.wikipedia.org/wiki/Stack_trace). An empty string, if the query was completed successfully.

@@ -65,6 +66,8 @@ Columns:
 - `initial_query_id` ([String](../../sql-reference/data-types/string.md)) — ID of the initial query (for distributed query execution).
 - `initial_address` ([IPv6](../../sql-reference/data-types/domains/ipv6.md)) — IP address that the parent query was launched from.
 - `initial_port` ([UInt16](../../sql-reference/data-types/int-uint.md)) — The client port that was used to make the parent query.
+- `initial_query_start_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Initial query starting time (for distributed query execution).
+- `initial_query_start_time_microseconds` ([DateTime64](../../sql-reference/data-types/datetime64.md)) — Initial query starting time with microseconds precision (for distributed query execution).
 - `interface` ([UInt8](../../sql-reference/data-types/int-uint.md)) — Interface that the query was initiated from. Possible values:
     - 1 — TCP.
     - 2 — HTTP.
|
|||||||
**Example**
|
**Example**
|
||||||
|
|
||||||
``` sql
|
``` sql
|
||||||
SELECT * FROM system.query_log WHERE type = 'QueryFinish' AND (query LIKE '%toDate(\'2000-12-05\')%') ORDER BY query_start_time DESC LIMIT 1 FORMAT Vertical;
|
SELECT * FROM system.query_log WHERE type = 'QueryFinish' ORDER BY query_start_time DESC LIMIT 1 FORMAT Vertical;
|
||||||
```
|
```
|
||||||
|
|
||||||
``` text
|
``` text
|
||||||
Row 1:
|
Row 1:
|
||||||
──────
|
──────
|
||||||
type: QueryStart
|
type: QueryFinish
|
||||||
event_date: 2020-09-11
|
event_date: 2021-07-28
|
||||||
event_time: 2020-09-11 10:08:17
|
event_time: 2021-07-28 13:46:56
|
||||||
event_time_microseconds: 2020-09-11 10:08:17.063321
|
event_time_microseconds: 2021-07-28 13:46:56.719791
|
||||||
query_start_time: 2020-09-11 10:08:17
|
query_start_time: 2021-07-28 13:46:56
|
||||||
query_start_time_microseconds: 2020-09-11 10:08:17.063321
|
query_start_time_microseconds: 2021-07-28 13:46:56.704542
|
||||||
query_duration_ms: 0
|
query_duration_ms: 14
|
||||||
read_rows: 0
|
read_rows: 8393
|
||||||
read_bytes: 0
|
read_bytes: 374325
|
||||||
written_rows: 0
|
written_rows: 0
|
||||||
written_bytes: 0
|
written_bytes: 0
|
||||||
result_rows: 0
|
result_rows: 4201
|
||||||
result_bytes: 0
|
result_bytes: 153024
|
||||||
memory_usage: 0
|
memory_usage: 4714038
|
||||||
current_database: default
|
current_database: default
|
||||||
query: INSERT INTO test1 VALUES
|
query: SELECT DISTINCT arrayJoin(extractAll(name, '[\\w_]{2,}')) AS res FROM (SELECT name FROM system.functions UNION ALL SELECT name FROM system.table_engines UNION ALL SELECT name FROM system.formats UNION ALL SELECT name FROM system.table_functions UNION ALL SELECT name FROM system.data_type_families UNION ALL SELECT name FROM system.merge_tree_settings UNION ALL SELECT name FROM system.settings UNION ALL SELECT cluster FROM system.clusters UNION ALL SELECT macro FROM system.macros UNION ALL SELECT policy_name FROM system.storage_policies UNION ALL SELECT concat(func.name, comb.name) FROM system.functions AS func CROSS JOIN system.aggregate_function_combinators AS comb WHERE is_aggregate UNION ALL SELECT name FROM system.databases LIMIT 10000 UNION ALL SELECT DISTINCT name FROM system.tables LIMIT 10000 UNION ALL SELECT DISTINCT name FROM system.dictionaries LIMIT 10000 UNION ALL SELECT DISTINCT name FROM system.columns LIMIT 10000) WHERE notEmpty(res)
|
||||||
exception_code: 0
|
normalized_query_hash: 6666026786019643712
|
||||||
|
query_kind: Select
|
||||||
|
databases: ['system']
|
||||||
|
tables: ['system.aggregate_function_combinators','system.clusters','system.columns','system.data_type_families','system.databases','system.dictionaries','system.formats','system.functions','system.macros','system.merge_tree_settings','system.settings','system.storage_policies','system.table_engines','system.table_functions','system.tables']
|
||||||
|
columns: ['system.aggregate_function_combinators.name','system.clusters.cluster','system.columns.name','system.data_type_families.name','system.databases.name','system.dictionaries.name','system.formats.name','system.functions.is_aggregate','system.functions.name','system.macros.macro','system.merge_tree_settings.name','system.settings.name','system.storage_policies.policy_name','system.table_engines.name','system.table_functions.name','system.tables.name']
|
||||||
|
projections: []
|
||||||
|
exception_code: 0
|
||||||
exception:
|
exception:
|
||||||
stack_trace:
|
stack_trace:
|
||||||
is_initial_query: 1
|
is_initial_query: 1
|
||||||
user: default
|
user: default
|
||||||
query_id: 50a320fd-85a8-49b8-8761-98a86bcbacef
|
query_id: a3361f6e-a1fd-4d54-9f6f-f93a08bab0bf
|
||||||
address: ::ffff:127.0.0.1
|
address: ::ffff:127.0.0.1
|
||||||
port: 33452
|
port: 51006
|
||||||
initial_user: default
|
initial_user: default
|
||||||
initial_query_id: 50a320fd-85a8-49b8-8761-98a86bcbacef
|
initial_query_id: a3361f6e-a1fd-4d54-9f6f-f93a08bab0bf
|
||||||
initial_address: ::ffff:127.0.0.1
|
initial_address: ::ffff:127.0.0.1
|
||||||
initial_port: 33452
|
initial_port: 51006
|
||||||
interface: 1
|
initial_query_start_time: 2021-07-28 13:46:56
|
||||||
os_user: bharatnc
|
initial_query_start_time_microseconds: 2021-07-28 13:46:56.704542
|
||||||
client_hostname: tower
|
interface: 1
|
||||||
client_name: ClickHouse
|
os_user:
|
||||||
client_revision: 54437
|
client_hostname:
|
||||||
client_version_major: 20
|
client_name: ClickHouse client
|
||||||
client_version_minor: 7
|
client_revision: 54449
|
||||||
client_version_patch: 2
|
client_version_major: 21
|
||||||
http_method: 0
|
client_version_minor: 8
|
||||||
|
client_version_patch: 0
|
||||||
|
http_method: 0
|
||||||
http_user_agent:
|
http_user_agent:
|
||||||
|
http_referer:
|
||||||
|
forwarded_for:
|
||||||
quota_key:
|
quota_key:
|
||||||
revision: 54440
|
revision: 54453
|
||||||
thread_ids: []
|
log_comment:
|
||||||
ProfileEvents: {'Query':1,'SelectQuery':1,'ReadCompressedBytes':36,'CompressedReadBufferBlocks':1,'CompressedReadBufferBytes':10,'IOBufferAllocs':1,'IOBufferAllocBytes':89,'ContextLock':15,'RWLockAcquiredReadLocks':1}
|
thread_ids: [5058,22097,22110,22094]
|
||||||
Settings: {'background_pool_size':'32','load_balancing':'random','allow_suspicious_low_cardinality_types':'1','distributed_aggregation_memory_efficient':'1','skip_unavailable_shards':'1','log_queries':'1','max_bytes_before_external_group_by':'20000000000','max_bytes_before_external_sort':'20000000000','allow_introspection_functions':'1'}
|
ProfileEvents.Names: ['Query','SelectQuery','ArenaAllocChunks','ArenaAllocBytes','FunctionExecute','NetworkSendElapsedMicroseconds','SelectedRows','SelectedBytes','ContextLock','RWLockAcquiredReadLocks','RealTimeMicroseconds','UserTimeMicroseconds','SystemTimeMicroseconds','SoftPageFaults','OSCPUWaitMicroseconds','OSCPUVirtualTimeMicroseconds','OSWriteBytes','OSWriteChars']
|
||||||
|
ProfileEvents.Values: [1,1,39,352256,64,360,8393,374325,412,440,34480,13108,4723,671,19,17828,8192,10240]
|
||||||
|
Settings.Names: ['load_balancing','max_memory_usage']
|
||||||
|
Settings.Values: ['random','10000000000']
|
||||||
|
used_aggregate_functions: []
|
||||||
|
used_aggregate_function_combinators: []
|
||||||
|
used_database_engines: []
|
||||||
|
used_data_type_families: ['UInt64','UInt8','Nullable','String','date']
|
||||||
|
used_dictionaries: []
|
||||||
|
used_formats: []
|
||||||
|
used_functions: ['concat','notEmpty','extractAll']
|
||||||
|
used_storages: []
|
||||||
|
used_table_functions: []
|
||||||
```
|
```
|
||||||
|
|
||||||
**See Also**
|
**See Also**
|
||||||
|
@ -85,7 +85,7 @@ hex(arg)
|
|||||||
|
|
||||||
The function uses uppercase letters `A-F` and does not use any prefixes (like `0x`) or suffixes (like `h`).
|
The function uses uppercase letters `A-F` and does not use any prefixes (like `0x`) or suffixes (like `h`).
|
||||||
|
|
||||||
For integer arguments, it prints hex digits (“nibbles”) from the most significant to least significant (big endian or “human readable” order). It starts with the most significant non-zero byte (leading zero bytes are omitted) but always prints both digits of every byte even if leading digit is zero.
|
For integer arguments, it prints hex digits (“nibbles”) from the most significant to least significant (big-endian or “human-readable” order). It starts with the most significant non-zero byte (leading zero bytes are omitted) but always prints both digits of every byte even if the leading digit is zero.
|
||||||
|
|
||||||
**Example**
|
**Example**
|
||||||
|
|
||||||
@ -105,7 +105,7 @@ Values of type `Date` and `DateTime` are formatted as corresponding integers (th
|
|||||||
|
|
||||||
For `String` and `FixedString`, all bytes are simply encoded as two hexadecimal numbers. Zero bytes are not omitted.
|
For `String` and `FixedString`, all bytes are simply encoded as two hexadecimal numbers. Zero bytes are not omitted.
|
||||||
|
|
||||||
Values of floating point and Decimal types are encoded as their representation in memory. As we support little endian architecture, they are encoded in little endian. Zero leading/trailing bytes are not omitted.
|
Values of floating point and Decimal types are encoded as their representation in memory. As we support little-endian architecture, they are encoded in little-endian. Zero leading/trailing bytes are not omitted.
|
||||||
|
|
||||||
**Arguments**
|
**Arguments**
|
||||||
|
|
||||||
@ -206,6 +206,141 @@ Result:
|
|||||||
└──────┘
|
└──────┘
|
||||||
```
|
```
|
||||||
|
|
||||||
|
## bin {#bin}
|
||||||
|
|
||||||
|
Returns a string containing the argument’s binary representation.
|
||||||
|
|
||||||
|
Alias: `BIN`.
|
||||||
|
|
||||||
|
**Syntax**
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
bin(arg)
|
||||||
|
```
|
||||||
|
|
||||||
|
For integer arguments, it prints binary digits from the most significant to the least significant (big-endian or “human-readable” order). It starts with the most significant non-zero byte (leading zero bytes are omitted) but always prints all eight digits of every byte even if the leading digit is zero.
|
||||||
|
|
||||||
|
**Example**
|
||||||
|
|
||||||
|
Query:
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
SELECT bin(1);
|
||||||
|
```
|
||||||
|
|
||||||
|
Result:
|
||||||
|
|
||||||
|
``` text
|
||||||
|
00000001
|
||||||
|
```
|
||||||
|
|
||||||
|
Values of type `Date` and `DateTime` are formatted as corresponding integers (the number of days since Epoch for Date and the value of Unix Timestamp for DateTime).
|
||||||
|
|
||||||
|
For `String` and `FixedString`, every byte is simply encoded as eight binary digits. Zero bytes are not omitted.
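For example (a small sketch of the byte encoding described above):

``` sql
SELECT bin('a');
```

The expected output is `01100001`, the eight binary digits of the byte `0x61`.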
|
||||||
|
|
||||||
|
Values of floating-point and Decimal types are encoded as their representation in memory. As we support little-endian architecture, they are encoded in little-endian. Zero leading/trailing bytes are not omitted.
|
||||||
|
|
||||||
|
**Arguments**
|
||||||
|
|
||||||
|
- `arg` — A value to convert to binary. Types: [String](../../sql-reference/data-types/string.md), [UInt](../../sql-reference/data-types/int-uint.md), [Float](../../sql-reference/data-types/float.md), [Decimal](../../sql-reference/data-types/decimal.md), [Date](../../sql-reference/data-types/date.md) or [DateTime](../../sql-reference/data-types/datetime.md).
|
||||||
|
|
||||||
|
**Returned value**
|
||||||
|
|
||||||
|
- A string with the binary representation of the argument.
|
||||||
|
|
||||||
|
Type: `String`.
|
||||||
|
|
||||||
|
**Example**
|
||||||
|
|
||||||
|
Query:
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
SELECT bin(toFloat32(number)) as bin_presentation FROM numbers(15, 2);
|
||||||
|
```
|
||||||
|
|
||||||
|
Result:
|
||||||
|
|
||||||
|
``` text
|
||||||
|
┌─bin_presentation─────────────────┐
|
||||||
|
│ 00000000000000000111000001000001 │
|
||||||
|
│ 00000000000000001000000001000001 │
|
||||||
|
└──────────────────────────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
Query:
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
SELECT bin(toFloat64(number)) as bin_presentation FROM numbers(15, 2);
|
||||||
|
```
|
||||||
|
|
||||||
|
Result:
|
||||||
|
|
||||||
|
``` text
|
||||||
|
┌─bin_presentation─────────────────────────────────────────────────┐
|
||||||
|
│ 0000000000000000000000000000000000000000000000000010111001000000 │
|
||||||
|
│ 0000000000000000000000000000000000000000000000000011000001000000 │
|
||||||
|
└──────────────────────────────────────────────────────────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
## unbin {#unbinstr}
|
||||||
|
|
||||||
|
Performs the opposite operation of [bin](#bin). It interprets each pair of binary digits (in the argument) as a number and converts it to the byte represented by the number. The return value is a binary string (BLOB).
|
||||||
|
|
||||||
|
If you want to convert the result to a number, you can use the [reverse](../../sql-reference/functions/string-functions.md#reverse) and [reinterpretAs<Type>](../../sql-reference/functions/type-conversion-functions.md#type-conversion-functions) functions.
|
||||||
|
|
||||||
|
!!! note "Note"
|
||||||
|
If `unbin` is invoked from within `clickhouse-client`, binary strings are displayed using UTF-8.
|
||||||
|
|
||||||
|
Alias: `UNBIN`.
|
||||||
|
|
||||||
|
**Syntax**
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
unbin(arg)
|
||||||
|
```
|
||||||
|
|
||||||
|
**Arguments**
|
||||||
|
|
||||||
|
- `arg` — A string containing any number of binary digits. Type: [String](../../sql-reference/data-types/string.md).
|
||||||
|
|
||||||
|
Supports binary digits `0-1`. The number of binary digits does not have to be a multiple of eight. If the argument string contains anything other than binary digits, some implementation-defined result is returned (an exception isn’t thrown). For a numeric argument, `unbin()` does not perform the inverse of `bin(N)`.
|
||||||
|
|
||||||
|
**Returned value**
|
||||||
|
|
||||||
|
- A binary string (BLOB).
|
||||||
|
|
||||||
|
Type: [String](../../sql-reference/data-types/string.md).
|
||||||
|
|
||||||
|
**Example**
|
||||||
|
|
||||||
|
Query:
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
SELECT UNBIN('001100000011000100110010'), UNBIN('0100110101111001010100110101000101001100');
|
||||||
|
```
|
||||||
|
|
||||||
|
Result:
|
||||||
|
|
||||||
|
``` text
|
||||||
|
┌─unbin('001100000011000100110010')─┬─unbin('0100110101111001010100110101000101001100')─┐
|
||||||
|
│ 012 │ MySQL │
|
||||||
|
└───────────────────────────────────┴───────────────────────────────────────────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
Query:
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
SELECT reinterpretAsUInt64(reverse(unbin('1010'))) AS num;
|
||||||
|
```
|
||||||
|
|
||||||
|
Result:
|
||||||
|
|
||||||
|
``` text
|
||||||
|
┌─num─┐
|
||||||
|
│ 10 │
|
||||||
|
└─────┘
|
||||||
|
```
|
||||||
|
|
||||||
## UUIDStringToNum(str) {#uuidstringtonumstr}
|
## UUIDStringToNum(str) {#uuidstringtonumstr}
|
||||||
|
|
||||||
Accepts a string containing 36 characters in the format `123e4567-e89b-12d3-a456-426655440000`, and returns it as a set of bytes in a FixedString(16).
|
Accepts a string containing 36 characters in the format `123e4567-e89b-12d3-a456-426655440000`, and returns it as a set of bytes in a FixedString(16).
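For example (an illustrative call; the UUID literal is arbitrary):

``` sql
SELECT UUIDStringToNum('612f3c40-5d3b-217e-707b-6a546a3d7b29') AS bytes;
```

The result is the 16 raw bytes of the UUID as a `FixedString(16)`.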
|
||||||
|
@ -13,13 +13,14 @@ toc_title: Strings
|
|||||||
Returns 1 for an empty string or 0 for a non-empty string.
|
Returns 1 for an empty string or 0 for a non-empty string.
|
||||||
The result type is UInt8.
|
The result type is UInt8.
|
||||||
A string is considered non-empty if it contains at least one byte, even if this is a space or a null byte.
|
A string is considered non-empty if it contains at least one byte, even if this is a space or a null byte.
|
||||||
The function also works for arrays.
|
The function also works for arrays and UUIDs.
|
||||||
|
A UUID is considered empty if it is all zeros (the nil UUID).
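As a quick illustration (a sketch; `toUUID` of the all-zero string below produces the nil UUID):

``` sql
SELECT empty(''), empty('a'), empty(toUUID('00000000-0000-0000-0000-000000000000'));
```

The first and third values are expected to be `1`, the second `0`.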
|
||||||
|
|
||||||
## notEmpty {#notempty}
|
## notEmpty {#notempty}
|
||||||
|
|
||||||
Returns 0 for an empty string or 1 for a non-empty string.
|
Returns 0 for an empty string or 1 for a non-empty string.
|
||||||
The result type is UInt8.
|
The result type is UInt8.
|
||||||
The function also works for arrays.
|
The function also works for arrays and UUIDs.
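And in the same spirit (illustrative):

``` sql
SELECT notEmpty([1, 2]), notEmpty('');
```

The first value is expected to be `1`, the second `0`.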
|
||||||
|
|
||||||
## length {#length}
|
## length {#length}
|
||||||
|
|
||||||
|
@ -20,12 +20,11 @@ The following actions are supported:
|
|||||||
|
|
||||||
- [ADD COLUMN](#alter_add-column) — Adds a new column to the table.
|
- [ADD COLUMN](#alter_add-column) — Adds a new column to the table.
|
||||||
- [DROP COLUMN](#alter_drop-column) — Deletes the column.
|
- [DROP COLUMN](#alter_drop-column) — Deletes the column.
|
||||||
- [RENAME COLUMN](#alter_rename-column) — Renames the column.
|
- [RENAME COLUMN](#alter_rename-column) — Renames an existing column.
|
||||||
- [CLEAR COLUMN](#alter_clear-column) — Resets column values.
|
- [CLEAR COLUMN](#alter_clear-column) — Resets column values.
|
||||||
- [COMMENT COLUMN](#alter_comment-column) — Adds a text comment to the column.
|
- [COMMENT COLUMN](#alter_comment-column) — Adds a text comment to the column.
|
||||||
- [MODIFY COLUMN](#alter_modify-column) — Changes column’s type, default expression and TTL.
|
- [MODIFY COLUMN](#alter_modify-column) — Changes column’s type, default expression and TTL.
|
||||||
- [MODIFY COLUMN REMOVE](#modify-remove) — Removes one of the column properties.
|
- [MODIFY COLUMN REMOVE](#modify-remove) — Removes one of the column properties.
|
||||||
- [RENAME COLUMN](#alter_rename-column) — Renames an existing column.
|
|
||||||
|
|
||||||
These actions are described in detail below.
|
These actions are described in detail below.
|
||||||
|
|
||||||
@ -35,7 +34,7 @@ These actions are described in detail below.
|
|||||||
ADD COLUMN [IF NOT EXISTS] name [type] [default_expr] [codec] [AFTER name_after | FIRST]
|
ADD COLUMN [IF NOT EXISTS] name [type] [default_expr] [codec] [AFTER name_after | FIRST]
|
||||||
```
|
```
|
||||||
|
|
||||||
Adds a new column to the table with the specified `name`, `type`, [`codec`](../../../sql-reference/statements/create/table.md#codecs) and `default_expr` (see the section [Default expressions](../../../sql-reference/statements/create/table.md#create-default-values)).
|
Adds a new column to the table with the specified `name`, `type`, [`codec`](../create/table.md#codecs) and `default_expr` (see the section [Default expressions](../../../sql-reference/statements/create/table.md#create-default-values)).
|
||||||
|
|
||||||
If the `IF NOT EXISTS` clause is included, the query won’t return an error if the column already exists. If you specify `AFTER name_after` (the name of another column), the column is added after the specified one in the list of table columns. If you want to add a column to the beginning of the table use the `FIRST` clause. Otherwise, the column is added to the end of the table. For a chain of actions, `name_after` can be the name of a column that is added in one of the previous actions.
|
If the `IF NOT EXISTS` clause is included, the query won’t return an error if the column already exists. If you specify `AFTER name_after` (the name of another column), the column is added after the specified one in the list of table columns. If you want to add a column to the beginning of the table use the `FIRST` clause. Otherwise, the column is added to the end of the table. For a chain of actions, `name_after` can be the name of a column that is added in one of the previous actions.
|
||||||
|
|
||||||
@ -64,6 +63,7 @@ Added2 UInt32
|
|||||||
ToDrop UInt32
|
ToDrop UInt32
|
||||||
Added3 UInt32
|
Added3 UInt32
|
||||||
```
|
```
|
||||||
|
|
||||||
## DROP COLUMN {#alter_drop-column}
|
## DROP COLUMN {#alter_drop-column}
|
||||||
|
|
||||||
``` sql
|
``` sql
|
||||||
@ -118,7 +118,7 @@ ALTER TABLE visits CLEAR COLUMN browser IN PARTITION tuple()
|
|||||||
## COMMENT COLUMN {#alter_comment-column}
|
## COMMENT COLUMN {#alter_comment-column}
|
||||||
|
|
||||||
``` sql
|
``` sql
|
||||||
COMMENT COLUMN [IF EXISTS] name 'comment'
|
COMMENT COLUMN [IF EXISTS] name 'Text comment'
|
||||||
```
|
```
|
||||||
|
|
||||||
Adds a comment to the column. If the `IF EXISTS` clause is specified, the query won’t return an error if the column does not exist.
|
Adds a comment to the column. If the `IF EXISTS` clause is specified, the query won’t return an error if the column does not exist.
|
||||||
@ -136,7 +136,7 @@ ALTER TABLE visits COMMENT COLUMN browser 'The table shows the browser used for
|
|||||||
## MODIFY COLUMN {#alter_modify-column}
|
## MODIFY COLUMN {#alter_modify-column}
|
||||||
|
|
||||||
``` sql
|
``` sql
|
||||||
MODIFY COLUMN [IF EXISTS] name [type] [default_expr] [TTL] [AFTER name_after | FIRST]
|
MODIFY COLUMN [IF EXISTS] name [type] [default_expr] [codec] [TTL] [AFTER name_after | FIRST]
|
||||||
```
|
```
|
||||||
|
|
||||||
This query changes the `name` column properties:
|
This query changes the `name` column properties:
|
||||||
@ -145,8 +145,12 @@ This query changes the `name` column properties:
|
|||||||
|
|
||||||
- Default expression
|
- Default expression
|
||||||
|
|
||||||
|
- Compression Codec
|
||||||
|
|
||||||
- TTL
|
- TTL
|
||||||
|
|
||||||
|
For examples of modifying column compression codecs, see [Column Compression Codecs](../create/table.md#codecs).
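For instance (an illustrative sketch reusing the `visits` table from the examples above; the codec choice is arbitrary):

``` sql
ALTER TABLE visits MODIFY COLUMN browser String CODEC(ZSTD(1));
```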
|
||||||
|
|
||||||
For examples of modifying column TTL, see [Column TTL](../../../engines/table-engines/mergetree-family/mergetree.md#mergetree-column-ttl).
|
For examples of modifying column TTL, see [Column TTL](../../../engines/table-engines/mergetree-family/mergetree.md#mergetree-column-ttl).
|
||||||
|
|
||||||
If the `IF EXISTS` clause is specified, the query won’t return an error if the column does not exist.
|
If the `IF EXISTS` clause is specified, the query won’t return an error if the column does not exist.
|
||||||
@ -179,6 +183,8 @@ ALTER TABLE table_name MODIFY column_name REMOVE property;
|
|||||||
|
|
||||||
**Example**
|
**Example**
|
||||||
|
|
||||||
|
Remove TTL:
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
ALTER TABLE table_with_ttl MODIFY COLUMN column_ttl REMOVE TTL;
|
ALTER TABLE table_with_ttl MODIFY COLUMN column_ttl REMOVE TTL;
|
||||||
```
|
```
|
||||||
@ -187,22 +193,6 @@ ALTER TABLE table_with_ttl MODIFY COLUMN column_ttl REMOVE TTL;
|
|||||||
|
|
||||||
- [REMOVE TTL](ttl.md).
|
- [REMOVE TTL](ttl.md).
|
||||||
|
|
||||||
## RENAME COLUMN {#alter_rename-column}
|
|
||||||
|
|
||||||
Renames an existing column.
|
|
||||||
|
|
||||||
Syntax:
|
|
||||||
|
|
||||||
```sql
|
|
||||||
ALTER TABLE table_name RENAME COLUMN column_name TO new_column_name
|
|
||||||
```
|
|
||||||
|
|
||||||
**Example**
|
|
||||||
|
|
||||||
```sql
|
|
||||||
ALTER TABLE table_with_ttl RENAME COLUMN column_ttl TO column_ttl_new;
|
|
||||||
```
|
|
||||||
|
|
||||||
## Limitations {#alter-query-limitations}
|
## Limitations {#alter-query-limitations}
|
||||||
|
|
||||||
The `ALTER` query lets you create and delete separate elements (columns) in nested data structures, but not whole nested data structures. To add a nested data structure, you can add columns with a name like `name.nested_name` and the type `Array(T)`. A nested data structure is equivalent to multiple array columns with a name that has the same prefix before the dot.
|
The `ALTER` query lets you create and delete separate elements (columns) in nested data structures, but not whole nested data structures. To add a nested data structure, you can add columns with a name like `name.nested_name` and the type `Array(T)`. A nested data structure is equivalent to multiple array columns with a name that has the same prefix before the dot.
|
||||||
@ -213,4 +203,4 @@ If the `ALTER` query is not sufficient to make the table changes you need, you c
|
|||||||
|
|
||||||
The `ALTER` query blocks all reads and writes for the table. In other words, if a long `SELECT` is running at the time of the `ALTER` query, the `ALTER` query will wait for it to complete. At the same time, all new queries to the same table will wait while this `ALTER` is running.
|
The `ALTER` query blocks all reads and writes for the table. In other words, if a long `SELECT` is running at the time of the `ALTER` query, the `ALTER` query will wait for it to complete. At the same time, all new queries to the same table will wait while this `ALTER` is running.
|
||||||
|
|
||||||
For tables that do not store data themselves (such as `Merge` and `Distributed`), `ALTER` just changes the table structure, and does not change the structure of subordinate tables. For example, when running ALTER for a `Distributed` table, you will also need to run `ALTER` for the tables on all remote servers.
|
For tables that do not store data themselves (such as [Merge](../../../engines/table-engines/special/merge.md) and [Distributed](../../../engines/table-engines/special/distributed.md)), `ALTER` just changes the table structure, and does not change the structure of subordinate tables. For example, when running `ALTER` for a `Distributed` table, you will also need to run `ALTER` for the tables on all remote servers.
|
||||||
|
60
docs/en/sql-reference/statements/alter/setting.md
Normal file
60
docs/en/sql-reference/statements/alter/setting.md
Normal file
@ -0,0 +1,60 @@
|
|||||||
|
---
|
||||||
|
toc_priority: 38
|
||||||
|
toc_title: SETTING
|
||||||
|
---
|
||||||
|
|
||||||
|
# Table Settings Manipulations {#table_settings_manipulations}
|
||||||
|
|
||||||
|
There is a set of queries to change table settings. You can modify settings or reset them to default values. A single query can change several settings at once.
|
||||||
|
If a setting with the specified name does not exist, then the query raises an exception.
|
||||||
|
|
||||||
|
**Syntax**
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
ALTER TABLE [db].name [ON CLUSTER cluster] MODIFY|RESET SETTING ...
|
||||||
|
```
|
||||||
|
|
||||||
|
!!! note "Note"
|
||||||
|
These queries can be applied to [MergeTree](../../../engines/table-engines/mergetree-family/mergetree.md) tables only.
|
||||||
|
|
||||||
|
|
||||||
|
## MODIFY SETTING {#alter_modify_setting}
|
||||||
|
|
||||||
|
Changes table settings.
|
||||||
|
|
||||||
|
**Syntax**
|
||||||
|
|
||||||
|
```sql
|
||||||
|
MODIFY SETTING setting_name=value [, ...]
|
||||||
|
```
|
||||||
|
|
||||||
|
**Example**
|
||||||
|
|
||||||
|
```sql
|
||||||
|
CREATE TABLE example_table (id UInt32, data String) ENGINE=MergeTree() ORDER BY id;
|
||||||
|
|
||||||
|
ALTER TABLE example_table MODIFY SETTING max_part_loading_threads=8, max_parts_in_total=50000;
|
||||||
|
```
|
||||||
|
|
||||||
|
## RESET SETTING {#alter_reset_setting}
|
||||||
|
|
||||||
|
Resets table settings to their default values. If a setting is already at its default value, no action is taken.
|
||||||
|
|
||||||
|
**Syntax**
|
||||||
|
|
||||||
|
```sql
|
||||||
|
RESET SETTING setting_name [, ...]
|
||||||
|
```
|
||||||
|
|
||||||
|
**Example**
|
||||||
|
|
||||||
|
```sql
|
||||||
|
CREATE TABLE example_table (id UInt32, data String) ENGINE=MergeTree() ORDER BY id
|
||||||
|
SETTINGS max_part_loading_threads=8;
|
||||||
|
|
||||||
|
ALTER TABLE example_table RESET SETTING max_part_loading_threads;
|
||||||
|
```
|
||||||
|
|
||||||
|
**See Also**
|
||||||
|
|
||||||
|
- [MergeTree settings](../../../operations/settings/merge-tree-settings.md)
|
@ -254,6 +254,20 @@ CREATE TABLE codec_example
|
|||||||
ENGINE = MergeTree()
|
ENGINE = MergeTree()
|
||||||
```
|
```
|
||||||
|
|
||||||
|
### Encryption Codecs {#create-query-encryption-codecs}
|
||||||
|
|
||||||
|
These codecs don't actually compress data, but instead encrypt data on disk. These are only available when an encryption key is specified by [encryption](../../../operations/server-configuration-parameters/settings.md#server-settings-encryption) settings. Note that encryption only makes sense at the end of codec pipelines, because encrypted data usually can't be compressed in any meaningful way.
|
||||||
|
|
||||||
|
Encryption codecs:
|
||||||
|
|
||||||
|
- `Encrypted('AES-128-GCM-SIV')` — Encrypts data with AES-128 in [RFC 8452](https://tools.ietf.org/html/rfc8452) GCM-SIV mode. This codec uses a fixed nonce and encryption is therefore deterministic. This makes it compatible with deduplicating engines such as [ReplicatedMergeTree](../../../engines/table-engines/mergetree-family/replication.md) but has a weakness: when the same data block is encrypted twice, the resulting ciphertext will be exactly the same so an adversary who can read the disk can see this equivalence (although only the equivalence).
|
||||||
|
|
||||||
|
!!! attention "Attention"
|
||||||
|
Most engines including the "*MergeTree" family create index files on disk without applying codecs. This means plaintext will appear on disk if an encrypted column is indexed.
|
||||||
|
|
||||||
|
!!! attention "Attention"
|
||||||
|
If you perform a SELECT query mentioning a specific value in an encrypted column (such as in its WHERE clause), the value may appear in [system.query_log](../../../operations/system-tables/query_log.md). You may want to disable the logging.
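As a sketch of how such a codec is declared (assuming the `encryption` server settings mentioned above are configured; the table and column names are illustrative):

``` sql
CREATE TABLE encrypted_example
(
    id UInt64,
    secret String CODEC(LZ4, Encrypted('AES-128-GCM-SIV'))
)
ENGINE = MergeTree()
ORDER BY id;
```

Note that the encryption codec comes last in the pipeline, after compression.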
|
||||||
|
|
||||||
## Temporary Tables {#temporary-tables}
|
## Temporary Tables {#temporary-tables}
|
||||||
|
|
||||||
ClickHouse supports temporary tables which have the following characteristics:
|
ClickHouse supports temporary tables which have the following characteristics:
|
||||||
|
@ -45,7 +45,7 @@ toc_title: "\u7D71\u5408"
|
|||||||
- Monitoring
|
- Monitoring
|
||||||
- [Graphite](https://graphiteapp.org)
|
- [Graphite](https://graphiteapp.org)
|
||||||
- [Graphouse](https://github.com/yandex/graphouse)
|
- [Graphouse](https://github.com/yandex/graphouse)
|
||||||
- [carbon-clickhouse](https://github.com/lomik/carbon-clickhouse) +
|
- [carbon-clickhouse](https://github.com/lomik/carbon-clickhouse)
|
||||||
- [graphite-clickhouse](https://github.com/lomik/graphite-clickhouse)
|
- [graphite-clickhouse](https://github.com/lomik/graphite-clickhouse)
|
||||||
- [graphite-ch-optimizer](https://github.com/innogames/graphite-ch-optimizer) - optimizes staled partitions in [\*GraphiteMergeTree](../../engines/table-engines/mergetree-family/graphitemergetree.md#graphitemergetree) if rules from the [rollup configuration](../../engines/table-engines/mergetree-family/graphitemergetree.md#rollup-configuration) could be applied
|
- [graphite-ch-optimizer](https://github.com/innogames/graphite-ch-optimizer) - optimizes staled partitions in [\*GraphiteMergeTree](../../engines/table-engines/mergetree-family/graphitemergetree.md#graphitemergetree) if rules from the [rollup configuration](../../engines/table-engines/mergetree-family/graphitemergetree.md#rollup-configuration) could be applied
|
||||||
- [Grafana](https://grafana.com/)
|
- [Grafana](https://grafana.com/)
|
||||||
|
@ -38,3 +38,15 @@ toc_title: "Используемые сторонние библиотеки"
|
|||||||
| UnixODBC | [LGPL v2.1](https://github.com/ClickHouse-Extras/UnixODBC/tree/b0ad30f7f6289c12b76f04bfb9d466374bb32168) |
|
| UnixODBC | [LGPL v2.1](https://github.com/ClickHouse-Extras/UnixODBC/tree/b0ad30f7f6289c12b76f04bfb9d466374bb32168) |
|
||||||
| zlib-ng | [Zlib License](https://github.com/ClickHouse-Extras/zlib-ng/blob/develop/LICENSE.md) |
|
| zlib-ng | [Zlib License](https://github.com/ClickHouse-Extras/zlib-ng/blob/develop/LICENSE.md) |
|
||||||
| zstd | [BSD 3-Clause License](https://github.com/facebook/zstd/blob/dev/LICENSE) |
|
| zstd | [BSD 3-Clause License](https://github.com/facebook/zstd/blob/dev/LICENSE) |
|
||||||
|
|
||||||
|
## Recommendations for adding third-party libraries and maintaining custom changes in them {#adding-third-party-libraries}
|
||||||
|
|
||||||
|
1. All external third-party code must reside in dedicated directories under the `contrib` directory of the ClickHouse repository. Use Git submodules where possible.
|
||||||
|
2. Clone the official repository into [Clickhouse-extras](https://github.com/ClickHouse-Extras). Use official GitHub repositories when they are available.
|
||||||
|
3. Create a new branch based on the branch you want to integrate: for example, `master` -> `clickhouse/master` or `release/vX.Y.Z` -> `clickhouse/release/vX.Y.Z`.
|
||||||
|
4. All forks in [Clickhouse-extras](https://github.com/ClickHouse-Extras) can be automatically synchronized with the upstream repositories. The `clickhouse/...` branches will remain unaffected, since virtually nobody is going to use that naming pattern in their own repositories.
|
||||||
|
5. Add the submodules under the `contrib` directory of the ClickHouse repository, referencing the cloned repositories. Configure the submodules to track changes in the corresponding `clickhouse/...` branches.
|
||||||
|
6. Every time library code needs to be changed, create a dedicated branch, for example `clickhouse/my-fix`. Then merge that branch into the branch tracked by the submodule, for example `clickhouse/master` or `clickhouse/release/vX.Y.Z`.
|
||||||
|
7. Do not add code to the clones in [Clickhouse-extras](https://github.com/ClickHouse-Extras) unless the branch name follows the `clickhouse/...` pattern.
|
||||||
|
8. Always make changes with the official repository in mind. Once your PR has been merged from the development/fix branch of your personal fork into [Clickhouse-extras](https://github.com/ClickHouse-Extras) and the submodule has been added to the ClickHouse repository, it is recommended to open one more PR from the development/fix branch of the [Clickhouse-extras](https://github.com/ClickHouse-Extras) repository to the official repository of the library. This solves the following tasks: 1) the published code can be reused and gains more value; 2) other users can also use it for their own purposes; 3) the code is maintained by more than just the ClickHouse developers.
|
||||||
|
9. For a submodule to pick up new code from the original branch (for example, `master`), first merge carefully (`master` -> `clickhouse/master`), and only then add the changes to the main ClickHouse repository. This is because the tracked branch (for example, `clickhouse/master`) may contain changes of its own, so it may diverge from its upstream (`master`).
|
||||||
|
@ -92,7 +92,7 @@ ClickHouse не работает и не собирается на 32-битны
|
|||||||
# The last two commands can be combined:
|
# The last two commands can be combined:
|
||||||
git submodule update --init
|
git submodule update --init
|
||||||
|
|
||||||
The next commands would help you to reset all submodules to the initial state (!WARING! - any chenges inside will be deleted):
|
The following commands help you reset all submodules to their initial state (!WARNING! - any changes inside will be deleted):
|
||||||
The following commands help you reset all submodules to their initial state (!WARNING! - all changes in the submodules will be lost):
|
The following commands help you reset all submodules to their initial state (!WARNING! - all changes in the submodules will be lost):
|
||||||
|
|
||||||
# Synchronizes submodules' remote URL with .gitmodules
|
# Synchronizes submodules' remote URL with .gitmodules
|
||||||
@ -242,6 +242,8 @@ sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)"
|
|||||||
|
|
||||||
Code style: https://clickhouse.tech/docs/ru/development/style/
|
Code style: https://clickhouse.tech/docs/ru/development/style/
|
||||||
|
|
||||||
|
Recommendations for adding third-party libraries and maintaining custom changes in them: https://clickhouse.tech/docs/ru/development/contrib/#adding-third-party-libraries
|
||||||
|
|
||||||
Test development: https://clickhouse.tech/docs/ru/development/tests/
|
Test development: https://clickhouse.tech/docs/ru/development/tests/
|
||||||
|
|
||||||
Task list: https://github.com/ClickHouse/ClickHouse/issues?q=is%3Aopen+is%3Aissue+label%3A%22easy+task%22
|
Task list: https://github.com/ClickHouse/ClickHouse/issues?q=is%3Aopen+is%3Aissue+label%3A%22easy+task%22
|
||||||
|
@ -820,11 +820,11 @@ The dictionary is configured incorrectly.
|
|||||||
|
|
||||||
**10.** Unused code is removed from the sources.
|
**10.** Unused code is removed from the sources.
|
||||||
|
|
||||||
## Libraries {#biblioteki}
|
## Libraries {#libraries}
|
||||||
|
|
||||||
**1.** The C++20 standard library is used (experimental extensions are permitted), as well as the `boost` and `Poco` frameworks.
|
**1.** The C++20 standard libraries are used (experimental extensions are permitted), as well as the `boost` and `Poco` frameworks.
|
||||||
|
|
||||||
**2.** Libraries must be placed as source code in the `contrib` directory and built together with ClickHouse. Using libraries from OS packages or any other way of installing libraries into the system is not allowed.
|
**2.** Libraries must be placed as source code in the `contrib` directory and built together with ClickHouse. Using libraries from OS packages or any other way of installing libraries into the system is not allowed. For details, see the section [Recommendations for adding third-party libraries and maintaining custom changes in them](contrib.md#adding-third-party-libraries).
|
||||||
|
|
||||||
**3.** Preference is given to libraries that are already in use.
|
**3.** Preference is given to libraries that are already in use.
|
||||||
|
|
||||||
@ -902,4 +902,3 @@ function(
|
|||||||
const & RangesInDataParts ranges,
|
const & RangesInDataParts ranges,
|
||||||
size_t limit)
|
size_t limit)
|
||||||
```
|
```
|
||||||
|
|
||||||
|
@ -14,7 +14,7 @@ toc_title: "Введение"
|
|||||||
|
|
||||||
- [MySQL](../../engines/database-engines/mysql.md)
|
- [MySQL](../../engines/database-engines/mysql.md)
|
||||||
|
|
||||||
- [MaterializeMySQL](../../engines/database-engines/materialize-mysql.md)
|
- [MaterializedMySQL](../../engines/database-engines/materialized-mysql.md)
|
||||||
|
|
||||||
- [Lazy](../../engines/database-engines/lazy.md)
|
- [Lazy](../../engines/database-engines/lazy.md)
|
||||||
|
|
||||||
|
@ -1,22 +1,22 @@
|
|||||||
|
|
||||||
---
|
---
|
||||||
toc_priority: 29
|
toc_priority: 29
|
||||||
toc_title: MaterializeMySQL
|
toc_title: MaterializedMySQL
|
||||||
---
|
---
|
||||||
|
|
||||||
# MaterializeMySQL {#materialize-mysql}
|
# MaterializedMySQL {#materialized-mysql}
|
||||||
|
|
||||||
Creates a ClickHouse database with all the tables existing in MySQL and all the data in those tables.
|
Creates a ClickHouse database with all the tables existing in MySQL and all the data in those tables.
|
||||||
|
|
||||||
The ClickHouse server works as a MySQL replica. It reads the binlog and performs DDL and DML queries.
|
The ClickHouse server works as a MySQL replica. It reads the binlog and performs DDL and DML queries.
|
||||||
|
|
||||||
`MaterializeMySQL` is an experimental database engine.
|
`MaterializedMySQL` is an experimental database engine.
|
||||||
|
|
||||||
## Creating a Database {#creating-a-database}
|
## Creating a Database {#creating-a-database}
|
||||||
|
|
||||||
``` sql
|
``` sql
|
||||||
CREATE DATABASE [IF NOT EXISTS] db_name [ON CLUSTER cluster]
|
CREATE DATABASE [IF NOT EXISTS] db_name [ON CLUSTER cluster]
|
||||||
ENGINE = MaterializeMySQL('host:port', ['database' | database], 'user', 'password') [SETTINGS ...]
|
ENGINE = MaterializedMySQL('host:port', ['database' | database], 'user', 'password') [SETTINGS ...]
|
||||||
```
|
```
|
||||||
|
|
||||||
**Engine Parameters**
|
**Engine Parameters**
|
||||||
@ -28,7 +28,7 @@ ENGINE = MaterializeMySQL('host:port', ['database' | database], 'user', 'passwor
|
|||||||
|
|
||||||
## Virtual Columns {#virtual-columns}
|
## Virtual Columns {#virtual-columns}
|
||||||
|
|
||||||
The `MaterializeMySQL` database engine works with [ReplacingMergeTree](../../engines/table-engines/mergetree-family/replacingmergetree.md) family tables that have the virtual columns `_sign` and `_version`.
|
The `MaterializedMySQL` database engine works with [ReplacingMergeTree](../../engines/table-engines/mergetree-family/replacingmergetree.md) family tables that have the virtual columns `_sign` and `_version`.
|
||||||
|
|
||||||
- `_version` — transaction counter. Type: [UInt64](../../sql-reference/data-types/int-uint.md).
|
- `_version` — transaction counter. Type: [UInt64](../../sql-reference/data-types/int-uint.md).
|
||||||
- `_sign` — deletion marker. Type: [Int8](../../sql-reference/data-types/int-uint.md). Possible values:
|
- `_sign` — deletion marker. Type: [Int8](../../sql-reference/data-types/int-uint.md). Possible values:
|
||||||
@ -75,9 +75,9 @@ DDL-запросы в MySQL конвертируются в соответств
|
|||||||
|
|
||||||
- An `UPDATE` query is converted in ClickHouse into an `INSERT` with `_sign=-1` and an `INSERT` with `_sign=1`.
|
- An `UPDATE` query is converted in ClickHouse into an `INSERT` with `_sign=-1` and an `INSERT` with `_sign=1`.
|
||||||
|
|
||||||
### Selecting from MaterializeMySQL Engine Tables {#select}
|
### Selecting from MaterializedMySQL Engine Tables {#select}
|
||||||
|
|
||||||
A `SELECT` query from `MaterializeMySQL` engine tables has some specifics:
|
A `SELECT` query from `MaterializedMySQL` engine tables has some specifics:
|
||||||
|
|
||||||
- If the `_version` column is not specified explicitly in the `SELECT` query, the [FINAL](../../sql-reference/statements/select/from.md#select-from-final) modifier is used, so only rows with `MAX(_version)` are selected.
|
- If the `_version` column is not specified explicitly in the `SELECT` query, the [FINAL](../../sql-reference/statements/select/from.md#select-from-final) modifier is used, so only rows with `MAX(_version)` are selected.
|
||||||
|
|
||||||
@ -94,10 +94,10 @@ DDL-запросы в MySQL конвертируются в соответств
|
|||||||
**Note**
|
**Note**
|
||||||
|
|
||||||
- Rows with `_sign=-1` are not physically deleted from the tables.
|
- Rows with `_sign=-1` are not physically deleted from the tables.
|
||||||
- Cascading `UPDATE/DELETE` queries are not supported by the `MaterializeMySQL` engine.
|
- Cascading `UPDATE/DELETE` queries are not supported by the `MaterializedMySQL` engine.
|
||||||
- Replication can be easily broken.
|
- Replication can be easily broken.
|
||||||
- Direct data modification operations on `MaterializeMySQL` tables and databases are prohibited.
|
- Direct data modification operations on `MaterializedMySQL` tables and databases are prohibited.
|
||||||
- `MaterializeMySQL` is affected by the [optimize_on_insert](../../operations/settings/settings.md#optimize-on-insert) setting. When a table on the MySQL server changes, the data is merged into the corresponding table in the `MaterializeMySQL` database.
|
- `MaterializedMySQL` is affected by the [optimize_on_insert](../../operations/settings/settings.md#optimize-on-insert) setting. When a table on the MySQL server changes, the data is merged into the corresponding table in the `MaterializedMySQL` database.
|
||||||
|
|
||||||
## Usage Examples {#examples-of-use}
|
## Usage Examples {#examples-of-use}
|
||||||
|
|
||||||
@ -126,7 +126,7 @@ mysql> SELECT * FROM test;
|
|||||||
The database and the created table:
|
The database and the created table:
|
||||||
|
|
||||||
``` sql
|
``` sql
|
||||||
CREATE DATABASE mysql ENGINE = MaterializeMySQL('localhost:3306', 'db', 'user', '***');
|
CREATE DATABASE mysql ENGINE = MaterializedMySQL('localhost:3306', 'db', 'user', '***');
|
||||||
SHOW TABLES FROM mysql;
|
SHOW TABLES FROM mysql;
|
||||||
```
|
```
|
||||||
|
|
@ -14,6 +14,8 @@ toc_priority: 29
|
|||||||
- [Log](log.md)
|
- [Log](log.md)
|
||||||
- [TinyLog](tinylog.md)
|
- [TinyLog](tinylog.md)
|
||||||
|
|
||||||
|
The `Log` family table engines can store data in the [HDFS](../../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-hdfs) or [S3](../../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-s3) distributed file systems.
|
||||||
|
|
||||||
## Common Properties {#obshchie-svoistva}
|
## Common Properties {#obshchie-svoistva}
|
||||||
|
|
||||||
Engines:
|
Engines:
|
||||||
|
@ -5,9 +5,8 @@ toc_title: Log
|
|||||||
|
|
||||||
# Log {#log}
|
# Log {#log}
|
||||||
|
|
||||||
The engine belongs to the Log family of engines. See the common properties of the engines and their differences in the [Log Family](index.md) article.
|
The engine belongs to the `Log` family of engines. See the common properties of the engines and their differences in the [Log Family](../../../engines/table-engines/log-family/index.md) article.
|
||||||
|
|
||||||
It differs from [TinyLog](tinylog.md) in that a small file of "marks" is stored with the column files. The marks are written on every data block and contain offsets - from which position to read the file in order to skip the specified number of rows. This makes it possible to read table data in multiple threads.
|
|
||||||
Under concurrent data access, reads can be performed simultaneously, while writes block both reads and each other.
|
|
||||||
The Log engine does not support indexes. Similarly, if writing to a table fails, the table becomes broken, and reading from it returns an error. The Log engine is suitable for temporary data, write-once tables, and for testing or demonstration purposes.
|
|
||||||
|
|
||||||
|
It differs from [TinyLog](../../../engines/table-engines/log-family/tinylog.md) in that a small file of "marks" is stored with the column files. The marks are written on every data block and contain offsets: from which position to read the file in order to skip the specified number of rows. This makes it possible to read table data in multiple threads.
|
||||||
|
Under concurrent data access, reads can be performed simultaneously, while writes block both reads and each other.
|
||||||
|
The `Log` engine does not support indexes. Similarly, if writing to a table fails, the table becomes broken, and reading from it returns an error. The `Log` engine is suitable for temporary data, write-once tables, and for testing or demonstration purposes.
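A tiny illustration (a sketch; the names are made up):

``` sql
CREATE TABLE log_example (id UInt64, msg String) ENGINE = Log;
```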
|
||||||
|
@ -771,7 +771,6 @@ SETTINGS storage_policy = 'moving_from_ssd_to_hdd'
|
|||||||
- `cache_path` — path on the local file system where the marks cache and index files are stored. Default value: `/var/lib/clickhouse/disks/<disk_name>/cache/`.
|
- `cache_path` — path on the local file system where the marks cache and index files are stored. Default value: `/var/lib/clickhouse/disks/<disk_name>/cache/`.
|
||||||
- `skip_access_check` — whether to perform an access check when the disk starts. If set to `true`, the check is not performed. Default value: `false`.
|
- `skip_access_check` — whether to perform an access check when the disk starts. If set to `true`, the check is not performed. Default value: `false`.
|
||||||
|
|
||||||
|
|
||||||
An S3 disk can be configured as `main` or `cold`:
|
An S3 disk can be configured as `main` or `cold`:
|
||||||
|
|
||||||
``` xml
|
``` xml
|
||||||
@ -810,3 +809,44 @@ SETTINGS storage_policy = 'moving_from_ssd_to_hdd'
|
|||||||
```
|
```
|
||||||
|
|
||||||
If the disk is configured as `cold`, data is moved to S3 when TTL rules fire or when free space on the local disk drops below a threshold defined as `move_factor * disk_size`.
|
If the disk is configured as `cold`, data is moved to S3 when TTL rules fire or when free space on the local disk drops below a threshold defined as `move_factor * disk_size`.
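For example (an illustrative sketch; it assumes a hypothetical storage policy named `s3_cold`, with the S3 disk configured as `cold`, exists in the server configuration):

``` sql
CREATE TABLE cold_storage_example
(
    id UInt64,
    payload String
)
ENGINE = MergeTree()
ORDER BY id
SETTINGS storage_policy = 's3_cold';
```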
|
||||||
|
|
||||||
|
## Using HDFS for Data Storage {#table_engine-mergetree-hdfs}
|
||||||
|
|
||||||
|
[HDFS](https://hadoop.apache.org/docs/r1.2.1/hdfs_design.html) is a distributed file system for remote data storage.
|
||||||
|
|
||||||
|
`MergeTree` family tables can store data in HDFS by using a disk of type `HDFS`.
|
||||||
|
|
||||||
|
Configuration example:
|
||||||
|
``` xml
|
||||||
|
<yandex>
|
||||||
|
<storage_configuration>
|
||||||
|
<disks>
|
||||||
|
<hdfs>
|
||||||
|
<type>hdfs</type>
|
||||||
|
<endpoint>hdfs://hdfs1:9000/clickhouse/</endpoint>
|
||||||
|
</hdfs>
|
||||||
|
</disks>
|
||||||
|
<policies>
|
||||||
|
<hdfs>
|
||||||
|
<volumes>
|
||||||
|
<main>
|
||||||
|
<disk>hdfs</disk>
|
||||||
|
</main>
|
||||||
|
</volumes>
|
||||||
|
</hdfs>
|
||||||
|
</policies>
|
||||||
|
</storage_configuration>
|
||||||
|
|
||||||
|
<merge_tree>
|
||||||
|
<min_bytes_for_wide_part>0</min_bytes_for_wide_part>
|
||||||
|
</merge_tree>
|
||||||
|
</yandex>
|
||||||
|
```
|
||||||
|
|
||||||
|
Required parameters:
|
||||||
|
|
||||||
|
- `endpoint` — the HDFS request endpoint URL in `path` format. The endpoint URL must contain the path to the root directory on the server where the data is stored.
|
||||||
|
|
||||||
|
Optional parameters:
|
||||||
|
|
||||||
|
- `min_bytes_for_seek` — the minimum number of bytes to use a seek operation instead of sequential reading. Default value: 1 MB.
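A hedged sketch of placing a table on the `hdfs` policy defined in the configuration above (the table name is made up):

``` sql
CREATE TABLE hdfs_example
(
    id UInt64,
    data String
)
ENGINE = MergeTree()
ORDER BY id
SETTINGS storage_policy = 'hdfs';
```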
|
||||||
|
@ -43,7 +43,7 @@ toc_title: "Библиотеки для интеграции от сторонн
|
|||||||
- Monitoring
|
- Monitoring
|
||||||
- [Graphite](https://graphiteapp.org)
|
- [Graphite](https://graphiteapp.org)
|
||||||
- [graphouse](https://github.com/yandex/graphouse)
|
- [graphouse](https://github.com/yandex/graphouse)
|
||||||
- [carbon-clickhouse](https://github.com/lomik/carbon-clickhouse) +
|
- [carbon-clickhouse](https://github.com/lomik/carbon-clickhouse)
|
||||||
- [graphite-clickhouse](https://github.com/lomik/graphite-clickhouse)
|
- [graphite-clickhouse](https://github.com/lomik/graphite-clickhouse)
|
||||||
- [graphite-ch-optimizer](https://github.com/innogames/graphite-ch-optimizer) - optimizes partitions of [\*GraphiteMergeTree](../../engines/table-engines/mergetree-family/graphitemergetree.md#graphitemergetree) tables according to the rules in the [rollup configuration](../../engines/table-engines/mergetree-family/graphitemergetree.md#rollup-configuration)
|
- [graphite-ch-optimizer](https://github.com/innogames/graphite-ch-optimizer) - optimizes partitions of [\*GraphiteMergeTree](../../engines/table-engines/mergetree-family/graphitemergetree.md#graphitemergetree) tables according to the rules in the [rollup configuration](../../engines/table-engines/mergetree-family/graphitemergetree.md#rollup-configuration)
|
||||||
- [Grafana](https://grafana.com/)
|
- [Grafana](https://grafana.com/)
|
||||||
|
@ -277,4 +277,15 @@ Eсли суммарное число активных кусков во все
|
|||||||
|
|
||||||
Default value: `0`.
|
Default value: `0`.
|
||||||
|
|
||||||
[Original article](https://clickhouse.tech/docs/ru/operations/settings/merge_tree_settings/) <!--hide-->
|
## check_sample_column_is_correct {#check_sample_column_is_correct}
|
||||||
|
|
||||||
|
Enables a check at table creation that the data type of the sampling column or sampling expression is correct. The data type must be one of the unsigned [integer types](../../sql-reference/data-types/int-uint.md): `UInt8`, `UInt16`, `UInt32`, `UInt64`.
|
||||||
|
|
||||||
|
Possible values:
|
||||||
|
|
||||||
|
- true — the check is enabled.
|
||||||
|
- false — the check is not performed at table creation.
|
||||||
|
|
||||||
|
Default value: `true`.
|
||||||
|
|
||||||
|
By default, the ClickHouse server checks the data type of the sampling column or sampling expression at table creation. If tables with an incorrect sampling expression already exist, set `check_sample_column_is_correct` to `false` to avoid an exception at server startup.
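A minimal sketch of a table that passes this check (names are illustrative):

``` sql
CREATE TABLE sample_check_example
(
    id UInt64,
    user_id UInt64
)
ENGINE = MergeTree()
ORDER BY (id, intHash32(user_id))
SAMPLE BY intHash32(user_id);
```

Here the sampling expression `intHash32(user_id)` returns `UInt32`, one of the allowed unsigned integer types.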
|
||||||
|
@ -2777,7 +2777,7 @@ SELECT * FROM test2;
|
|||||||
└─────────────┘
|
└─────────────┘
|
||||||
```
|
```
|
||||||
|
|
||||||
Note that this setting affects the behavior of [materialized views](../../sql-reference/statements/create/view.md#materialized) and [MaterializeMySQL](../../engines/database-engines/materialize-mysql.md) databases.
|
Note that this setting affects the behavior of [materialized views](../../sql-reference/statements/create/view.md#materialized) and [MaterializedMySQL](../../engines/database-engines/materialized-mysql.md) databases.
|
||||||
|
|
||||||
## engine_file_empty_if_not_exists {#engine-file-empty_if-not-exists}
|
## engine_file_empty_if_not_exists {#engine-file-empty_if-not-exists}
|
||||||
|
|
||||||
|
@ -35,4 +35,3 @@ SELECT * FROM system.asynchronous_metrics LIMIT 10
|
|||||||
- [system.events](#system_tables-events) — a table with the number of events that have occurred.
|
- [system.events](#system_tables-events) — a table with the number of events that have occurred.
|
||||||
- [system.metric_log](#system_tables-metric_log) — a table that records the history of metric values from `system.metrics` and `system.events`.
|
- [system.metric_log](#system_tables-metric_log) — a table that records the history of metric values from `system.metrics` and `system.events`.
|
||||||
|
|
||||||
|
|
@ -61,4 +61,3 @@ exception_code: ZOK
|
|||||||
2 rows in set. Elapsed: 0.025 sec.
|
2 rows in set. Elapsed: 0.025 sec.
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|
@ -51,6 +51,7 @@ ClickHouse не удаляет данные из таблица автомати
|
|||||||
- `databases` ([Array](../../sql-reference/data-types/array.md)([LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md))) — names of the databases present in the query.
|
- `databases` ([Array](../../sql-reference/data-types/array.md)([LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md))) — names of the databases present in the query.
|
||||||
- `tables` ([Array](../../sql-reference/data-types/array.md)([LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md))) — names of the tables present in the query.
|
- `tables` ([Array](../../sql-reference/data-types/array.md)([LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md))) — names of the tables present in the query.
|
||||||
- `columns` ([Array](../../sql-reference/data-types/array.md)([LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md))) — names of the columns present in the query.
|
- `columns` ([Array](../../sql-reference/data-types/array.md)([LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md))) — names of the columns present in the query.
|
||||||
|
- `projections` ([String](../../sql-reference/data-types/string.md)) — names of the projections used during query execution.
|
||||||
- `exception_code` ([Int32](../../sql-reference/data-types/int-uint.md)) — exception code.
|
- `exception_code` ([Int32](../../sql-reference/data-types/int-uint.md)) — exception code.
|
||||||
- `exception` ([String](../../sql-reference/data-types/string.md)) — exception message, if the query terminated with an exception.
|
- `exception` ([String](../../sql-reference/data-types/string.md)) — exception message, if the query terminated with an exception.
|
||||||
- `stack_trace` ([String](../../sql-reference/data-types/string.md)) — [stack trace](https://en.wikipedia.org/wiki/Stack_trace). An empty string if the query completed successfully.
|
- `stack_trace` ([String](../../sql-reference/data-types/string.md)) — [stack trace](https://en.wikipedia.org/wiki/Stack_trace). An empty string if the query completed successfully.
|
||||||
@ -65,6 +66,8 @@ ClickHouse не удаляет данные из таблица автомати
|
|||||||
- `initial_query_id` ([String](../../sql-reference/data-types/string.md)) — ID of the parent query.
|
- `initial_query_id` ([String](../../sql-reference/data-types/string.md)) — ID of the parent query.
|
||||||
- `initial_address` ([IPv6](../../sql-reference/data-types/domains/ipv6.md)) — IP address from which the parent query came.
|
- `initial_address` ([IPv6](../../sql-reference/data-types/domains/ipv6.md)) — IP address from which the parent query came.
|
||||||
- `initial_port` ([UInt16](../../sql-reference/data-types/int-uint.md)) — the client port used to make the parent query.
|
- `initial_port` ([UInt16](../../sql-reference/data-types/int-uint.md)) — the client port used to make the parent query.
|
||||||
|
- `initial_query_start_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — query processing start time (for distributed query execution).
|
||||||
|
- `initial_query_start_time_microseconds` ([DateTime64](../../sql-reference/data-types/datetime64.md)) — query processing start time with microseconds precision (for distributed query execution).
|
||||||
- `interface` ([UInt8](../../sql-reference/data-types/int-uint.md)) — the interface the query was initiated from. Possible values:
|
- `interface` ([UInt8](../../sql-reference/data-types/int-uint.md)) — the interface the query was initiated from. Possible values:
|
||||||
- 1 — TCP.
|
- 1 — TCP.
|
||||||
- 2 — HTTP.
|
- 2 — HTTP.
|
||||||
@ -101,55 +104,77 @@ ClickHouse не удаляет данные из таблица автомати
|
|||||||
**Example**
|
**Example**
|
||||||
|
|
||||||
``` sql
|
``` sql
|
||||||
SELECT * FROM system.query_log WHERE type = 'QueryFinish' AND (query LIKE '%toDate(\'2000-12-05\')%') ORDER BY query_start_time DESC LIMIT 1 FORMAT Vertical;
|
SELECT * FROM system.query_log WHERE type = 'QueryFinish' ORDER BY query_start_time DESC LIMIT 1 FORMAT Vertical;
|
||||||
```
|
```
|
||||||
|
|
||||||
``` text
|
``` text
|
||||||
Row 1:
|
Row 1:
|
||||||
──────
|
──────
|
||||||
type: QueryStart
|
type: QueryFinish
|
||||||
event_date: 2020-09-11
|
event_date: 2021-07-28
|
||||||
event_time: 2020-09-11 10:08:17
|
event_time: 2021-07-28 13:46:56
|
||||||
event_time_microseconds: 2020-09-11 10:08:17.063321
|
event_time_microseconds: 2021-07-28 13:46:56.719791
|
||||||
query_start_time: 2020-09-11 10:08:17
|
query_start_time: 2021-07-28 13:46:56
|
||||||
query_start_time_microseconds: 2020-09-11 10:08:17.063321
|
query_start_time_microseconds: 2021-07-28 13:46:56.704542
|
||||||
query_duration_ms: 0
|
query_duration_ms: 14
|
||||||
read_rows: 0
|
read_rows: 8393
|
||||||
read_bytes: 0
|
read_bytes: 374325
|
||||||
written_rows: 0
|
written_rows: 0
|
||||||
written_bytes: 0
|
written_bytes: 0
|
||||||
result_rows: 0
|
result_rows: 4201
|
||||||
result_bytes: 0
|
result_bytes: 153024
|
||||||
memory_usage: 0
|
memory_usage: 4714038
|
||||||
current_database: default
|
current_database: default
|
||||||
query: INSERT INTO test1 VALUES
|
query: SELECT DISTINCT arrayJoin(extractAll(name, '[\\w_]{2,}')) AS res FROM (SELECT name FROM system.functions UNION ALL SELECT name FROM system.table_engines UNION ALL SELECT name FROM system.formats UNION ALL SELECT name FROM system.table_functions UNION ALL SELECT name FROM system.data_type_families UNION ALL SELECT name FROM system.merge_tree_settings UNION ALL SELECT name FROM system.settings UNION ALL SELECT cluster FROM system.clusters UNION ALL SELECT macro FROM system.macros UNION ALL SELECT policy_name FROM system.storage_policies UNION ALL SELECT concat(func.name, comb.name) FROM system.functions AS func CROSS JOIN system.aggregate_function_combinators AS comb WHERE is_aggregate UNION ALL SELECT name FROM system.databases LIMIT 10000 UNION ALL SELECT DISTINCT name FROM system.tables LIMIT 10000 UNION ALL SELECT DISTINCT name FROM system.dictionaries LIMIT 10000 UNION ALL SELECT DISTINCT name FROM system.columns LIMIT 10000) WHERE notEmpty(res)
|
||||||
exception_code: 0
|
normalized_query_hash: 6666026786019643712
|
||||||
|
query_kind: Select
|
||||||
|
databases: ['system']
|
||||||
|
tables: ['system.aggregate_function_combinators','system.clusters','system.columns','system.data_type_families','system.databases','system.dictionaries','system.formats','system.functions','system.macros','system.merge_tree_settings','system.settings','system.storage_policies','system.table_engines','system.table_functions','system.tables']
|
||||||
|
columns: ['system.aggregate_function_combinators.name','system.clusters.cluster','system.columns.name','system.data_type_families.name','system.databases.name','system.dictionaries.name','system.formats.name','system.functions.is_aggregate','system.functions.name','system.macros.macro','system.merge_tree_settings.name','system.settings.name','system.storage_policies.policy_name','system.table_engines.name','system.table_functions.name','system.tables.name']
|
||||||
|
projections: []
|
||||||
|
exception_code: 0
|
||||||
exception:
|
exception:
|
||||||
stack_trace:
|
stack_trace:
|
||||||
is_initial_query: 1
|
is_initial_query: 1
|
||||||
user: default
|
user: default
|
||||||
query_id: 50a320fd-85a8-49b8-8761-98a86bcbacef
|
query_id: a3361f6e-a1fd-4d54-9f6f-f93a08bab0bf
|
||||||
address: ::ffff:127.0.0.1
|
address: ::ffff:127.0.0.1
|
||||||
port: 33452
|
port: 51006
|
||||||
initial_user: default
|
initial_user: default
|
||||||
initial_query_id: 50a320fd-85a8-49b8-8761-98a86bcbacef
|
initial_query_id: a3361f6e-a1fd-4d54-9f6f-f93a08bab0bf
|
||||||
initial_address: ::ffff:127.0.0.1
|
initial_address: ::ffff:127.0.0.1
|
||||||
initial_port: 33452
|
initial_port: 51006
|
||||||
interface: 1
|
initial_query_start_time: 2021-07-28 13:46:56
|
||||||
os_user: bharatnc
|
initial_query_start_time_microseconds: 2021-07-28 13:46:56.704542
|
||||||
client_hostname: tower
|
interface: 1
|
||||||
client_name: ClickHouse
|
os_user:
|
||||||
client_revision: 54437
|
client_hostname:
|
||||||
client_version_major: 20
|
client_name: ClickHouse client
|
||||||
client_version_minor: 7
|
client_revision: 54449
|
||||||
client_version_patch: 2
|
client_version_major: 21
|
||||||
http_method: 0
|
client_version_minor: 8
|
||||||
|
client_version_patch: 0
|
||||||
|
http_method: 0
|
||||||
http_user_agent:
|
http_user_agent:
|
||||||
|
http_referer:
|
||||||
|
forwarded_for:
|
||||||
quota_key:
|
quota_key:
|
||||||
revision: 54440
|
revision: 54453
|
||||||
thread_ids: []
|
log_comment:
|
||||||
ProfileEvents: {'Query':1,'SelectQuery':1,'ReadCompressedBytes':36,'CompressedReadBufferBlocks':1,'CompressedReadBufferBytes':10,'IOBufferAllocs':1,'IOBufferAllocBytes':89,'ContextLock':15,'RWLockAcquiredReadLocks':1}
|
thread_ids: [5058,22097,22110,22094]
|
||||||
Settings: {'background_pool_size':'32','load_balancing':'random','allow_suspicious_low_cardinality_types':'1','distributed_aggregation_memory_efficient':'1','skip_unavailable_shards':'1','log_queries':'1','max_bytes_before_external_group_by':'20000000000','max_bytes_before_external_sort':'20000000000','allow_introspection_functions':'1'}
|
ProfileEvents.Names: ['Query','SelectQuery','ArenaAllocChunks','ArenaAllocBytes','FunctionExecute','NetworkSendElapsedMicroseconds','SelectedRows','SelectedBytes','ContextLock','RWLockAcquiredReadLocks','RealTimeMicroseconds','UserTimeMicroseconds','SystemTimeMicroseconds','SoftPageFaults','OSCPUWaitMicroseconds','OSCPUVirtualTimeMicroseconds','OSWriteBytes','OSWriteChars']
|
||||||
|
ProfileEvents.Values: [1,1,39,352256,64,360,8393,374325,412,440,34480,13108,4723,671,19,17828,8192,10240]
|
||||||
|
Settings.Names: ['load_balancing','max_memory_usage']
|
||||||
|
Settings.Values: ['random','10000000000']
|
||||||
|
used_aggregate_functions: []
|
||||||
|
used_aggregate_function_combinators: []
|
||||||
|
used_database_engines: []
|
||||||
|
used_data_type_families: ['UInt64','UInt8','Nullable','String','date']
|
||||||
|
used_dictionaries: []
|
||||||
|
used_formats: []
|
||||||
|
used_functions: ['concat','notEmpty','extractAll']
|
||||||
|
used_storages: []
|
||||||
|
used_table_functions: []
|
||||||
```
|
```
|
||||||
|
|
||||||
**Смотрите также**
|
**Смотрите также**
|
||||||
|
@ -5,15 +5,26 @@ toc_title: "Манипуляции со столбцами"

# Column Manipulations {#manipuliatsii-so-stolbtsami}

A set of actions that lets you change the table structure.

Syntax:

``` sql
ALTER TABLE [db].name [ON CLUSTER cluster] ADD|DROP|CLEAR|COMMENT|MODIFY COLUMN ...
```

Several actions on one table can be specified in a single query, separated by commas.
Each action is an operation on a column.

The following actions exist:

- [ADD COLUMN](#alter_add-column) — adds a column to the table;
- [DROP COLUMN](#alter_drop-column) — drops the column;
- [RENAME COLUMN](#alter_rename-column) — renames an existing column.
- [CLEAR COLUMN](#alter_clear-column) — resets all values in the column for the given partition;
- [COMMENT COLUMN](#alter_comment-column) — adds a comment to the column;
- [MODIFY COLUMN](#alter_modify-column) — changes the column type, default expression, and TTL.
- [MODIFY COLUMN REMOVE](#modify-remove) — removes one of the column properties.
- [RENAME COLUMN](#alter_rename-column) — renames an existing column.

A detailed description of each action is given below.
@ -72,6 +83,22 @@ DROP COLUMN [IF EXISTS] name

ALTER TABLE visits DROP COLUMN browser
```

## RENAME COLUMN {#alter_rename-column}

``` sql
RENAME COLUMN [IF EXISTS] name to new_name
```

Renames the column `name` to `new_name`. If the `IF EXISTS` clause is specified, the query does not return an error if the column `name` does not exist. Since renaming does not touch the physical data of the column, the query completes almost instantly.

**NOTE**: Columns that are part of the primary key or the sorting key (declared with `ORDER BY` or `PRIMARY KEY`) cannot be renamed. Trying to rename such a column results in `SQL Error [524]`.

Example:

``` sql
ALTER TABLE visits RENAME COLUMN webBrowser TO browser
```

## CLEAR COLUMN {#alter_clear-column}

``` sql
@ -109,7 +136,7 @@ ALTER TABLE visits COMMENT COLUMN browser 'Столбец показывает,

## MODIFY COLUMN {#alter_modify-column}

``` sql
MODIFY COLUMN [IF EXISTS] name [type] [default_expr] [TTL] [AFTER name_after | FIRST]
MODIFY COLUMN [IF EXISTS] name [type] [default_expr] [codec] [TTL] [AFTER name_after | FIRST]
```

The query changes the following properties of the column `name`:

@ -118,11 +145,15 @@ MODIFY COLUMN [IF EXISTS] name [type] [default_expr] [TTL] [AFTER name_after | F

- Default value

- Compression codecs

- TTL

For examples of changing the column TTL, see [Column TTL](../../../engines/table-engines/mergetree-family/mergetree.md#mergetree-column-ttl).
For examples of changing the compression codecs, see [Column Compression Codecs](../create/table.md#codecs).

If `IF EXISTS` is specified, the query does not return an error if the column does not exist.
For examples of changing the column TTL, see [Column TTL](../../../engines/table-engines/mergetree-family/mergetree.md#mergetree-column-ttl).

If `IF EXISTS` is specified, the query does not return an error if the column does not exist.

The query can also change the order of the columns using `FIRST | AFTER`, see the [ADD COLUMN](#alter_add-column) description.
@ -162,22 +193,6 @@ ALTER TABLE table_with_ttl MODIFY COLUMN column_ttl REMOVE TTL;

- [REMOVE TTL](ttl.md).

## RENAME COLUMN {#alter_rename-column}

Renames an existing column.

Syntax:

```sql
ALTER TABLE table_name RENAME COLUMN column_name TO new_column_name
```

**Example**

```sql
ALTER TABLE table_with_ttl RENAME COLUMN column_ttl TO column_ttl_new;
```

## ALTER Query Limitations {#ogranicheniia-zaprosa-alter}

The `ALTER` query lets you create and delete separate elements (columns) of nested data structures, but not whole nested data structures. To add a nested data structure, you can add columns with a name like `name.nested_name` and the type `Array(T)`: a nested data structure is fully equivalent to several array columns whose names share the same prefix before the dot.

@ -186,7 +201,6 @@ ALTER TABLE table_with_ttl RENAME COLUMN column_ttl TO column_ttl_new;

If the capabilities of the `ALTER` query are not enough for the table change you need, you can create a new table, copy the data into it with an [INSERT SELECT](../insert-into.md#insert_query_insert-select) query, swap the tables with a [RENAME](../misc.md#misc_operations-rename) query, and drop the old table. As an alternative to `INSERT SELECT`, you can use the [clickhouse-copier](../../../sql-reference/statements/alter/index.md) tool.

The `ALTER` query blocks all reads and writes for the table. In other words, if a long `SELECT` is running when the `ALTER` query arrives, the `ALTER` query first waits for it to complete, and meanwhile all new queries to the same table wait until this `ALTER` finishes.

For tables that do not store data themselves (such as [Merge](../../../sql-reference/statements/alter/index.md) and [Distributed](../../../sql-reference/statements/alter/index.md)), `ALTER` just changes the table structure and does not change the structure of subordinate tables. For example, when you run ALTER for a `Distributed` table, you also need to run `ALTER` for the tables on all remote servers.
60
docs/ru/sql-reference/statements/alter/setting.md
Normal file
@ -0,0 +1,60 @@

---
toc_priority: 38
toc_title: SETTING
---

# Table Settings Manipulations {#table_settings_manipulations}

There are queries that change table settings or reset them to their default values. Several settings can be changed in a single query.
If a setting with the given name does not exist, an exception is thrown.

**Syntax**

``` sql
ALTER TABLE [db].name [ON CLUSTER cluster] MODIFY|RESET SETTING ...
```

!!! note "Note"
    These queries can only be applied to tables of the [MergeTree](../../../engines/table-engines/mergetree-family/mergetree.md) family.

## MODIFY SETTING {#alter_modify_setting}

Changes table settings.

**Syntax**

```sql
MODIFY SETTING setting_name=value [, ...]
```

**Example**

```sql
CREATE TABLE example_table (id UInt32, data String) ENGINE=MergeTree() ORDER BY id;

ALTER TABLE example_table MODIFY SETTING max_part_loading_threads=8, max_parts_in_total=50000;
```

## RESET SETTING {#alter_reset_setting}

Resets table settings to their default values. If a setting is already at its default value, no action is taken.

**Syntax**

```sql
RESET SETTING setting_name [, ...]
```

**Example**

```sql
CREATE TABLE example_table (id UInt32, data String) ENGINE=MergeTree() ORDER BY id
    SETTINGS max_part_loading_threads=8;

ALTER TABLE example_table RESET SETTING max_part_loading_threads;
```

**See also**

- [MergeTree table settings](../../../operations/settings/merge-tree-settings.md)
@ -62,7 +62,7 @@ CREATE TABLE example (
    materialized_value UInt32 MATERIALIZED 12345,
    aliased_value UInt32 ALIAS 2,
    PRIMARY KEY primary_key
) ENGINE=MergeTree
PARTITION BY partition_key
ORDER BY (primary_key, secondary_key);
```
@ -43,7 +43,7 @@ Yandex**没有**维护下面列出的库,也没有做过任何广泛的测试
- Monitoring
- [Graphite](https://graphiteapp.org)
- [graphouse](https://github.com/yandex/graphouse)
- [carbon-clickhouse](https://github.com/lomik/carbon-clickhouse) +
- [carbon-clickhouse](https://github.com/lomik/carbon-clickhouse)
- [graphite-clickhouse](https://github.com/lomik/graphite-clickhouse)
- [graphite-ch-optimizer](https://github.com/innogames/graphite-ch-optimizer) - optimizes staled partitions in [\*GraphiteMergeTree](../../engines/table-engines/mergetree-family/graphitemergetree.md#graphitemergetree) if rules from [rollup configuration](../../engines/table-engines/mergetree-family/graphitemergetree.md#rollup-configuration) could be applied
- [Grafana](https://grafana.com/)
@ -6,4 +6,3 @@ toc_priority: 104

Selects the last value encountered.
The result is just as indeterminate as for the [any](../../../sql-reference/aggregate-functions/reference/any.md) function.
@ -11,7 +11,6 @@ set (CLICKHOUSE_COPIER_LINK
    clickhouse_functions
    clickhouse_table_functions
    clickhouse_aggregate_functions
    clickhouse_dictionaries
    string_utils

    PUBLIC
@ -33,7 +33,7 @@ static std::string extractFromConfig(
    {
        DB::ConfigurationPtr bootstrap_configuration(new Poco::Util::XMLConfiguration(config_xml));
        zkutil::ZooKeeperPtr zookeeper = std::make_shared<zkutil::ZooKeeper>(
            *bootstrap_configuration, "zookeeper");
            *bootstrap_configuration, "zookeeper", nullptr);
        zkutil::ZooKeeperNodeCache zk_node_cache([&] { return zookeeper; });
        config_xml = processor.processConfig(&has_zk_includes, &zk_node_cache);
    }
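A plausible reading of the new third argument, offered here as an assumption rather than something this hunk proves: the `zkutil::ZooKeeper` constructor appears to have gained an optional ZooKeeper-log parameter in this release (the server hunk further down calls `setSystemZooKeeperLogAfterInitializationIfNeeded()`), and this standalone tool opts out of it. A minimal sketch:

```cpp
// Sketch (assumption): the third constructor argument is the new optional
// ZooKeeper log sink; passing nullptr opts out of logging and preserves the
// old behaviour for clickhouse-extract-from-config.
auto zookeeper = std::make_shared<zkutil::ZooKeeper>(
    *bootstrap_configuration, "zookeeper", /* zk_log = */ nullptr);
```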
@ -181,15 +181,10 @@ void LibraryRequestHandler::handleRequest(HTTPServerRequest & request, HTTPServe
    }
    else if (method == "loadIds")
    {
        params.read(request.getStream());
        String ids_string;
        readString(ids_string, request.getStream());
        std::vector<uint64_t> ids = parseIdsFromBinary(ids_string);

        if (!params.has("ids"))
        {
            processError(response, "No 'ids' in request URL");
            return;
        }

        std::vector<uint64_t> ids = parseIdsFromBinary(params.get("ids"));
        auto library_handler = SharedLibraryHandlerFactory::instance().get(dictionary_id);
        const auto & sample_block = library_handler->getSampleBlock();
        auto input = library_handler->loadIds(ids);
@ -6,6 +6,7 @@ namespace DB

namespace ErrorCodes
{
    extern const int BAD_ARGUMENTS;
    extern const int LOGICAL_ERROR;
}

@ -17,7 +18,7 @@ SharedLibraryHandlerPtr SharedLibraryHandlerFactory::get(const std::string & dic
    if (library_handler != library_handlers.end())
        return library_handler->second;

    return nullptr;
    throw Exception(ErrorCodes::BAD_ARGUMENTS, "Not found dictionary with id: {}", dictionary_id);
}
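With `get()` throwing instead of returning `nullptr`, call sites no longer need a null check. A minimal sketch of the simplification (the `use(...)` caller is hypothetical, not code from this commit):

```cpp
// Before: every caller had to guard against a missing handler.
// if (auto handler = SharedLibraryHandlerFactory::instance().get(dictionary_id))
//     use(handler);
// else
//     processError(response, "Unknown dictionary id");

// After: a missing id raises BAD_ARGUMENTS, so the happy path is linear.
auto handler = SharedLibraryHandlerFactory::instance().get(dictionary_id);
use(handler);  // never nullptr here
```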
@ -6,7 +6,6 @@ set (CLICKHOUSE_LOCAL_LINK
    clickhouse_aggregate_functions
    clickhouse_common_config
    clickhouse_common_io
    clickhouse_dictionaries
    clickhouse_functions
    clickhouse_parsers
    clickhouse_storages_system
@ -322,7 +322,7 @@ struct Checker
    {
        checkRequiredInstructions();
    }
} checker;
} checker __attribute__((init_priority(101))); /// Run before other static initializers.

}
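For readers unfamiliar with the attribute: GCC and Clang construct static objects with a lower `init_priority` value earlier, regardless of declaration order within the translation unit, and values 1 to 100 are reserved for the implementation, so 101 is the earliest priority available to application code. A self-contained sketch of the semantics (illustrative types, not code from this commit):

```cpp
#include <cstdio>

struct A { A() { std::puts("A first"); } };
struct B { B() { std::puts("B second"); } };

// Despite being declared later, `a` is constructed before `b`,
// because a lower init_priority value runs earlier.
B b __attribute__((init_priority(102)));
A a __attribute__((init_priority(101)));

int main() {}  // prints "A first" then "B second"
```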
@ -13,7 +13,6 @@ set (CLICKHOUSE_SERVER_LINK
    clickhouse_common_config
    clickhouse_common_io
    clickhouse_common_zookeeper
    clickhouse_dictionaries
    clickhouse_functions
    clickhouse_parsers
    clickhouse_storages_system
@ -26,6 +26,7 @@
#include <Common/DNSResolver.h>
#include <Common/CurrentMetrics.h>
#include <Common/Macros.h>
#include <Common/ShellCommand.h>
#include <Common/StringUtils/StringUtils.h>
#include <Common/ZooKeeper/ZooKeeper.h>
#include <Common/ZooKeeper/ZooKeeperNodeCache.h>

@ -39,6 +40,7 @@
#include <Common/remapExecutable.h>
#include <Common/TLDListsHolder.h>
#include <IO/HTTPCommon.h>
#include <IO/ReadHelpers.h>
#include <IO/UseSSL.h>
#include <Interpreters/AsynchronousMetrics.h>
#include <Interpreters/DDLWorker.h>

@ -95,6 +97,9 @@
#endif

#if USE_SSL
#    if USE_INTERNAL_SSL_LIBRARY
#        include <Compression/CompressionCodecEncrypted.h>
#    endif
#    include <Poco/Net/Context.h>
#    include <Poco/Net/SecureServerSocket.h>
#endif

@ -107,6 +112,10 @@
#    include <Server/KeeperTCPHandlerFactory.h>
#endif

#if USE_BASE64
#    include <turbob64.h>
#endif

#if USE_JEMALLOC
#    include <jemalloc/jemalloc.h>
#endif

@ -242,6 +251,7 @@ namespace ErrorCodes
    extern const int SUPPORT_IS_DISABLED;
    extern const int ARGUMENT_OUT_OF_BOUND;
    extern const int EXCESSIVE_ELEMENT_IN_CONFIG;
    extern const int INCORRECT_DATA;
    extern const int INVALID_CONFIG_PARAMETER;
    extern const int SYSTEM_ERROR;
    extern const int FAILED_TO_GETPWUID;
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static void loadEncryptionKey(const std::string & key_command [[maybe_unused]], Poco::Logger * log)
|
||||||
|
{
|
||||||
|
#if USE_BASE64 && USE_SSL && USE_INTERNAL_SSL_LIBRARY
|
||||||
|
|
||||||
|
auto process = ShellCommand::execute(key_command);
|
||||||
|
|
||||||
|
std::string b64_key;
|
||||||
|
readStringUntilEOF(b64_key, process->out);
|
||||||
|
process->wait();
|
||||||
|
|
||||||
|
// turbob64 doesn't like whitespace characters in input. Strip
|
||||||
|
// them before decoding.
|
||||||
|
std::erase_if(b64_key, [](char c)
|
||||||
|
{
|
||||||
|
return c == ' ' || c == '\t' || c == '\r' || c == '\n';
|
||||||
|
});
|
||||||
|
|
||||||
|
std::vector<char> buf(b64_key.size());
|
||||||
|
const size_t key_size = tb64dec(reinterpret_cast<const unsigned char *>(b64_key.data()), b64_key.size(),
|
||||||
|
reinterpret_cast<unsigned char *>(buf.data()));
|
||||||
|
if (!key_size)
|
||||||
|
throw Exception("Failed to decode encryption key", ErrorCodes::INCORRECT_DATA);
|
||||||
|
else if (key_size < 16)
|
||||||
|
LOG_WARNING(log, "The encryption key should be at least 16 octets long.");
|
||||||
|
|
||||||
|
const std::string_view key = std::string_view(buf.data(), key_size);
|
||||||
|
CompressionCodecEncrypted::setMasterKey(key);
|
||||||
|
|
||||||
|
#else
|
||||||
|
LOG_WARNING(log, "Server was built without Base64 or SSL support. Encryption is disabled.");
|
||||||
|
#endif
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
[[noreturn]] void forceShutdown()
|
[[noreturn]] void forceShutdown()
|
||||||
{
|
{
|
||||||
@ -916,6 +959,10 @@ if (ThreadFuzzer::instance().isEffective())
|
|||||||
global_context->getMergeTreeSettings().sanityCheck(settings);
|
global_context->getMergeTreeSettings().sanityCheck(settings);
|
||||||
global_context->getReplicatedMergeTreeSettings().sanityCheck(settings);
|
global_context->getReplicatedMergeTreeSettings().sanityCheck(settings);
|
||||||
|
|
||||||
|
/// Set up encryption.
|
||||||
|
if (config().has("encryption.key_command"))
|
||||||
|
loadEncryptionKey(config().getString("encryption.key_command"), log);
|
||||||
|
|
||||||
Poco::Timespan keep_alive_timeout(config().getUInt("keep_alive_timeout", 10), 0);
|
Poco::Timespan keep_alive_timeout(config().getUInt("keep_alive_timeout", 10), 0);
|
||||||
|
|
||||||
Poco::ThreadPool server_pool(3, config().getUInt("max_connections", 1024));
|
Poco::ThreadPool server_pool(3, config().getUInt("max_connections", 1024));
|
||||||
@ -1047,6 +1094,7 @@ if (ThreadFuzzer::instance().isEffective())
|
|||||||
loadMetadataSystem(global_context);
|
loadMetadataSystem(global_context);
|
||||||
/// After attaching system databases we can initialize system log.
|
/// After attaching system databases we can initialize system log.
|
||||||
global_context->initializeSystemLogs();
|
global_context->initializeSystemLogs();
|
||||||
|
global_context->setSystemZooKeeperLogAfterInitializationIfNeeded();
|
||||||
auto & database_catalog = DatabaseCatalog::instance();
|
auto & database_catalog = DatabaseCatalog::instance();
|
||||||
/// After the system database is created, attach virtual system tables (in addition to query_log and part_log)
|
/// After the system database is created, attach virtual system tables (in addition to query_log and part_log)
|
||||||
attachSystemTablesServer(*database_catalog.getSystemDatabase(), has_zookeeper);
|
attachSystemTablesServer(*database_catalog.getSystemDatabase(), has_zookeeper);
|
||||||
|
@ -1002,6 +1002,16 @@
    </compression>
    -->

    <!-- Configuration of encryption. The server executes a command to
         obtain an encryption key at startup if such a command is
         defined, or encryption codecs will be disabled otherwise. The
         command is executed through /bin/sh and is expected to write
         a Base64-encoded key to the stdout. -->
    <encryption>
        <!-- <key_command>/usr/bin/systemd-ask-password --id="clickhouse-server" --timeout=0 "Enter the ClickHouse encryption passphrase:" | base64</key_command> -->
        <!-- <key_command><![CDATA[IFS=; echo -n >/dev/tty "Enter the ClickHouse encryption passphrase: "; stty=`stty -F /dev/tty -g`; stty -F /dev/tty -echo; read k </dev/tty; stty -F /dev/tty "$stty"; echo -n $k | base64]]></key_command> -->
    </encryption>

    <!-- Allow to execute distributed DDL queries (CREATE, DROP, ALTER, RENAME) on cluster.
         Works only if ZooKeeper is enabled. Comment it if such functionality isn't required. -->
    <distributed_ddl>
|
|||||||
|
|
||||||
<!-- Uncomment to disable ClickHouse internal DNS caching. -->
|
<!-- Uncomment to disable ClickHouse internal DNS caching. -->
|
||||||
<!-- <disable_internal_dns_cache>1</disable_internal_dns_cache> -->
|
<!-- <disable_internal_dns_cache>1</disable_internal_dns_cache> -->
|
||||||
|
|
||||||
|
<!-- You can also configure rocksdb like this: -->
|
||||||
|
<!--
|
||||||
|
<rocksdb>
|
||||||
|
<options>
|
||||||
|
<max_background_jobs>8</max_background_jobs>
|
||||||
|
</options>
|
||||||
|
<column_family_options>
|
||||||
|
<num_levels>2</num_levels>
|
||||||
|
</column_family_options>
|
||||||
|
<tables>
|
||||||
|
<table>
|
||||||
|
<name>TABLE</name>
|
||||||
|
<options>
|
||||||
|
<max_background_jobs>8</max_background_jobs>
|
||||||
|
</options>
|
||||||
|
<column_family_options>
|
||||||
|
<num_levels>2</num_levels>
|
||||||
|
</column_family_options>
|
||||||
|
</table>
|
||||||
|
</tables>
|
||||||
|
</rocksdb>
|
||||||
|
-->
|
||||||
</yandex>
|
</yandex>
|
||||||
|
@ -43,6 +43,7 @@ SRCS(
    SettingsProfile.cpp
    SettingsProfileElement.cpp
    SettingsProfilesCache.cpp
    SettingsProfilesInfo.cpp
    User.cpp
    UsersConfigAccessStorage.cpp
    tests/gtest_access_rights_ops.cpp
@ -43,9 +43,9 @@ public:
        const AggregateFunctionPtr & nested_function,
        const AggregateFunctionProperties &,
        const DataTypes & arguments,
        const Array &) const override
        const Array & params) const override
    {
        return std::make_shared<AggregateFunctionArray>(nested_function, arguments);
        return std::make_shared<AggregateFunctionArray>(nested_function, arguments, params);
    }
};

@ -29,10 +29,11 @@ private:
    size_t num_arguments;

public:
    AggregateFunctionArray(AggregateFunctionPtr nested_, const DataTypes & arguments)
    AggregateFunctionArray(AggregateFunctionPtr nested_, const DataTypes & arguments, const Array & params_)
        : IAggregateFunctionHelper<AggregateFunctionArray>(arguments, {})
        : IAggregateFunctionHelper<AggregateFunctionArray>(arguments, params_)
        , nested_func(nested_), num_arguments(arguments.size())
    {
        assert(parameters == nested_func->getParameters());
        for (const auto & type : arguments)
            if (!isArray(type))
                throw Exception("All arguments for aggregate function " + getName() + " must be arrays", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
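This is the recurring pattern of the commit: combinator wrappers (`-Array` here, and `-Distinct`, `-ForEach`, `-If`, `-Merge`, and the sequence functions below) now store the parameters they were created with instead of an empty `{}`, which the new `assert(parameters == nested_func->getParameters())` cross-checks. A self-contained illustration of why dropping parameters was a problem (a simplified model, not ClickHouse code):

```cpp
#include <cassert>
#include <string>
#include <vector>

// Illustrative model: why a combinator must keep the parameters it was
// constructed with.
struct Fn
{
    std::string name;
    std::vector<double> parameters;  // e.g. {0.9} for quantile(0.9)
};

// Old behaviour: the -Array wrapper dropped parameters ({}); the wrapped
// function's full identity ("quantileArray(0.9)") could not be recovered.
Fn wrapArrayOld(const Fn & nested) { return {nested.name + "Array", {}}; }

// New behaviour: parameters are propagated, so checks that compare
// name + parameters keep working through the wrapper.
Fn wrapArrayNew(const Fn & nested) { return {nested.name + "Array", nested.parameters}; }

int main()
{
    Fn quantile{"quantile", {0.9}};
    assert(wrapArrayOld(quantile).parameters.empty());                 // information lost
    assert(wrapArrayNew(quantile).parameters == quantile.parameters);  // kept
}
```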
@ -34,14 +34,14 @@ public:
        const AggregateFunctionPtr & nested_function,
        const AggregateFunctionProperties &,
        const DataTypes & arguments,
        const Array &) const override
        const Array & params) const override
    {
        AggregateFunctionPtr res;
        if (arguments.size() == 1)
        {
            res.reset(createWithNumericType<
                AggregateFunctionDistinct,
                AggregateFunctionDistinctSingleNumericData>(*arguments[0], nested_function, arguments));
                AggregateFunctionDistinctSingleNumericData>(*arguments[0], nested_function, arguments, params));

            if (res)
                return res;

@ -49,14 +49,14 @@ public:
            if (arguments[0]->isValueUnambiguouslyRepresentedInContiguousMemoryRegion())
                return std::make_shared<
                    AggregateFunctionDistinct<
                        AggregateFunctionDistinctSingleGenericData<true>>>(nested_function, arguments);
                        AggregateFunctionDistinctSingleGenericData<true>>>(nested_function, arguments, params);
            else
                return std::make_shared<
                    AggregateFunctionDistinct<
                        AggregateFunctionDistinctSingleGenericData<false>>>(nested_function, arguments);
                        AggregateFunctionDistinctSingleGenericData<false>>>(nested_function, arguments, params);
        }

        return std::make_shared<AggregateFunctionDistinct<AggregateFunctionDistinctMultipleGenericData>>(nested_function, arguments);
        return std::make_shared<AggregateFunctionDistinct<AggregateFunctionDistinctMultipleGenericData>>(nested_function, arguments, params);
    }
};
@ -167,8 +167,8 @@ private:
    }

public:
    AggregateFunctionDistinct(AggregateFunctionPtr nested_func_, const DataTypes & arguments)
    AggregateFunctionDistinct(AggregateFunctionPtr nested_func_, const DataTypes & arguments, const Array & params_)
        : IAggregateFunctionDataHelper<Data, AggregateFunctionDistinct>(arguments, nested_func_->getParameters())
        : IAggregateFunctionDataHelper<Data, AggregateFunctionDistinct>(arguments, params_)
        , nested_func(nested_func_)
        , arguments_num(arguments.size()) {}
@ -38,9 +38,9 @@ public:
        const AggregateFunctionPtr & nested_function,
        const AggregateFunctionProperties &,
        const DataTypes & arguments,
        const Array &) const override
        const Array & params) const override
    {
        return std::make_shared<AggregateFunctionForEach>(nested_function, arguments);
        return std::make_shared<AggregateFunctionForEach>(nested_function, arguments, params);
    }
};

@ -105,8 +105,8 @@ private:
    }

public:
    AggregateFunctionForEach(AggregateFunctionPtr nested_, const DataTypes & arguments)
    AggregateFunctionForEach(AggregateFunctionPtr nested_, const DataTypes & arguments, const Array & params_)
        : IAggregateFunctionDataHelper<AggregateFunctionForEachData, AggregateFunctionForEach>(arguments, {})
        : IAggregateFunctionDataHelper<AggregateFunctionForEachData, AggregateFunctionForEach>(arguments, params_)
        , nested_func(nested_), num_arguments(arguments.size())
    {
        nested_size_of_data = nested_func->sizeOfData();
@ -25,8 +25,8 @@ template <typename HasLimit>
class AggregateFunctionGroupUniqArrayDate : public AggregateFunctionGroupUniqArray<DataTypeDate::FieldType, HasLimit>
{
public:
    explicit AggregateFunctionGroupUniqArrayDate(const DataTypePtr & argument_type, UInt64 max_elems_ = std::numeric_limits<UInt64>::max())
    explicit AggregateFunctionGroupUniqArrayDate(const DataTypePtr & argument_type, const Array & parameters_, UInt64 max_elems_ = std::numeric_limits<UInt64>::max())
        : AggregateFunctionGroupUniqArray<DataTypeDate::FieldType, HasLimit>(argument_type, max_elems_) {}
        : AggregateFunctionGroupUniqArray<DataTypeDate::FieldType, HasLimit>(argument_type, parameters_, max_elems_) {}
    DataTypePtr getReturnType() const override { return std::make_shared<DataTypeArray>(std::make_shared<DataTypeDate>()); }
};

@ -34,8 +34,8 @@ template <typename HasLimit>
class AggregateFunctionGroupUniqArrayDateTime : public AggregateFunctionGroupUniqArray<DataTypeDateTime::FieldType, HasLimit>
{
public:
    explicit AggregateFunctionGroupUniqArrayDateTime(const DataTypePtr & argument_type, UInt64 max_elems_ = std::numeric_limits<UInt64>::max())
    explicit AggregateFunctionGroupUniqArrayDateTime(const DataTypePtr & argument_type, const Array & parameters_, UInt64 max_elems_ = std::numeric_limits<UInt64>::max())
        : AggregateFunctionGroupUniqArray<DataTypeDateTime::FieldType, HasLimit>(argument_type, max_elems_) {}
        : AggregateFunctionGroupUniqArray<DataTypeDateTime::FieldType, HasLimit>(argument_type, parameters_, max_elems_) {}
    DataTypePtr getReturnType() const override { return std::make_shared<DataTypeArray>(std::make_shared<DataTypeDateTime>()); }
};

@ -102,9 +102,9 @@ AggregateFunctionPtr createAggregateFunctionGroupUniqArray(
            ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);

    if (!limit_size)
        return createAggregateFunctionGroupUniqArrayImpl<std::false_type>(name, argument_types[0]);
        return createAggregateFunctionGroupUniqArrayImpl<std::false_type>(name, argument_types[0], parameters);
    else
        return createAggregateFunctionGroupUniqArrayImpl<std::true_type>(name, argument_types[0], max_elems);
        return createAggregateFunctionGroupUniqArrayImpl<std::true_type>(name, argument_types[0], parameters, max_elems);
}

}
@ -48,9 +48,9 @@ private:
    using State = AggregateFunctionGroupUniqArrayData<T>;

public:
    AggregateFunctionGroupUniqArray(const DataTypePtr & argument_type, UInt64 max_elems_ = std::numeric_limits<UInt64>::max())
    AggregateFunctionGroupUniqArray(const DataTypePtr & argument_type, const Array & parameters_, UInt64 max_elems_ = std::numeric_limits<UInt64>::max())
        : IAggregateFunctionDataHelper<AggregateFunctionGroupUniqArrayData<T>,
            AggregateFunctionGroupUniqArray<T, Tlimit_num_elem>>({argument_type}, {}),
            AggregateFunctionGroupUniqArray<T, Tlimit_num_elem>>({argument_type}, parameters_),
          max_elems(max_elems_) {}

    String getName() const override { return "groupUniqArray"; }

@ -152,8 +152,8 @@ class AggregateFunctionGroupUniqArrayGeneric
    using State = AggregateFunctionGroupUniqArrayGenericData;

public:
    AggregateFunctionGroupUniqArrayGeneric(const DataTypePtr & input_data_type_, UInt64 max_elems_ = std::numeric_limits<UInt64>::max())
    AggregateFunctionGroupUniqArrayGeneric(const DataTypePtr & input_data_type_, const Array & parameters_, UInt64 max_elems_ = std::numeric_limits<UInt64>::max())
        : IAggregateFunctionDataHelper<AggregateFunctionGroupUniqArrayGenericData, AggregateFunctionGroupUniqArrayGeneric<is_plain_column, Tlimit_num_elem>>({input_data_type_}, {})
        : IAggregateFunctionDataHelper<AggregateFunctionGroupUniqArrayGenericData, AggregateFunctionGroupUniqArrayGeneric<is_plain_column, Tlimit_num_elem>>({input_data_type_}, parameters_)
        , input_data_type(this->argument_types[0])
        , max_elems(max_elems_) {}
@ -35,9 +35,9 @@ public:
        const AggregateFunctionPtr & nested_function,
        const AggregateFunctionProperties &,
        const DataTypes & arguments,
        const Array &) const override
        const Array & params) const override
    {
        return std::make_shared<AggregateFunctionIf>(nested_function, arguments);
        return std::make_shared<AggregateFunctionIf>(nested_function, arguments, params);
    }
};

@ -37,8 +37,8 @@ private:
    size_t num_arguments;

public:
    AggregateFunctionIf(AggregateFunctionPtr nested, const DataTypes & types)
    AggregateFunctionIf(AggregateFunctionPtr nested, const DataTypes & types, const Array & params_)
        : IAggregateFunctionHelper<AggregateFunctionIf>(types, nested->getParameters())
        : IAggregateFunctionHelper<AggregateFunctionIf>(types, params_)
        , nested_func(nested), num_arguments(types.size())
    {
        if (num_arguments == 0)
@ -39,7 +39,7 @@ public:
        const AggregateFunctionPtr & nested_function,
        const AggregateFunctionProperties &,
        const DataTypes & arguments,
        const Array &) const override
        const Array & params) const override
    {
        const DataTypePtr & argument = arguments[0];

@ -53,7 +53,7 @@ public:
                + ", because it corresponds to different aggregate function: " + function->getFunctionName() + " instead of " + nested_function->getName(),
                ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);

        return std::make_shared<AggregateFunctionMerge>(nested_function, argument);
        return std::make_shared<AggregateFunctionMerge>(nested_function, argument, params);
    }
};

@ -29,15 +29,15 @@ private:
    AggregateFunctionPtr nested_func;

public:
    AggregateFunctionMerge(const AggregateFunctionPtr & nested_, const DataTypePtr & argument)
    AggregateFunctionMerge(const AggregateFunctionPtr & nested_, const DataTypePtr & argument, const Array & params_)
        : IAggregateFunctionHelper<AggregateFunctionMerge>({argument}, nested_->getParameters())
        : IAggregateFunctionHelper<AggregateFunctionMerge>({argument}, params_)
        , nested_func(nested_)
    {
        const DataTypeAggregateFunction * data_type = typeid_cast<const DataTypeAggregateFunction *>(argument.get());

        if (!data_type || data_type->getFunctionName() != nested_func->getName())
        if (!data_type || !nested_func->haveSameStateRepresentation(*data_type->getFunction()))
            throw Exception("Illegal type " + argument->getName() + " of argument for aggregate function " + getName(),
            throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of argument for aggregate function {}, "
                ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
                "expected {} or equivalent type", argument->getName(), getName(), getStateType()->getName());
    }

    String getName() const override
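The practical consequence of swapping the exact-name check for `haveSameStateRepresentation()`: `-Merge` now accepts any state whose in-memory representation matches, not only states produced under the identical name and parameters. A hedged sketch of the accepting path, under the semantics documented later in this commit (the quantile case is taken from that doc comment; treat the overall framing as illustrative, not as code from the commit):

```cpp
// Illustrative check, mirroring what the constructor above now does:
// per the doc comment added in this commit, quantile's parameter affects
// only finalization, so a state typed AggregateFunction(quantile(0.5), UInt64)
// is meant to be acceptable to quantileMerge(0.9).
const auto * state_type = typeid_cast<const DataTypeAggregateFunction *>(argument.get());
if (state_type && nested_func->haveSameStateRepresentation(*state_type->getFunction()))
{
    // accepted: merge the states, then finalize with this function's own parameters
}
```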
@ -105,6 +105,11 @@ public:
        return res;
    }

    bool haveSameStateRepresentation(const IAggregateFunction & rhs) const override
    {
        return getName() == rhs.getName() && this->haveEqualArgumentTypes(rhs);
    }

    bool allocatesMemoryInArena() const override { return false; }

    void add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena *) const override

@ -179,6 +179,11 @@ public:
        this->data(place).deserialize(buf);
    }

    bool haveSameStateRepresentation(const IAggregateFunction & rhs) const override
    {
        return this->getName() == rhs.getName() && this->haveEqualArgumentTypes(rhs);
    }

private:
    enum class PatternActionType
    {
@ -31,10 +31,10 @@ namespace

template <typename T>
inline AggregateFunctionPtr createAggregateFunctionSequenceNodeImpl(
    const DataTypePtr data_type, const DataTypes & argument_types, SequenceDirection direction, SequenceBase base)
    const DataTypePtr data_type, const DataTypes & argument_types, const Array & parameters, SequenceDirection direction, SequenceBase base)
{
    return std::make_shared<SequenceNextNodeImpl<T, NodeString<max_events_size>>>(
        data_type, argument_types, base, direction, min_required_args);
        data_type, argument_types, parameters, base, direction, min_required_args);
}

AggregateFunctionPtr

@ -116,17 +116,17 @@ createAggregateFunctionSequenceNode(const std::string & name, const DataTypes &

    WhichDataType timestamp_type(argument_types[0].get());
    if (timestamp_type.idx == TypeIndex::UInt8)
        return createAggregateFunctionSequenceNodeImpl<UInt8>(data_type, argument_types, direction, base);
        return createAggregateFunctionSequenceNodeImpl<UInt8>(data_type, argument_types, parameters, direction, base);
    if (timestamp_type.idx == TypeIndex::UInt16)
        return createAggregateFunctionSequenceNodeImpl<UInt16>(data_type, argument_types, direction, base);
        return createAggregateFunctionSequenceNodeImpl<UInt16>(data_type, argument_types, parameters, direction, base);
    if (timestamp_type.idx == TypeIndex::UInt32)
        return createAggregateFunctionSequenceNodeImpl<UInt32>(data_type, argument_types, direction, base);
        return createAggregateFunctionSequenceNodeImpl<UInt32>(data_type, argument_types, parameters, direction, base);
    if (timestamp_type.idx == TypeIndex::UInt64)
        return createAggregateFunctionSequenceNodeImpl<UInt64>(data_type, argument_types, direction, base);
        return createAggregateFunctionSequenceNodeImpl<UInt64>(data_type, argument_types, parameters, direction, base);
    if (timestamp_type.isDate())
        return createAggregateFunctionSequenceNodeImpl<DataTypeDate::FieldType>(data_type, argument_types, direction, base);
        return createAggregateFunctionSequenceNodeImpl<DataTypeDate::FieldType>(data_type, argument_types, parameters, direction, base);
    if (timestamp_type.isDateTime())
        return createAggregateFunctionSequenceNodeImpl<DataTypeDateTime::FieldType>(data_type, argument_types, direction, base);
        return createAggregateFunctionSequenceNodeImpl<DataTypeDateTime::FieldType>(data_type, argument_types, parameters, direction, base);

    throw Exception{"Illegal type " + argument_types.front().get()->getName()
        + " of first argument of aggregate function " + name + ", must be Unsigned Number, Date, DateTime",
@ -175,11 +175,12 @@ public:
    SequenceNextNodeImpl(
        const DataTypePtr & data_type_,
        const DataTypes & arguments,
        const Array & parameters_,
        SequenceBase seq_base_kind_,
        SequenceDirection seq_direction_,
        size_t min_required_args_,
        UInt64 max_elems_ = std::numeric_limits<UInt64>::max())
        : IAggregateFunctionDataHelper<SequenceNextNodeGeneralData<Node>, Self>({data_type_}, {})
        : IAggregateFunctionDataHelper<SequenceNextNodeGeneralData<Node>, Self>({data_type_}, parameters_)
        , seq_base_kind(seq_base_kind_)
        , seq_direction(seq_direction_)
        , min_required_args(min_required_args_)

@ -193,6 +194,11 @@ public:

    DataTypePtr getReturnType() const override { return data_type; }

    bool haveSameStateRepresentation(const IAggregateFunction & rhs) const override
    {
        return this->getName() == rhs.getName() && this->haveEqualArgumentTypes(rhs);
    }

    AggregateFunctionPtr getOwnNullAdapter(
        const AggregateFunctionPtr & nested_function, const DataTypes & arguments, const Array & params,
        const AggregateFunctionProperties &) const override
@ -50,4 +50,21 @@ String IAggregateFunction::getDescription() const

    return description;
}

bool IAggregateFunction::haveEqualArgumentTypes(const IAggregateFunction & rhs) const
{
    return std::equal(argument_types.begin(), argument_types.end(),
                      rhs.argument_types.begin(), rhs.argument_types.end(),
                      [](const auto & t1, const auto & t2) { return t1->equals(*t2); });
}

bool IAggregateFunction::haveSameStateRepresentation(const IAggregateFunction & rhs) const
{
    bool res = getName() == rhs.getName()
        && parameters == rhs.parameters
        && haveEqualArgumentTypes(rhs);
    assert(res == (getStateType()->getName() == rhs.getStateType()->getName()));
    return res;
}

}
@ -74,6 +74,16 @@ public:
    /// Get the data type of internal state. By default it is AggregateFunction(name(params), argument_types...).
    virtual DataTypePtr getStateType() const;

    /// Returns true if two aggregate functions have the same state representation in memory and the same serialization,
    /// so state of one aggregate function can be safely used with another.
    /// Examples:
    /// - quantile(x), quantile(a)(x), quantile(b)(x) - parameter doesn't affect state and used for finalization only
    /// - foo(x) and fooIf(x) - If combinator doesn't affect state
    /// By default returns true only if functions have exactly the same names, combinators and parameters.
    virtual bool haveSameStateRepresentation(const IAggregateFunction & rhs) const;

    bool haveEqualArgumentTypes(const IAggregateFunction & rhs) const;

    /// Get type which will be used for prediction result in case if function is an ML method.
    virtual DataTypePtr getReturnTypeToPredict() const
    {
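A minimal sketch of how this contract could be exercised from the factory; the exact `AggregateFunctionFactory::get(name, argument_types, parameters, properties)` overload is an assumption based on common usage, not something this diff shows:

```cpp
// Sketch: two instances of the same function over equal argument types
// satisfy the default contract only when their parameters also match.
DataTypes args{std::make_shared<DataTypeUInt64>()};
AggregateFunctionProperties props;

auto sum_a = AggregateFunctionFactory::instance().get("sum", args, {}, props);
auto sum_b = AggregateFunctionFactory::instance().get("sum", args, {}, props);

assert(sum_a->haveEqualArgumentTypes(*sum_b));
assert(sum_a->haveSameStateRepresentation(*sum_b));  // same name, params, args
```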
@ -3,6 +3,7 @@
#include <IO/ReadHelpers.h>
#include <DataStreams/OneBlockInputStream.h>
#include <DataStreams/OwningBlockInputStream.h>
#include <DataStreams/formatBlock.h>
#include <Dictionaries/DictionarySourceHelpers.h>
#include <Processors/Formats/InputStreamFromInputFormat.h>
#include <IO/WriteBufferFromOStream.h>

@ -128,7 +129,7 @@ BlockInputStreamPtr LibraryBridgeHelper::loadIds(const std::string ids_string)
{
    startBridgeSync();
    auto uri = createRequestURI(LOAD_IDS_METHOD);
    return loadBase(uri, [ids_string](std::ostream & os) { os << "ids=" << ids_string; });
    return loadBase(uri, [ids_string](std::ostream & os) { os << ids_string; });
}
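Read together with the `loadIds` server handler earlier in this commit (which now does `readString(ids_string, request.getStream())` instead of parsing an `ids=` URL-style parameter), both ends of the bridge protocol moved the id list into the raw request body. A self-contained illustration of the round trip (a simplified model using standard streams, not code from the commit):

```cpp
#include <cassert>
#include <sstream>
#include <string>

// Illustrative round-trip of the new bridge protocol: the client writes the
// raw id payload into the request body and the server reads it back verbatim,
// instead of wrapping it in an "ids=..." form-style parameter.
int main()
{
    const std::string ids_string = "\x01\x02\x03";  // stand-in for the binary id blob

    std::ostringstream request_body;
    request_body << ids_string;                     // client: no "ids=" prefix any more

    assert(request_body.str() == ids_string);       // server sees exactly the payload
}
```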
@@ -3,6 +3,7 @@
 #include <Client/HedgedConnections.h>
 #include <Common/ProfileEvents.h>
 #include <Interpreters/ClientInfo.h>
+#include <Interpreters/Context.h>

 namespace ProfileEvents
 {

@@ -21,13 +22,14 @@ namespace ErrorCodes

 HedgedConnections::HedgedConnections(
     const ConnectionPoolWithFailoverPtr & pool_,
-    const Settings & settings_,
+    ContextPtr context_,
     const ConnectionTimeouts & timeouts_,
     const ThrottlerPtr & throttler_,
     PoolMode pool_mode,
     std::shared_ptr<QualifiedTableName> table_to_check_)
-    : hedged_connections_factory(pool_, &settings_, timeouts_, table_to_check_)
-    , settings(settings_)
+    : hedged_connections_factory(pool_, &context_->getSettingsRef(), timeouts_, table_to_check_)
+    , context(std::move(context_))
+    , settings(context->getSettingsRef())
     , drain_timeout(settings.drain_timeout)
     , allow_changing_replica_until_first_data_packet(settings.allow_changing_replica_until_first_data_packet)
     , throttler(throttler_)

@@ -479,6 +481,15 @@ void HedgedConnections::checkNewReplica()
     Connection * connection = nullptr;
     HedgedConnectionsFactory::State state = hedged_connections_factory.waitForReadyConnections(connection);

+    if (cancelled)
+    {
+        /// Do not start new connection if query is already canceled.
+        if (connection)
+            connection->disconnect();
+
+        state = HedgedConnectionsFactory::State::CANNOT_CHOOSE;
+    }
+
     processNewReplicaState(state, connection);

     /// Check if we don't need to listen hedged_connections_factory file descriptor in epoll anymore.
@@ -72,7 +72,7 @@ public:
 };

 HedgedConnections(const ConnectionPoolWithFailoverPtr & pool_,
-    const Settings & settings_,
+    ContextPtr context_,
     const ConnectionTimeouts & timeouts_,
     const ThrottlerPtr & throttler,
     PoolMode pool_mode,

@@ -188,6 +188,7 @@ private:
     Packet last_received_packet;

     Epoll epoll;
+    ContextPtr context;
     const Settings & settings;

     /// The following two fields are from settings but can be referenced outside the lifetime of
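
The new `context` member sits right next to the `settings` reference on purpose: the object now co-owns the Context, so the reference returned by getSettingsRef() cannot dangle while the connections object lives. A minimal sketch of this ownership pattern, with made-up Settings/Context types standing in for the real ones:

    #include <iostream>
    #include <memory>

    struct Settings { int max_threads = 8; };
    struct Context
    {
        Settings settings;
        const Settings & getSettingsRef() const { return settings; }
    };
    using ContextPtr = std::shared_ptr<const Context>;

    // Hypothetical illustration: co-own the Context first, then it is
    // safe to bind and keep a reference into it.
    class Connections
    {
    public:
        explicit Connections(ContextPtr context_)
            : context(std::move(context_))          // keep the owner alive
            , settings(context->getSettingsRef())   // reference into the owned object
        {}

        int maxThreads() const { return settings.max_threads; }

    private:
        ContextPtr context;            // declared before `settings`, so it is initialized first
        const Settings & settings;
    };

    int main()
    {
        auto ctx = std::make_shared<const Context>();
        Connections c(ctx);
        ctx.reset();                         // caller drops its copy...
        std::cout << c.maxThreads() << '\n'; // ...but the reference is still valid (prints 8)
    }
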
@@ -12,9 +12,11 @@ PEERDIR(
 SRCS(
     Connection.cpp
     ConnectionEstablisher.cpp
+    ConnectionPool.cpp
     ConnectionPoolWithFailover.cpp
     HedgedConnections.cpp
     HedgedConnectionsFactory.cpp
+    IConnections.cpp
     MultiplexedConnections.cpp

 )

@@ -2,6 +2,7 @@
 #include <Common/StringUtils/StringUtils.h>
 #include <common/logger_useful.h>
 #include <IO/ReadBufferFromFile.h>
+#include <IO/ReadHelpers.h>
 #include <string_view>
 #include <unordered_set>

@@ -11,11 +12,10 @@ namespace DB
 namespace ErrorCodes
 {
     extern const int TLD_LIST_NOT_FOUND;
+    extern const int LOGICAL_ERROR;
 }

-///
 /// TLDList
-///
 TLDList::TLDList(size_t size)
     : tld_container(size)
     , pool(std::make_unique<Arena>(10 << 20))

@@ -31,9 +31,7 @@ bool TLDList::has(const StringRef & host) const
     return tld_container.has(host);
 }

-///
 /// TLDListsHolder
-///
 TLDListsHolder & TLDListsHolder::getInstance()
 {
     static TLDListsHolder instance;

@@ -62,24 +60,22 @@ size_t TLDListsHolder::parseAndAddTldList(const std::string & name, const std::s
     std::unordered_set<std::string> tld_list_tmp;

     ReadBufferFromFile in(path);
+    String line;
     while (!in.eof())
     {
-        char * newline = find_first_symbols<'\n'>(in.position(), in.buffer().end());
-        if (newline >= in.buffer().end())
-            break;
-
-        std::string_view line(in.position(), newline - in.position());
-        in.position() = newline + 1;
+        readEscapedStringUntilEOL(line, in);
+        ++in.position();

         /// Skip comments
         if (line.size() > 2 && line[0] == '/' && line[1] == '/')
            continue;
-        trim(line);
+        line = trim(line, [](char c) { return std::isspace(c); });
         /// Skip empty line
         if (line.empty())
             continue;
         tld_list_tmp.emplace(line);
     }
+    if (!in.eof())
+        throw Exception(ErrorCodes::LOGICAL_ERROR, "Not all list had been read", name);

     TLDList tld_list(tld_list_tmp.size());
     for (const auto & host : tld_list_tmp)
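
The loop above uses ClickHouse's ReadBuffer machinery (readEscapedStringUntilEOL plus a final eof check that the whole list was consumed). A rough stand-alone analogue using std::ifstream, with the file name assumed for illustration only, shows the same skip-comments / trim / skip-empty flow:

    #include <fstream>
    #include <iostream>
    #include <string>
    #include <unordered_set>

    // Hypothetical analogue of the parsing loop above, without the
    // ClickHouse ReadBuffer types.
    std::unordered_set<std::string> parseTldList(const std::string & path)
    {
        std::unordered_set<std::string> result;
        std::ifstream in(path);
        std::string line;
        while (std::getline(in, line))
        {
            /// Skip comments
            if (line.size() > 2 && line[0] == '/' && line[1] == '/')
                continue;
            /// Trim surrounding whitespace
            auto begin = line.find_first_not_of(" \t\r");
            auto end = line.find_last_not_of(" \t\r");
            if (begin == std::string::npos)
                continue; /// Skip empty line
            result.emplace(line.substr(begin, end - begin + 1));
        }
        return result;
    }

    int main()
    {
        for (const auto & host : parseTldList("public_suffix_list.dat"))
            std::cout << host << '\n';
    }
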
@@ -111,7 +111,8 @@ void ZooKeeper::init(const std::string & implementation_, const Strings & hosts_
         identity_,
         Poco::Timespan(0, session_timeout_ms_ * 1000),
         Poco::Timespan(0, ZOOKEEPER_CONNECTION_TIMEOUT_MS * 1000),
-        Poco::Timespan(0, operation_timeout_ms_ * 1000));
+        Poco::Timespan(0, operation_timeout_ms_ * 1000),
+        zk_log);

     if (chroot.empty())
         LOG_TRACE(log, "Initialized, hosts: {}", fmt::join(hosts, ","));

@@ -134,8 +135,10 @@ void ZooKeeper::init(const std::string & implementation_, const Strings & hosts_
 }

 ZooKeeper::ZooKeeper(const std::string & hosts_string, const std::string & identity_, int32_t session_timeout_ms_,
-                     int32_t operation_timeout_ms_, const std::string & chroot_, const std::string & implementation_)
+                     int32_t operation_timeout_ms_, const std::string & chroot_, const std::string & implementation_,
+                     std::shared_ptr<DB::ZooKeeperLog> zk_log_)
 {
+    zk_log = std::move(zk_log_);
     Strings hosts_strings;
     splitInto<','>(hosts_strings, hosts_string);

@@ -143,8 +146,10 @@ ZooKeeper::ZooKeeper(const std::string & hosts_string, const std::string & ident
 }

 ZooKeeper::ZooKeeper(const Strings & hosts_, const std::string & identity_, int32_t session_timeout_ms_,
-                     int32_t operation_timeout_ms_, const std::string & chroot_, const std::string & implementation_)
+                     int32_t operation_timeout_ms_, const std::string & chroot_, const std::string & implementation_,
+                     std::shared_ptr<DB::ZooKeeperLog> zk_log_)
 {
+    zk_log = std::move(zk_log_);
     init(implementation_, hosts_, identity_, session_timeout_ms_, operation_timeout_ms_, chroot_);
 }

@@ -209,7 +214,8 @@ struct ZooKeeperArgs
     std::string implementation;
 };

-ZooKeeper::ZooKeeper(const Poco::Util::AbstractConfiguration & config, const std::string & config_name)
+ZooKeeper::ZooKeeper(const Poco::Util::AbstractConfiguration & config, const std::string & config_name, std::shared_ptr<DB::ZooKeeperLog> zk_log_)
+    : zk_log(std::move(zk_log_))
 {
     ZooKeeperArgs args(config, config_name);
     init(args.implementation, args.hosts, args.identity, args.session_timeout_ms, args.operation_timeout_ms, args.chroot);

@@ -727,7 +733,7 @@ bool ZooKeeper::waitForDisappear(const std::string & path, const WaitCondition &

 ZooKeeperPtr ZooKeeper::startNewSession() const
 {
-    return std::make_shared<ZooKeeper>(hosts, identity, session_timeout_ms, operation_timeout_ms, chroot, implementation);
+    return std::make_shared<ZooKeeper>(hosts, identity, session_timeout_ms, operation_timeout_ms, chroot, implementation, zk_log);
 }

@@ -1018,6 +1024,14 @@ void ZooKeeper::finalize()
     impl->finalize();
 }

+
+void ZooKeeper::setZooKeeperLog(std::shared_ptr<DB::ZooKeeperLog> zk_log_)
+{
+    zk_log = std::move(zk_log_);
+    if (auto * zk = dynamic_cast<Coordination::ZooKeeper *>(impl.get()))
+        zk->setZooKeeperLog(zk_log);
+}
+
 size_t KeeperMultiException::getFailedOpIndex(Coordination::Error exception_code, const Coordination::Responses & responses)
 {
     if (responses.empty())
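
A detail worth noting in the init() call: Poco::Timespan's two-argument constructor takes (seconds, microseconds), which is why the millisecond timeouts are multiplied by 1000. A tiny check of that conversion (requires the Poco headers):

    #include <Poco/Timespan.h>
    #include <iostream>

    int main()
    {
        // Poco::Timespan(seconds, microseconds): a millisecond timeout is
        // therefore multiplied by 1000, exactly as in the init() call above.
        int session_timeout_ms = 30000;
        Poco::Timespan t(0, session_timeout_ms * 1000);
        std::cout << t.totalSeconds() << '\n'; // prints 30
    }
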
@@ -25,6 +25,10 @@ namespace CurrentMetrics
     extern const Metric EphemeralNode;
 }

+namespace DB
+{
+    class ZooKeeperLog;
+}
+
 namespace zkutil
 {

@@ -52,13 +56,15 @@ public:
         int32_t session_timeout_ms_ = Coordination::DEFAULT_SESSION_TIMEOUT_MS,
         int32_t operation_timeout_ms_ = Coordination::DEFAULT_OPERATION_TIMEOUT_MS,
         const std::string & chroot_ = "",
-        const std::string & implementation_ = "zookeeper");
+        const std::string & implementation_ = "zookeeper",
+        std::shared_ptr<DB::ZooKeeperLog> zk_log_ = nullptr);

     ZooKeeper(const Strings & hosts_, const std::string & identity_ = "",
         int32_t session_timeout_ms_ = Coordination::DEFAULT_SESSION_TIMEOUT_MS,
         int32_t operation_timeout_ms_ = Coordination::DEFAULT_OPERATION_TIMEOUT_MS,
         const std::string & chroot_ = "",
-        const std::string & implementation_ = "zookeeper");
+        const std::string & implementation_ = "zookeeper",
+        std::shared_ptr<DB::ZooKeeperLog> zk_log_ = nullptr);

     /** Config of the form:
         <zookeeper>

@@ -82,7 +88,7 @@ public:
             <identity>user:password</identity>
         </zookeeper>
     */
-    ZooKeeper(const Poco::Util::AbstractConfiguration & config, const std::string & config_name);
+    ZooKeeper(const Poco::Util::AbstractConfiguration & config, const std::string & config_name, std::shared_ptr<DB::ZooKeeperLog> zk_log_);

     /// Creates a new session with the same parameters. This method can be used for reconnecting
     /// after the session has expired.

@@ -269,6 +275,8 @@ public:

     void finalize();

+    void setZooKeeperLog(std::shared_ptr<DB::ZooKeeperLog> zk_log_);
+
 private:
     friend class EphemeralNodeHolder;

@@ -298,6 +306,7 @@ private:
     std::mutex mutex;

     Poco::Logger * log = nullptr;
+    std::shared_ptr<DB::ZooKeeperLog> zk_log;
 };

@@ -537,6 +537,139 @@ void ZooKeeperSessionIDResponse::writeImpl(WriteBuffer & out) const
     Coordination::write(server_id, out);
 }

+
+void ZooKeeperRequest::createLogElements(LogElements & elems) const
+{
+    elems.emplace_back();
+    auto & elem = elems.back();
+    elem.xid = xid;
+    elem.has_watch = has_watch;
+    elem.op_num = static_cast<uint32_t>(getOpNum());
+    elem.path = getPath();
+    elem.request_idx = elems.size() - 1;
+}
+
+void ZooKeeperCreateRequest::createLogElements(LogElements & elems) const
+{
+    ZooKeeperRequest::createLogElements(elems);
+    auto & elem = elems.back();
+    elem.data = data;
+    elem.is_ephemeral = is_ephemeral;
+    elem.is_sequential = is_sequential;
+}
+
+void ZooKeeperRemoveRequest::createLogElements(LogElements & elems) const
+{
+    ZooKeeperRequest::createLogElements(elems);
+    auto & elem = elems.back();
+    elem.version = version;
+}
+
+void ZooKeeperSetRequest::createLogElements(LogElements & elems) const
+{
+    ZooKeeperRequest::createLogElements(elems);
+    auto & elem = elems.back();
+    elem.data = data;
+    elem.version = version;
+}
+
+void ZooKeeperCheckRequest::createLogElements(LogElements & elems) const
+{
+    ZooKeeperRequest::createLogElements(elems);
+    auto & elem = elems.back();
+    elem.version = version;
+}
+
+void ZooKeeperMultiRequest::createLogElements(LogElements & elems) const
+{
+    ZooKeeperRequest::createLogElements(elems);
+    elems.back().requests_size = requests.size();
+    for (const auto & request : requests)
+    {
+        auto & req = dynamic_cast<ZooKeeperRequest &>(*request);
+        assert(!req.xid || req.xid == xid);
+        req.createLogElements(elems);
+    }
+}
+
+
+void ZooKeeperResponse::fillLogElements(LogElements & elems, size_t idx) const
+{
+    auto & elem = elems[idx];
+    assert(!elem.xid || elem.xid == xid);
+    elem.xid = xid;
+    int32_t response_op = tryGetOpNum();
+    assert(!elem.op_num || elem.op_num == response_op || response_op < 0);
+    elem.op_num = response_op;
+
+    elem.zxid = zxid;
+    elem.error = static_cast<Int32>(error);
+}
+
+void ZooKeeperWatchResponse::fillLogElements(LogElements & elems, size_t idx) const
+{
+    ZooKeeperResponse::fillLogElements(elems, idx);
+    auto & elem = elems[idx];
+    elem.watch_type = type;
+    elem.watch_state = state;
+    elem.path = path;
+}
+
+void ZooKeeperCreateResponse::fillLogElements(LogElements & elems, size_t idx) const
+{
+    ZooKeeperResponse::fillLogElements(elems, idx);
+    auto & elem = elems[idx];
+    elem.path_created = path_created;
+}
+
+void ZooKeeperExistsResponse::fillLogElements(LogElements & elems, size_t idx) const
+{
+    ZooKeeperResponse::fillLogElements(elems, idx);
+    auto & elem = elems[idx];
+    elem.stat = stat;
+}
+
+void ZooKeeperGetResponse::fillLogElements(LogElements & elems, size_t idx) const
+{
+    ZooKeeperResponse::fillLogElements(elems, idx);
+    auto & elem = elems[idx];
+    elem.data = data;
+    elem.stat = stat;
+}
+
+void ZooKeeperSetResponse::fillLogElements(LogElements & elems, size_t idx) const
+{
+    ZooKeeperResponse::fillLogElements(elems, idx);
+    auto & elem = elems[idx];
+    elem.stat = stat;
+}
+
+void ZooKeeperListResponse::fillLogElements(LogElements & elems, size_t idx) const
+{
+    ZooKeeperResponse::fillLogElements(elems, idx);
+    auto & elem = elems[idx];
+    elem.stat = stat;
+    elem.children = names;
+}
+
+void ZooKeeperMultiResponse::fillLogElements(LogElements & elems, size_t idx) const
+{
+    assert(idx == 0);
+    assert(elems.size() == responses.size() + 1);
+    ZooKeeperResponse::fillLogElements(elems, idx);
+    for (const auto & response : responses)
+    {
+        auto & resp = dynamic_cast<ZooKeeperResponse &>(*response);
+        assert(!resp.xid || resp.xid == xid);
+        assert(!resp.zxid || resp.zxid == zxid);
+        resp.xid = xid;
+        resp.zxid = zxid;
+        resp.fillLogElements(elems, ++idx);
+    }
+}
+
+
 void ZooKeeperRequestFactory::registerRequest(OpNum op_num, Creator creator)
 {
     if (!op_num_to_request.try_emplace(op_num, creator).second)
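
The scheme introduced here flattens a request (and each sub-request of a multi) into one log row apiece, then lets the matching response fill the same rows by index. A stripped-down model of that contract, using hypothetical LogElement/Request/Response types far simpler than the real ones:

    #include <cassert>
    #include <cstdint>
    #include <string>
    #include <vector>

    // Hypothetical model: one log row per operation, filled in two phases.
    struct LogElement { int32_t xid = 0; std::string path; int32_t error = 0; };
    using LogElements = std::vector<LogElement>;

    struct Request
    {
        int32_t xid = 0;
        std::string path;
        std::vector<Request> subrequests; // non-empty for a "multi" request

        void createLogElements(LogElements & elems) const
        {
            elems.push_back({xid, path, 0});
            for (const auto & sub : subrequests)
                sub.createLogElements(elems); // one row per sub-operation
        }
    };

    struct Response
    {
        int32_t xid = 0;
        std::vector<int32_t> suberrors;

        void fillLogElements(LogElements & elems, size_t idx) const
        {
            elems[idx].xid = xid;
            for (int32_t err : suberrors)
                elems[++idx].error = err; // same order as the request rows
        }
    };

    int main()
    {
        Request multi{42, "/multi", {{42, "/a"}, {42, "/b"}}};
        LogElements elems;
        multi.createLogElements(elems);
        assert(elems.size() == 3);        // multi row + one row per sub-request

        Response resp{42, {0, -101}};
        resp.fillLogElements(elems, 0);
        assert(elems[2].error == -101);   // response filled the matching row
    }
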
@@ -2,6 +2,7 @@

 #include <Common/ZooKeeper/IKeeper.h>
 #include <Common/ZooKeeper/ZooKeeperConstants.h>
+#include <Interpreters/ZooKeeperLog.h>

 #include <boost/noncopyable.hpp>
 #include <IO/ReadBuffer.h>

@@ -22,6 +23,8 @@
 namespace Coordination
 {

+using LogElements = std::vector<ZooKeeperLogElement>;
+
 struct ZooKeeperResponse : virtual Response
 {
     XID xid = 0;

@@ -32,6 +35,8 @@ struct ZooKeeperResponse : virtual Response
     virtual void writeImpl(WriteBuffer &) const = 0;
     virtual void write(WriteBuffer & out) const;
     virtual OpNum getOpNum() const = 0;
+    virtual void fillLogElements(LogElements & elems, size_t idx) const;
+    virtual int32_t tryGetOpNum() const { return static_cast<int32_t>(getOpNum()); }
 };

 using ZooKeeperResponsePtr = std::shared_ptr<ZooKeeperResponse>;

@@ -63,6 +68,8 @@ struct ZooKeeperRequest : virtual Request

     virtual ZooKeeperResponsePtr makeResponse() const = 0;
     virtual bool isReadRequest() const = 0;

+    virtual void createLogElements(LogElements & elems) const;
 };

 using ZooKeeperRequestPtr = std::shared_ptr<ZooKeeperRequest>;

@@ -119,6 +126,9 @@ struct ZooKeeperWatchResponse final : WatchResponse, ZooKeeperResponse
 {
     throw Exception("OpNum for watch response doesn't exist", Error::ZRUNTIMEINCONSISTENCY);
 }

+    void fillLogElements(LogElements & elems, size_t idx) const override;
+    int32_t tryGetOpNum() const override { return 0; }
 };

 struct ZooKeeperAuthRequest final : ZooKeeperRequest

@@ -188,6 +198,8 @@ struct ZooKeeperCreateRequest final : public CreateRequest, ZooKeeperRequest
     bool isReadRequest() const override { return false; }

     size_t bytesSize() const override { return CreateRequest::bytesSize() + sizeof(xid) + sizeof(has_watch); }

+    void createLogElements(LogElements & elems) const override;
 };

 struct ZooKeeperCreateResponse final : CreateResponse, ZooKeeperResponse

@@ -199,6 +211,8 @@ struct ZooKeeperCreateResponse final : CreateResponse, ZooKeeperResponse
     OpNum getOpNum() const override { return OpNum::Create; }

     size_t bytesSize() const override { return CreateResponse::bytesSize() + sizeof(xid) + sizeof(zxid); }

+    void fillLogElements(LogElements & elems, size_t idx) const override;
 };

 struct ZooKeeperRemoveRequest final : RemoveRequest, ZooKeeperRequest

@@ -214,6 +228,8 @@ struct ZooKeeperRemoveRequest final : RemoveRequest, ZooKeeperRequest
     bool isReadRequest() const override { return false; }

     size_t bytesSize() const override { return RemoveRequest::bytesSize() + sizeof(xid); }

+    void createLogElements(LogElements & elems) const override;
 };

 struct ZooKeeperRemoveResponse final : RemoveResponse, ZooKeeperResponse

@@ -244,6 +260,8 @@ struct ZooKeeperExistsResponse final : ExistsResponse, ZooKeeperResponse
     OpNum getOpNum() const override { return OpNum::Exists; }

     size_t bytesSize() const override { return ExistsResponse::bytesSize() + sizeof(xid) + sizeof(zxid); }

+    void fillLogElements(LogElements & elems, size_t idx) const override;
 };

 struct ZooKeeperGetRequest final : GetRequest, ZooKeeperRequest

@@ -265,6 +283,8 @@ struct ZooKeeperGetResponse final : GetResponse, ZooKeeperResponse
     OpNum getOpNum() const override { return OpNum::Get; }

     size_t bytesSize() const override { return GetResponse::bytesSize() + sizeof(xid) + sizeof(zxid); }

+    void fillLogElements(LogElements & elems, size_t idx) const override;
 };

 struct ZooKeeperSetRequest final : SetRequest, ZooKeeperRequest

@@ -279,6 +299,8 @@ struct ZooKeeperSetRequest final : SetRequest, ZooKeeperRequest
     bool isReadRequest() const override { return false; }

     size_t bytesSize() const override { return SetRequest::bytesSize() + sizeof(xid); }

+    void createLogElements(LogElements & elems) const override;
 };

 struct ZooKeeperSetResponse final : SetResponse, ZooKeeperResponse

@@ -288,6 +310,8 @@ struct ZooKeeperSetResponse final : SetResponse, ZooKeeperResponse
     OpNum getOpNum() const override { return OpNum::Set; }

     size_t bytesSize() const override { return SetResponse::bytesSize() + sizeof(xid) + sizeof(zxid); }

+    void fillLogElements(LogElements & elems, size_t idx) const override;
 };

 struct ZooKeeperListRequest : ListRequest, ZooKeeperRequest

@@ -313,6 +337,8 @@ struct ZooKeeperListResponse : ListResponse, ZooKeeperResponse
     OpNum getOpNum() const override { return OpNum::List; }

     size_t bytesSize() const override { return ListResponse::bytesSize() + sizeof(xid) + sizeof(zxid); }

+    void fillLogElements(LogElements & elems, size_t idx) const override;
 };

 struct ZooKeeperSimpleListResponse final : ZooKeeperListResponse

@@ -333,6 +359,8 @@ struct ZooKeeperCheckRequest final : CheckRequest, ZooKeeperRequest
     bool isReadRequest() const override { return true; }

     size_t bytesSize() const override { return CheckRequest::bytesSize() + sizeof(xid) + sizeof(has_watch); }

+    void createLogElements(LogElements & elems) const override;
 };

 struct ZooKeeperCheckResponse final : CheckResponse, ZooKeeperResponse

@@ -409,6 +437,8 @@ struct ZooKeeperMultiRequest final : MultiRequest, ZooKeeperRequest
     bool isReadRequest() const override;

     size_t bytesSize() const override { return MultiRequest::bytesSize() + sizeof(xid) + sizeof(has_watch); }

+    void createLogElements(LogElements & elems) const override;
 };

 struct ZooKeeperMultiResponse final : MultiResponse, ZooKeeperResponse

@@ -433,6 +463,8 @@ struct ZooKeeperMultiResponse final : MultiResponse, ZooKeeperResponse
     void writeImpl(WriteBuffer & out) const override;

     size_t bytesSize() const override { return MultiResponse::bytesSize() + sizeof(xid) + sizeof(zxid); }

+    void fillLogElements(LogElements & elems, size_t idx) const override;
 };

 /// Fake internal coordination (keeper) response. Never received from client
@@ -311,11 +311,14 @@ ZooKeeper::ZooKeeper(
     const String & auth_data,
     Poco::Timespan session_timeout_,
     Poco::Timespan connection_timeout,
-    Poco::Timespan operation_timeout_)
+    Poco::Timespan operation_timeout_,
+    std::shared_ptr<ZooKeeperLog> zk_log_)
     : root_path(root_path_),
     session_timeout(session_timeout_),
     operation_timeout(std::min(operation_timeout_, session_timeout_))
 {
+    std::atomic_store(&zk_log, std::move(zk_log_));
+
     if (!root_path.empty())
     {
         if (root_path.back() == '/')

@@ -578,6 +581,8 @@ void ZooKeeper::sendThread()
     info.request->probably_sent = true;
     info.request->write(*out);

+    logOperationIfNeeded(info.request);
+
     /// We sent close request, exit
     if (info.request->xid == CLOSE_XID)
         break;

@@ -747,6 +752,9 @@ void ZooKeeper::receiveEvent()
     if (!response)
         response = request_info.request->makeResponse();

+    response->xid = xid;
+    response->zxid = zxid;
+
     if (err != Error::ZOK)
     {
         response->error = err;

@@ -785,6 +793,8 @@ void ZooKeeper::receiveEvent()
     int32_t actual_length = in->count() - count_before_event;
     if (length != actual_length)
         throw Exception("Response length doesn't match. Expected: " + DB::toString(length) + ", actual: " + DB::toString(actual_length), Error::ZMARSHALLINGERROR);

+    logOperationIfNeeded(request_info.request, response); //-V614
 }
 catch (...)
 {

@@ -802,6 +812,8 @@ void ZooKeeper::receiveEvent()
 {
     if (request_info.callback)
         request_info.callback(*response);

+    logOperationIfNeeded(request_info.request, response);
 }
 catch (...)
 {

@@ -880,17 +892,19 @@ void ZooKeeper::finalize(bool error_send, bool error_receive)
 for (auto & op : operations)
 {
     RequestInfo & request_info = op.second;
-    ResponsePtr response = request_info.request->makeResponse();
+    ZooKeeperResponsePtr response = request_info.request->makeResponse();

     response->error = request_info.request->probably_sent
         ? Error::ZCONNECTIONLOSS
         : Error::ZSESSIONEXPIRED;
+    response->xid = request_info.request->xid;

     if (request_info.callback)
     {
         try
         {
             request_info.callback(*response);
+            logOperationIfNeeded(request_info.request, response, true);
         }
         catch (...)
         {

@@ -942,13 +956,15 @@ void ZooKeeper::finalize(bool error_send, bool error_receive)
 {
     if (info.callback)
     {
-        ResponsePtr response = info.request->makeResponse();
+        ZooKeeperResponsePtr response = info.request->makeResponse();
         if (response)
         {
             response->error = Error::ZSESSIONEXPIRED;
+            response->xid = info.request->xid;
             try
             {
                 info.callback(*response);
+                logOperationIfNeeded(info.request, response, true);
             }
             catch (...)
             {

@@ -993,6 +1009,12 @@ void ZooKeeper::pushRequest(RequestInfo && info)
         throw Exception("xid equal to close_xid", Error::ZSESSIONEXPIRED);
     if (info.request->xid < 0)
         throw Exception("XID overflow", Error::ZSESSIONEXPIRED);
+
+    if (auto * multi_request = dynamic_cast<ZooKeeperMultiRequest *>(info.request.get()))
+    {
+        for (auto & request : multi_request->requests)
+            dynamic_cast<ZooKeeperRequest &>(*request).xid = multi_request->xid;
+    }
 }

 /// We must serialize 'pushRequest' and 'finalize' (from sendThread, receiveThread) calls

@@ -1190,4 +1212,53 @@ void ZooKeeper::close()
     ProfileEvents::increment(ProfileEvents::ZooKeeperClose);
 }

+
+void ZooKeeper::setZooKeeperLog(std::shared_ptr<DB::ZooKeeperLog> zk_log_)
+{
+    /// logOperationIfNeeded(...) uses zk_log and can be called from different threads, so we have to use atomic shared_ptr
+    std::atomic_store(&zk_log, std::move(zk_log_));
+}
+
+void ZooKeeper::logOperationIfNeeded(const ZooKeeperRequestPtr & request, const ZooKeeperResponsePtr & response, bool finalize)
+{
+    auto maybe_zk_log = std::atomic_load(&zk_log);
+    if (!maybe_zk_log)
+        return;
+
+    ZooKeeperLogElement::Type log_type = ZooKeeperLogElement::UNKNOWN;
+    Decimal64 event_time = std::chrono::duration_cast<std::chrono::microseconds>(
+        std::chrono::system_clock::now().time_since_epoch()
+        ).count();
+    LogElements elems;
+    if (request)
+    {
+        request->createLogElements(elems);
+        log_type = ZooKeeperLogElement::REQUEST;
+    }
+    else
+    {
+        assert(response);
+        assert(response->xid == PING_XID || response->xid == WATCH_XID);
+        elems.emplace_back();
+    }
+
+    if (response)
+    {
+        response->fillLogElements(elems, 0);
+        log_type = ZooKeeperLogElement::RESPONSE;
+    }
+
+    if (finalize)
+        log_type = ZooKeeperLogElement::FINALIZE;
+
+    for (auto & elem : elems)
+    {
+        elem.type = log_type;
+        elem.event_time = event_time;
+        elem.address = socket.peerAddress();
+        elem.session_id = session_id;
+        maybe_zk_log->add(elem);
+    }
+}
+
 }
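
The comment in setZooKeeperLog() is the key to this whole hunk: the logger can be swapped while the send/receive threads are reading it, so every access goes through the atomic shared_ptr overloads, and readers take a local copy that keeps the object alive. A self-contained sketch of that pattern (note these free-function overloads in <memory> are deprecated in C++20 in favour of std::atomic<std::shared_ptr<T>>):

    #include <atomic>
    #include <iostream>
    #include <memory>
    #include <string>
    #include <thread>

    struct Log { void add(const std::string & s) { std::cout << s << '\n'; } };

    std::shared_ptr<Log> log_ptr; // may be replaced concurrently

    void setLog(std::shared_ptr<Log> new_log)
    {
        // Publish the new logger atomically.
        std::atomic_store(&log_ptr, std::move(new_log));
    }

    void logIfNeeded(const std::string & message)
    {
        // The local copy keeps the logger alive even if it is swapped now.
        auto maybe_log = std::atomic_load(&log_ptr);
        if (!maybe_log)
            return;
        maybe_log->add(message);
    }

    int main()
    {
        std::thread writer([] { setLog(std::make_shared<Log>()); });
        std::thread reader([] { logIfNeeded("operation"); }); // may or may not print
        writer.join();
        reader.join();
    }
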
@@ -80,6 +80,10 @@ namespace CurrentMetrics
     extern const Metric ZooKeeperSession;
 }

+namespace DB
+{
+    class ZooKeeperLog;
+}
+
 namespace Coordination
 {

@@ -110,7 +114,8 @@ public:
     const String & auth_data,
     Poco::Timespan session_timeout_,
     Poco::Timespan connection_timeout,
-    Poco::Timespan operation_timeout_);
+    Poco::Timespan operation_timeout_,
+    std::shared_ptr<ZooKeeperLog> zk_log_);

     ~ZooKeeper() override;

@@ -184,6 +189,8 @@ public:

     void finalize() override { finalize(false, false); }

+    void setZooKeeperLog(std::shared_ptr<DB::ZooKeeperLog> zk_log_);
+
 private:
     String root_path;
     ACLs default_acls;

@@ -258,7 +265,10 @@ private:
     template <typename T>
     void read(T &);

+    void logOperationIfNeeded(const ZooKeeperRequestPtr & request, const ZooKeeperResponsePtr & response = nullptr, bool finalize = false);
+
     CurrentMetrics::Increment active_session_metric_increment{CurrentMetrics::ZooKeeperSession};
+    std::shared_ptr<ZooKeeperLog> zk_log;
 };

 }
@@ -25,7 +25,7 @@ int main(int argc, char ** argv)

     DB::ConfigProcessor processor(argv[1], false, true);
     auto config = processor.loadConfig().configuration;
-    zkutil::ZooKeeper zk(*config, "zookeeper");
+    zkutil::ZooKeeper zk(*config, "zookeeper", nullptr);
     zkutil::EventPtr watch = std::make_shared<Poco::Event>();

     /// NOTE: setting watches in multiple threads because doing it in a single thread is too slow.

@@ -40,7 +40,7 @@ try
 }

-    ZooKeeper zk(nodes, {}, {}, {}, {5, 0}, {0, 50000}, {0, 50000});
+    ZooKeeper zk(nodes, {}, {}, {}, {5, 0}, {0, 50000}, {0, 50000}, nullptr);

     Poco::Event event(true);

@@ -5,7 +5,7 @@
 int main()
 try
 {
-    Coordination::ZooKeeper zookeeper({Coordination::ZooKeeper::Node{Poco::Net::SocketAddress{"localhost:2181"}, false}}, "", "", "", {30, 0}, {0, 50000}, {0, 50000});
+    Coordination::ZooKeeper zookeeper({Coordination::ZooKeeper::Node{Poco::Net::SocketAddress{"localhost:2181"}, false}}, "", "", "", {30, 0}, {0, 50000}, {0, 50000}, nullptr);

     zookeeper.create("/test", "hello", false, false, {}, [](const Coordination::CreateResponse & response)
     {
@@ -2,8 +2,10 @@

 // .h autogenerated by cmake!

+#cmakedefine01 USE_BASE64
 #cmakedefine01 USE_RE2_ST
 #cmakedefine01 USE_SSL
+#cmakedefine01 USE_INTERNAL_SSL_LIBRARY
 #cmakedefine01 USE_HDFS
 #cmakedefine01 USE_INTERNAL_HDFS3_LIBRARY
 #cmakedefine01 USE_AWS_S3
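
For readers unfamiliar with the directive: `#cmakedefine01 X` is expanded by CMake's configure_file() into `#define X 1` or `#define X 0`, so the macro is always defined and can be tested with plain `#if` (no `#ifdef` needed). A hypothetical consumer, with the generated definition inlined for illustration:

    #include <iostream>

    // Hypothetical generated header content: configure_file() turned
    // "#cmakedefine01 USE_BASE64" into a 0/1 definition, so the macro
    // always exists and plain #if is safe.
    #define USE_BASE64 1

    int main()
    {
    #if USE_BASE64
        std::cout << "built with base64 support\n";
    #else
        std::cout << "built without base64 support\n";
    #endif
    }
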
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user