Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-21 23:21:59 +00:00)

Commit 8cd560d5ad: Merge remote-tracking branch 'origin/master' into HEAD
@@ -57,8 +57,8 @@ if (SANITIZE)
    endif ()

elseif (SANITIZE STREQUAL "undefined")
    set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SAN_FLAGS} -fsanitize=undefined -fno-sanitize-recover=all -fno-sanitize=float-divide-by-zero")
    set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${SAN_FLAGS} -fsanitize=undefined -fno-sanitize-recover=all -fno-sanitize=float-divide-by-zero")
    set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SAN_FLAGS} -fsanitize=undefined -fno-sanitize-recover=all -fno-sanitize=float-divide-by-zero -fsanitize-blacklist=${CMAKE_SOURCE_DIR}/tests/ubsan_suppressions.txt")
    set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${SAN_FLAGS} -fsanitize=undefined -fno-sanitize-recover=all -fno-sanitize=float-divide-by-zero -fsanitize-blacklist=${CMAKE_SOURCE_DIR}/tests/ubsan_suppressions.txt")
    if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
        set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fsanitize=undefined")
    endif()
debian/clickhouse-server.init (vendored), 69 lines changed
@@ -153,82 +153,19 @@ initdb()

start()
{
    [ -x $CLICKHOUSE_BINDIR/$PROGRAM ] || exit 0
    local EXIT_STATUS
    EXIT_STATUS=0

    echo -n "Start $PROGRAM service: "

    if is_running; then
        echo -n "already running "
        EXIT_STATUS=1
    else
        ulimit -n 262144
        mkdir -p $CLICKHOUSE_PIDDIR
        chown -R $CLICKHOUSE_USER:$CLICKHOUSE_GROUP $CLICKHOUSE_PIDDIR
        initdb
        if ! is_running; then
            # Lock should not be held while running child process, so we release the lock. Note: obviously, there is race condition.
            # But clickhouse-server has protection from simultaneous runs with same data directory.
            su -s $SHELL ${CLICKHOUSE_USER} -c "$FLOCK -u 9; $CLICKHOUSE_PROGRAM_ENV exec -a \"$PROGRAM\" \"$CLICKHOUSE_BINDIR/$PROGRAM\" --daemon --pid-file=\"$CLICKHOUSE_PIDFILE\" --config-file=\"$CLICKHOUSE_CONFIG\""
            EXIT_STATUS=$?
            if [ $EXIT_STATUS -ne 0 ]; then
                return $EXIT_STATUS
            fi
        fi
    fi

    if [ $EXIT_STATUS -eq 0 ]; then
        attempts=0
        while ! is_running && [ $attempts -le ${CLICKHOUSE_START_TIMEOUT:=10} ]; do
            attempts=$(($attempts + 1))
            sleep 1
        done
        if is_running; then
            echo "DONE"
        else
            echo "UNKNOWN"
        fi
    else
        echo "FAILED"
    fi

    return $EXIT_STATUS
    ${CLICKHOUSE_GENERIC_PROGRAM} start --user "${CLICKHOUSE_USER}" --pid-path "${CLICKHOUSE_PIDDIR}" --config-path "${CLICKHOUSE_CONFDIR}" --binary-path "${CLICKHOUSE_BINDIR}"
}


stop()
{
    #local EXIT_STATUS
    EXIT_STATUS=0

    if [ -f $CLICKHOUSE_PIDFILE ]; then

        echo -n "Stop $PROGRAM service: "

        kill -TERM $(cat "$CLICKHOUSE_PIDFILE")

        if ! wait_for_done ${CLICKHOUSE_STOP_TIMEOUT}; then
            EXIT_STATUS=2
            echo "TIMEOUT"
        else
            echo "DONE"
        fi

    fi
    return $EXIT_STATUS
    ${CLICKHOUSE_GENERIC_PROGRAM} stop --pid-path "${CLICKHOUSE_PIDDIR}"
}


restart()
{
    check_config
    if stop; then
        if start; then
            return 0
        fi
    fi
    return 1
    ${CLICKHOUSE_GENERIC_PROGRAM} restart --user "${CLICKHOUSE_USER}" --pid-path "${CLICKHOUSE_PIDDIR}" --config-path "${CLICKHOUSE_CONFDIR}" --binary-path "${CLICKHOUSE_BINDIR}"
}
debian/clickhouse-server.postinst (vendored), 104 lines changed
@@ -2,6 +2,7 @@
set -e
# set -x

PROGRAM=clickhouse-server
CLICKHOUSE_USER=${CLICKHOUSE_USER:=clickhouse}
CLICKHOUSE_GROUP=${CLICKHOUSE_GROUP:=${CLICKHOUSE_USER}}
# Please note that we don't support paths with whitespaces. This is rather ignorant.
@@ -12,6 +13,7 @@ CLICKHOUSE_BINDIR=${CLICKHOUSE_BINDIR:=/usr/bin}
CLICKHOUSE_GENERIC_PROGRAM=${CLICKHOUSE_GENERIC_PROGRAM:=clickhouse}
EXTRACT_FROM_CONFIG=${CLICKHOUSE_GENERIC_PROGRAM}-extract-from-config
CLICKHOUSE_CONFIG=$CLICKHOUSE_CONFDIR/config.xml
CLICKHOUSE_PIDDIR=/var/run/$PROGRAM

[ -f /usr/share/debconf/confmodule ] && . /usr/share/debconf/confmodule
[ -f /etc/default/clickhouse ] && . /etc/default/clickhouse
@@ -41,105 +43,5 @@ if [ "$1" = configure ] || [ -n "$not_deb_os" ]; then
        fi
    fi

    # Make sure the administrative user exists
    if ! getent passwd ${CLICKHOUSE_USER} > /dev/null; then
        if [ -n "$not_deb_os" ]; then
            useradd -r -s /bin/false --home-dir /nonexistent ${CLICKHOUSE_USER} > /dev/null
        else
            adduser --system --disabled-login --no-create-home --home /nonexistent \
                --shell /bin/false --group --gecos "ClickHouse server" ${CLICKHOUSE_USER} > /dev/null
        fi
    fi

    # if the user was created manually, make sure the group is there as well
    if ! getent group ${CLICKHOUSE_GROUP} > /dev/null; then
        groupadd -r ${CLICKHOUSE_GROUP} > /dev/null
    fi

    # make sure user is in the correct group
    if ! id -Gn ${CLICKHOUSE_USER} | grep -qw ${CLICKHOUSE_USER}; then
        usermod -a -G ${CLICKHOUSE_GROUP} ${CLICKHOUSE_USER} > /dev/null
    fi

    # check validity of user and group
    if [ "$(id -u ${CLICKHOUSE_USER})" -eq 0 ]; then
        echo "The ${CLICKHOUSE_USER} system user must not have uid 0 (root).
Please fix this and reinstall this package." >&2
        exit 1
    fi

    if [ "$(id -g ${CLICKHOUSE_GROUP})" -eq 0 ]; then
        echo "The ${CLICKHOUSE_USER} system user must not have root as primary group.
Please fix this and reinstall this package." >&2
        exit 1
    fi

    if [ -x "$CLICKHOUSE_BINDIR/$EXTRACT_FROM_CONFIG" ] && [ -f "$CLICKHOUSE_CONFIG" ]; then
        if [ -z "$SHELL" ]; then
            SHELL="/bin/sh"
        fi
        CLICKHOUSE_DATADIR_FROM_CONFIG=$(su -s $SHELL ${CLICKHOUSE_USER} -c "$CLICKHOUSE_BINDIR/$EXTRACT_FROM_CONFIG --config-file=\"$CLICKHOUSE_CONFIG\" --key=path") ||:
        echo "Path to data directory in ${CLICKHOUSE_CONFIG}: ${CLICKHOUSE_DATADIR_FROM_CONFIG}"
    fi
    CLICKHOUSE_DATADIR_FROM_CONFIG=${CLICKHOUSE_DATADIR_FROM_CONFIG:=$CLICKHOUSE_DATADIR}

    if [ ! -d ${CLICKHOUSE_DATADIR_FROM_CONFIG} ]; then
        mkdir -p ${CLICKHOUSE_DATADIR_FROM_CONFIG}
        chown ${CLICKHOUSE_USER}:${CLICKHOUSE_GROUP} ${CLICKHOUSE_DATADIR_FROM_CONFIG}
        chmod 700 ${CLICKHOUSE_DATADIR_FROM_CONFIG}
    fi

    if [ -d ${CLICKHOUSE_CONFDIR} ]; then
        mkdir -p ${CLICKHOUSE_CONFDIR}/users.d
        mkdir -p ${CLICKHOUSE_CONFDIR}/config.d
        rm -fv ${CLICKHOUSE_CONFDIR}/*-preprocessed.xml ||:
    fi

    [ -e ${CLICKHOUSE_CONFDIR}/preprocessed ] || ln -s ${CLICKHOUSE_DATADIR_FROM_CONFIG}/preprocessed_configs ${CLICKHOUSE_CONFDIR}/preprocessed ||:

    if [ ! -d ${CLICKHOUSE_LOGDIR} ]; then
        mkdir -p ${CLICKHOUSE_LOGDIR}
        chown root:${CLICKHOUSE_GROUP} ${CLICKHOUSE_LOGDIR}
        # Allow everyone to read logs, root and clickhouse to read-write
        chmod 775 ${CLICKHOUSE_LOGDIR}
    fi

    # Set net_admin capabilities to support introspection of "taskstats" performance metrics from the kernel
    # and ipc_lock capabilities to allow mlock of clickhouse binary.

    # 1. Check that "setcap" tool exists.
    # 2. Check that an arbitrary program with installed capabilities can run.
    # 3. Set the capabilities.

    # The second is important for Docker and systemd-nspawn.
    # When the container has no capabilities,
    # but the executable file inside the container has capabilities,
    # then attempt to run this file will end up with a cryptic "Operation not permitted" message.

    TMPFILE=/tmp/test_setcap.sh

    command -v setcap >/dev/null \
        && echo > $TMPFILE && chmod a+x $TMPFILE && $TMPFILE && setcap "cap_net_admin,cap_ipc_lock,cap_sys_nice+ep" $TMPFILE && $TMPFILE && rm $TMPFILE \
        && setcap "cap_net_admin,cap_ipc_lock,cap_sys_nice+ep" "${CLICKHOUSE_BINDIR}/${CLICKHOUSE_GENERIC_PROGRAM}" \
        || echo "Cannot set 'net_admin' or 'ipc_lock' or 'sys_nice' capability for clickhouse binary. This is optional. Taskstats accounting will be disabled. To enable taskstats accounting you may add the required capability later manually."

    # Clean old dynamic compilation results
    if [ -d "${CLICKHOUSE_DATADIR_FROM_CONFIG}/build" ]; then
        rm -f ${CLICKHOUSE_DATADIR_FROM_CONFIG}/build/*.cpp ${CLICKHOUSE_DATADIR_FROM_CONFIG}/build/*.so ||:
    fi

    if [ -f /usr/share/debconf/confmodule ]; then
        db_get clickhouse-server/default-password
        defaultpassword="$RET"
        if [ -n "$defaultpassword" ]; then
            echo "<yandex><users><default><password>$defaultpassword</password></default></users></yandex>" > ${CLICKHOUSE_CONFDIR}/users.d/default-password.xml
            chown ${CLICKHOUSE_USER}:${CLICKHOUSE_GROUP} ${CLICKHOUSE_CONFDIR}/users.d/default-password.xml
            chmod 600 ${CLICKHOUSE_CONFDIR}/users.d/default-password.xml
        fi

        # everything went well, so now let's reset the password
        db_set clickhouse-server/default-password ""
        # ... done with debconf here
        db_stop
    fi
    ${CLICKHOUSE_GENERIC_PROGRAM} install --user "${CLICKHOUSE_USER}" --group "${CLICKHOUSE_GROUP}" --pid-path "${CLICKHOUSE_PIDDIR}" --config-path "${CLICKHOUSE_CONFDIR}" --binary-path "${CLICKHOUSE_BINDIR}" --log-path "${CLICKHOUSE_LOGDIR}" --data-path "${CLICKHOUSE_DATADIR}"
fi
@@ -232,6 +232,8 @@ TESTS_TO_SKIP=(
    01268_dictionary_direct_layout
    01280_ssd_complex_key_dictionary
    01281_group_by_limit_memory_tracking # max_memory_usage_for_user can interfere with other queries running concurrently
    01318_encrypt # Depends on OpenSSL
    01318_decrypt # Depends on OpenSSL
    01281_unsucceeded_insert_select_queries_counter
    01292_create_user
    01294_lazy_database_concurrent
@@ -48,12 +48,13 @@ This table shows queries that take significantly longer to process on the client

#### Unexpected Query Duration
Action required for every item -- these are errors that must be fixed.

Queries that have "short" duration (on the order of 0.1 s) can't be reliably tested in a normal way, where we perform a small number (about ten) of measurements for each server, because the signal-to-noise ratio is much smaller. There is a special mode for such queries that instead runs them for a fixed amount of time, normally with a much higher number of measurements (up to thousands). This mode must be explicitly enabled by the test author to avoid accidental errors. It must be used only for queries that are meant to complete "immediately", such as `select count(*)`. If your query is not supposed to be "immediate", try to make it run longer, e.g. by processing more data.
A query is supposed to run longer than 0.1 second. If your query runs faster, increase the amount of processed data to bring the run time above this threshold. You can use a bigger table (e.g. `hits_100m` instead of `hits_10m`), increase a `LIMIT`, make a query single-threaded, and so on. Queries that are too fast suffer from poor stability and precision.

This table shows queries for which the "short" marking is not consistent with the actual query run time -- i.e., a query runs for a long time but is marked as short, or it runs very fast but is not marked as short.
Sometimes you want to test a query that is supposed to complete "instantaneously", i.e. in sublinear time. This might be `count(*)`, or parsing a complicated tuple. It might not be practical or even possible to increase the run time of such queries by adding more data. For such queries there is a special comparison mode which runs them for a fixed amount of time, instead of a fixed number of iterations like we do normally. This mode is inferior to the normal mode, because the influence of noise and overhead is higher, which leads to less precise and stable results.

If your query is really supposed to complete "immediately" and can't be made to run longer, you have to mark it as "short". To do so, write `<query short="1">...` in the test file. The value of the "short" attribute is evaluated as a python expression, and substitutions are performed, so you can write something like `<query short="{column1} = {column2}">select count(*) from table where {column1} > {column2}</query>`, to mark only a particular combination of variables as short.
If it is impossible to increase the run time of a query and it is supposed to complete "immediately", you have to explicitly mark this in the test. To do so, add a `short` attribute to the query tag in the test file: `<query short="1">...`. The value of the `short` attribute is evaluated as a python expression, and substitutions are performed, so you can write something like `<query short="{column1} = {column2}">select count(*) from table where {column1} > {column2}</query>`, to mark only a particular combination of variables as short.

This table shows queries for which the `short` marking is not consistent with the actual query run time -- i.e., a query runs for a normal time but is marked as `short`, or it runs faster than normal but is not marked as `short`.

#### Partial Queries
Action required for the cells marked in red.
@@ -468,14 +468,14 @@ if args.report == 'main':
        return

    columns = [
        'Test', #0
        'Wall clock time, s', #1
        'Total client time, s', #2
        'Total queries', #3
        'Longest query<br>(sum for all runs), s', #4
        'Avg wall clock time<br>(sum for all runs), s', #5
        'Shortest query<br>(sum for all runs), s', #6
        '', # Runs #7
        'Test', #0
        'Wall clock time, entire test, s', #1
        'Total client time for measured query runs, s', #2
        'Queries', #3
        'Longest query, total for measured runs, s', #4
        'Wall clock time per query, s', #5
        'Shortest query, total for measured runs, s', #6
        '', # Runs #7
    ]
    attrs = ['' for c in columns]
    attrs[7] = None
@@ -305,6 +305,10 @@ When enabled, replace empty input fields in TSV with default values. For complex

Disabled by default.

## input_format_tsv_enum_as_number {#settings-input_format_tsv_enum_as_number}

For TSV input format, switches to parsing enum values as enum ids.

## input_format_null_as_default {#settings-input-format-null-as-default}

Enables or disables using default values if input data contain `NULL`, but the data type of the corresponding column is not `Nullable(T)` (for text input formats).
@@ -1161,6 +1165,10 @@ The character is interpreted as a delimiter in the CSV data. By default, the del

For CSV input format, enables or disables parsing of unquoted `NULL` as a literal (synonym for `\N`).

## input_format_csv_enum_as_number {#settings-input_format_csv_enum_as_number}

For CSV input format, switches to parsing enum values as enum ids.

## output_format_csv_crlf_end_of_line {#settings-output-format-csv-crlf-end-of-line}

Use DOS/Windows-style line separator (CRLF) in CSV instead of Unix style (LF).
@@ -1398,6 +1406,17 @@ Possible values:

Default value: 0

## allow_nondeterministic_optimize_skip_unused_shards {#allow-nondeterministic-optimize-skip-unused-shards}

Allow nondeterministic functions (like `rand` or `dictGet`, since the latter has some caveats with updates) in the sharding key.

Possible values:

- 0 — Disallowed.
- 1 — Allowed.

Default value: 0

## optimize_skip_unused_shards_nesting {#optimize-skip-unused-shards-nesting}

Controls [`optimize_skip_unused_shards`](#optimize-skip-unused-shards) (hence it still requires [`optimize_skip_unused_shards`](#optimize-skip-unused-shards)) depending on the nesting level of the distributed query (the case when you have a `Distributed` table that looks into another `Distributed` table).
@@ -34,6 +34,7 @@ ClickHouse does not delete data from the table automatically
- `event_date` ([Date](../../sql-reference/data-types/date.md)) — the date when the query started.
- `event_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — the time when the query started.
- `query_start_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — the start time of query execution.
- `query_start_time_microseconds` ([DateTime64](../../sql-reference/data-types/datetime64.md)) — the start time of query execution, with microsecond precision.
- `query_duration_ms` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — query execution duration in milliseconds.
- `read_rows` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — the total number of rows read from all tables and table functions participating in the query. It includes ordinary subqueries and subqueries for `IN` and `JOIN`. For distributed queries, `read_rows` includes the total number of rows read on all replicas. Each replica sends its own `read_rows` value, and the query initiator server sums up all received and local values. Cache volumes are not taken into account.
- `read_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — the total number of bytes read from all tables and table functions participating in the query. It includes ordinary subqueries and subqueries for `IN` and `JOIN`. For distributed queries, `read_bytes` includes the total number of bytes read on all replicas. Each replica sends its own `read_bytes` value, and the query initiator server sums up all received and local values. Cache volumes are not taken into account.
@@ -16,6 +16,7 @@ ClickHouse does not delete data from the table automatically
- `event_date` ([Date](../../sql-reference/data-types/date.md)) — the date when the thread finished execution of the query.
- `event_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — the date and time when the thread finished execution of the query.
- `query_start_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — the start time of query execution.
- `query_start_time_microseconds` ([DateTime64](../../sql-reference/data-types/datetime64.md)) — the start time of query execution, with microsecond precision.
- `query_duration_ms` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — query processing duration in milliseconds.
- `read_rows` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — the number of rows read.
- `read_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — the number of bytes read.
@@ -14,7 +14,7 @@ Jinja2==2.11.2
jinja2-highlight==0.6.1
jsmin==2.2.2
livereload==2.6.2
Markdown==3.2.1
Markdown==3.3.2
MarkupSafe==1.1.1
mkdocs==1.1.2
mkdocs-htmlproofer-plugin==0.0.3
@@ -548,11 +548,27 @@ int mainEntryClickHouseInstall(int argc, char ** argv)
            users_config_file.string(), users_d.string());
    }

    /// Set capabilities for the binary.
    /** Set capabilities for the binary.
      *
      * 1. Check that "setcap" tool exists.
      * 2. Check that an arbitrary program with installed capabilities can run.
      * 3. Set the capabilities.
      *
      * The second is important for Docker and systemd-nspawn.
      * When the container has no capabilities,
      * but the executable file inside the container has capabilities,
      * then attempt to run this file will end up with a cryptic "Operation not permitted" message.
      */

#if defined(__linux__)
    fmt::print("Setting capabilities for clickhouse binary. This is optional.\n");
    std::string command = fmt::format("command -v setcap && setcap 'cap_net_admin,cap_ipc_lock,cap_sys_nice+ep' {}", main_bin_path.string());
    std::string command = fmt::format("command -v setcap >/dev/null"
        " && echo > {0} && chmod a+x {0} && {0} && setcap 'cap_net_admin,cap_ipc_lock,cap_sys_nice+ep' {0} && {0} && rm {0}"
        " && setcap 'cap_net_admin,cap_ipc_lock,cap_sys_nice+ep' {1}"
        " || echo \"Cannot set 'net_admin' or 'ipc_lock' or 'sys_nice' capability for clickhouse binary."
        " This is optional. Taskstats accounting will be disabled."
        " To enable taskstats accounting you may add the required capability later manually.\"",
        "/tmp/test_setcap.sh", main_bin_path.string());
    fmt::print(" {}\n", command);
    executeScript(command);
#endif
@@ -722,18 +722,22 @@
    -->
    <format_schema_path>/var/lib/clickhouse/format_schemas/</format_schema_path>

    <!-- Uncomment to use query masking rules.
    <!-- Default query masking rules, matching lines would be replaced with something else in the logs
        (both text logs and system.query_log).
        name - name for the rule (optional)
        regexp - RE2 compatible regular expression (mandatory)
        replace - substitution string for sensitive data (optional, by default - six asterisks)
    -->
    <query_masking_rules>
        <rule>
            <name>hide SSN</name>
            <regexp>\b\d{3}-\d{2}-\d{4}\b</regexp>
            <replace>000-00-0000</replace>
            <name>hide encrypt/decrypt arguments</name>
            <regexp>((?:aes_)?(?:encrypt|decrypt)(?:_mysql)?)\s*\(\s*(?:'(?:\\'|.)+'|.*?)\s*\)</regexp>
            <!-- or more secure, but also more invasive:
                (aes_\w+)\s*\(.*\)
            -->
            <replace>\1(???)</replace>
        </rule>
    </query_masking_rules>
    -->

    <!-- Uncomment to use custom http handlers.
        rules are checked from top to bottom, first match runs the handler
@@ -47,7 +47,7 @@
            --button-active-color: #F00;
            --button-active-text-color: #FFF;
            --misc-text-color: #888;
            --error-color: #400; /* Light-pink on light-cyan is so neat, I even want to trigger errors to see this cool combination of colors. */
            --error-color: #400;
            --table-header-color: #102020;
            --table-hover-color: #003333;
            --null-color: #A88;
@@ -282,6 +282,9 @@

    function post()
    {
        /// TODO: Avoid race condition on subsequent requests when responses may come out of order.
        /// TODO: Check if URL already contains query string (append parameters).

        var url = document.getElementById('url').value +
            /// Ask server to allow cross-domain requests.
            '?add_http_cors_header=1' +
@@ -309,6 +312,7 @@
                    renderUnparsedResult(this.response);
                }
            } else {
                /// TODO: Proper rendering of network errors.
                renderError(this.response);
            }
        } else {
@@ -376,6 +380,7 @@
            var is_null = (cell === null);
            var content = document.createTextNode(is_null ? 'ᴺᵁᴸᴸ' : cell);
            td.appendChild(content);
            /// TODO: Execute regexp only once for each column.
            td.className = response.meta[col_idx].type.match(/^(U?Int|Decimal|Float)/) ? 'right' : 'left';
            if (is_null) {
                td.className += ' null';
@@ -400,6 +405,12 @@
    {
        clear();
        var data = document.getElementById('data-unparsed')

        if (response === '') {
            /// TODO: Fade or remove previous result when new request will be performed.
            response = 'Ok.';
        }

        data.innerText = response;
        /// inline-block make width adjust to the size of content.
        data.style.display = 'inline-block';
@@ -634,4 +634,10 @@ void ColumnString::protect()
    getOffsets().protect();
}

void ColumnString::validate() const
{
    if (!offsets.empty() && offsets.back() != chars.size())
        throw Exception(ErrorCodes::LOGICAL_ERROR, "ColumnString validation failed: size mismatch (internal logical error) {} != {}", offsets.back(), chars.size());
}

}
@@ -267,6 +267,9 @@ public:

    Offsets & getOffsets() { return offsets; }
    const Offsets & getOffsets() const { return offsets; }

    // Throws an exception if offsets/chars are messed up
    void validate() const;
};
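For context, a minimal sketch of how the new check might be used after assembling a column by hand (illustration only, not part of the commit; it assumes the offsets/chars invariant described in the comment above):

    // Illustration only: validate() as a cheap sanity check after building a
    // ColumnString row by row. Assumes the invariant offsets.back() == chars.size().
    #include <string>
    #include <vector>
    #include <Columns/ColumnString.h>

    DB::MutableColumnPtr buildChecked(const std::vector<std::string> & rows)
    {
        auto column = DB::ColumnString::create();
        for (const auto & row : rows)
            column->insertData(row.data(), row.size());

        column->validate();   /// throws LOGICAL_ERROR if chars/offsets got out of sync
        return column;
    }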
@@ -510,6 +510,8 @@ namespace ErrorCodes
    extern const int ROW_AND_ROWS_TOGETHER = 544;
    extern const int FIRST_AND_NEXT_TOGETHER = 545;
    extern const int NO_ROW_DELIMITER = 546;
    extern const int INVALID_RAID_TYPE = 547;
    extern const int UNKNOWN_VOLUME = 548;

    extern const int KEEPER_EXCEPTION = 999;
    extern const int POCO_EXCEPTION = 1000;
@@ -30,6 +30,8 @@ namespace ProfileEvents

static constexpr size_t log_peak_memory_usage_every = 1ULL << 30;

thread_local bool MemoryTracker::BlockerInThread::is_blocked = false;

MemoryTracker total_memory_tracker(nullptr, VariableContext::Global);

@@ -56,13 +58,15 @@ MemoryTracker::~MemoryTracker()
void MemoryTracker::logPeakMemoryUsage() const
{
    const auto * description = description_ptr.load(std::memory_order_relaxed);
    LOG_DEBUG(&Poco::Logger::get("MemoryTracker"), "Peak memory usage{}: {}.", (description ? " " + std::string(description) : ""), ReadableSize(peak));
    LOG_DEBUG(&Poco::Logger::get("MemoryTracker"),
        "Peak memory usage{}: {}.", (description ? " " + std::string(description) : ""), ReadableSize(peak));
}

void MemoryTracker::logMemoryUsage(Int64 current) const
{
    const auto * description = description_ptr.load(std::memory_order_relaxed);
    LOG_DEBUG(&Poco::Logger::get("MemoryTracker"), "Current memory usage{}: {}.", (description ? " " + std::string(description) : ""), ReadableSize(current));
    LOG_DEBUG(&Poco::Logger::get("MemoryTracker"),
        "Current memory usage{}: {}.", (description ? " " + std::string(description) : ""), ReadableSize(current));
}


@@ -71,7 +75,7 @@ void MemoryTracker::alloc(Int64 size)
    if (size < 0)
        throw DB::Exception(DB::ErrorCodes::LOGICAL_ERROR, "Negative size ({}) is passed to MemoryTracker. It is a bug.", size);

    if (blocker.isCancelled())
    if (BlockerInThread::isBlocked())
        return;

    /** Using memory_order_relaxed means that if allocations are done simultaneously,
@@ -86,12 +90,15 @@ void MemoryTracker::alloc(Int64 size)
    Int64 current_hard_limit = hard_limit.load(std::memory_order_relaxed);
    Int64 current_profiler_limit = profiler_limit.load(std::memory_order_relaxed);

    /// Cap the limit to the total_memory_tracker, since it may include some drift.
    /// Cap the limit to the total_memory_tracker, since it may include some drift
    /// for user-level memory tracker.
    ///
    /// And since total_memory_tracker is reset to the process resident
    /// memory periodically (in AsynchronousMetrics::update()), any limit can be
    /// capped to it, to avoid possible drift.
    if (unlikely(current_hard_limit && will_be > current_hard_limit))
    if (unlikely(current_hard_limit
        && will_be > current_hard_limit
        && level == VariableContext::User))
    {
        Int64 total_amount = total_memory_tracker.get();
        if (amount > total_amount)
@@ -104,10 +111,8 @@ void MemoryTracker::alloc(Int64 size)
    std::bernoulli_distribution fault(fault_probability);
    if (unlikely(fault_probability && fault(thread_local_rng)))
    {
        free(size);

        /// Prevent recursion. Exception::ctor -> std::string -> new[] -> MemoryTracker::alloc
        auto untrack_lock = blocker.cancel(); // NOLINT
        BlockerInThread untrack_lock;

        ProfileEvents::increment(ProfileEvents::QueryMemoryLimitExceeded);
        std::stringstream message;
@@ -118,12 +123,13 @@ void MemoryTracker::alloc(Int64 size)
            << " (attempt to allocate chunk of " << size << " bytes)"
            << ", maximum: " << formatReadableSizeWithBinarySuffix(current_hard_limit);

        amount.fetch_sub(size, std::memory_order_relaxed);
        throw DB::Exception(message.str(), DB::ErrorCodes::MEMORY_LIMIT_EXCEEDED);
    }

    if (unlikely(current_profiler_limit && will_be > current_profiler_limit))
    {
        auto no_track = blocker.cancel();
        BlockerInThread untrack_lock;
        DB::TraceCollector::collect(DB::TraceType::Memory, StackTrace(), size);
        setOrRaiseProfilerLimit((will_be + profiler_step - 1) / profiler_step * profiler_step);
    }
@@ -131,16 +137,14 @@ void MemoryTracker::alloc(Int64 size)
    std::bernoulli_distribution sample(sample_probability);
    if (unlikely(sample_probability && sample(thread_local_rng)))
    {
        auto no_track = blocker.cancel();
        BlockerInThread untrack_lock;
        DB::TraceCollector::collect(DB::TraceType::MemorySample, StackTrace(), size);
    }

    if (unlikely(current_hard_limit && will_be > current_hard_limit))
    {
        free(size);

        /// Prevent recursion. Exception::ctor -> std::string -> new[] -> MemoryTracker::alloc
        auto no_track = blocker.cancel(); // NOLINT
        BlockerInThread untrack_lock;

        ProfileEvents::increment(ProfileEvents::QueryMemoryLimitExceeded);
        std::stringstream message;
@@ -151,6 +155,7 @@ void MemoryTracker::alloc(Int64 size)
            << " (attempt to allocate chunk of " << size << " bytes)"
            << ", maximum: " << formatReadableSizeWithBinarySuffix(current_hard_limit);

        amount.fetch_sub(size, std::memory_order_relaxed);
        throw DB::Exception(message.str(), DB::ErrorCodes::MEMORY_LIMIT_EXCEEDED);
    }

@@ -177,13 +182,13 @@ void MemoryTracker::updatePeak(Int64 will_be)

void MemoryTracker::free(Int64 size)
{
    if (blocker.isCancelled())
    if (BlockerInThread::isBlocked())
        return;

    std::bernoulli_distribution sample(sample_probability);
    if (unlikely(sample_probability && sample(thread_local_rng)))
    {
        auto no_track = blocker.cancel();
        BlockerInThread untrack_lock;
        DB::TraceCollector::collect(DB::TraceType::MemorySample, StackTrace(), -size);
    }

@@ -298,11 +303,3 @@ namespace CurrentMemoryTracker
    }
}
}

DB::SimpleActionLock getCurrentMemoryTrackerActionLock()
{
    auto * memory_tracker = DB::CurrentThread::getMemoryTracker();
    if (!memory_tracker)
        return {};
    return memory_tracker->blocker.cancel();
}
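A rough illustration of the capping idea described in the comment above, with hypothetical numbers (the exact adjustment code is not fully shown in this hunk, so treat this as a sketch of the intent, not the actual implementation):

    // Sketch of the drift-capping idea: if the user-level tracker has drifted above
    // the real process RSS, the overshoot should not count against the user limit.
    #include <cstdint>

    bool exceedsUserLimit(int64_t user_tracked, int64_t process_resident,
                          int64_t user_limit, int64_t alloc_size)
    {
        int64_t will_be = user_tracked + alloc_size;
        if (user_tracked > process_resident)
            will_be -= (user_tracked - process_resident);   // discard accumulated drift
        return user_limit != 0 && will_be > user_limit;
    }

    // Example: user_tracked = 11 GiB (drifted), process_resident = 9 GiB,
    // user_limit = 10 GiB, alloc_size = 0.5 GiB -> effective 9.5 GiB, within the limit.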
@@ -3,7 +3,6 @@

#include <atomic>
#include <common/types.h>
#include <Common/CurrentMetrics.h>
#include <Common/SimpleActionBlocker.h>
#include <Common/VariableContext.h>


@@ -131,8 +130,18 @@ public:
    /// Prints info about peak memory consumption into log.
    void logPeakMemoryUsage() const;

    /// To be able to temporarily stop memory tracker
    DB::SimpleActionBlocker blocker;
    /// To be able to temporarily stop memory tracking from current thread.
    struct BlockerInThread
    {
    private:
        BlockerInThread(const BlockerInThread &) = delete;
        BlockerInThread & operator=(const BlockerInThread &) = delete;
        static thread_local bool is_blocked;
    public:
        BlockerInThread() { is_blocked = true; }
        ~BlockerInThread() { is_blocked = false; }
        static bool isBlocked() { return is_blocked; }
    };
};

extern MemoryTracker total_memory_tracker;
@@ -145,7 +154,3 @@ namespace CurrentMemoryTracker
    void realloc(Int64 old_size, Int64 new_size);
    void free(Int64 size);
}

/// Holding this object will temporarily disable memory tracking.
DB::SimpleActionLock getCurrentMemoryTrackerActionLock();
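To make the intent of the new guard concrete, here is a standalone sketch (illustration only; the names below are local to the sketch, not from the ClickHouse sources) of how a thread-local RAII blocker suppresses re-entrant tracking while an error message is being built, which is what the "Prevent recursion" comments in MemoryTracker.cpp rely on:

    // Illustration only: a RAII guard backed by a thread_local flag, mirroring the
    // BlockerInThread idea from this diff.
    #include <stdexcept>
    #include <string>

    struct TrackingBlocker
    {
        static thread_local bool is_blocked;
        TrackingBlocker() { is_blocked = true; }
        ~TrackingBlocker() { is_blocked = false; }
        static bool isBlocked() { return is_blocked; }
    };

    thread_local bool TrackingBlocker::is_blocked = false;

    void onAllocation(long long size)
    {
        if (TrackingBlocker::isBlocked())
            return;                      /// accounting is suspended on this thread

        if (size > (1LL << 30))          /// pretend the limit is exceeded
        {
            /// Building the error message allocates memory itself; the guard keeps
            /// those allocations from re-entering onAllocation() recursively.
            TrackingBlocker block_during_error_reporting;
            std::string message = "Memory limit exceeded, size = " + std::to_string(size);
            throw std::runtime_error(message);
        }
    }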
@@ -111,6 +111,7 @@ class IColumn;
    M(UInt64, distributed_group_by_no_merge, 0, "If 1, Do not merge aggregation states from different servers for distributed query processing - in case it is for certain that there are different keys on different shards. If 2 - same as 1 but also apply ORDER BY and LIMIT stages", 0) \
    M(Bool, optimize_distributed_group_by_sharding_key, false, "Optimize GROUP BY sharding_key queries (by avoiding costly aggregation on the initiator server).", 0) \
    M(Bool, optimize_skip_unused_shards, false, "Assumes that data is distributed by sharding_key. Optimization to skip unused shards if SELECT query filters by sharding_key.", 0) \
    M(Bool, allow_nondeterministic_optimize_skip_unused_shards, false, "Allow non-deterministic functions (includes dictGet) in sharding_key for optimize_skip_unused_shards", 0) \
    M(UInt64, force_optimize_skip_unused_shards, 0, "Throw an exception if unused shards cannot be skipped (1 - throw only if the table has the sharding key, 2 - always throw).", 0) \
    M(UInt64, optimize_skip_unused_shards_nesting, 0, "Same as optimize_skip_unused_shards, but accept nesting level until which it will work.", 0) \
    M(UInt64, force_optimize_skip_unused_shards_nesting, 0, "Same as force_optimize_skip_unused_shards, but accept nesting level until which it will work.", 0) \
@@ -153,6 +154,7 @@ class IColumn;
    \
    M(DistributedProductMode, distributed_product_mode, DistributedProductMode::DENY, "How are distributed subqueries performed inside IN or JOIN sections?", IMPORTANT) \
    \
    M(UInt64, max_concurrent_queries_for_all_users, 0, "The maximum number of concurrent requests for all users.", 0) \
    M(UInt64, max_concurrent_queries_for_user, 0, "The maximum number of concurrent requests per user.", 0) \
    \
    M(Bool, insert_deduplicate, true, "For INSERT queries in the replicated table, specifies that deduplication of inserted blocks should be performed", 0) \
@@ -411,12 +413,14 @@ class IColumn;
    M(Bool, format_csv_allow_double_quotes, 1, "If it is set to true, allow strings in double quotes.", 0) \
    M(Bool, output_format_csv_crlf_end_of_line, false, "If it is set true, end of line in CSV format will be \r\n instead of \n.", 0) \
    M(Bool, input_format_csv_unquoted_null_literal_as_null, false, "Consider unquoted NULL literal as \N", 0) \
    M(Bool, input_format_csv_enum_as_number, false, "Treat inserted enum values in CSV formats as enum indices", 0) \
    M(Bool, input_format_skip_unknown_fields, false, "Skip columns with unknown names from input data (it works for JSONEachRow, CSVWithNames, TSVWithNames and TSKV formats).", 0) \
    M(Bool, input_format_with_names_use_header, true, "For TSVWithNames and CSVWithNames input formats this controls whether format parser is to assume that column data appear in the input exactly as they are specified in the header.", 0) \
    M(Bool, input_format_import_nested_json, false, "Map nested JSON data to nested tables (it works for JSONEachRow format).", 0) \
    M(Bool, optimize_aggregators_of_group_by_keys, true, "Eliminates min/max/any/anyLast aggregators of GROUP BY keys in SELECT section", 0) \
    M(Bool, input_format_defaults_for_omitted_fields, true, "For input data calculate default expressions for omitted fields (it works for JSONEachRow, CSV and TSV formats).", IMPORTANT) \
    M(Bool, input_format_tsv_empty_as_default, false, "Treat empty fields in TSV input as default values.", 0) \
    M(Bool, input_format_tsv_enum_as_number, false, "Treat inserted enum values in TSV formats as enum indices", 0) \
    M(Bool, input_format_null_as_default, false, "For text input formats initialize null fields with default values if data type of this field is not nullable", 0) \
    \
    M(DateTimeInputFormat, date_time_input_format, FormatSettings::DateTimeInputFormat::Basic, "Method to read DateTime from text input formats. Possible values: 'basic' and 'best_effort'.", 0) \
@@ -146,12 +146,17 @@ void DataTypeEnum<Type>::serializeTextEscaped(const IColumn & column, size_t row
}

template <typename Type>
void DataTypeEnum<Type>::deserializeTextEscaped(IColumn & column, ReadBuffer & istr, const FormatSettings &) const
void DataTypeEnum<Type>::deserializeTextEscaped(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const
{
    /// NOTE It would be nice to do without creating a temporary object - at least extract std::string out.
    std::string field_name;
    readEscapedString(field_name, istr);
    assert_cast<ColumnType &>(column).getData().push_back(getValue(StringRef(field_name)));
    if (settings.tsv.input_format_enum_as_number)
        assert_cast<ColumnType &>(column).getData().push_back(readValue(istr));
    else
    {
        /// NOTE It would be nice to do without creating a temporary object - at least extract std::string out.
        std::string field_name;
        readEscapedString(field_name, istr);
        assert_cast<ColumnType &>(column).getData().push_back(getValue(StringRef(field_name)));
    }
}

template <typename Type>
@@ -169,11 +174,16 @@ void DataTypeEnum<Type>::deserializeTextQuoted(IColumn & column, ReadBuffer & is
}

template <typename Type>
void DataTypeEnum<Type>::deserializeWholeText(IColumn & column, ReadBuffer & istr, const FormatSettings &) const
void DataTypeEnum<Type>::deserializeWholeText(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const
{
    std::string field_name;
    readString(field_name, istr);
    assert_cast<ColumnType &>(column).getData().push_back(getValue(StringRef(field_name)));
    if (settings.tsv.input_format_enum_as_number)
        assert_cast<ColumnType &>(column).getData().push_back(readValue(istr));
    else
    {
        std::string field_name;
        readString(field_name, istr);
        assert_cast<ColumnType &>(column).getData().push_back(getValue(StringRef(field_name)));
    }
}

template <typename Type>
@@ -191,9 +201,14 @@ void DataTypeEnum<Type>::serializeTextXML(const IColumn & column, size_t row_num
template <typename Type>
void DataTypeEnum<Type>::deserializeTextJSON(IColumn & column, ReadBuffer & istr, const FormatSettings &) const
{
    std::string field_name;
    readJSONString(field_name, istr);
    assert_cast<ColumnType &>(column).getData().push_back(getValue(StringRef(field_name)));
    if (!istr.eof() && *istr.position() != '"')
        assert_cast<ColumnType &>(column).getData().push_back(readValue(istr));
    else
    {
        std::string field_name;
        readJSONString(field_name, istr);
        assert_cast<ColumnType &>(column).getData().push_back(getValue(StringRef(field_name)));
    }
}

template <typename Type>
@@ -205,9 +220,14 @@ void DataTypeEnum<Type>::serializeTextCSV(const IColumn & column, size_t row_num
template <typename Type>
void DataTypeEnum<Type>::deserializeTextCSV(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const
{
    std::string field_name;
    readCSVString(field_name, istr, settings.csv);
    assert_cast<ColumnType &>(column).getData().push_back(getValue(StringRef(field_name)));
    if (settings.csv.input_format_enum_as_number)
        assert_cast<ColumnType &>(column).getData().push_back(readValue(istr));
    else
    {
        std::string field_name;
        readCSVString(field_name, istr, settings.csv);
        assert_cast<ColumnType &>(column).getData().push_back(getValue(StringRef(field_name)));
    }
}

template <typename Type>
@@ -66,13 +66,18 @@ public:

    TypeIndex getTypeId() const override { return sizeof(FieldType) == 1 ? TypeIndex::Enum8 : TypeIndex::Enum16; }

    const StringRef & getNameForValue(const FieldType & value) const
    auto findByValue(const FieldType & value) const
    {
        const auto it = value_to_name_map.find(value);
        if (it == std::end(value_to_name_map))
            throw Exception{"Unexpected value " + toString(value) + " for type " + getName(), ErrorCodes::BAD_ARGUMENTS};

        return it->second;
        return it;
    }

    const StringRef & getNameForValue(const FieldType & value) const
    {
        return findByValue(value)->second;
    }

    FieldType getValue(StringRef field_name) const
@@ -84,6 +89,13 @@ public:
        return it->getMapped();
    }

    FieldType readValue(ReadBuffer & istr) const
    {
        FieldType x;
        readText(x, istr);
        return findByValue(x)->first;
    }

    Field castToName(const Field & value_or_name) const override;
    Field castToValue(const Field & value_or_name) const override;
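The effect of `readValue()` above, in isolation: with the `*_enum_as_number` settings enabled, the input field is parsed as the numeric enum id and rejected if it is not a declared value. A self-contained sketch of that lookup (illustration only, with hypothetical names and values):

    // Illustration only (not ClickHouse code): the essence of "enum as number"
    // deserialization that readValue()/findByValue() implement above.
    #include <cstdint>
    #include <istream>
    #include <map>
    #include <stdexcept>
    #include <string>

    int8_t readEnumAsNumber(std::istream & in, const std::map<int8_t, std::string> & value_to_name)
    {
        int value_as_int = 0;
        in >> value_as_int;                       // e.g. the TSV field "1"
        const auto it = value_to_name.find(static_cast<int8_t>(value_as_int));
        if (it == value_to_name.end())            // reject ids that are not part of the enum
            throw std::invalid_argument("Unexpected value " + std::to_string(value_as_int) + " for enum");
        return it->first;                         // store the validated id in the column
    }

    // For Enum8('hello' = 1, 'world' = 2), the input "1" resolves to 'hello';
    // with the setting disabled, the same bytes would be looked up as a name instead.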
@@ -32,7 +32,7 @@ FileDictionarySource::FileDictionarySource(
    {
        const String user_files_path = context.getUserFilesPath();
        if (!startsWith(filepath, user_files_path))
            throw Exception("File path " + filepath + " is not inside " + user_files_path, ErrorCodes::PATH_ACCESS_DENIED);
            throw Exception(ErrorCodes::PATH_ACCESS_DENIED, "File path {} is not inside {}", filepath, user_files_path);
    }
}

@@ -60,7 +60,7 @@ BlockInputStreamPtr FileDictionarySource::loadAll()

std::string FileDictionarySource::toString() const
{
    return "File: " + filepath + ' ' + format;
    return fmt::format("File: {}, {}", filepath, format);
}
@@ -23,8 +23,11 @@ public:
    DiskSelector(const Poco::Util::AbstractConfiguration & config, const String & config_prefix, const Context & context);
    DiskSelector(const DiskSelector & from) : disks(from.disks) { }

    DiskSelectorPtr
    updateFromConfig(const Poco::Util::AbstractConfiguration & config, const String & config_prefix, const Context & context) const;
    DiskSelectorPtr updateFromConfig(
        const Poco::Util::AbstractConfiguration & config,
        const String & config_prefix,
        const Context & context
    ) const;

    /// Get disk by name
    DiskPtr get(const String & name) const;
@@ -9,7 +9,7 @@ namespace DB
{
namespace ErrorCodes
{
    extern const int EXCESSIVE_ELEMENT_IN_CONFIG;
    extern const int NO_ELEMENTS_IN_CONFIG;
    extern const int INCONSISTENT_RESERVATIONS;
    extern const int NO_RESERVATIONS_PROVIDED;
    extern const int UNKNOWN_VOLUME_TYPE;
@@ -51,7 +51,7 @@ IVolume::IVolume(
    }

    if (disks.empty())
        throw Exception("Volume must contain at least one disk.", ErrorCodes::EXCESSIVE_ELEMENT_IN_CONFIG);
        throw Exception("Volume must contain at least one disk", ErrorCodes::NO_ELEMENTS_IN_CONFIG);
}

UInt64 IVolume::getMaxUnreservedFreeSpace() const
@@ -64,6 +64,12 @@ public:
    virtual DiskPtr getDisk(size_t i) const { return disks[i]; }
    const Disks & getDisks() const { return disks; }

    /// Returns effective value of whether merges are allowed on this volume (true) or not (false).
    virtual bool areMergesAvoided() const { return false; }

    /// User setting for enabling and disabling merges on volume.
    virtual void setAvoidMergesUserOverride(bool /*avoid*/) {}

protected:
    Disks disks;
    const String name;
@@ -8,7 +8,7 @@ namespace DB
class SingleDiskVolume : public IVolume
{
public:
    SingleDiskVolume(const String & name_, DiskPtr disk): IVolume(name_, {disk})
    SingleDiskVolume(const String & name_, DiskPtr disk, size_t max_data_part_size_ = 0): IVolume(name_, {disk}, max_data_part_size_)
    {
    }
@@ -11,6 +11,13 @@
#include <Poco/File.h>


namespace
{
    const auto DEFAULT_STORAGE_POLICY_NAME = "default";
    const auto DEFAULT_VOLUME_NAME = "default";
    const auto DEFAULT_DISK_NAME = "default";
}

namespace DB
{

@@ -18,11 +25,14 @@ namespace ErrorCodes
{
    extern const int BAD_ARGUMENTS;
    extern const int EXCESSIVE_ELEMENT_IN_CONFIG;
    extern const int NO_ELEMENTS_IN_CONFIG;
    extern const int UNKNOWN_DISK;
    extern const int UNKNOWN_POLICY;
    extern const int UNKNOWN_VOLUME;
    extern const int LOGICAL_ERROR;
}


StoragePolicy::StoragePolicy(
    String name_,
    const Poco::Util::AbstractConfiguration & config,
@@ -30,44 +40,42 @@ StoragePolicy::StoragePolicy(
    DiskSelectorPtr disks)
    : name(std::move(name_))
{
    String volumes_prefix = config_prefix + ".volumes";
    if (!config.has(volumes_prefix))
        throw Exception("StoragePolicy must contain at least one volume (.volumes)", ErrorCodes::EXCESSIVE_ELEMENT_IN_CONFIG);

    Poco::Util::AbstractConfiguration::Keys keys;
    config.keys(volumes_prefix, keys);
    String volumes_prefix = config_prefix + ".volumes";

    if (!config.has(volumes_prefix))
    {
        if (name != DEFAULT_STORAGE_POLICY_NAME)
            throw Exception("Storage policy " + backQuote(name) + " must contain at least one volume (.volumes)", ErrorCodes::NO_ELEMENTS_IN_CONFIG);
    }
    else
    {
        config.keys(volumes_prefix, keys);
    }

    for (const auto & attr_name : keys)
    {
        if (!std::all_of(attr_name.begin(), attr_name.end(), isWordCharASCII))
            throw Exception(
                "Volume name can contain only alphanumeric and '_' (" + attr_name + ")", ErrorCodes::EXCESSIVE_ELEMENT_IN_CONFIG);
        volumes.push_back(std::make_shared<VolumeJBOD>(attr_name, config, volumes_prefix + "." + attr_name, disks));
        if (volumes_names.find(attr_name) != volumes_names.end())
            throw Exception("Volumes names must be unique (" + attr_name + " duplicated)", ErrorCodes::UNKNOWN_POLICY);
        volumes_names[attr_name] = volumes.size() - 1;
                "Volume name can contain only alphanumeric and '_' in storage policy " + backQuote(name) + " (" + attr_name + ")", ErrorCodes::EXCESSIVE_ELEMENT_IN_CONFIG);
        volumes.emplace_back(createVolumeFromConfig(attr_name, config, volumes_prefix + "." + attr_name, disks));
    }

    if (volumes.empty() && name == DEFAULT_STORAGE_POLICY_NAME)
    {
        auto default_volume = std::make_shared<VolumeJBOD>(DEFAULT_VOLUME_NAME, std::vector<DiskPtr>{disks->get(DEFAULT_DISK_NAME)}, 0, false);
        volumes.emplace_back(std::move(default_volume));
    }

    if (volumes.empty())
        throw Exception("StoragePolicy must contain at least one volume.", ErrorCodes::EXCESSIVE_ELEMENT_IN_CONFIG);
        throw Exception("Storage policy " + backQuote(name) + " must contain at least one volume.", ErrorCodes::NO_ELEMENTS_IN_CONFIG);

    /// Check that disks are unique in Policy
    std::set<String> disk_names;
    for (const auto & volume : volumes)
    {
        for (const auto & disk : volume->getDisks())
        {
            if (disk_names.find(disk->getName()) != disk_names.end())
                throw Exception(
                    "Duplicate disk '" + disk->getName() + "' in storage policy '" + name + "'", ErrorCodes::EXCESSIVE_ELEMENT_IN_CONFIG);

            disk_names.insert(disk->getName());
        }
    }

    move_factor = config.getDouble(config_prefix + ".move_factor", 0.1);
    const double default_move_factor = volumes.size() > 1 ? 0.1 : 0.0;
    move_factor = config.getDouble(config_prefix + ".move_factor", default_move_factor);
    if (move_factor > 1)
        throw Exception("Disk move factor have to be in [0., 1.] interval, but set to " + toString(move_factor), ErrorCodes::LOGICAL_ERROR);
        throw Exception("Disk move factor have to be in [0., 1.] interval, but set to " + toString(move_factor) + " in storage policy " + backQuote(name), ErrorCodes::LOGICAL_ERROR);

    buildVolumeIndices();
}
@@ -75,16 +83,43 @@ StoragePolicy::StoragePolicy(String name_, Volumes volumes_, double move_factor_
    : volumes(std::move(volumes_)), name(std::move(name_)), move_factor(move_factor_)
{
    if (volumes.empty())
        throw Exception("StoragePolicy must contain at least one Volume.", ErrorCodes::UNKNOWN_POLICY);
        throw Exception("Storage policy " + backQuote(name) + " must contain at least one Volume.", ErrorCodes::NO_ELEMENTS_IN_CONFIG);

    if (move_factor > 1)
        throw Exception("Disk move factor have to be in [0., 1.] interval, but set to " + toString(move_factor), ErrorCodes::LOGICAL_ERROR);
        throw Exception("Disk move factor have to be in [0., 1.] interval, but set to " + toString(move_factor) + " in storage policy " + backQuote(name), ErrorCodes::LOGICAL_ERROR);

    for (size_t i = 0; i < volumes.size(); ++i)
    buildVolumeIndices();
}


StoragePolicy::StoragePolicy(const StoragePolicy & storage_policy,
    const Poco::Util::AbstractConfiguration & config,
    const String & config_prefix,
    DiskSelectorPtr disks)
    : StoragePolicy(storage_policy.getName(), config, config_prefix, disks)
{
    for (auto & volume : volumes)
    {
        if (volumes_names.find(volumes[i]->getName()) != volumes_names.end())
            throw Exception("Volumes names must be unique (" + volumes[i]->getName() + " duplicated).", ErrorCodes::UNKNOWN_POLICY);
        volumes_names[volumes[i]->getName()] = i;
        if (storage_policy.volume_index_by_volume_name.count(volume->getName()) > 0)
        {
            auto old_volume = storage_policy.getVolumeByName(volume->getName());
            try
            {
                auto new_volume = updateVolumeFromConfig(old_volume, config, config_prefix + ".volumes." + volume->getName(), disks);
                volume = std::move(new_volume);
            }
            catch (Exception & e)
            {
                /// Default policies are allowed to be missed in configuration.
                if (e.code() != ErrorCodes::NO_ELEMENTS_IN_CONFIG || storage_policy.getName() != DEFAULT_STORAGE_POLICY_NAME)
                    throw;

                Poco::Util::AbstractConfiguration::Keys keys;
                config.keys(config_prefix, keys);
                if (!keys.empty())
                    throw;
            }
        }
    }
}
@@ -93,20 +128,20 @@ bool StoragePolicy::isDefaultPolicy() const
{
    /// Guessing if this policy is default, not 100% correct though.

    if (getName() != "default")
    if (getName() != DEFAULT_STORAGE_POLICY_NAME)
        return false;

    if (volumes.size() != 1)
        return false;

    if (volumes[0]->getName() != "default")
    if (volumes[0]->getName() != DEFAULT_VOLUME_NAME)
        return false;

    const auto & disks = volumes[0]->getDisks();
    if (disks.size() != 1)
        return false;

    if (disks[0]->getName() != "default")
    if (disks[0]->getName() != DEFAULT_DISK_NAME)
        return false;

    return true;
@@ -128,10 +163,10 @@ DiskPtr StoragePolicy::getAnyDisk() const
    /// StoragePolicy must contain at least one Volume
    /// Volume must contain at least one Disk
    if (volumes.empty())
        throw Exception("StoragePolicy has no volumes. It's a bug.", ErrorCodes::LOGICAL_ERROR);
        throw Exception("Storage policy " + backQuote(name) + " has no volumes. It's a bug.", ErrorCodes::LOGICAL_ERROR);

    if (volumes[0]->getDisks().empty())
        throw Exception("Volume '" + volumes[0]->getName() + "' has no disks. It's a bug.", ErrorCodes::LOGICAL_ERROR);
        throw Exception("Volume " + backQuote(name) + "." + backQuote(volumes[0]->getName()) + " has no disks. It's a bug.", ErrorCodes::LOGICAL_ERROR);

    return volumes[0]->getDisks()[0];
}
@@ -195,6 +230,24 @@ ReservationPtr StoragePolicy::makeEmptyReservationOnLargestDisk() const
}


VolumePtr StoragePolicy::getVolume(size_t index) const
{
    if (index < volume_index_by_volume_name.size())
        return volumes[index];
    else
        throw Exception("No volume with index " + std::to_string(index) + " in storage policy " + backQuote(name), ErrorCodes::UNKNOWN_VOLUME);
}


VolumePtr StoragePolicy::getVolumeByName(const String & volume_name) const
{
    auto it = volume_index_by_volume_name.find(volume_name);
    if (it == volume_index_by_volume_name.end())
        throw Exception("No such volume " + backQuote(volume_name) + " in storage policy " + backQuote(name), ErrorCodes::UNKNOWN_VOLUME);
    return getVolume(it->second);
}


void StoragePolicy::checkCompatibleWith(const StoragePolicyPtr & new_storage_policy) const
{
    std::unordered_set<String> new_volume_names;
@@ -204,7 +257,7 @@ void StoragePolicy::checkCompatibleWith(const StoragePolicyPtr & new_storage_pol
    for (const auto & volume : getVolumes())
    {
        if (new_volume_names.count(volume->getName()) == 0)
            throw Exception("New storage policy shall contain volumes of old one", ErrorCodes::BAD_ARGUMENTS);
            throw Exception("New storage policy " + backQuote(name) + " shall contain volumes of old one", ErrorCodes::BAD_ARGUMENTS);

        std::unordered_set<String> new_disk_names;
        for (const auto & disk : new_storage_policy->getVolumeByName(volume->getName())->getDisks())
@@ -212,21 +265,46 @@

        for (const auto & disk : volume->getDisks())
            if (new_disk_names.count(disk->getName()) == 0)
                throw Exception("New storage policy shall contain disks of old one", ErrorCodes::BAD_ARGUMENTS);
                throw Exception("New storage policy " + backQuote(name) + " shall contain disks of old one", ErrorCodes::BAD_ARGUMENTS);
    }
}


size_t StoragePolicy::getVolumeIndexByDisk(const DiskPtr & disk_ptr) const
{
    for (size_t i = 0; i < volumes.size(); ++i)
    auto it = volume_index_by_disk_name.find(disk_ptr->getName());
    if (it != volume_index_by_disk_name.end())
        return it->second;
    else
        throw Exception("No disk " + backQuote(disk_ptr->getName()) + " in policy " + backQuote(name), ErrorCodes::UNKNOWN_DISK);
}


void StoragePolicy::buildVolumeIndices()
{
    for (size_t index = 0; index < volumes.size(); ++index)
    {
        const auto & volume = volumes[i];
        const VolumePtr & volume = volumes[index];

        if (volume_index_by_volume_name.find(volume->getName()) != volume_index_by_volume_name.end())
            throw Exception("Volume names must be unique in storage policy "
                + backQuote(name) + " (" + backQuote(volume->getName()) + " is duplicated)"
                , ErrorCodes::EXCESSIVE_ELEMENT_IN_CONFIG);

        volume_index_by_volume_name[volume->getName()] = index;

        for (const auto & disk : volume->getDisks())
            if (disk->getName() == disk_ptr->getName())
                return i;
        {
            const String & disk_name = disk->getName();

            if (volume_index_by_disk_name.find(disk_name) != volume_index_by_disk_name.end())
                throw Exception("Disk names must be unique in storage policy "
                    + backQuote(name) + " (" + backQuote(disk_name) + " is duplicated)"
                    , ErrorCodes::EXCESSIVE_ELEMENT_IN_CONFIG);

            volume_index_by_disk_name[disk_name] = index;
        }
    }
    throw Exception("No disk " + disk_ptr->getName() + " in policy " + name, ErrorCodes::UNKNOWN_DISK);
}
@@ -242,44 +320,40 @@ StoragePolicySelector::StoragePolicySelector(
    {
        if (!std::all_of(name.begin(), name.end(), isWordCharASCII))
            throw Exception(
                "StoragePolicy name can contain only alphanumeric and '_' (" + name + ")", ErrorCodes::EXCESSIVE_ELEMENT_IN_CONFIG);
                "Storage policy name can contain only alphanumeric and '_' (" + backQuote(name) + ")", ErrorCodes::EXCESSIVE_ELEMENT_IN_CONFIG);

        policies.emplace(name, std::make_shared<StoragePolicy>(name, config, config_prefix + "." + name, disks));
        LOG_INFO(&Poco::Logger::get("StoragePolicySelector"), "Storage policy {} loaded", backQuote(name));
    }

    constexpr auto default_storage_policy_name = "default";
    constexpr auto default_volume_name = "default";
    constexpr auto default_disk_name = "default";

    /// Add default policy if it's not specified explicitly
    if (policies.find(default_storage_policy_name) == policies.end())
    /// Add default policy if it isn't explicitly specified.
    if (policies.find(DEFAULT_STORAGE_POLICY_NAME) == policies.end())
    {
        auto default_volume = std::make_shared<VolumeJBOD>(default_volume_name, std::vector<DiskPtr>{disks->get(default_disk_name)}, 0);

        auto default_policy = std::make_shared<StoragePolicy>(default_storage_policy_name, Volumes{default_volume}, 0.0);
        policies.emplace(default_storage_policy_name, default_policy);
        auto default_policy = std::make_shared<StoragePolicy>(DEFAULT_STORAGE_POLICY_NAME, config, config_prefix + "." + DEFAULT_STORAGE_POLICY_NAME, disks);
        policies.emplace(DEFAULT_STORAGE_POLICY_NAME, std::move(default_policy));
    }
}


StoragePolicySelectorPtr StoragePolicySelector::updateFromConfig(const Poco::Util::AbstractConfiguration & config, const String & config_prefix, DiskSelectorPtr disks) const
{
    Poco::Util::AbstractConfiguration::Keys keys;
    config.keys(config_prefix, keys);

    std::shared_ptr<StoragePolicySelector> result = std::make_shared<StoragePolicySelector>(config, config_prefix, disks);

    constexpr auto default_storage_policy_name = "default";

    /// First pass, check.
    for (const auto & [name, policy] : policies)
    {
        if (name != default_storage_policy_name && result->policies.count(name) == 0)
        if (result->policies.count(name) == 0)
            throw Exception("Storage policy " + backQuote(name) + " is missing in new configuration", ErrorCodes::BAD_ARGUMENTS);

        policy->checkCompatibleWith(result->policies[name]);
    }

    /// Second pass, load.
    for (const auto & [name, policy] : policies)
    {
        result->policies[name] = std::make_shared<StoragePolicy>(*policy, config, config_prefix + "." + name, disks);
    }

    return result;
}

@@ -288,7 +362,7 @@ StoragePolicyPtr StoragePolicySelector::get(const String & name) const
{
    auto it = policies.find(name);
    if (it == policies.end())
        throw Exception("Unknown StoragePolicy " + name, ErrorCodes::UNKNOWN_POLICY);
        throw Exception("Unknown storage policy " + backQuote(name), ErrorCodes::UNKNOWN_POLICY);

    return it->second;
}
@@ -14,6 +14,7 @@
 
 #include <memory>
 #include <mutex>
+#include <unordered_map>
 #include <unistd.h>
 #include <boost/noncopyable.hpp>
 #include <Poco/Util/AbstractConfiguration.h>
@@ -36,6 +37,13 @@ public:
 
     StoragePolicy(String name_, Volumes volumes_, double move_factor_);
 
+    StoragePolicy(
+        const StoragePolicy & storage_policy,
+        const Poco::Util::AbstractConfiguration & config,
+        const String & config_prefix,
+        DiskSelectorPtr disks
+    );
+
     bool isDefaultPolicy() const;
 
     /// Returns disks ordered by volumes priority
@@ -72,16 +80,10 @@ public:
     /// which should be kept with help of background moves
     double getMoveFactor() const { return move_factor; }
 
-    /// Get volume by index from storage_policy
-    VolumePtr getVolume(size_t i) const { return (i < volumes_names.size() ? volumes[i] : VolumePtr()); }
+    /// Get volume by index.
+    VolumePtr getVolume(size_t index) const;
 
-    VolumePtr getVolumeByName(const String & volume_name) const
-    {
-        auto it = volumes_names.find(volume_name);
-        if (it == volumes_names.end())
-            return {};
-        return getVolume(it->second);
-    }
+    VolumePtr getVolumeByName(const String & volume_name) const;
 
     /// Checks if storage policy can be replaced by another one.
     void checkCompatibleWith(const StoragePolicyPtr & new_storage_policy) const;
@@ -89,12 +91,15 @@ public:
 private:
     Volumes volumes;
     const String name;
-    std::map<String, size_t> volumes_names;
+    std::unordered_map<String, size_t> volume_index_by_volume_name;
+    std::unordered_map<String, size_t> volume_index_by_disk_name;
 
     /// move_factor from interval [0., 1.]
     /// We move something if disk from this policy
     /// filled more than total_size * move_factor
     double move_factor = 0.1; /// by default move factor is 10%
 
+    void buildVolumeIndices();
 };
|
@ -56,11 +56,23 @@ VolumeJBOD::VolumeJBOD(
|
||||
|
||||
/// Default value is 'true' due to backward compatibility.
|
||||
perform_ttl_move_on_insert = config.getBool(config_prefix + ".perform_ttl_move_on_insert", true);
|
||||
|
||||
are_merges_avoided = config.getBool(config_prefix + ".prefer_not_to_merge", false);
|
||||
}
|
||||
|
||||
VolumeJBOD::VolumeJBOD(const VolumeJBOD & volume_jbod,
|
||||
const Poco::Util::AbstractConfiguration & config,
|
||||
const String & config_prefix,
|
||||
DiskSelectorPtr disk_selector)
|
||||
: VolumeJBOD(volume_jbod.name, config, config_prefix, disk_selector)
|
||||
{
|
||||
are_merges_avoided_user_override = volume_jbod.are_merges_avoided_user_override.load(std::memory_order_relaxed);
|
||||
last_used = volume_jbod.last_used.load(std::memory_order_relaxed);
|
||||
}
|
||||
|
||||
DiskPtr VolumeJBOD::getDisk(size_t /* index */) const
|
||||
{
|
||||
size_t start_from = last_used.fetch_add(1u, std::memory_order_relaxed);
|
||||
size_t start_from = last_used.fetch_add(1u, std::memory_order_acq_rel);
|
||||
size_t index = start_from % disks.size();
|
||||
return disks[index];
|
||||
}
|
||||
@ -73,7 +85,7 @@ ReservationPtr VolumeJBOD::reserve(UInt64 bytes)
|
||||
if (max_data_part_size != 0 && bytes > max_data_part_size)
|
||||
return {};
|
||||
|
||||
size_t start_from = last_used.fetch_add(1u, std::memory_order_relaxed);
|
||||
size_t start_from = last_used.fetch_add(1u, std::memory_order_acq_rel);
|
||||
size_t disks_num = disks.size();
|
||||
for (size_t i = 0; i < disks_num; ++i)
|
||||
{
|
||||
@ -87,4 +99,19 @@ ReservationPtr VolumeJBOD::reserve(UInt64 bytes)
|
||||
return {};
|
||||
}
|
||||
|
||||
bool VolumeJBOD::areMergesAvoided() const
|
||||
{
|
||||
auto are_merges_avoided_user_override_value = are_merges_avoided_user_override.load(std::memory_order_acquire);
|
||||
if (are_merges_avoided_user_override_value)
|
||||
return *are_merges_avoided_user_override_value;
|
||||
else
|
||||
return are_merges_avoided;
|
||||
}
|
||||
|
||||
void VolumeJBOD::setAvoidMergesUserOverride(bool avoid)
|
||||
{
|
||||
are_merges_avoided_user_override.store(avoid, std::memory_order_release);
|
||||
}
|
||||
|
||||
|
||||
}
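A side note on the getDisk()/reserve() lines above: the round-robin itself is only an atomic counter taken modulo the number of disks; the relaxed-versus-acq_rel switch changes the ordering guarantees around that counter, not the distribution. A standalone sketch (illustrative, not the IVolume interface):

#include <atomic>
#include <cstddef>
#include <string>
#include <vector>

struct ToyJBOD
{
    std::vector<std::string> disks;
    mutable std::atomic<std::size_t> last_used{0};

    /// Every caller bumps the shared counter, so concurrent reservations
    /// are spread across all disks without taking a lock.
    const std::string & nextDisk() const
    {
        std::size_t start_from = last_used.fetch_add(1, std::memory_order_relaxed);
        return disks[start_from % disks.size()];
    }
};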
|
||||
|
@ -1,10 +1,19 @@
|
||||
#pragma once
|
||||
|
||||
#include <memory>
|
||||
#include <optional>
|
||||
|
||||
#include <Disks/IVolume.h>
|
||||
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
class VolumeJBOD;
|
||||
|
||||
using VolumeJBODPtr = std::shared_ptr<VolumeJBOD>;
|
||||
using VolumesJBOD = std::vector<VolumeJBODPtr>;
|
||||
|
||||
/**
|
||||
* Implements something similar to JBOD (https://en.wikipedia.org/wiki/Non-RAID_drive_architectures#JBOD).
|
||||
* When MergeTree engine wants to write part — it requests VolumeJBOD to reserve space on the next available
|
||||
@ -13,8 +22,9 @@ namespace DB
|
||||
class VolumeJBOD : public IVolume
|
||||
{
|
||||
public:
|
||||
VolumeJBOD(String name_, Disks disks_, UInt64 max_data_part_size_)
|
||||
VolumeJBOD(String name_, Disks disks_, UInt64 max_data_part_size_, bool are_merges_avoided_)
|
||||
: IVolume(name_, disks_, max_data_part_size_)
|
||||
, are_merges_avoided(are_merges_avoided_)
|
||||
{
|
||||
}
|
||||
|
||||
@ -25,6 +35,13 @@ public:
|
||||
DiskSelectorPtr disk_selector
|
||||
);
|
||||
|
||||
VolumeJBOD(
|
||||
const VolumeJBOD & volume_jbod,
|
||||
const Poco::Util::AbstractConfiguration & config,
|
||||
const String & config_prefix,
|
||||
DiskSelectorPtr disk_selector
|
||||
);
|
||||
|
||||
VolumeType getType() const override { return VolumeType::JBOD; }
|
||||
|
||||
/// Always returns next disk (round-robin), ignores argument.
|
||||
@ -38,11 +55,19 @@ public:
|
||||
/// Returns valid reservation or nullptr if there is no space left on any disk.
|
||||
ReservationPtr reserve(UInt64 bytes) override;
|
||||
|
||||
bool areMergesAvoided() const override;
|
||||
|
||||
void setAvoidMergesUserOverride(bool avoid) override;
|
||||
|
||||
/// True if parts on this volume participate in merges according to configuration.
|
||||
bool are_merges_avoided = true;
|
||||
|
||||
private:
|
||||
/// Index of last used disk.
|
||||
mutable std::atomic<size_t> last_used = 0;
|
||||
|
||||
/// True if parts on this volume participate in merges according to START/STOP MERGES ON VOLUME.
|
||||
std::atomic<std::optional<bool>> are_merges_avoided_user_override{std::nullopt};
|
||||
};
|
||||
|
||||
using VolumeJBODPtr = std::shared_ptr<VolumeJBOD>;
|
||||
using VolumesJBOD = std::vector<VolumeJBODPtr>;
|
||||
|
||||
}
|
||||
|
@ -3,18 +3,23 @@
|
||||
#include <Disks/createVolume.h>
|
||||
#include <Disks/VolumeJBOD.h>
|
||||
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
/// Volume which reserserves space on each underlying disk.
|
||||
class VolumeRAID1;
|
||||
|
||||
using VolumeRAID1Ptr = std::shared_ptr<VolumeRAID1>;
|
||||
|
||||
/// Volume which reserves space on each underlying disk.
|
||||
///
|
||||
/// NOTE: Just an interface implementation, not used in the codebase
/// and not available to the user.
|
||||
class VolumeRAID1 : public VolumeJBOD
|
||||
{
|
||||
public:
|
||||
VolumeRAID1(String name_, Disks disks_, UInt64 max_data_part_size_)
|
||||
: VolumeJBOD(name_, disks_, max_data_part_size_)
|
||||
VolumeRAID1(String name_, Disks disks_, UInt64 max_data_part_size_, bool are_merges_avoided_in_config_)
|
||||
: VolumeJBOD(name_, disks_, max_data_part_size_, are_merges_avoided_in_config_)
|
||||
{
|
||||
}
|
||||
|
||||
@ -27,11 +32,18 @@ public:
|
||||
{
|
||||
}
|
||||
|
||||
VolumeRAID1(
|
||||
VolumeRAID1 & volume_raid1,
|
||||
const Poco::Util::AbstractConfiguration & config,
|
||||
const String & config_prefix,
|
||||
DiskSelectorPtr disk_selector)
|
||||
: VolumeJBOD(volume_raid1, config, config_prefix, disk_selector)
|
||||
{
|
||||
}
|
||||
|
||||
VolumeType getType() const override { return VolumeType::RAID1; }
|
||||
|
||||
ReservationPtr reserve(UInt64 bytes) override;
|
||||
};
|
||||
|
||||
using VolumeRAID1Ptr = std::shared_ptr<VolumeRAID1>;
|
||||
|
||||
}
|
||||
|
@ -12,6 +12,7 @@ namespace DB
|
||||
namespace ErrorCodes
|
||||
{
|
||||
extern const int UNKNOWN_RAID_TYPE;
|
||||
extern const int INVALID_RAID_TYPE;
|
||||
}
|
||||
|
||||
VolumePtr createVolumeFromReservation(const ReservationPtr & reservation, VolumePtr other_volume)
|
||||
@ -20,12 +21,12 @@ VolumePtr createVolumeFromReservation(const ReservationPtr & reservation, Volume
|
||||
{
|
||||
/// Since reservation on JBOD chooses one of disks and makes reservation there, volume
|
||||
/// for such type of reservation will be with one disk.
|
||||
return std::make_shared<SingleDiskVolume>(other_volume->getName(), reservation->getDisk());
|
||||
return std::make_shared<SingleDiskVolume>(other_volume->getName(), reservation->getDisk(), other_volume->max_data_part_size);
|
||||
}
|
||||
if (other_volume->getType() == VolumeType::RAID1)
|
||||
{
|
||||
auto volume = std::dynamic_pointer_cast<VolumeRAID1>(other_volume);
|
||||
return std::make_shared<VolumeRAID1>(volume->getName(), reservation->getDisks(), volume->max_data_part_size);
|
||||
return std::make_shared<VolumeRAID1>(volume->getName(), reservation->getDisks(), volume->max_data_part_size, volume->are_merges_avoided);
|
||||
}
|
||||
return nullptr;
|
||||
}
|
||||
@ -37,17 +38,31 @@ VolumePtr createVolumeFromConfig(
|
||||
DiskSelectorPtr disk_selector
|
||||
)
|
||||
{
|
||||
auto has_raid_type = config.has(config_prefix + ".raid_type");
|
||||
if (!has_raid_type)
|
||||
{
|
||||
return std::make_shared<VolumeJBOD>(name, config, config_prefix, disk_selector);
|
||||
}
|
||||
String raid_type = config.getString(config_prefix + ".raid_type");
|
||||
String raid_type = config.getString(config_prefix + ".raid_type", "JBOD");
|
||||
if (raid_type == "JBOD")
|
||||
{
|
||||
return std::make_shared<VolumeJBOD>(name, config, config_prefix, disk_selector);
|
||||
}
|
||||
throw Exception("Unknown raid type '" + raid_type + "'", ErrorCodes::UNKNOWN_RAID_TYPE);
|
||||
throw Exception("Unknown RAID type '" + raid_type + "'", ErrorCodes::UNKNOWN_RAID_TYPE);
|
||||
}
|
||||
|
||||
VolumePtr updateVolumeFromConfig(
|
||||
VolumePtr volume,
|
||||
const Poco::Util::AbstractConfiguration & config,
|
||||
const String & config_prefix,
|
||||
DiskSelectorPtr & disk_selector
|
||||
)
|
||||
{
|
||||
String raid_type = config.getString(config_prefix + ".raid_type", "JBOD");
|
||||
if (raid_type == "JBOD")
|
||||
{
|
||||
VolumeJBODPtr volume_jbod = std::dynamic_pointer_cast<VolumeJBOD>(volume);
|
||||
if (!volume_jbod)
|
||||
throw Exception("Invalid RAID type '" + raid_type + "', shall be JBOD", ErrorCodes::INVALID_RAID_TYPE);
|
||||
|
||||
return std::make_shared<VolumeJBOD>(*volume_jbod, config, config_prefix, disk_selector);
|
||||
}
|
||||
throw Exception("Unknown RAID type '" + raid_type + "'", ErrorCodes::UNKNOWN_RAID_TYPE);
|
||||
}
|
||||
|
||||
}
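For orientation, a compact sketch of the dispatch above (toy types; the real code returns VolumeJBOD and validates the existing volume on update): the volume kind is taken from the <raid_type> key with "JBOD" as the default, so volumes without an explicit raid_type keep their previous behaviour.

#include <memory>
#include <stdexcept>
#include <string>

struct ToyVolume { virtual ~ToyVolume() = default; };
struct ToyJBOD : ToyVolume {};

/// raid_type would come from something like config.getString(prefix + ".raid_type", "JBOD").
std::shared_ptr<ToyVolume> makeToyVolume(const std::string & raid_type)
{
    if (raid_type == "JBOD")
        return std::make_shared<ToyJBOD>();
    throw std::runtime_error("Unknown RAID type '" + raid_type + "'");
}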
|
||||
|
@ -6,6 +6,7 @@ namespace DB
|
||||
{
|
||||
|
||||
VolumePtr createVolumeFromReservation(const ReservationPtr & reservation, VolumePtr other_volume);
|
||||
|
||||
VolumePtr createVolumeFromConfig(
|
||||
String name_,
|
||||
const Poco::Util::AbstractConfiguration & config,
|
||||
@ -13,4 +14,11 @@ VolumePtr createVolumeFromConfig(
|
||||
DiskSelectorPtr disk_selector
|
||||
);
|
||||
|
||||
VolumePtr updateVolumeFromConfig(
|
||||
VolumePtr volume,
|
||||
const Poco::Util::AbstractConfiguration & config,
|
||||
const String & config_prefix,
|
||||
DiskSelectorPtr & disk_selector
|
||||
);
|
||||
|
||||
}
|
||||
|
@ -49,6 +49,7 @@ static FormatSettings getInputFormatSetting(const Settings & settings, const Con
|
||||
format_settings.csv.allow_double_quotes = settings.format_csv_allow_double_quotes;
|
||||
format_settings.csv.unquoted_null_literal_as_null = settings.input_format_csv_unquoted_null_literal_as_null;
|
||||
format_settings.csv.empty_as_default = settings.input_format_defaults_for_omitted_fields;
|
||||
format_settings.csv.input_format_enum_as_number = settings.input_format_csv_enum_as_number;
|
||||
format_settings.null_as_default = settings.input_format_null_as_default;
|
||||
format_settings.values.interpret_expressions = settings.input_format_values_interpret_expressions;
|
||||
format_settings.values.deduce_templates_of_expressions = settings.input_format_values_deduce_templates_of_expressions;
|
||||
@ -63,6 +64,7 @@ static FormatSettings getInputFormatSetting(const Settings & settings, const Con
|
||||
format_settings.template_settings.row_format = settings.format_template_row;
|
||||
format_settings.template_settings.row_between_delimiter = settings.format_template_rows_between_delimiter;
|
||||
format_settings.tsv.empty_as_default = settings.input_format_tsv_empty_as_default;
|
||||
format_settings.tsv.input_format_enum_as_number = settings.input_format_tsv_enum_as_number;
|
||||
format_settings.schema.format_schema = settings.format_schema;
|
||||
format_settings.schema.format_schema_path = context.getFormatSchemaPath();
|
||||
format_settings.schema.is_server = context.hasGlobalContext() && (context.getGlobalContext().getApplicationType() == Context::ApplicationType::SERVER);
|
||||
|
@ -34,6 +34,7 @@ struct FormatSettings
|
||||
bool unquoted_null_literal_as_null = false;
|
||||
bool empty_as_default = false;
|
||||
bool crlf_end_of_line = false;
|
||||
bool input_format_enum_as_number = false;
|
||||
};
|
||||
|
||||
CSV csv;
|
||||
@ -81,6 +82,7 @@ struct FormatSettings
|
||||
bool empty_as_default = false;
|
||||
bool crlf_end_of_line = false;
|
||||
String null_representation = "\\N";
|
||||
bool input_format_enum_as_number = false;
|
||||
};
|
||||
|
||||
TSV tsv;
|
||||
|
@@ -137,7 +137,7 @@ void validateArgumentsImpl(const IFunction & func,
         const auto & arg = arguments[i + argument_offset];
         const auto descriptor = descriptors[i];
         if (int error_code = descriptor.isValid(arg.type, arg.column); error_code != 0)
-            throw Exception("Illegal type of argument #" + std::to_string(i)
+            throw Exception("Illegal type of argument #" + std::to_string(argument_offset + i + 1) // +1 is for human-friendly 1-based indexing
                             + (descriptor.argument_name ? " '" + std::string(descriptor.argument_name) + "'" : String{})
                             + " of function " + func.getName()
                             + (descriptor.expected_type_description ? String(", expected ") + descriptor.expected_type_description : String{})
src/Functions/FunctionsAES.cpp (new file, 63 lines)
@@ -0,0 +1,63 @@
|
||||
#include <Functions/FunctionsAES.h>
|
||||
|
||||
#if USE_SSL
|
||||
|
||||
#include <openssl/evp.h>
|
||||
#include <openssl/err.h>
|
||||
|
||||
#include <string>
|
||||
#include <cassert>
|
||||
|
||||
|
||||
namespace DB
|
||||
{
|
||||
namespace ErrorCodes
|
||||
{
|
||||
extern const int OPENSSL_ERROR;
|
||||
}
|
||||
}
|
||||
|
||||
namespace OpenSSLDetails
|
||||
{
|
||||
void onError(std::string error_message)
|
||||
{
|
||||
error_message += ". OpenSSL error code: " + std::to_string(ERR_get_error());
|
||||
throw DB::Exception(error_message, DB::ErrorCodes::OPENSSL_ERROR);
|
||||
}
|
||||
|
||||
StringRef foldEncryptionKeyInMySQLCompatitableMode(size_t cipher_key_size, const StringRef & key, std::array<char, EVP_MAX_KEY_LENGTH> & folded_key)
|
||||
{
|
||||
assert(cipher_key_size <= EVP_MAX_KEY_LENGTH);
|
||||
memcpy(folded_key.data(), key.data, cipher_key_size);
|
||||
|
||||
for (size_t i = cipher_key_size; i < key.size; ++i)
|
||||
{
|
||||
folded_key[i % cipher_key_size] ^= key.data[i];
|
||||
}
|
||||
|
||||
return StringRef(folded_key.data(), cipher_key_size);
|
||||
}
|
||||
|
||||
const EVP_CIPHER * getCipherByName(const StringRef & cipher_name)
|
||||
{
|
||||
const auto * evp_cipher = EVP_get_cipherbyname(cipher_name.data);
|
||||
if (evp_cipher == nullptr)
|
||||
{
|
||||
// For some reason, the following ciphers can't be found by name.
|
||||
if (cipher_name == "aes-128-cfb128")
|
||||
evp_cipher = EVP_aes_128_cfb128();
|
||||
else if (cipher_name == "aes-192-cfb128")
|
||||
evp_cipher = EVP_aes_192_cfb128();
|
||||
else if (cipher_name == "aes-256-cfb128")
|
||||
evp_cipher = EVP_aes_256_cfb128();
|
||||
}
|
||||
|
||||
// NOTE: a cipher that was not obtained via EVP_CIPHER_fetch() would cause extra work on each context reset
// with EVP_CIPHER_CTX_reset() or EVP_EncryptInit_ex(), but using EVP_CIPHER_fetch()
// causes a data race, so we stick to the slower but safer alternative here.
|
||||
return evp_cipher;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
#endif
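For context on what the next header automates per row, here is a hedged standalone sketch of the plain OpenSSL EVP flow it wraps (AES-128-CBC only, error handling reduced to exceptions, key and IV sizes assumed valid). It is not the ClickHouse wrapper itself.

#include <openssl/evp.h>

#include <stdexcept>
#include <string>
#include <vector>

std::vector<unsigned char> encryptAes128Cbc(const std::string & plaintext, const unsigned char * key, const unsigned char * iv)
{
    EVP_CIPHER_CTX * ctx = EVP_CIPHER_CTX_new();
    if (!ctx)
        throw std::runtime_error("EVP_CIPHER_CTX_new failed");

    /// Output may grow by at most one block because of PKCS#7 padding.
    std::vector<unsigned char> out(plaintext.size() + EVP_MAX_BLOCK_LENGTH);
    int len = 0;
    int total = 0;

    /// 1: init context, 2: feed data, 3: flush the final (padded) block.
    if (EVP_EncryptInit_ex(ctx, EVP_aes_128_cbc(), nullptr, key, iv) != 1
        || EVP_EncryptUpdate(ctx, out.data(), &len,
               reinterpret_cast<const unsigned char *>(plaintext.data()), static_cast<int>(plaintext.size())) != 1)
    {
        EVP_CIPHER_CTX_free(ctx);
        throw std::runtime_error("encryption failed");
    }
    total = len;

    if (EVP_EncryptFinal_ex(ctx, out.data() + total, &len) != 1)
    {
        EVP_CIPHER_CTX_free(ctx);
        throw std::runtime_error("final block failed");
    }
    total += len;

    EVP_CIPHER_CTX_free(ctx);
    out.resize(static_cast<size_t>(total));
    return out;
}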
|
src/Functions/FunctionsAES.h (new file, 693 lines)
@@ -0,0 +1,693 @@
|
||||
#pragma once
|
||||
|
||||
#if !defined(ARCADIA_BUILD)
|
||||
# include <Common/config.h>
|
||||
#endif
|
||||
|
||||
#if USE_SSL
|
||||
#include <DataTypes/DataTypeString.h>
|
||||
#include <Columns/ColumnString.h>
|
||||
#include <Functions/IFunction.h>
|
||||
#include <Functions/FunctionFactory.h>
|
||||
#include <Functions/FunctionHelpers.h>
|
||||
|
||||
#include <fmt/format.h>
|
||||
|
||||
#include <openssl/evp.h>
|
||||
#include <openssl/engine.h>
|
||||
|
||||
#include <string_view>
|
||||
#include <functional>
|
||||
#include <initializer_list>
|
||||
|
||||
#include <string.h>
|
||||
|
||||
|
||||
namespace DB
|
||||
{
|
||||
namespace ErrorCodes
|
||||
{
|
||||
extern const int BAD_ARGUMENTS;
|
||||
}
|
||||
}
|
||||
|
||||
namespace OpenSSLDetails
|
||||
{
|
||||
[[noreturn]] void onError(std::string error_message);
|
||||
StringRef foldEncryptionKeyInMySQLCompatitableMode(size_t cipher_key_size, const StringRef & key, std::array<char, EVP_MAX_KEY_LENGTH> & folded_key);
|
||||
|
||||
const EVP_CIPHER * getCipherByName(const StringRef & name);
|
||||
|
||||
enum class CompatibilityMode
|
||||
{
|
||||
MySQL,
|
||||
OpenSSL
|
||||
};
|
||||
|
||||
enum class CipherMode
|
||||
{
|
||||
MySQLCompatibility, // with key folding
|
||||
OpenSSLCompatibility, // just as regular openssl's enc application does (AEAD modes, like GCM and CCM are not supported)
|
||||
RFC5116_AEAD_AES_GCM // AEAD GCM with custom IV length and tag (HMAC) appended to the ciphertext, see https://tools.ietf.org/html/rfc5116#section-5.1
|
||||
};
|
||||
|
||||
|
||||
template <CipherMode mode>
|
||||
struct KeyHolder
|
||||
{
|
||||
inline StringRef setKey(size_t cipher_key_size, const StringRef & key) const
|
||||
{
|
||||
if (key.size != cipher_key_size)
|
||||
throw DB::Exception(fmt::format("Invalid key size: {} expected {}", key.size, cipher_key_size),
|
||||
DB::ErrorCodes::BAD_ARGUMENTS);
|
||||
|
||||
return key;
|
||||
}
|
||||
};
|
||||
|
||||
template <>
|
||||
struct KeyHolder<CipherMode::MySQLCompatibility>
|
||||
{
|
||||
inline StringRef setKey(size_t cipher_key_size, const StringRef & key)
|
||||
{
|
||||
if (key.size < cipher_key_size)
|
||||
throw DB::Exception(fmt::format("Invalid key size: {} expected {}", key.size, cipher_key_size),
|
||||
DB::ErrorCodes::BAD_ARGUMENTS);
|
||||
|
||||
// MySQL does something fancy with the keys that are too long,
|
||||
// ruining compatibility with OpenSSL and not improving security.
|
||||
// But we have to do the same to be compatible with MySQL.
|
||||
// see https://github.com/mysql/mysql-server/blob/8.0/router/src/harness/src/my_aes_openssl.cc#L71
|
||||
// (my_aes_create_key function)
|
||||
return foldEncryptionKeyInMySQLCompatitableMode(cipher_key_size, key, folded_key);
|
||||
}
|
||||
|
||||
~KeyHolder()
|
||||
{
|
||||
OPENSSL_cleanse(folded_key.data(), folded_key.size());
|
||||
}
|
||||
|
||||
private:
|
||||
std::array<char, EVP_MAX_KEY_LENGTH> folded_key;
|
||||
};
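// Illustration only (a toy helper, not part of this header): the fold performed by
// foldEncryptionKeyInMySQLCompatitableMode() keeps the first cipher_key_size bytes
// of the key and XOR-s every extra byte back into them.
#include <cstddef>
#include <string>

inline std::string foldKeyMySQLStyle(const std::string & key, std::size_t cipher_key_size)
{
    std::string folded = key.substr(0, cipher_key_size);
    for (std::size_t i = cipher_key_size; i < key.size(); ++i)
        folded[i % cipher_key_size] ^= key[i];
    return folded;
}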
|
||||
|
||||
template <CompatibilityMode compatibility_mode>
|
||||
inline void validateCipherMode(const EVP_CIPHER * evp_cipher)
|
||||
{
|
||||
if constexpr (compatibility_mode == CompatibilityMode::MySQL)
|
||||
{
|
||||
switch (EVP_CIPHER_mode(evp_cipher))
|
||||
{
|
||||
case EVP_CIPH_ECB_MODE: [[fallthrough]];
|
||||
case EVP_CIPH_CBC_MODE: [[fallthrough]];
|
||||
case EVP_CIPH_CFB_MODE: [[fallthrough]];
|
||||
case EVP_CIPH_OFB_MODE:
|
||||
return;
|
||||
}
|
||||
}
|
||||
else if constexpr (compatibility_mode == CompatibilityMode::OpenSSL)
|
||||
{
|
||||
switch (EVP_CIPHER_mode(evp_cipher))
|
||||
{
|
||||
case EVP_CIPH_ECB_MODE: [[fallthrough]];
|
||||
case EVP_CIPH_CBC_MODE: [[fallthrough]];
|
||||
case EVP_CIPH_CFB_MODE: [[fallthrough]];
|
||||
case EVP_CIPH_OFB_MODE: [[fallthrough]];
|
||||
case EVP_CIPH_CTR_MODE: [[fallthrough]];
|
||||
case EVP_CIPH_GCM_MODE:
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
throw DB::Exception("Unsupported cipher mode " + std::string(EVP_CIPHER_name(evp_cipher)), DB::ErrorCodes::BAD_ARGUMENTS);
|
||||
}
|
||||
|
||||
template <CipherMode mode>
|
||||
inline void validateIV(const StringRef & iv_value, const size_t cipher_iv_size)
|
||||
{
|
||||
// In MySQL mode we don't care if IV is longer than expected, only if shorter.
|
||||
if ((mode == CipherMode::MySQLCompatibility && iv_value.size != 0 && iv_value.size < cipher_iv_size)
|
||||
|| (mode == CipherMode::OpenSSLCompatibility && iv_value.size != 0 && iv_value.size != cipher_iv_size))
|
||||
throw DB::Exception(fmt::format("Invalid IV size: {} expected {}", iv_value.size, cipher_iv_size),
|
||||
DB::ErrorCodes::BAD_ARGUMENTS);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
namespace DB
|
||||
{
|
||||
template <typename Impl>
|
||||
class FunctionEncrypt : public IFunction
|
||||
{
|
||||
public:
|
||||
static constexpr OpenSSLDetails::CompatibilityMode compatibility_mode = Impl::compatibility_mode;
|
||||
static constexpr auto name = Impl::name;
|
||||
static FunctionPtr create(const Context &) { return std::make_shared<FunctionEncrypt>(); }
|
||||
|
||||
private:
|
||||
using CipherMode = OpenSSLDetails::CipherMode;
|
||||
|
||||
String getName() const override { return name; }
|
||||
bool isVariadic() const override { return true; }
|
||||
size_t getNumberOfArguments() const override { return 0; }
|
||||
ColumnNumbers getArgumentsThatAreAlwaysConstant() const override { return {0}; }
|
||||
bool useDefaultImplementationForConstants() const override { return true; }
|
||||
|
||||
DataTypePtr getReturnTypeImpl(const ColumnsWithTypeAndName & arguments) const override
|
||||
{
|
||||
auto optional_args = FunctionArgumentDescriptors{
|
||||
{"IV", isStringOrFixedString, nullptr, "Initialization vector binary string"},
|
||||
};
|
||||
|
||||
if constexpr (compatibility_mode == OpenSSLDetails::CompatibilityMode::OpenSSL)
|
||||
{
|
||||
optional_args.emplace_back(FunctionArgumentDescriptor{
|
||||
"AAD", isStringOrFixedString, nullptr, "Additional authenticated data binary string for GCM mode"
|
||||
});
|
||||
}
|
||||
|
||||
validateFunctionArgumentTypes(*this, arguments,
|
||||
FunctionArgumentDescriptors{
|
||||
{"mode", isStringOrFixedString, isColumnConst, "encryption mode string"},
|
||||
{"input", nullptr, nullptr, "plaintext"},
|
||||
{"key", isStringOrFixedString, nullptr, "encryption key binary string"},
|
||||
},
|
||||
optional_args
|
||||
);
|
||||
|
||||
return std::make_shared<DataTypeString>();
|
||||
}
|
||||
|
||||
void executeImpl(DB::ColumnsWithTypeAndName & block, const ColumnNumbers & arguments, size_t result, size_t input_rows_count) const override
|
||||
{
|
||||
using namespace OpenSSLDetails;
|
||||
|
||||
const auto mode = block[arguments[0]].column->getDataAt(0);
|
||||
|
||||
if (mode.size == 0 || !std::string_view(mode).starts_with("aes-"))
|
||||
throw Exception("Invalid mode: " + mode.toString(), ErrorCodes::BAD_ARGUMENTS);
|
||||
|
||||
auto evp_cipher = getCipherByName(mode);
|
||||
if (evp_cipher == nullptr)
|
||||
throw Exception("Invalid mode: " + mode.toString(), ErrorCodes::BAD_ARGUMENTS);
|
||||
|
||||
const auto cipher_mode = EVP_CIPHER_mode(evp_cipher);
|
||||
|
||||
const auto input_column = block[arguments[1]].column;
|
||||
const auto key_column = block[arguments[2]].column;
|
||||
|
||||
OpenSSLDetails::validateCipherMode<compatibility_mode>(evp_cipher);
|
||||
|
||||
ColumnPtr result_column;
|
||||
if (arguments.size() <= 3)
|
||||
result_column = doEncrypt(evp_cipher, input_rows_count, input_column, key_column, nullptr, nullptr);
|
||||
else
|
||||
{
|
||||
const auto iv_column = block[arguments[3]].column;
|
||||
if (compatibility_mode != OpenSSLDetails::CompatibilityMode::MySQL && EVP_CIPHER_iv_length(evp_cipher) == 0)
|
||||
throw Exception(mode.toString() + " does not support IV", ErrorCodes::BAD_ARGUMENTS);
|
||||
|
||||
if (arguments.size() <= 4)
|
||||
{
|
||||
result_column = doEncrypt(evp_cipher, input_rows_count, input_column, key_column, iv_column, nullptr);
|
||||
}
|
||||
else
|
||||
{
|
||||
if (cipher_mode != EVP_CIPH_GCM_MODE)
|
||||
throw Exception("AAD can be only set for GCM-mode", ErrorCodes::BAD_ARGUMENTS);
|
||||
|
||||
const auto aad_column = block[arguments[4]].column;
|
||||
result_column = doEncrypt(evp_cipher, input_rows_count, input_column, key_column, iv_column, aad_column);
|
||||
}
|
||||
}
|
||||
|
||||
block[result].column = std::move(result_column);
|
||||
}
|
||||
|
||||
template <typename InputColumnType, typename KeyColumnType, typename IvColumnType, typename AadColumnType>
|
||||
static ColumnPtr doEncrypt(const EVP_CIPHER * evp_cipher,
|
||||
size_t input_rows_count,
|
||||
const InputColumnType & input_column,
|
||||
const KeyColumnType & key_column,
|
||||
const IvColumnType & iv_column,
|
||||
const AadColumnType & aad_column)
|
||||
{
|
||||
if constexpr (compatibility_mode == OpenSSLDetails::CompatibilityMode::MySQL)
|
||||
{
|
||||
return doEncryptImpl<CipherMode::MySQLCompatibility>(evp_cipher, input_rows_count, input_column, key_column, iv_column, aad_column);
|
||||
}
|
||||
else
|
||||
{
|
||||
if (EVP_CIPHER_mode(evp_cipher) == EVP_CIPH_GCM_MODE)
|
||||
{
|
||||
return doEncryptImpl<CipherMode::RFC5116_AEAD_AES_GCM>(evp_cipher, input_rows_count, input_column, key_column, iv_column, aad_column);
|
||||
}
|
||||
else
|
||||
{
|
||||
return doEncryptImpl<CipherMode::OpenSSLCompatibility>(evp_cipher, input_rows_count, input_column, key_column, iv_column, aad_column);
|
||||
}
|
||||
}
|
||||
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
template <CipherMode mode, typename InputColumnType, typename KeyColumnType, typename IvColumnType, typename AadColumnType>
|
||||
static ColumnPtr doEncryptImpl(const EVP_CIPHER * evp_cipher,
|
||||
size_t input_rows_count,
|
||||
const InputColumnType & input_column,
|
||||
const KeyColumnType & key_column,
|
||||
[[maybe_unused]] const IvColumnType & iv_column,
|
||||
[[maybe_unused]] const AadColumnType & aad_column)
|
||||
{
|
||||
using namespace OpenSSLDetails;
|
||||
|
||||
auto evp_ctx_ptr = std::unique_ptr<EVP_CIPHER_CTX, decltype(&::EVP_CIPHER_CTX_free)>(EVP_CIPHER_CTX_new(), &EVP_CIPHER_CTX_free);
|
||||
auto evp_ctx = evp_ctx_ptr.get();
|
||||
|
||||
const auto block_size = static_cast<size_t>(EVP_CIPHER_block_size(evp_cipher));
|
||||
const auto key_size = static_cast<size_t>(EVP_CIPHER_key_length(evp_cipher));
|
||||
[[maybe_unused]] const auto iv_size = static_cast<size_t>(EVP_CIPHER_iv_length(evp_cipher));
|
||||
const auto tag_size = 16; // https://tools.ietf.org/html/rfc5116#section-5.1
|
||||
|
||||
auto encrypted_result_column = ColumnString::create();
|
||||
auto & encrypted_result_column_data = encrypted_result_column->getChars();
|
||||
auto & encrypted_result_column_offsets = encrypted_result_column->getOffsets();
|
||||
|
||||
{
|
||||
size_t resulting_size = 0;
|
||||
// for modes with block_size > 1, plaintext is padded up to a block_size,
|
||||
// which may result in allocating too much for block_size = 1.
|
||||
// That may lead later to reading unallocated data from underlying PaddedPODArray
|
||||
// due to assumption that it is safe to read up to 15 bytes past end.
|
||||
const auto pad_to_next_block = block_size == 1 ? 0 : 1;
|
||||
for (size_t r = 0; r < input_rows_count; ++r)
|
||||
{
|
||||
resulting_size += (input_column->getDataAt(r).size / block_size + pad_to_next_block) * block_size + 1;
|
||||
if constexpr (mode == CipherMode::RFC5116_AEAD_AES_GCM)
|
||||
resulting_size += tag_size;
|
||||
}
|
||||
#if defined(MEMORY_SANITIZER)
|
||||
encrypted_result_column_data.resize_fill(resulting_size, 0xFF);
|
||||
#else
|
||||
encrypted_result_column_data.resize(resulting_size);
|
||||
#endif
|
||||
}
|
||||
|
||||
auto encrypted = encrypted_result_column_data.data();
|
||||
|
||||
KeyHolder<mode> key_holder;
|
||||
|
||||
for (size_t r = 0; r < input_rows_count; ++r)
|
||||
{
|
||||
const auto key_value = key_holder.setKey(key_size, key_column->getDataAt(r));
|
||||
auto iv_value = StringRef{};
|
||||
if constexpr (!std::is_same_v<nullptr_t, std::decay_t<IvColumnType>>)
|
||||
{
|
||||
iv_value = iv_column->getDataAt(r);
|
||||
}
|
||||
|
||||
const auto input_value = input_column->getDataAt(r);
|
||||
auto aad_value = StringRef{};
|
||||
if constexpr (mode == CipherMode::RFC5116_AEAD_AES_GCM && !std::is_same_v<nullptr_t, std::decay_t<AadColumnType>>)
|
||||
{
|
||||
aad_value = aad_column->getDataAt(r);
|
||||
}
|
||||
|
||||
if constexpr (mode != CipherMode::MySQLCompatibility)
|
||||
{
|
||||
// in GCM mode IV can be of arbitrary size (>0), IV is optional for other modes.
|
||||
if (mode == CipherMode::RFC5116_AEAD_AES_GCM && iv_value.size == 0)
|
||||
{
|
||||
throw Exception("Invalid IV size " + std::to_string(iv_value.size) + " != expected size " + std::to_string(iv_size),
|
||||
DB::ErrorCodes::BAD_ARGUMENTS);
|
||||
}
|
||||
|
||||
if (mode != CipherMode::RFC5116_AEAD_AES_GCM && key_value.size != key_size)
|
||||
{
|
||||
throw Exception("Invalid key size " + std::to_string(key_value.size) + " != expected size " + std::to_string(key_size),
|
||||
DB::ErrorCodes::BAD_ARGUMENTS);
|
||||
}
|
||||
}
|
||||
|
||||
// Avoid extra work on empty ciphertext/plaintext for some ciphers
|
||||
if (!(input_value.size == 0 && block_size == 1 && mode != CipherMode::RFC5116_AEAD_AES_GCM))
|
||||
{
|
||||
// 1: Init CTX
|
||||
if constexpr (mode == CipherMode::RFC5116_AEAD_AES_GCM)
|
||||
{
|
||||
// 1.a.1: Init CTX with custom IV length and optionally with AAD
|
||||
if (EVP_EncryptInit_ex(evp_ctx, evp_cipher, nullptr, nullptr, nullptr) != 1)
|
||||
onError("Failed to initialize encryption context with cipher");
|
||||
|
||||
if (EVP_CIPHER_CTX_ctrl(evp_ctx, EVP_CTRL_AEAD_SET_IVLEN, iv_value.size, nullptr) != 1)
|
||||
onError("Failed to set custom IV length to " + std::to_string(iv_value.size));
|
||||
|
||||
if (EVP_EncryptInit_ex(evp_ctx, nullptr, nullptr,
|
||||
reinterpret_cast<const unsigned char*>(key_value.data),
|
||||
reinterpret_cast<const unsigned char*>(iv_value.data)) != 1)
|
||||
onError("Failed to set key and IV");
|
||||
|
||||
// 1.a.2 Set AAD
|
||||
if constexpr (!std::is_same_v<nullptr_t, std::decay_t<AadColumnType>>)
|
||||
{
|
||||
const auto aad_data = aad_column->getDataAt(r);
|
||||
int tmp_len = 0;
|
||||
if (aad_data.size != 0 && EVP_EncryptUpdate(evp_ctx, nullptr, &tmp_len,
|
||||
reinterpret_cast<const unsigned char *>(aad_data.data), aad_data.size) != 1)
|
||||
onError("Failed to set AAD data");
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
// 1.b: Init CTX
|
||||
validateIV<mode>(iv_value, iv_size);
|
||||
|
||||
if (EVP_EncryptInit_ex(evp_ctx, evp_cipher, nullptr,
|
||||
reinterpret_cast<const unsigned char*>(key_value.data),
|
||||
reinterpret_cast<const unsigned char*>(iv_value.data)) != 1)
|
||||
onError("Failed to initialize cipher context");
|
||||
}
|
||||
|
||||
int output_len = 0;
|
||||
// 2: Feed the data to the cipher
|
||||
if (EVP_EncryptUpdate(evp_ctx,
|
||||
reinterpret_cast<unsigned char*>(encrypted), &output_len,
|
||||
reinterpret_cast<const unsigned char*>(input_value.data), static_cast<int>(input_value.size)) != 1)
|
||||
onError("Failed to encrypt");
|
||||
encrypted += output_len;
|
||||
|
||||
// 3: retrieve encrypted data (ciphertext)
|
||||
if (EVP_EncryptFinal_ex(evp_ctx,
|
||||
reinterpret_cast<unsigned char*>(encrypted), &output_len) != 1)
|
||||
onError("Failed to fetch ciphertext");
|
||||
encrypted += output_len;
|
||||
|
||||
// 4: optionally retrieve a tag and append it to the ciphertext (RFC5116):
|
||||
// https://tools.ietf.org/html/rfc5116#section-5.1
|
||||
if constexpr (mode == CipherMode::RFC5116_AEAD_AES_GCM)
|
||||
{
|
||||
if (EVP_CIPHER_CTX_ctrl(evp_ctx, EVP_CTRL_AEAD_GET_TAG, tag_size, encrypted) != 1)
|
||||
onError("Failed to retrieve GCM tag");
|
||||
encrypted += tag_size;
|
||||
}
|
||||
}
|
||||
|
||||
*encrypted = '\0';
|
||||
++encrypted;
|
||||
|
||||
encrypted_result_column_offsets.push_back(encrypted - encrypted_result_column_data.data());
|
||||
}
|
||||
|
||||
// in case of block size of 1, we overestimate buffer required for encrypted data, fix it up.
|
||||
if (!encrypted_result_column_offsets.empty() && encrypted_result_column_data.size() > encrypted_result_column_offsets.back())
|
||||
{
|
||||
encrypted_result_column_data.resize(encrypted_result_column_offsets.back());
|
||||
}
|
||||
|
||||
encrypted_result_column->validate();
|
||||
return encrypted_result_column;
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
/// AES_decrypt(string, key, block_mode[, init_vector])
|
||||
template <typename Impl>
|
||||
class FunctionDecrypt : public IFunction
|
||||
{
|
||||
public:
|
||||
static constexpr OpenSSLDetails::CompatibilityMode compatibility_mode = Impl::compatibility_mode;
|
||||
static constexpr auto name = Impl::name;
|
||||
static FunctionPtr create(const Context &) { return std::make_shared<FunctionDecrypt>(); }
|
||||
|
||||
private:
|
||||
using CipherMode = OpenSSLDetails::CipherMode;
|
||||
|
||||
String getName() const override { return name; }
|
||||
bool isVariadic() const override { return true; }
|
||||
size_t getNumberOfArguments() const override { return 0; }
|
||||
ColumnNumbers getArgumentsThatAreAlwaysConstant() const override { return {0}; }
|
||||
bool useDefaultImplementationForConstants() const override { return true; }
|
||||
|
||||
DataTypePtr getReturnTypeImpl(const ColumnsWithTypeAndName & arguments) const override
|
||||
{
|
||||
auto optional_args = FunctionArgumentDescriptors{
|
||||
{"IV", isStringOrFixedString, nullptr, "Initialization vector binary string"},
|
||||
};
|
||||
|
||||
if constexpr (compatibility_mode == OpenSSLDetails::CompatibilityMode::OpenSSL)
|
||||
{
|
||||
optional_args.emplace_back(FunctionArgumentDescriptor{
|
||||
"AAD", isStringOrFixedString, nullptr, "Additional authenticated data binary string for GCM mode"
|
||||
});
|
||||
}
|
||||
|
||||
validateFunctionArgumentTypes(*this, arguments,
|
||||
FunctionArgumentDescriptors{
|
||||
{"mode", isStringOrFixedString, isColumnConst, "decryption mode string"},
|
||||
{"input", nullptr, nullptr, "ciphertext"},
|
||||
{"key", isStringOrFixedString, nullptr, "decryption key binary string"},
|
||||
},
|
||||
optional_args
|
||||
);
|
||||
|
||||
return std::make_shared<DataTypeString>();
|
||||
}
|
||||
|
||||
void executeImpl(DB::ColumnsWithTypeAndName & block, const ColumnNumbers & arguments, size_t result, size_t input_rows_count) const override
|
||||
{
|
||||
using namespace OpenSSLDetails;
|
||||
|
||||
const auto mode = block[arguments[0]].column->getDataAt(0);
|
||||
if (mode.size == 0 || !std::string_view(mode).starts_with("aes-"))
|
||||
throw Exception("Invalid mode: " + mode.toString(), ErrorCodes::BAD_ARGUMENTS);
|
||||
|
||||
auto evp_cipher = getCipherByName(mode);
|
||||
if (evp_cipher == nullptr)
|
||||
throw Exception("Invalid mode: " + mode.toString(), ErrorCodes::BAD_ARGUMENTS);
|
||||
|
||||
OpenSSLDetails::validateCipherMode<compatibility_mode>(evp_cipher);
|
||||
|
||||
const auto input_column = block[arguments[1]].column;
|
||||
const auto key_column = block[arguments[2]].column;
|
||||
|
||||
ColumnPtr result_column;
|
||||
if (arguments.size() <= 3)
|
||||
result_column = doDecrypt(evp_cipher, input_rows_count, input_column, key_column, nullptr, nullptr);
|
||||
else
|
||||
{
|
||||
const auto iv_column = block[arguments[3]].column;
|
||||
if (compatibility_mode != OpenSSLDetails::CompatibilityMode::MySQL && EVP_CIPHER_iv_length(evp_cipher) == 0)
|
||||
throw Exception(mode.toString() + " does not support IV", ErrorCodes::BAD_ARGUMENTS);
|
||||
|
||||
if (arguments.size() <= 4)
|
||||
{
|
||||
result_column = doDecrypt(evp_cipher, input_rows_count, input_column, key_column, iv_column, nullptr);
|
||||
}
|
||||
else
|
||||
{
|
||||
if (EVP_CIPHER_mode(evp_cipher) != EVP_CIPH_GCM_MODE)
|
||||
throw Exception("AAD can be only set for GCM-mode", ErrorCodes::BAD_ARGUMENTS);
|
||||
|
||||
const auto aad_column = block[arguments[4]].column;
|
||||
result_column = doDecrypt(evp_cipher, input_rows_count, input_column, key_column, iv_column, aad_column);
|
||||
}
|
||||
}
|
||||
|
||||
block[result].column = std::move(result_column);
|
||||
}
|
||||
|
||||
template <typename InputColumnType, typename KeyColumnType, typename IvColumnType, typename AadColumnType>
|
||||
static ColumnPtr doDecrypt(const EVP_CIPHER * evp_cipher,
|
||||
size_t input_rows_count,
|
||||
const InputColumnType & input_column,
|
||||
const KeyColumnType & key_column,
|
||||
const IvColumnType & iv_column,
|
||||
const AadColumnType & aad_column)
|
||||
{
|
||||
if constexpr (compatibility_mode == OpenSSLDetails::CompatibilityMode::MySQL)
|
||||
{
|
||||
return doDecryptImpl<CipherMode::MySQLCompatibility>(evp_cipher, input_rows_count, input_column, key_column, iv_column, aad_column);
|
||||
}
|
||||
else
|
||||
{
|
||||
const auto cipher_mode = EVP_CIPHER_mode(evp_cipher);
|
||||
if (cipher_mode == EVP_CIPH_GCM_MODE)
|
||||
{
|
||||
return doDecryptImpl<CipherMode::RFC5116_AEAD_AES_GCM>(evp_cipher, input_rows_count, input_column, key_column, iv_column, aad_column);
|
||||
}
|
||||
else
|
||||
{
|
||||
return doDecryptImpl<CipherMode::OpenSSLCompatibility>(evp_cipher, input_rows_count, input_column, key_column, iv_column, aad_column);
|
||||
}
|
||||
}
|
||||
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
template <CipherMode mode, typename InputColumnType, typename KeyColumnType, typename IvColumnType, typename AadColumnType>
|
||||
static ColumnPtr doDecryptImpl(const EVP_CIPHER * evp_cipher,
|
||||
size_t input_rows_count,
|
||||
const InputColumnType & input_column,
|
||||
const KeyColumnType & key_column,
|
||||
[[maybe_unused]] const IvColumnType & iv_column,
|
||||
[[maybe_unused]] const AadColumnType & aad_column)
|
||||
{
|
||||
using namespace OpenSSLDetails;
|
||||
|
||||
auto evp_ctx_ptr = std::unique_ptr<EVP_CIPHER_CTX, decltype(&::EVP_CIPHER_CTX_free)>(EVP_CIPHER_CTX_new(), &EVP_CIPHER_CTX_free);
|
||||
auto evp_ctx = evp_ctx_ptr.get();
|
||||
|
||||
[[maybe_unused]] const auto block_size = static_cast<size_t>(EVP_CIPHER_block_size(evp_cipher));
|
||||
[[maybe_unused]] const auto iv_size = static_cast<size_t>(EVP_CIPHER_iv_length(evp_cipher));
|
||||
const auto key_size = static_cast<size_t>(EVP_CIPHER_key_length(evp_cipher));
|
||||
const auto tag_size = 16; // https://tools.ietf.org/html/rfc5116#section-5.1
|
||||
|
||||
auto decrypted_result_column = ColumnString::create();
|
||||
auto & decrypted_result_column_data = decrypted_result_column->getChars();
|
||||
auto & decrypted_result_column_offsets = decrypted_result_column->getOffsets();
|
||||
|
||||
{
|
||||
size_t resulting_size = 0;
|
||||
for (size_t r = 0; r < input_rows_count; ++r)
|
||||
{
|
||||
resulting_size += input_column->getDataAt(r).size + 1;
|
||||
if constexpr (mode == CipherMode::RFC5116_AEAD_AES_GCM)
|
||||
resulting_size -= tag_size;
|
||||
}
|
||||
|
||||
#if defined(MEMORY_SANITIZER)
|
||||
// Pre-fill result column with values to prevent MSAN from dropping dead on
|
||||
// aes-X-ecb mode with "WARNING: MemorySanitizer: use-of-uninitialized-value".
|
||||
// This is most likely to be caused by the underlying assembler implementation:
|
||||
// see crypto/aes/aesni-x86_64.s, function aesni_ecb_encrypt
|
||||
// which MSan seems to fail to instrument correctly.
|
||||
decrypted_result_column_data.resize_fill(resulting_size, 0xFF);
|
||||
#else
|
||||
decrypted_result_column_data.resize(resulting_size);
|
||||
#endif
|
||||
}
|
||||
auto decrypted = decrypted_result_column_data.data();
|
||||
|
||||
KeyHolder<mode> key_holder;
|
||||
for (size_t r = 0; r < input_rows_count; ++r)
|
||||
{
|
||||
// 0: prepare key if required
|
||||
auto key_value = key_holder.setKey(key_size, key_column->getDataAt(r));
|
||||
auto iv_value = StringRef{};
|
||||
if constexpr (!std::is_same_v<nullptr_t, std::decay_t<IvColumnType>>)
|
||||
{
|
||||
iv_value = iv_column->getDataAt(r);
|
||||
}
|
||||
|
||||
auto input_value = input_column->getDataAt(r);
|
||||
if constexpr (mode == CipherMode::RFC5116_AEAD_AES_GCM)
|
||||
{
|
||||
// empty plaintext results in empty ciphertext + tag, which means there should be at least tag_size bytes.
|
||||
if (input_value.size < tag_size)
|
||||
throw Exception(fmt::format("Encrypted data is too short: only {} bytes, "
|
||||
"should contain at least {} bytes of a tag.",
|
||||
input_value.size, block_size, tag_size), ErrorCodes::BAD_ARGUMENTS);
|
||||
input_value.size -= tag_size;
|
||||
}
|
||||
|
||||
if constexpr (mode != CipherMode::MySQLCompatibility)
|
||||
{
|
||||
// in GCM mode IV can be of arbitrary size (>0), for other modes IV is optional.
|
||||
if (mode == CipherMode::RFC5116_AEAD_AES_GCM && iv_value.size == 0)
|
||||
{
|
||||
throw Exception("Invalid IV size " + std::to_string(iv_value.size) + " != expected size " + std::to_string(iv_size),
|
||||
DB::ErrorCodes::BAD_ARGUMENTS);
|
||||
}
|
||||
|
||||
if (key_value.size != key_size)
|
||||
{
|
||||
throw Exception("Invalid key size " + std::to_string(key_value.size) + " != expected size " + std::to_string(key_size),
|
||||
DB::ErrorCodes::BAD_ARGUMENTS);
|
||||
}
|
||||
}
|
||||
|
||||
// Avoid extra work on empty ciphertext/plaintext for some ciphers
|
||||
if (!(input_value.size == 0 && block_size == 1 && mode != CipherMode::RFC5116_AEAD_AES_GCM))
|
||||
{
|
||||
// 1: Init CTX
|
||||
if constexpr (mode == CipherMode::RFC5116_AEAD_AES_GCM)
|
||||
{
|
||||
if (EVP_DecryptInit_ex(evp_ctx, evp_cipher, nullptr, nullptr, nullptr) != 1)
|
||||
onError("Failed to initialize cipher context 1");
|
||||
|
||||
// 1.a.1 : Set custom IV length
|
||||
if (EVP_CIPHER_CTX_ctrl(evp_ctx, EVP_CTRL_AEAD_SET_IVLEN, iv_value.size, nullptr) != 1)
|
||||
onError("Failed to set custom IV length to " + std::to_string(iv_value.size));
|
||||
|
||||
// 1.a.1 : Init CTX with key and IV
|
||||
if (EVP_DecryptInit_ex(evp_ctx, nullptr, nullptr,
|
||||
reinterpret_cast<const unsigned char*>(key_value.data),
|
||||
reinterpret_cast<const unsigned char*>(iv_value.data)) != 1)
|
||||
onError("Failed to set key and IV");
|
||||
|
||||
// 1.a.2: Set AAD if present
|
||||
if constexpr (!std::is_same_v<nullptr_t, std::decay_t<AadColumnType>>)
|
||||
{
|
||||
const auto aad_data = aad_column->getDataAt(r);
|
||||
int tmp_len = 0;
|
||||
if (aad_data.size != 0 && EVP_DecryptUpdate(evp_ctx, nullptr, &tmp_len,
|
||||
reinterpret_cast<const unsigned char *>(aad_data.data), aad_data.size) != 1)
|
||||
onError("Failed to sed AAD data");
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
// 1.b: Init CTX
|
||||
validateIV<mode>(iv_value, iv_size);
|
||||
|
||||
if (EVP_DecryptInit_ex(evp_ctx, evp_cipher, nullptr,
|
||||
reinterpret_cast<const unsigned char*>(key_value.data),
|
||||
reinterpret_cast<const unsigned char*>(iv_value.data)) != 1)
|
||||
onError("Failed to initialize cipher context");
|
||||
}
|
||||
|
||||
// 2: Feed the data to the cipher
|
||||
int output_len = 0;
|
||||
if (EVP_DecryptUpdate(evp_ctx,
|
||||
reinterpret_cast<unsigned char*>(decrypted), &output_len,
|
||||
reinterpret_cast<const unsigned char*>(input_value.data), static_cast<int>(input_value.size)) != 1)
|
||||
onError("Failed to decrypt");
|
||||
decrypted += output_len;
|
||||
|
||||
// 3: optionally get tag from the ciphertext (RFC5116) and feed it to the context
|
||||
if constexpr (mode == CipherMode::RFC5116_AEAD_AES_GCM)
|
||||
{
|
||||
void * tag = const_cast<void *>(reinterpret_cast<const void *>(input_value.data + input_value.size));
|
||||
if (EVP_CIPHER_CTX_ctrl(evp_ctx, EVP_CTRL_AEAD_SET_TAG, tag_size, tag) != 1)
|
||||
onError("Failed to set tag");
|
||||
}
|
||||
|
||||
// 4: retrieve encrypted data (ciphertext)
|
||||
if (EVP_DecryptFinal_ex(evp_ctx,
|
||||
reinterpret_cast<unsigned char*>(decrypted), &output_len) != 1)
|
||||
onError("Failed to decrypt");
|
||||
decrypted += output_len;
|
||||
}
|
||||
|
||||
*decrypted = '\0';
|
||||
++decrypted;
|
||||
|
||||
decrypted_result_column_offsets.push_back(decrypted - decrypted_result_column_data.data());
|
||||
|
||||
}
|
||||
|
||||
// in case we overestimate buffer required for decrypted data, fix it up.
|
||||
if (!decrypted_result_column_offsets.empty() && decrypted_result_column_data.size() > decrypted_result_column_offsets.back())
|
||||
{
|
||||
decrypted_result_column_data.resize(decrypted_result_column_offsets.back());
|
||||
}
|
||||
|
||||
decrypted_result_column->validate();
|
||||
return decrypted_result_column;
|
||||
}
|
||||
};
|
||||
|
||||
}
|
||||
|
||||
|
||||
#endif
|
@ -1659,7 +1659,7 @@ public:
|
||||
if (!isUInt8(second_argument))
|
||||
throw Exception{"Illegal type " + second_argument->getName()
|
||||
+ " of second argument of function " + getName()
|
||||
+ ", expected numeric type.", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT};
|
||||
+ ", expected UInt8", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT};
|
||||
|
||||
DataTypePtr element = DataTypeFactory::instance().get("IPv6");
|
||||
return std::make_shared<DataTypeTuple>(DataTypes{element, element});
|
||||
@ -1673,19 +1673,21 @@ public:
|
||||
const auto & col_type_name_ip = columns[arguments[0]];
|
||||
const ColumnPtr & column_ip = col_type_name_ip.column;
|
||||
|
||||
const auto col_const_ip_in = checkAndGetColumnConst<ColumnFixedString>(column_ip.get());
|
||||
const auto col_ip_in = checkAndGetColumn<ColumnFixedString>(column_ip.get());
|
||||
|
||||
if (!col_ip_in)
|
||||
if (!col_ip_in && !col_const_ip_in)
|
||||
throw Exception("Illegal column " + columns[arguments[0]].column->getName()
|
||||
+ " of argument of function " + getName(),
|
||||
ErrorCodes::ILLEGAL_COLUMN);
|
||||
|
||||
if (col_ip_in->getN() != IPV6_BINARY_LENGTH)
|
||||
throw Exception("Illegal type " + col_type_name_ip.type->getName() +
|
||||
" of column " + col_ip_in->getName() +
|
||||
" argument of function " + getName() +
|
||||
", expected FixedString(" + toString(IPV6_BINARY_LENGTH) + ")",
|
||||
ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
|
||||
if ((col_const_ip_in && col_const_ip_in->getValue<String>().size() != IPV6_BINARY_LENGTH) ||
|
||||
(col_ip_in && col_ip_in->getN() != IPV6_BINARY_LENGTH))
|
||||
throw Exception("Illegal type " + col_type_name_ip.type->getName() +
|
||||
" of column " + column_ip->getName() +
|
||||
" argument of function " + getName() +
|
||||
", expected FixedString(" + toString(IPV6_BINARY_LENGTH) + ")",
|
||||
ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
|
||||
|
||||
const auto & col_type_name_cidr = columns[arguments[1]];
|
||||
const ColumnPtr & column_cidr = col_type_name_cidr.column;
|
||||
@ -1698,8 +1700,6 @@ public:
|
||||
+ " of argument of function " + getName(),
|
||||
ErrorCodes::ILLEGAL_COLUMN);
|
||||
|
||||
const auto & vec_in = col_ip_in->getChars();
|
||||
|
||||
auto col_res_lower_range = ColumnFixedString::create(IPV6_BINARY_LENGTH);
|
||||
auto col_res_upper_range = ColumnFixedString::create(IPV6_BINARY_LENGTH);
|
||||
|
||||
@ -1711,14 +1711,24 @@ public:
|
||||
|
||||
static constexpr UInt8 max_cidr_mask = IPV6_BINARY_LENGTH * 8;
|
||||
|
||||
const String col_const_ip_str = col_const_ip_in ? col_const_ip_in->getValue<String>() : "";
|
||||
const UInt8 * col_const_ip_value = col_const_ip_in ? reinterpret_cast<const UInt8 *>(col_const_ip_str.c_str()) : nullptr;
|
||||
|
||||
for (size_t offset = 0; offset < input_rows_count; ++offset)
|
||||
{
|
||||
const size_t offset_ipv6 = offset * IPV6_BINARY_LENGTH;
|
||||
|
||||
const UInt8 * ip = col_const_ip_in
|
||||
? col_const_ip_value
|
||||
: &col_ip_in->getChars()[offset_ipv6];
|
||||
|
||||
UInt8 cidr = col_const_cidr_in
|
||||
? col_const_cidr_in->getValue<UInt8>()
|
||||
: col_cidr_in->getData()[offset];
|
||||
|
||||
cidr = std::min(cidr, max_cidr_mask);
|
||||
applyCIDRMask(&vec_in[offset_ipv6], &vec_res_lower_range[offset_ipv6], &vec_res_upper_range[offset_ipv6], cidr);
|
||||
|
||||
applyCIDRMask(ip, &vec_res_lower_range[offset_ipv6], &vec_res_upper_range[offset_ipv6], cidr);
|
||||
}
|
||||
|
||||
columns[result].column = ColumnTuple::create(Columns{std::move(col_res_lower_range), std::move(col_res_upper_range)});
|
||||
@ -1763,7 +1773,7 @@ public:
|
||||
if (!isUInt8(second_argument))
|
||||
throw Exception{"Illegal type " + second_argument->getName()
|
||||
+ " of second argument of function " + getName()
|
||||
+ ", expected numeric type.", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT};
|
||||
+ ", expected UInt8", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT};
|
||||
|
||||
DataTypePtr element = DataTypeFactory::instance().get("IPv4");
|
||||
return std::make_shared<DataTypeTuple>(DataTypes{element, element});
|
||||
@ -1777,8 +1787,9 @@ public:
|
||||
const auto & col_type_name_ip = columns[arguments[0]];
|
||||
const ColumnPtr & column_ip = col_type_name_ip.column;
|
||||
|
||||
const auto col_const_ip_in = checkAndGetColumnConst<ColumnUInt32>(column_ip.get());
|
||||
const auto col_ip_in = checkAndGetColumn<ColumnUInt32>(column_ip.get());
|
||||
if (!col_ip_in)
|
||||
if (!col_const_ip_in && !col_ip_in)
|
||||
throw Exception("Illegal column " + columns[arguments[0]].column->getName()
|
||||
+ " of argument of function " + getName(),
|
||||
ErrorCodes::ILLEGAL_COLUMN);
|
||||
@ -1794,8 +1805,6 @@ public:
|
||||
+ " of argument of function " + getName(),
|
||||
ErrorCodes::ILLEGAL_COLUMN);
|
||||
|
||||
const auto & vec_in = col_ip_in->getData();
|
||||
|
||||
auto col_res_lower_range = ColumnUInt32::create();
|
||||
auto col_res_upper_range = ColumnUInt32::create();
|
||||
|
||||
@ -1807,11 +1816,15 @@ public:
|
||||
|
||||
for (size_t i = 0; i < input_rows_count; ++i)
|
||||
{
|
||||
UInt32 ip = col_const_ip_in
|
||||
? col_const_ip_in->getValue<UInt32>()
|
||||
: col_ip_in->getData()[i];
|
||||
|
||||
UInt8 cidr = col_const_cidr_in
|
||||
? col_const_cidr_in->getValue<UInt8>()
|
||||
: col_cidr_in->getData()[i];
|
||||
|
||||
std::tie(vec_res_lower_range[i], vec_res_upper_range[i]) = applyCIDRMask(vec_in[i], cidr);
|
||||
std::tie(vec_res_lower_range[i], vec_res_upper_range[i]) = applyCIDRMask(ip, cidr);
|
||||
}
|
||||
|
||||
columns[result].column = ColumnTuple::create(Columns{std::move(col_res_lower_range), std::move(col_res_upper_range)});
|
||||
|
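The IPv4 hunk above boils down to simple mask arithmetic; a minimal sketch of that math (an illustrative helper, not the ClickHouse applyCIDRMask overloads): the lower bound keeps the masked prefix bits and the upper bound sets all host bits.

#include <cstdint>
#include <utility>

std::pair<uint32_t, uint32_t> applyCidrMaskIPv4(uint32_t ip, uint8_t prefix)
{
    /// prefix == 0 selects the whole address space, prefix >= 32 selects a single address.
    const uint32_t mask = prefix >= 32 ? 0xffffffffu : ~(0xffffffffu >> prefix);
    return {ip & mask, ip | ~mask};
}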
src/Functions/aes_decrypt_mysql.cpp (new file, 31 lines)
@@ -0,0 +1,31 @@
|
||||
#if !defined(ARCADIA_BUILD)
|
||||
# include <Common/config.h>
|
||||
#endif
|
||||
|
||||
#if USE_SSL
|
||||
|
||||
#include <Functions/FunctionFactory.h>
|
||||
#include <Functions/FunctionsAES.h>
|
||||
|
||||
namespace
|
||||
{
|
||||
|
||||
struct DecryptMySQLModeImpl
|
||||
{
|
||||
static constexpr auto name = "aes_decrypt_mysql";
|
||||
static constexpr auto compatibility_mode = OpenSSLDetails::CompatibilityMode::MySQL;
|
||||
};
|
||||
|
||||
}
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
void registerFunctionAESDecryptMysql(FunctionFactory & factory)
|
||||
{
|
||||
factory.registerFunction<FunctionDecrypt<DecryptMySQLModeImpl>>();
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
#endif
|
src/Functions/aes_encrypt_mysql.cpp (new file, 31 lines)
@@ -0,0 +1,31 @@
|
||||
#if !defined(ARCADIA_BUILD)
|
||||
# include <Common/config.h>
|
||||
#endif
|
||||
|
||||
#if USE_SSL
|
||||
|
||||
#include <Functions/FunctionFactory.h>
|
||||
#include <Functions/FunctionsAES.h>
|
||||
|
||||
namespace
|
||||
{
|
||||
|
||||
struct EncryptMySQLModeImpl
|
||||
{
|
||||
static constexpr auto name = "aes_encrypt_mysql";
|
||||
static constexpr auto compatibility_mode = OpenSSLDetails::CompatibilityMode::MySQL;
|
||||
};
|
||||
|
||||
}
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
void registerFunctionAESEncryptMysql(FunctionFactory & factory)
|
||||
{
|
||||
factory.registerFunction<FunctionEncrypt<EncryptMySQLModeImpl>>();
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
#endif
|
src/Functions/decrypt.cpp (new file, 31 lines)
@@ -0,0 +1,31 @@
|
||||
#if !defined(ARCADIA_BUILD)
|
||||
# include <Common/config.h>
|
||||
#endif
|
||||
|
||||
#if USE_SSL
|
||||
|
||||
#include <Functions/FunctionFactory.h>
|
||||
#include <Functions/FunctionsAES.h>
|
||||
|
||||
namespace
|
||||
{
|
||||
|
||||
struct DecryptImpl
|
||||
{
|
||||
static constexpr auto name = "decrypt";
|
||||
static constexpr auto compatibility_mode = OpenSSLDetails::CompatibilityMode::OpenSSL;
|
||||
};
|
||||
|
||||
}
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
void registerFunctionDecrypt(FunctionFactory & factory)
|
||||
{
|
||||
factory.registerFunction<FunctionDecrypt<DecryptImpl>>();
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
#endif
|
src/Functions/encrypt.cpp (new file, 31 lines)
@@ -0,0 +1,31 @@
|
||||
#if !defined(ARCADIA_BUILD)
|
||||
# include <Common/config.h>
|
||||
#endif
|
||||
|
||||
#if USE_SSL
|
||||
|
||||
#include <Functions/FunctionFactory.h>
|
||||
#include <Functions/FunctionsAES.h>
|
||||
|
||||
namespace
|
||||
{
|
||||
|
||||
struct EncryptImpl
|
||||
{
|
||||
static constexpr auto name = "encrypt";
|
||||
static constexpr auto compatibility_mode = OpenSSLDetails::CompatibilityMode::OpenSSL;
|
||||
};
|
||||
|
||||
}
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
void registerFunctionEncrypt(FunctionFactory & factory)
|
||||
{
|
||||
factory.registerFunction<FunctionEncrypt<EncryptImpl>>();
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
#endif
|
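
The four new translation units above are thin registration wrappers around FunctionEncrypt/FunctionDecrypt from FunctionsAES.h. A minimal SQL round-trip, with the argument order and key/IV sizes borrowed from the performance test added later in this diff (the literal key and IV below are illustrative only), would be:

    WITH repeat('k', 16) AS key16, repeat('iv', 8) AS iv16
    SELECT decrypt('aes-128-cbc', encrypt('aes-128-cbc', 'secret text', key16, iv16), key16, iv16);
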
@ -1,3 +1,7 @@
#if !defined(ARCADIA_BUILD)
#    include <Common/config.h>
#endif

#include <Functions/FunctionFactory.h>


@ -37,12 +41,21 @@ void registerFunctionsNull(FunctionFactory &);
void registerFunctionsJSON(FunctionFactory &);
void registerFunctionsConsistentHashing(FunctionFactory & factory);
void registerFunctionsUnixTimestamp64(FunctionFactory & factory);

#if !defined(ARCADIA_BUILD)
void registerFunctionBayesAB(FunctionFactory &);
#endif
void registerFunctionTid(FunctionFactory & factory);
void registerFunctionLogTrace(FunctionFactory & factory);

#if USE_SSL
void registerFunctionEncrypt(FunctionFactory & factory);
void registerFunctionDecrypt(FunctionFactory & factory);
void registerFunctionAESEncryptMysql(FunctionFactory & factory);
void registerFunctionAESDecryptMysql(FunctionFactory & factory);

#endif


void registerFunctions()
{
@ -84,9 +97,17 @@ void registerFunctions()
    registerFunctionsIntrospection(factory);
    registerFunctionsConsistentHashing(factory);
    registerFunctionsUnixTimestamp64(factory);

#if !defined(ARCADIA_BUILD)
    registerFunctionBayesAB(factory);
#endif

#if USE_SSL
    registerFunctionEncrypt(factory);
    registerFunctionDecrypt(factory);
    registerFunctionAESEncryptMysql(factory);
    registerFunctionAESDecryptMysql(factory);
#endif
    registerFunctionTid(factory);
    registerFunctionLogTrace(factory);
}
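
Once registerFunctions() has run with USE_SSL enabled, the new names should be discoverable like any other function. A quick check (assuming the usual system.functions catalog, which is not part of this diff) is:

    SELECT name
    FROM system.functions
    WHERE name IN ('encrypt', 'decrypt', 'aes_encrypt_mysql', 'aes_decrypt_mysql');
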
@ -47,6 +47,8 @@ SRCS(
    addSeconds.cpp
    addWeeks.cpp
    addYears.cpp
    aes_decrypt_mysql.cpp
    aes_encrypt_mysql.cpp
    appendTrailingCharIfAbsent.cpp
    array/arrayAll.cpp
    array/arrayAUC.cpp
@ -141,6 +143,7 @@ SRCS(
    currentUser.cpp
    dateDiff.cpp
    date_trunc.cpp
    decrypt.cpp
    defaultValueOfArgumentType.cpp
    defaultValueOfTypeName.cpp
    demange.cpp
@ -148,6 +151,7 @@ SRCS(
    dumpColumnStructure.cpp
    e.cpp
    empty.cpp
    encrypt.cpp
    endsWith.cpp
    equals.cpp
    erfc.cpp
@ -176,6 +180,7 @@ SRCS(
    FunctionFQDN.cpp
    FunctionHelpers.cpp
    FunctionJoinGet.cpp
    FunctionsAES.cpp
    FunctionsCoding.cpp
    FunctionsConversion.cpp
    FunctionsEmbeddedDictionaries.cpp
@ -590,7 +590,7 @@ VolumePtr Context::setTemporaryStorage(const String & path, const String & polic
            shared->tmp_path += '/';

        auto disk = std::make_shared<DiskLocal>("_tmp_default", shared->tmp_path, 0);
        shared->tmp_volume = std::make_shared<SingleDiskVolume>("_tmp_default", disk);
        shared->tmp_volume = std::make_shared<SingleDiskVolume>("_tmp_default", disk, 0);
    }
    else
    {
@ -413,6 +413,11 @@ BlockIO InterpreterInsertQuery::execute()
        res.out = std::move(out_streams.at(0));

    res.pipeline.addStorageHolder(table);
    if (const auto * mv = dynamic_cast<const StorageMaterializedView *>(table.get()))
    {
        if (auto inner_table = mv->tryGetTargetTable())
            res.pipeline.addStorageHolder(inner_table);
    }

    return res;
}
@ -133,7 +133,11 @@ void InterpreterSystemQuery::startStopAction(StorageActionBlockType action_type,
    auto manager = context.getActionLocksManager();
    manager->cleanExpired();

    if (table_id)
    if (volume_ptr && action_type == ActionLocks::PartsMerge)
    {
        volume_ptr->setAvoidMergesUserOverride(!start);
    }
    else if (table_id)
    {
        context.checkAccess(getRequiredAccessType(action_type), table_id);
        if (start)
@ -199,6 +203,10 @@ BlockIO InterpreterSystemQuery::execute()
    if (!query.target_dictionary.empty() && !query.database.empty())
        query.target_dictionary = query.database + "." + query.target_dictionary;

    volume_ptr = {};
    if (!query.storage_policy.empty() && !query.volume.empty())
        volume_ptr = context.getStoragePolicy(query.storage_policy)->getVolumeByName(query.volume);

    switch (query.type)
    {
        case Type::SHUTDOWN:
@ -5,6 +5,7 @@
#include <Storages/IStorage_fwd.h>
#include <Interpreters/StorageID.h>
#include <Common/ActionLock.h>
#include <Disks/IVolume.h>


namespace Poco { class Logger; }
@ -44,6 +45,7 @@ private:
    Context & context;
    Poco::Logger * log = nullptr;
    StorageID table_id = StorageID::createEmpty(); /// Will be set up if query contains table name
    VolumePtr volume_ptr;

    /// Tries to get a replicated table and restart it
    /// Returns pointer to a newly created table if the restart was successful
@ -89,6 +89,34 @@ ProcessList::EntryPtr ProcessList::insert(const String & query_, const IAST * as
            throw Exception("Too many simultaneous queries. Maximum: " + toString(max_size), ErrorCodes::TOO_MANY_SIMULTANEOUS_QUERIES);
        }

        {
            /**
             * The `max_size` check above is controlled by the `max_concurrent_queries` server setting and is a "hard" limit on how many
             * queries the server can process concurrently. It is configured at startup. When the server is overloaded with queries and the
             * hard limit is reached, it is impossible to connect to the server to run queries for investigation.
             *
             * With `max_concurrent_queries_for_all_users` it is possible to configure an additional, runtime-configurable limit for query concurrency.
             * Usually it should be configured just once for `default_profile`, which is inherited by all users. DBAs can override
             * this setting when connecting to ClickHouse, or it can be configured for a DBA profile to have a value greater than that of
             * the default profile (or 0 for unlimited).
             *
             * One example is to set `max_size=X` and `max_concurrent_queries_for_all_users=X-10` for the default profile,
             * and `max_concurrent_queries_for_all_users=0` for DBAs or accounts that are vital for ClickHouse operations (like metrics
             * exporters).
             *
             * Another creative example is to configure `max_concurrent_queries_for_all_users=50` for "analyst" profiles running ad-hoc queries
             * and `max_concurrent_queries_for_all_users=100` for "customer facing" services. This way "analyst" queries will be rejected
             * once the server is already processing 50+ concurrent queries (from analysts or any other users).
             */

            if (!is_unlimited_query && settings.max_concurrent_queries_for_all_users
                && processes.size() >= settings.max_concurrent_queries_for_all_users)
                throw Exception(
                    "Too many simultaneous queries for all users. Current: " + toString(processes.size())
                    + ", maximum: " + settings.max_concurrent_queries_for_all_users.toString(),
                    ErrorCodes::TOO_MANY_SIMULTANEOUS_QUERIES);
        }

        /** Why we use current user?
          * Because initial one is passed by client and credentials for it is not verified,
          * and using initial_user for limits will be insecure.
|
||||
|
@ -233,7 +233,7 @@ void SystemLog<LogElement>::add(const LogElement & element)
|
||||
/// The size of allocation can be in order of a few megabytes.
|
||||
/// But this should not be accounted for query memory usage.
|
||||
/// Otherwise the tests like 01017_uniqCombined_memory_usage.sql will be flaky.
|
||||
auto temporarily_disable_memory_tracker = getCurrentMemoryTrackerActionLock();
|
||||
MemoryTracker::BlockerInThread temporarily_disable_memory_tracker;
|
||||
|
||||
/// Should not log messages under mutex.
|
||||
bool queue_is_half_full = false;
|
||||
|
@ -157,7 +157,7 @@ static void setExceptionStackTrace(QueryLogElement & elem)
|
||||
{
|
||||
/// Disable memory tracker for stack trace.
|
||||
/// Because if the exception is "Memory limit (for query) exceeded", then we probably cannot allocate another string.
|
||||
auto temporarily_disable_memory_tracker = getCurrentMemoryTrackerActionLock();
|
||||
MemoryTracker::BlockerInThread temporarily_disable_memory_tracker;
|
||||
|
||||
try
|
||||
{
|
||||
@ -338,28 +338,26 @@ static std::tuple<ASTPtr, BlockIO> executeQueryImpl(
|
||||
|
||||
try
|
||||
{
|
||||
bool ast_modified = false;
|
||||
/// Replace ASTQueryParameter with ASTLiteral for prepared statements.
|
||||
if (context.hasQueryParameters())
|
||||
{
|
||||
ReplaceQueryParameterVisitor visitor(context.getQueryParameters());
|
||||
visitor.visit(ast);
|
||||
ast_modified = true;
|
||||
query = serializeAST(*ast);
|
||||
}
|
||||
|
||||
/// MUST go before any modification (except for prepared statements,
|
||||
/// since it substitutes parameters, and without them the query does not contain
|
||||
/// parameters), to keep query as-is in query_log and server log.
|
||||
query_for_logging = prepareQueryForLogging(query, context);
|
||||
logQuery(query_for_logging, context, internal);
|
||||
|
||||
/// Propagate WITH statement to children ASTSelect.
|
||||
if (settings.enable_global_with_statement)
|
||||
{
|
||||
ApplyWithGlobalVisitor().visit(ast);
|
||||
ast_modified = true;
|
||||
}
|
||||
|
||||
if (ast_modified)
|
||||
query = serializeAST(*ast);
|
||||
|
||||
query_for_logging = prepareQueryForLogging(query, context);
|
||||
|
||||
logQuery(query_for_logging, context, internal);
|
||||
}
|
||||
|
||||
/// Check the limits.
|
||||
checkASTSizeLimits(*ast, settings);
|
||||
|
@ -118,7 +118,8 @@ void ASTSystemQuery::formatImpl(const FormatSettings & settings, FormatState &,
|
||||
<< (settings.hilite ? hilite_none : "");
|
||||
};
|
||||
|
||||
auto print_drop_replica = [&] {
|
||||
auto print_drop_replica = [&]
|
||||
{
|
||||
settings.ostr << " " << quoteString(replica);
|
||||
if (!table.empty())
|
||||
{
|
||||
@ -140,6 +141,16 @@ void ASTSystemQuery::formatImpl(const FormatSettings & settings, FormatState &,
|
||||
}
|
||||
};
|
||||
|
||||
auto print_on_volume = [&]
|
||||
{
|
||||
settings.ostr << " ON VOLUME "
|
||||
<< (settings.hilite ? hilite_identifier : "") << backQuoteIfNeed(storage_policy)
|
||||
<< (settings.hilite ? hilite_none : "")
|
||||
<< "."
|
||||
<< (settings.hilite ? hilite_identifier : "") << backQuoteIfNeed(volume)
|
||||
<< (settings.hilite ? hilite_none : "");
|
||||
};
|
||||
|
||||
if (!cluster.empty())
|
||||
formatOnCluster(settings);
|
||||
|
||||
@ -160,6 +171,8 @@ void ASTSystemQuery::formatImpl(const FormatSettings & settings, FormatState &,
|
||||
{
|
||||
if (!table.empty())
|
||||
print_database_table();
|
||||
else if (!volume.empty())
|
||||
print_on_volume();
|
||||
}
|
||||
else if (type == Type::RESTART_REPLICA || type == Type::SYNC_REPLICA || type == Type::FLUSH_DISTRIBUTED)
|
||||
{
|
||||
|
@ -65,6 +65,8 @@ public:
|
||||
String replica;
|
||||
String replica_zk_path;
|
||||
bool is_drop_whole_replica;
|
||||
String storage_policy;
|
||||
String volume;
|
||||
|
||||
String getID(char) const override { return "SYSTEM query"; }
|
||||
|
||||
|
@ -129,6 +129,33 @@ bool ParserSystemQuery::parseImpl(IParser::Pos & pos, ASTPtr & node, Expected &
|
||||
|
||||
case Type::STOP_MERGES:
|
||||
case Type::START_MERGES:
|
||||
{
|
||||
String storage_policy_str;
|
||||
String volume_str;
|
||||
|
||||
if (ParserKeyword{"ON VOLUME"}.ignore(pos, expected))
|
||||
{
|
||||
ASTPtr ast;
|
||||
if (ParserIdentifier{}.parse(pos, ast, expected))
|
||||
storage_policy_str = ast->as<ASTIdentifier &>().name;
|
||||
else
|
||||
return false;
|
||||
|
||||
if (!ParserToken{TokenType::Dot}.ignore(pos, expected))
|
||||
return false;
|
||||
|
||||
if (ParserIdentifier{}.parse(pos, ast, expected))
|
||||
volume_str = ast->as<ASTIdentifier &>().name;
|
||||
else
|
||||
return false;
|
||||
}
|
||||
res->storage_policy = storage_policy_str;
|
||||
res->volume = volume_str;
|
||||
if (res->volume.empty() && res->storage_policy.empty())
|
||||
parseDatabaseAndTableName(pos, expected, res->database, res->table);
|
||||
break;
|
||||
}
|
||||
|
||||
case Type::STOP_TTL_MERGES:
|
||||
case Type::START_TTL_MERGES:
|
||||
case Type::STOP_MOVES:
|
||||
|
@ -311,7 +311,7 @@ MergeTreeData::MutableDataPartPtr Fetcher::downloadPartToMemory(
|
||||
NativeBlockInputStream block_in(in, 0);
|
||||
auto block = block_in.read();
|
||||
|
||||
auto volume = std::make_shared<SingleDiskVolume>("volume_" + part_name, reservation->getDisk());
|
||||
auto volume = std::make_shared<SingleDiskVolume>("volume_" + part_name, reservation->getDisk(), 0);
|
||||
MergeTreeData::MutableDataPartPtr new_data_part =
|
||||
std::make_shared<MergeTreeDataPartInMemory>(data, part_name, volume);
|
||||
|
||||
@ -408,7 +408,7 @@ MergeTreeData::MutableDataPartPtr Fetcher::downloadPartToDisk(
|
||||
|
||||
assertEOF(in);
|
||||
|
||||
auto volume = std::make_shared<SingleDiskVolume>("volume_" + part_name, disk);
|
||||
auto volume = std::make_shared<SingleDiskVolume>("volume_" + part_name, disk, 0);
|
||||
MergeTreeData::MutableDataPartPtr new_data_part = data.createPart(part_name, volume, part_relative_path);
|
||||
new_data_part->is_temp = true;
|
||||
new_data_part->modification_time = time(nullptr);
|
||||
|
@ -408,7 +408,7 @@ void IMergeTreeDataPart::loadColumnsChecksumsIndexes(bool require_columns_checks
|
||||
/// Memory should not be limited during ATTACH TABLE query.
|
||||
/// This is already true at server startup but must also be ensured for manual table ATTACH.
|
||||
/// Motivation: memory for the index is shared between queries - it does not belong to the query itself.
|
||||
auto temporarily_disable_memory_tracker = getCurrentMemoryTrackerActionLock();
|
||||
MemoryTracker::BlockerInThread temporarily_disable_memory_tracker;
|
||||
|
||||
loadColumns(require_columns_checksums);
|
||||
loadChecksums(require_columns_checksums);
|
||||
@ -760,6 +760,16 @@ void IMergeTreeDataPart::loadColumns(bool require)
|
||||
column_name_to_position.emplace(column.name, pos++);
|
||||
}
|
||||
|
||||
bool IMergeTreeDataPart::shallParticipateInMerges(const StoragePolicyPtr & storage_policy) const
|
||||
{
|
||||
/// `IMergeTreeDataPart::volume` describes space where current part belongs, and holds
|
||||
/// `SingleDiskVolume` object which does not contain up-to-date settings of corresponding volume.
|
||||
/// Therefore we shall obtain volume from storage policy.
|
||||
auto volume_ptr = storage_policy->getVolume(storage_policy->getVolumeIndexByDisk(volume->getDisk()));
|
||||
|
||||
return !volume_ptr->areMergesAvoided();
|
||||
}
|
||||
|
||||
UInt64 IMergeTreeDataPart::calculateTotalSizeOnDisk(const DiskPtr & disk_, const String & from)
|
||||
{
|
||||
if (disk_->isFile(from))
|
||||
|
@ -324,7 +324,11 @@ public:
|
||||
/// NOTE: Doesn't take column renames into account, if some column renames
|
||||
/// take place, you must take original name of column for this part from
|
||||
/// storage and pass it to this method.
|
||||
virtual bool hasColumnFiles(const String & /* column */, const IDataType & /* type */) const{ return false; }
|
||||
virtual bool hasColumnFiles(const String & /* column */, const IDataType & /* type */) const { return false; }
|
||||
|
||||
/// Returns true if this part shall participate in merges according to
|
||||
/// settings of given storage policy.
|
||||
bool shallParticipateInMerges(const StoragePolicyPtr & storage_policy) const;
|
||||
|
||||
/// Calculate the total size of the entire directory with all the files
|
||||
static UInt64 calculateTotalSizeOnDisk(const DiskPtr & disk_, const String & from);
|
||||
|
@ -48,6 +48,8 @@ public:
|
||||
|
||||
/// Part compression codec definition.
|
||||
ASTPtr compression_codec_desc;
|
||||
|
||||
bool shall_participate_in_merges = true;
|
||||
};
|
||||
|
||||
/// Parts belong to partitions. Only parts within the same partition can be merged.
|
||||
|
@ -776,7 +776,7 @@ void MergeTreeData::loadDataParts(bool skip_sanity_checks)
|
||||
if (!MergeTreePartInfo::tryParsePartName(part_name, &part_info, format_version))
|
||||
return;
|
||||
|
||||
auto single_disk_volume = std::make_shared<SingleDiskVolume>("volume_" + part_name, part_disk_ptr);
|
||||
auto single_disk_volume = std::make_shared<SingleDiskVolume>("volume_" + part_name, part_disk_ptr, 0);
|
||||
auto part = createPart(part_name, part_info, single_disk_volume, part_name);
|
||||
bool broken = false;
|
||||
|
||||
@ -2996,7 +2996,7 @@ MergeTreeData::MutableDataPartsVector MergeTreeData::tryLoadPartsToAttach(const
|
||||
for (const auto & part_names : renamed_parts.old_and_new_names)
|
||||
{
|
||||
LOG_DEBUG(log, "Checking part {}", part_names.second);
|
||||
auto single_disk_volume = std::make_shared<SingleDiskVolume>("volume_" + part_names.first, name_to_disk[part_names.first]);
|
||||
auto single_disk_volume = std::make_shared<SingleDiskVolume>("volume_" + part_names.first, name_to_disk[part_names.first], 0);
|
||||
MutableDataPartPtr part = createPart(part_names.first, single_disk_volume, source_dir + part_names.second);
|
||||
loadPartAndFixMetadataImpl(part);
|
||||
loaded_parts.push_back(part);
|
||||
@ -3409,7 +3409,7 @@ MergeTreeData::MutableDataPartPtr MergeTreeData::cloneAndLoadDataPartOnSameDisk(
|
||||
localBackup(disk, src_part_path, dst_part_path);
|
||||
disk->removeIfExists(dst_part_path + "/" + IMergeTreeDataPart::DELETE_ON_DESTROY_MARKER_FILE_NAME);
|
||||
|
||||
auto single_disk_volume = std::make_shared<SingleDiskVolume>(disk->getName(), disk);
|
||||
auto single_disk_volume = std::make_shared<SingleDiskVolume>(disk->getName(), disk, 0);
|
||||
auto dst_data_part = createPart(dst_part_name, dst_part_info, single_disk_volume, tmp_dst_part_name);
|
||||
|
||||
dst_data_part->is_temp = true;
|
||||
|
@ -226,6 +226,8 @@ bool MergeTreeDataMergerMutator::selectPartsToMerge(
|
||||
|
||||
IMergeSelector::PartsRanges parts_ranges;
|
||||
|
||||
StoragePolicyPtr storage_policy = data.getStoragePolicy();
|
||||
|
||||
const String * prev_partition_id = nullptr;
|
||||
/// Previous part only in boundaries of partition frame
|
||||
const MergeTreeData::DataPartPtr * prev_part = nullptr;
|
||||
@ -275,6 +277,7 @@ bool MergeTreeDataMergerMutator::selectPartsToMerge(
|
||||
part_info.data = ∂
|
||||
part_info.ttl_infos = &part->ttl_infos;
|
||||
part_info.compression_codec_desc = part->default_codec->getFullCodecDesc();
|
||||
part_info.shall_participate_in_merges = part->shallParticipateInMerges(storage_policy);
|
||||
|
||||
parts_ranges.back().emplace_back(part_info);
|
||||
|
||||
@ -667,7 +670,7 @@ MergeTreeData::MutableDataPartPtr MergeTreeDataMergerMutator::mergePartsToTempor
|
||||
merging_columns,
|
||||
merging_column_names);
|
||||
|
||||
auto single_disk_volume = std::make_shared<SingleDiskVolume>("volume_" + future_part.name, disk);
|
||||
auto single_disk_volume = std::make_shared<SingleDiskVolume>("volume_" + future_part.name, disk, 0);
|
||||
MergeTreeData::MutableDataPartPtr new_data_part = data.createPart(
|
||||
future_part.name,
|
||||
future_part.type,
|
||||
@ -1127,7 +1130,7 @@ MergeTreeData::MutableDataPartPtr MergeTreeDataMergerMutator::mutatePartToTempor
|
||||
in->setProgressCallback(MergeProgressCallback(merge_entry, watch_prev_elapsed, stage_progress));
|
||||
}
|
||||
|
||||
auto single_disk_volume = std::make_shared<SingleDiskVolume>("volume_" + future_part.name, space_reservation->getDisk());
|
||||
auto single_disk_volume = std::make_shared<SingleDiskVolume>("volume_" + future_part.name, space_reservation->getDisk(), 0);
|
||||
auto new_data_part = data.createPart(
|
||||
future_part.name, future_part.type, future_part.part_info, single_disk_volume, "tmp_mut_" + future_part.name);
|
||||
|
||||
|
@ -212,7 +212,7 @@ void MergeTreeDataPartWriterOnDisk::calculateAndSerializePrimaryIndex(const Bloc
|
||||
* And otherwise it will look like excessively growing memory consumption in context of query.
|
||||
* (observed in long INSERT SELECTs)
|
||||
*/
|
||||
auto temporarily_disable_memory_tracker = getCurrentMemoryTrackerActionLock();
|
||||
MemoryTracker::BlockerInThread temporarily_disable_memory_tracker;
|
||||
|
||||
/// Write index. The index contains Primary Key value for each `index_granularity` row.
|
||||
|
||||
|
@ -48,7 +48,7 @@ const MarkInCompressedFile & MergeTreeMarksLoader::getMark(size_t row_index, siz
|
||||
MarkCache::MappedPtr MergeTreeMarksLoader::loadMarksImpl()
|
||||
{
|
||||
/// Memory for marks must not be accounted as memory usage for query, because they are stored in shared cache.
|
||||
auto temporarily_disable_memory_tracker = getCurrentMemoryTrackerActionLock();
|
||||
MemoryTracker::BlockerInThread temporarily_disable_memory_tracker;
|
||||
|
||||
size_t file_size = disk->getFileSize(mrk_path);
|
||||
size_t mark_size = index_granularity_info.getMarkSizeInBytes(columns_in_mark);
|
||||
|
@ -199,7 +199,7 @@ MergeTreeData::DataPartPtr MergeTreePartsMover::clonePart(const MergeTreeMoveEnt
|
||||
const String directory_to_move = "moving";
|
||||
moving_part.part->makeCloneOnDisk(moving_part.reserved_space->getDisk(), directory_to_move);
|
||||
|
||||
auto single_disk_volume = std::make_shared<SingleDiskVolume>("volume_" + moving_part.part->name, moving_part.reserved_space->getDisk());
|
||||
auto single_disk_volume = std::make_shared<SingleDiskVolume>("volume_" + moving_part.part->name, moving_part.reserved_space->getDisk(), 0);
|
||||
MergeTreeData::MutableDataPartPtr cloned_part =
|
||||
data->createPart(moving_part.part->name, single_disk_volume, directory_to_move + '/' + moving_part.part->name);
|
||||
LOG_TRACE(log, "Part {} was cloned to {}", moving_part.part->name, cloned_part->getFullPath());
|
||||
|
@ -130,7 +130,7 @@ MergeTreeData::MutableDataPartsVector MergeTreeWriteAheadLog::restore(const Stor
|
||||
else if (action_type == ActionType::ADD_PART)
|
||||
{
|
||||
auto part_disk = storage.reserveSpace(0)->getDisk();
|
||||
auto single_disk_volume = std::make_shared<SingleDiskVolume>("volume_" + part_name, disk);
|
||||
auto single_disk_volume = std::make_shared<SingleDiskVolume>("volume_" + part_name, disk, 0);
|
||||
|
||||
part = storage.createPart(
|
||||
part_name,
|
||||
|
@ -71,7 +71,10 @@ void ReplicatedMergeTreeRestartingThread::run()
|
||||
|
||||
bool old_val = false;
|
||||
if (storage.is_readonly.compare_exchange_strong(old_val, true))
|
||||
{
|
||||
incr_readonly = true;
|
||||
CurrentMetrics::add(CurrentMetrics::ReadonlyReplica);
|
||||
}
|
||||
|
||||
partialShutdown();
|
||||
}
|
||||
@ -112,7 +115,10 @@ void ReplicatedMergeTreeRestartingThread::run()
|
||||
|
||||
bool old_val = true;
|
||||
if (storage.is_readonly.compare_exchange_strong(old_val, false))
|
||||
{
|
||||
incr_readonly = false;
|
||||
CurrentMetrics::sub(CurrentMetrics::ReadonlyReplica);
|
||||
}
|
||||
|
||||
first_time = false;
|
||||
}
|
||||
@ -349,6 +355,13 @@ void ReplicatedMergeTreeRestartingThread::shutdown()
|
||||
task->deactivate();
|
||||
LOG_TRACE(log, "Restarting thread finished");
|
||||
|
||||
/// For detach table query, we should reset the ReadonlyReplica metric.
|
||||
if (incr_readonly)
|
||||
{
|
||||
CurrentMetrics::sub(CurrentMetrics::ReadonlyReplica);
|
||||
incr_readonly = false;
|
||||
}
|
||||
|
||||
/// Stop other tasks.
|
||||
partialShutdown();
|
||||
}
|
||||
|
@ -36,6 +36,9 @@ private:
|
||||
Poco::Logger * log;
|
||||
std::atomic<bool> need_stop {false};
|
||||
|
||||
// We need it besides `storage.is_readonly`, because `shutdown()` may be called many times, in which case `storage.is_readonly` will not change.
|
||||
bool incr_readonly = false;
|
||||
|
||||
/// The random data we wrote into `/replicas/me/is_active`.
|
||||
String active_node_identifier;
|
||||
|
||||
|
@ -1,4 +1,5 @@
|
||||
#include <Storages/MergeTree/SimpleMergeSelector.h>
|
||||
|
||||
#include <Common/interpolate.h>
|
||||
|
||||
#include <cmath>
|
||||
@ -152,6 +153,9 @@ void selectWithinPartition(
|
||||
if (begin > 1000)
|
||||
break;
|
||||
|
||||
if (!parts[begin].shall_participate_in_merges)
|
||||
continue;
|
||||
|
||||
size_t sum_size = parts[begin].size;
|
||||
size_t max_size = parts[begin].size;
|
||||
size_t min_age = parts[begin].age;
|
||||
@ -161,6 +165,9 @@ void selectWithinPartition(
|
||||
if (settings.max_parts_to_merge_at_once && end - begin > settings.max_parts_to_merge_at_once)
|
||||
break;
|
||||
|
||||
if (!parts[end - 1].shall_participate_in_merges)
|
||||
break;
|
||||
|
||||
size_t cur_size = parts[end - 1].size;
|
||||
size_t cur_age = parts[end - 1].age;
|
||||
|
||||
|
@ -25,6 +25,7 @@ IMergeSelector::PartsRange ITTLMergeSelector::select(
|
||||
ssize_t partition_to_merge_index = -1;
|
||||
time_t partition_to_merge_min_ttl = 0;
|
||||
|
||||
/// Find most old TTL.
|
||||
for (size_t i = 0; i < parts_ranges.size(); ++i)
|
||||
{
|
||||
const auto & mergeable_parts_in_partition = parts_ranges[i];
|
||||
@ -56,6 +57,7 @@ IMergeSelector::PartsRange ITTLMergeSelector::select(
|
||||
Iterator best_end = best_begin + 1;
|
||||
size_t total_size = 0;
|
||||
|
||||
/// Find begin of range with most old TTL.
|
||||
while (true)
|
||||
{
|
||||
time_t ttl = getTTLForPart(*best_begin);
|
||||
@ -63,6 +65,7 @@ IMergeSelector::PartsRange ITTLMergeSelector::select(
|
||||
if (!ttl || isTTLAlreadySatisfied(*best_begin) || ttl > current_time
|
||||
|| (max_total_size_to_merge && total_size > max_total_size_to_merge))
|
||||
{
|
||||
/// This condition can not be satisfied on first iteration.
|
||||
++best_begin;
|
||||
break;
|
||||
}
|
||||
@ -74,6 +77,7 @@ IMergeSelector::PartsRange ITTLMergeSelector::select(
|
||||
--best_begin;
|
||||
}
|
||||
|
||||
/// Find end of range with most old TTL.
|
||||
while (best_end != best_partition.end())
|
||||
{
|
||||
time_t ttl = getTTLForPart(*best_end);
|
||||
@ -97,6 +101,19 @@ time_t TTLDeleteMergeSelector::getTTLForPart(const IMergeSelector::Part & part)
|
||||
return only_drop_parts ? part.ttl_infos->part_max_ttl : part.ttl_infos->part_min_ttl;
|
||||
}
|
||||
|
||||
bool TTLDeleteMergeSelector::isTTLAlreadySatisfied(const IMergeSelector::Part & part) const
|
||||
{
|
||||
/// N.B. Satisfied TTL means that TTL is NOT expired.
|
||||
/// return true -- this part can not be selected
|
||||
/// return false -- this part can be selected
|
||||
|
||||
/// Dropping whole part is an exception to `shall_participate_in_merges` logic.
|
||||
if (only_drop_parts)
|
||||
return false;
|
||||
|
||||
return !part.shall_participate_in_merges;
|
||||
}
|
||||
|
||||
time_t TTLRecompressMergeSelector::getTTLForPart(const IMergeSelector::Part & part) const
|
||||
{
|
||||
return part.ttl_infos->getMinimalMaxRecompressionTTL();
|
||||
@ -104,6 +121,13 @@ time_t TTLRecompressMergeSelector::getTTLForPart(const IMergeSelector::Part & pa
|
||||
|
||||
bool TTLRecompressMergeSelector::isTTLAlreadySatisfied(const IMergeSelector::Part & part) const
|
||||
{
|
||||
/// N.B. Satisfied TTL means that TTL is NOT expired.
|
||||
/// return true -- this part can not be selected
|
||||
/// return false -- this part can be selected
|
||||
|
||||
if (!part.shall_participate_in_merges)
|
||||
return true;
|
||||
|
||||
if (recompression_ttls.empty())
|
||||
return false;
|
||||
|
||||
|
@ -64,10 +64,7 @@ public:
|
||||
|
||||
/// Delete TTL should be checked only by TTL time, there are no other ways
|
||||
/// to satisfy it.
|
||||
bool isTTLAlreadySatisfied(const IMergeSelector::Part &) const override
|
||||
{
|
||||
return false;
|
||||
}
|
||||
bool isTTLAlreadySatisfied(const IMergeSelector::Part &) const override;
|
||||
|
||||
private:
|
||||
bool only_drop_parts;
|
||||
|
@ -239,6 +239,7 @@ Pipe StorageBuffer::read(
|
||||
}
|
||||
|
||||
pipe_from_dst.addTableLock(destination_lock);
|
||||
pipe_from_dst.addStorageHolder(destination);
|
||||
}
|
||||
|
||||
Pipe pipe_from_buffers;
|
||||
@ -315,7 +316,7 @@ static void appendBlock(const Block & from, Block & to)
|
||||
|
||||
size_t old_rows = to.rows();
|
||||
|
||||
auto temporarily_disable_memory_tracker = getCurrentMemoryTrackerActionLock();
|
||||
MemoryTracker::BlockerInThread temporarily_disable_memory_tracker;
|
||||
|
||||
try
|
||||
{
|
||||
@ -693,7 +694,7 @@ void StorageBuffer::writeBlockToDestination(const Block & block, StoragePtr tabl
|
||||
}
|
||||
auto destination_metadata_snapshot = table->getInMemoryMetadataPtr();
|
||||
|
||||
auto temporarily_disable_memory_tracker = getCurrentMemoryTrackerActionLock();
|
||||
MemoryTracker::BlockerInThread temporarily_disable_memory_tracker;
|
||||
|
||||
auto insert = std::make_shared<ASTInsertQuery>();
|
||||
insert->table_id = destination_id;
|
||||
|
@ -454,7 +454,7 @@ QueryProcessingStage::Enum StorageDistributed::getQueryProcessingStage(const Con
|
||||
if (settings.optimize_skip_unused_shards &&
|
||||
settings.optimize_distributed_group_by_sharding_key &&
|
||||
has_sharding_key &&
|
||||
sharding_key_is_deterministic)
|
||||
(settings.allow_nondeterministic_optimize_skip_unused_shards || sharding_key_is_deterministic))
|
||||
{
|
||||
Block sharding_key_block = sharding_key_expr->getSampleBlock();
|
||||
auto stage = getOptimizedQueryProcessingStage(query_ptr, settings.extremes, sharding_key_block);
|
||||
@ -710,7 +710,9 @@ ClusterPtr StorageDistributed::getOptimizedCluster(const Context & context, cons
|
||||
ClusterPtr cluster = getCluster();
|
||||
const Settings & settings = context.getSettingsRef();
|
||||
|
||||
if (has_sharding_key && sharding_key_is_deterministic)
|
||||
bool sharding_key_is_usable = settings.allow_nondeterministic_optimize_skip_unused_shards || sharding_key_is_deterministic;
|
||||
|
||||
if (has_sharding_key && sharding_key_is_usable)
|
||||
{
|
||||
ClusterPtr optimized = skipUnusedShards(cluster, query_ptr, metadata_snapshot, context);
|
||||
if (optimized)
|
||||
@ -723,7 +725,7 @@ ClusterPtr StorageDistributed::getOptimizedCluster(const Context & context, cons
|
||||
std::stringstream exception_message;
|
||||
if (!has_sharding_key)
|
||||
exception_message << "No sharding key";
|
||||
else if (!sharding_key_is_deterministic)
|
||||
else if (!sharding_key_is_usable)
|
||||
exception_message << "Sharding key is not deterministic";
|
||||
else
|
||||
exception_message << "Sharding key " << sharding_key_column_name << " is not used";
|
||||
|
@ -82,6 +82,7 @@ Pipe StorageMaterializeMySQL::read(
|
||||
}
|
||||
|
||||
Pipe pipe = nested_storage->read(require_columns_name, nested_metadata, query_info, context, processed_stage, max_block_size, num_streams);
|
||||
pipe.addTableLock(lock);
|
||||
|
||||
if (!expressions->children.empty() && !pipe.empty())
|
||||
{
|
||||
|
@ -124,6 +124,7 @@ Pipe StorageMaterializedView::read(
|
||||
|
||||
Pipe pipe = storage->read(column_names, metadata_snapshot, query_info, context, processed_stage, max_block_size, num_streams);
|
||||
pipe.addTableLock(lock);
|
||||
pipe.addStorageHolder(storage);
|
||||
|
||||
return pipe;
|
||||
}
|
||||
|
@ -344,6 +344,7 @@ Pipe StorageMerge::createSources(
|
||||
convertingSourceStream(header, metadata_snapshot, *modified_context, modified_query_info.query, pipe, processed_stage);
|
||||
|
||||
pipe.addTableLock(struct_lock);
|
||||
pipe.addStorageHolder(storage);
|
||||
pipe.addInterpreterContext(modified_context);
|
||||
}
|
||||
|
||||
|
@ -29,6 +29,7 @@ StorageSystemStoragePolicies::StorageSystemStoragePolicies(const StorageID & tab
|
||||
{"volume_type", std::make_shared<DataTypeString>()},
|
||||
{"max_data_part_size", std::make_shared<DataTypeUInt64>()},
|
||||
{"move_factor", std::make_shared<DataTypeFloat32>()},
|
||||
{"prefer_not_to_merge", std::make_shared<DataTypeUInt8>()}
|
||||
}));
|
||||
// TODO: Add string column with custom volume-type-specific options
|
||||
setInMemoryMetadata(storage_metadata);
|
||||
@ -52,6 +53,7 @@ Pipe StorageSystemStoragePolicies::read(
|
||||
MutableColumnPtr col_volume_type = ColumnString::create();
|
||||
MutableColumnPtr col_max_part_size = ColumnUInt64::create();
|
||||
MutableColumnPtr col_move_factor = ColumnFloat32::create();
|
||||
MutableColumnPtr col_prefer_not_to_merge = ColumnUInt8::create();
|
||||
|
||||
for (const auto & [policy_name, policy_ptr] : context.getPoliciesMap())
|
||||
{
|
||||
@ -69,6 +71,7 @@ Pipe StorageSystemStoragePolicies::read(
|
||||
col_volume_type->insert(volumeTypeToString(volumes[i]->getType()));
|
||||
col_max_part_size->insert(volumes[i]->max_data_part_size);
|
||||
col_move_factor->insert(policy_ptr->getMoveFactor());
|
||||
col_prefer_not_to_merge->insert(volumes[i]->areMergesAvoided() ? 1 : 0);
|
||||
}
|
||||
}
|
||||
|
||||
@ -80,6 +83,7 @@ Pipe StorageSystemStoragePolicies::read(
|
||||
res_columns.emplace_back(std::move(col_volume_type));
|
||||
res_columns.emplace_back(std::move(col_max_part_size));
|
||||
res_columns.emplace_back(std::move(col_move_factor));
|
||||
res_columns.emplace_back(std::move(col_prefer_not_to_merge));
|
||||
|
||||
UInt64 num_rows = res_columns.at(0)->size();
|
||||
Chunk chunk(std::move(res_columns), num_rows);
|
||||
|
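
The hunk above adds a prefer_not_to_merge column to system.storage_policies, which the integration tests below read back. A direct query over the new column (column names taken from the hunk) would be:

    SELECT policy_name, volume_name, prefer_not_to_merge
    FROM system.storage_policies
    ORDER BY policy_name, volume_priority;
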
@ -0,0 +1,38 @@
|
||||
<yandex>
|
||||
<profiles>
|
||||
<default>
|
||||
<max_memory_usage>10000000000</max_memory_usage>
|
||||
<use_uncompressed_cache>0</use_uncompressed_cache>
|
||||
<load_balancing>random</load_balancing>
|
||||
<max_concurrent_queries_for_all_users>2</max_concurrent_queries_for_all_users>
|
||||
</default>
|
||||
<someuser>
|
||||
<max_memory_usage>10000000000</max_memory_usage>
|
||||
<use_uncompressed_cache>0</use_uncompressed_cache>
|
||||
<load_balancing>random</load_balancing>
|
||||
</someuser>
|
||||
</profiles>
|
||||
<users>
|
||||
<default>
|
||||
<password></password>
|
||||
<networks incl="networks" replace="replace">
|
||||
<ip>::/0</ip>
|
||||
</networks>
|
||||
<profile>default</profile>
|
||||
<quota>default</quota>
|
||||
</default>
|
||||
<someuser>
|
||||
<password></password>
|
||||
<networks incl="networks" replace="replace">
|
||||
<ip>::/0</ip>
|
||||
</networks>
|
||||
<profile>someuser</profile>
|
||||
<quota>default</quota>
|
||||
</someuser>
|
||||
</users>
|
||||
|
||||
<quotas>
|
||||
<default>
|
||||
</default>
|
||||
</quotas>
|
||||
</yandex>
|
@ -0,0 +1,41 @@
|
||||
import time
|
||||
from multiprocessing.dummy import Pool
|
||||
|
||||
import pytest
|
||||
from helpers.cluster import ClickHouseCluster
|
||||
|
||||
cluster = ClickHouseCluster(__file__)
|
||||
|
||||
node1 = cluster.add_instance('node1', user_configs=['configs/user_restrictions.xml'])
|
||||
|
||||
|
||||
@pytest.fixture(scope="module")
|
||||
def started_cluster():
|
||||
try:
|
||||
cluster.start()
|
||||
node1.query("create table nums (number UInt64) ENGINE = MergeTree() order by tuple()")
|
||||
node1.query("insert into nums values (0), (1)")
|
||||
yield cluster
|
||||
finally:
|
||||
cluster.shutdown()
|
||||
|
||||
|
||||
def test_exception_message(started_cluster):
|
||||
assert node1.query("select number from nums order by number") == "0\n1\n"
|
||||
|
||||
def node_busy(_):
|
||||
for i in range(10):
|
||||
node1.query("select sleep(2)", user='someuser', ignore_error=True)
|
||||
|
||||
busy_pool = Pool(3)
|
||||
busy_pool.map_async(node_busy, range(3))
|
||||
time.sleep(1) # wait a little until polling starts
|
||||
|
||||
with pytest.raises(Exception) as exc_info:
|
||||
for i in range(3):
|
||||
assert node1.query("select number from remote('node1', 'default', 'nums')", user='default') == "0\n1\n"
|
||||
exc_info.match("Too many simultaneous queries for all users")
|
||||
|
||||
for i in range(3):
|
||||
assert node1.query("select number from remote('node1', 'default', 'nums')", user='default',
|
||||
settings={'max_concurrent_queries_for_all_users': 0}) == "0\n1\n"
|
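
The test above leans on max_concurrent_queries_for_all_users being an ordinary query-level setting, so a privileged profile can override it per query (0 meaning unlimited). In SQL form, against the nums table the test creates:

    SELECT number FROM nums ORDER BY number
    SETTINGS max_concurrent_queries_for_all_users = 0;
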
@ -30,6 +30,18 @@
|
||||
</volumes>
|
||||
</small_jbod_with_external>
|
||||
|
||||
<small_jbod_with_external_no_merges>
|
||||
<volumes>
|
||||
<main>
|
||||
<disk>jbod1</disk>
|
||||
</main>
|
||||
<external>
|
||||
<disk>external</disk>
|
||||
<prefer_not_to_merge>true</prefer_not_to_merge>
|
||||
</external>
|
||||
</volumes>
|
||||
</small_jbod_with_external_no_merges>
|
||||
|
||||
<one_more_small_jbod_with_external>
|
||||
<volumes>
|
||||
<m>
|
||||
|
@ -76,6 +76,7 @@ def test_system_tables(start_cluster):
|
||||
"volume_type": "JBOD",
|
||||
"max_data_part_size": "0",
|
||||
"move_factor": 0.1,
|
||||
"prefer_not_to_merge": 0,
|
||||
},
|
||||
{
|
||||
"policy_name": "small_jbod_with_external",
|
||||
@ -85,6 +86,27 @@ def test_system_tables(start_cluster):
|
||||
"volume_type": "JBOD",
|
||||
"max_data_part_size": "0",
|
||||
"move_factor": 0.1,
|
||||
"prefer_not_to_merge": 0,
|
||||
},
|
||||
{
|
||||
"policy_name": "small_jbod_with_external_no_merges",
|
||||
"volume_name": "main",
|
||||
"volume_priority": "1",
|
||||
"disks": ["jbod1"],
|
||||
"volume_type": "JBOD",
|
||||
"max_data_part_size": "0",
|
||||
"move_factor": 0.1,
|
||||
"prefer_not_to_merge": 0,
|
||||
},
|
||||
{
|
||||
"policy_name": "small_jbod_with_external_no_merges",
|
||||
"volume_name": "external",
|
||||
"volume_priority": "2",
|
||||
"disks": ["external"],
|
||||
"volume_type": "JBOD",
|
||||
"max_data_part_size": "0",
|
||||
"move_factor": 0.1,
|
||||
"prefer_not_to_merge": 1,
|
||||
},
|
||||
{
|
||||
"policy_name": "one_more_small_jbod_with_external",
|
||||
@ -94,6 +116,7 @@ def test_system_tables(start_cluster):
|
||||
"volume_type": "JBOD",
|
||||
"max_data_part_size": "0",
|
||||
"move_factor": 0.1,
|
||||
"prefer_not_to_merge": 0,
|
||||
},
|
||||
{
|
||||
"policy_name": "one_more_small_jbod_with_external",
|
||||
@ -103,6 +126,7 @@ def test_system_tables(start_cluster):
|
||||
"volume_type": "JBOD",
|
||||
"max_data_part_size": "0",
|
||||
"move_factor": 0.1,
|
||||
"prefer_not_to_merge": 0,
|
||||
},
|
||||
{
|
||||
"policy_name": "jbods_with_external",
|
||||
@ -112,6 +136,7 @@ def test_system_tables(start_cluster):
|
||||
"volume_type": "JBOD",
|
||||
"max_data_part_size": "10485760",
|
||||
"move_factor": 0.1,
|
||||
"prefer_not_to_merge": 0,
|
||||
},
|
||||
{
|
||||
"policy_name": "jbods_with_external",
|
||||
@ -121,6 +146,7 @@ def test_system_tables(start_cluster):
|
||||
"volume_type": "JBOD",
|
||||
"max_data_part_size": "0",
|
||||
"move_factor": 0.1,
|
||||
"prefer_not_to_merge": 0,
|
||||
},
|
||||
{
|
||||
"policy_name": "moving_jbod_with_external",
|
||||
@ -130,6 +156,7 @@ def test_system_tables(start_cluster):
|
||||
"volume_type": "JBOD",
|
||||
"max_data_part_size": "0",
|
||||
"move_factor": 0.7,
|
||||
"prefer_not_to_merge": 0,
|
||||
},
|
||||
{
|
||||
"policy_name": "moving_jbod_with_external",
|
||||
@ -139,6 +166,7 @@ def test_system_tables(start_cluster):
|
||||
"volume_type": "JBOD",
|
||||
"max_data_part_size": "0",
|
||||
"move_factor": 0.7,
|
||||
"prefer_not_to_merge": 0,
|
||||
},
|
||||
{
|
||||
"policy_name": "default_disk_with_external",
|
||||
@ -148,6 +176,7 @@ def test_system_tables(start_cluster):
|
||||
"volume_type": "JBOD",
|
||||
"max_data_part_size": "2097152",
|
||||
"move_factor": 0.1,
|
||||
"prefer_not_to_merge": 0,
|
||||
},
|
||||
{
|
||||
"policy_name": "default_disk_with_external",
|
||||
@ -157,6 +186,7 @@ def test_system_tables(start_cluster):
|
||||
"volume_type": "JBOD",
|
||||
"max_data_part_size": "20971520",
|
||||
"move_factor": 0.1,
|
||||
"prefer_not_to_merge": 0,
|
||||
},
|
||||
{
|
||||
"policy_name": "special_warning_policy",
|
||||
@ -166,6 +196,7 @@ def test_system_tables(start_cluster):
|
||||
"volume_type": "JBOD",
|
||||
"max_data_part_size": "0",
|
||||
"move_factor": 0.1,
|
||||
"prefer_not_to_merge": 0,
|
||||
},
|
||||
{
|
||||
"policy_name": "special_warning_policy",
|
||||
@ -175,6 +206,7 @@ def test_system_tables(start_cluster):
|
||||
"volume_type": "JBOD",
|
||||
"max_data_part_size": "0",
|
||||
"move_factor": 0.1,
|
||||
"prefer_not_to_merge": 0,
|
||||
},
|
||||
{
|
||||
"policy_name": "special_warning_policy",
|
||||
@ -184,6 +216,7 @@ def test_system_tables(start_cluster):
|
||||
"volume_type": "JBOD",
|
||||
"max_data_part_size": "1024",
|
||||
"move_factor": 0.1,
|
||||
"prefer_not_to_merge": 0,
|
||||
},
|
||||
{
|
||||
"policy_name": "special_warning_policy",
|
||||
@ -193,6 +226,7 @@ def test_system_tables(start_cluster):
|
||||
"volume_type": "JBOD",
|
||||
"max_data_part_size": "1024000000",
|
||||
"move_factor": 0.1,
|
||||
"prefer_not_to_merge": 0,
|
||||
},
|
||||
]
|
||||
|
||||
@ -306,6 +340,9 @@ def get_used_disks_for_table(node, table_name):
|
||||
table_name)).strip().split('\n')
|
||||
|
||||
|
||||
def get_used_parts_for_table(node, table_name):
|
||||
return node.query("SELECT name FROM system.parts WHERE table = '{}' AND active = 1 ORDER BY modification_time".format(table_name)).splitlines()
|
||||
|
||||
def test_no_warning_about_zero_max_data_part_size(start_cluster):
|
||||
def get_log(node):
|
||||
return node.exec_in_container(["bash", "-c", "cat /var/log/clickhouse-server/clickhouse-server.log"])
|
||||
@ -370,6 +407,8 @@ def test_round_robin(start_cluster, name, engine):
|
||||
])
|
||||
def test_max_data_part_size(start_cluster, name, engine):
|
||||
try:
|
||||
assert int(*node1.query("""SELECT max_data_part_size FROM system.storage_policies WHERE policy_name = 'jbods_with_external' AND volume_name = 'main'""").splitlines()) == 10*1024*1024
|
||||
|
||||
node1.query("""
|
||||
CREATE TABLE {name} (
|
||||
s1 String
|
||||
@ -832,7 +871,7 @@ def test_concurrent_alter_move(start_cluster, name, engine):
|
||||
tasks.append(p.apply_async(optimize_table, (100,)))
|
||||
|
||||
for task in tasks:
|
||||
task.get(timeout=120)
|
||||
task.get(timeout=240)
|
||||
|
||||
assert node1.query("SELECT 1") == "1\n"
|
||||
assert node1.query("SELECT COUNT() FROM {}".format(name)) == "500\n"
|
||||
@ -1263,8 +1302,7 @@ def test_move_while_merge(start_cluster):
|
||||
node1.query("INSERT INTO {name} VALUES (1)".format(name=name))
|
||||
node1.query("INSERT INTO {name} VALUES (2)".format(name=name))
|
||||
|
||||
parts = node1.query(
|
||||
"SELECT name FROM system.parts WHERE table = '{name}' AND active = 1".format(name=name)).splitlines()
|
||||
parts = get_used_parts_for_table(node1, name)
|
||||
assert len(parts) == 2
|
||||
|
||||
def optimize():
|
||||
@ -1329,7 +1367,10 @@ def test_move_across_policies_does_not_work(start_cluster):
|
||||
""".format(name=name))
|
||||
|
||||
node1.query("""INSERT INTO {name} VALUES (1)""".format(name=name))
|
||||
node1.query("""ALTER TABLE {name} MOVE PARTITION tuple() TO DISK 'jbod2'""".format(name=name))
|
||||
try:
|
||||
node1.query("""ALTER TABLE {name} MOVE PARTITION tuple() TO DISK 'jbod2'""".format(name=name))
|
||||
except QueryRuntimeException:
|
||||
"""All parts of partition 'all' are already on disk 'jbod2'."""
|
||||
|
||||
with pytest.raises(QueryRuntimeException, match='.*because disk does not belong to storage policy.*'):
|
||||
node1.query("""ALTER TABLE {name}2 ATTACH PARTITION tuple() FROM {name}""".format(name=name))
|
||||
@ -1345,3 +1386,160 @@ def test_move_across_policies_does_not_work(start_cluster):
|
||||
finally:
|
||||
node1.query("DROP TABLE IF EXISTS {name}".format(name=name))
|
||||
node1.query("DROP TABLE IF EXISTS {name}2".format(name=name))
|
||||
|
||||
|
||||
def _insert_merge_execute(node, name, policy, parts, cmds, parts_before_cmds, parts_after_cmds):
|
||||
try:
|
||||
node.query("""
|
||||
CREATE TABLE {name} (
|
||||
n Int64
|
||||
) ENGINE = MergeTree
|
||||
ORDER BY tuple()
|
||||
PARTITION BY tuple()
|
||||
TTL now()-1 TO VOLUME 'external'
|
||||
SETTINGS storage_policy='{policy}'
|
||||
""".format(name=name, policy=policy))
|
||||
|
||||
for i in range(parts):
|
||||
node.query("""INSERT INTO {name} VALUES ({n})""".format(name=name, n=i))
|
||||
|
||||
disks = get_used_disks_for_table(node, name)
|
||||
assert set(disks) == {"external"}
|
||||
|
||||
node.query("""OPTIMIZE TABLE {name}""".format(name=name))
|
||||
|
||||
parts = get_used_parts_for_table(node, name)
|
||||
assert len(parts) == parts_before_cmds
|
||||
|
||||
for cmd in cmds:
|
||||
node.query(cmd)
|
||||
|
||||
node.query("""OPTIMIZE TABLE {name}""".format(name=name))
|
||||
|
||||
parts = get_used_parts_for_table(node, name)
|
||||
assert len(parts) == parts_after_cmds
|
||||
|
||||
finally:
|
||||
node.query("DROP TABLE IF EXISTS {name}".format(name=name))
|
||||
|
||||
|
||||
def _check_merges_are_working(node, storage_policy, volume, shall_work):
|
||||
try:
|
||||
name = "_check_merges_are_working_{storage_policy}_{volume}".format(storage_policy=storage_policy, volume=volume)
|
||||
|
||||
node.query("""
|
||||
CREATE TABLE {name} (
|
||||
n Int64
|
||||
) ENGINE = MergeTree
|
||||
ORDER BY tuple()
|
||||
PARTITION BY tuple()
|
||||
SETTINGS storage_policy='{storage_policy}'
|
||||
""".format(name=name, storage_policy=storage_policy))
|
||||
|
||||
created_parts = 24
|
||||
|
||||
for i in range(created_parts):
|
||||
node.query("""INSERT INTO {name} VALUES ({n})""".format(name=name, n=i))
|
||||
try:
|
||||
node.query("""ALTER TABLE {name} MOVE PARTITION tuple() TO VOLUME '{volume}' """.format(name=name, volume=volume))
|
||||
except:
|
||||
"""Ignore 'nothing to move'."""
|
||||
|
||||
expected_disks = set(node.query("""
|
||||
SELECT disks FROM system.storage_policies ARRAY JOIN disks WHERE volume_name = '{volume_name}'
|
||||
""".format(volume_name=volume)).splitlines())
|
||||
|
||||
disks = get_used_disks_for_table(node, name)
|
||||
assert set(disks) <= expected_disks
|
||||
|
||||
node.query("""OPTIMIZE TABLE {name} FINAL""".format(name=name))
|
||||
|
||||
parts = get_used_parts_for_table(node, name)
|
||||
assert len(parts) == 1 if shall_work else created_parts
|
||||
|
||||
finally:
|
||||
node.query("DROP TABLE IF EXISTS {name}".format(name=name))
|
||||
|
||||
|
||||
def _get_prefer_not_to_merge_for_storage_policy(node, storage_policy):
|
||||
return list(map(int, node.query("SELECT prefer_not_to_merge FROM system.storage_policies WHERE policy_name = '{}' ORDER BY volume_priority".format(storage_policy)).splitlines()))
|
||||
|
||||
|
||||
def test_simple_merge_tree_merges_are_disabled(start_cluster):
|
||||
_check_merges_are_working(node1, "small_jbod_with_external_no_merges", "external", False)
|
||||
|
||||
|
||||
def test_no_merges_in_configuration_allow_from_query_without_reload(start_cluster):
|
||||
try:
|
||||
name = "test_no_merges_in_configuration_allow_from_query_without_reload"
|
||||
policy = "small_jbod_with_external_no_merges"
|
||||
node1.restart_clickhouse(kill=True)
|
||||
assert _get_prefer_not_to_merge_for_storage_policy(node1, policy) == [0, 1]
|
||||
_check_merges_are_working(node1, policy, "external", False)
|
||||
|
||||
_insert_merge_execute(node1, name, policy, 2, [
|
||||
"SYSTEM START MERGES ON VOLUME {}.external".format(policy)
|
||||
], 2, 1)
|
||||
assert _get_prefer_not_to_merge_for_storage_policy(node1, policy) == [0, 0]
|
||||
_check_merges_are_working(node1, policy, "external", True)
|
||||
|
||||
finally:
|
||||
node1.query("SYSTEM STOP MERGES ON VOLUME {}.external".format(policy))
|
||||
|
||||
|
||||
def test_no_merges_in_configuration_allow_from_query_with_reload(start_cluster):
|
||||
try:
|
||||
name = "test_no_merges_in_configuration_allow_from_query_with_reload"
|
||||
policy = "small_jbod_with_external_no_merges"
|
||||
node1.restart_clickhouse(kill=True)
|
||||
assert _get_prefer_not_to_merge_for_storage_policy(node1, policy) == [0, 1]
|
||||
_check_merges_are_working(node1, policy, "external", False)
|
||||
|
||||
_insert_merge_execute(node1, name, policy, 2, [
|
||||
"SYSTEM START MERGES ON VOLUME {}.external".format(policy),
|
||||
"SYSTEM RELOAD CONFIG"
|
||||
], 2, 1)
|
||||
assert _get_prefer_not_to_merge_for_storage_policy(node1, policy) == [0, 0]
|
||||
_check_merges_are_working(node1, policy, "external", True)
|
||||
|
||||
finally:
|
||||
node1.query("SYSTEM STOP MERGES ON VOLUME {}.external".format(policy))
|
||||
|
||||
|
||||
def test_yes_merges_in_configuration_disallow_from_query_without_reload(start_cluster):
|
||||
try:
|
||||
name = "test_yes_merges_in_configuration_allow_from_query_without_reload"
|
||||
policy = "small_jbod_with_external"
|
||||
node1.restart_clickhouse(kill=True)
|
||||
assert _get_prefer_not_to_merge_for_storage_policy(node1, policy) == [0, 0]
|
||||
_check_merges_are_working(node1, policy, "external", True)
|
||||
|
||||
_insert_merge_execute(node1, name, policy, 2, [
|
||||
"SYSTEM STOP MERGES ON VOLUME {}.external".format(policy),
|
||||
"INSERT INTO {name} VALUES (2)".format(name=name)
|
||||
], 1, 2)
|
||||
assert _get_prefer_not_to_merge_for_storage_policy(node1, policy) == [0, 1]
|
||||
_check_merges_are_working(node1, policy, "external", False)
|
||||
|
||||
finally:
|
||||
node1.query("SYSTEM START MERGES ON VOLUME {}.external".format(policy))
|
||||
|
||||
|
||||
def test_yes_merges_in_configuration_disallow_from_query_with_reload(start_cluster):
|
||||
try:
|
||||
name = "test_yes_merges_in_configuration_allow_from_query_with_reload"
|
||||
policy = "small_jbod_with_external"
|
||||
node1.restart_clickhouse(kill=True)
|
||||
assert _get_prefer_not_to_merge_for_storage_policy(node1, policy) == [0, 0]
|
||||
_check_merges_are_working(node1, policy, "external", True)
|
||||
|
||||
_insert_merge_execute(node1, name, policy, 2, [
|
||||
"SYSTEM STOP MERGES ON VOLUME {}.external".format(policy),
|
||||
"INSERT INTO {name} VALUES (2)".format(name=name),
|
||||
"SYSTEM RELOAD CONFIG"
|
||||
], 1, 2)
|
||||
assert _get_prefer_not_to_merge_for_storage_policy(node1, policy) == [0, 1]
|
||||
_check_merges_are_working(node1, policy, "external", False)
|
||||
|
||||
finally:
|
||||
node1.query("SYSTEM START MERGES ON VOLUME {}.external".format(policy))
|
||||
|
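
The new tests above drive merges per volume through the SYSTEM query added earlier in this diff. The syntax, with the storage policy and volume names these tests use, is:

    SYSTEM STOP MERGES ON VOLUME small_jbod_with_external_no_merges.external;
    SYSTEM START MERGES ON VOLUME small_jbod_with_external_no_merges.external;
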
0
tests/integration/test_system_metrics/__init__.py
Normal file
@ -0,0 +1,19 @@
|
||||
<yandex>
|
||||
<remote_servers>
|
||||
<test_cluster>
|
||||
<shard>
|
||||
<internal_replication>true</internal_replication>
|
||||
<replica>
|
||||
<default_database>shard_0</default_database>
|
||||
<host>node1</host>
|
||||
<port>9000</port>
|
||||
</replica>
|
||||
<replica>
|
||||
<default_database>shard_0</default_database>
|
||||
<host>node2</host>
|
||||
<port>9000</port>
|
||||
</replica>
|
||||
</shard>
|
||||
</test_cluster>
|
||||
</remote_servers>
|
||||
</yandex>
|
61
tests/integration/test_system_metrics/test.py
Normal file
@ -0,0 +1,61 @@
|
||||
import time
|
||||
|
||||
import pytest
|
||||
from helpers.cluster import ClickHouseCluster
|
||||
from helpers.test_tools import assert_eq_with_retry
|
||||
from helpers.network import PartitionManager
|
||||
|
||||
|
||||
def fill_nodes(nodes, shard):
|
||||
for node in nodes:
|
||||
node.query(
|
||||
'''
|
||||
CREATE DATABASE test;
|
||||
|
||||
CREATE TABLE test.test_table(date Date, id UInt32)
|
||||
ENGINE = ReplicatedMergeTree('/clickhouse/tables/test{shard}/replicated', '{replica}') ORDER BY id PARTITION BY toYYYYMM(date) SETTINGS min_replicated_logs_to_keep=3, max_replicated_logs_to_keep=5, cleanup_delay_period=0, cleanup_delay_period_random_add=0;
|
||||
'''.format(shard=shard, replica=node.name))
|
||||
|
||||
|
||||
cluster = ClickHouseCluster(__file__)
|
||||
node1 = cluster.add_instance('node1', main_configs=['configs/remote_servers.xml'], with_zookeeper=True)
|
||||
node2 = cluster.add_instance('node2', main_configs=['configs/remote_servers.xml'], with_zookeeper=True)
|
||||
|
||||
|
||||
@pytest.fixture(scope="module")
|
||||
def start_cluster():
|
||||
try:
|
||||
cluster.start()
|
||||
|
||||
fill_nodes([node1, node2], 1)
|
||||
|
||||
yield cluster
|
||||
|
||||
except Exception as ex:
|
||||
print(ex)
|
||||
|
||||
finally:
|
||||
cluster.shutdown()
|
||||
|
||||
def test_readonly_metrics(start_cluster):
|
||||
assert node1.query("SELECT value FROM system.metrics WHERE metric = 'ReadonlyReplica'") == "0\n"
|
||||
|
||||
with PartitionManager() as pm:
|
||||
## make node1 readonly -> heal -> readonly -> heal -> detach table -> heal -> attach table
|
||||
pm.drop_instance_zk_connections(node1)
|
||||
assert_eq_with_retry(node1, "SELECT value FROM system.metrics WHERE metric = 'ReadonlyReplica'", "1\n", retry_count=300, sleep_time=1)
|
||||
|
||||
pm.heal_all()
|
||||
assert_eq_with_retry(node1, "SELECT value FROM system.metrics WHERE metric = 'ReadonlyReplica'", "0\n", retry_count=300, sleep_time=1)
|
||||
|
||||
pm.drop_instance_zk_connections(node1)
|
||||
assert_eq_with_retry(node1, "SELECT value FROM system.metrics WHERE metric = 'ReadonlyReplica'", "1\n", retry_count=300, sleep_time=1)
|
||||
|
||||
|
||||
node1.query("DETACH TABLE test.test_table")
|
||||
assert "0\n" == node1.query("SELECT value FROM system.metrics WHERE metric = 'ReadonlyReplica'")
|
||||
|
||||
pm.heal_all()
|
||||
node1.query("ATTACH TABLE test.test_table")
|
||||
assert_eq_with_retry(node1, "SELECT value FROM system.metrics WHERE metric = 'ReadonlyReplica'", "0\n", retry_count=300, sleep_time=1)
|
||||
|
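
The new integration test above tracks replica state through the ReadonlyReplica metric; the query it repeatedly issues is simply:

    SELECT value FROM system.metrics WHERE metric = 'ReadonlyReplica';
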
84
tests/performance/encrypt_decrypt.xml
Normal file
@ -0,0 +1,84 @@
|
||||
<test>
|
||||
<!-- "Strict" mode (key and iv length checks), non-empty plaintext.
|
||||
See also other encrypt_decrypt_*.xml for more cases. -->
|
||||
|
||||
<substitutions>
|
||||
<substitution>
|
||||
<name>func</name>
|
||||
<values>
|
||||
<!-- materialize(plaintext) is to avoid the all-args-are-const optimization, which would result in executing the function exactly once. -->
|
||||
<value>encrypt('aes-128-cbc', materialize(plaintext), key16, iv16)</value>
|
||||
<value>encrypt('aes-128-cfb1', materialize(plaintext), key16, iv16)</value>
|
||||
<value>encrypt('aes-128-cfb8', materialize(plaintext), key16, iv16)</value>
|
||||
<value>encrypt('aes-128-cfb128', materialize(plaintext), key16, iv16)</value>
|
||||
<value>encrypt('aes-128-ctr', materialize(plaintext), key16, iv16)</value>
|
||||
<value>encrypt('aes-128-ecb', materialize(plaintext), key16)</value>
|
||||
<value>encrypt('aes-128-ofb', materialize(plaintext), key16, iv16)</value>
|
||||
<value>encrypt('aes-128-gcm', materialize(plaintext), key16, iv12, 'aadaadaadaad')</value>
|
||||
|
||||
<value>encrypt('aes-192-cbc', materialize(plaintext), key24, iv16)</value>
|
||||
<value>encrypt('aes-192-cfb1', materialize(plaintext), key24, iv16)</value>
|
||||
<value>encrypt('aes-192-cfb8', materialize(plaintext), key24, iv16)</value>
|
||||
<value>encrypt('aes-192-cfb128', materialize(plaintext), key24, iv16)</value>
|
||||
<value>encrypt('aes-192-ctr', materialize(plaintext), key24, iv16)</value>
|
||||
<value>encrypt('aes-192-ecb', materialize(plaintext), key24)</value>
|
||||
<value>encrypt('aes-192-ofb', materialize(plaintext), key24, iv16)</value>
|
||||
<value>encrypt('aes-192-gcm', materialize(plaintext), key24, iv12, 'aadaadaadaad')</value>
|
||||
|
||||
<value>encrypt('aes-256-cbc', materialize(plaintext), key32, iv16)</value>
|
||||
<value>encrypt('aes-256-cfb1', materialize(plaintext), key32, iv16)</value>
|
||||
<value>encrypt('aes-256-cfb8', materialize(plaintext), key32, iv16)</value>
|
||||
<value>encrypt('aes-256-cfb128', materialize(plaintext), key32, iv16)</value>
|
||||
<value>encrypt('aes-256-ctr', materialize(plaintext), key32, iv16)</value>
|
||||
<value>encrypt('aes-256-ecb', materialize(plaintext), key32)</value>
|
||||
<value>encrypt('aes-256-ofb', materialize(plaintext), key32, iv16)</value>
|
||||
<value>encrypt('aes-256-gcm', materialize(plaintext), key32, iv12, 'aadaadaadaad')</value>
|
||||
|
||||
<!-- decrypt + encrypt since it is really hard to compose decrypt-only case -->
|
||||
<value>decrypt('aes-128-cbc', encrypt('aes-128-cbc', materialize(plaintext), key16, iv16), key16, iv16)</value>
|
||||
<value>decrypt('aes-128-cfb8', encrypt('aes-128-cfb8', materialize(plaintext), key16, iv16), key16, iv16)</value>
|
||||
<value>decrypt('aes-128-cfb128', encrypt('aes-128-cfb128', materialize(plaintext), key16, iv16), key16, iv16)</value>
|
||||
<value>decrypt('aes-128-ctr', encrypt('aes-128-ctr', materialize(plaintext), key16, iv16), key16, iv16)</value>
|
||||
<value>decrypt('aes-128-ecb', encrypt('aes-128-ecb', materialize(plaintext), key16), key16)</value>
|
||||
<value>decrypt('aes-128-ofb', encrypt('aes-128-ofb', materialize(plaintext), key16, iv16), key16, iv16)</value>
|
||||
<value>decrypt('aes-128-gcm', encrypt('aes-128-gcm', materialize(plaintext), key16, iv12, 'aadaadaadaad'), key16, iv12, 'aadaadaadaad')</value>
|
||||
|
||||
<value>decrypt('aes-192-cbc', encrypt('aes-192-cbc', materialize(plaintext), key24, iv16), key24, iv16)</value>
|
||||
<value>decrypt('aes-192-cfb8', encrypt('aes-192-cfb8', materialize(plaintext), key24, iv16), key24, iv16)</value>
|
||||
<value>decrypt('aes-192-cfb128', encrypt('aes-192-cfb128', materialize(plaintext), key24, iv16), key24, iv16)</value>
|
||||
<value>decrypt('aes-192-ctr', encrypt('aes-192-ctr', materialize(plaintext), key24, iv16), key24, iv16)</value>
|
||||
<value>decrypt('aes-192-ecb', encrypt('aes-192-ecb', materialize(plaintext), key24), key24)</value>
|
||||
<value>decrypt('aes-192-ofb', encrypt('aes-192-ofb', materialize(plaintext), key24, iv16), key24, iv16)</value>
|
||||
<value>decrypt('aes-192-gcm', encrypt('aes-192-gcm', materialize(plaintext), key24, iv12, 'aadaadaadaad'), key24, iv12, 'aadaadaadaad')</value>
|
||||
|
||||
<value>decrypt('aes-256-cbc', encrypt('aes-256-cbc', materialize(plaintext), key32, iv16), key32, iv16)</value>
|
||||
<value>decrypt('aes-256-cfb8', encrypt('aes-256-cfb8', materialize(plaintext), key32, iv16), key32, iv16)</value>
|
||||
<value>decrypt('aes-256-cfb128', encrypt('aes-256-cfb128', materialize(plaintext), key32, iv16), key32, iv16)</value>
|
||||
<value>decrypt('aes-256-ctr', encrypt('aes-256-ctr', materialize(plaintext), key32, iv16), key32, iv16)</value>
|
||||
<value>decrypt('aes-256-ecb', encrypt('aes-256-ecb', materialize(plaintext), key32), key32)</value>
|
||||
<value>decrypt('aes-256-ofb', encrypt('aes-256-ofb', materialize(plaintext), key32, iv16), key32, iv16)</value>
|
||||
<value>decrypt('aes-256-gcm', encrypt('aes-256-gcm', materialize(plaintext), key32, iv12, 'aadaadaadaad'), key32, iv12, 'aadaadaadaad')</value>
|
||||
|
||||
</values>
|
||||
</substitution>
|
||||
<substitution>
|
||||
<name>table</name>
|
||||
<values>
|
||||
<value>numbers(100000)</value>
|
||||
</values>
|
||||
</substitution>
|
||||
<substitution>
|
||||
<name>plaintext</name>
|
||||
<values>
|
||||
<value>number</value>
|
||||
<value>'paintext'</value>
|
||||
<value>'\x12\x2B\xF9\x16\x93\xA4\xD6\x74\x22\xD9\x17\x5E\x38\xCD\x1D\x7B\xB0\x12\xEC\x43\x6B\xC7\x76\xFD\xA1\xA2\x4E\xFC\xBC\x19\x92\x3A\x12\x8B\xD4\xB3\x62\xA8\x9D\xBB\x3E\x0C\x08\x12\x67\x20\x7D\x02\x58\xCF\xE7\xD6\x06\xB8\xB0\x14\x0A\x70\xA1\x81\x94\x14\x24\x74'</value>
|
||||
</values>
|
||||
</substitution>
|
||||
</substitutions>
|
||||
|
||||
<!-- allow OpenSSL-related code load ciphers and warm-up -->
|
||||
<fill_query>WITH {plaintext} as plaintext, repeat('k', 32) as key32, substring(key32, 1, 24) as key24, substring(key32, 1, 16) as key16, repeat('iv', 8) as iv16, substring(iv16, 1, 12) as iv12 SELECT count() FROM {table} WHERE NOT ignore({func}) LIMIT 1</fill_query>
|
||||
|
||||
<query>WITH {plaintext} as plaintext, repeat('k', 32) as key32, substring(key32, 1, 24) as key24, substring(key32, 1, 16) as key16, repeat('iv', 8) as iv16, substring(iv16, 1, 12) as iv12 SELECT count() FROM {table} WHERE NOT ignore({func})</query>
|
||||
</test>
67
tests/performance/encrypt_decrypt_empty_string.xml
Normal file
@ -0,0 +1,67 @@
<test>
<!-- "Strict" mode (key and iv length checks), empty plaintext.
Ciphers that produce empty ciphertext on empty plaintext, and hence can be optimized to almost NO-OP. -->

<substitutions>
<substitution>
<name>func</name>
<values>
<!-- materialize(plaintext) is to avoid the all-args-are-const optimization, which would result in executing the function exactly once. -->
<value>encrypt('aes-128-cfb1', materialize(plaintext), key16, iv16)</value>
<value>encrypt('aes-128-cfb8', materialize(plaintext), key16, iv16)</value>
<value>encrypt('aes-128-cfb128', materialize(plaintext), key16, iv16)</value>
<value>encrypt('aes-128-ctr', materialize(plaintext), key16, iv16)</value>
<value>encrypt('aes-128-ofb', materialize(plaintext), key16, iv16)</value>

<value>encrypt('aes-192-cfb1', materialize(plaintext), key24, iv16)</value>
<value>encrypt('aes-192-cfb8', materialize(plaintext), key24, iv16)</value>
<value>encrypt('aes-192-cfb128', materialize(plaintext), key24, iv16)</value>
<value>encrypt('aes-192-ctr', materialize(plaintext), key24, iv16)</value>
<value>encrypt('aes-192-ofb', materialize(plaintext), key24, iv16)</value>

<value>encrypt('aes-256-cfb1', materialize(plaintext), key32, iv16)</value>
<value>encrypt('aes-256-cfb8', materialize(plaintext), key32, iv16)</value>
<value>encrypt('aes-256-cfb128', materialize(plaintext), key32, iv16)</value>
<value>encrypt('aes-256-ctr', materialize(plaintext), key32, iv16)</value>
<value>encrypt('aes-256-ofb', materialize(plaintext), key32, iv16)</value>

<!-- decrypt + encrypt since it is really hard to compose decrypt-only case -->
<value>decrypt('aes-128-cfb1', encrypt('aes-128-cfb1', materialize(plaintext), key16, iv16), key16, iv16)</value>
<value>decrypt('aes-128-cfb8', encrypt('aes-128-cfb8', materialize(plaintext), key16, iv16), key16, iv16)</value>
<value>decrypt('aes-128-cfb128', encrypt('aes-128-cfb128', materialize(plaintext), key16, iv16), key16, iv16)</value>
<value>decrypt('aes-128-ctr', encrypt('aes-128-ctr', materialize(plaintext), key16, iv16), key16, iv16)</value>
<value>decrypt('aes-128-ofb', encrypt('aes-128-ofb', materialize(plaintext), key16, iv16), key16, iv16)</value>

<value>decrypt('aes-192-cfb1', encrypt('aes-192-cfb1', materialize(plaintext), key24, iv16), key24, iv16)</value>
<value>decrypt('aes-192-cfb8', encrypt('aes-192-cfb8', materialize(plaintext), key24, iv16), key24, iv16)</value>
<value>decrypt('aes-192-cfb128', encrypt('aes-192-cfb128', materialize(plaintext), key24, iv16), key24, iv16)</value>
<value>decrypt('aes-192-ctr', encrypt('aes-192-ctr', materialize(plaintext), key24, iv16), key24, iv16)</value>
<value>decrypt('aes-192-ofb', encrypt('aes-192-ofb', materialize(plaintext), key24, iv16), key24, iv16)</value>

<value>decrypt('aes-256-cfb1', encrypt('aes-256-cfb1', materialize(plaintext), key32, iv16), key32, iv16)</value>
<value>decrypt('aes-256-cfb8', encrypt('aes-256-cfb8', materialize(plaintext), key32, iv16), key32, iv16)</value>
<value>decrypt('aes-256-cfb128', encrypt('aes-256-cfb128', materialize(plaintext), key32, iv16), key32, iv16)</value>
<value>decrypt('aes-256-ctr', encrypt('aes-256-ctr', materialize(plaintext), key32, iv16), key32, iv16)</value>
<value>decrypt('aes-256-ofb', encrypt('aes-256-ofb', materialize(plaintext), key32, iv16), key32, iv16)</value>

</values>
</substitution>
<substitution>
<name>table</name>
<values>
<value>numbers(10000000)</value>
</values>
</substitution>
<substitution>
<name>plaintext</name>
<values>
<value>''</value>
</values>
</substitution>
</substitutions>

<!-- allow OpenSSL-related code to load ciphers and warm up -->
<fill_query>WITH {plaintext} as plaintext, repeat('k', 32) as key32, substring(key32, 1, 24) as key24, substring(key32, 1, 16) as key16, repeat('iv', 8) as iv16, substring(iv16, 1, 12) as iv12 SELECT count() FROM {table} WHERE NOT ignore({func}) LIMIT 1</fill_query>

<query>WITH {plaintext} as plaintext, repeat('k', 32) as key32, substring(key32, 1, 24) as key24, substring(key32, 1, 16) as key16, repeat('iv', 8) as iv16, substring(iv16, 1, 12) as iv12 SELECT count() FROM {table} WHERE NOT ignore({func})</query>
</test>
55
tests/performance/encrypt_decrypt_empty_string_slow.xml
Normal file
@ -0,0 +1,55 @@
<test>
<!-- "Strict" mode (key and iv length checks), empty plaintext.
Ciphers that produce non-empty ciphertext on empty plaintext, and hence can't be optimized. -->

<substitutions>
<substitution>
<name>func</name>
<values>
<!-- materialize(plaintext) is to avoid the all-args-are-const optimization, which would result in executing the function exactly once. -->
<value>encrypt('aes-128-cbc', materialize(plaintext), key16, iv16)</value>
<value>encrypt('aes-128-ecb', materialize(plaintext), key16)</value>
<value>encrypt('aes-128-gcm', materialize(plaintext), key16, iv12, 'aadaadaadaad')</value>

<value>encrypt('aes-192-cbc', materialize(plaintext), key24, iv16)</value>
<value>encrypt('aes-192-ecb', materialize(plaintext), key24)</value>
<value>encrypt('aes-192-gcm', materialize(plaintext), key24, iv12, 'aadaadaadaad')</value>

<value>encrypt('aes-256-cbc', materialize(plaintext), key32, iv16)</value>
<value>encrypt('aes-256-ecb', materialize(plaintext), key32)</value>
<value>encrypt('aes-256-gcm', materialize(plaintext), key32, iv12, 'aadaadaadaad')</value>

<!-- decrypt + encrypt since it is really hard to compose decrypt-only case -->
<value>decrypt('aes-128-cbc', encrypt('aes-128-cbc', materialize(plaintext), key16, iv16), key16, iv16)</value>
<value>decrypt('aes-128-ecb', encrypt('aes-128-ecb', materialize(plaintext), key16), key16)</value>
<value>decrypt('aes-128-gcm', encrypt('aes-128-gcm', materialize(plaintext), key16, iv12, 'aadaadaadaad'), key16, iv12, 'aadaadaadaad')</value>

<value>decrypt('aes-192-cbc', encrypt('aes-192-cbc', materialize(plaintext), key24, iv16), key24, iv16)</value>
<value>decrypt('aes-192-ecb', encrypt('aes-192-ecb', materialize(plaintext), key24), key24)</value>
<value>decrypt('aes-192-gcm', encrypt('aes-192-gcm', materialize(plaintext), key24, iv12, 'aadaadaadaad'), key24, iv12, 'aadaadaadaad')</value>

<value>decrypt('aes-256-cbc', encrypt('aes-256-cbc', materialize(plaintext), key32, iv16), key32, iv16)</value>
<value>decrypt('aes-256-ecb', encrypt('aes-256-ecb', materialize(plaintext), key32), key32)</value>
<value>decrypt('aes-256-gcm', encrypt('aes-256-gcm', materialize(plaintext), key32, iv12, 'aadaadaadaad'), key32, iv12, 'aadaadaadaad')</value>

</values>
</substitution>
<substitution>
<name>table</name>
<values>
<value>numbers(100000)</value>
</values>
</substitution>
<substitution>
<name>plaintext</name>
<values>
<value>''</value>
</values>
</substitution>
</substitutions>

<!-- allow OpenSSL-related code to load ciphers and warm up -->
<fill_query>WITH {plaintext} as plaintext, repeat('k', 32) as key32, substring(key32, 1, 24) as key24, substring(key32, 1, 16) as key16, repeat('iv', 8) as iv16, substring(iv16, 1, 12) as iv12 SELECT count() FROM {table} WHERE NOT ignore({func}) LIMIT 1</fill_query>

<query>WITH {plaintext} as plaintext, repeat('k', 32) as key32, substring(key32, 1, 24) as key24, substring(key32, 1, 16) as key16, repeat('iv', 8) as iv16, substring(iv16, 1, 12) as iv12 SELECT count() FROM {table} WHERE NOT ignore({func})</query>
</test>
36
tests/performance/encrypt_decrypt_slow.xml
Normal file
@ -0,0 +1,36 @@
<test>
<!-- "Strict" mode (key and iv length checks), non-empty plaintext.
Ciphers that are slow on every input are put here. -->

<substitutions>
<substitution>
<name>func</name>
<values>
<!-- decrypt + encrypt since it is really hard to compose decrypt-only case -->
<value>decrypt('aes-128-cfb1', encrypt('aes-128-cfb1', materialize(plaintext), key16, iv16), key16, iv16)</value>
<value>decrypt('aes-192-cfb1', encrypt('aes-192-cfb1', materialize(plaintext), key24, iv16), key24, iv16)</value>
<value>decrypt('aes-256-cfb1', encrypt('aes-256-cfb1', materialize(plaintext), key32, iv16), key32, iv16)</value>

</values>
</substitution>
<substitution>
<name>table</name>
<values>
<value>numbers(50000)</value>
</values>
</substitution>
<substitution>
<name>plaintext</name>
<values>
<value>number</value>
<value>'paintext'</value>
<value>'\x12\x2B\xF9\x16\x93\xA4\xD6\x74\x22\xD9\x17\x5E\x38\xCD\x1D\x7B\xB0\x12\xEC\x43\x6B\xC7\x76\xFD\xA1\xA2\x4E\xFC\xBC\x19\x92\x3A\x12\x8B\xD4\xB3\x62\xA8\x9D\xBB\x3E\x0C\x08\x12\x67\x20\x7D\x02\x58\xCF\xE7\xD6\x06\xB8\xB0\x14\x0A\x70\xA1\x81\x94\x14\x24\x74'</value>
</values>
</substitution>
</substitutions>

<!-- allow OpenSSL-related code to load ciphers and warm up -->
<fill_query>WITH {plaintext} as plaintext, repeat('k', 32) as key32, substring(key32, 1, 24) as key24, substring(key32, 1, 16) as key16, repeat('iv', 8) as iv16, substring(iv16, 1, 12) as iv12 SELECT count() FROM {table} WHERE NOT ignore({func}) LIMIT 1</fill_query>

<query>WITH {plaintext} as plaintext, repeat('k', 32) as key32, substring(key32, 1, 24) as key24, substring(key32, 1, 16) as key16, repeat('iv', 8) as iv16, substring(iv16, 1, 12) as iv12 SELECT count() FROM {table} WHERE NOT ignore({func})</query>
</test>
@ -15,12 +15,10 @@ INSERT INTO mutations_r1 VALUES (123, 1), (234, 2), (345, 3);
CREATE TABLE for_subquery(x UInt32) ENGINE TinyLog;
INSERT INTO for_subquery VALUES (234), (345);

ALTER TABLE mutations_r1 UPDATE y = y + 1 WHERE x IN for_subquery;
ALTER TABLE mutations_r1 UPDATE y = y + 1 WHERE x IN (SELECT x FROM for_subquery);
ALTER TABLE mutations_r1 UPDATE y = y + 1 WHERE x IN for_subquery SETTINGS mutations_sync = 2;
ALTER TABLE mutations_r1 UPDATE y = y + 1 WHERE x IN (SELECT x FROM for_subquery) SETTINGS mutations_sync = 2;
EOF

wait_for_mutation "mutations_r1" "0000000001"

${CLICKHOUSE_CLIENT} --query="SELECT * FROM mutations_r1"

${CLICKHOUSE_CLIENT} --query="DROP TABLE mutations_r1"
@ -26,8 +26,8 @@ ${CLICKHOUSE_CLIENT} --query="ALTER TABLE mutations_r1 DELETE WHERE nonexistent
${CLICKHOUSE_CLIENT} --query="ALTER TABLE mutations_r1 DELETE WHERE d = '11'" 2>/dev/null || echo "Query should fail 2"

# Delete some values
${CLICKHOUSE_CLIENT} --query="ALTER TABLE mutations_r1 DELETE WHERE x % 2 = 1"
${CLICKHOUSE_CLIENT} --query="ALTER TABLE mutations_r1 DELETE WHERE s = 'd'"
${CLICKHOUSE_CLIENT} --query="ALTER TABLE mutations_r1 DELETE WHERE x % 2 = 1 SETTINGS mutations_sync = 2"
${CLICKHOUSE_CLIENT} --query="ALTER TABLE mutations_r1 DELETE WHERE s = 'd' SETTINGS mutations_sync = 2"
${CLICKHOUSE_CLIENT} --query="ALTER TABLE mutations_r1 DELETE WHERE m = 3 SETTINGS mutations_sync = 2"

# Insert more data
@ -62,8 +62,8 @@ ${CLICKHOUSE_CLIENT} --query="CREATE TABLE mutations_cleaner_r2(x UInt32) ENGINE
${CLICKHOUSE_CLIENT} --query="INSERT INTO mutations_cleaner_r1(x) VALUES (1), (2), (3), (4)"

# Add some mutations and wait for their execution
${CLICKHOUSE_CLIENT} --query="ALTER TABLE mutations_cleaner_r1 DELETE WHERE x = 1"
${CLICKHOUSE_CLIENT} --query="ALTER TABLE mutations_cleaner_r1 DELETE WHERE x = 2"
${CLICKHOUSE_CLIENT} --query="ALTER TABLE mutations_cleaner_r1 DELETE WHERE x = 1 SETTINGS mutations_sync = 2"
${CLICKHOUSE_CLIENT} --query="ALTER TABLE mutations_cleaner_r1 DELETE WHERE x = 2 SETTINGS mutations_sync = 2"
${CLICKHOUSE_CLIENT} --query="ALTER TABLE mutations_cleaner_r1 DELETE WHERE x = 3 SETTINGS mutations_sync = 2"

# Add another mutation and prevent its execution on the second replica
@ -1,3 +1,5 @@
check invalid params
tests
4
3
2
@ -14,3 +16,5 @@
('192.168.5.2','192.168.5.2')
('0.0.0.0','0.255.255.255')
('240.0.0.0','255.255.255.255')
('240.0.0.0','255.255.255.255')
('248.0.0.0','255.255.255.255')
@ -1,3 +1,9 @@
SELECT 'check invalid params';
SELECT IPv4CIDRToRange(1, 1); -- { serverError 43 }
SELECT IPv4CIDRToRange(toUInt32(1), 512); -- { serverError 43 }

SELECT 'tests';

DROP TABLE IF EXISTS ipv4_range;
CREATE TABLE ipv4_range(ip IPv4, cidr UInt8) ENGINE = Memory;

@ -16,7 +22,9 @@ WITH IPv4CIDRToRange(ip, cidr) as ip_range SELECT ip, cidr, IPv4NumToString(tupl
DROP TABLE ipv4_range;

SELECT IPv4CIDRToRange(toIPv4('192.168.5.2'), 0);
SELEcT IPv4CIDRToRange(toIPv4('255.255.255.255'), 8);
SELECT IPv4CIDRToRange(toIPv4('255.255.255.255'), 8);
SELECT IPv4CIDRToRange(toIPv4('192.168.5.2'), 32);
SELECT IPv4CIDRToRange(toIPv4('0.0.0.0'), 8);
SELECT IPv4CIDRToRange(toIPv4('255.0.0.0'), 4);

SELECT IPv4CIDRToRange(toIPv4('255.0.0.0'), toUInt8(4 + number)) FROM numbers(2);
@ -1,3 +1,5 @@
check invalid params
tests
3
4
3
@ -16,3 +18,5 @@ ffff:: 4 ('f000::','ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff')
('::','ff:ffff:ffff:ffff:ffff:ffff:ffff:ffff')
('f000::','ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff')
1
('2001:db8:0:85a3::ac1f:8001','2001:db8:0:85a3::ac1f:8001')
('2001:db8:0:85a3::ac1f:8000','2001:db8:0:85a3::ac1f:8001')
@ -1,3 +1,11 @@
SELECT 'check invalid params';
SELECT IPv6CIDRToRange(1, 1); -- { serverError 43 }
SELECT IPv6CIDRToRange('1234', 1); -- { serverError 43 }
SELECT IPv6CIDRToRange(toFixedString('1234', 10), 1); -- { serverError 43 }
SELECT IPv6CIDRToRange(toFixedString('1234', 16), toUInt16(1)); -- { serverError 43 }

SELECT 'tests';

DROP TABLE IF EXISTS ipv6_range;
CREATE TABLE ipv6_range(ip IPv6, cidr UInt8) ENGINE = Memory;

@ -23,3 +31,5 @@ SELECT IPv6CIDRToRange(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff'
SELECT IPv6CIDRToRange(IPv6StringToNum('0000:0000:0000:0000:0000:0000:0000:0000'), 8);
SELECT IPv6CIDRToRange(IPv6StringToNum('ffff:0000:0000:0000:0000:0000:0000:0000'), 4);
SELECT IPv6CIDRToRange(IPv6StringToNum('2001:0db8:0000:85a3:0000:0000:ac1f:8001'), 128) = IPv6CIDRToRange(IPv6StringToNum('2001:0db8:0000:85a3:0000:0000:ac1f:8001'), 200) ;

SELECT IPv6CIDRToRange(IPv6StringToNum('2001:0db8:0000:85a3:0000:0000:ac1f:8001'), toUInt8(128 - number)) FROM numbers(2);
144
tests/queries/0_stateless/01318_decrypt.reference
Normal file
@ -0,0 +1,144 @@
0
0
0
1
MySQL-compatible mode, with key folding, no length checks, etc.
aes-128-cbc 1
aes-128-cbc 1
aes-128-cbc 1
aes-192-cbc 1
aes-192-cbc 1
aes-192-cbc 1
aes-256-cbc 1
aes-256-cbc 1
aes-256-cbc 1
aes-128-cfb1 1
aes-128-cfb1 1
aes-128-cfb1 1
aes-192-cfb1 1
aes-192-cfb1 1
aes-192-cfb1 1
aes-256-cfb1 1
aes-256-cfb1 1
aes-256-cfb1 1
aes-128-cfb8 1
aes-128-cfb8 1
aes-128-cfb8 1
aes-192-cfb8 1
aes-192-cfb8 1
aes-192-cfb8 1
aes-256-cfb8 1
aes-256-cfb8 1
aes-256-cfb8 1
aes-128-cfb128 1
aes-128-cfb128 1
aes-128-cfb128 1
aes-192-cfb128 1
aes-192-cfb128 1
aes-192-cfb128 1
aes-256-cfb128 1
aes-256-cfb128 1
aes-256-cfb128 1
aes-128-ecb 1
aes-128-ecb 1
aes-128-ecb 1
aes-192-ecb 1
aes-192-ecb 1
aes-192-ecb 1
aes-256-ecb 1
aes-256-ecb 1
aes-256-ecb 1
aes-128-ofb 1
aes-128-ofb 1
aes-128-ofb 1
aes-192-ofb 1
aes-192-ofb 1
aes-192-ofb 1
aes-256-ofb 1
aes-256-ofb 1
aes-256-ofb 1
Strict mode without key folding and proper key and iv lengths checks.
aes-128-cbc 1
aes-128-cbc 1
aes-128-cbc 1
aes-192-cbc 1
aes-192-cbc 1
aes-192-cbc 1
aes-256-cbc 1
aes-256-cbc 1
aes-256-cbc 1
aes-128-cfb1 1
aes-128-cfb1 1
aes-128-cfb1 1
aes-192-cfb1 1
aes-192-cfb1 1
aes-192-cfb1 1
aes-256-cfb1 1
aes-256-cfb1 1
aes-256-cfb1 1
aes-128-cfb8 1
aes-128-cfb8 1
aes-128-cfb8 1
aes-192-cfb8 1
aes-192-cfb8 1
aes-192-cfb8 1
aes-256-cfb8 1
aes-256-cfb8 1
aes-256-cfb8 1
aes-128-cfb128 1
aes-128-cfb128 1
aes-128-cfb128 1
aes-192-cfb128 1
aes-192-cfb128 1
aes-192-cfb128 1
aes-256-cfb128 1
aes-256-cfb128 1
aes-256-cfb128 1
aes-128-ctr 1
aes-128-ctr 1
aes-128-ctr 1
aes-192-ctr 1
aes-192-ctr 1
aes-192-ctr 1
aes-256-ctr 1
aes-256-ctr 1
aes-256-ctr 1
aes-128-ecb 1
aes-128-ecb 1
aes-128-ecb 1
aes-192-ecb 1
aes-192-ecb 1
aes-192-ecb 1
aes-256-ecb 1
aes-256-ecb 1
aes-256-ecb 1
aes-128-ofb 1
aes-128-ofb 1
aes-128-ofb 1
aes-192-ofb 1
aes-192-ofb 1
aes-192-ofb 1
aes-256-ofb 1
aes-256-ofb 1
aes-256-ofb 1
GCM mode with IV
aes-128-gcm 1
aes-128-gcm 1
aes-128-gcm 1
aes-192-gcm 1
aes-192-gcm 1
aes-192-gcm 1
aes-256-gcm 1
aes-256-gcm 1
aes-256-gcm 1
GCM mode with IV and AAD
aes-128-gcm 1
aes-128-gcm 1
aes-128-gcm 1
aes-192-gcm 1
aes-192-gcm 1
aes-192-gcm 1
aes-256-gcm 1
aes-256-gcm 1
aes-256-gcm 1
F56E87055BC32D0EEB31B2EACC2BF2A5 1
152
tests/queries/0_stateless/01318_decrypt.sql
Normal file
@ -0,0 +1,152 @@
|
||||
--- aes_decrypt_mysql(mode, ciphertext, key[, init_vector])
|
||||
-- MySQL-compatible encryption: only ecb, cbc, cfb1, cfb8, cfb128 and ofb modes are supported,
|
||||
-- just like for MySQL
|
||||
-- https://dev.mysql.com/doc/refman/8.0/en/encryption-functions.html#function_aes-encrypt
|
||||
-- https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_block_encryption_mode
|
||||
-- Please note that for keys that exceed mode-specific length, keys are folded in a MySQL-specific way,
|
||||
-- meaning that whole key is used, but effective key length is still determined by mode.
|
||||
-- When the key doesn't exceed the default mode length, the encryption result equals that of AES_encrypt().
|
||||
|
||||
-----------------------------------------------------------------------------------------
|
||||
-- error cases
|
||||
-----------------------------------------------------------------------------------------
|
||||
SELECT aes_decrypt_mysql(); --{serverError 42} not enough arguments
|
||||
SELECT aes_decrypt_mysql('aes-128-ecb'); --{serverError 42} not enough arguments
|
||||
SELECT aes_decrypt_mysql('aes-128-ecb', 'text'); --{serverError 42} not enough arguments
|
||||
|
||||
-- Mode
|
||||
SELECT aes_decrypt_mysql(789, 'text', 'key'); --{serverError 43} bad mode type
|
||||
SELECT aes_decrypt_mysql('blah blah blah', 'text', 'key'); -- {serverError 36} garbage mode value
|
||||
SELECT aes_decrypt_mysql('des-ede3-ecb', 'text', 'key'); -- {serverError 36} bad mode value of valid cipher name
|
||||
SELECT aes_decrypt_mysql('aes-128-gcm', 'text', 'key'); -- {serverError 36} mode is not supported by _mysql-functions
|
||||
|
||||
SELECT decrypt(789, 'text', 'key'); --{serverError 43} bad mode type
|
||||
SELECT decrypt('blah blah blah', 'text', 'key'); -- {serverError 36} garbage mode value
|
||||
SELECT decrypt('des-ede3-ecb', 'text', 'key'); -- {serverError 36} bad mode value of valid cipher name
|
||||
|
||||
|
||||
-- Key
|
||||
SELECT aes_decrypt_mysql('aes-128-ecb', 'text', 456); --{serverError 43} bad key type
|
||||
SELECT aes_decrypt_mysql('aes-128-ecb', 'text', 'key'); -- {serverError 36} key is too short
|
||||
|
||||
SELECT decrypt('aes-128-ecb', 'text'); --{serverError 42} key is missing
|
||||
SELECT decrypt('aes-128-ecb', 'text', 456); --{serverError 43} bad key type
|
||||
SELECT decrypt('aes-128-ecb', 'text', 'key'); -- {serverError 36} key is too short
|
||||
SELECT decrypt('aes-128-ecb', 'text', 'keykeykeykeykeykeykeykeykeykeykeykey'); -- {serverError 36} key is too long
|
||||
|
||||
-- IV
|
||||
SELECT aes_decrypt_mysql('aes-128-ecb', 'text', 'key', 1011); --{serverError 43} bad IV type 6
|
||||
SELECT aes_decrypt_mysql('aes-128-ecb', 'text', 'key', 'iv'); --{serverError 36} IV is too short 4
|
||||
|
||||
SELECT decrypt('aes-128-cbc', 'text', 'keykeykeykeykeyk', 1011); --{serverError 43} bad IV type 1
|
||||
SELECT decrypt('aes-128-cbc', 'text', 'keykeykeykeykeyk', 'iviviviviviviviviviviviviviviviviviviviviv'); --{serverError 36} IV is too long 3
|
||||
SELECT decrypt('aes-128-cbc', 'text', 'keykeykeykeykeyk', 'iv'); --{serverError 36} IV is too short 2
|
||||
|
||||
--AAD
|
||||
SELECT aes_decrypt_mysql('aes-128-ecb', 'text', 'key', 'IV', 1213); --{serverError 42} too many arguments
|
||||
|
||||
SELECT decrypt('aes-128-ecb', 'text', 'key', 'IV', 1213); --{serverError 43} bad AAD type
|
||||
SELECT decrypt('aes-128-gcm', 'text', 'key', 'IV', 1213); --{serverError 43} bad AAD type
|
||||
|
||||
-- Invalid ciphertext should cause an error or produce garbage
|
||||
SELECT ignore(decrypt('aes-128-ecb', 'hello there', '1111111111111111')); -- {serverError 454} 1
|
||||
SELECT ignore(decrypt('aes-128-cbc', 'hello there', '1111111111111111')); -- {serverError 454} 2
|
||||
SELECT ignore(decrypt('aes-128-cfb1', 'hello there', '1111111111111111')); -- GIGO
|
||||
SELECT ignore(decrypt('aes-128-ofb', 'hello there', '1111111111111111')); -- GIGO
|
||||
SELECT ignore(decrypt('aes-128-ctr', 'hello there', '1111111111111111')); -- GIGO
|
||||
SELECT decrypt('aes-128-ctr', '', '1111111111111111') == '';
|
||||
|
||||
|
||||
-----------------------------------------------------------------------------------------
|
||||
-- Validate against predefined ciphertext,plaintext,key and IV for MySQL compatibility mode
|
||||
-----------------------------------------------------------------------------------------
|
||||
CREATE TABLE encryption_test
|
||||
(
|
||||
input String,
|
||||
key String DEFAULT unhex('fb9958e2e897ef3fdb49067b51a24af645b3626eed2f9ea1dc7fd4dd71b7e38f9a68db2a3184f952382c783785f9d77bf923577108a88adaacae5c141b1576b0'),
|
||||
iv String DEFAULT unhex('8CA3554377DFF8A369BC50A89780DD85'),
|
||||
key32 String DEFAULT substring(key, 1, 32),
|
||||
key24 String DEFAULT substring(key, 1, 24),
|
||||
key16 String DEFAULT substring(key, 1, 16)
|
||||
) Engine = Memory;
|
||||
|
||||
INSERT INTO encryption_test (input)
|
||||
VALUES (''), ('text'), ('What Is ClickHouse? ClickHouse is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).');
|
||||
|
||||
|
||||
SELECT 'MySQL-compatible mode, with key folding, no length checks, etc.';
|
||||
SELECT 'aes-128-cbc' as mode, aes_decrypt_mysql(mode, aes_encrypt_mysql(mode, input, key, iv), key, iv) == input FROM encryption_test;
|
||||
SELECT 'aes-192-cbc' as mode, aes_decrypt_mysql(mode, aes_encrypt_mysql(mode, input, key, iv), key, iv) == input FROM encryption_test;
|
||||
SELECT 'aes-256-cbc' as mode, aes_decrypt_mysql(mode, aes_encrypt_mysql(mode, input, key, iv), key, iv) == input FROM encryption_test;
|
||||
|
||||
SELECT 'aes-128-cfb1' as mode, aes_decrypt_mysql(mode, aes_encrypt_mysql(mode, input, key, iv), key, iv) == input FROM encryption_test;
|
||||
SELECT 'aes-192-cfb1' as mode, aes_decrypt_mysql(mode, aes_encrypt_mysql(mode, input, key, iv), key, iv) == input FROM encryption_test;
|
||||
SELECT 'aes-256-cfb1' as mode, aes_decrypt_mysql(mode, aes_encrypt_mysql(mode, input, key, iv), key, iv) == input FROM encryption_test;
|
||||
|
||||
SELECT 'aes-128-cfb8' as mode, aes_decrypt_mysql(mode, aes_encrypt_mysql(mode, input, key, iv), key, iv) == input FROM encryption_test;
|
||||
SELECT 'aes-192-cfb8' as mode, aes_decrypt_mysql(mode, aes_encrypt_mysql(mode, input, key, iv), key, iv) == input FROM encryption_test;
|
||||
SELECT 'aes-256-cfb8' as mode, aes_decrypt_mysql(mode, aes_encrypt_mysql(mode, input, key, iv), key, iv) == input FROM encryption_test;
|
||||
|
||||
SELECT 'aes-128-cfb128' as mode, aes_decrypt_mysql(mode, aes_encrypt_mysql(mode, input, key, iv), key, iv) == input FROM encryption_test;
|
||||
SELECT 'aes-192-cfb128' as mode, aes_decrypt_mysql(mode, aes_encrypt_mysql(mode, input, key, iv), key, iv) == input FROM encryption_test;
|
||||
SELECT 'aes-256-cfb128' as mode, aes_decrypt_mysql(mode, aes_encrypt_mysql(mode, input, key, iv), key, iv) == input FROM encryption_test;
|
||||
|
||||
SELECT 'aes-128-ecb' as mode, aes_decrypt_mysql(mode, aes_encrypt_mysql(mode, input, key, iv), key, iv) == input FROM encryption_test;
|
||||
SELECT 'aes-192-ecb' as mode, aes_decrypt_mysql(mode, aes_encrypt_mysql(mode, input, key, iv), key, iv) == input FROM encryption_test;
|
||||
SELECT 'aes-256-ecb' as mode, aes_decrypt_mysql(mode, aes_encrypt_mysql(mode, input, key, iv), key, iv) == input FROM encryption_test;
|
||||
|
||||
SELECT 'aes-128-ofb' as mode, aes_decrypt_mysql(mode, aes_encrypt_mysql(mode, input, key, iv), key, iv) == input FROM encryption_test;
|
||||
SELECT 'aes-192-ofb' as mode, aes_decrypt_mysql(mode, aes_encrypt_mysql(mode, input, key, iv), key, iv) == input FROM encryption_test;
|
||||
SELECT 'aes-256-ofb' as mode, aes_decrypt_mysql(mode, aes_encrypt_mysql(mode, input, key, iv), key, iv) == input FROM encryption_test;
|
||||
|
||||
SELECT 'Strict mode without key folding and proper key and iv lengths checks.';
|
||||
SELECT 'aes-128-cbc' as mode, decrypt(mode, encrypt(mode, input, key16, iv), key16, iv) == input FROM encryption_test;
|
||||
SELECT 'aes-192-cbc' as mode, decrypt(mode, encrypt(mode, input, key24, iv), key24, iv) == input FROM encryption_test;
|
||||
SELECT 'aes-256-cbc' as mode, decrypt(mode, encrypt(mode, input, key32, iv), key32, iv) == input FROM encryption_test;
|
||||
|
||||
SELECT 'aes-128-cfb1' as mode, decrypt(mode, encrypt(mode, input, key16, iv), key16, iv) == input FROM encryption_test;
|
||||
SELECT 'aes-192-cfb1' as mode, decrypt(mode, encrypt(mode, input, key24, iv), key24, iv) == input FROM encryption_test;
|
||||
SELECT 'aes-256-cfb1' as mode, decrypt(mode, encrypt(mode, input, key32, iv), key32, iv) == input FROM encryption_test;
|
||||
|
||||
SELECT 'aes-128-cfb8' as mode, decrypt(mode, encrypt(mode, input, key16, iv), key16, iv) == input FROM encryption_test;
|
||||
SELECT 'aes-192-cfb8' as mode, decrypt(mode, encrypt(mode, input, key24, iv), key24, iv) == input FROM encryption_test;
|
||||
SELECT 'aes-256-cfb8' as mode, decrypt(mode, encrypt(mode, input, key32, iv), key32, iv) == input FROM encryption_test;
|
||||
|
||||
SELECT 'aes-128-cfb128' as mode, decrypt(mode, encrypt(mode, input, key16, iv), key16, iv) == input FROM encryption_test;
|
||||
SELECT 'aes-192-cfb128' as mode, decrypt(mode, encrypt(mode, input, key24, iv), key24, iv) == input FROM encryption_test;
|
||||
SELECT 'aes-256-cfb128' as mode, decrypt(mode, encrypt(mode, input, key32, iv), key32, iv) == input FROM encryption_test;
|
||||
|
||||
SELECT 'aes-128-ctr' as mode, decrypt(mode, encrypt(mode, input, key16, iv), key16, iv) == input FROM encryption_test;
|
||||
SELECT 'aes-192-ctr' as mode, decrypt(mode, encrypt(mode, input, key24, iv), key24, iv) == input FROM encryption_test;
|
||||
SELECT 'aes-256-ctr' as mode, decrypt(mode, encrypt(mode, input, key32, iv), key32, iv) == input FROM encryption_test;
|
||||
|
||||
SELECT 'aes-128-ecb' as mode, decrypt(mode, encrypt(mode, input, key16), key16) == input FROM encryption_test;
|
||||
SELECT 'aes-192-ecb' as mode, decrypt(mode, encrypt(mode, input, key24), key24) == input FROM encryption_test;
|
||||
SELECT 'aes-256-ecb' as mode, decrypt(mode, encrypt(mode, input, key32), key32) == input FROM encryption_test;
|
||||
|
||||
SELECT 'aes-128-ofb' as mode, decrypt(mode, encrypt(mode, input, key16, iv), key16, iv) == input FROM encryption_test;
|
||||
SELECT 'aes-192-ofb' as mode, decrypt(mode, encrypt(mode, input, key24, iv), key24, iv) == input FROM encryption_test;
|
||||
SELECT 'aes-256-ofb' as mode, decrypt(mode, encrypt(mode, input, key32, iv), key32, iv) == input FROM encryption_test;
|
||||
|
||||
SELECT 'GCM mode with IV';
|
||||
SELECT 'aes-128-gcm' as mode, decrypt(mode, encrypt(mode, input, key16, iv), key16, iv) == input FROM encryption_test;
|
||||
SELECT 'aes-192-gcm' as mode, decrypt(mode, encrypt(mode, input, key24, iv), key24, iv) == input FROM encryption_test;
|
||||
SELECT 'aes-256-gcm' as mode, decrypt(mode, encrypt(mode, input, key32, iv), key32, iv) == input FROM encryption_test;
|
||||
|
||||
SELECT 'GCM mode with IV and AAD';
|
||||
SELECT 'aes-128-gcm' as mode, decrypt(mode, encrypt(mode, input, key16, iv, 'AAD'), key16, iv, 'AAD') == input FROM encryption_test;
|
||||
SELECT 'aes-192-gcm' as mode, decrypt(mode, encrypt(mode, input, key24, iv, 'AAD'), key24, iv, 'AAD') == input FROM encryption_test;
|
||||
SELECT 'aes-256-gcm' as mode, decrypt(mode, encrypt(mode, input, key32, iv, 'AAD'), key32, iv, 'AAD') == input FROM encryption_test;
|
||||
|
||||
|
||||
-- based on https://github.com/openssl/openssl/blob/master/demos/evp/aesgcm.c#L20
|
||||
WITH
|
||||
unhex('eebc1f57487f51921c0465665f8ae6d1658bb26de6f8a069a3520293a572078f') as key,
|
||||
unhex('67ba0510262ae487d737ee6298f77e0c') as tag,
|
||||
unhex('99aa3e68ed8173a0eed06684') as iv,
|
||||
unhex('f56e87055bc32d0eeb31b2eacc2bf2a5') as plaintext,
|
||||
unhex('4d23c3cec334b49bdb370c437fec78de') as aad,
|
||||
unhex('f7264413a84c0e7cd536867eb9f21736') as ciphertext
|
||||
SELECT
|
||||
hex(decrypt('aes-256-gcm', concat(ciphertext, tag), key, iv, aad)) as plaintext_actual,
|
||||
plaintext_actual = hex(plaintext);
147
tests/queries/0_stateless/01318_encrypt.reference
Normal file
@ -0,0 +1,147 @@
|
||||
UInt64 5417DEA8D67A1A03FD561809C62402FF
|
||||
Float64 9B66D0AA685DC0F1EFFA2E385F7EA2F2
|
||||
Decimal64 5417DEA8D67A1A03FD561809C62402FF
|
||||
MySQL-compatible mode, with key folding, no length checks, etc.
|
||||
aes-128-cbc B2EB028BC2236566A946085E65A5632B
|
||||
aes-128-cbc 25026C55954363AEF90325822218C370
|
||||
aes-128-cbc 683051259880871EA8EBFBBF5360D1DA96D967450DFEFA45C89C8B2D59121602C1C5A54AAB8C95EC53F46E5A021BEDE7B5B2B9E83A416726F0DD750F6ACE9C654C986C3B3C9BEB497F54BFA2EF1B107EF204E7508C4E2D37797641404E51D496DFE477A49DCECB2EB47FC6BB6A13CF72AD19E99CEE7656D3EA29BDBC673879D7814D158FB8CB0760DFE89943BE3234C1
|
||||
aes-192-cbc 829FA9DAF77594921A16494EC005AD29
|
||||
aes-192-cbc 20727A7264B5DD601ECFE40FB9CF50B0
|
||||
aes-192-cbc BBFE507F31FF227F74EFA831CCE338443947492DD8141840B0C3D006404836574040AC80DD243311E1A791BB0C5C02AB4DF85BA39B102056DA75CF1E32BAA8836E616D27542F84EA4792F41CD1180E0FF66ACACEDAC4AFD0D2031771C370D413A077DC755F5AF38A441950958216B1130BBF6265E1CE824A6E9B0EE76993495535654B10344A201171D9F1A788FBB45A
|
||||
aes-256-cbc 8C0F6D5F2B0E751EC2033C5CA08E99F7
|
||||
aes-256-cbc D907F72499D48DB36850AF2C038CEF62
|
||||
aes-256-cbc A398D86A880BA4DE1FBA1ABBD38B9DA3B038B198134E798A4CF134029DB18B0A53F9A7177DAA5270EAD7448C56EB96351745A779A56B42A848FE265BFAB85BF349580A9E1751E627AEE80A8B0FC9046F18B8CF575358E21F5E353F115B2BF87984DB199744D4A83E58AD4764B6DFA92F933E0E1AA1F2AA95E4A9662C9BD8F1AC8D79BF531E77EDDB1A25CCD52D4E994D
|
||||
aes-128-cfb1
|
||||
aes-128-cfb1 78BF3E26
|
||||
aes-128-cfb1 4A9D5DC0463F6C4E353E20ED62EFE9B9470882BEFE403CDCEF73019133EAA6B38E92C8C8D0BA46DFEE332A4D1003481EF5E05AB30244ECBFB46E1FD878377D9A8209630C2304D42B2B8F545841535DE3C3D7FC6DD839EB8C35D9CB7172D0F5B9AE7EB3E1BE2F1E42007BA76FBBFE9B38225071468717E9C8EFBA73FDA016A533F709B1B4B18AFD4D85
|
||||
aes-192-cfb1
|
||||
aes-192-cfb1 6B44342A
|
||||
aes-192-cfb1 5D93C18B6821DA795910E27BA92A0F6C1BB74F924C5D369D4DB4697AC9F2F2F9F7159EC34C66260DB6BEE4BE13F51344EDC640F10B6ED64D1DD891FF8542ECA6B9CA7BB64DCA160C5460CE1F1BF602C16B571E35FBFFD4F26EC34FBBCE14D9C56ABE18779C9DC5266601573B4D25B188E0778EE77C98B0B16F65623BBB834F2B254B84D1B891ED4105
|
||||
aes-256-cfb1
|
||||
aes-256-cfb1 51860DF1
|
||||
aes-256-cfb1 687FB4B773E5C87F8B42E6A9B2538EC3D1B302B11BCECC0F846B2D5BB3050C41BAF43B29271F663035A27863C913C7149B5E1CF08E803616B9B649EB33C63F66EF608876A5BB43ABDD310E40597DDC93E88E4465663D7E967A0E1EA68C98CD5E039B08843EDE8E11A66DBBA67F3D4844EB0270732BE69ADFEF6DC6E801E100479AB86AFE3447454841
|
||||
aes-128-cfb8
|
||||
aes-128-cfb8 0EC82D99
|
||||
aes-128-cfb8 2DDE927A331C8482A453901E6EA1119746A5E6E7452DDC1349973A04433AD56C3473D10EFC5030B9BDC2549D607D174469134D73AC325C2B6E2BDF8F4D323B82F37222FC05C199EDA9693490EFA52427B00E872F9D89FC2262147296B5957BE8EA8FF2A6BF5BB3A6537C0A81D8BBC671E716C3B52504F2D567031AAC33B4434677BAF0944E883961DA
|
||||
aes-192-cfb8
|
||||
aes-192-cfb8 054CD2E8
|
||||
aes-192-cfb8 26AC354F7232BD5A0B3CDC241EFF3ED9258E118FC0301E1CA1A745FC20F029066D1D3DA5368A2FE7B589CD6242F68546999DF68A0E1DE018B5B3DCD5CA911506FC6EFADC769CB6CFE2A91749C2DBA06D4614E351A4AAC58C381344DB44E3A83F31A299823B2158C4E65B457072CFBAD4D14FE9960876245E840117E8B39018D6D34C4832510A1992BD
|
||||
aes-256-cfb8
|
||||
aes-256-cfb8 7FA03B1B
|
||||
aes-256-cfb8 5C67ABAE9944F8BE6C35F1B348CF2E112ECF45349EA2BCFC1789EA89B7298998E8886E9147FA9AEBC3DFBEFB3320C1661251A9129DBC14649D88983371D10185E6C6D0C935438344B161999191C05CA805E7C5A7410C50370FE3347CDE4A21F5089831116701B324A5CBB24EE604F043259B8898976B807DEB3544951C0AB2C2CE55DE964B4BBD285E
|
||||
aes-128-cfb128
|
||||
aes-128-cfb128 0EAAFAF5
|
||||
aes-128-cfb128 2DA7E3F5CD13148BED988533A2560F52959044EC2FF38A1D1A66DB2B20635FC8800060DA0062E0399CFE059E5E687F4BBA5E7182A4D79F18317B970708F079A59771C231EBA359741565B903BA820EE3EA07249777E745387B9774EE495940A50121A617B20768AA3A1A78AC9D49983E7BD43CD7BD21504640EAB23F57AB9E5B6260D875B665A63359
|
||||
aes-192-cfb128
|
||||
aes-192-cfb128 053E029A
|
||||
aes-192-cfb128 26331B9AEF235360655730F3D8905479AEACC18B2FFCC7FF355DBA918A2B09C5FEEE817C929400A3E412A7528EB6D79846B687858E250AD54A9913CB81009AC55E391163ECCEF6DA0095C4C57B2C913D70B82C0B14ADD59DD559A039B48A47C86142D15922E86FE2586707F689DFD962D2B96E3571151D642A8E8CC2F2CC09D17F009592B0963AD2AB
|
||||
aes-256-cfb128
|
||||
aes-256-cfb128 7FB039F7
|
||||
aes-256-cfb128 5CBD20F7ABD3AC41FCAA1A5C0E119E2BB5174FDACA4353FFA5D1BC2928CE015E08C7D813C51A9493902FD4BF6A864FA6F26B220C8FD21B846C90453241F369B170B6DAAF5B7E10AF025EA6EBF3852BCDA6AA92DA086B13162E3CCCC859FE3A6A8782395247785951D5095305BE97196C4A8C7607CFC0191A4DEB72F085ECF759F5AA5CBD3BE60A7FF5
|
||||
aes-128-ecb FEA8CFDE6EE2C6E7A2CC6ADDC9F62C83
|
||||
aes-128-ecb 78B16CD4BE107660156124C5FEE6454A
|
||||
aes-128-ecb 67C0B119D96F18E2823968D42871B3D126D5DDD35074303974946BE81A246757C3ACAEBFE0590EC98C4F51469E9FE27A8F8A98749E4DCAEF02F2076AC4CEB317062C0531F5FD2A505FE62413D8B0900ECAB5B8E1909A4A38FF922E3302857A16CE8E6804ACBA36C5E00EF5054288922517E59A47D0A26451905DE9E391D683ABB5852B5611886A2EF662AC8A1E156D85
|
||||
aes-192-ecb 99BA10452392CF90CC4D24489213BE78
|
||||
aes-192-ecb EB9D63FB9A457DB400EDE00878E828B1
|
||||
aes-192-ecb 4ADC9AA9BDD0A70C9FAEEA565C0C3329E2D0D5A9BB5F48ADB440F2676173CBB099898BBDF3DE98BCE4C0D663916E8CF401B063AD51BF3110C2C318DECB62F3C87B564C61794F6B393761745626A58DC3485E3930E4145E35C343DB56FB51D831C9EDB07987939009EB4241A0E3BE9CF64E235081AB5EFBBE585FE547AC49F65E5D1E772DE16A0BC85D7C60CAC34094A8
|
||||
aes-256-ecb 42575C26B6D9838CF5BB0214CFA7CA31
|
||||
aes-256-ecb 08B5C9159FA1E2C986FE57CFFE4A5CD7
|
||||
aes-256-ecb 72FC92DD17DD5E0BA4B621C2F20B9B04C3F81A42BA8E34F1138EAC99C1FD43B85AD238B61B8B68389F432E734345CC26C21D1DCCA80EF4B267BAAEEFCB5A6A00A323693758C8E31DC84BF8E017C81825C51A2D30607174403B61B7D71A3FFBFC6905A977B496DDF09E1C2BDC49AF1AAA0FD3B130404A27F7915007B2E00646C8573137E8AE1DF231C0235F53C1E7A832
|
||||
aes-128-ofb
|
||||
aes-128-ofb 0EAAFAF5
|
||||
aes-128-ofb 2DA7E3F5CD13148BED988533A2560F523B04048D918E339B531EBE886FA60448A32056AE6599984C4FB6F41381A09E822470951A7B154A157C661BEF5116633B8CF39574CB5754368011C249A9A936AA7A2D75812B42E28259D219CE5A69E3B0CF8FEE19427B607E2D02F2A3ED184B4D1387CFCEEA2BD48FF9AB7091F5A7548B8C3601DF0CCBEEBDBC
|
||||
aes-192-ofb
|
||||
aes-192-ofb 053E029A
|
||||
aes-192-ofb 26331B9AEF235360655730F3D890547987BD7D29A17C0B076546820084C2F973C28E93589C68BFBFAC8D212F36A5809F0532ABEE022C1DEC985487DF146BCAAA1A82310DE8EF397A5121873D2335FAC47D05CA27A49048F55366D7AA6BBD4E64740CB36EC538B225D7667D796665E3EFD0BDBE0226F716388A39063A85CCD0969CFA52BE4B2F523603
|
||||
aes-256-ofb
|
||||
aes-256-ofb 7FB039F7
|
||||
aes-256-ofb 5CBD20F7ABD3AC41FCAA1A5C0E119E2BCD544279C69E49904DCC791C2D5A8542FE255641D9F79B6900744A4310F0965F1CC84147CE952A32837B9F0853EC7DDB3FCBF49EC5E7C3674AA38ED3A1FB212C56FBB1A0AEFBF8E8E3AE5C0B08E86E317E3A5A998A9EF062FF95977571804F40C1120E54AFDC495EF95D532BB76F6F5351285AAF302ACCA066
|
||||
Strict mode without key folding and proper key and iv lengths checks.
|
||||
aes-128-cbc C09B247E927C81D643CDCA58B2AD3F0D
|
||||
aes-128-cbc 676ED1EA792A8E2E4B0D3CF45A945D73
|
||||
aes-128-cbc 7FDC3DAECBD2C89E41561A04ED586244BE3266643877D721F80C78E6E5F0F195A450DC2548A8DB3253D9612DB116B4B50C3B1C2EEB93704942449C7A606DE2035813B83B533FF561A6781F306A8720AE6344F30B8AE4A81920C3A8A777310FF6246B914127983C8D2E951675E929F939F05E50AA0ED635A2564EB276DD428DCB0D6B7CD655E065210955BD373C555D2E
|
||||
aes-192-cbc 0735013389B1241D9316202CD7A618A2
|
||||
aes-192-cbc 5DC3B5ACD2CF676F968E12068BA8C675
|
||||
aes-192-cbc C6390AAB7AB3B7E6A15E8CA4907AE2B5B0D767B30B0BFFF87D76FF025C384669E1DB6769234B89E5CB365B6721D118534D4CDB33977D87FE22CE9D4CF546AF96ED35F558839AFC6748759F3A36B8C44B5232038F0528254EC5FFE58A68C5306C4C4F982FEE2F9955C6833747B2E093AE1F0BF19A4AB16F4429E5FFB2C17F70588A79B67301FDD6D2A731229FF25853B1
|
||||
aes-256-cbc 8C0F6D5F2B0E751EC2033C5CA08E99F7
|
||||
aes-256-cbc D907F72499D48DB36850AF2C038CEF62
|
||||
aes-256-cbc A398D86A880BA4DE1FBA1ABBD38B9DA3B038B198134E798A4CF134029DB18B0A53F9A7177DAA5270EAD7448C56EB96351745A779A56B42A848FE265BFAB85BF349580A9E1751E627AEE80A8B0FC9046F18B8CF575358E21F5E353F115B2BF87984DB199744D4A83E58AD4764B6DFA92F933E0E1AA1F2AA95E4A9662C9BD8F1AC8D79BF531E77EDDB1A25CCD52D4E994D
|
||||
aes-128-cfb1
|
||||
aes-128-cfb1 79A4880E
|
||||
aes-128-cfb1 5A83873C33073FB2AA84F0344C5828D833DE87B85BA3B7A5F27521C072C99359F1E95ABD2C98E02712DAA23F27BDFB28089152BFD4074E1AE3BEF472EE7518FCD824C67FA767142E5BEF00D089F2BB1A31F555CE6DFBAA7D0698C9016AEA1BCF2296DB5820B36E397DD8546874C4A2135C02877828478785F536345EBAD3541D484DED181587D043B1
|
||||
aes-192-cfb1
|
||||
aes-192-cfb1 AECB3AEE
|
||||
aes-192-cfb1 8014FFC665907F3FAB5AA3C7BFEE808BFB744F7EF2AC7243D099ED3D188E6C457F497E875B023F070B7FBA2BDDB091D71CEBB4CD39B19FB61737EB06927A6406B53F6513B07ADE609FEA4D358E9396EA2BE2C3CF873C52B03BA1FAC1540E3491AABAE769C3DFF081224A1A5B8ECFBA098793D3E7FFFD5C810342E780577FF11B0A77E751F8940C1288
|
||||
aes-256-cfb1
|
||||
aes-256-cfb1 51860DF1
|
||||
aes-256-cfb1 687FB4B773E5C87F8B42E6A9B2538EC3D1B302B11BCECC0F846B2D5BB3050C41BAF43B29271F663035A27863C913C7149B5E1CF08E803616B9B649EB33C63F66EF608876A5BB43ABDD310E40597DDC93E88E4465663D7E967A0E1EA68C98CD5E039B08843EDE8E11A66DBBA67F3D4844EB0270732BE69ADFEF6DC6E801E100479AB86AFE3447454841
|
||||
aes-128-cfb8
|
||||
aes-128-cfb8 513D0801
|
||||
aes-128-cfb8 72B632E6010A526E5F7EFEC4ABFF87E2087FB91159399FCF81639B104B5CFD92D7DC4A6FDD1946FCD7883D88A65B3DAB050467886CFF35B33035C7671F85EBEDB7D934A93CE9EECEE251C95E33CC1E7EAB7F38FC37B1BE08F675CBD446B8B4856363DE1BD6976546DAB4A1125BE5A0516C9BCEEF99BC1EE20539160A973771C01EF45D7A8A78F5D3AE
|
||||
aes-192-cfb8
|
||||
aes-192-cfb8 F9B3F3EE
|
||||
aes-192-cfb8 DAB2433E165A0CD4261DCD2B77B9A2D6128D8054F02166B76CC45B681BC7556E48A06A1838C0F5F0BD6C766DBFEFC07769FF986E58F5B5DA9AE8AF1AFC64A038F8939DD51B585A3FFFD13948D6D716D574BAD875258E3E8D2D2CC589982B625E375B31C34B1F50E82125AB91F14ABCD984FA24057D1BB15395214DC830F125A6EDB3C43023F3F403DA
|
||||
aes-256-cfb8
|
||||
aes-256-cfb8 7FA03B1B
|
||||
aes-256-cfb8 5C67ABAE9944F8BE6C35F1B348CF2E112ECF45349EA2BCFC1789EA89B7298998E8886E9147FA9AEBC3DFBEFB3320C1661251A9129DBC14649D88983371D10185E6C6D0C935438344B161999191C05CA805E7C5A7410C50370FE3347CDE4A21F5089831116701B324A5CBB24EE604F043259B8898976B807DEB3544951C0AB2C2CE55DE964B4BBD285E
|
||||
aes-128-cfb128
|
||||
aes-128-cfb128 519F7556
|
||||
aes-128-cfb128 72926C569BC409EA1646E840082C18F28531DE0AEFA2F980ADCEA64A8BC57798CD549092928F115E702F325DA709A7DB445B6BE9C510452ABB78966B4D8EB622303113EB1BB955FB507A11B1092FEA78C5A05F71D8A9E553591AC6E72B833F1BECE8A5E1816742270C12495BD436C93C5DD1EC017A2EEFE5C5966A01D2BA0EED477D46234DFF333F02
|
||||
aes-192-cfb128
|
||||
aes-192-cfb128 F978FB28
|
||||
aes-192-cfb128 DA75E22875FB05DDE0145038A775E5BD6397D4DC5839CCF84C1F2D0983E87E06A7B1DB1E25FF9A3C0C7BE9FAF61AAC2DE08AAD9C08694F7E35F8E144967C0C798365AB4BA5DF2308014862C80617AF0BC6857B15806412A0E5CAB5B017196A3AFFB73DB33E3D3954FA1F8839501CD117003ED139231E15B28B5E73FBF84E3CC047A2DA0ADA74C25DE8
|
||||
aes-256-cfb128
|
||||
aes-256-cfb128 7FB039F7
|
||||
aes-256-cfb128 5CBD20F7ABD3AC41FCAA1A5C0E119E2BB5174FDACA4353FFA5D1BC2928CE015E08C7D813C51A9493902FD4BF6A864FA6F26B220C8FD21B846C90453241F369B170B6DAAF5B7E10AF025EA6EBF3852BCDA6AA92DA086B13162E3CCCC859FE3A6A8782395247785951D5095305BE97196C4A8C7607CFC0191A4DEB72F085ECF759F5AA5CBD3BE60A7FF5
|
||||
aes-128-ctr
|
||||
aes-128-ctr 519F7556
|
||||
aes-128-ctr 72926C569BC409EA1646E840082C18F24A5A4A7A178EBCBAC0F170479253ACD2A18968DEAB5148C9C2E878B8F4B7C82B6601A0CD5470AA74EA7A2AAD15107950FC50786C3DC483B8BCA6DF1E5CDD64332C2289641EF66271DFEF9D07B392D4762AEE8FD65E0E8E8CB4FBE26D9D94A207352046380BB44AF32EDB900F6796EA87FC4C52A092CEB229C7
|
||||
aes-192-ctr
|
||||
aes-192-ctr F978FB28
|
||||
aes-192-ctr DA75E22875FB05DDE0145038A775E5BDD158C02B77DD5840038E297E275500B3B8CA25422979B29D57F07B94359EF6F84552018BEC0D8CD444A852E31BCAD95811D396DA223589983AE09C80D27E690B3CCFEE1AD0E6F30493A8221698F12286F86F2202A7BABFC0F710B234994CDA7001E3CD237B171D663EB425D08D330557812F6D8897F1B30E93
|
||||
aes-256-ctr
|
||||
aes-256-ctr 7FB039F7
|
||||
aes-256-ctr 5CBD20F7ABD3AC41FCAA1A5C0E119E2B3259493C5A24845535AF1E97FACD790FB5C06D94F7292D617D38EC3319718C29F9CC533560228E892CC9C80867167AC8F26B09D34E5917A59B35D7DF30604B66F2E5891E22539F1B8581037933B623132FE4249191908457BB27E08CA8E2FE066B1119FD9DE6C7A604F4D2DDC4C64A6D37CDD7C1BA883EF759
|
||||
aes-128-ecb 4603E6862B0D94BBEC68E0B0DF51D60F
|
||||
aes-128-ecb 3004851B86D3F3950672DE7085D27C03
|
||||
aes-128-ecb E807F8C8D40A11F65076361AFC7D8B6844054F47B421F0AA0C0D693388A8779A08D71389C06C509D73FA533392DBD24F1600A9650F7F8D1D55F65E50312D48A6CFA69BDCB8D096AB47E8BDA65DC5DA6A5245536312D04882DC94ACF050F3E53A22CAC2D6C1962697DA311B595A086A8DA3EFDE5E1AE0A7009455F3CB6621EADB1E74727BF0F4AF0C4191FE504EA1BBB4
|
||||
aes-192-ecb 046D3CD33E7B61B75D1BE371CA44DD76
|
||||
aes-192-ecb 37CE413D3B953BCEB7FAD79837DB5F1C
|
||||
aes-192-ecb 60CCA1B9A0E5F2E88561E960309229385DB05D62A012FF35AF39D0577C3E31C1D55BB51C9DD3DA07F87E565031A40900745844A5CC79B143662BD392581DAFD17E829EB15C0D5D853B49FD5536F0E3F2E8B3337BBA63C06AAD32C282C98F42D45442CE8971CACE0BAC9852E656A6A7F6A8203EA1BC77AC3965CA192CC817D52A628217933A2B5C2264A71B6E60354997
|
||||
aes-256-ecb 42575C26B6D9838CF5BB0214CFA7CA31
|
||||
aes-256-ecb 08B5C9159FA1E2C986FE57CFFE4A5CD7
|
||||
aes-256-ecb 72FC92DD17DD5E0BA4B621C2F20B9B04C3F81A42BA8E34F1138EAC99C1FD43B85AD238B61B8B68389F432E734345CC26C21D1DCCA80EF4B267BAAEEFCB5A6A00A323693758C8E31DC84BF8E017C81825C51A2D30607174403B61B7D71A3FFBFC6905A977B496DDF09E1C2BDC49AF1AAA0FD3B130404A27F7915007B2E00646C8573137E8AE1DF231C0235F53C1E7A832
|
||||
aes-128-ofb
|
||||
aes-128-ofb 519F7556
|
||||
aes-128-ofb 72926C569BC409EA1646E840082C18F273A5DC2A93E6F58F6191847385035377DECB7C20E0E35B04724FA5B4473999A192B9C6125A441DA50AE071E7A0924B4770278CD219870320F9654177936CEBB5DBAC5E065596D56ED010E57FCC66B9A1FA541B96FCBEAEB4F8D177FEEAAFB9A78C0F1A55B15C1B1009F0EBBB4AEBF4D2DC537EA3012A99F7E4
|
||||
aes-192-ofb
|
||||
aes-192-ofb F978FB28
|
||||
aes-192-ofb DA75E22875FB05DDE0145038A775E5BD26133E8DFB8FC939B564D224E623B825FB59E66D34DA6499850F9A390CB7531D31AB0567D77BF2DD4EE7AA9FD39ACA53B589A12627292B4A707D2F3069DB542D797213C379EFBF700F6F638FB0A98307F2CBC7F73E1DC857885B8DF4F5BC190E65B77ED27BA283027262D953FDA346F8FD2435996BFC919171
|
||||
aes-256-ofb
|
||||
aes-256-ofb 7FB039F7
|
||||
aes-256-ofb 5CBD20F7ABD3AC41FCAA1A5C0E119E2BCD544279C69E49904DCC791C2D5A8542FE255641D9F79B6900744A4310F0965F1CC84147CE952A32837B9F0853EC7DDB3FCBF49EC5E7C3674AA38ED3A1FB212C56FBB1A0AEFBF8E8E3AE5C0B08E86E317E3A5A998A9EF062FF95977571804F40C1120E54AFDC495EF95D532BB76F6F5351285AAF302ACCA066
|
||||
GCM mode with IV
|
||||
aes-128-gcm 3D67D2B8D8F49A24C482085FEC494231
|
||||
aes-128-gcm C08B1CF60C5A2C92C55DAC62223CBA22C736446C
|
||||
aes-128-gcm E38605F61AE032292F25A35A6CDF6E827980625B9A50BB3C77CD2AD54DB9BE5578322CC55569D1B0C5B82577060C0053388F511DB7BF9C898BF4B05FB6C8C0D0F50449C06A2E89E086610CB2BAEF25B206312836884DCBCC6CD8329B2A43E2BA751183B1696AB3F070BE94FA823F1E1A5E2372A06E1AD2719AF37983D23FCD199820B7769E72EDC20AF48466DAEB6550DC7FDBA041F77D5231
|
||||
aes-192-gcm FC2C8C63A570E584AB71F19BA6E79A8F
|
||||
aes-192-gcm 9A6CF0FDA93F1614F982B5570CC1216D84E356BD
|
||||
aes-192-gcm B961E9FD9B940EBAD7ADDA75C9F198A40797A598AC7FA183AC58705EF6E4E295504D71B85D81978B4CE196AFFFA019B941F44B14DF06375688FCA986F2F3088C24E955392F0DB378F033052822D560CD8DDFF5472C66029E997AE2D63935DAAA10D6703E5AB627E8168F16CF5CDB1112DD2D49F10E087BA20831DCCE592465C95AAA5AF8F766BAEDC3FD3949EDD2E667333C83E58786504137
|
||||
aes-256-gcm E99DBEBC01F021758352D7FBD9039EFA
|
||||
aes-256-gcm 8742CE3A7B0595B281C712600D274CA881F47414
|
||||
aes-256-gcm A44FD73ACEB1A64BDE2D03808A2576EDBB6076F61614CC84A960CCBE55FBABF365671B7017BC89C8A2E0A633E0A05E40B2681B33AD3E7A0AC4925DBD9735C4D1C1E33726B1D6A83CBD337A65C50D7CC33CC4E64369D54C1B6AF3A82D206DF0698BEB61EF9AB2DF81B03DF3829A2EC42D667D87376B8A1351C69BB7A11CCBE50DA88ABA991E98D3BD71129682F35422AD73B05EC624357E77FC
|
||||
GCM mode with IV and AAD
|
||||
aes-128-gcm 5AB059BB98F087E8134B19E7EB5CD9C7
|
||||
aes-128-gcm C08B1CF67AD5D38FE0F3508689794961B8D1FAB8
|
||||
aes-128-gcm E38605F61AE032292F25A35A6CDF6E827980625B9A50BB3C77CD2AD54DB9BE5578322CC55569D1B0C5B82577060C0053388F511DB7BF9C898BF4B05FB6C8C0D0F50449C06A2E89E086610CB2BAEF25B206312836884DCBCC6CD8329B2A43E2BA751183B1696AB3F070BE94FA823F1E1A5E2372A06E1AD2719AF37983D23FCD199820B7769E72EDC20A0826DB2A479DB59F7216A9BDCBD0C989
|
||||
aes-192-gcm 04C13E4B1D62481ED22B3644595CB5DB
|
||||
aes-192-gcm 9A6CF0FD2B329B04EAD18301818F016DF8F77447
|
||||
aes-192-gcm B961E9FD9B940EBAD7ADDA75C9F198A40797A598AC7FA183AC58705EF6E4E295504D71B85D81978B4CE196AFFFA019B941F44B14DF06375688FCA986F2F3088C24E955392F0DB378F033052822D560CD8DDFF5472C66029E997AE2D63935DAAA10D6703E5AB627E8168F16CF5CDB1112DD2D49F10E087BA20831DCCE592465C95AAA5AF8F766BAEDC3668E035498D8C46FB662833CCC12C9D6
|
||||
aes-256-gcm E94F5F6ED4A99B741D492D7EA10B7147
|
||||
aes-256-gcm 8742CE3A3EA5153952DB4C0D94B501FE878FF9A7
|
||||
aes-256-gcm A44FD73ACEB1A64BDE2D03808A2576EDBB6076F61614CC84A960CCBE55FBABF365671B7017BC89C8A2E0A633E0A05E40B2681B33AD3E7A0AC4925DBD9735C4D1C1E33726B1D6A83CBD337A65C50D7CC33CC4E64369D54C1B6AF3A82D206DF0698BEB61EF9AB2DF81B03DF3829A2EC42D667D87376B8A1351C69BB7A11CCBE50DA88ABA991E98D3BD712F56268961DDAB59FA4D2B50578602C4
|
||||
Nullable and LowCardinality
|
||||
Nullable(String) \N
|
||||
Nullable(String) 7FB039F7
|
||||
LowCardinality(String) 7FB039F7
|
||||
F7264413A84C0E7CD536867EB9F2173667BA0510262AE487D737EE6298F77E0C 1
156
tests/queries/0_stateless/01318_encrypt.sql
Normal file
@ -0,0 +1,156 @@
|
||||
--- aes_encrypt_mysql(mode, plaintext, key[, init_vector])
|
||||
-- MySQL-compatible encryption: only ecb, cbc, cfb1, cfb8, cfb128 and ofb modes are supported,
|
||||
-- just like for MySQL
|
||||
-- https://dev.mysql.com/doc/refman/8.0/en/encryption-functions.html#function_aes-encrypt
|
||||
-- https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_block_encryption_mode
|
||||
-- Please note that for keys that exceed mode-specific length, keys are folded in a MySQL-specific way,
|
||||
-- meaning that whole key is used, but effective key length is still determined by mode.
|
||||
-- When the key doesn't exceed the default mode length, the encryption result equals that of AES_encrypt().
|
||||
|
||||
-----------------------------------------------------------------------------------------
|
||||
-- error cases
|
||||
-----------------------------------------------------------------------------------------
|
||||
SELECT aes_encrypt_mysql(); --{serverError 42} not enough arguments
|
||||
SELECT aes_encrypt_mysql('aes-128-ecb'); --{serverError 42} not enough arguments
|
||||
SELECT aes_encrypt_mysql('aes-128-ecb', 'text'); --{serverError 42} not enough arguments
|
||||
|
||||
-- Mode
|
||||
SELECT aes_encrypt_mysql(789, 'text', 'key'); --{serverError 43} bad mode type
|
||||
SELECT aes_encrypt_mysql('blah blah blah', 'text', 'key'); -- {serverError 36} garbage mode value
|
||||
SELECT aes_encrypt_mysql('des-ede3-ecb', 'text', 'key'); -- {serverError 36} bad mode value of valid cipher name
|
||||
SELECT aes_encrypt_mysql('aes-128-gcm', 'text', 'key'); -- {serverError 36} mode is not supported by _mysql-functions
|
||||
|
||||
SELECT encrypt(789, 'text', 'key'); --{serverError 43} bad mode type
|
||||
SELECT encrypt('blah blah blah', 'text', 'key'); -- {serverError 36} garbage mode value
|
||||
SELECT encrypt('des-ede3-ecb', 'text', 'key'); -- {serverError 36} bad mode value of valid cipher name
|
||||
|
||||
|
||||
-- Key
|
||||
SELECT aes_encrypt_mysql('aes-128-ecb', 'text', 456); --{serverError 43} bad key type
|
||||
SELECT aes_encrypt_mysql('aes-128-ecb', 'text', 'key'); -- {serverError 36} key is too short
|
||||
|
||||
SELECT encrypt('aes-128-ecb', 'text'); --{serverError 42} key is missing
|
||||
SELECT encrypt('aes-128-ecb', 'text', 456); --{serverError 43} bad key type
|
||||
SELECT encrypt('aes-128-ecb', 'text', 'key'); -- {serverError 36} key is too short
|
||||
SELECT encrypt('aes-128-ecb', 'text', 'keykeykeykeykeykeykeykeykeykeykeykey'); -- {serverError 36} key is too long
|
||||
|
||||
-- IV
|
||||
SELECT aes_encrypt_mysql('aes-128-ecb', 'text', 'key', 1011); --{serverError 43} bad IV type 6
|
||||
SELECT aes_encrypt_mysql('aes-128-ecb', 'text', 'key', 'iv'); --{serverError 36} IV is too short 4
|
||||
|
||||
SELECT encrypt('aes-128-cbc', 'text', 'keykeykeykeykeyk', 1011); --{serverError 43} bad IV type 1
|
||||
SELECT encrypt('aes-128-cbc', 'text', 'keykeykeykeykeyk', 'iviviviviviviviviviviviviviviviviviviviviv'); --{serverError 36} IV is too long 3
|
||||
SELECT encrypt('aes-128-cbc', 'text', 'keykeykeykeykeyk', 'iv'); --{serverError 36} IV is too short 2
|
||||
|
||||
--AAD
|
||||
SELECT aes_encrypt_mysql('aes-128-ecb', 'text', 'key', 'IV', 1213); --{serverError 42} too many arguments
|
||||
|
||||
SELECT encrypt('aes-128-ecb', 'text', 'key', 'IV', 1213); --{serverError 43} bad AAD type
|
||||
SELECT encrypt('aes-128-gcm', 'text', 'key', 'IV', 1213); --{serverError 43} bad AAD type
|
||||
|
||||
-----------------------------------------------------------------------------------------
|
||||
-- Valid cases
|
||||
-----------------------------------------------------------------------------------------
|
||||
|
||||
SELECT 'UInt64', hex(aes_encrypt_mysql('aes-128-ecb', 123456789101112, 'keykeykeykeykeykeykeykeykeykeyke'));
|
||||
SELECT 'Float64', hex(aes_encrypt_mysql('aes-128-ecb', 1234567891011.12, 'keykeykeykeykeykeykeykeykeykeyke'));
|
||||
SELECT 'Decimal64', hex(aes_encrypt_mysql('aes-128-ecb', toDecimal64(1234567891011.12, 2), 'keykeykeykeykeykeykeykeykeykeyke'));
|
||||
|
||||
-----------------------------------------------------------------------------------------
|
||||
-- Validate against predefined ciphertext,plaintext,key and IV for MySQL compatibility mode
|
||||
-----------------------------------------------------------------------------------------
|
||||
CREATE TABLE encryption_test
(
    input String,
    key String DEFAULT unhex('fb9958e2e897ef3fdb49067b51a24af645b3626eed2f9ea1dc7fd4dd71b7e38f9a68db2a3184f952382c783785f9d77bf923577108a88adaacae5c141b1576b0'),
    iv String DEFAULT unhex('8CA3554377DFF8A369BC50A89780DD85'),
    key32 String DEFAULT substring(key, 1, 32),
    key24 String DEFAULT substring(key, 1, 24),
    key16 String DEFAULT substring(key, 1, 16)
) Engine = Memory;

INSERT INTO encryption_test (input)
VALUES (''), ('text'), ('What Is ClickHouse? ClickHouse is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).');

SELECT 'MySQL-compatible mode, with key folding, no length checks, etc.';
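-- (Editor's addition, a minimal cross-check sketch rather than part of the original test: MySQL's
--  AES_ENCRYPT('text', 'keykeykeykeykeyk') with the default block_encryption_mode of aes-128-ecb is
--  expected to produce the same bytes as the call below.)
SELECT hex(aes_encrypt_mysql('aes-128-ecb', 'text', 'keykeykeykeykeyk'));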
SELECT 'aes-128-cbc' as mode, hex(aes_encrypt_mysql(mode, input, key32, iv)) FROM encryption_test;
SELECT 'aes-192-cbc' as mode, hex(aes_encrypt_mysql(mode, input, key32, iv)) FROM encryption_test;
SELECT 'aes-256-cbc' as mode, hex(aes_encrypt_mysql(mode, input, key32, iv)) FROM encryption_test;

SELECT 'aes-128-cfb1' as mode, hex(aes_encrypt_mysql(mode, input, key32, iv)) FROM encryption_test;
SELECT 'aes-192-cfb1' as mode, hex(aes_encrypt_mysql(mode, input, key32, iv)) FROM encryption_test;
SELECT 'aes-256-cfb1' as mode, hex(aes_encrypt_mysql(mode, input, key32, iv)) FROM encryption_test;

SELECT 'aes-128-cfb8' as mode, hex(aes_encrypt_mysql(mode, input, key32, iv)) FROM encryption_test;
SELECT 'aes-192-cfb8' as mode, hex(aes_encrypt_mysql(mode, input, key32, iv)) FROM encryption_test;
SELECT 'aes-256-cfb8' as mode, hex(aes_encrypt_mysql(mode, input, key32, iv)) FROM encryption_test;

SELECT 'aes-128-cfb128' as mode, hex(aes_encrypt_mysql(mode, input, key32, iv)) FROM encryption_test;
SELECT 'aes-192-cfb128' as mode, hex(aes_encrypt_mysql(mode, input, key32, iv)) FROM encryption_test;
SELECT 'aes-256-cfb128' as mode, hex(aes_encrypt_mysql(mode, input, key32, iv)) FROM encryption_test;

SELECT 'aes-128-ecb' as mode, hex(aes_encrypt_mysql(mode, input, key32, iv)) FROM encryption_test;
SELECT 'aes-192-ecb' as mode, hex(aes_encrypt_mysql(mode, input, key32, iv)) FROM encryption_test;
SELECT 'aes-256-ecb' as mode, hex(aes_encrypt_mysql(mode, input, key32, iv)) FROM encryption_test;

SELECT 'aes-128-ofb' as mode, hex(aes_encrypt_mysql(mode, input, key32, iv)) FROM encryption_test;
SELECT 'aes-192-ofb' as mode, hex(aes_encrypt_mysql(mode, input, key32, iv)) FROM encryption_test;
SELECT 'aes-256-ofb' as mode, hex(aes_encrypt_mysql(mode, input, key32, iv)) FROM encryption_test;

SELECT 'Strict mode without key folding and with proper key and IV length checks.';
SELECT 'aes-128-cbc' as mode, hex(encrypt(mode, input, key16, iv)) FROM encryption_test;
SELECT 'aes-192-cbc' as mode, hex(encrypt(mode, input, key24, iv)) FROM encryption_test;
SELECT 'aes-256-cbc' as mode, hex(encrypt(mode, input, key32, iv)) FROM encryption_test;

SELECT 'aes-128-cfb1' as mode, hex(encrypt(mode, input, key16, iv)) FROM encryption_test;
SELECT 'aes-192-cfb1' as mode, hex(encrypt(mode, input, key24, iv)) FROM encryption_test;
SELECT 'aes-256-cfb1' as mode, hex(encrypt(mode, input, key32, iv)) FROM encryption_test;

SELECT 'aes-128-cfb8' as mode, hex(encrypt(mode, input, key16, iv)) FROM encryption_test;
SELECT 'aes-192-cfb8' as mode, hex(encrypt(mode, input, key24, iv)) FROM encryption_test;
SELECT 'aes-256-cfb8' as mode, hex(encrypt(mode, input, key32, iv)) FROM encryption_test;

SELECT 'aes-128-cfb128' as mode, hex(encrypt(mode, input, key16, iv)) FROM encryption_test;
SELECT 'aes-192-cfb128' as mode, hex(encrypt(mode, input, key24, iv)) FROM encryption_test;
SELECT 'aes-256-cfb128' as mode, hex(encrypt(mode, input, key32, iv)) FROM encryption_test;

SELECT 'aes-128-ctr' as mode, hex(encrypt(mode, input, key16, iv)) FROM encryption_test;
SELECT 'aes-192-ctr' as mode, hex(encrypt(mode, input, key24, iv)) FROM encryption_test;
SELECT 'aes-256-ctr' as mode, hex(encrypt(mode, input, key32, iv)) FROM encryption_test;

SELECT 'aes-128-ecb' as mode, hex(encrypt(mode, input, key16)) FROM encryption_test;
SELECT 'aes-192-ecb' as mode, hex(encrypt(mode, input, key24)) FROM encryption_test;
SELECT 'aes-256-ecb' as mode, hex(encrypt(mode, input, key32)) FROM encryption_test;

SELECT 'aes-128-ofb' as mode, hex(encrypt(mode, input, key16, iv)) FROM encryption_test;
SELECT 'aes-192-ofb' as mode, hex(encrypt(mode, input, key24, iv)) FROM encryption_test;
SELECT 'aes-256-ofb' as mode, hex(encrypt(mode, input, key32, iv)) FROM encryption_test;

SELECT 'GCM mode with IV';
SELECT 'aes-128-gcm' as mode, hex(encrypt(mode, input, key16, iv)) FROM encryption_test;
SELECT 'aes-192-gcm' as mode, hex(encrypt(mode, input, key24, iv)) FROM encryption_test;
SELECT 'aes-256-gcm' as mode, hex(encrypt(mode, input, key32, iv)) FROM encryption_test;

SELECT 'GCM mode with IV and AAD';
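-- (Editor's note: in GCM the AAD is authenticated but not encrypted, so supplying it changes only the
--  16-byte authentication tag appended to the ciphertext, not the encrypted payload itself.)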
SELECT 'aes-128-gcm' as mode, hex(encrypt(mode, input, key16, iv, 'AAD')) FROM encryption_test;
SELECT 'aes-192-gcm' as mode, hex(encrypt(mode, input, key24, iv, 'AAD')) FROM encryption_test;
SELECT 'aes-256-gcm' as mode, hex(encrypt(mode, input, key32, iv, 'AAD')) FROM encryption_test;

SELECT 'Nullable and LowCardinality';
WITH CAST(NULL as Nullable(String)) as input, 'aes-256-ofb' as mode SELECT toTypeName(input), hex(aes_encrypt_mysql(mode, input, key32, iv)) FROM encryption_test LIMIT 1;
WITH CAST('text' as Nullable(String)) as input, 'aes-256-ofb' as mode SELECT toTypeName(input), hex(aes_encrypt_mysql(mode, input, key32, iv)) FROM encryption_test LIMIT 1;
WITH CAST('text' as LowCardinality(String)) as input, 'aes-256-ofb' as mode SELECT toTypeName(input), hex(aes_encrypt_mysql(mode, input, key32, iv)) FROM encryption_test LIMIT 1;

-- based on https://github.com/openssl/openssl/blob/master/demos/evp/aesgcm.c#L20
WITH
    unhex('eebc1f57487f51921c0465665f8ae6d1658bb26de6f8a069a3520293a572078f') as key,
    unhex('67ba0510262ae487d737ee6298f77e0c') as tag,
    unhex('99aa3e68ed8173a0eed06684') as iv,
    unhex('f56e87055bc32d0eeb31b2eacc2bf2a5') as plaintext,
    unhex('4d23c3cec334b49bdb370c437fec78de') as aad,
    unhex('f7264413a84c0e7cd536867eb9f21736') as ciphertext
SELECT
    hex(encrypt('aes-256-gcm', plaintext, key, iv, aad)) as ciphertext_actual,
    ciphertext_actual = concat(hex(ciphertext), hex(tag));
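
-- (Editor's addition, a round-trip sketch under the assumption that decrypt() takes the same argument
--  order as encrypt(); it should return 1 for the vector above.)
WITH
    unhex('eebc1f57487f51921c0465665f8ae6d1658bb26de6f8a069a3520293a572078f') as key,
    unhex('99aa3e68ed8173a0eed06684') as iv,
    unhex('f56e87055bc32d0eeb31b2eacc2bf2a5') as plaintext,
    unhex('4d23c3cec334b49bdb370c437fec78de') as aad
SELECT decrypt('aes-256-gcm', encrypt('aes-256-gcm', plaintext, key, iv, aad), key, iv, aad) = plaintext;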
@ -0,0 +1 @@
102 es
@ -0,0 +1,15 @@
DROP TABLE IF EXISTS table_with_enum_column_for_csv_insert;

CREATE TABLE table_with_enum_column_for_csv_insert (
    Id Int32,
    Value Enum('ef' = 1, 'es' = 2)
) ENGINE=Memory();

SET input_format_csv_enum_as_number = 1;

INSERT INTO table_with_enum_column_for_csv_insert FORMAT CSV 102,2
SELECT * FROM table_with_enum_column_for_csv_insert;

SET input_format_csv_enum_as_number = 0;

DROP TABLE IF EXISTS table_with_enum_column_for_csv_insert;
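
-- (Editor's addition, an illustrative sketch with a hypothetical table name, not part of the original
--  test: with input_format_csv_enum_as_number disabled, the CSV field must carry the enum name itself.)
CREATE TABLE enum_by_name_example (Value Enum('ef' = 1, 'es' = 2)) ENGINE=Memory();
SET input_format_csv_enum_as_number = 0;
INSERT INTO enum_by_name_example FORMAT CSV 'es'
SELECT * FROM enum_by_name_example;
DROP TABLE enum_by_name_example;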