Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-24 16:42:05 +00:00)

Commit e35c49faa2: Merge branch 'master' into fasttest
@@ -287,7 +287,7 @@ endif ()

include(cmake/dbms_glob_sources.cmake)

if (OS_LINUX)
if (OS_LINUX OR OS_ANDROID)
    include(cmake/linux/default_libs.cmake)
elseif (OS_DARWIN)
    include(cmake/darwin/default_libs.cmake)

@@ -1,6 +1,9 @@
#include <common/getThreadId.h>

#if defined(OS_LINUX)
#if defined(OS_ANDROID)
#include <sys/types.h>
#include <unistd.h>
#elif defined(OS_LINUX)
#include <unistd.h>
#include <syscall.h>
#elif defined(OS_FREEBSD)
@@ -16,7 +19,9 @@ uint64_t getThreadId()
{
    if (!current_tid)
    {
#if defined(OS_LINUX)
#if defined(OS_ANDROID)
        current_tid = gettid();
#elif defined(OS_LINUX)
        current_tid = syscall(SYS_gettid); /// This call is always successful. - man gettid
#elif defined(OS_FREEBSD)
        current_tid = pthread_getthreadid_np();
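Reading the two hunks above as a whole: on Android the thread id comes from gettid(), on other Linux targets from the raw SYS_gettid syscall, and on FreeBSD from pthread_getthreadid_np(). A minimal sketch of the resulting helper, assembled from the diff context — the cached thread_local variable and the FreeBSD include are assumptions, not shown in this diff:

```cpp
// Sketch only: how the thread-id helper reads after this change.
// current_tid and the FreeBSD include are assumed, not part of the hunks above.
#include <cstdint>

#if defined(OS_ANDROID)
    #include <sys/types.h>
    #include <unistd.h>
#elif defined(OS_LINUX)
    #include <unistd.h>
    #include <syscall.h>
#elif defined(OS_FREEBSD)
    #include <pthread_np.h>  // assumption: declares pthread_getthreadid_np()
#endif

static thread_local uint64_t current_tid = 0;  // assumption: cached per thread

uint64_t getThreadId()
{
    if (!current_tid)
    {
#if defined(OS_ANDROID)
        current_tid = gettid();
#elif defined(OS_LINUX)
        current_tid = syscall(SYS_gettid); /// This call is always successful. - man gettid
#elif defined(OS_FREEBSD)
        current_tid = pthread_getthreadid_np();
#endif
    }
    return current_tid;
}
```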
@@ -9,7 +9,6 @@
#include <string.h>
#include <signal.h>
#include <cxxabi.h>
#include <execinfo.h>
#include <unistd.h>

#include <typeinfo>

@@ -1,4 +1,4 @@
option(ENABLE_AMQPCPP "Enable AMQPCPP" ${ENABLE_LIBRARIES})
option(ENABLE_AMQPCPP "Enalbe AMQP-CPP" ${ENABLE_LIBRARIES})

if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/AMQP-CPP/CMakeLists.txt")
    message (WARNING "submodule contrib/AMQP-CPP is missing. to fix try run: \n git submodule update --init --recursive")
@@ -11,7 +11,12 @@ else ()
    set (BUILTINS_LIBRARY "-lgcc")
endif ()

if (OS_ANDROID)
    # pthread and rt are included in libc
    set (DEFAULT_LIBS "${DEFAULT_LIBS} ${BUILTINS_LIBRARY} ${COVERAGE_OPTION} -lc -lm -ldl")
else ()
    set (DEFAULT_LIBS "${DEFAULT_LIBS} ${BUILTINS_LIBRARY} ${COVERAGE_OPTION} -lc -lm -lrt -lpthread -ldl")
endif ()

message(STATUS "Default libraries: ${DEFAULT_LIBS}")

@@ -35,7 +40,11 @@ add_library(global-libs INTERFACE)
set(THREADS_PREFER_PTHREAD_FLAG ON)
find_package(Threads REQUIRED)

add_subdirectory(base/glibc-compatibility)
if (NOT OS_ANDROID)
    # Our compatibility layer doesn't build under Android, many errors in musl.
    add_subdirectory(base/glibc-compatibility)
endif ()

include (cmake/find/unwind.cmake)
include (cmake/find/cxx.cmake)
@@ -1,6 +1,11 @@
if (CMAKE_SYSTEM_NAME MATCHES "Linux")
    set (OS_LINUX 1)
    add_definitions(-D OS_LINUX)
elseif (CMAKE_SYSTEM_NAME MATCHES "Android")
    # This is a toy configuration and not in CI, so expect it to be broken.
    # Use cmake flags such as: -DCMAKE_TOOLCHAIN_FILE=~/ch2/android-ndk-r21d/build/cmake/android.toolchain.cmake -DANDROID_ABI=arm64-v8a -DANDROID_PLATFORM=28
    set (OS_ANDROID 1)
    add_definitions(-D OS_ANDROID)
elseif (CMAKE_SYSTEM_NAME MATCHES "FreeBSD")
    set (OS_FREEBSD 1)
    add_definitions(-D OS_FREEBSD)

@@ -17,7 +22,7 @@ if (CMAKE_CROSSCOMPILING)
        set (ENABLE_PARQUET OFF CACHE INTERNAL "")
        set (ENABLE_ICU OFF CACHE INTERNAL "")
        set (ENABLE_FASTOPS OFF CACHE INTERNAL "")
    elseif (OS_LINUX)
    elseif (OS_LINUX OR OS_ANDROID)
        if (ARCH_AARCH64)
            # FIXME: broken dependencies
            set (ENABLE_PROTOBUF OFF CACHE INTERNAL "")
@@ -33,6 +33,24 @@ then
    rm /output/clickhouse-odbc-bridge ||:

    cp -r ../docker/test/performance-comparison /output/scripts ||:

    # We have to know the revision that corresponds to this binary build.
    # It is not the nominal SHA from pull/*/head, but the pull/*/merge, which is
    # head merged to master by github, at some point after the PR is updated.
    # There are some quirks to consider:
    # - apparently the real SHA is not recorded in system.build_options;
    # - it can change at any time as github pleases, so we can't just record
    #   the SHA and use it later, it might become inaccessible;
    # - CI has an immutable snapshot of repository that it uses for all checks
    #   for a given nominal SHA, but it is not accessible outside Yandex.
    # This is why we add this repository snapshot from CI to the performance test
    # package.
    mkdir /output/ch
    git -C /output/ch init --bare
    git -C /output/ch remote add origin /build
    git -C /output/ch fetch --no-tags --depth 50 origin HEAD
    git -C /output/ch reset --soft FETCH_HEAD
    git -C /output/ch log -5
fi

# May be set for split build or for performance test.
@@ -11,6 +11,7 @@ services:
    environment:
      MINIO_ACCESS_KEY: minio
      MINIO_SECRET_KEY: minio123
      MINIO_PROMETHEUS_AUTH_TYPE: public
    command: server --address :9001 --certs-dir /certs /data1-1
    depends_on:
      - proxy1
@@ -498,7 +498,8 @@ create table queries engine File(TSVWithNamesAndTypes, 'report/queries.tsv')

        left, right, diff, stat_threshold,
        if(report_threshold > 0, report_threshold, 0.10) as report_threshold,
        test, query_index, query_display_name
        query_metric_stats.test test, query_metric_stats.query_index query_index,
        query_display_name
    from query_metric_stats
    left join file('analyze/report-thresholds.tsv', TSV,
        'test text, report_threshold float') thresholds

@@ -666,7 +667,8 @@ create view query_display_names as select * from

create table unstable_query_runs engine File(TSVWithNamesAndTypes,
        'unstable-query-runs.$version.rep') as
    select test, query_index, query_display_name, query_id
    select query_runs.test test, query_runs.query_index query_index,
        query_display_name, query_id
    from query_runs
    join queries_for_flamegraph on
        query_runs.test = queries_for_flamegraph.test
@@ -23,28 +23,7 @@ RUN apt-get update -y \
            brotli

COPY ./stress /stress
COPY run.sh /

ENV DATASETS="hits visits"

CMD dpkg -i package_folder/clickhouse-common-static_*.deb; \
    dpkg -i package_folder/clickhouse-common-static-dbg_*.deb; \
    dpkg -i package_folder/clickhouse-server_*.deb; \
    dpkg -i package_folder/clickhouse-client_*.deb; \
    dpkg -i package_folder/clickhouse-test_*.deb; \
    ln -s /usr/share/clickhouse-test/config/log_queries.xml /etc/clickhouse-server/users.d/; \
    ln -s /usr/share/clickhouse-test/config/part_log.xml /etc/clickhouse-server/config.d/; \
    echo "TSAN_OPTIONS='halt_on_error=1 history_size=7 ignore_noninstrumented_modules=1 verbosity=1'" >> /etc/environment; \
    echo "UBSAN_OPTIONS='print_stacktrace=1'" >> /etc/environment; \
    echo "ASAN_OPTIONS='malloc_context_size=10 verbosity=1 allocator_release_to_os_interval_ms=10000'" >> /etc/environment; \
    service clickhouse-server start && sleep 5 \
    && /s3downloader --dataset-names $DATASETS \
    && chmod 777 -R /var/lib/clickhouse \
    && clickhouse-client --query "ATTACH DATABASE IF NOT EXISTS datasets ENGINE = Ordinary" \
    && clickhouse-client --query "CREATE DATABASE IF NOT EXISTS test" \
    && service clickhouse-server restart && sleep 5 \
    && clickhouse-client --query "SHOW TABLES FROM datasets" \
    && clickhouse-client --query "SHOW TABLES FROM test" \
    && clickhouse-client --query "RENAME TABLE datasets.hits_v1 TO test.hits" \
    && clickhouse-client --query "RENAME TABLE datasets.visits_v1 TO test.visits" \
    && clickhouse-client --query "SHOW TABLES FROM test" \
    && ./stress --output-folder test_output --skip-func-tests "$SKIP_TESTS_OPTION"
CMD ["/bin/bash", "/run.sh"]
docker/test/stress/run.sh (new executable file, 56 lines)
@@ -0,0 +1,56 @@
#!/bin/bash

set -x

dpkg -i package_folder/clickhouse-common-static_*.deb
dpkg -i package_folder/clickhouse-common-static-dbg_*.deb
dpkg -i package_folder/clickhouse-server_*.deb
dpkg -i package_folder/clickhouse-client_*.deb
dpkg -i package_folder/clickhouse-test_*.deb

function wait_server()
{
    counter=0
    until clickhouse-client --query "SELECT 1"
    do
        if [ "$counter" -gt 120 ]
        then
            break
        fi
        sleep 0.5
        counter=$(($counter + 1))
    done
}

ln -s /usr/share/clickhouse-test/config/log_queries.xml /etc/clickhouse-server/users.d/
ln -s /usr/share/clickhouse-test/config/part_log.xml /etc/clickhouse-server/config.d/

echo "TSAN_OPTIONS='halt_on_error=1 history_size=7 ignore_noninstrumented_modules=1 verbosity=1'" >> /etc/environment
echo "UBSAN_OPTIONS='print_stacktrace=1'" >> /etc/environment
echo "ASAN_OPTIONS='malloc_context_size=10 verbosity=1 allocator_release_to_os_interval_ms=10000'" >> /etc/environment

service clickhouse-server start

wait_server

/s3downloader --dataset-names $DATASETS
chmod 777 -R /var/lib/clickhouse
clickhouse-client --query "ATTACH DATABASE IF NOT EXISTS datasets ENGINE = Ordinary"
clickhouse-client --query "CREATE DATABASE IF NOT EXISTS test"
service clickhouse-server restart

wait_server

clickhouse-client --query "SHOW TABLES FROM datasets"
clickhouse-client --query "SHOW TABLES FROM test"
clickhouse-client --query "RENAME TABLE datasets.hits_v1 TO test.hits"
clickhouse-client --query "RENAME TABLE datasets.visits_v1 TO test.visits"
clickhouse-client --query "SHOW TABLES FROM test"

./stress --output-folder test_output --skip-func-tests "$SKIP_TESTS_OPTION"

service clickhouse-server restart

wait_server

clickhouse-client --query "SELECT 'Server successfuly started'" > /test_output/alive_check.txt || echo 'Server failed to start' > /test_output/alive_check.txt
@@ -41,15 +41,6 @@ def run_func_test(cmd, output_prefix, num_processes, skip_tests_option):
    return pipes


def check_clickhouse_alive(cmd):
    try:
        logging.info("Checking ClickHouse still alive")
        check_call("{} --query \"select 'Still alive'\"".format(cmd), shell=True)
        return True
    except:
        return False


if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
    parser = argparse.ArgumentParser(description="ClickHouse script for running stresstest")

@@ -65,7 +56,6 @@ if __name__ == "__main__":
    args = parser.parse_args()
    func_pipes = []
    perf_process = None
    try:
        perf_process = run_perf_test(args.perf_test_cmd, args.perf_test_xml_path, args.output_folder)
        func_pipes = run_func_test(args.test_cmd, args.output_folder, args.num_parallel, args.skip_func_tests)

@@ -80,14 +70,4 @@ if __name__ == "__main__":
        logging.info("Finished %s from %s processes", len(retcodes), len(func_pipes))
        time.sleep(5)

        if not check_clickhouse_alive(args.client_cmd):
            raise Exception("Stress failed, results in logs")
        else:
            logging.info("Stress is ok")
    except Exception as ex:
        raise ex
    finally:
        if os.path.exists(args.server_log_folder):
            logging.info("Copying server log files")
            for log_file in os.listdir(args.server_log_folder):
                shutil.copy(os.path.join(args.server_log_folder, log_file), os.path.join(args.output_folder, log_file))
        logging.info("Stress test finished")
@@ -1,3 +1,5 @@
#if defined(OS_LINUX)

#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>

@@ -101,3 +103,5 @@ MemoryStatisticsOS::Data MemoryStatisticsOS::get() const
}

}

#endif

@@ -1,4 +1,5 @@
#pragma once
#if defined(OS_LINUX)
#include <cstdint>

@@ -38,3 +39,5 @@ private:
};

}

#endif
@@ -212,6 +212,21 @@
    M(NotCreatedLogEntryForMerge, "Log entry to merge parts in ReplicatedMergeTree is not created due to concurrent log update by another replica.") \
    M(CreatedLogEntryForMutation, "Successfully created log entry to mutate parts in ReplicatedMergeTree.") \
    M(NotCreatedLogEntryForMutation, "Log entry to mutate parts in ReplicatedMergeTree is not created due to concurrent log update by another replica.") \
    \
    M(S3ReadMicroseconds, "Time of GET and HEAD requests to S3 storage.") \
    M(S3ReadBytes, "Read bytes (incoming) in GET and HEAD requests to S3 storage.") \
    M(S3ReadRequestsCount, "Number of GET and HEAD requests to S3 storage.") \
    M(S3ReadRequestsErrors, "Number of non-throttling errors in GET and HEAD requests to S3 storage.") \
    M(S3ReadRequestsThrottling, "Number of 429 and 503 errors in GET and HEAD requests to S3 storage.") \
    M(S3ReadRequestsRedirects, "Number of redirects in GET and HEAD requests to S3 storage.") \
    \
    M(S3WriteMicroseconds, "Time of POST, DELETE, PUT and PATCH requests to S3 storage.") \
    M(S3WriteBytes, "Write bytes (outgoing) in POST, DELETE, PUT and PATCH requests to S3 storage.") \
    M(S3WriteRequestsCount, "Number of POST, DELETE, PUT and PATCH requests to S3 storage.") \
    M(S3WriteRequestsErrors, "Number of non-throttling errors in POST, DELETE, PUT and PATCH requests to S3 storage.") \
    M(S3WriteRequestsThrottling, "Number of 429 and 503 errors in POST, DELETE, PUT and PATCH requests to S3 storage.") \
    M(S3WriteRequestsRedirects, "Number of redirects in POST, DELETE, PUT and PATCH requests to S3 storage.") \


namespace ProfileEvents
{
@@ -372,6 +372,7 @@ struct Settings : public SettingsCollection<Settings>
    M(SettingBool, optimize_duplicate_order_by_and_distinct, true, "Remove duplicate ORDER BY and DISTINCT if it's possible", 0) \
    M(SettingBool, optimize_redundant_functions_in_order_by, true, "Remove functions from ORDER BY if its argument is also in ORDER BY", 0) \
    M(SettingBool, optimize_if_chain_to_multiif, false, "Replace if(cond1, then1, if(cond2, ...)) chains to multiIf. Currently it's not beneficial for numeric types.", 0) \
    M(SettingBool, optimize_monotonous_functions_in_order_by, true, "Replace monotonous function with its argument in ORDER BY", 0) \
    M(SettingBool, allow_experimental_alter_materialized_view_structure, false, "Allow atomic alter on Materialized views. Work in progress.", 0) \
    M(SettingBool, enable_early_constant_folding, true, "Enable query optimization where we analyze function and subqueries results and rewrite query if there're constants there", 0) \
    \
@@ -1,4 +1,4 @@
#if defined(__linux__) || defined(__FreeBSD__)
#if defined(OS_LINUX) || defined(__FreeBSD__)

#include "SSDCacheDictionary.h"

@@ -1,4 +1,4 @@
#if defined(__linux__) || defined(__FreeBSD__)
#if defined(OS_LINUX) || defined(__FreeBSD__)

#include "SSDComplexKeyCacheDictionary.h"

@@ -1,6 +1,6 @@
#pragma once

#if defined(__linux__) || defined(__FreeBSD__)
#if defined(OS_LINUX) || defined(__FreeBSD__)

#include "DictionaryStructure.h"
#include "IDictionary.h"

@@ -33,7 +33,7 @@ void registerDictionaries()
    registerDictionaryFlat(factory);
    registerDictionaryHashed(factory);
    registerDictionaryCache(factory);
#if defined(__linux__) || defined(__FreeBSD__)
#if defined(OS_LINUX) || defined(__FreeBSD__)
    registerDictionarySSDCache(factory);
    registerDictionarySSDComplexKeyCache(factory);
#endif
@@ -29,7 +29,7 @@
#include <Dictionaries/FlatDictionary.h>
#include <Dictionaries/HashedDictionary.h>
#include <Dictionaries/CacheDictionary.h>
#if defined(__linux__) || defined(__FreeBSD__)
#if defined(OS_LINUX) || defined(__FreeBSD__)
#include <Dictionaries/SSDCacheDictionary.h>
#include <Dictionaries/SSDComplexKeyCacheDictionary.h>
#endif

@@ -182,13 +182,13 @@ private:
            !executeDispatchSimple<DirectDictionary>(block, arguments, result, dict) &&
            !executeDispatchSimple<HashedDictionary>(block, arguments, result, dict) &&
            !executeDispatchSimple<CacheDictionary>(block, arguments, result, dict) &&
#if defined(__linux__) || defined(__FreeBSD__)
#if defined(OS_LINUX) || defined(__FreeBSD__)
            !executeDispatchSimple<SSDCacheDictionary>(block, arguments, result, dict) &&
#endif
            !executeDispatchComplex<ComplexKeyHashedDictionary>(block, arguments, result, dict) &&
            !executeDispatchComplex<ComplexKeyDirectDictionary>(block, arguments, result, dict) &&
            !executeDispatchComplex<ComplexKeyCacheDictionary>(block, arguments, result, dict) &&
#if defined(__linux__) || defined(__FreeBSD__)
#if defined(OS_LINUX) || defined(__FreeBSD__)
            !executeDispatchComplex<SSDComplexKeyCacheDictionary>(block, arguments, result, dict) &&
#endif
#if !defined(ARCADIA_BUILD)

@@ -338,13 +338,13 @@ private:
            !executeDispatch<HashedDictionary>(block, arguments, result, dict) &&
            !executeDispatch<DirectDictionary>(block, arguments, result, dict) &&
            !executeDispatch<CacheDictionary>(block, arguments, result, dict) &&
#if defined(__linux__) || defined(__FreeBSD__)
#if defined(OS_LINUX) || defined(__FreeBSD__)
            !executeDispatch<SSDCacheDictionary>(block, arguments, result, dict) &&
#endif
            !executeDispatchComplex<ComplexKeyHashedDictionary>(block, arguments, result, dict) &&
            !executeDispatchComplex<ComplexKeyDirectDictionary>(block, arguments, result, dict) &&
            !executeDispatchComplex<ComplexKeyCacheDictionary>(block, arguments, result, dict) &&
#if defined(__linux__) || defined(__FreeBSD__)
#if defined(OS_LINUX) || defined(__FreeBSD__)
            !executeDispatchComplex<SSDComplexKeyCacheDictionary>(block, arguments, result, dict) &&
#endif
#if !defined(ARCADIA_BUILD)

@@ -522,13 +522,13 @@ private:
            !executeDispatch<HashedDictionary>(block, arguments, result, dict) &&
            !executeDispatch<DirectDictionary>(block, arguments, result, dict) &&
            !executeDispatch<CacheDictionary>(block, arguments, result, dict) &&
#if defined(__linux__) || defined(__FreeBSD__)
#if defined(OS_LINUX) || defined(__FreeBSD__)
            !executeDispatch<SSDCacheDictionary>(block, arguments, result, dict) &&
#endif
            !executeDispatchComplex<ComplexKeyHashedDictionary>(block, arguments, result, dict) &&
            !executeDispatchComplex<ComplexKeyDirectDictionary>(block, arguments, result, dict) &&
            !executeDispatchComplex<ComplexKeyCacheDictionary>(block, arguments, result, dict) &&
#if defined(__linux__) || defined(__FreeBSD__)
#if defined(OS_LINUX) || defined(__FreeBSD__)
            !executeDispatchComplex<SSDComplexKeyCacheDictionary>(block, arguments, result, dict) &&
#endif
#if !defined(ARCADIA_BUILD)

@@ -862,13 +862,13 @@ private:
            !executeDispatch<HashedDictionary>(block, arguments, result, dict) &&
            !executeDispatch<DirectDictionary>(block, arguments, result, dict) &&
            !executeDispatch<CacheDictionary>(block, arguments, result, dict) &&
#if defined(__linux__) || defined(__FreeBSD__)
#if defined(OS_LINUX) || defined(__FreeBSD__)
            !executeDispatch<SSDCacheDictionary>(block, arguments, result, dict) &&
#endif
            !executeDispatchComplex<ComplexKeyHashedDictionary>(block, arguments, result, dict) &&
            !executeDispatchComplex<ComplexKeyDirectDictionary>(block, arguments, result, dict) &&
            !executeDispatchComplex<ComplexKeyCacheDictionary>(block, arguments, result, dict) &&
#if defined(__linux__) || defined(__FreeBSD__)
#if defined(OS_LINUX) || defined(__FreeBSD__)
            !executeDispatchComplex<SSDComplexKeyCacheDictionary>(block, arguments, result, dict) &&
#endif
#if !defined(ARCADIA_BUILD)

@@ -1123,13 +1123,13 @@ private:
            !executeDispatch<HashedDictionary>(block, arguments, result, dict) &&
            !executeDispatch<DirectDictionary>(block, arguments, result, dict) &&
            !executeDispatch<CacheDictionary>(block, arguments, result, dict) &&
#if defined(__linux__) || defined(__FreeBSD__)
#if defined(OS_LINUX) || defined(__FreeBSD__)
            !executeDispatch<SSDCacheDictionary>(block, arguments, result, dict) &&
#endif
            !executeDispatchComplex<ComplexKeyHashedDictionary>(block, arguments, result, dict) &&
            !executeDispatchComplex<ComplexKeyDirectDictionary>(block, arguments, result, dict) &&
            !executeDispatchComplex<ComplexKeyCacheDictionary>(block, arguments, result, dict) &&
#if defined(__linux__) || defined(__FreeBSD__)
#if defined(OS_LINUX) || defined(__FreeBSD__)
            !executeDispatchComplex<SSDComplexKeyCacheDictionary>(block, arguments, result, dict) &&
#endif
#if !defined(ARCADIA_BUILD)
@@ -1,4 +1,4 @@
#if defined(__linux__) || defined(__FreeBSD__)
#if defined(OS_LINUX) || defined(__FreeBSD__)

#include <Common/Exception.h>
#include <common/logger_useful.h>

@@ -1,6 +1,6 @@
#pragma once

#if defined(__linux__) || defined(__FreeBSD__)
#if defined(OS_LINUX) || defined(__FreeBSD__)

#include <condition_variable>
#include <future>

@@ -7,7 +7,7 @@
#include <cstring>
#include <cassert>

#if defined(__OpenBSD__) || defined(__FreeBSD__)
#if defined(__OpenBSD__) || defined(__FreeBSD__) || defined (__ANDROID__)
#   include <sys/endian.h>
#elif defined(__APPLE__)
#   include <libkern/OSByteOrder.h>
@@ -1,4 +1,4 @@
#if defined(__linux__) || defined(__FreeBSD__)
#if defined(OS_LINUX) || defined(__FreeBSD__)

#include <IO/ReadBufferAIO.h>
#include <IO/AIOContextPool.h>

@@ -1,6 +1,6 @@
#pragma once

#if defined(__linux__) || defined(__FreeBSD__)
#if defined(OS_LINUX) || defined(__FreeBSD__)

#include <IO/ReadBufferFromFileBase.h>
#include <IO/ReadBuffer.h>
@@ -4,6 +4,7 @@

# include <IO/ReadBufferFromIStream.h>
# include <IO/ReadBufferFromS3.h>
# include <Common/Stopwatch.h>

# include <aws/s3/S3Client.h>
# include <aws/s3/model/GetObjectRequest.h>

@@ -11,6 +12,12 @@

# include <utility>

namespace ProfileEvents
{
    extern const Event S3ReadMicroseconds;
    extern const Event S3ReadBytes;
}

namespace DB
{
namespace ErrorCodes

@@ -27,6 +34,7 @@ ReadBufferFromS3::ReadBufferFromS3(
{
}


bool ReadBufferFromS3::nextImpl()
{
    if (!initialized)

@@ -35,9 +43,17 @@ bool ReadBufferFromS3::nextImpl()
        initialized = true;
    }

    if (!impl->next())
    Stopwatch watch;
    auto res = impl->next();
    watch.stop();
    ProfileEvents::increment(ProfileEvents::S3ReadMicroseconds, watch.elapsedMicroseconds());

    if (!res)
        return false;
    internal_buffer = impl->buffer();

    ProfileEvents::increment(ProfileEvents::S3ReadBytes, internal_buffer.size());

    working_buffer = internal_buffer;
    return true;
}
@@ -4,6 +4,7 @@
#include <IO/HTTPCommon.h>
#include <IO/S3/PocoHTTPResponseStream.h>
#include <IO/S3/PocoHTTPResponseStream.cpp>
#include <Common/Stopwatch.h>
#include <aws/core/http/HttpRequest.h>
#include <aws/core/http/HttpResponse.h>
#include <aws/core/http/standard/StandardHttpResponse.h>

@@ -14,8 +15,24 @@
#include <Poco/Net/HTTPResponse.h>
#include <common/logger_useful.h>

namespace ProfileEvents
{
    extern const Event S3ReadMicroseconds;
    extern const Event S3ReadRequestsCount;
    extern const Event S3ReadRequestsErrors;
    extern const Event S3ReadRequestsThrottling;
    extern const Event S3ReadRequestsRedirects;

    extern const Event S3WriteMicroseconds;
    extern const Event S3WriteRequestsCount;
    extern const Event S3WriteRequestsErrors;
    extern const Event S3WriteRequestsThrottling;
    extern const Event S3WriteRequestsRedirects;
}

namespace DB::ErrorCodes
{
    extern const int NOT_IMPLEMENTED;
    extern const int TOO_MANY_REDIRECTS;
}

@@ -62,6 +79,46 @@ void PocoHTTPClient::MakeRequestInternal(
    auto uri = request.GetUri().GetURIString();
    LOG_DEBUG(log, "Make request to: {}", uri);

    enum class S3MetricType
    {
        Microseconds,
        Count,
        Errors,
        Throttling,
        Redirects,

        EnumSize,
    };

    auto selectMetric = [&request](S3MetricType type)
    {
        const ProfileEvents::Event events_map[][2] = {
            {ProfileEvents::S3ReadMicroseconds, ProfileEvents::S3WriteMicroseconds},
            {ProfileEvents::S3ReadRequestsCount, ProfileEvents::S3WriteRequestsCount},
            {ProfileEvents::S3ReadRequestsErrors, ProfileEvents::S3WriteRequestsErrors},
            {ProfileEvents::S3ReadRequestsThrottling, ProfileEvents::S3WriteRequestsThrottling},
            {ProfileEvents::S3ReadRequestsRedirects, ProfileEvents::S3WriteRequestsRedirects},
        };

        static_assert((sizeof(events_map) / sizeof(events_map[0])) == static_cast<unsigned int>(S3MetricType::EnumSize));

        switch (request.GetMethod())
        {
            case Aws::Http::HttpMethod::HTTP_GET:
            case Aws::Http::HttpMethod::HTTP_HEAD:
                return events_map[static_cast<unsigned int>(type)][0]; // Read
            case Aws::Http::HttpMethod::HTTP_POST:
            case Aws::Http::HttpMethod::HTTP_DELETE:
            case Aws::Http::HttpMethod::HTTP_PUT:
            case Aws::Http::HttpMethod::HTTP_PATCH:
                return events_map[static_cast<unsigned int>(type)][1]; // Write
        }

        throw Exception("Unsupported request method", ErrorCodes::NOT_IMPLEMENTED);
    };

    ProfileEvents::increment(selectMetric(S3MetricType::Count));

    const int MAX_REDIRECT_ATTEMPTS = 10;
    try
    {
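The selectMetric lambda above is the core of the new S3 accounting: it picks a counter from a two-dimensional table where the row is the metric type and the column is read (GET/HEAD) versus write (POST/DELETE/PUT/PATCH). A standalone sketch of the same lookup, with hypothetical names and no AWS or ClickHouse dependencies, just to make the mapping explicit:

```cpp
// Illustration only: mirrors the events_map lookup used by selectMetric above.
#include <cassert>
#include <cstddef>
#include <string>

enum class MetricType { Microseconds, Count, Errors, Throttling, Redirects, EnumSize };
enum class Method { Get, Head, Post, Delete, Put, Patch };

static std::string selectCounterName(Method method, MetricType type)
{
    // [metric type][0] = read-side counter, [metric type][1] = write-side counter.
    static const std::string counters[][2] = {
        {"S3ReadMicroseconds", "S3WriteMicroseconds"},
        {"S3ReadRequestsCount", "S3WriteRequestsCount"},
        {"S3ReadRequestsErrors", "S3WriteRequestsErrors"},
        {"S3ReadRequestsThrottling", "S3WriteRequestsThrottling"},
        {"S3ReadRequestsRedirects", "S3WriteRequestsRedirects"},
    };
    static_assert(sizeof(counters) / sizeof(counters[0]) == static_cast<size_t>(MetricType::EnumSize));

    const bool is_read = (method == Method::Get || method == Method::Head);
    return counters[static_cast<size_t>(type)][is_read ? 0 : 1];
}

int main()
{
    assert(selectCounterName(Method::Get, MetricType::Count) == "S3ReadRequestsCount");
    assert(selectCounterName(Method::Put, MetricType::Throttling) == "S3WriteRequestsThrottling");
}
```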
@@ -112,11 +169,15 @@ void PocoHTTPClient::MakeRequestInternal(
                poco_request.set(header_name, header_value);

            Poco::Net::HTTPResponse poco_response;

            Stopwatch watch;

            auto & request_body_stream = session->sendRequest(poco_request);

            if (request.GetContentBody())
            {
                LOG_TRACE(log, "Writing request body.");

                if (attempt > 0) /// rewind content body buffer.
                {
                    request.GetContentBody()->clear();

@@ -129,6 +190,9 @@ void PocoHTTPClient::MakeRequestInternal(
            LOG_TRACE(log, "Receiving response...");
            auto & response_body_stream = session->receiveResponse(poco_response);

            watch.stop();
            ProfileEvents::increment(selectMetric(S3MetricType::Microseconds), watch.elapsedMicroseconds());

            int status_code = static_cast<int>(poco_response.getStatus());
            LOG_DEBUG(log, "Response status: {}, {}", status_code, poco_response.getReason());

@@ -138,6 +202,8 @@ void PocoHTTPClient::MakeRequestInternal(
                uri = location;
                LOG_DEBUG(log, "Redirecting request to new location: {}", location);

                ProfileEvents::increment(selectMetric(S3MetricType::Redirects));

                continue;
            }

@@ -159,6 +225,15 @@ void PocoHTTPClient::MakeRequestInternal(

                response->SetClientErrorType(Aws::Client::CoreErrors::NETWORK_CONNECTION);
                response->SetClientErrorMessage(error_message);

                if (status_code == 429 || status_code == 503)
                { // API throttling
                    ProfileEvents::increment(selectMetric(S3MetricType::Throttling));
                }
                else
                {
                    ProfileEvents::increment(selectMetric(S3MetricType::Errors));
                }
            }
            else
                response->GetResponseStream().SetUnderlyingStream(std::make_shared<PocoHTTPResponseStream>(session, response_body_stream));

@@ -173,6 +248,8 @@ void PocoHTTPClient::MakeRequestInternal(
        tryLogCurrentException(log, fmt::format("Failed to make request to: {}", uri));
        response->SetClientErrorType(Aws::Client::CoreErrors::NETWORK_CONNECTION);
        response->SetClientErrorMessage(getCurrentExceptionMessage(false));

        ProfileEvents::increment(selectMetric(S3MetricType::Errors));
    }
}
}
@@ -22,6 +22,12 @@

namespace
{

const char * S3_LOGGER_TAG_NAMES[][2] = {
    {"AWSClient", "AWSClient"},
    {"AWSAuthV4Signer", "AWSClient (AWSAuthV4Signer)"},
};

const std::pair<DB::LogsLevel, Poco::Message::Priority> & convertLogLevel(Aws::Utils::Logging::LogLevel log_level)
{
    static const std::unordered_map<Aws::Utils::Logging::LogLevel, std::pair<DB::LogsLevel, Poco::Message::Priority>> mapping =

@@ -40,26 +46,46 @@ const std::pair<DB::LogsLevel, Poco::Message::Priority> & convertLogLevel(Aws::U
class AWSLogger final : public Aws::Utils::Logging::LogSystemInterface
{
public:
    AWSLogger()
    {
        for (auto [tag, name] : S3_LOGGER_TAG_NAMES)
            tag_loggers[tag] = &Poco::Logger::get(name);

        default_logger = tag_loggers[S3_LOGGER_TAG_NAMES[0][0]];
    }

    ~AWSLogger() final = default;

    Aws::Utils::Logging::LogLevel GetLogLevel() const final { return Aws::Utils::Logging::LogLevel::Trace; }

    void Log(Aws::Utils::Logging::LogLevel log_level, const char * tag, const char * format_str, ...) final // NOLINT
    {
        const auto & [level, prio] = convertLogLevel(log_level);
        LOG_IMPL(log, level, prio, "{}: {}", tag, format_str);
        callLogImpl(log_level, tag, format_str); /// FIXME. Variadic arguments?
    }

    void LogStream(Aws::Utils::Logging::LogLevel log_level, const char * tag, const Aws::OStringStream & message_stream) final
    {
        callLogImpl(log_level, tag, message_stream.str().c_str());
    }

    void callLogImpl(Aws::Utils::Logging::LogLevel log_level, const char * tag, const char * message)
    {
        const auto & [level, prio] = convertLogLevel(log_level);
        LOG_IMPL(log, level, prio, "{}: {}", tag, message_stream.str());
        if (tag_loggers.count(tag) > 0)
        {
            LOG_IMPL(tag_loggers[tag], level, prio, "{}", message);
        }
        else
        {
            LOG_IMPL(default_logger, level, prio, "{}: {}", tag, message);
        }
    }

    void Flush() final {}

private:
    Poco::Logger * log = &Poco::Logger::get("AWSClient");
    Poco::Logger * default_logger;
    std::unordered_map<String, Poco::Logger *> tag_loggers;
};

class S3AuthSigner : public Aws::Client::AWSAuthV4Signer

@@ -102,8 +128,10 @@ public:
private:
    const DB::HeaderCollection headers;
};

}


namespace DB
{
namespace ErrorCodes
@@ -1,4 +1,4 @@
#if defined(__linux__) || defined(__FreeBSD__)
#if defined(OS_LINUX) || defined(__FreeBSD__)

#include <IO/WriteBufferAIO.h>
#include <Common/MemorySanitizer.h>

@@ -1,6 +1,6 @@
#pragma once

#if defined(__linux__) || defined(__FreeBSD__)
#if defined(OS_LINUX) || defined(__FreeBSD__)

#include <IO/WriteBufferFromFileBase.h>
#include <IO/WriteBuffer.h>

@@ -17,6 +17,11 @@
# include <utility>


namespace ProfileEvents
{
    extern const Event S3WriteBytes;
}

namespace DB
{
// S3 protocol does not allow to have multipart upload with more than 10000 parts.

@@ -59,6 +64,8 @@ void WriteBufferFromS3::nextImpl()

    temporary_buffer->write(working_buffer.begin(), offset());

    ProfileEvents::increment(ProfileEvents::S3WriteBytes, offset());

    if (is_multipart)
    {
        last_part_size += offset();
@@ -1,6 +1,6 @@
#include <IO/createReadBufferFromFileBase.h>
#include <IO/ReadBufferFromFile.h>
#if defined(__linux__) || defined(__FreeBSD__)
#if defined(OS_LINUX) || defined(__FreeBSD__)
#include <IO/ReadBufferAIO.h>
#endif
#include <IO/MMapReadBufferFromFile.h>

@@ -24,7 +24,7 @@ std::unique_ptr<ReadBufferFromFileBase> createReadBufferFromFileBase(
    size_t estimated_size, size_t aio_threshold, size_t mmap_threshold,
    size_t buffer_size_, int flags_, char * existing_memory_, size_t alignment)
{
#if defined(__linux__) || defined(__FreeBSD__)
#if defined(OS_LINUX) || defined(__FreeBSD__)
    if (aio_threshold && estimated_size >= aio_threshold)
    {
        /// Attempt to open a file with O_DIRECT

@@ -1,6 +1,6 @@
#include <IO/createWriteBufferFromFileBase.h>
#include <IO/WriteBufferFromFile.h>
#if defined(__linux__) || defined(__FreeBSD__)
#if defined(OS_LINUX) || defined(__FreeBSD__)
#include <IO/WriteBufferAIO.h>
#endif
#include <Common/ProfileEvents.h>

@@ -20,7 +20,7 @@ std::unique_ptr<WriteBufferFromFileBase> createWriteBufferFromFileBase(const std
    size_t aio_threshold, size_t buffer_size_, int flags_, mode_t mode, char * existing_memory_,
    size_t alignment)
{
#if defined(__linux__) || defined(__FreeBSD__)
#if defined(OS_LINUX) || defined(__FreeBSD__)
    if (aio_threshold && estimated_size >= aio_threshold)
    {
        /// Attempt to open a file with O_DIRECT
@@ -23,7 +23,6 @@ namespace DB
namespace ErrorCodes
{
    extern const int LOGICAL_ERROR;
    extern const int SUPPORT_IS_DISABLED;
    extern const int INCORRECT_QUERY;
}

@@ -62,10 +61,6 @@ BlockIO InterpreterAlterQuery::execute()
            alter_commands.emplace_back(std::move(*alter_command));
        else if (auto partition_command = PartitionCommand::parse(command_ast))
        {
            if (partition_command->type == PartitionCommand::DROP_DETACHED_PARTITION
                && !context.getSettingsRef().allow_drop_detached)
                throw DB::Exception("Cannot execute query: DROP DETACHED PART is disabled "
                    "(see allow_drop_detached setting)", ErrorCodes::SUPPORT_IS_DISABLED);
            partition_commands.emplace_back(std::move(*partition_command));
        }
        else if (auto mut_command = MutationCommand::parse(command_ast))

@@ -90,6 +85,7 @@ BlockIO InterpreterAlterQuery::execute()

    if (!partition_commands.empty())
    {
        table->checkAlterPartitionIsPossible(partition_commands, metadata_snapshot, context.getSettingsRef());
        table->alterPartition(query_ptr, metadata_snapshot, partition_commands, context);
    }
src/Interpreters/MonotonicityCheckVisitor.h (new file, 142 lines)
@@ -0,0 +1,142 @@
#pragma once

#include <AggregateFunctions/AggregateFunctionFactory.h>
#include <DataTypes/DataTypeFactory.h>
#include <Functions/FunctionFactory.h>
#include <IO/WriteHelpers.h>
#include <Interpreters/InDepthNodeVisitor.h>
#include <Interpreters/IdentifierSemantic.h>
#include <Parsers/ASTFunction.h>
#include <Parsers/ASTIdentifier.h>
#include <Parsers/ASTOrderByElement.h>
#include <Parsers/ASTSelectQuery.h>
#include <Parsers/ASTTablesInSelectQuery.h>
#include <Parsers/IAST.h>
#include <Common/typeid_cast.h>

namespace DB
{

using Monotonicity = IFunctionBase::Monotonicity;

/// Checks from bottom to top if function composition is monotonous
class MonotonicityCheckMatcher
{
public:
    struct Data
    {
        const TablesWithColumns & tables;
        const Context & context;
        const std::unordered_set<String> & group_by_function_hashes;
        Monotonicity monotonicity{true, true, true};
        ASTIdentifier * identifier = nullptr;
        DataTypePtr arg_data_type = {};

        void reject() { monotonicity.is_monotonic = false; }
        bool isRejected() const { return !monotonicity.is_monotonic; }

        bool canOptimize(const ASTFunction & ast_function) const
        {
            /// if GROUP BY contains the same function ORDER BY shouldn't be optimized
            auto hash = ast_function.getTreeHash();
            String key = toString(hash.first) + '_' + toString(hash.second);
            if (group_by_function_hashes.count(key))
                return false;

            /// if ORDER BY contains aggregate function it shouldn't be optimized
            if (AggregateFunctionFactory::instance().isAggregateFunctionName(ast_function.name))
                return false;

            return true;
        }

        bool extractIdentifierAndType(const ASTFunction & ast_function)
        {
            if (identifier)
                return true;

            identifier = ast_function.arguments->children[0]->as<ASTIdentifier>();
            if (!identifier)
                return false;

            auto pos = IdentifierSemantic::getMembership(*identifier);
            if (!pos)
                pos = IdentifierSemantic::chooseTableColumnMatch(*identifier, tables, true);
            if (!pos)
                return false;

            if (auto data_type_and_name = tables[*pos].columns.tryGetByName(identifier->shortName()))
            {
                arg_data_type = data_type_and_name->type;
                return true;
            }

            return false;
        }
    };

    static void visit(const ASTPtr & ast, Data & data)
    {
        if (const auto * ast_function = ast->as<ASTFunction>())
            visit(*ast_function, data);
    }

    static void visit(const ASTFunction & ast_function, Data & data)
    {
        if (data.isRejected())
            return;

        /// TODO: monotonicity for fucntions of several arguments
        auto arguments = ast_function.arguments;
        if (arguments->children.size() != 1)
        {
            data.reject();
            return;
        }

        if (!data.canOptimize(ast_function))
        {
            data.reject();
            return;
        }

        const auto & function = FunctionFactory::instance().tryGet(ast_function.name, data.context);
        if (!function)
        {
            data.reject();
            return;
        }

        /// First time extract the most enclosed identifier and its data type
        if (!data.arg_data_type && !data.extractIdentifierAndType(ast_function))
        {
            data.reject();
            return;
        }

        ColumnsWithTypeAndName args;
        args.emplace_back(data.arg_data_type, "tmp");
        auto function_base = function->build(args);

        if (function_base && function_base->hasInformationAboutMonotonicity())
        {
            bool is_positive = data.monotonicity.is_positive;
            data.monotonicity = function_base->getMonotonicityForRange(*data.arg_data_type, Field(), Field());

            if (!is_positive)
                data.monotonicity.is_positive = !data.monotonicity.is_positive;
            data.arg_data_type = function_base->getReturnType();
        }
        else
            data.reject();
    }

    static bool needChildVisit(const ASTPtr &, const ASTPtr &)
    {
        return true;
    }
};

using MonotonicityCheckVisitor = ConstInDepthNodeVisitor<MonotonicityCheckMatcher, false>;

}
@@ -30,6 +30,7 @@
#include <Interpreters/AnyInputOptimize.h>
#include <Interpreters/RemoveInjectiveFunctionsVisitor.h>
#include <Interpreters/RedundantFunctionsInOrderByVisitor.h>
#include <Interpreters/MonotonicityCheckVisitor.h>

#include <Parsers/ASTExpressionList.h>
#include <Parsers/ASTFunction.h>

@@ -529,6 +530,46 @@ void optimizeDuplicateOrderByAndDistinct(ASTPtr & query, const Context & context
    DuplicateDistinctVisitor(distinct_data).visit(query);
}

/// Replace monotonous functions in ORDER BY if they don't participate in GROUP BY expression,
/// has a single argument and not an aggregate functions.
void optimizeMonotonousFunctionsInOrderBy(ASTSelectQuery * select_query, const Context & context,
                                          const TablesWithColumns & tables_with_columns)
{
    auto order_by = select_query->orderBy();
    if (!order_by)
        return;

    std::unordered_set<String> group_by_hashes;
    if (auto group_by = select_query->groupBy())
    {
        for (auto & elem : group_by->children)
        {
            auto hash = elem->getTreeHash();
            String key = toString(hash.first) + '_' + toString(hash.second);
            group_by_hashes.insert(key);
        }
    }

    for (auto & child : order_by->children)
    {
        auto * order_by_element = child->as<ASTOrderByElement>();
        auto & ast_func = order_by_element->children[0];
        if (!ast_func->as<ASTFunction>())
            continue;

        MonotonicityCheckVisitor::Data data{tables_with_columns, context, group_by_hashes};
        MonotonicityCheckVisitor(data).visit(ast_func);

        if (!data.isRejected())
        {
            ast_func = data.identifier->clone();
            ast_func->setAlias("");
            if (!data.monotonicity.is_positive)
                order_by_element->direction *= -1;
        }
    }
}

/// If ORDER BY has argument x followed by f(x) transfroms it to ORDER BY x.
/// Optimize ORDER BY x, y, f(x), g(x, y), f(h(x)), t(f(x), g(x)) into ORDER BY x, y
/// in case if f(), g(), h(), t() are deterministic (in scope of query).
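The reason optimizeMonotonousFunctionsInOrderBy is sound: applying a monotonic function to a sort key does not change the relative order of rows (it only flips it when the function is decreasing), so ORDER BY f(x) can become ORDER BY x, possibly with the direction reversed. A toy illustration of that property, separate from the ClickHouse code above:

```cpp
// Sorting by a monotonically decreasing function of x gives the same order as
// sorting by x with the direction flipped.
#include <algorithm>
#include <cassert>
#include <vector>

int main()
{
    std::vector<int> xs = {3, -1, 4, 1, -5};

    // "ORDER BY negate(x)": negate is monotonically decreasing.
    auto by_fx = xs;
    std::sort(by_fx.begin(), by_fx.end(), [](int a, int b) { return -a < -b; });

    // Equivalent rewrite: "ORDER BY x DESC".
    auto by_x_desc = xs;
    std::sort(by_x_desc.begin(), by_x_desc.end(), [](int a, int b) { return a > b; });

    assert(by_fx == by_x_desc);
}
```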
@@ -1066,6 +1107,10 @@ SyntaxAnalyzerResultPtr SyntaxAnalyzer::analyzeSelect(
        if (settings.optimize_redundant_functions_in_order_by)
            optimizeRedundantFunctionsInOrderBy(select_query, context);

        /// Replace monotonous functions with its argument
        if (settings.optimize_monotonous_functions_in_order_by)
            optimizeMonotonousFunctionsInOrderBy(select_query, context, tables_with_columns);

        /// Remove duplicated elements from LIMIT BY clause.
        optimizeLimitBy(select_query);
@@ -1003,6 +1003,9 @@ void AlterCommands::validate(const StorageInMemoryMetadata & metadata, const Con
        }
    }

    if (all_columns.empty())
        throw Exception{"Cannot DROP or CLEAR all columns", ErrorCodes::BAD_ARGUMENTS};

    validateColumnsDefaultsAndGetSampleBlock(default_expr_list, all_columns.getAll(), context);
}
@@ -121,6 +121,11 @@ public:
        return columns.size();
    }

    bool empty() const
    {
        return columns.empty();
    }

    /// Keep the sequence of columns and allow to lookup by name.
    using Container = boost::multi_index_container<
        ColumnDescription,

@@ -102,6 +102,10 @@ void IStorage::checkAlterIsPossible(const AlterCommands & commands, const Settin
    }
}

void IStorage::checkAlterPartitionIsPossible(const PartitionCommands & /*commands*/, const StorageMetadataPtr & /*metadata_snapshot*/, const Settings & /*settings*/) const
{
    throw Exception("Table engine " + getName() + " doesn't support partitioning", ErrorCodes::NOT_IMPLEMENTED);
}

StorageID IStorage::getStorageID() const
{
@@ -360,6 +360,9 @@ public:
        throw Exception("Partition operations are not supported by storage " + getName(), ErrorCodes::NOT_IMPLEMENTED);
    }

    /// Checks that partition commands can be applied to storage.
    virtual void checkAlterPartitionIsPossible(const PartitionCommands & commands, const StorageMetadataPtr & metadata_snapshot, const Settings & settings) const;

    /** Perform any background work. For example, combining parts in a MergeTree type table.
      * Returns whether any work has been done.
      */
@@ -110,6 +110,7 @@ namespace ErrorCodes
    extern const int UNKNOWN_DISK;
    extern const int NOT_ENOUGH_SPACE;
    extern const int ALTER_OF_COLUMN_IS_FORBIDDEN;
    extern const int SUPPORT_IS_DISABLED;
}

@@ -1421,12 +1422,20 @@ void MergeTreeData::checkAlterIsPossible(const AlterCommands & commands, const S
    columns_in_keys.insert(columns_alter_type_metadata_only.begin(), columns_alter_type_metadata_only.end());
    columns_in_keys.insert(columns_alter_type_check_safe_for_partition.begin(), columns_alter_type_check_safe_for_partition.end());

    NameSet dropped_columns;

    std::map<String, const IDataType *> old_types;
    for (const auto & column : old_metadata.getColumns().getAllPhysical())
        old_types.emplace(column.name, column.type.get());

    for (const AlterCommand & command : commands)
    {
        /// Just validate partition expression
        if (command.partition)
        {
            getPartitionIDFromQuery(command.partition, global_context);
        }

        if (command.type == AlterCommand::MODIFY_ORDER_BY && !is_custom_partitioned)
        {
            throw Exception(

@@ -1456,6 +1465,7 @@ void MergeTreeData::checkAlterIsPossible(const AlterCommands & commands, const S
                    "Trying to ALTER DROP key " + backQuoteIfNeed(command.column_name) + " column which is a part of key expression",
                    ErrorCodes::ALTER_OF_COLUMN_IS_FORBIDDEN);
            }
            dropped_columns.emplace(command.column_name);
        }
        else if (command.isModifyingData(getInMemoryMetadata()))
        {

@@ -1530,6 +1540,27 @@ void MergeTreeData::checkAlterIsPossible(const AlterCommands & commands, const S
            checkStoragePolicy(global_context.getStoragePolicy(changed_setting.value.safeGet<String>()));
        }
    }

    for (const auto & part : getDataPartsVector())
    {
        bool at_least_one_column_rest = false;
        for (const auto & column : part->getColumns())
        {
            if (!dropped_columns.count(column.name))
            {
                at_least_one_column_rest = true;
                break;
            }
        }
        if (!at_least_one_column_rest)
        {
            std::string postfix;
            if (dropped_columns.size() > 1)
                postfix = "s";
            throw Exception(ErrorCodes::BAD_ARGUMENTS,
                "Cannot drop or clear column{} '{}', because all columns in part '{}' will be removed from disk. Empty parts are not allowed", postfix, boost::algorithm::join(dropped_columns, ", "), part->name);
        }
    }
}

MergeTreeDataPartType MergeTreeData::choosePartType(size_t bytes_uncompressed, size_t rows_count) const

@@ -2525,6 +2556,45 @@ void MergeTreeData::freezePartition(const ASTPtr & partition_ast, const StorageM
        context);
}

void MergeTreeData::checkAlterPartitionIsPossible(const PartitionCommands & commands, const StorageMetadataPtr & /*metadata_snapshot*/, const Settings & settings) const
{
    for (const auto & command : commands)
    {
        if (command.type == PartitionCommand::DROP_DETACHED_PARTITION
            && !settings.allow_drop_detached)
            throw DB::Exception("Cannot execute query: DROP DETACHED PART is disabled "
                "(see allow_drop_detached setting)", ErrorCodes::SUPPORT_IS_DISABLED);

        if (command.partition && command.type != PartitionCommand::DROP_DETACHED_PARTITION)
        {
            if (command.part)
            {
                auto part_name = command.partition->as<ASTLiteral &>().value.safeGet<String>();
                /// We able to parse it
                MergeTreePartInfo::fromPartName(part_name, format_version);
            }
            else
            {
                /// We able to parse it
                getPartitionIDFromQuery(command.partition, global_context);
            }
        }
    }
}

void MergeTreeData::checkPartitionCanBeDropped(const ASTPtr & partition)
{
    const String partition_id = getPartitionIDFromQuery(partition, global_context);
    auto parts_to_remove = getDataPartsVectorInPartition(MergeTreeDataPartState::Committed, partition_id);

    UInt64 partition_size = 0;

    for (const auto & part : parts_to_remove)
        partition_size += part->getBytesOnDisk();

    auto table_id = getStorageID();
    global_context.checkPartitionCanBeDropped(table_id.database_name, table_id.table_name, partition_size);
}

void MergeTreeData::movePartitionToDisk(const ASTPtr & partition, const String & name, bool moving_part, const Context & context)
{

@@ -2626,7 +2696,7 @@ void MergeTreeData::movePartitionToVolume(const ASTPtr & partition, const String
}


String MergeTreeData::getPartitionIDFromQuery(const ASTPtr & ast, const Context & context)
String MergeTreeData::getPartitionIDFromQuery(const ASTPtr & ast, const Context & context) const
{
    const auto & partition_ast = ast->as<ASTPartition &>();

@@ -3045,7 +3115,7 @@ MergeTreeData::DataPartsVector MergeTreeData::getDataPartsVector() const
}

MergeTreeData::DataPartPtr MergeTreeData::getAnyPartInPartition(
    const String & partition_id, DataPartsLock & /*data_parts_lock*/)
    const String & partition_id, DataPartsLock & /*data_parts_lock*/) const
{
    auto it = data_parts_by_state_and_info.lower_bound(DataPartStateAndPartitionID{DataPartState::Committed, partition_id});
@@ -24,6 +24,7 @@
#include <Disks/StoragePolicy.h>
#include <Interpreters/Aggregator.h>
#include <Storages/extractKeyExpressionList.h>
#include <Storages/PartitionCommands.h>

#include <boost/multi_index_container.hpp>
#include <boost/multi_index/ordered_index.hpp>

@@ -505,6 +506,9 @@ public:
    /// If something is wrong, throws an exception.
    void checkAlterIsPossible(const AlterCommands & commands, const Settings & settings) const override;

    /// Checks that partition name in all commands is valid
    void checkAlterPartitionIsPossible(const PartitionCommands & commands, const StorageMetadataPtr & metadata_snapshot, const Settings & settings) const override;

    /// Change MergeTreeSettings
    void changeSettings(
        const ASTPtr & new_settings,

@@ -547,6 +551,8 @@ public:
    /// Moves partition to specified Volume
    void movePartitionToVolume(const ASTPtr & partition, const String & name, bool moving_part, const Context & context);

    void checkPartitionCanBeDropped(const ASTPtr & partition) override;

    size_t getColumnCompressedSize(const std::string & name) const
    {
        auto lock = lockParts();

@@ -561,7 +567,7 @@ public:
    }

    /// For ATTACH/DETACH/DROP PARTITION.
    String getPartitionIDFromQuery(const ASTPtr & ast, const Context & context);
    String getPartitionIDFromQuery(const ASTPtr & ast, const Context & context) const;

    /// Extracts MergeTreeData of other *MergeTree* storage
    /// and checks that their structure suitable for ALTER TABLE ATTACH PARTITION FROM

@@ -815,7 +821,7 @@ protected:
    void removePartContributionToColumnSizes(const DataPartPtr & part);

    /// If there is no part in the partition with ID `partition_id`, returns empty ptr. Should be called under the lock.
    DataPartPtr getAnyPartInPartition(const String & partition_id, DataPartsLock & data_parts_lock);
    DataPartPtr getAnyPartInPartition(const String & partition_id, DataPartsLock & data_parts_lock) const;

    /// Return parts in the Committed set that are covered by the new_part_info or the part that covers it.
    /// Will check that the new part doesn't already exist and that it doesn't intersect existing part.
@@ -1068,20 +1068,6 @@ bool ReplicatedMergeTreeQueue::shouldExecuteLogEntry(
        }
    }

    /// TODO: it makes sense to check DROP_RANGE also
    if (entry.type == LogEntry::CLEAR_COLUMN || entry.type == LogEntry::REPLACE_RANGE)
    {
        String conflicts_description;
        String range_name = (entry.type == LogEntry::REPLACE_RANGE) ? entry.replace_range_entry->drop_range_part_name : entry.new_part_name;
        auto range = MergeTreePartInfo::fromPartName(range_name, format_version);

        if (0 != getConflictsCountForRange(range, entry, &conflicts_description, state_lock))
        {
            LOG_DEBUG(log, conflicts_description);
            return false;
        }
    }

    /// Alters must be executed one by one. First metadata change, and after that data alter (MUTATE_PART entries with).
    /// corresponding alter_version.
    if (entry.type == LogEntry::ALTER_METADATA)
@@ -257,6 +257,13 @@ void StorageMaterializedView::alterPartition(
    getTargetTable()->alterPartition(query, metadata_snapshot, commands, context);
}

void StorageMaterializedView::checkAlterPartitionIsPossible(
    const PartitionCommands & commands, const StorageMetadataPtr & metadata_snapshot, const Settings & settings) const
{
    checkStatementCanBeForwarded();
    getTargetTable()->checkAlterPartitionIsPossible(commands, metadata_snapshot, settings);
}

void StorageMaterializedView::mutate(const MutationCommands & commands, const Context & context)
{
    checkStatementCanBeForwarded();

@@ -53,6 +53,8 @@ public:

    void alterPartition(const ASTPtr & query, const StorageMetadataPtr & metadata_snapshot, const PartitionCommands & commands, const Context & context) override;

    void checkAlterPartitionIsPossible(const PartitionCommands & commands, const StorageMetadataPtr & metadata_snapshot, const Settings & settings) const override;

    void mutate(const MutationCommands & commands, const Context & context) override;

    void renameInMemory(const StorageID & new_table_id) override;
@@ -209,22 +209,6 @@ void StorageMergeTree::checkTableCanBeDropped() const
    global_context.checkTableCanBeDropped(table_id.database_name, table_id.table_name, getTotalActiveSizeInBytes());
}

void StorageMergeTree::checkPartitionCanBeDropped(const ASTPtr & partition)
{
    auto table_id = getStorageID();

    const String partition_id = getPartitionIDFromQuery(partition, global_context);
    auto parts_to_remove = getDataPartsVectorInPartition(MergeTreeDataPartState::Committed, partition_id);

    UInt64 partition_size = 0;

    for (const auto & part : parts_to_remove)
    {
        partition_size += part->getBytesOnDisk();
    }
    global_context.checkPartitionCanBeDropped(table_id.database_name, table_id.table_name, partition_size);
}

void StorageMergeTree::drop()
{
    shutdown();

@@ -81,8 +81,6 @@ public:

    void checkTableCanBeDropped() const override;

    void checkPartitionCanBeDropped(const ASTPtr & partition) override;

    ActionLock getActionLock(StorageActionBlockType action_type) override;

    CheckResults checkData(const ASTPtr & query, const Context & context) override;
@ -4054,22 +4054,6 @@ void StorageReplicatedMergeTree::checkTableCanBeDropped() const
global_context.checkTableCanBeDropped(table_id.database_name, table_id.table_name, getTotalActiveSizeInBytes());
}


void StorageReplicatedMergeTree::checkPartitionCanBeDropped(const ASTPtr & partition)
{
const String partition_id = getPartitionIDFromQuery(partition, global_context);
auto parts_to_remove = getDataPartsVectorInPartition(MergeTreeDataPartState::Committed, partition_id);

UInt64 partition_size = 0;

for (const auto & part : parts_to_remove)
partition_size += part->getBytesOnDisk();

auto table_id = getStorageID();
global_context.checkPartitionCanBeDropped(table_id.database_name, table_id.table_name, partition_size);
}


void StorageReplicatedMergeTree::rename(const String & new_path_to_table_data, const StorageID & new_table_id)
{
MergeTreeData::rename(new_path_to_table_data, new_table_id);
@ -4514,7 +4498,11 @@ void StorageReplicatedMergeTree::getReplicaDelays(time_t & out_absolute_delay, t
}


void StorageReplicatedMergeTree::fetchPartition(const ASTPtr & partition, const StorageMetadataPtr & metadata_snapshot, const String & from_, const Context & query_context)
void StorageReplicatedMergeTree::fetchPartition(
const ASTPtr & partition,
const StorageMetadataPtr & metadata_snapshot,
const String & from_,
const Context & query_context)
{
String partition_id = getPartitionIDFromQuery(partition, query_context);

@ -134,8 +134,6 @@ public:

void checkTableCanBeDropped() const override;

void checkPartitionCanBeDropped(const ASTPtr & partition) override;

ActionLock getActionLock(StorageActionBlockType action_type) override;

/// Wait when replication queue size becomes less or equal than queue_size
@ -43,7 +43,7 @@ ColumnsDescription getStructureOfRemoteTable(

/// Expect at least some columns.
/// This is a hack to handle the empty block case returned by Connection when skip_unavailable_shards is set.
if (res.size() == 0)
if (res.empty())
continue;

return res;
@ -0,0 +1,21 @@
<yandex>
    <storage_configuration>
        <disks>
            <s3>
                <type>s3</type>
                <endpoint>http://minio1:9001/root/data/</endpoint>
                <access_key_id>minio</access_key_id>
                <secret_access_key>minio123</secret_access_key>
            </s3>
        </disks>
        <policies>
            <s3>
                <volumes>
                    <main>
                        <disk>s3</disk>
                    </main>
                </volumes>
            </s3>
        </policies>
    </storage_configuration>
</yandex>
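This storage configuration registers a MinIO endpoint as an S3-backed disk and exposes it through a storage policy named s3. As a rough usage sketch (the database and table names are illustrative and simply mirror the integration test added later in this commit), a MergeTree table opts into the policy like this:

-- hypothetical example; assumes the <s3> policy from the config above is loaded by the server
CREATE TABLE test_s3.test_s3 (key UInt32, value UInt32)
ENGINE = MergeTree
ORDER BY key
SETTINGS storage_policy = 's3';

Data parts of such a table land in the MinIO bucket instead of the local disk, which is what lets the test below compare ClickHouse-side S3 profile events against MinIO-side request statistics.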
35
tests/integration/test_profile_events_s3/configs/config.xml
Normal file
@ -0,0 +1,35 @@
<?xml version="1.0"?>
<yandex>
    <logger>
        <level>trace</level>
        <log>/var/log/clickhouse-server/clickhouse-server.log</log>
        <errorlog>/var/log/clickhouse-server/clickhouse-server.err.log</errorlog>
        <size>1000M</size>
        <count>10</count>
    </logger>

    <query_log>
        <database>system</database>
        <table>query_log</table>
        <partition_by>toYYYYMM(event_date)</partition_by>
        <flush_interval_milliseconds>1000</flush_interval_milliseconds>
    </query_log>

    <tcp_port>9000</tcp_port>
    <listen_host>127.0.0.1</listen_host>

    <openSSL>
        <client>
            <cacheSessions>true</cacheSessions>
            <verificationMode>none</verificationMode>
            <invalidCertificateHandler>
                <name>AcceptCertificateHandler</name>
            </invalidCertificateHandler>
        </client>
    </openSSL>

    <max_concurrent_queries>500</max_concurrent_queries>
    <mark_cache_size>5368709120</mark_cache_size>
    <path>./clickhouse/</path>
    <users_config>users.xml</users_config>
</yandex>
24
tests/integration/test_profile_events_s3/configs/users.xml
Normal file
@ -0,0 +1,24 @@
<?xml version="1.0"?>
<yandex>
    <profiles>
        <default>
        </default>
    </profiles>

    <users>
        <default>
            <password></password>
            <networks incl="networks" replace="replace">
                <ip>::/0</ip>
            </networks>
            <profile>default</profile>
            <quota>default</quota>
            <log_queries>1</log_queries>
        </default>
    </users>

    <quotas>
        <default>
        </default>
    </quotas>
</yandex>
160
tests/integration/test_profile_events_s3/test.py
Normal file
@ -0,0 +1,160 @@
import logging
import random
import string
import time
import re
import requests

import pytest
from helpers.cluster import ClickHouseCluster

logging.getLogger().setLevel(logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler())


@pytest.fixture(scope="module")
def cluster():
    try:
        cluster = ClickHouseCluster(__file__)

        cluster.add_instance("node", config_dir="configs", with_minio=True)

        logging.info("Starting cluster...")
        cluster.start()
        logging.info("Cluster started")

        yield cluster
    finally:
        cluster.shutdown()


init_list = {
    "S3ReadMicroseconds" : 0,
    "S3ReadBytes" : 0,
    "S3ReadRequestsCount" : 0,
    "S3ReadRequestsErrorsTotal" : 0,
    "S3ReadRequestsErrors503" : 0,
    "S3ReadRequestsRedirects" : 0,
    "S3WriteMicroseconds" : 0,
    "S3WriteBytes" : 0,
    "S3WriteRequestsCount" : 0,
    "S3WriteRequestsErrorsTotal" : 0,
    "S3WriteRequestsErrors503" : 0,
    "S3WriteRequestsRedirects" : 0,
}

def get_s3_events(instance):
    result = init_list.copy()
    events = instance.query("SELECT event,value FROM system.events WHERE event LIKE 'S3%'").split("\n")
    for event in events:
        ev = event.split("\t")
        if len(ev) == 2:
            result[ev[0]] = int(ev[1])
    return result


def get_minio_stat(cluster):
    result = {
        "get_requests" : 0,
        "set_requests" : 0,
        "errors" : 0,
        "rx_bytes" : 0,
        "tx_bytes" : 0,
    }
    stat = requests.get(url="http://{}:{}/minio/prometheus/metrics".format("localhost", cluster.minio_port)).text.split("\n")
    for line in stat:
        x = re.search("s3_requests_total(\{.*\})?\s(\d+)(\s.*)?", line)
        if x != None:
            y = re.search(".*api=\"(get|list|head|select).*", x.group(1))
            if y != None:
                result["get_requests"] += int(x.group(2))
            else:
                result["set_requests"] += int(x.group(2))
        x = re.search("s3_errors_total(\{.*\})?\s(\d+)(\s.*)?", line)
        if x != None:
            result["errors"] += int(x.group(2))
        x = re.search("s3_rx_bytes_total(\{.*\})?\s([\d\.e\+\-]+)(\s.*)?", line)
        if x != None:
            result["tx_bytes"] += float(x.group(2))
        x = re.search("s3_tx_bytes_total(\{.*\})?\s([\d\.e\+\-]+)(\s.*)?", line)
        if x != None:
            result["rx_bytes"] += float(x.group(2))
    return result


def get_query_stat(instance, hint):
    result = init_list.copy()
    instance.query("SYSTEM FLUSH LOGS")
    events = instance.query('''
        SELECT ProfileEvents.Names, ProfileEvents.Values
        FROM system.query_log
        ARRAY JOIN ProfileEvents
        WHERE type != 1 AND query LIKE '%{}%'
    '''.format(hint.replace("'", "\\'"))).split("\n")
    for event in events:
        ev = event.split("\t")
        if len(ev) == 2:
            if ev[0].startswith("S3"):
                result[ev[0]] += int(ev[1])
    return result


def get_minio_size(cluster):
    minio = cluster.minio_client
    size = 0
    for obj in minio.list_objects(cluster.minio_bucket, 'data/'):
        size += obj.size
    return size


def test_profile_events(cluster):
    instance = cluster.instances["node"]

    instance.query("SYSTEM FLUSH LOGS")

    instance.query("DROP TABLE IF EXISTS test_s3.test_s3")
    instance.query("DROP DATABASE IF EXISTS test_s3")
    instance.query("CREATE DATABASE IF NOT EXISTS test_s3")

    metrics0 = get_s3_events(instance)
    minio0 = get_minio_stat(cluster)

    query1 = "CREATE TABLE test_s3.test_s3 (key UInt32, value UInt32) ENGINE=MergeTree PRIMARY KEY key ORDER BY key SETTINGS storage_policy='s3'"
    instance.query(query1)

    size1 = get_minio_size(cluster)
    metrics1 = get_s3_events(instance)
    minio1 = get_minio_stat(cluster)

    assert metrics1["S3ReadRequestsCount"] - metrics0["S3ReadRequestsCount"] == minio1["get_requests"] - minio0["get_requests"] - 1  # 1 from get_minio_size
    assert metrics1["S3WriteRequestsCount"] - metrics0["S3WriteRequestsCount"] == minio1["set_requests"] - minio0["set_requests"]
    stat1 = get_query_stat(instance, query1)
    for metric in stat1:
        assert stat1[metric] == metrics1[metric] - metrics0[metric]
    assert metrics1["S3WriteBytes"] - metrics0["S3WriteBytes"] == size1

    query2 = "INSERT INTO test_s3.test_s3 FORMAT Values"
    instance.query(query2 + " (1,1)")

    size2 = get_minio_size(cluster)
    metrics2 = get_s3_events(instance)
    minio2 = get_minio_stat(cluster)

    assert metrics2["S3ReadRequestsCount"] - metrics1["S3ReadRequestsCount"] == minio2["get_requests"] - minio1["get_requests"] - 1  # 1 from get_minio_size
    assert metrics2["S3WriteRequestsCount"] - metrics1["S3WriteRequestsCount"] == minio2["set_requests"] - minio1["set_requests"]
    stat2 = get_query_stat(instance, query2)
    for metric in stat2:
        assert stat2[metric] == metrics2[metric] - metrics1[metric]
    assert metrics2["S3WriteBytes"] - metrics1["S3WriteBytes"] == size2 - size1

    query3 = "SELECT * from test_s3.test_s3"
    assert instance.query(query3) == "1\t1\n"

    metrics3 = get_s3_events(instance)
    minio3 = get_minio_stat(cluster)

    assert metrics3["S3ReadRequestsCount"] - metrics2["S3ReadRequestsCount"] == minio3["get_requests"] - minio2["get_requests"]
    assert metrics3["S3WriteRequestsCount"] - metrics2["S3WriteRequestsCount"] == minio3["set_requests"] - minio2["set_requests"]
    stat3 = get_query_stat(instance, query3)
    for metric in stat3:
        assert stat3[metric] == metrics3[metric] - metrics2[metric]
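The test derives its ClickHouse-side numbers from two system tables; roughly the same data can be inspected by hand with the queries the test wraps (the test_s3 filter below is just the hint string the test itself uses):

-- cumulative S3 counters for the whole server
SELECT event, value FROM system.events WHERE event LIKE 'S3%';

-- per-query S3 counters, available after SYSTEM FLUSH LOGS
SELECT ProfileEvents.Names, ProfileEvents.Values
FROM system.query_log
ARRAY JOIN ProfileEvents
WHERE type != 1 AND query LIKE '%test_s3%';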
9
tests/performance/monotonous_order_by.xml
Normal file
@ -0,0 +1,9 @@
<test>
    <preconditions>
        <table_exists>hits_10m_single</table_exists>
    </preconditions>

    <query>SELECT * FROM (SELECT CounterID, EventDate FROM hits_10m_single) ORDER BY toFloat32(toFloat64(toFloat32(toFloat64(CounterID)))) FORMAT Null</query>
    <query>SELECT * FROM (SELECT CounterID, EventDate FROM hits_10m_single) ORDER BY toFloat32(toFloat64(toFloat32(toFloat64(CounterID)))) DESC, toFloat32(toFloat64(toFloat32(toFloat64(EventDate)))) ASC FORMAT Null</query>

</test>
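The functional test files that follow exercise the optimize_monotonous_functions_in_order_by setting: when it is enabled, monotonic wrappers in ORDER BY are stripped so sorting happens on the underlying expression. A minimal sketch of what the reference output below checks, using the same enable_debug_queries / analyze prefix as the test:

SET enable_debug_queries = 1;
SET optimize_monotonous_functions_in_order_by = 1;
-- with the setting on, the rewritten query sorts by the bare column
analyze SELECT number FROM numbers(3) ORDER BY toFloat32(toFloat64(number));
-- expected rewrite (see the reference file below): ORDER BY number ASC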
@ -0,0 +1,168 @@
0
1
2
0
1
2
0
1
2
2
1
0
0
1
2
0
1
2
0
1
2
2
1
0
2
1
0
2
1
0
0
1
2
2
1
0
2
1
0
SELECT number
FROM numbers(3)
ORDER BY number ASC
SELECT number
FROM numbers(3)
ORDER BY abs(toFloat32(number)) ASC
SELECT number
FROM numbers(3)
ORDER BY toFloat32(abs(number)) ASC
SELECT number
FROM numbers(3)
ORDER BY number DESC
SELECT number
FROM numbers(3)
ORDER BY exp(number) ASC
SELECT roundToExp2(number) AS x
FROM numbers(3)
ORDER BY
    number ASC,
    number ASC
SELECT number AS x
FROM numbers(3)
ORDER BY
    number ASC,
    number ASC
SELECT number
FROM numbers(3)
ORDER BY number DESC
SELECT number
FROM numbers(3)
ORDER BY abs(toFloat32(number)) DESC
SELECT number
FROM numbers(3)
ORDER BY toFloat32(abs(number)) DESC
SELECT number
FROM numbers(3)
ORDER BY number ASC
SELECT number
FROM numbers(3)
ORDER BY exp(number) DESC
SELECT roundToExp2(number) AS x
FROM numbers(3)
ORDER BY
    number DESC,
    number DESC
0
1
2
0
1
2
0
1
2
2
1
0
0
1
2
0
1
2
0
1
2
2
1
0
2
1
0
2
1
0
0
1
2
2
1
0
2
1
0
SELECT number
FROM numbers(3)
ORDER BY toFloat32(toFloat64(number)) ASC
SELECT number
FROM numbers(3)
ORDER BY abs(toFloat32(number)) ASC
SELECT number
FROM numbers(3)
ORDER BY toFloat32(abs(number)) ASC
SELECT number
FROM numbers(3)
ORDER BY -number ASC
SELECT number
FROM numbers(3)
ORDER BY exp(number) ASC
SELECT roundToExp2(number) AS x
FROM numbers(3)
ORDER BY
    x ASC,
    toFloat32(x) ASC
SELECT number AS x
FROM numbers(3)
ORDER BY
    toFloat32(x) AS k ASC,
    toFloat64(k) ASC
SELECT number
FROM numbers(3)
ORDER BY toFloat32(toFloat64(number)) DESC
SELECT number
FROM numbers(3)
ORDER BY abs(toFloat32(number)) DESC
SELECT number
FROM numbers(3)
ORDER BY toFloat32(abs(number)) DESC
SELECT number
FROM numbers(3)
ORDER BY -number DESC
SELECT number
FROM numbers(3)
ORDER BY exp(number) DESC
SELECT roundToExp2(number) AS x
FROM numbers(3)
ORDER BY
    x DESC,
    toFloat32(x) DESC
@ -0,0 +1,59 @@
SET enable_debug_queries = 1;
SET optimize_monotonous_functions_in_order_by = 1;

SELECT number FROM numbers(3) ORDER BY toFloat32(toFloat64(number));
SELECT number FROM numbers(3) ORDER BY abs(toFloat32(number));
SELECT number FROM numbers(3) ORDER BY toFloat32(abs(number));
SELECT number FROM numbers(3) ORDER BY -number;
SELECT number FROM numbers(3) ORDER BY exp(number);
SELECT roundToExp2(number) AS x FROM numbers(3) ORDER BY x, toFloat32(x);
SELECT number AS x FROM numbers(3) ORDER BY toFloat32(x) as k, toFloat64(k);
SELECT number FROM numbers(3) ORDER BY toFloat32(toFloat64(number)) DESC;
SELECT number FROM numbers(3) ORDER BY abs(toFloat32(number)) DESC;
SELECT number FROM numbers(3) ORDER BY toFloat32(abs(number)) DESC;
SELECT number FROM numbers(3) ORDER BY -number DESC;
SELECT number FROM numbers(3) ORDER BY exp(number) DESC;
SELECT roundToExp2(number) AS x FROM numbers(3) ORDER BY x DESC, toFloat32(x) DESC;
analyze SELECT number FROM numbers(3) ORDER BY toFloat32(toFloat64(number));
analyze SELECT number FROM numbers(3) ORDER BY abs(toFloat32(number));
analyze SELECT number FROM numbers(3) ORDER BY toFloat32(abs(number));
analyze SELECT number FROM numbers(3) ORDER BY -number;
analyze SELECT number FROM numbers(3) ORDER BY exp(number);
analyze SELECT roundToExp2(number) AS x FROM numbers(3) ORDER BY x, toFloat32(x);
analyze SELECT number AS x FROM numbers(3) ORDER BY toFloat32(x) as k, toFloat64(k);
analyze SELECT number FROM numbers(3) ORDER BY toFloat32(toFloat64(number)) DESC;
analyze SELECT number FROM numbers(3) ORDER BY abs(toFloat32(number)) DESC;
analyze SELECT number FROM numbers(3) ORDER BY toFloat32(abs(number)) DESC;
analyze SELECT number FROM numbers(3) ORDER BY -number DESC;
analyze SELECT number FROM numbers(3) ORDER BY exp(number) DESC;
analyze SELECT roundToExp2(number) AS x FROM numbers(3) ORDER BY x DESC, toFloat32(x) DESC;

SET optimize_monotonous_functions_in_order_by = 0;

SELECT number FROM numbers(3) ORDER BY toFloat32(toFloat64(number));
SELECT number FROM numbers(3) ORDER BY abs(toFloat32(number));
SELECT number FROM numbers(3) ORDER BY toFloat32(abs(number));
SELECT number FROM numbers(3) ORDER BY -number;
SELECT number FROM numbers(3) ORDER BY exp(number);
SELECT roundToExp2(number) AS x FROM numbers(3) ORDER BY x, toFloat32(x);
SELECT number AS x FROM numbers(3) ORDER BY toFloat32(x) as k, toFloat64(k);
SELECT number FROM numbers(3) ORDER BY toFloat32(toFloat64(number)) DESC;
SELECT number FROM numbers(3) ORDER BY abs(toFloat32(number)) DESC;
SELECT number FROM numbers(3) ORDER BY toFloat32(abs(number)) DESC;
SELECT number FROM numbers(3) ORDER BY -number DESC;
SELECT number FROM numbers(3) ORDER BY exp(number) DESC;
SELECT roundToExp2(number) AS x FROM numbers(3) ORDER BY x DESC, toFloat32(x) DESC;
analyze SELECT number FROM numbers(3) ORDER BY toFloat32(toFloat64(number));
analyze SELECT number FROM numbers(3) ORDER BY abs(toFloat32(number));
analyze SELECT number FROM numbers(3) ORDER BY toFloat32(abs(number));
analyze SELECT number FROM numbers(3) ORDER BY -number;
analyze SELECT number FROM numbers(3) ORDER BY exp(number);
analyze SELECT roundToExp2(number) AS x FROM numbers(3) ORDER BY x, toFloat32(x);
analyze SELECT number AS x FROM numbers(3) ORDER BY toFloat32(x) as k, toFloat64(k);
analyze SELECT number FROM numbers(3) ORDER BY toFloat32(toFloat64(number)) DESC;
analyze SELECT number FROM numbers(3) ORDER BY abs(toFloat32(number)) DESC;
analyze SELECT number FROM numbers(3) ORDER BY toFloat32(abs(number)) DESC;
analyze SELECT number FROM numbers(3) ORDER BY -number DESC;
analyze SELECT number FROM numbers(3) ORDER BY exp(number) DESC;
analyze SELECT roundToExp2(number) AS x FROM numbers(3) ORDER BY x DESC, toFloat32(x) DESC;
-- TODO: exp() should be monotonous function
@ -0,0 +1,32 @@
1 4 3
1 3 3
2 5 4
2 2 4
1 3 3
1 4 3
2 2 4
2 5 4
2
1
2
1 3 3
1 4 3
2 5 4
2 2 4
2
1 4 3
1 3 3
2 5 4
2 2 4
1 3 3
1 4 3
2 2 4
2 5 4
2
1
2
1 3 3
1 4 3
2 5 4
2 2 4
2
@ -0,0 +1,21 @@
DROP TABLE IF EXISTS test;
CREATE TABLE test (x Int8, y Int8, z Int8) ENGINE = MergeTree ORDER BY tuple();
INSERT INTO test VALUES (1, 3, 3), (1, 4, 3), (2, 5, 4), (2, 2, 4);

SET optimize_monotonous_functions_in_order_by = 1;
SELECT * FROM test ORDER BY toFloat32(x), -y, -z DESC;
SELECT * FROM test ORDER BY toFloat32(x), -(-y), -z DESC;
SELECT max(x) as k FROM test ORDER BY k;
SELECT roundToExp2(x) as k FROM test GROUP BY k ORDER BY k;
SELECT roundToExp2(x) as k, y, z FROM test WHERE k >= 1 ORDER BY k;
SELECT max(x) as k FROM test HAVING k > 0 ORDER BY k;

SET optimize_monotonous_functions_in_order_by = 0;
SELECT * FROM test ORDER BY toFloat32(x), -y, -z DESC;
SELECT * FROM test ORDER BY toFloat32(x), -(-y), -z DESC;
SELECT max(x) as k FROM test ORDER BY k;
SELECT roundToExp2(x) as k From test GROUP BY k ORDER BY k;
SELECT roundToExp2(x) as k, y, z FROM test WHERE k >= 1 ORDER BY k;
SELECT max(x) as k FROM test HAVING k > 0 ORDER BY k;

DROP TABLE test;
@ -0,0 +1,6 @@
0 1 Hello
0 1 Hello
0 2 Hello
0 2 Hello
0 3 Hello
0 3 Hello
34
tests/queries/0_stateless/01388_clear_all_columns.sql
Normal file
@ -0,0 +1,34 @@
DROP TABLE IF EXISTS test;
CREATE TABLE test (x UInt8) ENGINE = MergeTree ORDER BY tuple();
INSERT INTO test (x) VALUES (1), (2), (3);
ALTER TABLE test CLEAR COLUMN x; --{serverError 36}
DROP TABLE test;

DROP TABLE IF EXISTS test;

CREATE TABLE test (x UInt8, y UInt8) ENGINE = MergeTree ORDER BY tuple();
INSERT INTO test (x, y) VALUES (1, 1), (2, 2), (3, 3);

ALTER TABLE test CLEAR COLUMN x;

ALTER TABLE test CLEAR COLUMN x IN PARTITION ''; --{serverError 248}
ALTER TABLE test CLEAR COLUMN x IN PARTITION 'asdasd'; --{serverError 248}
ALTER TABLE test CLEAR COLUMN x IN PARTITION '123'; --{serverError 248}

ALTER TABLE test CLEAR COLUMN y; --{serverError 36}

ALTER TABLE test ADD COLUMN z String DEFAULT 'Hello';

-- y is the only real column in the table
ALTER TABLE test CLEAR COLUMN y; --{serverError 36}
ALTER TABLE test CLEAR COLUMN x;
ALTER TABLE test CLEAR COLUMN z;

INSERT INTO test (x, y, z) VALUES (1, 1, 'a'), (2, 2, 'b'), (3, 3, 'c');

ALTER TABLE test CLEAR COLUMN z;
ALTER TABLE test CLEAR COLUMN x;

SELECT * FROM test ORDER BY y;

DROP TABLE IF EXISTS test;
@ -12,15 +12,16 @@ CREATE TABLE d_src (id UInt64, country_id UInt8, name String) Engine = Memory;
INSERT INTO t VALUES (0, 0);
INSERT INTO d_src VALUES (0, 0, 'n');

CREATE DICTIONARY d (id UInt32, country_id UInt8, name String) PRIMARY KEY id
SOURCE(CLICKHOUSE(host 'localhost' port 9000 user 'default' password '' db 'db_01391' table 'd_src'))
LIFETIME(MIN 300 MAX 360)
CREATE DICTIONARY d (id UInt32, country_id UInt8, name String)
PRIMARY KEY id
SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' DB 'db_01391' table 'd_src'))
LIFETIME(MIN 1 MAX 1)
LAYOUT(HASHED());

select click_country_id from t cc
left join d on toUInt32(d.id) = cc.click_city_id;

DROP DICTIONARY d;
DROP TABLE t;
DROP TABLE d_src;
DROP DICTIONARY d;
DROP DATABASE IF EXISTS db_01391;
@ -131,3 +131,4 @@
01370_client_autocomplete_word_break_characters
01319_optimize_skip_unused_shards_nesting
01376_GROUP_BY_injective_elimination_dictGet
01391_join_on_dict_crash
@ -1,5 +1,6 @@
details {
background: #444451;
color: #eee;
padding: 1rem;
margin-bottom: 1rem;
margin-top: 1rem;
@ -7,7 +8,7 @@ details {

summary {
font-weight: bold;
color: #fff;
color: #eee;
}

#sidebar {