Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-23 16:12:01 +00:00)

Merge branch 'master' into change-log-level-clickhouse-local
Commit 7a07ca56fd
contrib/CMakeLists.txt
vendored
2
contrib/CMakeLists.txt
vendored
@@ -71,6 +71,7 @@ add_contrib (zlib-ng-cmake zlib-ng)
 add_contrib (bzip2-cmake bzip2)
 add_contrib (minizip-ng-cmake minizip-ng)
 add_contrib (snappy-cmake snappy)
+add_contrib (rocksdb-cmake rocksdb)
 add_contrib (thrift-cmake thrift)
 # parquet/arrow/orc
 add_contrib (arrow-cmake arrow) # requires: snappy, thrift, double-conversion
@@ -147,7 +148,6 @@ add_contrib (hive-metastore-cmake hive-metastore) # requires: thrift, avro, arro
 add_contrib (cppkafka-cmake cppkafka)
 add_contrib (libpqxx-cmake libpqxx)
 add_contrib (libpq-cmake libpq)
-add_contrib (rocksdb-cmake rocksdb) # requires: jemalloc, snappy, zlib, lz4, zstd, liburing
 add_contrib (nuraft-cmake NuRaft)
 add_contrib (fast_float-cmake fast_float)
 add_contrib (idna-cmake idna)

contrib/rocksdb (vendored)

@@ -1 +1 @@
-Subproject commit 5f003e4a22d2e48e37c98d9620241237cd30dd24
+Subproject commit 49ce8a1064dd1ad89117899839bf136365e49e79
@@ -5,38 +5,36 @@ if (NOT ENABLE_ROCKSDB OR NO_SSE3_OR_HIGHER) # assumes SSE4.2 and PCLMUL
     return()
 endif()

+# not in original build system, otherwise xxHash.cc fails to compile with ClickHouse C++23 default
+set (CMAKE_CXX_STANDARD 20)
+
+# Always disable jemalloc for rocksdb by default because it introduces non-standard jemalloc APIs
+option(WITH_JEMALLOC "build with JeMalloc" OFF)
+
+option(WITH_LIBURING "build with liburing" OFF) # TODO could try to enable this conditionally, depending on ClickHouse's ENABLE_LIBURING
+
 # ClickHouse cannot be compiled without snappy, lz4, zlib, zstd
 option(WITH_SNAPPY "build with SNAPPY" ON)
 option(WITH_LZ4 "build with lz4" ON)
 option(WITH_ZLIB "build with zlib" ON)
 option(WITH_ZSTD "build with zstd" ON)

-if (ENABLE_JEMALLOC)
-    add_definitions(-DROCKSDB_JEMALLOC -DJEMALLOC_NO_DEMANGLE)
-    list (APPEND THIRDPARTY_LIBS ch_contrib::jemalloc)
-endif ()
-
-if (ENABLE_LIBURING)
-    add_definitions(-DROCKSDB_IOURING_PRESENT)
-    list (APPEND THIRDPARTY_LIBS ch_contrib::liburing)
-endif ()
-
-if (WITH_SNAPPY)
+if(WITH_SNAPPY)
     add_definitions(-DSNAPPY)
     list(APPEND THIRDPARTY_LIBS ch_contrib::snappy)
 endif()

-if (WITH_ZLIB)
+if(WITH_ZLIB)
     add_definitions(-DZLIB)
     list(APPEND THIRDPARTY_LIBS ch_contrib::zlib)
 endif()

-if (WITH_LZ4)
+if(WITH_LZ4)
     add_definitions(-DLZ4)
     list(APPEND THIRDPARTY_LIBS ch_contrib::lz4)
 endif()

-if (WITH_ZSTD)
+if(WITH_ZSTD)
     add_definitions(-DZSTD)
     list(APPEND THIRDPARTY_LIBS ch_contrib::zstd)
 endif()
@@ -90,7 +88,6 @@ set(SOURCES
     ${ROCKSDB_SOURCE_DIR}/cache/sharded_cache.cc
     ${ROCKSDB_SOURCE_DIR}/cache/tiered_secondary_cache.cc
     ${ROCKSDB_SOURCE_DIR}/db/arena_wrapped_db_iter.cc
-    ${ROCKSDB_SOURCE_DIR}/db/attribute_group_iterator_impl.cc
     ${ROCKSDB_SOURCE_DIR}/db/blob/blob_contents.cc
     ${ROCKSDB_SOURCE_DIR}/db/blob/blob_fetcher.cc
     ${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_addition.cc
@@ -107,7 +104,6 @@ set(SOURCES
     ${ROCKSDB_SOURCE_DIR}/db/blob/prefetch_buffer_collection.cc
     ${ROCKSDB_SOURCE_DIR}/db/builder.cc
     ${ROCKSDB_SOURCE_DIR}/db/c.cc
-    ${ROCKSDB_SOURCE_DIR}/db/coalescing_iterator.cc
     ${ROCKSDB_SOURCE_DIR}/db/column_family.cc
     ${ROCKSDB_SOURCE_DIR}/db/compaction/compaction.cc
     ${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_iterator.cc
@@ -128,7 +124,6 @@ set(SOURCES
     ${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_write.cc
     ${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_compaction_flush.cc
     ${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_files.cc
-    ${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_follower.cc
     ${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_open.cc
     ${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_debug.cc
     ${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_experimental.cc
@@ -186,7 +181,6 @@ set(SOURCES
     ${ROCKSDB_SOURCE_DIR}/env/env_encryption.cc
     ${ROCKSDB_SOURCE_DIR}/env/file_system.cc
     ${ROCKSDB_SOURCE_DIR}/env/file_system_tracer.cc
-    ${ROCKSDB_SOURCE_DIR}/env/fs_on_demand.cc
     ${ROCKSDB_SOURCE_DIR}/env/fs_remap.cc
     ${ROCKSDB_SOURCE_DIR}/env/mock_env.cc
     ${ROCKSDB_SOURCE_DIR}/env/unique_id_gen.cc
@@ -374,7 +368,6 @@ set(SOURCES
     ${ROCKSDB_SOURCE_DIR}/utilities/persistent_cache/volatile_tier_impl.cc
     ${ROCKSDB_SOURCE_DIR}/utilities/simulator_cache/cache_simulator.cc
     ${ROCKSDB_SOURCE_DIR}/utilities/simulator_cache/sim_cache.cc
-    ${ROCKSDB_SOURCE_DIR}/utilities/table_properties_collectors/compact_for_tiering_collector.cc
     ${ROCKSDB_SOURCE_DIR}/utilities/table_properties_collectors/compact_on_deletion_collector.cc
     ${ROCKSDB_SOURCE_DIR}/utilities/trace/file_trace_reader_writer.cc
     ${ROCKSDB_SOURCE_DIR}/utilities/trace/replayer_impl.cc
@@ -395,7 +388,6 @@ set(SOURCES
     ${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_prepared_txn_db.cc
     ${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_unprepared_txn.cc
     ${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_unprepared_txn_db.cc
-    ${ROCKSDB_SOURCE_DIR}/utilities/types_util.cc
     ${ROCKSDB_SOURCE_DIR}/utilities/ttl/db_ttl_impl.cc
     ${ROCKSDB_SOURCE_DIR}/utilities/wal_filter.cc
     ${ROCKSDB_SOURCE_DIR}/utilities/write_batch_with_index/write_batch_with_index.cc
@@ -426,18 +418,14 @@ if(HAS_ARMV8_CRC)
 endif(HAS_ARMV8_CRC)

 list(APPEND SOURCES
-    ${ROCKSDB_SOURCE_DIR}/port/port_posix.cc
-    ${ROCKSDB_SOURCE_DIR}/env/env_posix.cc
-    ${ROCKSDB_SOURCE_DIR}/env/fs_posix.cc
-    ${ROCKSDB_SOURCE_DIR}/env/io_posix.cc)
+    "${ROCKSDB_SOURCE_DIR}/port/port_posix.cc"
+    "${ROCKSDB_SOURCE_DIR}/env/env_posix.cc"
+    "${ROCKSDB_SOURCE_DIR}/env/fs_posix.cc"
+    "${ROCKSDB_SOURCE_DIR}/env/io_posix.cc")

 add_library(_rocksdb ${SOURCES})
 add_library(ch_contrib::rocksdb ALIAS _rocksdb)
 target_link_libraries(_rocksdb PRIVATE ${THIRDPARTY_LIBS} ${SYSTEM_LIBS})

-# Not in the native build system but useful anyways:
-# Make all functions in xxHash.h inline. Beneficial for performance: https://github.com/Cyan4973/xxHash/tree/v0.8.2#build-modifiers
-target_compile_definitions (_rocksdb PRIVATE XXH_INLINE_ALL)
-
 # SYSTEM is required to overcome some issues
 target_include_directories(_rocksdb SYSTEM BEFORE INTERFACE "${ROCKSDB_SOURCE_DIR}/include")

@@ -784,8 +784,8 @@ class ClickhouseIntegrationTestsRunner:
         logging.info("Starting check with retries")
         final_retry = 0
         logs = []
-        tires_num = 1 if should_fail else FLAKY_TRIES_COUNT
-        for i in range(tires_num):
+        tries_num = 1 if should_fail else FLAKY_TRIES_COUNT
+        for i in range(tries_num):
             final_retry += 1
             logging.info("Running tests for the %s time", i)
             counters, tests_times, log_paths = self.try_run_test_group(

@@ -836,6 +836,7 @@ class SettingsRandomizer:
         "cross_join_min_bytes_to_compress": lambda: random.choice([0, 1, 100000000]),
         "min_external_table_block_size_bytes": lambda: random.choice([0, 1, 100000000]),
         "max_parsing_threads": lambda: random.choice([0, 1, 10]),
+        "trace_profile_events": lambda: random.randint(0, 1),
         "optimize_functions_to_subcolumns": lambda: random.randint(0, 1),
     }

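Note (editor's illustration, not part of the commit): each SettingsRandomizer entry maps a setting name to a zero-argument callable, and the test harness calls every callable once per run so each test executes under a fresh random combination. A minimal self-contained sketch of that mechanism, using a hypothetical get_random_settings helper and a reduced copy of the map above:

    import random

    # Hypothetical, reduced copy of the SettingsRandomizer map.
    SETTINGS = {
        "max_parsing_threads": lambda: random.choice([0, 1, 10]),
        "trace_profile_events": lambda: random.randint(0, 1),  # entry added by this commit
    }

    def get_random_settings(settings=SETTINGS):
        # Call each lambda once so every run draws independent values.
        return {name: pick() for name, pick in settings.items()}

    print(get_random_settings())  # e.g. {'max_parsing_threads': 10, 'trace_profile_events': 1}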

@@ -80,5 +80,3 @@ EOF""",
         instance.query(f"SHOW GRANTS FOR `{user_id}`")
         == f"GRANT SELECT ON mydb.* TO `{user_id}`\n"
     )
-    instance.stop_clickhouse()
-    instance.start_clickhouse()

@@ -37,16 +37,6 @@ def get_status(dictionary_name):
     ).rstrip("\n")


-def get_status_retry(dictionary_name, expect, retry_count=10, sleep_time=0.5):
-    for _ in range(retry_count):
-        res = get_status(dictionary_name)
-        if res == expect:
-            return res
-        time.sleep(sleep_time)
-
-    raise Exception(f'Expected result "{expect}" did not occur')
-
-
 def get_last_exception(dictionary_name):
     return (
         instance.query(
@@ -263,13 +253,7 @@ def test_reload_after_fail_by_timer(started_cluster):

     # on sanitizers builds it can return 'FAILED_AND_RELOADING' which is not quite right
     # add retry for these builds
-    if (
-        instance.is_built_with_sanitizer()
-        and get_status("no_file_2") == "FAILED_AND_RELOADING"
-    ):
-        get_status_retry("no_file_2", expect="FAILED")
-
-    assert get_status("no_file_2") == "FAILED"
+    assert get_status("no_file_2") in ["FAILED", "FAILED_AND_RELOADING"]

     # Creating the file source makes the dictionary able to load.
     instance.copy_file_to_container(
@@ -284,7 +268,7 @@ def test_reload_after_fail_by_timer(started_cluster):
     )
     instance.query("SYSTEM RELOAD DICTIONARY no_file_2")
     instance.query("SELECT dictGetInt32('no_file_2', 'a', toUInt64(9))") == "10\n"
-    assert get_status("no_file_2") == "LOADED"
+    assert get_status("no_file_2") in ["LOADED", "LOADED_AND_RELOADING"]

     # Removing the file source should not spoil the loaded dictionary.
     instance.exec_in_container(
@@ -292,7 +276,7 @@ def test_reload_after_fail_by_timer(started_cluster):
     )
     time.sleep(6)
     instance.query("SELECT dictGetInt32('no_file_2', 'a', toUInt64(9))") == "10\n"
-    assert get_status("no_file_2") == "LOADED"
+    assert get_status("no_file_2") in ["LOADED", "LOADED_AND_RELOADING"]


 def test_reload_after_fail_in_cache_dictionary(started_cluster):

@@ -7,17 +7,5 @@
     <users>
         <default>
         </default>
-        <mysql_user>
-            <password>pass</password>
-        </mysql_user>
-        <postgres_user>
-            <password>pass</password>
-        </postgres_user>
-        <grpc_user>
-            <password>pass</password>
-        </grpc_user>
-        <parallel_user>
-            <password>pass</password>
-        </parallel_user>
     </users>
 </clickhouse>

@@ -5,6 +5,7 @@ import pytest
 import random
 import sys
 import threading
+import time

 from helpers.cluster import ClickHouseCluster, run_and_check

|
|||||||
return str(session_id)
|
return str(session_id)
|
||||||
|
|
||||||
|
|
||||||
|
user_counter = 0
|
||||||
|
|
||||||
|
|
||||||
|
def create_unique_user(prefix):
|
||||||
|
global user_counter
|
||||||
|
user_counter += 1
|
||||||
|
user_name = f"{prefix}_{os.getppid()}_{user_counter}"
|
||||||
|
instance.query(
|
||||||
|
f"CREATE USER {user_name} IDENTIFIED WITH plaintext_password BY 'pass'"
|
||||||
|
)
|
||||||
|
return user_name
|
||||||
|
|
||||||
|
|
||||||
def grpc_query(query, user_, pass_, raise_exception):
|
def grpc_query(query, user_, pass_, raise_exception):
|
||||||
try:
|
try:
|
||||||
query_info = clickhouse_grpc_pb2.QueryInfo(
|
query_info = clickhouse_grpc_pb2.QueryInfo(
|
||||||
@@ -117,6 +131,50 @@ def mysql_query(query, user_, pass_, raise_exception):
         assert raise_exception


+def wait_for_corresponding_login_success_and_logout(user, expected_login_count):
+    # The client can exit sooner than the server records its disconnection and closes the session.
+    # When the client disconnects, two processes happen at the same time and are in the race condition:
+    # - the client application exits and returns control to the shell;
+    # - the server closes the session and records the logout event to the session log.
+    # We cannot expect that after the control is returned to the shell, the server records the logout event.
+    sql = f"SELECT COUNT(*) FROM (SELECT {SESSION_LOG_MATCHING_FIELDS} FROM system.session_log WHERE user = '{user}' AND type = 'LoginSuccess' INTERSECT SELECT {SESSION_LOG_MATCHING_FIELDS} FROM system.session_log WHERE user = '{user}' AND type = 'Logout')"
+    logins_and_logouts = instance.query(sql)
+    while int(logins_and_logouts) != expected_login_count:
+        time.sleep(0.1)
+        logins_and_logouts = instance.query(sql)
+
+
+def check_session_log(user):
+    instance.query("SYSTEM FLUSH LOGS")
+    login_success_records = instance.query(
+        f"SELECT user, client_port <> 0, client_address <> toIPv6('::') FROM system.session_log WHERE user='{user}' AND type = 'LoginSuccess'"
+    )
+    assert login_success_records == f"{user}\t1\t1\n"
+    logout_records = instance.query(
+        f"SELECT user, client_port <> 0, client_address <> toIPv6('::') FROM system.session_log WHERE user='{user}' AND type = 'Logout'"
+    )
+    assert logout_records == f"{user}\t1\t1\n"
+    login_failure_records = instance.query(
+        f"SELECT user, client_port <> 0, client_address <> toIPv6('::') FROM system.session_log WHERE user='{user}' AND type = 'LoginFailure'"
+    )
+    assert login_failure_records == f"{user}\t1\t1\n"
+
+    wait_for_corresponding_login_success_and_logout(user, 1)
+
+
+def session_log_test(prefix, query_function):
+    user = create_unique_user(prefix)
+    wrong_user = "wrong_" + user
+
+    query_function("SELECT 1", user, "pass", False)
+    query_function("SELECT 2", user, "wrong_pass", True)
+    query_function("SELECT 3", wrong_user, "pass", True)
+
+    check_session_log(user)
+
+    instance.query(f"DROP USER {user}")
+
+
 @pytest.fixture(scope="module")
 def started_cluster():
     try:
@ -131,78 +189,21 @@ def started_cluster():
|
|||||||
|
|
||||||
|
|
||||||
def test_grpc_session(started_cluster):
|
def test_grpc_session(started_cluster):
|
||||||
grpc_query("SELECT 1", "grpc_user", "pass", False)
|
session_log_test("grpc", grpc_query)
|
||||||
grpc_query("SELECT 2", "grpc_user", "wrong_pass", True)
|
|
||||||
grpc_query("SELECT 3", "wrong_grpc_user", "pass", True)
|
|
||||||
|
|
||||||
instance.query("SYSTEM FLUSH LOGS")
|
|
||||||
login_success_records = instance.query(
|
|
||||||
"SELECT user, client_port <> 0, client_address <> toIPv6('::') FROM system.session_log WHERE user='grpc_user' AND type = 'LoginSuccess'"
|
|
||||||
)
|
|
||||||
assert login_success_records == "grpc_user\t1\t1\n"
|
|
||||||
logout_records = instance.query(
|
|
||||||
"SELECT user, client_port <> 0, client_address <> toIPv6('::') FROM system.session_log WHERE user='grpc_user' AND type = 'Logout'"
|
|
||||||
)
|
|
||||||
assert logout_records == "grpc_user\t1\t1\n"
|
|
||||||
login_failure_records = instance.query(
|
|
||||||
"SELECT user, client_port <> 0, client_address <> toIPv6('::') FROM system.session_log WHERE user='grpc_user' AND type = 'LoginFailure'"
|
|
||||||
)
|
|
||||||
assert login_failure_records == "grpc_user\t1\t1\n"
|
|
||||||
logins_and_logouts = instance.query(
|
|
||||||
f"SELECT COUNT(*) FROM (SELECT {SESSION_LOG_MATCHING_FIELDS} FROM system.session_log WHERE user = 'grpc_user' AND type = 'LoginSuccess' INTERSECT SELECT {SESSION_LOG_MATCHING_FIELDS} FROM system.session_log WHERE user = 'grpc_user' AND type = 'Logout')"
|
|
||||||
)
|
|
||||||
assert logins_and_logouts == "1\n"
|
|
||||||
|
|
||||||
|
|
||||||
def test_mysql_session(started_cluster):
|
def test_mysql_session(started_cluster):
|
||||||
mysql_query("SELECT 1", "mysql_user", "pass", False)
|
session_log_test("mysql", mysql_query)
|
||||||
mysql_query("SELECT 2", "mysql_user", "wrong_pass", True)
|
|
||||||
mysql_query("SELECT 3", "wrong_mysql_user", "pass", True)
|
|
||||||
|
|
||||||
instance.query("SYSTEM FLUSH LOGS")
|
|
||||||
login_success_records = instance.query(
|
|
||||||
"SELECT user, client_port <> 0, client_address <> toIPv6('::') FROM system.session_log WHERE user='mysql_user' AND type = 'LoginSuccess'"
|
|
||||||
)
|
|
||||||
assert login_success_records == "mysql_user\t1\t1\n"
|
|
||||||
logout_records = instance.query(
|
|
||||||
"SELECT user, client_port <> 0, client_address <> toIPv6('::') FROM system.session_log WHERE user='mysql_user' AND type = 'Logout'"
|
|
||||||
)
|
|
||||||
assert logout_records == "mysql_user\t1\t1\n"
|
|
||||||
login_failure_records = instance.query(
|
|
||||||
"SELECT user, client_port <> 0, client_address <> toIPv6('::') FROM system.session_log WHERE user='mysql_user' AND type = 'LoginFailure'"
|
|
||||||
)
|
|
||||||
assert login_failure_records == "mysql_user\t1\t1\n"
|
|
||||||
logins_and_logouts = instance.query(
|
|
||||||
f"SELECT COUNT(*) FROM (SELECT {SESSION_LOG_MATCHING_FIELDS} FROM system.session_log WHERE user = 'mysql_user' AND type = 'LoginSuccess' INTERSECT SELECT {SESSION_LOG_MATCHING_FIELDS} FROM system.session_log WHERE user = 'mysql_user' AND type = 'Logout')"
|
|
||||||
)
|
|
||||||
assert logins_and_logouts == "1\n"
|
|
||||||
|
|
||||||
|
|
||||||
def test_postgres_session(started_cluster):
|
def test_postgres_session(started_cluster):
|
||||||
postgres_query("SELECT 1", "postgres_user", "pass", False)
|
session_log_test("postgres", postgres_query)
|
||||||
postgres_query("SELECT 2", "postgres_user", "wrong_pass", True)
|
|
||||||
postgres_query("SELECT 3", "wrong_postgres_user", "pass", True)
|
|
||||||
|
|
||||||
instance.query("SYSTEM FLUSH LOGS")
|
|
||||||
login_success_records = instance.query(
|
|
||||||
"SELECT user, client_port <> 0, client_address <> toIPv6('::') FROM system.session_log WHERE user='postgres_user' AND type = 'LoginSuccess'"
|
|
||||||
)
|
|
||||||
assert login_success_records == "postgres_user\t1\t1\n"
|
|
||||||
logout_records = instance.query(
|
|
||||||
"SELECT user, client_port <> 0, client_address <> toIPv6('::') FROM system.session_log WHERE user='postgres_user' AND type = 'Logout'"
|
|
||||||
)
|
|
||||||
assert logout_records == "postgres_user\t1\t1\n"
|
|
||||||
login_failure_records = instance.query(
|
|
||||||
"SELECT user, client_port <> 0, client_address <> toIPv6('::') FROM system.session_log WHERE user='postgres_user' AND type = 'LoginFailure'"
|
|
||||||
)
|
|
||||||
assert login_failure_records == "postgres_user\t1\t1\n"
|
|
||||||
logins_and_logouts = instance.query(
|
|
||||||
f"SELECT COUNT(*) FROM (SELECT {SESSION_LOG_MATCHING_FIELDS} FROM system.session_log WHERE user = 'postgres_user' AND type = 'LoginSuccess' INTERSECT SELECT {SESSION_LOG_MATCHING_FIELDS} FROM system.session_log WHERE user = 'postgres_user' AND type = 'Logout')"
|
|
||||||
)
|
|
||||||
assert logins_and_logouts == "1\n"
|
|
||||||
|
|
||||||
|
|
||||||
def test_parallel_sessions(started_cluster):
|
def test_parallel_sessions(started_cluster):
|
||||||
|
user = create_unique_user("parallel")
|
||||||
|
wrong_user = "wrong_" + user
|
||||||
|
|
||||||
thread_list = []
|
thread_list = []
|
||||||
for _ in range(10):
|
for _ in range(10):
|
||||||
# Sleep time does not significantly matter here,
|
# Sleep time does not significantly matter here,
|
||||||
@ -212,7 +213,7 @@ def test_parallel_sessions(started_cluster):
|
|||||||
target=function,
|
target=function,
|
||||||
args=(
|
args=(
|
||||||
f"SELECT sleep({random.uniform(0.03, 0.04)})",
|
f"SELECT sleep({random.uniform(0.03, 0.04)})",
|
||||||
"parallel_user",
|
user,
|
||||||
"pass",
|
"pass",
|
||||||
False,
|
False,
|
||||||
),
|
),
|
||||||
@ -223,7 +224,7 @@ def test_parallel_sessions(started_cluster):
|
|||||||
target=function,
|
target=function,
|
||||||
args=(
|
args=(
|
||||||
f"SELECT sleep({random.uniform(0.03, 0.04)})",
|
f"SELECT sleep({random.uniform(0.03, 0.04)})",
|
||||||
"parallel_user",
|
user,
|
||||||
"wrong_pass",
|
"wrong_pass",
|
||||||
True,
|
True,
|
||||||
),
|
),
|
||||||
@ -234,7 +235,7 @@ def test_parallel_sessions(started_cluster):
|
|||||||
target=function,
|
target=function,
|
||||||
args=(
|
args=(
|
||||||
f"SELECT sleep({random.uniform(0.03, 0.04)})",
|
f"SELECT sleep({random.uniform(0.03, 0.04)})",
|
||||||
"wrong_parallel_user",
|
wrong_user,
|
||||||
"pass",
|
"pass",
|
||||||
True,
|
True,
|
||||||
),
|
),
|
||||||
@ -247,41 +248,38 @@ def test_parallel_sessions(started_cluster):
|
|||||||
|
|
||||||
instance.query("SYSTEM FLUSH LOGS")
|
instance.query("SYSTEM FLUSH LOGS")
|
||||||
port_0_sessions = instance.query(
|
port_0_sessions = instance.query(
|
||||||
f"SELECT COUNT(*) FROM system.session_log WHERE user = 'parallel_user'"
|
f"SELECT COUNT(*) FROM system.session_log WHERE user = '{user}'"
|
||||||
)
|
)
|
||||||
assert port_0_sessions == "90\n"
|
assert port_0_sessions == "90\n"
|
||||||
|
|
||||||
port_0_sessions = instance.query(
|
port_0_sessions = instance.query(
|
||||||
f"SELECT COUNT(*) FROM system.session_log WHERE user = 'parallel_user' AND client_port = 0"
|
f"SELECT COUNT(*) FROM system.session_log WHERE user = '{user}' AND client_port = 0"
|
||||||
)
|
)
|
||||||
assert port_0_sessions == "0\n"
|
assert port_0_sessions == "0\n"
|
||||||
|
|
||||||
address_0_sessions = instance.query(
|
address_0_sessions = instance.query(
|
||||||
f"SELECT COUNT(*) FROM system.session_log WHERE user = 'parallel_user' AND client_address = toIPv6('::')"
|
f"SELECT COUNT(*) FROM system.session_log WHERE user = '{user}' AND client_address = toIPv6('::')"
|
||||||
)
|
)
|
||||||
assert address_0_sessions == "0\n"
|
assert address_0_sessions == "0\n"
|
||||||
|
|
||||||
grpc_sessions = instance.query(
|
grpc_sessions = instance.query(
|
||||||
f"SELECT COUNT(*) FROM system.session_log WHERE user = 'parallel_user' AND interface = 'gRPC'"
|
f"SELECT COUNT(*) FROM system.session_log WHERE user = '{user}' AND interface = 'gRPC'"
|
||||||
)
|
)
|
||||||
assert grpc_sessions == "30\n"
|
assert grpc_sessions == "30\n"
|
||||||
|
|
||||||
mysql_sessions = instance.query(
|
mysql_sessions = instance.query(
|
||||||
f"SELECT COUNT(*) FROM system.session_log WHERE user = 'parallel_user' AND interface = 'MySQL'"
|
f"SELECT COUNT(*) FROM system.session_log WHERE user = '{user}' AND interface = 'MySQL'"
|
||||||
)
|
)
|
||||||
assert mysql_sessions == "30\n"
|
assert mysql_sessions == "30\n"
|
||||||
|
|
||||||
postgres_sessions = instance.query(
|
postgres_sessions = instance.query(
|
||||||
f"SELECT COUNT(*) FROM system.session_log WHERE user = 'parallel_user' AND interface = 'PostgreSQL'"
|
f"SELECT COUNT(*) FROM system.session_log WHERE user = '{user}' AND interface = 'PostgreSQL'"
|
||||||
)
|
)
|
||||||
assert postgres_sessions == "30\n"
|
assert postgres_sessions == "30\n"
|
||||||
|
|
||||||
logins_and_logouts = instance.query(
|
wait_for_corresponding_login_success_and_logout(user, 30)
|
||||||
f"SELECT COUNT(*) FROM (SELECT {SESSION_LOG_MATCHING_FIELDS} FROM system.session_log WHERE user = 'parallel_user' AND type = 'LoginSuccess' INTERSECT SELECT {SESSION_LOG_MATCHING_FIELDS} FROM system.session_log WHERE user = 'parallel_user' AND type = 'Logout')"
|
|
||||||
)
|
|
||||||
assert logins_and_logouts == "30\n"
|
|
||||||
|
|
||||||
logout_failure_sessions = instance.query(
|
logout_failure_sessions = instance.query(
|
||||||
f"SELECT COUNT(*) FROM system.session_log WHERE user = 'parallel_user' AND type = 'LoginFailure'"
|
f"SELECT COUNT(*) FROM system.session_log WHERE user = '{user}' AND type = 'LoginFailure'"
|
||||||
)
|
)
|
||||||
assert logout_failure_sessions == "30\n"
|
assert logout_failure_sessions == "30\n"
|
||||||
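
Note (editor's sketch, not part of the commit): wait_for_corresponding_login_success_and_logout above polls system.session_log every 0.1 s and relies on the outer test timeout to stop a run that never converges. The same wait-until-equal pattern with an explicit deadline, where fetch_count is a hypothetical stand-in for the instance.query call:

    import time

    def wait_until_equal(fetch_count, expected, poll_interval=0.1, timeout=60.0):
        # Re-read the counter until it matches, failing loudly once the deadline passes.
        deadline = time.monotonic() + timeout
        while (observed := int(fetch_count())) != expected:
            if time.monotonic() > deadline:
                raise TimeoutError(f"expected {expected}, last observed {observed}")
            time.sleep(poll_interval)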

@@ -2,7 +2,9 @@
 -- Tag no-fasttest: Not sure why fail even in sequential mode. Disabled for now to make some progress.

 SET allow_introspection_functions = 1;
+SET trace_profile_events = 0; -- This can inhibit profiler from working, because it prevents sending samples from different profilers concurrently.

+SET query_profiler_cpu_time_period_ns = 0;
 SET query_profiler_real_time_period_ns = 100000000;
 SET log_queries = 1;
 SELECT sleep(0.5), ignore('test real time query profiler');

@@ -14,7 +14,7 @@ min_trace_entries=2

 # do not use _, they should be escaped for LIKE
 query_id_tcp_prefix="01526-tcp-memory-tracking-$RANDOM-$$"
-${CLICKHOUSE_CLIENT} --log_queries=1 --max_threads=1 --max_untracked_memory=0 --memory_profiler_sample_probability=1 -q "with '$query_id_tcp_prefix' as __id $query FORMAT Null"
+${CLICKHOUSE_CLIENT} --log_queries=1 --max_threads=1 --max_untracked_memory=0 --memory_profiler_sample_probability=1 --trace_profile_events 0 -q "with '$query_id_tcp_prefix' as __id $query FORMAT Null"
 ${CLICKHOUSE_CLIENT} -q "SYSTEM FLUSH LOGS"
 query_id_tcp="$(${CLICKHOUSE_CLIENT} -q "SELECT DISTINCT query_id FROM system.query_log WHERE current_database = currentDatabase() AND query LIKE '%$query_id_tcp_prefix%'")"
 ${CLICKHOUSE_CLIENT} -q "SELECT count()>=$min_trace_entries FROM system.trace_log WHERE query_id = '$query_id_tcp' AND abs(size) < 4e6 AND event_time >= now() - interval 1 hour"
@@ -23,7 +23,7 @@ ${CLICKHOUSE_CLIENT} -q "SELECT count()>=$min_trace_entries FROM system.trace_lo

 # query_id cannot be longer then 28 bytes
 query_id_http="01526_http_${RANDOM}_$$"
-echo "$query" | ${CLICKHOUSE_CURL} -sSg -o /dev/null "${CLICKHOUSE_URL}&query_id=$query_id_http&max_untracked_memory=0&memory_profiler_sample_probability=1&max_threads=1" -d @-
+echo "$query" | ${CLICKHOUSE_CURL} -sSg -o /dev/null "${CLICKHOUSE_URL}&query_id=$query_id_http&max_untracked_memory=0&memory_profiler_sample_probability=1&max_threads=1&trace_profile_events=0" -d @-
 ${CLICKHOUSE_CLIENT} -q "SYSTEM FLUSH LOGS"
 # at least 2, one allocation, one deallocation
 # (but actually even more)

@@ -44,6 +44,16 @@ for interface in 'TCP' 'HTTP' 'MySQL'
 do
     LOGIN_COUNT=`${CLICKHOUSE_CLIENT} -q "SELECT count(*) FROM system.session_log WHERE user = '${TEST_USER}' AND type = 'LoginSuccess' AND interface = '${interface}'"`
     CORRESPONDING_LOGOUT_RECORDS_COUNT=`${CLICKHOUSE_CLIENT} -q "SELECT COUNT(*) FROM (SELECT ${SESSION_LOG_MATCHING_FIELDS} FROM system.session_log WHERE user = '${TEST_USER}' AND type = 'LoginSuccess' AND interface = '${interface}' INTERSECT SELECT ${SESSION_LOG_MATCHING_FIELDS} FROM system.session_log WHERE user = '${TEST_USER}' AND type = 'Logout' AND interface = '${interface}')"`
+    # The client can exit sooner than the server records its disconnection and closes the session.
+    # When the client disconnects, two processes happen at the same time and are in the race condition:
+    # - the client application exits and returns control to the shell;
+    # - the server closes the session and records the logout event to the session log.
+    # We cannot expect that after the control is returned to the shell, the server records the logout event.
+    while [ "$LOGIN_COUNT" != "$CORRESPONDING_LOGOUT_RECORDS_COUNT" ]
+    do
+        sleep 0.1
+        CORRESPONDING_LOGOUT_RECORDS_COUNT=`${CLICKHOUSE_CLIENT} -q "SELECT COUNT(*) FROM (SELECT ${SESSION_LOG_MATCHING_FIELDS} FROM system.session_log WHERE user = '${TEST_USER}' AND type = 'LoginSuccess' AND interface = '${interface}' INTERSECT SELECT ${SESSION_LOG_MATCHING_FIELDS} FROM system.session_log WHERE user = '${TEST_USER}' AND type = 'Logout' AND interface = '${interface}')"`
+    done

     if [ "$LOGIN_COUNT" == "$CORRESPONDING_LOGOUT_RECORDS_COUNT" ]; then
         echo "${interface} Login and logout count is equal"

@@ -1,5 +1,9 @@
+-- Tags: no-parallel, no-fasttest
+-- ^ because query_thread_log is not guaranteed to be written under high load
+-- (when the queue is full, events are silently dropped)
+
 -- enforce some defaults to be sure that the env settings will not affect the test
-SET max_threads=5, async_socket_for_remote=1, prefer_localhost_replica=1, optimize_read_in_order=1, load_marks_asynchronously=0, local_filesystem_read_method='pread', remote_filesystem_read_method='read';
+SET max_threads=5, async_socket_for_remote=1, prefer_localhost_replica=1, optimize_read_in_order=1, load_marks_asynchronously=0, local_filesystem_read_method='pread', remote_filesystem_read_method='read', trace_profile_events=0;

 -- we use query_thread_log to check peak thread usage
 -- after https://github.com/ClickHouse/ClickHouse/issues/53417 there is a simpler way to check it

@@ -21,9 +21,9 @@ $CLICKHOUSE_CLIENT -nm -q "
     drop table if exists rmt_master;
     drop table if exists rmt_slave;

-    create table rmt_master (key Int) engine=ReplicatedMergeTree('/clickhouse/{database}', 'master') order by key settings always_fetch_merged_part=0;
+    create table rmt_master (key Int) engine=ReplicatedMergeTree('/clickhouse/{database}', 'master') order by key settings always_fetch_merged_part=0, old_parts_lifetime=600;
     -- always_fetch_merged_part=1, consider this table as a 'slave'
-    create table rmt_slave (key Int) engine=ReplicatedMergeTree('/clickhouse/{database}', 'slave') order by key settings always_fetch_merged_part=1;
+    create table rmt_slave (key Int) engine=ReplicatedMergeTree('/clickhouse/{database}', 'slave') order by key settings always_fetch_merged_part=1, old_parts_lifetime=600;

     insert into rmt_master values (1);

@@ -3,14 +3,11 @@
 SELECT * FROM loop(numbers(3)) LIMIT 10;
 SELECT * FROM loop (numbers(3)) LIMIT 10 settings max_block_size = 1;

-DROP DATABASE IF EXISTS 03147_db;
-CREATE DATABASE IF NOT EXISTS 03147_db;
-CREATE TABLE 03147_db.t (n Int8) ENGINE=MergeTree ORDER BY n;
-INSERT INTO 03147_db.t SELECT * FROM numbers(10);
-USE 03147_db;
+CREATE TABLE t (n Int8) ENGINE=MergeTree ORDER BY n;
+INSERT INTO t SELECT * FROM numbers(10);

-SELECT * FROM loop(03147_db.t) LIMIT 15;
+SELECT * FROM loop({CLICKHOUSE_DATABASE:Identifier}.t) LIMIT 15;
 SELECT * FROM loop(t) LIMIT 15;
-SELECT * FROM loop(03147_db, t) LIMIT 15;
+SELECT * FROM loop({CLICKHOUSE_DATABASE:Identifier}, t) LIMIT 15;

 SELECT * FROM loop('', '') -- { serverError UNKNOWN_TABLE }

@@ -1,5 +1,4 @@
 #!/usr/bin/env bash
-# Tags: no-parallel

 CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # shellcheck source=../shell_config.sh

@@ -1,11 +1,10 @@
 #!/usr/bin/env bash
-# Tags: no-parallel, no-ordinary-database

 CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # shellcheck source=../shell_config.sh
 . "$CUR_DIR"/../shell_config.sh

-SQL_FILE_NAME=$"03156_default_multiquery_split_$$.sql"
+SQL_FILE_NAME=$"03156_default_multiquery_split_${CLICKHOUSE_DATABASE}.sql"

 # The old multiquery implementation uses '\n' to split INSERT query segmentation
 # this case is mainly to test the following situations

@@ -1,4 +1,4 @@
--- Tags: no-fasttest, no-parallel, no-random-settings
+-- Tags: no-fasttest, no-random-settings

 set max_insert_threads=1;

@@ -22,4 +22,4 @@ CREATE TABLE test_parquet (col1 String, col2 String, col3 String, col4 String, c
 INSERT INTO test_parquet SELECT rand(),rand(),rand(),rand(),rand(),rand(),rand() FROM numbers(100000);
 SELECT max(blockSize()) FROM test_parquet;

 DROP TABLE IF EXISTS test_parquet;

@@ -1,4 +1,3 @@
-1
 3168 8613
 [] ['SELECT(a, b) ON default.d_03168_query_log']
 [] []

@@ -1,32 +1,28 @@
 #!/usr/bin/env bash
-# Tags: no-parallel

 CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # shellcheck source=../shell_config.sh
 . "$CUR_DIR"/../shell_config.sh

-user_name="u_03168_query_log"
-table_name="default.d_03168_query_log"
+user_name="u_03168_query_log_${CLICKHOUSE_DATABASE}"
+table_name="d_03168_query_log"
 test_query="select a, b from ${table_name}"

-${CLICKHOUSE_CLIENT_BINARY} --query "drop user if exists ${user_name}"
-${CLICKHOUSE_CLIENT_BINARY} --query "create user ${user_name}"
-${CLICKHOUSE_CLIENT_BINARY} --query "drop table if exists ${table_name}"
-${CLICKHOUSE_CLIENT_BINARY} --query "create table ${table_name} (a UInt64, b UInt64) order by a"
-
-${CLICKHOUSE_CLIENT_BINARY} --query "insert into table ${table_name} values (3168, 8613)"
-
-error="$(${CLICKHOUSE_CLIENT_BINARY} --user ${user_name} --query "${test_query}" 2>&1 >/dev/null)"
-echo "${error}" | grep -Fc "ACCESS_DENIED"
-
-${CLICKHOUSE_CLIENT_BINARY} --query "grant select(a, b) on ${table_name} to ${user_name}"
-
-${CLICKHOUSE_CLIENT_BINARY} --user ${user_name} --query "${test_query}"
-
-${CLICKHOUSE_CLIENT_BINARY} --query "system flush logs"
-${CLICKHOUSE_CLIENT_BINARY} --query "select used_privileges, missing_privileges from system.query_log where query = '${test_query}' and type = 'ExceptionBeforeStart' and current_database = currentDatabase() order by event_time desc limit 1"
-${CLICKHOUSE_CLIENT_BINARY} --query "select used_privileges, missing_privileges from system.query_log where query = '${test_query}' and type = 'QueryStart' and current_database = currentDatabase() order by event_time desc limit 1"
-${CLICKHOUSE_CLIENT_BINARY} --query "select used_privileges, missing_privileges from system.query_log where query = '${test_query}' and type = 'QueryFinish' and current_database = currentDatabase() order by event_time desc limit 1"
-
-${CLICKHOUSE_CLIENT_BINARY} --query "drop table ${table_name}"
-${CLICKHOUSE_CLIENT_BINARY} --query "drop user ${user_name}"
+${CLICKHOUSE_CLIENT} --query "drop user if exists ${user_name}"
+${CLICKHOUSE_CLIENT} --query "create user ${user_name}"
+${CLICKHOUSE_CLIENT} --query "drop table if exists ${table_name}"
+${CLICKHOUSE_CLIENT} --query "create table ${table_name} (a UInt64, b UInt64) order by a"
+${CLICKHOUSE_CLIENT} --query "insert into table ${table_name} values (3168, 8613)"
+
+${CLICKHOUSE_CLIENT} --user ${user_name} --query "${test_query}" 2>&1 >/dev/null | (grep -q "ACCESS_DENIED" || echo "Expected ACCESS_DENIED error not found")
+
+${CLICKHOUSE_CLIENT} --query "grant select(a, b) on ${table_name} to ${user_name}"
+${CLICKHOUSE_CLIENT} --user ${user_name} --query "${test_query}"
+
+${CLICKHOUSE_CLIENT} --query "system flush logs"
+${CLICKHOUSE_CLIENT} --query "select used_privileges, missing_privileges from system.query_log where query = '${test_query}' and type = 'ExceptionBeforeStart' and current_database = currentDatabase() order by event_time desc limit 1"
+${CLICKHOUSE_CLIENT} --query "select used_privileges, missing_privileges from system.query_log where query = '${test_query}' and type = 'QueryStart' and current_database = currentDatabase() order by event_time desc limit 1"
+${CLICKHOUSE_CLIENT} --query "select used_privileges, missing_privileges from system.query_log where query = '${test_query}' and type = 'QueryFinish' and current_database = currentDatabase() order by event_time desc limit 1"
+
+${CLICKHOUSE_CLIENT} --query "drop table ${table_name}"
+${CLICKHOUSE_CLIENT} --query "drop user ${user_name}"

@@ -1,5 +1,3 @@
--- Tags: no-parallel
-
 CREATE TABLE x ( hash_id UInt64, user_result Decimal(3, 2) ) ENGINE = Memory();

 CREATE TABLE y ( hash_id UInt64, user_result DECIMAL(18, 6) ) ENGINE = Memory();

@@ -1,4 +1,5 @@
 -- Tags: no-random-settings, no-object-storage, no-parallel
+-- no-parallel: Running `DROP MARK CACHE` can have a big impact on other concurrent tests
 -- Tag no-object-storage: this test relies on the number of opened files in MergeTree that can differ in object storages

 SET allow_experimental_dynamic_type = 1;

@@ -1,5 +1,6 @@
 #!/usr/bin/env bash
-# Tags: no-parallel, no-fasttest
+# Tags: no-fasttest
+# no-fasttest: Requires libraries

 set -e