Merge remote-tracking branch 'origin/master' into HEAD
Commit 1829bd09cb (ClickHouse/ClickHouse)
@@ -15,6 +15,10 @@ if (COMPILER_GCC)
elseif (COMPILER_CLANG)
    # Require minimum version of clang/apple-clang
    if (CMAKE_CXX_COMPILER_ID MATCHES "AppleClang")
        # If you are developer you can figure out what exact versions of AppleClang are Ok,
        # remove the following line and commit changes below.
        message (FATAL_ERROR "AppleClang is not supported, you should install clang from brew.")

        # AppleClang 10.0.1 (Xcode 10.2) corresponds to LLVM/Clang upstream version 7.0.0
        # AppleClang 11.0.0 (Xcode 11.0) corresponds to LLVM/Clang upstream version 8.0.0
        set (XCODE_MINIMUM_VERSION 10.2)
@@ -53,16 +53,18 @@ RUN apt-get update \
            ninja-build \
            psmisc \
            python3 \
            python3-pip \
            python3-lxml \
            python3-requests \
            python3-termcolor \
            qemu-user-static \
            rename \
            software-properties-common \
            tzdata \
            unixodbc \
            --yes --no-install-recommends

RUN pip3 install numpy scipy pandas

# This symlink is required by gcc to find the lld linker
RUN ln -s /usr/bin/lld-${LLVM_VERSION} /usr/bin/ld.lld
@@ -191,63 +191,65 @@ stop_server ||:

start_server

TESTS_TO_SKIP=(
    parquet
    avro
    h3
    odbc
    mysql
    sha256
    _orc_
    arrow
    01098_temporary_and_external_tables
    01083_expressions_in_engine_arguments
    hdfs
    00911_tautological_compare
    protobuf
    capnproto
    java_hash
    hashing
    secure
    00490_special_line_separators_and_characters_outside_of_bmp
    00436_convert_charset
    00105_shard_collations
    01354_order_by_tuple_collate_const
    01292_create_user
    01098_msgpack_format
    00929_multi_match_edit_distance
    00926_multimatch
    00834_cancel_http_readonly_queries_on_client_close
    brotli
    parallel_alter
    00109_shard_totals_after_having
    00110_external_sort
    00302_http_compression
    00417_kill_query
    01294_lazy_database_concurrent
    01193_metadata_loading
    base64
    01031_mutations_interpreter_and_context
    json
    client
    01305_replica_create_drop_zookeeper
    01092_memory_profiler
    01355_ilike
    01281_unsucceeded_insert_select_queries_counter
    live_view
    limit_memory
    memory_limit
    memory_leak
    00110_external_sort
    00436_convert_charset
    00490_special_line_separators_and_characters_outside_of_bmp
    00652_replicated_mutations_zookeeper
    00682_empty_parts_merge
    00701_rollup
    00109_shard_totals_after_having
    ddl_dictionaries
    00834_cancel_http_readonly_queries_on_client_close
    00911_tautological_compare
    00926_multimatch
    00929_multi_match_edit_distance
    01031_mutations_interpreter_and_context
    01053_ssd_dictionary # this test mistakenly requires access to /var/lib/clickhouse -- can't run this locally, disabled
    01083_expressions_in_engine_arguments
    01092_memory_profiler
    01098_msgpack_format
    01098_temporary_and_external_tables
    01103_check_cpu_instructions_at_startup # avoid dependency on qemu -- inconvenient when running locally
    01193_metadata_loading
    01238_http_memory_tracking # max_memory_usage_for_user can interfere with other queries running concurrently
    01251_dict_is_in_infinite_loop
    01259_dictionary_custom_settings_ddl
    01268_dictionary_direct_layout
    01280_ssd_complex_key_dictionary
    00652_replicated_mutations_zookeeper
    01411_bayesian_ab_testing
    01238_http_memory_tracking # max_memory_usage_for_user can interfere with other queries running concurrently
    01281_group_by_limit_memory_tracking # max_memory_usage_for_user can interfere with other queries running concurrently
    01281_unsucceeded_insert_select_queries_counter
    01292_create_user
    01294_lazy_database_concurrent
    01305_replica_create_drop_zookeeper
    01354_order_by_tuple_collate_const
    01355_ilike
    01411_bayesian_ab_testing
    _orc_
    arrow
    avro
    base64
    brotli
    capnproto
    client
    ddl_dictionaries
    h3
    hashing
    hdfs
    java_hash
    json
    limit_memory
    live_view
    memory_leak
    memory_limit
    mysql
    odbc
    parallel_alter
    parquet
    protobuf
    secure
    sha256

    # Not sure why these two fail even in sequential mode. Disabled for now
    # to make some progress.
@@ -258,7 +260,7 @@ TESTS_TO_SKIP=(
    01460_DistributedFilesToInsert
)

time clickhouse-test -j 8 --no-long --testname --shard --zookeeper --skip "${TESTS_TO_SKIP[@]}" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee "$FASTTEST_OUTPUT/test_log.txt"
time clickhouse-test -j 8 --order=random --no-long --testname --shard --zookeeper --skip "${TESTS_TO_SKIP[@]}" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee "$FASTTEST_OUTPUT/test_log.txt"

# substr is to remove semicolon after test name
readarray -t FAILED_TESTS < <(awk '/FAIL|TIMEOUT|ERROR/ { print substr($3, 1, length($3)-1) }' "$FASTTEST_OUTPUT/test_log.txt" | tee "$FASTTEST_OUTPUT/failed-parallel-tests.txt")
@@ -281,7 +283,7 @@ then

    echo "Going to run again: ${FAILED_TESTS[*]}"

    clickhouse-test --no-long --testname --shard --zookeeper "${FAILED_TESTS[@]}" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee -a "$FASTTEST_OUTPUT/test_log.txt"
    clickhouse-test --order=random --no-long --testname --shard --zookeeper "${FAILED_TESTS[@]}" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee -a "$FASTTEST_OUTPUT/test_log.txt"
else
    echo "No failed tests"
fi
@@ -37,7 +37,28 @@ RUN apt-get update \

ENV TZ=Europe/Moscow
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone

RUN python3 -m pip install urllib3==1.23 pytest docker-compose==1.22.0 docker dicttoxml kazoo PyMySQL psycopg2==2.7.5 pymongo tzlocal kafka-python protobuf redis aerospike pytest-timeout minio grpcio grpcio-tools cassandra-driver confluent-kafka avro
RUN python3 -m pip install \
    PyMySQL \
    aerospike \
    avro \
    cassandra-driver \
    confluent-kafka \
    dicttoxml \
    docker \
    docker-compose==1.22.0 \
    grpcio \
    grpcio-tools \
    kafka-python \
    kazoo \
    minio \
    protobuf \
    psycopg2-binary==2.7.5 \
    pymongo \
    pytest \
    pytest-timeout \
    redis \
    tzlocal \
    urllib3

ENV DOCKER_CHANNEL stable
ENV DOCKER_VERSION 17.09.1-ce
@@ -45,7 +45,7 @@ function start()
    # for clickhouse-server (via service)
    echo "ASAN_OPTIONS='malloc_context_size=10 verbosity=1 allocator_release_to_os_interval_ms=10000'" >> /etc/environment
    # for clickhouse-client
    export ASAN_OPTIONS='malloc_context_size=10 verbosity=1 allocator_release_to_os_interval_ms=10000'
    export ASAN_OPTIONS='malloc_context_size=10 allocator_release_to_os_interval_ms=10000'

    start
@@ -28,8 +28,18 @@ def get_options(i):
    options = ""
    if 0 < i:
        options += " --order=random"

    if i % 2 == 1:
        options += " --db-engine=Ordinary"

    # If database name is not specified, new database is created for each functional test.
    # Run some threads with one database for all tests.
    if i % 3 == 1:
        options += " --database=test_{}".format(i)

    if i == 13:
        options += " --client-option='memory_tracker_fault_probability=0.00001'"

    return options
@@ -117,7 +117,9 @@ CREATE TABLE table_name

</details>

As the example shows, these parameters can contain substitutions in curly brackets. The substituted values are taken from the ‘macros’ section of the configuration file. Example:
As the example shows, these parameters can contain substitutions in curly brackets. The substituted values are taken from the [macros](../../../operations/server-configuration-parameters/settings/#macros) section of the configuration file.

Example:

``` xml
<macros>
@@ -137,6 +139,9 @@ In this case, the path consists of the following parts:
`table_name` is the name of the node for the table in ZooKeeper. It is a good idea to make it the same as the table name. It is defined explicitly, because in contrast to the table name, it doesn’t change after a RENAME query.
*HINT*: you could add a database name in front of `table_name` as well, e.g. `db_name.table_name`.

The two built-in substitutions `{database}` and `{table}` can be used; they expand into the database name and the table name respectively (unless these macros are defined in the `macros` section). So the ZooKeeper path can be specified as `'/clickhouse/tables/{layer}-{shard}/{database}/{table}'`.
Be careful with table renames when using these built-in substitutions. The path in ZooKeeper cannot be changed, and when the table is renamed, the macros will expand into a different path; the table will then refer to a path that does not exist in ZooKeeper and will go into read-only mode.
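
For illustration, here is a minimal sketch of a table definition that relies on these substitutions (the table and column names are hypothetical and not part of this commit):

``` sql
CREATE TABLE hits
(
    EventDate Date,
    UserID UInt64
)
ENGINE = ReplicatedMergeTree('/clickhouse/tables/{layer}-{shard}/{database}/{table}', '{replica}')
PARTITION BY toYYYYMM(EventDate)
ORDER BY UserID;
```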

The replica name identifies different replicas of the same table. You can use the server name for this, as in the example. The name only needs to be unique within each shard.

You can define the parameters explicitly instead of using substitutions. This might be convenient for testing and for configuring small clusters. However, you can’t use distributed DDL queries (`ON CLUSTER`) in this case.
@@ -113,7 +113,9 @@ CREATE TABLE table_name

</details>

As the example shows, these parameters can contain substitutions in curly brackets. The substituted values are taken from the `macros` section of the configuration file. Example:
As the example shows, these parameters can contain substitutions in curly brackets. The substituted values are taken from the [macros](../../../operations/server-configuration-parameters/settings/#macros) section of the configuration file.

Example:

``` xml
<macros>
@@ -133,6 +135,9 @@ CREATE TABLE table_name
`table_name` is the name of the node for the table in ZooKeeper. It makes sense to use the same value as the table name. It is specified explicitly because, unlike the table name, it does not change after a RENAME query.
*Hint*: you can also put a database name in front of `table_name`, for example `db_name.table_name`.

The two built-in substitutions `{database}` and `{table}` can be used; they expand into the database name and the table name respectively (unless these macros are redefined in the `macros` section). So the ZooKeeper path can be specified as `'/clickhouse/tables/{layer}-{shard}/{database}/{table}'`.
Be careful with table renames when using these built-in substitutions. The path in ZooKeeper cannot be changed, and after the table is renamed the macros expand into a different path; the table will then refer to a path that does not exist in ZooKeeper and will switch to read-only mode.

The replica name identifies different replicas of the same table. You can use the server name for it, as in the example; the name only needs to be unique within each shard.

You can specify the corresponding parameters explicitly instead of using substitutions. This can be convenient for testing and for configuring small clusters, but distributed DDL queries (`ON CLUSTER`) cannot be used in this case.
@@ -387,7 +387,7 @@ ClickHouse checks the conditions for `min_part_size` and `min_part

May be omitted if replicated tables are not used.

For details, see the section "[Creating replicated tables](../../operations/server-configuration-parameters/settings.md)".
For details, see the section "[Creating replicated tables](../../engines/table-engines/mergetree-family/replication.md)".

**Example**
@@ -1164,9 +1164,9 @@ ClickHouse throws an exception

## insert_quorum_timeout {#settings-insert_quorum_timeout}

Quorum write timeout in seconds. If the timeout has elapsed and the write has not yet taken place, ClickHouse throws an exception and the client must retry the query to write the same block to the same or any other replica.
Quorum write timeout in milliseconds. If the timeout has elapsed and the write has not yet taken place, ClickHouse throws an exception and the client must retry the query to write the same block to the same or any other replica.

Default value: 60 seconds.
Default value: 600000 milliseconds (10 minutes).
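
To make the unit change concrete, here is a minimal hedged sketch of these settings in use (`test_quorum` is a hypothetical ReplicatedMergeTree table, not something defined in this commit):

``` sql
SET insert_quorum = 2;               -- wait for acknowledgement from 2 replicas
SET insert_quorum_timeout = 600000;  -- the timeout is expressed in milliseconds (10 minutes)
INSERT INTO test_quorum VALUES (1);
```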

See also:
@@ -1202,8 +1202,15 @@ private:
        }
        catch (...)
        {
            // Some functions (e.g. protocol parsers) don't throw, but
            // set last_exception instead, so we'll also do it here for
            // uniformity.
            last_exception_received_from_server = std::make_unique<Exception>(getCurrentExceptionMessage(true), getCurrentExceptionCode());
            received_exception_from_server = true;
        }

        if (received_exception_from_server)
        {
            fmt::print(stderr, "Error on processing query '{}': {}\n",
                ast_to_process->formatForErrorMessage(),
                last_exception_received_from_server->message());
@@ -1213,29 +1220,30 @@ private:
        {
            // Probably the server is dead because we found an assertion
            // failure. Fail fast.
            fmt::print(stderr, "Lost connection to the server\n");
            return begin;
        }

        // The server is still alive so we're going to continue fuzzing.
        // Determine what we're going to use as the starting AST.
        if (received_exception_from_server)
        {
            // Query completed with error, ignore it and fuzz again.
            fprintf(stderr, "Got error, will fuzz again\n");

            // Query completed with error, keep the previous starting AST.
            // Also discard the exception that we now know to be non-fatal,
            // so that it doesn't influence the exit code.
            last_exception_received_from_server.reset(nullptr);
            received_exception_from_server = false;
            last_exception_received_from_server.reset();

            continue;
        }
        else if (ast_to_process->formatForErrorMessage().size() > 500)
        {
            // ast too long, start from original ast
            fprintf(stderr, "current ast too long, won't elaborate\n");
            fprintf(stderr, "Current AST is too long, discarding it and using the original AST as a start\n");
            fuzz_base = orig_ast;
        }
        else
        {
            // fuzz starting from this successful query
            fprintf(stderr, "using this ast as etalon\n");
            fprintf(stderr, "Query succeeded, using this AST as a start\n");
            fuzz_base = ast_to_process;
        }
    }
@@ -2,6 +2,7 @@

#include <string.h>
#include <cxxabi.h>
#include <cstdlib>
#include <Poco/String.h>
#include <common/logger_useful.h>
#include <IO/WriteHelpers.h>
@@ -36,13 +37,13 @@ namespace ErrorCodes
Exception::Exception(const std::string & msg, int code)
    : Poco::Exception(msg, code)
{
    // In debug builds, treat LOGICAL_ERROR as an assertion failure.
    // In debug builds and builds with sanitizers, treat LOGICAL_ERROR as an assertion failure.
    // Log the message before we fail.
#ifndef NDEBUG
#ifdef ABORT_ON_LOGICAL_ERROR
    if (code == ErrorCodes::LOGICAL_ERROR)
    {
        LOG_ERROR(&Poco::Logger::root(), "Logical error: '{}'.", msg);
        assert(false);
        LOG_FATAL(&Poco::Logger::root(), "Logical error: '{}'.", msg);
        abort();
    }
#endif
}
@@ -10,6 +10,10 @@

#include <fmt/format.h>

#if !defined(NDEBUG) || defined(ADDRESS_SANITIZER) || defined(THREAD_SANITIZER) || defined(MEMORY_SANITIZER) || defined(UNDEFINED_BEHAVIOR_SANITIZER)
#define ABORT_ON_LOGICAL_ERROR
#endif

namespace Poco { class Logger; }
@@ -131,7 +131,10 @@ TEST(Common, RWLockRecursive)

        auto lock2 = fifo_lock->getLock(RWLockImpl::Read, "q2");

#ifndef ABORT_ON_LOGICAL_ERROR
        /// It throws LOGICAL_ERROR
        EXPECT_ANY_THROW({fifo_lock->getLock(RWLockImpl::Write, "q2");});
#endif
    }

    fifo_lock->getLock(RWLockImpl::Write, "q2");
@@ -787,12 +787,6 @@ void DDLWorker::processTask(DDLTask & task)
        storage = DatabaseCatalog::instance().tryGetTable(table_id, context);
    }

    /// For some reason we check consistency of cluster definition only
    /// in case of ALTER query, but not in case of CREATE/DROP etc.
    /// It's strange, but this behaviour exits for a long and we cannot change it.
    if (storage && query_with_table->as<ASTAlterQuery>())
        checkShardConfig(query_with_table->table, task, storage);

    if (storage && taskShouldBeExecutedOnLeader(rewritten_ast, storage) && !is_circular_replicated)
        tryExecuteQueryOnLeaderReplica(task, storage, rewritten_query, task.entry_path, zookeeper);
    else
@@ -837,35 +831,6 @@ bool DDLWorker::taskShouldBeExecutedOnLeader(const ASTPtr ast_ddl, const Storage
    return storage->supportsReplication();
}


void DDLWorker::checkShardConfig(const String & table, const DDLTask & task, StoragePtr storage) const
{
    const auto & shard_info = task.cluster->getShardsInfo().at(task.host_shard_num);
    bool config_is_replicated_shard = shard_info.hasInternalReplication();

    if (dynamic_cast<const StorageDistributed *>(storage.get()))
    {
        LOG_TRACE(log, "Table {} is distributed, skip checking config.", backQuote(table));
        return;
    }

    if (storage->supportsReplication() && !config_is_replicated_shard)
    {
        throw Exception(ErrorCodes::INCONSISTENT_CLUSTER_DEFINITION,
            "Table {} is replicated, but shard #{} isn't replicated according to its cluster definition. "
            "Possibly <internal_replication>true</internal_replication> is forgotten in the cluster config.",
            backQuote(table), task.host_shard_num + 1);
    }

    if (!storage->supportsReplication() && config_is_replicated_shard)
    {
        throw Exception(ErrorCodes::INCONSISTENT_CLUSTER_DEFINITION,
            "Table {} isn't replicated, but shard #{} is replicated according to its cluster definition",
            backQuote(table), task.host_shard_num + 1);
    }
}


bool DDLWorker::tryExecuteQueryOnLeaderReplica(
    DDLTask & task,
    StoragePtr storage,
@@ -75,9 +75,6 @@ private:
    /// Check that query should be executed on leader replica only
    static bool taskShouldBeExecutedOnLeader(const ASTPtr ast_ddl, StoragePtr storage);

    /// Check that shard has consistent config with table
    void checkShardConfig(const String & table, const DDLTask & task, StoragePtr storage) const;

    /// Executes query only on leader replica in case of replicated table.
    /// Queries like TRUNCATE/ALTER .../OPTIMIZE have to be executed only on one node of shard.
    /// Most of these queries can be executed on non-leader replica, but actually they still send
@@ -421,11 +421,17 @@ bool ExpressionAnalyzer::makeAggregateDescriptions(ActionsDAGPtr & actions)
        aggregate.argument_names.resize(arguments.size());
        DataTypes types(arguments.size());

        const auto & index = actions->getIndex();
        for (size_t i = 0; i < arguments.size(); ++i)
        {
            getRootActionsNoMakeSet(arguments[i], true, actions);
            const std::string & name = arguments[i]->getColumnName();
            types[i] = actions->getIndex().find(name)->second->result_type;

            auto it = index.find(name);
            if (it == index.end())
                throw Exception(ErrorCodes::UNKNOWN_IDENTIFIER, "Unknown identifier (in aggregate function '{}'): {}", node->name, name);

            types[i] = it->second->result_type;
            aggregate.argument_names[i] = name;
        }
@@ -124,6 +124,8 @@ ASTPtr ASTColumns::clone() const
        res->set(res->indices, indices->clone());
    if (constraints)
        res->set(res->constraints, constraints->clone());
    if (primary_key)
        res->set(res->primary_key, primary_key->clone());

    return res;
}
@@ -41,6 +41,7 @@ public:
    ASTExpressionList * columns = nullptr;
    ASTExpressionList * indices = nullptr;
    ASTExpressionList * constraints = nullptr;
    IAST * primary_key = nullptr;

    String getID(char) const override { return "Columns definition"; }
@@ -19,6 +19,11 @@
namespace DB
{

namespace ErrorCodes
{
    extern const int BAD_ARGUMENTS;
}

bool ParserNestedTable::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
{
    ParserToken open(TokenType::OpeningRoundBracket);
@@ -150,10 +155,12 @@ bool ParserTablePropertyDeclaration::parseImpl(Pos & pos, ASTPtr & node, Expecte
{
    ParserKeyword s_index("INDEX");
    ParserKeyword s_constraint("CONSTRAINT");
    ParserKeyword s_primary_key("PRIMARY KEY");

    ParserIndexDeclaration index_p;
    ParserConstraintDeclaration constraint_p;
    ParserColumnDeclaration column_p{true, true};
    ParserExpression primary_key_p;

    ASTPtr new_node = nullptr;

@@ -167,6 +174,11 @@ bool ParserTablePropertyDeclaration::parseImpl(Pos & pos, ASTPtr & node, Expecte
        if (!constraint_p.parse(pos, new_node, expected))
            return false;
    }
    else if (s_primary_key.ignore(pos, expected))
    {
        if (!primary_key_p.parse(pos, new_node, expected))
            return false;
    }
    else
    {
        if (!column_p.parse(pos, new_node, expected))
@@ -201,6 +213,7 @@ bool ParserTablePropertiesDeclarationList::parseImpl(Pos & pos, ASTPtr & node, E
    ASTPtr columns = std::make_shared<ASTExpressionList>();
    ASTPtr indices = std::make_shared<ASTExpressionList>();
    ASTPtr constraints = std::make_shared<ASTExpressionList>();
    ASTPtr primary_key;

    for (const auto & elem : list->children)
    {
@@ -210,6 +223,14 @@ bool ParserTablePropertiesDeclarationList::parseImpl(Pos & pos, ASTPtr & node, E
            indices->children.push_back(elem);
        else if (elem->as<ASTConstraintDeclaration>())
            constraints->children.push_back(elem);
        else if (elem->as<ASTIdentifier>() || elem->as<ASTFunction>())
        {
            if (primary_key)
            {
                throw Exception("Multiple primary keys are not allowed.", ErrorCodes::BAD_ARGUMENTS);
            }
            primary_key = elem;
        }
        else
            return false;
    }
@@ -222,6 +243,8 @@ bool ParserTablePropertiesDeclarationList::parseImpl(Pos & pos, ASTPtr & node, E
        res->set(res->indices, indices);
    if (!constraints->children.empty())
        res->set(res->constraints, constraints);
    if (primary_key)
        res->set(res->primary_key, primary_key);

    node = res;

@@ -472,6 +495,15 @@ bool ParserCreateTableQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expe
    query->set(query->columns_list, columns_list);
    query->set(query->storage, storage);

    if (query->storage && query->columns_list && query->columns_list->primary_key)
    {
        if (query->storage->primary_key)
        {
            throw Exception("Multiple primary keys are not allowed.", ErrorCodes::BAD_ARGUMENTS);
        }
        query->storage->primary_key = query->columns_list->primary_key;
    }

    tryGetIdentifierNameInto(as_database, query->as_database);
    tryGetIdentifierNameInto(as_table, query->as_table);
    query->set(query->select, select);
|
||||
* ...
|
||||
* INDEX name1 expr TYPE type1(args) GRANULARITY value,
|
||||
* ...
|
||||
* PRIMARY KEY expr
|
||||
* ) ENGINE = engine
|
||||
*
|
||||
* Or:
|
||||
|
@@ -41,6 +41,7 @@ namespace ErrorCodes
    extern const int INCORRECT_DATA;
    extern const int CANNOT_ASSIGN_OPTIMIZE;
    extern const int TIMEOUT_EXCEEDED;
    extern const int UNKNOWN_POLICY;
}

namespace ActionLocks
@@ -1331,7 +1332,7 @@ void StorageMergeTree::movePartitionToTable(const StoragePtr & dest_table, const
        throw Exception("Destination table " + dest_table_storage->getStorageID().getNameForLogs() +
                        " should have the same storage policy of source table " + getStorageID().getNameForLogs() + ". " +
                        getStorageID().getNameForLogs() + ": " + this->getStoragePolicy()->getName() + ", " +
                        dest_table_storage->getStorageID().getNameForLogs() + ": " + dest_table_storage->getStoragePolicy()->getName(), ErrorCodes::LOGICAL_ERROR);
                        dest_table_storage->getStorageID().getNameForLogs() + ": " + dest_table_storage->getStoragePolicy()->getName(), ErrorCodes::UNKNOWN_POLICY);

    auto dest_metadata_snapshot = dest_table->getInMemoryMetadataPtr();
    auto metadata_snapshot = getInMemoryMetadataPtr();
@@ -114,6 +114,7 @@ namespace ErrorCodes
    extern const int CANNOT_ASSIGN_ALTER;
    extern const int DIRECTORY_ALREADY_EXISTS;
    extern const int ILLEGAL_TYPE_OF_ARGUMENT;
    extern const int UNKNOWN_POLICY;
}

namespace ActionLocks
@@ -833,7 +834,7 @@ void StorageReplicatedMergeTree::checkTableStructure(const String & zookeeper_pr
}

void StorageReplicatedMergeTree::setTableStructure(
ColumnsDescription new_columns, const ReplicatedMergeTreeTableMetadata::Diff & metadata_diff)
    ColumnsDescription new_columns, const ReplicatedMergeTreeTableMetadata::Diff & metadata_diff)
{
    StorageInMemoryMetadata new_metadata = getInMemoryMetadata();
    StorageInMemoryMetadata old_metadata = getInMemoryMetadata();
@@ -856,7 +857,7 @@ ColumnsDescription new_columns, const ReplicatedMergeTreeTableMetadata::Diff & m

    if (!metadata_diff.empty())
    {
        auto parse_key_expr = [](const String & key_expr)
        auto parse_key_expr = [] (const String & key_expr)
        {
            ParserNotEmptyExpressionList parser(false);
            auto new_sorting_key_expr_list = parseQuery(parser, key_expr, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH);
@@ -3936,13 +3937,19 @@ void StorageReplicatedMergeTree::alter(

    ReplicatedMergeTreeTableMetadata future_metadata_in_zk(*this, current_metadata);
    if (ast_to_str(future_metadata.sorting_key.definition_ast) != ast_to_str(current_metadata->sorting_key.definition_ast))
        future_metadata_in_zk.sorting_key = serializeAST(*future_metadata.sorting_key.expression_list_ast);
    {
        /// We serialize definition_ast as a list, because the code that applies ALTER (setTableStructure) expects a serialized non-empty expression
        /// list here and we cannot change this representation for compatibility. Also we have a pre-parsed AST `sorting_key.expression_list_ast`
        /// in KeyDescription, but it contains the version column for VersionedCollapsingMergeTree, which shouldn't be defined as a part of the key definition AST.
        /// So the most compatible way is just to convert definition_ast to a list and serialize it. In all other places key.expression_list_ast should be used.
        future_metadata_in_zk.sorting_key = serializeAST(*extractKeyExpressionList(future_metadata.sorting_key.definition_ast));
    }

    if (ast_to_str(future_metadata.sampling_key.definition_ast) != ast_to_str(current_metadata->sampling_key.definition_ast))
        future_metadata_in_zk.sampling_expression = serializeAST(*future_metadata.sampling_key.expression_list_ast);
        future_metadata_in_zk.sampling_expression = serializeAST(*extractKeyExpressionList(future_metadata.sampling_key.definition_ast));

    if (ast_to_str(future_metadata.partition_key.definition_ast) != ast_to_str(current_metadata->partition_key.definition_ast))
        future_metadata_in_zk.partition_key = serializeAST(*future_metadata.partition_key.expression_list_ast);
        future_metadata_in_zk.partition_key = serializeAST(*extractKeyExpressionList(future_metadata.partition_key.definition_ast));

    if (ast_to_str(future_metadata.table_ttl.definition_ast) != ast_to_str(current_metadata->table_ttl.definition_ast))
    {
@@ -5604,7 +5611,7 @@ void StorageReplicatedMergeTree::movePartitionToTable(const StoragePtr & dest_ta
        throw Exception("Destination table " + dest_table_storage->getStorageID().getNameForLogs() +
                        " should have the same storage policy of source table " + getStorageID().getNameForLogs() + ". " +
                        getStorageID().getNameForLogs() + ": " + this->getStoragePolicy()->getName() + ", " +
                        getStorageID().getNameForLogs() + ": " + dest_table_storage->getStoragePolicy()->getName(), ErrorCodes::LOGICAL_ERROR);
                        getStorageID().getNameForLogs() + ": " + dest_table_storage->getStoragePolicy()->getName(), ErrorCodes::UNKNOWN_POLICY);

    auto dest_metadata_snapshot = dest_table->getInMemoryMetadataPtr();
    auto metadata_snapshot = getInMemoryMetadataPtr();
@@ -353,20 +353,22 @@ def run_tests_array(all_tests_with_params):

                if os.path.isfile(stdout_file):
                    print(", result:\n")
                    print(open(stdout_file).read())
                    print('\n'.join(open(stdout_file).read().split('\n')[:100]))

            elif stderr:
                failures += 1
                failures_chain += 1
                print(MSG_FAIL, end='')
                print_test_time(total_time)
                print(" - having stderror:\n{}".format(stderr))
                print(" - having stderror:\n{}".format(
                    '\n'.join(stderr.split('\n')[:100])))
            elif 'Exception' in stdout:
                failures += 1
                failures_chain += 1
                print(MSG_FAIL, end='')
                print_test_time(total_time)
                print(" - having exception:\n{}".format(stdout))
                print(" - having exception:\n{}".format(
                    '\n'.join(stdout.split('\n')[:100])))
            elif not os.path.isfile(reference_file):
                print(MSG_UNKNOWN, end='')
                print_test_time(total_time)
@@ -0,0 +1 @@
#!/usr/bin/env python3
@@ -0,0 +1,28 @@
<yandex>
    <remote_servers>
        <test_cluster_mixed>
            <shard>
                <internal_replication>true</internal_replication>
                <replica>
                    <host>node1</host>
                    <port>9000</port>
                </replica>
                <replica>
                    <host>node2</host>
                    <port>9000</port>
                </replica>
            </shard>
            <shard>
                <internal_replication>false</internal_replication>
                <replica>
                    <host>node3</host>
                    <port>9000</port>
                </replica>
                <replica>
                    <host>node4</host>
                    <port>9000</port>
                </replica>
            </shard>
        </test_cluster_mixed>
    </remote_servers>
</yandex>
tests/integration/test_alter_on_mixed_type_cluster/test.py (new file, 93 lines)
@@ -0,0 +1,93 @@
import pytest

from helpers.cluster import ClickHouseCluster
from helpers.test_tools import assert_eq_with_retry

cluster = ClickHouseCluster(__file__)

node1 = cluster.add_instance('node1', main_configs=['configs/remote_servers.xml'], with_zookeeper=True)
node2 = cluster.add_instance('node2', main_configs=['configs/remote_servers.xml'], with_zookeeper=True)
node3 = cluster.add_instance('node3', main_configs=['configs/remote_servers.xml'], with_zookeeper=True)
node4 = cluster.add_instance('node4', main_configs=['configs/remote_servers.xml'], with_zookeeper=True)


@pytest.fixture(scope="module")
def started_cluster():
    try:
        cluster.start()

        for node in [node1, node2]:
            node.query('''
            CREATE TABLE test_table_replicated(date Date, id UInt32, value Int32)
            ENGINE = ReplicatedMergeTree('/clickhouse/tables/0/sometable', '{replica}') ORDER BY id;
            '''.format(replica=node.name))
            node.query('''CREATE TABLE test_table(date Date, id UInt32, value Int32) ENGINE=MergeTree ORDER BY id''')

        for node in [node3, node4]:
            node.query('''
            CREATE TABLE test_table_replicated(date Date, id UInt32, value Int32)
            ENGINE = ReplicatedMergeTree('/clickhouse/tables/1/someotable', '{replica}') ORDER BY id;
            '''.format(replica=node.name))

            node.query('''CREATE TABLE test_table(date Date, id UInt32, value Int32) ENGINE=MergeTree ORDER BY id''')

        yield cluster

    finally:
        cluster.shutdown()


def test_alter_on_cluter_non_replicated(started_cluster):
    for node in [node1, node2, node3, node4]:
        node.query("INSERT INTO test_table VALUES(toDate('2019-10-01'), 1, 1)")

    assert node1.query("SELECT COUNT() FROM test_table") == "1\n"
    assert node2.query("SELECT COUNT() FROM test_table") == "1\n"
    assert node3.query("SELECT COUNT() FROM test_table") == "1\n"
    assert node4.query("SELECT COUNT() FROM test_table") == "1\n"

    node1.query("ALTER TABLE test_table ON CLUSTER 'test_cluster_mixed' MODIFY COLUMN date DateTime")

    assert node1.query("SELECT date FROM test_table") == '2019-10-01 00:00:00\n'
    assert node2.query("SELECT date FROM test_table") == '2019-10-01 00:00:00\n'
    assert node3.query("SELECT date FROM test_table") == '2019-10-01 00:00:00\n'
    assert node4.query("SELECT date FROM test_table") == '2019-10-01 00:00:00\n'

    node3.query("ALTER TABLE test_table ON CLUSTER 'test_cluster_mixed' MODIFY COLUMN value String")

    for node in [node1, node2, node3, node4]:
        node.query("INSERT INTO test_table VALUES(toDateTime('2019-10-02 00:00:00'), 2, 'Hello')")

    assert node1.query("SELECT COUNT() FROM test_table") == "2\n"
    assert node2.query("SELECT COUNT() FROM test_table") == "2\n"
    assert node3.query("SELECT COUNT() FROM test_table") == "2\n"
    assert node4.query("SELECT COUNT() FROM test_table") == "2\n"


def test_alter_replicated_on_cluster(started_cluster):
    for node in [node1, node3]:
        node.query("INSERT INTO test_table_replicated VALUES(toDate('2019-10-01'), 1, 1)")

    for node in [node2, node4]:
        node.query("SYSTEM SYNC REPLICA test_table_replicated", timeout=20)

    node1.query("ALTER TABLE test_table_replicated ON CLUSTER 'test_cluster_mixed' MODIFY COLUMN date DateTime", settings={"replication_alter_partitions_sync": "2"})

    assert node1.query("SELECT date FROM test_table_replicated") == '2019-10-01 00:00:00\n'
    assert node2.query("SELECT date FROM test_table_replicated") == '2019-10-01 00:00:00\n'
    assert node3.query("SELECT date FROM test_table_replicated") == '2019-10-01 00:00:00\n'
    assert node4.query("SELECT date FROM test_table_replicated") == '2019-10-01 00:00:00\n'

    node3.query("ALTER TABLE test_table_replicated ON CLUSTER 'test_cluster_mixed' MODIFY COLUMN value String", settings={"replication_alter_partitions_sync": "2"})

    for node in [node2, node4]:
        node.query("INSERT INTO test_table_replicated VALUES(toDateTime('2019-10-02 00:00:00'), 2, 'Hello')")

    for node in [node1, node3]:
        node.query("SYSTEM SYNC REPLICA test_table_replicated", timeout=20)

    assert node1.query("SELECT COUNT() FROM test_table_replicated") == "2\n"
    assert node2.query("SELECT COUNT() FROM test_table_replicated") == "2\n"
    assert node3.query("SELECT COUNT() FROM test_table_replicated") == "2\n"
    assert node4.query("SELECT COUNT() FROM test_table_replicated") == "2\n"
@@ -1,8 +1,2 @@
test_cluster_two_shards
test_cluster_two_shards_different_databases
test_cluster_two_shards_localhost
test_shard_localhost
test_shard_localhost_secure
test_unavailable_shard
test_cluster_two_shards
test_shard_localhost 1 1 1 localhost ::1 9000 1 default 0 0
@@ -1,3 +1,3 @@
show clusters;
show clusters like 'test%' limit 1;
-- don't show all clusters to reduce dependency on the configuration of server
show clusters like 'test_shard%' limit 1;
show cluster 'test_shard_localhost';
@@ -0,0 +1,6 @@
1
1
1 1
1 1
2 2
7 14
tests/queries/0_stateless/01516_create_table_primary_key.sql (new file, 53 lines)
@@ -0,0 +1,53 @@
DROP DATABASE IF EXISTS test_01516;
CREATE DATABASE test_01516 ENGINE=Ordinary; -- Full ATTACH requires UUID with Atomic
USE test_01516;

DROP TABLE IF EXISTS primary_key_test;

CREATE TABLE primary_key_test(v Int32, PRIMARY KEY(v)) ENGINE=ReplacingMergeTree ORDER BY v;
INSERT INTO primary_key_test VALUES (1), (1), (1);
DETACH TABLE primary_key_test;
ATTACH TABLE primary_key_test(v Int32, PRIMARY KEY(v)) ENGINE=ReplacingMergeTree ORDER BY v;
SELECT * FROM primary_key_test FINAL;
DROP TABLE primary_key_test;

CREATE TABLE primary_key_test(v Int32) ENGINE=ReplacingMergeTree ORDER BY v PRIMARY KEY(v);
INSERT INTO primary_key_test VALUES (1), (1), (1);
DETACH TABLE primary_key_test;
ATTACH TABLE primary_key_test(v Int32) ENGINE=ReplacingMergeTree ORDER BY v PRIMARY KEY(v);
SELECT * FROM primary_key_test FINAL;
DROP TABLE primary_key_test;

CREATE TABLE primary_key_test(v Int32, PRIMARY KEY(v), PRIMARY KEY(v)) ENGINE=ReplacingMergeTree ORDER BY v; -- { clientError 36; }

CREATE TABLE primary_key_test(v Int32, PRIMARY KEY(v)) ENGINE=ReplacingMergeTree ORDER BY v PRIMARY KEY(v); -- { clientError 36; }

CREATE TABLE primary_key_test(v1 Int32, v2 Int32, PRIMARY KEY(v1, v2)) ENGINE=ReplacingMergeTree ORDER BY (v1, v2);
INSERT INTO primary_key_test VALUES (1, 1), (1, 1), (1, 1);
DETACH TABLE primary_key_test;
ATTACH TABLE primary_key_test(v1 Int32, v2 Int32, PRIMARY KEY(v1, v2)) ENGINE=ReplacingMergeTree ORDER BY (v1, v2);
SELECT * FROM primary_key_test FINAL;
DROP TABLE primary_key_test;

CREATE TABLE primary_key_test(v1 Int32, v2 Int32) ENGINE=ReplacingMergeTree ORDER BY (v1, v2) PRIMARY KEY(v1, v2);
INSERT INTO primary_key_test VALUES (1, 1), (1, 1), (1, 1);
DETACH TABLE primary_key_test;
ATTACH TABLE primary_key_test(v1 Int32, v2 Int32) ENGINE=ReplacingMergeTree ORDER BY (v1, v2) PRIMARY KEY(v1, v2);
SELECT * FROM primary_key_test FINAL;
DROP TABLE primary_key_test;

CREATE TABLE primary_key_test(v1 Int32, v2 Int32, PRIMARY KEY(v1, v2), PRIMARY KEY(v1, v2)) ENGINE=ReplacingMergeTree ORDER BY (v1, v2); -- { clientError 36; }

CREATE TABLE primary_key_test(v1 Int32, v2 Int32, PRIMARY KEY(v1, v2)) ENGINE=ReplacingMergeTree ORDER BY (v1, v2) PRIMARY KEY(v1, v2); -- { clientError 36; }

CREATE TABLE primary_key_test(v1 Int64, v2 Int32, v3 String, PRIMARY KEY(v1, gcd(v1, v2))) ENGINE=ReplacingMergeTree ORDER BY v1; -- { serverError 36; }

CREATE TABLE primary_key_test(v1 Int64, v2 Int32, v3 String, PRIMARY KEY(v1, gcd(v1, v2))) ENGINE=ReplacingMergeTree ORDER BY (v1, gcd(v1, v2));

INSERT INTO primary_key_test VALUES(7, 14, 'hello'), (2, 2, 'world'), (7, 14, 'duplicate');

SELECT v1, v2 FROM primary_key_test FINAL ORDER BY v1, v2;

DROP TABLE primary_key_test;

DROP DATABASE test_01516;
@@ -0,0 +1 @@
all tests passed
@@ -0,0 +1,7 @@
SELECT database FROM system.tables WHERE database LIKE '%' format Null;
SELECT database AS db FROM system.tables WHERE db LIKE '%' format Null;
SELECT CAST(database, 'String') AS db FROM system.tables WHERE db LIKE '%' format Null;
SELECT CAST('a string', 'Nullable(String)') AS str WHERE str LIKE '%' format Null;
SELECT CAST(database, 'Nullable(String)') AS ndb FROM system.tables WHERE ndb LIKE '%' format Null;
SELECT 'all tests passed';
@@ -0,0 +1,2 @@
0
1
@@ -0,0 +1,14 @@
DROP TABLE IF EXISTS logs;

CREATE TABLE logs(
    date_visited DateTime,
    date Date MATERIALIZED toDate(date_visited)
) ENGINE = MergeTree() ORDER BY tuple();

SELECT count() FROM logs AS plogs WHERE plogs.date = '2019-11-20';

INSERT INTO logs VALUES('2019-11-20 00:00:00');

SELECT count() FROM logs AS plogs WHERE plogs.date = '2019-11-20';

DROP TABLE logs;
tests/queries/0_stateless/01518_select_in_null.reference (new file, 80 lines)
@@ -0,0 +1,80 @@
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
tests/queries/0_stateless/01518_select_in_null.sql (new file, 90 lines)
@@ -0,0 +1,90 @@
DROP TABLE IF EXISTS t1;

CREATE TABLE t1 (`cA` String, `c1` String) ENGINE = MergeTree ORDER BY (cA, c1);

insert into t1 select 'AAAAAAAAAAA', 'BBBBBB';

select count() from t1 where c1 in (select 'BBBBBB' union all select null);
select count() from t1 where c1 in (select 'BBBBBB' union all select null);
select count() from t1 where c1 in (select 'BBBBBB' union all select null);
select count() from t1 where c1 in (select 'BBBBBB' union all select null);
select count() from t1 where c1 in (select 'BBBBBB' union all select null);
select count() from t1 where c1 in (select 'BBBBBB' union all select null);
select count() from t1 where c1 in (select 'BBBBBB' union all select null);
select count() from t1 where c1 in (select 'BBBBBB' union all select null);
select count() from t1 where c1 in (select 'BBBBBB' union all select null);
select count() from t1 where c1 in (select 'BBBBBB' union all select null);
select count() from t1 where c1 in (select 'BBBBBB' union all select null);
select count() from t1 where c1 in (select 'BBBBBB' union all select null);
select count() from t1 where c1 in (select 'BBBBBB' union all select null);
select count() from t1 where c1 in (select 'BBBBBB' union all select null);
select count() from t1 where c1 in (select 'BBBBBB' union all select null);
select count() from t1 where c1 in (select 'BBBBBB' union all select null);
select count() from t1 where c1 in (select 'BBBBBB' union all select null);
select count() from t1 where c1 in (select 'BBBBBB' union all select null);
select count() from t1 where c1 in (select 'BBBBBB' union all select null);
select count() from t1 where c1 in (select 'BBBBBB' union all select null);
select count() from t1 where c1 in (select 'BBBBBB' union all select null);
select count() from t1 where c1 in (select 'BBBBBB' union all select null);
select count() from t1 where c1 in (select 'BBBBBB' union all select null);
select count() from t1 where c1 in (select 'BBBBBB' union all select null);
select count() from t1 where c1 in (select 'BBBBBB' union all select null);
select count() from t1 where c1 in (select 'BBBBBB' union all select null);
select count() from t1 where c1 in (select 'BBBBBB' union all select null);
select count() from t1 where c1 in (select 'BBBBBB' union all select null);
select count() from t1 where c1 in (select 'BBBBBB' union all select null);
select count() from t1 where c1 in (select 'BBBBBB' union all select null);
select count() from t1 where c1 in (select 'BBBBBB' union all select null);
select count() from t1 where c1 in (select 'BBBBBB' union all select null);
select count() from t1 where c1 in (select 'BBBBBB' union all select null);
select count() from t1 where c1 in (select 'BBBBBB' union all select null);
select count() from t1 where c1 in (select 'BBBBBB' union all select null);
select count() from t1 where c1 in (select 'BBBBBB' union all select null);
select count() from t1 where c1 in (select 'BBBBBB' union all select null);
select count() from t1 where c1 in (select 'BBBBBB' union all select null);
select count() from t1 where c1 in (select 'BBBBBB' union all select null);
select count() from t1 where c1 in (select 'BBBBBB' union all select null);


select count() from t1 where cast(c1 as Nullable(String)) in (select 'BBBBBB' union all select null);
select count() from t1 where cast(c1 as Nullable(String)) in (select 'BBBBBB' union all select null);
select count() from t1 where cast(c1 as Nullable(String)) in (select 'BBBBBB' union all select null);
select count() from t1 where cast(c1 as Nullable(String)) in (select 'BBBBBB' union all select null);
select count() from t1 where cast(c1 as Nullable(String)) in (select 'BBBBBB' union all select null);
select count() from t1 where cast(c1 as Nullable(String)) in (select 'BBBBBB' union all select null);
select count() from t1 where cast(c1 as Nullable(String)) in (select 'BBBBBB' union all select null);
select count() from t1 where cast(c1 as Nullable(String)) in (select 'BBBBBB' union all select null);
select count() from t1 where cast(c1 as Nullable(String)) in (select 'BBBBBB' union all select null);
select count() from t1 where cast(c1 as Nullable(String)) in (select 'BBBBBB' union all select null);
select count() from t1 where cast(c1 as Nullable(String)) in (select 'BBBBBB' union all select null);
select count() from t1 where cast(c1 as Nullable(String)) in (select 'BBBBBB' union all select null);
select count() from t1 where cast(c1 as Nullable(String)) in (select 'BBBBBB' union all select null);
select count() from t1 where cast(c1 as Nullable(String)) in (select 'BBBBBB' union all select null);
select count() from t1 where cast(c1 as Nullable(String)) in (select 'BBBBBB' union all select null);
select count() from t1 where cast(c1 as Nullable(String)) in (select 'BBBBBB' union all select null);
select count() from t1 where cast(c1 as Nullable(String)) in (select 'BBBBBB' union all select null);
select count() from t1 where cast(c1 as Nullable(String)) in (select 'BBBBBB' union all select null);
select count() from t1 where cast(c1 as Nullable(String)) in (select 'BBBBBB' union all select null);
select count() from t1 where cast(c1 as Nullable(String)) in (select 'BBBBBB' union all select null);
select count() from t1 where cast(c1 as Nullable(String)) in (select 'BBBBBB' union all select null);
select count() from t1 where cast(c1 as Nullable(String)) in (select 'BBBBBB' union all select null);
select count() from t1 where cast(c1 as Nullable(String)) in (select 'BBBBBB' union all select null);
select count() from t1 where cast(c1 as Nullable(String)) in (select 'BBBBBB' union all select null);
select count() from t1 where cast(c1 as Nullable(String)) in (select 'BBBBBB' union all select null);
select count() from t1 where cast(c1 as Nullable(String)) in (select 'BBBBBB' union all select null);
select count() from t1 where cast(c1 as Nullable(String)) in (select 'BBBBBB' union all select null);
select count() from t1 where cast(c1 as Nullable(String)) in (select 'BBBBBB' union all select null);
select count() from t1 where cast(c1 as Nullable(String)) in (select 'BBBBBB' union all select null);
select count() from t1 where cast(c1 as Nullable(String)) in (select 'BBBBBB' union all select null);
select count() from t1 where cast(c1 as Nullable(String)) in (select 'BBBBBB' union all select null);
select count() from t1 where cast(c1 as Nullable(String)) in (select 'BBBBBB' union all select null);
select count() from t1 where cast(c1 as Nullable(String)) in (select 'BBBBBB' union all select null);
select count() from t1 where cast(c1 as Nullable(String)) in (select 'BBBBBB' union all select null);
select count() from t1 where cast(c1 as Nullable(String)) in (select 'BBBBBB' union all select null);
select count() from t1 where cast(c1 as Nullable(String)) in (select 'BBBBBB' union all select null);
select count() from t1 where cast(c1 as Nullable(String)) in (select 'BBBBBB' union all select null);
select count() from t1 where cast(c1 as Nullable(String)) in (select 'BBBBBB' union all select null);
select count() from t1 where cast(c1 as Nullable(String)) in (select 'BBBBBB' union all select null);
select count() from t1 where cast(c1 as Nullable(String)) in (select 'BBBBBB' union all select null);

DROP TABLE t1;
@@ -0,0 +1,6 @@
2019-10-01 a 1 aa 1 1 1
2019-10-01 a 1 aa 1 1 1 0
CREATE TABLE default.table_for_alter\n(\n    `d` Date,\n    `a` String,\n    `b` UInt8,\n    `x` String,\n    `y` Int8,\n    `version` UInt64,\n    `sign` Int8 DEFAULT 1,\n    `order` UInt32\n)\nENGINE = ReplicatedVersionedCollapsingMergeTree(\'/clickhouse/tables/01526_alter_add/t1\', \'1\', sign, version)\nPARTITION BY y\nPRIMARY KEY d\nORDER BY (d, order)\nSETTINGS index_granularity = 8192
2019-10-01 a 1 aa 1 1 1 0 0
2019-10-02 b 2 bb 2 2 2 1 2
CREATE TABLE default.table_for_alter\n(\n    `d` Date,\n    `a` String,\n    `b` UInt8,\n    `x` String,\n    `y` Int8,\n    `version` UInt64,\n    `sign` Int8 DEFAULT 1,\n    `order` UInt32,\n    `datum` UInt32\n)\nENGINE = ReplicatedVersionedCollapsingMergeTree(\'/clickhouse/tables/01526_alter_add/t1\', \'1\', sign, version)\nPARTITION BY y\nPRIMARY KEY d\nORDER BY (d, order, datum)\nSETTINGS index_granularity = 8192
@@ -0,0 +1,38 @@
DROP TABLE IF EXISTS table_for_alter;

SET replication_alter_partitions_sync = 2;

CREATE TABLE table_for_alter
(
    `d` Date,
    `a` String,
    `b` UInt8,
    `x` String,
    `y` Int8,
    `version` UInt64,
    `sign` Int8 DEFAULT 1
)
ENGINE = ReplicatedVersionedCollapsingMergeTree('/clickhouse/tables/01526_alter_add/t1', '1', sign, version)
PARTITION BY y
ORDER BY d
SETTINGS index_granularity = 8192;

INSERT INTO table_for_alter VALUES(toDate('2019-10-01'), 'a', 1, 'aa', 1, 1, 1);

SELECT * FROM table_for_alter;

ALTER TABLE table_for_alter ADD COLUMN order UInt32, MODIFY ORDER BY (d, order);

SELECT * FROM table_for_alter;

SHOW CREATE TABLE table_for_alter;

ALTER TABLE table_for_alter ADD COLUMN datum UInt32, MODIFY ORDER BY (d, order, datum);

INSERT INTO table_for_alter VALUES(toDate('2019-10-02'), 'b', 2, 'bb', 2, 2, 2, 1, 2);

SELECT * FROM table_for_alter ORDER BY d;

SHOW CREATE TABLE table_for_alter;

DROP TABLE IF EXISTS table_for_alter;
tests/queries/0_stateless/01526_param_uuid.reference (new file, 1 line)
@@ -0,0 +1 @@
ffffffff-ffff-ffff-ffff-ffffffffffff
tests/queries/0_stateless/01526_param_uuid.sh (new executable file, 6 lines)
@@ -0,0 +1,6 @@
#!/usr/bin/env bash

CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
. "$CURDIR"/../shell_config.sh

${CLICKHOUSE_CLIENT} --param_p1='ffffffff-ffff-ffff-ffff-ffffffffffff' --query "SELECT {p1:UUID}"
@@ -0,0 +1 @@
SELECT arrayMap(x -> x * sum(x), range(10)); -- { serverError 47 }