diff --git a/contrib/lz4 b/contrib/lz4
index c10863b98e1..780aac520b6 160000
--- a/contrib/lz4
+++ b/contrib/lz4
@@ -1 +1 @@
-Subproject commit c10863b98e1503af90616ae99725ecd120265dfb
+Subproject commit 780aac520b69d6369f4e3995624c37e56d75498d
diff --git a/contrib/lz4-cmake/CMakeLists.txt b/contrib/lz4-cmake/CMakeLists.txt
index 382185cc339..25cceaa4574 100644
--- a/contrib/lz4-cmake/CMakeLists.txt
+++ b/contrib/lz4-cmake/CMakeLists.txt
@@ -9,8 +9,7 @@ add_library (lz4
${LIBRARY_DIR}/xxhash.h
${LIBRARY_DIR}/lz4.h
- ${LIBRARY_DIR}/lz4hc.h
- ${LIBRARY_DIR}/lz4opt.h)
+ ${LIBRARY_DIR}/lz4hc.h)
target_compile_definitions(lz4 PUBLIC LZ4_DISABLE_DEPRECATE_WARNINGS=1)
diff --git a/dbms/cmake/version.cmake b/dbms/cmake/version.cmake
index e3f512f91a1..3557d3d97df 100644
--- a/dbms/cmake/version.cmake
+++ b/dbms/cmake/version.cmake
@@ -1,11 +1,11 @@
# This strings autochanged from release_lib.sh:
-set(VERSION_REVISION 54418)
+set(VERSION_REVISION 54419)
set(VERSION_MAJOR 19)
-set(VERSION_MINOR 6)
+set(VERSION_MINOR 7)
set(VERSION_PATCH 1)
-set(VERSION_GITHASH 30d3496c36cf3945c9828ac0b7cf7d1774a9f845)
-set(VERSION_DESCRIBE v19.6.1.1-testing)
-set(VERSION_STRING 19.6.1.1)
+set(VERSION_GITHASH b0b369b30f04a5026d1da5c7d3fd5998d6de1fe4)
+set(VERSION_DESCRIBE v19.7.1.1-testing)
+set(VERSION_STRING 19.7.1.1)
# end of autochange
set(VERSION_EXTRA "" CACHE STRING "")
diff --git a/dbms/programs/server/users.xml b/dbms/programs/server/users.xml
index 32ef1d7cdc4..24b8f628c3a 100644
--- a/dbms/programs/server/users.xml
+++ b/dbms/programs/server/users.xml
@@ -16,6 +16,7 @@
with minimum number of different symbols between replica's hostname and local hostname
(Hamming distance).
in_order - first live replica is chosen in specified order.
+ first_or_random - if the first replica has a higher number of errors, pick a random one from the replicas with the minimum number of errors.
-->
random
diff --git a/dbms/src/Client/ConnectionPoolWithFailover.cpp b/dbms/src/Client/ConnectionPoolWithFailover.cpp
index 55859498c0c..9c12ed31560 100644
--- a/dbms/src/Client/ConnectionPoolWithFailover.cpp
+++ b/dbms/src/Client/ConnectionPoolWithFailover.cpp
@@ -62,6 +62,9 @@ IConnectionPool::Entry ConnectionPoolWithFailover::get(const Settings * settings
break;
case LoadBalancing::RANDOM:
break;
+ case LoadBalancing::FIRST_OR_RANDOM:
+ get_priority = [](size_t i) -> size_t { return i >= 1; };
+ break;
}
return Base::get(try_get_entry, get_priority);
@@ -134,6 +137,9 @@ std::vector ConnectionPoolWithFailover::g
break;
case LoadBalancing::RANDOM:
break;
+ case LoadBalancing::FIRST_OR_RANDOM:
+ get_priority = [](size_t i) -> size_t { return i >= 1; };
+ break;
}
bool fallback_to_stale_replicas = settings ? bool(settings->fallback_to_stale_replicas_for_distributed_queries) : true;
diff --git a/dbms/src/Columns/ColumnAggregateFunction.h b/dbms/src/Columns/ColumnAggregateFunction.h
index a028a95d68c..fdfaec93c88 100644
--- a/dbms/src/Columns/ColumnAggregateFunction.h
+++ b/dbms/src/Columns/ColumnAggregateFunction.h
@@ -43,13 +43,13 @@ using Arenas = std::vector;
* specifying which individual values should be destroyed and which ones should not.
* Clearly, this method would have a substantially non-zero price.
*/
-class ColumnAggregateFunction final : public COWPtrHelper
+class ColumnAggregateFunction final : public COWHelper
{
public:
using Container = PaddedPODArray;
private:
- friend class COWPtrHelper;
+ friend class COWHelper;
/// Memory pools. Aggregate states are allocated from them.
Arenas arenas;
diff --git a/dbms/src/Columns/ColumnArray.h b/dbms/src/Columns/ColumnArray.h
index 85df5550d4e..f3f7f1f4a1a 100644
--- a/dbms/src/Columns/ColumnArray.h
+++ b/dbms/src/Columns/ColumnArray.h
@@ -13,10 +13,10 @@ namespace DB
* In memory, it is represented as one column of a nested type, whose size is equal to the sum of the sizes of all arrays,
* and as an array of offsets in it, which allows you to get each element.
*/
-class ColumnArray final : public COWPtrHelper
+class ColumnArray final : public COWHelper
{
private:
- friend class COWPtrHelper;
+ friend class COWHelper;
/** Create an array column with specified values and offsets. */
ColumnArray(MutableColumnPtr && nested_column, MutableColumnPtr && offsets_column);
@@ -30,7 +30,7 @@ public:
/** Create immutable column using immutable arguments. This arguments may be shared with other columns.
* Use IColumn::mutate in order to make mutable column and mutate shared nested columns.
*/
- using Base = COWPtrHelper;
+ using Base = COWHelper;
static Ptr create(const ColumnPtr & nested_column, const ColumnPtr & offsets_column)
{
diff --git a/dbms/src/Columns/ColumnConst.h b/dbms/src/Columns/ColumnConst.h
index 87371895840..05a9562e549 100644
--- a/dbms/src/Columns/ColumnConst.h
+++ b/dbms/src/Columns/ColumnConst.h
@@ -18,10 +18,10 @@ namespace ErrorCodes
/** ColumnConst contains another column with single element,
* but looks like a column with arbitrary amount of same elements.
*/
-class ColumnConst final : public COWPtrHelper
+class ColumnConst final : public COWHelper
{
private:
- friend class COWPtrHelper;
+ friend class COWHelper;
WrappedPtr data;
size_t s;
diff --git a/dbms/src/Columns/ColumnDecimal.h b/dbms/src/Columns/ColumnDecimal.h
index 4a3c6153947..db762e5f4c3 100644
--- a/dbms/src/Columns/ColumnDecimal.h
+++ b/dbms/src/Columns/ColumnDecimal.h
@@ -55,13 +55,13 @@ private:
/// A ColumnVector for Decimals
template
-class ColumnDecimal final : public COWPtrHelper>
+class ColumnDecimal final : public COWHelper>
{
static_assert(IsDecimalNumber);
private:
using Self = ColumnDecimal;
- friend class COWPtrHelper;
+ friend class COWHelper;
public:
using Container = DecimalPaddedPODArray;
diff --git a/dbms/src/Columns/ColumnFixedString.h b/dbms/src/Columns/ColumnFixedString.h
index 1f79594b459..2b06d19d7ca 100644
--- a/dbms/src/Columns/ColumnFixedString.h
+++ b/dbms/src/Columns/ColumnFixedString.h
@@ -13,10 +13,10 @@ namespace DB
/** A column of values of "fixed-length string" type.
* If you insert a smaller string, it will be padded with zero bytes.
*/
-class ColumnFixedString final : public COWPtrHelper
+class ColumnFixedString final : public COWHelper
{
public:
- friend class COWPtrHelper;
+ friend class COWHelper;
using Chars = PaddedPODArray;
diff --git a/dbms/src/Columns/ColumnFunction.h b/dbms/src/Columns/ColumnFunction.h
index 8d52110c9ac..571123ae892 100644
--- a/dbms/src/Columns/ColumnFunction.h
+++ b/dbms/src/Columns/ColumnFunction.h
@@ -15,10 +15,10 @@ namespace DB
/** A column containing a lambda expression.
* Behaves like a constant-column. Contains an expression, but not input or output data.
*/
-class ColumnFunction final : public COWPtrHelper
+class ColumnFunction final : public COWHelper
{
private:
- friend class COWPtrHelper;
+ friend class COWHelper;
ColumnFunction(size_t size, FunctionBasePtr function, const ColumnsWithTypeAndName & columns_to_capture);
diff --git a/dbms/src/Columns/ColumnLowCardinality.h b/dbms/src/Columns/ColumnLowCardinality.h
index 91f5337b633..d36b91b0c40 100644
--- a/dbms/src/Columns/ColumnLowCardinality.h
+++ b/dbms/src/Columns/ColumnLowCardinality.h
@@ -14,9 +14,9 @@ namespace ErrorCodes
extern const int ILLEGAL_COLUMN;
}
-class ColumnLowCardinality final : public COWPtrHelper
+class ColumnLowCardinality final : public COWHelper
{
- friend class COWPtrHelper;
+ friend class COWHelper;
ColumnLowCardinality(MutableColumnPtr && column_unique, MutableColumnPtr && indexes, bool is_shared = false);
ColumnLowCardinality(const ColumnLowCardinality & other) = default;
@@ -25,7 +25,7 @@ public:
/** Create immutable column using immutable arguments. This arguments may be shared with other columns.
* Use IColumn::mutate in order to make mutable column and mutate shared nested columns.
*/
- using Base = COWPtrHelper;
+ using Base = COWHelper;
static Ptr create(const ColumnPtr & column_unique_, const ColumnPtr & indexes_, bool is_shared = false)
{
return ColumnLowCardinality::create(column_unique_->assumeMutable(), indexes_->assumeMutable(), is_shared);
diff --git a/dbms/src/Columns/ColumnNothing.h b/dbms/src/Columns/ColumnNothing.h
index 0c9f843a454..691143e2c15 100644
--- a/dbms/src/Columns/ColumnNothing.h
+++ b/dbms/src/Columns/ColumnNothing.h
@@ -6,10 +6,10 @@
namespace DB
{
-class ColumnNothing final : public COWPtrHelper
+class ColumnNothing final : public COWHelper
{
private:
- friend class COWPtrHelper;
+ friend class COWHelper;
ColumnNothing(size_t s_)
{
diff --git a/dbms/src/Columns/ColumnNullable.h b/dbms/src/Columns/ColumnNullable.h
index a66979c0683..43a6256b1a5 100644
--- a/dbms/src/Columns/ColumnNullable.h
+++ b/dbms/src/Columns/ColumnNullable.h
@@ -20,10 +20,10 @@ using ConstNullMapPtr = const NullMap *;
/// over a bitmap because columns are usually stored on disk as compressed
/// files. In this regard, using a bitmap instead of a byte map would
/// greatly complicate the implementation with little to no benefits.
-class ColumnNullable final : public COWPtrHelper
+class ColumnNullable final : public COWHelper
{
private:
- friend class COWPtrHelper;
+ friend class COWHelper;
ColumnNullable(MutableColumnPtr && nested_column_, MutableColumnPtr && null_map_);
ColumnNullable(const ColumnNullable &) = default;
@@ -32,7 +32,7 @@ public:
/** Create immutable column using immutable arguments. This arguments may be shared with other columns.
* Use IColumn::mutate in order to make mutable column and mutate shared nested columns.
*/
- using Base = COWPtrHelper;
+ using Base = COWHelper;
static Ptr create(const ColumnPtr & nested_column_, const ColumnPtr & null_map_)
{
return ColumnNullable::create(nested_column_->assumeMutable(), null_map_->assumeMutable());
diff --git a/dbms/src/Columns/ColumnSet.h b/dbms/src/Columns/ColumnSet.h
index 3ed6c4c4e84..83ab6de5578 100644
--- a/dbms/src/Columns/ColumnSet.h
+++ b/dbms/src/Columns/ColumnSet.h
@@ -14,10 +14,10 @@ using ConstSetPtr = std::shared_ptr;
* Behaves like a constant-column (because the set is one, not its own for each line).
* This column has a nonstandard value, so it can not be obtained via a normal interface.
*/
-class ColumnSet final : public COWPtrHelper
+class ColumnSet final : public COWHelper
{
private:
- friend class COWPtrHelper;
+ friend class COWHelper;
ColumnSet(size_t s_, const ConstSetPtr & data_) : data(data_) { s = s_; }
ColumnSet(const ColumnSet &) = default;
diff --git a/dbms/src/Columns/ColumnString.h b/dbms/src/Columns/ColumnString.h
index 486e6b1fd44..398077ce964 100644
--- a/dbms/src/Columns/ColumnString.h
+++ b/dbms/src/Columns/ColumnString.h
@@ -18,14 +18,14 @@ namespace DB
/** Column for String values.
*/
-class ColumnString final : public COWPtrHelper
+class ColumnString final : public COWHelper
{
public:
using Char = UInt8;
using Chars = PaddedPODArray;
private:
- friend class COWPtrHelper;
+ friend class COWHelper;
/// Maps i'th position to offset to i+1'th element. Last offset maps to the end of all chars (is the size of all chars).
Offsets offsets;
diff --git a/dbms/src/Columns/ColumnTuple.h b/dbms/src/Columns/ColumnTuple.h
index 376c099c1dc..65dd19fc6da 100644
--- a/dbms/src/Columns/ColumnTuple.h
+++ b/dbms/src/Columns/ColumnTuple.h
@@ -12,10 +12,10 @@ namespace DB
* Mixed constant/non-constant columns is prohibited in tuple
* for implementation simplicity.
*/
-class ColumnTuple final : public COWPtrHelper
+class ColumnTuple final : public COWHelper
{
private:
- friend class COWPtrHelper;
+ friend class COWHelper;
using TupleColumns = std::vector;
TupleColumns columns;
@@ -30,7 +30,7 @@ public:
/** Create immutable column using immutable arguments. This arguments may be shared with other columns.
* Use IColumn::mutate in order to make mutable column and mutate shared nested columns.
*/
- using Base = COWPtrHelper;
+ using Base = COWHelper;
static Ptr create(const Columns & columns);
static Ptr create(const TupleColumns & columns);
static Ptr create(Columns && arg) { return create(arg); }
diff --git a/dbms/src/Columns/ColumnUnique.h b/dbms/src/Columns/ColumnUnique.h
index 5882f9109b2..11344a23a1f 100644
--- a/dbms/src/Columns/ColumnUnique.h
+++ b/dbms/src/Columns/ColumnUnique.h
@@ -25,9 +25,9 @@ namespace ErrorCodes
}
template
-class ColumnUnique final : public COWPtrHelper>
+class ColumnUnique final : public COWHelper>
{
- friend class COWPtrHelper>;
+ friend class COWHelper>;
private:
explicit ColumnUnique(MutableColumnPtr && holder, bool is_nullable);
diff --git a/dbms/src/Columns/ColumnVector.h b/dbms/src/Columns/ColumnVector.h
index 43f6b0a3d52..15773637c1a 100644
--- a/dbms/src/Columns/ColumnVector.h
+++ b/dbms/src/Columns/ColumnVector.h
@@ -90,13 +90,13 @@ template <> struct CompareHelper : public FloatCompareHelper {
/** A template for columns that use a simple array to store.
*/
template
-class ColumnVector final : public COWPtrHelper>
+class ColumnVector final : public COWHelper>
{
static_assert(!IsDecimalNumber);
private:
using Self = ColumnVector;
- friend class COWPtrHelper;
+ friend class COWHelper;
struct less;
struct greater;
diff --git a/dbms/src/Columns/IColumn.h b/dbms/src/Columns/IColumn.h
index c9afd7c1bfe..74a1302d094 100644
--- a/dbms/src/Columns/IColumn.h
+++ b/dbms/src/Columns/IColumn.h
@@ -1,7 +1,7 @@
#pragma once
#include
-#include
+#include
#include
#include
#include
@@ -24,13 +24,13 @@ class Arena;
class ColumnGathererStream;
/// Declares interface to store columns in memory.
-class IColumn : public COWPtr
+class IColumn : public COW
{
private:
- friend class COWPtr;
+ friend class COW;
/// Creates the same column with the same data.
- /// This is internal method to use from COWPtr.
+ /// This is internal method to use from COW.
/// It performs shallow copy with copy-ctor and not useful from outside.
/// If you want to copy column for modification, look at 'mutate' method.
virtual MutablePtr clone() const = 0;
diff --git a/dbms/src/Common/COWPtr.h b/dbms/src/Common/COW.h
similarity index 89%
rename from dbms/src/Common/COWPtr.h
rename to dbms/src/Common/COW.h
index 88b4f0b7740..d8152af8356 100644
--- a/dbms/src/Common/COWPtr.h
+++ b/dbms/src/Common/COW.h
@@ -10,10 +10,10 @@
*
* Usage:
- class Column : public COWPtr
+ class Column : public COW
{
private:
- friend class COWPtr;
+ friend class COW;
/// Leave all constructors in private section. They will be avaliable through 'create' method.
Column();
@@ -23,7 +23,7 @@
public:
/// Correctly use const qualifiers in your interface.
- virtual ~IColumn() {}
+ virtual ~Column() {}
};
* It will provide 'create' and 'mutate' methods.
@@ -63,7 +63,7 @@
* Actually it is, if your values are small or if copying is done implicitly.
* This is the case for string implementations.
*
- * In contrast, COWPtr is intended for the cases when you need to share states of large objects,
+ * In contrast, COW is intended for the cases when you need to share states of large objects,
* (when you usually will use std::shared_ptr) but you also want precise control over modification
* of this shared state.
*
@@ -73,7 +73,7 @@
* to use std::unique_ptr for it somehow.
*/
template
-class COWPtr : public boost::intrusive_ref_counter
+class COW : public boost::intrusive_ref_counter
{
private:
Derived * derived() { return static_cast(this); }
@@ -96,8 +96,8 @@ protected:
private:
using Base = IntrusivePtr;
- template friend class COWPtr;
- template friend class COWPtrHelper;
+ template friend class COW;
+ template friend class COWHelper;
explicit mutable_ptr(T * ptr) : Base(ptr) {}
@@ -115,7 +115,7 @@ protected:
mutable_ptr() = default;
- mutable_ptr(const std::nullptr_t *) {}
+ mutable_ptr(std::nullptr_t) {}
};
public:
@@ -128,8 +128,8 @@ protected:
private:
using Base = IntrusivePtr;
- template friend class COWPtr;
- template friend class COWPtrHelper;
+ template friend class COW;
+ template friend class COWHelper;
explicit immutable_ptr(const T * ptr) : Base(ptr) {}
@@ -159,7 +159,7 @@ protected:
immutable_ptr() = default;
- immutable_ptr(const std::nullptr_t *) {}
+ immutable_ptr(std::nullptr_t) {}
};
public:
@@ -192,7 +192,7 @@ public:
MutablePtr assumeMutable() const
{
- return const_cast(this)->getPtr();
+ return const_cast(this)->getPtr();
}
Derived & assumeMutableRef() const
@@ -244,7 +244,7 @@ public:
*
* NOTE:
* If you override 'mutate' method in inherited classes, don't forget to make it virtual in base class or to make it call a virtual method.
- * (COWPtr itself doesn't force any methods to be virtual).
+ * (COW itself doesn't force any methods to be virtual).
*
* See example in "cow_compositions.cpp".
*/
@@ -255,22 +255,22 @@ public:
/** Helper class to support inheritance.
* Example:
*
- * class IColumn : public COWPtr
+ * class IColumn : public COW
* {
- * friend class COWPtr;
+ * friend class COW;
* virtual MutablePtr clone() const = 0;
* virtual ~IColumn() {}
* };
*
- * class ConcreteColumn : public COWPtrHelper
+ * class ConcreteColumn : public COWHelper
* {
- * friend class COWPtrHelper;
+ * friend class COWHelper;
* };
*
* Here is complete inheritance diagram:
*
* ConcreteColumn
- * COWPtrHelper
+ * COWHelper
* IColumn
* CowPtr
* boost::intrusive_ref_counter
@@ -278,7 +278,7 @@ public:
* See example in "cow_columns.cpp".
*/
template
-class COWPtrHelper : public Base
+class COWHelper : public Base
{
private:
Derived * derived() { return static_cast(this); }
diff --git a/dbms/src/Common/tests/cow_columns.cpp b/dbms/src/Common/tests/cow_columns.cpp
index cf48c159a96..dad2ba13de5 100644
--- a/dbms/src/Common/tests/cow_columns.cpp
+++ b/dbms/src/Common/tests/cow_columns.cpp
@@ -1,11 +1,11 @@
-#include
+#include
#include
-class IColumn : public COWPtr
+class IColumn : public COW
{
private:
- friend class COWPtr;
+ friend class COW;
virtual MutablePtr clone() const = 0;
public:
@@ -22,10 +22,10 @@ public:
using ColumnPtr = IColumn::Ptr;
using MutableColumnPtr = IColumn::MutablePtr;
-class ConcreteColumn : public COWPtrHelper
+class ConcreteColumn : public COWHelper
{
private:
- friend class COWPtrHelper;
+ friend class COWHelper;
int data;
ConcreteColumn(int data) : data(data) {}
diff --git a/dbms/src/Common/tests/cow_compositions.cpp b/dbms/src/Common/tests/cow_compositions.cpp
index 76b8a2987ab..a48624d7d64 100644
--- a/dbms/src/Common/tests/cow_compositions.cpp
+++ b/dbms/src/Common/tests/cow_compositions.cpp
@@ -1,11 +1,11 @@
-#include
+#include
#include
-class IColumn : public COWPtr
+class IColumn : public COW
{
private:
- friend class COWPtr;
+ friend class COW;
virtual MutablePtr clone() const = 0;
virtual MutablePtr deepMutate() const { return shallowMutate(); }
@@ -24,10 +24,10 @@ public:
using ColumnPtr = IColumn::Ptr;
using MutableColumnPtr = IColumn::MutablePtr;
-class ConcreteColumn : public COWPtrHelper
+class ConcreteColumn : public COWHelper
{
private:
- friend class COWPtrHelper;
+ friend class COWHelper;
int data;
ConcreteColumn(int data) : data(data) {}
@@ -38,10 +38,10 @@ public:
void set(int value) override { data = value; }
};
-class ColumnComposition : public COWPtrHelper
+class ColumnComposition : public COWHelper
{
private:
- friend class COWPtrHelper;
+ friend class COWHelper;
ConcreteColumn::WrappedPtr wrapped;
diff --git a/dbms/src/Core/SettingsCommon.cpp b/dbms/src/Core/SettingsCommon.cpp
index 950768d21db..02a9348dfd6 100644
--- a/dbms/src/Core/SettingsCommon.cpp
+++ b/dbms/src/Core/SettingsCommon.cpp
@@ -247,15 +247,16 @@ LoadBalancing SettingLoadBalancing::getLoadBalancing(const String & s)
if (s == "random") return LoadBalancing::RANDOM;
if (s == "nearest_hostname") return LoadBalancing::NEAREST_HOSTNAME;
if (s == "in_order") return LoadBalancing::IN_ORDER;
+ if (s == "first_or_random") return LoadBalancing::FIRST_OR_RANDOM;
- throw Exception("Unknown load balancing mode: '" + s + "', must be one of 'random', 'nearest_hostname', 'in_order'",
+ throw Exception("Unknown load balancing mode: '" + s + "', must be one of 'random', 'nearest_hostname', 'in_order', 'first_or_random'",
ErrorCodes::UNKNOWN_LOAD_BALANCING);
}
String SettingLoadBalancing::toString() const
{
- const char * strings[] = {"random", "nearest_hostname", "in_order"};
- if (value < LoadBalancing::RANDOM || value > LoadBalancing::IN_ORDER)
+ const char * strings[] = {"random", "nearest_hostname", "in_order", "first_or_random"};
+ if (value < LoadBalancing::RANDOM || value > LoadBalancing::FIRST_OR_RANDOM)
throw Exception("Unknown load balancing mode", ErrorCodes::UNKNOWN_LOAD_BALANCING);
return strings[static_cast(value)];
}
diff --git a/dbms/src/Core/SettingsCommon.h b/dbms/src/Core/SettingsCommon.h
index 452161e1f94..c661cef1570 100644
--- a/dbms/src/Core/SettingsCommon.h
+++ b/dbms/src/Core/SettingsCommon.h
@@ -167,6 +167,9 @@ enum class LoadBalancing
NEAREST_HOSTNAME,
/// replicas are walked through strictly in order; the number of errors does not matter
IN_ORDER,
+ /// if the first replica has a higher number of errors,
+ /// pick a random one from the replicas with the minimum number of errors
+ FIRST_OR_RANDOM,
};
struct SettingLoadBalancing
diff --git a/dbms/src/Core/iostream_debug_helpers.cpp b/dbms/src/Core/iostream_debug_helpers.cpp
index 98a9775f15d..1f36e081f35 100644
--- a/dbms/src/Core/iostream_debug_helpers.cpp
+++ b/dbms/src/Core/iostream_debug_helpers.cpp
@@ -12,7 +12,7 @@
#include
#include
#include
-#include
+#include
#include
namespace DB
diff --git a/dbms/src/DataTypes/IDataType.h b/dbms/src/DataTypes/IDataType.h
index aa253fbdc08..60124cd3d5d 100644
--- a/dbms/src/DataTypes/IDataType.h
+++ b/dbms/src/DataTypes/IDataType.h
@@ -1,7 +1,7 @@
#pragma once
#include
-#include
+#include
#include
#include
@@ -17,8 +17,8 @@ class IDataType;
struct FormatSettings;
class IColumn;
-using ColumnPtr = COWPtr::Ptr;
-using MutableColumnPtr = COWPtr::MutablePtr;
+using ColumnPtr = COW::Ptr;
+using MutableColumnPtr = COW::MutablePtr;
using DataTypePtr = std::shared_ptr;
using DataTypes = std::vector;
diff --git a/dbms/src/Formats/ProtobufReader.cpp b/dbms/src/Formats/ProtobufReader.cpp
index 17b763b043b..d504c03203c 100644
--- a/dbms/src/Formats/ProtobufReader.cpp
+++ b/dbms/src/Formats/ProtobufReader.cpp
@@ -43,7 +43,7 @@ namespace
void unknownFormat()
{
- throw Exception("Protobuf messages are corrupted or doesn't match the provided schema", ErrorCodes::UNKNOWN_PROTOBUF_FORMAT);
+ throw Exception("Protobuf messages are corrupted or don't match the provided schema. Please note that Protobuf stream is length-delimited: every message is prefixed by its length in varint.", ErrorCodes::UNKNOWN_PROTOBUF_FORMAT);
}
}
diff --git a/dbms/src/Storages/MergeTree/MergedBlockOutputStream.cpp b/dbms/src/Storages/MergeTree/MergedBlockOutputStream.cpp
index c277ad29fdf..cd586539ccd 100644
--- a/dbms/src/Storages/MergeTree/MergedBlockOutputStream.cpp
+++ b/dbms/src/Storages/MergeTree/MergedBlockOutputStream.cpp
@@ -136,6 +136,9 @@ void fillIndexGranularityImpl(
if (index_granularity_for_block == 0) /// very rare case when index granularity bytes less then single row
index_granularity_for_block = 1;
+ /// The granule size must be less than or equal to the fixed index granularity
+ index_granularity_for_block = std::min(fixed_index_granularity_rows, index_granularity_for_block);
+
for (size_t current_row = index_offset; current_row < rows_in_block; current_row += index_granularity_for_block)
index_granularity.appendMark(index_granularity_for_block);
diff --git a/dbms/src/Storages/tests/gtest_aux_funcs_for_adaptive_granularity.cpp b/dbms/src/Storages/tests/gtest_aux_funcs_for_adaptive_granularity.cpp
index 1b2e404eae4..e20ac243c80 100644
--- a/dbms/src/Storages/tests/gtest_aux_funcs_for_adaptive_granularity.cpp
+++ b/dbms/src/Storages/tests/gtest_aux_funcs_for_adaptive_granularity.cpp
@@ -55,7 +55,7 @@ TEST(AdaptiveIndexGranularity, FillGranularityToyTests)
{ /// Blocks with granule size
MergeTreeIndexGranularity index_granularity;
- fillIndexGranularityImpl(block1, 1, 1, true, 0, index_granularity);
+ fillIndexGranularityImpl(block1, 1, 100, true, 0, index_granularity);
EXPECT_EQ(index_granularity.getMarksCount(), 1);
for (size_t i = 0; i < index_granularity.getMarksCount(); ++i)
EXPECT_EQ(index_granularity.getMarkRows(i), block1.rows());
@@ -79,7 +79,7 @@ TEST(AdaptiveIndexGranularity, FillGranularitySequenceOfBlocks)
auto block3 = getBlockWithSize(65536, 8);
MergeTreeIndexGranularity index_granularity;
for (const auto & block : {block1, block2, block3})
- fillIndexGranularityImpl(block, 1024, 0, false, 0, index_granularity);
+ fillIndexGranularityImpl(block, 1024, 8192, false, 0, index_granularity);
EXPECT_EQ(index_granularity.getMarksCount(), 192); /// granules
for (size_t i = 0; i < index_granularity.getMarksCount(); ++i)
@@ -92,7 +92,7 @@ TEST(AdaptiveIndexGranularity, FillGranularitySequenceOfBlocks)
EXPECT_EQ(block1.rows() + block2.rows() + block3.rows(), 3136);
MergeTreeIndexGranularity index_granularity;
for (const auto & block : {block1, block2, block3})
- fillIndexGranularityImpl(block, 1024, 0, false, 0, index_granularity);
+ fillIndexGranularityImpl(block, 1024, 8192, false, 0, index_granularity);
EXPECT_EQ(index_granularity.getMarksCount(), 98); /// granules
for (size_t i = 0; i < index_granularity.getMarksCount(); ++i)
@@ -110,7 +110,7 @@ TEST(AdaptiveIndexGranularity, FillGranularitySequenceOfBlocks)
size_t index_offset = 0;
for (const auto & block : {block1, block2, block3})
{
- fillIndexGranularityImpl(block, 16384, 0, false, index_offset, index_granularity);
+ fillIndexGranularityImpl(block, 16384, 8192, false, index_offset, index_granularity);
index_offset = index_granularity.getLastMarkRows() - block.rows();
}
EXPECT_EQ(index_granularity.getMarksCount(), 1); /// granules
diff --git a/dbms/tests/clickhouse-test b/dbms/tests/clickhouse-test
index d80d69d7cd3..6b11331f451 100755
--- a/dbms/tests/clickhouse-test
+++ b/dbms/tests/clickhouse-test
@@ -36,9 +36,9 @@ def remove_control_characters(s):
if int(s, base) < 0x10000:
return unichr(int(s, base))
return default
- s = re.sub(ur"(\d+);?", lambda c: str_to_int(c.group(1), c.group(0)), s)
- s = re.sub(ur"[xX]([0-9a-fA-F]+);?", lambda c: str_to_int(c.group(1), c.group(0), base=16), s)
- s = re.sub(ur"[\x00-\x08\x0b\x0e-\x1f\x7f]", "", s)
+ s = re.sub(r"(\d+);?", lambda c: str_to_int(c.group(1), c.group(0)), s)
+ s = re.sub(r"[xX]([0-9a-fA-F]+);?", lambda c: str_to_int(c.group(1), c.group(0), base=16), s)
+ s = re.sub(r"[\x00-\x08\x0b\x0e-\x1f\x7f]", "", s)
return s
def run_single_test(args, ext, server_logs_level, case_file, stdout_file, stderr_file):
@@ -214,6 +214,9 @@ def main(args):
if 'stateless' in suite and args.no_stateless:
print("Won't run stateless tests because they were manually disabled.")
continue
+ if 'stateful' in suite and args.no_stateful:
+ print("Won't run stateful tests because they were manually disabled.")
+ continue
# Reverse sort order: we want run newest test first.
# And not reverse subtests
@@ -258,8 +261,9 @@ def main(args):
report_testcase = et.Element("testcase", attrib = {"name": name})
try:
- print "{0:72}".format(name + ": "),
- sys.stdout.flush()
+ sys.stdout.write("{0:72}".format(name + ": "))
+ if run_total == 1:
+ sys.stdout.flush()
if args.skip and any(s in name for s in args.skip):
report_testcase.append(et.Element("skipped", attrib = {"message": "skip"}))
@@ -466,6 +470,7 @@ if __name__ == '__main__':
parser.add_argument('--parallel', default='1/1', help='Parralel test run number/total')
parser.add_argument('--no-stateless', action='store_true', help='Disable all stateless tests')
+ parser.add_argument('--no-stateful', action='store_true', help='Disable all stateful tests')
parser.add_argument('--skip', nargs='+', help="Skip these tests")
parser.add_argument('--no-long', action='store_false', dest='no_long', help='Do not run long tests')
group=parser.add_mutually_exclusive_group(required=False)
diff --git a/dbms/tests/queries/0_stateless/00041_big_array_join.sql b/dbms/tests/queries/0_stateless/00041_big_array_join.sql
index 0a73930cbc2..6486152dafe 100644
--- a/dbms/tests/queries/0_stateless/00041_big_array_join.sql
+++ b/dbms/tests/queries/0_stateless/00041_big_array_join.sql
@@ -1,4 +1,3 @@
-CREATE DATABASE IF NOT EXISTS test;
DROP TABLE IF EXISTS big_array;
CREATE TABLE big_array (x Array(UInt8)) ENGINE=TinyLog;
SET min_insert_block_size_rows = 0, min_insert_block_size_bytes = 0;
diff --git a/dbms/tests/queries/0_stateless/00043_summing_empty_part.sql b/dbms/tests/queries/0_stateless/00043_summing_empty_part.sql
index 8fb261b3fed..68fc4b5b1c4 100644
--- a/dbms/tests/queries/0_stateless/00043_summing_empty_part.sql
+++ b/dbms/tests/queries/0_stateless/00043_summing_empty_part.sql
@@ -1,4 +1,3 @@
-CREATE DATABASE IF NOT EXISTS test;
DROP TABLE IF EXISTS empty_summing;
CREATE TABLE empty_summing (d Date, k UInt64, v Int8) ENGINE=SummingMergeTree(d, k, 8192);
diff --git a/dbms/tests/queries/0_stateless/00062_replicated_merge_tree_alter_zookeeper.sql b/dbms/tests/queries/0_stateless/00062_replicated_merge_tree_alter_zookeeper.sql
index 78b80d4a10f..7686c27fe34 100644
--- a/dbms/tests/queries/0_stateless/00062_replicated_merge_tree_alter_zookeeper.sql
+++ b/dbms/tests/queries/0_stateless/00062_replicated_merge_tree_alter_zookeeper.sql
@@ -1,107 +1,107 @@
-DROP TABLE IF EXISTS replicated_alter1;
-DROP TABLE IF EXISTS replicated_alter2;
-CREATE TABLE replicated_alter1 (d Date, k UInt64, i32 Int32) ENGINE=ReplicatedMergeTree('/clickhouse/tables/test/alter', 'r1', d, k, 8192);
-CREATE TABLE replicated_alter2 (d Date, k UInt64, i32 Int32) ENGINE=ReplicatedMergeTree('/clickhouse/tables/test/alter', 'r2', d, k, 8192);
+DROP TABLE IF EXISTS test.replicated_alter1;
+DROP TABLE IF EXISTS test.replicated_alter2;
+CREATE TABLE test.replicated_alter1 (d Date, k UInt64, i32 Int32) ENGINE=ReplicatedMergeTree('/clickhouse/tables/test/alter', 'r1', d, k, 8192);
+CREATE TABLE test.replicated_alter2 (d Date, k UInt64, i32 Int32) ENGINE=ReplicatedMergeTree('/clickhouse/tables/test/alter', 'r2', d, k, 8192);
-INSERT INTO replicated_alter1 VALUES ('2015-01-01', 10, 42);
+INSERT INTO test.replicated_alter1 VALUES ('2015-01-01', 10, 42);
-DESC TABLE replicated_alter1;
-SHOW CREATE TABLE replicated_alter1;
-DESC TABLE replicated_alter2;
-SHOW CREATE TABLE replicated_alter2;
-SELECT * FROM replicated_alter1 ORDER BY k;
+DESC TABLE test.replicated_alter1;
+SHOW CREATE TABLE test.replicated_alter1;
+DESC TABLE test.replicated_alter2;
+SHOW CREATE TABLE test.replicated_alter2;
+SELECT * FROM test.replicated_alter1 ORDER BY k;
-ALTER TABLE replicated_alter1 ADD COLUMN dt DateTime;
-INSERT INTO replicated_alter1 VALUES ('2015-01-01', 9, 41, '1992-01-01 08:00:00');
+ALTER TABLE test.replicated_alter1 ADD COLUMN dt DateTime;
+INSERT INTO test.replicated_alter1 VALUES ('2015-01-01', 9, 41, '1992-01-01 08:00:00');
-DESC TABLE replicated_alter1;
-SHOW CREATE TABLE replicated_alter1;
-DESC TABLE replicated_alter2;
-SHOW CREATE TABLE replicated_alter2;
-SELECT * FROM replicated_alter1 ORDER BY k;
+DESC TABLE test.replicated_alter1;
+SHOW CREATE TABLE test.replicated_alter1;
+DESC TABLE test.replicated_alter2;
+SHOW CREATE TABLE test.replicated_alter2;
+SELECT * FROM test.replicated_alter1 ORDER BY k;
-ALTER TABLE replicated_alter1 ADD COLUMN n Nested(ui8 UInt8, s String);
-INSERT INTO replicated_alter1 VALUES ('2015-01-01', 8, 40, '2012-12-12 12:12:12', [1,2,3], ['12','13','14']);
+ALTER TABLE test.replicated_alter1 ADD COLUMN n Nested(ui8 UInt8, s String);
+INSERT INTO test.replicated_alter1 VALUES ('2015-01-01', 8, 40, '2012-12-12 12:12:12', [1,2,3], ['12','13','14']);
-DESC TABLE replicated_alter1;
-SHOW CREATE TABLE replicated_alter1;
-DESC TABLE replicated_alter2;
-SHOW CREATE TABLE replicated_alter2;
-SELECT * FROM replicated_alter1 ORDER BY k;
+DESC TABLE test.replicated_alter1;
+SHOW CREATE TABLE test.replicated_alter1;
+DESC TABLE test.replicated_alter2;
+SHOW CREATE TABLE test.replicated_alter2;
+SELECT * FROM test.replicated_alter1 ORDER BY k;
-ALTER TABLE replicated_alter1 ADD COLUMN `n.d` Array(Date);
-INSERT INTO replicated_alter1 VALUES ('2015-01-01', 7, 39, '2014-07-14 13:26:50', [10,20,30], ['120','130','140'],['2000-01-01','2000-01-01','2000-01-03']);
+ALTER TABLE test.replicated_alter1 ADD COLUMN `n.d` Array(Date);
+INSERT INTO test.replicated_alter1 VALUES ('2015-01-01', 7, 39, '2014-07-14 13:26:50', [10,20,30], ['120','130','140'],['2000-01-01','2000-01-01','2000-01-03']);
-DESC TABLE replicated_alter1;
-SHOW CREATE TABLE replicated_alter1;
-DESC TABLE replicated_alter2;
-SHOW CREATE TABLE replicated_alter2;
-SELECT * FROM replicated_alter1 ORDER BY k;
+DESC TABLE test.replicated_alter1;
+SHOW CREATE TABLE test.replicated_alter1;
+DESC TABLE test.replicated_alter2;
+SHOW CREATE TABLE test.replicated_alter2;
+SELECT * FROM test.replicated_alter1 ORDER BY k;
-ALTER TABLE replicated_alter1 ADD COLUMN s String DEFAULT '0';
-INSERT INTO replicated_alter1 VALUES ('2015-01-01', 6,38,'2014-07-15 13:26:50',[10,20,30],['asd','qwe','qwe'],['2000-01-01','2000-01-01','2000-01-03'],'100500');
+ALTER TABLE test.replicated_alter1 ADD COLUMN s String DEFAULT '0';
+INSERT INTO test.replicated_alter1 VALUES ('2015-01-01', 6,38,'2014-07-15 13:26:50',[10,20,30],['asd','qwe','qwe'],['2000-01-01','2000-01-01','2000-01-03'],'100500');
-DESC TABLE replicated_alter1;
-SHOW CREATE TABLE replicated_alter1;
-DESC TABLE replicated_alter2;
-SHOW CREATE TABLE replicated_alter2;
-SELECT * FROM replicated_alter1 ORDER BY k;
+DESC TABLE test.replicated_alter1;
+SHOW CREATE TABLE test.replicated_alter1;
+DESC TABLE test.replicated_alter2;
+SHOW CREATE TABLE test.replicated_alter2;
+SELECT * FROM test.replicated_alter1 ORDER BY k;
-ALTER TABLE replicated_alter1 DROP COLUMN `n.d`, MODIFY COLUMN s Int64;
+ALTER TABLE test.replicated_alter1 DROP COLUMN `n.d`, MODIFY COLUMN s Int64;
-DESC TABLE replicated_alter1;
-SHOW CREATE TABLE replicated_alter1;
-DESC TABLE replicated_alter2;
-SHOW CREATE TABLE replicated_alter2;
-SELECT * FROM replicated_alter1 ORDER BY k;
+DESC TABLE test.replicated_alter1;
+SHOW CREATE TABLE test.replicated_alter1;
+DESC TABLE test.replicated_alter2;
+SHOW CREATE TABLE test.replicated_alter2;
+SELECT * FROM test.replicated_alter1 ORDER BY k;
-ALTER TABLE replicated_alter1 ADD COLUMN `n.d` Array(Date), MODIFY COLUMN s UInt32;
+ALTER TABLE test.replicated_alter1 ADD COLUMN `n.d` Array(Date), MODIFY COLUMN s UInt32;
-DESC TABLE replicated_alter1;
-SHOW CREATE TABLE replicated_alter1;
-DESC TABLE replicated_alter2;
-SHOW CREATE TABLE replicated_alter2;
-SELECT * FROM replicated_alter1 ORDER BY k;
+DESC TABLE test.replicated_alter1;
+SHOW CREATE TABLE test.replicated_alter1;
+DESC TABLE test.replicated_alter2;
+SHOW CREATE TABLE test.replicated_alter2;
+SELECT * FROM test.replicated_alter1 ORDER BY k;
-ALTER TABLE replicated_alter1 DROP COLUMN n.ui8, DROP COLUMN n.d;
+ALTER TABLE test.replicated_alter1 DROP COLUMN n.ui8, DROP COLUMN n.d;
-DESC TABLE replicated_alter1;
-SHOW CREATE TABLE replicated_alter1;
-DESC TABLE replicated_alter2;
-SHOW CREATE TABLE replicated_alter2;
-SELECT * FROM replicated_alter1 ORDER BY k;
+DESC TABLE test.replicated_alter1;
+SHOW CREATE TABLE test.replicated_alter1;
+DESC TABLE test.replicated_alter2;
+SHOW CREATE TABLE test.replicated_alter2;
+SELECT * FROM test.replicated_alter1 ORDER BY k;
-ALTER TABLE replicated_alter1 DROP COLUMN n.s;
+ALTER TABLE test.replicated_alter1 DROP COLUMN n.s;
-DESC TABLE replicated_alter1;
-SHOW CREATE TABLE replicated_alter1;
-DESC TABLE replicated_alter2;
-SHOW CREATE TABLE replicated_alter2;
-SELECT * FROM replicated_alter1 ORDER BY k;
+DESC TABLE test.replicated_alter1;
+SHOW CREATE TABLE test.replicated_alter1;
+DESC TABLE test.replicated_alter2;
+SHOW CREATE TABLE test.replicated_alter2;
+SELECT * FROM test.replicated_alter1 ORDER BY k;
-ALTER TABLE replicated_alter1 ADD COLUMN n.s Array(String), ADD COLUMN n.d Array(Date);
+ALTER TABLE test.replicated_alter1 ADD COLUMN n.s Array(String), ADD COLUMN n.d Array(Date);
-DESC TABLE replicated_alter1;
-SHOW CREATE TABLE replicated_alter1;
-DESC TABLE replicated_alter2;
-SHOW CREATE TABLE replicated_alter2;
-SELECT * FROM replicated_alter1 ORDER BY k;
+DESC TABLE test.replicated_alter1;
+SHOW CREATE TABLE test.replicated_alter1;
+DESC TABLE test.replicated_alter2;
+SHOW CREATE TABLE test.replicated_alter2;
+SELECT * FROM test.replicated_alter1 ORDER BY k;
-ALTER TABLE replicated_alter1 DROP COLUMN n;
+ALTER TABLE test.replicated_alter1 DROP COLUMN n;
-DESC TABLE replicated_alter1;
-SHOW CREATE TABLE replicated_alter1;
-DESC TABLE replicated_alter2;
-SHOW CREATE TABLE replicated_alter2;
-SELECT * FROM replicated_alter1 ORDER BY k;
+DESC TABLE test.replicated_alter1;
+SHOW CREATE TABLE test.replicated_alter1;
+DESC TABLE test.replicated_alter2;
+SHOW CREATE TABLE test.replicated_alter2;
+SELECT * FROM test.replicated_alter1 ORDER BY k;
-ALTER TABLE replicated_alter1 MODIFY COLUMN dt Date, MODIFY COLUMN s DateTime;
+ALTER TABLE test.replicated_alter1 MODIFY COLUMN dt Date, MODIFY COLUMN s DateTime;
-DESC TABLE replicated_alter1;
-SHOW CREATE TABLE replicated_alter1;
-DESC TABLE replicated_alter2;
-SHOW CREATE TABLE replicated_alter2;
-SELECT * FROM replicated_alter1 ORDER BY k;
+DESC TABLE test.replicated_alter1;
+SHOW CREATE TABLE test.replicated_alter1;
+DESC TABLE test.replicated_alter2;
+SHOW CREATE TABLE test.replicated_alter2;
+SELECT * FROM test.replicated_alter1 ORDER BY k;
-DROP TABLE replicated_alter1;
-DROP TABLE replicated_alter2;
+DROP TABLE test.replicated_alter1;
+DROP TABLE test.replicated_alter2;
diff --git a/dbms/tests/queries/0_stateless/00109_shard_totals_after_having.sql b/dbms/tests/queries/0_stateless/00109_shard_totals_after_having.sql
index 903acec3fc2..98d0cc5a69f 100644
--- a/dbms/tests/queries/0_stateless/00109_shard_totals_after_having.sql
+++ b/dbms/tests/queries/0_stateless/00109_shard_totals_after_having.sql
@@ -2,8 +2,8 @@ SET max_rows_to_group_by = 100000;
SET max_block_size = 100001;
SET group_by_overflow_mode = 'any';
-DROP TABLE IF EXISTS numbers500k;
-CREATE VIEW numbers500k AS SELECT number FROM system.numbers LIMIT 500000;
+DROP TABLE IF EXISTS test.numbers500k;
+CREATE VIEW test.numbers500k AS SELECT number FROM system.numbers LIMIT 500000;
SET totals_mode = 'after_having_auto';
SELECT intDiv(number, 2) AS k, count(), argMax(toString(number), number) FROM remote('127.0.0.{2,3}', test, numbers500k) GROUP BY k WITH TOTALS ORDER BY k LIMIT 10;
@@ -17,4 +17,4 @@ SELECT intDiv(number, 2) AS k, count(), argMax(toString(number), number) FROM re
SET totals_mode = 'before_having';
SELECT intDiv(number, 2) AS k, count(), argMax(toString(number), number) FROM remote('127.0.0.{2,3}', test, numbers500k) GROUP BY k WITH TOTALS ORDER BY k LIMIT 10;
-DROP TABLE numbers500k;
+DROP TABLE test.numbers500k;
diff --git a/dbms/tests/queries/0_stateless/00111_shard_external_sort_distributed.sql b/dbms/tests/queries/0_stateless/00111_shard_external_sort_distributed.sql
index 89cd192fbb3..5c962b27d2a 100644
--- a/dbms/tests/queries/0_stateless/00111_shard_external_sort_distributed.sql
+++ b/dbms/tests/queries/0_stateless/00111_shard_external_sort_distributed.sql
@@ -1,9 +1,9 @@
SET max_memory_usage = 100000000;
SET max_bytes_before_external_sort = 20000000;
-DROP TABLE IF EXISTS numbers10m;
-CREATE VIEW numbers10m AS SELECT number FROM system.numbers LIMIT 10000000;
+DROP TABLE IF EXISTS test.numbers10m;
+CREATE VIEW test.numbers10m AS SELECT number FROM system.numbers LIMIT 10000000;
SELECT number FROM remote('127.0.0.{2,3}', test, numbers10m) ORDER BY number * 1234567890123456789 LIMIT 19999980, 20;
-DROP TABLE numbers10m;
+DROP TABLE test.numbers10m;
diff --git a/dbms/tests/queries/0_stateless/00126_buffer.sql b/dbms/tests/queries/0_stateless/00126_buffer.sql
index 5b06039fa81..25972f51aa6 100644
--- a/dbms/tests/queries/0_stateless/00126_buffer.sql
+++ b/dbms/tests/queries/0_stateless/00126_buffer.sql
@@ -1,62 +1,62 @@
-DROP TABLE IF EXISTS test.buffer;
-DROP TABLE IF EXISTS test.null_sink;
+DROP TABLE IF EXISTS test.buffer_00126;
+DROP TABLE IF EXISTS test.null_sink_00126;
-CREATE TABLE test.null_sink (a UInt8, b String, c Array(UInt32)) ENGINE = Null;
-CREATE TABLE test.buffer (a UInt8, b String, c Array(UInt32)) ENGINE = Buffer(test, null_sink, 1, 1000, 1000, 1000, 1000, 1000000, 1000000);
+CREATE TABLE test.null_sink_00126 (a UInt8, b String, c Array(UInt32)) ENGINE = Null;
+CREATE TABLE test.buffer_00126 (a UInt8, b String, c Array(UInt32)) ENGINE = Buffer(test, null_sink_00126, 1, 1000, 1000, 1000, 1000, 1000000, 1000000);
-INSERT INTO test.buffer VALUES (1, '2', [3]);
+INSERT INTO test.buffer_00126 VALUES (1, '2', [3]);
-SELECT a, b, c FROM test.buffer ORDER BY a, b, c;
-SELECT b, c, a FROM test.buffer ORDER BY a, b, c;
-SELECT c, a, b FROM test.buffer ORDER BY a, b, c;
-SELECT a, c, b FROM test.buffer ORDER BY a, b, c;
-SELECT b, a, c FROM test.buffer ORDER BY a, b, c;
-SELECT c, b, a FROM test.buffer ORDER BY a, b, c;
-SELECT a, b FROM test.buffer ORDER BY a, b, c;
-SELECT b, c FROM test.buffer ORDER BY a, b, c;
-SELECT c, a FROM test.buffer ORDER BY a, b, c;
-SELECT a, c FROM test.buffer ORDER BY a, b, c;
-SELECT b, a FROM test.buffer ORDER BY a, b, c;
-SELECT c, b FROM test.buffer ORDER BY a, b, c;
-SELECT a FROM test.buffer ORDER BY a, b, c;
-SELECT b FROM test.buffer ORDER BY a, b, c;
-SELECT c FROM test.buffer ORDER BY a, b, c;
+SELECT a, b, c FROM test.buffer_00126 ORDER BY a, b, c;
+SELECT b, c, a FROM test.buffer_00126 ORDER BY a, b, c;
+SELECT c, a, b FROM test.buffer_00126 ORDER BY a, b, c;
+SELECT a, c, b FROM test.buffer_00126 ORDER BY a, b, c;
+SELECT b, a, c FROM test.buffer_00126 ORDER BY a, b, c;
+SELECT c, b, a FROM test.buffer_00126 ORDER BY a, b, c;
+SELECT a, b FROM test.buffer_00126 ORDER BY a, b, c;
+SELECT b, c FROM test.buffer_00126 ORDER BY a, b, c;
+SELECT c, a FROM test.buffer_00126 ORDER BY a, b, c;
+SELECT a, c FROM test.buffer_00126 ORDER BY a, b, c;
+SELECT b, a FROM test.buffer_00126 ORDER BY a, b, c;
+SELECT c, b FROM test.buffer_00126 ORDER BY a, b, c;
+SELECT a FROM test.buffer_00126 ORDER BY a, b, c;
+SELECT b FROM test.buffer_00126 ORDER BY a, b, c;
+SELECT c FROM test.buffer_00126 ORDER BY a, b, c;
-INSERT INTO test.buffer (c, b, a) VALUES ([7], '8', 9);
+INSERT INTO test.buffer_00126 (c, b, a) VALUES ([7], '8', 9);
-SELECT a, b, c FROM test.buffer ORDER BY a, b, c;
-SELECT b, c, a FROM test.buffer ORDER BY a, b, c;
-SELECT c, a, b FROM test.buffer ORDER BY a, b, c;
-SELECT a, c, b FROM test.buffer ORDER BY a, b, c;
-SELECT b, a, c FROM test.buffer ORDER BY a, b, c;
-SELECT c, b, a FROM test.buffer ORDER BY a, b, c;
-SELECT a, b FROM test.buffer ORDER BY a, b, c;
-SELECT b, c FROM test.buffer ORDER BY a, b, c;
-SELECT c, a FROM test.buffer ORDER BY a, b, c;
-SELECT a, c FROM test.buffer ORDER BY a, b, c;
-SELECT b, a FROM test.buffer ORDER BY a, b, c;
-SELECT c, b FROM test.buffer ORDER BY a, b, c;
-SELECT a FROM test.buffer ORDER BY a, b, c;
-SELECT b FROM test.buffer ORDER BY a, b, c;
-SELECT c FROM test.buffer ORDER BY a, b, c;
+SELECT a, b, c FROM test.buffer_00126 ORDER BY a, b, c;
+SELECT b, c, a FROM test.buffer_00126 ORDER BY a, b, c;
+SELECT c, a, b FROM test.buffer_00126 ORDER BY a, b, c;
+SELECT a, c, b FROM test.buffer_00126 ORDER BY a, b, c;
+SELECT b, a, c FROM test.buffer_00126 ORDER BY a, b, c;
+SELECT c, b, a FROM test.buffer_00126 ORDER BY a, b, c;
+SELECT a, b FROM test.buffer_00126 ORDER BY a, b, c;
+SELECT b, c FROM test.buffer_00126 ORDER BY a, b, c;
+SELECT c, a FROM test.buffer_00126 ORDER BY a, b, c;
+SELECT a, c FROM test.buffer_00126 ORDER BY a, b, c;
+SELECT b, a FROM test.buffer_00126 ORDER BY a, b, c;
+SELECT c, b FROM test.buffer_00126 ORDER BY a, b, c;
+SELECT a FROM test.buffer_00126 ORDER BY a, b, c;
+SELECT b FROM test.buffer_00126 ORDER BY a, b, c;
+SELECT c FROM test.buffer_00126 ORDER BY a, b, c;
-INSERT INTO test.buffer (a, c) VALUES (11, [33]);
+INSERT INTO test.buffer_00126 (a, c) VALUES (11, [33]);
-SELECT a, b, c FROM test.buffer ORDER BY a, b, c;
-SELECT b, c, a FROM test.buffer ORDER BY a, b, c;
-SELECT c, a, b FROM test.buffer ORDER BY a, b, c;
-SELECT a, c, b FROM test.buffer ORDER BY a, b, c;
-SELECT b, a, c FROM test.buffer ORDER BY a, b, c;
-SELECT c, b, a FROM test.buffer ORDER BY a, b, c;
-SELECT a, b FROM test.buffer ORDER BY a, b, c;
-SELECT b, c FROM test.buffer ORDER BY a, b, c;
-SELECT c, a FROM test.buffer ORDER BY a, b, c;
-SELECT a, c FROM test.buffer ORDER BY a, b, c;
-SELECT b, a FROM test.buffer ORDER BY a, b, c;
-SELECT c, b FROM test.buffer ORDER BY a, b, c;
-SELECT a FROM test.buffer ORDER BY a, b, c;
-SELECT b FROM test.buffer ORDER BY a, b, c;
-SELECT c FROM test.buffer ORDER BY a, b, c;
+SELECT a, b, c FROM test.buffer_00126 ORDER BY a, b, c;
+SELECT b, c, a FROM test.buffer_00126 ORDER BY a, b, c;
+SELECT c, a, b FROM test.buffer_00126 ORDER BY a, b, c;
+SELECT a, c, b FROM test.buffer_00126 ORDER BY a, b, c;
+SELECT b, a, c FROM test.buffer_00126 ORDER BY a, b, c;
+SELECT c, b, a FROM test.buffer_00126 ORDER BY a, b, c;
+SELECT a, b FROM test.buffer_00126 ORDER BY a, b, c;
+SELECT b, c FROM test.buffer_00126 ORDER BY a, b, c;
+SELECT c, a FROM test.buffer_00126 ORDER BY a, b, c;
+SELECT a, c FROM test.buffer_00126 ORDER BY a, b, c;
+SELECT b, a FROM test.buffer_00126 ORDER BY a, b, c;
+SELECT c, b FROM test.buffer_00126 ORDER BY a, b, c;
+SELECT a FROM test.buffer_00126 ORDER BY a, b, c;
+SELECT b FROM test.buffer_00126 ORDER BY a, b, c;
+SELECT c FROM test.buffer_00126 ORDER BY a, b, c;
-DROP TABLE test.buffer;
-DROP TABLE test.null_sink;
+DROP TABLE test.buffer_00126;
+DROP TABLE test.null_sink_00126;
diff --git a/dbms/tests/queries/0_stateless/00160_merge_and_index_in_in.sql b/dbms/tests/queries/0_stateless/00160_merge_and_index_in_in.sql
index 60fa5ac3ed8..a1a242c0b80 100644
--- a/dbms/tests/queries/0_stateless/00160_merge_and_index_in_in.sql
+++ b/dbms/tests/queries/0_stateless/00160_merge_and_index_in_in.sql
@@ -1,23 +1,23 @@
-DROP TABLE IF EXISTS test.mt;
-DROP TABLE IF EXISTS test.merge;
+DROP TABLE IF EXISTS test.mt_00160;
+DROP TABLE IF EXISTS test.merge_00160;
-CREATE TABLE test.mt (d Date DEFAULT toDate('2015-05-01'), x UInt64) ENGINE = MergeTree(d, x, 1);
-CREATE TABLE test.merge (d Date, x UInt64) ENGINE = Merge(test, '^mt$');
+CREATE TABLE test.mt_00160 (d Date DEFAULT toDate('2015-05-01'), x UInt64) ENGINE = MergeTree(d, x, 1);
+CREATE TABLE test.merge_00160 (d Date, x UInt64) ENGINE = Merge(test, '^mt_00160$');
SET min_insert_block_size_rows = 0, min_insert_block_size_bytes = 0;
SET max_block_size = 1000000;
-INSERT INTO test.mt (x) SELECT number AS x FROM system.numbers LIMIT 100000;
+INSERT INTO test.mt_00160 (x) SELECT number AS x FROM system.numbers LIMIT 100000;
-SELECT *, b FROM test.mt WHERE x IN (12345, 67890) AND NOT ignore(blockSize() < 10 AS b) ORDER BY x;
-SELECT *, b FROM test.merge WHERE x IN (12345, 67890) AND NOT ignore(blockSize() < 10 AS b) ORDER BY x;
+SELECT *, b FROM test.mt_00160 WHERE x IN (12345, 67890) AND NOT ignore(blockSize() < 10 AS b) ORDER BY x;
+SELECT *, b FROM test.merge_00160 WHERE x IN (12345, 67890) AND NOT ignore(blockSize() < 10 AS b) ORDER BY x;
-DROP TABLE test.merge;
-DROP TABLE test.mt;
+DROP TABLE test.merge_00160;
+DROP TABLE test.mt_00160;
-CREATE TABLE test.mt (d Date DEFAULT toDate('2015-05-01'), x UInt64, y UInt64, z UInt64) ENGINE = MergeTree(d, (x, z), 1);
+CREATE TABLE test.mt_00160 (d Date DEFAULT toDate('2015-05-01'), x UInt64, y UInt64, z UInt64) ENGINE = MergeTree(d, (x, z), 1);
-INSERT INTO test.mt (x, y, z) SELECT number AS x, number + 10 AS y, number / 2 AS z FROM system.numbers LIMIT 100000;
+INSERT INTO test.mt_00160 (x, y, z) SELECT number AS x, number + 10 AS y, number / 2 AS z FROM system.numbers LIMIT 100000;
-SELECT *, b FROM test.mt WHERE (z, y, x) IN ((617, 1244, 1234), (2839, 5688, 5678), (1,1,1)) AND NOT ignore(blockSize() < 10 AS b) ORDER BY (x, y, z);
+SELECT *, b FROM test.mt_00160 WHERE (z, y, x) IN ((617, 1244, 1234), (2839, 5688, 5678), (1,1,1)) AND NOT ignore(blockSize() < 10 AS b) ORDER BY (x, y, z);
-DROP TABLE test.mt;
+DROP TABLE test.mt_00160;
diff --git a/dbms/tests/queries/0_stateless/00168_buffer_defaults.sql b/dbms/tests/queries/0_stateless/00168_buffer_defaults.sql
index a42105f6b12..33540431b9a 100644
--- a/dbms/tests/queries/0_stateless/00168_buffer_defaults.sql
+++ b/dbms/tests/queries/0_stateless/00168_buffer_defaults.sql
@@ -1,12 +1,12 @@
-DROP TABLE IF EXISTS test.mt;
-DROP TABLE IF EXISTS test.mt_buffer;
-CREATE TABLE test.mt (EventDate Date, UTCEventTime DateTime, MoscowEventDate Date DEFAULT toDate(UTCEventTime)) ENGINE = MergeTree(EventDate, UTCEventTime, 8192);
-CREATE TABLE test.mt_buffer AS test.mt ENGINE = Buffer(test, mt, 16, 10, 100, 10000, 1000000, 10000000, 100000000);
-DESC TABLE test.mt;
-DESC TABLE test.mt_buffer;
-INSERT INTO test.mt (EventDate, UTCEventTime) VALUES ('2015-06-09', '2015-06-09 01:02:03');
-SELECT * FROM test.mt_buffer;
-INSERT INTO test.mt_buffer (EventDate, UTCEventTime) VALUES ('2015-06-09', '2015-06-09 01:02:03');
-SELECT * FROM test.mt_buffer;
-DROP TABLE test.mt_buffer;
-DROP TABLE test.mt;
+DROP TABLE IF EXISTS test.mt_00168;
+DROP TABLE IF EXISTS test.mt_00168_buffer;
+CREATE TABLE test.mt_00168 (EventDate Date, UTCEventTime DateTime, MoscowEventDate Date DEFAULT toDate(UTCEventTime)) ENGINE = MergeTree(EventDate, UTCEventTime, 8192);
+CREATE TABLE test.mt_00168_buffer AS test.mt_00168 ENGINE = Buffer(test, mt_00168, 16, 10, 100, 10000, 1000000, 10000000, 100000000);
+DESC TABLE test.mt_00168;
+DESC TABLE test.mt_00168_buffer;
+INSERT INTO test.mt_00168 (EventDate, UTCEventTime) VALUES ('2015-06-09', '2015-06-09 01:02:03');
+SELECT * FROM test.mt_00168_buffer;
+INSERT INTO test.mt_00168_buffer (EventDate, UTCEventTime) VALUES ('2015-06-09', '2015-06-09 01:02:03');
+SELECT * FROM test.mt_00168_buffer;
+DROP TABLE test.mt_00168_buffer;
+DROP TABLE test.mt_00168;
diff --git a/dbms/tests/queries/0_stateless/00180_attach_materialized_view.sql b/dbms/tests/queries/0_stateless/00180_attach_materialized_view.sql
index 156c1b262f1..089e4926bcf 100644
--- a/dbms/tests/queries/0_stateless/00180_attach_materialized_view.sql
+++ b/dbms/tests/queries/0_stateless/00180_attach_materialized_view.sql
@@ -1,12 +1,12 @@
-DROP TABLE IF EXISTS t;
-DROP TABLE IF EXISTS mv;
-DROP TABLE IF EXISTS `.inner.mv`;
+DROP TABLE IF EXISTS t_00180;
+DROP TABLE IF EXISTS mv_00180;
+DROP TABLE IF EXISTS `.inner.mv_00180`;
-CREATE TABLE t (x UInt8) ENGINE = Null;
-CREATE MATERIALIZED VIEW mv ENGINE = Null AS SELECT * FROM t;
+CREATE TABLE t_00180 (x UInt8) ENGINE = Null;
+CREATE MATERIALIZED VIEW mv_00180 ENGINE = Null AS SELECT * FROM t_00180;
-DETACH TABLE mv;
-ATTACH MATERIALIZED VIEW mv ENGINE = Null AS SELECT * FROM t;
+DETACH TABLE mv_00180;
+ATTACH MATERIALIZED VIEW mv_00180 ENGINE = Null AS SELECT * FROM t_00180;
-DROP TABLE t;
-DROP TABLE mv;
+DROP TABLE t_00180;
+DROP TABLE mv_00180;
diff --git a/dbms/tests/queries/0_stateless/00223_shard_distributed_aggregation_memory_efficient.sql b/dbms/tests/queries/0_stateless/00223_shard_distributed_aggregation_memory_efficient.sql
index 93aa6eaf693..7bd6a4042af 100644
--- a/dbms/tests/queries/0_stateless/00223_shard_distributed_aggregation_memory_efficient.sql
+++ b/dbms/tests/queries/0_stateless/00223_shard_distributed_aggregation_memory_efficient.sql
@@ -1,131 +1,131 @@
SET max_block_size = 1000;
-DROP TABLE IF EXISTS test.numbers_10;
-CREATE TABLE test.numbers_10 ENGINE = Log AS SELECT * FROM system.numbers LIMIT 10000;
+DROP TABLE IF EXISTS test.numbers_10_00223;
+CREATE TABLE test.numbers_10_00223 ENGINE = Log AS SELECT * FROM system.numbers LIMIT 10000;
SET distributed_aggregation_memory_efficient = 0;
SET group_by_two_level_threshold = 1000;
-SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
-SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
-SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
-SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
-SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
-SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
-SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
-SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
-SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
-SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
+SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
+SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
+SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
+SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
+SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
+SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
+SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
+SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
+SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
+SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
SET distributed_aggregation_memory_efficient = 0;
SET group_by_two_level_threshold = 7;
-SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
-SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
-SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
-SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
-SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
-SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
-SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
-SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
-SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
-SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
+SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
+SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
+SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
+SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
+SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
+SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
+SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
+SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
+SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
+SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
SET distributed_aggregation_memory_efficient = 1;
SET group_by_two_level_threshold = 1000;
-SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
-SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
-SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
-SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
-SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
-SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
-SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
-SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
-SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
-SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
+SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
+SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
+SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
+SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
+SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
+SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
+SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
+SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
+SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
+SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
SET distributed_aggregation_memory_efficient = 1;
SET group_by_two_level_threshold = 7;
-SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
-SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
-SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
-SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
-SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
-SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
-SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
-SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
-SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
-SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
+SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
+SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
+SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
+SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
+SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
+SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
+SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
+SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
+SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
+SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
SET distributed_aggregation_memory_efficient = 1;
SET group_by_two_level_threshold = 1;
-SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
-SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
-SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
-SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
-SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
-SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
-SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
-SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
-SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
-SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
+SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
+SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
+SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
+SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
+SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
+SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
+SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
+SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
+SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
+SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number);
SET distributed_aggregation_memory_efficient = 1;
SET group_by_two_level_threshold = 1000;
-SELECT sum(c = 1) IN (0, 10), sum(c = 2) IN (0, 5), sum(c) = 10 FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 5 : 10) AND number >= (randConstant() % 2 ? 0 : 5) GROUP BY number);
-SELECT sum(c = 1) IN (0, 10), sum(c = 2) IN (0, 5), sum(c) = 10 FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 5 : 10) AND number >= (randConstant() % 2 ? 0 : 5) GROUP BY number);
-SELECT sum(c = 1) IN (0, 10), sum(c = 2) IN (0, 5), sum(c) = 10 FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 5 : 10) AND number >= (randConstant() % 2 ? 0 : 5) GROUP BY number);
-SELECT sum(c = 1) IN (0, 10), sum(c = 2) IN (0, 5), sum(c) = 10 FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 5 : 10) AND number >= (randConstant() % 2 ? 0 : 5) GROUP BY number);
-SELECT sum(c = 1) IN (0, 10), sum(c = 2) IN (0, 5), sum(c) = 10 FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 5 : 10) AND number >= (randConstant() % 2 ? 0 : 5) GROUP BY number);
-SELECT sum(c = 1) IN (0, 10), sum(c = 2) IN (0, 5), sum(c) = 10 FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 5 : 10) AND number >= (randConstant() % 2 ? 0 : 5) GROUP BY number);
-SELECT sum(c = 1) IN (0, 10), sum(c = 2) IN (0, 5), sum(c) = 10 FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 5 : 10) AND number >= (randConstant() % 2 ? 0 : 5) GROUP BY number);
-SELECT sum(c = 1) IN (0, 10), sum(c = 2) IN (0, 5), sum(c) = 10 FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 5 : 10) AND number >= (randConstant() % 2 ? 0 : 5) GROUP BY number);
-SELECT sum(c = 1) IN (0, 10), sum(c = 2) IN (0, 5), sum(c) = 10 FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 5 : 10) AND number >= (randConstant() % 2 ? 0 : 5) GROUP BY number);
-SELECT sum(c = 1) IN (0, 10), sum(c = 2) IN (0, 5), sum(c) = 10 FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 5 : 10) AND number >= (randConstant() % 2 ? 0 : 5) GROUP BY number);
+SELECT sum(c = 1) IN (0, 10), sum(c = 2) IN (0, 5), sum(c) = 10 FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) AND number >= (randConstant() % 2 ? 0 : 5) GROUP BY number);
+SELECT sum(c = 1) IN (0, 10), sum(c = 2) IN (0, 5), sum(c) = 10 FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) AND number >= (randConstant() % 2 ? 0 : 5) GROUP BY number);
+SELECT sum(c = 1) IN (0, 10), sum(c = 2) IN (0, 5), sum(c) = 10 FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) AND number >= (randConstant() % 2 ? 0 : 5) GROUP BY number);
+SELECT sum(c = 1) IN (0, 10), sum(c = 2) IN (0, 5), sum(c) = 10 FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) AND number >= (randConstant() % 2 ? 0 : 5) GROUP BY number);
+SELECT sum(c = 1) IN (0, 10), sum(c = 2) IN (0, 5), sum(c) = 10 FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) AND number >= (randConstant() % 2 ? 0 : 5) GROUP BY number);
+SELECT sum(c = 1) IN (0, 10), sum(c = 2) IN (0, 5), sum(c) = 10 FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) AND number >= (randConstant() % 2 ? 0 : 5) GROUP BY number);
+SELECT sum(c = 1) IN (0, 10), sum(c = 2) IN (0, 5), sum(c) = 10 FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) AND number >= (randConstant() % 2 ? 0 : 5) GROUP BY number);
+SELECT sum(c = 1) IN (0, 10), sum(c = 2) IN (0, 5), sum(c) = 10 FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) AND number >= (randConstant() % 2 ? 0 : 5) GROUP BY number);
+SELECT sum(c = 1) IN (0, 10), sum(c = 2) IN (0, 5), sum(c) = 10 FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) AND number >= (randConstant() % 2 ? 0 : 5) GROUP BY number);
+SELECT sum(c = 1) IN (0, 10), sum(c = 2) IN (0, 5), sum(c) = 10 FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) AND number >= (randConstant() % 2 ? 0 : 5) GROUP BY number);
SET distributed_aggregation_memory_efficient = 1;
SET group_by_two_level_threshold = 1;
-SELECT sum(c = 1) IN (0, 10), sum(c = 2) IN (0, 5), sum(c) = 10 FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 5 : 10) AND number >= (randConstant() % 2 ? 0 : 5) GROUP BY number);
-SELECT sum(c = 1) IN (0, 10), sum(c = 2) IN (0, 5), sum(c) = 10 FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 5 : 10) AND number >= (randConstant() % 2 ? 0 : 5) GROUP BY number);
-SELECT sum(c = 1) IN (0, 10), sum(c = 2) IN (0, 5), sum(c) = 10 FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 5 : 10) AND number >= (randConstant() % 2 ? 0 : 5) GROUP BY number);
-SELECT sum(c = 1) IN (0, 10), sum(c = 2) IN (0, 5), sum(c) = 10 FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 5 : 10) AND number >= (randConstant() % 2 ? 0 : 5) GROUP BY number);
-SELECT sum(c = 1) IN (0, 10), sum(c = 2) IN (0, 5), sum(c) = 10 FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 5 : 10) AND number >= (randConstant() % 2 ? 0 : 5) GROUP BY number);
-SELECT sum(c = 1) IN (0, 10), sum(c = 2) IN (0, 5), sum(c) = 10 FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 5 : 10) AND number >= (randConstant() % 2 ? 0 : 5) GROUP BY number);
-SELECT sum(c = 1) IN (0, 10), sum(c = 2) IN (0, 5), sum(c) = 10 FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 5 : 10) AND number >= (randConstant() % 2 ? 0 : 5) GROUP BY number);
-SELECT sum(c = 1) IN (0, 10), sum(c = 2) IN (0, 5), sum(c) = 10 FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 5 : 10) AND number >= (randConstant() % 2 ? 0 : 5) GROUP BY number);
-SELECT sum(c = 1) IN (0, 10), sum(c = 2) IN (0, 5), sum(c) = 10 FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 5 : 10) AND number >= (randConstant() % 2 ? 0 : 5) GROUP BY number);
-SELECT sum(c = 1) IN (0, 10), sum(c = 2) IN (0, 5), sum(c) = 10 FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 5 : 10) AND number >= (randConstant() % 2 ? 0 : 5) GROUP BY number);
+SELECT sum(c = 1) IN (0, 10), sum(c = 2) IN (0, 5), sum(c) = 10 FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) AND number >= (randConstant() % 2 ? 0 : 5) GROUP BY number);
+SELECT sum(c = 1) IN (0, 10), sum(c = 2) IN (0, 5), sum(c) = 10 FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) AND number >= (randConstant() % 2 ? 0 : 5) GROUP BY number);
+SELECT sum(c = 1) IN (0, 10), sum(c = 2) IN (0, 5), sum(c) = 10 FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) AND number >= (randConstant() % 2 ? 0 : 5) GROUP BY number);
+SELECT sum(c = 1) IN (0, 10), sum(c = 2) IN (0, 5), sum(c) = 10 FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) AND number >= (randConstant() % 2 ? 0 : 5) GROUP BY number);
+SELECT sum(c = 1) IN (0, 10), sum(c = 2) IN (0, 5), sum(c) = 10 FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) AND number >= (randConstant() % 2 ? 0 : 5) GROUP BY number);
+SELECT sum(c = 1) IN (0, 10), sum(c = 2) IN (0, 5), sum(c) = 10 FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) AND number >= (randConstant() % 2 ? 0 : 5) GROUP BY number);
+SELECT sum(c = 1) IN (0, 10), sum(c = 2) IN (0, 5), sum(c) = 10 FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) AND number >= (randConstant() % 2 ? 0 : 5) GROUP BY number);
+SELECT sum(c = 1) IN (0, 10), sum(c = 2) IN (0, 5), sum(c) = 10 FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) AND number >= (randConstant() % 2 ? 0 : 5) GROUP BY number);
+SELECT sum(c = 1) IN (0, 10), sum(c = 2) IN (0, 5), sum(c) = 10 FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) AND number >= (randConstant() % 2 ? 0 : 5) GROUP BY number);
+SELECT sum(c = 1) IN (0, 10), sum(c = 2) IN (0, 5), sum(c) = 10 FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) AND number >= (randConstant() % 2 ? 0 : 5) GROUP BY number);
SET distributed_aggregation_memory_efficient = 1;
SET group_by_two_level_threshold = 7;
-SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10), sum(c) IN (10, 15, 20) FROM (SELECT number AS k1, number + 1 AS k2, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY k1, k2 HAVING count() > 0 ORDER BY k1, k2);
-SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10), sum(c) IN (10, 15, 20) FROM (SELECT number AS k1, number + 1 AS k2, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY k1, k2 HAVING count() > 0 ORDER BY k1, k2);
-SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10), sum(c) IN (10, 15, 20) FROM (SELECT number AS k1, number + 1 AS k2, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY k1, k2 HAVING count() > 0 ORDER BY k1, k2);
-SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10), sum(c) IN (10, 15, 20) FROM (SELECT number AS k1, number + 1 AS k2, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY k1, k2 HAVING count() > 0 ORDER BY k1, k2);
-SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10), sum(c) IN (10, 15, 20) FROM (SELECT number AS k1, number + 1 AS k2, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY k1, k2 HAVING count() > 0 ORDER BY k1, k2);
-SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10), sum(c) IN (10, 15, 20) FROM (SELECT number AS k1, number + 1 AS k2, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY k1, k2 HAVING count() > 0 ORDER BY k1, k2);
-SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10), sum(c) IN (10, 15, 20) FROM (SELECT number AS k1, number + 1 AS k2, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY k1, k2 HAVING count() > 0 ORDER BY k1, k2);
-SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10), sum(c) IN (10, 15, 20) FROM (SELECT number AS k1, number + 1 AS k2, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY k1, k2 HAVING count() > 0 ORDER BY k1, k2);
-SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10), sum(c) IN (10, 15, 20) FROM (SELECT number AS k1, number + 1 AS k2, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY k1, k2 HAVING count() > 0 ORDER BY k1, k2);
-SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10), sum(c) IN (10, 15, 20) FROM (SELECT number AS k1, number + 1 AS k2, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY k1, k2 HAVING count() > 0 ORDER BY k1, k2);
+SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10), sum(c) IN (10, 15, 20) FROM (SELECT number AS k1, number + 1 AS k2, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY k1, k2 HAVING count() > 0 ORDER BY k1, k2);
+SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10), sum(c) IN (10, 15, 20) FROM (SELECT number AS k1, number + 1 AS k2, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY k1, k2 HAVING count() > 0 ORDER BY k1, k2);
+SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10), sum(c) IN (10, 15, 20) FROM (SELECT number AS k1, number + 1 AS k2, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY k1, k2 HAVING count() > 0 ORDER BY k1, k2);
+SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10), sum(c) IN (10, 15, 20) FROM (SELECT number AS k1, number + 1 AS k2, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY k1, k2 HAVING count() > 0 ORDER BY k1, k2);
+SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10), sum(c) IN (10, 15, 20) FROM (SELECT number AS k1, number + 1 AS k2, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY k1, k2 HAVING count() > 0 ORDER BY k1, k2);
+SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10), sum(c) IN (10, 15, 20) FROM (SELECT number AS k1, number + 1 AS k2, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY k1, k2 HAVING count() > 0 ORDER BY k1, k2);
+SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10), sum(c) IN (10, 15, 20) FROM (SELECT number AS k1, number + 1 AS k2, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY k1, k2 HAVING count() > 0 ORDER BY k1, k2);
+SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10), sum(c) IN (10, 15, 20) FROM (SELECT number AS k1, number + 1 AS k2, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY k1, k2 HAVING count() > 0 ORDER BY k1, k2);
+SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10), sum(c) IN (10, 15, 20) FROM (SELECT number AS k1, number + 1 AS k2, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY k1, k2 HAVING count() > 0 ORDER BY k1, k2);
+SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10), sum(c) IN (10, 15, 20) FROM (SELECT number AS k1, number + 1 AS k2, count() AS c FROM remote('127.0.0.{2,3}', test.numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY k1, k2 HAVING count() > 0 ORDER BY k1, k2);
-SELECT sum(c = 20) IN (5, 10), sum(c = 10) IN (0, 5), sum(u != 10) = 0 FROM (SELECT intDiv(number, 10) AS k1, k1 + 1 AS k2, count() AS c, uniq(number) AS u FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 50 : 100) GROUP BY k1, k2 HAVING count() > 0 ORDER BY k1, k2);
-SELECT sum(c = 20) IN (5, 10), sum(c = 10) IN (0, 5), sum(u != 10) = 0 FROM (SELECT intDiv(number, 10) AS k1, k1 + 1 AS k2, count() AS c, uniq(number) AS u FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 50 : 100) GROUP BY k1, k2 HAVING count() > 0 ORDER BY k1, k2);
-SELECT sum(c = 20) IN (5, 10), sum(c = 10) IN (0, 5), sum(u != 10) = 0 FROM (SELECT intDiv(number, 10) AS k1, k1 + 1 AS k2, count() AS c, uniq(number) AS u FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 50 : 100) GROUP BY k1, k2 HAVING count() > 0 ORDER BY k1, k2);
-SELECT sum(c = 20) IN (5, 10), sum(c = 10) IN (0, 5), sum(u != 10) = 0 FROM (SELECT intDiv(number, 10) AS k1, k1 + 1 AS k2, count() AS c, uniq(number) AS u FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 50 : 100) GROUP BY k1, k2 HAVING count() > 0 ORDER BY k1, k2);
-SELECT sum(c = 20) IN (5, 10), sum(c = 10) IN (0, 5), sum(u != 10) = 0 FROM (SELECT intDiv(number, 10) AS k1, k1 + 1 AS k2, count() AS c, uniq(number) AS u FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 50 : 100) GROUP BY k1, k2 HAVING count() > 0 ORDER BY k1, k2);
-SELECT sum(c = 20) IN (5, 10), sum(c = 10) IN (0, 5), sum(u != 10) = 0 FROM (SELECT intDiv(number, 10) AS k1, k1 + 1 AS k2, count() AS c, uniq(number) AS u FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 50 : 100) GROUP BY k1, k2 HAVING count() > 0 ORDER BY k1, k2);
-SELECT sum(c = 20) IN (5, 10), sum(c = 10) IN (0, 5), sum(u != 10) = 0 FROM (SELECT intDiv(number, 10) AS k1, k1 + 1 AS k2, count() AS c, uniq(number) AS u FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 50 : 100) GROUP BY k1, k2 HAVING count() > 0 ORDER BY k1, k2);
-SELECT sum(c = 20) IN (5, 10), sum(c = 10) IN (0, 5), sum(u != 10) = 0 FROM (SELECT intDiv(number, 10) AS k1, k1 + 1 AS k2, count() AS c, uniq(number) AS u FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 50 : 100) GROUP BY k1, k2 HAVING count() > 0 ORDER BY k1, k2);
-SELECT sum(c = 20) IN (5, 10), sum(c = 10) IN (0, 5), sum(u != 10) = 0 FROM (SELECT intDiv(number, 10) AS k1, k1 + 1 AS k2, count() AS c, uniq(number) AS u FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 50 : 100) GROUP BY k1, k2 HAVING count() > 0 ORDER BY k1, k2);
-SELECT sum(c = 20) IN (5, 10), sum(c = 10) IN (0, 5), sum(u != 10) = 0 FROM (SELECT intDiv(number, 10) AS k1, k1 + 1 AS k2, count() AS c, uniq(number) AS u FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 50 : 100) GROUP BY k1, k2 HAVING count() > 0 ORDER BY k1, k2);
+SELECT sum(c = 20) IN (5, 10), sum(c = 10) IN (0, 5), sum(u != 10) = 0 FROM (SELECT intDiv(number, 10) AS k1, k1 + 1 AS k2, count() AS c, uniq(number) AS u FROM remote('127.0.0.{2,3}', test.numbers_10_00223) WHERE number < (randConstant() % 2 ? 50 : 100) GROUP BY k1, k2 HAVING count() > 0 ORDER BY k1, k2);
+SELECT sum(c = 20) IN (5, 10), sum(c = 10) IN (0, 5), sum(u != 10) = 0 FROM (SELECT intDiv(number, 10) AS k1, k1 + 1 AS k2, count() AS c, uniq(number) AS u FROM remote('127.0.0.{2,3}', test.numbers_10_00223) WHERE number < (randConstant() % 2 ? 50 : 100) GROUP BY k1, k2 HAVING count() > 0 ORDER BY k1, k2);
+SELECT sum(c = 20) IN (5, 10), sum(c = 10) IN (0, 5), sum(u != 10) = 0 FROM (SELECT intDiv(number, 10) AS k1, k1 + 1 AS k2, count() AS c, uniq(number) AS u FROM remote('127.0.0.{2,3}', test.numbers_10_00223) WHERE number < (randConstant() % 2 ? 50 : 100) GROUP BY k1, k2 HAVING count() > 0 ORDER BY k1, k2);
+SELECT sum(c = 20) IN (5, 10), sum(c = 10) IN (0, 5), sum(u != 10) = 0 FROM (SELECT intDiv(number, 10) AS k1, k1 + 1 AS k2, count() AS c, uniq(number) AS u FROM remote('127.0.0.{2,3}', test.numbers_10_00223) WHERE number < (randConstant() % 2 ? 50 : 100) GROUP BY k1, k2 HAVING count() > 0 ORDER BY k1, k2);
+SELECT sum(c = 20) IN (5, 10), sum(c = 10) IN (0, 5), sum(u != 10) = 0 FROM (SELECT intDiv(number, 10) AS k1, k1 + 1 AS k2, count() AS c, uniq(number) AS u FROM remote('127.0.0.{2,3}', test.numbers_10_00223) WHERE number < (randConstant() % 2 ? 50 : 100) GROUP BY k1, k2 HAVING count() > 0 ORDER BY k1, k2);
+SELECT sum(c = 20) IN (5, 10), sum(c = 10) IN (0, 5), sum(u != 10) = 0 FROM (SELECT intDiv(number, 10) AS k1, k1 + 1 AS k2, count() AS c, uniq(number) AS u FROM remote('127.0.0.{2,3}', test.numbers_10_00223) WHERE number < (randConstant() % 2 ? 50 : 100) GROUP BY k1, k2 HAVING count() > 0 ORDER BY k1, k2);
+SELECT sum(c = 20) IN (5, 10), sum(c = 10) IN (0, 5), sum(u != 10) = 0 FROM (SELECT intDiv(number, 10) AS k1, k1 + 1 AS k2, count() AS c, uniq(number) AS u FROM remote('127.0.0.{2,3}', test.numbers_10_00223) WHERE number < (randConstant() % 2 ? 50 : 100) GROUP BY k1, k2 HAVING count() > 0 ORDER BY k1, k2);
+SELECT sum(c = 20) IN (5, 10), sum(c = 10) IN (0, 5), sum(u != 10) = 0 FROM (SELECT intDiv(number, 10) AS k1, k1 + 1 AS k2, count() AS c, uniq(number) AS u FROM remote('127.0.0.{2,3}', test.numbers_10_00223) WHERE number < (randConstant() % 2 ? 50 : 100) GROUP BY k1, k2 HAVING count() > 0 ORDER BY k1, k2);
+SELECT sum(c = 20) IN (5, 10), sum(c = 10) IN (0, 5), sum(u != 10) = 0 FROM (SELECT intDiv(number, 10) AS k1, k1 + 1 AS k2, count() AS c, uniq(number) AS u FROM remote('127.0.0.{2,3}', test.numbers_10_00223) WHERE number < (randConstant() % 2 ? 50 : 100) GROUP BY k1, k2 HAVING count() > 0 ORDER BY k1, k2);
+SELECT sum(c = 20) IN (5, 10), sum(c = 10) IN (0, 5), sum(u != 10) = 0 FROM (SELECT intDiv(number, 10) AS k1, k1 + 1 AS k2, count() AS c, uniq(number) AS u FROM remote('127.0.0.{2,3}', test.numbers_10_00223) WHERE number < (randConstant() % 2 ? 50 : 100) GROUP BY k1, k2 HAVING count() > 0 ORDER BY k1, k2);
-DROP TABLE test.numbers_10;
+DROP TABLE test.numbers_10_00223;
SELECT count() FROM remote('127.0.0.{2,3}', system.one);
diff --git a/dbms/tests/queries/0_stateless/00270_views_query_processing_stage.sql b/dbms/tests/queries/0_stateless/00270_views_query_processing_stage.sql
index e049c1153b5..65a5235adad 100644
--- a/dbms/tests/queries/0_stateless/00270_views_query_processing_stage.sql
+++ b/dbms/tests/queries/0_stateless/00270_views_query_processing_stage.sql
@@ -1,13 +1,13 @@
-DROP TABLE IF EXISTS view1;
-DROP TABLE IF EXISTS view2;
-DROP TABLE IF EXISTS merge_view;
+DROP TABLE IF EXISTS test.view1;
+DROP TABLE IF EXISTS test.view2;
+DROP TABLE IF EXISTS test.merge_view;
-CREATE VIEW view1 AS SELECT number FROM system.numbers LIMIT 10;
-CREATE VIEW view2 AS SELECT number FROM system.numbers LIMIT 10;
-CREATE TABLE merge_view (number UInt64) ENGINE = Merge(test, '^view');
+CREATE VIEW test.view1 AS SELECT number FROM system.numbers LIMIT 10;
+CREATE VIEW test.view2 AS SELECT number FROM system.numbers LIMIT 10;
+CREATE TABLE test.merge_view (number UInt64) ENGINE = Merge(test, '^view');
-SELECT 'Hello, world!' FROM merge_view LIMIT 5;
+SELECT 'Hello, world!' FROM test.merge_view LIMIT 5;
-DROP TABLE view1;
-DROP TABLE view2;
-DROP TABLE merge_view;
+DROP TABLE test.view1;
+DROP TABLE test.view2;
+DROP TABLE test.merge_view;
diff --git a/dbms/tests/queries/0_stateless/00290_shard_aggregation_memory_efficient.sql b/dbms/tests/queries/0_stateless/00290_shard_aggregation_memory_efficient.sql
index 15984ec0dfa..a0444559c33 100644
--- a/dbms/tests/queries/0_stateless/00290_shard_aggregation_memory_efficient.sql
+++ b/dbms/tests/queries/0_stateless/00290_shard_aggregation_memory_efficient.sql
@@ -1,17 +1,17 @@
-DROP TABLE IF EXISTS test.numbers_10;
+DROP TABLE IF EXISTS test.numbers_10_00290;
SET max_block_size = 1000;
-CREATE TABLE test.numbers_10 ENGINE = Log AS SELECT * FROM system.numbers LIMIT 10000;
+CREATE TABLE test.numbers_10_00290 ENGINE = Log AS SELECT * FROM system.numbers LIMIT 10000;
SET distributed_aggregation_memory_efficient = 1, group_by_two_level_threshold = 5000;
-SELECT concat(toString(number), arrayStringConcat(arrayMap(x -> '.', range(number % 10)))) AS k FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 4999 : 10000) GROUP BY k ORDER BY k LIMIT 10;
-SELECT concat(toString(number), arrayStringConcat(arrayMap(x -> '.', range(number % 10)))) AS k FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 4999 : 10000) GROUP BY k ORDER BY k LIMIT 10;
-SELECT concat(toString(number), arrayStringConcat(arrayMap(x -> '.', range(number % 10)))) AS k FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 4999 : 10000) GROUP BY k ORDER BY k LIMIT 10;
-SELECT concat(toString(number), arrayStringConcat(arrayMap(x -> '.', range(number % 10)))) AS k FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 4999 : 10000) GROUP BY k ORDER BY k LIMIT 10;
-SELECT concat(toString(number), arrayStringConcat(arrayMap(x -> '.', range(number % 10)))) AS k FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 4999 : 10000) GROUP BY k ORDER BY k LIMIT 10;
-SELECT concat(toString(number), arrayStringConcat(arrayMap(x -> '.', range(number % 10)))) AS k FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 4999 : 10000) GROUP BY k ORDER BY k LIMIT 10;
-SELECT concat(toString(number), arrayStringConcat(arrayMap(x -> '.', range(number % 10)))) AS k FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 4999 : 10000) GROUP BY k ORDER BY k LIMIT 10;
-SELECT concat(toString(number), arrayStringConcat(arrayMap(x -> '.', range(number % 10)))) AS k FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 4999 : 10000) GROUP BY k ORDER BY k LIMIT 10;
-SELECT concat(toString(number), arrayStringConcat(arrayMap(x -> '.', range(number % 10)))) AS k FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 4999 : 10000) GROUP BY k ORDER BY k LIMIT 10;
-SELECT concat(toString(number), arrayStringConcat(arrayMap(x -> '.', range(number % 10)))) AS k FROM remote('127.0.0.{2,3}', test.numbers_10) WHERE number < (randConstant() % 2 ? 4999 : 10000) GROUP BY k ORDER BY k LIMIT 10;
+SELECT concat(toString(number), arrayStringConcat(arrayMap(x -> '.', range(number % 10)))) AS k FROM remote('127.0.0.{2,3}', test.numbers_10_00290) WHERE number < (randConstant() % 2 ? 4999 : 10000) GROUP BY k ORDER BY k LIMIT 10;
+SELECT concat(toString(number), arrayStringConcat(arrayMap(x -> '.', range(number % 10)))) AS k FROM remote('127.0.0.{2,3}', test.numbers_10_00290) WHERE number < (randConstant() % 2 ? 4999 : 10000) GROUP BY k ORDER BY k LIMIT 10;
+SELECT concat(toString(number), arrayStringConcat(arrayMap(x -> '.', range(number % 10)))) AS k FROM remote('127.0.0.{2,3}', test.numbers_10_00290) WHERE number < (randConstant() % 2 ? 4999 : 10000) GROUP BY k ORDER BY k LIMIT 10;
+SELECT concat(toString(number), arrayStringConcat(arrayMap(x -> '.', range(number % 10)))) AS k FROM remote('127.0.0.{2,3}', test.numbers_10_00290) WHERE number < (randConstant() % 2 ? 4999 : 10000) GROUP BY k ORDER BY k LIMIT 10;
+SELECT concat(toString(number), arrayStringConcat(arrayMap(x -> '.', range(number % 10)))) AS k FROM remote('127.0.0.{2,3}', test.numbers_10_00290) WHERE number < (randConstant() % 2 ? 4999 : 10000) GROUP BY k ORDER BY k LIMIT 10;
+SELECT concat(toString(number), arrayStringConcat(arrayMap(x -> '.', range(number % 10)))) AS k FROM remote('127.0.0.{2,3}', test.numbers_10_00290) WHERE number < (randConstant() % 2 ? 4999 : 10000) GROUP BY k ORDER BY k LIMIT 10;
+SELECT concat(toString(number), arrayStringConcat(arrayMap(x -> '.', range(number % 10)))) AS k FROM remote('127.0.0.{2,3}', test.numbers_10_00290) WHERE number < (randConstant() % 2 ? 4999 : 10000) GROUP BY k ORDER BY k LIMIT 10;
+SELECT concat(toString(number), arrayStringConcat(arrayMap(x -> '.', range(number % 10)))) AS k FROM remote('127.0.0.{2,3}', test.numbers_10_00290) WHERE number < (randConstant() % 2 ? 4999 : 10000) GROUP BY k ORDER BY k LIMIT 10;
+SELECT concat(toString(number), arrayStringConcat(arrayMap(x -> '.', range(number % 10)))) AS k FROM remote('127.0.0.{2,3}', test.numbers_10_00290) WHERE number < (randConstant() % 2 ? 4999 : 10000) GROUP BY k ORDER BY k LIMIT 10;
+SELECT concat(toString(number), arrayStringConcat(arrayMap(x -> '.', range(number % 10)))) AS k FROM remote('127.0.0.{2,3}', test.numbers_10_00290) WHERE number < (randConstant() % 2 ? 4999 : 10000) GROUP BY k ORDER BY k LIMIT 10;
-DROP TABLE test.numbers_10;
+DROP TABLE test.numbers_10_00290;
diff --git a/dbms/tests/queries/0_stateless/00314_sample_factor_virtual_column.sql b/dbms/tests/queries/0_stateless/00314_sample_factor_virtual_column.sql
index b0eee63a685..043df6c7a64 100644
--- a/dbms/tests/queries/0_stateless/00314_sample_factor_virtual_column.sql
+++ b/dbms/tests/queries/0_stateless/00314_sample_factor_virtual_column.sql
@@ -1,20 +1,20 @@
-DROP TABLE IF EXISTS sample1;
-DROP TABLE IF EXISTS sample2;
-DROP TABLE IF EXISTS sample_merge;
+DROP TABLE IF EXISTS test.sample_00314_1;
+DROP TABLE IF EXISTS test.sample_00314_2;
+DROP TABLE IF EXISTS test.sample_merge_00314;
-CREATE TABLE sample1 (x UInt64, d Date DEFAULT today()) ENGINE = MergeTree(d, intHash64(x), intHash64(x), 10);
-CREATE TABLE sample2 (x UInt64, d Date DEFAULT today()) ENGINE = MergeTree(d, intHash64(x), intHash64(x), 10);
+CREATE TABLE test.sample_00314_1 (x UInt64, d Date DEFAULT today()) ENGINE = MergeTree(d, intHash64(x), intHash64(x), 10);
+CREATE TABLE test.sample_00314_2 (x UInt64, d Date DEFAULT today()) ENGINE = MergeTree(d, intHash64(x), intHash64(x), 10);
SET min_insert_block_size_rows = 0, min_insert_block_size_bytes = 0;
-INSERT INTO sample1 (x) SELECT number AS x FROM system.numbers LIMIT 1000000;
-INSERT INTO sample2 (x) SELECT number AS x FROM system.numbers LIMIT 2000000;
+INSERT INTO test.sample_00314_1 (x) SELECT number AS x FROM system.numbers LIMIT 1000000;
+INSERT INTO test.sample_00314_2 (x) SELECT number AS x FROM system.numbers LIMIT 2000000;
-CREATE TABLE sample_merge AS sample1 ENGINE = Merge(test, '^sample\\d$');
+CREATE TABLE test.sample_merge_00314 AS test.sample_00314_1 ENGINE = Merge(test, '^sample_00314_\\d$');
-SELECT abs(sum(_sample_factor) - 3000000) / 3000000 < 0.001 FROM sample_merge SAMPLE 100000;
-SELECT abs(sum(_sample_factor) - 3000000) / 3000000 < 0.001 FROM merge(test, '^sample\\d$') SAMPLE 100000;
+SELECT abs(sum(_sample_factor) - 3000000) / 3000000 < 0.001 FROM test.sample_merge_00314 SAMPLE 100000;
+SELECT abs(sum(_sample_factor) - 3000000) / 3000000 < 0.001 FROM merge(test, '^sample_00314_\\d$') SAMPLE 100000;
-DROP TABLE sample1;
-DROP TABLE sample2;
-DROP TABLE sample_merge;
+DROP TABLE test.sample_00314_1;
+DROP TABLE test.sample_00314_2;
+DROP TABLE test.sample_merge_00314;
diff --git a/dbms/tests/queries/0_stateless/00366_multi_statements.sh b/dbms/tests/queries/0_stateless/00366_multi_statements.sh
index 0f2652e0f2e..3223702ac0a 100755
--- a/dbms/tests/queries/0_stateless/00366_multi_statements.sh
+++ b/dbms/tests/queries/0_stateless/00366_multi_statements.sh
@@ -18,17 +18,17 @@ $CLICKHOUSE_CLIENT -n --query="SELECT 1; SELECT 2"
$CLICKHOUSE_CLIENT -n --query="SELECT 1; SELECT 2;"
$CLICKHOUSE_CLIENT -n --query="SELECT 1; SELECT 2; SELECT" 2>&1 | grep -o 'Syntax error'
-$CLICKHOUSE_CLIENT -n --query="DROP TABLE IF EXISTS t; CREATE TABLE t (x UInt64) ENGINE = TinyLog;"
+$CLICKHOUSE_CLIENT -n --query="DROP TABLE IF EXISTS t_00366; CREATE TABLE t_00366 (x UInt64) ENGINE = TinyLog;"
-$CLICKHOUSE_CLIENT --query="INSERT INTO t VALUES (1),(2),(3);"
-$CLICKHOUSE_CLIENT --query="SELECT * FROM t"
-$CLICKHOUSE_CLIENT --query="INSERT INTO t VALUES" <<< "(4),(5),(6)"
-$CLICKHOUSE_CLIENT --query="SELECT * FROM t"
+$CLICKHOUSE_CLIENT --query="INSERT INTO t_00366 VALUES (1),(2),(3);"
+$CLICKHOUSE_CLIENT --query="SELECT * FROM t_00366"
+$CLICKHOUSE_CLIENT --query="INSERT INTO t_00366 VALUES" <<< "(4),(5),(6)"
+$CLICKHOUSE_CLIENT --query="SELECT * FROM t_00366"
-$CLICKHOUSE_CLIENT -n --query="INSERT INTO t VALUES (1),(2),(3);"
-$CLICKHOUSE_CLIENT -n --query="SELECT * FROM t"
-$CLICKHOUSE_CLIENT -n --query="INSERT INTO t VALUES" <<< "(4),(5),(6)"
-$CLICKHOUSE_CLIENT -n --query="SELECT * FROM t"
+$CLICKHOUSE_CLIENT -n --query="INSERT INTO t_00366 VALUES (1),(2),(3);"
+$CLICKHOUSE_CLIENT -n --query="SELECT * FROM t_00366"
+$CLICKHOUSE_CLIENT -n --query="INSERT INTO t_00366 VALUES" <<< "(4),(5),(6)"
+$CLICKHOUSE_CLIENT -n --query="SELECT * FROM t_00366"
${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL_PARAMS}" -d "SELECT 1"
${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL_PARAMS}" -d "SELECT 1;"
@@ -40,11 +40,11 @@ ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL_PARAMS}" -d "SELECT 1; SELECT 2" 2>&1 |
${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL_PARAMS}" -d "SELECT 1; SELECT 2;" 2>&1 | grep -o 'Syntax error'
${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL_PARAMS}" -d "SELECT 1; SELECT 2; SELECT" 2>&1 | grep -o 'Syntax error'
-${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL_PARAMS}" -d "INSERT INTO t VALUES (1),(2),(3);"
-$CLICKHOUSE_CLIENT --query="SELECT * FROM t"
-${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL_PARAMS}&query=INSERT" -d "INTO t VALUES (4),(5),(6);"
-$CLICKHOUSE_CLIENT --query="SELECT * FROM t"
-${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL_PARAMS}&query=INSERT+INTO+t+VALUES" -d "(7),(8),(9)"
-$CLICKHOUSE_CLIENT --query="SELECT * FROM t"
+${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL_PARAMS}" -d "INSERT INTO t_00366 VALUES (1),(2),(3);"
+$CLICKHOUSE_CLIENT --query="SELECT * FROM t_00366"
+${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL_PARAMS}&query=INSERT" -d "INTO t_00366 VALUES (4),(5),(6);"
+$CLICKHOUSE_CLIENT --query="SELECT * FROM t_00366"
+${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL_PARAMS}&query=INSERT+INTO+t_00366+VALUES" -d "(7),(8),(9)"
+$CLICKHOUSE_CLIENT --query="SELECT * FROM t_00366"
-$CLICKHOUSE_CLIENT -n --query="DROP TABLE t;"
+$CLICKHOUSE_CLIENT -n --query="DROP TABLE t_00366;"
diff --git a/dbms/tests/queries/0_stateless/00386_long_in_pk.python b/dbms/tests/queries/0_stateless/00386_long_in_pk.python
index 327e268dc21..d1db1a32d1e 100644
--- a/dbms/tests/queries/0_stateless/00386_long_in_pk.python
+++ b/dbms/tests/queries/0_stateless/00386_long_in_pk.python
@@ -1,10 +1,10 @@
#!/usr/bin/env python
def gen_queries():
- create_template = 'create table tab (a Int8, b String, c Tuple(Int8), d Tuple(Tuple(Int8)), e Tuple(Int8, String), f Tuple(Tuple(Int8, String))) engine = MergeTree order by ({}) partition by {}'
- drop_query = 'drop table if exists tab'
+ create_template = 'create table tab_00386 (a Int8, b String, c Tuple(Int8), d Tuple(Tuple(Int8)), e Tuple(Int8, String), f Tuple(Tuple(Int8, String))) engine = MergeTree order by ({}) partition by {}'
+ drop_query = 'drop table if exists tab_00386'
values = ('1', "'a'", 'tuple(1)', 'tuple(tuple(1))', "(1, 'a')", "tuple((1, 'a'))")
- insert_query = "insert into tab values (1, 'a', tuple(1), tuple(tuple(1)), (1, 'a'), tuple((1, 'a')))"
+ insert_query = "insert into tab_00386 values (1, 'a', tuple(1), tuple(tuple(1)), (1, 'a'), tuple((1, 'a')))"
columns = tuple('a b c d'.split())
order_by_columns = tuple('a b c'.split())
partition_by_columns = tuple(' tuple() a'.split())
@@ -17,30 +17,30 @@ def gen_queries():
yield q
for column, value in zip(columns, values):
- yield 'select {} in {} from tab'.format(column, value)
- yield 'select {} in tuple({}) from tab'.format(column, value)
- yield 'select {} in (select {} from tab) from tab'.format(column, column)
+ yield 'select {} in {} from tab_00386'.format(column, value)
+ yield 'select {} in tuple({}) from tab_00386'.format(column, value)
+ yield 'select {} in (select {} from tab_00386) from tab_00386'.format(column, column)
for i in range(len(columns)):
for j in range(i, len(columns)):
- yield 'select ({}, {}) in tuple({}, {}) from tab'.format(columns[i], columns[j], values[i], values[j])
- yield 'select ({}, {}) in (select {}, {} from tab) from tab'.format(columns[i], columns[j], columns[i], columns[j])
- yield 'select ({}, {}) in (select ({}, {}) from tab) from tab'.format(columns[i], columns[j], columns[i], columns[j])
+ yield 'select ({}, {}) in tuple({}, {}) from tab_00386'.format(columns[i], columns[j], values[i], values[j])
+ yield 'select ({}, {}) in (select {}, {} from tab_00386) from tab_00386'.format(columns[i], columns[j], columns[i], columns[j])
+ yield 'select ({}, {}) in (select ({}, {}) from tab_00386) from tab_00386'.format(columns[i], columns[j], columns[i], columns[j])
- yield "select e in (1, 'a') from tab"
- yield "select f in tuple((1, 'a')) from tab"
- yield "select f in tuple(tuple((1, 'a'))) from tab"
+ yield "select e in (1, 'a') from tab_00386"
+ yield "select f in tuple((1, 'a')) from tab_00386"
+ yield "select f in tuple(tuple((1, 'a'))) from tab_00386"
- yield 'select e in (select a, b from tab) from tab'
- yield 'select e in (select (a, b) from tab) from tab'
- yield 'select f in (select tuple((a, b)) from tab) from tab'
- yield 'select tuple(f) in (select tuple(tuple((a, b))) from tab) from tab'
+ yield 'select e in (select a, b from tab_00386) from tab_00386'
+ yield 'select e in (select (a, b) from tab_00386) from tab_00386'
+ yield 'select f in (select tuple((a, b)) from tab_00386) from tab_00386'
+ yield 'select tuple(f) in (select tuple(tuple((a, b))) from tab_00386) from tab_00386'
import requests
import os
def main():
- url = os.environ['CLICKHOUSE_URL']
+ url = os.environ['CLICKHOUSE_URL_PARAMS']
for q in gen_queries():
resp = requests.post(url, data=q)
diff --git a/dbms/tests/queries/0_stateless/00401_merge_and_stripelog.sql b/dbms/tests/queries/0_stateless/00401_merge_and_stripelog.sql
index 3bbfefbaf2f..1953947153a 100644
--- a/dbms/tests/queries/0_stateless/00401_merge_and_stripelog.sql
+++ b/dbms/tests/queries/0_stateless/00401_merge_and_stripelog.sql
@@ -1,48 +1,48 @@
-DROP TABLE IF EXISTS stripe1;
-DROP TABLE IF EXISTS stripe2;
-DROP TABLE IF EXISTS stripe3;
-DROP TABLE IF EXISTS stripe4;
-DROP TABLE IF EXISTS stripe5;
-DROP TABLE IF EXISTS stripe6;
-DROP TABLE IF EXISTS stripe7;
-DROP TABLE IF EXISTS stripe8;
-DROP TABLE IF EXISTS stripe9;
-DROP TABLE IF EXISTS stripe10;
-DROP TABLE IF EXISTS merge;
+DROP TABLE IF EXISTS test.stripe1;
+DROP TABLE IF EXISTS test.stripe2;
+DROP TABLE IF EXISTS test.stripe3;
+DROP TABLE IF EXISTS test.stripe4;
+DROP TABLE IF EXISTS test.stripe5;
+DROP TABLE IF EXISTS test.stripe6;
+DROP TABLE IF EXISTS test.stripe7;
+DROP TABLE IF EXISTS test.stripe8;
+DROP TABLE IF EXISTS test.stripe9;
+DROP TABLE IF EXISTS test.stripe10;
+DROP TABLE IF EXISTS test.merge;
-CREATE TABLE stripe1 ENGINE = StripeLog AS SELECT number AS x FROM system.numbers LIMIT 10;
-CREATE TABLE stripe2 ENGINE = StripeLog AS SELECT number AS x FROM system.numbers LIMIT 10;
-CREATE TABLE stripe3 ENGINE = StripeLog AS SELECT number AS x FROM system.numbers LIMIT 10;
-CREATE TABLE stripe4 ENGINE = StripeLog AS SELECT number AS x FROM system.numbers LIMIT 10;
-CREATE TABLE stripe5 ENGINE = StripeLog AS SELECT number AS x FROM system.numbers LIMIT 10;
-CREATE TABLE stripe6 ENGINE = StripeLog AS SELECT number AS x FROM system.numbers LIMIT 10;
-CREATE TABLE stripe7 ENGINE = StripeLog AS SELECT number AS x FROM system.numbers LIMIT 10;
-CREATE TABLE stripe8 ENGINE = StripeLog AS SELECT number AS x FROM system.numbers LIMIT 10;
-CREATE TABLE stripe9 ENGINE = StripeLog AS SELECT number AS x FROM system.numbers LIMIT 10;
-CREATE TABLE stripe10 ENGINE = StripeLog AS SELECT number AS x FROM system.numbers LIMIT 10;
+CREATE TABLE test.stripe1 ENGINE = StripeLog AS SELECT number AS x FROM system.numbers LIMIT 10;
+CREATE TABLE test.stripe2 ENGINE = StripeLog AS SELECT number AS x FROM system.numbers LIMIT 10;
+CREATE TABLE test.stripe3 ENGINE = StripeLog AS SELECT number AS x FROM system.numbers LIMIT 10;
+CREATE TABLE test.stripe4 ENGINE = StripeLog AS SELECT number AS x FROM system.numbers LIMIT 10;
+CREATE TABLE test.stripe5 ENGINE = StripeLog AS SELECT number AS x FROM system.numbers LIMIT 10;
+CREATE TABLE test.stripe6 ENGINE = StripeLog AS SELECT number AS x FROM system.numbers LIMIT 10;
+CREATE TABLE test.stripe7 ENGINE = StripeLog AS SELECT number AS x FROM system.numbers LIMIT 10;
+CREATE TABLE test.stripe8 ENGINE = StripeLog AS SELECT number AS x FROM system.numbers LIMIT 10;
+CREATE TABLE test.stripe9 ENGINE = StripeLog AS SELECT number AS x FROM system.numbers LIMIT 10;
+CREATE TABLE test.stripe10 ENGINE = StripeLog AS SELECT number AS x FROM system.numbers LIMIT 10;
-CREATE TABLE merge AS stripe1 ENGINE = Merge(test, '^stripe\\d+');
+CREATE TABLE test.merge AS test.stripe1 ENGINE = Merge(test, '^stripe\\d+');
-SELECT x, count() FROM merge GROUP BY x ORDER BY x;
+SELECT x, count() FROM test.merge GROUP BY x ORDER BY x;
SET max_threads = 1;
-SELECT x, count() FROM merge GROUP BY x ORDER BY x;
+SELECT x, count() FROM test.merge GROUP BY x ORDER BY x;
SET max_threads = 2;
-SELECT x, count() FROM merge GROUP BY x ORDER BY x;
+SELECT x, count() FROM test.merge GROUP BY x ORDER BY x;
SET max_threads = 5;
-SELECT x, count() FROM merge GROUP BY x ORDER BY x;
+SELECT x, count() FROM test.merge GROUP BY x ORDER BY x;
SET max_threads = 10;
-SELECT x, count() FROM merge GROUP BY x ORDER BY x;
+SELECT x, count() FROM test.merge GROUP BY x ORDER BY x;
SET max_threads = 20;
-SELECT x, count() FROM merge GROUP BY x ORDER BY x;
+SELECT x, count() FROM test.merge GROUP BY x ORDER BY x;
-DROP TABLE IF EXISTS stripe1;
-DROP TABLE IF EXISTS stripe2;
-DROP TABLE IF EXISTS stripe3;
-DROP TABLE IF EXISTS stripe4;
-DROP TABLE IF EXISTS stripe5;
-DROP TABLE IF EXISTS stripe6;
-DROP TABLE IF EXISTS stripe7;
-DROP TABLE IF EXISTS stripe8;
-DROP TABLE IF EXISTS stripe9;
-DROP TABLE IF EXISTS stripe10;
-DROP TABLE IF EXISTS merge;
+DROP TABLE IF EXISTS test.stripe1;
+DROP TABLE IF EXISTS test.stripe2;
+DROP TABLE IF EXISTS test.stripe3;
+DROP TABLE IF EXISTS test.stripe4;
+DROP TABLE IF EXISTS test.stripe5;
+DROP TABLE IF EXISTS test.stripe6;
+DROP TABLE IF EXISTS test.stripe7;
+DROP TABLE IF EXISTS test.stripe8;
+DROP TABLE IF EXISTS test.stripe9;
+DROP TABLE IF EXISTS test.stripe10;
+DROP TABLE IF EXISTS test.merge;
diff --git a/dbms/tests/queries/0_stateless/00429_long_http_bufferization.sh b/dbms/tests/queries/0_stateless/00429_long_http_bufferization.sh
index d039b41595a..a306e7959f5 100755
--- a/dbms/tests/queries/0_stateless/00429_long_http_bufferization.sh
+++ b/dbms/tests/queries/0_stateless/00429_long_http_bufferization.sh
@@ -89,9 +89,9 @@ check_cli_and_http
function cmp_http_compression() {
$CLICKHOUSE_CLIENT -q "`query $1`" > ${CLICKHOUSE_TMP}/res0
- ch_url 'compress=1' $1 | clickhouse-compressor --decompress > ${CLICKHOUSE_TMP}/res1
- ch_url "compress=1&buffer_size=$2&wait_end_of_query=0" $1 | clickhouse-compressor --decompress > ${CLICKHOUSE_TMP}/res2
- ch_url "compress=1&buffer_size=$2&wait_end_of_query=1" $1 | clickhouse-compressor --decompress > ${CLICKHOUSE_TMP}/res3
+ ch_url 'compress=1' $1 | ${CLICKHOUSE_BINARY}-compressor --decompress > ${CLICKHOUSE_TMP}/res1
+ ch_url "compress=1&buffer_size=$2&wait_end_of_query=0" $1 | ${CLICKHOUSE_BINARY}-compressor --decompress > ${CLICKHOUSE_TMP}/res2
+ ch_url "compress=1&buffer_size=$2&wait_end_of_query=1" $1 | ${CLICKHOUSE_BINARY}-compressor --decompress > ${CLICKHOUSE_TMP}/res3
cmp ${CLICKHOUSE_TMP}/res0 ${CLICKHOUSE_TMP}/res1
cmp ${CLICKHOUSE_TMP}/res1 ${CLICKHOUSE_TMP}/res2
cmp ${CLICKHOUSE_TMP}/res1 ${CLICKHOUSE_TMP}/res3
diff --git a/dbms/tests/queries/0_stateless/00431_if_nulls.sql b/dbms/tests/queries/0_stateless/00431_if_nulls.sql
index 33ec85f8d09..1e78c2b27a3 100644
--- a/dbms/tests/queries/0_stateless/00431_if_nulls.sql
+++ b/dbms/tests/queries/0_stateless/00431_if_nulls.sql
@@ -2,21 +2,21 @@
* - constant, true
* - constant, false
* - constant, NULL
- * - non constant, non nullable
- * - non constant, nullable
+ * - non constant, non nullable_00431
+ * - non constant, nullable_00431
*
* Then and else could be:
* - constant, not NULL
* - constant, NULL
- * - non constant, non nullable
- * - non constant, nullable
+ * - non constant, non nullable_00431
+ * - non constant, nullable_00431
*
* Thus we have 5 * 4 * 4 = 80 combinations.
*/
-DROP TABLE IF EXISTS nullable;
+DROP TABLE IF EXISTS nullable_00431;
-CREATE VIEW nullable
+CREATE VIEW nullable_00431
AS SELECT
1 AS constant_true,
0 AS constant_false,
@@ -32,109 +32,109 @@ AS SELECT
FROM system.numbers LIMIT 10;
-SELECT constant_true ? then_constant : else_constant AS res FROM nullable;
-SELECT constant_true ? then_constant : constant_null AS res FROM nullable;
-SELECT constant_true ? then_constant : else_non_constant AS res FROM nullable;
-SELECT constant_true ? then_constant : else_non_constant_nullable AS res FROM nullable;
+SELECT constant_true ? then_constant : else_constant AS res FROM nullable_00431;
+SELECT constant_true ? then_constant : constant_null AS res FROM nullable_00431;
+SELECT constant_true ? then_constant : else_non_constant AS res FROM nullable_00431;
+SELECT constant_true ? then_constant : else_non_constant_nullable AS res FROM nullable_00431;
-SELECT constant_true ? constant_null : else_constant AS res FROM nullable;
-SELECT constant_true ? constant_null : constant_null AS res FROM nullable;
-SELECT constant_true ? constant_null : else_non_constant AS res FROM nullable;
-SELECT constant_true ? constant_null : else_non_constant_nullable AS res FROM nullable;
+SELECT constant_true ? constant_null : else_constant AS res FROM nullable_00431;
+SELECT constant_true ? constant_null : constant_null AS res FROM nullable_00431;
+SELECT constant_true ? constant_null : else_non_constant AS res FROM nullable_00431;
+SELECT constant_true ? constant_null : else_non_constant_nullable AS res FROM nullable_00431;
-SELECT constant_true ? then_non_constant : else_constant AS res FROM nullable;
-SELECT constant_true ? then_non_constant : constant_null AS res FROM nullable;
-SELECT constant_true ? then_non_constant : else_non_constant AS res FROM nullable;
-SELECT constant_true ? then_non_constant : else_non_constant_nullable AS res FROM nullable;
+SELECT constant_true ? then_non_constant : else_constant AS res FROM nullable_00431;
+SELECT constant_true ? then_non_constant : constant_null AS res FROM nullable_00431;
+SELECT constant_true ? then_non_constant : else_non_constant AS res FROM nullable_00431;
+SELECT constant_true ? then_non_constant : else_non_constant_nullable AS res FROM nullable_00431;
-SELECT constant_true ? then_non_constant_nullable : else_constant AS res FROM nullable;
-SELECT constant_true ? then_non_constant_nullable : constant_null AS res FROM nullable;
-SELECT constant_true ? then_non_constant_nullable : else_non_constant AS res FROM nullable;
-SELECT constant_true ? then_non_constant_nullable : else_non_constant_nullable AS res FROM nullable;
+SELECT constant_true ? then_non_constant_nullable : else_constant AS res FROM nullable_00431;
+SELECT constant_true ? then_non_constant_nullable : constant_null AS res FROM nullable_00431;
+SELECT constant_true ? then_non_constant_nullable : else_non_constant AS res FROM nullable_00431;
+SELECT constant_true ? then_non_constant_nullable : else_non_constant_nullable AS res FROM nullable_00431;
-SELECT constant_false ? then_constant : else_constant AS res FROM nullable;
-SELECT constant_false ? then_constant : constant_null AS res FROM nullable;
-SELECT constant_false ? then_constant : else_non_constant AS res FROM nullable;
-SELECT constant_false ? then_constant : else_non_constant_nullable AS res FROM nullable;
+SELECT constant_false ? then_constant : else_constant AS res FROM nullable_00431;
+SELECT constant_false ? then_constant : constant_null AS res FROM nullable_00431;
+SELECT constant_false ? then_constant : else_non_constant AS res FROM nullable_00431;
+SELECT constant_false ? then_constant : else_non_constant_nullable AS res FROM nullable_00431;
-SELECT constant_false ? constant_null : else_constant AS res FROM nullable;
-SELECT constant_false ? constant_null : constant_null AS res FROM nullable;
-SELECT constant_false ? constant_null : else_non_constant AS res FROM nullable;
-SELECT constant_false ? constant_null : else_non_constant_nullable AS res FROM nullable;
+SELECT constant_false ? constant_null : else_constant AS res FROM nullable_00431;
+SELECT constant_false ? constant_null : constant_null AS res FROM nullable_00431;
+SELECT constant_false ? constant_null : else_non_constant AS res FROM nullable_00431;
+SELECT constant_false ? constant_null : else_non_constant_nullable AS res FROM nullable_00431;
-SELECT constant_false ? then_non_constant : else_constant AS res FROM nullable;
-SELECT constant_false ? then_non_constant : constant_null AS res FROM nullable;
-SELECT constant_false ? then_non_constant : else_non_constant AS res FROM nullable;
-SELECT constant_false ? then_non_constant : else_non_constant_nullable AS res FROM nullable;
+SELECT constant_false ? then_non_constant : else_constant AS res FROM nullable_00431;
+SELECT constant_false ? then_non_constant : constant_null AS res FROM nullable_00431;
+SELECT constant_false ? then_non_constant : else_non_constant AS res FROM nullable_00431;
+SELECT constant_false ? then_non_constant : else_non_constant_nullable AS res FROM nullable_00431;
-SELECT constant_false ? then_non_constant_nullable : else_constant AS res FROM nullable;
-SELECT constant_false ? then_non_constant_nullable : constant_null AS res FROM nullable;
-SELECT constant_false ? then_non_constant_nullable : else_non_constant AS res FROM nullable;
-SELECT constant_false ? then_non_constant_nullable : else_non_constant_nullable AS res FROM nullable;
+SELECT constant_false ? then_non_constant_nullable : else_constant AS res FROM nullable_00431;
+SELECT constant_false ? then_non_constant_nullable : constant_null AS res FROM nullable_00431;
+SELECT constant_false ? then_non_constant_nullable : else_non_constant AS res FROM nullable_00431;
+SELECT constant_false ? then_non_constant_nullable : else_non_constant_nullable AS res FROM nullable_00431;
-SELECT constant_null ? then_constant : else_constant AS res FROM nullable;
-SELECT constant_null ? then_constant : constant_null AS res FROM nullable;
-SELECT constant_null ? then_constant : else_non_constant AS res FROM nullable;
-SELECT constant_null ? then_constant : else_non_constant_nullable AS res FROM nullable;
+SELECT constant_null ? then_constant : else_constant AS res FROM nullable_00431;
+SELECT constant_null ? then_constant : constant_null AS res FROM nullable_00431;
+SELECT constant_null ? then_constant : else_non_constant AS res FROM nullable_00431;
+SELECT constant_null ? then_constant : else_non_constant_nullable AS res FROM nullable_00431;
-SELECT constant_null ? constant_null : else_constant AS res FROM nullable;
-SELECT constant_null ? constant_null : constant_null AS res FROM nullable;
-SELECT constant_null ? constant_null : else_non_constant AS res FROM nullable;
-SELECT constant_null ? constant_null : else_non_constant_nullable AS res FROM nullable;
+SELECT constant_null ? constant_null : else_constant AS res FROM nullable_00431;
+SELECT constant_null ? constant_null : constant_null AS res FROM nullable_00431;
+SELECT constant_null ? constant_null : else_non_constant AS res FROM nullable_00431;
+SELECT constant_null ? constant_null : else_non_constant_nullable AS res FROM nullable_00431;
-SELECT constant_null ? then_non_constant : else_constant AS res FROM nullable;
-SELECT constant_null ? then_non_constant : constant_null AS res FROM nullable;
-SELECT constant_null ? then_non_constant : else_non_constant AS res FROM nullable;
-SELECT constant_null ? then_non_constant : else_non_constant_nullable AS res FROM nullable;
+SELECT constant_null ? then_non_constant : else_constant AS res FROM nullable_00431;
+SELECT constant_null ? then_non_constant : constant_null AS res FROM nullable_00431;
+SELECT constant_null ? then_non_constant : else_non_constant AS res FROM nullable_00431;
+SELECT constant_null ? then_non_constant : else_non_constant_nullable AS res FROM nullable_00431;
-SELECT constant_null ? then_non_constant_nullable : else_constant AS res FROM nullable;
-SELECT constant_null ? then_non_constant_nullable : constant_null AS res FROM nullable;
-SELECT constant_null ? then_non_constant_nullable : else_non_constant AS res FROM nullable;
-SELECT constant_null ? then_non_constant_nullable : else_non_constant_nullable AS res FROM nullable;
+SELECT constant_null ? then_non_constant_nullable : else_constant AS res FROM nullable_00431;
+SELECT constant_null ? then_non_constant_nullable : constant_null AS res FROM nullable_00431;
+SELECT constant_null ? then_non_constant_nullable : else_non_constant AS res FROM nullable_00431;
+SELECT constant_null ? then_non_constant_nullable : else_non_constant_nullable AS res FROM nullable_00431;
-SELECT cond_non_constant ? then_constant : else_constant AS res FROM nullable;
-SELECT cond_non_constant ? then_constant : constant_null AS res FROM nullable;
-SELECT cond_non_constant ? then_constant : else_non_constant AS res FROM nullable;
-SELECT cond_non_constant ? then_constant : else_non_constant_nullable AS res FROM nullable;
+SELECT cond_non_constant ? then_constant : else_constant AS res FROM nullable_00431;
+SELECT cond_non_constant ? then_constant : constant_null AS res FROM nullable_00431;
+SELECT cond_non_constant ? then_constant : else_non_constant AS res FROM nullable_00431;
+SELECT cond_non_constant ? then_constant : else_non_constant_nullable AS res FROM nullable_00431;
-SELECT cond_non_constant ? constant_null : else_constant AS res FROM nullable;
-SELECT cond_non_constant ? constant_null : constant_null AS res FROM nullable;
-SELECT cond_non_constant ? constant_null : else_non_constant AS res FROM nullable;
-SELECT cond_non_constant ? constant_null : else_non_constant_nullable AS res FROM nullable;
+SELECT cond_non_constant ? constant_null : else_constant AS res FROM nullable_00431;
+SELECT cond_non_constant ? constant_null : constant_null AS res FROM nullable_00431;
+SELECT cond_non_constant ? constant_null : else_non_constant AS res FROM nullable_00431;
+SELECT cond_non_constant ? constant_null : else_non_constant_nullable AS res FROM nullable_00431;
-SELECT cond_non_constant ? then_non_constant : else_constant AS res FROM nullable;
-SELECT cond_non_constant ? then_non_constant : constant_null AS res FROM nullable;
-SELECT cond_non_constant ? then_non_constant : else_non_constant AS res FROM nullable;
-SELECT cond_non_constant ? then_non_constant : else_non_constant_nullable AS res FROM nullable;
+SELECT cond_non_constant ? then_non_constant : else_constant AS res FROM nullable_00431;
+SELECT cond_non_constant ? then_non_constant : constant_null AS res FROM nullable_00431;
+SELECT cond_non_constant ? then_non_constant : else_non_constant AS res FROM nullable_00431;
+SELECT cond_non_constant ? then_non_constant : else_non_constant_nullable AS res FROM nullable_00431;
-SELECT cond_non_constant ? then_non_constant_nullable : else_constant AS res FROM nullable;
-SELECT cond_non_constant ? then_non_constant_nullable : constant_null AS res FROM nullable;
-SELECT cond_non_constant ? then_non_constant_nullable : else_non_constant AS res FROM nullable;
-SELECT cond_non_constant ? then_non_constant_nullable : else_non_constant_nullable AS res FROM nullable;
+SELECT cond_non_constant ? then_non_constant_nullable : else_constant AS res FROM nullable_00431;
+SELECT cond_non_constant ? then_non_constant_nullable : constant_null AS res FROM nullable_00431;
+SELECT cond_non_constant ? then_non_constant_nullable : else_non_constant AS res FROM nullable_00431;
+SELECT cond_non_constant ? then_non_constant_nullable : else_non_constant_nullable AS res FROM nullable_00431;
-SELECT cond_non_constant_nullable ? then_constant : else_constant AS res FROM nullable;
-SELECT cond_non_constant_nullable ? then_constant : constant_null AS res FROM nullable;
-SELECT cond_non_constant_nullable ? then_constant : else_non_constant AS res FROM nullable;
-SELECT cond_non_constant_nullable ? then_constant : else_non_constant_nullable AS res FROM nullable;
+SELECT cond_non_constant_nullable ? then_constant : else_constant AS res FROM nullable_00431;
+SELECT cond_non_constant_nullable ? then_constant : constant_null AS res FROM nullable_00431;
+SELECT cond_non_constant_nullable ? then_constant : else_non_constant AS res FROM nullable_00431;
+SELECT cond_non_constant_nullable ? then_constant : else_non_constant_nullable AS res FROM nullable_00431;
-SELECT cond_non_constant_nullable ? constant_null : else_constant AS res FROM nullable;
-SELECT cond_non_constant_nullable ? constant_null : constant_null AS res FROM nullable;
-SELECT cond_non_constant_nullable ? constant_null : else_non_constant AS res FROM nullable;
-SELECT cond_non_constant_nullable ? constant_null : else_non_constant_nullable AS res FROM nullable;
+SELECT cond_non_constant_nullable ? constant_null : else_constant AS res FROM nullable_00431;
+SELECT cond_non_constant_nullable ? constant_null : constant_null AS res FROM nullable_00431;
+SELECT cond_non_constant_nullable ? constant_null : else_non_constant AS res FROM nullable_00431;
+SELECT cond_non_constant_nullable ? constant_null : else_non_constant_nullable AS res FROM nullable_00431;
-SELECT cond_non_constant_nullable ? then_non_constant : else_constant AS res FROM nullable;
-SELECT cond_non_constant_nullable ? then_non_constant : constant_null AS res FROM nullable;
-SELECT cond_non_constant_nullable ? then_non_constant : else_non_constant AS res FROM nullable;
-SELECT cond_non_constant_nullable ? then_non_constant : else_non_constant_nullable AS res FROM nullable;
+SELECT cond_non_constant_nullable ? then_non_constant : else_constant AS res FROM nullable_00431;
+SELECT cond_non_constant_nullable ? then_non_constant : constant_null AS res FROM nullable_00431;
+SELECT cond_non_constant_nullable ? then_non_constant : else_non_constant AS res FROM nullable_00431;
+SELECT cond_non_constant_nullable ? then_non_constant : else_non_constant_nullable AS res FROM nullable_00431;
-SELECT cond_non_constant_nullable ? then_non_constant_nullable : else_constant AS res FROM nullable;
-SELECT cond_non_constant_nullable ? then_non_constant_nullable : constant_null AS res FROM nullable;
-SELECT cond_non_constant_nullable ? then_non_constant_nullable : else_non_constant AS res FROM nullable;
-SELECT cond_non_constant_nullable ? then_non_constant_nullable : else_non_constant_nullable AS res FROM nullable;
+SELECT cond_non_constant_nullable ? then_non_constant_nullable : else_constant AS res FROM nullable_00431;
+SELECT cond_non_constant_nullable ? then_non_constant_nullable : constant_null AS res FROM nullable_00431;
+SELECT cond_non_constant_nullable ? then_non_constant_nullable : else_non_constant AS res FROM nullable_00431;
+SELECT cond_non_constant_nullable ? then_non_constant_nullable : else_non_constant_nullable AS res FROM nullable_00431;
-DROP TABLE nullable;
+DROP TABLE nullable_00431;
diff --git a/dbms/tests/queries/0_stateless/00457_log_tinylog_stripelog_nullable.sql b/dbms/tests/queries/0_stateless/00457_log_tinylog_stripelog_nullable.sql
index ac0861a9b8b..cb3ca2709ed 100644
--- a/dbms/tests/queries/0_stateless/00457_log_tinylog_stripelog_nullable.sql
+++ b/dbms/tests/queries/0_stateless/00457_log_tinylog_stripelog_nullable.sql
@@ -1,40 +1,40 @@
-DROP TABLE IF EXISTS nullable;
+DROP TABLE IF EXISTS nullable_00457;
-CREATE TABLE nullable (s String, ns Nullable(String), narr Array(Nullable(UInt64))) ENGINE = Log;
+CREATE TABLE nullable_00457 (s String, ns Nullable(String), narr Array(Nullable(UInt64))) ENGINE = Log;
-INSERT INTO nullable SELECT toString(number), number % 3 = 1 ? toString(number) : NULL, arrayMap(x -> x % 2 = 1 ? x : NULL, range(number)) FROM system.numbers LIMIT 10;
-SELECT * FROM nullable ORDER BY s;
-SELECT s FROM nullable ORDER BY s;
-SELECT ns FROM nullable ORDER BY s;
-SELECT narr FROM nullable ORDER BY s;
-SELECT s, narr FROM nullable ORDER BY s;
+INSERT INTO nullable_00457 SELECT toString(number), number % 3 = 1 ? toString(number) : NULL, arrayMap(x -> x % 2 = 1 ? x : NULL, range(number)) FROM system.numbers LIMIT 10;
+SELECT * FROM nullable_00457 ORDER BY s;
+SELECT s FROM nullable_00457 ORDER BY s;
+SELECT ns FROM nullable_00457 ORDER BY s;
+SELECT narr FROM nullable_00457 ORDER BY s;
+SELECT s, narr FROM nullable_00457 ORDER BY s;
-INSERT INTO nullable SELECT toString(number), number % 3 = 1 ? toString(number) : NULL, arrayMap(x -> x % 2 = 1 ? x : NULL, range(number)) FROM system.numbers LIMIT 10, 10;
+INSERT INTO nullable_00457 SELECT toString(number), number % 3 = 1 ? toString(number) : NULL, arrayMap(x -> x % 2 = 1 ? x : NULL, range(number)) FROM system.numbers LIMIT 10, 10;
-DROP TABLE IF EXISTS nullable;
+DROP TABLE IF EXISTS nullable_00457;
-CREATE TABLE nullable (s String, ns Nullable(String), narr Array(Nullable(UInt64))) ENGINE = TinyLog;
+CREATE TABLE nullable_00457 (s String, ns Nullable(String), narr Array(Nullable(UInt64))) ENGINE = TinyLog;
-INSERT INTO nullable SELECT toString(number), number % 3 = 1 ? toString(number) : NULL, arrayMap(x -> x % 2 = 1 ? x : NULL, range(number)) FROM system.numbers LIMIT 10;
-SELECT * FROM nullable ORDER BY s;
-SELECT s FROM nullable ORDER BY s;
-SELECT ns FROM nullable ORDER BY s;
-SELECT narr FROM nullable ORDER BY s;
-SELECT s, narr FROM nullable ORDER BY s;
+INSERT INTO nullable_00457 SELECT toString(number), number % 3 = 1 ? toString(number) : NULL, arrayMap(x -> x % 2 = 1 ? x : NULL, range(number)) FROM system.numbers LIMIT 10;
+SELECT * FROM nullable_00457 ORDER BY s;
+SELECT s FROM nullable_00457 ORDER BY s;
+SELECT ns FROM nullable_00457 ORDER BY s;
+SELECT narr FROM nullable_00457 ORDER BY s;
+SELECT s, narr FROM nullable_00457 ORDER BY s;
-INSERT INTO nullable SELECT toString(number), number % 3 = 1 ? toString(number) : NULL, arrayMap(x -> x % 2 = 1 ? x : NULL, range(number)) FROM system.numbers LIMIT 10, 10;
+INSERT INTO nullable_00457 SELECT toString(number), number % 3 = 1 ? toString(number) : NULL, arrayMap(x -> x % 2 = 1 ? x : NULL, range(number)) FROM system.numbers LIMIT 10, 10;
-DROP TABLE IF EXISTS nullable;
+DROP TABLE IF EXISTS nullable_00457;
-CREATE TABLE nullable (s String, ns Nullable(String), narr Array(Nullable(UInt64))) ENGINE = StripeLog;
+CREATE TABLE nullable_00457 (s String, ns Nullable(String), narr Array(Nullable(UInt64))) ENGINE = StripeLog;
-INSERT INTO nullable SELECT toString(number), number % 3 = 1 ? toString(number) : NULL, arrayMap(x -> x % 2 = 1 ? x : NULL, range(number)) FROM system.numbers LIMIT 10;
-SELECT * FROM nullable ORDER BY s;
-SELECT s FROM nullable ORDER BY s;
-SELECT ns FROM nullable ORDER BY s;
-SELECT narr FROM nullable ORDER BY s;
-SELECT s, narr FROM nullable ORDER BY s;
+INSERT INTO nullable_00457 SELECT toString(number), number % 3 = 1 ? toString(number) : NULL, arrayMap(x -> x % 2 = 1 ? x : NULL, range(number)) FROM system.numbers LIMIT 10;
+SELECT * FROM nullable_00457 ORDER BY s;
+SELECT s FROM nullable_00457 ORDER BY s;
+SELECT ns FROM nullable_00457 ORDER BY s;
+SELECT narr FROM nullable_00457 ORDER BY s;
+SELECT s, narr FROM nullable_00457 ORDER BY s;
-INSERT INTO nullable SELECT toString(number), number % 3 = 1 ? toString(number) : NULL, arrayMap(x -> x % 2 = 1 ? x : NULL, range(number)) FROM system.numbers LIMIT 10, 10;
+INSERT INTO nullable_00457 SELECT toString(number), number % 3 = 1 ? toString(number) : NULL, arrayMap(x -> x % 2 = 1 ? x : NULL, range(number)) FROM system.numbers LIMIT 10, 10;
-DROP TABLE nullable;
+DROP TABLE nullable_00457;
diff --git a/dbms/tests/queries/0_stateless/00465_nullable_default.sql b/dbms/tests/queries/0_stateless/00465_nullable_default.sql
index e3b764c4a5a..bbc9af5a955 100644
--- a/dbms/tests/queries/0_stateless/00465_nullable_default.sql
+++ b/dbms/tests/queries/0_stateless/00465_nullable_default.sql
@@ -1,5 +1,5 @@
-DROP TABLE IF EXISTS nullable;
-CREATE TABLE nullable (id Nullable(UInt32), cat String) ENGINE = Log;
-INSERT INTO nullable (cat) VALUES ('test');
-SELECT * FROM nullable;
-DROP TABLE nullable;
+DROP TABLE IF EXISTS nullable_00465;
+CREATE TABLE nullable_00465 (id Nullable(UInt32), cat String) ENGINE = Log;
+INSERT INTO nullable_00465 (cat) VALUES ('test');
+SELECT * FROM nullable_00465;
+DROP TABLE nullable_00465;
diff --git a/dbms/tests/queries/0_stateless/00472_create_view_if_not_exists.sql b/dbms/tests/queries/0_stateless/00472_create_view_if_not_exists.sql
index bb25cf66607..cff8b605b14 100644
--- a/dbms/tests/queries/0_stateless/00472_create_view_if_not_exists.sql
+++ b/dbms/tests/queries/0_stateless/00472_create_view_if_not_exists.sql
@@ -1,9 +1,9 @@
-DROP TABLE IF EXISTS t;
-DROP TABLE IF EXISTS mv;
-DROP TABLE IF EXISTS `.inner.mv`;
+DROP TABLE IF EXISTS t_00472;
+DROP TABLE IF EXISTS mv_00472;
+DROP TABLE IF EXISTS `.inner.mv_00472`;
-CREATE TABLE t (x UInt8) ENGINE = Null;
-CREATE VIEW IF NOT EXISTS mv AS SELECT * FROM t;
+CREATE TABLE t_00472 (x UInt8) ENGINE = Null;
+CREATE VIEW IF NOT EXISTS mv_00472 AS SELECT * FROM t_00472;
-DROP TABLE t;
-DROP TABLE mv;
+DROP TABLE t_00472;
+DROP TABLE mv_00472;
diff --git a/dbms/tests/queries/0_stateless/00481_reading_from_last_granula.sql b/dbms/tests/queries/0_stateless/00481_reading_from_last_granula.sql
index 58a206854e6..c88d439cd11 100644
--- a/dbms/tests/queries/0_stateless/00481_reading_from_last_granula.sql
+++ b/dbms/tests/queries/0_stateless/00481_reading_from_last_granula.sql
@@ -1,7 +1,7 @@
-DROP TABLE IF EXISTS tab;
-CREATE TABLE tab (date Date, value UInt64, s String, m FixedString(16)) ENGINE = MergeTree(date, (date, value), 8);
-INSERT INTO tab SELECT today() as date, number as value, '' as s, toFixedString('', 16) as m from system.numbers limit 42;
+DROP TABLE IF EXISTS tab_00481;
+CREATE TABLE tab_00481 (date Date, value UInt64, s String, m FixedString(16)) ENGINE = MergeTree(date, (date, value), 8);
+INSERT INTO tab_00481 SELECT today() as date, number as value, '' as s, toFixedString('', 16) as m from system.numbers limit 42;
SET preferred_max_column_in_block_size_bytes = 32;
-SELECT blockSize(), * from tab format Null;
+SELECT blockSize(), * from tab_00481 format Null;
SELECT 0;
diff --git a/dbms/tests/queries/0_stateless/00484_preferred_max_column_in_block_size_bytes.sql b/dbms/tests/queries/0_stateless/00484_preferred_max_column_in_block_size_bytes.sql
index 80a91f38ab6..65e74ccf61d 100644
--- a/dbms/tests/queries/0_stateless/00484_preferred_max_column_in_block_size_bytes.sql
+++ b/dbms/tests/queries/0_stateless/00484_preferred_max_column_in_block_size_bytes.sql
@@ -1,34 +1,34 @@
-drop table if exists tab;
-create table tab (date Date, x UInt64, s FixedString(128)) engine = MergeTree(date, (date, x), 8192);
-insert into tab select today(), number, toFixedString('', 128) from system.numbers limit 8192;
+drop table if exists tab_00484;
+create table tab_00484 (date Date, x UInt64, s FixedString(128)) engine = MergeTree(date, (date, x), 8192);
+insert into tab_00484 select today(), number, toFixedString('', 128) from system.numbers limit 8192;
set preferred_block_size_bytes = 2000000;
set preferred_max_column_in_block_size_bytes = 0;
-select max(blockSize()), min(blockSize()), any(ignore(*)) from tab;
+select max(blockSize()), min(blockSize()), any(ignore(*)) from tab_00484;
set preferred_max_column_in_block_size_bytes = 128;
-select max(blockSize()), min(blockSize()), any(ignore(*)) from tab;
+select max(blockSize()), min(blockSize()), any(ignore(*)) from tab_00484;
set preferred_max_column_in_block_size_bytes = 256;
-select max(blockSize()), min(blockSize()), any(ignore(*)) from tab;
+select max(blockSize()), min(blockSize()), any(ignore(*)) from tab_00484;
set preferred_max_column_in_block_size_bytes = 2097152;
-select max(blockSize()), min(blockSize()), any(ignore(*)) from tab;
+select max(blockSize()), min(blockSize()), any(ignore(*)) from tab_00484;
set preferred_max_column_in_block_size_bytes = 4194304;
-select max(blockSize()), min(blockSize()), any(ignore(*)) from tab;
+select max(blockSize()), min(blockSize()), any(ignore(*)) from tab_00484;
-drop table if exists tab;
-create table tab (date Date, x UInt64, s FixedString(128)) engine = MergeTree(date, (date, x), 32);
-insert into tab select today(), number, toFixedString('', 128) from system.numbers limit 47;
+drop table if exists tab_00484;
+create table tab_00484 (date Date, x UInt64, s FixedString(128)) engine = MergeTree(date, (date, x), 32);
+insert into tab_00484 select today(), number, toFixedString('', 128) from system.numbers limit 47;
set preferred_max_column_in_block_size_bytes = 1152;
-select blockSize(), * from tab where x = 1 or x > 36 format Null;
+select blockSize(), * from tab_00484 where x = 1 or x > 36 format Null;
-drop table if exists tab;
-create table tab (date Date, x UInt64, s FixedString(128)) engine = MergeTree(date, (date, x), 8192);
-insert into tab select today(), number, toFixedString('', 128) from system.numbers limit 10;
+drop table if exists tab_00484;
+create table tab_00484 (date Date, x UInt64, s FixedString(128)) engine = MergeTree(date, (date, x), 8192);
+insert into tab_00484 select today(), number, toFixedString('', 128) from system.numbers limit 10;
set preferred_max_column_in_block_size_bytes = 128;
-select s from tab where s == '' format Null;
+select s from tab_00484 where s == '' format Null;
-drop table if exists tab;
-create table tab (date Date, x UInt64, s String) engine = MergeTree(date, (date, x), 8192);
-insert into tab select today(), number, 'abc' from system.numbers limit 81920;
+drop table if exists tab_00484;
+create table tab_00484 (date Date, x UInt64, s String) engine = MergeTree(date, (date, x), 8192);
+insert into tab_00484 select today(), number, 'abc' from system.numbers limit 81920;
set preferred_block_size_bytes = 0;
-select count(*) from tab prewhere s != 'abc' format Null;
-select count(*) from tab prewhere s = 'abc' format Null;
+select count(*) from tab_00484 prewhere s != 'abc' format Null;
+select count(*) from tab_00484 prewhere s = 'abc' format Null;
diff --git a/dbms/tests/queries/0_stateless/00502_sum_map.sql b/dbms/tests/queries/0_stateless/00502_sum_map.sql
index 23de78f2ad1..7d413aef208 100644
--- a/dbms/tests/queries/0_stateless/00502_sum_map.sql
+++ b/dbms/tests/queries/0_stateless/00502_sum_map.sql
@@ -1,6 +1,5 @@
SET send_logs_level = 'none';
-CREATE DATABASE IF NOT EXISTS test;
DROP TABLE IF EXISTS sum_map;
CREATE TABLE sum_map(date Date, timeslot DateTime, statusMap Nested(status UInt16, requests UInt64)) ENGINE = Log;
diff --git a/dbms/tests/queries/0_stateless/00504_mergetree_arrays_rw.sql b/dbms/tests/queries/0_stateless/00504_mergetree_arrays_rw.sql
index b7bd326815e..766f9dfb368 100644
--- a/dbms/tests/queries/0_stateless/00504_mergetree_arrays_rw.sql
+++ b/dbms/tests/queries/0_stateless/00504_mergetree_arrays_rw.sql
@@ -1,4 +1,3 @@
-create database if not exists test;
drop table if exists test_ins_arr;
create table test_ins_arr (date Date, val Array(UInt64)) engine = MergeTree(date, (date), 8192);
diff --git a/dbms/tests/queries/0_stateless/00508_materialized_view_to.sql b/dbms/tests/queries/0_stateless/00508_materialized_view_to.sql
index f975ec35510..b701247f7ee 100644
--- a/dbms/tests/queries/0_stateless/00508_materialized_view_to.sql
+++ b/dbms/tests/queries/0_stateless/00508_materialized_view_to.sql
@@ -1,32 +1,32 @@
DROP TABLE IF EXISTS test.src;
DROP TABLE IF EXISTS test.dst;
-DROP TABLE IF EXISTS test.mv;
+DROP TABLE IF EXISTS test.mv_00508;
CREATE TABLE test.src (x UInt8) ENGINE = Null;
CREATE TABLE test.dst (x UInt8) ENGINE = Memory;
USE test;
-CREATE MATERIALIZED VIEW mv TO dst AS SELECT * FROM src;
+CREATE MATERIALIZED VIEW test.mv_00508 TO dst AS SELECT * FROM src;
INSERT INTO src VALUES (1), (2);
-SELECT * FROM mv ORDER BY x;
+SELECT * FROM test.mv_00508 ORDER BY x;
-- Detach MV and see if the data is still readable
-DETACH TABLE mv;
+DETACH TABLE test.mv_00508;
SELECT * FROM dst ORDER BY x;
USE default;
-- Reattach MV (shortcut)
-ATTACH TABLE test.mv;
+ATTACH TABLE test.mv_00508;
INSERT INTO test.src VALUES (3);
-SELECT * FROM test.mv ORDER BY x;
+SELECT * FROM test.mv_00508 ORDER BY x;
-- Drop the MV and see if the data is still readable
-DROP TABLE test.mv;
+DROP TABLE test.mv_00508;
SELECT * FROM test.dst ORDER BY x;
DROP TABLE test.src;
diff --git a/dbms/tests/queries/0_stateless/00568_compile_catch_throw.sh b/dbms/tests/queries/0_stateless/00568_compile_catch_throw.sh
index 56e11138e2f..fbf5efcda2c 100755
--- a/dbms/tests/queries/0_stateless/00568_compile_catch_throw.sh
+++ b/dbms/tests/queries/0_stateless/00568_compile_catch_throw.sh
@@ -1,6 +1,7 @@
#!/usr/bin/env bash
-[ -z "$CLICKHOUSE_CLIENT" ] && CLICKHOUSE_CLIENT="clickhouse-client"
+CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+. $CURDIR/../shell_config.sh
SETTINGS="--compile=1 --min_count_to_compile=0 --max_threads=1 --max_memory_usage=8000000 --server_logs_file=/dev/null"
output=$($CLICKHOUSE_CLIENT -q "SELECT length(groupArray(number)) FROM (SELECT * FROM system.numbers LIMIT 1000000)" $SETTINGS 2>&1)
diff --git a/dbms/tests/queries/0_stateless/00571_alter_nullable.sql b/dbms/tests/queries/0_stateless/00571_alter_nullable.sql
index 486c82a9133..7b3b36f7385 100644
--- a/dbms/tests/queries/0_stateless/00571_alter_nullable.sql
+++ b/dbms/tests/queries/0_stateless/00571_alter_nullable.sql
@@ -1,13 +1,13 @@
-DROP TABLE IF EXISTS nullable;
-CREATE TABLE nullable (x String) ENGINE = MergeTree ORDER BY x;
-INSERT INTO nullable VALUES ('hello'), ('world');
-SELECT * FROM nullable;
-ALTER TABLE nullable ADD COLUMN n Nullable(UInt64);
-SELECT * FROM nullable;
-ALTER TABLE nullable DROP COLUMN n;
-ALTER TABLE nullable ADD COLUMN n Nullable(UInt64) DEFAULT NULL;
-SELECT * FROM nullable;
-ALTER TABLE nullable DROP COLUMN n;
-ALTER TABLE nullable ADD COLUMN n Nullable(UInt64) DEFAULT 0;
-SELECT * FROM nullable;
-DROP TABLE nullable;
+DROP TABLE IF EXISTS nullable_00571;
+CREATE TABLE nullable_00571 (x String) ENGINE = MergeTree ORDER BY x;
+INSERT INTO nullable_00571 VALUES ('hello'), ('world');
+SELECT * FROM nullable_00571;
+ALTER TABLE nullable_00571 ADD COLUMN n Nullable(UInt64);
+SELECT * FROM nullable_00571;
+ALTER TABLE nullable_00571 DROP COLUMN n;
+ALTER TABLE nullable_00571 ADD COLUMN n Nullable(UInt64) DEFAULT NULL;
+SELECT * FROM nullable_00571;
+ALTER TABLE nullable_00571 DROP COLUMN n;
+ALTER TABLE nullable_00571 ADD COLUMN n Nullable(UInt64) DEFAULT 0;
+SELECT * FROM nullable_00571;
+DROP TABLE nullable_00571;
diff --git a/dbms/tests/queries/0_stateless/00575_merge_and_index_with_function_in_in.sql b/dbms/tests/queries/0_stateless/00575_merge_and_index_with_function_in_in.sql
index cfd1275f03e..e491ef37ada 100644
--- a/dbms/tests/queries/0_stateless/00575_merge_and_index_with_function_in_in.sql
+++ b/dbms/tests/queries/0_stateless/00575_merge_and_index_with_function_in_in.sql
@@ -1,9 +1,9 @@
-DROP TABLE IF EXISTS t;
+DROP TABLE IF EXISTS t_00575;
-create table t(d Date) engine MergeTree(d, d, 8192);
+create table t_00575(d Date) engine MergeTree(d, d, 8192);
-insert into t values ('2018-02-20');
+insert into t_00575 values ('2018-02-20');
-select count() from t where toDayOfWeek(d) in (2);
+select count() from t_00575 where toDayOfWeek(d) in (2);
-DROP TABLE t;
+DROP TABLE t_00575;
diff --git a/dbms/tests/queries/0_stateless/00577_replacing_merge_tree_vertical_merge.sql b/dbms/tests/queries/0_stateless/00577_replacing_merge_tree_vertical_merge.sql
index 57b17f7f255..b6a0eab8702 100644
--- a/dbms/tests/queries/0_stateless/00577_replacing_merge_tree_vertical_merge.sql
+++ b/dbms/tests/queries/0_stateless/00577_replacing_merge_tree_vertical_merge.sql
@@ -1,8 +1,8 @@
-drop table if exists tab;
-create table tab (date Date, version UInt64, val UInt64) engine = ReplacingMergeTree(version) partition by date order by date settings enable_vertical_merge_algorithm = 1, vertical_merge_algorithm_min_rows_to_activate = 1, vertical_merge_algorithm_min_columns_to_activate = 0;
-insert into tab values ('2018-01-01', 2, 2), ('2018-01-01', 1, 1);
-insert into tab values ('2018-01-01', 0, 0);
-select * from tab order by version;
-OPTIMIZE TABLE tab;
-select * from tab;
+drop table if exists tab_00577;
+create table tab_00577 (date Date, version UInt64, val UInt64) engine = ReplacingMergeTree(version) partition by date order by date settings enable_vertical_merge_algorithm = 1, vertical_merge_algorithm_min_rows_to_activate = 1, vertical_merge_algorithm_min_columns_to_activate = 0;
+insert into tab_00577 values ('2018-01-01', 2, 2), ('2018-01-01', 1, 1);
+insert into tab_00577 values ('2018-01-01', 0, 0);
+select * from tab_00577 order by version;
+OPTIMIZE TABLE tab_00577;
+select * from tab_00577;
diff --git a/dbms/tests/queries/0_stateless/00579_virtual_column_and_lazy.sql b/dbms/tests/queries/0_stateless/00579_virtual_column_and_lazy.sql
index c30133863b5..2b659e0da3d 100644
--- a/dbms/tests/queries/0_stateless/00579_virtual_column_and_lazy.sql
+++ b/dbms/tests/queries/0_stateless/00579_virtual_column_and_lazy.sql
@@ -1,18 +1,18 @@
-DROP TABLE IF EXISTS test.sample1;
-DROP TABLE IF EXISTS test.sample2;
-DROP TABLE IF EXISTS test.sample_merge;
+DROP TABLE IF EXISTS test.sample_00579_1;
+DROP TABLE IF EXISTS test.sample_00579_2;
+DROP TABLE IF EXISTS test.sample_merge_00579;
-CREATE TABLE test.sample1 (x UInt64, d Date DEFAULT today()) ENGINE = MergeTree(d, intHash64(x), intHash64(x), 10);
-CREATE TABLE test.sample2 (x UInt64, d Date DEFAULT today()) ENGINE = MergeTree(d, intHash64(x), intHash64(x), 10);
+CREATE TABLE test.sample_00579_1 (x UInt64, d Date DEFAULT today()) ENGINE = MergeTree(d, intHash64(x), intHash64(x), 10);
+CREATE TABLE test.sample_00579_2 (x UInt64, d Date DEFAULT today()) ENGINE = MergeTree(d, intHash64(x), intHash64(x), 10);
-INSERT INTO test.sample1 (x) SELECT number AS x FROM system.numbers LIMIT 1000;
-INSERT INTO test.sample2 (x) SELECT number AS x FROM system.numbers LIMIT 2000;
+INSERT INTO test.sample_00579_1 (x) SELECT number AS x FROM system.numbers LIMIT 1000;
+INSERT INTO test.sample_00579_2 (x) SELECT number AS x FROM system.numbers LIMIT 2000;
-CREATE TABLE test.sample_merge AS test.sample1 ENGINE = Merge(test, '^sample\\d$');
+CREATE TABLE test.sample_merge_00579 AS test.sample_00579_1 ENGINE = Merge(test, '^sample_00579_\\d$');
SET max_threads = 1;
-SELECT _sample_factor FROM merge(test, '^sample\\d$');
+SELECT _sample_factor FROM merge(test, '^sample_00579_\\d$');
-DROP TABLE test.sample1;
-DROP TABLE test.sample2;
-DROP TABLE test.sample_merge;
+DROP TABLE test.sample_00579_1;
+DROP TABLE test.sample_00579_2;
+DROP TABLE test.sample_merge_00579;
diff --git a/dbms/tests/queries/0_stateless/00594_alias_in_distributed.sql b/dbms/tests/queries/0_stateless/00594_alias_in_distributed.sql
index f8a17363506..54e47c78942 100644
--- a/dbms/tests/queries/0_stateless/00594_alias_in_distributed.sql
+++ b/dbms/tests/queries/0_stateless/00594_alias_in_distributed.sql
@@ -1,7 +1,7 @@
-DROP TABLE IF EXISTS alias_local10;
-DROP TABLE IF EXISTS alias10;
+DROP TABLE IF EXISTS test.alias_local10;
+DROP TABLE IF EXISTS test.alias10;
-CREATE TABLE alias_local10 (
+CREATE TABLE test.alias_local10 (
Id Int8,
EventDate Date DEFAULT '2000-01-01',
field1 Int8,
@@ -9,29 +9,29 @@ CREATE TABLE alias_local10 (
field3 ALIAS CASE WHEN field1 = 1 THEN field2 ELSE '0' END
) ENGINE = MergeTree(EventDate, (Id, EventDate), 8192);
-CREATE TABLE alias10 AS alias_local10 ENGINE = Distributed(test_shard_localhost, test, alias_local10, cityHash64(Id));
+CREATE TABLE test.alias10 AS test.alias_local10 ENGINE = Distributed(test_shard_localhost, test, alias_local10, cityHash64(Id));
-INSERT INTO alias_local10 (Id, EventDate, field1, field2) VALUES (1, '2000-01-01', 1, '12345'), (2, '2000-01-01', 2, '54321'), (3, '2000-01-01', 0, '');
+INSERT INTO test.alias_local10 (Id, EventDate, field1, field2) VALUES (1, '2000-01-01', 1, '12345'), (2, '2000-01-01', 2, '54321'), (3, '2000-01-01', 0, '');
-SELECT field1, field2, field3 FROM alias_local10;
-SELECT field1, field2, field3 FROM alias_local10 WHERE EventDate='2000-01-01';
-SELECT field1, field2 FROM alias_local10 WHERE EventDate='2000-01-01';
+SELECT field1, field2, field3 FROM test.alias_local10;
+SELECT field1, field2, field3 FROM test.alias_local10 WHERE EventDate='2000-01-01';
+SELECT field1, field2 FROM test.alias_local10 WHERE EventDate='2000-01-01';
-SELECT field1, field2, field3 FROM alias10;
-SELECT field1, field2, field3 FROM alias10 WHERE EventDate='2000-01-01';
-SELECT field1, field2 FROM alias10 WHERE EventDate='2000-01-01';
+SELECT field1, field2, field3 FROM test.alias10;
+SELECT field1, field2, field3 FROM test.alias10 WHERE EventDate='2000-01-01';
+SELECT field1, field2 FROM test.alias10 WHERE EventDate='2000-01-01';
-SELECT field2, field3 FROM alias10 WHERE EventDate='2000-01-01';
-SELECT field3 FROM alias10 WHERE EventDate='2000-01-01';
-SELECT field2, field3 FROM alias10;
-SELECT field3 FROM alias10;
+SELECT field2, field3 FROM test.alias10 WHERE EventDate='2000-01-01';
+SELECT field3 FROM test.alias10 WHERE EventDate='2000-01-01';
+SELECT field2, field3 FROM test.alias10;
+SELECT field3 FROM test.alias10;
-SELECT field1 FROM alias10 WHERE field3 = '12345';
-SELECT field2 FROM alias10 WHERE field3 = '12345';
-SELECT field3 FROM alias10 WHERE field3 = '12345';
+SELECT field1 FROM test.alias10 WHERE field3 = '12345';
+SELECT field2 FROM test.alias10 WHERE field3 = '12345';
+SELECT field3 FROM test.alias10 WHERE field3 = '12345';
-DROP TABLE alias10;
-CREATE TABLE alias10 (
+DROP TABLE test.alias10;
+CREATE TABLE test.alias10 (
Id Int8,
EventDate Date,
field1 Int8,
@@ -39,22 +39,22 @@ CREATE TABLE alias10 (
field3 String
) ENGINE = Distributed(test_shard_localhost, test, alias_local10);
-SELECT field1, field2, field3 FROM alias_local10;
-SELECT field1, field2, field3 FROM alias_local10 WHERE EventDate='2000-01-01';
-SELECT field1, field2 FROM alias_local10 WHERE EventDate='2000-01-01';
+SELECT field1, field2, field3 FROM test.alias_local10;
+SELECT field1, field2, field3 FROM test.alias_local10 WHERE EventDate='2000-01-01';
+SELECT field1, field2 FROM test.alias_local10 WHERE EventDate='2000-01-01';
-SELECT field1, field2, field3 FROM alias10;
-SELECT field1, field2, field3 FROM alias10 WHERE EventDate='2000-01-01';
-SELECT field1, field2 FROM alias10 WHERE EventDate='2000-01-01';
+SELECT field1, field2, field3 FROM test.alias10;
+SELECT field1, field2, field3 FROM test.alias10 WHERE EventDate='2000-01-01';
+SELECT field1, field2 FROM test.alias10 WHERE EventDate='2000-01-01';
-SELECT field2, field3 FROM alias10 WHERE EventDate='2000-01-01';
-SELECT field3 FROM alias10 WHERE EventDate='2000-01-01';
-SELECT field2, field3 FROM alias10;
-SELECT field3 FROM alias10;
+SELECT field2, field3 FROM test.alias10 WHERE EventDate='2000-01-01';
+SELECT field3 FROM test.alias10 WHERE EventDate='2000-01-01';
+SELECT field2, field3 FROM test.alias10;
+SELECT field3 FROM test.alias10;
-SELECT field1 FROM alias10 WHERE field3 = '12345';
-SELECT field2 FROM alias10 WHERE field3 = '12345';
-SELECT field3 FROM alias10 WHERE field3 = '12345';
+SELECT field1 FROM test.alias10 WHERE field3 = '12345';
+SELECT field2 FROM test.alias10 WHERE field3 = '12345';
+SELECT field3 FROM test.alias10 WHERE field3 = '12345';
-DROP TABLE alias_local10;
-DROP TABLE alias10;
+DROP TABLE test.alias_local10;
+DROP TABLE test.alias10;
diff --git a/dbms/tests/queries/0_stateless/00610_materialized_view_forward_alter_partition_statements.sql b/dbms/tests/queries/0_stateless/00610_materialized_view_forward_alter_partition_statements.sql
index 182ef7a2eca..6f5ba07e5db 100644
--- a/dbms/tests/queries/0_stateless/00610_materialized_view_forward_alter_partition_statements.sql
+++ b/dbms/tests/queries/0_stateless/00610_materialized_view_forward_alter_partition_statements.sql
@@ -1,18 +1,18 @@
-DROP TABLE IF EXISTS tab;
-DROP TABLE IF EXISTS mv;
+DROP TABLE IF EXISTS tab_00610;
+DROP TABLE IF EXISTS mv_00610;
-CREATE TABLE tab(d Date, x UInt32) ENGINE MergeTree(d, x, 8192);
-CREATE MATERIALIZED VIEW mv(d Date, y UInt64) ENGINE MergeTree(d, y, 8192) AS SELECT d, x + 1 AS y FROM tab;
+CREATE TABLE tab_00610(d Date, x UInt32) ENGINE MergeTree(d, x, 8192);
+CREATE MATERIALIZED VIEW mv_00610(d Date, y UInt64) ENGINE MergeTree(d, y, 8192) AS SELECT d, x + 1 AS y FROM tab_00610;
-INSERT INTO tab VALUES ('2018-01-01', 1), ('2018-01-01', 2), ('2018-02-01', 3);
+INSERT INTO tab_00610 VALUES ('2018-01-01', 1), ('2018-01-01', 2), ('2018-02-01', 3);
SELECT '-- Before DROP PARTITION --';
-SELECT * FROM mv ORDER BY y;
+SELECT * FROM mv_00610 ORDER BY y;
-ALTER TABLE mv DROP PARTITION 201801;
+ALTER TABLE mv_00610 DROP PARTITION 201801;
SELECT '-- After DROP PARTITION --';
-SELECT * FROM mv ORDER BY y;
+SELECT * FROM mv_00610 ORDER BY y;
-DROP TABLE tab;
-DROP TABLE mv;
+DROP TABLE tab_00610;
+DROP TABLE mv_00610;
diff --git a/dbms/tests/queries/0_stateless/00612_http_max_query_size.sh b/dbms/tests/queries/0_stateless/00612_http_max_query_size.sh
index a0c3c637ae1..8e38e52941e 100755
--- a/dbms/tests/queries/0_stateless/00612_http_max_query_size.sh
+++ b/dbms/tests/queries/0_stateless/00612_http_max_query_size.sh
@@ -11,11 +11,11 @@ echo "select '1'" | ${CLICKHOUSE_CURL} -sSg ${CLICKHOUSE_URL}/?max_query_size=10
echo -
echo "select '11'" | ${CLICKHOUSE_CURL} -sSg ${CLICKHOUSE_URL}/?max_query_size=10 -d @- 2>&1 | grep -o "Max query size exceeded"
-echo 'drop table if exists tab' | ${CLICKHOUSE_CURL} -sSg ${CLICKHOUSE_URL} -d @-
-echo 'create table tab (key UInt64, val UInt64) engine = MergeTree order by key' | ${CLICKHOUSE_CURL} -sSg ${CLICKHOUSE_URL} -d @-
-echo 'into tab values (1, 1), (2, 2), (3, 3), (4, 4), (5, 5)' | ${CLICKHOUSE_CURL} -sSg "${CLICKHOUSE_URL}/?max_query_size=30&query=insert" -d @-
-echo 'select val from tab order by val' | ${CLICKHOUSE_CURL} -sSg ${CLICKHOUSE_URL} -d @-
-echo 'drop table tab' | ${CLICKHOUSE_CURL} -sSg ${CLICKHOUSE_URL} -d @-
+echo 'drop table if exists tab_00612_1' | ${CLICKHOUSE_CURL} -sSg ${CLICKHOUSE_URL_PARAMS} -d @-
+echo 'create table tab_00612_1 (key UInt64, val UInt64) engine = MergeTree order by key' | ${CLICKHOUSE_CURL} -sSg ${CLICKHOUSE_URL_PARAMS} -d @-
+echo 'into tab_00612_1 values (1, 1), (2, 2), (3, 3), (4, 4), (5, 5)' | ${CLICKHOUSE_CURL} -sSg "${CLICKHOUSE_URL_PARAMS}&max_query_size=30&query=insert" -d @-
+echo 'select val from tab_00612_1 order by val' | ${CLICKHOUSE_CURL} -sSg ${CLICKHOUSE_URL_PARAMS} -d @-
+echo 'drop table tab_00612_1' | ${CLICKHOUSE_CURL} -sSg ${CLICKHOUSE_URL_PARAMS} -d @-
echo "
import requests
diff --git a/dbms/tests/queries/0_stateless/00612_pk_in_tuple.reference b/dbms/tests/queries/0_stateless/00612_pk_in_tuple.reference
index 351e10ca3ff..74e8e642f20 100644
--- a/dbms/tests/queries/0_stateless/00612_pk_in_tuple.reference
+++ b/dbms/tests/queries/0_stateless/00612_pk_in_tuple.reference
@@ -24,14 +24,14 @@ key, arrayJoin(n.x) in ((1, 1), (2, 2))
(key, left array join n.x) in ((1, 1), (2, 2))
1
2
-max(key) from tab where (key, left array join n.x) in (1, 1)
+max(key) from tab_00612 where (key, left array join n.x) in (1, 1)
1
1
-max(key) from tab where (key, left array join n.x) in ((1, 1), (2, 2))
+max(key) from tab_00612 where (key, left array join n.x) in ((1, 1), (2, 2))
2
2
-max(key) from tab any left join (select key, arrayJoin(n.x) as val from tab) using key where (key, val) in (1, 1)
+max(key) from tab_00612 any left join (select key, arrayJoin(n.x) as val from tab_00612) using key where (key, val) in (1, 1)
1
-max(key) from tab any left join (select key, arrayJoin(n.x) as val from tab) using key where (key, val) in ((1, 1), (2, 2))
+max(key) from tab_00612 any left join (select key, arrayJoin(n.x) as val from tab_00612) using key where (key, val) in ((1, 1), (2, 2))
2
1
diff --git a/dbms/tests/queries/0_stateless/00612_pk_in_tuple.sql b/dbms/tests/queries/0_stateless/00612_pk_in_tuple.sql
index d558a9ffa04..fbfaf8407a0 100644
--- a/dbms/tests/queries/0_stateless/00612_pk_in_tuple.sql
+++ b/dbms/tests/queries/0_stateless/00612_pk_in_tuple.sql
@@ -1,45 +1,44 @@
-create database if not exists test;
-drop table if exists tab;
-create table tab (key UInt64, arr Array(UInt64)) Engine = MergeTree order by key;
-insert into tab values (1, [1]);
-insert into tab values (2, [2]);
+drop table if exists tab_00612;
+create table tab_00612 (key UInt64, arr Array(UInt64)) Engine = MergeTree order by key;
+insert into tab_00612 values (1, [1]);
+insert into tab_00612 values (2, [2]);
select 'all';
-select * from tab order by key;
+select * from tab_00612 order by key;
select 'key, arrayJoin(arr) in (1, 1)';
-select key, arrayJoin(arr) as val from tab where (key, val) in (1, 1);
+select key, arrayJoin(arr) as val from tab_00612 where (key, val) in (1, 1);
select 'key, arrayJoin(arr) in ((1, 1), (2, 2))';
-select key, arrayJoin(arr) as val from tab where (key, val) in ((1, 1), (2, 2)) order by key;
+select key, arrayJoin(arr) as val from tab_00612 where (key, val) in ((1, 1), (2, 2)) order by key;
select '(key, left array join arr) in (1, 1)';
-select key from tab left array join arr as val where (key, val) in (1, 1);
+select key from tab_00612 left array join arr as val where (key, val) in (1, 1);
select '(key, left array join arr) in ((1, 1), (2, 2))';
-select key from tab left array join arr as val where (key, val) in ((1, 1), (2, 2)) order by key;
+select key from tab_00612 left array join arr as val where (key, val) in ((1, 1), (2, 2)) order by key;
-drop table if exists tab;
-create table tab (key UInt64, n Nested(x UInt64)) Engine = MergeTree order by key;
-insert into tab values (1, [1]);
-insert into tab values (2, [2]);
+drop table if exists tab_00612;
+create table tab_00612 (key UInt64, n Nested(x UInt64)) Engine = MergeTree order by key;
+insert into tab_00612 values (1, [1]);
+insert into tab_00612 values (2, [2]);
select 'all';
-select * from tab order by key;
+select * from tab_00612 order by key;
select 'key, arrayJoin(n.x) in (1, 1)';
-select key, arrayJoin(n.x) as val from tab where (key, val) in (1, 1);
+select key, arrayJoin(n.x) as val from tab_00612 where (key, val) in (1, 1);
select 'key, arrayJoin(n.x) in ((1, 1), (2, 2))';
-select key, arrayJoin(n.x) as val from tab where (key, val) in ((1, 1), (2, 2)) order by key;
+select key, arrayJoin(n.x) as val from tab_00612 where (key, val) in ((1, 1), (2, 2)) order by key;
select '(key, left array join n.x) in (1, 1)';
-select key from tab left array join n.x as val where (key, val) in (1, 1);
+select key from tab_00612 left array join n.x as val where (key, val) in (1, 1);
select '(key, left array join n.x) in ((1, 1), (2, 2))';
-select key from tab left array join n.x as val where (key, val) in ((1, 1), (2, 2)) order by key;
-select 'max(key) from tab where (key, left array join n.x) in (1, 1)';
-select max(key) from tab left array join `n.x` as val where (key, val) in ((1, 1));
-select max(key) from tab left array join n as val where (key, val.x) in (1, 1);
-select 'max(key) from tab where (key, left array join n.x) in ((1, 1), (2, 2))';
-select max(key) from tab left array join `n.x` as val where (key, val) in ((1, 1), (2, 2));
-select max(key) from tab left array join n as val where (key, val.x) in ((1, 1), (2, 2));
-select 'max(key) from tab any left join (select key, arrayJoin(n.x) as val from tab) using key where (key, val) in (1, 1)';
-select max(key) from tab any left join (select key, arrayJoin(n.x) as val from tab) using key where (key, val) in (1, 1);
-select 'max(key) from tab any left join (select key, arrayJoin(n.x) as val from tab) using key where (key, val) in ((1, 1), (2, 2))';
-select max(key) from tab any left join (select key, arrayJoin(n.x) as val from tab) using key where (key, val) in ((1, 1), (2, 2));
+select key from tab_00612 left array join n.x as val where (key, val) in ((1, 1), (2, 2)) order by key;
+select 'max(key) from tab_00612 where (key, left array join n.x) in (1, 1)';
+select max(key) from tab_00612 left array join `n.x` as val where (key, val) in ((1, 1));
+select max(key) from tab_00612 left array join n as val where (key, val.x) in (1, 1);
+select 'max(key) from tab_00612 where (key, left array join n.x) in ((1, 1), (2, 2))';
+select max(key) from tab_00612 left array join `n.x` as val where (key, val) in ((1, 1), (2, 2));
+select max(key) from tab_00612 left array join n as val where (key, val.x) in ((1, 1), (2, 2));
+select 'max(key) from tab_00612 any left join (select key, arrayJoin(n.x) as val from tab_00612) using key where (key, val) in (1, 1)';
+select max(key) from tab_00612 any left join (select key, arrayJoin(n.x) as val from tab_00612) using key where (key, val) in (1, 1);
+select 'max(key) from tab_00612 any left join (select key, arrayJoin(n.x) as val from tab_00612) using key where (key, val) in ((1, 1), (2, 2))';
+select max(key) from tab_00612 any left join (select key, arrayJoin(n.x) as val from tab_00612) using key where (key, val) in ((1, 1), (2, 2));
-drop table if exists tab;
-CREATE TABLE tab (key1 Int32, id1 Int64, c1 Int64) ENGINE = MergeTree PARTITION BY id1 ORDER BY (key1) ;
-insert into tab values ( -1, 1, 0 );
-SELECT count(*) FROM tab PREWHERE id1 IN (1);
+drop table if exists tab_00612;
+CREATE TABLE tab_00612 (key1 Int32, id1 Int64, c1 Int64) ENGINE = MergeTree PARTITION BY id1 ORDER BY (key1) ;
+insert into tab_00612 values ( -1, 1, 0 );
+SELECT count(*) FROM tab_00612 PREWHERE id1 IN (1);
diff --git a/dbms/tests/queries/0_stateless/00625_summing_merge_tree_merge.sql b/dbms/tests/queries/0_stateless/00625_summing_merge_tree_merge.sql
index f7216b6da66..066259bbd00 100644
--- a/dbms/tests/queries/0_stateless/00625_summing_merge_tree_merge.sql
+++ b/dbms/tests/queries/0_stateless/00625_summing_merge_tree_merge.sql
@@ -1,6 +1,6 @@
-DROP TABLE IF EXISTS tab ;
+DROP TABLE IF EXISTS tab_00625;
-CREATE TABLE tab
+CREATE TABLE tab_00625
(
date Date,
key UInt32,
@@ -10,7 +10,7 @@ CREATE TABLE tab
)
ENGINE = SummingMergeTree(date, (date, key), 1);
-INSERT INTO tab SELECT
+INSERT INTO tab_00625 SELECT
today(),
number,
[toUInt16(number)],
@@ -18,7 +18,7 @@ INSERT INTO tab SELECT
FROM system.numbers
LIMIT 8190;
-INSERT INTO tab SELECT
+INSERT INTO tab_00625 SELECT
today(),
number + 8190,
[toUInt16(number)],
@@ -26,4 +26,4 @@ INSERT INTO tab SELECT
FROM system.numbers
LIMIT 10;
-OPTIMIZE TABLE tab;
+OPTIMIZE TABLE tab_00625;
diff --git a/dbms/tests/queries/0_stateless/00642_cast.sql b/dbms/tests/queries/0_stateless/00642_cast.sql
index 26fcec938ed..4f0c1e7f699 100644
--- a/dbms/tests/queries/0_stateless/00642_cast.sql
+++ b/dbms/tests/queries/0_stateless/00642_cast.sql
@@ -17,8 +17,8 @@ SELECT cast(1, 'Enum8(\'hello\' = 1,\n\t\'world\' = 2)');
SELECT toTimeZone(CAST(1 AS TIMESTAMP), 'UTC');
-DROP TABLE IF EXISTS cast;
-CREATE TABLE cast
+DROP TABLE IF EXISTS test.cast;
+CREATE TABLE test.cast
(
x UInt8,
e Enum8
@@ -39,10 +39,10 @@ CREATE TABLE cast
)
) ENGINE = MergeTree ORDER BY e;
-SHOW CREATE TABLE cast FORMAT TSVRaw;
-DESC TABLE cast;
+SHOW CREATE TABLE test.cast FORMAT TSVRaw;
+DESC TABLE test.cast;
-INSERT INTO cast (x) VALUES (1);
-SELECT * FROM cast;
+INSERT INTO test.cast (x) VALUES (1);
+SELECT * FROM test.cast;
-DROP TABLE cast;
+DROP TABLE test.cast;
diff --git a/dbms/tests/queries/0_stateless/00643_cast_zookeeper.sql b/dbms/tests/queries/0_stateless/00643_cast_zookeeper.sql
index cb4507c713f..8ebfee12da5 100644
--- a/dbms/tests/queries/0_stateless/00643_cast_zookeeper.sql
+++ b/dbms/tests/queries/0_stateless/00643_cast_zookeeper.sql
@@ -1,7 +1,7 @@
-DROP TABLE IF EXISTS cast1;
-DROP TABLE IF EXISTS cast2;
+DROP TABLE IF EXISTS test.cast1;
+DROP TABLE IF EXISTS test.cast2;
-CREATE TABLE cast1
+CREATE TABLE test.cast1
(
x UInt8,
e Enum8
@@ -22,17 +22,17 @@ CREATE TABLE cast1
)
) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_cast', 'r1') ORDER BY e;
-SHOW CREATE TABLE cast1 FORMAT TSVRaw;
-DESC TABLE cast1;
+SHOW CREATE TABLE test.cast1 FORMAT TSVRaw;
+DESC TABLE test.cast1;
-INSERT INTO cast1 (x) VALUES (1);
-SELECT * FROM cast1;
+INSERT INTO test.cast1 (x) VALUES (1);
+SELECT * FROM test.cast1;
-CREATE TABLE cast2 AS cast1 ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_cast', 'r2') ORDER BY e;
+CREATE TABLE test.cast2 AS test.cast1 ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_cast', 'r2') ORDER BY e;
-SYSTEM SYNC REPLICA cast2;
+SYSTEM SYNC REPLICA test.cast2;
-SELECT * FROM cast2;
+SELECT * FROM test.cast2;
-DROP TABLE cast1;
-DROP TABLE cast2;
+DROP TABLE test.cast1;
+DROP TABLE test.cast2;
diff --git a/dbms/tests/queries/0_stateless/00650_array_enumerate_uniq_with_tuples.sql b/dbms/tests/queries/0_stateless/00650_array_enumerate_uniq_with_tuples.sql
index 0948bdc8e51..8ea600f6d15 100644
--- a/dbms/tests/queries/0_stateless/00650_array_enumerate_uniq_with_tuples.sql
+++ b/dbms/tests/queries/0_stateless/00650_array_enumerate_uniq_with_tuples.sql
@@ -1,21 +1,21 @@
-drop table if exists tab;
-create table tab (val UInt32, n Nested(x UInt8, y String)) engine = Memory;
-insert into tab values (1, [1, 2, 1, 1, 2, 1], ['a', 'a', 'b', 'a', 'b', 'b']);
-select arrayEnumerateUniq(n.x) from tab;
-select arrayEnumerateUniq(n.y) from tab;
-select arrayEnumerateUniq(n.x, n.y) from tab;
-select arrayEnumerateUniq(arrayMap((a, b) -> (a, b), n.x, n.y)) from tab;
-select arrayEnumerateUniq(arrayMap((a, b) -> (a, b), n.x, n.y), n.x) from tab;
-select arrayEnumerateUniq(arrayMap((a, b) -> (a, b), n.x, n.y), arrayMap((a, b) -> (b, a), n.x, n.y)) from tab;
+drop table if exists tab_00650;
+create table tab_00650 (val UInt32, n Nested(x UInt8, y String)) engine = Memory;
+insert into tab_00650 values (1, [1, 2, 1, 1, 2, 1], ['a', 'a', 'b', 'a', 'b', 'b']);
+select arrayEnumerateUniq(n.x) from tab_00650;
+select arrayEnumerateUniq(n.y) from tab_00650;
+select arrayEnumerateUniq(n.x, n.y) from tab_00650;
+select arrayEnumerateUniq(arrayMap((a, b) -> (a, b), n.x, n.y)) from tab_00650;
+select arrayEnumerateUniq(arrayMap((a, b) -> (a, b), n.x, n.y), n.x) from tab_00650;
+select arrayEnumerateUniq(arrayMap((a, b) -> (a, b), n.x, n.y), arrayMap((a, b) -> (b, a), n.x, n.y)) from tab_00650;
-drop table tab;
-create table tab (val UInt32, n Nested(x Nullable(UInt8), y String)) engine = Memory;
-insert into tab values (1, [1, Null, 2, 1, 1, 2, 1, Null, Null], ['a', 'a', 'a', 'b', 'a', 'b', 'b', 'b', 'a']);
-select arrayEnumerateUniq(n.x) from tab;
-select arrayEnumerateUniq(n.y) from tab;
-select arrayEnumerateUniq(n.x, n.y) from tab;
-select arrayEnumerateUniq(arrayMap((a, b) -> (a, b), n.x, n.y)) from tab;
-select arrayEnumerateUniq(arrayMap((a, b) -> (a, b), n.x, n.y), n.x) from tab;
-select arrayEnumerateUniq(arrayMap((a, b) -> (a, b), n.x, n.y), arrayMap((a, b) -> (b, a), n.x, n.y)) from tab;
+drop table tab_00650;
+create table tab_00650 (val UInt32, n Nested(x Nullable(UInt8), y String)) engine = Memory;
+insert into tab_00650 values (1, [1, Null, 2, 1, 1, 2, 1, Null, Null], ['a', 'a', 'a', 'b', 'a', 'b', 'b', 'b', 'a']);
+select arrayEnumerateUniq(n.x) from tab_00650;
+select arrayEnumerateUniq(n.y) from tab_00650;
+select arrayEnumerateUniq(n.x, n.y) from tab_00650;
+select arrayEnumerateUniq(arrayMap((a, b) -> (a, b), n.x, n.y)) from tab_00650;
+select arrayEnumerateUniq(arrayMap((a, b) -> (a, b), n.x, n.y), n.x) from tab_00650;
+select arrayEnumerateUniq(arrayMap((a, b) -> (a, b), n.x, n.y), arrayMap((a, b) -> (b, a), n.x, n.y)) from tab_00650;
-drop table tab;
+drop table tab_00650;
diff --git a/dbms/tests/queries/0_stateless/00651_default_database_on_client_reconnect.sh b/dbms/tests/queries/0_stateless/00651_default_database_on_client_reconnect.sh
index e0a7c8e01ef..119e25636dc 100755
--- a/dbms/tests/queries/0_stateless/00651_default_database_on_client_reconnect.sh
+++ b/dbms/tests/queries/0_stateless/00651_default_database_on_client_reconnect.sh
@@ -3,4 +3,4 @@
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
. $CURDIR/../shell_config.sh
-${CLICKHOUSE_CLIENT} --ignore-error --multiquery --query "DROP TABLE IF EXISTS tab; CREATE TABLE tab (val UInt64) engine = Memory; SHOW CREATE TABLE tab format abcd; DESC tab; DROP TABLE tab;" ||: 2> /dev/null
+${CLICKHOUSE_CLIENT} --ignore-error --multiquery --query "DROP TABLE IF EXISTS tab_00651; CREATE TABLE tab_00651 (val UInt64) engine = Memory; SHOW CREATE TABLE tab_00651 format abcd; DESC tab_00651; DROP TABLE tab_00651;" ||: 2> /dev/null
diff --git a/dbms/tests/queries/0_stateless/00673_subquery_prepared_set_performance.sql b/dbms/tests/queries/0_stateless/00673_subquery_prepared_set_performance.sql
index ed94a1aab30..0591592344c 100644
--- a/dbms/tests/queries/0_stateless/00673_subquery_prepared_set_performance.sql
+++ b/dbms/tests/queries/0_stateless/00673_subquery_prepared_set_performance.sql
@@ -1,12 +1,12 @@
-DROP TABLE IF EXISTS mergetree;
+DROP TABLE IF EXISTS mergetree_00673;
-CREATE TABLE mergetree (x UInt64) ENGINE = MergeTree ORDER BY x;
-INSERT INTO mergetree VALUES (1);
+CREATE TABLE mergetree_00673 (x UInt64) ENGINE = MergeTree ORDER BY x;
+INSERT INTO mergetree_00673 VALUES (1);
-SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM mergetree WHERE x IN (SELECT * FROM numbers(10000000))))))))))));
+SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM mergetree_00673 WHERE x IN (SELECT * FROM numbers(10000000))))))))))));
SET force_primary_key = 1;
-SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM mergetree WHERE x IN (SELECT * FROM numbers(10000000))))))))))));
+SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM mergetree_00673 WHERE x IN (SELECT * FROM numbers(10000000))))))))))));
-DROP TABLE mergetree;
+DROP TABLE mergetree_00673;
diff --git a/dbms/tests/queries/0_stateless/00688_low_cardinality_in.sql b/dbms/tests/queries/0_stateless/00688_low_cardinality_in.sql
index d8f59f57dce..b3ae940e155 100644
--- a/dbms/tests/queries/0_stateless/00688_low_cardinality_in.sql
+++ b/dbms/tests/queries/0_stateless/00688_low_cardinality_in.sql
@@ -1,10 +1,10 @@
-drop table if exists lc;
-create table lc (str StringWithDictionary, val UInt8WithDictionary) engine = MergeTree order by tuple();
-insert into lc values ('a', 1), ('b', 2);
-select str, str in ('a', 'd') from lc;
-select val, val in (1, 3) from lc;
-select str, str in (select arrayJoin(['a', 'd'])) from lc;
-select val, val in (select arrayJoin([1, 3])) from lc;
-select str, str in (select str from lc) from lc;
-select val, val in (select val from lc) from lc;
-drop table if exists lc;
+drop table if exists lc_00688;
+create table lc_00688 (str StringWithDictionary, val UInt8WithDictionary) engine = MergeTree order by tuple();
+insert into lc_00688 values ('a', 1), ('b', 2);
+select str, str in ('a', 'd') from lc_00688;
+select val, val in (1, 3) from lc_00688;
+select str, str in (select arrayJoin(['a', 'd'])) from lc_00688;
+select val, val in (select arrayJoin([1, 3])) from lc_00688;
+select str, str in (select str from lc_00688) from lc_00688;
+select val, val in (select val from lc_00688) from lc_00688;
+drop table if exists lc_00688;
diff --git a/dbms/tests/queries/0_stateless/00693_max_block_size_system_tables_columns.reference b/dbms/tests/queries/0_stateless/00693_max_block_size_system_tables_columns.reference
index 1caad944cb7..77e2334353a 100644
--- a/dbms/tests/queries/0_stateless/00693_max_block_size_system_tables_columns.reference
+++ b/dbms/tests/queries/0_stateless/00693_max_block_size_system_tables_columns.reference
@@ -2,7 +2,7 @@
1
1
1
- t Memory 1 0000-00-00 00:00:00 [] [] Memory
+ t_00693 Memory 1 0000-00-00 00:00:00 [] [] Memory
1
1
1
diff --git a/dbms/tests/queries/0_stateless/00693_max_block_size_system_tables_columns.sql b/dbms/tests/queries/0_stateless/00693_max_block_size_system_tables_columns.sql
index 585abb1296c..b0d88e3d881 100644
--- a/dbms/tests/queries/0_stateless/00693_max_block_size_system_tables_columns.sql
+++ b/dbms/tests/queries/0_stateless/00693_max_block_size_system_tables_columns.sql
@@ -3,8 +3,8 @@ SELECT avg(blockSize()) <= 10 FROM system.tables LIMIT 10 SETTINGS max_block_siz
SELECT (SELECT count() FROM system.tables SETTINGS max_block_size = 10) = (SELECT count() FROM system.tables SETTINGS max_block_size = 9);
SELECT (SELECT count() FROM system.tables SETTINGS max_block_size = 100) = (SELECT count() FROM system.tables SETTINGS max_block_size = 1000);
-CREATE TEMPORARY TABLE t (x UInt8);
-SELECT * FROM system.tables WHERE is_temporary;
+CREATE TEMPORARY TABLE t_00693 (x UInt8);
+SELECT * FROM system.tables WHERE is_temporary AND name='t_00693';
SELECT avg(blockSize()) <= 10000 FROM system.columns SETTINGS max_block_size = 10;
SELECT avg(blockSize()) <= 10000 FROM system.columns LIMIT 10 SETTINGS max_block_size = 10;
diff --git a/dbms/tests/queries/0_stateless/00698_validate_array_sizes_for_nested.sql b/dbms/tests/queries/0_stateless/00698_validate_array_sizes_for_nested.sql
index 1e5b2a45d1a..8bdc7c4c1a1 100644
--- a/dbms/tests/queries/0_stateless/00698_validate_array_sizes_for_nested.sql
+++ b/dbms/tests/queries/0_stateless/00698_validate_array_sizes_for_nested.sql
@@ -1,12 +1,12 @@
SET send_logs_level = 'none';
-DROP TABLE IF EXISTS mergetree;
-CREATE TABLE mergetree (k UInt32, `n.x` Array(UInt64), `n.y` Array(UInt64)) ENGINE = MergeTree ORDER BY k;
+DROP TABLE IF EXISTS mergetree_00698;
+CREATE TABLE mergetree_00698 (k UInt32, `n.x` Array(UInt64), `n.y` Array(UInt64)) ENGINE = MergeTree ORDER BY k;
-INSERT INTO mergetree VALUES (3, [], [1, 2, 3]), (1, [111], []), (2, [], []); -- { serverError 190 }
-SELECT * FROM mergetree;
+INSERT INTO mergetree_00698 VALUES (3, [], [1, 2, 3]), (1, [111], []), (2, [], []); -- { serverError 190 }
+SELECT * FROM mergetree_00698;
-INSERT INTO mergetree VALUES (3, [4, 5, 6], [1, 2, 3]), (1, [111], [222]), (2, [], []);
-SELECT * FROM mergetree;
+INSERT INTO mergetree_00698 VALUES (3, [4, 5, 6], [1, 2, 3]), (1, [111], [222]), (2, [], []);
+SELECT * FROM mergetree_00698;
-DROP TABLE mergetree;
+DROP TABLE mergetree_00698;
diff --git a/dbms/tests/queries/0_stateless/00700_decimal_aggregates.sql b/dbms/tests/queries/0_stateless/00700_decimal_aggregates.sql
index 7970c07b230..491ae2d603c 100644
--- a/dbms/tests/queries/0_stateless/00700_decimal_aggregates.sql
+++ b/dbms/tests/queries/0_stateless/00700_decimal_aggregates.sql
@@ -1,4 +1,3 @@
-CREATE DATABASE IF NOT EXISTS test;
DROP TABLE IF EXISTS decimal;
CREATE TABLE decimal
diff --git a/dbms/tests/queries/0_stateless/00700_decimal_arithm.sql b/dbms/tests/queries/0_stateless/00700_decimal_arithm.sql
index b04b0a1feed..8cdc81e21b4 100644
--- a/dbms/tests/queries/0_stateless/00700_decimal_arithm.sql
+++ b/dbms/tests/queries/0_stateless/00700_decimal_arithm.sql
@@ -1,4 +1,3 @@
-CREATE DATABASE IF NOT EXISTS test;
DROP TABLE IF EXISTS decimal;
CREATE TABLE IF NOT EXISTS decimal
diff --git a/dbms/tests/queries/0_stateless/00700_decimal_bounds.sql b/dbms/tests/queries/0_stateless/00700_decimal_bounds.sql
index 677979afb16..f454ce20fcd 100644
--- a/dbms/tests/queries/0_stateless/00700_decimal_bounds.sql
+++ b/dbms/tests/queries/0_stateless/00700_decimal_bounds.sql
@@ -1,4 +1,3 @@
-CREATE DATABASE IF NOT EXISTS test;
DROP TABLE IF EXISTS decimal;
CREATE TABLE IF NOT EXISTS decimal (x DECIMAL(10, -2)) ENGINE = Memory; -- { serverError 69 }
diff --git a/dbms/tests/queries/0_stateless/00700_decimal_compare.sql b/dbms/tests/queries/0_stateless/00700_decimal_compare.sql
index 92bb86736c4..24b4ce588e5 100644
--- a/dbms/tests/queries/0_stateless/00700_decimal_compare.sql
+++ b/dbms/tests/queries/0_stateless/00700_decimal_compare.sql
@@ -1,4 +1,3 @@
-CREATE DATABASE IF NOT EXISTS test;
DROP TABLE IF EXISTS decimal;
CREATE TABLE IF NOT EXISTS decimal
diff --git a/dbms/tests/queries/0_stateless/00700_decimal_complex_types.sql b/dbms/tests/queries/0_stateless/00700_decimal_complex_types.sql
index 64c22be058d..2d506b124a2 100644
--- a/dbms/tests/queries/0_stateless/00700_decimal_complex_types.sql
+++ b/dbms/tests/queries/0_stateless/00700_decimal_complex_types.sql
@@ -1,4 +1,3 @@
-CREATE DATABASE IF NOT EXISTS test;
DROP TABLE IF EXISTS decimal;
CREATE TABLE decimal
diff --git a/dbms/tests/queries/0_stateless/00700_decimal_defaults.sql b/dbms/tests/queries/0_stateless/00700_decimal_defaults.sql
index 32410847726..8694d866ed5 100644
--- a/dbms/tests/queries/0_stateless/00700_decimal_defaults.sql
+++ b/dbms/tests/queries/0_stateless/00700_decimal_defaults.sql
@@ -1,4 +1,3 @@
-CREATE DATABASE IF NOT EXISTS test;
DROP TABLE IF EXISTS decimal;
CREATE TABLE IF NOT EXISTS decimal
diff --git a/dbms/tests/queries/0_stateless/00700_decimal_formats.sql b/dbms/tests/queries/0_stateless/00700_decimal_formats.sql
index 16a47f5e9b1..45b2a5f1078 100644
--- a/dbms/tests/queries/0_stateless/00700_decimal_formats.sql
+++ b/dbms/tests/queries/0_stateless/00700_decimal_formats.sql
@@ -1,4 +1,3 @@
-CREATE DATABASE IF NOT EXISTS test;
DROP TABLE IF EXISTS decimal;
CREATE TABLE IF NOT EXISTS decimal
diff --git a/dbms/tests/queries/0_stateless/00700_decimal_in_keys.sql b/dbms/tests/queries/0_stateless/00700_decimal_in_keys.sql
index 9c52572a1f7..817d5e8556c 100644
--- a/dbms/tests/queries/0_stateless/00700_decimal_in_keys.sql
+++ b/dbms/tests/queries/0_stateless/00700_decimal_in_keys.sql
@@ -1,4 +1,3 @@
-CREATE DATABASE IF NOT EXISTS test;
DROP TABLE IF EXISTS decimal;
CREATE TABLE IF NOT EXISTS decimal
diff --git a/dbms/tests/queries/0_stateless/00700_decimal_null.sql b/dbms/tests/queries/0_stateless/00700_decimal_null.sql
index 84398182b43..dbd9ced4393 100644
--- a/dbms/tests/queries/0_stateless/00700_decimal_null.sql
+++ b/dbms/tests/queries/0_stateless/00700_decimal_null.sql
@@ -1,4 +1,3 @@
-CREATE DATABASE IF NOT EXISTS test;
DROP TABLE IF EXISTS decimal;
CREATE TABLE IF NOT EXISTS decimal
diff --git a/dbms/tests/queries/0_stateless/00701_join_default_strictness.sql b/dbms/tests/queries/0_stateless/00701_join_default_strictness.sql
index ad3dbde629a..f2381f54e75 100644
--- a/dbms/tests/queries/0_stateless/00701_join_default_strictness.sql
+++ b/dbms/tests/queries/0_stateless/00701_join_default_strictness.sql
@@ -1,4 +1,3 @@
-CREATE DATABASE IF NOT EXISTS test;
DROP TABLE IF EXISTS a1;
DROP TABLE IF EXISTS a2;
diff --git a/dbms/tests/queries/0_stateless/00712_prewhere_with_alias_and_virtual_column.sql b/dbms/tests/queries/0_stateless/00712_prewhere_with_alias_and_virtual_column.sql
index 60301fa8bf9..a6a394d48ac 100644
--- a/dbms/tests/queries/0_stateless/00712_prewhere_with_alias_and_virtual_column.sql
+++ b/dbms/tests/queries/0_stateless/00712_prewhere_with_alias_and_virtual_column.sql
@@ -1,5 +1,5 @@
-drop table if exists tab;
-create table tab (a UInt32, b UInt32 alias a + 1, c UInt32) engine = MergeTree order by tuple();
-insert into tab values (1, 2);
-select ignore(_part) from tab prewhere b = 2;
+drop table if exists tab_00712_1;
+create table tab_00712_1 (a UInt32, b UInt32 alias a + 1, c UInt32) engine = MergeTree order by tuple();
+insert into tab_00712_1 values (1, 2);
+select ignore(_part) from tab_00712_1 prewhere b = 2;
diff --git a/dbms/tests/queries/0_stateless/00712_prewhere_with_missing_columns.sql b/dbms/tests/queries/0_stateless/00712_prewhere_with_missing_columns.sql
index 56f6c273550..7652ebcb75d 100644
--- a/dbms/tests/queries/0_stateless/00712_prewhere_with_missing_columns.sql
+++ b/dbms/tests/queries/0_stateless/00712_prewhere_with_missing_columns.sql
@@ -1,14 +1,14 @@
-DROP TABLE IF EXISTS mergetree;
-CREATE TABLE mergetree (x UInt8, s String) ENGINE = MergeTree ORDER BY tuple();
+DROP TABLE IF EXISTS mergetree_00712;
+CREATE TABLE mergetree_00712 (x UInt8, s String) ENGINE = MergeTree ORDER BY tuple();
-INSERT INTO mergetree VALUES (1, 'Hello, world!');
-SELECT * FROM mergetree;
+INSERT INTO mergetree_00712 VALUES (1, 'Hello, world!');
+SELECT * FROM mergetree_00712;
-ALTER TABLE mergetree ADD COLUMN y UInt8 DEFAULT 0;
-INSERT INTO mergetree VALUES (2, 'Goodbye.', 3);
-SELECT * FROM mergetree ORDER BY x;
+ALTER TABLE mergetree_00712 ADD COLUMN y UInt8 DEFAULT 0;
+INSERT INTO mergetree_00712 VALUES (2, 'Goodbye.', 3);
+SELECT * FROM mergetree_00712 ORDER BY x;
-SELECT s FROM mergetree PREWHERE x AND y ORDER BY s;
-SELECT s, y FROM mergetree PREWHERE x AND y ORDER BY s;
+SELECT s FROM mergetree_00712 PREWHERE x AND y ORDER BY s;
+SELECT s, y FROM mergetree_00712 PREWHERE x AND y ORDER BY s;
-DROP TABLE mergetree;
+DROP TABLE mergetree_00712;
diff --git a/dbms/tests/queries/0_stateless/00712_prewhere_with_missing_columns_2.sql b/dbms/tests/queries/0_stateless/00712_prewhere_with_missing_columns_2.sql
index 2c27f5f5bb6..fac4552ae6f 100644
--- a/dbms/tests/queries/0_stateless/00712_prewhere_with_missing_columns_2.sql
+++ b/dbms/tests/queries/0_stateless/00712_prewhere_with_missing_columns_2.sql
@@ -1,13 +1,12 @@
-create database if not exists test;
-drop table if exists t;
-create table t (a Int32, b Int32) engine = MergeTree partition by (a,b) order by (a);
+drop table if exists t_00712_1;
+create table t_00712_1 (a Int32, b Int32) engine = MergeTree partition by (a,b) order by (a);
-insert into t values (1, 1);
-alter table t add column c Int32;
+insert into t_00712_1 values (1, 1);
+alter table t_00712_1 add column c Int32;
-select b from t prewhere a < 1000;
-select c from t where a < 1000;
-select c from t prewhere a < 1000;
+select b from t_00712_1 prewhere a < 1000;
+select c from t_00712_1 where a < 1000;
+select c from t_00712_1 prewhere a < 1000;
-drop table t;
+drop table t_00712_1;
diff --git a/dbms/tests/queries/0_stateless/00712_prewhere_with_sampling.sql b/dbms/tests/queries/0_stateless/00712_prewhere_with_sampling.sql
index 22e05e2ff12..4e94ff8f43f 100644
--- a/dbms/tests/queries/0_stateless/00712_prewhere_with_sampling.sql
+++ b/dbms/tests/queries/0_stateless/00712_prewhere_with_sampling.sql
@@ -1,8 +1,8 @@
-drop table if exists tab;
-create table tab (a UInt32, b UInt32) engine = MergeTree order by b % 2 sample by b % 2;
-insert into tab values (1, 2), (1, 4);
-select a from tab sample 1 / 2 prewhere b = 2;
-drop table if exists tab;
+drop table if exists tab_00712_2;
+create table tab_00712_2 (a UInt32, b UInt32) engine = MergeTree order by b % 2 sample by b % 2;
+insert into tab_00712_2 values (1, 2), (1, 4);
+select a from tab_00712_2 sample 1 / 2 prewhere b = 2;
+drop table if exists tab_00712_2;
DROP TABLE IF EXISTS sample_prewhere;
CREATE TABLE sample_prewhere (CounterID UInt32, UserID UInt64) ENGINE = MergeTree ORDER BY UserID SAMPLE BY UserID;
diff --git a/dbms/tests/queries/0_stateless/00712_prewhere_with_sampling_and_alias.sql b/dbms/tests/queries/0_stateless/00712_prewhere_with_sampling_and_alias.sql
index 2e837c2be6f..d011e1b368d 100644
--- a/dbms/tests/queries/0_stateless/00712_prewhere_with_sampling_and_alias.sql
+++ b/dbms/tests/queries/0_stateless/00712_prewhere_with_sampling_and_alias.sql
@@ -1,6 +1,6 @@
-drop table if exists t;
-create table t (date Date, counter UInt64, sampler UInt64, alias_col alias date + 1) engine = MergeTree(date, intHash32(sampler), (counter, date, intHash32(sampler)), 8192);
-insert into t values ('2018-01-01', 1, 1);
-select alias_col from t sample 1 / 2 where date = '2018-01-01' and counter = 1 and sampler = 1;
-drop table if exists t;
+drop table if exists t_00712_2;
+create table t_00712_2 (date Date, counter UInt64, sampler UInt64, alias_col alias date + 1) engine = MergeTree(date, intHash32(sampler), (counter, date, intHash32(sampler)), 8192);
+insert into t_00712_2 values ('2018-01-01', 1, 1);
+select alias_col from t_00712_2 sample 1 / 2 where date = '2018-01-01' and counter = 1 and sampler = 1;
+drop table if exists t_00712_2;
diff --git a/dbms/tests/queries/0_stateless/00717_low_cardinaliry_group_by.sql b/dbms/tests/queries/0_stateless/00717_low_cardinaliry_group_by.sql
index e9ec65a144d..02915d4e611 100644
--- a/dbms/tests/queries/0_stateless/00717_low_cardinaliry_group_by.sql
+++ b/dbms/tests/queries/0_stateless/00717_low_cardinaliry_group_by.sql
@@ -1,22 +1,21 @@
-drop table if exists tab;
-create table tab (a String, b StringWithDictionary) engine = MergeTree order by a;
-insert into tab values ('a_1', 'b_1'), ('a_2', 'b_2');
-select count() from tab;
-select a from tab group by a order by a;
-select b from tab group by b order by b;
-select length(b) as l from tab group by l;
-select sum(length(a)), b from tab group by b order by b;
-select sum(length(b)), a from tab group by a order by a;
-select a, b from tab group by a, b order by a, b;
-select sum(length(a)) from tab group by b, b || '_';
-select length(b) as l from tab group by l;
-select length(b) as l from tab group by l, l + 1;
-select length(b) as l from tab group by l, l + 1, l + 2;
-select length(b) as l from tab group by l, l + 1, l + 2, l + 3;
-select length(b) as l from tab group by l, l + 1, l + 2, l + 3, l + 4;
-select length(b) as l from tab group by l, l + 1, l + 2, l + 3, l + 4, l + 5;
-select a, length(b) as l from tab group by a, l, l + 1 order by a;
-select b, length(b) as l from tab group by b, l, l + 1 order by b;
-select a, b, length(b) as l from tab group by a, b, l, l + 1 order by a, b;
-drop table if exists tab;
-
+drop table if exists tab_00717;
+create table tab_00717 (a String, b StringWithDictionary) engine = MergeTree order by a;
+insert into tab_00717 values ('a_1', 'b_1'), ('a_2', 'b_2');
+select count() from tab_00717;
+select a from tab_00717 group by a order by a;
+select b from tab_00717 group by b order by b;
+select length(b) as l from tab_00717 group by l;
+select sum(length(a)), b from tab_00717 group by b order by b;
+select sum(length(b)), a from tab_00717 group by a order by a;
+select a, b from tab_00717 group by a, b order by a, b;
+select sum(length(a)) from tab_00717 group by b, b || '_';
+select length(b) as l from tab_00717 group by l;
+select length(b) as l from tab_00717 group by l, l + 1;
+select length(b) as l from tab_00717 group by l, l + 1, l + 2;
+select length(b) as l from tab_00717 group by l, l + 1, l + 2, l + 3;
+select length(b) as l from tab_00717 group by l, l + 1, l + 2, l + 3, l + 4;
+select length(b) as l from tab_00717 group by l, l + 1, l + 2, l + 3, l + 4, l + 5;
+select a, length(b) as l from tab_00717 group by a, l, l + 1 order by a;
+select b, length(b) as l from tab_00717 group by b, l, l + 1 order by b;
+select a, b, length(b) as l from tab_00717 group by a, b, l, l + 1 order by a, b;
+drop table if exists tab_00717;
diff --git a/dbms/tests/queries/0_stateless/00718_low_cardinaliry_alter.sql b/dbms/tests/queries/0_stateless/00718_low_cardinaliry_alter.sql
index df89ff9d91f..45b647584ad 100644
--- a/dbms/tests/queries/0_stateless/00718_low_cardinaliry_alter.sql
+++ b/dbms/tests/queries/0_stateless/00718_low_cardinaliry_alter.sql
@@ -1,17 +1,17 @@
-create table tab (a String, b LowCardinality(UInt32)) engine = MergeTree order by a;
-insert into tab values ('a', 1);
-select *, toTypeName(b) from tab;
-alter table tab modify column b UInt32;
-select *, toTypeName(b) from tab;
-alter table tab modify column b LowCardinality(UInt32);
-select *, toTypeName(b) from tab;
-alter table tab modify column b StringWithDictionary;
-select *, toTypeName(b) from tab;
-alter table tab modify column b LowCardinality(UInt32);
-select *, toTypeName(b) from tab;
-alter table tab modify column b String;
-select *, toTypeName(b) from tab;
-alter table tab modify column b LowCardinality(UInt32);
-select *, toTypeName(b) from tab;
-drop table if exists tab;
-
+drop table if exists tab_00718;
+create table tab_00718 (a String, b LowCardinality(UInt32)) engine = MergeTree order by a;
+insert into tab_00718 values ('a', 1);
+select *, toTypeName(b) from tab_00718;
+alter table tab_00718 modify column b UInt32;
+select *, toTypeName(b) from tab_00718;
+alter table tab_00718 modify column b LowCardinality(UInt32);
+select *, toTypeName(b) from tab_00718;
+alter table tab_00718 modify column b StringWithDictionary;
+select *, toTypeName(b) from tab_00718;
+alter table tab_00718 modify column b LowCardinality(UInt32);
+select *, toTypeName(b) from tab_00718;
+alter table tab_00718 modify column b String;
+select *, toTypeName(b) from tab_00718;
+alter table tab_00718 modify column b LowCardinality(UInt32);
+select *, toTypeName(b) from tab_00718;
+drop table if exists tab_00718;
diff --git a/dbms/tests/queries/0_stateless/00725_join_on_bug_2.sql b/dbms/tests/queries/0_stateless/00725_join_on_bug_2.sql
index c0b33c11d1a..047dd5c7c7d 100644
--- a/dbms/tests/queries/0_stateless/00725_join_on_bug_2.sql
+++ b/dbms/tests/queries/0_stateless/00725_join_on_bug_2.sql
@@ -1,23 +1,23 @@
-drop table if exists t;
-drop table if exists s;
+drop table if exists t_00725_2;
+drop table if exists s_00725_2;
-create table t(a Int64, b Int64) engine = TinyLog;
-insert into t values(1,1);
-insert into t values(2,2);
-create table s(a Int64, b Int64) engine = TinyLog;
-insert into s values(1,1);
+create table t_00725_2(a Int64, b Int64) engine = TinyLog;
+insert into t_00725_2 values(1,1);
+insert into t_00725_2 values(2,2);
+create table s_00725_2(a Int64, b Int64) engine = TinyLog;
+insert into s_00725_2 values(1,1);
-select a, b, s_a, s_b from t all left join (select a,b,a s_a, b s_b from s) using (a,b);
+select a, b, s_a, s_b from t_00725_2 all left join (select a,b,a s_a, b s_b from s_00725_2) using (a,b);
select '-';
-select t.*, s.* from t all left join s using (a,b);
+select t_00725_2.*, s_00725_2.* from t_00725_2 all left join s_00725_2 using (a,b);
select '-';
-select a,b,s_a,s_b from t all left join (select a, b, a s_a, b s_b from s) s on (s.a = t.a and s.b = t.b);
+select a,b,s_a,s_b from t_00725_2 all left join (select a, b, a s_a, b s_b from s_00725_2) s_00725_2 on (s_00725_2.a = t_00725_2.a and s_00725_2.b = t_00725_2.b);
select '-';
-select * from t all left join (select a s_a, b s_b from s) on (s_a = t.a and s_b = t.b);
+select * from t_00725_2 all left join (select a s_a, b s_b from s_00725_2) on (s_a = t_00725_2.a and s_b = t_00725_2.b);
select '-';
-select a,b,s_a,s_b from t all left join (select a,b, a s_a, b s_b from s) on (s_a = t.a and s_b = t.b);
+select a,b,s_a,s_b from t_00725_2 all left join (select a,b, a s_a, b s_b from s_00725_2) on (s_a = t_00725_2.a and s_b = t_00725_2.b);
select '-';
-select t.*, s.* from t all left join s on (s.a = t.a and s.b = t.b);
+select t_00725_2.*, s_00725_2.* from t_00725_2 all left join s_00725_2 on (s_00725_2.a = t_00725_2.a and s_00725_2.b = t_00725_2.b);
-drop table if exists t;
-drop table if exists s;
+drop table if exists t_00725_2;
+drop table if exists s_00725_2;
diff --git a/dbms/tests/queries/0_stateless/00725_join_on_bug_3.sql b/dbms/tests/queries/0_stateless/00725_join_on_bug_3.sql
index ad9ff23eae1..08b39d899cf 100644
--- a/dbms/tests/queries/0_stateless/00725_join_on_bug_3.sql
+++ b/dbms/tests/queries/0_stateless/00725_join_on_bug_3.sql
@@ -1,14 +1,14 @@
-drop table if exists t;
-drop table if exists z;
+drop table if exists t_00725_3;
+drop table if exists z_00725_3;
-create table t(a Int64, b Int64) engine = TinyLog;
-insert into t values(1,1);
-insert into t values(2,2);
-create table z(c Int64, d Int64, e Int64) engine = TinyLog;
-insert into z values(1,1,1);
+create table t_00725_3(a Int64, b Int64) engine = TinyLog;
+insert into t_00725_3 values(1,1);
+insert into t_00725_3 values(2,2);
+create table z_00725_3(c Int64, d Int64, e Int64) engine = TinyLog;
+insert into z_00725_3 values(1,1,1);
-select * from t all left join z on (z.c = t.a and z.d = t.b);
+select * from t_00725_3 all left join z_00725_3 on (z_00725_3.c = t_00725_3.a and z_00725_3.d = t_00725_3.b);
-drop table if exists t;
-drop table if exists z;
+drop table if exists t_00725_3;
+drop table if exists z_00725_3;
diff --git a/dbms/tests/queries/0_stateless/00725_join_on_bug_4.sql b/dbms/tests/queries/0_stateless/00725_join_on_bug_4.sql
index 94c6931471e..a456408bb61 100644
--- a/dbms/tests/queries/0_stateless/00725_join_on_bug_4.sql
+++ b/dbms/tests/queries/0_stateless/00725_join_on_bug_4.sql
@@ -1,13 +1,13 @@
-drop table if exists t;
-drop table if exists s;
+drop table if exists t_00725_4;
+drop table if exists s_00725_4;
-create table t(a Int64, b Int64, c String) engine = TinyLog;
-insert into t values(1,1,'a'),(2,2,'b');
-create table s(a Int64, b Int64, c String) engine = TinyLog;
-insert into s values(1,1,'a');
+create table t_00725_4(a Int64, b Int64, c String) engine = TinyLog;
+insert into t_00725_4 values(1,1,'a'),(2,2,'b');
+create table s_00725_4(a Int64, b Int64, c String) engine = TinyLog;
+insert into s_00725_4 values(1,1,'a');
-select t.* from t all left join s on (s.a = t.a and s.b = t.b) where s.a = 0 and s.b = 0;
+select t_00725_4.* from t_00725_4 all left join s_00725_4 on (s_00725_4.a = t_00725_4.a and s_00725_4.b = t_00725_4.b) where s_00725_4.a = 0 and s_00725_4.b = 0;
-drop table if exists t;
-drop table if exists s;
+drop table if exists t_00725_4;
+drop table if exists s_00725_4;
diff --git a/dbms/tests/queries/0_stateless/00730_unicode_terminal_format.sql b/dbms/tests/queries/0_stateless/00730_unicode_terminal_format.sql
index 546ba4d68fe..f01c5defdfb 100644
--- a/dbms/tests/queries/0_stateless/00730_unicode_terminal_format.sql
+++ b/dbms/tests/queries/0_stateless/00730_unicode_terminal_format.sql
@@ -1,4 +1,3 @@
-CREATE DATABASE IF NOT EXISTS test;
DROP TABLE IF EXISTS unicode;
CREATE TABLE unicode(c1 String, c2 String) ENGINE = Memory;
diff --git a/dbms/tests/queries/0_stateless/00732_decimal_summing_merge_tree.sql b/dbms/tests/queries/0_stateless/00732_decimal_summing_merge_tree.sql
index 536983e0783..1524776c16e 100644
--- a/dbms/tests/queries/0_stateless/00732_decimal_summing_merge_tree.sql
+++ b/dbms/tests/queries/0_stateless/00732_decimal_summing_merge_tree.sql
@@ -1,4 +1,3 @@
-CREATE DATABASE IF NOT EXISTS test;
DROP TABLE IF EXISTS decimal_sum;
CREATE TABLE decimal_sum
(
diff --git a/dbms/tests/queries/0_stateless/00735_or_expr_optimize_bug.sql b/dbms/tests/queries/0_stateless/00735_or_expr_optimize_bug.sql
index 2f10146472a..ef8b541e74b 100644
--- a/dbms/tests/queries/0_stateless/00735_or_expr_optimize_bug.sql
+++ b/dbms/tests/queries/0_stateless/00735_or_expr_optimize_bug.sql
@@ -1,4 +1,3 @@
-CREATE DATABASE IF NOT EXISTS test;
DROP TABLE IF EXISTS or_expr_bug;
CREATE TABLE or_expr_bug (a UInt64, b UInt64) ENGINE = Memory;
diff --git a/dbms/tests/queries/0_stateless/00738_lock_for_inner_table.sh b/dbms/tests/queries/0_stateless/00738_lock_for_inner_table.sh
index 00a817ea1d9..9e20c9469a3 100755
--- a/dbms/tests/queries/0_stateless/00738_lock_for_inner_table.sh
+++ b/dbms/tests/queries/0_stateless/00738_lock_for_inner_table.sh
@@ -4,12 +4,12 @@ set -e
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
. $CURDIR/../shell_config.sh
-echo "DROP TABLE IF EXISTS tab;
+echo "DROP TABLE IF EXISTS tab_00738;
DROP TABLE IF EXISTS mv;
-CREATE TABLE tab(a Int) ENGINE = Log;
-CREATE MATERIALIZED VIEW mv ENGINE = Log AS SELECT a FROM tab;" | ${CLICKHOUSE_CLIENT} -n
+CREATE TABLE tab_00738(a Int) ENGINE = Log;
+CREATE MATERIALIZED VIEW mv ENGINE = Log AS SELECT a FROM tab_00738;" | ${CLICKHOUSE_CLIENT} -n
-${CLICKHOUSE_CLIENT} --query "INSERT INTO tab SELECT number FROM numbers(10000000)" &
+${CLICKHOUSE_CLIENT} --query "INSERT INTO tab_00738 SELECT number FROM numbers(10000000)" &
function drop()
{
@@ -21,5 +21,5 @@ drop &
wait
-echo "DROP TABLE IF EXISTS tab;
+echo "DROP TABLE IF EXISTS tab_00738;
DROP TABLE IF EXISTS mv;" | ${CLICKHOUSE_CLIENT} -n
diff --git a/dbms/tests/queries/0_stateless/00751_default_databasename_for_view.reference b/dbms/tests/queries/0_stateless/00751_default_databasename_for_view.reference
index a7dc6f0b32a..35217410c2d 100644
--- a/dbms/tests/queries/0_stateless/00751_default_databasename_for_view.reference
+++ b/dbms/tests/queries/0_stateless/00751_default_databasename_for_view.reference
@@ -1,4 +1,4 @@
-CREATE MATERIALIZED VIEW test.t_mv (`date` Date, `platform` Enum8('a' = 0, 'b' = 1), `app` Enum8('a' = 0, 'b' = 1)) ENGINE = MergeTree ORDER BY date SETTINGS index_granularity = 8192 AS SELECT date, platform, app FROM test.t WHERE (app = (SELECT min(app) FROM test.u )) AND (platform = (SELECT (SELECT min(platform) FROM test.v )))
+CREATE MATERIALIZED VIEW test.t_mv_00751 (`date` Date, `platform` Enum8('a' = 0, 'b' = 1), `app` Enum8('a' = 0, 'b' = 1)) ENGINE = MergeTree ORDER BY date SETTINGS index_granularity = 8192 AS SELECT date, platform, app FROM test.t_00751 WHERE (app = (SELECT min(app) FROM test.u_00751 )) AND (platform = (SELECT (SELECT min(platform) FROM test.v_00751 )))
2000-01-01 a a
2000-01-02 b b
2000-01-03 a a
diff --git a/dbms/tests/queries/0_stateless/00751_default_databasename_for_view.sql b/dbms/tests/queries/0_stateless/00751_default_databasename_for_view.sql
index e2ce7b2a094..bb43b99b461 100644
--- a/dbms/tests/queries/0_stateless/00751_default_databasename_for_view.sql
+++ b/dbms/tests/queries/0_stateless/00751_default_databasename_for_view.sql
@@ -1,44 +1,44 @@
USE test;
-DROP TABLE IF EXISTS t;
-DROP TABLE IF EXISTS t_mv;
-DROP TABLE IF EXISTS u;
-DROP TABLE IF EXISTS v;
+DROP TABLE IF EXISTS t_00751;
+DROP TABLE IF EXISTS t_mv_00751;
+DROP TABLE IF EXISTS u_00751;
+DROP TABLE IF EXISTS v_00751;
-CREATE TABLE t
+CREATE TABLE t_00751
(
date Date,
platform Enum8('a' = 0, 'b' = 1),
app Enum8('a' = 0, 'b' = 1)
) ENGINE = Memory;
-CREATE TABLE u (app Enum8('a' = 0, 'b' = 1)) ENGINE = Memory;
-CREATE TABLE v (platform Enum8('a' = 0, 'b' = 1)) ENGINE = Memory;
+CREATE TABLE u_00751 (app Enum8('a' = 0, 'b' = 1)) ENGINE = Memory;
+CREATE TABLE v_00751 (platform Enum8('a' = 0, 'b' = 1)) ENGINE = Memory;
-INSERT INTO u VALUES ('b');
-INSERT INTO v VALUES ('b');
+INSERT INTO u_00751 VALUES ('b');
+INSERT INTO v_00751 VALUES ('b');
-CREATE MATERIALIZED VIEW t_mv ENGINE = MergeTree ORDER BY date
- AS SELECT date, platform, app FROM t
- WHERE app = (SELECT min(app) from u) AND platform = (SELECT (SELECT min(platform) from v));
+CREATE MATERIALIZED VIEW t_mv_00751 ENGINE = MergeTree ORDER BY date
+ AS SELECT date, platform, app FROM t_00751
+ WHERE app = (SELECT min(app) from u_00751) AND platform = (SELECT (SELECT min(platform) from v_00751));
-SHOW CREATE TABLE test.t_mv FORMAT TabSeparatedRaw;
+SHOW CREATE TABLE test.t_mv_00751 FORMAT TabSeparatedRaw;
USE default;
-DETACH TABLE test.t_mv;
-ATTACH TABLE test.t_mv;
+DETACH TABLE test.t_mv_00751;
+ATTACH TABLE test.t_mv_00751;
-INSERT INTO test.t VALUES ('2000-01-01', 'a', 'a') ('2000-01-02', 'b', 'b');
+INSERT INTO test.t_00751 VALUES ('2000-01-01', 'a', 'a') ('2000-01-02', 'b', 'b');
-INSERT INTO test.u VALUES ('a');
-INSERT INTO test.v VALUES ('a');
+INSERT INTO test.u_00751 VALUES ('a');
+INSERT INTO test.v_00751 VALUES ('a');
-INSERT INTO test.t VALUES ('2000-01-03', 'a', 'a') ('2000-01-04', 'b', 'b');
+INSERT INTO test.t_00751 VALUES ('2000-01-03', 'a', 'a') ('2000-01-04', 'b', 'b');
-SELECT * FROM test.t ORDER BY date;
-SELECT * FROM test.t_mv ORDER BY date;
+SELECT * FROM test.t_00751 ORDER BY date;
+SELECT * FROM test.t_mv_00751 ORDER BY date;
-DROP TABLE test.t;
-DROP TABLE test.t_mv;
-DROP TABLE test.u;
-DROP TABLE test.v;
+DROP TABLE test.t_00751;
+DROP TABLE test.t_mv_00751;
+DROP TABLE test.u_00751;
+DROP TABLE test.v_00751;
diff --git a/dbms/tests/queries/0_stateless/00752_low_cardinality_mv_1.sql b/dbms/tests/queries/0_stateless/00752_low_cardinality_mv_1.sql
index 733fce41e02..60cc30ec2c3 100644
--- a/dbms/tests/queries/0_stateless/00752_low_cardinality_mv_1.sql
+++ b/dbms/tests/queries/0_stateless/00752_low_cardinality_mv_1.sql
@@ -1,15 +1,15 @@
-drop table if exists lc;
-drop table if exists lc_mv;
+drop table if exists lc_00752;
+drop table if exists lc_mv_00752;
-create table lc (str StringWithDictionary) engine = MergeTree order by tuple();
+create table lc_00752 (str StringWithDictionary) engine = MergeTree order by tuple();
-insert into lc values ('a'), ('bbb'), ('ab'), ('accccc'), ('baasddas'), ('bcde');
+insert into lc_00752 values ('a'), ('bbb'), ('ab'), ('accccc'), ('baasddas'), ('bcde');
-CREATE MATERIALIZED VIEW lc_mv ENGINE = AggregatingMergeTree() ORDER BY tuple() populate AS SELECT substring(str, 1, 1) as letter, min(length(str)) AS min_len, max(length(str)) AS max_len FROM lc GROUP BY substring(str, 1, 1);
+CREATE MATERIALIZED VIEW lc_mv_00752 ENGINE = AggregatingMergeTree() ORDER BY tuple() populate AS SELECT substring(str, 1, 1) as letter, min(length(str)) AS min_len, max(length(str)) AS max_len FROM lc_00752 GROUP BY substring(str, 1, 1);
-insert into lc values ('a'), ('bbb'), ('ab'), ('accccc'), ('baasddas'), ('bcde');
-select * from lc_mv order by letter;
+insert into lc_00752 values ('a'), ('bbb'), ('ab'), ('accccc'), ('baasddas'), ('bcde');
+select * from lc_mv_00752 order by letter;
-drop table if exists lc;
-drop table if exists lc_mv;
+drop table if exists lc_00752;
+drop table if exists lc_mv_00752;
diff --git a/dbms/tests/queries/0_stateless/00753_alter_destination_for_storage_buffer.sql b/dbms/tests/queries/0_stateless/00753_alter_destination_for_storage_buffer.sql
index 2193f5984b9..578e30dbbb9 100644
--- a/dbms/tests/queries/0_stateless/00753_alter_destination_for_storage_buffer.sql
+++ b/dbms/tests/queries/0_stateless/00753_alter_destination_for_storage_buffer.sql
@@ -1,32 +1,32 @@
-DROP TABLE IF EXISTS test.dst;
-DROP TABLE IF EXISTS test.buffer;
+DROP TABLE IF EXISTS test.dst_00753;
+DROP TABLE IF EXISTS test.buffer_00753;
SET send_logs_level = 'error';
-CREATE TABLE test.dst (x UInt64, y UInt64) ENGINE = MergeTree ORDER BY tuple();
-CREATE TABLE test.buffer (x UInt64, y UInt64) ENGINE = Buffer(test, dst, 1, 99999, 99999, 1, 1, 99999, 99999);
+CREATE TABLE test.dst_00753 (x UInt64, y UInt64) ENGINE = MergeTree ORDER BY tuple();
+CREATE TABLE test.buffer_00753 (x UInt64, y UInt64) ENGINE = Buffer(test, dst_00753, 1, 99999, 99999, 1, 1, 99999, 99999);
-INSERT INTO test.buffer VALUES (1, 100);
-INSERT INTO test.buffer VALUES (2, 200);
-INSERT INTO test.buffer VALUES (3, 300);
+INSERT INTO test.buffer_00753 VALUES (1, 100);
+INSERT INTO test.buffer_00753 VALUES (2, 200);
+INSERT INTO test.buffer_00753 VALUES (3, 300);
SELECT 'init';
-SELECT * FROM test.dst ORDER BY x;
+SELECT * FROM test.dst_00753 ORDER BY x;
SELECT '-';
-SELECT * FROM test.buffer ORDER BY x;
+SELECT * FROM test.buffer_00753 ORDER BY x;
-ALTER TABLE test.dst DROP COLUMN x, MODIFY COLUMN y String, ADD COLUMN z String DEFAULT 'DEFZ';
+ALTER TABLE test.dst_00753 DROP COLUMN x, MODIFY COLUMN y String, ADD COLUMN z String DEFAULT 'DEFZ';
-INSERT INTO test.buffer VALUES (4, 400);
+INSERT INTO test.buffer_00753 VALUES (4, 400);
SELECT 'alt';
-SELECT * FROM test.dst ORDER BY y;
+SELECT * FROM test.dst_00753 ORDER BY y;
SELECT '-';
-SELECT * FROM test.buffer ORDER BY y;
+SELECT * FROM test.buffer_00753 ORDER BY y;
-OPTIMIZE TABLE test.buffer;
+OPTIMIZE TABLE test.buffer_00753;
SELECT 'opt';
-SELECT * FROM test.dst ORDER BY y;
+SELECT * FROM test.dst_00753 ORDER BY y;
SELECT '-';
-SELECT * FROM test.buffer ORDER BY y;
+SELECT * FROM test.buffer_00753 ORDER BY y;
SET send_logs_level = 'warning';
-DROP TABLE IF EXISTS test.dst;
-DROP TABLE IF EXISTS test.buffer;
+DROP TABLE IF EXISTS test.dst_00753;
+DROP TABLE IF EXISTS test.buffer_00753;
diff --git a/dbms/tests/queries/0_stateless/00753_system_columns_and_system_tables.sql b/dbms/tests/queries/0_stateless/00753_system_columns_and_system_tables.sql
index a0f8f69fd4b..666f9c6c3f6 100644
--- a/dbms/tests/queries/0_stateless/00753_system_columns_and_system_tables.sql
+++ b/dbms/tests/queries/0_stateless/00753_system_columns_and_system_tables.sql
@@ -1,4 +1,3 @@
-CREATE DATABASE IF NOT EXISTS test;
DROP TABLE IF EXISTS check_system_tables;
-- Check MergeTree declaration in new format
diff --git a/dbms/tests/queries/0_stateless/00754_distributed_optimize_skip_select_on_unused_shards.sh b/dbms/tests/queries/0_stateless/00754_distributed_optimize_skip_select_on_unused_shards.sh
index 745334a4a99..c41e648ebb4 100755
--- a/dbms/tests/queries/0_stateless/00754_distributed_optimize_skip_select_on_unused_shards.sh
+++ b/dbms/tests/queries/0_stateless/00754_distributed_optimize_skip_select_on_unused_shards.sh
@@ -3,16 +3,16 @@
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
. $CURDIR/../shell_config.sh
-${CLICKHOUSE_CLIENT} --query "DROP TABLE IF EXISTS mergetree;"
+${CLICKHOUSE_CLIENT} --query "DROP TABLE IF EXISTS mergetree_00754;"
${CLICKHOUSE_CLIENT} --query "DROP TABLE IF EXISTS distributed;"
-${CLICKHOUSE_CLIENT} --query "CREATE TABLE mergetree (a Int64, b Int64, c Int64) ENGINE = MergeTree ORDER BY (a, b);"
-${CLICKHOUSE_CLIENT} --query "CREATE TABLE distributed AS mergetree ENGINE = Distributed(test_unavailable_shard, ${CLICKHOUSE_DATABASE}, mergetree, jumpConsistentHash(a+b, 2));"
+${CLICKHOUSE_CLIENT} --query "CREATE TABLE mergetree_00754 (a Int64, b Int64, c Int64) ENGINE = MergeTree ORDER BY (a, b);"
+${CLICKHOUSE_CLIENT} --query "CREATE TABLE distributed AS mergetree_00754 ENGINE = Distributed(test_unavailable_shard, ${CLICKHOUSE_DATABASE}, mergetree_00754, jumpConsistentHash(a+b, 2));"
-${CLICKHOUSE_CLIENT} --query "INSERT INTO mergetree VALUES (0, 0, 0);"
-${CLICKHOUSE_CLIENT} --query "INSERT INTO mergetree VALUES (1, 0, 0);"
-${CLICKHOUSE_CLIENT} --query "INSERT INTO mergetree VALUES (0, 1, 1);"
-${CLICKHOUSE_CLIENT} --query "INSERT INTO mergetree VALUES (1, 1, 1);"
+${CLICKHOUSE_CLIENT} --query "INSERT INTO mergetree_00754 VALUES (0, 0, 0);"
+${CLICKHOUSE_CLIENT} --query "INSERT INTO mergetree_00754 VALUES (1, 0, 0);"
+${CLICKHOUSE_CLIENT} --query "INSERT INTO mergetree_00754 VALUES (0, 1, 1);"
+${CLICKHOUSE_CLIENT} --query "INSERT INTO mergetree_00754 VALUES (1, 1, 1);"
# Should fail because second shard is unavailable
${CLICKHOUSE_CLIENT} --query "SELECT count(*) FROM distributed;" 2>&1 \
diff --git a/dbms/tests/queries/0_stateless/00760_insert_json_with_defaults.sql b/dbms/tests/queries/0_stateless/00760_insert_json_with_defaults.sql
index 483d5e6e522..1c269aa6423 100644
--- a/dbms/tests/queries/0_stateless/00760_insert_json_with_defaults.sql
+++ b/dbms/tests/queries/0_stateless/00760_insert_json_with_defaults.sql
@@ -1,6 +1,5 @@
SET input_format_defaults_for_omitted_fields=1;
-CREATE DATABASE IF NOT EXISTS test;
DROP TABLE IF EXISTS defaults;
CREATE TABLE defaults
(
diff --git a/dbms/tests/queries/0_stateless/00763_lock_buffer.sh b/dbms/tests/queries/0_stateless/00763_lock_buffer.sh
index fdf5996a699..4ec4875e3e2 100755
--- a/dbms/tests/queries/0_stateless/00763_lock_buffer.sh
+++ b/dbms/tests/queries/0_stateless/00763_lock_buffer.sh
@@ -4,20 +4,20 @@ set -e
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
. $CURDIR/../shell_config.sh
-${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS test.mt"
-${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS test.buffer"
+${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS test.mt_00763_2"
+${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS test.buffer_00763_2"
-${CLICKHOUSE_CLIENT} --query="CREATE TABLE test.buffer (s String) ENGINE = Buffer(test, mt, 1, 1, 1, 1, 1, 1, 1)"
+${CLICKHOUSE_CLIENT} --query="CREATE TABLE test.buffer_00763_2 (s String) ENGINE = Buffer(test, mt_00763_2, 1, 1, 1, 1, 1, 1, 1)"
function thread1()
{
- seq 1 500 | sed -r -e 's/.+/DROP TABLE IF EXISTS test.mt; CREATE TABLE test.mt (s String) ENGINE = MergeTree ORDER BY s; INSERT INTO test.mt SELECT toString(number) FROM numbers(10);/' | ${CLICKHOUSE_CLIENT} --multiquery --ignore-error ||:
+ seq 1 500 | sed -r -e 's/.+/DROP TABLE IF EXISTS test.mt_00763_2; CREATE TABLE test.mt_00763_2 (s String) ENGINE = MergeTree ORDER BY s; INSERT INTO test.mt_00763_2 SELECT toString(number) FROM numbers(10);/' | ${CLICKHOUSE_CLIENT} --multiquery --ignore-error ||:
}
function thread2()
{
- seq 1 1000 | sed -r -e 's/.+/SELECT count() FROM test.buffer;/' | ${CLICKHOUSE_CLIENT} --multiquery --server_logs_file='/dev/null' --ignore-error 2>&1 | grep -vP '^0$|^10$|^Received exception|^Code: 60|^Code: 218'
+ seq 1 1000 | sed -r -e 's/.+/SELECT count() FROM test.buffer_00763_2;/' | ${CLICKHOUSE_CLIENT} --multiquery --server_logs_file='/dev/null' --ignore-error 2>&1 | grep -vP '^0$|^10$|^Received exception|^Code: 60|^Code: 218'
}
thread1 &
@@ -25,5 +25,5 @@ thread2 &
wait
-${CLICKHOUSE_CLIENT} --query="DROP TABLE test.mt"
-${CLICKHOUSE_CLIENT} --query="DROP TABLE test.buffer"
+${CLICKHOUSE_CLIENT} --query="DROP TABLE test.mt_00763_2"
+${CLICKHOUSE_CLIENT} --query="DROP TABLE test.buffer_00763_2"
diff --git a/dbms/tests/queries/0_stateless/00763_long_lock_buffer_alter_destination_table.sh b/dbms/tests/queries/0_stateless/00763_long_lock_buffer_alter_destination_table.sh
index cebb69ba44a..ba50d4e9f04 100755
--- a/dbms/tests/queries/0_stateless/00763_long_lock_buffer_alter_destination_table.sh
+++ b/dbms/tests/queries/0_stateless/00763_long_lock_buffer_alter_destination_table.sh
@@ -4,21 +4,21 @@ set -e
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
. $CURDIR/../shell_config.sh
-${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS test.mt"
-${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS test.buffer"
+${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS test.mt_00763_1"
+${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS test.buffer_00763_1"
-${CLICKHOUSE_CLIENT} --query="CREATE TABLE test.buffer (s String) ENGINE = Buffer(test, mt, 1, 1, 1, 1, 1, 1, 1)"
-${CLICKHOUSE_CLIENT} --query="CREATE TABLE test.mt (x UInt32, s String) ENGINE = MergeTree ORDER BY x"
-${CLICKHOUSE_CLIENT} --query="INSERT INTO test.mt VALUES (1, '1'), (2, '2'), (3, '3')"
+${CLICKHOUSE_CLIENT} --query="CREATE TABLE test.buffer_00763_1 (s String) ENGINE = Buffer(test, mt_00763_1, 1, 1, 1, 1, 1, 1, 1)"
+${CLICKHOUSE_CLIENT} --query="CREATE TABLE test.mt_00763_1 (x UInt32, s String) ENGINE = MergeTree ORDER BY x"
+${CLICKHOUSE_CLIENT} --query="INSERT INTO test.mt_00763_1 VALUES (1, '1'), (2, '2'), (3, '3')"
function thread1()
{
- seq 1 300 | sed -r -e 's/.+/ALTER TABLE test.mt MODIFY column s UInt32; ALTER TABLE test.mt MODIFY column s String;/' | ${CLICKHOUSE_CLIENT} --multiquery --ignore-error ||:
+ seq 1 300 | sed -r -e 's/.+/ALTER TABLE test.mt_00763_1 MODIFY column s UInt32; ALTER TABLE test.mt_00763_1 MODIFY column s String;/' | ${CLICKHOUSE_CLIENT} --multiquery --ignore-error ||:
}
function thread2()
{
- seq 1 2000 | sed -r -e 's/.+/SELECT sum(length(s)) FROM test.buffer;/' | ${CLICKHOUSE_CLIENT} --multiquery --server_logs_file='/dev/null' --ignore-error 2>&1 | grep -vP '^3$'
+ seq 1 2000 | sed -r -e 's/.+/SELECT sum(length(s)) FROM test.buffer_00763_1;/' | ${CLICKHOUSE_CLIENT} --multiquery --server_logs_file='/dev/null' --ignore-error 2>&1 | grep -vP '^3$'
}
thread1 &
@@ -26,5 +26,5 @@ thread2 &
wait
-${CLICKHOUSE_CLIENT} --query="DROP TABLE test.mt"
-${CLICKHOUSE_CLIENT} --query="DROP TABLE test.buffer"
+${CLICKHOUSE_CLIENT} --query="DROP TABLE test.mt_00763_1"
+${CLICKHOUSE_CLIENT} --query="DROP TABLE test.buffer_00763_1"
diff --git a/dbms/tests/queries/0_stateless/00800_low_cardinality_distinct_numeric.sql b/dbms/tests/queries/0_stateless/00800_low_cardinality_distinct_numeric.sql
index bac864e6e21..da818cb7323 100644
--- a/dbms/tests/queries/0_stateless/00800_low_cardinality_distinct_numeric.sql
+++ b/dbms/tests/queries/0_stateless/00800_low_cardinality_distinct_numeric.sql
@@ -1,5 +1,6 @@
-drop table if exists lc;
-create table lc (val LowCardinality(UInt64)) engine = MergeTree order by val;
-insert into lc select number % 123 from system.numbers limit 100000;
-select distinct(val) from lc order by val;
-drop table if exists lc;
+drop table if exists lc_00800_2;
+create table lc_00800_2 (val LowCardinality(UInt64)) engine = MergeTree order by val;
+insert into lc_00800_2 select number % 123 from system.numbers limit 100000;
+select distinct(val) from lc_00800_2 order by val;
+drop table if exists lc_00800_2
+;
diff --git a/dbms/tests/queries/0_stateless/00800_low_cardinality_empty_array.sql b/dbms/tests/queries/0_stateless/00800_low_cardinality_empty_array.sql
index 71bfb38d3c4..62d01b11861 100644
--- a/dbms/tests/queries/0_stateless/00800_low_cardinality_empty_array.sql
+++ b/dbms/tests/queries/0_stateless/00800_low_cardinality_empty_array.sql
@@ -1,7 +1,7 @@
-drop table if exists lc;
-create table lc (names Array(LowCardinality(String))) engine=MergeTree order by tuple();
-insert into lc values ([]);
-insert into lc select emptyArrayString();
-select * from lc;
-drop table if exists lc;
+drop table if exists lc_00800_1;
+create table lc_00800_1 (names Array(LowCardinality(String))) engine=MergeTree order by tuple();
+insert into lc_00800_1 values ([]);
+insert into lc_00800_1 select emptyArrayString();
+select * from lc_00800_1;
+drop table if exists lc_00800_1;
diff --git a/dbms/tests/queries/0_stateless/00804_test_alter_compression_codecs.sql b/dbms/tests/queries/0_stateless/00804_test_alter_compression_codecs.sql
index f11d75d40aa..b7cb405006b 100644
--- a/dbms/tests/queries/0_stateless/00804_test_alter_compression_codecs.sql
+++ b/dbms/tests/queries/0_stateless/00804_test_alter_compression_codecs.sql
@@ -55,34 +55,34 @@ ALTER TABLE test.alter_bad_codec ADD COLUMN alter_column DateTime DEFAULT '2019-
DROP TABLE IF EXISTS test.alter_bad_codec;
-DROP TABLE IF EXISTS test.large_alter_table;
-DROP TABLE IF EXISTS test.store_of_hash;
+DROP TABLE IF EXISTS test.large_alter_table_00804;
+DROP TABLE IF EXISTS test.store_of_hash_00804;
-CREATE TABLE test.large_alter_table (
+CREATE TABLE test.large_alter_table_00804 (
somedate Date CODEC(ZSTD, ZSTD, ZSTD(12), LZ4HC(12)),
id UInt64 CODEC(LZ4, ZSTD, NONE, LZ4HC),
data String CODEC(ZSTD(2), LZ4HC, NONE, LZ4, LZ4)
) ENGINE = MergeTree() PARTITION BY somedate ORDER BY id SETTINGS index_granularity = 2;
-INSERT INTO test.large_alter_table SELECT toDate('2019-01-01'), number, toString(number + rand()) FROM system.numbers LIMIT 300000;
+INSERT INTO test.large_alter_table_00804 SELECT toDate('2019-01-01'), number, toString(number + rand()) FROM system.numbers LIMIT 300000;
-CREATE TABLE test.store_of_hash (hash UInt64) ENGINE = Memory();
+CREATE TABLE test.store_of_hash_00804 (hash UInt64) ENGINE = Memory();
-INSERT INTO test.store_of_hash SELECT sum(cityHash64(*)) FROM test.large_alter_table;
+INSERT INTO test.store_of_hash_00804 SELECT sum(cityHash64(*)) FROM test.large_alter_table_00804;
-ALTER TABLE test.large_alter_table MODIFY COLUMN data CODEC(NONE, LZ4, LZ4HC, ZSTD);
+ALTER TABLE test.large_alter_table_00804 MODIFY COLUMN data CODEC(NONE, LZ4, LZ4HC, ZSTD);
-OPTIMIZE TABLE test.large_alter_table;
+OPTIMIZE TABLE test.large_alter_table_00804;
-SELECT compression_codec FROM system.columns WHERE database = 'test' AND table = 'large_alter_table' AND name = 'data';
+SELECT compression_codec FROM system.columns WHERE database = 'test' AND table = 'large_alter_table_00804' AND name = 'data';
-DETACH TABLE test.large_alter_table;
-ATTACH TABLE test.large_alter_table;
+DETACH TABLE test.large_alter_table_00804;
+ATTACH TABLE test.large_alter_table_00804;
-INSERT INTO test.store_of_hash SELECT sum(cityHash64(*)) FROM test.large_alter_table;
+INSERT INTO test.store_of_hash_00804 SELECT sum(cityHash64(*)) FROM test.large_alter_table_00804;
-SELECT COUNT(hash) FROM test.store_of_hash;
-SELECT COUNT(DISTINCT hash) FROM test.store_of_hash;
+SELECT COUNT(hash) FROM test.store_of_hash_00804;
+SELECT COUNT(DISTINCT hash) FROM test.store_of_hash_00804;
-DROP TABLE IF EXISTS test.large_alter_table;
-DROP TABLE IF EXISTS test.store_of_hash;
+DROP TABLE IF EXISTS test.large_alter_table_00804;
+DROP TABLE IF EXISTS test.store_of_hash_00804;
diff --git a/dbms/tests/queries/0_stateless/00818_join_bug_4271.sql b/dbms/tests/queries/0_stateless/00818_join_bug_4271.sql
index c273b3f1925..ce11088fd95 100644
--- a/dbms/tests/queries/0_stateless/00818_join_bug_4271.sql
+++ b/dbms/tests/queries/0_stateless/00818_join_bug_4271.sql
@@ -1,17 +1,17 @@
-drop table if exists t;
-drop table if exists s;
+drop table if exists t_00818;
+drop table if exists s_00818;
-create table t(a Nullable(Int64), b Nullable(Int64), c Nullable(String)) engine = Memory;
-create table s(a Nullable(Int64), b Nullable(Int64), c Nullable(String)) engine = Memory;
+create table t_00818(a Nullable(Int64), b Nullable(Int64), c Nullable(String)) engine = Memory;
+create table s_00818(a Nullable(Int64), b Nullable(Int64), c Nullable(String)) engine = Memory;
-insert into t values(1,1,'a'), (2,2,'b');
-insert into s values(1,1,'a');
+insert into t_00818 values(1,1,'a'), (2,2,'b');
+insert into s_00818 values(1,1,'a');
-select * from t left join s on t.a = s.a;
-select * from t left join s on t.a = s.a and t.a = s.b;
-select * from t left join s on t.a = s.a where s.a = 1;
-select * from t left join s on t.a = s.a and t.a = s.a;
-select * from t left join s on t.a = s.a and t.b = s.a;
+select * from t_00818 left join s_00818 on t_00818.a = s_00818.a;
+select * from t_00818 left join s_00818 on t_00818.a = s_00818.a and t_00818.a = s_00818.b;
+select * from t_00818 left join s_00818 on t_00818.a = s_00818.a where s_00818.a = 1;
+select * from t_00818 left join s_00818 on t_00818.a = s_00818.a and t_00818.a = s_00818.a;
+select * from t_00818 left join s_00818 on t_00818.a = s_00818.a and t_00818.b = s_00818.a;
-drop table t;
-drop table s;
+drop table t_00818;
+drop table s_00818;
diff --git a/dbms/tests/queries/0_stateless/00825_protobuf_format_input.sh b/dbms/tests/queries/0_stateless/00825_protobuf_format_input.sh
index beffbca12f9..d56864e0b0e 100755
--- a/dbms/tests/queries/0_stateless/00825_protobuf_format_input.sh
+++ b/dbms/tests/queries/0_stateless/00825_protobuf_format_input.sh
@@ -43,3 +43,5 @@ EOF
source $CURDIR/00825_protobuf_format_input.insh
$CLICKHOUSE_CLIENT --query "SELECT * FROM table_00825 ORDER BY uuid;"
+
+$CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS table_00825;"
diff --git a/dbms/tests/queries/0_stateless/00825_protobuf_format_output.sh b/dbms/tests/queries/0_stateless/00825_protobuf_format_output.sh
index 979bb3d878d..3fd2a5abd18 100755
--- a/dbms/tests/queries/0_stateless/00825_protobuf_format_output.sh
+++ b/dbms/tests/queries/0_stateless/00825_protobuf_format_output.sh
@@ -51,4 +51,6 @@ SELECT 'STRINGS->';
SELECT * FROM table_00825 ORDER BY name FORMAT Protobuf SETTINGS format_schema = '$CURDIR/00825_protobuf_format:StrPerson';
SELECT 'SYNTAX2->';
SELECT * FROM table_00825 ORDER BY name FORMAT Protobuf SETTINGS format_schema = '$CURDIR/00825_protobuf_format_syntax2:Syntax2Person';
+
+DROP TABLE IF EXISTS table_00825;
EOF
diff --git a/dbms/tests/queries/0_stateless/00834_cancel_http_readonly_queries_on_client_close.sh b/dbms/tests/queries/0_stateless/00834_cancel_http_readonly_queries_on_client_close.sh
index 221e5848e77..3ec5bcd7791 100755
--- a/dbms/tests/queries/0_stateless/00834_cancel_http_readonly_queries_on_client_close.sh
+++ b/dbms/tests/queries/0_stateless/00834_cancel_http_readonly_queries_on_client_close.sh
@@ -3,10 +3,10 @@
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
. $CURDIR/../shell_config.sh
-${CLICKHOUSE_CURL} --max-time 0.1 -sS "${CLICKHOUSE_URL}?query_id=cancel_http_readonly_queries_on_client_close&cancel_http_readonly_queries_on_client_close=1&query=SELECT+count()+FROM+system.numbers" 2>&1 | grep -cF 'curl: (28)'
+${CLICKHOUSE_CURL} --max-time 1 -sS "${CLICKHOUSE_URL_PARAMS}&query_id=cancel_http_readonly_queries_on_client_close&cancel_http_readonly_queries_on_client_close=1&query=SELECT+count()+FROM+system.numbers" 2>&1 | grep -cF 'curl: (28)'
for i in {1..10}
do
- ${CLICKHOUSE_CURL} -sS --data "SELECT count() FROM system.processes WHERE query_id = 'cancel_http_readonly_queries_on_client_close'" "${CLICKHOUSE_URL}" | grep '0' && break
- sleep 0.1
+ ${CLICKHOUSE_CURL} -sS --data "SELECT count() FROM system.processes WHERE query_id = 'cancel_http_readonly_queries_on_client_close'" "${CLICKHOUSE_URL_PARAMS}" | grep '0' && break
+ sleep 0.2
done
diff --git a/dbms/tests/queries/0_stateless/00834_kill_mutation.sh b/dbms/tests/queries/0_stateless/00834_kill_mutation.sh
index 8c604ef8fac..03369dbff7a 100755
--- a/dbms/tests/queries/0_stateless/00834_kill_mutation.sh
+++ b/dbms/tests/queries/0_stateless/00834_kill_mutation.sh
@@ -5,38 +5,38 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
. $CURDIR/mergetree_mutations.lib
-${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS kill_mutation"
+${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS test.kill_mutation"
-${CLICKHOUSE_CLIENT} --query="CREATE TABLE kill_mutation(d Date, x UInt32, s String) ENGINE MergeTree ORDER BY x PARTITION BY d"
+${CLICKHOUSE_CLIENT} --query="CREATE TABLE test.kill_mutation(d Date, x UInt32, s String) ENGINE MergeTree ORDER BY x PARTITION BY d"
-${CLICKHOUSE_CLIENT} --query="INSERT INTO kill_mutation VALUES ('2000-01-01', 1, 'a')"
-${CLICKHOUSE_CLIENT} --query="INSERT INTO kill_mutation VALUES ('2001-01-01', 2, 'b')"
+${CLICKHOUSE_CLIENT} --query="INSERT INTO test.kill_mutation VALUES ('2000-01-01', 1, 'a')"
+${CLICKHOUSE_CLIENT} --query="INSERT INTO test.kill_mutation VALUES ('2001-01-01', 2, 'b')"
${CLICKHOUSE_CLIENT} --query="SELECT '*** Create and kill a single invalid mutation ***'"
-${CLICKHOUSE_CLIENT} --query="ALTER TABLE kill_mutation DELETE WHERE toUInt32(s) = 1"
+${CLICKHOUSE_CLIENT} --query="ALTER TABLE test.kill_mutation DELETE WHERE toUInt32(s) = 1"
sleep 0.1
-${CLICKHOUSE_CLIENT} --query="SELECT mutation_id, latest_failed_part IN ('20000101_1_1_0', '20010101_2_2_0'), latest_fail_time != 0, substr(latest_fail_reason, 1, 8) FROM system.mutations WHERE database = '${CLICKHOUSE_DATABASE}' AND table = 'kill_mutation'"
+${CLICKHOUSE_CLIENT} --query="SELECT mutation_id, latest_failed_part IN ('20000101_1_1_0', '20010101_2_2_0'), latest_fail_time != 0, substr(latest_fail_reason, 1, 8) FROM system.mutations WHERE database = 'test' AND table = 'kill_mutation'"
-${CLICKHOUSE_CLIENT} --query="KILL MUTATION WHERE database = '${CLICKHOUSE_DATABASE}' AND table = 'kill_mutation'"
+${CLICKHOUSE_CLIENT} --query="KILL MUTATION WHERE database = 'test' AND table = 'kill_mutation'"
-${CLICKHOUSE_CLIENT} --query="SELECT mutation_id FROM system.mutations WHERE database = '${CLICKHOUSE_DATABASE}' AND table = 'kill_mutation'"
+${CLICKHOUSE_CLIENT} --query="SELECT mutation_id FROM system.mutations WHERE database = 'test' AND table = 'kill_mutation'"
${CLICKHOUSE_CLIENT} --query="SELECT '*** Create and kill invalid mutation that blocks another mutation ***'"
-${CLICKHOUSE_CLIENT} --query="ALTER TABLE kill_mutation DELETE WHERE toUInt32(s) = 1"
-${CLICKHOUSE_CLIENT} --query="ALTER TABLE kill_mutation DELETE WHERE x = 1"
+${CLICKHOUSE_CLIENT} --query="ALTER TABLE test.kill_mutation DELETE WHERE toUInt32(s) = 1"
+${CLICKHOUSE_CLIENT} --query="ALTER TABLE test.kill_mutation DELETE WHERE x = 1"
-${CLICKHOUSE_CLIENT} --query="SELECT mutation_id, latest_failed_part IN ('20000101_1_1_0', '20010101_2_2_0'), latest_fail_time != 0, substr(latest_fail_reason, 1, 8) FROM system.mutations WHERE database = '${CLICKHOUSE_DATABASE}' AND table = 'kill_mutation' AND mutation_id = 'mutation_4.txt'"
+${CLICKHOUSE_CLIENT} --query="SELECT mutation_id, latest_failed_part IN ('20000101_1_1_0', '20010101_2_2_0'), latest_fail_time != 0, substr(latest_fail_reason, 1, 8) FROM system.mutations WHERE database = 'test' AND table = 'kill_mutation' AND mutation_id = 'mutation_4.txt'"
sleep 0.1
-${CLICKHOUSE_CLIENT} --query="KILL MUTATION WHERE database = '${CLICKHOUSE_DATABASE}' AND table = 'kill_mutation' AND mutation_id = 'mutation_4.txt'"
+${CLICKHOUSE_CLIENT} --query="KILL MUTATION WHERE database = 'test' AND table = 'kill_mutation' AND mutation_id = 'mutation_4.txt'"
-wait_for_mutation "kill_mutation" "mutation_5.txt"
+wait_for_mutation "kill_mutation" "mutation_5.txt" "test"
-${CLICKHOUSE_CLIENT} --query="SELECT * FROM kill_mutation"
+${CLICKHOUSE_CLIENT} --query="SELECT * FROM test.kill_mutation"
-${CLICKHOUSE_CLIENT} --query="DROP TABLE kill_mutation"
+${CLICKHOUSE_CLIENT} --query="DROP TABLE test.kill_mutation"
diff --git a/dbms/tests/queries/0_stateless/00834_kill_mutation_replicated_zookeeper.sh b/dbms/tests/queries/0_stateless/00834_kill_mutation_replicated_zookeeper.sh
index 6322ce9e3ba..59c9882a388 100755
--- a/dbms/tests/queries/0_stateless/00834_kill_mutation_replicated_zookeeper.sh
+++ b/dbms/tests/queries/0_stateless/00834_kill_mutation_replicated_zookeeper.sh
@@ -5,43 +5,43 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
. $CURDIR/mergetree_mutations.lib
-${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS kill_mutation_r1"
-${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS kill_mutation_r2"
+${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS test.kill_mutation_r1"
+${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS test.kill_mutation_r2"
-${CLICKHOUSE_CLIENT} --query="CREATE TABLE kill_mutation_r1(d Date, x UInt32, s String) ENGINE ReplicatedMergeTree('/clickhouse/tables/test/kill_mutation', '1') ORDER BY x PARTITION BY d"
-${CLICKHOUSE_CLIENT} --query="CREATE TABLE kill_mutation_r2(d Date, x UInt32, s String) ENGINE ReplicatedMergeTree('/clickhouse/tables/test/kill_mutation', '2') ORDER BY x PARTITION BY d"
+${CLICKHOUSE_CLIENT} --query="CREATE TABLE test.kill_mutation_r1(d Date, x UInt32, s String) ENGINE ReplicatedMergeTree('/clickhouse/tables/test/kill_mutation', '1') ORDER BY x PARTITION BY d"
+${CLICKHOUSE_CLIENT} --query="CREATE TABLE test.kill_mutation_r2(d Date, x UInt32, s String) ENGINE ReplicatedMergeTree('/clickhouse/tables/test/kill_mutation', '2') ORDER BY x PARTITION BY d"
-${CLICKHOUSE_CLIENT} --query="INSERT INTO kill_mutation_r1 VALUES ('2000-01-01', 1, 'a')"
-${CLICKHOUSE_CLIENT} --query="INSERT INTO kill_mutation_r1 VALUES ('2001-01-01', 2, 'b')"
+${CLICKHOUSE_CLIENT} --query="INSERT INTO test.kill_mutation_r1 VALUES ('2000-01-01', 1, 'a')"
+${CLICKHOUSE_CLIENT} --query="INSERT INTO test.kill_mutation_r1 VALUES ('2001-01-01', 2, 'b')"
${CLICKHOUSE_CLIENT} --query="SELECT '*** Create and kill a single invalid mutation ***'"
-${CLICKHOUSE_CLIENT} --query="ALTER TABLE kill_mutation_r1 DELETE WHERE toUInt32(s) = 1"
+${CLICKHOUSE_CLIENT} --query="ALTER TABLE test.kill_mutation_r1 DELETE WHERE toUInt32(s) = 1"
sleep 1
-${CLICKHOUSE_CLIENT} --query="SELECT mutation_id, latest_failed_part IN ('20000101_0_0_0', '20010101_0_0_0'), latest_fail_time != 0, substr(latest_fail_reason, 1, 8) FROM system.mutations WHERE database = '${CLICKHOUSE_DATABASE}' AND table = 'kill_mutation_r1'"
+${CLICKHOUSE_CLIENT} --query="SELECT mutation_id, latest_failed_part IN ('20000101_0_0_0', '20010101_0_0_0'), latest_fail_time != 0, substr(latest_fail_reason, 1, 8) FROM system.mutations WHERE database = 'test' AND table = 'kill_mutation_r1'"
-${CLICKHOUSE_CLIENT} --query="KILL MUTATION WHERE database = '${CLICKHOUSE_DATABASE}' AND table = 'kill_mutation_r1'"
+${CLICKHOUSE_CLIENT} --query="KILL MUTATION WHERE database = 'test' AND table = 'kill_mutation_r1'"
-${CLICKHOUSE_CLIENT} --query="SELECT mutation_id FROM system.mutations WHERE database = '${CLICKHOUSE_DATABASE}' AND table = 'kill_mutation_r1'"
+${CLICKHOUSE_CLIENT} --query="SELECT mutation_id FROM system.mutations WHERE database = 'test' AND table = 'kill_mutation_r1'"
${CLICKHOUSE_CLIENT} --query="SELECT '*** Create and kill invalid mutation that blocks another mutation ***'"
-${CLICKHOUSE_CLIENT} --query="SYSTEM SYNC REPLICA kill_mutation_r1"
-${CLICKHOUSE_CLIENT} --query="ALTER TABLE kill_mutation_r1 DELETE WHERE toUInt32(s) = 1"
-${CLICKHOUSE_CLIENT} --query="ALTER TABLE kill_mutation_r1 DELETE WHERE x = 1"
+${CLICKHOUSE_CLIENT} --query="SYSTEM SYNC REPLICA test.kill_mutation_r1"
+${CLICKHOUSE_CLIENT} --query="ALTER TABLE test.kill_mutation_r1 DELETE WHERE toUInt32(s) = 1"
+${CLICKHOUSE_CLIENT} --query="ALTER TABLE test.kill_mutation_r1 DELETE WHERE x = 1"
sleep 1
-${CLICKHOUSE_CLIENT} --query="SELECT mutation_id, latest_failed_part IN ('20000101_0_0_0_1', '20010101_0_0_0_1'), latest_fail_time != 0, substr(latest_fail_reason, 1, 8) FROM system.mutations WHERE database = '${CLICKHOUSE_DATABASE}' AND table = 'kill_mutation_r1' AND mutation_id = '0000000001'"
+${CLICKHOUSE_CLIENT} --query="SELECT mutation_id, latest_failed_part IN ('20000101_0_0_0_1', '20010101_0_0_0_1'), latest_fail_time != 0, substr(latest_fail_reason, 1, 8) FROM system.mutations WHERE database = 'test' AND table = 'kill_mutation_r1' AND mutation_id = '0000000001'"
-${CLICKHOUSE_CLIENT} --query="KILL MUTATION WHERE database = '${CLICKHOUSE_DATABASE}' AND table = 'kill_mutation_r1' AND mutation_id = '0000000001'"
+${CLICKHOUSE_CLIENT} --query="KILL MUTATION WHERE database = 'test' AND table = 'kill_mutation_r1' AND mutation_id = '0000000001'"
-wait_for_mutation "kill_mutation_r2" "0000000002"
+wait_for_mutation "kill_mutation_r2" "0000000002" "test"
-${CLICKHOUSE_CLIENT} --query="SELECT * FROM kill_mutation_r2"
+${CLICKHOUSE_CLIENT} --query="SELECT * FROM test.kill_mutation_r2"
-${CLICKHOUSE_CLIENT} --query="DROP TABLE kill_mutation_r1"
-${CLICKHOUSE_CLIENT} --query="DROP TABLE kill_mutation_r2"
+${CLICKHOUSE_CLIENT} --query="DROP TABLE test.kill_mutation_r1"
+${CLICKHOUSE_CLIENT} --query="DROP TABLE test.kill_mutation_r2"
diff --git a/dbms/tests/queries/0_stateless/00906_low_cardinality_cache.sql b/dbms/tests/queries/0_stateless/00906_low_cardinality_cache.sql
index b8ffce12e23..cd2ceabcf6d 100644
--- a/dbms/tests/queries/0_stateless/00906_low_cardinality_cache.sql
+++ b/dbms/tests/queries/0_stateless/00906_low_cardinality_cache.sql
@@ -1,5 +1,5 @@
-drop table if exists lc;
-create table lc (b LowCardinality(String)) engine=MergeTree order by b;
-insert into lc select '0123456789' from numbers(100000000);
-select count(), b from lc group by b;
-drop table if exists lc;
+drop table if exists lc_00906;
+create table lc_00906 (b LowCardinality(String)) engine=MergeTree order by b;
+insert into lc_00906 select '0123456789' from numbers(100000000);
+select count(), b from lc_00906 group by b;
+drop table if exists lc_00906;
diff --git a/dbms/tests/queries/0_stateless/00926_adaptive_index_granularity_pk.sql b/dbms/tests/queries/0_stateless/00926_adaptive_index_granularity_pk.sql
index eaececd68e4..42b6ee8f771 100644
--- a/dbms/tests/queries/0_stateless/00926_adaptive_index_granularity_pk.sql
+++ b/dbms/tests/queries/0_stateless/00926_adaptive_index_granularity_pk.sql
@@ -54,32 +54,32 @@ SELECT '----00804----';
SET max_rows_to_read = 0;
SET force_primary_key = 0;
-DROP TABLE IF EXISTS test.large_alter_table;
-DROP TABLE IF EXISTS test.store_of_hash;
+DROP TABLE IF EXISTS test.large_alter_table_00926;
+DROP TABLE IF EXISTS test.store_of_hash_00926;
-CREATE TABLE test.large_alter_table (
+CREATE TABLE test.large_alter_table_00926 (
somedate Date CODEC(ZSTD, ZSTD, ZSTD(12), LZ4HC(12)),
id UInt64 CODEC(LZ4, ZSTD, NONE, LZ4HC),
data String CODEC(ZSTD(2), LZ4HC, NONE, LZ4, LZ4)
) ENGINE = MergeTree() PARTITION BY somedate ORDER BY id SETTINGS index_granularity_bytes=40;
-INSERT INTO test.large_alter_table SELECT toDate('2019-01-01'), number, toString(number + rand()) FROM system.numbers LIMIT 300000;
+INSERT INTO test.large_alter_table_00926 SELECT toDate('2019-01-01'), number, toString(number + rand()) FROM system.numbers LIMIT 300000;
-CREATE TABLE test.store_of_hash (hash UInt64) ENGINE = Memory();
+CREATE TABLE test.store_of_hash_00926 (hash UInt64) ENGINE = Memory();
-INSERT INTO test.store_of_hash SELECT sum(cityHash64(*)) FROM test.large_alter_table;
+INSERT INTO test.store_of_hash_00926 SELECT sum(cityHash64(*)) FROM test.large_alter_table_00926;
-ALTER TABLE test.large_alter_table MODIFY COLUMN data CODEC(NONE, LZ4, LZ4HC, ZSTD);
+ALTER TABLE test.large_alter_table_00926 MODIFY COLUMN data CODEC(NONE, LZ4, LZ4HC, ZSTD);
-OPTIMIZE TABLE test.large_alter_table;
+OPTIMIZE TABLE test.large_alter_table_00926;
-DETACH TABLE test.large_alter_table;
-ATTACH TABLE test.large_alter_table;
+DETACH TABLE test.large_alter_table_00926;
+ATTACH TABLE test.large_alter_table_00926;
-INSERT INTO test.store_of_hash SELECT sum(cityHash64(*)) FROM test.large_alter_table;
+INSERT INTO test.store_of_hash_00926 SELECT sum(cityHash64(*)) FROM test.large_alter_table_00926;
-SELECT COUNT(hash) FROM test.store_of_hash;
-SELECT COUNT(DISTINCT hash) FROM test.store_of_hash;
+SELECT COUNT(hash) FROM test.store_of_hash_00926;
+SELECT COUNT(DISTINCT hash) FROM test.store_of_hash_00926;
-DROP TABLE IF EXISTS test.large_alter_table;
-DROP TABLE IF EXISTS test.store_of_hash;
+DROP TABLE IF EXISTS test.large_alter_table_00926;
+DROP TABLE IF EXISTS test.store_of_hash_00926;
diff --git a/dbms/tests/queries/0_stateless/00926_zookeeper_adaptive_index_granularity_replicated_merge_tree.sql b/dbms/tests/queries/0_stateless/00926_zookeeper_adaptive_index_granularity_replicated_merge_tree.sql
index 0ac96ce751b..df9d791d824 100644
--- a/dbms/tests/queries/0_stateless/00926_zookeeper_adaptive_index_granularity_replicated_merge_tree.sql
+++ b/dbms/tests/queries/0_stateless/00926_zookeeper_adaptive_index_granularity_replicated_merge_tree.sql
@@ -35,7 +35,7 @@ SELECT distinct(marks) from system.parts WHERE table = 'zero_rows_per_granule2'
SELECT distinct(marks) from system.parts WHERE table = 'zero_rows_per_granule1' and database='test' and active=1;
-SELECT sleep(0.5) Format Null;
+SELECT sleep(0.7) Format Null;
OPTIMIZE TABLE test.zero_rows_per_granule2 FINAL;
@@ -92,7 +92,7 @@ SELECT distinct(marks) from system.parts WHERE table = 'four_rows_per_granule2'
SELECT distinct(marks) from system.parts WHERE table = 'four_rows_per_granule1' and database='test' and active=1;
-SELECT sleep(0.5) Format Null;
+SELECT sleep(0.7) Format Null;
OPTIMIZE TABLE test.four_rows_per_granule2 FINAL;
@@ -180,7 +180,7 @@ ATTACH TABLE test.adaptive_granularity_alter1;
INSERT INTO test.adaptive_granularity_alter1 (p, k, v1, v2) VALUES ('2018-05-15', 100, 1000, 'aaaa'), ('2018-05-16', 101, 3000, 'bbbb'), ('2018-05-17', 102, 5000, 'cccc'), ('2018-05-19', 103, 7000, 'dddd');
-SELECT sleep(0.5) Format Null;
+SELECT sleep(0.7) Format Null;
OPTIMIZE TABLE test.adaptive_granularity_alter1 FINAL;
diff --git a/dbms/tests/queries/0_stateless/00927_table_filter.sql b/dbms/tests/queries/0_stateless/00927_table_filter.sql
index af9cfa11d59..8fef82c55c8 100644
--- a/dbms/tests/queries/0_stateless/00927_table_filter.sql
+++ b/dbms/tests/queries/0_stateless/00927_table_filter.sql
@@ -1,44 +1,44 @@
-DROP TABLE IF EXISTS filtered_table1;
-DROP TABLE IF EXISTS filtered_table2;
-DROP TABLE IF EXISTS filtered_table3;
+DROP TABLE IF EXISTS test.filtered_table1;
+DROP TABLE IF EXISTS test.filtered_table2;
+DROP TABLE IF EXISTS test.filtered_table3;
-- Filter: a = 1, values: (1, 0), (1, 1)
-CREATE TABLE filtered_table1 (a UInt8, b UInt8) ENGINE MergeTree ORDER BY a;
-INSERT INTO filtered_table1 values (0, 0), (0, 1), (1, 0), (1, 1);
+CREATE TABLE test.filtered_table1 (a UInt8, b UInt8) ENGINE MergeTree ORDER BY a;
+INSERT INTO test.filtered_table1 values (0, 0), (0, 1), (1, 0), (1, 1);
-- Filter: a + b < 1 or c - d > 5, values: (0, 0, 0, 0), (0, 0, 6, 0)
-CREATE TABLE filtered_table2 (a UInt8, b UInt8, c UInt8, d UInt8) ENGINE MergeTree ORDER BY a;
-INSERT INTO filtered_table2 values (0, 0, 0, 0), (1, 2, 3, 4), (4, 3, 2, 1), (0, 0, 6, 0);
+CREATE TABLE test.filtered_table2 (a UInt8, b UInt8, c UInt8, d UInt8) ENGINE MergeTree ORDER BY a;
+INSERT INTO test.filtered_table2 values (0, 0, 0, 0), (1, 2, 3, 4), (4, 3, 2, 1), (0, 0, 6, 0);
-- Filter: c = 1, values: (0, 1), (1, 0)
-CREATE TABLE filtered_table3 (a UInt8, b UInt8, c UInt16 ALIAS a + b) ENGINE MergeTree ORDER BY a;
-INSERT INTO filtered_table3 values (0, 0), (0, 1), (1, 0), (1, 1);
+CREATE TABLE test.filtered_table3 (a UInt8, b UInt8, c UInt16 ALIAS a + b) ENGINE MergeTree ORDER BY a;
+INSERT INTO test.filtered_table3 values (0, 0), (0, 1), (1, 0), (1, 1);
SELECT '-- PREWHERE should fail';
-SELECT * FROM filtered_table1 PREWHERE 1; -- { serverError 182 }
-SELECT * FROM filtered_table2 PREWHERE 1; -- { serverError 182 }
-SELECT * FROM filtered_table3 PREWHERE 1; -- { serverError 182 }
+SELECT * FROM test.filtered_table1 PREWHERE 1; -- { serverError 182 }
+SELECT * FROM test.filtered_table2 PREWHERE 1; -- { serverError 182 }
+SELECT * FROM test.filtered_table3 PREWHERE 1; -- { serverError 182 }
-SELECT * FROM filtered_table1;
-SELECT * FROM filtered_table2;
-SELECT * FROM filtered_table3;
+SELECT * FROM test.filtered_table1;
+SELECT * FROM test.filtered_table2;
+SELECT * FROM test.filtered_table3;
-SELECT a FROM filtered_table1;
-SELECT b FROM filtered_table1;
-SELECT a FROM filtered_table1 WHERE a = 1;
-SELECT a = 1 FROM filtered_table1;
+SELECT a FROM test.filtered_table1;
+SELECT b FROM test.filtered_table1;
+SELECT a FROM test.filtered_table1 WHERE a = 1;
+SELECT a = 1 FROM test.filtered_table1;
-SELECT a FROM filtered_table3;
-SELECT b FROM filtered_table3;
-SELECT c FROM filtered_table3;
-SELECT a + b FROM filtered_table3;
-SELECT a FROM filtered_table3 WHERE c = 1;
-SELECT c = 1 FROM filtered_table3;
-SELECT a + b = 1 FROM filtered_table3;
+SELECT a FROM test.filtered_table3;
+SELECT b FROM test.filtered_table3;
+SELECT c FROM test.filtered_table3;
+SELECT a + b FROM test.filtered_table3;
+SELECT a FROM test.filtered_table3 WHERE c = 1;
+SELECT c = 1 FROM test.filtered_table3;
+SELECT a + b = 1 FROM test.filtered_table3;
-SELECT * FROM filtered_table1 as t1 ANY LEFT JOIN filtered_table1 as t2 ON t1.a = t2.b;
-SELECT * FROM filtered_table1 as t2 ANY RIGHT JOIN filtered_table1 as t1 ON t2.b = t1.a;
+SELECT * FROM test.filtered_table1 as t1 ANY LEFT JOIN test.filtered_table1 as t2 ON t1.a = t2.b;
+SELECT * FROM test.filtered_table1 as t2 ANY RIGHT JOIN test.filtered_table1 as t1 ON t2.b = t1.a;
-DROP TABLE filtered_table1;
-DROP TABLE filtered_table2;
-DROP TABLE filtered_table3;
+DROP TABLE test.filtered_table1;
+DROP TABLE test.filtered_table2;
+DROP TABLE test.filtered_table3;
diff --git a/dbms/tests/queries/0_stateless/00931_low_cardinality_read_with_empty_array.sql b/dbms/tests/queries/0_stateless/00931_low_cardinality_read_with_empty_array.sql
index 039ba0c0b53..a3b90f94f74 100644
--- a/dbms/tests/queries/0_stateless/00931_low_cardinality_read_with_empty_array.sql
+++ b/dbms/tests/queries/0_stateless/00931_low_cardinality_read_with_empty_array.sql
@@ -1,7 +1,7 @@
-drop table if exists lc;
-create table lc (key UInt64, value Array(LowCardinality(String))) engine = MergeTree order by key;
-insert into lc select number, if(number < 10000 or number > 100000, [toString(number)], emptyArrayString()) from system.numbers limit 200000;
-select * from lc where (key < 100 or key > 50000) and not has(value, toString(key)) and length(value) == 1 limit 10 settings max_block_size = 8192, max_threads = 1;
+drop table if exists lc_00931;
+create table lc_00931 (key UInt64, value Array(LowCardinality(String))) engine = MergeTree order by key;
+insert into lc_00931 select number, if(number < 10000 or number > 100000, [toString(number)], emptyArrayString()) from system.numbers limit 200000;
+select * from lc_00931 where (key < 100 or key > 50000) and not has(value, toString(key)) and length(value) == 1 limit 10 settings max_block_size = 8192, max_threads = 1;
-drop table if exists lc;
+drop table if exists lc_00931;
diff --git a/dbms/tests/queries/0_stateless/00933_alter_ttl.sql b/dbms/tests/queries/0_stateless/00933_alter_ttl.sql
index e5c50c5ce60..55977dbbbbc 100644
--- a/dbms/tests/queries/0_stateless/00933_alter_ttl.sql
+++ b/dbms/tests/queries/0_stateless/00933_alter_ttl.sql
@@ -23,3 +23,4 @@ alter table test.ttl modify column a Int ttl d + interval 1 day;
desc table test.ttl;
alter table test.ttl modify column d Int ttl d + interval 1 day; -- { serverError 44}
+drop table if exists test.ttl;
diff --git a/dbms/tests/queries/0_stateless/00933_ttl_simple.sql b/dbms/tests/queries/0_stateless/00933_ttl_simple.sql
index ba9f7ee2e38..62b320cc0b0 100644
--- a/dbms/tests/queries/0_stateless/00933_ttl_simple.sql
+++ b/dbms/tests/queries/0_stateless/00933_ttl_simple.sql
@@ -1,66 +1,49 @@
-drop table if exists test.ttl;
+drop table if exists ttl_00933_1;
-create table test.ttl (d DateTime, a Int ttl d + interval 1 second, b Int ttl d + interval 1 second) engine = MergeTree order by tuple() partition by toMinute(d);
-insert into test.ttl values (now(), 1, 2);
-insert into test.ttl values (now(), 3, 4);
+create table ttl_00933_1 (d DateTime, a Int ttl d + interval 1 second, b Int ttl d + interval 1 second) engine = MergeTree order by tuple() partition by toMinute(d);
+insert into ttl_00933_1 values (now(), 1, 2);
+insert into ttl_00933_1 values (now(), 3, 4);
select sleep(1.1) format Null;
-optimize table test.ttl final;
-select a, b from test.ttl;
+optimize table ttl_00933_1 final;
+select a, b from ttl_00933_1;
-drop table if exists test.ttl;
+drop table if exists ttl_00933_1;
-create table test.ttl (d DateTime, a Int ttl d + interval 1 DAY) engine = MergeTree order by tuple() partition by toDayOfMonth(d);
-insert into test.ttl values (toDateTime('2000-10-10 00:00:00'), 1);
-insert into test.ttl values (toDateTime('2000-10-10 00:00:00'), 2);
-insert into test.ttl values (toDateTime('2000-10-10 00:00:00'), 3);
+create table ttl_00933_1 (d DateTime, a Int ttl d + interval 1 DAY) engine = MergeTree order by tuple() partition by toDayOfMonth(d);
+insert into ttl_00933_1 values (toDateTime('2000-10-10 00:00:00'), 1);
+insert into ttl_00933_1 values (toDateTime('2000-10-10 00:00:00'), 2);
+insert into ttl_00933_1 values (toDateTime('2000-10-10 00:00:00'), 3);
select sleep(0.7) format Null; -- wait if very fast merge happen
-optimize table test.ttl final;
-select * from test.ttl order by d;
+optimize table ttl_00933_1 final;
+select * from ttl_00933_1 order by d;
-drop table if exists test.ttl;
+drop table if exists ttl_00933_1;
-create table test.ttl (d DateTime, a Int) engine = MergeTree order by tuple() partition by tuple() ttl d + interval 1 day;
-insert into test.ttl values (toDateTime('2000-10-10 00:00:00'), 1);
-insert into test.ttl values (toDateTime('2000-10-10 00:00:00'), 2);
-insert into test.ttl values (toDateTime('2100-10-10 00:00:00'), 3);
+create table ttl_00933_1 (d DateTime, a Int) engine = MergeTree order by tuple() partition by tuple() ttl d + interval 1 day;
+insert into ttl_00933_1 values (toDateTime('2000-10-10 00:00:00'), 1);
+insert into ttl_00933_1 values (toDateTime('2000-10-10 00:00:00'), 2);
+insert into ttl_00933_1 values (toDateTime('2100-10-10 00:00:00'), 3);
select sleep(0.7) format Null; -- wait if very fast merge happen
-optimize table test.ttl final;
-select * from test.ttl order by d;
+optimize table ttl_00933_1 final;
+select * from ttl_00933_1 order by d;
-drop table if exists test.ttl;
+drop table if exists ttl_00933_1;
-create table test.ttl (d Date, a Int) engine = MergeTree order by a partition by toDayOfMonth(d) ttl d + interval 1 day;
-insert into test.ttl values (toDate('2000-10-10'), 1);
-insert into test.ttl values (toDate('2100-10-10'), 2);
+create table ttl_00933_1 (d Date, a Int) engine = MergeTree order by a partition by toDayOfMonth(d) ttl d + interval 1 day;
+insert into ttl_00933_1 values (toDate('2000-10-10'), 1);
+insert into ttl_00933_1 values (toDate('2100-10-10'), 2);
select sleep(0.7) format Null; -- wait if very fast merge happen
-optimize table test.ttl final;
-select * from test.ttl order by d;
+optimize table ttl_00933_1 final;
+select * from ttl_00933_1 order by d;
set send_logs_level = 'none';
-drop table if exists test.ttl;
+drop table if exists ttl_00933_1;
-create table test.ttl (d DateTime ttl d) engine = MergeTree order by tuple() partition by toSecond(d); -- { serverError 44}
-create table test.ttl (d DateTime, a Int ttl d) engine = MergeTree order by a partition by toSecond(d); -- { serverError 44}
-create table test.ttl (d DateTime, a Int ttl 2 + 2) engine = MergeTree order by tuple() partition by toSecond(d); -- { serverError 450 }
-create table test.ttl (d DateTime, a Int ttl toDateTime(1)) engine = MergeTree order by tuple() partition by toSecond(d); -- { serverError 450 }
-create table test.ttl (d DateTime, a Int ttl d - d) engine = MergeTree order by tuple() partition by toSecond(d); -- { serverError 450 }
+create table ttl_00933_1 (d DateTime ttl d) engine = MergeTree order by tuple() partition by toSecond(d); -- { serverError 44}
+create table ttl_00933_1 (d DateTime, a Int ttl d) engine = MergeTree order by a partition by toSecond(d); -- { serverError 44}
+create table ttl_00933_1 (d DateTime, a Int ttl 2 + 2) engine = MergeTree order by tuple() partition by toSecond(d); -- { serverError 450 }
+create table ttl_00933_1 (d DateTime, a Int ttl toDateTime(1)) engine = MergeTree order by tuple() partition by toSecond(d); -- { serverError 450 }
+create table ttl_00933_1 (d DateTime, a Int ttl d - d) engine = MergeTree order by tuple() partition by toSecond(d); -- { serverError 450 }
-/*
-
-Alexey Milovidov, [17.04.19 20:09]
-sleep(0.7)
-sleep(1.1)
-- почему? @Alesapin
-
-Alexander Sapin, [17.04.19 23:16]
-[In reply to Alexey Milovidov]
-1.1 по логике теста, я попробовал с 0.5 и у меня флапнуло. С 1 не флапало, но работало долго. Попробовал 0.7 и тоже не флапает.
-
-Alexey Milovidov, [17.04.19 23:18]
-Слабо такой комментарий добавить прямо в тест? :)
-
-Alexander Sapin, [17.04.19 23:20]
-как-то неловко :)
-
-*/
+drop table if exists ttl_00933_1;
diff --git a/dbms/tests/queries/0_stateless/00933_ttl_with_default.sql b/dbms/tests/queries/0_stateless/00933_ttl_with_default.sql
index b72d86f9e76..e6c0a6e700c 100644
--- a/dbms/tests/queries/0_stateless/00933_ttl_with_default.sql
+++ b/dbms/tests/queries/0_stateless/00933_ttl_with_default.sql
@@ -1,30 +1,31 @@
-drop table if exists test.ttl;
+drop table if exists ttl_00933_2;
-create table test.ttl (d DateTime, a Int default 111 ttl d + interval 1 DAY) engine = MergeTree order by tuple() partition by toDayOfMonth(d);
-insert into test.ttl values (toDateTime('2000-10-10 00:00:00'), 1);
-insert into test.ttl values (toDateTime('2000-10-10 00:00:00'), 2);
-insert into test.ttl values (toDateTime('2100-10-10 00:00:00'), 3);
-insert into test.ttl values (toDateTime('2100-10-10 00:00:00'), 4);
-optimize table test.ttl final;
-select a from test.ttl order by a;
+create table ttl_00933_2 (d DateTime, a Int default 111 ttl d + interval 1 DAY) engine = MergeTree order by tuple() partition by toDayOfMonth(d);
+insert into ttl_00933_2 values (toDateTime('2000-10-10 00:00:00'), 1);
+insert into ttl_00933_2 values (toDateTime('2000-10-10 00:00:00'), 2);
+insert into ttl_00933_2 values (toDateTime('2100-10-10 00:00:00'), 3);
+insert into ttl_00933_2 values (toDateTime('2100-10-10 00:00:00'), 4);
+optimize table ttl_00933_2 final;
+select a from ttl_00933_2 order by a;
-drop table if exists test.ttl;
+drop table if exists ttl_00933_2;
-create table test.ttl (d DateTime, a Int, b default a * 2 ttl d + interval 1 DAY) engine = MergeTree order by tuple() partition by toDayOfMonth(d);
-insert into test.ttl values (toDateTime('2000-10-10 00:00:00'), 1, 100);
-insert into test.ttl values (toDateTime('2000-10-10 00:00:00'), 2, 200);
-insert into test.ttl values (toDateTime('2100-10-10 00:00:00'), 3, 300);
-insert into test.ttl values (toDateTime('2100-10-10 00:00:00'), 4, 400);
-optimize table test.ttl final;
-select a, b from test.ttl order by a;
+create table ttl_00933_2 (d DateTime, a Int, b default a * 2 ttl d + interval 1 DAY) engine = MergeTree order by tuple() partition by toDayOfMonth(d);
+insert into ttl_00933_2 values (toDateTime('2000-10-10 00:00:00'), 1, 100);
+insert into ttl_00933_2 values (toDateTime('2000-10-10 00:00:00'), 2, 200);
+insert into ttl_00933_2 values (toDateTime('2100-10-10 00:00:00'), 3, 300);
+insert into ttl_00933_2 values (toDateTime('2100-10-10 00:00:00'), 4, 400);
+optimize table ttl_00933_2 final;
+select a, b from ttl_00933_2 order by a;
-drop table if exists test.ttl;
+drop table if exists ttl_00933_2;
-create table test.ttl (d DateTime, a Int, b default 222 ttl d + interval 1 DAY) engine = MergeTree order by tuple() partition by toDayOfMonth(d);
-insert into test.ttl values (toDateTime('2000-10-10 00:00:00'), 1, 5);
-insert into test.ttl values (toDateTime('2000-10-10 00:00:00'), 2, 10);
-insert into test.ttl values (toDateTime('2100-10-10 00:00:00'), 3, 15);
-insert into test.ttl values (toDateTime('2100-10-10 00:00:00'), 4, 20);
-optimize table test.ttl final;
-select a, b from test.ttl order by a;
+create table ttl_00933_2 (d DateTime, a Int, b default 222 ttl d + interval 1 DAY) engine = MergeTree order by tuple() partition by toDayOfMonth(d);
+insert into ttl_00933_2 values (toDateTime('2000-10-10 00:00:00'), 1, 5);
+insert into ttl_00933_2 values (toDateTime('2000-10-10 00:00:00'), 2, 10);
+insert into ttl_00933_2 values (toDateTime('2100-10-10 00:00:00'), 3, 15);
+insert into ttl_00933_2 values (toDateTime('2100-10-10 00:00:00'), 4, 20);
+optimize table ttl_00933_2 final;
+select a, b from ttl_00933_2 order by a;
+drop table if exists ttl_00933_2;
diff --git a/dbms/tests/queries/0_stateless/mergetree_mutations.lib b/dbms/tests/queries/0_stateless/mergetree_mutations.lib
index eb2f4030eba..d10ac883764 100644
--- a/dbms/tests/queries/0_stateless/mergetree_mutations.lib
+++ b/dbms/tests/queries/0_stateless/mergetree_mutations.lib
@@ -4,11 +4,13 @@ function wait_for_mutation()
{
local table=$1
local mutation_id=$2
+ local database=$3
+ database=${database:="${CLICKHOUSE_DATABASE}"}
for i in {1..100}
do
sleep 0.1
- if [[ $(${CLICKHOUSE_CLIENT} --query="SELECT min(is_done) FROM system.mutations WHERE table='$table' AND mutation_id='$mutation_id'") -eq 1 ]]; then
+ if [[ $(${CLICKHOUSE_CLIENT} --query="SELECT min(is_done) FROM system.mutations WHERE database='$database' AND table='$table' AND mutation_id='$mutation_id'") -eq 1 ]]; then
break
fi
diff --git a/dbms/tests/queries/1_stateful/00090_thread_pool_deadlock.sh b/dbms/tests/queries/1_stateful/00090_thread_pool_deadlock.sh
index c30f137e8a6..ce5a4085607 100755
--- a/dbms/tests/queries/1_stateful/00090_thread_pool_deadlock.sh
+++ b/dbms/tests/queries/1_stateful/00090_thread_pool_deadlock.sh
@@ -1,21 +1,24 @@
-#!/bin/sh
+#!/usr/bin/env bash
+
+CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+. $CURDIR/../shell_config.sh
echo '1';
-clickhouse-client --distributed_aggregation_memory_efficient=1 --group_by_two_level_threshold=1 --max_execution_time=1 --query="SELECT SearchPhrase AS k, count() AS c FROM remote('127.0.0.{1,2}', test.hits) GROUP BY k ORDER BY c DESC LIMIT 10" --format=Null 2>/dev/null;
+$CLICKHOUSE_CLIENT --distributed_aggregation_memory_efficient=1 --group_by_two_level_threshold=1 --max_execution_time=1 --query="SELECT SearchPhrase AS k, count() AS c FROM remote('127.0.0.{1,2}', test.hits) GROUP BY k ORDER BY c DESC LIMIT 10" --format=Null 2>/dev/null;
echo '2';
-clickhouse-client --distributed_aggregation_memory_efficient=1 --group_by_two_level_threshold=1 --max_execution_time=1 --query="SELECT SearchPhrase AS k, count() AS c FROM remote('127.0.0.{1,2,3}', test.hits) GROUP BY k ORDER BY c DESC LIMIT 10" --format=Null 2>/dev/null;
+$CLICKHOUSE_CLIENT --distributed_aggregation_memory_efficient=1 --group_by_two_level_threshold=1 --max_execution_time=1 --query="SELECT SearchPhrase AS k, count() AS c FROM remote('127.0.0.{1,2,3}', test.hits) GROUP BY k ORDER BY c DESC LIMIT 10" --format=Null 2>/dev/null;
echo '3';
-clickhouse-client --distributed_aggregation_memory_efficient=1 --group_by_two_level_threshold=1 --max_execution_time=1 --query="SELECT SearchPhrase AS k, count() AS c FROM remote('127.0.0.{1,2,3,4}', test.hits) GROUP BY k ORDER BY c DESC LIMIT 10" --format=Null 2>/dev/null;
+$CLICKHOUSE_CLIENT --distributed_aggregation_memory_efficient=1 --group_by_two_level_threshold=1 --max_execution_time=1 --query="SELECT SearchPhrase AS k, count() AS c FROM remote('127.0.0.{1,2,3,4}', test.hits) GROUP BY k ORDER BY c DESC LIMIT 10" --format=Null 2>/dev/null;
echo '4';
-clickhouse-client --distributed_aggregation_memory_efficient=1 --group_by_two_level_threshold=1 --max_execution_time=1 --query="SELECT SearchPhrase AS k, count() AS c FROM remote('127.0.0.{1,2,3,4,5}', test.hits) GROUP BY k ORDER BY c DESC LIMIT 10" --format=Null 2>/dev/null;
+$CLICKHOUSE_CLIENT --distributed_aggregation_memory_efficient=1 --group_by_two_level_threshold=1 --max_execution_time=1 --query="SELECT SearchPhrase AS k, count() AS c FROM remote('127.0.0.{1,2,3,4,5}', test.hits) GROUP BY k ORDER BY c DESC LIMIT 10" --format=Null 2>/dev/null;
echo '5';
-clickhouse-client --distributed_aggregation_memory_efficient=1 --group_by_two_level_threshold=1 --max_execution_time=1 --query="SELECT SearchPhrase AS k, count() AS c FROM remote('127.0.0.{1,2,3,4,5,6}', test.hits) GROUP BY k ORDER BY c DESC LIMIT 10" --format=Null 2>/dev/null;
+$CLICKHOUSE_CLIENT --distributed_aggregation_memory_efficient=1 --group_by_two_level_threshold=1 --max_execution_time=1 --query="SELECT SearchPhrase AS k, count() AS c FROM remote('127.0.0.{1,2,3,4,5,6}', test.hits) GROUP BY k ORDER BY c DESC LIMIT 10" --format=Null 2>/dev/null;
echo '6';
-clickhouse-client --distributed_aggregation_memory_efficient=1 --group_by_two_level_threshold=1 --max_execution_time=1 --query="SELECT SearchPhrase AS k, count() AS c FROM remote('127.0.0.{1,2,3,4,5,6,7}', test.hits) GROUP BY k ORDER BY c DESC LIMIT 10" --format=Null 2>/dev/null;
+$CLICKHOUSE_CLIENT --distributed_aggregation_memory_efficient=1 --group_by_two_level_threshold=1 --max_execution_time=1 --query="SELECT SearchPhrase AS k, count() AS c FROM remote('127.0.0.{1,2,3,4,5,6,7}', test.hits) GROUP BY k ORDER BY c DESC LIMIT 10" --format=Null 2>/dev/null;
echo '7';
-clickhouse-client --distributed_aggregation_memory_efficient=1 --group_by_two_level_threshold=1 --max_execution_time=1 --query="SELECT SearchPhrase AS k, count() AS c FROM remote('127.0.0.{1,2,3,4,5,6,7,8}', test.hits) GROUP BY k ORDER BY c DESC LIMIT 10" --format=Null 2>/dev/null;
+$CLICKHOUSE_CLIENT --distributed_aggregation_memory_efficient=1 --group_by_two_level_threshold=1 --max_execution_time=1 --query="SELECT SearchPhrase AS k, count() AS c FROM remote('127.0.0.{1,2,3,4,5,6,7,8}', test.hits) GROUP BY k ORDER BY c DESC LIMIT 10" --format=Null 2>/dev/null;
echo '8';
-clickhouse-client --distributed_aggregation_memory_efficient=1 --group_by_two_level_threshold=1 --max_execution_time=1 --query="SELECT SearchPhrase AS k, count() AS c FROM remote('127.0.0.{1,2,3,4,5,6,7,8,9}', test.hits) GROUP BY k ORDER BY c DESC LIMIT 10" --format=Null 2>/dev/null;
+$CLICKHOUSE_CLIENT --distributed_aggregation_memory_efficient=1 --group_by_two_level_threshold=1 --max_execution_time=1 --query="SELECT SearchPhrase AS k, count() AS c FROM remote('127.0.0.{1,2,3,4,5,6,7,8,9}', test.hits) GROUP BY k ORDER BY c DESC LIMIT 10" --format=Null 2>/dev/null;
echo '9';
-clickhouse-client --distributed_aggregation_memory_efficient=1 --group_by_two_level_threshold=1 --max_execution_time=1 --query="SELECT SearchPhrase AS k, count() AS c FROM remote('127.0.0.{1,2,3,4,5,6,7,8,9,10}', test.hits) GROUP BY k ORDER BY c DESC LIMIT 10" --format=Null 2>/dev/null;
+$CLICKHOUSE_CLIENT --distributed_aggregation_memory_efficient=1 --group_by_two_level_threshold=1 --max_execution_time=1 --query="SELECT SearchPhrase AS k, count() AS c FROM remote('127.0.0.{1,2,3,4,5,6,7,8,9,10}', test.hits) GROUP BY k ORDER BY c DESC LIMIT 10" --format=Null 2>/dev/null;
echo '10';
diff --git a/debian/changelog b/debian/changelog
index 48e6a8c19bd..9cc1178d4c0 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,5 +1,5 @@
-clickhouse (19.6.1.1) unstable; urgency=low
+clickhouse (19.7.1.1) unstable; urgency=low
* Modified source code
- -- clickhouse-release Tue, 02 Apr 2019 19:28:15 +300
+ -- clickhouse-release Fri, 19 Apr 2019 00:01:38 +0300
diff --git a/docker/client/Dockerfile b/docker/client/Dockerfile
index 995cdbf29af..ffd196bc27c 100644
--- a/docker/client/Dockerfile
+++ b/docker/client/Dockerfile
@@ -1,7 +1,7 @@
FROM ubuntu:18.04
ARG repository="deb http://repo.yandex.ru/clickhouse/deb/stable/ main/"
-ARG version=19.6.1.*
+ARG version=19.7.1.*
RUN apt-get update \
&& apt-get install --yes --no-install-recommends \
diff --git a/docker/server/Dockerfile b/docker/server/Dockerfile
index 07f49a044c9..aaf395ee216 100644
--- a/docker/server/Dockerfile
+++ b/docker/server/Dockerfile
@@ -1,7 +1,7 @@
FROM ubuntu:18.04
ARG repository="deb http://repo.yandex.ru/clickhouse/deb/stable/ main/"
-ARG version=19.6.1.*
+ARG version=19.7.1.*
ARG gosu_ver=1.10
RUN apt-get update \
diff --git a/docker/test/Dockerfile b/docker/test/Dockerfile
index 08c5014405c..0b7740bf098 100644
--- a/docker/test/Dockerfile
+++ b/docker/test/Dockerfile
@@ -1,7 +1,7 @@
FROM ubuntu:18.04
ARG repository="deb http://repo.yandex.ru/clickhouse/deb/stable/ main/"
-ARG version=19.6.1.*
+ARG version=19.7.1.*
RUN apt-get update && \
apt-get install -y apt-transport-https dirmngr && \
diff --git a/docs/en/operations/table_engines/replication.md b/docs/en/operations/table_engines/replication.md
index 0b4ccc496fa..d6900ca2f32 100644
--- a/docs/en/operations/table_engines/replication.md
+++ b/docs/en/operations/table_engines/replication.md
@@ -55,7 +55,7 @@ For very large clusters, you can use different ZooKeeper clusters for different
Replication is asynchronous and multi-master. `INSERT` queries (as well as `ALTER`) can be sent to any available server. Data is inserted on the server where the query is run, and then it is copied to the other servers. Because it is asynchronous, recently inserted data appears on the other replicas with some latency. If part of the replicas are not available, the data is written when they become available. If a replica is available, the latency is the amount of time it takes to transfer the block of compressed data over the network.
-By default, an INSERT query waits for confirmation of writing the data from only one replica. If the data was successfully written to only one replica and the server with this replica ceases to exist, the stored data will be lost. Tp enable getting confirmation of data writes from multiple replicas, use the `insert_quorum` option.
+By default, an INSERT query waits for confirmation of writing the data from only one replica. If the data was successfully written to only one replica and the server with this replica ceases to exist, the stored data will be lost. To enable getting confirmation of data writes from multiple replicas, use the `insert_quorum` option.
Each block of data is written atomically. The INSERT query is divided into blocks up to `max_insert_block_size = 1048576` rows. In other words, if the `INSERT` query has less than 1048576 rows, it is made atomically.
diff --git a/docs/en/query_language/select.md b/docs/en/query_language/select.md
index 458fa732f81..5e0877430b2 100644
--- a/docs/en/query_language/select.md
+++ b/docs/en/query_language/select.md
@@ -156,7 +156,7 @@ Here, a sample of 10% is taken from the second half of the data.
### ARRAY JOIN Clause {#select-array-join-clause}
-Allows executing `JOIN` with an array or nested data structure. Allows you to perform `JOIN` both with the external array and with the inner array in the table. The intent is similar to the [arrayJoin](functions/array_functions.md#array_functions-join) function, but its functionality is broader.
+Allows executing `JOIN` with an array or nested data structure. The intent is similar to the [arrayJoin](functions/array_join.md#functions_arrayjoin) function, but its functionality is broader.
``` sql
SELECT
@@ -168,14 +168,14 @@ FROM
You can specify only a single `ARRAY JOIN` clause in a query.
-When running the `ARRAY JOIN`, there is an optimization of the query execution order. Although the `ARRAY JOIN` must be always specified before the `WHERE/PREWHERE` clause, it can be performed as before the `WHERE/PREWHERE` (if its result is needed in this clause), as after completing it (to reduce the volume of calculations). The processing order is controlled by the query optimizer.
+The query execution order is optimized when running `ARRAY JOIN`. Although `ARRAY JOIN` must always be specified before the `WHERE/PREWHERE` clause, it can be performed either before `WHERE/PREWHERE` (if the result is needed in this clause), or after completing it (to reduce the volume of calculations). The processing order is controlled by the query optimizer.
Supported types of `ARRAY JOIN` are listed below:
-- `ARRAY JOIN` - Executing `JOIN` with an array or nested data structure. Empty arrays are not included in the result.
-- `LEFT ARRAY JOIN` - Unlike `ARRAY JOIN`, when using the `LEFT ARRAY JOIN` the result contains the rows with empty arrays. The value for an empty array is set to default value for an array element type (usually 0, empty string or NULL).
+- `ARRAY JOIN` - In this case, empty arrays are not included in the result of `JOIN`.
+- `LEFT ARRAY JOIN` - The result of `JOIN` contains rows with empty arrays. The value for an empty array is set to the default value for the array element type (usually 0, empty string or NULL).
-Examples below demonstrate the usage of the `ARRAY JOIN` clause. Let's create a table with an [Array](../data_types/array.md) type column and insert values into it:
+The examples below demonstrate the usage of the `ARRAY JOIN` and `LEFT ARRAY JOIN` clauses. Let's create a table with an [Array](../data_types/array.md) type column and insert values into it:
``` sql
CREATE TABLE arrays_test
@@ -195,7 +195,7 @@ VALUES ('Hello', [1,2]), ('World', [3,4,5]), ('Goodbye', []);
└─────────────┴─────────┘
```
-The first example shows using the `ARRAY JOIN` clause:
+The example below uses the `ARRAY JOIN` clause:
``` sql
SELECT s, arr
@@ -212,7 +212,7 @@ ARRAY JOIN arr;
└───────┴─────┘
```
-The second example shows using the `LEFT ARRAY JOIN` clause:
+The next example uses the `LEFT ARRAY JOIN` clause:
``` sql
SELECT s, arr
@@ -230,7 +230,27 @@ LEFT ARRAY JOIN arr;
└─────────────┴─────┘
```
-The next example demonstrates using the `ARRAY JOIN` with the external array:
+#### Using Aliases
+
+An alias can be specified for an array in the `ARRAY JOIN` clause. In this case, an array item can be accessed by this alias, but the array itself is accessed by the original name. Example:
+
+``` sql
+SELECT s, arr, a
+FROM arrays_test
+ARRAY JOIN arr AS a;
+```
+
+```
+┌─s─────┬─arr─────┬─a─┐
+│ Hello │ [1,2] │ 1 │
+│ Hello │ [1,2] │ 2 │
+│ World │ [3,4,5] │ 3 │
+│ World │ [3,4,5] │ 4 │
+│ World │ [3,4,5] │ 5 │
+└───────┴─────────┴───┘
+```
+
+Using aliases, you can perform `ARRAY JOIN` with an external array. For example:
``` sql
SELECT s, arr_external
@@ -252,27 +272,7 @@ ARRAY JOIN [1, 2, 3] AS arr_external;
└─────────────┴──────────────┘
```
-#### Using Aliases
-
-An alias can be specified for an array in the `ARRAY JOIN` clause. In this case, an array item can be accessed by this alias, but the array itself by the original name. Example:
-
-``` sql
-SELECT s, arr, a
-FROM arrays_test
-ARRAY JOIN arr AS a;
-```
-
-```
-┌─s─────┬─arr─────┬─a─┐
-│ Hello │ [1,2] │ 1 │
-│ Hello │ [1,2] │ 2 │
-│ World │ [3,4,5] │ 3 │
-│ World │ [3,4,5] │ 4 │
-│ World │ [3,4,5] │ 5 │
-└───────┴─────────┴───┘
-```
-
-Multiple arrays of the same size can be comma-separated in the `ARRAY JOIN` clause. In this case, `JOIN` is performed with them simultaneously (the direct sum, not the cartesian product). Example:
+Multiple arrays can be comma-separated in the `ARRAY JOIN` clause. In this case, `JOIN` is performed with them simultaneously (the direct sum, not the cartesian product). Note that all the arrays must have the same size. Example:
``` sql
SELECT s, arr, a, num, mapped
@@ -290,6 +290,8 @@ ARRAY JOIN arr AS a, arrayEnumerate(arr) AS num, arrayMap(x -> x + 1, arr) AS ma
└───────┴─────────┴───┴─────┴────────┘
```
+The example below uses the [arrayEnumerate](functions/array_functions.md#array_functions-arrayenumerate) function:
+
``` sql
SELECT s, arr, a, num, arrayEnumerate(arr)
FROM arrays_test
@@ -308,7 +310,7 @@ ARRAY JOIN arr AS a, arrayEnumerate(arr) AS num;
#### ARRAY JOIN With Nested Data Structure
-`ARRAY JOIN` also works with [nested data structure](../data_types/nested_data_structures/nested.md). Example:
+`ARRAY JOIN` also works with [nested data structures](../data_types/nested_data_structures/nested.md). Example:
``` sql
CREATE TABLE nested_test
@@ -401,7 +403,7 @@ ARRAY JOIN nest AS n;
└───────┴─────┴─────┴─────────┴────────────┘
```
-The example of using the [arrayEnumerate](functions/array_functions.md#array_functions-arrayenumerate) function:
+Example of using the [arrayEnumerate](functions/array_functions.md#array_functions-arrayenumerate) function:
``` sql
SELECT s, `n.x`, `n.y`, `nest.x`, `nest.y`, num
@@ -444,7 +446,7 @@ The table names can be specified instead of `` and `
+FROM
+[LEFT] ARRAY JOIN
+[WHERE|PREWHERE ]
+...
```
-:) CREATE TABLE arrays_test (s String, arr Array(UInt8)) ENGINE = Memory
+В запросе может быть указано не более одной секции `ARRAY JOIN`.
+
+При использовании `ARRAY JOIN`, порядок выполнения запроса оптимизируется. Несмотря на то что секция `ARRAY JOIN` всегда указывается перед выражением `WHERE / PREWHERE`, преобразование `JOIN` может быть выполнено как до выполнения выражения `WHERE / PREWHERE` (если результат необходим в этом выражении), так и после (чтобы уменьшить объем расчетов). Порядок обработки контролируется оптимизатором запросов.
+
+Секция `ARRAY JOIN` поддерживает следующие формы записи:
+
+- `ARRAY JOIN` — в этом случае результат `JOIN` не будет содержать пустые массивы;
+- `LEFT ARRAY JOIN` — пустые массивы попадут в результат выполнения `JOIN`. В качестве значения для пустых массивов устанавливается значение по умолчанию. Обычно это 0, пустая строка или NULL, в зависимости от типа элементов массива.
+
+Рассмотрим примеры использования `ARRAY JOIN` и `LEFT ARRAY JOIN`. Для начала создадим таблицу, содержащую столбец с типом [Array](../data_types/array.md), и добавим в него значение:
+
+``` sql
CREATE TABLE arrays_test
(
s String,
arr Array(UInt8)
-) ENGINE = Memory
+) ENGINE = Memory;
-Ok.
+INSERT INTO arrays_test
+VALUES ('Hello', [1,2]), ('World', [3,4,5]), ('Goodbye', []);
+```
+```
+┌─s───────────┬─arr─────┐
+│ Hello │ [1,2] │
+│ World │ [3,4,5] │
+│ Goodbye │ [] │
+└─────────────┴─────────┘
+```
-0 rows in set. Elapsed: 0.001 sec.
-
-:) INSERT INTO arrays_test VALUES ('Hello', [1,2]), ('World', [3,4,5]), ('Goodbye', [])
-
-INSERT INTO arrays_test VALUES
-
-Ok.
-
-3 rows in set. Elapsed: 0.001 sec.
-
-:) SELECT * FROM arrays_test
-
-SELECT *
-FROM arrays_test
-
-┌─s───────┬─arr─────┐
-│ Hello │ [1,2] │
-│ World │ [3,4,5] │
-│ Goodbye │ [] │
-└─────────┴─────────┘
-
-3 rows in set. Elapsed: 0.001 sec.
-
-:) SELECT s, arr FROM arrays_test ARRAY JOIN arr
+В примере ниже используется `ARRAY JOIN`:
+``` sql
SELECT s, arr
FROM arrays_test
-ARRAY JOIN arr
-
+ARRAY JOIN arr;
+```
+```
┌─s─────┬─arr─┐
│ Hello │ 1 │
│ Hello │ 2 │
@@ -212,19 +215,37 @@ ARRAY JOIN arr
│ World │ 4 │
│ World │ 5 │
└───────┴─────┘
-
-5 rows in set. Elapsed: 0.001 sec.
```
-Для массива в секции ARRAY JOIN может быть указан алиас. В этом случае, элемент массива будет доступен под этим алиасом, а сам массив - под исходным именем. Пример:
+Следующий пример использует `LEFT ARRAY JOIN`:
+``` sql
+SELECT s, arr
+FROM arrays_test
+LEFT ARRAY JOIN arr;
```
-:) SELECT s, arr, a FROM arrays_test ARRAY JOIN arr AS a
+```
+┌─s───────────┬─arr─┐
+│ Hello │ 1 │
+│ Hello │ 2 │
+│ World │ 3 │
+│ World │ 4 │
+│ World │ 5 │
+│ Goodbye │ 0 │
+└─────────────┴─────┘
+```
+#### Использование алиасов
+
+Для массива в секции `ARRAY JOIN` может быть указан алиас. В этом случае, элемент массива будет доступен под этим алиасом, а сам массив — под исходным именем. Пример:
+
+``` sql
SELECT s, arr, a
FROM arrays_test
-ARRAY JOIN arr AS a
+ARRAY JOIN arr AS a;
+```
+```
┌─s─────┬─arr─────┬─a─┐
│ Hello │ [1,2] │ 1 │
│ Hello │ [1,2] │ 2 │
@@ -232,19 +253,39 @@ ARRAY JOIN arr AS a
│ World │ [3,4,5] │ 4 │
│ World │ [3,4,5] │ 5 │
└───────┴─────────┴───┘
-
-5 rows in set. Elapsed: 0.001 sec.
```
-В секции ARRAY JOIN может быть указано несколько массивов одинаковых размеров через запятую. В этом случае, JOIN делается с ними одновременно (прямая сумма, а не прямое произведение). Пример:
+Используя алиасы, можно выполнять `JOIN` с внешними массивами:
+
+``` sql
+SELECT s, arr_external
+FROM arrays_test
+ARRAY JOIN [1, 2, 3] AS arr_external;
+```
```
-:) SELECT s, arr, a, num, mapped FROM arrays_test ARRAY JOIN arr AS a, arrayEnumerate(arr) AS num, arrayMap(x -> x + 1, arr) AS mapped
+┌─s───────────┬─arr_external─┐
+│ Hello │ 1 │
+│ Hello │ 2 │
+│ Hello │ 3 │
+│ World │ 1 │
+│ World │ 2 │
+│ World │ 3 │
+│ Goodbye │ 1 │
+│ Goodbye │ 2 │
+│ Goodbye │ 3 │
+└─────────────┴──────────────┘
+```
+В секции `ARRAY JOIN` можно указать через запятую сразу несколько массивов. В этом случае, `JOIN` делается с ними одновременно (прямая сумма, а не прямое произведение). Обратите внимание, массивы должны быть одинаковых размеров. Примеры:
+
+``` sql
SELECT s, arr, a, num, mapped
FROM arrays_test
-ARRAY JOIN arr AS a, arrayEnumerate(arr) AS num, arrayMap(lambda(tuple(x), plus(x, 1)), arr) AS mapped
+ARRAY JOIN arr AS a, arrayEnumerate(arr) AS num, arrayMap(x -> x + 1, arr) AS mapped;
+```
+```
┌─s─────┬─arr─────┬─a─┬─num─┬─mapped─┐
│ Hello │ [1,2] │ 1 │ 1 │ 2 │
│ Hello │ [1,2] │ 2 │ 2 │ 3 │
@@ -252,15 +293,17 @@ ARRAY JOIN arr AS a, arrayEnumerate(arr) AS num, arrayMap(lambda(tuple(x), plus(
│ World │ [3,4,5] │ 4 │ 2 │ 5 │
│ World │ [3,4,5] │ 5 │ 3 │ 6 │
└───────┴─────────┴───┴─────┴────────┘
+```
-5 rows in set. Elapsed: 0.002 sec.
-
-:) SELECT s, arr, a, num, arrayEnumerate(arr) FROM arrays_test ARRAY JOIN arr AS a, arrayEnumerate(arr) AS num
+В примере ниже используется функция [arrayEnumerate](functions/array_functions.md#array_functions-arrayenumerate):
+``` sql
SELECT s, arr, a, num, arrayEnumerate(arr)
FROM arrays_test
-ARRAY JOIN arr AS a, arrayEnumerate(arr) AS num
+ARRAY JOIN arr AS a, arrayEnumerate(arr) AS num;
+```
+```
┌─s─────┬─arr─────┬─a─┬─num─┬─arrayEnumerate(arr)─┐
│ Hello │ [1,2] │ 1 │ 1 │ [1,2] │
│ Hello │ [1,2] │ 2 │ 2 │ [1,2] │
@@ -268,54 +311,40 @@ ARRAY JOIN arr AS a, arrayEnumerate(arr) AS num
│ World │ [3,4,5] │ 4 │ 2 │ [1,2,3] │
│ World │ [3,4,5] │ 5 │ 3 │ [1,2,3] │
└───────┴─────────┴───┴─────┴─────────────────────┘
-
-5 rows in set. Elapsed: 0.002 sec.
```
-ARRAY JOIN также работает с вложенными структурами данных. Пример:
+#### ARRAY JOIN с вложенными структурами данных
-```
-:) CREATE TABLE nested_test (s String, nest Nested(x UInt8, y UInt32)) ENGINE = Memory
+`ARRAY JOIN` также работает с [вложенными структурами данных](../data_types/nested_data_structures/nested.md). Пример:
+``` sql
CREATE TABLE nested_test
(
s String,
nest Nested(
x UInt8,
y UInt32)
-) ENGINE = Memory
+) ENGINE = Memory;
-Ok.
-
-0 rows in set. Elapsed: 0.006 sec.
-
-:) INSERT INTO nested_test VALUES ('Hello', [1,2], [10,20]), ('World', [3,4,5], [30,40,50]), ('Goodbye', [], [])
-
-INSERT INTO nested_test VALUES
-
-Ok.
-
-3 rows in set. Elapsed: 0.001 sec.
-
-:) SELECT * FROM nested_test
-
-SELECT *
-FROM nested_test
+INSERT INTO nested_test
+VALUES ('Hello', [1,2], [10,20]), ('World', [3,4,5], [30,40,50]), ('Goodbye', [], []);
+```
+```
┌─s───────┬─nest.x──┬─nest.y─────┐
│ Hello │ [1,2] │ [10,20] │
│ World │ [3,4,5] │ [30,40,50] │
│ Goodbye │ [] │ [] │
└─────────┴─────────┴────────────┘
+```
-3 rows in set. Elapsed: 0.001 sec.
-
-:) SELECT s, nest.x, nest.y FROM nested_test ARRAY JOIN nest
-
+``` sql
SELECT s, `nest.x`, `nest.y`
FROM nested_test
-ARRAY JOIN nest
+ARRAY JOIN nest;
+```
+```
┌─s─────┬─nest.x─┬─nest.y─┐
│ Hello │ 1 │ 10 │
│ Hello │ 2 │ 20 │
@@ -323,19 +352,17 @@ ARRAY JOIN nest
│ World │ 4 │ 40 │
│ World │ 5 │ 50 │
└───────┴────────┴────────┘
-
-5 rows in set. Elapsed: 0.001 sec.
```
-При указании имени вложенной структуры данных в ARRAY JOIN, смысл такой же, как ARRAY JOIN со всеми элементами-массивами, из которых она состоит. Пример:
-
-```
-:) SELECT s, nest.x, nest.y FROM nested_test ARRAY JOIN nest.x, nest.y
+При указании имени вложенной структуры данных в `ARRAY JOIN`, смысл такой же, как `ARRAY JOIN` со всеми элементами-массивами, из которых она состоит. Пример:
+``` sql
SELECT s, `nest.x`, `nest.y`
FROM nested_test
-ARRAY JOIN `nest.x`, `nest.y`
+ARRAY JOIN `nest.x`, `nest.y`;
+```
+```
┌─s─────┬─nest.x─┬─nest.y─┐
│ Hello │ 1 │ 10 │
│ Hello │ 2 │ 20 │
@@ -343,19 +370,17 @@ ARRAY JOIN `nest.x`, `nest.y`
│ World │ 4 │ 40 │
│ World │ 5 │ 50 │
└───────┴────────┴────────┘
-
-5 rows in set. Elapsed: 0.001 sec.
```
Такой вариант тоже имеет смысл:
-```
-:) SELECT s, nest.x, nest.y FROM nested_test ARRAY JOIN nest.x
-
+``` sql
SELECT s, `nest.x`, `nest.y`
FROM nested_test
-ARRAY JOIN `nest.x`
+ARRAY JOIN `nest.x`;
+```
+```
┌─s─────┬─nest.x─┬─nest.y─────┐
│ Hello │ 1 │ [10,20] │
│ Hello │ 2 │ [10,20] │
@@ -363,19 +388,17 @@ ARRAY JOIN `nest.x`
│ World │ 4 │ [30,40,50] │
│ World │ 5 │ [30,40,50] │
└───────┴────────┴────────────┘
-
-5 rows in set. Elapsed: 0.001 sec.
```
-Алиас для вложенной структуры данных можно использовать, чтобы выбрать как результат JOIN-а, так и исходный массив. Пример:
-
-```
-:) SELECT s, n.x, n.y, nest.x, nest.y FROM nested_test ARRAY JOIN nest AS n
+Алиас для вложенной структуры данных можно использовать, чтобы выбрать как результат `JOIN`-а, так и исходный массив. Пример:
+``` sql
SELECT s, `n.x`, `n.y`, `nest.x`, `nest.y`
FROM nested_test
-ARRAY JOIN nest AS n
+ARRAY JOIN nest AS n;
+```
+```
┌─s─────┬─n.x─┬─n.y─┬─nest.x──┬─nest.y─────┐
│ Hello │ 1 │ 10 │ [1,2] │ [10,20] │
│ Hello │ 2 │ 20 │ [1,2] │ [10,20] │
@@ -383,19 +406,17 @@ ARRAY JOIN nest AS n
│ World │ 4 │ 40 │ [3,4,5] │ [30,40,50] │
│ World │ 5 │ 50 │ [3,4,5] │ [30,40,50] │
└───────┴─────┴─────┴─────────┴────────────┘
-
-5 rows in set. Elapsed: 0.001 sec.
```
-Пример использования функции arrayEnumerate:
-
-```
-:) SELECT s, n.x, n.y, nest.x, nest.y, num FROM nested_test ARRAY JOIN nest AS n, arrayEnumerate(nest.x) AS num
+Пример использования функции [arrayEnumerate](functions/array_functions.md#array_functions-arrayenumerate):
+``` sql
SELECT s, `n.x`, `n.y`, `nest.x`, `nest.y`, num
FROM nested_test
-ARRAY JOIN nest AS n, arrayEnumerate(`nest.x`) AS num
+ARRAY JOIN nest AS n, arrayEnumerate(`nest.x`) AS num;
+```
+```
┌─s─────┬─n.x─┬─n.y─┬─nest.x──┬─nest.y─────┬─num─┐
│ Hello │ 1 │ 10 │ [1,2] │ [10,20] │ 1 │
│ Hello │ 2 │ 20 │ [1,2] │ [10,20] │ 2 │
@@ -403,16 +424,8 @@ ARRAY JOIN nest AS n, arrayEnumerate(`nest.x`) AS num
│ World │ 4 │ 40 │ [3,4,5] │ [30,40,50] │ 2 │
│ World │ 5 │ 50 │ [3,4,5] │ [30,40,50] │ 3 │
└───────┴─────┴─────┴─────────┴────────────┴─────┘
-
-5 rows in set. Elapsed: 0.002 sec.
```
-В запросе может быть указано не более одной секции ARRAY JOIN.
-
-Соответствующее преобразование может выполняться как до секции WHERE/PREWHERE (если его результат нужен в этой секции), так и после выполнения WHERE/PREWHERE (чтобы уменьшить объём вычислений).
-
-
-
### Секция JOIN {#select-join}
Соединяет данные в привычном для [SQL JOIN](https://en.wikipedia.org/wiki/Join_(SQL)) смысле.
diff --git a/docs/zh/operations/table_engines/mergetree.md b/docs/zh/operations/table_engines/mergetree.md
index ac671887dea..c8febd0a21b 100644
--- a/docs/zh/operations/table_engines/mergetree.md
+++ b/docs/zh/operations/table_engines/mergetree.md
@@ -1,8 +1,8 @@
# MergeTree {#table_engines-mergetree}
-Clickhouse 中最强大的表引擎当属 `MergeTree` (合并树)引擎及该家族(`*MergeTree`)中的其他引擎。
+Clickhouse 中最强大的表引擎当属 `MergeTree` (合并树)引擎及该系列(`*MergeTree`)中的其他引擎。
-`MergeTree` 引擎家族的基本理念如下。当你有巨量数据要插入到表中,你要高效地一批批写入数据分片,并希望这些数据分片在后台按照一定规则合并。相比在插入时不断修改(重写)数据进存储,这种策略会高效很多。
+`MergeTree` 引擎系列的基本理念如下。当你有巨量数据要插入到表中,你要高效地一批批写入数据片段,并希望这些数据片段在后台按照一定规则合并。相比在插入时不断修改(重写)数据进存储,这种策略会高效很多。
主要特点:
@@ -16,14 +16,14 @@ Clickhouse 中最强大的表引擎当属 `MergeTree` (合并树)引擎及
- 支持数据副本。
- `ReplicatedMergeTree` 家族的表便是用于此。更多信息,请参阅 [数据副本](replication.md) 一节。
+ `ReplicatedMergeTree` 系列的表便是用于此。更多信息,请参阅 [数据副本](replication.md) 一节。
- 支持数据采样。
需要的话,你可以给表设置一个采样方法。
!!! 注意
- [Merge](merge.md) 引擎并不属于 `*MergeTree` 家族。
+ [Merge](merge.md) 引擎并不属于 `*MergeTree` 系列。
## 建表 {#table_engine-mergetree-creating-a-table}
@@ -70,8 +70,8 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
- `SETTINGS` — 影响 `MergeTree` 性能的额外参数:
- `index_granularity` — 索引粒度。即索引中相邻『标记』间的数据行数。默认值,8192 。该列表中所有可用的参数可以从这里查看 [MergeTreeSettings.h](https://github.com/yandex/ClickHouse/blob/master/dbms/src/Storages/MergeTree/MergeTreeSettings.h) 。
- - `use_minimalistic_part_header_in_zookeeper` — 数据分片头在 ZooKeeper 中的存储方式。如果设置了 `use_minimalistic_part_header_in_zookeeper=1` ,ZooKeeper 会存储更少的数据。更多信息参考『服务配置参数』这章中的 [设置描述](../server_settings/settings.md#server-settings-use_minimalistic_part_header_in_zookeeper) 。
- - `min_merge_bytes_to_use_direct_io` — 使用直接 I/O 来操作磁盘的合并操作时要求的最小数据量。合并数据分片时,ClickHouse 会计算要被合并的所有数据的总存储空间。如果大小超过了 `min_merge_bytes_to_use_direct_io` 设置的字节数,则 ClickHouse 将使用直接 I/O 接口(`O_DIRECT` 选项)对磁盘读写。如果设置 `min_merge_bytes_to_use_direct_io = 0` ,则会禁用直接 I/O。默认值:`10 * 1024 * 1024 * 1024` 字节。
+ - `use_minimalistic_part_header_in_zookeeper` — 数据片段头在 ZooKeeper 中的存储方式。如果设置了 `use_minimalistic_part_header_in_zookeeper=1` ,ZooKeeper 会存储更少的数据。更多信息参考『服务配置参数』这章中的 [设置描述](../server_settings/settings.md#server-settings-use_minimalistic_part_header_in_zookeeper) 。
+ - `min_merge_bytes_to_use_direct_io` — 使用直接 I/O 来操作磁盘的合并操作时要求的最小数据量。合并数据片段时,ClickHouse 会计算要被合并的所有数据的总存储空间。如果大小超过了 `min_merge_bytes_to_use_direct_io` 设置的字节数,则 ClickHouse 将使用直接 I/O 接口(`O_DIRECT` 选项)对磁盘读写。如果设置 `min_merge_bytes_to_use_direct_io = 0` ,则会禁用直接 I/O。默认值:`10 * 1024 * 1024 * 1024` 字节。
**示例配置**
@@ -117,17 +117,17 @@ MergeTree(EventDate, intHash32(UserID), (CounterID, EventDate, intHash32(UserID)
## 数据存储
-表由按主键排序的数据 *分片* 组成。
+表由按主键排序的数据 *片段* 组成。
-当数据被插入到表中时,会分成数据分片并按主键的字典序排序。例如,主键是 `(CounterID, Date)` 时,分片中数据按 `CounterID` 排序,具有相同 `CounterID` 的部分按 `Date` 排序。
+当数据被插入到表中时,会分成数据片段并按主键的字典序排序。例如,主键是 `(CounterID, Date)` 时,片段中数据按 `CounterID` 排序,具有相同 `CounterID` 的部分按 `Date` 排序。
-不同分区的数据会被分成不同的分片,ClickHouse 在后台合并数据分片以便更高效存储。不会合并来自不同分区的数据分片。这个合并机制并不保证相同主键的所有行都会合并到同一个数据分片中。
+不同分区的数据会被分成不同的片段,ClickHouse 在后台合并数据片段以便更高效存储。不会合并来自不同分区的数据片段。这个合并机制并不保证相同主键的所有行都会合并到同一个数据片段中。
-ClickHouse 会为每个数据分片创建一个索引文件,索引文件包含每个索引行(『标记』)的主键值。索引行号定义为 `n * index_granularity` 。最大的 `n` 等于总行数除以 `index_granularity` 的值的整数部分。对于每列,跟主键相同的索引行处也会写入『标记』。这些『标记』让你可以直接找到数据所在的列。
+ClickHouse 会为每个数据片段创建一个索引文件,索引文件包含每个索引行(『标记』)的主键值。索引行号定义为 `n * index_granularity` 。最大的 `n` 等于总行数除以 `index_granularity` 的值的整数部分。对于每列,跟主键相同的索引行处也会写入『标记』。这些『标记』让你可以直接找到数据所在的列。
你可以只用一单一大表并不断地一块块往里面加入数据 – `MergeTree` 引擎的就是为了这样的场景。
-## 主键和索引在查询中的表现 {#primary-keys-and-indexes-in-queriesko
+## 主键和索引在查询中的表现 {#primary-keys-and-indexes-in-queries}
我们以 `(CounterID, Date)` 以主键。排序好的索引的图示会是下面这样:
@@ -166,7 +166,7 @@ ClickHouse 不要求主键惟一。所以,你可以插入多条具有相同主
- 改善数据压缩。
- ClickHouse 以主键排序分片数据,所以,数据的一致性越高,压缩越好。
+ ClickHouse 以主键排序片段数据,所以,数据的一致性越高,压缩越好。
- [CollapsingMergeTree](collapsingmergetree.md#table_engine-collapsingmergetree) 和 [SummingMergeTree](summingmergetree.md) 引擎里,数据合并时,会有额外的处理逻辑。
@@ -177,7 +177,7 @@ ClickHouse 不要求主键惟一。所以,你可以插入多条具有相同主
### 选择跟排序键不一样主键
-指定一个跟排序键(用于排序数据分片中行的表达式)
+指定一个跟排序键(用于排序数据片段中行的表达式)
不一样的主键(用于计算写到索引文件的每个标记值的表达式)是可以的。
这种情况下,主键表达式元组必须是排序键表达式元组的一个前缀。
@@ -192,7 +192,7 @@ ClickHouse 不要求主键惟一。所以,你可以插入多条具有相同主
这种情况下,主键中仅预留少量列保证高效范围扫描,
剩下的维度列放到排序键元组里。这样是合理的。
-[排序键的修改](../../query_language/alter.md) 是轻量级的操作,因为一个新列同时被加入到表里和排序键后时,已存在的数据分片并不需要修改。由于旧的排序键是新排序键的前缀,并且刚刚添加的列中没有数据,因此在表修改时的数据对于新旧的排序键来说都是有序的。
+[排序键的修改](../../query_language/alter.md) 是轻量级的操作,因为一个新列同时被加入到表里和排序键后时,已存在的数据片段并不需要修改。由于旧的排序键是新排序键的前缀,并且刚刚添加的列中没有数据,因此在表修改时的数据对于新旧的排序键来说都是有序的。
### 索引和分区在查询中的应用
@@ -238,7 +238,7 @@ SELECT count() FROM table WHERE CounterID = 34 OR URL LIKE '%upyachka%'
INDEX index_name expr TYPE type(...) GRANULARITY granularity_value
```
-`*MergeTree` 家族的表都能指定跳数索引。
+`*MergeTree` 系列的表都能指定跳数索引。
这些索引是由数据块按粒度分割后的每部分在指定表达式上汇总信息 `granularity_value` 组成(粒度大小用表引擎里 `index_granularity` 的指定)。
这些汇总信息有助于用 `where` 语句跳过大片不满足的数据,从而减少 `SELECT` 查询从磁盘读取的数据量,
@@ -292,7 +292,7 @@ INDEX sample_index3 (lower(str), str) TYPE ngrambf_v1(3, 256, 2, 0) GRANULARITY
## 并发数据访问
-应对表的并发访问,我们使用多版本机制。换言之,当同时读和更新表时,数据从当前查询到的一组分片中读取。没有冗长的的锁。插入不会阻碍读取。
+应对表的并发访问,我们使用多版本机制。换言之,当同时读和更新表时,数据从当前查询到的一组片段中读取。没有冗长的的锁。插入不会阻碍读取。
对表的读操作是自动并行的。
diff --git a/docs/zh/operations/table_engines/replication.md b/docs/zh/operations/table_engines/replication.md
index 0564408ca76..76a465de978 100644
--- a/docs/zh/operations/table_engines/replication.md
+++ b/docs/zh/operations/table_engines/replication.md
@@ -1,27 +1,28 @@
-# Data Replication {#table_engines-replication}
+# 数据副本 {#table_engines-replication}
-Replication is only supported for tables in the MergeTree family:
+只有 MergeTree 系列里的表可支持副本:
- ReplicatedMergeTree
- ReplicatedSummingMergeTree
- ReplicatedReplacingMergeTree
- ReplicatedAggregatingMergeTree
- ReplicatedCollapsingMergeTree
+- ReplicatedVersionedCollapsingMergeTree
- ReplicatedGraphiteMergeTree
-Replication works at the level of an individual table, not the entire server. A server can store both replicated and non-replicated tables at the same time.
+副本是表级别的,不是整个服务器级的。所以,服务器里可以同时有复制表和非复制表。
-Replication does not depend on sharding. Each shard has its own independent replication.
+副本不依赖分片。每个分片有它自己的独立副本。
-Compressed data for `INSERT` and `ALTER` queries is replicated (for more information, see the documentation for [ALTER](../../query_language/alter.md#query_language_queries_alter)).
+对于 `INSERT` 和 `ALTER` 语句操作数据的会在压缩的情况下被复制(更多信息,看 [ALTER](../../query_language/alter.md#query_language_queries_alter) )。
-`CREATE`, `DROP`, `ATTACH`, `DETACH` and `RENAME` queries are executed on a single server and are not replicated:
+而 `CREATE`,`DROP`,`ATTACH`,`DETACH` 和 `RENAME` 语句只会在单个服务器上执行,不会被复制。
-- `The CREATE TABLE` query creates a new replicatable table on the server where the query is run. If this table already exists on other servers, it adds a new replica.
-- `The DROP TABLE` query deletes the replica located on the server where the query is run.
-- `The RENAME` query renames the table on one of the replicas. In other words, replicated tables can have different names on different replicas.
+- `The CREATE TABLE` 在运行此语句的服务器上创建一个新的可复制表。如果此表已存在其他服务器上,则给该表添加新副本。
+- `The DROP TABLE` 删除运行此查询的服务器上的副本。
+- `The RENAME` 语句重命名其中一个副本上的表。换句话说，可复制表不同的副本可以有不同的名称。
-To use replication, set the addresses of the ZooKeeper cluster in the config file. Example:
+要使用副本,需在配置文件中设置 ZooKeeper 集群的地址。例如:
```xml
@@ -40,43 +41,44 @@ To use replication, set the addresses of the ZooKeeper cluster in the config fil
```
-Use ZooKeeper version 3.4.5 or later.
+需要 ZooKeeper 3.4.5 或更高版本。
-You can specify any existing ZooKeeper cluster and the system will use a directory on it for its own data (the directory is specified when creating a replicatable table).
+你可以配置任何现有的 ZooKeeper 集群,系统会使用里面的目录来存取元数据(该目录在创建可复制表时指定)。
-If ZooKeeper isn't set in the config file, you can't create replicated tables, and any existing replicated tables will be read-only.
+如果配置文件中没有设置 ZooKeeper ,则无法创建复制表,并且任何现有的复制表都将变为只读。
-ZooKeeper is not used in `SELECT` queries because replication does not affect the performance of `SELECT` and queries run just as fast as they do for non-replicated tables. When querying distributed replicated tables, ClickHouse behavior is controlled by the settings [max_replica_delay_for_distributed_queries](../settings/settings.md#settings-max_replica_delay_for_distributed_queries) and [fallback_to_stale_replicas_for_distributed_queries](../settings/settings.md).
+`SELECT` 查询并不需要借助 ZooKeeper ，副本并不影响 `SELECT` 的性能，查询复制表与非复制表速度是一样的。查询分布式表时，ClickHouse 的处理方式可通过设置 [max_replica_delay_for_distributed_queries](../settings/settings.md#settings-max_replica_delay_for_distributed_queries) 和 [fallback_to_stale_replicas_for_distributed_queries](../settings/settings.md) 修改。
-For each `INSERT` query, approximately ten entries are added to ZooKeeper through several transactions. (To be more precise, this is for each inserted block of data; an INSERT query contains one block or one block per `max_insert_block_size = 1048576` rows.) This leads to slightly longer latencies for `INSERT` compared to non-replicated tables. But if you follow the recommendations to insert data in batches of no more than one `INSERT` per second, it doesn't create any problems. The entire ClickHouse cluster used for coordinating one ZooKeeper cluster has a total of several hundred `INSERTs` per second. The throughput on data inserts (the number of rows per second) is just as high as for non-replicated data.
+对于每个 `INSERT` 语句,会通过几个事务将十来个记录添加到 ZooKeeper。(确切地说,这是针对每个插入的数据块; 每个 INSERT 语句的每 `max_insert_block_size = 1048576` 行和最后剩余的都各算作一个块。)相比非复制表,写 zk 会导致 `INSERT` 的延迟略长一些。但只要你按照建议每秒不超过一个 `INSERT` 地批量插入数据,不会有任何问题。一个 ZooKeeper 集群能给整个 ClickHouse 集群支撑协调每秒几百个 `INSERT`。数据插入的吞吐量(每秒的行数)可以跟不用复制的数据一样高。
-For very large clusters, you can use different ZooKeeper clusters for different shards. However, this hasn't proven necessary on the Yandex.Metrica cluster (approximately 300 servers).
+对于非常大的集群,你可以把不同的 ZooKeeper 集群用于不同的分片。然而,即使 Yandex.Metrica 集群(大约300台服务器)也证明还不需要这么做。
-Replication is asynchronous and multi-master. `INSERT` queries (as well as `ALTER`) can be sent to any available server. Data is inserted on the server where the query is run, and then it is copied to the other servers. Because it is asynchronous, recently inserted data appears on the other replicas with some latency. If part of the replicas are not available, the data is written when they become available. If a replica is available, the latency is the amount of time it takes to transfer the block of compressed data over the network.
+复制是多主异步。 `INSERT` 语句(以及 `ALTER` )可以发给任意可用的服务器。数据会先插入到执行该语句的服务器上,然后被复制到其他服务器。由于它是异步的,在其他副本上最近插入的数据会有一些延迟。如果部分副本不可用,则数据在其可用时再写入。副本可用的情况下,则延迟时长是通过网络传输压缩数据块所需的时间。
-By default, an INSERT query waits for confirmation of writing the data from only one replica. If the data was successfully written to only one replica and the server with this replica ceases to exist, the stored data will be lost. Tp enable getting confirmation of data writes from multiple replicas, use the `insert_quorum` option.
+默认情况下,INSERT 语句仅等待一个副本写入成功后返回。如果数据只成功写入一个副本后该副本所在的服务器不再存在,则存储的数据会丢失。要启用数据写入多个副本才确认返回,使用 `insert_quorum` 选项。
-Each block of data is written atomically. The INSERT query is divided into blocks up to `max_insert_block_size = 1048576` rows. In other words, if the `INSERT` query has less than 1048576 rows, it is made atomically.
+单个数据块写入是原子的。 INSERT 的数据按每块最多 `max_insert_block_size = 1048576` 行进行分块,换句话说,如果 `INSERT` 插入的行少于 1048576,则该 INSERT 是原子的。
-Data blocks are deduplicated. For multiple writes of the same data block (data blocks of the same size containing the same rows in the same order), the block is only written once. The reason for this is in case of network failures when the client application doesn't know if the data was written to the DB, so the `INSERT` query can simply be repeated. It doesn't matter which replica INSERTs were sent to with identical data. `INSERTs` are idempotent. Deduplication parameters are controlled by [merge_tree](../server_settings/settings.md) server settings.
+数据块会去重。对于被多次写的相同数据块（大小相同且具有相同顺序的相同行的数据块），该块仅会写入一次。这样设计的原因是万一在网络故障时客户端应用程序不知道数据是否成功写入DB，此时可以简单地重复 `INSERT` 。把相同的数据发送给多个副本 INSERT 并不会有问题。因为这些 `INSERT` 是完全相同的（会被去重）。去重参数参看服务器设置 [merge_tree](../server_settings/settings.md) 。（注意：只有 Replicated\*MergeTree 才会去重，不依赖 ZooKeeper 的非复制 MergeTree 不会去重）
-During replication, only the source data to insert is transferred over the network. Further data transformation (merging) is coordinated and performed on all the replicas in the same way. This minimizes network usage, which means that replication works well when replicas reside in different datacenters. (Note that duplicating data in different datacenters is the main goal of replication.)
+在复制期间,只有要插入的源数据通过网络传输。进一步的数据转换(合并)会在所有副本上以相同的方式进行处理执行。这样可以最大限度地减少网络使用,这意味着即使副本在不同的数据中心,数据同步也能工作良好。(能在不同数据中心中的同步数据是副本机制的主要目标。)
-You can have any number of replicas of the same data. Yandex.Metrica uses double replication in production. Each server uses RAID-5 or RAID-6, and RAID-10 in some cases. This is a relatively reliable and convenient solution.
+你可以给数据做任意多的副本。Yandex.Metrica 在生产中使用双副本。某一些情况下,给每台服务器都使用 RAID-5 或 RAID-6 和 RAID-10。是一种相对可靠和方便的解决方案。
-The system monitors data synchronicity on replicas and is able to recover after a failure. Failover is automatic (for small differences in data) or semi-automatic (when data differs too much, which may indicate a configuration error).
+系统会监视副本数据同步情况,并能在发生故障后恢复。故障转移是自动的(对于小的数据差异)或半自动的(当数据差异很大时,这可能意味是有配置错误)。
-## Creating Replicated Tables
+## 创建复制表 {#creating-replicated-tables}
-The `Replicated` prefix is added to the table engine name. For example:`ReplicatedMergeTree`.
-**Replicated\*MergeTree parameters**
+在表引擎名称上加上 `Replicated` 前缀。例如:`ReplicatedMergeTree`。
-- `zoo_path` — The path to the table in ZooKeeper.
-- `replica_name` — The replica name in ZooKeeper.
+**Replicated\*MergeTree 参数**
-Example:
+- `zoo_path` — ZooKeeper 中该表的路径。
+- `replica_name` — ZooKeeper 中的该表的副本名称。
+
+示例:
```sql
CREATE TABLE table_name
@@ -90,7 +92,7 @@ ORDER BY (CounterID, EventDate, intHash32(UserID))
SAMPLE BY intHash32(UserID)
```
-Example in deprecated syntax:
+已弃用的建表语法示例:
```sql
CREATE TABLE table_name
@@ -101,7 +103,7 @@ CREATE TABLE table_name
) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{layer}-{shard}/hits', '{replica}', EventDate, intHash32(UserID), (CounterID, EventDate, intHash32(UserID), EventTime), 8192)
```
-As the example shows, these parameters can contain substitutions in curly brackets. The substituted values are taken from the 'macros' section of the configuration file. Example:
+如上例所示,这些参数可以包含宏替换的占位符,即大括号的部分。它们会被替换为配置文件里 'macros' 那部分配置的值。示例:
```xml
@@ -111,92 +113,93 @@ As the example shows, these parameters can contain substitutions in curly bracke
```
-The path to the table in ZooKeeper should be unique for each replicated table. Tables on different shards should have different paths.
-In this case, the path consists of the following parts:
+“ZooKeeper 中该表的路径”对每个可复制表都要是唯一的。不同分片上的表要有不同的路径。
+这种情况下,路径包含下面这些部分:
-`/clickhouse/tables/` is the common prefix. We recommend using exactly this one.
+`/clickhouse/tables/` 是公共前缀,我们推荐使用这个。
-`{layer}-{shard}` is the shard identifier. In this example it consists of two parts, since the Yandex.Metrica cluster uses bi-level sharding. For most tasks, you can leave just the {shard} substitution, which will be expanded to the shard identifier.
+`{layer}-{shard}` 是分片标识部分。在此示例中,由于 Yandex.Metrica 集群使用了两级分片,所以它是由两部分组成的。但对于大多数情况来说,你只需保留 {shard} 占位符即可,它会替换展开为分片标识。
-`hits` is the name of the node for the table in ZooKeeper. It is a good idea to make it the same as the table name. It is defined explicitly, because in contrast to the table name, it doesn't change after a RENAME query.
+`hits` 是该表在 ZooKeeper 中的名称。使其与 ClickHouse 中的表名相同比较好。 这里它被明确定义,跟 ClickHouse 表名不一样,它并不会被 RENAME 语句修改。
-The replica name identifies different replicas of the same table. You can use the server name for this, as in the example. The name only needs to be unique within each shard.
+副本名称用于标识同一个表分片的不同副本。你可以使用服务器名称,如上例所示。同个分片中不同副本的副本名称要唯一。
-You can define the parameters explicitly instead of using substitutions. This might be convenient for testing and for configuring small clusters. However, you can't use distributed DDL queries (`ON CLUSTER`) in this case.
+你也可以显式指定这些参数,而不是使用宏替换。对于测试和配置小型集群这可能会很方便。但是,这种情况下,则不能使用分布式 DDL 语句(`ON CLUSTER`)。
-When working with large clusters, we recommend using substitutions because they reduce the probability of error.
+使用大型集群时,我们建议使用宏替换,因为它可以降低出错的可能性。
-Run the `CREATE TABLE` query on each replica. This query creates a new replicated table, or adds a new replica to an existing one.
+在每个副本服务器上运行 `CREATE TABLE` 查询。将创建新的复制表,或给现有表添加新副本。
-If you add a new replica after the table already contains some data on other replicas, the data will be copied from the other replicas to the new one after running the query. In other words, the new replica syncs itself with the others.
+如果其他副本上已包含了某些数据,在表上添加新副本,则在运行语句后,数据会从其他副本复制到新副本。换句话说,新副本会与其他副本同步。
-To delete a replica, run `DROP TABLE`. However, only one replica is deleted – the one that resides on the server where you run the query.
+要删除副本，使用 `DROP TABLE`。但它只删除位于运行该语句的服务器上的那个副本。
-## Recovery After Failures
+## 故障恢复
-If ZooKeeper is unavailable when a server starts, replicated tables switch to read-only mode. The system periodically attempts to connect to ZooKeeper.
+如果服务器启动时 ZooKeeper 不可用,则复制表会切换为只读模式。系统会定期尝试去连接 ZooKeeper。
-If ZooKeeper is unavailable during an `INSERT`, or an error occurs when interacting with ZooKeeper, an exception is thrown.
+如果在 `INSERT` 期间 ZooKeeper 不可用,或者在与 ZooKeeper 交互时发生错误,则抛出异常。
-After connecting to ZooKeeper, the system checks whether the set of data in the local file system matches the expected set of data (ZooKeeper stores this information). If there are minor inconsistencies, the system resolves them by syncing data with the replicas.
+连接到 ZooKeeper 后,系统会检查本地文件系统中的数据集是否与预期的数据集( ZooKeeper 存储此信息)一致。如果存在轻微的不一致,系统会通过与副本同步数据来解决。
-If the system detects broken data parts (with the wrong size of files) or unrecognized parts (parts written to the file system but not recorded in ZooKeeper), it moves them to the 'detached' subdirectory (they are not deleted). Any missing parts are copied from the replicas.
+如果系统检测到损坏的数据片段(文件大小错误)或无法识别的片段(写入文件系统但未记录在 ZooKeeper 中的部分),则会把它们移动到 'detached' 子目录(不会删除)。而副本中其他任何缺少的但正常数据片段都会被复制同步。
-Note that ClickHouse does not perform any destructive actions such as automatically deleting a large amount of data.
+注意,ClickHouse 不会执行任何破坏性操作,例如自动删除大量数据。
-When the server starts (or establishes a new session with ZooKeeper), it only checks the quantity and sizes of all files. If the file sizes match but bytes have been changed somewhere in the middle, this is not detected immediately, but only when attempting to read the data for a `SELECT` query. The query throws an exception about a non-matching checksum or size of a compressed block. In this case, data parts are added to the verification queue and copied from the replicas if necessary.
+当服务器启动(或与 ZooKeeper 建立新会话)时,它只检查所有文件的数量和大小。 如果文件大小一致但中间某处已有字节被修改过,不会立即被检测到,只有在尝试读取 `SELECT` 查询的数据时才会检测到。该查询会引发校验和不匹配或压缩块大小不一致的异常。这种情况下,数据片段会添加到验证队列中,并在必要时从其他副本中复制。
-If the local set of data differs too much from the expected one, a safety mechanism is triggered. The server enters this in the log and refuses to launch. The reason for this is that this case may indicate a configuration error, such as if a replica on a shard was accidentally configured like a replica on a different shard. However, the thresholds for this mechanism are set fairly low, and this situation might occur during normal failure recovery. In this case, data is restored semi-automatically - by "pushing a button".
+如果本地数据集与预期数据的差异太大,则会触发安全机制。服务器在日志中记录此内容并拒绝启动。这种情况很可能是配置错误,例如,一个分片上的副本意外配置为别的分片上的副本。然而,此机制的阈值设置得相当低,在正常故障恢复期间可能会出现这种情况。在这种情况下,数据恢复则是半自动模式,通过用户主动操作触发。
-To start recovery, create the node `/path_to_table/replica_name/flags/force_restore_data` in ZooKeeper with any content, or run the command to restore all replicated tables:
+要触发启动恢复,可在 ZooKeeper 中创建节点 `/path_to_table/replica_name/flags/force_restore_data`,节点值可以是任何内容,或运行命令来恢复所有的可复制表:
```bash
sudo -u clickhouse touch /var/lib/clickhouse/flags/force_restore_data
```
-Then restart the server. On start, the server deletes these flags and starts recovery.
+然后重启服务器。启动时,服务器会删除这些标志并开始恢复。
-## Recovery After Complete Data Loss
+## 在数据完全丢失后的恢复
-If all data and metadata disappeared from one of the servers, follow these steps for recovery:
+如果其中一个服务器的所有数据和元数据都消失了,请按照以下步骤进行恢复:
-1. Install ClickHouse on the server. Define substitutions correctly in the config file that contains the shard identifier and replicas, if you use them.
-2. If you had unreplicated tables that must be manually duplicated on the servers, copy their data from a replica (in the directory `/var/lib/clickhouse/data/db_name/table_name/`).
-3. Copy table definitions located in `/var/lib/clickhouse/metadata/` from a replica. If a shard or replica identifier is defined explicitly in the table definitions, correct it so that it corresponds to this replica. (Alternatively, start the server and make all the `ATTACH TABLE` queries that should have been in the .sql files in `/var/lib/clickhouse/metadata/`.)
-4. To start recovery, create the ZooKeeper node `/path_to_table/replica_name/flags/force_restore_data` with any content, or run the command to restore all replicated tables: `sudo -u clickhouse touch /var/lib/clickhouse/flags/force_restore_data`
+1. 在服务器上安装 ClickHouse。在包含分片标识符和副本的配置文件中正确定义宏配置，如果有用到的话。
+2. 如果服务器上有非复制表则必须手动复制,可以从副本服务器上(在 `/var/lib/clickhouse/data/db_name/table_name/` 目录中)复制它们的数据。
+3. 从副本服务器上中复制位于 `/var/lib/clickhouse/metadata/` 中的表定义信息。如果在表定义信息中显式指定了分片或副本标识符,请更正它以使其对应于该副本。(另外,启动服务器,然后会在 `/var/lib/clickhouse/metadata/` 中的.sql文件中生成所有的 `ATTACH TABLE` 语句。)
+4. 要开始恢复，在 ZooKeeper 中创建节点 `/path_to_table/replica_name/flags/force_restore_data`，节点内容不限，或运行命令来恢复所有复制的表：`sudo -u clickhouse touch /var/lib/clickhouse/flags/force_restore_data`
-Then start the server (restart, if it is already running). Data will be downloaded from replicas.
+然后启动服务器(如果它已运行则重启)。数据会从副本中下载。
-An alternative recovery option is to delete information about the lost replica from ZooKeeper (`/path_to_table/replica_name`), then create the replica again as described in "[Creating replicated tables](#creating-replicated-tables)".
+另一种恢复方式是从 ZooKeeper（`/path_to_table/replica_name`）中删除数据丢失的副本的所有元信息，然后再按照“[创建可复制表](#creating-replicated-tables)”中的描述重新创建副本。
-There is no restriction on network bandwidth during recovery. Keep this in mind if you are restoring many replicas at once.
+恢复期间的网络带宽没有限制。特别注意这一点,尤其是要一次恢复很多副本。
-## Converting from MergeTree to ReplicatedMergeTree
+## MergeTree 转换为 ReplicatedMergeTree
-We use the term `MergeTree` to refer to all table engines in the ` MergeTree family`, the same as for `ReplicatedMergeTree`.
+我们使用 `MergeTree` 来表示 `MergeTree系列` 中的所有表引擎,`ReplicatedMergeTree` 同理。
-If you had a `MergeTree` table that was manually replicated, you can convert it to a replicatable table. You might need to do this if you have already collected a large amount of data in a `MergeTree` table and now you want to enable replication.
+如果你有一个手动同步的 `MergeTree` 表,您可以将其转换为可复制表。如果你已经在 `MergeTree` 表中收集了大量数据,并且现在要启用复制,则可以执行这些操作。
-If the data differs on various replicas, first sync it, or delete this data on all the replicas except one.
+如果各个副本上的数据不一致,则首先对其进行同步,或者除保留的一个副本外,删除其他所有副本上的数据。
-Rename the existing MergeTree table, then create a `ReplicatedMergeTree` table with the old name.
-Move the data from the old table to the 'detached' subdirectory inside the directory with the new table data (`/var/lib/clickhouse/data/db_name/table_name/`).
-Then run `ALTER TABLE ATTACH PARTITION` on one of the replicas to add these data parts to the working set.
+重命名现有的 MergeTree 表,然后使用旧名称创建 `ReplicatedMergeTree` 表。
+将数据从旧表移动到新表(`/var/lib/clickhouse/data/db_name/table_name/`)目录内的 'detached' 目录中。
+然后在其中一个副本上运行`ALTER TABLE ATTACH PARTITION`,将这些数据片段添加到工作集中。
-## Converting from ReplicatedMergeTree to MergeTree
+## ReplicatedMergeTree 转换为 MergeTree
-Create a MergeTree table with a different name. Move all the data from the directory with the `ReplicatedMergeTree` table data to the new table's data directory. Then delete the `ReplicatedMergeTree` table and restart the server.
+使用其他名称创建 MergeTree 表。将具有`ReplicatedMergeTree`表数据的目录中的所有数据移动到新表的数据目录中。然后删除`ReplicatedMergeTree`表并重新启动服务器。
+如果你想在不启动服务器的情况下清除 `ReplicatedMergeTree` 表:
If you want to get rid of a `ReplicatedMergeTree` table without launching the server:
-- Delete the corresponding `.sql` file in the metadata directory (`/var/lib/clickhouse/metadata/`).
-- Delete the corresponding path in ZooKeeper (`/path_to_table/replica_name`).
+- 删除元数据目录中的相应 `.sql` 文件(`/var/lib/clickhouse/metadata/`)。
+- 删除 ZooKeeper 中的相应路径(`/path_to_table/replica_name`)。
-After this, you can launch the server, create a `MergeTree` table, move the data to its directory, and then restart the server.
+之后,你可以启动服务器,创建一个 `MergeTree` 表,将数据移动到其目录,然后重新启动服务器。
-## Recovery When Metadata in The ZooKeeper Cluster is Lost or Damaged
+## 当 ZooKeeper 集群中的元数据丢失或损坏时恢复方法
-If the data in ZooKeeper was lost or damaged, you can save data by moving it to an unreplicated table as described above.
+如果 ZooKeeper 中的数据丢失或损坏,如上所述,你可以通过将数据转移到非复制表来保存数据。
-[Original article](https://clickhouse.yandex/docs/en/operations/table_engines/replication/)
+[来源文章](https://clickhouse.yandex/docs/en/operations/table_engines/replication/)