Compare commits

...

25 Commits

Author SHA1 Message Date
Arthur Passos
d51afdb65f Merge bc866e64ea into 7eee149487 2024-11-21 07:58:54 -03:00
Vladimir Cherkasov
7eee149487 Merge pull request #72145 from ClickHouse/vdimir/fix02374_analyzer_join_using (FIx 02374_analyzer_join_using) 2024-11-21 10:35:46 +00:00
Antonio Andelic
cc3c7e74ae Merge pull request #70523 from ClickHouse/randomize-keeper-feature-flasgs-keeper (Randomize Keeper feature flags in integration tests) 2024-11-21 08:20:07 +00:00
Arthur Passos
bc866e64ea minor adjustment and tests 2024-11-20 15:26:02 -03:00
Arthur Passos
cbda101228 missing double quotes 2024-11-20 12:42:45 -03:00
Arthur Passos
8b3c15b22a Merge branch 'master' into parquet_native_reader_int_logical 2024-11-20 10:27:02 -03:00
Arthur Passos
92a1f0c562 approach1 2024-11-20 10:18:07 -03:00
vdimir
f45bd58849
FIx 02374_analyzer_join_using 2024-11-20 11:51:42 +00:00
Antonio Andelic
57db5cf24c Randomize correctly 2024-11-19 13:39:54 +01:00
Antonio Andelic
459fa898ed Merge branch 'master' into randomize-keeper-feature-flasgs-keeper 2024-11-19 10:00:41 +01:00
Antonio Andelic
0d875ecf5c
Always randomize in private 2024-11-04 12:56:14 +01:00
Antonio Andelic
6698212b5a Fix test 2024-10-31 13:39:41 +01:00
Antonio Andelic
c787838cb2 Merge branch 'master' into randomize-keeper-feature-flasgs-keeper 2024-10-31 12:01:31 +01:00
Antonio Andelic
eb020f1c4b Fix RemoveRecursive 2024-10-29 09:05:31 +01:00
Antonio Andelic
1a40df4d0c Merge branch 'master' into randomize-keeper-feature-flasgs-keeper 2024-10-28 12:07:38 +01:00
Antonio Andelic
4380c6035d Merge branch 'master' into randomize-keeper-feature-flasgs-keeper 2024-10-15 16:51:36 +02:00
Antonio Andelic
5145281088 Correct randomization 2024-10-15 16:51:32 +02:00
Antonio Andelic
35fa4c43e4 More fixes 2024-10-10 19:39:28 +02:00
robot-clickhouse
293e076493 Automatic style fix 2024-10-10 14:03:18 +00:00
Antonio Andelic
8b92603c6d Fix old version 2024-10-10 15:52:56 +02:00
Antonio Andelic
fb14f6e029 Fix MultiRead 2024-10-10 15:52:37 +02:00
robot-clickhouse
e1f37ec2bb Automatic style fix 2024-10-10 07:54:28 +00:00
Antonio Andelic
cc0ef6104f Fix MultiRead 2024-10-10 09:45:42 +02:00
robot-clickhouse
46ce65e66e Automatic style fix 2024-10-09 16:21:09 +00:00
Antonio Andelic
e048893b85 Randomize feature flags in integration test 2024-10-09 18:11:50 +02:00
22 changed files with 526 additions and 212 deletions

View File

@ -341,7 +341,10 @@ Coordination::Error ZooKeeper::tryGetChildren(
const EventPtr & watch,
Coordination::ListRequestType list_request_type)
{
return tryGetChildrenWatch(path, res, stat,
return tryGetChildrenWatch(
path,
res,
stat,
watch ? std::make_shared<Coordination::WatchCallback>(callbackForEvent(watch)) : Coordination::WatchCallbackPtr{},
list_request_type);
}
@ -975,11 +978,14 @@ void ZooKeeper::removeRecursive(const std::string & path, uint32_t remove_nodes_
Coordination::Error ZooKeeper::tryRemoveRecursive(const std::string & path, uint32_t remove_nodes_limit)
{
if (!isFeatureEnabled(DB::KeeperFeatureFlag::REMOVE_RECURSIVE))
const auto fallback_method = [&]
{
tryRemoveChildrenRecursive(path);
return tryRemove(path);
}
};
if (!isFeatureEnabled(DB::KeeperFeatureFlag::REMOVE_RECURSIVE))
return fallback_method();
auto promise = std::make_shared<std::promise<Coordination::RemoveRecursiveResponse>>();
auto future = promise->get_future();
@ -998,6 +1004,10 @@ Coordination::Error ZooKeeper::tryRemoveRecursive(const std::string & path, uint
}
auto response = future.get();
if (response.error == Coordination::Error::ZNOTEMPTY) /// limit was too low, try without RemoveRecursive request
return fallback_method();
return response.error;
}

View File

@ -486,13 +486,13 @@ public:
/// Remove the node with the subtree.
/// If Keeper supports RemoveRecursive operation then it will be performed atomically.
/// Otherwise if someone concurrently adds or removes a node in the subtree, the result is undefined.
void removeRecursive(const std::string & path, uint32_t remove_nodes_limit = 100);
void removeRecursive(const std::string & path, uint32_t remove_nodes_limit = 1000);
/// Same as removeRecursive but in case if Keeper does not supports RemoveRecursive and
/// if someone concurrently removes a node in the subtree, this will not cause errors.
/// For instance, you can call this method twice concurrently for the same node and the end
/// result would be the same as for the single call.
Coordination::Error tryRemoveRecursive(const std::string & path, uint32_t remove_nodes_limit = 100);
Coordination::Error tryRemoveRecursive(const std::string & path, uint32_t remove_nodes_limit = 1000);
/// Similar to removeRecursive(...) and tryRemoveRecursive(...), but does not remove path itself.
/// Node defined as RemoveException will not be deleted.
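A minimal caller-side sketch of the two methods documented above (the zookeeper instance and the path are illustrative and not part of this diff; the default remove_nodes_limit of 1000 is the value introduced here):

// Hedged usage sketch, assuming an already-initialized zkutil::ZooKeeperPtr named zookeeper.
zookeeper->removeRecursive("/clickhouse/example_subtree");                  // throws on error; atomic when the server supports RemoveRecursive
auto code = zookeeper->tryRemoveRecursive("/clickhouse/example_subtree");   // falls back to child-by-child removal on old servers
if (code != Coordination::Error::ZOK && code != Coordination::Error::ZNONODE)
    /* report or retry; calling it twice concurrently for the same node is safe, per the comment above */;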

View File

@ -767,6 +767,11 @@ size_t ZooKeeperMultiRequest::sizeImpl() const
}
void ZooKeeperMultiRequest::readImpl(ReadBuffer & in)
{
readImpl(in, /*request_validator=*/{});
}
void ZooKeeperMultiRequest::readImpl(ReadBuffer & in, RequestValidator request_validator)
{
while (true)
{
@ -788,6 +793,8 @@ void ZooKeeperMultiRequest::readImpl(ReadBuffer & in)
ZooKeeperRequestPtr request = ZooKeeperRequestFactory::instance().get(op_num);
request->readImpl(in);
if (request_validator)
request_validator(*request);
requests.push_back(request);
if (in.eof())

View File

@ -570,6 +570,9 @@ struct ZooKeeperMultiRequest final : MultiRequest<ZooKeeperRequestPtr>, ZooKeepe
void writeImpl(WriteBuffer & out) const override;
size_t sizeImpl() const override;
void readImpl(ReadBuffer & in) override;
using RequestValidator = std::function<void(const ZooKeeperRequest &)>;
void readImpl(ReadBuffer & in, RequestValidator request_validator);
std::string toStringImpl(bool short_format) const override;
ZooKeeperResponsePtr makeResponse() const override;

View File

@ -514,7 +514,13 @@ void KeeperContext::initializeFeatureFlags(const Poco::Util::AbstractConfigurati
feature_flags.disableFeatureFlag(feature_flag.value());
}
if (feature_flags.isEnabled(KeeperFeatureFlag::MULTI_READ))
feature_flags.enableFeatureFlag(KeeperFeatureFlag::FILTERED_LIST);
else
system_nodes_with_data[keeper_api_version_path] = toString(static_cast<uint8_t>(KeeperApiVersion::ZOOKEEPER_COMPATIBLE));
system_nodes_with_data[keeper_api_feature_flags_path] = feature_flags.getFeatureFlags();
}
feature_flags.logFlags(getLogger("KeeperContext"));
@ -569,6 +575,25 @@ const CoordinationSettings & KeeperContext::getCoordinationSettings() const
return *coordination_settings;
}
bool KeeperContext::isOperationSupported(Coordination::OpNum operation) const
{
switch (operation)
{
case Coordination::OpNum::FilteredList:
return feature_flags.isEnabled(KeeperFeatureFlag::FILTERED_LIST);
case Coordination::OpNum::MultiRead:
return feature_flags.isEnabled(KeeperFeatureFlag::MULTI_READ);
case Coordination::OpNum::CreateIfNotExists:
return feature_flags.isEnabled(KeeperFeatureFlag::CREATE_IF_NOT_EXISTS);
case Coordination::OpNum::CheckNotExists:
return feature_flags.isEnabled(KeeperFeatureFlag::CHECK_NOT_EXISTS);
case Coordination::OpNum::RemoveRecursive:
return feature_flags.isEnabled(KeeperFeatureFlag::REMOVE_RECURSIVE);
default:
return true;
}
}
uint64_t KeeperContext::lastCommittedIndex() const
{
return last_committed_log_idx.load(std::memory_order_relaxed);
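A hedged sketch of how the new isOperationSupported check is meant to be consumed; it mirrors the request validator added to KeeperTCPHandler further down in this diff, with keeper_context standing in for any KeeperContextPtr:

// Reject an operation that the current feature-flag set does not allow.
if (!keeper_context->isOperationSupported(Coordination::OpNum::RemoveRecursive))
    throw Exception(ErrorCodes::BAD_ARGUMENTS, "Unsupported operation: {}", Coordination::OpNum::RemoveRecursive);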

View File

@ -1,6 +1,7 @@
#pragma once
#include <Coordination/KeeperFeatureFlags.h>
#include <Poco/Util/AbstractConfiguration.h>
#include <Common/ZooKeeper/ZooKeeperConstants.h>
#include <atomic>
#include <condition_variable>
#include <cstdint>
@ -103,6 +104,7 @@ public:
return precommit_sleep_probability_for_testing;
}
bool isOperationSupported(Coordination::OpNum operation) const;
private:
/// local disk defined using path or disk name
using Storage = std::variant<DiskPtr, std::string>;

View File

@ -48,6 +48,22 @@ public:
consume(bytes);
}
template <typename TValue, typename ParquetType>
void ALWAYS_INLINE readValuesOfDifferentSize(TValue * dst, size_t count)
{
auto necessary_bytes = count * sizeof(ParquetType);
checkAvaible(necessary_bytes);
const ParquetType* src = reinterpret_cast<const ParquetType*>(data);
for (std::size_t i = 0; i < count; i++)
{
dst[i] = static_cast<TValue>(src[i]);
}
consume(necessary_bytes);
}
void ALWAYS_INLINE readDateTime64FromInt96(DateTime64 & dst)
{
static const int max_scale_num = 9;
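The readValuesOfDifferentSize helper added above reads count physical Parquet values of ParquetType and converts each one to the destination value type with a static_cast, which is what lets a physical INT32 column land in a narrower ClickHouse integer column. A minimal illustrative sketch (the buffer variable is assumed to be a ParquetDataBuffer positioned at plain-encoded data):

// Hedged sketch: two INT32-encoded values copied into an Int16 destination, one cast per element.
Int16 values[2];
buffer.readValuesOfDifferentSize<Int16, int32_t>(values, 2);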

View File

@ -240,8 +240,8 @@ TValue * getResizedPrimitiveData(TColumn & column, size_t size)
} // anoynomous namespace
template <>
void ParquetPlainValuesReader<ColumnString>::readBatch(
template <typename TColumn>
void ParquetPlainByteArrayValuesReader<TColumn>::readBatch(
MutableColumnPtr & col_ptr, LazyNullMap & null_map, UInt32 num_values)
{
auto & column = *assert_cast<ColumnString *>(col_ptr.get());
@ -322,8 +322,8 @@ void ParquetBitPlainReader<TColumn>::readBatch(
}
template <>
void ParquetPlainValuesReader<ColumnDecimal<DateTime64>, ParquetReaderTypes::TimestampInt96>::readBatch(
template <typename TColumn>
void ParquetPlainInt96ValuesReader<TColumn>::readBatch(
MutableColumnPtr & col_ptr, LazyNullMap & null_map, UInt32 num_values)
{
auto cursor = col_ptr->size();
@ -350,8 +350,8 @@ void ParquetPlainValuesReader<ColumnDecimal<DateTime64>, ParquetReaderTypes::Tim
);
}
template <typename TColumn, ParquetReaderTypes reader_type>
void ParquetPlainValuesReader<TColumn, reader_type>::readBatch(
template <typename TColumn, typename ParquetType>
void ParquetPlainValuesReader<TColumn, ParquetType>::readBatch(
MutableColumnPtr & col_ptr, LazyNullMap & null_map, UInt32 num_values)
{
auto cursor = col_ptr->size();
@ -365,11 +365,11 @@ void ParquetPlainValuesReader<TColumn, reader_type>::readBatch(
null_map,
/* individual_visitor */ [&](size_t nest_cursor)
{
plain_data_buffer.readValue(column_data[nest_cursor]);
plain_data_buffer.readValuesOfDifferentSize<TValue, ParquetType>(column_data + nest_cursor, 1);
},
/* repeated_visitor */ [&](size_t nest_cursor, UInt32 count)
{
plain_data_buffer.readBytes(column_data + nest_cursor, count * sizeof(TValue));
plain_data_buffer.readValuesOfDifferentSize<TValue, ParquetType>(column_data + nest_cursor, count);
}
);
}
@ -576,18 +576,19 @@ void ParquetRleDictReader<TColumnVector>::readBatch(
}
template class ParquetPlainValuesReader<ColumnInt32>;
template class ParquetPlainValuesReader<ColumnUInt32>;
template class ParquetPlainValuesReader<ColumnInt64>;
template class ParquetPlainValuesReader<ColumnUInt64>;
template class ParquetPlainValuesReader<ColumnBFloat16>;
template class ParquetPlainValuesReader<ColumnFloat32>;
template class ParquetPlainValuesReader<ColumnFloat64>;
template class ParquetPlainValuesReader<ColumnDecimal<Decimal32>>;
template class ParquetPlainValuesReader<ColumnDecimal<Decimal64>>;
template class ParquetPlainValuesReader<ColumnDecimal<DateTime64>>;
template class ParquetPlainValuesReader<ColumnString>;
template class ParquetPlainValuesReader<ColumnUInt8>;
template class ParquetPlainValuesReader<ColumnUInt8, int32_t>;
template class ParquetPlainValuesReader<ColumnInt8, int32_t>;
template class ParquetPlainValuesReader<ColumnUInt16, int32_t>;
template class ParquetPlainValuesReader<ColumnInt16, int32_t>;
template class ParquetPlainValuesReader<ColumnUInt32, int32_t>;
template class ParquetPlainValuesReader<ColumnInt32, int32_t>;
template class ParquetPlainValuesReader<ColumnUInt64, int64_t>;
template class ParquetPlainValuesReader<ColumnInt64, int64_t>;
template class ParquetPlainValuesReader<ColumnFloat32, float>;
template class ParquetPlainValuesReader<ColumnFloat64, double>;
template class ParquetPlainValuesReader<ColumnDecimal<Decimal32>, int32_t>;
template class ParquetPlainValuesReader<ColumnDecimal<Decimal64>, int64_t>;
template class ParquetPlainValuesReader<ColumnDecimal<DateTime64>, int64_t>;
template class ParquetBitPlainReader<ColumnUInt8>;
@ -598,12 +599,10 @@ template class ParquetRleLCReader<ColumnUInt8>;
template class ParquetRleLCReader<ColumnUInt16>;
template class ParquetRleLCReader<ColumnUInt32>;
template class ParquetRleDictReader<ColumnUInt8>;
template class ParquetRleDictReader<ColumnInt32>;
template class ParquetRleDictReader<ColumnUInt32>;
template class ParquetRleDictReader<ColumnInt64>;
template class ParquetRleDictReader<ColumnUInt64>;
template class ParquetRleDictReader<ColumnBFloat16>;
template class ParquetRleDictReader<ColumnFloat32>;
template class ParquetRleDictReader<ColumnFloat64>;
template class ParquetRleDictReader<ColumnDecimal<Decimal32>>;
@ -613,4 +612,8 @@ template class ParquetRleDictReader<ColumnDecimal<Decimal256>>;
template class ParquetRleDictReader<ColumnDecimal<DateTime64>>;
template class ParquetRleDictReader<ColumnString>;
template class ParquetPlainByteArrayValuesReader<ColumnString>;
template class ParquetPlainInt96ValuesReader<ColumnDecimal<DateTime64>>;
}

View File

@ -150,7 +150,7 @@ enum class ParquetReaderTypes
/**
* The definition level is RLE or BitPacked encoding, while data is read directly
*/
template <typename TColumn, ParquetReaderTypes reader_type = ParquetReaderTypes::Normal>
template <typename TColumn, typename ParquetType>
class ParquetPlainValuesReader : public ParquetDataValuesReader
{
public:
@ -172,6 +172,50 @@ private:
ParquetDataBuffer plain_data_buffer;
};
template <typename TColumn>
class ParquetPlainInt96ValuesReader : public ParquetDataValuesReader
{
public:
ParquetPlainInt96ValuesReader(
Int32 max_def_level_,
std::unique_ptr<RleValuesReader> def_level_reader_,
ParquetDataBuffer data_buffer_)
: max_def_level(max_def_level_)
, def_level_reader(std::move(def_level_reader_))
, plain_data_buffer(std::move(data_buffer_))
{}
void readBatch(MutableColumnPtr & col_ptr, LazyNullMap & null_map, UInt32 num_values) override;
private:
Int32 max_def_level;
std::unique_ptr<RleValuesReader> def_level_reader;
ParquetDataBuffer plain_data_buffer;
};
template <typename TColumn>
class ParquetPlainByteArrayValuesReader : public ParquetDataValuesReader
{
public:
ParquetPlainByteArrayValuesReader(
Int32 max_def_level_,
std::unique_ptr<RleValuesReader> def_level_reader_,
ParquetDataBuffer data_buffer_)
: max_def_level(max_def_level_)
, def_level_reader(std::move(def_level_reader_))
, plain_data_buffer(std::move(data_buffer_))
{}
void readBatch(MutableColumnPtr & col_ptr, LazyNullMap & null_map, UInt32 num_values) override;
private:
Int32 max_def_level;
std::unique_ptr<RleValuesReader> def_level_reader;
ParquetDataBuffer plain_data_buffer;
};
template <typename TColumn>
class ParquetBitPlainReader : public ParquetDataValuesReader
{

View File

@ -173,13 +173,7 @@ ColumnPtr readDictPage(
}
template <typename TColumn>
std::unique_ptr<ParquetDataValuesReader> createPlainReader(
const parquet::ColumnDescriptor & col_des,
RleValuesReaderPtr def_level_reader,
ParquetDataBuffer buffer);
template <is_col_over_big_decimal TColumnDecimal>
template <is_col_over_big_decimal TColumnDecimal, typename ParquetType>
std::unique_ptr<ParquetDataValuesReader> createPlainReader(
const parquet::ColumnDescriptor & col_des,
RleValuesReaderPtr def_level_reader,
@ -192,25 +186,62 @@ std::unique_ptr<ParquetDataValuesReader> createPlainReader(
std::move(buffer));
}
template <typename TColumn>
template <typename TColumn, typename ParquetType>
std::unique_ptr<ParquetDataValuesReader> createPlainReader(
const parquet::ColumnDescriptor & col_des,
RleValuesReaderPtr def_level_reader,
ParquetDataBuffer buffer)
{
if (std::is_same_v<TColumn, ColumnDecimal<DateTime64>> && col_des.physical_type() == parquet::Type::INT96)
return std::make_unique<ParquetPlainValuesReader<TColumn, ParquetReaderTypes::TimestampInt96>>(
if constexpr (std::is_same_v<TColumn, ColumnDecimal<DateTime64>> && std::is_same_v<ParquetType, ParquetInt96TypeStub>)
return std::make_unique<ParquetPlainInt96ValuesReader<TColumn>>(
col_des.max_definition_level(), std::move(def_level_reader), std::move(buffer));
return std::make_unique<ParquetPlainValuesReader<TColumn>>(
if constexpr (std::is_same_v<ParquetType, ParquetByteArrayTypeStub>)
{
return std::make_unique<ParquetPlainByteArrayValuesReader<TColumn>>(
col_des.max_definition_level(), std::move(def_level_reader), std::move(buffer));
}
return std::make_unique<ParquetPlainValuesReader<TColumn, ParquetType>>(
col_des.max_definition_level(), std::move(def_level_reader), std::move(buffer));
}
template <typename TColumn, typename ParquetType>
std::unique_ptr<ParquetDataValuesReader> createReader(
const parquet::ColumnDescriptor & col_descriptor,
RleValuesReaderPtr def_level_reader,
const uint8_t * buffer,
std::size_t buffer_max_size,
const DataTypePtr & base_data_type)
{
if constexpr (std::is_same_v<ParquetType, bool>)
{
auto bit_reader = std::make_unique<arrow::bit_util::BitReader>(buffer, buffer_max_size);
return std::make_unique<ParquetBitPlainReader<TColumn>>(
col_descriptor.max_definition_level(), std::move(def_level_reader), std::move(bit_reader));
}
else
{
ParquetDataBuffer parquet_buffer = [&]()
{
if constexpr (!std::is_same_v<ColumnDecimal<DateTime64>, TColumn>)
return ParquetDataBuffer(buffer, buffer_max_size);
auto scale = assert_cast<const DataTypeDateTime64 &>(*base_data_type).getScale();
return ParquetDataBuffer(buffer, buffer_max_size, scale);
}();
return createPlainReader<TColumn, ParquetType>(col_descriptor, std::move(def_level_reader), parquet_buffer);
}
}
} // anonymous namespace
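As a hedged illustration of the new dispatch, this is the kind of instantiation initDataReader makes further down for a column stored as physical INT32 but read into a ClickHouse Int16 column (all argument names come from this diff; their values belong to the current data page):

// Plain reader that narrows int32_t values into an Int16 column.
auto values_reader = createReader<ColumnInt16, int32_t>(
    col_descriptor, std::move(def_level_reader), buffer, max_size, base_data_type);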
template <typename TColumn>
ParquetLeafColReader<TColumn>::ParquetLeafColReader(
template <typename TColumn, typename ParquetType>
ParquetLeafColReader<TColumn, ParquetType>::ParquetLeafColReader(
const parquet::ColumnDescriptor & col_descriptor_,
DataTypePtr base_type_,
std::unique_ptr<parquet::ColumnChunkMetaData> meta_,
@ -223,8 +254,8 @@ ParquetLeafColReader<TColumn>::ParquetLeafColReader(
{
}
template <typename TColumn>
ColumnWithTypeAndName ParquetLeafColReader<TColumn>::readBatch(UInt64 rows_num, const String & name)
template <typename TColumn, typename ParquetType>
ColumnWithTypeAndName ParquetLeafColReader<TColumn, ParquetType>::readBatch(UInt64 rows_num, const String & name)
{
reading_rows_num = rows_num;
auto readPageIfEmpty = [&]()
@ -251,41 +282,42 @@ ColumnWithTypeAndName ParquetLeafColReader<TColumn>::readBatch(UInt64 rows_num,
return releaseColumn(name);
}
template <>
void ParquetLeafColReader<ColumnString>::resetColumn(UInt64 rows_num)
template <typename TColumn, typename ParquetType>
void ParquetLeafColReader<TColumn, ParquetType>::resetColumn(UInt64 rows_num)
{
if (reading_low_cardinality)
if constexpr (std::is_same_v<TColumn, ColumnString>)
{
assert(dictionary);
visitColStrIndexType(dictionary->size(), [&]<typename TColVec>(TColVec *)
if (reading_low_cardinality)
{
column = TColVec::create();
});
assert(dictionary);
visitColStrIndexType(dictionary->size(), [&]<typename TColVec>(TColVec *)
{
column = TColVec::create();
});
// only first position is used
null_map = std::make_unique<LazyNullMap>(1);
column->reserve(rows_num);
// only first position is used
null_map = std::make_unique<LazyNullMap>(1);
column->reserve(rows_num);
}
else
{
null_map = std::make_unique<LazyNullMap>(rows_num);
column = ColumnString::create();
reserveColumnStrRows(column, rows_num);
}
}
else
{
assert(!reading_low_cardinality);
column = base_data_type->createColumn();
column->reserve(rows_num);
null_map = std::make_unique<LazyNullMap>(rows_num);
column = ColumnString::create();
reserveColumnStrRows(column, rows_num);
}
}
template <typename TColumn>
void ParquetLeafColReader<TColumn>::resetColumn(UInt64 rows_num)
{
assert(!reading_low_cardinality);
column = base_data_type->createColumn();
column->reserve(rows_num);
null_map = std::make_unique<LazyNullMap>(rows_num);
}
template <typename TColumn>
void ParquetLeafColReader<TColumn>::degradeDictionary()
template <typename TColumn, typename ParquetType>
void ParquetLeafColReader<TColumn, ParquetType>::degradeDictionary()
{
// if last batch read all dictionary indices, then degrade is not needed this time
if (!column)
@ -331,8 +363,8 @@ void ParquetLeafColReader<TColumn>::degradeDictionary()
LOG_DEBUG(log, "degraded dictionary to normal column");
}
template <typename TColumn>
ColumnWithTypeAndName ParquetLeafColReader<TColumn>::releaseColumn(const String & name)
template <typename TColumn, typename ParquetType>
ColumnWithTypeAndName ParquetLeafColReader<TColumn, ParquetType>::releaseColumn(const String & name)
{
DataTypePtr data_type = base_data_type;
if (reading_low_cardinality)
@ -365,8 +397,8 @@ ColumnWithTypeAndName ParquetLeafColReader<TColumn>::releaseColumn(const String
return res;
}
template <typename TColumn>
void ParquetLeafColReader<TColumn>::readPage()
template <typename TColumn, typename ParquetType>
void ParquetLeafColReader<TColumn, ParquetType>::readPage()
{
// refer to: ColumnReaderImplBase::ReadNewPage in column_reader.cc
// this is where decompression happens
@ -408,8 +440,8 @@ void ParquetLeafColReader<TColumn>::readPage()
}
}
template <typename TColumn>
void ParquetLeafColReader<TColumn>::initDataReader(
template <typename TColumn, typename ParquetType>
void ParquetLeafColReader<TColumn, ParquetType>::initDataReader(
parquet::Encoding::type enconding_type,
const uint8_t * buffer,
std::size_t max_size,
@ -425,29 +457,8 @@ void ParquetLeafColReader<TColumn>::initDataReader(
degradeDictionary();
}
if (col_descriptor.physical_type() == parquet::Type::BOOLEAN)
{
if constexpr (std::is_same_v<TColumn, ColumnUInt8>)
{
auto bit_reader = std::make_unique<arrow::bit_util::BitReader>(buffer, max_size);
data_values_reader = std::make_unique<ParquetBitPlainReader<ColumnUInt8>>(col_descriptor.max_definition_level(),
std::move(def_level_reader),
std::move(bit_reader));
}
}
else
{
ParquetDataBuffer parquet_buffer = [&]()
{
if constexpr (!std::is_same_v<ColumnDecimal<DateTime64>, TColumn>)
return ParquetDataBuffer(buffer, max_size);
auto scale = assert_cast<const DataTypeDateTime64 &>(*base_data_type).getScale();
return ParquetDataBuffer(buffer, max_size, scale);
}();
data_values_reader = createPlainReader<TColumn>(
col_descriptor, std::move(def_level_reader), std::move(parquet_buffer));
}
data_values_reader = createReader<TColumn, ParquetType>(
col_descriptor, std::move(def_level_reader), buffer, max_size, base_data_type);
break;
}
case parquet::Encoding::RLE_DICTIONARY:
@ -476,8 +487,8 @@ void ParquetLeafColReader<TColumn>::initDataReader(
}
}
template <typename TColumn>
void ParquetLeafColReader<TColumn>::readPageV1(const parquet::DataPageV1 & page)
template <typename TColumn, typename ParquetType>
void ParquetLeafColReader<TColumn, ParquetType>::readPageV1(const parquet::DataPageV1 & page)
{
cur_page_values = page.num_values();
@ -562,8 +573,8 @@ void ParquetLeafColReader<TColumn>::readPageV1(const parquet::DataPageV1 & page)
* The data buffer is "offset-ed" by rl bytes length and then dl decoder is built using RLE decoder. Since dl bytes length was present in the header,
* there is no need to read it and apply an offset like in page v1.
* */
template <typename TColumn>
void ParquetLeafColReader<TColumn>::readPageV2(const parquet::DataPageV2 & page)
template <typename TColumn, typename ParquetType>
void ParquetLeafColReader<TColumn, ParquetType>::readPageV2(const parquet::DataPageV2 & page)
{
cur_page_values = page.num_values();
@ -609,28 +620,32 @@ void ParquetLeafColReader<TColumn>::readPageV2(const parquet::DataPageV2 & page)
initDataReader(page.encoding(), buffer, page.size() - total_levels_length, std::move(def_level_reader));
}
template <typename TColumn>
std::unique_ptr<ParquetDataValuesReader> ParquetLeafColReader<TColumn>::createDictReader(
template <typename TColumn, typename ParquetType>
std::unique_ptr<ParquetDataValuesReader> ParquetLeafColReader<TColumn, ParquetType>::createDictReader(
std::unique_ptr<RleValuesReader> def_level_reader, std::unique_ptr<RleValuesReader> rle_data_reader)
{
if (reading_low_cardinality && std::same_as<TColumn, ColumnString>)
{
std::unique_ptr<ParquetDataValuesReader> res;
visitColStrIndexType(dictionary->size(), [&]<typename TCol>(TCol *)
{
res = std::make_unique<ParquetRleLCReader<TCol>>(
col_descriptor.max_definition_level(),
std::move(def_level_reader),
std::move(rle_data_reader));
});
return res;
}
if (col_descriptor.physical_type() == parquet::Type::type::BOOLEAN)
if constexpr (std::is_same_v<TColumn, ColumnUInt8> || std::is_same_v<TColumn, ColumnInt8>
|| std::is_same_v<TColumn, ColumnUInt16> || std::is_same_v<TColumn, ColumnInt16>)
{
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Dictionary encoding for booleans is not supported");
}
if (reading_low_cardinality)
{
if constexpr (std::same_as<TColumn, ColumnString>)
{
std::unique_ptr<ParquetDataValuesReader> res;
visitColStrIndexType(dictionary->size(), [&]<typename TCol>(TCol *)
{
res = std::make_unique<ParquetRleLCReader<TCol>>(
col_descriptor.max_definition_level(),
std::move(def_level_reader),
std::move(rle_data_reader));
});
return res;
}
}
return std::make_unique<ParquetRleDictReader<TColumn>>(
col_descriptor.max_definition_level(),
std::move(def_level_reader),
@ -639,19 +654,23 @@ std::unique_ptr<ParquetDataValuesReader> ParquetLeafColReader<TColumn>::createDi
}
template class ParquetLeafColReader<ColumnUInt8>;
template class ParquetLeafColReader<ColumnInt32>;
template class ParquetLeafColReader<ColumnUInt32>;
template class ParquetLeafColReader<ColumnInt64>;
template class ParquetLeafColReader<ColumnUInt64>;
template class ParquetLeafColReader<ColumnBFloat16>;
template class ParquetLeafColReader<ColumnFloat32>;
template class ParquetLeafColReader<ColumnFloat64>;
template class ParquetLeafColReader<ColumnString>;
template class ParquetLeafColReader<ColumnDecimal<Decimal32>>;
template class ParquetLeafColReader<ColumnDecimal<Decimal64>>;
template class ParquetLeafColReader<ColumnDecimal<Decimal128>>;
template class ParquetLeafColReader<ColumnDecimal<Decimal256>>;
template class ParquetLeafColReader<ColumnDecimal<DateTime64>>;
template class ParquetLeafColReader<ColumnUInt8, bool>;
template class ParquetLeafColReader<ColumnUInt8, int32_t>;
template class ParquetLeafColReader<ColumnInt8, int32_t>;
template class ParquetLeafColReader<ColumnUInt16, int32_t>;
template class ParquetLeafColReader<ColumnInt16, int32_t>;
template class ParquetLeafColReader<ColumnUInt32, int32_t>;
template class ParquetLeafColReader<ColumnInt32, int32_t>;
template class ParquetLeafColReader<ColumnUInt64, int64_t>;
template class ParquetLeafColReader<ColumnInt64, int64_t>;
template class ParquetLeafColReader<ColumnFloat32, float>;
template class ParquetLeafColReader<ColumnFloat64, double>;
template class ParquetLeafColReader<ColumnString, ParquetByteArrayTypeStub>;
template class ParquetLeafColReader<ColumnDecimal<Decimal32>, int32_t>;
template class ParquetLeafColReader<ColumnDecimal<Decimal64>, int64_t>;
template class ParquetLeafColReader<ColumnDecimal<Decimal128>, ParquetByteArrayTypeStub>;
template class ParquetLeafColReader<ColumnDecimal<Decimal256>, ParquetByteArrayTypeStub>;
template class ParquetLeafColReader<ColumnDecimal<DateTime64>, ParquetInt96TypeStub>;
template class ParquetLeafColReader<ColumnDecimal<DateTime64>, int64_t>;
}

View File

@ -17,7 +17,10 @@ class ColumnDescriptor;
namespace DB
{
template <typename TColumn>
struct ParquetByteArrayTypeStub {};
struct ParquetInt96TypeStub {};
template <typename TColumn, typename ParquetType>
class ParquetLeafColReader : public ParquetColumnReader
{
public:

View File

@ -93,19 +93,20 @@ private:
std::unique_ptr<ParquetColumnReader> fromInt32INT(const parquet::IntLogicalType & int_type);
std::unique_ptr<ParquetColumnReader> fromInt64INT(const parquet::IntLogicalType & int_type);
template<class DataType>
template<class ClickHouseType, typename ParquetType>
auto makeLeafReader()
{
return std::make_unique<ParquetLeafColReader<typename DataType::ColumnType>>(
col_descriptor, std::make_shared<DataType>(), std::move(meta), std::move(page_reader));
return std::make_unique<ParquetLeafColReader<typename ClickHouseType::ColumnType, ParquetType>>(
col_descriptor, std::make_shared<ClickHouseType>(), std::move(meta), std::move(page_reader));
}
template<class DecimalType>
template<class DecimalType, typename ParquetType>
auto makeDecimalLeafReader()
{
auto data_type = std::make_shared<DataTypeDecimal<DecimalType>>(
col_descriptor.type_precision(), col_descriptor.type_scale());
return std::make_unique<ParquetLeafColReader<ColumnDecimal<DecimalType>>>(
return std::make_unique<ParquetLeafColReader<ColumnDecimal<DecimalType>, ParquetType>>(
col_descriptor, std::move(data_type), std::move(meta), std::move(page_reader));
}
@ -157,11 +158,11 @@ std::unique_ptr<ParquetColumnReader> ColReaderFactory::fromInt32()
case parquet::LogicalType::Type::INT:
return fromInt32INT(dynamic_cast<const parquet::IntLogicalType &>(*col_descriptor.logical_type()));
case parquet::LogicalType::Type::NONE:
return makeLeafReader<DataTypeInt32>();
return makeLeafReader<DataTypeInt32, int32_t>();
case parquet::LogicalType::Type::DATE:
return makeLeafReader<DataTypeDate32>();
return makeLeafReader<DataTypeDate32, int32_t>();
case parquet::LogicalType::Type::DECIMAL:
return makeDecimalLeafReader<Decimal32>();
return makeDecimalLeafReader<Decimal32, int32_t>();
default:
return throwUnsupported();
}
@ -174,16 +175,16 @@ std::unique_ptr<ParquetColumnReader> ColReaderFactory::fromInt64()
case parquet::LogicalType::Type::INT:
return fromInt64INT(dynamic_cast<const parquet::IntLogicalType &>(*col_descriptor.logical_type()));
case parquet::LogicalType::Type::NONE:
return makeLeafReader<DataTypeInt64>();
return makeLeafReader<DataTypeInt64, int64_t>();
case parquet::LogicalType::Type::TIMESTAMP:
{
const auto & tm_type = dynamic_cast<const parquet::TimestampLogicalType &>(*col_descriptor.logical_type());
auto read_type = std::make_shared<DataTypeDateTime64>(getScaleFromLogicalTimestamp(tm_type.time_unit()));
return std::make_unique<ParquetLeafColReader<ColumnDecimal<DateTime64>>>(
return std::make_unique<ParquetLeafColReader<ColumnDecimal<DateTime64>, int64_t>>(
col_descriptor, std::move(read_type), std::move(meta), std::move(page_reader));
}
case parquet::LogicalType::Type::DECIMAL:
return makeDecimalLeafReader<Decimal64>();
return makeDecimalLeafReader<Decimal64, int64_t>();
default:
return throwUnsupported();
}
@ -195,7 +196,7 @@ std::unique_ptr<ParquetColumnReader> ColReaderFactory::fromByteArray()
{
case parquet::LogicalType::Type::STRING:
case parquet::LogicalType::Type::NONE:
return makeLeafReader<DataTypeString>();
return makeLeafReader<DataTypeString, ParquetByteArrayTypeStub>();
default:
return throwUnsupported();
}
@ -210,9 +211,9 @@ std::unique_ptr<ParquetColumnReader> ColReaderFactory::fromFLBA()
if (col_descriptor.type_length() > 0)
{
if (col_descriptor.type_length() <= static_cast<int>(sizeof(Decimal128)))
return makeDecimalLeafReader<Decimal128>();
return makeDecimalLeafReader<Decimal128, ParquetByteArrayTypeStub>();
if (col_descriptor.type_length() <= static_cast<int>(sizeof(Decimal256)))
return makeDecimalLeafReader<Decimal256>();
return makeDecimalLeafReader<Decimal256, ParquetByteArrayTypeStub>();
}
return throwUnsupported(PreformattedMessage::create(
@ -227,11 +228,23 @@ std::unique_ptr<ParquetColumnReader> ColReaderFactory::fromInt32INT(const parque
{
switch (int_type.bit_width())
{
case 8:
{
if (int_type.is_signed())
return makeLeafReader<DataTypeInt8, int32_t>();
return makeLeafReader<DataTypeUInt8, int32_t>();
}
case 16:
{
if (int_type.is_signed())
return makeLeafReader<DataTypeInt16, int32_t>();
return makeLeafReader<DataTypeUInt16, int32_t>();
}
case 32:
{
if (int_type.is_signed())
return makeLeafReader<DataTypeInt32>();
return makeLeafReader<DataTypeUInt32>();
return makeLeafReader<DataTypeInt32, int32_t>();
return makeLeafReader<DataTypeUInt32, int32_t>();
}
default:
return throwUnsupported(PreformattedMessage::create(", bit width: {}", int_type.bit_width()));
@ -245,8 +258,8 @@ std::unique_ptr<ParquetColumnReader> ColReaderFactory::fromInt64INT(const parque
case 64:
{
if (int_type.is_signed())
return makeLeafReader<DataTypeInt64>();
return makeLeafReader<DataTypeUInt64>();
return makeLeafReader<DataTypeInt64, int64_t>();
return makeLeafReader<DataTypeUInt64, int64_t>();
}
default:
return throwUnsupported(PreformattedMessage::create(", bit width: {}", int_type.bit_width()));
@ -263,7 +276,7 @@ std::unique_ptr<ParquetColumnReader> ColReaderFactory::makeReader()
switch (col_descriptor.physical_type())
{
case parquet::Type::BOOLEAN:
return makeLeafReader<DataTypeUInt8>();
return makeLeafReader<DataTypeUInt8, bool>();
case parquet::Type::INT32:
return fromInt32();
case parquet::Type::INT64:
@ -276,13 +289,13 @@ std::unique_ptr<ParquetColumnReader> ColReaderFactory::makeReader()
auto scale = getScaleFromArrowTimeUnit(arrow_properties.coerce_int96_timestamp_unit());
read_type = std::make_shared<DataTypeDateTime64>(scale);
}
return std::make_unique<ParquetLeafColReader<ColumnDecimal<DateTime64>>>(
return std::make_unique<ParquetLeafColReader<ColumnDecimal<DateTime64>, ParquetInt96TypeStub>>(
col_descriptor, read_type, std::move(meta), std::move(page_reader));
}
case parquet::Type::FLOAT:
return makeLeafReader<DataTypeFloat32>();
return makeLeafReader<DataTypeFloat32, float>();
case parquet::Type::DOUBLE:
return makeLeafReader<DataTypeFloat64>();
return makeLeafReader<DataTypeFloat64, double>();
case parquet::Type::BYTE_ARRAY:
return fromByteArray();
case parquet::Type::FIXED_LEN_BYTE_ARRAY:

View File

@ -1,5 +1,4 @@
#include <Server/KeeperTCPHandler.h>
#include "Common/ZooKeeper/ZooKeeperConstants.h"
#if USE_NURAFT
@ -19,6 +18,8 @@
# include <Common/NetException.h>
# include <Common/PipeFDs.h>
# include <Common/Stopwatch.h>
# include <Common/ZooKeeper/ZooKeeperCommon.h>
# include <Common/ZooKeeper/ZooKeeperConstants.h>
# include <Common/ZooKeeper/ZooKeeperIO.h>
# include <Common/logger_useful.h>
# include <Common/setThreadName.h>
@ -63,6 +64,7 @@ namespace ErrorCodes
extern const int LOGICAL_ERROR;
extern const int UNEXPECTED_PACKET_FROM_CLIENT;
extern const int TIMEOUT_EXCEEDED;
extern const int BAD_ARGUMENTS;
}
struct PollResult
@ -637,7 +639,23 @@ std::pair<Coordination::OpNum, Coordination::XID> KeeperTCPHandler::receiveReque
Coordination::ZooKeeperRequestPtr request = Coordination::ZooKeeperRequestFactory::instance().get(opnum);
request->xid = xid;
request->readImpl(read_buffer);
auto request_validator = [&](const Coordination::ZooKeeperRequest & current_request)
{
if (!keeper_dispatcher->getKeeperContext()->isOperationSupported(current_request.getOpNum()))
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Unsupported operation: {}", current_request.getOpNum());
};
if (auto * multi_request = dynamic_cast<Coordination::ZooKeeperMultiRequest *>(request.get()))
{
multi_request->readImpl(read_buffer, request_validator);
}
else
{
request->readImpl(read_buffer);
request_validator(*request);
}
if (!keeper_dispatcher->putRequest(request, session_id, use_xid_64))
throw Exception(ErrorCodes::TIMEOUT_EXCEEDED, "Session {} already disconnected", session_id);

View File

@ -7,6 +7,7 @@ import os.path as p
import platform
import pprint
import pwd
import random
import re
import shlex
import shutil
@ -1650,6 +1651,8 @@ class ClickHouseCluster:
minio_certs_dir=None,
minio_data_dir=None,
use_keeper=True,
keeper_randomize_feature_flags=True,
keeper_required_feature_flags=[],
main_config_name="config.xml",
users_config_name="users.xml",
copy_common_configs=True,
@ -1682,6 +1685,8 @@ class ClickHouseCluster:
if not env_variables:
env_variables = {}
self.use_keeper = use_keeper
self.keeper_randomize_feature_flags = keeper_randomize_feature_flags
self.keeper_required_feature_flags = keeper_required_feature_flags
# Code coverage files will be placed in database directory
# (affect only WITH_COVERAGE=1 build)
@ -2828,15 +2833,51 @@ class ClickHouseCluster:
if self.use_keeper: # TODO: remove hardcoded paths from here
for i in range(1, 4):
current_keeper_config_dir = os.path.join(
f"{self.keeper_instance_dir_prefix}{i}", "config"
)
shutil.copy(
os.path.join(
self.keeper_config_dir, f"keeper_config{i}.xml"
),
os.path.join(
self.keeper_instance_dir_prefix + f"{i}", "config"
),
current_keeper_config_dir,
)
extra_configs_dir = os.path.join(
current_keeper_config_dir, f"keeper_config{i}.d"
)
os.mkdir(extra_configs_dir)
feature_flags_config = os.path.join(
extra_configs_dir, "feature_flags.yaml"
)
indentation = 4 * " "
def get_feature_flag_value(feature_flag):
if not self.keeper_randomize_feature_flags:
return 1
if feature_flag in self.keeper_required_feature_flags:
return 1
return random.randint(0, 1)
with open(feature_flags_config, "w") as ff_config:
ff_config.write("keeper_server:\n")
ff_config.write(f"{indentation}feature_flags:\n")
indentation *= 2
for feature_flag in [
"filtered_list",
"multi_read",
"check_not_exists",
"create_if_not_exists",
"remove_recursive",
]:
ff_config.write(
f"{indentation}{feature_flag}: {get_feature_flag_value(feature_flag)}\n"
)
run_and_check(self.base_zookeeper_cmd + common_opts, env=self.env)
self.up_called = True
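For reference, the feature_flags.yaml written by the loop above into each keeper_config{i}.d directory looks like this; a flag is forced to 1 when it appears in keeper_required_feature_flags or when keeper_randomize_feature_flags is disabled, otherwise its value is drawn at random per run (the concrete 0/1 values below are just one possible outcome):

keeper_server:
    feature_flags:
        filtered_list: 1
        multi_read: 0
        check_not_exists: 1
        create_if_not_exists: 1
        remove_recursive: 0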

View File

@ -13,6 +13,7 @@ node = cluster.add_instance(
main_configs=["configs/enable_keeper_map.xml"],
user_configs=["configs/keeper_retries.xml"],
with_zookeeper=True,
keeper_required_feature_flags=["multi_read"],
stay_alive=True,
)

View File

@ -20,6 +20,7 @@ node1 = cluster.add_instance(
main_configs=["configs/config.xml"],
user_configs=["configs/users.xml"],
with_zookeeper=True,
keeper_required_feature_flags=["multi_read", "create_if_not_exists"],
macros={"shard": "shard1", "replica": "1"},
stay_alive=True,
)
@ -28,6 +29,7 @@ node2 = cluster.add_instance(
main_configs=["configs/config.xml"],
user_configs=["configs/users.xml"],
with_zookeeper=True,
keeper_required_feature_flags=["multi_read", "create_if_not_exists"],
macros={"shard": "shard1", "replica": "2"},
)
nodes = [node1, node2]

View File

@ -59,6 +59,9 @@ instance = cluster.add_instance(
user_configs=["configs/users.xml"],
with_kafka=True,
with_zookeeper=True, # For Replicated Table
keeper_required_feature_flags=[
"create_if_not_exists"
], # new Kafka doesn't work without this feature
macros={
"kafka_broker": "kafka1",
"kafka_topic_old": KAFKA_TOPIC_OLD,

View File

@ -99,6 +99,7 @@ def started_cluster():
with_minio=True,
with_azurite=True,
with_zookeeper=True,
keeper_required_feature_flags=["create_if_not_exists"],
main_configs=[
"configs/zookeeper.xml",
"configs/s3queue_log.xml",
@ -110,6 +111,7 @@ def started_cluster():
user_configs=["configs/users.xml"],
with_minio=True,
with_zookeeper=True,
keeper_required_feature_flags=["create_if_not_exists"],
main_configs=[
"configs/s3queue_log.xml",
],
@ -118,6 +120,7 @@ def started_cluster():
cluster.add_instance(
"old_instance",
with_zookeeper=True,
keeper_required_feature_flags=["create_if_not_exists"],
image="clickhouse/clickhouse-server",
tag="23.12",
stay_alive=True,
@ -127,6 +130,7 @@ def started_cluster():
cluster.add_instance(
"node1",
with_zookeeper=True,
keeper_required_feature_flags=["create_if_not_exists"],
stay_alive=True,
main_configs=[
"configs/zookeeper.xml",
@ -137,6 +141,7 @@ def started_cluster():
cluster.add_instance(
"node2",
with_zookeeper=True,
keeper_required_feature_flags=["create_if_not_exists"],
stay_alive=True,
main_configs=[
"configs/zookeeper.xml",
@ -149,6 +154,7 @@ def started_cluster():
user_configs=["configs/users.xml"],
with_minio=True,
with_zookeeper=True,
keeper_required_feature_flags=["create_if_not_exists"],
main_configs=[
"configs/s3queue_log.xml",
"configs/merge_tree.xml",
@ -158,6 +164,7 @@ def started_cluster():
cluster.add_instance(
"instance_24.5",
with_zookeeper=True,
keeper_required_feature_flags=["create_if_not_exists"],
image="clickhouse/clickhouse-server",
tag="24.5",
stay_alive=True,
@ -170,6 +177,7 @@ def started_cluster():
cluster.add_instance(
"node_cloud_mode",
with_zookeeper=True,
keeper_required_feature_flags=["create_if_not_exists"],
stay_alive=True,
main_configs=[
"configs/zookeeper.xml",

View File

@ -96,13 +96,15 @@ SELECT 'First JOIN INNER second JOIN INNER';
First JOIN INNER second JOIN INNER
SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value),
t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value), t3.id AS t3_id, toTypeName(t3_id), t3.value AS t3_value, toTypeName(t3_value)
FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) INNER JOIN test_table_join_3 AS t3 USING(id);
FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) INNER JOIN test_table_join_3 AS t3 USING(id)
ORDER BY ALL;
0 UInt64 0 UInt64 Join_1_Value_0 String 0 UInt64 Join_2_Value_0 String 0 UInt64 Join_3_Value_0 String
1 UInt64 1 UInt64 Join_1_Value_1 String 1 UInt64 Join_2_Value_1 String 1 UInt64 Join_3_Value_1 String
SELECT '--';
--
SELECT t1.value AS t1_value, toTypeName(t1_value), t2.value AS t2_value, toTypeName(t2_value), t3.value AS t3_value, toTypeName(t3_value)
FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) INNER JOIN test_table_join_3 AS t3 USING(id);
FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) INNER JOIN test_table_join_3 AS t3 USING(id)
ORDER BY ALL;
Join_1_Value_0 String Join_2_Value_0 String Join_3_Value_0 String
Join_1_Value_1 String Join_2_Value_1 String Join_3_Value_1 String
SELECT '--';
@ -115,13 +117,15 @@ SELECT 'First JOIN INNER second JOIN LEFT';
First JOIN INNER second JOIN LEFT
SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value),
t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value), t3.id AS t3_id, toTypeName(t3_id), t3.value AS t3_value, toTypeName(t3_value)
FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) LEFT JOIN test_table_join_3 AS t3 USING(id);
FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) LEFT JOIN test_table_join_3 AS t3 USING(id)
ORDER BY ALL;
0 UInt64 0 UInt64 Join_1_Value_0 String 0 UInt64 Join_2_Value_0 String 0 UInt64 Join_3_Value_0 String
1 UInt64 1 UInt64 Join_1_Value_1 String 1 UInt64 Join_2_Value_1 String 1 UInt64 Join_3_Value_1 String
SELECT '--';
--
SELECT t1.value AS t1_value, toTypeName(t1_value), t2.value AS t2_value, toTypeName(t2_value), t3.value AS t3_value, toTypeName(t3_value)
FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) LEFT JOIN test_table_join_3 AS t3 USING(id);
FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) LEFT JOIN test_table_join_3 AS t3 USING(id)
ORDER BY ALL;
Join_1_Value_0 String Join_2_Value_0 String Join_3_Value_0 String
Join_1_Value_1 String Join_2_Value_1 String Join_3_Value_1 String
SELECT '--';
@ -134,17 +138,19 @@ SELECT 'First JOIN INNER second JOIN RIGHT';
First JOIN INNER second JOIN RIGHT
SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value),
t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value), t3.id AS t3_id, toTypeName(t3_id), t3.value AS t3_value, toTypeName(t3_value)
FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) RIGHT JOIN test_table_join_3 AS t3 USING(id);
FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) RIGHT JOIN test_table_join_3 AS t3 USING(id)
ORDER BY ALL;
0 UInt64 0 UInt64 Join_1_Value_0 String 0 UInt64 Join_2_Value_0 String 0 UInt64 Join_3_Value_0 String
1 UInt64 1 UInt64 Join_1_Value_1 String 1 UInt64 Join_2_Value_1 String 1 UInt64 Join_3_Value_1 String
4 UInt64 0 UInt64 String 0 UInt64 String 4 UInt64 Join_3_Value_4 String
SELECT '--';
--
SELECT t1.value AS t1_value, toTypeName(t1_value), t2.value AS t2_value, toTypeName(t2_value), t3.value AS t3_value, toTypeName(t3_value)
FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) RIGHT JOIN test_table_join_3 AS t3 USING(id);
FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) RIGHT JOIN test_table_join_3 AS t3 USING(id)
ORDER BY ALL;
String String Join_3_Value_4 String
Join_1_Value_0 String Join_2_Value_0 String Join_3_Value_0 String
Join_1_Value_1 String Join_2_Value_1 String Join_3_Value_1 String
String String Join_3_Value_4 String
SELECT '--';
--
SELECT 1 FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) RIGHT JOIN test_table_join_3 AS t3 USING(id);
@ -156,17 +162,19 @@ SELECT 'First JOIN INNER second JOIN FULL';
First JOIN INNER second JOIN FULL
SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value),
t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value), t3.id AS t3_id, toTypeName(t3_id), t3.value AS t3_value, toTypeName(t3_value)
FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) FULL JOIN test_table_join_3 AS t3 USING(id);
FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) FULL JOIN test_table_join_3 AS t3 USING(id)
ORDER BY ALL;
0 UInt64 0 UInt64 String 0 UInt64 String 4 UInt64 Join_3_Value_4 String
0 UInt64 0 UInt64 Join_1_Value_0 String 0 UInt64 Join_2_Value_0 String 0 UInt64 Join_3_Value_0 String
1 UInt64 1 UInt64 Join_1_Value_1 String 1 UInt64 Join_2_Value_1 String 1 UInt64 Join_3_Value_1 String
0 UInt64 0 UInt64 String 0 UInt64 String 4 UInt64 Join_3_Value_4 String
SELECT '--';
--
SELECT t1.value AS t1_value, toTypeName(t1_value), t2.value AS t2_value, toTypeName(t2_value), t3.value AS t3_value, toTypeName(t3_value)
FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) FULL JOIN test_table_join_3 AS t3 USING(id);
FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) FULL JOIN test_table_join_3 AS t3 USING(id)
ORDER BY ALL;
String String Join_3_Value_4 String
Join_1_Value_0 String Join_2_Value_0 String Join_3_Value_0 String
Join_1_Value_1 String Join_2_Value_1 String Join_3_Value_1 String
String String Join_3_Value_4 String
SELECT '--';
--
SELECT 1 FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) FULL JOIN test_table_join_3 AS t3 USING(id);
@ -178,13 +186,15 @@ SELECT 'First JOIN LEFT second JOIN INNER';
First JOIN LEFT second JOIN INNER
SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value),
t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value), t3.id AS t3_id, toTypeName(t3_id), t3.value AS t3_value, toTypeName(t3_value)
FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 USING (id) INNER JOIN test_table_join_3 AS t3 USING(id);
FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 USING (id) INNER JOIN test_table_join_3 AS t3 USING(id)
ORDER BY ALL;
0 UInt64 0 UInt64 Join_1_Value_0 String 0 UInt64 Join_2_Value_0 String 0 UInt64 Join_3_Value_0 String
1 UInt64 1 UInt64 Join_1_Value_1 String 1 UInt64 Join_2_Value_1 String 1 UInt64 Join_3_Value_1 String
SELECT '--';
--
SELECT t1.value AS t1_value, toTypeName(t1_value), t2.value AS t2_value, toTypeName(t2_value), t3.value AS t3_value, toTypeName(t3_value)
FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 USING (id) INNER JOIN test_table_join_3 AS t3 USING(id);
FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 USING (id) INNER JOIN test_table_join_3 AS t3 USING(id)
ORDER BY ALL;
Join_1_Value_0 String Join_2_Value_0 String Join_3_Value_0 String
Join_1_Value_1 String Join_2_Value_1 String Join_3_Value_1 String
SELECT '--';
@ -197,14 +207,16 @@ SELECT 'First JOIN LEFT second JOIN LEFT';
First JOIN LEFT second JOIN LEFT
SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value),
t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value), t3.id AS t3_id, toTypeName(t3_id), t3.value AS t3_value, toTypeName(t3_value)
FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 USING (id) LEFT JOIN test_table_join_3 AS t3 USING(id);
FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 USING (id) LEFT JOIN test_table_join_3 AS t3 USING(id)
ORDER BY ALL;
0 UInt64 0 UInt64 Join_1_Value_0 String 0 UInt64 Join_2_Value_0 String 0 UInt64 Join_3_Value_0 String
1 UInt64 1 UInt64 Join_1_Value_1 String 1 UInt64 Join_2_Value_1 String 1 UInt64 Join_3_Value_1 String
2 UInt64 2 UInt64 Join_1_Value_2 String 0 UInt64 String 0 UInt64 String
SELECT '--';
--
SELECT t1.value AS t1_value, toTypeName(t1_value), t2.value AS t2_value, toTypeName(t2_value), t3.value AS t3_value, toTypeName(t3_value)
FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 USING (id) LEFT JOIN test_table_join_3 AS t3 USING(id);
FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 USING (id) LEFT JOIN test_table_join_3 AS t3 USING(id)
ORDER BY ALL;
Join_1_Value_0 String Join_2_Value_0 String Join_3_Value_0 String
Join_1_Value_1 String Join_2_Value_1 String Join_3_Value_1 String
Join_1_Value_2 String String String
@ -219,17 +231,19 @@ SELECT 'First JOIN LEFT second JOIN RIGHT';
First JOIN LEFT second JOIN RIGHT
SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value),
t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value), t3.id AS t3_id, toTypeName(t3_id), t3.value AS t3_value, toTypeName(t3_value)
FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 USING (id) RIGHT JOIN test_table_join_3 AS t3 USING(id);
FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 USING (id) RIGHT JOIN test_table_join_3 AS t3 USING(id)
ORDER BY ALL;
0 UInt64 0 UInt64 Join_1_Value_0 String 0 UInt64 Join_2_Value_0 String 0 UInt64 Join_3_Value_0 String
1 UInt64 1 UInt64 Join_1_Value_1 String 1 UInt64 Join_2_Value_1 String 1 UInt64 Join_3_Value_1 String
4 UInt64 0 UInt64 String 0 UInt64 String 4 UInt64 Join_3_Value_4 String
SELECT '--';
--
SELECT t1.value AS t1_value, toTypeName(t1_value), t2.value AS t2_value, toTypeName(t2_value), t3.value AS t3_value, toTypeName(t3_value)
FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 USING (id) RIGHT JOIN test_table_join_3 AS t3 USING(id);
FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 USING (id) RIGHT JOIN test_table_join_3 AS t3 USING(id)
ORDER BY ALL;
String String Join_3_Value_4 String
Join_1_Value_0 String Join_2_Value_0 String Join_3_Value_0 String
Join_1_Value_1 String Join_2_Value_1 String Join_3_Value_1 String
String String Join_3_Value_4 String
SELECT '--';
--
SELECT 1 FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 USING (id) RIGHT JOIN test_table_join_3 AS t3 USING(id);
@ -241,19 +255,21 @@ SELECT 'First JOIN LEFT second JOIN FULL';
First JOIN LEFT second JOIN FULL
SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value),
t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value), t3.id AS t3_id, toTypeName(t3_id), t3.value AS t3_value, toTypeName(t3_value)
FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 USING (id) FULL JOIN test_table_join_3 AS t3 USING(id);
FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 USING (id) FULL JOIN test_table_join_3 AS t3 USING(id)
ORDER BY ALL;
0 UInt64 0 UInt64 String 0 UInt64 String 4 UInt64 Join_3_Value_4 String
0 UInt64 0 UInt64 Join_1_Value_0 String 0 UInt64 Join_2_Value_0 String 0 UInt64 Join_3_Value_0 String
1 UInt64 1 UInt64 Join_1_Value_1 String 1 UInt64 Join_2_Value_1 String 1 UInt64 Join_3_Value_1 String
2 UInt64 2 UInt64 Join_1_Value_2 String 0 UInt64 String 0 UInt64 String
0 UInt64 0 UInt64 String 0 UInt64 String 4 UInt64 Join_3_Value_4 String
SELECT '--';
--
SELECT t1.value AS t1_value, toTypeName(t1_value), t2.value AS t2_value, toTypeName(t2_value), t3.value AS t3_value, toTypeName(t3_value)
FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 USING (id) FULL JOIN test_table_join_3 AS t3 USING(id);
FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 USING (id) FULL JOIN test_table_join_3 AS t3 USING(id)
ORDER BY ALL;
String String Join_3_Value_4 String
Join_1_Value_0 String Join_2_Value_0 String Join_3_Value_0 String
Join_1_Value_1 String Join_2_Value_1 String Join_3_Value_1 String
Join_1_Value_2 String String String
String String Join_3_Value_4 String
SELECT '--';
--
SELECT 1 FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 USING (id) FULL JOIN test_table_join_3 AS t3 USING(id);
@ -266,13 +282,15 @@ SELECT 'First JOIN RIGHT second JOIN INNER';
First JOIN RIGHT second JOIN INNER
SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value),
t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value), t3.id AS t3_id, toTypeName(t3_id), t3.value AS t3_value, toTypeName(t3_value)
FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING (id) INNER JOIN test_table_join_3 AS t3 USING(id);
FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING (id) INNER JOIN test_table_join_3 AS t3 USING(id)
ORDER BY ALL;
0 UInt64 0 UInt64 Join_1_Value_0 String 0 UInt64 Join_2_Value_0 String 0 UInt64 Join_3_Value_0 String
1 UInt64 1 UInt64 Join_1_Value_1 String 1 UInt64 Join_2_Value_1 String 1 UInt64 Join_3_Value_1 String
SELECT '--';
--
SELECT t1.value AS t1_value, toTypeName(t1_value), t2.value AS t2_value, toTypeName(t2_value), t3.value AS t3_value, toTypeName(t3_value)
FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING (id) INNER JOIN test_table_join_3 AS t3 USING(id);
FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING (id) INNER JOIN test_table_join_3 AS t3 USING(id)
ORDER BY ALL;
Join_1_Value_0 String Join_2_Value_0 String Join_3_Value_0 String
Join_1_Value_1 String Join_2_Value_1 String Join_3_Value_1 String
SELECT '--';
@ -285,17 +303,19 @@ SELECT 'First JOIN RIGHT second JOIN LEFT';
First JOIN RIGHT second JOIN LEFT
SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value),
t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value), t3.id AS t3_id, toTypeName(t3_id), t3.value AS t3_value, toTypeName(t3_value)
FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING (id) LEFT JOIN test_table_join_3 AS t3 USING(id);
FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING (id) LEFT JOIN test_table_join_3 AS t3 USING(id)
ORDER BY ALL;
0 UInt64 0 UInt64 Join_1_Value_0 String 0 UInt64 Join_2_Value_0 String 0 UInt64 Join_3_Value_0 String
1 UInt64 1 UInt64 Join_1_Value_1 String 1 UInt64 Join_2_Value_1 String 1 UInt64 Join_3_Value_1 String
3 UInt64 0 UInt64 String 3 UInt64 Join_2_Value_3 String 0 UInt64 String
SELECT '--';
--
SELECT t1.value AS t1_value, toTypeName(t1_value), t2.value AS t2_value, toTypeName(t2_value), t3.value AS t3_value, toTypeName(t3_value)
FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING (id) LEFT JOIN test_table_join_3 AS t3 USING(id);
FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING (id) LEFT JOIN test_table_join_3 AS t3 USING(id)
ORDER BY ALL;
String Join_2_Value_3 String String
Join_1_Value_0 String Join_2_Value_0 String Join_3_Value_0 String
Join_1_Value_1 String Join_2_Value_1 String Join_3_Value_1 String
String Join_2_Value_3 String String
SELECT '--';
--
SELECT 1 FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING (id) LEFT JOIN test_table_join_3 AS t3 USING(id);
@ -307,17 +327,19 @@ SELECT 'First JOIN RIGHT second JOIN RIGHT';
First JOIN RIGHT second JOIN RIGHT
SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value),
t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value), t3.id AS t3_id, toTypeName(t3_id), t3.value AS t3_value, toTypeName(t3_value)
FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING (id) RIGHT JOIN test_table_join_3 AS t3 USING(id);
FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING (id) RIGHT JOIN test_table_join_3 AS t3 USING(id)
ORDER BY ALL;
0 UInt64 0 UInt64 Join_1_Value_0 String 0 UInt64 Join_2_Value_0 String 0 UInt64 Join_3_Value_0 String
1 UInt64 1 UInt64 Join_1_Value_1 String 1 UInt64 Join_2_Value_1 String 1 UInt64 Join_3_Value_1 String
4 UInt64 0 UInt64 String 0 UInt64 String 4 UInt64 Join_3_Value_4 String
SELECT '--';
--
SELECT t1.value AS t1_value, toTypeName(t1_value), t2.value AS t2_value, toTypeName(t2_value), t3.value AS t3_value, toTypeName(t3_value)
FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING (id) RIGHT JOIN test_table_join_3 AS t3 USING(id);
FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING (id) RIGHT JOIN test_table_join_3 AS t3 USING(id)
ORDER BY ALL;
String String Join_3_Value_4 String
Join_1_Value_0 String Join_2_Value_0 String Join_3_Value_0 String
Join_1_Value_1 String Join_2_Value_1 String Join_3_Value_1 String
String String Join_3_Value_4 String
SELECT '--';
--
SELECT 1 FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING (id) RIGHT JOIN test_table_join_3 AS t3 USING(id);
@ -329,19 +351,21 @@ SELECT 'First JOIN RIGHT second JOIN FULL';
First JOIN RIGHT second JOIN FULL
SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value),
t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value), t3.id AS t3_id, toTypeName(t3_id), t3.value AS t3_value, toTypeName(t3_value)
FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING (id) FULL JOIN test_table_join_3 AS t3 USING(id);
FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING (id) FULL JOIN test_table_join_3 AS t3 USING(id)
ORDER BY ALL;
0 UInt64 0 UInt64 String 0 UInt64 String 4 UInt64 Join_3_Value_4 String
0 UInt64 0 UInt64 Join_1_Value_0 String 0 UInt64 Join_2_Value_0 String 0 UInt64 Join_3_Value_0 String
1 UInt64 1 UInt64 Join_1_Value_1 String 1 UInt64 Join_2_Value_1 String 1 UInt64 Join_3_Value_1 String
3 UInt64 0 UInt64 String 3 UInt64 Join_2_Value_3 String 0 UInt64 String
0 UInt64 0 UInt64 String 0 UInt64 String 4 UInt64 Join_3_Value_4 String
SELECT '--';
--
SELECT t1.value AS t1_value, toTypeName(t1_value), t2.value AS t2_value, toTypeName(t2_value), t3.value AS t3_value, toTypeName(t3_value)
FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING (id) FULL JOIN test_table_join_3 AS t3 USING(id);
FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING (id) FULL JOIN test_table_join_3 AS t3 USING(id)
ORDER BY ALL;
String String Join_3_Value_4 String
String Join_2_Value_3 String String
Join_1_Value_0 String Join_2_Value_0 String Join_3_Value_0 String
Join_1_Value_1 String Join_2_Value_1 String Join_3_Value_1 String
String Join_2_Value_3 String String
String String Join_3_Value_4 String
SELECT '--';
--
SELECT 1 FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING (id) FULL JOIN test_table_join_3 AS t3 USING(id);
@ -354,14 +378,16 @@ SELECT 'First JOIN FULL second JOIN INNER';
First JOIN FULL second JOIN INNER
SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value),
t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value), t3.id AS t3_id, toTypeName(t3_id), t3.value AS t3_value, toTypeName(t3_value)
FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (id) INNER JOIN test_table_join_3 AS t3 USING(id);
FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (id) INNER JOIN test_table_join_3 AS t3 USING(id)
ORDER BY ALL;
0 UInt64 0 UInt64 String 3 UInt64 Join_2_Value_3 String 0 UInt64 Join_3_Value_0 String
0 UInt64 0 UInt64 Join_1_Value_0 String 0 UInt64 Join_2_Value_0 String 0 UInt64 Join_3_Value_0 String
1 UInt64 1 UInt64 Join_1_Value_1 String 1 UInt64 Join_2_Value_1 String 1 UInt64 Join_3_Value_1 String
0 UInt64 0 UInt64 String 3 UInt64 Join_2_Value_3 String 0 UInt64 Join_3_Value_0 String
SELECT '--';
--
SELECT t1.value AS t1_value, toTypeName(t1_value), t2.value AS t2_value, toTypeName(t2_value), t3.value AS t3_value, toTypeName(t3_value)
FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (id) INNER JOIN test_table_join_3 AS t3 USING(id);
FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (id) INNER JOIN test_table_join_3 AS t3 USING(id)
ORDER BY ALL;
Join_1_Value_0 String Join_2_Value_0 String Join_3_Value_0 String
Join_1_Value_1 String Join_2_Value_1 String Join_3_Value_1 String
SELECT '--';
@ -374,19 +400,21 @@ SELECT 'First JOIN FULL second JOIN LEFT';
First JOIN FULL second JOIN LEFT
SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value),
t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value), t3.id AS t3_id, toTypeName(t3_id), t3.value AS t3_value, toTypeName(t3_value)
FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (id) LEFT JOIN test_table_join_3 AS t3 USING(id);
FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (id) LEFT JOIN test_table_join_3 AS t3 USING(id)
ORDER BY ALL;
0 UInt64 0 UInt64 String 3 UInt64 Join_2_Value_3 String 0 UInt64 Join_3_Value_0 String
0 UInt64 0 UInt64 Join_1_Value_0 String 0 UInt64 Join_2_Value_0 String 0 UInt64 Join_3_Value_0 String
1 UInt64 1 UInt64 Join_1_Value_1 String 1 UInt64 Join_2_Value_1 String 1 UInt64 Join_3_Value_1 String
2 UInt64 2 UInt64 Join_1_Value_2 String 0 UInt64 String 0 UInt64 String
0 UInt64 0 UInt64 String 3 UInt64 Join_2_Value_3 String 0 UInt64 Join_3_Value_0 String
SELECT '--';
--
SELECT t1.value AS t1_value, toTypeName(t1_value), t2.value AS t2_value, toTypeName(t2_value), t3.value AS t3_value, toTypeName(t3_value)
FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (id) LEFT JOIN test_table_join_3 AS t3 USING(id);
FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (id) LEFT JOIN test_table_join_3 AS t3 USING(id)
ORDER BY ALL;
String Join_2_Value_3 String String
Join_1_Value_0 String Join_2_Value_0 String Join_3_Value_0 String
Join_1_Value_1 String Join_2_Value_1 String Join_3_Value_1 String
Join_1_Value_2 String String String
String Join_2_Value_3 String String
SELECT '--';
--
SELECT 1 FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (id) LEFT JOIN test_table_join_3 AS t3 USING(id);
@ -399,18 +427,20 @@ SELECT 'First JOIN FULL second JOIN RIGHT';
First JOIN FULL second JOIN RIGHT
SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value),
t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value), t3.id AS t3_id, toTypeName(t3_id), t3.value AS t3_value, toTypeName(t3_value)
FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (id) RIGHT JOIN test_table_join_3 AS t3 USING(id);
FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (id) RIGHT JOIN test_table_join_3 AS t3 USING(id)
ORDER BY ALL;
0 UInt64 0 UInt64 String 3 UInt64 Join_2_Value_3 String 0 UInt64 Join_3_Value_0 String
0 UInt64 0 UInt64 Join_1_Value_0 String 0 UInt64 Join_2_Value_0 String 0 UInt64 Join_3_Value_0 String
1 UInt64 1 UInt64 Join_1_Value_1 String 1 UInt64 Join_2_Value_1 String 1 UInt64 Join_3_Value_1 String
0 UInt64 0 UInt64 String 3 UInt64 Join_2_Value_3 String 0 UInt64 Join_3_Value_0 String
4 UInt64 0 UInt64 String 0 UInt64 String 4 UInt64 Join_3_Value_4 String
SELECT '--';
--
SELECT t1.value AS t1_value, toTypeName(t1_value), t2.value AS t2_value, toTypeName(t2_value), t3.value AS t3_value, toTypeName(t3_value)
FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (id) RIGHT JOIN test_table_join_3 AS t3 USING(id);
FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (id) RIGHT JOIN test_table_join_3 AS t3 USING(id)
ORDER BY ALL;
String String Join_3_Value_4 String
Join_1_Value_0 String Join_2_Value_0 String Join_3_Value_0 String
Join_1_Value_1 String Join_2_Value_1 String Join_3_Value_1 String
String String Join_3_Value_4 String
SELECT '--';
--
SELECT 1 FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (id) RIGHT JOIN test_table_join_3 AS t3 USING(id);
@ -422,21 +452,23 @@ SELECT 'First JOIN FULL second JOIN FULL';
First JOIN FULL second JOIN FULL
SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value),
t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value), t3.id AS t3_id, toTypeName(t3_id), t3.value AS t3_value, toTypeName(t3_value)
FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (id) FULL JOIN test_table_join_3 AS t3 USING(id);
FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (id) FULL JOIN test_table_join_3 AS t3 USING(id)
ORDER BY ALL;
0 UInt64 0 UInt64 String 0 UInt64 String 4 UInt64 Join_3_Value_4 String
0 UInt64 0 UInt64 String 3 UInt64 Join_2_Value_3 String 0 UInt64 Join_3_Value_0 String
0 UInt64 0 UInt64 Join_1_Value_0 String 0 UInt64 Join_2_Value_0 String 0 UInt64 Join_3_Value_0 String
1 UInt64 1 UInt64 Join_1_Value_1 String 1 UInt64 Join_2_Value_1 String 1 UInt64 Join_3_Value_1 String
2 UInt64 2 UInt64 Join_1_Value_2 String 0 UInt64 String 0 UInt64 String
0 UInt64 0 UInt64 String 3 UInt64 Join_2_Value_3 String 0 UInt64 Join_3_Value_0 String
0 UInt64 0 UInt64 String 0 UInt64 String 4 UInt64 Join_3_Value_4 String
SELECT '--';
--
SELECT t1.value AS t1_value, toTypeName(t1_value), t2.value AS t2_value, toTypeName(t2_value), t3.value AS t3_value, toTypeName(t3_value)
FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (id) FULL JOIN test_table_join_3 AS t3 USING(id);
FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (id) FULL JOIN test_table_join_3 AS t3 USING(id)
ORDER BY ALL;
String String Join_3_Value_4 String
String Join_2_Value_3 String String
Join_1_Value_0 String Join_2_Value_0 String Join_3_Value_0 String
Join_1_Value_1 String Join_2_Value_1 String Join_3_Value_1 String
Join_1_Value_2 String String String
String Join_2_Value_3 String String
String String Join_3_Value_4 String
SELECT '--';
--
SELECT 1 FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (id) FULL JOIN test_table_join_3 AS t3 USING(id);
View File
@ -64,12 +64,14 @@ SELECT 'First JOIN {{ first_join_type }} second JOIN {{ second_join_type }}';
SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value),
t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value), t3.id AS t3_id, toTypeName(t3_id), t3.value AS t3_value, toTypeName(t3_value)
FROM test_table_join_1 AS t1 {{ first_join_type }} JOIN test_table_join_2 AS t2 USING (id) {{ second_join_type }} JOIN test_table_join_3 AS t3 USING(id);
FROM test_table_join_1 AS t1 {{ first_join_type }} JOIN test_table_join_2 AS t2 USING (id) {{ second_join_type }} JOIN test_table_join_3 AS t3 USING(id)
ORDER BY ALL;
SELECT '--';
SELECT t1.value AS t1_value, toTypeName(t1_value), t2.value AS t2_value, toTypeName(t2_value), t3.value AS t3_value, toTypeName(t3_value)
FROM test_table_join_1 AS t1 {{ first_join_type }} JOIN test_table_join_2 AS t2 USING (id) {{ second_join_type }} JOIN test_table_join_3 AS t3 USING(id);
FROM test_table_join_1 AS t1 {{ first_join_type }} JOIN test_table_join_2 AS t2 USING (id) {{ second_join_type }} JOIN test_table_join_3 AS t3 USING(id)
ORDER BY ALL;
SELECT '--';
View File
@ -0,0 +1,40 @@
-94 53304 17815465730223871
57 15888 33652524900575246
-4 14877 53832092832965652
33 3387 86326601511136103
104 3383 115438187156564782
-11 37403 145056169255259589
-72 46473 159324626361233509
103 35510 173644182696185097
-26 60902 185175917734318892
70 48767 193167023342307884
2 21648 247953090704786001
20 2986 268127160817221407
76 20277 290178827409195337
61 28692 305149163504092270
-74 65427 326871531363668398
-15 20256 351812901947846888
-39 65472 357371822264135234
79 38671 371605113770958364
-29 41706 394460710549666968
92 25026 412913269933311543
-94 53304 17815465730223871
57 15888 33652524900575246
-4 14877 53832092832965652
33 3387 86326601511136103
104 3383 115438187156564782
-11 37403 145056169255259589
-72 46473 159324626361233509
103 35510 173644182696185097
-26 60902 185175917734318892
70 48767 193167023342307884
2 21648 247953090704786001
20 2986 268127160817221407
76 20277 290178827409195337
61 28692 305149163504092270
-74 65427 326871531363668398
-15 20256 351812901947846888
-39 65472 357371822264135234
79 38671 371605113770958364
-29 41706 394460710549666968
92 25026 412913269933311543
View File
@ -0,0 +1,22 @@
#!/usr/bin/env bash
# Tags: no-ubsan, no-fasttest
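# Reads integer columns with Parquet logical-type annotations (int8/uint16/uint64) through file(), once with the native reader and once without, so the two result sets can be compared.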
CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CUR_DIR"/../shell_config.sh
USER_FILES_PATH=$($CLICKHOUSE_CLIENT_BINARY --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}')
WORKING_DIR="${USER_FILES_PATH}/${CLICKHOUSE_TEST_UNIQUE_NAME}"
mkdir -p "${WORKING_DIR}"
DATA_FILE="${CUR_DIR}/data_parquet/multi_column_bf.gz.parquet"
DATA_FILE_USER_PATH="${WORKING_DIR}/multi_column_bf.gz.parquet"
cp "${DATA_FILE}" "${DATA_FILE_USER_PATH}"
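# Run the same query with the native Parquet reader disabled and then enabled; both runs are expected to produce the 20 rows listed in the reference file.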
${CLICKHOUSE_CLIENT} --query="select int8_logical, uint16_logical, uint64_logical from file('${DATA_FILE_USER_PATH}', Parquet) order by uint64_logical limit 20 SETTINGS input_format_parquet_use_native_reader=false;";
${CLICKHOUSE_CLIENT} --query="select int8_logical, uint16_logical, uint64_logical from file('${DATA_FILE_USER_PATH}', Parquet) order by uint64_logical limit 20 SETTINGS input_format_parquet_use_native_reader=true;";