Merge pull request #35123 from zhanghuajieHIT/fix_build_fail_with_gcc
fix build fail with gcc
Commit: ad6b3693e1
@@ -4,12 +4,21 @@
 extern "C" {
 #endif
 
+#if !defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wredundant-decls"
+#endif
+
 #include <jemalloc/jemalloc_defs.h>
 #include <jemalloc/jemalloc_rename.h>
 #include <jemalloc/jemalloc_macros.h>
 #include <jemalloc/jemalloc_protos.h>
 #include <jemalloc/jemalloc_typedefs.h>
+
+#if !defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
 
 #ifdef __cplusplus
 }
 #endif
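Note: this guard addresses a GCC-only diagnostic. jemalloc's public headers re-declare symbols that GCC reports under -Wredundant-decls (and the build presumably treats warnings as errors), while clang does not need the workaround, hence the !defined(__clang__) check. The push/ignored/pop sequence keeps the suppression scoped to these includes. A minimal sketch of the mechanism; the report_status declarations are hypothetical, not taken from jemalloc:

    // Two visible declarations of the same function trigger -Wredundant-decls
    // when that warning is enabled under GCC.
    void report_status(int code);
    void report_status(int code);   // warning: redundant redeclaration of 'report_status'

    #if !defined(__clang__)          // apply the workaround to GCC only
    #pragma GCC diagnostic push
    #pragma GCC diagnostic ignored "-Wredundant-decls"
    #endif
    void report_status(int code);   // suppressed while the pragma is active
    #if !defined(__clang__)
    #pragma GCC diagnostic pop      // restore the previous diagnostic state
    #endif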
@@ -194,7 +194,7 @@ StoragePtr DatabasePostgreSQL::fetchTable(const String & table_name, ContextPtr,
         if (cache_tables)
             cached_tables[table_name] = storage;
 
-        return std::move(storage);
+        return storage;
     }
 
     if (table_checked || checkPostgresTable(table_name))
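Note: GCC's -Wredundant-move (pulled in by -Wextra on recent releases) flags return std::move(local) when returning the named local would be treated as an rvalue anyway; the explicit std::move adds nothing and also disables named return value optimisation. A hedged sketch of the pattern with made-up names; the same change recurs in the StorageMaterializedPostgreSQL hunks further down:

    #include <memory>
    #include <utility>

    struct Table {};
    using TablePtr = std::shared_ptr<Table>;

    TablePtr make_table_noisy()
    {
        auto storage = std::make_shared<Table>();
        return std::move(storage);   // GCC: redundant move in return statement [-Wredundant-move]
    }

    TablePtr make_table_clean()
    {
        auto storage = std::make_shared<Table>();
        return storage;              // implicitly moved; no warning, NRVO stays possible
    }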
@@ -414,7 +414,7 @@ ASTPtr DatabasePostgreSQL::getCreateTableQueryImpl(const String & table_name, Co
     assert(storage_engine_arguments->children.size() >= 2);
     storage_engine_arguments->children.insert(storage_engine_arguments->children.begin() + 2, std::make_shared<ASTLiteral>(table_id.table_name));
 
-    return std::move(create_table_query);
+    return create_table_query;
 }
 
 
@@ -5,7 +5,7 @@ namespace DB
 
 enum class MsgPackExtensionTypes
 {
-    UUID = 0x02,
+    UUIDType = 0x02,
 };
 
 }
@@ -55,9 +55,9 @@ void JSONCompactEachRowRowOutputFormat::writeRowEndDelimiter()
 void JSONCompactEachRowRowOutputFormat::writeTotals(const Columns & columns, size_t row_num)
 {
     writeChar('\n', out);
-    size_t num_columns = columns.size();
+    size_t columns_size = columns.size();
     writeRowStartDelimiter();
-    for (size_t i = 0; i < num_columns; ++i)
+    for (size_t i = 0; i < columns_size; ++i)
     {
         if (i != 0)
             writeFieldDelimiter();
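Note: the num_columns -> columns_size renames in this and the following output-format hunks (JSON, MsgPack, Vertical) read as shadowing fixes: the row output format base class presumably already carries a num_columns member, and GCC's -Wshadow warns when a local of the same name hides it. An illustrative sketch with simplified, assumed class names, not ClickHouse's actual hierarchy:

    #include <cstddef>
    #include <vector>

    struct RowOutputFormat
    {
        std::size_t num_columns = 0;   // member assumed for illustration

        void writeTotals(const std::vector<int> & columns)
        {
            // A local named num_columns here would shadow the member and warn under -Wshadow;
            // renaming it sidesteps the diagnostic without changing behaviour.
            std::size_t columns_size = columns.size();
            (void)columns_size;
        }
    };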
@@ -154,9 +154,9 @@ void JSONRowOutputFormat::writeBeforeTotals()
 
 void JSONRowOutputFormat::writeTotals(const Columns & columns, size_t row_num)
 {
-    size_t num_columns = columns.size();
+    size_t columns_size = columns.size();
 
-    for (size_t i = 0; i < num_columns; ++i)
+    for (size_t i = 0; i < columns_size; ++i)
     {
         if (i != 0)
             writeTotalsFieldDelimiter();
@@ -353,7 +353,7 @@ bool MsgPackVisitor::visit_nil()
 bool MsgPackVisitor::visit_ext(const char * value, uint32_t size)
 {
     int8_t type = *value;
-    if (*value == int8_t(MsgPackExtensionTypes::UUID))
+    if (*value == int8_t(MsgPackExtensionTypes::UUIDType))
     {
         insertUUID(info_stack.top().column, info_stack.top().type, value + 1, size - 1);
         return true;
@@ -496,11 +496,12 @@ DataTypePtr MsgPackSchemaReader::getDataType(const msgpack::object & object)
         case msgpack::type::object_type::EXT:
         {
             msgpack::object_ext object_ext = object.via.ext;
-            if (object_ext.type() == int8_t(MsgPackExtensionTypes::UUID))
+            if (object_ext.type() == int8_t(MsgPackExtensionTypes::UUIDType))
                 return std::make_shared<DataTypeUUID>();
             throw Exception(ErrorCodes::BAD_ARGUMENTS, "Msgpack extension type {%x} is not supported", object_ext.type());
         }
     }
     __builtin_unreachable();
 }
 
 DataTypes MsgPackSchemaReader::readRowAndGetDataTypes()
@@ -199,7 +199,7 @@ void MsgPackRowOutputFormat::serializeField(const IColumn & column, DataTypePtr
             writeBinaryBigEndian(value.toUnderType().items[0], buf);
             writeBinaryBigEndian(value.toUnderType().items[1], buf);
             StringRef uuid_ext = buf.stringRef();
-            packer.pack_ext(sizeof(UUID), int8_t(MsgPackExtensionTypes::UUID));
+            packer.pack_ext(sizeof(UUID), int8_t(MsgPackExtensionTypes::UUIDType));
             packer.pack_ext_body(uuid_ext.data, uuid_ext.size);
             return;
         }
@@ -213,8 +213,8 @@ void MsgPackRowOutputFormat::serializeField(const IColumn & column, DataTypePtr
 
 void MsgPackRowOutputFormat::write(const Columns & columns, size_t row_num)
 {
-    size_t num_columns = columns.size();
-    for (size_t i = 0; i < num_columns; ++i)
+    size_t columns_size = columns.size();
+    for (size_t i = 0; i < columns_size; ++i)
     {
         serializeField(*columns[i], types[i], row_num);
     }
@@ -141,7 +141,7 @@ void VerticalRowOutputFormat::writeSpecialRow(const Columns & columns, size_t ro
     row_number = 0;
     field_number = 0;
 
-    size_t num_columns = columns.size();
+    size_t columns_size = columns.size();
 
     writeCString(title, out);
     writeCString(":\n", out);
@@ -151,7 +151,7 @@ void VerticalRowOutputFormat::writeSpecialRow(const Columns & columns, size_t ro
         writeCString("─", out);
     writeChar('\n', out);
 
-    for (size_t i = 0; i < num_columns; ++i)
+    for (size_t i = 0; i < columns_size; ++i)
         writeField(*columns[i], *serializations[i], row_num);
 }
 
@@ -266,7 +266,7 @@ void GroupingAggregatedTransform::addChunk(Chunk chunk, size_t input)
             last_bucket_number[input] = bucket;
         }
     }
-    else if (const auto * in_order_info = typeid_cast<const ChunkInfoWithAllocatedBytes *>(info.get()))
+    else if (typeid_cast<const ChunkInfoWithAllocatedBytes *>(info.get()))
    {
         single_level_chunks.emplace_back(std::move(chunk));
     }
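Note: in this and the two aggregation-transform hunks below, the pointer bound in the else if condition (in_order_info) is never used inside the branch, so the name is dropped and only the result of typeid_cast is tested; GCC evidently rejected the unused binding under warnings-as-errors where clang did not. A simplified sketch of the rewritten form; the types and the dynamic_cast stand-in are illustrative only:

    struct ChunkInfo { virtual ~ChunkInfo() = default; };
    struct ChunkInfoWithAllocatedBytes : ChunkInfo { };

    void handle(const ChunkInfo * info)
    {
        // Before: else if (const auto * in_order_info = dynamic_cast<...>(info)) -- name never used.
        // After: test the cast result directly, introducing no variable to warn about.
        if (dynamic_cast<const ChunkInfoWithAllocatedBytes *>(info))
        {
            // handle the single-level chunk
        }
    }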
@@ -334,7 +334,7 @@ void MergingAggregatedBucketTransform::transform(Chunk & chunk)
 
             blocks_list.emplace_back(std::move(block));
         }
-        else if (const auto * in_order_info = typeid_cast<const ChunkInfoWithAllocatedBytes *>(cur_info.get()))
+        else if (typeid_cast<const ChunkInfoWithAllocatedBytes *>(cur_info.get()))
         {
             Block block = header.cloneWithColumns(cur_chunk.detachColumns());
             block.info.is_overflows = false;
@@ -49,7 +49,7 @@ void MergingAggregatedTransform::consume(Chunk chunk)
 
         bucket_to_blocks[agg_info->bucket_num].emplace_back(std::move(block));
     }
-    else if (const auto * in_order_info = typeid_cast<const ChunkInfoWithAllocatedBytes *>(info.get()))
+    else if (typeid_cast<const ChunkInfoWithAllocatedBytes *>(info.get()))
     {
         auto block = getInputPort().getHeader().cloneWithColumns(chunk.getColumns());
         block.info.is_overflows = false;
@@ -352,7 +352,7 @@ ASTPtr StorageMaterializedPostgreSQL::getColumnDeclaration(const DataTypePtr & d
         ast_expression->name = "DateTime64";
         ast_expression->arguments = std::make_shared<ASTExpressionList>();
         ast_expression->arguments->children.emplace_back(std::make_shared<ASTLiteral>(UInt32(6)));
-        return std::move(ast_expression);
+        return ast_expression;
     }
 
     return std::make_shared<ASTIdentifier>(data_type->getName());
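Note: this hunk and the next are the same -Wredundant-move cleanup as above, with one twist: the local is a shared_ptr to a derived AST node being returned as ASTPtr. Under C++20's implicit-move rules the conversion already moves, so newer GCC still reports the explicit std::move as redundant. A hedged sketch with simplified AST types:

    #include <memory>

    struct IAST { virtual ~IAST() = default; };
    struct ASTFunction : IAST { };
    using ASTPtr = std::shared_ptr<IAST>;

    ASTPtr make_ast()
    {
        auto ast_expression = std::make_shared<ASTFunction>();
        return ast_expression;   // implicitly moved into the shared_ptr<IAST> return value
    }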
@@ -534,7 +534,7 @@ ASTPtr StorageMaterializedPostgreSQL::getCreateNestedTableQuery(
     storage_metadata.setConstraints(constraints);
     setInMemoryMetadata(storage_metadata);
 
-    return std::move(create_table_query);
+    return create_table_query;
 }
 
 
@@ -7149,9 +7149,9 @@ void StorageReplicatedMergeTree::createTableSharedID()
     if (!zookeeper->tryGet(zookeeper_table_id_path, id))
     {
         UUID table_id_candidate;
-        auto storage_id = getStorageID();
-        if (storage_id.uuid != UUIDHelpers::Nil)
-            table_id_candidate = storage_id.uuid;
+        auto local_storage_id = getStorageID();
+        if (local_storage_id.uuid != UUIDHelpers::Nil)
+            table_id_candidate = local_storage_id.uuid;
         else
             table_id_candidate = UUIDHelpers::generateV4();
 
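Note: renaming the local from storage_id to local_storage_id reads as another shadowing fix: getStorageID() presumably sits next to a storage_id data member in the storage base class, and a local of that name would trip GCC's -Wshadow. Simplified sketch; the member layout is assumed for illustration, not copied from IStorage:

    #include <cstdint>

    struct StorageID { std::uint64_t uuid = 0; };

    struct StorageBase
    {
        StorageID storage_id;                                    // assumed member
        StorageID getStorageID() const { return storage_id; }

        std::uint64_t pickTableId() const
        {
            auto local_storage_id = getStorageID();              // 'auto storage_id = ...' would shadow the member
            return local_storage_id.uuid;
        }
    };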
@@ -17,7 +17,7 @@ target_include_directories(
     ${ClickHouse_SOURCE_DIR}/contrib/double-conversion ${ClickHouse_SOURCE_DIR}/contrib/dragonbox/include
     ${ClickHouse_SOURCE_DIR}/contrib/fmtlib/include
     ${ClickHouse_SOURCE_DIR}/contrib/cityhash102/include
-    ${RE2_INCLUDE_DIR} ${CMAKE_BINARY_DIR}/contrib/re2_st
+    ${RE2_INCLUDE_DIR} ${CMAKE_BINARY_DIR}/contrib/re2-cmake
 )
 
 target_compile_definitions(graphite-rollup-bench PRIVATE RULES_DIR="${CMAKE_CURRENT_SOURCE_DIR}")