Mirror of https://github.com/ClickHouse/ClickHouse.git
Commit 0b2860d822: Merge branch 'master' into async-loader
@@ -53,7 +53,7 @@ float logf(float x)
 	tmp = ix - OFF;
 	i = (tmp >> (23 - LOGF_TABLE_BITS)) % N;
 	k = (int32_t)tmp >> 23; /* arithmetic shift */
-	iz = ix - (tmp & 0x1ff << 23);
+	iz = ix - (tmp & 0xff800000);
 	invc = T[i].invc;
 	logc = T[i].logc;
 	z = (double_t)asfloat(iz);
@@ -21,7 +21,7 @@ set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
 set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
 set (CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")

-set (CMAKE_EXE_LINKER_FLAGS_INIT "-fuse-ld=bfd")
+set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fuse-ld=bfd")

 # Currently, lld does not work with the error:
 # ld.lld: error: section size decrease is too large
@@ -111,6 +111,8 @@ elseif(${CMAKE_SYSTEM_PROCESSOR} STREQUAL "mips")
     set(ARCH "generic")
 elseif(${CMAKE_SYSTEM_PROCESSOR} STREQUAL "ppc64le")
     set(ARCH "ppc64le")
+elseif(${CMAKE_SYSTEM_PROCESSOR} STREQUAL "riscv64")
+    set(ARCH "riscv64")
 else()
     message(FATAL_ERROR "Unknown processor:" ${CMAKE_SYSTEM_PROCESSOR})
 endif()
@@ -36,12 +36,10 @@ RUN arch=${TARGETARCH:-amd64} \
 # repo versions doesn't work correctly with C++17
 # also we push reports to s3, so we add index.html to subfolder urls
 # https://github.com/ClickHouse-Extras/woboq_codebrowser/commit/37e15eaf377b920acb0b48dbe82471be9203f76b
-RUN git clone https://github.com/ClickHouse/woboq_codebrowser \
-    && cd woboq_codebrowser \
+RUN git clone --depth=1 https://github.com/ClickHouse/woboq_codebrowser /woboq_codebrowser \
+    && cd /woboq_codebrowser \
     && cmake . -G Ninja -DCMAKE_BUILD_TYPE=Release -DCMAKE_CXX_COMPILER=clang\+\+-${LLVM_VERSION} -DCMAKE_C_COMPILER=clang-${LLVM_VERSION} \
-    && ninja \
-    && cd .. \
-    && rm -rf woboq_codebrowser
+    && ninja

 ENV CODEGEN=/woboq_codebrowser/generator/codebrowser_generator
 ENV CODEINDEX=/woboq_codebrowser/indexgenerator/codebrowser_indexgenerator
@@ -90,15 +90,17 @@ SELECT * FROM mySecondReplacingMT FINAL;

 ### is_deleted

-`is_deleted` — Name of the column with the type of row: `1` is a “deleted“ row, `0` is a “state“ row.
+`is_deleted` — Name of a column used during a merge to determine whether the data in this row represents the state or is to be deleted; `1` is a “deleted“ row, `0` is a “state“ row.

-Column data type — `Int8`.
+Column data type — `UInt8`.

-Can only be enabled when `ver` is used.
-The row is deleted when use the `OPTIMIZE ... FINAL CLEANUP`, or `OPTIMIZE ... FINAL` if the engine settings `clean_deleted_rows` has been set to `Always`.
-No matter the operation on the data, the version must be increased. If two inserted rows have the same version number, the last inserted one is the one kept.
+:::note
+`is_deleted` can only be enabled when `ver` is used.
+
+The row is deleted when `OPTIMIZE ... FINAL CLEANUP` or `OPTIMIZE ... FINAL` is used, or if the engine setting `clean_deleted_rows` has been set to `Always`.
+
+No matter the operation on the data, the version must be increased. If two inserted rows have the same version number, the last inserted row is the one kept.
+:::

 ## Query clauses
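To make the `ver`/`is_deleted` interaction concrete, here is a minimal hedged sketch (table and column names are illustrative and not part of this commit; depending on the server version, `CLEANUP` may additionally require an experimental setting):

```sql
CREATE TABLE replacing_demo
(
    key UInt64,
    value String,
    version UInt64,
    is_deleted UInt8
)
ENGINE = ReplacingMergeTree(version, is_deleted)
ORDER BY key;

-- The later insert has a higher version and is marked as deleted.
INSERT INTO replacing_demo VALUES (1, 'first', 1, 0);
INSERT INTO replacing_demo VALUES (1, 'first', 2, 1);

-- With CLEANUP the "deleted" row is dropped entirely instead of being kept as the latest state.
OPTIMIZE TABLE replacing_demo FINAL CLEANUP;
SELECT * FROM replacing_demo;
```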
@@ -1410,8 +1410,8 @@ and [enable_writes_to_query_cache](#enable-writes-to-query-cache) control in mor

 Possible values:

-- 0 - Yes
-- 1 - No
+- 0 - Disabled
+- 1 - Enabled

 Default value: `0`.
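For context, a hedged example of toggling query-cache behaviour per query; the setting names `use_query_cache` and `enable_writes_to_query_cache` are taken from the surrounding documentation page, not from this hunk:

```sql
-- Enable the query cache (and writes to it) for this statement only.
SELECT count()
FROM numbers(1000000)
SETTINGS use_query_cache = 1, enable_writes_to_query_cache = 1;
```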
@@ -103,7 +103,11 @@ ALTER TABLE table2 [ON CLUSTER cluster] ATTACH PARTITION partition_expr FROM tab
 ```

 This query copies the data partition from `table1` to `table2`.
-Note that data will be deleted neither from `table1` nor from `table2`.
+
+Note that:
+
+- Data will be deleted neither from `table1` nor from `table2`.
+- `table1` may be a temporary table.

 For the query to run successfully, the following conditions must be met:
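A short hedged illustration of the behaviour listed above (table names and data are made up; the temporary-table case corresponds to the documentation line added in this hunk):

```sql
CREATE TEMPORARY TABLE table1 (p UInt8, v String) ENGINE = MergeTree PARTITION BY p ORDER BY v;
CREATE TABLE table2 (p UInt8, v String) ENGINE = MergeTree PARTITION BY p ORDER BY v;

INSERT INTO table1 VALUES (1, 'a'), (2, 'b');

-- Partition 1 is copied into table2; table1 still holds its rows afterwards.
ALTER TABLE table2 ATTACH PARTITION 1 FROM table1;
```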
@@ -117,7 +121,12 @@ For the query to run successfully, the following conditions must be met:
 ALTER TABLE table2 [ON CLUSTER cluster] REPLACE PARTITION partition_expr FROM table1
 ```

-This query copies the data partition from the `table1` to `table2` and replaces existing partition in the `table2`. Note that data won’t be deleted from `table1`.
+This query copies the data partition from the `table1` to `table2` and replaces existing partition in the `table2`.
+
+Note that:
+
+- Data won’t be deleted from `table1`.
+- `table1` may be a temporary table.

 For the query to run successfully, the following conditions must be met:
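The corresponding REPLACE variant, sketched with the same illustrative tables as above:

```sql
-- Existing data of partition 1 in table2 is replaced by the partition copied from table1;
-- table1 keeps its data.
ALTER TABLE table2 REPLACE PARTITION 1 FROM table1;
```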
@@ -12,7 +12,7 @@ Compressed files are supported. Compression type is detected by the extension of
 **Syntax**

 ```sql
-SELECT <expr_list> INTO OUTFILE file_name [AND STDOUT] [COMPRESSION type [LEVEL level]]
+SELECT <expr_list> INTO OUTFILE file_name [AND STDOUT] [APPEND] [COMPRESSION type [LEVEL level]]
 ```

 `file_name` and `type` are string literals. Supported compression types are: `'none'`, `'gzip'`, `'deflate'`, `'br'`, `'xz'`, `'zstd'`, `'lz4'`, `'bz2'`.
@@ -25,6 +25,7 @@ SELECT <expr_list> INTO OUTFILE file_name [AND STDOUT] [COMPRESSION type [LEVEL
 - The query will fail if a file with the same file name already exists.
 - The default [output format](../../../interfaces/formats.md) is `TabSeparated` (like in the command-line client batch mode). Use [FORMAT](format.md) clause to change it.
 - If `AND STDOUT` is mentioned in the query then the output that is written to the file is also displayed on standard output. If used with compression, the plaintext is displayed on standard output.
+- If `APPEND` is mentioned in the query then the output is appended to an existing file. If compression is used, append cannot be used.

 **Example**
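A brief hedged example of the new `APPEND` modifier (the file path is illustrative):

```sql
SELECT 1 INTO OUTFILE '/tmp/numbers.tsv';
-- Without APPEND this second query would fail because the file already exists.
SELECT 2 INTO OUTFILE '/tmp/numbers.tsv' APPEND;
```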
@@ -1,7 +1,7 @@
 ---
 slug: /en/sql-reference/table-functions/dictionary
 sidebar_position: 54
-sidebar_label: dictionary function
+sidebar_label: dictionary
 title: dictionary
 ---
@@ -102,7 +102,11 @@ ALTER TABLE table2 [ON CLUSTER cluster] ATTACH PARTITION partition_expr FROM tab
 ```

 Copies the partition from table `table1` to table `table2`.
-Note that the data is not deleted from either `table1` or `table2`.
+
+Note that:
+
+- The data is not deleted from either `table1` or `table2`.
+- `table1` may be a temporary table.

 Keep in mind:
@@ -118,7 +122,12 @@ ALTER TABLE table2 [ON CLUSTER cluster] ATTACH PARTITION partition_expr FROM tab
 ALTER TABLE table2 [ON CLUSTER cluster] REPLACE PARTITION partition_expr FROM table1
 ```

-Copies the partition from table `table1` to table `table2`, replacing the existing data in `table2`. The data in `table1` is not deleted.
+Copies the partition from table `table1` to table `table2`, replacing the existing data in `table2`.
+
+Note that:
+
+- The data in `table1` is not deleted.
+- `table1` may be a temporary table.

 Keep in mind:
@@ -862,7 +862,8 @@ bool Client::processWithFuzzing(const String & full_query)
         const auto * tmp_pos = text_2.c_str();
         const auto ast_3 = parseQuery(tmp_pos, tmp_pos + text_2.size(),
             false /* allow_multi_statements */);
-        const auto text_3 = ast_3->formatForErrorMessage();
+        const auto text_3 = ast_3 ? ast_3->formatForErrorMessage() : "";

         if (text_3 != text_2)
         {
             fmt::print(stderr, "Found error: The query formatting is broken.\n");
@@ -877,7 +878,7 @@ bool Client::processWithFuzzing(const String & full_query)
             fmt::print(stderr, "Text-1 (AST-1 formatted):\n'{}'\n", query_to_execute);
             fmt::print(stderr, "AST-2 (Text-1 parsed):\n'{}'\n", ast_2->dumpTree());
             fmt::print(stderr, "Text-2 (AST-2 formatted):\n'{}'\n", text_2);
-            fmt::print(stderr, "AST-3 (Text-2 parsed):\n'{}'\n", ast_3->dumpTree());
+            fmt::print(stderr, "AST-3 (Text-2 parsed):\n'{}'\n", ast_3 ? ast_3->dumpTree() : "");
             fmt::print(stderr, "Text-3 (AST-3 formatted):\n'{}'\n", text_3);
             fmt::print(stderr, "Text-3 must be equal to Text-2, but it is not.\n");
@@ -162,14 +162,13 @@ private:
 class PushOrVisitor
 {
 public:
-    PushOrVisitor(ContextPtr context, size_t max_atoms_, size_t num_atoms_)
+    PushOrVisitor(ContextPtr context, size_t max_atoms_)
         : max_atoms(max_atoms_)
-        , num_atoms(num_atoms_)
         , and_resolver(FunctionFactory::instance().get("and", context))
         , or_resolver(FunctionFactory::instance().get("or", context))
     {}

-    bool visit(QueryTreeNodePtr & node)
+    bool visit(QueryTreeNodePtr & node, size_t num_atoms)
     {
         if (max_atoms && num_atoms > max_atoms)
             return false;
@@ -187,7 +186,10 @@ public:
         {
             auto & arguments = function_node->getArguments().getNodes();
             for (auto & argument : arguments)
-                visit(argument);
+            {
+                if (!visit(argument, num_atoms))
+                    return false;
+            }
         }

         if (name == "or")
@@ -217,7 +219,7 @@ public:
         auto rhs = createFunctionNode(or_resolver, std::move(other_node), std::move(and_function_arguments[1]));
         node = createFunctionNode(and_resolver, std::move(lhs), std::move(rhs));

-        visit(node);
+        return visit(node, num_atoms);
     }

     return true;
@@ -225,7 +227,6 @@ public:

 private:
     size_t max_atoms;
-    size_t num_atoms;

     const FunctionOverloadResolverPtr and_resolver;
     const FunctionOverloadResolverPtr or_resolver;
@@ -516,8 +517,8 @@ std::optional<CNF> CNF::tryBuildCNF(const QueryTreeNodePtr & node, ContextPtr co
         visitor.visit(node_cloned, false);
     }

-    if (PushOrVisitor visitor(context, max_atoms, atom_count);
-        !visitor.visit(node_cloned))
+    if (PushOrVisitor visitor(context, max_atoms);
+        !visitor.visit(node_cloned, atom_count))
         return std::nullopt;

     CollectGroupsVisitor collect_visitor;
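This visitor runs when a WHERE condition is rewritten into conjunctive normal form; a hedged SQL-level illustration of the setting that triggers it (table and column names are made up, the setting name matches the test added later in this commit):

```sql
SELECT count()
FROM my_table
WHERE (a AND NOT b) OR (c AND NOT d) OR (e AND f)
SETTINGS convert_query_to_cnf = 1;
```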
@@ -573,6 +573,13 @@ try
         CompressionMethod compression_method = chooseCompressionMethod(out_file, compression_method_string);
         UInt64 compression_level = 3;

+        if (query_with_output->is_outfile_append && compression_method != CompressionMethod::None)
+        {
+            throw Exception(
+                ErrorCodes::BAD_ARGUMENTS,
+                "Cannot append to compressed file. Please use uncompressed file or remove APPEND keyword.");
+        }
+
         if (query_with_output->compression_level)
         {
             const auto & compression_level_node = query_with_output->compression_level->as<ASTLiteral &>();
|
||||
range.second);
|
||||
}
|
||||
|
||||
auto flags = O_WRONLY | O_EXCL;
|
||||
if (query_with_output->is_outfile_append)
|
||||
flags |= O_APPEND;
|
||||
else
|
||||
flags |= O_CREAT;
|
||||
|
||||
out_file_buf = wrapWriteBufferWithCompressionMethod(
|
||||
std::make_unique<WriteBufferFromFile>(out_file, DBMS_DEFAULT_BUFFER_SIZE, O_WRONLY | O_EXCL | O_CREAT),
|
||||
std::make_unique<WriteBufferFromFile>(out_file, DBMS_DEFAULT_BUFFER_SIZE, flags),
|
||||
compression_method,
|
||||
static_cast<int>(compression_level)
|
||||
);
|
||||
|
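In SQL terms, the guard added above rejects combinations like the following (the path is illustrative):

```sql
-- Fails with BAD_ARGUMENTS:
-- "Cannot append to compressed file. Please use uncompressed file or remove APPEND keyword."
SELECT 1 INTO OUTFILE '/tmp/result.tsv.gz' APPEND COMPRESSION 'gzip';
```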
@ -29,21 +29,14 @@
|
||||
M(13, SIZES_OF_COLUMNS_IN_TUPLE_DOESNT_MATCH) \
|
||||
M(15, DUPLICATE_COLUMN) \
|
||||
M(16, NO_SUCH_COLUMN_IN_TABLE) \
|
||||
M(17, DELIMITER_IN_STRING_LITERAL_DOESNT_MATCH) \
|
||||
M(18, CANNOT_INSERT_ELEMENT_INTO_CONSTANT_COLUMN) \
|
||||
M(19, SIZE_OF_FIXED_STRING_DOESNT_MATCH) \
|
||||
M(20, NUMBER_OF_COLUMNS_DOESNT_MATCH) \
|
||||
M(21, CANNOT_READ_ALL_DATA_FROM_TAB_SEPARATED_INPUT) \
|
||||
M(22, CANNOT_PARSE_ALL_VALUE_FROM_TAB_SEPARATED_INPUT) \
|
||||
M(23, CANNOT_READ_FROM_ISTREAM) \
|
||||
M(24, CANNOT_WRITE_TO_OSTREAM) \
|
||||
M(25, CANNOT_PARSE_ESCAPE_SEQUENCE) \
|
||||
M(26, CANNOT_PARSE_QUOTED_STRING) \
|
||||
M(27, CANNOT_PARSE_INPUT_ASSERTION_FAILED) \
|
||||
M(28, CANNOT_PRINT_FLOAT_OR_DOUBLE_NUMBER) \
|
||||
M(29, CANNOT_PRINT_INTEGER) \
|
||||
M(30, CANNOT_READ_SIZE_OF_COMPRESSED_CHUNK) \
|
||||
M(31, CANNOT_READ_COMPRESSED_CHUNK) \
|
||||
M(32, ATTEMPT_TO_READ_AFTER_EOF) \
|
||||
M(33, CANNOT_READ_ALL_DATA) \
|
||||
M(34, TOO_MANY_ARGUMENTS_FOR_FUNCTION) \
|
||||
@ -57,7 +50,6 @@
|
||||
M(42, NUMBER_OF_ARGUMENTS_DOESNT_MATCH) \
|
||||
M(43, ILLEGAL_TYPE_OF_ARGUMENT) \
|
||||
M(44, ILLEGAL_COLUMN) \
|
||||
M(45, ILLEGAL_NUMBER_OF_RESULT_COLUMNS) \
|
||||
M(46, UNKNOWN_FUNCTION) \
|
||||
M(47, UNKNOWN_IDENTIFIER) \
|
||||
M(48, NOT_IMPLEMENTED) \
|
||||
@ -66,20 +58,14 @@
|
||||
M(51, EMPTY_LIST_OF_COLUMNS_QUERIED) \
|
||||
M(52, COLUMN_QUERIED_MORE_THAN_ONCE) \
|
||||
M(53, TYPE_MISMATCH) \
|
||||
M(54, STORAGE_DOESNT_ALLOW_PARAMETERS) \
|
||||
M(55, STORAGE_REQUIRES_PARAMETER) \
|
||||
M(56, UNKNOWN_STORAGE) \
|
||||
M(57, TABLE_ALREADY_EXISTS) \
|
||||
M(58, TABLE_METADATA_ALREADY_EXISTS) \
|
||||
M(59, ILLEGAL_TYPE_OF_COLUMN_FOR_FILTER) \
|
||||
M(60, UNKNOWN_TABLE) \
|
||||
M(61, ONLY_FILTER_COLUMN_IN_BLOCK) \
|
||||
M(62, SYNTAX_ERROR) \
|
||||
M(63, UNKNOWN_AGGREGATE_FUNCTION) \
|
||||
M(64, CANNOT_READ_AGGREGATE_FUNCTION_FROM_TEXT) \
|
||||
M(65, CANNOT_WRITE_AGGREGATE_FUNCTION_AS_TEXT) \
|
||||
M(66, NOT_A_COLUMN) \
|
||||
M(67, ILLEGAL_KEY_OF_AGGREGATION) \
|
||||
M(68, CANNOT_GET_SIZE_OF_FIELD) \
|
||||
M(69, ARGUMENT_OUT_OF_BOUND) \
|
||||
M(70, CANNOT_CONVERT_TYPE) \
|
||||
@ -109,16 +95,11 @@
|
||||
M(94, CANNOT_MERGE_DIFFERENT_AGGREGATED_DATA_VARIANTS) \
|
||||
M(95, CANNOT_READ_FROM_SOCKET) \
|
||||
M(96, CANNOT_WRITE_TO_SOCKET) \
|
||||
M(97, CANNOT_READ_ALL_DATA_FROM_CHUNKED_INPUT) \
|
||||
M(98, CANNOT_WRITE_TO_EMPTY_BLOCK_OUTPUT_STREAM) \
|
||||
M(99, UNKNOWN_PACKET_FROM_CLIENT) \
|
||||
M(100, UNKNOWN_PACKET_FROM_SERVER) \
|
||||
M(101, UNEXPECTED_PACKET_FROM_CLIENT) \
|
||||
M(102, UNEXPECTED_PACKET_FROM_SERVER) \
|
||||
M(103, RECEIVED_DATA_FOR_WRONG_QUERY_ID) \
|
||||
M(104, TOO_SMALL_BUFFER_SIZE) \
|
||||
M(105, CANNOT_READ_HISTORY) \
|
||||
M(106, CANNOT_APPEND_HISTORY) \
|
||||
M(107, FILE_DOESNT_EXIST) \
|
||||
M(108, NO_DATA_TO_INSERT) \
|
||||
M(109, CANNOT_BLOCK_SIGNAL) \
|
||||
@ -137,7 +118,6 @@
|
||||
M(123, UNKNOWN_TYPE_OF_AST_NODE) \
|
||||
M(124, INCORRECT_ELEMENT_OF_SET) \
|
||||
M(125, INCORRECT_RESULT_OF_SCALAR_SUBQUERY) \
|
||||
M(126, CANNOT_GET_RETURN_TYPE) \
|
||||
M(127, ILLEGAL_INDEX) \
|
||||
M(128, TOO_LARGE_ARRAY_SIZE) \
|
||||
M(129, FUNCTION_IS_SPECIAL) \
|
||||
@ -149,30 +129,17 @@
|
||||
M(137, UNKNOWN_ELEMENT_IN_CONFIG) \
|
||||
M(138, EXCESSIVE_ELEMENT_IN_CONFIG) \
|
||||
M(139, NO_ELEMENTS_IN_CONFIG) \
|
||||
M(140, ALL_REQUESTED_COLUMNS_ARE_MISSING) \
|
||||
M(141, SAMPLING_NOT_SUPPORTED) \
|
||||
M(142, NOT_FOUND_NODE) \
|
||||
M(143, FOUND_MORE_THAN_ONE_NODE) \
|
||||
M(144, FIRST_DATE_IS_BIGGER_THAN_LAST_DATE) \
|
||||
M(145, UNKNOWN_OVERFLOW_MODE) \
|
||||
M(146, QUERY_SECTION_DOESNT_MAKE_SENSE) \
|
||||
M(147, NOT_FOUND_FUNCTION_ELEMENT_FOR_AGGREGATE) \
|
||||
M(148, NOT_FOUND_RELATION_ELEMENT_FOR_CONDITION) \
|
||||
M(149, NOT_FOUND_RHS_ELEMENT_FOR_CONDITION) \
|
||||
M(150, EMPTY_LIST_OF_ATTRIBUTES_PASSED) \
|
||||
M(151, INDEX_OF_COLUMN_IN_SORT_CLAUSE_IS_OUT_OF_RANGE) \
|
||||
M(152, UNKNOWN_DIRECTION_OF_SORTING) \
|
||||
M(153, ILLEGAL_DIVISION) \
|
||||
M(154, AGGREGATE_FUNCTION_NOT_APPLICABLE) \
|
||||
M(155, UNKNOWN_RELATION) \
|
||||
M(156, DICTIONARIES_WAS_NOT_LOADED) \
|
||||
M(157, ILLEGAL_OVERFLOW_MODE) \
|
||||
M(158, TOO_MANY_ROWS) \
|
||||
M(159, TIMEOUT_EXCEEDED) \
|
||||
M(160, TOO_SLOW) \
|
||||
M(161, TOO_MANY_COLUMNS) \
|
||||
M(162, TOO_DEEP_SUBQUERIES) \
|
||||
M(163, TOO_DEEP_PIPELINE) \
|
||||
M(164, READONLY) \
|
||||
M(165, TOO_MANY_TEMPORARY_COLUMNS) \
|
||||
M(166, TOO_MANY_TEMPORARY_NON_CONST_COLUMNS) \
|
||||
@ -183,20 +150,14 @@
|
||||
M(172, CANNOT_CREATE_DIRECTORY) \
|
||||
M(173, CANNOT_ALLOCATE_MEMORY) \
|
||||
M(174, CYCLIC_ALIASES) \
|
||||
M(176, CHUNK_NOT_FOUND) \
|
||||
M(177, DUPLICATE_CHUNK_NAME) \
|
||||
M(178, MULTIPLE_ALIASES_FOR_EXPRESSION) \
|
||||
M(179, MULTIPLE_EXPRESSIONS_FOR_ALIAS) \
|
||||
M(180, THERE_IS_NO_PROFILE) \
|
||||
M(181, ILLEGAL_FINAL) \
|
||||
M(182, ILLEGAL_PREWHERE) \
|
||||
M(183, UNEXPECTED_EXPRESSION) \
|
||||
M(184, ILLEGAL_AGGREGATION) \
|
||||
M(185, UNSUPPORTED_MYISAM_BLOCK_TYPE) \
|
||||
M(186, UNSUPPORTED_COLLATION_LOCALE) \
|
||||
M(187, COLLATION_COMPARISON_FAILED) \
|
||||
M(188, UNKNOWN_ACTION) \
|
||||
M(189, TABLE_MUST_NOT_BE_CREATED_MANUALLY) \
|
||||
M(190, SIZES_OF_ARRAYS_DONT_MATCH) \
|
||||
M(191, SET_SIZE_LIMIT_EXCEEDED) \
|
||||
M(192, UNKNOWN_USER) \
|
||||
@ -204,15 +165,12 @@
|
||||
M(194, REQUIRED_PASSWORD) \
|
||||
M(195, IP_ADDRESS_NOT_ALLOWED) \
|
||||
M(196, UNKNOWN_ADDRESS_PATTERN_TYPE) \
|
||||
M(197, SERVER_REVISION_IS_TOO_OLD) \
|
||||
M(198, DNS_ERROR) \
|
||||
M(199, UNKNOWN_QUOTA) \
|
||||
M(200, QUOTA_DOESNT_ALLOW_KEYS) \
|
||||
M(201, QUOTA_EXCEEDED) \
|
||||
M(202, TOO_MANY_SIMULTANEOUS_QUERIES) \
|
||||
M(203, NO_FREE_CONNECTION) \
|
||||
M(204, CANNOT_FSYNC) \
|
||||
M(205, NESTED_TYPE_TOO_DEEP) \
|
||||
M(206, ALIAS_REQUIRED) \
|
||||
M(207, AMBIGUOUS_IDENTIFIER) \
|
||||
M(208, EMPTY_NESTED_TABLE) \
|
||||
@ -229,7 +187,6 @@
|
||||
M(219, DATABASE_NOT_EMPTY) \
|
||||
M(220, DUPLICATE_INTERSERVER_IO_ENDPOINT) \
|
||||
M(221, NO_SUCH_INTERSERVER_IO_ENDPOINT) \
|
||||
M(222, ADDING_REPLICA_TO_NON_EMPTY_TABLE) \
|
||||
M(223, UNEXPECTED_AST_STRUCTURE) \
|
||||
M(224, REPLICA_IS_ALREADY_ACTIVE) \
|
||||
M(225, NO_ZOOKEEPER) \
|
||||
@ -253,9 +210,7 @@
|
||||
M(243, NOT_ENOUGH_SPACE) \
|
||||
M(244, UNEXPECTED_ZOOKEEPER_ERROR) \
|
||||
M(246, CORRUPTED_DATA) \
|
||||
M(247, INCORRECT_MARK) \
|
||||
M(248, INVALID_PARTITION_VALUE) \
|
||||
M(250, NOT_ENOUGH_BLOCK_NUMBERS) \
|
||||
M(251, NO_SUCH_REPLICA) \
|
||||
M(252, TOO_MANY_PARTS) \
|
||||
M(253, REPLICA_ALREADY_EXISTS) \
|
||||
@ -271,8 +226,6 @@
|
||||
M(264, INCOMPATIBLE_TYPE_OF_JOIN) \
|
||||
M(265, NO_AVAILABLE_REPLICA) \
|
||||
M(266, MISMATCH_REPLICAS_DATA_SOURCES) \
|
||||
M(267, STORAGE_DOESNT_SUPPORT_PARALLEL_REPLICAS) \
|
||||
M(268, CPUID_ERROR) \
|
||||
M(269, INFINITE_LOOP) \
|
||||
M(270, CANNOT_COMPRESS) \
|
||||
M(271, CANNOT_DECOMPRESS) \
|
||||
@ -295,9 +248,7 @@
|
||||
M(290, LIMIT_EXCEEDED) \
|
||||
M(291, DATABASE_ACCESS_DENIED) \
|
||||
M(293, MONGODB_CANNOT_AUTHENTICATE) \
|
||||
M(294, INVALID_BLOCK_EXTRA_INFO) \
|
||||
M(295, RECEIVED_EMPTY_DATA) \
|
||||
M(296, NO_REMOTE_SHARD_FOUND) \
|
||||
M(297, SHARD_HAS_NO_CONNECTIONS) \
|
||||
M(298, CANNOT_PIPE) \
|
||||
M(299, CANNOT_FORK) \
|
||||
@ -311,13 +262,10 @@
|
||||
M(307, TOO_MANY_BYTES) \
|
||||
M(308, UNEXPECTED_NODE_IN_ZOOKEEPER) \
|
||||
M(309, FUNCTION_CANNOT_HAVE_PARAMETERS) \
|
||||
M(317, INVALID_SHARD_WEIGHT) \
|
||||
M(318, INVALID_CONFIG_PARAMETER) \
|
||||
M(319, UNKNOWN_STATUS_OF_INSERT) \
|
||||
M(321, VALUE_IS_OUT_OF_RANGE_OF_DATA_TYPE) \
|
||||
M(335, BARRIER_TIMEOUT) \
|
||||
M(336, UNKNOWN_DATABASE_ENGINE) \
|
||||
M(337, DDL_GUARD_IS_ACTIVE) \
|
||||
M(341, UNFINISHED) \
|
||||
M(342, METADATA_MISMATCH) \
|
||||
M(344, SUPPORT_IS_DISABLED) \
|
||||
@ -325,14 +273,10 @@
|
||||
M(346, CANNOT_CONVERT_CHARSET) \
|
||||
M(347, CANNOT_LOAD_CONFIG) \
|
||||
M(349, CANNOT_INSERT_NULL_IN_ORDINARY_COLUMN) \
|
||||
M(350, INCOMPATIBLE_SOURCE_TABLES) \
|
||||
M(351, AMBIGUOUS_TABLE_NAME) \
|
||||
M(352, AMBIGUOUS_COLUMN_NAME) \
|
||||
M(353, INDEX_OF_POSITIONAL_ARGUMENT_IS_OUT_OF_RANGE) \
|
||||
M(354, ZLIB_INFLATE_FAILED) \
|
||||
M(355, ZLIB_DEFLATE_FAILED) \
|
||||
M(356, BAD_LAMBDA) \
|
||||
M(357, RESERVED_IDENTIFIER_NAME) \
|
||||
M(358, INTO_OUTFILE_NOT_ALLOWED) \
|
||||
M(359, TABLE_SIZE_EXCEEDS_MAX_DROP_SIZE_LIMIT) \
|
||||
M(360, CANNOT_CREATE_CHARSET_CONVERTER) \
|
||||
@ -341,7 +285,6 @@
|
||||
M(363, CANNOT_CREATE_IO_BUFFER) \
|
||||
M(364, RECEIVED_ERROR_TOO_MANY_REQUESTS) \
|
||||
M(366, SIZES_OF_NESTED_COLUMNS_ARE_INCONSISTENT) \
|
||||
M(367, TOO_MANY_FETCHES) \
|
||||
M(369, ALL_REPLICAS_ARE_STALE) \
|
||||
M(370, DATA_TYPE_CANNOT_BE_USED_IN_TABLES) \
|
||||
M(371, INCONSISTENT_CLUSTER_DEFINITION) \
|
||||
@ -352,7 +295,6 @@
|
||||
M(376, CANNOT_PARSE_UUID) \
|
||||
M(377, ILLEGAL_SYNTAX_FOR_DATA_TYPE) \
|
||||
M(378, DATA_TYPE_CANNOT_HAVE_ARGUMENTS) \
|
||||
M(379, UNKNOWN_STATUS_OF_DISTRIBUTED_DDL_TASK) \
|
||||
M(380, CANNOT_KILL) \
|
||||
M(381, HTTP_LENGTH_REQUIRED) \
|
||||
M(382, CANNOT_LOAD_CATBOOST_MODEL) \
|
||||
@ -378,11 +320,9 @@
|
||||
M(402, CANNOT_IOSETUP) \
|
||||
M(403, INVALID_JOIN_ON_EXPRESSION) \
|
||||
M(404, BAD_ODBC_CONNECTION_STRING) \
|
||||
M(405, PARTITION_SIZE_EXCEEDS_MAX_DROP_SIZE_LIMIT) \
|
||||
M(406, TOP_AND_LIMIT_TOGETHER) \
|
||||
M(407, DECIMAL_OVERFLOW) \
|
||||
M(408, BAD_REQUEST_PARAMETER) \
|
||||
M(409, EXTERNAL_EXECUTABLE_NOT_FOUND) \
|
||||
M(410, EXTERNAL_SERVER_IS_NOT_RESPONDING) \
|
||||
M(411, PTHREAD_ERROR) \
|
||||
M(412, NETLINK_ERROR) \
|
||||
@ -399,7 +339,6 @@
|
||||
M(424, CANNOT_LINK) \
|
||||
M(425, SYSTEM_ERROR) \
|
||||
M(427, CANNOT_COMPILE_REGEXP) \
|
||||
M(428, UNKNOWN_LOG_LEVEL) \
|
||||
M(429, FAILED_TO_GETPWUID) \
|
||||
M(430, MISMATCHING_USERS_FOR_PROCESS_AND_DATA) \
|
||||
M(431, ILLEGAL_SYNTAX_FOR_CODEC_TYPE) \
|
||||
@ -433,7 +372,6 @@
|
||||
M(459, CANNOT_SET_THREAD_PRIORITY) \
|
||||
M(460, CANNOT_CREATE_TIMER) \
|
||||
M(461, CANNOT_SET_TIMER_PERIOD) \
|
||||
M(462, CANNOT_DELETE_TIMER) \
|
||||
M(463, CANNOT_FCNTL) \
|
||||
M(464, CANNOT_PARSE_ELF) \
|
||||
M(465, CANNOT_PARSE_DWARF) \
|
||||
@ -456,15 +394,12 @@
|
||||
M(482, DICTIONARY_ACCESS_DENIED) \
|
||||
M(483, TOO_MANY_REDIRECTS) \
|
||||
M(484, INTERNAL_REDIS_ERROR) \
|
||||
M(485, SCALAR_ALREADY_EXISTS) \
|
||||
M(487, CANNOT_GET_CREATE_DICTIONARY_QUERY) \
|
||||
M(488, UNKNOWN_DICTIONARY) \
|
||||
M(489, INCORRECT_DICTIONARY_DEFINITION) \
|
||||
M(490, CANNOT_FORMAT_DATETIME) \
|
||||
M(491, UNACCEPTABLE_URL) \
|
||||
M(492, ACCESS_ENTITY_NOT_FOUND) \
|
||||
M(493, ACCESS_ENTITY_ALREADY_EXISTS) \
|
||||
M(494, ACCESS_ENTITY_FOUND_DUPLICATES) \
|
||||
M(495, ACCESS_STORAGE_READONLY) \
|
||||
M(496, QUOTA_REQUIRES_CLIENT_KEY) \
|
||||
M(497, ACCESS_DENIED) \
|
||||
@ -475,8 +410,6 @@
|
||||
M(502, CANNOT_SIGQUEUE) \
|
||||
M(503, AGGREGATE_FUNCTION_THROW) \
|
||||
M(504, FILE_ALREADY_EXISTS) \
|
||||
M(505, CANNOT_DELETE_DIRECTORY) \
|
||||
M(506, UNEXPECTED_ERROR_CODE) \
|
||||
M(507, UNABLE_TO_SKIP_UNUSED_SHARDS) \
|
||||
M(508, UNKNOWN_ACCESS_TYPE) \
|
||||
M(509, INVALID_GRANT) \
|
||||
@ -501,8 +434,6 @@
|
||||
M(530, CANNOT_CONNECT_RABBITMQ) \
|
||||
M(531, CANNOT_FSTAT) \
|
||||
M(532, LDAP_ERROR) \
|
||||
M(533, INCONSISTENT_RESERVATIONS) \
|
||||
M(534, NO_RESERVATIONS_PROVIDED) \
|
||||
M(535, UNKNOWN_RAID_TYPE) \
|
||||
M(536, CANNOT_RESTORE_FROM_FIELD_DUMP) \
|
||||
M(537, ILLEGAL_MYSQL_VARIABLE) \
|
||||
@ -518,8 +449,6 @@
|
||||
M(547, INVALID_RAID_TYPE) \
|
||||
M(548, UNKNOWN_VOLUME) \
|
||||
M(549, DATA_TYPE_CANNOT_BE_USED_IN_KEY) \
|
||||
M(550, CONDITIONAL_TREE_PARENT_NOT_FOUND) \
|
||||
M(551, ILLEGAL_PROJECTION_MANIPULATOR) \
|
||||
M(552, UNRECOGNIZED_ARGUMENTS) \
|
||||
M(553, LZMA_STREAM_ENCODER_FAILED) \
|
||||
M(554, LZMA_STREAM_DECODER_FAILED) \
|
||||
@ -580,8 +509,6 @@
|
||||
M(609, FUNCTION_ALREADY_EXISTS) \
|
||||
M(610, CANNOT_DROP_FUNCTION) \
|
||||
M(611, CANNOT_CREATE_RECURSIVE_FUNCTION) \
|
||||
M(612, OBJECT_ALREADY_STORED_ON_DISK) \
|
||||
M(613, OBJECT_WAS_NOT_STORED_ON_DISK) \
|
||||
M(614, POSTGRESQL_CONNECTION_FAILURE) \
|
||||
M(615, CANNOT_ADVISE) \
|
||||
M(616, UNKNOWN_READ_METHOD) \
|
||||
@ -612,9 +539,7 @@
|
||||
M(641, CANNOT_APPEND_TO_FILE) \
|
||||
M(642, CANNOT_PACK_ARCHIVE) \
|
||||
M(643, CANNOT_UNPACK_ARCHIVE) \
|
||||
M(644, REMOTE_FS_OBJECT_CACHE_ERROR) \
|
||||
M(645, NUMBER_OF_DIMENSIONS_MISMATCHED) \
|
||||
M(646, CANNOT_BACKUP_DATABASE) \
|
||||
M(647, CANNOT_BACKUP_TABLE) \
|
||||
M(648, WRONG_DDL_RENAMING_SETTINGS) \
|
||||
M(649, INVALID_TRANSACTION) \
|
||||
|
@@ -726,7 +726,7 @@ static UUID getTableUUIDIfReplicated(const String & metadata, ContextPtr context
     return create.uuid;
 }

-void DatabaseReplicated::recoverLostReplica(const ZooKeeperPtr & current_zookeeper, UInt32 our_log_ptr, UInt32 max_log_ptr)
+void DatabaseReplicated::recoverLostReplica(const ZooKeeperPtr & current_zookeeper, UInt32 our_log_ptr, UInt32 & max_log_ptr)
 {
     is_recovering = true;
     SCOPE_EXIT({ is_recovering = false; });
@@ -102,7 +102,7 @@ private:

     void checkQueryValid(const ASTPtr & query, ContextPtr query_context) const;

-    void recoverLostReplica(const ZooKeeperPtr & current_zookeeper, UInt32 our_log_ptr, UInt32 max_log_ptr);
+    void recoverLostReplica(const ZooKeeperPtr & current_zookeeper, UInt32 our_log_ptr, UInt32 & max_log_ptr);
     std::map<String, String> tryGetConsistentMetadataSnapshot(const ZooKeeperPtr & zookeeper, UInt32 & max_log_ptr);

     ASTPtr parseQueryFromMetadataInZooKeeper(const String & node_name, const String & query);
@@ -580,7 +580,7 @@ private:
 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
         unalignedStoreLittleEndian<UInt64>(buf + 8, 0x00000000FFFF0000ull | (static_cast<UInt64>(ntohl(in)) << 32));
 #else
-        unalignedStoreLittleEndian<UInt64>(buf + 8, 0x00000000FFFF0000ull | (static_cast<UInt64>(in)) << 32));
+        unalignedStoreLittleEndian<UInt64>(buf + 8, 0x00000000FFFF0000ull | (static_cast<UInt64>(__builtin_bswap32(in))) << 32));
 #endif
     }
 };
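The branch changed here builds an IPv4-mapped IPv6 value (`::ffff:a.b.c.d`) on big-endian platforms; a hedged SQL-level view of that mapping, since the exact caller of this code is not visible in the hunk:

```sql
-- IPv4-mapped IPv6 form of an IPv4 address.
SELECT IPv4ToIPv6(toIPv4('192.168.0.1'));
-- ::ffff:192.168.0.1
```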
@ -239,11 +239,11 @@ namespace
|
||||
// Multipart upload failed because it wasn't possible to schedule all the tasks.
|
||||
// To avoid execution of already scheduled tasks we abort MultipartUpload.
|
||||
abortMultipartUpload();
|
||||
waitForAllBackGroundTasks();
|
||||
waitForAllBackgroundTasks();
|
||||
throw;
|
||||
}
|
||||
|
||||
waitForAllBackGroundTasks();
|
||||
waitForAllBackgroundTasks();
|
||||
completeMultipartUpload();
|
||||
}
|
||||
|
||||
@ -381,7 +381,7 @@ namespace
|
||||
virtual std::unique_ptr<Aws::AmazonWebServiceRequest> fillUploadPartRequest(size_t part_number, size_t part_offset, size_t part_size) = 0;
|
||||
virtual String processUploadPartRequest(Aws::AmazonWebServiceRequest & request) = 0;
|
||||
|
||||
void waitForAllBackGroundTasks()
|
||||
void waitForAllBackgroundTasks()
|
||||
{
|
||||
if (!schedule)
|
||||
return;
|
||||
|
@ -109,7 +109,7 @@ void WriteBufferFromS3::nextImpl()
|
||||
else
|
||||
processWithDynamicParts();
|
||||
|
||||
waitForReadyBackGroundTasks();
|
||||
waitForReadyBackgroundTasks();
|
||||
}
|
||||
|
||||
void WriteBufferFromS3::processWithStrictParts()
|
||||
@ -225,7 +225,7 @@ void WriteBufferFromS3::finalizeImpl()
|
||||
if (!is_prefinalized)
|
||||
preFinalize();
|
||||
|
||||
waitForAllBackGroundTasks();
|
||||
waitForAllBackgroundTasks();
|
||||
|
||||
if (!multipart_upload_id.empty())
|
||||
completeMultipartUpload();
|
||||
@ -632,7 +632,7 @@ void WriteBufferFromS3::processPutRequest(const PutObjectTask & task)
|
||||
max_retry, key, bucket);
|
||||
}
|
||||
|
||||
void WriteBufferFromS3::waitForReadyBackGroundTasks()
|
||||
void WriteBufferFromS3::waitForReadyBackgroundTasks()
|
||||
{
|
||||
if (schedule)
|
||||
{
|
||||
@ -650,7 +650,7 @@ void WriteBufferFromS3::waitForReadyBackGroundTasks()
|
||||
|
||||
if (exception)
|
||||
{
|
||||
waitForAllBackGroundTasksUnlocked(lock);
|
||||
waitForAllBackgroundTasksUnlocked(lock);
|
||||
std::rethrow_exception(exception);
|
||||
}
|
||||
|
||||
@ -659,16 +659,16 @@ void WriteBufferFromS3::waitForReadyBackGroundTasks()
|
||||
}
|
||||
}
|
||||
|
||||
void WriteBufferFromS3::waitForAllBackGroundTasks()
|
||||
void WriteBufferFromS3::waitForAllBackgroundTasks()
|
||||
{
|
||||
if (schedule)
|
||||
{
|
||||
std::unique_lock lock(bg_tasks_mutex);
|
||||
waitForAllBackGroundTasksUnlocked(lock);
|
||||
waitForAllBackgroundTasksUnlocked(lock);
|
||||
}
|
||||
}
|
||||
|
||||
void WriteBufferFromS3::waitForAllBackGroundTasksUnlocked(std::unique_lock<std::mutex> & bg_tasks_lock)
|
||||
void WriteBufferFromS3::waitForAllBackgroundTasksUnlocked(std::unique_lock<std::mutex> & bg_tasks_lock)
|
||||
{
|
||||
if (schedule)
|
||||
{
|
||||
|
@ -79,9 +79,9 @@ private:
|
||||
void fillPutRequest(S3::PutObjectRequest & req);
|
||||
void processPutRequest(const PutObjectTask & task);
|
||||
|
||||
void waitForReadyBackGroundTasks();
|
||||
void waitForAllBackGroundTasks();
|
||||
void waitForAllBackGroundTasksUnlocked(std::unique_lock<std::mutex> & bg_tasks_lock);
|
||||
void waitForReadyBackgroundTasks();
|
||||
void waitForAllBackgroundTasks();
|
||||
void waitForAllBackgroundTasksUnlocked(std::unique_lock<std::mutex> & bg_tasks_lock);
|
||||
|
||||
const String bucket;
|
||||
const String key;
|
||||
|
@ -533,6 +533,11 @@ bool FileCache::tryReserve(FileSegment & file_segment, size_t size)
|
||||
assertInitialized();
|
||||
auto cache_lock = cache_guard.lock();
|
||||
|
||||
LOG_TEST(
|
||||
log, "Trying to reserve space ({} bytes) for {}:{}, current usage {}/{}",
|
||||
size, file_segment.key(), file_segment.offset(),
|
||||
main_priority->getSize(cache_lock), main_priority->getSizeLimit());
|
||||
|
||||
/// In case of per query cache limit (by default disabled), we add/remove entries from both
|
||||
/// (main_priority and query_priority) priority queues, but iterate entries in order of query_priority,
|
||||
/// while checking the limits in both.
|
||||
@ -545,7 +550,17 @@ bool FileCache::tryReserve(FileSegment & file_segment, size_t size)
|
||||
|
||||
const bool query_limit_exceeded = query_priority->getSize(cache_lock) + size > query_priority->getSizeLimit();
|
||||
if (query_limit_exceeded && !query_context->recacheOnFileCacheQueryLimitExceeded())
|
||||
{
|
||||
LOG_TEST(log, "Query limit exceeded, space reservation failed, "
|
||||
"recache_on_query_limit_exceeded is disabled (while reserving for {}:{})",
|
||||
file_segment.key(), file_segment.offset());
|
||||
return false;
|
||||
}
|
||||
|
||||
LOG_TEST(
|
||||
log, "Using query limit, current usage: {}/{} (while reserving for {}:{})",
|
||||
query_priority->getSize(cache_lock), query_priority->getSizeLimit(),
|
||||
file_segment.key(), file_segment.offset());
|
||||
}
|
||||
|
||||
size_t queue_size = main_priority->getElementsCount(cache_lock);
|
||||
@ -630,6 +645,10 @@ bool FileCache::tryReserve(FileSegment & file_segment, size_t size)
|
||||
|
||||
if (is_query_priority_overflow())
|
||||
return false;
|
||||
|
||||
LOG_TEST(
|
||||
log, "Query limits satisfied (while reserving for {}:{})",
|
||||
file_segment.key(), file_segment.offset());
|
||||
}
|
||||
|
||||
auto is_main_priority_overflow = [&]
|
||||
|
@@ -47,7 +47,7 @@ void FileCacheSettings::loadFromConfig(const Poco::Util::AbstractConfiguration &
     else
         bypass_cache_threashold = FILECACHE_BYPASS_THRESHOLD;

-    do_not_evict_index_and_mark_files = config.getUInt64(config_prefix + ".do_not_evict_index_and_mark_files", false);
+    do_not_evict_index_and_mark_files = config.getUInt64(config_prefix + ".do_not_evict_index_and_mark_files", true);

     delayed_cleanup_interval_ms = config.getUInt64(config_prefix + ".delayed_cleanup_interval_ms", FILECACHE_DELAYED_CLEANUP_INTERVAL_MS);
 }
@@ -753,7 +753,7 @@ bool FileSegment::assertCorrectnessUnlocked(const FileSegmentGuard::Lock &) cons
     }
     else
     {
-        if (download_state == State::DOWNLOADED)
+        if (download_state == State::DOWNLOADING)
         {
             chassert(!downloader_id.empty());
         }
@ -32,7 +32,8 @@ IFileCachePriority::Iterator LRUFileCachePriority::add(
|
||||
if (entry.size != 0 && entry.key == key && entry.offset == offset)
|
||||
throw Exception(
|
||||
ErrorCodes::LOGICAL_ERROR,
|
||||
"Attempt to add duplicate queue entry to queue. (Key: {}, offset: {}, size: {})",
|
||||
"Attempt to add duplicate queue entry to queue. "
|
||||
"(Key: {}, offset: {}, size: {})",
|
||||
entry.key, entry.offset, entry.size);
|
||||
}
|
||||
#endif
|
||||
@ -46,14 +47,15 @@ IFileCachePriority::Iterator LRUFileCachePriority::add(
|
||||
key, offset, size, current_size, getSizeLimit());
|
||||
}
|
||||
|
||||
current_size += size;
|
||||
|
||||
auto iter = queue.insert(queue.end(), Entry(key, offset, size, key_metadata));
|
||||
current_size += size;
|
||||
|
||||
CurrentMetrics::add(CurrentMetrics::FilesystemCacheSize, size);
|
||||
CurrentMetrics::add(CurrentMetrics::FilesystemCacheElements);
|
||||
|
||||
LOG_TEST(log, "Added entry into LRU queue, key: {}, offset: {}, size: {}", key, offset, size);
|
||||
LOG_TEST(
|
||||
log, "Added entry into LRU queue, key: {}, offset: {}, size: {}",
|
||||
key, offset, size);
|
||||
|
||||
return std::make_shared<LRUFileCacheIterator>(this, iter);
|
||||
}
|
||||
@ -81,13 +83,18 @@ LRUFileCachePriority::LRUQueueIterator LRUFileCachePriority::remove(LRUQueueIter
|
||||
CurrentMetrics::sub(CurrentMetrics::FilesystemCacheSize, it->size);
|
||||
CurrentMetrics::sub(CurrentMetrics::FilesystemCacheElements);
|
||||
|
||||
LOG_TEST(log, "Removed entry from LRU queue, key: {}, offset: {}", it->key, it->offset);
|
||||
LOG_TEST(
|
||||
log, "Removed entry from LRU queue, key: {}, offset: {}, size: {}",
|
||||
it->key, it->offset, it->size);
|
||||
|
||||
return queue.erase(it);
|
||||
}
|
||||
|
||||
LRUFileCachePriority::LRUFileCacheIterator::LRUFileCacheIterator(
|
||||
LRUFileCachePriority * cache_priority_, LRUFileCachePriority::LRUQueueIterator queue_iter_)
|
||||
: cache_priority(cache_priority_), queue_iter(queue_iter_)
|
||||
LRUFileCachePriority * cache_priority_,
|
||||
LRUFileCachePriority::LRUQueueIterator queue_iter_)
|
||||
: cache_priority(cache_priority_)
|
||||
, queue_iter(queue_iter_)
|
||||
{
|
||||
}
|
||||
|
||||
@ -113,7 +120,8 @@ void LRUFileCachePriority::iterate(IterateFunc && func, const CacheGuard::Lock &
|
||||
{
|
||||
throw Exception(
|
||||
ErrorCodes::LOGICAL_ERROR,
|
||||
"Mismatch of file segment size in file segment metadata and priority queue: {} != {} ({})",
|
||||
"Mismatch of file segment size in file segment metadata "
|
||||
"and priority queue: {} != {} ({})",
|
||||
it->size, metadata->size(), metadata->file_segment->getInfoForLog());
|
||||
}
|
||||
|
||||
@ -138,28 +146,26 @@ void LRUFileCachePriority::iterate(IterateFunc && func, const CacheGuard::Lock &
|
||||
}
|
||||
}
|
||||
|
||||
LRUFileCachePriority::Iterator LRUFileCachePriority::LRUFileCacheIterator::remove(const CacheGuard::Lock &)
|
||||
LRUFileCachePriority::Iterator
|
||||
LRUFileCachePriority::LRUFileCacheIterator::remove(const CacheGuard::Lock &)
|
||||
{
|
||||
return std::make_shared<LRUFileCacheIterator>(cache_priority, cache_priority->remove(queue_iter));
|
||||
return std::make_shared<LRUFileCacheIterator>(
|
||||
cache_priority, cache_priority->remove(queue_iter));
|
||||
}
|
||||
|
||||
void LRUFileCachePriority::LRUFileCacheIterator::annul()
|
||||
{
|
||||
cache_priority->current_size -= queue_iter->size;
|
||||
queue_iter->size = 0;
|
||||
updateSize(-queue_iter->size);
|
||||
chassert(queue_iter->size == 0);
|
||||
}
|
||||
|
||||
void LRUFileCachePriority::LRUFileCacheIterator::updateSize(int64_t size)
|
||||
{
|
||||
cache_priority->current_size += size;
|
||||
|
||||
if (size > 0)
|
||||
CurrentMetrics::add(CurrentMetrics::FilesystemCacheSize, size);
|
||||
else
|
||||
CurrentMetrics::sub(CurrentMetrics::FilesystemCacheSize, size);
|
||||
|
||||
queue_iter->size += size;
|
||||
|
||||
CurrentMetrics::add(CurrentMetrics::FilesystemCacheSize, size);
|
||||
|
||||
chassert(cache_priority->current_size >= 0);
|
||||
chassert(queue_iter->size >= 0);
|
||||
}
|
||||
|
@ -336,13 +336,18 @@ void LockedKey::removeAllReleasable()
|
||||
|
||||
KeyMetadata::iterator LockedKey::removeFileSegment(size_t offset, const FileSegmentGuard::Lock & segment_lock)
|
||||
{
|
||||
LOG_DEBUG(log, "Remove from cache. Key: {}, offset: {}", getKey(), offset);
|
||||
|
||||
auto it = key_metadata->find(offset);
|
||||
if (it == key_metadata->end())
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "There is no offset {}", offset);
|
||||
|
||||
auto file_segment = it->second->file_segment;
|
||||
|
||||
LOG_DEBUG(
|
||||
log, "Remove from cache. Key: {}, offset: {}, size: {}",
|
||||
getKey(), offset, file_segment->reserved_size);
|
||||
|
||||
chassert(file_segment->assertCorrectnessUnlocked(segment_lock));
|
||||
|
||||
if (file_segment->queue_iterator)
|
||||
file_segment->queue_iterator->annul();
|
||||
|
||||
|
@ -669,13 +669,15 @@ SharedContextHolder Context::createShared()
|
||||
|
||||
ContextMutablePtr Context::createCopy(const ContextPtr & other)
|
||||
{
|
||||
auto lock = other->getLock();
|
||||
return std::shared_ptr<Context>(new Context(*other));
|
||||
}
|
||||
|
||||
ContextMutablePtr Context::createCopy(const ContextWeakPtr & other)
|
||||
{
|
||||
auto ptr = other.lock();
|
||||
if (!ptr) throw Exception(ErrorCodes::LOGICAL_ERROR, "Can't copy an expired context");
|
||||
if (!ptr)
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "Can't copy an expired context");
|
||||
return createCopy(ptr);
|
||||
}
|
||||
|
||||
|
src/Interpreters/tests/gtest_context_race.cpp (new file, 49 lines)
@ -0,0 +1,49 @@
|
||||
#include <Interpreters/Context.h>
|
||||
#include <Common/tests/gtest_global_context.h>
|
||||
#include <gtest/gtest.h>
|
||||
|
||||
using namespace DB;
|
||||
|
||||
template <typename Ptr>
|
||||
void run(Ptr context)
|
||||
{
|
||||
for (size_t i = 0; i < 100; ++i)
|
||||
{
|
||||
std::thread t1([context]
|
||||
{
|
||||
if constexpr (std::is_same_v<ContextWeakPtr, Ptr>)
|
||||
context.lock()->getAsyncReadCounters();
|
||||
else
|
||||
context->getAsyncReadCounters();
|
||||
});
|
||||
|
||||
std::thread t2([context]
|
||||
{
|
||||
Context::createCopy(context);
|
||||
});
|
||||
|
||||
t1.join();
|
||||
t2.join();
|
||||
}
|
||||
}
|
||||
|
||||
TEST(Context, MutableRace)
|
||||
{
|
||||
auto context = Context::createCopy(getContext().context);
|
||||
context->makeQueryContext();
|
||||
run<ContextMutablePtr>(context);
|
||||
}
|
||||
|
||||
TEST(Context, ConstRace)
|
||||
{
|
||||
auto context = Context::createCopy(getContext().context);
|
||||
context->makeQueryContext();
|
||||
run<ContextPtr>(context);
|
||||
}
|
||||
|
||||
TEST(Context, WeakRace)
|
||||
{
|
||||
auto context = Context::createCopy(getContext().context);
|
||||
context->makeQueryContext();
|
||||
run<ContextWeakPtr>(context);
|
||||
}
|
@@ -16,6 +16,7 @@ class ASTQueryWithOutput : public IAST
 public:
     ASTPtr out_file;
     bool is_into_outfile_with_stdout;
+    bool is_outfile_append;
     ASTPtr format;
     ASTPtr settings_ast;
     ASTPtr compression;
@@ -103,6 +103,12 @@ bool ParserQueryWithOutput::parseImpl(Pos & pos, ASTPtr & node, Expected & expec
             if (!out_file_p.parse(pos, query_with_output.out_file, expected))
                 return false;

+            ParserKeyword s_append("APPEND");
+            if (s_append.ignore(pos, expected))
+            {
+                query_with_output.is_outfile_append = true;
+            }
+
             ParserKeyword s_stdout("AND STDOUT");
             if (s_stdout.ignore(pos, expected))
             {
@@ -4985,8 +4985,8 @@ Pipe MergeTreeData::alterPartition(
             if (command.replace)
                 checkPartitionCanBeDropped(command.partition, query_context);

-            String from_database = query_context->resolveDatabase(command.from_database);
-            auto from_storage = DatabaseCatalog::instance().getTable({from_database, command.from_table}, query_context);
+            auto resolved = query_context->resolveStorageID({command.from_database, command.from_table});
+            auto from_storage = DatabaseCatalog::instance().getTable(resolved, query_context);

             auto * from_storage_merge_tree = dynamic_cast<MergeTreeData *>(from_storage.get());
             if (!from_storage_merge_tree)
@@ -124,7 +124,6 @@
 02324_map_combinator_bug
 02241_join_rocksdb_bs
 02003_WithMergeableStateAfterAggregationAndLimit_LIMIT_BY_LIMIT_OFFSET
-01626_cnf_fuzz_long
 01115_join_with_dictionary
 01009_global_array_join_names
 00917_multiple_joins_denny_crane
@@ -2244,7 +2244,7 @@ def main(args):
                 "\nFound hung queries in processlist:", args, "red", attrs=["bold"]
             )
         )
-        print(processlist)
+        print(processlist.decode())
         print(get_transactions_list(args))

         print_stacktraces()
@@ -0,0 +1,2 @@
+Hello, World! From client.
+Hello, World! From local.
tests/queries/0_stateless/02001_append_output_file.sh (new executable file, 15 lines)
@@ -0,0 +1,15 @@
+#!/usr/bin/env bash
+
+CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
+. "$CURDIR"/../shell_config.sh
+
+set -e
+
+[ -e "${CLICKHOUSE_TMP}"/test_append_to_output_file ] && rm "${CLICKHOUSE_TMP}"/test_append_to_output_file
+
+${CLICKHOUSE_CLIENT} --query "SELECT * FROM (SELECT 'Hello, World! From client.') INTO OUTFILE '${CLICKHOUSE_TMP}/test_append_to_output_file'"
+${CLICKHOUSE_LOCAL} --query "SELECT * FROM (SELECT 'Hello, World! From local.') INTO OUTFILE '${CLICKHOUSE_TMP}/test_append_to_output_file' APPEND"
+cat ${CLICKHOUSE_TMP}/test_append_to_output_file
+
+rm -f "${CLICKHOUSE_TMP}/test_append_to_output_file"
@@ -62,8 +62,8 @@ def test_ks_all_alternatives(rvs1, rvs2):


 def test_kolmogorov_smirnov():
-    rvs1 = np.round(stats.norm.rvs(loc=1, scale=5, size=10), 2)
-    rvs2 = np.round(stats.norm.rvs(loc=1.5, scale=5, size=20), 2)
+    rvs1 = np.round(stats.norm.rvs(loc=1, scale=5, size=100), 2)
+    rvs2 = np.round(stats.norm.rvs(loc=1.5, scale=5, size=200), 2)
     test_ks_all_alternatives(rvs1, rvs2)

     rvs1 = np.round(stats.norm.rvs(loc=13, scale=1, size=100), 2)
@@ -0,0 +1,4 @@
+8
+8
+2
+2
tests/queries/0_stateless/02725_cnf_large_check.sql (new file, 27 lines)
@ -0,0 +1,27 @@
|
||||
DROP TABLE IF EXISTS 02725_cnf;
|
||||
|
||||
CREATE TABLE 02725_cnf (c0 UInt8, c1 UInt8, c2 UInt8, c3 UInt8, c4 UInt8, c5 UInt8, c6 UInt8, c7 UInt8, c8 UInt8, c9 UInt8) ENGINE = Memory;
|
||||
|
||||
INSERT INTO 02725_cnf VALUES (0, 0, 0, 0, 0, 0, 0, 0, 0, 0), (0, 0, 0, 0, 0, 0, 0, 0, 0, 1), (0, 0, 0, 0, 0, 0, 0, 0, 1, 0), (0, 0, 0, 0, 0, 0, 0, 0, 1, 1), (0, 0, 0, 0, 0, 0, 0, 1, 0, 0), (0, 0, 0, 0, 0, 0, 0, 1, 0, 1), (0, 0, 0, 0, 0, 0, 0, 1, 1, 0), (0, 0, 0, 0, 0, 0, 0, 1, 1, 1);
|
||||
|
||||
SELECT count()
|
||||
FROM 02725_cnf
|
||||
WHERE (c5 AND (NOT c0)) OR ((NOT c3) AND (NOT c6) AND (NOT c1) AND (NOT c6)) OR (c7 AND (NOT c3) AND (NOT c5) AND (NOT c7)) OR ((NOT c8) AND c5) OR ((NOT c0)) OR ((NOT c8) AND (NOT c5) AND c1 AND c6 AND c3) OR (c7 AND (NOT c0) AND c6 AND c1 AND (NOT c2)) OR (c3 AND (NOT c9) AND c1)
|
||||
SETTINGS convert_query_to_cnf = 1, allow_experimental_analyzer = 1;
|
||||
|
||||
SELECT count()
|
||||
FROM 02725_cnf
|
||||
WHERE (c5 AND (NOT c0)) OR ((NOT c3) AND (NOT c6) AND (NOT c1) AND (NOT c6)) OR (c7 AND (NOT c3) AND (NOT c5) AND (NOT c7)) OR ((NOT c8) AND c5) OR ((NOT c0)) OR ((NOT c8) AND (NOT c5) AND c1 AND c6 AND c3) OR (c7 AND (NOT c0) AND c6 AND c1 AND (NOT c2)) OR (c3 AND (NOT c9) AND c1)
|
||||
SETTINGS convert_query_to_cnf = 1, allow_experimental_analyzer = 0;
|
||||
|
||||
SELECT count()
|
||||
FROM 02725_cnf
|
||||
WHERE ((NOT c2) AND c2 AND (NOT c1)) OR ((NOT c2) AND c3 AND (NOT c5)) OR ((NOT c7) AND (NOT c8)) OR (c9 AND c6 AND c8 AND (NOT c8) AND (NOT c7))
|
||||
SETTINGS convert_query_to_cnf = 1, allow_experimental_analyzer = 1;
|
||||
|
||||
SELECT count()
|
||||
FROM 02725_cnf
|
||||
WHERE ((NOT c2) AND c2 AND (NOT c1)) OR ((NOT c2) AND c3 AND (NOT c5)) OR ((NOT c7) AND (NOT c8)) OR (c9 AND c6 AND c8 AND (NOT c8) AND (NOT c7))
|
||||
SETTINGS convert_query_to_cnf = 1, allow_experimental_analyzer = 0;
|
||||
|
||||
DROP TABLE 02725_cnf;
|
@@ -0,0 +1,9 @@
+Initial
+6 12
+6 12
+REPLACE simple
+6 10
+6 10
+ATTACH FROM
+6 10
+6 10
@ -0,0 +1,48 @@
|
||||
-- Tags: no-replicated-database
|
||||
|
||||
DROP TEMPORARY TABLE IF EXISTS src;
|
||||
DROP TABLE IF EXISTS dst;
|
||||
DROP TABLE IF EXISTS rdst;
|
||||
|
||||
CREATE TEMPORARY TABLE src (p UInt64, k String, d UInt64) ENGINE = MergeTree PARTITION BY p ORDER BY k;
|
||||
CREATE TABLE dst (p UInt64, k String, d UInt64) ENGINE = MergeTree PARTITION BY p ORDER BY k;
|
||||
CREATE TABLE rdst (p UInt64, k String, d UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_alter_attach_00626_rdst', 'r1') PARTITION BY p ORDER BY k;
|
||||
|
||||
SELECT 'Initial';
|
||||
INSERT INTO src VALUES (0, '0', 1);
|
||||
INSERT INTO src VALUES (1, '0', 1);
|
||||
INSERT INTO src VALUES (1, '1', 1);
|
||||
INSERT INTO src VALUES (2, '0', 1);
|
||||
INSERT INTO src VALUES (3, '0', 1);
|
||||
INSERT INTO src VALUES (3, '1', 1);
|
||||
|
||||
INSERT INTO dst VALUES (0, '1', 2);
|
||||
INSERT INTO dst VALUES (1, '1', 2), (1, '2', 2);
|
||||
INSERT INTO dst VALUES (2, '1', 2);
|
||||
INSERT INTO dst VALUES (3, '1', 2), (3, '2', 2);
|
||||
|
||||
INSERT INTO rdst VALUES (0, '1', 2);
|
||||
INSERT INTO rdst VALUES (1, '1', 2), (1, '2', 2);
|
||||
INSERT INTO rdst VALUES (2, '1', 2);
|
||||
INSERT INTO rdst VALUES (3, '1', 2), (3, '2', 2);
|
||||
|
||||
SELECT count(), sum(d) FROM dst;
|
||||
SELECT count(), sum(d) FROM rdst;
|
||||
|
||||
SELECT 'REPLACE simple';
|
||||
ALTER TABLE dst REPLACE PARTITION 1 FROM src;
|
||||
SELECT count(), sum(d) FROM dst;
|
||||
ALTER TABLE rdst REPLACE PARTITION 3 FROM src;
|
||||
SELECT count(), sum(d) FROM rdst;
|
||||
|
||||
SELECT 'ATTACH FROM';
|
||||
ALTER TABLE dst DROP PARTITION 1;
|
||||
ALTER TABLE dst ATTACH PARTITION 1 FROM src;
|
||||
SELECT count(), sum(d) FROM dst;
|
||||
ALTER TABLE rdst DROP PARTITION 3;
|
||||
ALTER TABLE rdst ATTACH PARTITION 1 FROM src;
|
||||
SELECT count(), sum(d) FROM rdst;
|
||||
|
||||
DROP TEMPORARY TABLE IF EXISTS src;
|
||||
DROP TABLE IF EXISTS dst;
|
||||
DROP TABLE IF EXISTS rdst;
|