Merge branch 'master' into memory-tracking-overloads
Commit 4fb66013fe

.github/workflows/pull_request.yml (vendored, 16 lines changed)
@@ -153,13 +153,19 @@ jobs:
          EOF
      - name: Clear repository
        run: |
          sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Fast Test
        run: |
          sudo rm -fr "$GITHUB_WORKSPACE"
          mkdir "$GITHUB_WORKSPACE"
          sudo rm -fr "$TEMP_PATH"
          mkdir -p "$TEMP_PATH"
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Download changed images
        uses: actions/download-artifact@v2
        with:
          name: changed_images
          path: ${{ env.TEMP_PATH }}
      - name: Fast Test
        run: |
          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
          cd "$REPO_COPY/tests/ci" && python3 fast_test_check.py
      - name: Cleanup

@@ -115,6 +115,7 @@ function start_server

function clone_root
{
+    git config --global --add safe.directory "$FASTTEST_SOURCE"
    git clone --depth 1 https://github.com/ClickHouse/ClickHouse.git -- "$FASTTEST_SOURCE" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee "$FASTTEST_OUTPUT/clone_log.txt"

    (

@@ -329,8 +329,8 @@ then
        -e "Code: 1000, e.code() = 111, Connection refused" \
        -e "UNFINISHED" \
        -e "Renaming unexpected part" \
-        /var/log/clickhouse-server/clickhouse-server.backward.*.log | zgrep -Fa "<Error>" > /test_output/bc_check_error_messages.txt \
-        && echo -e 'Backward compatibility check: Error message in clickhouse-server.log (see bc_check_error_messages.txt)\tOK' >> /test_output/test_results.tsv \
+        /var/log/clickhouse-server/clickhouse-server.backward.clean.log | zgrep -Fa "<Error>" > /test_output/bc_check_error_messages.txt \
+        && echo -e 'Backward compatibility check: Error message in clickhouse-server.log (see bc_check_error_messages.txt)\tFAIL' >> /test_output/test_results.tsv \
        || echo -e 'Backward compatibility check: No Error messages in clickhouse-server.log\tOK' >> /test_output/test_results.tsv

    # Remove file bc_check_error_messages.txt if it's empty

@@ -346,7 +346,7 @@ then

    # OOM
    zgrep -Fa " <Fatal> Application: Child process was terminated by signal 9" /var/log/clickhouse-server/clickhouse-server.backward.*.log > /dev/null \
-        && echo -e 'Backward compatibility check: OOM killer (or signal 9) in clickhouse-server.log\tOK' >> /test_output/test_results.tsv \
+        && echo -e 'Backward compatibility check: OOM killer (or signal 9) in clickhouse-server.log\tFAIL' >> /test_output/test_results.tsv \
        || echo -e 'Backward compatibility check: No OOM messages in clickhouse-server.log\tOK' >> /test_output/test_results.tsv

    # Logical errors

@@ -366,7 +366,7 @@ then
    # It also checks for crash without stacktrace (printed by watchdog)
    echo "Check for Fatal message in server log:"
    zgrep -Fa " <Fatal> " /var/log/clickhouse-server/clickhouse-server.backward.*.log > /test_output/bc_check_fatal_messages.txt \
-        && echo -e 'Backward compatibility check: Fatal message in clickhouse-server.log (see bc_check_fatal_messages.txt)\tOK' >> /test_output/test_results.tsv \
+        && echo -e 'Backward compatibility check: Fatal message in clickhouse-server.log (see bc_check_fatal_messages.txt)\tFAIL' >> /test_output/test_results.tsv \
        || echo -e 'Backward compatibility check: No fatal messages in clickhouse-server.log\tOK' >> /test_output/test_results.tsv

    # Remove file bc_check_fatal_messages.txt if it's empty

@@ -47,7 +47,7 @@ Optional parameters:

- `kafka_row_delimiter` — Delimiter character, which ends the message.
- `kafka_schema` — Parameter that must be used if the format requires a schema definition. For example, [Cap’n Proto](https://capnproto.org/) requires the path to the schema file and the name of the root `schema.capnp:Message` object.
-- `kafka_num_consumers` — The number of consumers per table. Default: `1`. Specify more consumers if the throughput of one consumer is insufficient. The total number of consumers should not exceed the number of partitions in the topic, since only one consumer can be assigned per partition.
+- `kafka_num_consumers` — The number of consumers per table. Default: `1`. Specify more consumers if the throughput of one consumer is insufficient. The total number of consumers should not exceed the number of partitions in the topic, since only one consumer can be assigned per partition, and must not be greater than the number of physical cores on the server where ClickHouse is deployed.
- `kafka_max_block_size` — The maximum batch size (in messages) for poll (default: `max_block_size`).
- `kafka_skip_broken_messages` — Kafka message parser tolerance to schema-incompatible messages per block. Default: `0`. If `kafka_skip_broken_messages = N` then the engine skips *N* Kafka messages that cannot be parsed (a message equals a row of data).
- `kafka_commit_every_batch` — Commit every consumed and handled batch instead of a single commit after writing a whole block (default: `0`).

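For orientation, these settings live in the SETTINGS clause of a Kafka table definition. A minimal sketch (broker address, topic, and group name below are illustrative, not part of this commit):

CREATE TABLE queue
(
    timestamp UInt64,
    level String,
    message String
)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'localhost:9092',
         kafka_topic_list = 'events',                 -- hypothetical topic
         kafka_group_name = 'clickhouse_consumers',   -- hypothetical consumer group
         kafka_format = 'JSONEachRow',
         kafka_num_consumers = 2,   -- keep <= partitions in the topic and <= physical cores
         kafka_skip_broken_messages = 1;
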
@@ -137,14 +137,14 @@ static void incrementProfileEventsBlock(Block & dst, const Block & src)

    auto & dst_column_host_name = typeid_cast<ColumnString &>(*mutable_columns[name_pos["host_name"]]);
    auto & dst_array_current_time = typeid_cast<ColumnUInt32 &>(*mutable_columns[name_pos["current_time"]]).getData();
-    auto & dst_array_thread_id = typeid_cast<ColumnUInt64 &>(*mutable_columns[name_pos["thread_id"]]).getData();
+    // auto & dst_array_thread_id = typeid_cast<ColumnUInt64 &>(*mutable_columns[name_pos["thread_id"]]).getData();
    auto & dst_array_type = typeid_cast<ColumnInt8 &>(*mutable_columns[name_pos["type"]]).getData();
    auto & dst_column_name = typeid_cast<ColumnString &>(*mutable_columns[name_pos["name"]]);
    auto & dst_array_value = typeid_cast<ColumnInt64 &>(*mutable_columns[name_pos["value"]]).getData();

    const auto & src_column_host_name = typeid_cast<const ColumnString &>(*src.getByName("host_name").column);
    const auto & src_array_current_time = typeid_cast<const ColumnUInt32 &>(*src.getByName("current_time").column).getData();
-    // const auto & src_array_thread_id = typeid_cast<const ColumnUInt64 &>(*src.getByName("thread_id").column).getData();
+    const auto & src_array_thread_id = typeid_cast<const ColumnUInt64 &>(*src.getByName("thread_id").column).getData();
    const auto & src_column_name = typeid_cast<const ColumnString &>(*src.getByName("name").column);
    const auto & src_array_value = typeid_cast<const ColumnInt64 &>(*src.getByName("value").column).getData();

@@ -169,6 +169,16 @@ static void incrementProfileEventsBlock(Block & dst, const Block & src)
        rows_by_name[id] = src_row;
    }

+    /// Filter out snapshots
+    std::set<size_t> thread_id_filter_mask;
+    for (size_t i = 0; i < src_array_thread_id.size(); ++i)
+    {
+        if (src_array_thread_id[i] != 0)
+        {
+            thread_id_filter_mask.emplace(i);
+        }
+    }
+
    /// Merge src into dst.
    for (size_t dst_row = 0; dst_row < dst_rows; ++dst_row)
    {

@@ -180,6 +190,11 @@ static void incrementProfileEventsBlock(Block & dst, const Block & src)
        if (auto it = rows_by_name.find(id); it != rows_by_name.end())
        {
            size_t src_row = it->second;
+            if (thread_id_filter_mask.contains(src_row))
+            {
+                continue;
+            }
+
            dst_array_current_time[dst_row] = src_array_current_time[src_row];

            switch (dst_array_type[dst_row])

@@ -199,24 +214,18 @@ static void incrementProfileEventsBlock(Block & dst, const Block & src)
    /// Copy rows from src that dst does not contains.
    for (const auto & [id, pos] : rows_by_name)
    {
+        if (thread_id_filter_mask.contains(pos))
+        {
+            continue;
+        }
+
        for (size_t col = 0; col < src.columns(); ++col)
        {
            mutable_columns[col]->insert((*src.getByPosition(col).column)[pos]);
        }
    }

-    /// Filter out snapshots
-    std::set<size_t> thread_id_filter_mask;
-    for (size_t i = 0; i < dst_array_thread_id.size(); ++i)
-    {
-        if (dst_array_thread_id[i] != 0)
-        {
-            thread_id_filter_mask.emplace(i);
-        }
-    }
-
    dst.setColumns(std::move(mutable_columns));
-    dst.erase(thread_id_filter_mask);
}

@@ -394,8 +403,16 @@ void ClientBase::onData(Block & block, ASTPtr parsed_query)
    if (need_render_progress && (stdout_is_a_tty || is_interactive) && !select_into_file)
        progress_indication.clearProgressOutput();

-    output_format->write(materializeBlock(block));
-    written_first_block = true;
+    try
+    {
+        output_format->write(materializeBlock(block));
+        written_first_block = true;
+    }
+    catch (const Exception &)
+    {
+        /// Catch client errors like NO_ROW_DELIMITER
+        throw LocalFormatError(getCurrentExceptionMessage(print_stack_trace), getCurrentExceptionCode());
+    }

    /// Received data block is immediately displayed to the user.
    output_format->flush();

@@ -1413,6 +1430,8 @@ void ClientBase::processParsedSingleQuery(const String & full_query, const Strin
            progress_indication.clearProgressOutput();
            logs_out_stream->writeProfileEvents(profile_events.last_block);
            logs_out_stream->flush();
+
+            profile_events.last_block = {};
        }

        if (is_interactive)

@@ -1838,7 +1857,7 @@ void ClientBase::runInteractive()
    }

    LineReader::Patterns query_extenders = {"\\"};
-    LineReader::Patterns query_delimiters = {";", "\\G"};
+    LineReader::Patterns query_delimiters = {";", "\\G", "\\G;"};

#if USE_REPLXX
    replxx::Replxx::highlighter_callback_t highlight_callback{};

@@ -1860,9 +1879,13 @@ void ClientBase::runInteractive()
            break;

        has_vertical_output_suffix = false;
-        if (input.ends_with("\\G"))
+        if (input.ends_with("\\G") || input.ends_with("\\G;"))
        {
-            input.resize(input.size() - 2);
+            if (input.ends_with("\\G"))
+                input.resize(input.size() - 2);
+            else if (input.ends_with("\\G;"))
+                input.resize(input.size() - 3);
+
            has_vertical_output_suffix = true;
        }

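Taken together with the delimiter change above, the interactive client now accepts both spellings of the vertical-output suffix (a usage sketch; any query works, cf. the expect tests later in this commit):

SELECT 1\G
SELECT 1\G;
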
@@ -201,9 +201,6 @@ void LocalConnection::finishQuery()
{
    next_packet_type = Protocol::Server::EndOfStream;

-    if (!state)
-        return;
-
    if (state->executor)
    {
        state->executor.reset();

@@ -219,6 +216,7 @@ void LocalConnection::finishQuery()

    state->io.onFinish();
    state.reset();
+    last_sent_snapshots.clear();
}

bool LocalConnection::poll(size_t)

@@ -326,6 +324,21 @@ bool LocalConnection::poll(size_t)
        }
    }

+    if (state->is_finished && !state->sent_profile_events)
+    {
+        state->sent_profile_events = true;
+
+        if (send_profile_events && state->executor)
+        {
+            Block block;
+            state->after_send_profile_events.restart();
+            next_packet_type = Protocol::Server::ProfileEvents;
+            getProfileEvents(block);
+            state->block.emplace(std::move(block));
+            return true;
+        }
+    }
+
    if (state->is_finished)
    {
        finishQuery();

@@ -47,6 +47,7 @@ struct LocalQueryState
    bool sent_extremes = false;
    bool sent_progress = false;
    bool sent_profile_info = false;
+    bool sent_profile_events = false;

    /// To output progress, the difference after the previous sending of progress.
    Progress progress;

@@ -57,6 +57,8 @@ struct ZooKeeperRequest : virtual Request
    bool restored_from_zookeeper_log = false;

    UInt64 request_created_time_ns = 0;
+    UInt64 thread_id = 0;
+    String query_id;

    ZooKeeperRequest() = default;
    ZooKeeperRequest(const ZooKeeperRequest &) = default;

@@ -8,6 +8,7 @@
#include <IO/Operators.h>
#include <IO/WriteBufferFromString.h>
#include <base/logger_useful.h>
+#include <base/getThreadId.h>

#include <Common/config.h>

@@ -1016,6 +1017,11 @@ void ZooKeeper::pushRequest(RequestInfo && info)
    try
    {
        info.time = clock::now();
+        if (zk_log)
+        {
+            info.request->thread_id = getThreadId();
+            info.request->query_id = String(CurrentThread::getQueryId());
+        }

        if (!info.request->xid)
        {

@@ -1269,6 +1275,11 @@ void ZooKeeper::logOperationIfNeeded(const ZooKeeperRequestPtr & request, const
        elem.event_time = event_time;
        elem.address = socket_address;
        elem.session_id = session_id;
+        if (request)
+        {
+            elem.thread_id = request->thread_id;
+            elem.query_id = request->query_id;
+        }
        maybe_zk_log->add(elem);
    }
}

@@ -33,6 +33,7 @@ bool ParallelReadBuffer::addReaderToPool(std::unique_lock<std::mutex> & /*buffer

    auto worker = read_workers.emplace_back(std::make_shared<ReadWorker>(std::move(reader)));

+    ++active_working_reader;
    schedule([this, worker = std::move(worker)]() mutable { readerThreadFunction(std::move(worker)); });

    return true;

@@ -203,11 +204,6 @@ bool ParallelReadBuffer::nextImpl()

void ParallelReadBuffer::readerThreadFunction(ReadWorkerPtr read_worker)
{
-    {
-        std::lock_guard lock{mutex};
-        ++active_working_reader;
-    }
-
    SCOPE_EXIT({
        std::lock_guard lock{mutex};
        --active_working_reader;

@@ -250,21 +250,6 @@ bool MergeTreeTransaction::rollback() noexcept

    /// Discard changes in active parts set
    /// Remove parts that were created, restore parts that were removed (except parts that were created by this transaction too)
-    for (const auto & part : parts_to_remove)
-    {
-        if (part->version.isRemovalTIDLocked())
-        {
-            /// Don't need to remove part from working set if it was created and removed by this transaction
-            assert(part->version.removal_tid_lock == tid.getHash());
-            continue;
-        }
-        /// FIXME do not lock removal_tid when rolling back part creation, it's ugly
-        const_cast<MergeTreeData &>(part->storage).removePartsFromWorkingSet(NO_TRANSACTION_RAW, {part}, true);
-    }
-
-    for (const auto & part : parts_to_activate)
-        if (part->version.getCreationTID() != tid)
-            const_cast<MergeTreeData &>(part->storage).restoreAndActivatePart(part);

    /// Kind of optimization: cleanup thread can remove these parts immediately
    for (const auto & part : parts_to_remove)

@@ -274,6 +259,18 @@ bool MergeTreeTransaction::rollback() noexcept
        part->appendCSNToVersionMetadata(VersionMetadata::CREATION);
    }

+    for (const auto & part : parts_to_remove)
+    {
+        /// NOTE It's possible that part is already removed from working set in the same transaction
+        /// (or, even worse, in a separate non-transactional query with PrehistoricTID),
+        /// but it's not a problem: removePartsFromWorkingSet(...) will do nothing in this case.
+        const_cast<MergeTreeData &>(part->storage).removePartsFromWorkingSet(NO_TRANSACTION_RAW, {part}, true);
+    }
+
+    for (const auto & part : parts_to_activate)
+        if (part->version.getCreationTID() != tid)
+            const_cast<MergeTreeData &>(part->storage).restoreAndActivatePart(part);
+
    for (const auto & part : parts_to_activate)
    {
        /// Clear removal_tid from version metadata file, so we will not need to distinguish TIDs that were not committed

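This rollback path is what runs when an experimental ClickHouse transaction is aborted. A hedged SQL sketch of the flow exercised by the transactions test further down (table name is hypothetical; requires the experimental transactions feature to be enabled):

BEGIN TRANSACTION;
INSERT INTO t VALUES (42);  -- creates a part; an AddPart event is logged
ROLLBACK;                   -- the part created by this transaction is removed from the working set
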
@@ -105,7 +105,7 @@ void getProfileEvents(
        {"value", std::make_shared<DataTypeInt64>()},
    };

-    ColumnsWithTypeAndName temp_columns;
+    ColumnsWithTypeAndName temp_columns;
    for (auto const & name_and_type : column_names_and_types)
        temp_columns.emplace_back(name_and_type.type, name_and_type.name);

@@ -212,7 +212,7 @@ void TransactionLog::runUpdatingThread()
            if (stop_flag.load())
                return;

-            if (!zookeeper)
+            if (getZooKeeper()->expired())
            {
                auto new_zookeeper = global_context->getZooKeeper();
                std::lock_guard lock{mutex};

@@ -222,16 +222,11 @@ void TransactionLog::runUpdatingThread()
            loadNewEntries();
            removeOldEntries();
        }
-        catch (const Coordination::Exception & e)
+        catch (const Coordination::Exception &)
        {
            tryLogCurrentException(log);
            /// TODO better backoff
            std::this_thread::sleep_for(std::chrono::milliseconds(1000));
-            if (Coordination::isHardwareError(e.code))
-            {
-                std::lock_guard lock{mutex};
-                zookeeper.reset();
-            }
            log_updated_event->set();
        }
        catch (...)

@@ -116,6 +116,8 @@ NamesAndTypesList ZooKeeperLogElement::getNamesAndTypes()
        {"type", std::move(type_enum)},
        {"event_date", std::make_shared<DataTypeDate>()},
        {"event_time", std::make_shared<DataTypeDateTime64>(6)},
+        {"thread_id", std::make_shared<DataTypeUInt64>()},
+        {"query_id", std::make_shared<DataTypeString>()},
        {"address", DataTypeFactory::instance().get("IPv6")},
        {"port", std::make_shared<DataTypeUInt16>()},
        {"session_id", std::make_shared<DataTypeInt64>()},

@@ -164,6 +166,8 @@ void ZooKeeperLogElement::appendToBlock(MutableColumns & columns) const
    auto event_time_seconds = event_time / 1000000;
    columns[i++]->insert(DateLUT::instance().toDayNum(event_time_seconds).toUnderType());
    columns[i++]->insert(event_time);
+    columns[i++]->insert(thread_id);
+    columns[i++]->insert(query_id);
    columns[i++]->insertData(IPv6ToBinary(address.host()).data(), 16);
    columns[i++]->insert(address.port());
    columns[i++]->insert(session_id);

@@ -22,6 +22,8 @@ struct ZooKeeperLogElement

    Type type = UNKNOWN;
    Decimal64 event_time = 0;
+    UInt64 thread_id = 0;
+    String query_id;
    Poco::Net::SocketAddress address;
    Int64 session_id = 0;

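With thread_id and query_id recorded in the log element, ZooKeeper requests can be correlated with the queries that issued them. A sketch of the kind of query this enables (output depends on server activity):

SELECT type, event_time, session_id, thread_id, query_id
FROM system.zookeeper_log
WHERE query_id != ''
ORDER BY event_time DESC
LIMIT 10;
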
@@ -3,6 +3,7 @@
#include <Processors/QueryPlan/LimitStep.h>
#include <Processors/QueryPlan/TotalsHavingStep.h>
#include <Processors/QueryPlan/SortingStep.h>
+#include <Processors/QueryPlan/WindowStep.h>
#include <Common/typeid_cast.h>

namespace DB::QueryPlanOptimizations

@@ -66,6 +67,11 @@ size_t tryPushDownLimit(QueryPlan::Node * parent_node, QueryPlan::Nodes &)
    if (typeid_cast<const TotalsHavingStep *>(child.get()))
        return 0;

+    /// Disable for WindowStep.
+    /// TODO: we can push down limit in some cases if increase the limit value.
+    if (typeid_cast<const WindowStep *>(child.get()))
+        return 0;
+
    /// Now we should decide if pushing down limit possible for this step.

    const auto & transform_traits = transforming->getTransformTraits();

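The guard added above exists because a window function must see every row of its frame before any limit applies. A sketch of the query shape this protects (cf. the regression test added later in this commit): pushing LIMIT 3 below the window step would make cnt count only three rows instead of all ten million.

SELECT *
FROM (SELECT *, count() OVER () AS cnt FROM numbers(10000000))
LIMIT 3;
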
@@ -728,6 +728,7 @@ void TCPHandler::processOrdinaryQueryWithProcessors()
            return;

        sendData({});
+        last_sent_snapshots.clear();
    }

    sendProgress();

@@ -76,4 +76,5 @@ def get_images_with_versions(


def get_image_with_version(reports_path, image, pull=True, version=None):
+    logging.info("Looking for images file in %s", reports_path)
    return get_images_with_versions(reports_path, [image], pull, version=version)[0]

@@ -1407,3 +1407,24 @@ def test_insert_select_schema_inference(started_cluster):
        f"select * from s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test_insert_select.native')"
    )
    assert int(result) == 1
+
+
+def test_parallel_reading_with_memory_limit(started_cluster):
+    bucket = started_cluster.minio_bucket
+    instance = started_cluster.instances["dummy"]
+
+    instance.query(
+        f"insert into function s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test_memory_limit.native') select * from numbers(100000)"
+    )
+
+    result = instance.query_and_get_error(
+        f"select * from url('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test_memory_limit.native') settings max_memory_usage=10000"
+    )
+
+    assert "Memory limit (for query) exceeded" in result
+
+    sleep(5)
+
+    # Check that server didn't crash
+    result = instance.query("select 1")
+    assert int(result) == 1

@@ -1,5 +1,7 @@
#!/usr/bin/env bash
+# Tags: no-parallel
+
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh

@@ -12,7 +14,7 @@ function run_selects()
{
    thread_num=$1
    readarray -t tables_arr < <(${CLICKHOUSE_CLIENT} -q "SELECT database || '.' || name FROM system.tables
-    WHERE database in ('system', 'information_schema', 'INFORMATION_SCHEMA') and name!='zookeeper' and name!='merge_tree_metadata_cache'
+    WHERE database in ('system', 'information_schema', 'INFORMATION_SCHEMA') and name!='zookeeper' and name!='merge_tree_metadata_cache'
    AND sipHash64(name || toString($RAND)) % $THREADS = $thread_num")

    for t in "${tables_arr[@]}"

@@ -6,7 +6,7 @@
3 all_1_1_0 0
3 all_3_3_0 1
4 all_1_1_0 1 (0,0,'00000000-0000-0000-0000-000000000000') 0
-4 all_2_2_0 18446744073709551615 (1,1,'00000000-0000-0000-0000-000000000000') 0
+4 all_2_2_0 18446744073709551615 (0,0,'00000000-0000-0000-0000-000000000000') 0
4 all_3_3_0 0 (0,0,'00000000-0000-0000-0000-000000000000') 0
5 1
6 all_1_1_0 0

@@ -19,7 +19,6 @@
1 1 AddPart 1 1 1 1 all_1_1_0
2 1 Begin 1 1 1 1
2 1 AddPart 1 1 1 1 all_2_2_0
-1 1 LockPart 1 1 1 1 all_2_2_0
2 1 Rollback 1 1 1 1
3 1 Begin 1 1 1 1
3 1 AddPart 1 1 1 1 all_3_3_0

@ -27,6 +27,11 @@ expect "Row 1:"
|
||||
expect "1: 1"
|
||||
expect ":) "
|
||||
|
||||
send -- "SELECT 1\\G;\r"
|
||||
expect "Row 1:"
|
||||
expect "1: 1"
|
||||
expect ":) "
|
||||
|
||||
send -- "SELECT 1\\\r"
|
||||
expect ":-] "
|
||||
send -- ", 2\r"
|
||||
@ -41,6 +46,14 @@ expect "1: 1"
|
||||
expect "2: 2"
|
||||
expect ":) "
|
||||
|
||||
send -- "SELECT 1\\\r"
|
||||
expect ":-] "
|
||||
send -- ", 2\\G;\r"
|
||||
expect "Row 1:"
|
||||
expect "1: 1"
|
||||
expect "2: 2"
|
||||
expect ":) "
|
||||
|
||||
send -- ""
|
||||
expect eof
|
||||
|
||||
@ -56,6 +69,11 @@ expect "Row 1:"
|
||||
expect "1: 1"
|
||||
expect ":) "
|
||||
|
||||
send -- "SELECT 1\\G;\r"
|
||||
expect "Row 1:"
|
||||
expect "1: 1"
|
||||
expect ":) "
|
||||
|
||||
send -- "SELECT 1; \r"
|
||||
expect "│ 1 │"
|
||||
expect ":) "
|
||||
@ -65,6 +83,11 @@ expect "Row 1:"
|
||||
expect "1: 1"
|
||||
expect ":) "
|
||||
|
||||
send -- "SELECT 1\\G; \r"
|
||||
expect "Row 1:"
|
||||
expect "1: 1"
|
||||
expect ":) "
|
||||
|
||||
send -- "SELECT 1\r"
|
||||
expect ":-] "
|
||||
send -- ";\r"
|
||||
@ -78,6 +101,13 @@ expect "Row 1:"
|
||||
expect "1: 1"
|
||||
expect ":) "
|
||||
|
||||
send -- "SELECT 1\r"
|
||||
expect ":-] "
|
||||
send -- "\\G;\r"
|
||||
expect "Row 1:"
|
||||
expect "1: 1"
|
||||
expect ":) "
|
||||
|
||||
send -- "SELECT 1\r"
|
||||
expect ":-] "
|
||||
send -- ", 2;\r"
|
||||
@ -92,5 +122,14 @@ expect "1: 1"
|
||||
expect "2: 2"
|
||||
expect ":) "
|
||||
|
||||
|
||||
send -- "SELECT 1\r"
|
||||
expect ":-] "
|
||||
send -- ", 2\\G;\r"
|
||||
expect "Row 1:"
|
||||
expect "1: 1"
|
||||
expect "2: 2"
|
||||
expect ":) "
|
||||
|
||||
send -- ""
|
||||
expect eof
|
||||
|
@ -23,6 +23,12 @@ expect "Row 1:"
|
||||
expect "1: 1"
|
||||
expect ":) "
|
||||
|
||||
|
||||
send -- "SELECT 1\\G;\r"
|
||||
expect "Row 1:"
|
||||
expect "1: 1"
|
||||
expect ":) "
|
||||
|
||||
send -- "SELECT 1\\\r"
|
||||
expect ":-] "
|
||||
send -- ", 2\r"
|
||||
@ -37,5 +43,14 @@ expect "1: 1"
|
||||
expect "2: 2"
|
||||
expect ":) "
|
||||
|
||||
|
||||
send -- "SELECT 1\\\r"
|
||||
expect ":-] "
|
||||
send -- ", 2\\G;\r"
|
||||
expect "Row 1:"
|
||||
expect "1: 1"
|
||||
expect "2: 2"
|
||||
expect ":) "
|
||||
|
||||
send -- ""
|
||||
expect eof
|
||||
|
@@ -1,5 +1,18 @@
+do not print any ProfileEvents packets
0
-100000
+print only last (and also number of rows to provide more info in case of failures)
[ 0 ] SelectedRows: 131010 (increment)
+regression test for incorrect filtering out snapshots
+0
+regression test for overlap profile events snapshots between queries
+[ 0 ] SelectedRows: 1 (increment)
+[ 0 ] SelectedRows: 1 (increment)
+regression test for overlap profile events snapshots between queries (clickhouse-local)
+[ 0 ] SelectedRows: 1 (increment)
+[ 0 ] SelectedRows: 1 (increment)
+print everything
OK
+print each 100 ms
OK
+check that ProfileEvents is new for each query
+OK

@ -4,13 +4,30 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
|
||||
# shellcheck source=../shell_config.sh
|
||||
. "$CURDIR"/../shell_config.sh
|
||||
|
||||
# do not print any ProfileEvents packets
|
||||
echo 'do not print any ProfileEvents packets'
|
||||
$CLICKHOUSE_CLIENT -q 'select * from numbers(1e5) format Null' |& grep -c 'SelectedRows'
|
||||
# print only last (and also number of rows to provide more info in case of failures)
|
||||
$CLICKHOUSE_CLIENT --max_block_size=65505 --print-profile-events --profile-events-delay-ms=-1 -q 'select * from numbers(1e5)' 2> >(grep -o -e '\[ 0 \] SelectedRows: .*$' -e Exception) 1> >(wc -l)
|
||||
# print everything
|
||||
|
||||
echo 'print only last (and also number of rows to provide more info in case of failures)'
|
||||
$CLICKHOUSE_CLIENT --max_block_size=65505 --print-profile-events --profile-events-delay-ms=-1 -q 'select * from numbers(1e5)' |& grep -o -e '\[ 0 \] SelectedRows: .*$' -e Exception
|
||||
|
||||
echo 'regression test for incorrect filtering out snapshots'
|
||||
$CLICKHOUSE_CLIENT --print-profile-events --profile-events-delay-ms=-1 -n -q 'select 1; select 1' >& /dev/null
|
||||
echo $?
|
||||
|
||||
echo 'regression test for overlap profile events snapshots between queries'
|
||||
$CLICKHOUSE_CLIENT --print-profile-events --profile-events-delay-ms=-1 -n -q 'select 1; select 1' |& grep -F -o '[ 0 ] SelectedRows: 1 (increment)'
|
||||
|
||||
echo 'regression test for overlap profile events snapshots between queries (clickhouse-local)'
|
||||
$CLICKHOUSE_LOCAL --print-profile-events --profile-events-delay-ms=-1 -n -q 'select 1; select 1' |& grep -F -o '[ 0 ] SelectedRows: 1 (increment)'
|
||||
|
||||
echo 'print everything'
|
||||
profile_events="$($CLICKHOUSE_CLIENT --max_block_size 1 --print-profile-events -q 'select sleep(1) from numbers(2) format Null' |& grep -c 'SelectedRows')"
|
||||
test "$profile_events" -gt 1 && echo OK || echo "FAIL ($profile_events)"
|
||||
# print each 100 ms
|
||||
|
||||
echo 'print each 100 ms'
|
||||
profile_events="$($CLICKHOUSE_CLIENT --max_block_size 1 --print-profile-events --profile-events-delay-ms=100 -q 'select sleep(1) from numbers(2) format Null' |& grep -c 'SelectedRows')"
|
||||
test "$profile_events" -gt 1 && echo OK || echo "FAIL ($profile_events)"
|
||||
|
||||
echo 'check that ProfileEvents is new for each query'
|
||||
sleep_function_calls=$($CLICKHOUSE_CLIENT --print-profile-events --profile-events-delay-ms=-1 -n -q 'select sleep(1); select 1' |& grep -c 'SleepFunctionCalls')
|
||||
test "$sleep_function_calls" -eq 1 && echo OK || echo "FAIL ($sleep_function_calls)"
|
||||
|
@@ -0,0 +1,10 @@
+0 1
+1 2
+2 3
+a 2
+0 10000000
+1 10000000
+2 10000000
+0 10000000
+1 10000000
+2 10000000

@@ -0,0 +1,16 @@
+SELECT
+    number,
+    leadInFrame(number) OVER w AS W
+FROM numbers(10)
+WINDOW w AS (ORDER BY number ASC Rows BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING)
+LIMIT 3;
+
+WITH arrayJoin(['a', 'a', 'b', 'b']) AS field
+SELECT
+    field,
+    count() OVER (PARTITION BY field)
+ORDER BY field ASC
+LIMIT 1;
+
+select * from ( ( select *, count() over () cnt from ( select * from numbers(10000000) ) ) ) limit 3 ;
+select * from ( ( select *, count() over () cnt from ( select * from numbers(10000000) ) ) ) order by number limit 3 ;

@@ -9,11 +9,11 @@ FORMATS=('CSV' 'CSVWithNames')

for format in "${FORMATS[@]}"
do
    echo "$format, false";
-    $CLICKHOUSE_CLIENT --output_format_parallel_formatting=false -q \
+    $CLICKHOUSE_CLIENT --max_threads=0 --output_format_parallel_formatting=false -q \
        "SELECT ClientEventTime::DateTime('Asia/Dubai') as a, MobilePhoneModel as b, ClientIP6 as c FROM test.hits ORDER BY a, b, c Format $format" | md5sum

    echo "$format, true";
-    $CLICKHOUSE_CLIENT --output_format_parallel_formatting=true -q \
+    $CLICKHOUSE_CLIENT --max_threads=0 --output_format_parallel_formatting=true -q \
        "SELECT ClientEventTime::DateTime('Asia/Dubai') as a, MobilePhoneModel as b, ClientIP6 as c FROM test.hits ORDER BY a, b, c Format $format" | md5sum
done

@@ -11,10 +11,10 @@ FORMATS=('JSONEachRow' 'JSONCompactEachRow' 'JSONCompactStringsEachRow' 'JSONCom

for format in "${FORMATS[@]}"
do
    echo "$format, false";
-    $CLICKHOUSE_CLIENT --output_format_parallel_formatting=false -q \
+    $CLICKHOUSE_CLIENT --max_threads=0 --output_format_parallel_formatting=false -q \
        "SELECT ClientEventTime::DateTime('Asia/Dubai') as a, MobilePhoneModel as b, ClientIP6 as c FROM test.hits ORDER BY a, b, c LIMIT 3000000 Format $format" | md5sum

    echo "$format, true";
-    $CLICKHOUSE_CLIENT --output_format_parallel_formatting=true -q \
+    $CLICKHOUSE_CLIENT --max_threads=0 --output_format_parallel_formatting=true -q \
        "SELECT ClientEventTime::DateTime('Asia/Dubai') as a, MobilePhoneModel as b, ClientIP6 as c FROM test.hits ORDER BY a, b, c LIMIT 3000000 Format $format" | md5sum
done

@@ -13,9 +13,9 @@ do
    $CLICKHOUSE_CLIENT -q "CREATE TABLE parsing_with_names(c FixedString(16), a DateTime('Asia/Dubai'), b String) ENGINE=Memory()"

    echo "$format, false";
-    $CLICKHOUSE_CLIENT --max_block_size=65505 --output_format_parallel_formatting=false -q \
+    $CLICKHOUSE_CLIENT --max_threads=0 --max_block_size=65505 --output_format_parallel_formatting=false -q \
        "SELECT URLRegions as d, toTimeZone(ClientEventTime, 'Asia/Dubai') as a, MobilePhoneModel as b, ParamPrice as e, ClientIP6 as c FROM test.hits LIMIT 50000 Format $format" | \
-    $CLICKHOUSE_CLIENT --max_block_size=65505 --input_format_skip_unknown_fields=1 --input_format_parallel_parsing=false -q "INSERT INTO parsing_with_names FORMAT $format"
+    $CLICKHOUSE_CLIENT --max_threads=0 --max_block_size=65505 --input_format_skip_unknown_fields=1 --input_format_parallel_parsing=false -q "INSERT INTO parsing_with_names FORMAT $format"

    $CLICKHOUSE_CLIENT -q "SELECT * FROM parsing_with_names;" | md5sum
    $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS parsing_with_names"

@@ -23,9 +23,9 @@ do

    $CLICKHOUSE_CLIENT -q "CREATE TABLE parsing_with_names(c FixedString(16), a DateTime('Asia/Dubai'), b String) ENGINE=Memory()"
    echo "$format, true";
-    $CLICKHOUSE_CLIENT --max_block_size=65505 --output_format_parallel_formatting=false -q \
+    $CLICKHOUSE_CLIENT --max_threads=0 --max_block_size=65505 --output_format_parallel_formatting=false -q \
        "SELECT URLRegions as d, toTimeZone(ClientEventTime, 'Asia/Dubai') as a, MobilePhoneModel as b, ParamPrice as e, ClientIP6 as c FROM test.hits LIMIT 50000 Format $format" | \
-    $CLICKHOUSE_CLIENT --max_block_size=65505 --input_format_skip_unknown_fields=1 --input_format_parallel_parsing=true -q "INSERT INTO parsing_with_names FORMAT $format"
+    $CLICKHOUSE_CLIENT --max_threads=0 --max_block_size=65505 --input_format_skip_unknown_fields=1 --input_format_parallel_parsing=true -q "INSERT INTO parsing_with_names FORMAT $format"

    $CLICKHOUSE_CLIENT -q "SELECT * FROM parsing_with_names;" | md5sum
    $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS parsing_with_names"

@@ -13,9 +13,9 @@ do
    $CLICKHOUSE_CLIENT -q "CREATE TABLE parsing_with_names(c FixedString(16), a DateTime('Asia/Dubai'), b String) ENGINE=Memory()"

    echo "$format, false";
-    $CLICKHOUSE_CLIENT --output_format_parallel_formatting=false -q \
+    $CLICKHOUSE_CLIENT --max_threads=0 --output_format_parallel_formatting=false -q \
        "SELECT URLRegions as d, toTimeZone(ClientEventTime, 'Asia/Dubai') as a, MobilePhoneModel as b, ParamPrice as e, ClientIP6 as c FROM test.hits LIMIT 5000 Format $format" | \
-    $CLICKHOUSE_CLIENT --input_format_skip_unknown_fields=1 --input_format_parallel_parsing=false -q "INSERT INTO parsing_with_names SETTINGS input_format_null_as_default=0 FORMAT $format"
+    $CLICKHOUSE_CLIENT --max_threads=0 --input_format_skip_unknown_fields=1 --input_format_parallel_parsing=false -q "INSERT INTO parsing_with_names SETTINGS input_format_null_as_default=0 FORMAT $format"

    $CLICKHOUSE_CLIENT -q "SELECT * FROM parsing_with_names;" | md5sum
    $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS parsing_with_names"

@@ -23,9 +23,9 @@ do

    $CLICKHOUSE_CLIENT -q "CREATE TABLE parsing_with_names(c FixedString(16), a DateTime('Asia/Dubai'), b String) ENGINE=Memory()"
    echo "$format, true";
-    $CLICKHOUSE_CLIENT --output_format_parallel_formatting=false -q \
+    $CLICKHOUSE_CLIENT --max_threads=0 --output_format_parallel_formatting=false -q \
        "SELECT URLRegions as d, toTimeZone(ClientEventTime, 'Asia/Dubai') as a, MobilePhoneModel as b, ParamPrice as e, ClientIP6 as c FROM test.hits LIMIT 5000 Format $format" | \
-    $CLICKHOUSE_CLIENT --input_format_skip_unknown_fields=1 --input_format_parallel_parsing=true -q "INSERT INTO parsing_with_names SETTINGS input_format_null_as_default=0 FORMAT $format"
+    $CLICKHOUSE_CLIENT --max_threads=0 --input_format_skip_unknown_fields=1 --input_format_parallel_parsing=true -q "INSERT INTO parsing_with_names SETTINGS input_format_null_as_default=0 FORMAT $format"

    $CLICKHOUSE_CLIENT -q "SELECT * FROM parsing_with_names;" | md5sum
    $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS parsing_with_names"