upd comments
This commit is contained in:
parent c2700c4ae6
commit 4e5cfd11d0
@@ -516,10 +516,10 @@ BlockIO InterpreterInsertQuery::execute()
         auto table_id = table->getStorageID();
         auto views = DatabaseCatalog::instance().getDependentViews(table_id);
 
-        /// TODO: should we really skip views or not? they have special `parallel_view_processing`, it should be enough?
-        pre_streams_size = !table->isView() && views.empty()
-            ? settings.max_insert_threads
-            : std::min<size_t>(settings.max_insert_threads, pipeline.getNumStreams());
+        /// It breaks some views-related tests and we have dedicated `parallel_view_processing` for views, so let's just skip them.
+        const bool resize_to_max_insert_threads = !table->isView() && views.empty();
+        pre_streams_size = resize_to_max_insert_threads ? settings.max_insert_threads
+                                                        : std::min<size_t>(settings.max_insert_threads, pipeline.getNumStreams());
 
         if (table->supportsParallelInsert())
             sink_streams_size = pre_streams_size;
     }
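The changed hunk above sizes the pre-sink streams of an INSERT: only a plain table with no dependent views is resized up to max_insert_threads; a view, or a table that has dependent views, keeps the pipeline's existing stream count (views rely on their own `parallel_view_processing` setting). A minimal standalone C++ sketch of that decision, using illustrative parameter names rather than the actual Storage/Settings/QueryPipeline API:

#include <algorithm>
#include <cstddef>

/// Hypothetical helper mirroring the logic in InterpreterInsertQuery::execute():
/// resize up to max_insert_threads only for a plain table without dependent views.
std::size_t choosePreStreamsSize(bool is_view, bool has_dependent_views,
                                 std::size_t max_insert_threads,
                                 std::size_t num_pipeline_streams)
{
    const bool resize_to_max_insert_threads = !is_view && !has_dependent_views;
    return resize_to_max_insert_threads
        ? max_insert_threads
        : std::min(max_insert_threads, num_pipeline_streams);
}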
@@ -65,7 +65,6 @@ struct ViewsData
     StorageID source_storage_id;
     StorageMetadataPtr source_metadata_snapshot;
     StoragePtr source_storage;
-    /// This value is actually only for logs.
     size_t max_threads = 1;
 
     /// In case of exception happened while inserting into main table, it is pushed to pipeline.