mirror of https://github.com/ClickHouse/ClickHouse.git
synced 2024-12-17 20:02:05 +00:00

commit 2e05063298
Merge branch 'master' of github.com:ClickHouse/ClickHouse into pipe_reading
@@ -2,11 +2,11 @@
 # NOTE: has nothing common with DBMS_TCP_PROTOCOL_VERSION,
 # only DBMS_TCP_PROTOCOL_VERSION should be incremented on protocol changes.
-SET(VERSION_REVISION 54454)
+SET(VERSION_REVISION 54455)
 SET(VERSION_MAJOR 21)
-SET(VERSION_MINOR 9)
+SET(VERSION_MINOR 10)
 SET(VERSION_PATCH 1)
-SET(VERSION_GITHASH f48c5af90c2ad51955d1ee3b6b05d006b03e4238)
-SET(VERSION_DESCRIBE v21.9.1.1-prestable)
-SET(VERSION_STRING 21.9.1.1)
+SET(VERSION_GITHASH a091f2e36054061ceebb6826a590f8fb86f01196)
+SET(VERSION_DESCRIBE v21.10.1.1-prestable)
+SET(VERSION_STRING 21.10.1.1)
 # end of autochange

contrib/rocksdb vendored
@@ -1 +1 @@
-Subproject commit 6ff0adefdc84dac44e78804f7ca4122fe992cf8d
+Subproject commit dac0e9a68080c837d6b6223921f3fc151abbfcdc
@@ -70,11 +70,6 @@ else()
 endif()
 endif()
 
-set(BUILD_VERSION_CC rocksdb_build_version.cc)
-add_library(rocksdb_build_version OBJECT ${BUILD_VERSION_CC})
-
-target_include_directories(rocksdb_build_version PRIVATE "${ROCKSDB_SOURCE_DIR}/util")
-
 include(CheckCCompilerFlag)
 if(CMAKE_SYSTEM_PROCESSOR MATCHES "^(powerpc|ppc)64")
   CHECK_C_COMPILER_FLAG("-mcpu=power9" HAS_POWER9)
@@ -243,272 +238,293 @@ find_package(Threads REQUIRED)
 # Main library source code
 
 set(SOURCES
-        "${ROCKSDB_SOURCE_DIR}/cache/cache.cc"
-        "${ROCKSDB_SOURCE_DIR}/cache/clock_cache.cc"
-        "${ROCKSDB_SOURCE_DIR}/cache/lru_cache.cc"
-        "${ROCKSDB_SOURCE_DIR}/cache/sharded_cache.cc"
-        "${ROCKSDB_SOURCE_DIR}/db/arena_wrapped_db_iter.cc"
-        "${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_addition.cc"
-        "${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_builder.cc"
-        "${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_cache.cc"
-        "${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_garbage.cc"
-        "${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_meta.cc"
-        "${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_reader.cc"
-        "${ROCKSDB_SOURCE_DIR}/db/blob/blob_log_format.cc"
-        "${ROCKSDB_SOURCE_DIR}/db/blob/blob_log_sequential_reader.cc"
-        "${ROCKSDB_SOURCE_DIR}/db/blob/blob_log_writer.cc"
-        "${ROCKSDB_SOURCE_DIR}/db/builder.cc"
-        "${ROCKSDB_SOURCE_DIR}/db/c.cc"
-        "${ROCKSDB_SOURCE_DIR}/db/column_family.cc"
-        "${ROCKSDB_SOURCE_DIR}/db/compacted_db_impl.cc"
-        "${ROCKSDB_SOURCE_DIR}/db/compaction/compaction.cc"
-        "${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_iterator.cc"
-        "${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_picker.cc"
-        "${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_job.cc"
-        "${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_picker_fifo.cc"
-        "${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_picker_level.cc"
-        "${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_picker_universal.cc"
-        "${ROCKSDB_SOURCE_DIR}/db/compaction/sst_partitioner.cc"
-        "${ROCKSDB_SOURCE_DIR}/db/convenience.cc"
-        "${ROCKSDB_SOURCE_DIR}/db/db_filesnapshot.cc"
-        "${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl.cc"
-        "${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_write.cc"
-        "${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_compaction_flush.cc"
-        "${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_files.cc"
-        "${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_open.cc"
-        "${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_debug.cc"
-        "${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_experimental.cc"
-        "${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_readonly.cc"
-        "${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_secondary.cc"
-        "${ROCKSDB_SOURCE_DIR}/db/db_info_dumper.cc"
-        "${ROCKSDB_SOURCE_DIR}/db/db_iter.cc"
-        "${ROCKSDB_SOURCE_DIR}/db/dbformat.cc"
-        "${ROCKSDB_SOURCE_DIR}/db/error_handler.cc"
-        "${ROCKSDB_SOURCE_DIR}/db/event_helpers.cc"
-        "${ROCKSDB_SOURCE_DIR}/db/experimental.cc"
-        "${ROCKSDB_SOURCE_DIR}/db/external_sst_file_ingestion_job.cc"
-        "${ROCKSDB_SOURCE_DIR}/db/file_indexer.cc"
-        "${ROCKSDB_SOURCE_DIR}/db/flush_job.cc"
-        "${ROCKSDB_SOURCE_DIR}/db/flush_scheduler.cc"
-        "${ROCKSDB_SOURCE_DIR}/db/forward_iterator.cc"
-        "${ROCKSDB_SOURCE_DIR}/db/import_column_family_job.cc"
-        "${ROCKSDB_SOURCE_DIR}/db/internal_stats.cc"
-        "${ROCKSDB_SOURCE_DIR}/db/logs_with_prep_tracker.cc"
-        "${ROCKSDB_SOURCE_DIR}/db/log_reader.cc"
-        "${ROCKSDB_SOURCE_DIR}/db/log_writer.cc"
-        "${ROCKSDB_SOURCE_DIR}/db/malloc_stats.cc"
-        "${ROCKSDB_SOURCE_DIR}/db/memtable.cc"
-        "${ROCKSDB_SOURCE_DIR}/db/memtable_list.cc"
-        "${ROCKSDB_SOURCE_DIR}/db/merge_helper.cc"
-        "${ROCKSDB_SOURCE_DIR}/db/merge_operator.cc"
-        "${ROCKSDB_SOURCE_DIR}/db/output_validator.cc"
-        "${ROCKSDB_SOURCE_DIR}/db/periodic_work_scheduler.cc"
-        "${ROCKSDB_SOURCE_DIR}/db/range_del_aggregator.cc"
-        "${ROCKSDB_SOURCE_DIR}/db/range_tombstone_fragmenter.cc"
-        "${ROCKSDB_SOURCE_DIR}/db/repair.cc"
-        "${ROCKSDB_SOURCE_DIR}/db/snapshot_impl.cc"
-        "${ROCKSDB_SOURCE_DIR}/db/table_cache.cc"
-        "${ROCKSDB_SOURCE_DIR}/db/table_properties_collector.cc"
-        "${ROCKSDB_SOURCE_DIR}/db/transaction_log_impl.cc"
-        "${ROCKSDB_SOURCE_DIR}/db/trim_history_scheduler.cc"
-        "${ROCKSDB_SOURCE_DIR}/db/version_builder.cc"
-        "${ROCKSDB_SOURCE_DIR}/db/version_edit.cc"
-        "${ROCKSDB_SOURCE_DIR}/db/version_edit_handler.cc"
-        "${ROCKSDB_SOURCE_DIR}/db/version_set.cc"
-        "${ROCKSDB_SOURCE_DIR}/db/wal_edit.cc"
-        "${ROCKSDB_SOURCE_DIR}/db/wal_manager.cc"
-        "${ROCKSDB_SOURCE_DIR}/db/write_batch.cc"
-        "${ROCKSDB_SOURCE_DIR}/db/write_batch_base.cc"
-        "${ROCKSDB_SOURCE_DIR}/db/write_controller.cc"
-        "${ROCKSDB_SOURCE_DIR}/db/write_thread.cc"
-        "${ROCKSDB_SOURCE_DIR}/env/env.cc"
-        "${ROCKSDB_SOURCE_DIR}/env/env_chroot.cc"
-        "${ROCKSDB_SOURCE_DIR}/env/env_encryption.cc"
-        "${ROCKSDB_SOURCE_DIR}/env/env_hdfs.cc"
-        "${ROCKSDB_SOURCE_DIR}/env/file_system.cc"
-        "${ROCKSDB_SOURCE_DIR}/env/file_system_tracer.cc"
-        "${ROCKSDB_SOURCE_DIR}/env/mock_env.cc"
-        "${ROCKSDB_SOURCE_DIR}/file/delete_scheduler.cc"
-        "${ROCKSDB_SOURCE_DIR}/file/file_prefetch_buffer.cc"
-        "${ROCKSDB_SOURCE_DIR}/file/file_util.cc"
-        "${ROCKSDB_SOURCE_DIR}/file/filename.cc"
-        "${ROCKSDB_SOURCE_DIR}/file/random_access_file_reader.cc"
-        "${ROCKSDB_SOURCE_DIR}/file/read_write_util.cc"
-        "${ROCKSDB_SOURCE_DIR}/file/readahead_raf.cc"
-        "${ROCKSDB_SOURCE_DIR}/file/sequence_file_reader.cc"
-        "${ROCKSDB_SOURCE_DIR}/file/sst_file_manager_impl.cc"
-        "${ROCKSDB_SOURCE_DIR}/file/writable_file_writer.cc"
-        "${ROCKSDB_SOURCE_DIR}/logging/auto_roll_logger.cc"
-        "${ROCKSDB_SOURCE_DIR}/logging/event_logger.cc"
-        "${ROCKSDB_SOURCE_DIR}/logging/log_buffer.cc"
-        "${ROCKSDB_SOURCE_DIR}/memory/arena.cc"
-        "${ROCKSDB_SOURCE_DIR}/memory/concurrent_arena.cc"
-        "${ROCKSDB_SOURCE_DIR}/memory/jemalloc_nodump_allocator.cc"
-        "${ROCKSDB_SOURCE_DIR}/memory/memkind_kmem_allocator.cc"
-        "${ROCKSDB_SOURCE_DIR}/memtable/alloc_tracker.cc"
-        "${ROCKSDB_SOURCE_DIR}/memtable/hash_linklist_rep.cc"
-        "${ROCKSDB_SOURCE_DIR}/memtable/hash_skiplist_rep.cc"
-        "${ROCKSDB_SOURCE_DIR}/memtable/skiplistrep.cc"
-        "${ROCKSDB_SOURCE_DIR}/memtable/vectorrep.cc"
-        "${ROCKSDB_SOURCE_DIR}/memtable/write_buffer_manager.cc"
-        "${ROCKSDB_SOURCE_DIR}/monitoring/histogram.cc"
-        "${ROCKSDB_SOURCE_DIR}/monitoring/histogram_windowing.cc"
-        "${ROCKSDB_SOURCE_DIR}/monitoring/in_memory_stats_history.cc"
-        "${ROCKSDB_SOURCE_DIR}/monitoring/instrumented_mutex.cc"
-        "${ROCKSDB_SOURCE_DIR}/monitoring/iostats_context.cc"
-        "${ROCKSDB_SOURCE_DIR}/monitoring/perf_context.cc"
-        "${ROCKSDB_SOURCE_DIR}/monitoring/perf_level.cc"
-        "${ROCKSDB_SOURCE_DIR}/monitoring/persistent_stats_history.cc"
-        "${ROCKSDB_SOURCE_DIR}/monitoring/statistics.cc"
-        "${ROCKSDB_SOURCE_DIR}/monitoring/thread_status_impl.cc"
-        "${ROCKSDB_SOURCE_DIR}/monitoring/thread_status_updater.cc"
-        "${ROCKSDB_SOURCE_DIR}/monitoring/thread_status_util.cc"
-        "${ROCKSDB_SOURCE_DIR}/monitoring/thread_status_util_debug.cc"
-        "${ROCKSDB_SOURCE_DIR}/options/cf_options.cc"
-        "${ROCKSDB_SOURCE_DIR}/options/configurable.cc"
-        "${ROCKSDB_SOURCE_DIR}/options/customizable.cc"
-        "${ROCKSDB_SOURCE_DIR}/options/db_options.cc"
-        "${ROCKSDB_SOURCE_DIR}/options/options.cc"
-        "${ROCKSDB_SOURCE_DIR}/options/options_helper.cc"
-        "${ROCKSDB_SOURCE_DIR}/options/options_parser.cc"
-        "${ROCKSDB_SOURCE_DIR}/port/stack_trace.cc"
-        "${ROCKSDB_SOURCE_DIR}/table/adaptive/adaptive_table_factory.cc"
-        "${ROCKSDB_SOURCE_DIR}/table/block_based/binary_search_index_reader.cc"
-        "${ROCKSDB_SOURCE_DIR}/table/block_based/block.cc"
-        "${ROCKSDB_SOURCE_DIR}/table/block_based/block_based_filter_block.cc"
-        "${ROCKSDB_SOURCE_DIR}/table/block_based/block_based_table_builder.cc"
-        "${ROCKSDB_SOURCE_DIR}/table/block_based/block_based_table_factory.cc"
-        "${ROCKSDB_SOURCE_DIR}/table/block_based/block_based_table_iterator.cc"
-        "${ROCKSDB_SOURCE_DIR}/table/block_based/block_based_table_reader.cc"
-        "${ROCKSDB_SOURCE_DIR}/table/block_based/block_builder.cc"
-        "${ROCKSDB_SOURCE_DIR}/table/block_based/block_prefetcher.cc"
-        "${ROCKSDB_SOURCE_DIR}/table/block_based/block_prefix_index.cc"
-        "${ROCKSDB_SOURCE_DIR}/table/block_based/data_block_hash_index.cc"
-        "${ROCKSDB_SOURCE_DIR}/table/block_based/data_block_footer.cc"
-        "${ROCKSDB_SOURCE_DIR}/table/block_based/filter_block_reader_common.cc"
-        "${ROCKSDB_SOURCE_DIR}/table/block_based/filter_policy.cc"
-        "${ROCKSDB_SOURCE_DIR}/table/block_based/flush_block_policy.cc"
-        "${ROCKSDB_SOURCE_DIR}/table/block_based/full_filter_block.cc"
-        "${ROCKSDB_SOURCE_DIR}/table/block_based/hash_index_reader.cc"
-        "${ROCKSDB_SOURCE_DIR}/table/block_based/index_builder.cc"
-        "${ROCKSDB_SOURCE_DIR}/table/block_based/index_reader_common.cc"
-        "${ROCKSDB_SOURCE_DIR}/table/block_based/parsed_full_filter_block.cc"
-        "${ROCKSDB_SOURCE_DIR}/table/block_based/partitioned_filter_block.cc"
-        "${ROCKSDB_SOURCE_DIR}/table/block_based/partitioned_index_iterator.cc"
-        "${ROCKSDB_SOURCE_DIR}/table/block_based/partitioned_index_reader.cc"
-        "${ROCKSDB_SOURCE_DIR}/table/block_based/reader_common.cc"
-        "${ROCKSDB_SOURCE_DIR}/table/block_based/uncompression_dict_reader.cc"
-        "${ROCKSDB_SOURCE_DIR}/table/block_fetcher.cc"
-        "${ROCKSDB_SOURCE_DIR}/table/cuckoo/cuckoo_table_builder.cc"
-        "${ROCKSDB_SOURCE_DIR}/table/cuckoo/cuckoo_table_factory.cc"
-        "${ROCKSDB_SOURCE_DIR}/table/cuckoo/cuckoo_table_reader.cc"
-        "${ROCKSDB_SOURCE_DIR}/table/format.cc"
-        "${ROCKSDB_SOURCE_DIR}/table/get_context.cc"
-        "${ROCKSDB_SOURCE_DIR}/table/iterator.cc"
-        "${ROCKSDB_SOURCE_DIR}/table/merging_iterator.cc"
-        "${ROCKSDB_SOURCE_DIR}/table/meta_blocks.cc"
-        "${ROCKSDB_SOURCE_DIR}/table/persistent_cache_helper.cc"
-        "${ROCKSDB_SOURCE_DIR}/table/plain/plain_table_bloom.cc"
-        "${ROCKSDB_SOURCE_DIR}/table/plain/plain_table_builder.cc"
-        "${ROCKSDB_SOURCE_DIR}/table/plain/plain_table_factory.cc"
-        "${ROCKSDB_SOURCE_DIR}/table/plain/plain_table_index.cc"
-        "${ROCKSDB_SOURCE_DIR}/table/plain/plain_table_key_coding.cc"
-        "${ROCKSDB_SOURCE_DIR}/table/plain/plain_table_reader.cc"
-        "${ROCKSDB_SOURCE_DIR}/table/sst_file_dumper.cc"
-        "${ROCKSDB_SOURCE_DIR}/table/sst_file_reader.cc"
-        "${ROCKSDB_SOURCE_DIR}/table/sst_file_writer.cc"
-        "${ROCKSDB_SOURCE_DIR}/table/table_factory.cc"
-        "${ROCKSDB_SOURCE_DIR}/table/table_properties.cc"
-        "${ROCKSDB_SOURCE_DIR}/table/two_level_iterator.cc"
-        "${ROCKSDB_SOURCE_DIR}/test_util/sync_point.cc"
-        "${ROCKSDB_SOURCE_DIR}/test_util/sync_point_impl.cc"
-        "${ROCKSDB_SOURCE_DIR}/test_util/testutil.cc"
-        "${ROCKSDB_SOURCE_DIR}/test_util/transaction_test_util.cc"
-        "${ROCKSDB_SOURCE_DIR}/tools/block_cache_analyzer/block_cache_trace_analyzer.cc"
-        "${ROCKSDB_SOURCE_DIR}/tools/dump/db_dump_tool.cc"
-        "${ROCKSDB_SOURCE_DIR}/tools/io_tracer_parser_tool.cc"
-        "${ROCKSDB_SOURCE_DIR}/tools/ldb_cmd.cc"
-        "${ROCKSDB_SOURCE_DIR}/tools/ldb_tool.cc"
-        "${ROCKSDB_SOURCE_DIR}/tools/sst_dump_tool.cc"
-        "${ROCKSDB_SOURCE_DIR}/tools/trace_analyzer_tool.cc"
-        "${ROCKSDB_SOURCE_DIR}/trace_replay/trace_replay.cc"
-        "${ROCKSDB_SOURCE_DIR}/trace_replay/block_cache_tracer.cc"
-        "${ROCKSDB_SOURCE_DIR}/trace_replay/io_tracer.cc"
-        "${ROCKSDB_SOURCE_DIR}/util/coding.cc"
-        "${ROCKSDB_SOURCE_DIR}/util/compaction_job_stats_impl.cc"
-        "${ROCKSDB_SOURCE_DIR}/util/comparator.cc"
-        "${ROCKSDB_SOURCE_DIR}/util/compression_context_cache.cc"
-        "${ROCKSDB_SOURCE_DIR}/util/concurrent_task_limiter_impl.cc"
-        "${ROCKSDB_SOURCE_DIR}/util/crc32c.cc"
-        "${ROCKSDB_SOURCE_DIR}/util/dynamic_bloom.cc"
-        "${ROCKSDB_SOURCE_DIR}/util/hash.cc"
-        "${ROCKSDB_SOURCE_DIR}/util/murmurhash.cc"
-        "${ROCKSDB_SOURCE_DIR}/util/random.cc"
-        "${ROCKSDB_SOURCE_DIR}/util/rate_limiter.cc"
-        "${ROCKSDB_SOURCE_DIR}/util/slice.cc"
-        "${ROCKSDB_SOURCE_DIR}/util/file_checksum_helper.cc"
-        "${ROCKSDB_SOURCE_DIR}/util/status.cc"
-        "${ROCKSDB_SOURCE_DIR}/util/string_util.cc"
-        "${ROCKSDB_SOURCE_DIR}/util/thread_local.cc"
-        "${ROCKSDB_SOURCE_DIR}/util/threadpool_imp.cc"
-        "${ROCKSDB_SOURCE_DIR}/util/xxhash.cc"
-        "${ROCKSDB_SOURCE_DIR}/utilities/backupable/backupable_db.cc"
-        "${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_compaction_filter.cc"
-        "${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_db.cc"
-        "${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_db_impl.cc"
-        "${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_db_impl_filesnapshot.cc"
-        "${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_dump_tool.cc"
-        "${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_file.cc"
-        "${ROCKSDB_SOURCE_DIR}/utilities/cassandra/cassandra_compaction_filter.cc"
-        "${ROCKSDB_SOURCE_DIR}/utilities/cassandra/format.cc"
-        "${ROCKSDB_SOURCE_DIR}/utilities/cassandra/merge_operator.cc"
-        "${ROCKSDB_SOURCE_DIR}/utilities/checkpoint/checkpoint_impl.cc"
-        "${ROCKSDB_SOURCE_DIR}/utilities/compaction_filters/remove_emptyvalue_compactionfilter.cc"
-        "${ROCKSDB_SOURCE_DIR}/utilities/debug.cc"
-        "${ROCKSDB_SOURCE_DIR}/utilities/env_mirror.cc"
-        "${ROCKSDB_SOURCE_DIR}/utilities/env_timed.cc"
-        "${ROCKSDB_SOURCE_DIR}/utilities/fault_injection_env.cc"
-        "${ROCKSDB_SOURCE_DIR}/utilities/fault_injection_fs.cc"
-        "${ROCKSDB_SOURCE_DIR}/utilities/leveldb_options/leveldb_options.cc"
-        "${ROCKSDB_SOURCE_DIR}/utilities/memory/memory_util.cc"
-        "${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/bytesxor.cc"
-        "${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/max.cc"
-        "${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/put.cc"
-        "${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/sortlist.cc"
-        "${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/string_append/stringappend.cc"
-        "${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/string_append/stringappend2.cc"
-        "${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/uint64add.cc"
-        "${ROCKSDB_SOURCE_DIR}/utilities/object_registry.cc"
-        "${ROCKSDB_SOURCE_DIR}/utilities/option_change_migration/option_change_migration.cc"
-        "${ROCKSDB_SOURCE_DIR}/utilities/options/options_util.cc"
-        "${ROCKSDB_SOURCE_DIR}/utilities/persistent_cache/block_cache_tier.cc"
-        "${ROCKSDB_SOURCE_DIR}/utilities/persistent_cache/block_cache_tier_file.cc"
-        "${ROCKSDB_SOURCE_DIR}/utilities/persistent_cache/block_cache_tier_metadata.cc"
-        "${ROCKSDB_SOURCE_DIR}/utilities/persistent_cache/persistent_cache_tier.cc"
-        "${ROCKSDB_SOURCE_DIR}/utilities/persistent_cache/volatile_tier_impl.cc"
-        "${ROCKSDB_SOURCE_DIR}/utilities/simulator_cache/cache_simulator.cc"
-        "${ROCKSDB_SOURCE_DIR}/utilities/simulator_cache/sim_cache.cc"
-        "${ROCKSDB_SOURCE_DIR}/utilities/table_properties_collectors/compact_on_deletion_collector.cc"
-        "${ROCKSDB_SOURCE_DIR}/utilities/trace/file_trace_reader_writer.cc"
-        "${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/lock_manager.cc"
-        "${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/point/point_lock_tracker.cc"
-        "${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/point/point_lock_manager.cc"
-        "${ROCKSDB_SOURCE_DIR}/utilities/transactions/optimistic_transaction_db_impl.cc"
-        "${ROCKSDB_SOURCE_DIR}/utilities/transactions/optimistic_transaction.cc"
-        "${ROCKSDB_SOURCE_DIR}/utilities/transactions/pessimistic_transaction.cc"
-        "${ROCKSDB_SOURCE_DIR}/utilities/transactions/pessimistic_transaction_db.cc"
-        "${ROCKSDB_SOURCE_DIR}/utilities/transactions/snapshot_checker.cc"
-        "${ROCKSDB_SOURCE_DIR}/utilities/transactions/transaction_base.cc"
-        "${ROCKSDB_SOURCE_DIR}/utilities/transactions/transaction_db_mutex_impl.cc"
-        "${ROCKSDB_SOURCE_DIR}/utilities/transactions/transaction_util.cc"
-        "${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_prepared_txn.cc"
-        "${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_prepared_txn_db.cc"
-        "${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_unprepared_txn.cc"
-        "${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_unprepared_txn_db.cc"
-        "${ROCKSDB_SOURCE_DIR}/utilities/ttl/db_ttl_impl.cc"
-        "${ROCKSDB_SOURCE_DIR}/utilities/write_batch_with_index/write_batch_with_index.cc"
-        "${ROCKSDB_SOURCE_DIR}/utilities/write_batch_with_index/write_batch_with_index_internal.cc"
-        $<TARGET_OBJECTS:rocksdb_build_version>)
+        ${ROCKSDB_SOURCE_DIR}/cache/cache.cc
+        ${ROCKSDB_SOURCE_DIR}/cache/cache_entry_roles.cc
+        ${ROCKSDB_SOURCE_DIR}/cache/clock_cache.cc
+        ${ROCKSDB_SOURCE_DIR}/cache/lru_cache.cc
+        ${ROCKSDB_SOURCE_DIR}/cache/sharded_cache.cc
+        ${ROCKSDB_SOURCE_DIR}/db/arena_wrapped_db_iter.cc
+        ${ROCKSDB_SOURCE_DIR}/db/blob/blob_fetcher.cc
+        ${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_addition.cc
+        ${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_builder.cc
+        ${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_cache.cc
+        ${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_garbage.cc
+        ${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_meta.cc
+        ${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_reader.cc
+        ${ROCKSDB_SOURCE_DIR}/db/blob/blob_garbage_meter.cc
+        ${ROCKSDB_SOURCE_DIR}/db/blob/blob_log_format.cc
+        ${ROCKSDB_SOURCE_DIR}/db/blob/blob_log_sequential_reader.cc
+        ${ROCKSDB_SOURCE_DIR}/db/blob/blob_log_writer.cc
+        ${ROCKSDB_SOURCE_DIR}/db/builder.cc
+        ${ROCKSDB_SOURCE_DIR}/db/c.cc
+        ${ROCKSDB_SOURCE_DIR}/db/column_family.cc
+        ${ROCKSDB_SOURCE_DIR}/db/compaction/compaction.cc
+        ${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_iterator.cc
+        ${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_picker.cc
+        ${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_job.cc
+        ${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_picker_fifo.cc
+        ${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_picker_level.cc
+        ${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_picker_universal.cc
+        ${ROCKSDB_SOURCE_DIR}/db/compaction/sst_partitioner.cc
+        ${ROCKSDB_SOURCE_DIR}/db/convenience.cc
+        ${ROCKSDB_SOURCE_DIR}/db/db_filesnapshot.cc
+        ${ROCKSDB_SOURCE_DIR}/db/db_impl/compacted_db_impl.cc
+        ${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl.cc
+        ${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_write.cc
+        ${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_compaction_flush.cc
+        ${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_files.cc
+        ${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_open.cc
+        ${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_debug.cc
+        ${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_experimental.cc
+        ${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_readonly.cc
+        ${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_secondary.cc
+        ${ROCKSDB_SOURCE_DIR}/db/db_info_dumper.cc
+        ${ROCKSDB_SOURCE_DIR}/db/db_iter.cc
+        ${ROCKSDB_SOURCE_DIR}/db/dbformat.cc
+        ${ROCKSDB_SOURCE_DIR}/db/error_handler.cc
+        ${ROCKSDB_SOURCE_DIR}/db/event_helpers.cc
+        ${ROCKSDB_SOURCE_DIR}/db/experimental.cc
+        ${ROCKSDB_SOURCE_DIR}/db/external_sst_file_ingestion_job.cc
+        ${ROCKSDB_SOURCE_DIR}/db/file_indexer.cc
+        ${ROCKSDB_SOURCE_DIR}/db/flush_job.cc
+        ${ROCKSDB_SOURCE_DIR}/db/flush_scheduler.cc
+        ${ROCKSDB_SOURCE_DIR}/db/forward_iterator.cc
+        ${ROCKSDB_SOURCE_DIR}/db/import_column_family_job.cc
+        ${ROCKSDB_SOURCE_DIR}/db/internal_stats.cc
+        ${ROCKSDB_SOURCE_DIR}/db/logs_with_prep_tracker.cc
+        ${ROCKSDB_SOURCE_DIR}/db/log_reader.cc
+        ${ROCKSDB_SOURCE_DIR}/db/log_writer.cc
+        ${ROCKSDB_SOURCE_DIR}/db/malloc_stats.cc
+        ${ROCKSDB_SOURCE_DIR}/db/memtable.cc
+        ${ROCKSDB_SOURCE_DIR}/db/memtable_list.cc
+        ${ROCKSDB_SOURCE_DIR}/db/merge_helper.cc
+        ${ROCKSDB_SOURCE_DIR}/db/merge_operator.cc
+        ${ROCKSDB_SOURCE_DIR}/db/output_validator.cc
+        ${ROCKSDB_SOURCE_DIR}/db/periodic_work_scheduler.cc
+        ${ROCKSDB_SOURCE_DIR}/db/range_del_aggregator.cc
+        ${ROCKSDB_SOURCE_DIR}/db/range_tombstone_fragmenter.cc
+        ${ROCKSDB_SOURCE_DIR}/db/repair.cc
+        ${ROCKSDB_SOURCE_DIR}/db/snapshot_impl.cc
+        ${ROCKSDB_SOURCE_DIR}/db/table_cache.cc
+        ${ROCKSDB_SOURCE_DIR}/db/table_properties_collector.cc
+        ${ROCKSDB_SOURCE_DIR}/db/transaction_log_impl.cc
+        ${ROCKSDB_SOURCE_DIR}/db/trim_history_scheduler.cc
+        ${ROCKSDB_SOURCE_DIR}/db/version_builder.cc
+        ${ROCKSDB_SOURCE_DIR}/db/version_edit.cc
+        ${ROCKSDB_SOURCE_DIR}/db/version_edit_handler.cc
+        ${ROCKSDB_SOURCE_DIR}/db/version_set.cc
+        ${ROCKSDB_SOURCE_DIR}/db/wal_edit.cc
+        ${ROCKSDB_SOURCE_DIR}/db/wal_manager.cc
+        ${ROCKSDB_SOURCE_DIR}/db/write_batch.cc
+        ${ROCKSDB_SOURCE_DIR}/db/write_batch_base.cc
+        ${ROCKSDB_SOURCE_DIR}/db/write_controller.cc
+        ${ROCKSDB_SOURCE_DIR}/db/write_thread.cc
+        ${ROCKSDB_SOURCE_DIR}/env/composite_env.cc
+        ${ROCKSDB_SOURCE_DIR}/env/env.cc
+        ${ROCKSDB_SOURCE_DIR}/env/env_chroot.cc
+        ${ROCKSDB_SOURCE_DIR}/env/env_encryption.cc
+        ${ROCKSDB_SOURCE_DIR}/env/env_hdfs.cc
+        ${ROCKSDB_SOURCE_DIR}/env/file_system.cc
+        ${ROCKSDB_SOURCE_DIR}/env/file_system_tracer.cc
+        ${ROCKSDB_SOURCE_DIR}/env/fs_remap.cc
+        ${ROCKSDB_SOURCE_DIR}/env/mock_env.cc
+        ${ROCKSDB_SOURCE_DIR}/file/delete_scheduler.cc
+        ${ROCKSDB_SOURCE_DIR}/file/file_prefetch_buffer.cc
+        ${ROCKSDB_SOURCE_DIR}/file/file_util.cc
+        ${ROCKSDB_SOURCE_DIR}/file/filename.cc
+        ${ROCKSDB_SOURCE_DIR}/file/line_file_reader.cc
+        ${ROCKSDB_SOURCE_DIR}/file/random_access_file_reader.cc
+        ${ROCKSDB_SOURCE_DIR}/file/read_write_util.cc
+        ${ROCKSDB_SOURCE_DIR}/file/readahead_raf.cc
+        ${ROCKSDB_SOURCE_DIR}/file/sequence_file_reader.cc
+        ${ROCKSDB_SOURCE_DIR}/file/sst_file_manager_impl.cc
+        ${ROCKSDB_SOURCE_DIR}/file/writable_file_writer.cc
+        ${ROCKSDB_SOURCE_DIR}/logging/auto_roll_logger.cc
+        ${ROCKSDB_SOURCE_DIR}/logging/event_logger.cc
+        ${ROCKSDB_SOURCE_DIR}/logging/log_buffer.cc
+        ${ROCKSDB_SOURCE_DIR}/memory/arena.cc
+        ${ROCKSDB_SOURCE_DIR}/memory/concurrent_arena.cc
+        ${ROCKSDB_SOURCE_DIR}/memory/jemalloc_nodump_allocator.cc
+        ${ROCKSDB_SOURCE_DIR}/memory/memkind_kmem_allocator.cc
+        ${ROCKSDB_SOURCE_DIR}/memtable/alloc_tracker.cc
+        ${ROCKSDB_SOURCE_DIR}/memtable/hash_linklist_rep.cc
+        ${ROCKSDB_SOURCE_DIR}/memtable/hash_skiplist_rep.cc
+        ${ROCKSDB_SOURCE_DIR}/memtable/skiplistrep.cc
+        ${ROCKSDB_SOURCE_DIR}/memtable/vectorrep.cc
+        ${ROCKSDB_SOURCE_DIR}/memtable/write_buffer_manager.cc
+        ${ROCKSDB_SOURCE_DIR}/monitoring/histogram.cc
+        ${ROCKSDB_SOURCE_DIR}/monitoring/histogram_windowing.cc
+        ${ROCKSDB_SOURCE_DIR}/monitoring/in_memory_stats_history.cc
+        ${ROCKSDB_SOURCE_DIR}/monitoring/instrumented_mutex.cc
+        ${ROCKSDB_SOURCE_DIR}/monitoring/iostats_context.cc
+        ${ROCKSDB_SOURCE_DIR}/monitoring/perf_context.cc
+        ${ROCKSDB_SOURCE_DIR}/monitoring/perf_level.cc
+        ${ROCKSDB_SOURCE_DIR}/monitoring/persistent_stats_history.cc
+        ${ROCKSDB_SOURCE_DIR}/monitoring/statistics.cc
+        ${ROCKSDB_SOURCE_DIR}/monitoring/thread_status_impl.cc
+        ${ROCKSDB_SOURCE_DIR}/monitoring/thread_status_updater.cc
+        ${ROCKSDB_SOURCE_DIR}/monitoring/thread_status_util.cc
+        ${ROCKSDB_SOURCE_DIR}/monitoring/thread_status_util_debug.cc
+        ${ROCKSDB_SOURCE_DIR}/options/cf_options.cc
+        ${ROCKSDB_SOURCE_DIR}/options/configurable.cc
+        ${ROCKSDB_SOURCE_DIR}/options/customizable.cc
+        ${ROCKSDB_SOURCE_DIR}/options/db_options.cc
+        ${ROCKSDB_SOURCE_DIR}/options/options.cc
+        ${ROCKSDB_SOURCE_DIR}/options/options_helper.cc
+        ${ROCKSDB_SOURCE_DIR}/options/options_parser.cc
+        ${ROCKSDB_SOURCE_DIR}/port/stack_trace.cc
+        ${ROCKSDB_SOURCE_DIR}/table/adaptive/adaptive_table_factory.cc
+        ${ROCKSDB_SOURCE_DIR}/table/block_based/binary_search_index_reader.cc
+        ${ROCKSDB_SOURCE_DIR}/table/block_based/block.cc
+        ${ROCKSDB_SOURCE_DIR}/table/block_based/block_based_filter_block.cc
+        ${ROCKSDB_SOURCE_DIR}/table/block_based/block_based_table_builder.cc
+        ${ROCKSDB_SOURCE_DIR}/table/block_based/block_based_table_factory.cc
+        ${ROCKSDB_SOURCE_DIR}/table/block_based/block_based_table_iterator.cc
+        ${ROCKSDB_SOURCE_DIR}/table/block_based/block_based_table_reader.cc
+        ${ROCKSDB_SOURCE_DIR}/table/block_based/block_builder.cc
+        ${ROCKSDB_SOURCE_DIR}/table/block_based/block_prefetcher.cc
+        ${ROCKSDB_SOURCE_DIR}/table/block_based/block_prefix_index.cc
+        ${ROCKSDB_SOURCE_DIR}/table/block_based/data_block_hash_index.cc
+        ${ROCKSDB_SOURCE_DIR}/table/block_based/data_block_footer.cc
+        ${ROCKSDB_SOURCE_DIR}/table/block_based/filter_block_reader_common.cc
+        ${ROCKSDB_SOURCE_DIR}/table/block_based/filter_policy.cc
+        ${ROCKSDB_SOURCE_DIR}/table/block_based/flush_block_policy.cc
+        ${ROCKSDB_SOURCE_DIR}/table/block_based/full_filter_block.cc
+        ${ROCKSDB_SOURCE_DIR}/table/block_based/hash_index_reader.cc
+        ${ROCKSDB_SOURCE_DIR}/table/block_based/index_builder.cc
+        ${ROCKSDB_SOURCE_DIR}/table/block_based/index_reader_common.cc
+        ${ROCKSDB_SOURCE_DIR}/table/block_based/parsed_full_filter_block.cc
+        ${ROCKSDB_SOURCE_DIR}/table/block_based/partitioned_filter_block.cc
+        ${ROCKSDB_SOURCE_DIR}/table/block_based/partitioned_index_iterator.cc
+        ${ROCKSDB_SOURCE_DIR}/table/block_based/partitioned_index_reader.cc
+        ${ROCKSDB_SOURCE_DIR}/table/block_based/reader_common.cc
+        ${ROCKSDB_SOURCE_DIR}/table/block_based/uncompression_dict_reader.cc
+        ${ROCKSDB_SOURCE_DIR}/table/block_fetcher.cc
+        ${ROCKSDB_SOURCE_DIR}/table/cuckoo/cuckoo_table_builder.cc
+        ${ROCKSDB_SOURCE_DIR}/table/cuckoo/cuckoo_table_factory.cc
+        ${ROCKSDB_SOURCE_DIR}/table/cuckoo/cuckoo_table_reader.cc
+        ${ROCKSDB_SOURCE_DIR}/table/format.cc
+        ${ROCKSDB_SOURCE_DIR}/table/get_context.cc
+        ${ROCKSDB_SOURCE_DIR}/table/iterator.cc
+        ${ROCKSDB_SOURCE_DIR}/table/merging_iterator.cc
+        ${ROCKSDB_SOURCE_DIR}/table/meta_blocks.cc
+        ${ROCKSDB_SOURCE_DIR}/table/persistent_cache_helper.cc
+        ${ROCKSDB_SOURCE_DIR}/table/plain/plain_table_bloom.cc
+        ${ROCKSDB_SOURCE_DIR}/table/plain/plain_table_builder.cc
+        ${ROCKSDB_SOURCE_DIR}/table/plain/plain_table_factory.cc
+        ${ROCKSDB_SOURCE_DIR}/table/plain/plain_table_index.cc
+        ${ROCKSDB_SOURCE_DIR}/table/plain/plain_table_key_coding.cc
+        ${ROCKSDB_SOURCE_DIR}/table/plain/plain_table_reader.cc
+        ${ROCKSDB_SOURCE_DIR}/table/sst_file_dumper.cc
+        ${ROCKSDB_SOURCE_DIR}/table/sst_file_reader.cc
+        ${ROCKSDB_SOURCE_DIR}/table/sst_file_writer.cc
+        ${ROCKSDB_SOURCE_DIR}/table/table_factory.cc
+        ${ROCKSDB_SOURCE_DIR}/table/table_properties.cc
+        ${ROCKSDB_SOURCE_DIR}/table/two_level_iterator.cc
+        ${ROCKSDB_SOURCE_DIR}/test_util/sync_point.cc
+        ${ROCKSDB_SOURCE_DIR}/test_util/sync_point_impl.cc
+        ${ROCKSDB_SOURCE_DIR}/test_util/testutil.cc
+        ${ROCKSDB_SOURCE_DIR}/test_util/transaction_test_util.cc
+        ${ROCKSDB_SOURCE_DIR}/tools/block_cache_analyzer/block_cache_trace_analyzer.cc
+        ${ROCKSDB_SOURCE_DIR}/tools/dump/db_dump_tool.cc
+        ${ROCKSDB_SOURCE_DIR}/tools/io_tracer_parser_tool.cc
+        ${ROCKSDB_SOURCE_DIR}/tools/ldb_cmd.cc
+        ${ROCKSDB_SOURCE_DIR}/tools/ldb_tool.cc
+        ${ROCKSDB_SOURCE_DIR}/tools/sst_dump_tool.cc
+        ${ROCKSDB_SOURCE_DIR}/tools/trace_analyzer_tool.cc
+        ${ROCKSDB_SOURCE_DIR}/trace_replay/trace_replay.cc
+        ${ROCKSDB_SOURCE_DIR}/trace_replay/block_cache_tracer.cc
+        ${ROCKSDB_SOURCE_DIR}/trace_replay/io_tracer.cc
+        ${ROCKSDB_SOURCE_DIR}/util/coding.cc
+        ${ROCKSDB_SOURCE_DIR}/util/compaction_job_stats_impl.cc
+        ${ROCKSDB_SOURCE_DIR}/util/comparator.cc
+        ${ROCKSDB_SOURCE_DIR}/util/compression_context_cache.cc
+        ${ROCKSDB_SOURCE_DIR}/util/concurrent_task_limiter_impl.cc
+        ${ROCKSDB_SOURCE_DIR}/util/crc32c.cc
+        ${ROCKSDB_SOURCE_DIR}/util/dynamic_bloom.cc
+        ${ROCKSDB_SOURCE_DIR}/util/hash.cc
+        ${ROCKSDB_SOURCE_DIR}/util/murmurhash.cc
+        ${ROCKSDB_SOURCE_DIR}/util/random.cc
+        ${ROCKSDB_SOURCE_DIR}/util/rate_limiter.cc
+        ${ROCKSDB_SOURCE_DIR}/util/ribbon_config.cc
+        ${ROCKSDB_SOURCE_DIR}/util/slice.cc
+        ${ROCKSDB_SOURCE_DIR}/util/file_checksum_helper.cc
+        ${ROCKSDB_SOURCE_DIR}/util/status.cc
+        ${ROCKSDB_SOURCE_DIR}/util/string_util.cc
+        ${ROCKSDB_SOURCE_DIR}/util/thread_local.cc
+        ${ROCKSDB_SOURCE_DIR}/util/threadpool_imp.cc
+        ${ROCKSDB_SOURCE_DIR}/util/xxhash.cc
+        ${ROCKSDB_SOURCE_DIR}/utilities/backupable/backupable_db.cc
+        ${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_compaction_filter.cc
+        ${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_db.cc
+        ${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_db_impl.cc
+        ${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_db_impl_filesnapshot.cc
+        ${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_dump_tool.cc
+        ${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_file.cc
+        ${ROCKSDB_SOURCE_DIR}/utilities/cassandra/cassandra_compaction_filter.cc
+        ${ROCKSDB_SOURCE_DIR}/utilities/cassandra/format.cc
+        ${ROCKSDB_SOURCE_DIR}/utilities/cassandra/merge_operator.cc
+        ${ROCKSDB_SOURCE_DIR}/utilities/checkpoint/checkpoint_impl.cc
+        ${ROCKSDB_SOURCE_DIR}/utilities/compaction_filters/remove_emptyvalue_compactionfilter.cc
+        ${ROCKSDB_SOURCE_DIR}/utilities/debug.cc
+        ${ROCKSDB_SOURCE_DIR}/utilities/env_mirror.cc
+        ${ROCKSDB_SOURCE_DIR}/utilities/env_timed.cc
+        ${ROCKSDB_SOURCE_DIR}/utilities/fault_injection_env.cc
+        ${ROCKSDB_SOURCE_DIR}/utilities/fault_injection_fs.cc
+        ${ROCKSDB_SOURCE_DIR}/utilities/leveldb_options/leveldb_options.cc
+        ${ROCKSDB_SOURCE_DIR}/utilities/memory/memory_util.cc
+        ${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/bytesxor.cc
+        ${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/max.cc
+        ${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/put.cc
+        ${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/sortlist.cc
+        ${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/string_append/stringappend.cc
+        ${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/string_append/stringappend2.cc
+        ${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/uint64add.cc
+        ${ROCKSDB_SOURCE_DIR}/utilities/object_registry.cc
+        ${ROCKSDB_SOURCE_DIR}/utilities/option_change_migration/option_change_migration.cc
+        ${ROCKSDB_SOURCE_DIR}/utilities/options/options_util.cc
+        ${ROCKSDB_SOURCE_DIR}/utilities/persistent_cache/block_cache_tier.cc
+        ${ROCKSDB_SOURCE_DIR}/utilities/persistent_cache/block_cache_tier_file.cc
+        ${ROCKSDB_SOURCE_DIR}/utilities/persistent_cache/block_cache_tier_metadata.cc
+        ${ROCKSDB_SOURCE_DIR}/utilities/persistent_cache/persistent_cache_tier.cc
+        ${ROCKSDB_SOURCE_DIR}/utilities/persistent_cache/volatile_tier_impl.cc
+        ${ROCKSDB_SOURCE_DIR}/utilities/simulator_cache/cache_simulator.cc
+        ${ROCKSDB_SOURCE_DIR}/utilities/simulator_cache/sim_cache.cc
+        ${ROCKSDB_SOURCE_DIR}/utilities/table_properties_collectors/compact_on_deletion_collector.cc
+        ${ROCKSDB_SOURCE_DIR}/utilities/trace/file_trace_reader_writer.cc
+        ${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/lock_manager.cc
+        ${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/point/point_lock_tracker.cc
+        ${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/point/point_lock_manager.cc
+        ${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/range_tree_lock_manager.cc
+        ${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/range_tree_lock_tracker.cc
+        ${ROCKSDB_SOURCE_DIR}/utilities/transactions/optimistic_transaction_db_impl.cc
+        ${ROCKSDB_SOURCE_DIR}/utilities/transactions/optimistic_transaction.cc
+        ${ROCKSDB_SOURCE_DIR}/utilities/transactions/pessimistic_transaction.cc
+        ${ROCKSDB_SOURCE_DIR}/utilities/transactions/pessimistic_transaction_db.cc
+        ${ROCKSDB_SOURCE_DIR}/utilities/transactions/snapshot_checker.cc
+        ${ROCKSDB_SOURCE_DIR}/utilities/transactions/transaction_base.cc
+        ${ROCKSDB_SOURCE_DIR}/utilities/transactions/transaction_db_mutex_impl.cc
+        ${ROCKSDB_SOURCE_DIR}/utilities/transactions/transaction_util.cc
+        ${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_prepared_txn.cc
+        ${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_prepared_txn_db.cc
+        ${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_unprepared_txn.cc
+        ${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_unprepared_txn_db.cc
+        ${ROCKSDB_SOURCE_DIR}/utilities/ttl/db_ttl_impl.cc
+        ${ROCKSDB_SOURCE_DIR}/utilities/write_batch_with_index/write_batch_with_index.cc
+        ${ROCKSDB_SOURCE_DIR}/utilities/write_batch_with_index/write_batch_with_index_internal.cc
+        ${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/lib/locktree/concurrent_tree.cc
+        ${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/lib/locktree/keyrange.cc
+        ${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/lib/locktree/lock_request.cc
+        ${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/lib/locktree/locktree.cc
+        ${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/lib/locktree/manager.cc
+        ${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/lib/locktree/range_buffer.cc
+        ${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/lib/locktree/treenode.cc
+        ${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/lib/locktree/txnid_set.cc
+        ${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/lib/locktree/wfg.cc
+        ${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/lib/standalone_port.cc
+        ${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/lib/util/dbt.cc
+        ${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/lib/util/memarena.cc
+        rocksdb_build_version.cc)
 
 if(HAVE_SSE42 AND NOT MSVC)
 set_source_files_properties(
@@ -1,3 +1,62 @@
-const char* rocksdb_build_git_sha = "rocksdb_build_git_sha:0";
-const char* rocksdb_build_git_date = "rocksdb_build_git_date:2000-01-01";
-const char* rocksdb_build_compile_date = "2000-01-01";
+// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
+/// This file was edited for ClickHouse.
+
+#include <memory>
+
+#include "rocksdb/version.h"
+#include "util/string_util.h"
+
+// The build script may replace these values with real values based
+// on whether or not GIT is available and the platform settings
+static const std::string rocksdb_build_git_sha = "rocksdb_build_git_sha:0";
+static const std::string rocksdb_build_git_tag = "rocksdb_build_git_tag:master";
+static const std::string rocksdb_build_date = "rocksdb_build_date:2000-01-01";
+
+namespace ROCKSDB_NAMESPACE {
+static void AddProperty(std::unordered_map<std::string, std::string> *props, const std::string& name) {
+  size_t colon = name.find(":");
+  if (colon != std::string::npos && colon > 0 && colon < name.length() - 1) {
+    // If we found a "@:", then this property was a build-time substitution that failed. Skip it
+    size_t at = name.find("@", colon);
+    if (at != colon + 1) {
+      // Everything before the colon is the name, after is the value
+      (*props)[name.substr(0, colon)] = name.substr(colon + 1);
+    }
+  }
+}
+
+static std::unordered_map<std::string, std::string>* LoadPropertiesSet() {
+  auto * properties = new std::unordered_map<std::string, std::string>();
+  AddProperty(properties, rocksdb_build_git_sha);
+  AddProperty(properties, rocksdb_build_git_tag);
+  AddProperty(properties, rocksdb_build_date);
+  return properties;
+}
+
+const std::unordered_map<std::string, std::string>& GetRocksBuildProperties() {
+  static std::unique_ptr<std::unordered_map<std::string, std::string>> props(LoadPropertiesSet());
+  return *props;
+}
+
+std::string GetRocksVersionAsString(bool with_patch) {
+  std::string version = ToString(ROCKSDB_MAJOR) + "." + ToString(ROCKSDB_MINOR);
+  if (with_patch) {
+    return version + "." + ToString(ROCKSDB_PATCH);
+  } else {
+    return version;
+  }
+}
+
+std::string GetRocksBuildInfoAsString(const std::string& program, bool verbose) {
+  std::string info = program + " (RocksDB) " + GetRocksVersionAsString(true);
+  if (verbose) {
+    for (const auto& it : GetRocksBuildProperties()) {
+      info.append("\n ");
+      info.append(it.first);
+      info.append(": ");
+      info.append(it.second);
+    }
+  }
+  return info;
+}
+} // namespace ROCKSDB_NAMESPACE

debian/changelog vendored
@@ -1,5 +1,5 @@
-clickhouse (21.9.1.1) unstable; urgency=low
+clickhouse (21.10.1.1) unstable; urgency=low
 
   * Modified source code
 
- -- clickhouse-release <clickhouse-release@yandex-team.ru> Sat, 10 Jul 2021 08:22:49 +0300
+ -- clickhouse-release <clickhouse-release@yandex-team.ru> Sat, 17 Jul 2021 08:45:03 +0300
@@ -1,7 +1,7 @@
 FROM ubuntu:18.04
 
 ARG repository="deb https://repo.clickhouse.tech/deb/stable/ main/"
-ARG version=21.9.1.*
+ARG version=21.10.1.*
 
 RUN apt-get update \
     && apt-get install --yes --no-install-recommends \
@@ -1,7 +1,7 @@
 FROM ubuntu:20.04
 
 ARG repository="deb https://repo.clickhouse.tech/deb/stable/ main/"
-ARG version=21.9.1.*
+ARG version=21.10.1.*
 ARG gosu_ver=1.10
 
 # set non-empty deb_location_url url to create a docker image
@@ -1,7 +1,7 @@
 FROM ubuntu:18.04
 
 ARG repository="deb https://repo.clickhouse.tech/deb/stable/ main/"
-ARG version=21.9.1.*
+ARG version=21.10.1.*
 
 RUN apt-get update && \
     apt-get install -y apt-transport-https dirmngr && \
@@ -37,6 +37,14 @@ Also, it accepts the following settings:
 
 - `max_delay_to_insert` - max delay of inserting data into Distributed table in seconds, if there are a lot of pending bytes for async send. Default 60.
 
+- `monitor_batch_inserts` - same as [distributed_directory_monitor_batch_inserts](../../../operations/settings/settings.md#distributed_directory_monitor_batch_inserts)
+
+- `monitor_split_batch_on_failure` - same as [distributed_directory_monitor_split_batch_on_failure](../../../operations/settings/settings.md#distributed_directory_monitor_split_batch_on_failure)
+
+- `monitor_sleep_time_ms` - same as [distributed_directory_monitor_sleep_time_ms](../../../operations/settings/settings.md#distributed_directory_monitor_sleep_time_ms)
+
+- `monitor_max_sleep_time_ms` - same as [distributed_directory_monitor_max_sleep_time_ms](../../../operations/settings/settings.md#distributed_directory_monitor_max_sleep_time_ms)
+
 !!! note "Note"
 
 **Durability settings** (`fsync_...`):
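For context, the `monitor_*` settings documented in the hunk above are engine-level settings, applied in the `SETTINGS` clause of a `Distributed` table definition. A minimal sketch (cluster, database, and table names here are hypothetical):

```sql
CREATE TABLE default.hits_all AS default.hits
ENGINE = Distributed(my_cluster, default, hits, rand())
SETTINGS
    monitor_batch_inserts = 1,         -- batch pending files for async INSERTs
    monitor_sleep_time_ms = 100,       -- initial pause between send attempts
    monitor_max_sleep_time_ms = 30000; -- upper bound for the backoff
```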
@@ -26,6 +26,7 @@
 #include <boost/algorithm/string/replace.hpp>
 #include <Poco/String.h>
 #include <Poco/Util/Application.h>
+#include <Columns/ColumnString.h>
 #include <common/find_symbols.h>
 #include <common/LineReader.h>
 #include <Common/ClickHouseRevision.h>
@@ -301,26 +302,9 @@ private:
         }
         catch (const Exception & e)
         {
-            bool print_stack_trace = config().getBool("stacktrace", false);
+            bool print_stack_trace = config().getBool("stacktrace", false) && e.code() != ErrorCodes::NETWORK_ERROR;
 
-            std::string text = e.displayText();
+            std::cerr << getExceptionMessage(e, print_stack_trace, true) << std::endl << std::endl;
 
-            /** If exception is received from server, then stack trace is embedded in message.
-              * If exception is thrown on client, then stack trace is in separate field.
-              */
-
-            auto embedded_stack_trace_pos = text.find("Stack trace");
-            if (std::string::npos != embedded_stack_trace_pos && !print_stack_trace)
-                text.resize(embedded_stack_trace_pos);
-
-            std::cerr << "Code: " << e.code() << ". " << text << std::endl << std::endl;
-
-            /// Don't print the stack trace on the client if it was logged on the server.
-            /// Also don't print the stack trace in case of network errors.
-            if (print_stack_trace && e.code() != ErrorCodes::NETWORK_ERROR && std::string::npos == embedded_stack_trace_pos)
-            {
-                std::cerr << "Stack trace:" << std::endl << e.getStackTraceString();
-            }
-
             /// If exception code isn't zero, we should return non-zero return code anyway.
             return e.code() ? e.code() : -1;
@@ -487,6 +471,52 @@ private:
     }
 #endif
 
+    /// Make query to get all server warnings
+    std::vector<String> loadWarningMessages()
+    {
+        std::vector<String> messages;
+        connection->sendQuery(connection_parameters.timeouts, "SELECT message FROM system.warnings", "" /* query_id */, QueryProcessingStage::Complete);
+        while (true)
+        {
+            Packet packet = connection->receivePacket();
+            switch (packet.type)
+            {
+                case Protocol::Server::Data:
+                    if (packet.block)
+                    {
+                        const ColumnString & column = typeid_cast<const ColumnString &>(*packet.block.getByPosition(0).column);
+
+                        size_t rows = packet.block.rows();
+                        for (size_t i = 0; i < rows; ++i)
+                            messages.emplace_back(column.getDataAt(i).toString());
+                    }
+                    continue;
+
+                case Protocol::Server::Progress:
+                    continue;
+                case Protocol::Server::ProfileInfo:
+                    continue;
+                case Protocol::Server::Totals:
+                    continue;
+                case Protocol::Server::Extremes:
+                    continue;
+                case Protocol::Server::Log:
+                    continue;
+
+                case Protocol::Server::Exception:
+                    packet.exception->rethrow();
+                    return messages;
+
+                case Protocol::Server::EndOfStream:
+                    return messages;
+
+                default:
+                    throw Exception(ErrorCodes::UNKNOWN_PACKET_FROM_SERVER, "Unknown packet {} from server {}",
+                        packet.type, connection->getDescription());
+            }
+        }
+    }
+
     int mainImpl()
     {
         UseSSL use_ssl;
@@ -565,6 +595,26 @@ private:
             suggest->load(connection_parameters, config().getInt("suggestion_limit"));
         }
 
+        /// Load Warnings at the beginning of connection
+        if (!config().has("no-warnings"))
+        {
+            try
+            {
+                std::vector<String> messages = loadWarningMessages();
+                if (!messages.empty())
+                {
+                    std::cout << "Warnings:" << std::endl;
+                    for (const auto & message : messages)
+                        std::cout << "* " << message << std::endl;
+                }
+                std::cout << std::endl;
+            }
+            catch (...)
+            {
+                /// Ignore exception
+            }
+        }
+
         /// Load command history if present.
         if (config().has("history_file"))
             history_file = config().getString("history_file");
@@ -633,17 +683,10 @@ private:
         }
         catch (const Exception & e)
         {
-            // We don't need to handle the test hints in the interactive
-            // mode.
-            std::cerr << std::endl
-                << "Exception on client:" << std::endl
-                << "Code: " << e.code() << ". " << e.displayText() << std::endl;
-
-            if (config().getBool("stacktrace", false))
-                std::cerr << "Stack trace:" << std::endl << e.getStackTraceString() << std::endl;
-
-            std::cerr << std::endl;
-
+            /// We don't need to handle the test hints in the interactive mode.
+            bool print_stack_trace = config().getBool("stacktrace", false);
+            std::cerr << "Exception on client:" << std::endl << getExceptionMessage(e, print_stack_trace, true) << std::endl << std::endl;
             client_exception = std::make_unique<Exception>(e);
         }
 
@@ -940,19 +983,12 @@ private:
     {
         if (server_exception)
         {
-            std::string text = server_exception->displayText();
-            auto embedded_stack_trace_pos = text.find("Stack trace");
-            if (std::string::npos != embedded_stack_trace_pos && !config().getBool("stacktrace", false))
-            {
-                text.resize(embedded_stack_trace_pos);
-            }
+            bool print_stack_trace = config().getBool("stacktrace", false);
             std::cerr << "Received exception from server (version " << server_version << "):" << std::endl
-                << "Code: " << server_exception->code() << ". " << text << std::endl;
+                << getExceptionMessage(*server_exception, print_stack_trace, true) << std::endl;
             if (is_interactive)
-            {
                 std::cerr << std::endl;
-            }
         }
 
         if (client_exception)
         {
@@ -1410,8 +1446,7 @@ private:
         {
             // Just report it, we'll terminate below.
             fmt::print(stderr,
-                "Error while reconnecting to the server: Code: {}: {}\n",
-                getCurrentExceptionCode(),
+                "Error while reconnecting to the server: {}\n",
                 getCurrentExceptionMessage(true));
 
             assert(!connection->isConnected());
@@ -2529,6 +2564,7 @@ public:
         ("opentelemetry-traceparent", po::value<std::string>(), "OpenTelemetry traceparent header as described by W3C Trace Context recommendation")
         ("opentelemetry-tracestate", po::value<std::string>(), "OpenTelemetry tracestate header as described by W3C Trace Context recommendation")
         ("history_file", po::value<std::string>(), "path to history file")
+        ("no-warnings", "disable warnings when client connects to server")
     ;
 
     Settings cmd_settings;
@@ -2596,8 +2632,7 @@ public:
         }
         catch (const Exception & e)
         {
-            std::string text = e.displayText();
-            std::cerr << "Code: " << e.code() << ". " << text << std::endl;
+            std::cerr << getExceptionMessage(e, false) << std::endl;
             std::cerr << "Table №" << i << std::endl << std::endl;
             /// Avoid the case when error exit code can possibly overflow to normal (zero).
             auto exit_code = e.code() % 256;
@@ -2689,6 +2724,8 @@ public:
         config().setBool("highlight", options["highlight"].as<bool>());
     if (options.count("history_file"))
         config().setString("history_file", options["history_file"].as<std::string>());
+    if (options.count("no-warnings"))
+        config().setBool("no-warnings", true);
 
     if ((query_fuzzer_runs = options["query-fuzzer-runs"].as<int>()))
     {
@@ -2740,8 +2777,7 @@ int mainEntryClickHouseClient(int argc, char ** argv)
     }
     catch (const DB::Exception & e)
     {
-        std::string text = e.displayText();
-        std::cerr << "Code: " << e.code() << ". " << text << std::endl;
+        std::cerr << DB::getExceptionMessage(e, false) << std::endl;
         return 1;
     }
     catch (...)
@@ -353,6 +353,11 @@ bool HedgedConnections::resumePacketReceiver(const HedgedConnections::ReplicaLoc
         if (offset_states[location.offset].active_connection_count == 0 && !offset_states[location.offset].next_replica_in_process)
             throw NetException("Receive timeout expired", ErrorCodes::SOCKET_TIMEOUT);
     }
+    else if (std::holds_alternative<std::exception_ptr>(res))
+    {
+        finishProcessReplica(replica_state, true);
+        std::rethrow_exception(std::move(std::get<std::exception_ptr>(res)));
+    }
 
     return false;
 }
@@ -31,7 +31,7 @@ public:
     }
 
     /// Resume packet receiving.
-    std::variant<int, Packet, Poco::Timespan> resume()
+    std::variant<int, Packet, Poco::Timespan, std::exception_ptr> resume()
     {
         /// If there is no pending data, check receive timeout.
         if (!connection->hasReadPendingData() && !checkReceiveTimeout())
@@ -43,7 +43,7 @@ public:
         /// Resume fiber.
         fiber = std::move(fiber).resume();
         if (exception)
-            std::rethrow_exception(std::move(exception));
+            return std::move(exception);
 
         if (is_read_in_process)
             return epoll.getFileDescriptor();
@@ -313,7 +313,7 @@ std::string getCurrentExceptionMessage(bool with_stacktrace, bool check_embedded
     try
     {
         stream << "Poco::Exception. Code: " << ErrorCodes::POCO_EXCEPTION << ", e.code() = " << e.code()
-            << ", e.displayText() = " << e.displayText()
+            << ", " << e.displayText()
             << (with_stacktrace ? ", Stack trace (when copying this message, always include the lines below):\n\n" + getExceptionStackTraceString(e) : "")
             << (with_extra_info ? getExtraExceptionInfo(e) : "")
             << " (version " << VERSION_STRING << VERSION_OFFICIAL << ")";
@@ -433,7 +433,12 @@ std::string getExceptionMessage(const Exception & e, bool with_stacktrace, bool
         }
     }
 
-    stream << "Code: " << e.code() << ", e.displayText() = " << text;
+    stream << "Code: " << e.code() << ". " << text;
+
+    if (!text.empty() && text.back() != '.')
+        stream << '.';
+
+    stream << " (" << ErrorCodes::getName(e.code()) << ")";
 
     if (with_stacktrace && !has_embedded_stack_trace)
         stream << ", Stack trace (when copying this message, always include the lines below):\n\n" << e.getStackTraceString();
@@ -248,6 +248,9 @@
     M(S3WriteRequestsThrottling, "Number of 429 and 503 errors in POST, DELETE, PUT and PATCH requests to S3 storage.") \
     M(S3WriteRequestsRedirects, "Number of redirects in POST, DELETE, PUT and PATCH requests to S3 storage.") \
     M(QueryMemoryLimitExceeded, "Number of times when memory limit exceeded for query.") \
+    \
+    M(SleepFunctionCalls, "Number of times a sleep function (sleep, sleepEachRow) has been called.") \
+    M(SleepFunctionMicroseconds, "Time spent sleeping due to a sleep function call.") \
 
 
 namespace ProfileEvents
@@ -6,6 +6,7 @@
 #include <IO/WriteHelpers.h>
 #include <IO/ReadBufferFromString.h>
 #include <IO/WriteBufferFromString.h>
+#include <sparsehash/dense_hash_map>


 namespace DB
@@ -161,18 +162,24 @@ NamesAndTypesList NamesAndTypesList::filter(const Names & names) const

 NamesAndTypesList NamesAndTypesList::addTypes(const Names & names) const
 {
-    std::unordered_map<std::string_view, const NameAndTypePair *> self_columns;
+    /// NOTE: It's better to make a map in `IStorage` than to create it here every time again.
+#if !defined(ARCADIA_BUILD)
+    google::dense_hash_map<StringRef, const DataTypePtr *, StringRefHash> types;
+#else
+    google::sparsehash::dense_hash_map<StringRef, const DataTypePtr *, StringRefHash> types;
+#endif
+    types.set_empty_key(StringRef());

     for (const auto & column : *this)
-        self_columns[column.name] = &column;
+        types[column.name] = &column.type;

     NamesAndTypesList res;
     for (const String & name : names)
     {
-        auto it = self_columns.find(name);
-        if (it == self_columns.end())
+        auto it = types.find(name);
+        if (it == types.end())
             throw Exception("No column " + name, ErrorCodes::THERE_IS_NO_COLUMN);
-        res.emplace_back(*it->second);
+        res.emplace_back(name, *it->second);
     }

     return res;
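Unlike std::unordered_map, dense_hash_map requires an explicit empty key that can never collide with a stored key, set before the first insert — hence the `types.set_empty_key(StringRef())` call above. A minimal standalone sketch of the pattern (assuming the sparsehash headers are available):

    #include <sparsehash/dense_hash_map>
    #include <string>

    int main()
    {
        google::dense_hash_map<std::string, int> map;
        map.set_empty_key(std::string()); /// Mandatory before first use; the empty string must never be inserted as a key.
        map["x"] = 1;
        return map.count("x") ? 0 : 1;
    }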
@@ -57,7 +57,7 @@ class IColumn;
     M(Seconds, tcp_keep_alive_timeout, 0, "The time in seconds the connection needs to remain idle before TCP starts sending keepalive probes", 0) \
     M(Milliseconds, hedged_connection_timeout_ms, DBMS_DEFAULT_HEDGED_CONNECTION_TIMEOUT_MS, "Connection timeout for establishing connection with replica for Hedged requests", 0) \
     M(Milliseconds, receive_data_timeout_ms, DBMS_DEFAULT_RECEIVE_DATA_TIMEOUT_MS, "Connection timeout for receiving first packet of data or packet with positive progress from replica", 0) \
-    M(Bool, use_hedged_requests, false, "Use hedged requests for distributed queries", 0) \
+    M(Bool, use_hedged_requests, true, "Use hedged requests for distributed queries", 0) \
     M(Bool, allow_changing_replica_until_first_data_packet, false, "Allow HedgedConnections to change replica until receiving first data packet", 0) \
     M(Milliseconds, queue_max_wait_ms, 0, "The wait time in the request queue, if the number of concurrent requests exceeds the maximum.", 0) \
     M(Milliseconds, connection_pool_max_wait_ms, 0, "The wait time when the connection pool is full.", 0) \
@@ -5,6 +5,7 @@
 #include <IO/FileEncryptionCommon.h>
 #include <IO/ReadBufferFromEncryptedFile.h>
 #include <IO/WriteBufferFromEncryptedFile.h>
+#include <boost/algorithm/hex.hpp>


 namespace DB
@@ -12,14 +13,82 @@ namespace DB

 namespace ErrorCodes
 {
+    extern const int BAD_ARGUMENTS;
     extern const int INCORRECT_DISK_INDEX;
-    extern const int UNKNOWN_ELEMENT_IN_CONFIG;
-    extern const int LOGICAL_ERROR;
 }

+namespace
+{
     using DiskEncryptedPtr = std::shared_ptr<DiskEncrypted>;
     using namespace FileEncryption;

+    String unhexKey(const String & hex, const String & disk_name)
+    {
+        try
+        {
+            return boost::algorithm::unhex(hex);
+        }
+        catch (const std::exception &)
+        {
+            throw Exception(ErrorCodes::BAD_ARGUMENTS, "Cannot read key_hex for disk {}, check for valid characters [0-9a-fA-F] and length", disk_name);
+        }
+    }
+
+    struct DiskEncryptedSettings
+    {
+        String key;
+        DiskPtr wrapped_disk;
+        String path_on_wrapped_disk;
+
+        DiskEncryptedSettings(
+            const String & disk_name, const Poco::Util::AbstractConfiguration & config, const String & config_prefix, const DisksMap & map)
+        {
+            String wrapped_disk_name = config.getString(config_prefix + ".disk", "");
+            if (wrapped_disk_name.empty())
+                throw Exception(
+                    ErrorCodes::BAD_ARGUMENTS,
+                    "Name of the wrapped disk must not be empty. An encrypted disk is a wrapper over another disk. "
+                    "Disk {}", disk_name);
+
+            key = config.getString(config_prefix + ".key", "");
+            String key_hex = config.getString(config_prefix + ".key_hex", "");
+            if (!key.empty() && !key_hex.empty())
+                throw Exception(
+                    ErrorCodes::BAD_ARGUMENTS,
+                    "Both 'key' and 'key_hex' are specified. There should be only one. Disk {}", disk_name);
+
+            if (!key_hex.empty())
+            {
+                assert(key.empty());
+                key = unhexKey(key_hex, disk_name);
+            }
+
+            if (key.empty())
+                throw Exception(ErrorCodes::BAD_ARGUMENTS, "Key of the encrypted disk must not be empty. Disk {}", disk_name);
+
+            if (!FileEncryption::isKeyLengthSupported(key.length()))
+                throw Exception(
+                    ErrorCodes::BAD_ARGUMENTS,
+                    "Key length is not supported, supported only keys of length 16, 24, or 32 bytes. Disk {}", disk_name);
+
+            auto wrapped_disk_it = map.find(wrapped_disk_name);
+            if (wrapped_disk_it == map.end())
+                throw Exception(
+                    ErrorCodes::BAD_ARGUMENTS,
+                    "The wrapped disk must have been announced earlier. No disk with name {}. Disk {}",
+                    wrapped_disk_name, disk_name);
+            wrapped_disk = wrapped_disk_it->second;
+
+            path_on_wrapped_disk = config.getString(config_prefix + ".path", "");
+        }
+    };
+
+    bool inline isSameDiskType(const IDisk & one, const IDisk & another)
+    {
+        return typeid(one) == typeid(another);
+    }
+}

 class DiskEncryptedReservation : public IReservation
 {
 public:
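For reference, the settings struct above maps onto a disk declaration of roughly this shape in the server config (a hypothetical sketch; disk names and the key value are illustrative only):

    <disks>
        <disk_local>
            <type>local</type>
            <path>/var/lib/clickhouse/disk_local/</path>
        </disk_local>
        <disk_encrypted>
            <type>encrypted</type>
            <disk>disk_local</disk>  <!-- the wrapped disk; must be declared earlier -->
            <key_hex>00112233445566778899aabbccddeeff</key_hex>  <!-- 16 bytes -> aes_128_ctr; 'key' is the raw alternative -->
            <path>encrypted/</path>  <!-- subdirectory on the wrapped disk; must end with '/' -->
        </disk_encrypted>
    </disks>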
@@ -57,23 +126,45 @@ ReservationPtr DiskEncrypted::reserve(UInt64 bytes)
 DiskEncrypted::DiskEncrypted(const String & name_, DiskPtr disk_, const String & key_, const String & path_)
     : DiskDecorator(disk_)
     , name(name_), key(key_), disk_path(path_)
-    , disk_absolute_path(delegate->getPath() + disk_path)
 {
     initialize();
 }

 void DiskEncrypted::initialize()
 {
+    disk_absolute_path = delegate->getPath() + disk_path;
+
     // use wrapped_disk as an EncryptedDisk store
     if (disk_path.empty())
         return;

     if (disk_path.back() != '/')
-        throw Exception("Disk path must ends with '/', but '" + disk_path + "' doesn't.", ErrorCodes::LOGICAL_ERROR);
+        throw Exception("Disk path must ends with '/', but '" + disk_path + "' doesn't.", ErrorCodes::BAD_ARGUMENTS);

     delegate->createDirectories(disk_path);
 }

+void DiskEncrypted::copy(const String & from_path, const std::shared_ptr<IDisk> & to_disk, const String & to_path)
+{
+    /// Check if we can copy the file without deciphering.
+    if (isSameDiskType(*this, *to_disk))
+    {
+        /// Disk type is the same, check if the key is the same too.
+        if (auto * to_encrypted_disk = typeid_cast<DiskEncrypted *>(to_disk.get()))
+        {
+            if (key == to_encrypted_disk->key)
+            {
+                /// Key is the same so we can simply copy the encrypted file.
+                delegate->copy(wrappedPath(from_path), to_encrypted_disk->delegate, wrappedPath(to_path));
+                return;
+            }
+        }
+    }
+
+    /// Copy the file through buffers with deciphering.
+    copyThroughBuffers(from_path, to_disk, to_path);
+}
+
 std::unique_ptr<ReadBufferFromFileBase> DiskEncrypted::readFile(
     const String & path,
     size_t buf_size,
@@ -85,37 +176,28 @@ std::unique_ptr<ReadBufferFromFileBase> DiskEncrypted::readFile(
     auto wrapped_path = wrappedPath(path);
     auto buffer = delegate->readFile(wrapped_path, buf_size, estimated_size, aio_threshold, mmap_threshold, mmap_cache);

-    String iv;
-    size_t offset = 0;
-
-    if (exists(path) && getFileSize(path))
-    {
-        iv = readIV(kIVSize, *buffer);
-        offset = kIVSize;
-    }
-    else
-        iv = randomString(kIVSize);
-
-    return std::make_unique<ReadBufferFromEncryptedFile>(buf_size, std::move(buffer), iv, key, offset);
+    InitVector iv;
+    iv.read(*buffer);
+    return std::make_unique<ReadBufferFromEncryptedFile>(buf_size, std::move(buffer), key, iv);
 }

 std::unique_ptr<WriteBufferFromFileBase> DiskEncrypted::writeFile(const String & path, size_t buf_size, WriteMode mode)
 {
-    String iv;
-    size_t start_offset = 0;
+    InitVector iv;
+    UInt64 old_file_size = 0;
     auto wrapped_path = wrappedPath(path);

     if (mode == WriteMode::Append && exists(path) && getFileSize(path))
     {
-        auto read_buffer = delegate->readFile(wrapped_path, kIVSize);
-        iv = readIV(kIVSize, *read_buffer);
-        start_offset = getFileSize(path);
+        auto read_buffer = delegate->readFile(wrapped_path, InitVector::kSize);
+        iv.read(*read_buffer);
+        old_file_size = getFileSize(path);
     }
     else
-        iv = randomString(kIVSize);
+        iv = InitVector::random();

     auto buffer = delegate->writeFile(wrapped_path, buf_size, mode);
-    return std::make_unique<WriteBufferFromEncryptedFile>(buf_size, std::move(buffer), iv, key, start_offset);
+    return std::make_unique<WriteBufferFromEncryptedFile>(buf_size, std::move(buffer), key, iv, old_file_size);
 }


@@ -123,13 +205,13 @@ size_t DiskEncrypted::getFileSize(const String & path) const
 {
     auto wrapped_path = wrappedPath(path);
     size_t size = delegate->getFileSize(wrapped_path);
-    return size > kIVSize ? (size - kIVSize) : 0;
+    return size > InitVector::kSize ? (size - InitVector::kSize) : 0;
 }

 void DiskEncrypted::truncateFile(const String & path, size_t size)
 {
     auto wrapped_path = wrappedPath(path);
-    delegate->truncateFile(wrapped_path, size ? (size + kIVSize) : 0);
+    delegate->truncateFile(wrapped_path, size ? (size + InitVector::kSize) : 0);
 }

 SyncGuardPtr DiskEncrypted::getDirectorySyncGuard(const String & path) const
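The size arithmetic above follows directly from the on-disk layout: every encrypted file is the 16-byte initialization vector followed by the ciphertext, and CTR mode keeps ciphertext and plaintext the same length. A sketch of the invariant (illustrative, not from the commit):

    /// Layout of a file on the wrapped disk:
    ///   [ IV : InitVector::kSize = 16 bytes ][ ciphertext : N bytes ]
    /// For a logical (plaintext) size N:
    ///   wrapped_size = N + InitVector::kSize
    ///   logical_size = wrapped_size > InitVector::kSize ? wrapped_size - InitVector::kSize : 0
    /// e.g. a 100-byte logical file occupies 116 bytes on the wrapped disk,
    /// and truncating to 0 truncates the wrapped file to 0 (dropping the IV as well).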
@@ -144,22 +226,10 @@ void DiskEncrypted::applyNewSettings(
     const String & config_prefix,
     const DisksMap & map)
 {
-    String wrapped_disk_name = config.getString(config_prefix + ".disk", "");
-    if (wrapped_disk_name.empty())
-        throw Exception("The wrapped disk name can not be empty. An encrypted disk is a wrapper over another disk. "
-            "Disk " + name, ErrorCodes::UNKNOWN_ELEMENT_IN_CONFIG);
-
-    key = config.getString(config_prefix + ".key", "");
-    if (key.empty())
-        throw Exception("Encrypted disk key can not be empty. Disk " + name, ErrorCodes::UNKNOWN_ELEMENT_IN_CONFIG);
-
-    auto wrapped_disk = map.find(wrapped_disk_name);
-    if (wrapped_disk == map.end())
-        throw Exception("The wrapped disk must have been announced earlier. No disk with name " + wrapped_disk_name + ". Disk " + name,
-            ErrorCodes::UNKNOWN_ELEMENT_IN_CONFIG);
-    delegate = wrapped_disk->second;
-
-    disk_path = config.getString(config_prefix + ".path", "");
+    DiskEncryptedSettings settings{name, config, config_prefix, map};
+    key = settings.key;
+    delegate = settings.wrapped_disk;
+    disk_path = settings.path_on_wrapped_disk;

     initialize();
 }

@@ -169,28 +239,10 @@ void registerDiskEncrypted(DiskFactory & factory)
         const Poco::Util::AbstractConfiguration & config,
         const String & config_prefix,
         ContextPtr /*context*/,
-        const DisksMap & map) -> DiskPtr {
-        String wrapped_disk_name = config.getString(config_prefix + ".disk", "");
-        if (wrapped_disk_name.empty())
-            throw Exception("The wrapped disk name can not be empty. An encrypted disk is a wrapper over another disk. "
-                "Disk " + name, ErrorCodes::UNKNOWN_ELEMENT_IN_CONFIG);
-
-        String key = config.getString(config_prefix + ".key", "");
-        if (key.empty())
-            throw Exception("Encrypted disk key can not be empty. Disk " + name, ErrorCodes::UNKNOWN_ELEMENT_IN_CONFIG);
-        if (key.size() != cipherKeyLength(defaultCipher()))
-            throw Exception("Expected key with size " + std::to_string(cipherKeyLength(defaultCipher())) + ", got key with size " + std::to_string(key.size()),
-                ErrorCodes::UNKNOWN_ELEMENT_IN_CONFIG);
-
-        auto wrapped_disk = map.find(wrapped_disk_name);
-        if (wrapped_disk == map.end())
-            throw Exception("The wrapped disk must have been announced earlier. No disk with name " + wrapped_disk_name + ". Disk " + name,
-                ErrorCodes::UNKNOWN_ELEMENT_IN_CONFIG);
-
-        String relative_path = config.getString(config_prefix + ".path", "");
-
-        return std::make_shared<DiskEncrypted>(name, wrapped_disk->second, key, relative_path);
+        const DisksMap & map) -> DiskPtr
+    {
+        DiskEncryptedSettings settings{name, config, config_prefix, map};
+        return std::make_shared<DiskEncrypted>(name, settings.wrapped_disk, settings.key, settings.path_on_wrapped_disk);
     };
     factory.registerDiskType("encrypted", creator);
 }

@@ -14,6 +14,9 @@ namespace DB
 class ReadBufferFromFileBase;
 class WriteBufferFromFileBase;

+/// Encrypted disk ciphers all written files on the fly and writes the encrypted files to an underlying (normal) disk.
+/// And when we read files from an encrypted disk it deciphers them automatically,
+/// so we can work with a encrypted disk like it's a normal disk.
 class DiskEncrypted : public DiskDecorator
 {
 public:
@@ -102,10 +105,7 @@ public:
         delegate->listFiles(wrapped_path, file_names);
     }

-    void copy(const String & from_path, const std::shared_ptr<IDisk> & to_disk, const String & to_path) override
-    {
-        IDisk::copy(from_path, to_disk, to_path);
-    }
+    void copy(const String & from_path, const std::shared_ptr<IDisk> & to_disk, const String & to_path) override;

     std::unique_ptr<ReadBufferFromFileBase> readFile(
         const String & path,
@@ -309,7 +309,7 @@ void DiskLocal::copy(const String & from_path, const std::shared_ptr<IDisk> & to
         fs::copy(from, to, fs::copy_options::recursive | fs::copy_options::overwrite_existing); /// Use more optimal way.
     }
     else
-        IDisk::copy(from_path, to_disk, to_path); /// Copy files through buffers.
+        copyThroughBuffers(from_path, to_disk, to_path); /// Base implementation.
 }

 SyncGuardPtr DiskLocal::getDirectorySyncGuard(const String & path) const
@@ -58,7 +58,7 @@ void asyncCopy(IDisk & from_disk, String from_path, IDisk & to_disk, String to_p
     }
 }

-void IDisk::copy(const String & from_path, const std::shared_ptr<IDisk> & to_disk, const String & to_path)
+void IDisk::copyThroughBuffers(const String & from_path, const std::shared_ptr<IDisk> & to_disk, const String & to_path)
 {
     auto & exec = to_disk->getExecutor();
     ResultsCollector results;
@@ -71,6 +71,11 @@ void IDisk::copy(const String & from_path, const std::shared_ptr<IDisk> & to_dis
         result.get();
 }

+void IDisk::copy(const String & from_path, const std::shared_ptr<IDisk> & to_disk, const String & to_path)
+{
+    copyThroughBuffers(from_path, to_disk, to_path);
+}
+
 void IDisk::truncateFile(const String &, size_t)
 {
     throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Truncate operation is not implemented for disk of type {}", getType());
@@ -246,6 +246,11 @@ protected:
     /// Returns executor to perform asynchronous operations.
     virtual Executor & getExecutor() { return *executor; }

+    /// Base implementation of the function copy().
+    /// It just opens two files, reads data by portions from the first file, and writes it to the second one.
+    /// A derived class may override copy() to provide a faster implementation.
+    void copyThroughBuffers(const String & from_path, const std::shared_ptr<IDisk> & to_disk, const String & to_path);
+
 private:
     std::unique_ptr<Executor> executor;
 };
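The protected helper makes the override pattern explicit: a derived disk can try a cheaper path and fall back to the buffered base implementation, which is exactly what DiskEncrypted::copy does above. A condensed sketch of the pattern (the `MyDisk` class and its helpers are hypothetical):

    class MyDisk : public IDisk
    {
    public:
        void copy(const String & from_path, const std::shared_ptr<IDisk> & to_disk, const String & to_path) override
        {
            if (canCopyWithoutRecoding(*to_disk))          /// hypothetical fast-path predicate
            {
                copyRaw(from_path, *to_disk, to_path);     /// hypothetical cheap byte-for-byte copy
                return;
            }
            copyThroughBuffers(from_path, to_disk, to_path); /// generic read + write fallback from IDisk
        }
    };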
@@ -5,11 +5,17 @@
 #include <Columns/ColumnConst.h>
 #include <DataTypes/DataTypesNumber.h>
 #include <Common/FieldVisitorConvertToNumber.h>
+#include <Common/ProfileEvents.h>
 #include <Common/assert_cast.h>
 #include <common/sleep.h>
 #include <IO/WriteHelpers.h>
 #include <Interpreters/Context_fwd.h>

+namespace ProfileEvents
+{
+    extern const Event SleepFunctionCalls;
+    extern const Event SleepFunctionMicroseconds;
+}
+
 namespace DB
 {
@@ -91,8 +97,11 @@ public:
             if (seconds > 3.0)   /// The choice is arbitrary
                 throw Exception("The maximum sleep time is 3 seconds. Requested: " + toString(seconds), ErrorCodes::TOO_SLOW);

-            UInt64 microseconds = seconds * (variant == FunctionSleepVariant::PerBlock ? 1 : size) * 1e6;
+            UInt64 count = (variant == FunctionSleepVariant::PerBlock ? 1 : size);
+            UInt64 microseconds = seconds * count * 1e6;
             sleepForMicroseconds(microseconds);
+            ProfileEvents::increment(ProfileEvents::SleepFunctionCalls, count);
+            ProfileEvents::increment(ProfileEvents::SleepFunctionMicroseconds, microseconds);
         }

         /// convertToFullColumn needed, because otherwise (constant expression case) function will not get called on each columns.
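The accounting is straightforward: the per-row variant sleeps once per row, so for a block of `size` rows the counters advance by `count = size` calls and `seconds * count * 1e6` microseconds. A worked example (values illustrative):

    /// sleepEachRow(0.1) over a block of 8 rows (per-row variant):
    ///   count        = 8
    ///   microseconds = 0.1 * 8 * 1e6 = 800000
    /// sleep(0.1) over the same block (per-block variant):
    ///   count        = 1
    ///   microseconds = 0.1 * 1 * 1e6 = 100000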
@@ -1,8 +1,8 @@
 #include <IO/FileEncryptionCommon.h>

 #if USE_SSL
-#include <IO/ReadHelpers.h>
 #include <IO/ReadBuffer.h>
+#include <IO/ReadHelpers.h>
 #include <IO/WriteBuffer.h>
 #include <IO/WriteHelpers.h>

@@ -23,244 +23,281 @@ namespace FileEncryption

 namespace
 {
-    String toBigEndianString(UInt128 value)
-    {
-        WriteBufferFromOwnString out;
-        writeBinaryBigEndian(value, out);
-        return std::move(out.str());
-    }
-
-    UInt128 fromBigEndianString(const String & str)
-    {
-        ReadBufferFromMemory in{str.data(), str.length()};
-        UInt128 result;
-        readBinaryBigEndian(result, in);
-        return result;
-    }
-}
-
-InitVector::InitVector(const String & iv_) : iv(fromBigEndianString(iv_)) {}
-
-const String & InitVector::str() const
-{
-    local = toBigEndianString(iv + counter);
-    return local;
-}
-
-Encryption::Encryption(const String & iv_, const EncryptionKey & key_, size_t offset_)
-    : evp_cipher(defaultCipher())
-    , init_vector(iv_)
-    , key(key_)
-    , block_size(cipherIVLength(evp_cipher))
-{
-    if (iv_.size() != cipherIVLength(evp_cipher))
-        throw DB::Exception("Expected iv with size " + std::to_string(cipherIVLength(evp_cipher)) + ", got iv with size " + std::to_string(iv_.size()),
-            DB::ErrorCodes::DATA_ENCRYPTION_ERROR);
-    if (key_.size() != cipherKeyLength(evp_cipher))
-        throw DB::Exception("Expected key with size " + std::to_string(cipherKeyLength(evp_cipher)) + ", got iv with size " + std::to_string(key_.size()),
-            DB::ErrorCodes::DATA_ENCRYPTION_ERROR);
-
-    offset = offset_;
-}
-
-size_t Encryption::partBlockSize(size_t size, size_t off) const
-{
-    assert(off < block_size);
-    /// write the part as usual block
-    if (off == 0)
-        return 0;
-    return off + size <= block_size ? size : (block_size - off) % block_size;
-}
-
-void Encryptor::encrypt(const char * plaintext, WriteBuffer & buf, size_t size)
-{
-    if (!size)
-        return;
-
-    auto iv = InitVector(init_vector);
-    auto off = blockOffset(offset);
-    iv.set(blocks(offset));
-
-    size_t part_size = partBlockSize(size, off);
-    if (off)
-    {
-        buf.write(encryptPartialBlock(plaintext, part_size, iv, off).data(), part_size);
-        offset += part_size;
-        size -= part_size;
-        iv.inc();
-    }
-
-    if (size)
-    {
-        buf.write(encryptNBytes(plaintext + part_size, size, iv).data(), size);
-        offset += size;
-    }
-}
-
-String Encryptor::encryptPartialBlock(const char * partial_block, size_t size, const InitVector & iv, size_t off) const
-{
-    if (size > block_size)
-        throw Exception("Expected partial block, got block with size > block_size: size = " + std::to_string(size) + " and offset = " + std::to_string(off),
-            ErrorCodes::DATA_ENCRYPTION_ERROR);
-
-    String plaintext(block_size, '\0');
-    for (size_t i = 0; i < size; ++i)
-        plaintext[i + off] = partial_block[i];
-
-    return String(encryptNBytes(plaintext.data(), block_size, iv), off, size);
-}
-
-String Encryptor::encryptNBytes(const char * data, size_t bytes, const InitVector & iv) const
-{
-    String ciphertext(bytes, '\0');
-    auto * ciphertext_ref = ciphertext.data();
-
-    auto evp_ctx_ptr = std::unique_ptr<EVP_CIPHER_CTX, decltype(&::EVP_CIPHER_CTX_free)>(EVP_CIPHER_CTX_new(), &EVP_CIPHER_CTX_free);
-    auto * evp_ctx = evp_ctx_ptr.get();
-
-    if (EVP_EncryptInit_ex(evp_ctx, evp_cipher, nullptr, nullptr, nullptr) != 1)
-        throw Exception("Failed to initialize encryption context with cipher", ErrorCodes::DATA_ENCRYPTION_ERROR);
-
-    if (EVP_EncryptInit_ex(evp_ctx, nullptr, nullptr,
-        reinterpret_cast<const unsigned char*>(key.str().data()),
-        reinterpret_cast<const unsigned char*>(iv.str().data())) != 1)
-        throw Exception("Failed to set key and IV for encryption", ErrorCodes::DATA_ENCRYPTION_ERROR);
-
-    int output_len = 0;
-    if (EVP_EncryptUpdate(evp_ctx,
-        reinterpret_cast<unsigned char*>(ciphertext_ref), &output_len,
-        reinterpret_cast<const unsigned char*>(data), static_cast<int>(bytes)) != 1)
-        throw Exception("Failed to encrypt", ErrorCodes::DATA_ENCRYPTION_ERROR);
-
-    ciphertext_ref += output_len;
-
-    int final_output_len = 0;
-    if (EVP_EncryptFinal_ex(evp_ctx,
-        reinterpret_cast<unsigned char*>(ciphertext_ref), &final_output_len) != 1)
-        throw Exception("Failed to fetch ciphertext", ErrorCodes::DATA_ENCRYPTION_ERROR);
-
-    if (output_len < 0 || final_output_len < 0 || static_cast<size_t>(output_len) + static_cast<size_t>(final_output_len) != bytes)
-        throw Exception("Only part of the data was encrypted", ErrorCodes::DATA_ENCRYPTION_ERROR);
-
-    return ciphertext;
-}
-
-void Decryptor::decrypt(const char * ciphertext, BufferBase::Position buf, size_t size, size_t off)
-{
-    if (!size)
-        return;
-
-    auto iv = InitVector(init_vector);
-    iv.set(blocks(off));
-    off = blockOffset(off);
-
-    size_t part_size = partBlockSize(size, off);
-    if (off)
-    {
-        decryptPartialBlock(buf, ciphertext, part_size, iv, off);
-        size -= part_size;
-        if (part_size + off == block_size)
-            iv.inc();
-    }
-
-    if (size)
-        decryptNBytes(buf, ciphertext + part_size, size, iv);
-}
-
-void Decryptor::decryptPartialBlock(BufferBase::Position & to, const char * partial_block, size_t size, const InitVector & iv, size_t off) const
-{
-    if (size > block_size)
-        throw Exception("Expecter partial block, got block with size > block_size: size = " + std::to_string(size) + " and offset = " + std::to_string(off),
-            ErrorCodes::DATA_ENCRYPTION_ERROR);
-
-    String ciphertext(block_size, '\0');
-    String plaintext(block_size, '\0');
-    for (size_t i = 0; i < size; ++i)
-        ciphertext[i + off] = partial_block[i];
-
-    auto * plaintext_ref = plaintext.data();
-    decryptNBytes(plaintext_ref, ciphertext.data(), off + size, iv);
-
-    for (size_t i = 0; i < size; ++i)
-        *(to++) = plaintext[i + off];
-}
-
-void Decryptor::decryptNBytes(BufferBase::Position & to, const char * data, size_t bytes, const InitVector & iv) const
-{
-    auto evp_ctx_ptr = std::unique_ptr<EVP_CIPHER_CTX, decltype(&::EVP_CIPHER_CTX_free)>(EVP_CIPHER_CTX_new(), &EVP_CIPHER_CTX_free);
-    auto * evp_ctx = evp_ctx_ptr.get();
-
-    if (EVP_DecryptInit_ex(evp_ctx, evp_cipher, nullptr, nullptr, nullptr) != 1)
-        throw Exception("Failed to initialize decryption context with cipher", ErrorCodes::DATA_ENCRYPTION_ERROR);
-
-    if (EVP_DecryptInit_ex(evp_ctx, nullptr, nullptr,
-        reinterpret_cast<const unsigned char*>(key.str().data()),
-        reinterpret_cast<const unsigned char*>(iv.str().data())) != 1)
-        throw Exception("Failed to set key and IV for decryption", ErrorCodes::DATA_ENCRYPTION_ERROR);
-
-    int output_len = 0;
-    if (EVP_DecryptUpdate(evp_ctx,
-        reinterpret_cast<unsigned char*>(to), &output_len,
-        reinterpret_cast<const unsigned char*>(data), static_cast<int>(bytes)) != 1)
-        throw Exception("Failed to decrypt", ErrorCodes::DATA_ENCRYPTION_ERROR);
-
-    to += output_len;
-
-    int final_output_len = 0;
-    if (EVP_DecryptFinal_ex(evp_ctx,
-        reinterpret_cast<unsigned char*>(to), &final_output_len) != 1)
-        throw Exception("Failed to fetch plaintext", ErrorCodes::DATA_ENCRYPTION_ERROR);
-
-    if (output_len < 0 || final_output_len < 0 || static_cast<size_t>(output_len) + static_cast<size_t>(final_output_len) != bytes)
-        throw Exception("Only part of the data was decrypted", ErrorCodes::DATA_ENCRYPTION_ERROR);
-}
-
-String readIV(size_t size, ReadBuffer & in)
-{
-    String iv(size, 0);
-    in.readStrict(reinterpret_cast<char *>(iv.data()), size);
-    return iv;
-}
-
-String randomString(size_t size)
-{
-    String iv(size, 0);
-
-    std::random_device rd;
-    std::mt19937 gen{rd()};
-    std::uniform_int_distribution<size_t> dis;
-
-    char * ptr = iv.data();
-    while (size)
-    {
-        auto value = dis(gen);
-        size_t n = std::min(size, sizeof(value));
-        memcpy(ptr, &value, n);
-        ptr += n;
-        size -= n;
-    }
-
-    return iv;
-}
-
-void writeIV(const String & iv, WriteBuffer & out)
-{
-    out.write(iv.data(), iv.length());
-}
-
-size_t cipherKeyLength(const EVP_CIPHER * evp_cipher)
-{
-    return static_cast<size_t>(EVP_CIPHER_key_length(evp_cipher));
-}
-
-size_t cipherIVLength(const EVP_CIPHER * evp_cipher)
-{
-    return static_cast<size_t>(EVP_CIPHER_iv_length(evp_cipher));
-}
-
-const EVP_CIPHER * defaultCipher()
-{
-    return EVP_aes_128_ctr();
-}
+    constexpr const size_t kBlockSize = 16;
+
+    size_t blockOffset(size_t pos) { return pos % kBlockSize; }
+    size_t blocks(size_t pos) { return pos / kBlockSize; }
+
+    size_t partBlockSize(size_t size, size_t off)
+    {
+        assert(off < kBlockSize);
+        /// write the part as usual block
+        if (off == 0)
+            return 0;
+        return off + size <= kBlockSize ? size : (kBlockSize - off) % kBlockSize;
+    }
+
+    size_t encryptBlocks(EVP_CIPHER_CTX * evp_ctx, const char * data, size_t size, WriteBuffer & out)
+    {
+        const uint8_t * in = reinterpret_cast<const uint8_t *>(data);
+        size_t in_size = 0;
+        size_t out_size = 0;
+
+        while (in_size < size)
+        {
+            out.nextIfAtEnd();
+            size_t part_size = std::min(size - in_size, out.available());
+            uint8_t * ciphertext = reinterpret_cast<uint8_t *>(out.position());
+            int ciphertext_size = 0;
+            if (!EVP_EncryptUpdate(evp_ctx, ciphertext, &ciphertext_size, &in[in_size], part_size))
+                throw Exception("Failed to encrypt", ErrorCodes::DATA_ENCRYPTION_ERROR);
+            in_size += part_size;
+            if (ciphertext_size)
+            {
+                out.position() += ciphertext_size;
+                out_size += ciphertext_size;
+            }
+        }
+
+        return out_size;
+    }
+
+    size_t encryptBlockWithPadding(EVP_CIPHER_CTX * evp_ctx, const char * data, size_t size, size_t pad_left, WriteBuffer & out)
+    {
+        assert((size <= kBlockSize) && (size + pad_left <= kBlockSize));
+        uint8_t padded_data[kBlockSize] = {};
+        memcpy(&padded_data[pad_left], data, size);
+        size_t padded_data_size = pad_left + size;
+
+        uint8_t ciphertext[kBlockSize];
+        int ciphertext_size = 0;
+        if (!EVP_EncryptUpdate(evp_ctx, ciphertext, &ciphertext_size, padded_data, padded_data_size))
+            throw Exception("Failed to encrypt", ErrorCodes::DATA_ENCRYPTION_ERROR);
+
+        if (!ciphertext_size)
+            return 0;
+
+        if (static_cast<size_t>(ciphertext_size) < pad_left)
+            throw Exception(ErrorCodes::DATA_ENCRYPTION_ERROR, "Unexpected size of encrypted data: {} < {}", ciphertext_size, pad_left);
+
+        uint8_t * ciphertext_begin = &ciphertext[pad_left];
+        ciphertext_size -= pad_left;
+        out.write(reinterpret_cast<const char *>(ciphertext_begin), ciphertext_size);
+        return ciphertext_size;
+    }
+
+    size_t encryptFinal(EVP_CIPHER_CTX * evp_ctx, WriteBuffer & out)
+    {
+        uint8_t ciphertext[kBlockSize];
+        int ciphertext_size = 0;
+        if (!EVP_EncryptFinal_ex(evp_ctx,
+            ciphertext, &ciphertext_size))
+            throw Exception("Failed to finalize encrypting", ErrorCodes::DATA_ENCRYPTION_ERROR);
+        if (ciphertext_size)
+            out.write(reinterpret_cast<const char *>(ciphertext), ciphertext_size);
+        return ciphertext_size;
+    }
+
+    size_t decryptBlocks(EVP_CIPHER_CTX * evp_ctx, const char * data, size_t size, char * out)
+    {
+        const uint8_t * in = reinterpret_cast<const uint8_t *>(data);
+        uint8_t * plaintext = reinterpret_cast<uint8_t *>(out);
+        int plaintext_size = 0;
+        if (!EVP_DecryptUpdate(evp_ctx, plaintext, &plaintext_size, in, size))
+            throw Exception("Failed to decrypt", ErrorCodes::DATA_ENCRYPTION_ERROR);
+        return plaintext_size;
+    }
+
+    size_t decryptBlockWithPadding(EVP_CIPHER_CTX * evp_ctx, const char * data, size_t size, size_t pad_left, char * out)
+    {
+        assert((size <= kBlockSize) && (size + pad_left <= kBlockSize));
+        uint8_t padded_data[kBlockSize] = {};
+        memcpy(&padded_data[pad_left], data, size);
+        size_t padded_data_size = pad_left + size;
+
+        uint8_t plaintext[kBlockSize];
+        int plaintext_size = 0;
+        if (!EVP_DecryptUpdate(evp_ctx, plaintext, &plaintext_size, padded_data, padded_data_size))
+            throw Exception("Failed to decrypt", ErrorCodes::DATA_ENCRYPTION_ERROR);
+
+        if (!plaintext_size)
+            return 0;
+
+        if (static_cast<size_t>(plaintext_size) < pad_left)
+            throw Exception(ErrorCodes::DATA_ENCRYPTION_ERROR, "Unexpected size of decrypted data: {} < {}", plaintext_size, pad_left);
+
+        const uint8_t * plaintext_begin = &plaintext[pad_left];
+        plaintext_size -= pad_left;
+        memcpy(out, plaintext_begin, plaintext_size);
+        return plaintext_size;
+    }
+
+    size_t decryptFinal(EVP_CIPHER_CTX * evp_ctx, char * out)
+    {
+        uint8_t plaintext[kBlockSize];
+        int plaintext_size = 0;
+        if (!EVP_DecryptFinal_ex(evp_ctx, plaintext, &plaintext_size))
+            throw Exception("Failed to finalize decrypting", ErrorCodes::DATA_ENCRYPTION_ERROR);
+        if (plaintext_size)
+            memcpy(out, plaintext, plaintext_size);
+        return plaintext_size;
+    }
+}
+
+String InitVector::toString() const
+{
+    static_assert(sizeof(counter) == InitVector::kSize);
+    WriteBufferFromOwnString out;
+    writeBinaryBigEndian(counter, out);
+    return std::move(out.str());
+}
+
+InitVector InitVector::fromString(const String & str)
+{
+    if (str.length() != InitVector::kSize)
+        throw Exception(ErrorCodes::DATA_ENCRYPTION_ERROR, "Expected iv with size {}, got iv with size {}", InitVector::kSize, str.length());
+    ReadBufferFromMemory in{str.data(), str.length()};
+    UInt128 counter;
+    readBinaryBigEndian(counter, in);
+    return InitVector{counter};
+}
+
+void InitVector::read(ReadBuffer & in)
+{
+    readBinaryBigEndian(counter, in);
+}
+
+void InitVector::write(WriteBuffer & out) const
+{
+    writeBinaryBigEndian(counter, out);
+}
+
+InitVector InitVector::random()
+{
+    std::random_device rd;
+    std::mt19937 gen{rd()};
+    std::uniform_int_distribution<UInt128::base_type> dis;
+    UInt128 counter;
+    for (size_t i = 0; i != std::size(counter.items); ++i)
+        counter.items[i] = dis(gen);
+    return InitVector{counter};
+}
+
+
+Encryptor::Encryptor(const String & key_, const InitVector & iv_)
+    : key(key_)
+    , init_vector(iv_)
+{
+    if (key_.length() == 16)
+        evp_cipher = EVP_aes_128_ctr();
+    else if (key_.length() == 24)
+        evp_cipher = EVP_aes_192_ctr();
+    else if (key_.length() == 32)
+        evp_cipher = EVP_aes_256_ctr();
+    else
+        throw Exception(ErrorCodes::DATA_ENCRYPTION_ERROR, "Key length {} is not supported, supported only keys of length 128, 192, or 256 bits", key_.length());
+
+    size_t cipher_key_length = static_cast<size_t>(EVP_CIPHER_key_length(evp_cipher));
+    if (cipher_key_length != key_.length())
+        throw Exception(ErrorCodes::DATA_ENCRYPTION_ERROR, "Got unexpected key length from cipher: {} != {}", cipher_key_length, key_.length());
+
+    size_t cipher_iv_length = static_cast<size_t>(EVP_CIPHER_iv_length(evp_cipher));
+    if (cipher_iv_length != InitVector::kSize)
+        throw Exception(ErrorCodes::DATA_ENCRYPTION_ERROR, "Got unexpected init vector's length from cipher: {} != {}", cipher_iv_length, InitVector::kSize);
+}
+
+void Encryptor::encrypt(const char * data, size_t size, WriteBuffer & out)
+{
+    if (!size)
+        return;
+
+    auto current_iv = (init_vector + blocks(offset)).toString();
+
+    auto evp_ctx_ptr = std::unique_ptr<EVP_CIPHER_CTX, decltype(&::EVP_CIPHER_CTX_free)>(EVP_CIPHER_CTX_new(), &EVP_CIPHER_CTX_free);
+    auto * evp_ctx = evp_ctx_ptr.get();
+
+    if (!EVP_EncryptInit_ex(evp_ctx, evp_cipher, nullptr, nullptr, nullptr))
+        throw Exception("Failed to initialize encryption context with cipher", ErrorCodes::DATA_ENCRYPTION_ERROR);
+
+    if (!EVP_EncryptInit_ex(evp_ctx, nullptr, nullptr,
+        reinterpret_cast<const uint8_t*>(key.c_str()), reinterpret_cast<const uint8_t*>(current_iv.c_str())))
+        throw Exception("Failed to set key and IV for encryption", ErrorCodes::DATA_ENCRYPTION_ERROR);
+
+    size_t in_size = 0;
+    size_t out_size = 0;
+
+    auto off = blockOffset(offset);
+    if (off)
+    {
+        size_t in_part_size = partBlockSize(size, off);
+        size_t out_part_size = encryptBlockWithPadding(evp_ctx, &data[in_size], in_part_size, off, out);
+        in_size += in_part_size;
+        out_size += out_part_size;
+    }
+
+    if (in_size < size)
+    {
+        size_t in_part_size = size - in_size;
+        size_t out_part_size = encryptBlocks(evp_ctx, &data[in_size], in_part_size, out);
+        in_size += in_part_size;
+        out_size += out_part_size;
+    }
+
+    out_size += encryptFinal(evp_ctx, out);
+
+    if (out_size != in_size)
+        throw Exception("Only part of the data was encrypted", ErrorCodes::DATA_ENCRYPTION_ERROR);
+    offset += in_size;
+}
+
+void Encryptor::decrypt(const char * data, size_t size, char * out)
+{
+    if (!size)
+        return;
+
+    auto current_iv = (init_vector + blocks(offset)).toString();
+
+    auto evp_ctx_ptr = std::unique_ptr<EVP_CIPHER_CTX, decltype(&::EVP_CIPHER_CTX_free)>(EVP_CIPHER_CTX_new(), &EVP_CIPHER_CTX_free);
+    auto * evp_ctx = evp_ctx_ptr.get();
+
+    if (!EVP_DecryptInit_ex(evp_ctx, evp_cipher, nullptr, nullptr, nullptr))
+        throw Exception("Failed to initialize decryption context with cipher", ErrorCodes::DATA_ENCRYPTION_ERROR);
+
+    if (!EVP_DecryptInit_ex(evp_ctx, nullptr, nullptr,
+        reinterpret_cast<const uint8_t*>(key.c_str()), reinterpret_cast<const uint8_t*>(current_iv.c_str())))
+        throw Exception("Failed to set key and IV for decryption", ErrorCodes::DATA_ENCRYPTION_ERROR);
+
+    size_t in_size = 0;
+    size_t out_size = 0;
+
+    auto off = blockOffset(offset);
+    if (off)
+    {
+        size_t in_part_size = partBlockSize(size, off);
+        size_t out_part_size = decryptBlockWithPadding(evp_ctx, &data[in_size], in_part_size, off, &out[out_size]);
+        in_size += in_part_size;
+        out_size += out_part_size;
+    }
+
+    if (in_size < size)
+    {
+        size_t in_part_size = size - in_size;
+        size_t out_part_size = decryptBlocks(evp_ctx, &data[in_size], in_part_size, &out[out_size]);
+        in_size += in_part_size;
+        out_size += out_part_size;
+    }
+
+    out_size += decryptFinal(evp_ctx, &out[out_size]);
+
+    if (out_size != in_size)
+        throw Exception("Only part of the data was decrypted", ErrorCodes::DATA_ENCRYPTION_ERROR);
+    offset += in_size;
+}
+
+bool isKeyLengthSupported(size_t key_length)
+{
+    return (key_length == 16) || (key_length == 24) || (key_length == 32);
+}

 }
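The helpers above reduce CTR random access to counter arithmetic: the initialization vector acts as a 128-bit big-endian block counter, and an absolute stream offset picks both the counter value and the intra-block padding. A worked example under the constants defined above (kBlockSize = 16; illustrative, not from the commit):

    /// Encrypting or decrypting at absolute offset 37 with base IV `iv`:
    ///   blocks(37)      = 37 / 16 = 2   -> current_iv = (iv + 2).toString()
    ///   blockOffset(37) = 37 % 16 = 5   -> pad_left = 5
    /// encryptBlockWithPadding() feeds 5 zero bytes plus the real data to
    /// EVP_EncryptUpdate so the keystream lines up with the block boundary,
    /// then discards the first 5 output bytes: CTR XORs a keystream, so the
    /// padding bytes never leak into the written ciphertext.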
@@ -16,87 +16,82 @@ class WriteBuffer;
 namespace FileEncryption
 {

-constexpr size_t kIVSize = sizeof(UInt128);
-
+/// Initialization vector. Its size is always 16 bytes.
 class InitVector
 {
 public:
-    InitVector(const String & iv_);
-    const String & str() const;
-    void inc() { ++counter; }
-    void inc(size_t n) { counter += n; }
-    void set(size_t n) { counter = n; }
+    static constexpr const size_t kSize = 16;
+
+    InitVector() = default;
+    explicit InitVector(const UInt128 & counter_) { set(counter_); }
+
+    void set(const UInt128 & counter_) { counter = counter_; }
+    UInt128 get() const { return counter; }
+
+    void read(ReadBuffer & in);
+    void write(WriteBuffer & out) const;
+
+    /// Write 16 bytes of the counter to a string in big endian order.
+    /// We need big endian because the used cipher algorithms treat an initialization vector as a counter in big endian.
+    String toString() const;
+
+    /// Converts a string of 16 bytes length in big endian order to a counter.
+    static InitVector fromString(const String & str_);
+
+    /// Adds a specified offset to the counter.
+    InitVector & operator++() { ++counter; return *this; }
+    InitVector operator++(int) { InitVector res = *this; ++counter; return res; }
+    InitVector & operator+=(size_t offset) { counter += offset; return *this; }
+    InitVector operator+(size_t offset) const { InitVector res = *this; return res += offset; }
+
+    /// Generates a random initialization vector.
+    static InitVector random();

 private:
-    UInt128 iv;
     UInt128 counter = 0;
-    mutable String local;
 };


-class EncryptionKey
+/// Encrypts or decrypts data.
+class Encryptor
 {
 public:
-    EncryptionKey(const String & key_) : key(key_) { }
-    size_t size() const { return key.size(); }
-    const String & str() const { return key; }
+    /// The `key` should have length 128 or 192 or 256.
+    /// According to the key's length aes_128_ctr or aes_192_ctr or aes_256_ctr will be used for encryption.
+    /// We chose to use CTR cipther algorithms because they have the following features which are important for us:
+    /// - No right padding, so we can append encrypted files without deciphering;
+    /// - One byte is always ciphered as one byte, so we get random access to encrypted files easily.
+    Encryptor(const String & key_, const InitVector & iv_);
+
+    /// Sets the current position in the data stream from the very beginning of data.
+    /// It affects how the data will be encrypted or decrypted because
+    /// the initialization vector is increased by an index of the current block
+    /// and the index of the current block is calculated from this offset.
+    void setOffset(size_t offset_) { offset = offset_; }
+
+    /// Encrypts some data.
+    /// Also the function moves `offset` by `size` (for successive encryptions).
+    void encrypt(const char * data, size_t size, WriteBuffer & out);
+
+    /// Decrypts some data.
+    /// The used cipher algorithms generate the same number of bytes in output as they were in input,
+    /// so the function always writes `size` bytes of the plaintext to `out`.
+    /// Also the function moves `offset` by `size` (for successive decryptions).
+    void decrypt(const char * data, size_t size, char * out);

 private:
-    String key;
-};
-
-
-class Encryption
-{
-public:
-    Encryption(const String & iv_, const EncryptionKey & key_, size_t offset_);
-
-protected:
-    size_t blockOffset(size_t pos) const { return pos % block_size; }
-    size_t blocks(size_t pos) const { return pos / block_size; }
-    size_t partBlockSize(size_t size, size_t off) const;
-    const EVP_CIPHER * get() const { return evp_cipher; }
-
+    const String key;
+    const InitVector init_vector;
     const EVP_CIPHER * evp_cipher;
-    const String init_vector;
-    const EncryptionKey key;
-    size_t block_size;

-    /// absolute offset
+    /// The current position in the data stream from the very beginning of data.
     size_t offset = 0;
 };


-class Encryptor : public Encryption
-{
-public:
-    using Encryption::Encryption;
-    void encrypt(const char * plaintext, WriteBuffer & buf, size_t size);
-
-private:
-    String encryptPartialBlock(const char * partial_block, size_t size, const InitVector & iv, size_t off) const;
-    String encryptNBytes(const char * data, size_t bytes, const InitVector & iv) const;
-};
-
-
-class Decryptor : public Encryption
-{
-public:
-    Decryptor(const String & iv_, const EncryptionKey & key_) : Encryption(iv_, key_, 0) { }
-    void decrypt(const char * ciphertext, char * buf, size_t size, size_t off);
-
-private:
-    void decryptPartialBlock(char *& to, const char * partial_block, size_t size, const InitVector & iv, size_t off) const;
-    void decryptNBytes(char *& to, const char * data, size_t bytes, const InitVector & iv) const;
-};
-
-
-String readIV(size_t size, ReadBuffer & in);
-String randomString(size_t size);
-void writeIV(const String & iv, WriteBuffer & out);
-size_t cipherKeyLength(const EVP_CIPHER * evp_cipher);
-size_t cipherIVLength(const EVP_CIPHER * evp_cipher);
-const EVP_CIPHER * defaultCipher();
+/// Checks whether a passed key length is supported, i.e.
+/// whether its length is 128 or 192 or 256 bits (16 or 24 or 32 bytes).
+bool isKeyLengthSupported(size_t key_length);

 }
 }
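A minimal usage sketch of the merged interface (not a test from the commit; it only uses the declarations above and assumes it is compiled with USE_SSL):

    /// One Encryptor, same key/IV, used for both directions at an arbitrary logical position.
    void recode(const String & key, const FileEncryption::InitVector & iv,
                const char * ciphertext, size_t size, size_t position,
                char * plain, WriteBuffer & out)
    {
        FileEncryption::Encryptor worker(key, iv); /// 16/24/32-byte keys select aes_128/192/256_ctr.
        worker.setOffset(position);                /// Logical offset; block counter becomes iv + position / 16.
        worker.decrypt(ciphertext, size, plain);   /// Always yields exactly `size` plaintext bytes.
        worker.setOffset(position);
        worker.encrypt(plain, size, out);          /// Re-encrypts the same range; offset advances by `size`.
    }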
@ -9,91 +9,95 @@ namespace ErrorCodes
|
|||||||
extern const int ARGUMENT_OUT_OF_BOUND;
|
extern const int ARGUMENT_OUT_OF_BOUND;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
using InitVector = FileEncryption::InitVector;
|
||||||
|
|
||||||
ReadBufferFromEncryptedFile::ReadBufferFromEncryptedFile(
|
ReadBufferFromEncryptedFile::ReadBufferFromEncryptedFile(
|
||||||
size_t buf_size_,
|
size_t buffer_size_,
|
||||||
std::unique_ptr<ReadBufferFromFileBase> in_,
|
std::unique_ptr<ReadBufferFromFileBase> in_,
|
||||||
const String & init_vector_,
|
const String & key_,
|
||||||
const FileEncryption::EncryptionKey & key_,
|
const InitVector & init_vector_)
|
||||||
const size_t iv_offset_)
|
: ReadBufferFromFileBase(buffer_size_, nullptr, 0)
|
||||||
: ReadBufferFromFileBase(buf_size_, nullptr, 0)
|
|
||||||
, in(std::move(in_))
|
, in(std::move(in_))
|
||||||
, buf_size(buf_size_)
|
, encrypted_buffer(buffer_size_)
|
||||||
, decryptor(FileEncryption::Decryptor(init_vector_, key_))
|
, encryptor(key_, init_vector_)
|
||||||
, iv_offset(iv_offset_)
|
|
||||||
{
|
{
|
||||||
|
/// We should start reading from `in` at the offset == InitVector::kSize.
|
||||||
|
need_seek = true;
|
||||||
}
|
}
|
||||||
|
|
||||||
 off_t ReadBufferFromEncryptedFile::seek(off_t off, int whence)
 {
-    if (whence == SEEK_CUR)
-    {
-        if (off < 0 && -off > getPosition())
-            throw Exception("SEEK_CUR shift out of bounds", ErrorCodes::ARGUMENT_OUT_OF_BOUND);
-
-        if (!working_buffer.empty() && static_cast<size_t>(offset() + off) < working_buffer.size())
-        {
-            pos += off;
-            return getPosition();
-        }
-        else
-            start_pos = off + getPosition();
-    }
-    else if (whence == SEEK_SET)
+    off_t new_pos;
+    if (whence == SEEK_SET)
     {
         if (off < 0)
             throw Exception("SEEK_SET underflow: off = " + std::to_string(off), ErrorCodes::ARGUMENT_OUT_OF_BOUND);
-
-        if (!working_buffer.empty() && static_cast<size_t>(off) >= start_pos
-            && static_cast<size_t>(off) < (start_pos + working_buffer.size()))
-        {
-            pos = working_buffer.begin() + (off - start_pos);
-            return getPosition();
-        }
-        else
-            start_pos = off;
+        new_pos = off;
+    }
+    else if (whence == SEEK_CUR)
+    {
+        if (off < 0 && -off > getPosition())
+            throw Exception("SEEK_CUR shift out of bounds", ErrorCodes::ARGUMENT_OUT_OF_BOUND);
+        new_pos = getPosition() + off;
     }
     else
-        throw Exception("ReadBufferFromEncryptedFile::seek expects SEEK_SET or SEEK_CUR as whence", ErrorCodes::ARGUMENT_OUT_OF_BOUND);
+        throw Exception("ReadBufferFromFileEncrypted::seek expects SEEK_SET or SEEK_CUR as whence", ErrorCodes::ARGUMENT_OUT_OF_BOUND);
 
-    initialize();
-    return start_pos;
+    if ((offset - static_cast<off_t>(working_buffer.size()) <= new_pos) && (new_pos <= offset) && !need_seek)
+    {
+        /// Position is still inside the buffer.
+        pos = working_buffer.end() - offset + new_pos;
+        assert(pos >= working_buffer.begin());
+        assert(pos <= working_buffer.end());
+    }
+    else
+    {
+        need_seek = true;
+        offset = new_pos;
+
+        /// No more reading from the current working buffer until next() is called.
+        pos = working_buffer.end();
+        assert(!hasPendingData());
+    }
+
+    /// The encryptor always needs to know what the current offset is.
+    encryptor.setOffset(new_pos);
+
+    return new_pos;
+}
+
+off_t ReadBufferFromEncryptedFile::getPosition()
+{
+    return offset - available();
 }
 
 bool ReadBufferFromEncryptedFile::nextImpl()
 {
+    if (need_seek)
+    {
+        off_t raw_offset = offset + InitVector::kSize;
+        if (in->seek(raw_offset, SEEK_SET) != raw_offset)
+            return false;
+        need_seek = false;
+    }
+
     if (in->eof())
         return false;
 
-    if (initialized)
-        start_pos += working_buffer.size();
-    initialize();
-    return true;
-}
-
-void ReadBufferFromEncryptedFile::initialize()
-{
-    size_t in_pos = start_pos + iv_offset;
-
-    String data;
-    data.resize(buf_size);
-    size_t data_size = 0;
-
-    in->seek(in_pos, SEEK_SET);
-    while (data_size < buf_size && !in->eof())
+    /// Read up to the size of `encrypted_buffer`.
+    size_t bytes_read = 0;
+    while (bytes_read < encrypted_buffer.size() && !in->eof())
     {
-        auto size = in->read(data.data() + data_size, buf_size - data_size);
-        data_size += size;
-        in_pos += size;
-        in->seek(in_pos, SEEK_SET);
+        bytes_read += in->read(encrypted_buffer.data() + bytes_read, encrypted_buffer.size() - bytes_read);
     }
 
-    data.resize(data_size);
-    working_buffer.resize(data_size);
-
-    decryptor.decrypt(data.data(), working_buffer.begin(), data_size, start_pos);
+    /// The ciphers used produce exactly as many output bytes as they are given in input,
+    /// so after deciphering the number of bytes is still `bytes_read`.
+    working_buffer.resize(bytes_read);
+    encryptor.decrypt(encrypted_buffer.data(), bytes_read, working_buffer.begin());
 
     pos = working_buffer.begin();
-    initialized = true;
+    return true;
 }
 
 }
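The rewritten seek() never re-reads or re-decrypts data just to move the position: it either repositions `pos` inside the current working buffer, or records `need_seek` and lets the next `nextImpl()` issue one raw seek past the stored init vector. The only cryptographic state to fix up is the encryptor's offset, because in a CTR-style scheme the keystream for byte `o` depends only on the key and on `iv + o / block_size`. Below is a minimal sketch of that offset computation, assuming OpenSSL's EVP API and AES-128-CTR with a 16-byte big-endian counter; it is an illustration of the general technique, not a copy of the FileEncryption code.

    #include <openssl/evp.h>
    #include <cstdint>
    #include <cstring>

    /// Illustration: position an AES-128-CTR cipher at an arbitrary byte offset.
    /// The counter block for offset `o` is base_iv + o / 16 (big-endian addition);
    /// the first o % 16 keystream bytes of that block must then be discarded.
    static void seekCtr(EVP_CIPHER_CTX * ctx, const uint8_t key[16], const uint8_t base_iv[16], uint64_t offset)
    {
        uint8_t iv[16];
        std::memcpy(iv, base_iv, 16);

        /// Add offset / 16 to the big-endian 128-bit counter, byte by byte with carry.
        uint64_t blocks = offset / 16;
        for (int i = 15; i >= 0 && blocks != 0; --i)
        {
            uint64_t sum = iv[i] + (blocks & 0xff);
            iv[i] = static_cast<uint8_t>(sum);
            blocks = (blocks >> 8) + (sum >> 8);
        }

        EVP_EncryptInit_ex(ctx, EVP_aes_128_ctr(), nullptr, key, iv);

        /// Discard the keystream bytes that precede `offset` within its block.
        uint8_t scratch[16] = {};
        int len = 0;
        EVP_EncryptUpdate(ctx, scratch, &len, scratch, static_cast<int>(offset % 16));
    }

This is why `encryptor.setOffset(new_pos)` is cheap enough to call on every seek: no data has to be re-processed, only the counter recomputed.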
@@ -12,39 +12,33 @@
 namespace DB
 {
 
+/// Reads data from the underlying read buffer and decrypts it.
 class ReadBufferFromEncryptedFile : public ReadBufferFromFileBase
 {
 public:
     ReadBufferFromEncryptedFile(
-        size_t buf_size_,
+        size_t buffer_size_,
         std::unique_ptr<ReadBufferFromFileBase> in_,
-        const String & init_vector_,
-        const FileEncryption::EncryptionKey & key_,
-        const size_t iv_offset_);
+        const String & key_,
+        const FileEncryption::InitVector & init_vector_);
 
     off_t seek(off_t off, int whence) override;
-
-    off_t getPosition() override { return start_pos + offset(); }
+    off_t getPosition() override;
 
     std::string getFileName() const override { return in->getFileName(); }
 
 private:
     bool nextImpl() override;
 
-    void initialize();
-
     std::unique_ptr<ReadBufferFromFileBase> in;
-    size_t buf_size;
 
-    FileEncryption::Decryptor decryptor;
-    bool initialized = false;
+    off_t offset = 0;
+    bool need_seek = false;
 
-    // current working_buffer.begin() offset from decrypted file
-    size_t start_pos = 0;
-    size_t iv_offset = 0;
+    Memory<> encrypted_buffer;
+    FileEncryption::Encryptor encryptor;
 };
 
 }
 
 #endif
@@ -6,18 +6,21 @@
 namespace DB
 {
 
+using InitVector = FileEncryption::InitVector;
+
 WriteBufferFromEncryptedFile::WriteBufferFromEncryptedFile(
-    size_t buf_size_,
+    size_t buffer_size_,
     std::unique_ptr<WriteBufferFromFileBase> out_,
-    const String & init_vector_,
-    const FileEncryption::EncryptionKey & key_,
-    const size_t & file_size)
-    : WriteBufferFromFileBase(buf_size_, nullptr, 0)
+    const String & key_,
+    const InitVector & init_vector_,
+    size_t old_file_size)
+    : WriteBufferFromFileBase(buffer_size_, nullptr, 0)
     , out(std::move(out_))
-    , flush_iv(!file_size)
     , iv(init_vector_)
-    , encryptor(FileEncryption::Encryptor(init_vector_, key_, file_size))
+    , flush_iv(!old_file_size)
+    , encryptor(key_, init_vector_)
 {
+    encryptor.setOffset(old_file_size);
 }
 
 WriteBufferFromEncryptedFile::~WriteBufferFromEncryptedFile()
@@ -51,6 +54,11 @@ void WriteBufferFromEncryptedFile::finishImpl()
 {
     /// If buffer has pending data - write it.
     next();
 
+    /// Note that if there is no data to write an empty file will be written, even without the initialization vector
+    /// (see nextImpl(): it writes the initialization vector only if there is some data ready to write).
+    /// That's fine because DiskEncrypted allows files without initialization vectors when they're empty.
+
     out->finalize();
 }
 
@@ -58,6 +66,7 @@ void WriteBufferFromEncryptedFile::sync()
 {
     /// If buffer has pending data - write it.
     next();
+
     out->sync();
 }
 
@@ -68,12 +77,13 @@ void WriteBufferFromEncryptedFile::nextImpl()
 
     if (flush_iv)
     {
-        FileEncryption::writeIV(iv, *out);
+        iv.write(*out);
         flush_iv = false;
     }
 
-    encryptor.encrypt(working_buffer.begin(), *out, offset());
+    encryptor.encrypt(working_buffer.begin(), offset(), *out);
 }
 
 }
 
 #endif
@@ -12,15 +12,17 @@
 namespace DB
 {
 
+/// Encrypts data and writes the encrypted data to the underlying write buffer.
 class WriteBufferFromEncryptedFile : public WriteBufferFromFileBase
 {
 public:
+    /// `old_file_size` should be set to a non-zero value if we are going to append to an existing file.
     WriteBufferFromEncryptedFile(
-        size_t buf_size_,
+        size_t buffer_size_,
         std::unique_ptr<WriteBufferFromFileBase> out_,
-        const String & init_vector_,
-        const FileEncryption::EncryptionKey & key_,
-        const size_t & file_size);
+        const String & key_,
+        const FileEncryption::InitVector & init_vector_,
+        size_t old_file_size = 0);
     ~WriteBufferFromEncryptedFile() override;
 
     void sync() override;
@@ -37,8 +39,9 @@ private:
     bool finished = false;
     std::unique_ptr<WriteBufferFromFileBase> out;
 
-    bool flush_iv;
-    String iv;
+    FileEncryption::InitVector iv;
+    bool flush_iv = false;
 
     FileEncryption::Encryptor encryptor;
 };
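A hypothetical call site for the new constructor, to make the parameter change concrete. `WriteBufferFromFile` and `DBMS_DEFAULT_BUFFER_SIZE` are the usual ClickHouse names; the file path, the already-constructed `FileEncryption::InitVector` named `iv`, `chunk`, and `existing_size` are invented for this example.

    /// Sketch: append to an already-encrypted file. Passing the existing payload
    /// size as `old_file_size` makes the encryptor continue its keystream where
    /// the previous writer stopped, and (since flush_iv = !old_file_size) avoids
    /// writing the init vector a second time.
    auto sink = std::make_unique<WriteBufferFromFile>("/tmp/data.enc");  /// hypothetical path
    String key = "1234567812345678";                                     /// 16 bytes -> 128-bit key
    WriteBufferFromEncryptedFile buf(DBMS_DEFAULT_BUFFER_SIZE, std::move(sink), key, iv, existing_size);
    buf.write(chunk.data(), chunk.size());
    buf.sync();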
@@ -15,25 +15,15 @@ struct InitVectorTestParam
 {
     const std::string_view comment;
     const String init;
-    UInt128 adder;
-    UInt128 setter;
     const String after_inc;
+    UInt64 adder;
     const String after_add;
-    const String after_set;
 };
 
 
 class InitVectorTest : public ::testing::TestWithParam<InitVectorTestParam> {};
 
 
-String string_ends_with(size_t size, String str)
-{
-    String res(size, 0);
-    res.replace(size - str.size(), str.size(), str);
-    return res;
-}
-
-
 static std::ostream & operator << (std::ostream & ostr, const InitVectorTestParam & param)
 {
     return ostr << param.comment;
@@ -44,20 +34,14 @@ TEST_P(InitVectorTest, InitVector)
 {
     const auto & param = GetParam();
 
-    auto iv = InitVector(param.init);
-    ASSERT_EQ(param.init, iv.str());
+    auto iv = InitVector::fromString(param.init);
+    ASSERT_EQ(param.init, iv.toString());
 
-    iv.inc();
-    ASSERT_EQ(param.after_inc, iv.str());
+    ++iv;
+    ASSERT_EQ(param.after_inc, iv.toString());
 
-    iv.inc(param.adder);
-    ASSERT_EQ(param.after_add, iv.str());
-
-    iv.set(param.setter);
-    ASSERT_EQ(param.after_set, iv.str());
-
-    iv.set(0);
-    ASSERT_EQ(param.init, iv.str());
+    iv += param.adder;
+    ASSERT_EQ(param.after_add, iv.toString());
 }
 
 
@@ -65,31 +49,32 @@ INSTANTIATE_TEST_SUITE_P(InitVectorInputs,
     InitVectorTest,
     ::testing::ValuesIn(std::initializer_list<InitVectorTestParam>{
         {
-            "Basic init vector test. Get zero-string, add 0, set 0",
+            "Basic init vector test. Get zero-string, add 1, add 0",
             String(16, 0),
+            String("\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01", 16),
             0,
-            0,
-            string_ends_with(16, "\x1"),
-            string_ends_with(16, "\x1"),
-            String(16, 0),
+            String("\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01", 16),
         },
         {
-            "Init vector test. Get zero-string, add 85, set 1024",
+            "Init vector test. Get zero-string, add 1, add 85, add 1024",
             String(16, 0),
+            String("\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01", 16),
             85,
+            String("\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x56", 16),
+        },
+        {
+            "Init vector test #2. Get zero-string, add 1, add 1024",
+            String(16, 0),
+            String("\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01", 16),
             1024,
-            string_ends_with(16, "\x1"),
-            string_ends_with(16, "\x56"),
-            string_ends_with(16, String("\x4\0", 2)),
+            String("\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x01", 16)
         },
         {
             "Long init vector test",
-            "\xa8\x65\x9c\x73\xf8\x5d\x83\xb4\x5c\xa6\x8c\x19\xf4\x77\x80\xe1",
-            3349249125638641,
-            1698923461902341,
-            "\xa8\x65\x9c\x73\xf8\x5d\x83\xb4\x5c\xa6\x8c\x19\xf4\x77\x80\xe2",
-            "\xa8\x65\x9c\x73\xf8\x5d\x83\xb4\x5c\xb2\x72\x39\xc8\xdd\x62\xd3",
-            String("\xa8\x65\x9c\x73\xf8\x5d\x83\xb4\x5c\xac\x95\x43\x65\xea\x00\xe6", 16)
+            String("\xa8\x65\x9c\x73\xf8\x5d\x83\xb4\x9c\xa6\x8c\x19\xf4\x77\x80\xe1", 16),
+            String("\xa8\x65\x9c\x73\xf8\x5d\x83\xb4\x9c\xa6\x8c\x19\xf4\x77\x80\xe2", 16),
+            9349249176525638641ULL,
+            String("\xa8\x65\x9c\x73\xf8\x5d\x83\xb5\x1e\x65\xc0\xb1\x67\xe4\x0c\xd3", 16)
         },
     })
 );
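The new test expectations pin down the `InitVector` semantics: a 16-byte big-endian counter where `++iv` adds one and `iv += n` adds `n`. Here is a self-contained sketch with the same observable behaviour; the real class presumably stores a UInt128, while this illustration uses `unsigned __int128` and ignores overflow past 128 bits.

    #include <cstdint>
    #include <string>

    /// Sketch of a 16-byte big-endian counter with the test's semantics:
    /// fromString/toString round-trip, ++ and += advance the counter.
    struct InitVectorSketch
    {
        unsigned __int128 value = 0;

        static InitVectorSketch fromString(const std::string & s)  /// expects 16 bytes
        {
            InitVectorSketch iv;
            for (unsigned char c : s)
                iv.value = (iv.value << 8) | c;
            return iv;
        }

        std::string toString() const
        {
            std::string s(16, '\0');
            auto v = value;
            for (int i = 15; i >= 0; --i, v >>= 8)
                s[i] = static_cast<char>(v & 0xff);
            return s;
        }

        InitVectorSketch & operator++() { ++value; return *this; }
        InitVectorSketch & operator+=(uint64_t n) { value += n; return *this; }
    };

For example, starting from String(16, 0), `++` yields a string ending in \x01, and a subsequent `+= 1024` yields one ending in \x04\x01 (1025 = 0x401), exactly as asserted in the test table above.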
@@ -97,53 +82,87 @@ INSTANTIATE_TEST_SUITE_P(InitVectorInputs,
 
 TEST(FileEncryption, Encryption)
 {
-    String iv(16, 0);
-    EncryptionKey key("1234567812345678");
-    String input = "abcd1234efgh5678ijkl";
-    String expected = "\xfb\x8a\x9e\x66\x82\x72\x1b\xbe\x6b\x1d\xd8\x98\xc5\x8c\x63\xee\xcd\x36\x4a\x50";
+    String key = "1234567812345678";
+    InitVector iv;
+    Encryptor encryptor{key, iv};
 
-    String result(expected.size(), 0);
+    std::string_view input = "abcd1234efgh5678ijkl";
+    std::string_view expected = "\xfb\x8a\x9e\x66\x82\x72\x1b\xbe\x6b\x1d\xd8\x98\xc5\x8c\x63\xee\xcd\x36\x4a\x50";
+
+    for (size_t i = 0; i < expected.size(); ++i)
+    {
+        WriteBufferFromOwnString buf;
+        encryptor.encrypt(&input[i], 1, buf);
+        ASSERT_EQ(expected.substr(i, 1), buf.str());
+    }
+
+    for (size_t i = 0; i < expected.size(); ++i)
+    {
+        WriteBufferFromOwnString buf;
+        encryptor.setOffset(i);
+        encryptor.encrypt(&input[i], 1, buf);
+        ASSERT_EQ(expected.substr(i, 1), buf.str());
+    }
+
     for (size_t i = 0; i <= expected.size(); ++i)
     {
-        auto buf = WriteBufferFromString(result);
-        auto encryptor = Encryptor(iv, key, 0);
-        encryptor.encrypt(input.data(), buf, i);
-        ASSERT_EQ(expected.substr(0, i), result.substr(0, i));
+        WriteBufferFromOwnString buf;
+        encryptor.setOffset(0);
+        encryptor.encrypt(input.data(), i, buf);
+        ASSERT_EQ(expected.substr(0, i), buf.str());
     }
 
     size_t offset = 25;
-    String offset_expected = "\x6c\x67\xe4\xf5\x8f\x86\xb0\x19\xe5\xcd\x53\x59\xe0\xc6\x01\x5e\xc1\xfd\x60\x9d";
+    std::string_view offset_expected = "\x6c\x67\xe4\xf5\x8f\x86\xb0\x19\xe5\xcd\x53\x59\xe0\xc6\x01\x5e\xc1\xfd\x60\x9d";
     for (size_t i = 0; i <= expected.size(); ++i)
     {
-        auto buf = WriteBufferFromString(result);
-        auto encryptor = Encryptor(iv, key, offset);
-        encryptor.encrypt(input.data(), buf, i);
-        ASSERT_EQ(offset_expected.substr(0, i), result.substr(0, i));
+        WriteBufferFromOwnString buf;
+        encryptor.setOffset(offset);
+        encryptor.encrypt(input.data(), i, buf);
+        ASSERT_EQ(offset_expected.substr(0, i), buf.str());
     }
 }
 
 
 TEST(FileEncryption, Decryption)
 {
-    String iv(16, 0);
-    EncryptionKey key("1234567812345678");
-    String expected = "abcd1234efgh5678ijkl";
-    String input = "\xfb\x8a\x9e\x66\x82\x72\x1b\xbe\x6b\x1d\xd8\x98\xc5\x8c\x63\xee\xcd\x36\x4a\x50";
-    auto decryptor = Decryptor(iv, key);
-    String result(expected.size(), 0);
+    String key("1234567812345678");
+    InitVector iv;
+    Encryptor encryptor{key, iv};
 
+    std::string_view input = "\xfb\x8a\x9e\x66\x82\x72\x1b\xbe\x6b\x1d\xd8\x98\xc5\x8c\x63\xee\xcd\x36\x4a\x50";
+    std::string_view expected = "abcd1234efgh5678ijkl";
+
+    for (size_t i = 0; i < expected.size(); ++i)
+    {
+        char c;
+        encryptor.decrypt(&input[i], 1, &c);
+        ASSERT_EQ(expected[i], c);
+    }
+
+    for (size_t i = 0; i < expected.size(); ++i)
+    {
+        char c;
+        encryptor.setOffset(i);
+        encryptor.decrypt(&input[i], 1, &c);
+        ASSERT_EQ(expected[i], c);
+    }
+
+    String buf(expected.size(), 0);
     for (size_t i = 0; i <= expected.size(); ++i)
     {
-        decryptor.decrypt(input.data(), result.data(), i, 0);
-        ASSERT_EQ(expected.substr(0, i), result.substr(0, i));
+        encryptor.setOffset(0);
+        encryptor.decrypt(input.data(), i, buf.data());
+        ASSERT_EQ(expected.substr(0, i), buf.substr(0, i));
     }
 
     size_t offset = 25;
     String offset_input = "\x6c\x67\xe4\xf5\x8f\x86\xb0\x19\xe5\xcd\x53\x59\xe0\xc6\x01\x5e\xc1\xfd\x60\x9d";
     for (size_t i = 0; i <= expected.size(); ++i)
     {
-        decryptor.decrypt(offset_input.data(), result.data(), i, offset);
-        ASSERT_EQ(expected.substr(0, i), result.substr(0, i));
+        encryptor.setOffset(offset);
+        encryptor.decrypt(offset_input.data(), i, buf.data());
+        ASSERT_EQ(expected.substr(0, i), buf.substr(0, i));
    }
 }
@@ -18,6 +18,8 @@ using Pipes = std::vector<Pipe>;
 class QueryPlan;
 using QueryPlanPtr = std::unique_ptr<QueryPlan>;
 
+struct StorageID;
+
 namespace ClusterProxy
 {
 
@@ -28,15 +30,31 @@ class IStreamFactory
 public:
     virtual ~IStreamFactory() = default;
 
+    struct Shard
+    {
+        /// Query and header may be changed depending on shard.
+        ASTPtr query;
+        Block header;
+
+        size_t shard_num = 0;
+        ConnectionPoolWithFailoverPtr pool;
+
+        /// If we connect to replicas lazily.
+        /// (When there is a local replica with big delay).
+        bool lazy = false;
+        UInt32 local_delay = 0;
+    };
+
+    using Shards = std::vector<Shard>;
+
     virtual void createForShard(
         const Cluster::ShardInfo & shard_info,
         const ASTPtr & query_ast,
-        ContextPtr context, const ThrottlerPtr & throttler,
-        const SelectQueryInfo & query_info,
-        std::vector<QueryPlanPtr> & res,
-        Pipes & remote_pipes,
-        Pipes & delayed_pipes,
-        Poco::Logger * log) = 0;
+        const StorageID & main_table,
+        const ASTPtr & table_func_ptr,
+        ContextPtr context,
+        std::vector<QueryPlanPtr> & local_plans,
+        Shards & remote_shards) = 0;
 };
 
 }
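The factory now only describes each shard; it no longer builds pipes itself. The `Shards` it fills in are consumed by the new `ReadFromRemote` plan step, whose `initializePipeline` (added later in this commit) dispatches on the `lazy` flag:

    /// Quoted in condensed form from ReadFromRemote::initializePipeline below:
    Pipes pipes;
    for (const auto & shard : shards)
    {
        if (shard.lazy)
            addLazyPipe(pipes, shard);   /// connect on first read; may fall back to a stale local replica
        else
            addPipe(pipes, shard);       /// build a RemoteQueryExecutor pipe immediately
    }
    pipeline.init(Pipe::unitePipes(std::move(pipes)));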
@@ -1,6 +1,5 @@
 #include <Interpreters/ClusterProxy/SelectStreamFactory.h>
 #include <Interpreters/InterpreterSelectQuery.h>
-#include <DataStreams/RemoteBlockInputStream.h>
 #include <Storages/StorageReplicatedMergeTree.h>
 #include <Storages/VirtualColumnUtils.h>
 #include <Common/Exception.h>
@@ -11,10 +10,6 @@
 #include <Interpreters/RequiredSourceColumnsVisitor.h>
 
 #include <common/logger_useful.h>
-#include <Processors/Pipe.h>
-#include <Processors/Sources/RemoteSource.h>
-#include <Processors/Sources/DelayedSource.h>
-#include <Processors/Transforms/ExpressionTransform.h>
 #include <Processors/QueryPlan/QueryPlan.h>
 #include <Processors/QueryPlan/ExpressionStep.h>
 #include <Processors/QueryPlan/BuildQueryPipelineSettings.h>
@@ -32,7 +27,6 @@ namespace DB
 
 namespace ErrorCodes
 {
-    extern const int ALL_CONNECTION_TRIES_FAILED;
     extern const int ALL_REPLICAS_ARE_STALE;
 }
 
@@ -42,35 +36,13 @@ namespace ClusterProxy
 SelectStreamFactory::SelectStreamFactory(
     const Block & header_,
     QueryProcessingStage::Enum processed_stage_,
-    StorageID main_table_,
-    const Scalars & scalars_,
-    bool has_virtual_shard_num_column_,
-    const Tables & external_tables_)
+    bool has_virtual_shard_num_column_)
     : header(header_),
     processed_stage{processed_stage_},
-    main_table(std::move(main_table_)),
-    table_func_ptr{nullptr},
-    scalars{scalars_},
-    has_virtual_shard_num_column(has_virtual_shard_num_column_),
-    external_tables{external_tables_}
-{
-}
-
-SelectStreamFactory::SelectStreamFactory(
-    const Block & header_,
-    QueryProcessingStage::Enum processed_stage_,
-    ASTPtr table_func_ptr_,
-    const Scalars & scalars_,
-    bool has_virtual_shard_num_column_,
-    const Tables & external_tables_)
-    : header(header_),
-    processed_stage{processed_stage_},
-    table_func_ptr{table_func_ptr_},
-    scalars{scalars_},
-    has_virtual_shard_num_column(has_virtual_shard_num_column_),
-    external_tables{external_tables_}
+    has_virtual_shard_num_column(has_virtual_shard_num_column_)
 {
 }
 
 namespace
 {
@@ -152,18 +124,6 @@ void addConvertingActions(QueryPlan & plan, const Block & header)
     plan.addStep(std::move(converting));
 }
 
-void addConvertingActions(Pipe & pipe, const Block & header)
-{
-    if (blocksHaveEqualStructure(pipe.getHeader(), header))
-        return;
-
-    auto convert_actions = std::make_shared<ExpressionActions>(getConvertingDAG(pipe.getHeader(), header));
-    pipe.addSimpleTransform([&](const Block & cur_header, Pipe::StreamType) -> ProcessorPtr
-    {
-        return std::make_shared<ExpressionTransform>(cur_header, convert_actions);
-    });
-}
-
 std::unique_ptr<QueryPlan> createLocalPlan(
     const ASTPtr & query_ast,
     const Block & header,
@@ -182,37 +142,17 @@ std::unique_ptr<QueryPlan> createLocalPlan(
     return query_plan;
 }
 
-String formattedAST(const ASTPtr & ast)
-{
-    if (!ast)
-        return {};
-    WriteBufferFromOwnString buf;
-    formatAST(*ast, buf, false, true);
-    return buf.str();
-}
-
 }
 
 void SelectStreamFactory::createForShard(
     const Cluster::ShardInfo & shard_info,
     const ASTPtr & query_ast,
-    ContextPtr context, const ThrottlerPtr & throttler,
-    const SelectQueryInfo &,
-    std::vector<QueryPlanPtr> & plans,
-    Pipes & remote_pipes,
-    Pipes & delayed_pipes,
-    Poco::Logger * log)
+    const StorageID & main_table,
+    const ASTPtr & table_func_ptr,
+    ContextPtr context,
+    std::vector<QueryPlanPtr> & local_plans,
+    Shards & remote_shards)
 {
-    bool add_agg_info = processed_stage == QueryProcessingStage::WithMergeableState;
-    bool add_totals = false;
-    bool add_extremes = false;
-    bool async_read = context->getSettingsRef().async_socket_for_remote;
-    if (processed_stage == QueryProcessingStage::Complete)
-    {
-        add_totals = query_ast->as<ASTSelectQuery &>().group_by_with_totals;
-        add_extremes = context->getSettingsRef().extremes;
-    }
-
     auto modified_query_ast = query_ast->clone();
     auto modified_header = header;
     if (has_virtual_shard_num_column)
@@ -231,25 +171,19 @@ void SelectStreamFactory::createForShard(
 
     auto emplace_local_stream = [&]()
     {
-        plans.emplace_back(createLocalPlan(modified_query_ast, modified_header, context, processed_stage));
-        addConvertingActions(*plans.back(), header);
+        local_plans.emplace_back(createLocalPlan(modified_query_ast, modified_header, context, processed_stage));
+        addConvertingActions(*local_plans.back(), header);
     };
 
-    String modified_query = formattedAST(modified_query_ast);
-
     auto emplace_remote_stream = [&]()
     {
-        auto remote_query_executor = std::make_shared<RemoteQueryExecutor>(
-            shard_info.pool, modified_query, modified_header, context, throttler, scalars, external_tables, processed_stage);
-        remote_query_executor->setLogger(log);
-
-        remote_query_executor->setPoolMode(PoolMode::GET_MANY);
-        if (!table_func_ptr)
-            remote_query_executor->setMainTable(main_table);
-
-        remote_pipes.emplace_back(createRemoteSourcePipe(remote_query_executor, add_agg_info, add_totals, add_extremes, async_read));
-        remote_pipes.back().addInterpreterContext(context);
-        addConvertingActions(remote_pipes.back(), header);
+        remote_shards.emplace_back(Shard{
+            .query = modified_query_ast,
+            .header = modified_header,
+            .shard_num = shard_info.shard_num,
+            .pool = shard_info.pool,
+            .lazy = false
+        });
     };
 
     const auto & settings = context->getSettingsRef();
@@ -340,65 +274,14 @@ void SelectStreamFactory::createForShard(
         /// Try our luck with remote replicas, but if they are stale too, then fallback to local replica.
         /// Do it lazily to avoid connecting in the main thread.
 
-        auto lazily_create_stream = [
-            pool = shard_info.pool, shard_num = shard_info.shard_num, modified_query, header = modified_header, modified_query_ast,
-            context, throttler,
-            main_table = main_table, table_func_ptr = table_func_ptr, scalars = scalars, external_tables = external_tables,
-            stage = processed_stage, local_delay, add_agg_info, add_totals, add_extremes, async_read]()
-            -> Pipe
-        {
-            auto current_settings = context->getSettingsRef();
-            auto timeouts = ConnectionTimeouts::getTCPTimeoutsWithFailover(
-                current_settings).getSaturated(
-                current_settings.max_execution_time);
-            std::vector<ConnectionPoolWithFailover::TryResult> try_results;
-            try
-            {
-                if (table_func_ptr)
-                    try_results = pool->getManyForTableFunction(timeouts, &current_settings, PoolMode::GET_MANY);
-                else
-                    try_results = pool->getManyChecked(timeouts, &current_settings, PoolMode::GET_MANY, main_table.getQualifiedName());
-            }
-            catch (const Exception & ex)
-            {
-                if (ex.code() == ErrorCodes::ALL_CONNECTION_TRIES_FAILED)
-                    LOG_WARNING(&Poco::Logger::get("ClusterProxy::SelectStreamFactory"),
-                        "Connections to remote replicas of local shard {} failed, will use stale local replica", shard_num);
-                else
-                    throw;
-            }
-
-            double max_remote_delay = 0.0;
-            for (const auto & try_result : try_results)
-            {
-                if (!try_result.is_up_to_date)
-                    max_remote_delay = std::max(try_result.staleness, max_remote_delay);
-            }
-
-            if (try_results.empty() || local_delay < max_remote_delay)
-            {
-                auto plan = createLocalPlan(modified_query_ast, header, context, stage);
-                return QueryPipeline::getPipe(std::move(*plan->buildQueryPipeline(
-                    QueryPlanOptimizationSettings::fromContext(context),
-                    BuildQueryPipelineSettings::fromContext(context))));
-            }
-            else
-            {
-                std::vector<IConnectionPool::Entry> connections;
-                connections.reserve(try_results.size());
-                for (auto & try_result : try_results)
-                    connections.emplace_back(std::move(try_result.entry));
-
-                auto remote_query_executor = std::make_shared<RemoteQueryExecutor>(
-                    std::move(connections), modified_query, header, context, throttler, scalars, external_tables, stage);
-
-                return createRemoteSourcePipe(remote_query_executor, add_agg_info, add_totals, add_extremes, async_read);
-            }
-        };
-
-        delayed_pipes.emplace_back(createDelayedPipe(modified_header, lazily_create_stream, add_totals, add_extremes));
-        delayed_pipes.back().addInterpreterContext(context);
-        addConvertingActions(delayed_pipes.back(), header);
+        remote_shards.emplace_back(Shard{
+            .query = modified_query_ast,
+            .header = modified_header,
+            .shard_num = shard_info.shard_num,
+            .pool = shard_info.pool,
+            .lazy = true,
+            .local_delay = local_delay
+        });
     }
     else
        emplace_remote_stream();
@@ -14,42 +14,25 @@ namespace ClusterProxy
 class SelectStreamFactory final : public IStreamFactory
 {
 public:
-    /// Database in a query.
     SelectStreamFactory(
         const Block & header_,
         QueryProcessingStage::Enum processed_stage_,
-        StorageID main_table_,
-        const Scalars & scalars_,
-        bool has_virtual_shard_num_column_,
-        const Tables & external_tables);
-
-    /// TableFunction in a query.
-    SelectStreamFactory(
-        const Block & header_,
-        QueryProcessingStage::Enum processed_stage_,
-        ASTPtr table_func_ptr_,
-        const Scalars & scalars_,
-        bool has_virtual_shard_num_column_,
-        const Tables & external_tables_);
+        bool has_virtual_shard_num_column_);
 
     void createForShard(
         const Cluster::ShardInfo & shard_info,
         const ASTPtr & query_ast,
-        ContextPtr context, const ThrottlerPtr & throttler,
-        const SelectQueryInfo & query_info,
-        std::vector<QueryPlanPtr> & plans,
-        Pipes & remote_pipes,
-        Pipes & delayed_pipes,
-        Poco::Logger * log) override;
+        const StorageID & main_table,
+        const ASTPtr & table_func_ptr,
+        ContextPtr context,
+        std::vector<QueryPlanPtr> & local_plans,
+        Shards & remote_shards) override;
 
 private:
     const Block header;
     QueryProcessingStage::Enum processed_stage;
-    StorageID main_table = StorageID::createEmpty();
-    ASTPtr table_func_ptr;
-    Scalars scalars;
     bool has_virtual_shard_num_column = false;
-    Tables external_tables;
 };
 
 }
@@ -8,7 +8,7 @@
 #include <Interpreters/OptimizeShardingKeyRewriteInVisitor.h>
 #include <Processors/Pipe.h>
 #include <Processors/QueryPlan/QueryPlan.h>
-#include <Processors/QueryPlan/ReadFromPreparedSource.h>
+#include <Processors/QueryPlan/ReadFromRemote.h>
 #include <Processors/QueryPlan/UnionStep.h>
 #include <Storages/SelectQueryInfo.h>
 
@@ -101,6 +101,10 @@ ContextMutablePtr updateSettingsForCluster(const Cluster & cluster, ContextPtr c
 
 void executeQuery(
     QueryPlan & query_plan,
+    const Block & header,
+    QueryProcessingStage::Enum processed_stage,
+    const StorageID & main_table,
+    const ASTPtr & table_func_ptr,
     IStreamFactory & stream_factory, Poco::Logger * log,
     const ASTPtr & query_ast, ContextPtr context, const SelectQueryInfo & query_info,
     const ExpressionActionsPtr & sharding_key_expr,
@@ -115,8 +119,7 @@ void executeQuery(
         throw Exception("Maximum distributed depth exceeded", ErrorCodes::TOO_LARGE_DISTRIBUTED_DEPTH);
 
     std::vector<QueryPlanPtr> plans;
-    Pipes remote_pipes;
-    Pipes delayed_pipes;
+    IStreamFactory::Shards remote_shards;
 
     auto new_context = updateSettingsForCluster(*query_info.getCluster(), context, settings, log);
 
@@ -161,29 +164,33 @@ void executeQuery(
             query_ast_for_shard = query_ast;
 
         stream_factory.createForShard(shard_info,
-            query_ast_for_shard,
-            new_context, throttler, query_info, plans,
-            remote_pipes, delayed_pipes, log);
+            query_ast_for_shard, main_table, table_func_ptr,
+            new_context, plans, remote_shards);
     }
 
-    if (!remote_pipes.empty())
+    if (!remote_shards.empty())
     {
+        const Scalars & scalars = context->hasQueryContext() ? context->getQueryContext()->getScalars() : Scalars{};
+        auto external_tables = context->getExternalTables();
+
         auto plan = std::make_unique<QueryPlan>();
-        auto read_from_remote = std::make_unique<ReadFromPreparedSource>(Pipe::unitePipes(std::move(remote_pipes)));
+        auto read_from_remote = std::make_unique<ReadFromRemote>(
+            std::move(remote_shards),
+            header,
+            processed_stage,
+            main_table,
+            table_func_ptr,
+            new_context,
+            throttler,
+            scalars,
+            std::move(external_tables),
+            log);
+
         read_from_remote->setStepDescription("Read from remote replica");
         plan->addStep(std::move(read_from_remote));
         plans.emplace_back(std::move(plan));
     }
 
-    if (!delayed_pipes.empty())
-    {
-        auto plan = std::make_unique<QueryPlan>();
-        auto read_from_remote = std::make_unique<ReadFromPreparedSource>(Pipe::unitePipes(std::move(delayed_pipes)));
-        read_from_remote->setStepDescription("Read from delayed local replica");
-        plan->addStep(std::move(read_from_remote));
-        plans.emplace_back(std::move(plan));
-    }
-
     if (plans.empty())
         return;
 
@@ -1,6 +1,7 @@
 #pragma once
 
 #include <Interpreters/Context_fwd.h>
+#include <Core/QueryProcessingStage.h>
 #include <Parsers/IAST.h>
 
 namespace DB
@@ -17,6 +18,8 @@ class QueryPlan;
 class ExpressionActions;
 using ExpressionActionsPtr = std::shared_ptr<ExpressionActions>;
 
+struct StorageID;
+
 namespace ClusterProxy
 {
 
@@ -38,6 +41,10 @@ ContextMutablePtr updateSettingsForCluster(const Cluster & cluster, ContextPtr c
 /// (currently SELECT, DESCRIBE).
 void executeQuery(
     QueryPlan & query_plan,
+    const Block & header,
+    QueryProcessingStage::Enum processed_stage,
+    const StorageID & main_table,
+    const ASTPtr & table_func_ptr,
     IStreamFactory & stream_factory, Poco::Logger * log,
     const ASTPtr & query_ast, ContextPtr context, const SelectQueryInfo & query_info,
     const ExpressionActionsPtr & sharding_key_expr,
@@ -157,9 +157,14 @@ void ExecuteScalarSubqueriesMatcher::visit(const ASTSubquery & subquery, ASTPtr
     if (data.only_analyze || !settings.enable_scalar_subquery_optimization || worthConvertingToLiteral(scalar)
         || !data.getContext()->hasQueryContext())
     {
+        /// subquery and ast can be the same object and ast will be moved.
+        /// Save these fields to avoid use after move.
+        auto alias = subquery.alias;
+        auto prefer_alias_to_column_name = subquery.prefer_alias_to_column_name;
+
         auto lit = std::make_unique<ASTLiteral>((*scalar.safeGetByPosition(0).column)[0]);
-        lit->alias = subquery.alias;
-        lit->prefer_alias_to_column_name = subquery.prefer_alias_to_column_name;
+        lit->alias = alias;
+        lit->prefer_alias_to_column_name = prefer_alias_to_column_name;
         ast = addTypeConversionToAST(std::move(lit), scalar.safeGetByPosition(0).type->getName());
 
         /// If only analyze was requested the expression is not suitable for constant folding, disable it.
@@ -167,8 +172,8 @@ void ExecuteScalarSubqueriesMatcher::visit(const ASTSubquery & subquery, ASTPtr
         {
             ast->as<ASTFunction>()->alias.clear();
             auto func = makeASTFunction("identity", std::move(ast));
-            func->alias = subquery.alias;
-            func->prefer_alias_to_column_name = subquery.prefer_alias_to_column_name;
+            func->alias = alias;
+            func->prefer_alias_to_column_name = prefer_alias_to_column_name;
             ast = std::move(func);
         }
     }
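The two hunks above fix a use-after-move: `subquery` and `ast` can refer to the same node, and the old code read `subquery.alias` after `ast` had already been moved into the new literal or `identity` wrapper. The hazard, distilled to a standalone example with a hypothetical `Node` type and shared-pointer semantics like `ASTPtr`:

    #include <memory>
    #include <string>

    struct Node { std::string alias; };
    using NodePtr = std::shared_ptr<Node>;

    /// `subquery` may alias *ast. Reassigning `ast` can drop the last reference
    /// to the old node, so reading subquery.alias afterwards would be a
    /// use-after-free. Copy the fields out first, as the patch does.
    void rewrite(NodePtr & ast, const Node & subquery)
    {
        auto alias = subquery.alias;     /// save before `ast` is replaced

        ast = std::make_shared<Node>();  /// old node may be destroyed here
        ast->alias = alias;              /// safe: uses the saved copy
    }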
src/Processors/QueryPlan/ReadFromRemote.cpp (new file, 228 lines)
@@ -0,0 +1,228 @@
+#include <Processors/QueryPlan/ReadFromRemote.h>
+#include <Processors/QueryPlan/ExpressionStep.h>
+#include <Processors/QueryPlan/Optimizations/QueryPlanOptimizationSettings.h>
+#include <DataStreams/RemoteQueryExecutor.h>
+#include <Parsers/ASTSelectQuery.h>
+#include <Parsers/formatAST.h>
+#include <Processors/Sources/RemoteSource.h>
+#include <Processors/Sources/DelayedSource.h>
+#include <Processors/Transforms/ExpressionTransform.h>
+#include <Interpreters/ActionsDAG.h>
+#include <Interpreters/InterpreterSelectQuery.h>
+#include <IO/ConnectionTimeoutsContext.h>
+#include <Common/checkStackSize.h>
+
+namespace DB
+{
+
+namespace ErrorCodes
+{
+    extern const int ALL_CONNECTION_TRIES_FAILED;
+}
+
+static ActionsDAGPtr getConvertingDAG(const Block & block, const Block & header)
+{
+    /// Convert header structure to expected.
+    /// Also we ignore constants from result and replace it with constants from header.
+    /// It is needed for functions like `now64()` or `randConstant()` because their values may be different.
+    return ActionsDAG::makeConvertingActions(
+        block.getColumnsWithTypeAndName(),
+        header.getColumnsWithTypeAndName(),
+        ActionsDAG::MatchColumnsMode::Name,
+        true);
+}
+
+void addConvertingActions(QueryPlan & plan, const Block & header)
+{
+    if (blocksHaveEqualStructure(plan.getCurrentDataStream().header, header))
+        return;
+
+    auto convert_actions_dag = getConvertingDAG(plan.getCurrentDataStream().header, header);
+    auto converting = std::make_unique<ExpressionStep>(plan.getCurrentDataStream(), convert_actions_dag);
+    plan.addStep(std::move(converting));
+}
+
+static void addConvertingActions(Pipe & pipe, const Block & header)
+{
+    if (blocksHaveEqualStructure(pipe.getHeader(), header))
+        return;
+
+    auto convert_actions = std::make_shared<ExpressionActions>(getConvertingDAG(pipe.getHeader(), header));
+    pipe.addSimpleTransform([&](const Block & cur_header, Pipe::StreamType) -> ProcessorPtr
+    {
+        return std::make_shared<ExpressionTransform>(cur_header, convert_actions);
+    });
+}
+
+static String formattedAST(const ASTPtr & ast)
+{
+    if (!ast)
+        return {};
+    WriteBufferFromOwnString buf;
+    formatAST(*ast, buf, false, true);
+    return buf.str();
+}
+
+static std::unique_ptr<QueryPlan> createLocalPlan(
+    const ASTPtr & query_ast,
+    const Block & header,
+    ContextPtr context,
+    QueryProcessingStage::Enum processed_stage)
+{
+    checkStackSize();
+
+    auto query_plan = std::make_unique<QueryPlan>();
+
+    InterpreterSelectQuery interpreter(query_ast, context, SelectQueryOptions(processed_stage));
+    interpreter.buildQueryPlan(*query_plan);
+
+    addConvertingActions(*query_plan, header);
+
+    return query_plan;
+}
+
+ReadFromRemote::ReadFromRemote(
+    ClusterProxy::IStreamFactory::Shards shards_,
+    Block header_,
+    QueryProcessingStage::Enum stage_,
+    StorageID main_table_,
+    ASTPtr table_func_ptr_,
+    ContextPtr context_,
+    ThrottlerPtr throttler_,
+    Scalars scalars_,
+    Tables external_tables_,
+    Poco::Logger * log_)
+    : ISourceStep(DataStream{.header = std::move(header_)})
+    , shards(std::move(shards_))
+    , stage(stage_)
+    , main_table(std::move(main_table_))
+    , table_func_ptr(std::move(table_func_ptr_))
+    , context(std::move(context_))
+    , throttler(std::move(throttler_))
+    , scalars(std::move(scalars_))
+    , external_tables(std::move(external_tables_))
+    , log(log_)
+{
+}
+
+void ReadFromRemote::addLazyPipe(Pipes & pipes, const ClusterProxy::IStreamFactory::Shard & shard)
+{
+    bool add_agg_info = stage == QueryProcessingStage::WithMergeableState;
+    bool add_totals = false;
+    bool add_extremes = false;
+    bool async_read = context->getSettingsRef().async_socket_for_remote;
+    if (stage == QueryProcessingStage::Complete)
+    {
+        add_totals = shard.query->as<ASTSelectQuery &>().group_by_with_totals;
+        add_extremes = context->getSettingsRef().extremes;
+    }
+
+    auto lazily_create_stream = [
+            pool = shard.pool, shard_num = shard.shard_num, query = shard.query, header = shard.header,
+            context = context, throttler = throttler,
+            main_table = main_table, table_func_ptr = table_func_ptr,
+            scalars = scalars, external_tables = external_tables,
+            stage = stage, local_delay = shard.local_delay,
+            add_agg_info, add_totals, add_extremes, async_read]()
+        -> Pipe
+    {
+        auto current_settings = context->getSettingsRef();
+        auto timeouts = ConnectionTimeouts::getTCPTimeoutsWithFailover(
+            current_settings).getSaturated(
+                current_settings.max_execution_time);
+        std::vector<ConnectionPoolWithFailover::TryResult> try_results;
+        try
+        {
+            if (table_func_ptr)
+                try_results = pool->getManyForTableFunction(timeouts, &current_settings, PoolMode::GET_MANY);
+            else
+                try_results = pool->getManyChecked(timeouts, &current_settings, PoolMode::GET_MANY, main_table.getQualifiedName());
+        }
+        catch (const Exception & ex)
+        {
+            if (ex.code() == ErrorCodes::ALL_CONNECTION_TRIES_FAILED)
+                LOG_WARNING(&Poco::Logger::get("ClusterProxy::SelectStreamFactory"),
+                    "Connections to remote replicas of local shard {} failed, will use stale local replica", shard_num);
+            else
+                throw;
+        }
+
+        double max_remote_delay = 0.0;
+        for (const auto & try_result : try_results)
+        {
+            if (!try_result.is_up_to_date)
+                max_remote_delay = std::max(try_result.staleness, max_remote_delay);
+        }
+
+        if (try_results.empty() || local_delay < max_remote_delay)
+        {
+            auto plan = createLocalPlan(query, header, context, stage);
+            return QueryPipeline::getPipe(std::move(*plan->buildQueryPipeline(
+                QueryPlanOptimizationSettings::fromContext(context),
+                BuildQueryPipelineSettings::fromContext(context))));
+        }
+        else
+        {
+            std::vector<IConnectionPool::Entry> connections;
+            connections.reserve(try_results.size());
+            for (auto & try_result : try_results)
+                connections.emplace_back(std::move(try_result.entry));
+
+            String query_string = formattedAST(query);
+
+            auto remote_query_executor = std::make_shared<RemoteQueryExecutor>(
+                std::move(connections), query_string, header, context, throttler, scalars, external_tables, stage);
+
+            return createRemoteSourcePipe(remote_query_executor, add_agg_info, add_totals, add_extremes, async_read);
+        }
+    };
+
+    pipes.emplace_back(createDelayedPipe(shard.header, lazily_create_stream, add_totals, add_extremes));
+    pipes.back().addInterpreterContext(context);
+    addConvertingActions(pipes.back(), output_stream->header);
+}
+
+void ReadFromRemote::addPipe(Pipes & pipes, const ClusterProxy::IStreamFactory::Shard & shard)
+{
+    bool add_agg_info = stage == QueryProcessingStage::WithMergeableState;
+    bool add_totals = false;
+    bool add_extremes = false;
+    bool async_read = context->getSettingsRef().async_socket_for_remote;
+    if (stage == QueryProcessingStage::Complete)
+    {
+        add_totals = shard.query->as<ASTSelectQuery &>().group_by_with_totals;
+        add_extremes = context->getSettingsRef().extremes;
+    }
+
+    String query_string = formattedAST(shard.query);
+
+    auto remote_query_executor = std::make_shared<RemoteQueryExecutor>(
+        shard.pool, query_string, shard.header, context, throttler, scalars, external_tables, stage);
+    remote_query_executor->setLogger(log);
+
+    remote_query_executor->setPoolMode(PoolMode::GET_MANY);
+    if (!table_func_ptr)
+        remote_query_executor->setMainTable(main_table);
+
+    pipes.emplace_back(createRemoteSourcePipe(remote_query_executor, add_agg_info, add_totals, add_extremes, async_read));
+    pipes.back().addInterpreterContext(context);
+    addConvertingActions(pipes.back(), output_stream->header);
+}
+
+void ReadFromRemote::initializePipeline(QueryPipeline & pipeline, const BuildQueryPipelineSettings &)
+{
+    Pipes pipes;
+    for (const auto & shard : shards)
+    {
+        if (shard.lazy)
+            addLazyPipe(pipes, shard);
+        else
+            addPipe(pipes, shard);
+    }
+
+    auto pipe = Pipe::unitePipes(std::move(pipes));
+    pipeline.init(std::move(pipe));
+}
+
+}
src/Processors/QueryPlan/ReadFromRemote.h (new file, 57 lines)
@@ -0,0 +1,57 @@
+#pragma once
+#include <Processors/QueryPlan/ISourceStep.h>
+#include <Core/QueryProcessingStage.h>
+#include <Storages/IStorage_fwd.h>
+#include <Interpreters/StorageID.h>
+#include <Interpreters/ClusterProxy/IStreamFactory.h>
+
+namespace DB
+{
+
+class ConnectionPoolWithFailover;
+using ConnectionPoolWithFailoverPtr = std::shared_ptr<ConnectionPoolWithFailover>;
+
+class Throttler;
+using ThrottlerPtr = std::shared_ptr<Throttler>;
+
+/// Reading step from remote servers.
+/// Unites query results from several shards.
+class ReadFromRemote final : public ISourceStep
+{
+public:
+    ReadFromRemote(
+        ClusterProxy::IStreamFactory::Shards shards_,
+        Block header_,
+        QueryProcessingStage::Enum stage_,
+        StorageID main_table_,
+        ASTPtr table_func_ptr_,
+        ContextPtr context_,
+        ThrottlerPtr throttler_,
+        Scalars scalars_,
+        Tables external_tables_,
+        Poco::Logger * log_);
+
+    String getName() const override { return "ReadFromRemote"; }
+
+    void initializePipeline(QueryPipeline & pipeline, const BuildQueryPipelineSettings &) override;
+
+private:
+    ClusterProxy::IStreamFactory::Shards shards;
+    QueryProcessingStage::Enum stage;
+
+    StorageID main_table;
+    ASTPtr table_func_ptr;
+
+    ContextPtr context;
+
+    ThrottlerPtr throttler;
+    Scalars scalars;
+    Tables external_tables;
+
+    Poco::Logger * log;
+
+    void addLazyPipe(Pipes & pipes, const ClusterProxy::IStreamFactory::Shard & shard);
+    void addPipe(Pipes & pipes, const ClusterProxy::IStreamFactory::Shard & shard);
+};
+
+}
@@ -126,6 +126,7 @@ SRCS(
     QueryPlan/QueryPlan.cpp
     QueryPlan/ReadFromMergeTree.cpp
     QueryPlan/ReadFromPreparedSource.cpp
+    QueryPlan/ReadFromRemote.cpp
     QueryPlan/ReadNothingStep.cpp
     QueryPlan/RollupStep.cpp
     QueryPlan/SettingQuotaAndLimitsStep.cpp
|
|||||||
{
|
{
|
||||||
io.onException();
|
io.onException();
|
||||||
|
|
||||||
LOG_ERROR(log, "Code: {}, e.displayText() = {}, Stack trace:\n\n{}", exception.code(), exception.displayText(), exception.getStackTraceString());
|
LOG_ERROR(log, getExceptionMessage(exception, true));
|
||||||
|
|
||||||
if (responder && !responder_finished)
|
if (responder && !responder_finished)
|
||||||
{
|
{
|
||||||
|
@@ -149,7 +149,7 @@ void TCPHandler::runImpl()
     if (!DatabaseCatalog::instance().isDatabaseExist(default_database))
     {
         Exception e("Database " + backQuote(default_database) + " doesn't exist", ErrorCodes::UNKNOWN_DATABASE);
-        LOG_ERROR(log, "Code: {}, e.displayText() = {}, Stack trace:\n\n{}", e.code(), e.displayText(), e.getStackTraceString());
+        LOG_ERROR(log, getExceptionMessage(e, true));
         sendException(e, connection_context->getSettingsRef().calculate_text_stack_trace);
         return;
     }
@@ -422,7 +422,7 @@ void TCPHandler::runImpl()
         }
 
         const auto & e = *exception;
-        LOG_ERROR(log, "Code: {}, e.displayText() = {}, Stack trace:\n\n{}", e.code(), e.displayText(), e.getStackTraceString());
+        LOG_ERROR(log, getExceptionMessage(e, true));
         sendException(*exception, send_exception_with_stack_trace);
     }
 }
@ -168,7 +168,7 @@ ColumnsDescription::ColumnsDescription(NamesAndTypesList ordinary, NamesAndAlias
|
|||||||
/// We are trying to find first column from end with name `column_name` or with a name beginning with `column_name` and ".".
|
/// We are trying to find first column from end with name `column_name` or with a name beginning with `column_name` and ".".
|
||||||
/// For example "fruits.bananas"
|
/// For example "fruits.bananas"
|
||||||
/// names are considered the same if they completely match or `name_without_dot` matches the part of the name to the point
|
/// names are considered the same if they completely match or `name_without_dot` matches the part of the name to the point
|
||||||
static auto getNameRange(const ColumnsDescription::Container & columns, const String & name_without_dot)
|
static auto getNameRange(const ColumnsDescription::ColumnsContainer & columns, const String & name_without_dot)
|
||||||
{
|
{
|
||||||
String name_with_dot = name_without_dot + ".";
|
String name_with_dot = name_without_dot + ".";
|
||||||
|
|
||||||
@ -228,7 +228,7 @@ void ColumnsDescription::remove(const String & column_name)
|
|||||||
|
|
||||||
for (auto list_it = range.first; list_it != range.second;)
|
for (auto list_it = range.first; list_it != range.second;)
|
||||||
{
|
{
|
||||||
removeSubcolumns(list_it->name, list_it->type);
|
removeSubcolumns(list_it->name);
|
||||||
list_it = columns.get<0>().erase(list_it);
|
list_it = columns.get<0>().erase(list_it);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -303,7 +303,7 @@ void ColumnsDescription::flattenNested()
|
|||||||
}
|
}
|
||||||
|
|
||||||
ColumnDescription column = std::move(*it);
|
ColumnDescription column = std::move(*it);
|
||||||
removeSubcolumns(column.name, column.type);
|
removeSubcolumns(column.name);
|
||||||
it = columns.get<0>().erase(it);
|
it = columns.get<0>().erase(it);
|
||||||
|
|
||||||
const DataTypes & elements = type_tuple->getElements();
|
const DataTypes & elements = type_tuple->getElements();
|
||||||
@ -372,12 +372,7 @@ bool ColumnsDescription::hasNested(const String & column_name) const
|
|||||||
|
|
||||||
bool ColumnsDescription::hasSubcolumn(const String & column_name) const
|
bool ColumnsDescription::hasSubcolumn(const String & column_name) const
|
||||||
{
|
{
|
||||||
return subcolumns.find(column_name) != subcolumns.end();
|
return subcolumns.get<0>().count(column_name);
|
||||||
}
|
|
||||||
|
|
||||||
bool ColumnsDescription::hasInStorageOrSubcolumn(const String & column_name) const
|
|
||||||
{
|
|
||||||
return has(column_name) || hasSubcolumn(column_name);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
const ColumnDescription & ColumnsDescription::get(const String & column_name) const
|
const ColumnDescription & ColumnsDescription::get(const String & column_name) const
|
||||||

@ -390,6 +385,50 @@ const ColumnDescription & ColumnsDescription::get(const String & column_name) co
     return *it;
 }

+static ColumnsDescription::GetFlags defaultKindToGetFlag(ColumnDefaultKind kind)
+{
+    switch (kind)
+    {
+        case ColumnDefaultKind::Default:
+            return ColumnsDescription::Ordinary;
+        case ColumnDefaultKind::Materialized:
+            return ColumnsDescription::Materialized;
+        case ColumnDefaultKind::Alias:
+            return ColumnsDescription::Aliases;
+    }
+    __builtin_unreachable();
+}
+
+NamesAndTypesList ColumnsDescription::getByNames(GetFlags flags, const Names & names, bool with_subcolumns) const
+{
+    NamesAndTypesList res;
+    for (const auto & name : names)
+    {
+        if (auto it = columns.get<1>().find(name); it != columns.get<1>().end())
+        {
+            auto kind = defaultKindToGetFlag(it->default_desc.kind);
+            if (flags & kind)
+            {
+                res.emplace_back(name, it->type);
+                continue;
+            }
+        }
+        else if (with_subcolumns)
+        {
+            auto jt = subcolumns.get<0>().find(name);
+            if (jt != subcolumns.get<0>().end())
+            {
+                res.push_back(*jt);
+                continue;
+            }
+        }
+
+        throw Exception(ErrorCodes::NO_SUCH_COLUMN_IN_TABLE, "There is no column {} in table", name);
+    }
+
+    return res;
+}
+
 NamesAndTypesList ColumnsDescription::getAllPhysical() const
 {
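
The GetFlags values used above are bit flags: each column kind occupies one bit and callers pass a mask such as AllPhysical. A minimal self-contained model of the filtering that getByNames performs (standalone C++, not ClickHouse code; the Column struct and names here are illustrative only):

#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

// One bit per column kind; masks are unions of bits.
enum GetFlags : uint8_t
{
    Ordinary = 1,
    Materialized = 2,
    Aliases = 4,
    AllPhysical = Ordinary | Materialized,
    All = AllPhysical | Aliases,
};

struct Column
{
    std::string name;
    GetFlags kind;
};

int main()
{
    std::vector<Column> columns = {{"id", Ordinary}, {"mat_col", Materialized}, {"alias_col", Aliases}};

    for (const auto & c : columns)
        if (c.kind & AllPhysical)          // the mask filters out aliases
            std::cout << c.name << '\n';   // prints id and mat_col
}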

@ -409,29 +448,46 @@ Names ColumnsDescription::getNamesOfPhysical() const
     return ret;
 }

-NameAndTypePair ColumnsDescription::getPhysical(const String & column_name) const
+std::optional<NameAndTypePair> ColumnsDescription::tryGetColumnOrSubcolumn(GetFlags flags, const String & column_name) const
+{
+    auto it = columns.get<1>().find(column_name);
+    if (it != columns.get<1>().end() && (defaultKindToGetFlag(it->default_desc.kind) & flags))
+        return NameAndTypePair(it->name, it->type);
+
+    auto jt = subcolumns.get<0>().find(column_name);
+    if (jt != subcolumns.get<0>().end())
+        return *jt;
+
+    return {};
+}
+
+NameAndTypePair ColumnsDescription::getColumnOrSubcolumn(GetFlags flags, const String & column_name) const
+{
+    auto column = tryGetColumnOrSubcolumn(flags, column_name);
+    if (!column)
+        throw Exception(ErrorCodes::NO_SUCH_COLUMN_IN_TABLE,
+            "There is no column or subcolumn {} in table.", column_name);
+
+    return *column;
+}
+
+std::optional<NameAndTypePair> ColumnsDescription::tryGetPhysical(const String & column_name) const
 {
     auto it = columns.get<1>().find(column_name);
     if (it == columns.get<1>().end() || it->default_desc.kind == ColumnDefaultKind::Alias)
-        throw Exception("There is no physical column " + column_name + " in table.", ErrorCodes::NO_SUCH_COLUMN_IN_TABLE);
+        return {};

     return NameAndTypePair(it->name, it->type);
 }

-NameAndTypePair ColumnsDescription::getPhysicalOrSubcolumn(const String & column_name) const
+NameAndTypePair ColumnsDescription::getPhysical(const String & column_name) const
 {
-    if (auto it = columns.get<1>().find(column_name); it != columns.get<1>().end()
-        && it->default_desc.kind != ColumnDefaultKind::Alias)
-    {
-        return NameAndTypePair(it->name, it->type);
-    }
-
-    if (auto it = subcolumns.find(column_name); it != subcolumns.end())
-    {
-        return it->second;
-    }
-
-    throw Exception(ErrorCodes::NO_SUCH_COLUMN_IN_TABLE,
-        "There is no physical column or subcolumn {} in table.", column_name);
+    auto column = tryGetPhysical(column_name);
+    if (!column)
+        throw Exception(ErrorCodes::NO_SUCH_COLUMN_IN_TABLE,
+            "There is no physical column {} in table.", column_name);
+
+    return *column;
 }

 bool ColumnsDescription::hasPhysical(const String & column_name) const
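
The tryGet*/get* split above follows a common pattern: the optional-returning variant holds the lookup logic and the throwing variant is a thin wrapper over it, so the logic lives in exactly one place. A small self-contained sketch of the pattern (hypothetical map and names, not the ClickHouse API):

#include <optional>
#include <stdexcept>
#include <string>
#include <unordered_map>

// "try" variant: returns empty optional instead of throwing.
std::optional<int> tryGetValue(const std::unordered_map<std::string, int> & m, const std::string & key)
{
    if (auto it = m.find(key); it != m.end())
        return it->second;
    return {};
}

// Throwing variant: a thin wrapper, so both share one lookup implementation.
int getValue(const std::unordered_map<std::string, int> & m, const std::string & key)
{
    auto value = tryGetValue(m, key);
    if (!value)
        throw std::runtime_error("There is no key " + key);
    return *value;
}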

@ -440,32 +496,36 @@ bool ColumnsDescription::hasPhysical(const String & column_name) const
     return it != columns.get<1>().end() && it->default_desc.kind != ColumnDefaultKind::Alias;
 }

-bool ColumnsDescription::hasPhysicalOrSubcolumn(const String & column_name) const
+bool ColumnsDescription::hasColumnOrSubcolumn(GetFlags flags, const String & column_name) const
 {
-    return hasPhysical(column_name) || subcolumns.find(column_name) != subcolumns.end();
+    auto it = columns.get<1>().find(column_name);
+    return (it != columns.get<1>().end()
+        && (defaultKindToGetFlag(it->default_desc.kind) & flags))
+        || hasSubcolumn(column_name);
 }

-static NamesAndTypesList getWithSubcolumns(NamesAndTypesList && source_list)
+void ColumnsDescription::addSubcolumnsToList(NamesAndTypesList & source_list) const
 {
-    NamesAndTypesList ret;
     for (const auto & col : source_list)
     {
-        ret.emplace_back(col.name, col.type);
-        for (const auto & subcolumn : col.type->getSubcolumnNames())
-            ret.emplace_back(col.name, subcolumn, col.type, col.type->getSubcolumnType(subcolumn));
+        auto range = subcolumns.get<1>().equal_range(col.name);
+        if (range.first != range.second)
+            source_list.insert(source_list.end(), range.first, range.second);
     }
-
-    return ret;
 }

 NamesAndTypesList ColumnsDescription::getAllWithSubcolumns() const
 {
-    return getWithSubcolumns(getAll());
+    auto columns_list = getAll();
+    addSubcolumnsToList(columns_list);
+    return columns_list;
 }

 NamesAndTypesList ColumnsDescription::getAllPhysicalWithSubcolumns() const
 {
-    return getWithSubcolumns(getAllPhysical());
+    auto columns_list = getAllPhysical();
+    addSubcolumnsToList(columns_list);
+    return columns_list;
 }

 bool ColumnsDescription::hasDefaults() const

@ -591,14 +651,15 @@ void ColumnsDescription::addSubcolumns(const String & name_in_storage, const Dat
             throw Exception(ErrorCodes::ILLEGAL_COLUMN,
                 "Cannot add subcolumn {}: column with this name already exists", subcolumn.name);

-        subcolumns[subcolumn.name] = subcolumn;
+        subcolumns.get<0>().insert(std::move(subcolumn));
     }
 }

-void ColumnsDescription::removeSubcolumns(const String & name_in_storage, const DataTypePtr & type_in_storage)
+void ColumnsDescription::removeSubcolumns(const String & name_in_storage)
 {
-    for (const auto & subcolumn_name : type_in_storage->getSubcolumnNames())
-        subcolumns.erase(name_in_storage + "." + subcolumn_name);
+    auto range = subcolumns.get<1>().equal_range(name_in_storage);
+    if (range.first != range.second)
+        subcolumns.get<1>().erase(range.first, range.second);
 }

 Block validateColumnsDefaultsAndGetSampleBlock(ASTPtr default_expr_list, const NamesAndTypesList & all_columns, ContextPtr context)

@ -11,6 +11,8 @@
 #include <Common/Exception.h>

 #include <boost/multi_index/member.hpp>
+#include <boost/multi_index/mem_fun.hpp>
+#include <boost/multi_index/hashed_index.hpp>
 #include <boost/multi_index/ordered_index.hpp>
 #include <boost/multi_index/sequenced_index.hpp>
 #include <boost/multi_index_container.hpp>

@ -77,6 +79,18 @@ public:
     auto begin() const { return columns.begin(); }
     auto end() const { return columns.end(); }

+    enum GetFlags : UInt8
+    {
+        Ordinary = 1,
+        Materialized = 2,
+        Aliases = 4,
+
+        AllPhysical = Ordinary | Materialized,
+        All = AllPhysical | Aliases,
+    };
+
+    NamesAndTypesList getByNames(GetFlags flags, const Names & names, bool with_subcolumns) const;
+
     NamesAndTypesList getOrdinary() const;
     NamesAndTypesList getMaterialized() const;
     NamesAndTypesList getAliases() const;

@ -91,7 +105,6 @@ public:
     bool has(const String & column_name) const;
     bool hasNested(const String & column_name) const;
     bool hasSubcolumn(const String & column_name) const;
-    bool hasInStorageOrSubcolumn(const String & column_name) const;
     const ColumnDescription & get(const String & column_name) const;

     template <typename F>

@ -113,10 +126,15 @@ public:
     }

     Names getNamesOfPhysical() const;

     bool hasPhysical(const String & column_name) const;
-    bool hasPhysicalOrSubcolumn(const String & column_name) const;
+    bool hasColumnOrSubcolumn(GetFlags flags, const String & column_name) const;

     NameAndTypePair getPhysical(const String & column_name) const;
-    NameAndTypePair getPhysicalOrSubcolumn(const String & column_name) const;
+    NameAndTypePair getColumnOrSubcolumn(GetFlags flags, const String & column_name) const;
+
+    std::optional<NameAndTypePair> tryGetPhysical(const String & column_name) const;
+    std::optional<NameAndTypePair> tryGetColumnOrSubcolumn(GetFlags flags, const String & column_name) const;

     ColumnDefaults getDefaults() const; /// TODO: remove
     bool hasDefault(const String & column_name) const;

@ -143,21 +161,27 @@ public:
     }

     /// Keep the sequence of columns and allow to lookup by name.
-    using Container = boost::multi_index_container<
+    using ColumnsContainer = boost::multi_index_container<
         ColumnDescription,
         boost::multi_index::indexed_by<
             boost::multi_index::sequenced<>,
             boost::multi_index::ordered_unique<boost::multi_index::member<ColumnDescription, String, &ColumnDescription::name>>>>;

-private:
-    Container columns;
+    using SubcolumnsContainter = boost::multi_index_container<
+        NameAndTypePair,
+        boost::multi_index::indexed_by<
+            boost::multi_index::hashed_unique<boost::multi_index::member<NameAndTypePair, String, &NameAndTypePair::name>>,
+            boost::multi_index::hashed_non_unique<boost::multi_index::const_mem_fun<NameAndTypePair, String, &NameAndTypePair::getNameInStorage>>>>;

-    using SubcolumnsContainer = std::unordered_map<String, NameAndTypePair>;
-    SubcolumnsContainer subcolumns;
+private:
+    ColumnsContainer columns;
+    SubcolumnsContainter subcolumns;

     void modifyColumnOrder(const String & column_name, const String & after_column, bool first);
+    void addSubcolumnsToList(NamesAndTypesList & source_list) const;

     void addSubcolumns(const String & name_in_storage, const DataTypePtr & type_in_storage);
-    void removeSubcolumns(const String & name_in_storage, const DataTypePtr & type_in_storage);
+    void removeSubcolumns(const String & name_in_storage);
 };

 /// Validate default expressions and corresponding types compatibility, i.e.
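The new SubcolumnsContainter keeps two hash indexes over the same set of values: a unique one over the full subcolumn name and a non-unique one over the storage column it belongs to, which is what makes the equal_range calls in removeSubcolumns and addSubcolumnsToList possible. A self-contained model using the same boost::multi_index machinery (the Subcolumn struct here is illustrative, not the ClickHouse type):

#include <boost/multi_index_container.hpp>
#include <boost/multi_index/hashed_index.hpp>
#include <boost/multi_index/member.hpp>
#include <boost/multi_index/mem_fun.hpp>
#include <iostream>
#include <string>

struct Subcolumn
{
    std::string name;                      // e.g. "nested.arr.size0"
    std::string getNameInStorage() const   // e.g. "nested.arr"
    {
        return name.substr(0, name.rfind('.'));
    }
};

namespace mi = boost::multi_index;

// Index 0: unique lookup by full name. Index 1: group by storage column.
using Subcolumns = mi::multi_index_container<
    Subcolumn,
    mi::indexed_by<
        mi::hashed_unique<mi::member<Subcolumn, std::string, &Subcolumn::name>>,
        mi::hashed_non_unique<mi::const_mem_fun<Subcolumn, std::string, &Subcolumn::getNameInStorage>>>>;

int main()
{
    Subcolumns subcolumns;
    subcolumns.insert({"nested.arr.size0"});
    subcolumns.insert({"nested.arr.null"});

    // One equal_range finds every subcolumn of a column, as removeSubcolumns does.
    auto range = subcolumns.get<1>().equal_range("nested.arr");
    for (auto it = range.first; it != range.second; ++it)
        std::cout << it->name << '\n';
}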

@ -345,15 +345,15 @@ StorageDistributedDirectoryMonitor::StorageDistributedDirectoryMonitor(
     , disk(disk_)
     , relative_path(relative_path_)
     , path(fs::path(disk->getPath()) / relative_path / "")
-    , should_batch_inserts(storage.getContext()->getSettingsRef().distributed_directory_monitor_batch_inserts)
-    , split_batch_on_failure(storage.getContext()->getSettingsRef().distributed_directory_monitor_split_batch_on_failure)
+    , should_batch_inserts(storage.getDistributedSettingsRef().monitor_batch_inserts)
+    , split_batch_on_failure(storage.getDistributedSettingsRef().monitor_split_batch_on_failure)
     , dir_fsync(storage.getDistributedSettingsRef().fsync_directories)
     , min_batched_block_size_rows(storage.getContext()->getSettingsRef().min_insert_block_size_rows)
     , min_batched_block_size_bytes(storage.getContext()->getSettingsRef().min_insert_block_size_bytes)
     , current_batch_file_path(path + "current_batch.txt")
-    , default_sleep_time(storage.getContext()->getSettingsRef().distributed_directory_monitor_sleep_time_ms.totalMilliseconds())
+    , default_sleep_time(storage.getDistributedSettingsRef().monitor_sleep_time_ms.totalMilliseconds())
     , sleep_time(default_sleep_time)
-    , max_sleep_time(storage.getContext()->getSettingsRef().distributed_directory_monitor_max_sleep_time_ms.totalMilliseconds())
+    , max_sleep_time(storage.getDistributedSettingsRef().monitor_max_sleep_time_ms.totalMilliseconds())
     , log(&Poco::Logger::get(getLoggerName()))
     , monitor_blocker(monitor_blocker_)
     , metric_pending_files(CurrentMetrics::DistributedFilesToInsert, 0)

@ -432,7 +432,7 @@ void StorageDistributedDirectoryMonitor::run()
         do_sleep = true;
         ++status.error_count;
         sleep_time = std::min(
-            std::chrono::milliseconds{Int64(default_sleep_time.count() * std::exp2(status.error_count))},
+            std::chrono::milliseconds{UInt64(default_sleep_time.count() * std::exp2(status.error_count))},
             max_sleep_time);
         tryLogCurrentException(getLoggerName().data());
         status.last_exception = std::current_exception();

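The sleep_time update in the @ -432 hunk is exponential backoff: the delay doubles with each consecutive error (exp2) and is capped by max_sleep_time; the hunk also swaps the Int64 cast for UInt64. A standalone sketch of the capped doubling (the constants here are illustrative, not the ClickHouse defaults):

#include <algorithm>
#include <chrono>
#include <cmath>
#include <cstdint>
#include <iostream>

int main()
{
    using namespace std::chrono;
    const milliseconds default_sleep_time{100};
    const milliseconds max_sleep_time{30000};

    for (uint64_t error_count = 1; error_count <= 10; ++error_count)
    {
        // Sleep doubles per error, never exceeding the cap.
        auto sleep_time = std::min(
            milliseconds{uint64_t(default_sleep_time.count() * std::exp2(error_count))},
            max_sleep_time);
        std::cout << sleep_time.count() << "ms\n";  // 200, 400, ..., then capped at 30000
    }
}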
@ -21,6 +21,11 @@ class ASTStorage;
     M(UInt64, bytes_to_throw_insert, 0, "If more than this number of compressed bytes will be pending for async INSERT, an exception will be thrown. 0 - do not throw.", 0) \
     M(UInt64, bytes_to_delay_insert, 0, "If more than this number of compressed bytes will be pending for async INSERT, the query will be delayed. 0 - do not delay.", 0) \
     M(UInt64, max_delay_to_insert, 60, "Max delay of inserting data into Distributed table in seconds, if there are a lot of pending bytes for async send.", 0) \
+    /** Directory monitor settings */ \
+    M(UInt64, monitor_batch_inserts, 0, "Default - distributed_directory_monitor_batch_inserts", 0) \
+    M(UInt64, monitor_split_batch_on_failure, 0, "Default - distributed_directory_monitor_split_batch_on_failure", 0) \
+    M(Milliseconds, monitor_sleep_time_ms, 0, "Default - distributed_directory_monitor_sleep_time_ms", 0) \
+    M(Milliseconds, monitor_max_sleep_time_ms, 0, "Default - distributed_directory_monitor_max_sleep_time_ms", 0) \

 DECLARE_SETTINGS_TRAITS(DistributedSettingsTraits, LIST_OF_DISTRIBUTED_SETTINGS)

@ -33,6 +33,7 @@ IMergeTreeReader::IMergeTreeReader(
     : data_part(data_part_)
     , avg_value_size_hints(avg_value_size_hints_)
     , columns(columns_)
+    , part_columns(data_part->getColumns())
     , uncompressed_cache(uncompressed_cache_)
     , mark_cache(mark_cache_)
     , settings(settings_)

@ -41,15 +42,15 @@ IMergeTreeReader::IMergeTreeReader(
     , all_mark_ranges(all_mark_ranges_)
     , alter_conversions(storage.getAlterConversionsForPart(data_part))
 {
-    auto part_columns = data_part->getColumns();
     if (settings.convert_nested_to_subcolumns)
     {
         columns = Nested::convertToSubcolumns(columns);
         part_columns = Nested::collect(part_columns);
     }

-    for (const NameAndTypePair & column_from_part : part_columns)
-        columns_from_part[column_from_part.name] = column_from_part.type;
+    columns_from_part.set_empty_key(StringRef());
+    for (const auto & column_from_part : part_columns)
+        columns_from_part.emplace(column_from_part.name, &column_from_part.type);
 }

 IMergeTreeReader::~IMergeTreeReader() = default;

@ -226,18 +227,19 @@ NameAndTypePair IMergeTreeReader::getColumnFromPart(const NameAndTypePair & requ
     if (it == columns_from_part.end())
         return required_column;

+    const auto & type = *it->second;
     if (required_column.isSubcolumn())
     {
         auto subcolumn_name = required_column.getSubcolumnName();
-        auto subcolumn_type = it->second->tryGetSubcolumnType(subcolumn_name);
+        auto subcolumn_type = type->tryGetSubcolumnType(subcolumn_name);

         if (!subcolumn_type)
             return required_column;

-        return {it->first, subcolumn_name, it->second, subcolumn_type};
+        return {String(it->first), subcolumn_name, type, subcolumn_type};
     }

-    return {it->first, it->second};
+    return {String(it->first), type};
 }

 void IMergeTreeReader::performRequiredConversions(Columns & res_columns)

@ -3,6 +3,7 @@
 #include <Core/NamesAndTypes.h>
 #include <Storages/MergeTree/MergeTreeReaderStream.h>
 #include <Storages/MergeTree/MergeTreeBlockReadUtils.h>
+#include <sparsehash/dense_hash_map>

 namespace DB
 {

@ -72,6 +73,7 @@ protected:

     /// Columns that are read.
     NamesAndTypesList columns;
+    NamesAndTypesList part_columns;

     UncompressedCache * uncompressed_cache;
     MarkCache * mark_cache;

@ -92,7 +94,7 @@ private:
     MergeTreeData::AlterConversions alter_conversions;

     /// Actual data type of columns in part
-    std::unordered_map<String, DataTypePtr> columns_from_part;
+    google::dense_hash_map<StringRef, const DataTypePtr *, StringRefHash> columns_from_part;
 };

 }

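google::dense_hash_map, which now backs columns_from_part, is an open-addressing hash table that requires a designated "empty" key before any insert, which is why the constructor above calls set_empty_key. A minimal standalone example of that contract (plain std::string keys for illustration, assuming the sparsehash library is available):

#include <sparsehash/dense_hash_map>
#include <iostream>
#include <string>

int main()
{
    google::dense_hash_map<std::string, int> columns_from_part;
    // Mandatory before the first insert: reserves one key value as the
    // "empty bucket" marker (that key can then never be inserted).
    columns_from_part.set_empty_key(std::string());

    columns_from_part["id"] = 1;
    columns_from_part["value"] = 2;

    auto it = columns_from_part.find("value");
    if (it != columns_from_part.end())
        std::cout << it->first << " -> " << it->second << '\n';
}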

@ -35,16 +35,16 @@ bool injectRequiredColumnsRecursively(
     /// stages.
     checkStackSize();

-    if (storage_columns.hasPhysicalOrSubcolumn(column_name))
+    auto column_in_storage = storage_columns.tryGetColumnOrSubcolumn(ColumnsDescription::AllPhysical, column_name);
+    if (column_in_storage)
     {
-        auto column_in_storage = storage_columns.getPhysicalOrSubcolumn(column_name);
-        auto column_name_in_part = column_in_storage.getNameInStorage();
+        auto column_name_in_part = column_in_storage->getNameInStorage();
         if (alter_conversions.isColumnRenamed(column_name_in_part))
             column_name_in_part = alter_conversions.getColumnOldName(column_name_in_part);

         auto column_in_part = NameAndTypePair(
-            column_name_in_part, column_in_storage.getSubcolumnName(),
-            column_in_storage.getTypeInStorage(), column_in_storage.type);
+            column_name_in_part, column_in_storage->getSubcolumnName(),
+            column_in_storage->getTypeInStorage(), column_in_storage->type);

         /// column has files and hence does not require evaluation
         if (part->hasColumnFiles(column_in_part))

@ -93,7 +93,7 @@ NameSet injectRequiredColumns(const MergeTreeData & storage, const StorageMetada
     for (size_t i = 0; i < columns.size(); ++i)
     {
         /// We are going to fetch only physical columns
-        if (!storage_columns.hasPhysicalOrSubcolumn(columns[i]))
+        if (!storage_columns.hasColumnOrSubcolumn(ColumnsDescription::AllPhysical, columns[i]))
             throw Exception("There is no physical column or subcolumn " + columns[i] + " in table.", ErrorCodes::NO_SUCH_COLUMN_IN_TABLE);

         have_at_least_one_physical_column |= injectRequiredColumnsRecursively(

@ -310,9 +310,9 @@ MergeTreeReadTaskColumns getReadTaskColumns(

     if (check_columns)
     {
-        const NamesAndTypesList & physical_columns = metadata_snapshot->getColumns().getAllWithSubcolumns();
-        result.pre_columns = physical_columns.addTypes(pre_column_names);
-        result.columns = physical_columns.addTypes(column_names);
+        const auto & columns = metadata_snapshot->getColumns();
+        result.pre_columns = columns.getByNames(ColumnsDescription::All, pre_column_names, true);
+        result.columns = columns.getByNames(ColumnsDescription::All, column_names, true);
     }
     else
     {

@ -43,8 +43,7 @@ MergeTreeSequentialSource::MergeTreeSequentialSource(
     NamesAndTypesList columns_for_reader;
     if (take_column_types_from_storage)
     {
-        const NamesAndTypesList & physical_columns = metadata_snapshot->getColumns().getAllPhysical();
-        columns_for_reader = physical_columns.addTypes(columns_to_read);
+        columns_for_reader = metadata_snapshot->getColumns().getByNames(ColumnsDescription::AllPhysical, columns_to_read, false);
     }
     else
     {

@ -886,7 +886,6 @@ bool ReplicatedMergeTreeQueue::checkReplaceRangeCanBeRemoved(const MergeTreePart
     if (entry_ptr->replace_range_entry == current.replace_range_entry) /// same partition, don't want to drop ourselves
         return false;
-
     if (!part_info.contains(MergeTreePartInfo::fromPartName(entry_ptr->replace_range_entry->drop_range_part_name, format_version)))
         return false;

@ -137,7 +137,7 @@ public:
     BufferSource(const Names & column_names_, StorageBuffer::Buffer & buffer_, const StorageBuffer & storage, const StorageMetadataPtr & metadata_snapshot)
         : SourceWithProgress(
             metadata_snapshot->getSampleBlockForColumns(column_names_, storage.getVirtuals(), storage.getStorageID()))
-        , column_names_and_types(metadata_snapshot->getColumns().getAllWithSubcolumns().addTypes(column_names_))
+        , column_names_and_types(metadata_snapshot->getColumns().getByNames(ColumnsDescription::All, column_names_, true))
         , buffer(buffer_) {}

     String getName() const override { return "Buffer"; }

@ -242,8 +242,8 @@ void StorageBuffer::read(
         {
             const auto & dest_columns = destination_metadata_snapshot->getColumns();
             const auto & our_columns = metadata_snapshot->getColumns();
-            return dest_columns.hasPhysicalOrSubcolumn(column_name) &&
-                dest_columns.getPhysicalOrSubcolumn(column_name).type->equals(*our_columns.getPhysicalOrSubcolumn(column_name).type);
+            auto dest_columm = dest_columns.tryGetColumnOrSubcolumn(ColumnsDescription::AllPhysical, column_name);
+            return dest_columm && dest_columm->type->equals(*our_columns.getColumnOrSubcolumn(ColumnsDescription::AllPhysical, column_name).type);
         });

     if (dst_has_same_structure)

@ -290,26 +290,27 @@ void replaceConstantExpressions(
 /// - QueryProcessingStage::WithMergeableStateAfterAggregation
 /// - QueryProcessingStage::WithMergeableStateAfterAggregationAndLimit
 /// - none (in this case regular WithMergeableState should be used)
-std::optional<QueryProcessingStage::Enum> getOptimizedQueryProcessingStage(const SelectQueryInfo & query_info, bool extremes, const Block & sharding_key_block)
+std::optional<QueryProcessingStage::Enum> getOptimizedQueryProcessingStage(const SelectQueryInfo & query_info, bool extremes, const Names & sharding_key_columns)
 {
     const auto & select = query_info.query->as<ASTSelectQuery &>();

-    auto sharding_block_has = [&](const auto & exprs, size_t limit = SIZE_MAX) -> bool
+    auto sharding_block_has = [&](const auto & exprs) -> bool
     {
-        size_t i = 0;
+        std::unordered_set<std::string> expr_columns;
         for (auto & expr : exprs)
         {
-            ++i;
-            if (i > limit)
-                break;
-
             auto id = expr->template as<ASTIdentifier>();
             if (!id)
-                return false;
-            /// TODO: if GROUP BY contains multiIf()/if() it should contain only columns from sharding_key
-            if (!sharding_key_block.has(id->name()))
+                continue;
+            expr_columns.emplace(id->name());
+        }
+
+        for (const auto & column : sharding_key_columns)
+        {
+            if (!expr_columns.contains(column))
                 return false;
         }

         return true;
     };

@ -343,7 +344,7 @@ std::optional<QueryProcessingStage::Enum> getOptimizedQueryProcessingStage(const
         }
         else
         {
-            if (!sharding_block_has(group_by->children, 1))
+            if (!sharding_block_has(group_by->children))
                 return {};
         }

|
|||||||
has_sharding_key &&
|
has_sharding_key &&
|
||||||
(settings.allow_nondeterministic_optimize_skip_unused_shards || sharding_key_is_deterministic))
|
(settings.allow_nondeterministic_optimize_skip_unused_shards || sharding_key_is_deterministic))
|
||||||
{
|
{
|
||||||
Block sharding_key_block = sharding_key_expr->getSampleBlock();
|
auto stage = getOptimizedQueryProcessingStage(query_info, settings.extremes, sharding_key_expr->getRequiredColumns());
|
||||||
auto stage = getOptimizedQueryProcessingStage(query_info, settings.extremes, sharding_key_block);
|
|
||||||
if (stage)
|
if (stage)
|
||||||
{
|
{
|
||||||
LOG_DEBUG(log, "Force processing stage to {}", QueryProcessingStage::toString(*stage));
|
LOG_DEBUG(log, "Force processing stage to {}", QueryProcessingStage::toString(*stage));
|
||||||
@ -602,25 +602,25 @@ void StorageDistributed::read(
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
const Scalars & scalars = local_context->hasQueryContext() ? local_context->getQueryContext()->getScalars() : Scalars{};
|
|
||||||
|
|
||||||
bool has_virtual_shard_num_column = std::find(column_names.begin(), column_names.end(), "_shard_num") != column_names.end();
|
bool has_virtual_shard_num_column = std::find(column_names.begin(), column_names.end(), "_shard_num") != column_names.end();
|
||||||
if (has_virtual_shard_num_column && !isVirtualColumn("_shard_num", metadata_snapshot))
|
if (has_virtual_shard_num_column && !isVirtualColumn("_shard_num", metadata_snapshot))
|
||||||
has_virtual_shard_num_column = false;
|
has_virtual_shard_num_column = false;
|
||||||
|
|
||||||
ClusterProxy::SelectStreamFactory select_stream_factory = remote_table_function_ptr
|
StorageID main_table = StorageID::createEmpty();
|
||||||
? ClusterProxy::SelectStreamFactory(
|
if (!remote_table_function_ptr)
|
||||||
header, processed_stage, remote_table_function_ptr, scalars, has_virtual_shard_num_column, local_context->getExternalTables())
|
main_table = StorageID{remote_database, remote_table};
|
||||||
: ClusterProxy::SelectStreamFactory(
|
|
||||||
|
ClusterProxy::SelectStreamFactory select_stream_factory =
|
||||||
|
ClusterProxy::SelectStreamFactory(
|
||||||
header,
|
header,
|
||||||
processed_stage,
|
processed_stage,
|
||||||
StorageID{remote_database, remote_table},
|
has_virtual_shard_num_column);
|
||||||
scalars,
|
|
||||||
has_virtual_shard_num_column,
|
|
||||||
local_context->getExternalTables());
|
|
||||||
|
|
||||||
ClusterProxy::executeQuery(query_plan, select_stream_factory, log,
|
ClusterProxy::executeQuery(
|
||||||
modified_query_ast, local_context, query_info,
|
query_plan, header, processed_stage,
|
||||||
|
main_table, remote_table_function_ptr,
|
||||||
|
select_stream_factory, log, modified_query_ast,
|
||||||
|
local_context, query_info,
|
||||||
sharding_key_expr, sharding_key_column_name,
|
sharding_key_expr, sharding_key_column_name,
|
||||||
query_info.cluster);
|
query_info.cluster);
|
||||||
|
|
||||||
@ -1292,8 +1292,11 @@ void registerStorageDistributed(StorageFactory & factory)
|
|||||||
|
|
||||||
String cluster_name = getClusterNameAndMakeLiteral(engine_args[0]);
|
String cluster_name = getClusterNameAndMakeLiteral(engine_args[0]);
|
||||||
|
|
||||||
engine_args[1] = evaluateConstantExpressionOrIdentifierAsLiteral(engine_args[1], args.getLocalContext());
|
const ContextPtr & context = args.getContext();
|
||||||
engine_args[2] = evaluateConstantExpressionOrIdentifierAsLiteral(engine_args[2], args.getLocalContext());
|
const ContextPtr & local_context = args.getLocalContext();
|
||||||
|
|
||||||
|
engine_args[1] = evaluateConstantExpressionOrIdentifierAsLiteral(engine_args[1], local_context);
|
||||||
|
engine_args[2] = evaluateConstantExpressionOrIdentifierAsLiteral(engine_args[2], local_context);
|
||||||
|
|
||||||
String remote_database = engine_args[1]->as<ASTLiteral &>().value.safeGet<String>();
|
String remote_database = engine_args[1]->as<ASTLiteral &>().value.safeGet<String>();
|
||||||
String remote_table = engine_args[2]->as<ASTLiteral &>().value.safeGet<String>();
|
String remote_table = engine_args[2]->as<ASTLiteral &>().value.safeGet<String>();
|
||||||
@ -1304,7 +1307,7 @@ void registerStorageDistributed(StorageFactory & factory)
|
|||||||
/// Check that sharding_key exists in the table and has numeric type.
|
/// Check that sharding_key exists in the table and has numeric type.
|
||||||
if (sharding_key)
|
if (sharding_key)
|
||||||
{
|
{
|
||||||
auto sharding_expr = buildShardingKeyExpression(sharding_key, args.getContext(), args.columns.getAllPhysical(), true);
|
auto sharding_expr = buildShardingKeyExpression(sharding_key, context, args.columns.getAllPhysical(), true);
|
||||||
const Block & block = sharding_expr->getSampleBlock();
|
const Block & block = sharding_expr->getSampleBlock();
|
||||||
|
|
||||||
if (block.columns() != 1)
|
if (block.columns() != 1)
|
||||||
@ -1335,6 +1338,16 @@ void registerStorageDistributed(StorageFactory & factory)
|
|||||||
"bytes_to_throw_insert cannot be less or equal to bytes_to_delay_insert (since it is handled first)");
|
"bytes_to_throw_insert cannot be less or equal to bytes_to_delay_insert (since it is handled first)");
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Set default values from the distributed_directory_monitor_* global context settings.
|
||||||
|
if (!distributed_settings.monitor_batch_inserts.changed)
|
||||||
|
distributed_settings.monitor_batch_inserts = context->getSettingsRef().distributed_directory_monitor_batch_inserts;
|
||||||
|
if (!distributed_settings.monitor_split_batch_on_failure.changed)
|
||||||
|
distributed_settings.monitor_split_batch_on_failure = context->getSettingsRef().distributed_directory_monitor_split_batch_on_failure;
|
||||||
|
if (!distributed_settings.monitor_sleep_time_ms.changed)
|
||||||
|
distributed_settings.monitor_sleep_time_ms = Poco::Timespan(context->getSettingsRef().distributed_directory_monitor_sleep_time_ms);
|
||||||
|
if (!distributed_settings.monitor_max_sleep_time_ms.changed)
|
||||||
|
distributed_settings.monitor_max_sleep_time_ms = Poco::Timespan(context->getSettingsRef().distributed_directory_monitor_max_sleep_time_ms);
|
||||||
|
|
||||||
return StorageDistributed::create(
|
return StorageDistributed::create(
|
||||||
args.table_id,
|
args.table_id,
|
||||||
args.columns,
|
args.columns,
|
||||||
@ -1343,7 +1356,7 @@ void registerStorageDistributed(StorageFactory & factory)
|
|||||||
remote_database,
|
remote_database,
|
||||||
remote_table,
|
remote_table,
|
||||||
cluster_name,
|
cluster_name,
|
||||||
args.getContext(),
|
context,
|
||||||
sharding_key,
|
sharding_key,
|
||||||
storage_policy,
|
storage_policy,
|
||||||
args.relative_data_path,
|
args.relative_data_path,
|
||||||
|
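The `.changed` checks above implement a fallback: a monitor_* setting left at its default in the table definition inherits the corresponding distributed_directory_monitor_* value from the global context. A minimal model of that resolution (std::optional standing in for the settings' changed flag; names are illustrative):

#include <iostream>
#include <optional>

// A per-table setting that was not set explicitly falls back to the global default.
template <typename T>
T resolveSetting(const std::optional<T> & table_setting, const T & global_setting)
{
    return table_setting ? *table_setting : global_setting;
}

int main()
{
    std::optional<int> monitor_sleep_time_ms;                         // not set in CREATE TABLE
    std::cout << resolveSetting(monitor_sleep_time_ms, 100) << '\n';  // 100 (global)

    monitor_sleep_time_ms = 500;                                      // set per table
    std::cout << resolveSetting(monitor_sleep_time_ms, 100) << '\n';  // 500 (table)
}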
@ -320,23 +320,26 @@ Block StorageInMemoryMetadata::getSampleBlockForColumns(
 {
     Block res;

-    auto all_columns = getColumns().getAllWithSubcolumns();
-    std::unordered_map<String, DataTypePtr> columns_map;
-    columns_map.reserve(all_columns.size());
-
-    for (const auto & elem : all_columns)
-        columns_map.emplace(elem.name, elem.type);
+    google::dense_hash_map<StringRef, const DataTypePtr *, StringRefHash> virtuals_map;
+    virtuals_map.set_empty_key(StringRef());

     /// Virtual columns must be appended after ordinary, because user can
     /// override them.
     for (const auto & column : virtuals)
-        columns_map.emplace(column.name, column.type);
+        virtuals_map.emplace(column.name, &column.type);

     for (const auto & name : column_names)
     {
-        auto it = columns_map.find(name);
-        if (it != columns_map.end())
-            res.insert({it->second->createColumn(), it->second, it->first});
+        auto column = getColumns().tryGetColumnOrSubcolumn(ColumnsDescription::All, name);
+        if (column)
+        {
+            res.insert({column->type->createColumn(), column->type, column->name});
+        }
+        else if (auto it = virtuals_map.find(name); it != virtuals_map.end())
+        {
+            const auto & type = *it->second;
+            res.insert({type->createColumn(), type, name});
+        }
         else
             throw Exception(
                 "Column " + backQuote(name) + " not found in table " + (storage_id.empty() ? "" : storage_id.getNameForLogs()),

@ -508,26 +511,31 @@ namespace

 void StorageInMemoryMetadata::check(const Names & column_names, const NamesAndTypesList & virtuals, const StorageID & storage_id) const
 {
-    NamesAndTypesList available_columns = getColumns().getAllPhysicalWithSubcolumns();
-    available_columns.insert(available_columns.end(), virtuals.begin(), virtuals.end());
-
-    const String list_of_columns = listOfColumns(available_columns);
-
     if (column_names.empty())
-        throw Exception("Empty list of columns queried. There are columns: " + list_of_columns, ErrorCodes::EMPTY_LIST_OF_COLUMNS_QUERIED);
+    {
+        auto list_of_columns = listOfColumns(getColumns().getAllPhysicalWithSubcolumns());
+        throw Exception(ErrorCodes::EMPTY_LIST_OF_COLUMNS_QUERIED,
+            "Empty list of columns queried. There are columns: {}", list_of_columns);
+    }

-    const auto columns_map = getColumnsMap(available_columns);
+    const auto virtuals_map = getColumnsMap(virtuals);
     auto unique_names = initUniqueStrings();

     for (const auto & name : column_names)
     {
-        if (columns_map.end() == columns_map.find(name))
-            throw Exception(
-                "There is no column with name " + backQuote(name) + " in table " + storage_id.getNameForLogs() + ". There are columns: " + list_of_columns,
-                ErrorCodes::NO_SUCH_COLUMN_IN_TABLE);
+        bool has_column = getColumns().hasColumnOrSubcolumn(ColumnsDescription::AllPhysical, name) || virtuals_map.count(name);
+
+        if (!has_column)
+        {
+            auto list_of_columns = listOfColumns(getColumns().getAllPhysicalWithSubcolumns());
+            throw Exception(ErrorCodes::NO_SUCH_COLUMN_IN_TABLE,
+                "There is no column with name {} in table {}. There are columns: {}",
+                backQuote(name), storage_id.getNameForLogs(), list_of_columns);
+        }

         if (unique_names.end() != unique_names.find(name))
-            throw Exception("Column " + name + " queried more than once", ErrorCodes::COLUMN_QUERIED_MORE_THAN_ONCE);
+            throw Exception(ErrorCodes::COLUMN_QUERIED_MORE_THAN_ONCE, "Column {} queried more than once", name);

         unique_names.insert(name);
     }
 }

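In getSampleBlockForColumns the lookup order matters: real columns and subcolumns are tried first and only then the virtuals map, so a user-defined column overrides a virtual column of the same name, as the retained comment says. A self-contained model of that two-level lookup (string types stand in for DataTypePtr; names are illustrative):

#include <iostream>
#include <optional>
#include <string>
#include <unordered_map>

// Real columns win over virtual columns of the same name.
std::optional<std::string> resolveColumnType(
    const std::unordered_map<std::string, std::string> & real_columns,
    const std::unordered_map<std::string, std::string> & virtual_columns,
    const std::string & name)
{
    if (auto it = real_columns.find(name); it != real_columns.end())
        return it->second;
    if (auto it = virtual_columns.find(name); it != virtual_columns.end())
        return it->second;
    return {};  // the caller throws "Column not found in table"
}

int main()
{
    std::unordered_map<std::string, std::string> real{{"_shard_num", "UInt32 (user-defined)"}};
    std::unordered_map<std::string, std::string> virt{{"_shard_num", "UInt32 (virtual)"}};

    std::cout << *resolveColumnType(real, virt, "_shard_num") << '\n';  // user-defined wins
}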
@ -660,7 +660,7 @@ Pipe StorageLog::read(
     auto lock_timeout = getLockTimeout(context);
     loadMarks(lock_timeout);

-    auto all_columns = metadata_snapshot->getColumns().getAllWithSubcolumns().addTypes(column_names);
+    auto all_columns = metadata_snapshot->getColumns().getByNames(ColumnsDescription::All, column_names, true);
     all_columns = Nested::convertToSubcolumns(all_columns);

     std::shared_lock lock(rwlock, lock_timeout);

@ -35,7 +35,7 @@ public:
         std::shared_ptr<std::atomic<size_t>> parallel_execution_index_,
         InitializerFunc initializer_func_ = {})
         : SourceWithProgress(metadata_snapshot->getSampleBlockForColumns(column_names_, storage.getVirtuals(), storage.getStorageID()))
-        , column_names_and_types(metadata_snapshot->getColumns().getAllWithSubcolumns().addTypes(std::move(column_names_)))
+        , column_names_and_types(metadata_snapshot->getColumns().getByNames(ColumnsDescription::All, column_names_, true))
         , data(data_)
         , parallel_execution_index(parallel_execution_index_)
         , initializer_func(std::move(initializer_func_))

@ -489,7 +489,7 @@ Pipe StorageTinyLog::read(
 {
     metadata_snapshot->check(column_names, getVirtuals(), getStorageID());

-    auto all_columns = metadata_snapshot->getColumns().getAllWithSubcolumns().addTypes(column_names);
+    auto all_columns = metadata_snapshot->getColumns().getByNames(ColumnsDescription::All, column_names, true);

     // When reading, we lock the entire storage, because we only have one file
     // per column and can't modify it concurrently.

@ -54,6 +54,7 @@ const char * auto_contributors[] {
     "Alexander Sapin",
     "Alexander Tokmakov",
     "Alexander Tretiakov",
+    "Alexandra",
     "Alexandra Latysheva",
     "Alexandre Snarskii",
     "Alexandr Kondratev",

@ -95,6 +96,7 @@ const char * auto_contributors[] {
     "Anatoly Pugachev",
     "ana-uvarova",
     "AnaUvarova",
+    "Andr0901",
     "Andreas Hunkeler",
     "AndreevDm",
     "Andrei Bodrov",

@ -140,6 +142,7 @@ const char * auto_contributors[] {
     "aprudaev",
     "Ariel Robaldo",
     "Arsen Hakobyan",
+    "Arslan G",
     "ArtCorp",
     "Artem Andreenko",
     "Artemeey",

@ -335,6 +338,7 @@ const char * auto_contributors[] {
     "fessmage",
     "FgoDt",
     "fibersel",
+    "Filatenkov Artur",
     "filimonov",
     "filipe",
     "Filipe Caixeta",

@ -389,6 +393,7 @@ const char * auto_contributors[] {
     "hexiaoting",
     "Hiroaki Nakamura",
     "hotid",
+    "huangzhaowei",
     "HuFuwang",
     "Hui Wang",
     "hustnn",

@ -404,6 +409,7 @@ const char * auto_contributors[] {
     "Igr",
     "Igr Mineev",
     "ikarishinjieva",
+    "Ikko Ashimine",
     "ikopylov",
     "Ildar Musin",
     "Ildus Kurbangaliev",

@ -442,6 +448,7 @@ const char * auto_contributors[] {
     "Jacob Hayes",
     "jakalletti",
     "JaosnHsieh",
+    "jasine",
     "Jason",
     "javartisan",
     "javi",

@ -449,6 +456,7 @@ const char * auto_contributors[] {
     "Javi Santana",
     "Javi santana bot",
     "Jean Baptiste Favre",
+    "Jeffrey Dang",
     "jennyma",
     "jetgm",
     "Jiading Guo",

@ -502,6 +510,7 @@ const char * auto_contributors[] {
     "Leopold Schabel",
     "leozhang",
     "Lev Borodin",
+    "levie",
     "levushkin aleksej",
     "levysh",
     "Lewinma",

@ -634,6 +643,8 @@ const char * auto_contributors[] {
     "nauta",
     "nautaa",
     "Neeke Gao",
+    "neng.liu",
+    "Neng Liu",
     "NengLiu",
     "never lee",
     "NeZeD [Mac Pro]",

@ -839,6 +850,7 @@ const char * auto_contributors[] {
     "TCeason",
     "Tema Novikov",
     "templarzq",
+    "terrylin",
     "The-Alchemist",
     "Tiaonmmn",
     "tiger.yan",

@ -877,6 +889,7 @@ const char * auto_contributors[] {
     "Veloman Yunkan",
     "Veniamin Gvozdikov",
     "Veselkov Konstantin",
+    "vgocoder",
     "vic",
     "vicdashkov",
     "Victor",

@ -925,6 +938,7 @@ const char * auto_contributors[] {
     "wzl",
     "Xianda Ke",
     "Xiang Zhou",
+    "xiedeyantu",
     "xPoSx",
     "Yağızcan Değirmenci",
     "yang",

@ -33,7 +33,7 @@ SELECTS_SQL = {
         "ORDER BY node"),
 }

-EXCEPTION_NETWORK = 'e.displayText() = DB::NetException: '
+EXCEPTION_NETWORK = 'DB::NetException: '
 EXCEPTION_TIMEOUT = 'Timeout exceeded while reading from socket ('
 EXCEPTION_CONNECT = 'Timeout: connect timed out: '

@ -76,13 +76,13 @@ def _check_exception(exception, expected_tries=3):

     for i, line in enumerate(lines[3:3 + expected_tries]):
         expected_lines = (
-            'Code: 209, ' + EXCEPTION_NETWORK + EXCEPTION_TIMEOUT,
-            'Code: 209, ' + EXCEPTION_NETWORK + EXCEPTION_CONNECT,
+            'Code: 209. ' + EXCEPTION_NETWORK + EXCEPTION_TIMEOUT,
+            'Code: 209. ' + EXCEPTION_NETWORK + EXCEPTION_CONNECT,
             EXCEPTION_TIMEOUT,
         )

         assert any(line.startswith(expected) for expected in expected_lines), \
-            'Unexpected exception at one of the connection attempts'
+            'Unexpected exception "{}" at one of the connection attempts'.format(line)

     assert lines[3 + expected_tries] == '', 'Wrong number of connect attempts'

@ -25,7 +25,7 @@
             <type>encrypted</type>
             <disk>disk_local</disk>
             <path>encrypted/</path>
-            <key>abcdefghijklmnop</key>
+            <key_hex>109105c600c12066f82f1a4dbb41a08e</key_hex>
         </disk_local_encrypted>
     </disks>
     <policies>

@ -95,8 +95,11 @@ def test_mysql_client(started_cluster):
|
|||||||
'''.format(host=started_cluster.get_instance_ip('node'), port=server_port), demux=True)
|
'''.format(host=started_cluster.get_instance_ip('node'), port=server_port), demux=True)
|
||||||
|
|
||||||
assert stdout.decode() == 'count()\n1\n'
|
assert stdout.decode() == 'count()\n1\n'
|
||||||
assert stderr[0:182].decode() == "mysql: [Warning] Using a password on the command line interface can be insecure.\n" \
|
expected_msg = '\n'.join([
|
||||||
"ERROR 81 (00000) at line 1: Code: 81, e.displayText() = DB::Exception: Database system2 doesn't exist"
|
"mysql: [Warning] Using a password on the command line interface can be insecure.",
|
||||||
|
"ERROR 81 (00000) at line 1: Code: 81. DB::Exception: Database system2 doesn't exist",
|
||||||
|
])
|
||||||
|
assert stderr[:len(expected_msg)].decode() == expected_msg
|
||||||
|
|
||||||
code, (stdout, stderr) = started_cluster.mysql_client_container.exec_run('''
|
code, (stdout, stderr) = started_cluster.mysql_client_container.exec_run('''
|
||||||
mysql --protocol tcp -h {host} -P {port} default -u default --password=123
|
mysql --protocol tcp -h {host} -P {port} default -u default --password=123
|
||||||
@ -122,8 +125,11 @@ def test_mysql_client_exception(started_cluster):
|
|||||||
-e "CREATE TABLE default.t1_remote_mysql AS mysql('127.0.0.1:10086','default','t1_local','default','');"
|
-e "CREATE TABLE default.t1_remote_mysql AS mysql('127.0.0.1:10086','default','t1_local','default','');"
|
||||||
'''.format(host=started_cluster.get_instance_ip('node'), port=server_port), demux=True)
|
'''.format(host=started_cluster.get_instance_ip('node'), port=server_port), demux=True)
|
||||||
|
|
||||||
assert stderr[0:258].decode() == "mysql: [Warning] Using a password on the command line interface can be insecure.\n" \
|
expected_msg = '\n'.join([
|
||||||
"ERROR 1000 (00000) at line 1: Poco::Exception. Code: 1000, e.code() = 0, e.displayText() = Exception: Connections to all replicas failed: default@127.0.0.1:10086 as user default"
|
"mysql: [Warning] Using a password on the command line interface can be insecure.",
|
||||||
|
"ERROR 1000 (00000) at line 1: Poco::Exception. Code: 1000, e.code() = 0, Exception: Connections to all replicas failed: default@127.0.0.1:10086 as user default",
|
||||||
|
])
|
||||||
|
assert stderr[:len(expected_msg)].decode() == expected_msg
|
||||||
|
|
||||||
|
|
||||||
def test_mysql_affected_rows(started_cluster):
|
def test_mysql_affected_rows(started_cluster):
|
||||||
@ -328,8 +334,7 @@ def test_python_client(started_cluster):
|
|||||||
with pytest.raises(pymysql.InternalError) as exc_info:
|
with pytest.raises(pymysql.InternalError) as exc_info:
|
||||||
client.query('select name from tables')
|
client.query('select name from tables')
|
||||||
|
|
||||||
assert exc_info.value.args[1][
|
assert exc_info.value.args[1].startswith("Code: 60. DB::Exception: Table default.tables doesn't exist"), exc_info.value.args[1]
|
||||||
0:77] == "Code: 60, e.displayText() = DB::Exception: Table default.tables doesn't exist"
|
|
||||||
|
|
||||||
cursor = client.cursor(pymysql.cursors.DictCursor)
|
cursor = client.cursor(pymysql.cursors.DictCursor)
|
||||||
cursor.execute("select 1 as a, 'тест' as b")
|
cursor.execute("select 1 as a, 'тест' as b")
|
||||||
@ -348,8 +353,7 @@ def test_python_client(started_cluster):
|
|||||||
with pytest.raises(pymysql.InternalError) as exc_info:
|
with pytest.raises(pymysql.InternalError) as exc_info:
|
||||||
client.query('select name from tables')
|
client.query('select name from tables')
|
||||||
|
|
||||||
assert exc_info.value.args[1][
|
assert exc_info.value.args[1].startswith("Code: 60. DB::Exception: Table default.tables doesn't exist"), exc_info.value.args[1]
|
||||||
0:77] == "Code: 60, e.displayText() = DB::Exception: Table default.tables doesn't exist"
|
|
||||||
|
|
||||||
cursor = client.cursor(pymysql.cursors.DictCursor)
|
cursor = client.cursor(pymysql.cursors.DictCursor)
|
||||||
cursor.execute("select 1 as a, 'тест' as b")
|
cursor.execute("select 1 as a, 'тест' as b")
|
||||||
@@ -360,7 +364,7 @@ def test_python_client(started_cluster):
    with pytest.raises(pymysql.InternalError) as exc_info:
        client.select_db('system2')

-    assert exc_info.value.args[1][0:73] == "Code: 81, e.displayText() = DB::Exception: Database system2 doesn't exist"
+    assert exc_info.value.args[1].startswith("Code: 81. DB::Exception: Database system2 doesn't exist"), exc_info.value.args[1]

    cursor = client.cursor(pymysql.cursors.DictCursor)
    cursor.execute('CREATE DATABASE x')
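The startswith-based assertions above pin only the stable prefix of the server message. A sketch of the same pattern against a live server, assuming ClickHouse's MySQL protocol endpoint is listening on 127.0.0.1:9004 (an assumption, not part of the test):

    import pymysql

    client = pymysql.connections.Connection(
        host='127.0.0.1', user='default', password='123',
        database='default', port=9004)
    try:
        client.query('select name from tables')
    except pymysql.InternalError as exc:
        # args[1] carries the server-side message; check only its stable prefix.
        assert exc.args[1].startswith("Code: 60. DB::Exception:"), exc.args[1]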
@@ -78,7 +78,7 @@ def test_no_stall(started_cluster):
    """
    SELECT count()
    FROM system.replication_queue
-    WHERE last_exception LIKE '%e.displayText() = Timeout%'
+    WHERE last_exception LIKE '%Timeout%'
    AND last_exception NOT LIKE '%connect timed out%'
    """).strip())

@@ -1339,7 +1339,7 @@ def test_librdkafka_compression(kafka_cluster):

    Example of corruption:

-    2020.12.10 09:59:56.831507 [ 20 ] {} <Error> void DB::StorageKafka::threadFunc(size_t): Code: 27, e.displayText() = DB::Exception: Cannot parse input: expected '"' before: 'foo"}': (while reading the value of key value): (at row 1)
+    2020.12.10 09:59:56.831507 [ 20 ] {} <Error> void DB::StorageKafka::threadFunc(size_t): Code: 27. DB::Exception: Cannot parse input: expected '"' before: 'foo"}': (while reading the value of key value): (at row 1)

    To trigger this regression there should duplicated messages

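Many hunks in this commit track the new server message format, "Code: N. DB::Exception: text. (ERROR_NAME)", replacing the old "Code: N, e.displayText() = DB::Exception: text". A small illustrative parser that accepts both shapes (the sample strings and the symbolic error name are made up):

    import re

    pattern = re.compile(
        r"Code: (\d+)[.,] (?:e\.displayText\(\) = )?(\w+(?:::\w+)*): (.*?)(?: \((\w+)\))?$")

    for line in [
        "Code: 27, e.displayText() = DB::Exception: Cannot parse input",
        "Code: 27. DB::Exception: Cannot parse input. (CANNOT_PARSE_INPUT)",
    ]:
        m = pattern.match(line)
        # error code, exception class, and (new format only) the symbolic name
        print(m.group(1), m.group(2), m.group(4))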
tests/performance/lot_of_subcolumns.xml (new file, 23 lines)
@@ -0,0 +1,23 @@
<test>
<create_query>
CREATE TABLE lot_of_arrays(id UInt64,
`nested.arr0` Array(UInt64), `nested.arr1` Array(UInt64), `nested.arr2` Array(UInt64), `nested.arr3` Array(UInt64), `nested.arr4` Array(UInt64), `nested.arr5` Array(UInt64), `nested.arr6` Array(UInt64), `nested.arr7` Array(UInt64), `nested.arr8` Array(UInt64), `nested.arr9` Array(UInt64), `nested.arr10` Array(UInt64), `nested.arr11` Array(UInt64), `nested.arr12` Array(UInt64), `nested.arr13` Array(UInt64), `nested.arr14` Array(UInt64), `nested.arr15` Array(UInt64), `nested.arr16` Array(UInt64), `nested.arr17` Array(UInt64), `nested.arr18` Array(UInt64), `nested.arr19` Array(UInt64), `nested.arr20` Array(UInt64), `nested.arr21` Array(UInt64), `nested.arr22` Array(UInt64), `nested.arr23` Array(UInt64), `nested.arr24` Array(UInt64), `nested.arr25` Array(UInt64), `nested.arr26` Array(UInt64), `nested.arr27` Array(UInt64), `nested.arr28` Array(UInt64), `nested.arr29` Array(UInt64), `nested.arr30` Array(UInt64), `nested.arr31` Array(UInt64), `nested.arr32` Array(UInt64), `nested.arr33` Array(UInt64), `nested.arr34` Array(UInt64), `nested.arr35` Array(UInt64), `nested.arr36` Array(UInt64), `nested.arr37` Array(UInt64), `nested.arr38` Array(UInt64), `nested.arr39` Array(UInt64), `nested.arr40` Array(UInt64), `nested.arr41` Array(UInt64), `nested.arr42` Array(UInt64), `nested.arr43` Array(UInt64), `nested.arr44` Array(UInt64), `nested.arr45` Array(UInt64), `nested.arr46` Array(UInt64), `nested.arr47` Array(UInt64), `nested.arr48` Array(UInt64), `nested.arr49` Array(UInt64), `nested.arr50` Array(UInt64), `nested.arr51` Array(UInt64), `nested.arr52` Array(UInt64), `nested.arr53` Array(UInt64), `nested.arr54` Array(UInt64), `nested.arr55` Array(UInt64), `nested.arr56` Array(UInt64), `nested.arr57` Array(UInt64), `nested.arr58` Array(UInt64), `nested.arr59` Array(UInt64), `nested.arr60` Array(UInt64), `nested.arr61` Array(UInt64), `nested.arr62` Array(UInt64), `nested.arr63` Array(UInt64), `nested.arr64` Array(UInt64), `nested.arr65` Array(UInt64), `nested.arr66` Array(UInt64), `nested.arr67` Array(UInt64), `nested.arr68` Array(UInt64), `nested.arr69` Array(UInt64), `nested.arr70` Array(UInt64), `nested.arr71` Array(UInt64), `nested.arr72` Array(UInt64), `nested.arr73` Array(UInt64), `nested.arr74` Array(UInt64), `nested.arr75` Array(UInt64), `nested.arr76` Array(UInt64), `nested.arr77` Array(UInt64), `nested.arr78` Array(UInt64), `nested.arr79` Array(UInt64), `nested.arr80` Array(UInt64), `nested.arr81` Array(UInt64), `nested.arr82` Array(UInt64), `nested.arr83` Array(UInt64), `nested.arr84` Array(UInt64), `nested.arr85` Array(UInt64), `nested.arr86` Array(UInt64), `nested.arr87` Array(UInt64), `nested.arr88` Array(UInt64), `nested.arr89` Array(UInt64), `nested.arr90` Array(UInt64), `nested.arr91` Array(UInt64), `nested.arr92` Array(UInt64), `nested.arr93` Array(UInt64), `nested.arr94` Array(UInt64), `nested.arr95` Array(UInt64), `nested.arr96` Array(UInt64), `nested.arr97` Array(UInt64), `nested.arr98` Array(UInt64), `nested.arr99` Array(UInt64),
`nested.arr100` Array(UInt64), `nested.arr101` Array(UInt64), `nested.arr102` Array(UInt64), `nested.arr103` Array(UInt64), `nested.arr104` Array(UInt64), `nested.arr105` Array(UInt64), `nested.arr106` Array(UInt64), `nested.arr107` Array(UInt64), `nested.arr108` Array(UInt64), `nested.arr109` Array(UInt64), `nested.arr110` Array(UInt64), `nested.arr111` Array(UInt64), `nested.arr112` Array(UInt64), `nested.arr113` Array(UInt64), `nested.arr114` Array(UInt64), `nested.arr115` Array(UInt64), `nested.arr116` Array(UInt64), `nested.arr117` Array(UInt64), `nested.arr118` Array(UInt64), `nested.arr119` Array(UInt64), `nested.arr120` Array(UInt64), `nested.arr121` Array(UInt64), `nested.arr122` Array(UInt64), `nested.arr123` Array(UInt64), `nested.arr124` Array(UInt64), `nested.arr125` Array(UInt64), `nested.arr126` Array(UInt64), `nested.arr127` Array(UInt64), `nested.arr128` Array(UInt64), `nested.arr129` Array(UInt64), `nested.arr130` Array(UInt64), `nested.arr131` Array(UInt64), `nested.arr132` Array(UInt64), `nested.arr133` Array(UInt64), `nested.arr134` Array(UInt64), `nested.arr135` Array(UInt64), `nested.arr136` Array(UInt64), `nested.arr137` Array(UInt64), `nested.arr138` Array(UInt64), `nested.arr139` Array(UInt64), `nested.arr140` Array(UInt64), `nested.arr141` Array(UInt64), `nested.arr142` Array(UInt64), `nested.arr143` Array(UInt64), `nested.arr144` Array(UInt64), `nested.arr145` Array(UInt64), `nested.arr146` Array(UInt64), `nested.arr147` Array(UInt64), `nested.arr148` Array(UInt64), `nested.arr149` Array(UInt64), `nested.arr150` Array(UInt64), `nested.arr151` Array(UInt64), `nested.arr152` Array(UInt64), `nested.arr153` Array(UInt64), `nested.arr154` Array(UInt64), `nested.arr155` Array(UInt64), `nested.arr156` Array(UInt64), `nested.arr157` Array(UInt64), `nested.arr158` Array(UInt64), `nested.arr159` Array(UInt64), `nested.arr160` Array(UInt64), `nested.arr161` Array(UInt64), `nested.arr162` Array(UInt64), `nested.arr163` Array(UInt64), `nested.arr164` Array(UInt64), `nested.arr165` Array(UInt64), `nested.arr166` Array(UInt64), `nested.arr167` Array(UInt64), `nested.arr168` Array(UInt64), `nested.arr169` Array(UInt64), `nested.arr170` Array(UInt64), `nested.arr171` Array(UInt64), `nested.arr172` Array(UInt64), `nested.arr173` Array(UInt64), `nested.arr174` Array(UInt64), `nested.arr175` Array(UInt64), `nested.arr176` Array(UInt64), `nested.arr177` Array(UInt64), `nested.arr178` Array(UInt64), `nested.arr179` Array(UInt64), `nested.arr180` Array(UInt64), `nested.arr181` Array(UInt64), `nested.arr182` Array(UInt64), `nested.arr183` Array(UInt64), `nested.arr184` Array(UInt64), `nested.arr185` Array(UInt64), `nested.arr186` Array(UInt64), `nested.arr187` Array(UInt64), `nested.arr188` Array(UInt64), `nested.arr189` Array(UInt64), `nested.arr190` Array(UInt64), `nested.arr191` Array(UInt64), `nested.arr192` Array(UInt64), `nested.arr193` Array(UInt64), `nested.arr194` Array(UInt64), `nested.arr195` Array(UInt64), `nested.arr196` Array(UInt64), `nested.arr197` Array(UInt64), `nested.arr198` Array(UInt64), `nested.arr199` Array(UInt64),
`nested.arr200` Array(UInt64), `nested.arr201` Array(UInt64), `nested.arr202` Array(UInt64), `nested.arr203` Array(UInt64), `nested.arr204` Array(UInt64), `nested.arr205` Array(UInt64), `nested.arr206` Array(UInt64), `nested.arr207` Array(UInt64), `nested.arr208` Array(UInt64), `nested.arr209` Array(UInt64), `nested.arr210` Array(UInt64), `nested.arr211` Array(UInt64), `nested.arr212` Array(UInt64), `nested.arr213` Array(UInt64), `nested.arr214` Array(UInt64), `nested.arr215` Array(UInt64), `nested.arr216` Array(UInt64), `nested.arr217` Array(UInt64), `nested.arr218` Array(UInt64), `nested.arr219` Array(UInt64), `nested.arr220` Array(UInt64), `nested.arr221` Array(UInt64), `nested.arr222` Array(UInt64), `nested.arr223` Array(UInt64), `nested.arr224` Array(UInt64), `nested.arr225` Array(UInt64), `nested.arr226` Array(UInt64), `nested.arr227` Array(UInt64), `nested.arr228` Array(UInt64), `nested.arr229` Array(UInt64), `nested.arr230` Array(UInt64), `nested.arr231` Array(UInt64), `nested.arr232` Array(UInt64), `nested.arr233` Array(UInt64), `nested.arr234` Array(UInt64), `nested.arr235` Array(UInt64), `nested.arr236` Array(UInt64), `nested.arr237` Array(UInt64), `nested.arr238` Array(UInt64), `nested.arr239` Array(UInt64), `nested.arr240` Array(UInt64), `nested.arr241` Array(UInt64), `nested.arr242` Array(UInt64), `nested.arr243` Array(UInt64), `nested.arr244` Array(UInt64), `nested.arr245` Array(UInt64), `nested.arr246` Array(UInt64), `nested.arr247` Array(UInt64), `nested.arr248` Array(UInt64), `nested.arr249` Array(UInt64), `nested.arr250` Array(UInt64), `nested.arr251` Array(UInt64), `nested.arr252` Array(UInt64), `nested.arr253` Array(UInt64), `nested.arr254` Array(UInt64), `nested.arr255` Array(UInt64), `nested.arr256` Array(UInt64), `nested.arr257` Array(UInt64), `nested.arr258` Array(UInt64), `nested.arr259` Array(UInt64), `nested.arr260` Array(UInt64), `nested.arr261` Array(UInt64), `nested.arr262` Array(UInt64), `nested.arr263` Array(UInt64), `nested.arr264` Array(UInt64), `nested.arr265` Array(UInt64), `nested.arr266` Array(UInt64), `nested.arr267` Array(UInt64), `nested.arr268` Array(UInt64), `nested.arr269` Array(UInt64), `nested.arr270` Array(UInt64), `nested.arr271` Array(UInt64), `nested.arr272` Array(UInt64), `nested.arr273` Array(UInt64), `nested.arr274` Array(UInt64), `nested.arr275` Array(UInt64), `nested.arr276` Array(UInt64), `nested.arr277` Array(UInt64), `nested.arr278` Array(UInt64), `nested.arr279` Array(UInt64), `nested.arr280` Array(UInt64), `nested.arr281` Array(UInt64), `nested.arr282` Array(UInt64), `nested.arr283` Array(UInt64), `nested.arr284` Array(UInt64), `nested.arr285` Array(UInt64), `nested.arr286` Array(UInt64), `nested.arr287` Array(UInt64), `nested.arr288` Array(UInt64), `nested.arr289` Array(UInt64), `nested.arr290` Array(UInt64), `nested.arr291` Array(UInt64), `nested.arr292` Array(UInt64), `nested.arr293` Array(UInt64), `nested.arr294` Array(UInt64), `nested.arr295` Array(UInt64), `nested.arr296` Array(UInt64), `nested.arr297` Array(UInt64), `nested.arr298` Array(UInt64), `nested.arr299` Array(UInt64),
`nested.arr300` Array(UInt64), `nested.arr301` Array(UInt64), `nested.arr302` Array(UInt64), `nested.arr303` Array(UInt64), `nested.arr304` Array(UInt64), `nested.arr305` Array(UInt64), `nested.arr306` Array(UInt64), `nested.arr307` Array(UInt64), `nested.arr308` Array(UInt64), `nested.arr309` Array(UInt64), `nested.arr310` Array(UInt64), `nested.arr311` Array(UInt64), `nested.arr312` Array(UInt64), `nested.arr313` Array(UInt64), `nested.arr314` Array(UInt64), `nested.arr315` Array(UInt64), `nested.arr316` Array(UInt64), `nested.arr317` Array(UInt64), `nested.arr318` Array(UInt64), `nested.arr319` Array(UInt64), `nested.arr320` Array(UInt64), `nested.arr321` Array(UInt64), `nested.arr322` Array(UInt64), `nested.arr323` Array(UInt64), `nested.arr324` Array(UInt64), `nested.arr325` Array(UInt64), `nested.arr326` Array(UInt64), `nested.arr327` Array(UInt64), `nested.arr328` Array(UInt64), `nested.arr329` Array(UInt64), `nested.arr330` Array(UInt64), `nested.arr331` Array(UInt64), `nested.arr332` Array(UInt64), `nested.arr333` Array(UInt64), `nested.arr334` Array(UInt64), `nested.arr335` Array(UInt64), `nested.arr336` Array(UInt64), `nested.arr337` Array(UInt64), `nested.arr338` Array(UInt64), `nested.arr339` Array(UInt64), `nested.arr340` Array(UInt64), `nested.arr341` Array(UInt64), `nested.arr342` Array(UInt64), `nested.arr343` Array(UInt64), `nested.arr344` Array(UInt64), `nested.arr345` Array(UInt64), `nested.arr346` Array(UInt64), `nested.arr347` Array(UInt64), `nested.arr348` Array(UInt64), `nested.arr349` Array(UInt64), `nested.arr350` Array(UInt64), `nested.arr351` Array(UInt64), `nested.arr352` Array(UInt64), `nested.arr353` Array(UInt64), `nested.arr354` Array(UInt64), `nested.arr355` Array(UInt64), `nested.arr356` Array(UInt64), `nested.arr357` Array(UInt64), `nested.arr358` Array(UInt64), `nested.arr359` Array(UInt64), `nested.arr360` Array(UInt64), `nested.arr361` Array(UInt64), `nested.arr362` Array(UInt64), `nested.arr363` Array(UInt64), `nested.arr364` Array(UInt64), `nested.arr365` Array(UInt64), `nested.arr366` Array(UInt64), `nested.arr367` Array(UInt64), `nested.arr368` Array(UInt64), `nested.arr369` Array(UInt64), `nested.arr370` Array(UInt64), `nested.arr371` Array(UInt64), `nested.arr372` Array(UInt64), `nested.arr373` Array(UInt64), `nested.arr374` Array(UInt64), `nested.arr375` Array(UInt64), `nested.arr376` Array(UInt64), `nested.arr377` Array(UInt64), `nested.arr378` Array(UInt64), `nested.arr379` Array(UInt64), `nested.arr380` Array(UInt64), `nested.arr381` Array(UInt64), `nested.arr382` Array(UInt64), `nested.arr383` Array(UInt64), `nested.arr384` Array(UInt64), `nested.arr385` Array(UInt64), `nested.arr386` Array(UInt64), `nested.arr387` Array(UInt64), `nested.arr388` Array(UInt64), `nested.arr389` Array(UInt64), `nested.arr390` Array(UInt64), `nested.arr391` Array(UInt64), `nested.arr392` Array(UInt64), `nested.arr393` Array(UInt64), `nested.arr394` Array(UInt64), `nested.arr395` Array(UInt64), `nested.arr396` Array(UInt64), `nested.arr397` Array(UInt64), `nested.arr398` Array(UInt64), `nested.arr399` Array(UInt64),
`nested.arr400` Array(UInt64), `nested.arr401` Array(UInt64), `nested.arr402` Array(UInt64), `nested.arr403` Array(UInt64), `nested.arr404` Array(UInt64), `nested.arr405` Array(UInt64), `nested.arr406` Array(UInt64), `nested.arr407` Array(UInt64), `nested.arr408` Array(UInt64), `nested.arr409` Array(UInt64), `nested.arr410` Array(UInt64), `nested.arr411` Array(UInt64), `nested.arr412` Array(UInt64), `nested.arr413` Array(UInt64), `nested.arr414` Array(UInt64), `nested.arr415` Array(UInt64), `nested.arr416` Array(UInt64), `nested.arr417` Array(UInt64), `nested.arr418` Array(UInt64), `nested.arr419` Array(UInt64), `nested.arr420` Array(UInt64), `nested.arr421` Array(UInt64), `nested.arr422` Array(UInt64), `nested.arr423` Array(UInt64), `nested.arr424` Array(UInt64), `nested.arr425` Array(UInt64), `nested.arr426` Array(UInt64), `nested.arr427` Array(UInt64), `nested.arr428` Array(UInt64), `nested.arr429` Array(UInt64), `nested.arr430` Array(UInt64), `nested.arr431` Array(UInt64), `nested.arr432` Array(UInt64), `nested.arr433` Array(UInt64), `nested.arr434` Array(UInt64), `nested.arr435` Array(UInt64), `nested.arr436` Array(UInt64), `nested.arr437` Array(UInt64), `nested.arr438` Array(UInt64), `nested.arr439` Array(UInt64), `nested.arr440` Array(UInt64), `nested.arr441` Array(UInt64), `nested.arr442` Array(UInt64), `nested.arr443` Array(UInt64), `nested.arr444` Array(UInt64), `nested.arr445` Array(UInt64), `nested.arr446` Array(UInt64), `nested.arr447` Array(UInt64), `nested.arr448` Array(UInt64), `nested.arr449` Array(UInt64), `nested.arr450` Array(UInt64), `nested.arr451` Array(UInt64), `nested.arr452` Array(UInt64), `nested.arr453` Array(UInt64), `nested.arr454` Array(UInt64), `nested.arr455` Array(UInt64), `nested.arr456` Array(UInt64), `nested.arr457` Array(UInt64), `nested.arr458` Array(UInt64), `nested.arr459` Array(UInt64), `nested.arr460` Array(UInt64), `nested.arr461` Array(UInt64), `nested.arr462` Array(UInt64), `nested.arr463` Array(UInt64), `nested.arr464` Array(UInt64), `nested.arr465` Array(UInt64), `nested.arr466` Array(UInt64), `nested.arr467` Array(UInt64), `nested.arr468` Array(UInt64), `nested.arr469` Array(UInt64), `nested.arr470` Array(UInt64), `nested.arr471` Array(UInt64), `nested.arr472` Array(UInt64), `nested.arr473` Array(UInt64), `nested.arr474` Array(UInt64), `nested.arr475` Array(UInt64), `nested.arr476` Array(UInt64), `nested.arr477` Array(UInt64), `nested.arr478` Array(UInt64), `nested.arr479` Array(UInt64), `nested.arr480` Array(UInt64), `nested.arr481` Array(UInt64), `nested.arr482` Array(UInt64), `nested.arr483` Array(UInt64), `nested.arr484` Array(UInt64), `nested.arr485` Array(UInt64), `nested.arr486` Array(UInt64), `nested.arr487` Array(UInt64), `nested.arr488` Array(UInt64), `nested.arr489` Array(UInt64), `nested.arr490` Array(UInt64), `nested.arr491` Array(UInt64), `nested.arr492` Array(UInt64), `nested.arr493` Array(UInt64), `nested.arr494` Array(UInt64), `nested.arr495` Array(UInt64), `nested.arr496` Array(UInt64), `nested.arr497` Array(UInt64), `nested.arr498` Array(UInt64), `nested.arr499` Array(UInt64),
arr500 Array(Array(Nullable(UInt64))), arr501 Array(Array(Nullable(UInt64))), arr502 Array(Array(Nullable(UInt64))), arr503 Array(Array(Nullable(UInt64))), arr504 Array(Array(Nullable(UInt64))), arr505 Array(Array(Nullable(UInt64))), arr506 Array(Array(Nullable(UInt64))), arr507 Array(Array(Nullable(UInt64))), arr508 Array(Array(Nullable(UInt64))), arr509 Array(Array(Nullable(UInt64))), arr510 Array(Array(Nullable(UInt64))), arr511 Array(Array(Nullable(UInt64))), arr512 Array(Array(Nullable(UInt64))), arr513 Array(Array(Nullable(UInt64))), arr514 Array(Array(Nullable(UInt64))), arr515 Array(Array(Nullable(UInt64))), arr516 Array(Array(Nullable(UInt64))), arr517 Array(Array(Nullable(UInt64))), arr518 Array(Array(Nullable(UInt64))), arr519 Array(Array(Nullable(UInt64))), arr520 Array(Array(Nullable(UInt64))), arr521 Array(Array(Nullable(UInt64))), arr522 Array(Array(Nullable(UInt64))), arr523 Array(Array(Nullable(UInt64))), arr524 Array(Array(Nullable(UInt64))), arr525 Array(Array(Nullable(UInt64))), arr526 Array(Array(Nullable(UInt64))), arr527 Array(Array(Nullable(UInt64))), arr528 Array(Array(Nullable(UInt64))), arr529 Array(Array(Nullable(UInt64))), arr530 Array(Array(Nullable(UInt64))), arr531 Array(Array(Nullable(UInt64))), arr532 Array(Array(Nullable(UInt64))), arr533 Array(Array(Nullable(UInt64))), arr534 Array(Array(Nullable(UInt64))), arr535 Array(Array(Nullable(UInt64))), arr536 Array(Array(Nullable(UInt64))), arr537 Array(Array(Nullable(UInt64))), arr538 Array(Array(Nullable(UInt64))), arr539 Array(Array(Nullable(UInt64))), arr540 Array(Array(Nullable(UInt64))), arr541 Array(Array(Nullable(UInt64))), arr542 Array(Array(Nullable(UInt64))), arr543 Array(Array(Nullable(UInt64))), arr544 Array(Array(Nullable(UInt64))), arr545 Array(Array(Nullable(UInt64))), arr546 Array(Array(Nullable(UInt64))), arr547 Array(Array(Nullable(UInt64))), arr548 Array(Array(Nullable(UInt64))), arr549 Array(Array(Nullable(UInt64))), arr550 Array(Array(Nullable(UInt64))), arr551 Array(Array(Nullable(UInt64))), arr552 Array(Array(Nullable(UInt64))), arr553 Array(Array(Nullable(UInt64))), arr554 Array(Array(Nullable(UInt64))), arr555 Array(Array(Nullable(UInt64))), arr556 Array(Array(Nullable(UInt64))), arr557 Array(Array(Nullable(UInt64))), arr558 Array(Array(Nullable(UInt64))), arr559 Array(Array(Nullable(UInt64))), arr560 Array(Array(Nullable(UInt64))), arr561 Array(Array(Nullable(UInt64))), arr562 Array(Array(Nullable(UInt64))), arr563 Array(Array(Nullable(UInt64))), arr564 Array(Array(Nullable(UInt64))), arr565 Array(Array(Nullable(UInt64))), arr566 Array(Array(Nullable(UInt64))), arr567 Array(Array(Nullable(UInt64))), arr568 Array(Array(Nullable(UInt64))), arr569 Array(Array(Nullable(UInt64))), arr570 Array(Array(Nullable(UInt64))), arr571 Array(Array(Nullable(UInt64))), arr572 Array(Array(Nullable(UInt64))), arr573 Array(Array(Nullable(UInt64))), arr574 Array(Array(Nullable(UInt64))), arr575 Array(Array(Nullable(UInt64))), arr576 Array(Array(Nullable(UInt64))), arr577 Array(Array(Nullable(UInt64))), arr578 Array(Array(Nullable(UInt64))), arr579 Array(Array(Nullable(UInt64))), arr580 Array(Array(Nullable(UInt64))), arr581 Array(Array(Nullable(UInt64))), arr582 Array(Array(Nullable(UInt64))), arr583 Array(Array(Nullable(UInt64))), arr584 Array(Array(Nullable(UInt64))), arr585 Array(Array(Nullable(UInt64))), arr586 Array(Array(Nullable(UInt64))), arr587 Array(Array(Nullable(UInt64))), arr588 Array(Array(Nullable(UInt64))), arr589 Array(Array(Nullable(UInt64))), arr590 Array(Array(Nullable(UInt64))), 
arr591 Array(Array(Nullable(UInt64))), arr592 Array(Array(Nullable(UInt64))), arr593 Array(Array(Nullable(UInt64))), arr594 Array(Array(Nullable(UInt64))), arr595 Array(Array(Nullable(UInt64))), arr596 Array(Array(Nullable(UInt64))), arr597 Array(Array(Nullable(UInt64))), arr598 Array(Array(Nullable(UInt64))), arr599 Array(Array(Nullable(UInt64))),
arr600 Array(Array(Nullable(UInt64))), arr601 Array(Array(Nullable(UInt64))), arr602 Array(Array(Nullable(UInt64))), arr603 Array(Array(Nullable(UInt64))), arr604 Array(Array(Nullable(UInt64))), arr605 Array(Array(Nullable(UInt64))), arr606 Array(Array(Nullable(UInt64))), arr607 Array(Array(Nullable(UInt64))), arr608 Array(Array(Nullable(UInt64))), arr609 Array(Array(Nullable(UInt64))), arr610 Array(Array(Nullable(UInt64))), arr611 Array(Array(Nullable(UInt64))), arr612 Array(Array(Nullable(UInt64))), arr613 Array(Array(Nullable(UInt64))), arr614 Array(Array(Nullable(UInt64))), arr615 Array(Array(Nullable(UInt64))), arr616 Array(Array(Nullable(UInt64))), arr617 Array(Array(Nullable(UInt64))), arr618 Array(Array(Nullable(UInt64))), arr619 Array(Array(Nullable(UInt64))), arr620 Array(Array(Nullable(UInt64))), arr621 Array(Array(Nullable(UInt64))), arr622 Array(Array(Nullable(UInt64))), arr623 Array(Array(Nullable(UInt64))), arr624 Array(Array(Nullable(UInt64))), arr625 Array(Array(Nullable(UInt64))), arr626 Array(Array(Nullable(UInt64))), arr627 Array(Array(Nullable(UInt64))), arr628 Array(Array(Nullable(UInt64))), arr629 Array(Array(Nullable(UInt64))), arr630 Array(Array(Nullable(UInt64))), arr631 Array(Array(Nullable(UInt64))), arr632 Array(Array(Nullable(UInt64))), arr633 Array(Array(Nullable(UInt64))), arr634 Array(Array(Nullable(UInt64))), arr635 Array(Array(Nullable(UInt64))), arr636 Array(Array(Nullable(UInt64))), arr637 Array(Array(Nullable(UInt64))), arr638 Array(Array(Nullable(UInt64))), arr639 Array(Array(Nullable(UInt64))), arr640 Array(Array(Nullable(UInt64))), arr641 Array(Array(Nullable(UInt64))), arr642 Array(Array(Nullable(UInt64))), arr643 Array(Array(Nullable(UInt64))), arr644 Array(Array(Nullable(UInt64))), arr645 Array(Array(Nullable(UInt64))), arr646 Array(Array(Nullable(UInt64))), arr647 Array(Array(Nullable(UInt64))), arr648 Array(Array(Nullable(UInt64))), arr649 Array(Array(Nullable(UInt64))), arr650 Array(Array(Nullable(UInt64))), arr651 Array(Array(Nullable(UInt64))), arr652 Array(Array(Nullable(UInt64))), arr653 Array(Array(Nullable(UInt64))), arr654 Array(Array(Nullable(UInt64))), arr655 Array(Array(Nullable(UInt64))), arr656 Array(Array(Nullable(UInt64))), arr657 Array(Array(Nullable(UInt64))), arr658 Array(Array(Nullable(UInt64))), arr659 Array(Array(Nullable(UInt64))), arr660 Array(Array(Nullable(UInt64))), arr661 Array(Array(Nullable(UInt64))), arr662 Array(Array(Nullable(UInt64))), arr663 Array(Array(Nullable(UInt64))), arr664 Array(Array(Nullable(UInt64))), arr665 Array(Array(Nullable(UInt64))), arr666 Array(Array(Nullable(UInt64))), arr667 Array(Array(Nullable(UInt64))), arr668 Array(Array(Nullable(UInt64))), arr669 Array(Array(Nullable(UInt64))), arr670 Array(Array(Nullable(UInt64))), arr671 Array(Array(Nullable(UInt64))), arr672 Array(Array(Nullable(UInt64))), arr673 Array(Array(Nullable(UInt64))), arr674 Array(Array(Nullable(UInt64))), arr675 Array(Array(Nullable(UInt64))), arr676 Array(Array(Nullable(UInt64))), arr677 Array(Array(Nullable(UInt64))), arr678 Array(Array(Nullable(UInt64))), arr679 Array(Array(Nullable(UInt64))), arr680 Array(Array(Nullable(UInt64))), arr681 Array(Array(Nullable(UInt64))), arr682 Array(Array(Nullable(UInt64))), arr683 Array(Array(Nullable(UInt64))), arr684 Array(Array(Nullable(UInt64))), arr685 Array(Array(Nullable(UInt64))), arr686 Array(Array(Nullable(UInt64))), arr687 Array(Array(Nullable(UInt64))), arr688 Array(Array(Nullable(UInt64))), arr689 Array(Array(Nullable(UInt64))), arr690 Array(Array(Nullable(UInt64))), 
arr691 Array(Array(Nullable(UInt64))), arr692 Array(Array(Nullable(UInt64))), arr693 Array(Array(Nullable(UInt64))), arr694 Array(Array(Nullable(UInt64))), arr695 Array(Array(Nullable(UInt64))), arr696 Array(Array(Nullable(UInt64))), arr697 Array(Array(Nullable(UInt64))), arr698 Array(Array(Nullable(UInt64))), arr699 Array(Array(Nullable(UInt64))),
arr700 Array(Array(Nullable(UInt64))), arr701 Array(Array(Nullable(UInt64))), arr702 Array(Array(Nullable(UInt64))), arr703 Array(Array(Nullable(UInt64))), arr704 Array(Array(Nullable(UInt64))), arr705 Array(Array(Nullable(UInt64))), arr706 Array(Array(Nullable(UInt64))), arr707 Array(Array(Nullable(UInt64))), arr708 Array(Array(Nullable(UInt64))), arr709 Array(Array(Nullable(UInt64))), arr710 Array(Array(Nullable(UInt64))), arr711 Array(Array(Nullable(UInt64))), arr712 Array(Array(Nullable(UInt64))), arr713 Array(Array(Nullable(UInt64))), arr714 Array(Array(Nullable(UInt64))), arr715 Array(Array(Nullable(UInt64))), arr716 Array(Array(Nullable(UInt64))), arr717 Array(Array(Nullable(UInt64))), arr718 Array(Array(Nullable(UInt64))), arr719 Array(Array(Nullable(UInt64))), arr720 Array(Array(Nullable(UInt64))), arr721 Array(Array(Nullable(UInt64))), arr722 Array(Array(Nullable(UInt64))), arr723 Array(Array(Nullable(UInt64))), arr724 Array(Array(Nullable(UInt64))), arr725 Array(Array(Nullable(UInt64))), arr726 Array(Array(Nullable(UInt64))), arr727 Array(Array(Nullable(UInt64))), arr728 Array(Array(Nullable(UInt64))), arr729 Array(Array(Nullable(UInt64))), arr730 Array(Array(Nullable(UInt64))), arr731 Array(Array(Nullable(UInt64))), arr732 Array(Array(Nullable(UInt64))), arr733 Array(Array(Nullable(UInt64))), arr734 Array(Array(Nullable(UInt64))), arr735 Array(Array(Nullable(UInt64))), arr736 Array(Array(Nullable(UInt64))), arr737 Array(Array(Nullable(UInt64))), arr738 Array(Array(Nullable(UInt64))), arr739 Array(Array(Nullable(UInt64))), arr740 Array(Array(Nullable(UInt64))), arr741 Array(Array(Nullable(UInt64))), arr742 Array(Array(Nullable(UInt64))), arr743 Array(Array(Nullable(UInt64))), arr744 Array(Array(Nullable(UInt64))), arr745 Array(Array(Nullable(UInt64))), arr746 Array(Array(Nullable(UInt64))), arr747 Array(Array(Nullable(UInt64))), arr748 Array(Array(Nullable(UInt64))), arr749 Array(Array(Nullable(UInt64))), arr750 Array(Array(Nullable(UInt64))), arr751 Array(Array(Nullable(UInt64))), arr752 Array(Array(Nullable(UInt64))), arr753 Array(Array(Nullable(UInt64))), arr754 Array(Array(Nullable(UInt64))), arr755 Array(Array(Nullable(UInt64))), arr756 Array(Array(Nullable(UInt64))), arr757 Array(Array(Nullable(UInt64))), arr758 Array(Array(Nullable(UInt64))), arr759 Array(Array(Nullable(UInt64))), arr760 Array(Array(Nullable(UInt64))), arr761 Array(Array(Nullable(UInt64))), arr762 Array(Array(Nullable(UInt64))), arr763 Array(Array(Nullable(UInt64))), arr764 Array(Array(Nullable(UInt64))), arr765 Array(Array(Nullable(UInt64))), arr766 Array(Array(Nullable(UInt64))), arr767 Array(Array(Nullable(UInt64))), arr768 Array(Array(Nullable(UInt64))), arr769 Array(Array(Nullable(UInt64))), arr770 Array(Array(Nullable(UInt64))), arr771 Array(Array(Nullable(UInt64))), arr772 Array(Array(Nullable(UInt64))), arr773 Array(Array(Nullable(UInt64))), arr774 Array(Array(Nullable(UInt64))), arr775 Array(Array(Nullable(UInt64))), arr776 Array(Array(Nullable(UInt64))), arr777 Array(Array(Nullable(UInt64))), arr778 Array(Array(Nullable(UInt64))), arr779 Array(Array(Nullable(UInt64))), arr780 Array(Array(Nullable(UInt64))), arr781 Array(Array(Nullable(UInt64))), arr782 Array(Array(Nullable(UInt64))), arr783 Array(Array(Nullable(UInt64))), arr784 Array(Array(Nullable(UInt64))), arr785 Array(Array(Nullable(UInt64))), arr786 Array(Array(Nullable(UInt64))), arr787 Array(Array(Nullable(UInt64))), arr788 Array(Array(Nullable(UInt64))), arr789 Array(Array(Nullable(UInt64))), arr790 Array(Array(Nullable(UInt64))), 
arr791 Array(Array(Nullable(UInt64))), arr792 Array(Array(Nullable(UInt64))), arr793 Array(Array(Nullable(UInt64))), arr794 Array(Array(Nullable(UInt64))), arr795 Array(Array(Nullable(UInt64))), arr796 Array(Array(Nullable(UInt64))), arr797 Array(Array(Nullable(UInt64))), arr798 Array(Array(Nullable(UInt64))), arr799 Array(Array(Nullable(UInt64))),
arr800 Array(Array(Nullable(UInt64))), arr801 Array(Array(Nullable(UInt64))), arr802 Array(Array(Nullable(UInt64))), arr803 Array(Array(Nullable(UInt64))), arr804 Array(Array(Nullable(UInt64))), arr805 Array(Array(Nullable(UInt64))), arr806 Array(Array(Nullable(UInt64))), arr807 Array(Array(Nullable(UInt64))), arr808 Array(Array(Nullable(UInt64))), arr809 Array(Array(Nullable(UInt64))), arr810 Array(Array(Nullable(UInt64))), arr811 Array(Array(Nullable(UInt64))), arr812 Array(Array(Nullable(UInt64))), arr813 Array(Array(Nullable(UInt64))), arr814 Array(Array(Nullable(UInt64))), arr815 Array(Array(Nullable(UInt64))), arr816 Array(Array(Nullable(UInt64))), arr817 Array(Array(Nullable(UInt64))), arr818 Array(Array(Nullable(UInt64))), arr819 Array(Array(Nullable(UInt64))), arr820 Array(Array(Nullable(UInt64))), arr821 Array(Array(Nullable(UInt64))), arr822 Array(Array(Nullable(UInt64))), arr823 Array(Array(Nullable(UInt64))), arr824 Array(Array(Nullable(UInt64))), arr825 Array(Array(Nullable(UInt64))), arr826 Array(Array(Nullable(UInt64))), arr827 Array(Array(Nullable(UInt64))), arr828 Array(Array(Nullable(UInt64))), arr829 Array(Array(Nullable(UInt64))), arr830 Array(Array(Nullable(UInt64))), arr831 Array(Array(Nullable(UInt64))), arr832 Array(Array(Nullable(UInt64))), arr833 Array(Array(Nullable(UInt64))), arr834 Array(Array(Nullable(UInt64))), arr835 Array(Array(Nullable(UInt64))), arr836 Array(Array(Nullable(UInt64))), arr837 Array(Array(Nullable(UInt64))), arr838 Array(Array(Nullable(UInt64))), arr839 Array(Array(Nullable(UInt64))), arr840 Array(Array(Nullable(UInt64))), arr841 Array(Array(Nullable(UInt64))), arr842 Array(Array(Nullable(UInt64))), arr843 Array(Array(Nullable(UInt64))), arr844 Array(Array(Nullable(UInt64))), arr845 Array(Array(Nullable(UInt64))), arr846 Array(Array(Nullable(UInt64))), arr847 Array(Array(Nullable(UInt64))), arr848 Array(Array(Nullable(UInt64))), arr849 Array(Array(Nullable(UInt64))), arr850 Array(Array(Nullable(UInt64))), arr851 Array(Array(Nullable(UInt64))), arr852 Array(Array(Nullable(UInt64))), arr853 Array(Array(Nullable(UInt64))), arr854 Array(Array(Nullable(UInt64))), arr855 Array(Array(Nullable(UInt64))), arr856 Array(Array(Nullable(UInt64))), arr857 Array(Array(Nullable(UInt64))), arr858 Array(Array(Nullable(UInt64))), arr859 Array(Array(Nullable(UInt64))), arr860 Array(Array(Nullable(UInt64))), arr861 Array(Array(Nullable(UInt64))), arr862 Array(Array(Nullable(UInt64))), arr863 Array(Array(Nullable(UInt64))), arr864 Array(Array(Nullable(UInt64))), arr865 Array(Array(Nullable(UInt64))), arr866 Array(Array(Nullable(UInt64))), arr867 Array(Array(Nullable(UInt64))), arr868 Array(Array(Nullable(UInt64))), arr869 Array(Array(Nullable(UInt64))), arr870 Array(Array(Nullable(UInt64))), arr871 Array(Array(Nullable(UInt64))), arr872 Array(Array(Nullable(UInt64))), arr873 Array(Array(Nullable(UInt64))), arr874 Array(Array(Nullable(UInt64))), arr875 Array(Array(Nullable(UInt64))), arr876 Array(Array(Nullable(UInt64))), arr877 Array(Array(Nullable(UInt64))), arr878 Array(Array(Nullable(UInt64))), arr879 Array(Array(Nullable(UInt64))), arr880 Array(Array(Nullable(UInt64))), arr881 Array(Array(Nullable(UInt64))), arr882 Array(Array(Nullable(UInt64))), arr883 Array(Array(Nullable(UInt64))), arr884 Array(Array(Nullable(UInt64))), arr885 Array(Array(Nullable(UInt64))), arr886 Array(Array(Nullable(UInt64))), arr887 Array(Array(Nullable(UInt64))), arr888 Array(Array(Nullable(UInt64))), arr889 Array(Array(Nullable(UInt64))), arr890 Array(Array(Nullable(UInt64))), 
arr891 Array(Array(Nullable(UInt64))), arr892 Array(Array(Nullable(UInt64))), arr893 Array(Array(Nullable(UInt64))), arr894 Array(Array(Nullable(UInt64))), arr895 Array(Array(Nullable(UInt64))), arr896 Array(Array(Nullable(UInt64))), arr897 Array(Array(Nullable(UInt64))), arr898 Array(Array(Nullable(UInt64))), arr899 Array(Array(Nullable(UInt64))),
arr900 Array(Array(Nullable(UInt64))), arr901 Array(Array(Nullable(UInt64))), arr902 Array(Array(Nullable(UInt64))), arr903 Array(Array(Nullable(UInt64))), arr904 Array(Array(Nullable(UInt64))), arr905 Array(Array(Nullable(UInt64))), arr906 Array(Array(Nullable(UInt64))), arr907 Array(Array(Nullable(UInt64))), arr908 Array(Array(Nullable(UInt64))), arr909 Array(Array(Nullable(UInt64))), arr910 Array(Array(Nullable(UInt64))), arr911 Array(Array(Nullable(UInt64))), arr912 Array(Array(Nullable(UInt64))), arr913 Array(Array(Nullable(UInt64))), arr914 Array(Array(Nullable(UInt64))), arr915 Array(Array(Nullable(UInt64))), arr916 Array(Array(Nullable(UInt64))), arr917 Array(Array(Nullable(UInt64))), arr918 Array(Array(Nullable(UInt64))), arr919 Array(Array(Nullable(UInt64))), arr920 Array(Array(Nullable(UInt64))), arr921 Array(Array(Nullable(UInt64))), arr922 Array(Array(Nullable(UInt64))), arr923 Array(Array(Nullable(UInt64))), arr924 Array(Array(Nullable(UInt64))), arr925 Array(Array(Nullable(UInt64))), arr926 Array(Array(Nullable(UInt64))), arr927 Array(Array(Nullable(UInt64))), arr928 Array(Array(Nullable(UInt64))), arr929 Array(Array(Nullable(UInt64))), arr930 Array(Array(Nullable(UInt64))), arr931 Array(Array(Nullable(UInt64))), arr932 Array(Array(Nullable(UInt64))), arr933 Array(Array(Nullable(UInt64))), arr934 Array(Array(Nullable(UInt64))), arr935 Array(Array(Nullable(UInt64))), arr936 Array(Array(Nullable(UInt64))), arr937 Array(Array(Nullable(UInt64))), arr938 Array(Array(Nullable(UInt64))), arr939 Array(Array(Nullable(UInt64))), arr940 Array(Array(Nullable(UInt64))), arr941 Array(Array(Nullable(UInt64))), arr942 Array(Array(Nullable(UInt64))), arr943 Array(Array(Nullable(UInt64))), arr944 Array(Array(Nullable(UInt64))), arr945 Array(Array(Nullable(UInt64))), arr946 Array(Array(Nullable(UInt64))), arr947 Array(Array(Nullable(UInt64))), arr948 Array(Array(Nullable(UInt64))), arr949 Array(Array(Nullable(UInt64))), arr950 Array(Array(Nullable(UInt64))), arr951 Array(Array(Nullable(UInt64))), arr952 Array(Array(Nullable(UInt64))), arr953 Array(Array(Nullable(UInt64))), arr954 Array(Array(Nullable(UInt64))), arr955 Array(Array(Nullable(UInt64))), arr956 Array(Array(Nullable(UInt64))), arr957 Array(Array(Nullable(UInt64))), arr958 Array(Array(Nullable(UInt64))), arr959 Array(Array(Nullable(UInt64))), arr960 Array(Array(Nullable(UInt64))), arr961 Array(Array(Nullable(UInt64))), arr962 Array(Array(Nullable(UInt64))), arr963 Array(Array(Nullable(UInt64))), arr964 Array(Array(Nullable(UInt64))), arr965 Array(Array(Nullable(UInt64))), arr966 Array(Array(Nullable(UInt64))), arr967 Array(Array(Nullable(UInt64))), arr968 Array(Array(Nullable(UInt64))), arr969 Array(Array(Nullable(UInt64))), arr970 Array(Array(Nullable(UInt64))), arr971 Array(Array(Nullable(UInt64))), arr972 Array(Array(Nullable(UInt64))), arr973 Array(Array(Nullable(UInt64))), arr974 Array(Array(Nullable(UInt64))), arr975 Array(Array(Nullable(UInt64))), arr976 Array(Array(Nullable(UInt64))), arr977 Array(Array(Nullable(UInt64))), arr978 Array(Array(Nullable(UInt64))), arr979 Array(Array(Nullable(UInt64))), arr980 Array(Array(Nullable(UInt64))), arr981 Array(Array(Nullable(UInt64))), arr982 Array(Array(Nullable(UInt64))), arr983 Array(Array(Nullable(UInt64))), arr984 Array(Array(Nullable(UInt64))), arr985 Array(Array(Nullable(UInt64))), arr986 Array(Array(Nullable(UInt64))), arr987 Array(Array(Nullable(UInt64))), arr988 Array(Array(Nullable(UInt64))), arr989 Array(Array(Nullable(UInt64))), arr990 Array(Array(Nullable(UInt64))), 
arr991 Array(Array(Nullable(UInt64))), arr992 Array(Array(Nullable(UInt64))), arr993 Array(Array(Nullable(UInt64))), arr994 Array(Array(Nullable(UInt64))), arr995 Array(Array(Nullable(UInt64))), arr996 Array(Array(Nullable(UInt64))), arr997 Array(Array(Nullable(UInt64))), arr998 Array(Array(Nullable(UInt64))), arr999 Array(Array(Nullable(UInt64))))
ENGINE = MergeTree ORDER BY id PARTITION BY id % 100
</create_query>
<fill_query>INSERT INTO lot_of_arrays(id) SELECT number FROM numbers(1000)</fill_query>
<fill_query>OPTIMIZE TABLE lot_of_arrays FINAL</fill_query>
<query>SELECT nested.arr0 FROM lot_of_arrays WHERE id > 10 FORMAT Null</query>
<drop_query>DROP TABLE IF EXISTS lot_of_arrays</drop_query>
</test>
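The 1000-column DDL above is easier to audit when generated. A sketch that reproduces the same column layout programmatically (the helper is illustrative, not part of the test):

    # 500 Nested subcolumns followed by 500 plain nested-array columns.
    nested = ", ".join(f"`nested.arr{i}` Array(UInt64)" for i in range(500))
    plain = ", ".join(f"arr{i} Array(Array(Nullable(UInt64)))" for i in range(500, 1000))
    ddl = (f"CREATE TABLE lot_of_arrays(id UInt64, {nested}, {plain}) "
           "ENGINE = MergeTree ORDER BY id PARTITION BY id % 100")
    print(ddl[:120] + " ...")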
@@ -18,7 +18,7 @@ function ch_url() {

# Check correct exceptions handling

-exception_pattern="displayText() = DB::Exception:[[:print:]]*"
+exception_pattern="DB::Exception:[[:print:]]*"

function check_only_exception() {
    local res
@@ -23,7 +23,7 @@ INSERT INTO memory SELECT * FROM numbers(1000);"

${CLICKHOUSE_CLIENT} --multiquery --query="
    SET max_threads = 1;
-    SELECT count() FROM memory WHERE NOT ignore(sleep(0.0001));" 2>&1 | grep -c -P '^1000$|^0$|Table .+? doesn.t exist' &
+    SELECT count() FROM memory WHERE NOT ignore(sleep(0.0001));" 2>&1 | grep -c -P '^1000$|^0$|Exception' &

sleep 0.05;
@@ -89,7 +89,7 @@ idx10 ['This','is','a','test']
23.00
24.00
=== Try load data from datapage_v2.snappy.parquet
-Code: 33. DB::ParsingEx---tion: Error while reading Parquet data: IOError: Not yet implemented: Unsupported encoding.: data for INSERT was parsed from stdin
+Code: 33. DB::ParsingEx---tion: Error while reading Parquet data: IOError: Not yet implemented: Unsupported encoding.: data for INSERT was parsed from stdin. (CANNOT_READ_ALL_DATA)

=== Try load data from dict-page-offset-zero.parquet
1552
@@ -1,12 +1,11 @@
-SET distributed_directory_monitor_batch_inserts=1;
-SET distributed_directory_monitor_sleep_time_ms=10;
-SET distributed_directory_monitor_max_sleep_time_ms=100;
-
DROP TABLE IF EXISTS test_01040;
DROP TABLE IF EXISTS dist_test_01040;

CREATE TABLE test_01040 (key UInt64) ENGINE=TinyLog();
-CREATE TABLE dist_test_01040 AS test_01040 Engine=Distributed(test_cluster_two_shards, currentDatabase(), test_01040, key);
+CREATE TABLE dist_test_01040 AS test_01040 Engine=Distributed(test_cluster_two_shards, currentDatabase(), test_01040, key) SETTINGS
+    monitor_batch_inserts=1,
+    monitor_sleep_time_ms=10,
+    monitor_max_sleep_time_ms=100;

-- internal_replication=false
SELECT 'test_cluster_two_shards prefer_localhost_replica=0';
@@ -26,7 +25,10 @@ TRUNCATE TABLE test_01040;
DROP TABLE dist_test_01040;

-- internal_replication=true
-CREATE TABLE dist_test_01040 AS test_01040 Engine=Distributed(test_cluster_two_shards_internal_replication, currentDatabase(), test_01040, key);
+CREATE TABLE dist_test_01040 AS test_01040 Engine=Distributed(test_cluster_two_shards_internal_replication, currentDatabase(), test_01040, key) SETTINGS
+    monitor_batch_inserts=1,
+    monitor_sleep_time_ms=10,
+    monitor_max_sleep_time_ms=100;
SELECT 'test_cluster_two_shards_internal_replication prefer_localhost_replica=0';
SET prefer_localhost_replica=0;
INSERT INTO dist_test_01040 SELECT toUInt64(number) FROM numbers(2);
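The change above moves the three directory-monitor knobs from session-level SET statements onto the Distributed table itself. A sketch that assembles such a DDL from a settings dict (names and values are the ones used in the test):

    settings = {
        "monitor_batch_inserts": 1,
        "monitor_sleep_time_ms": 10,
        "monitor_max_sleep_time_ms": 100,
    }
    settings_sql = ", ".join(f"{k}={v}" for k, v in settings.items())
    ddl = ("CREATE TABLE dist_test_01040 AS test_01040 "
           "Engine=Distributed(test_cluster_two_shards, currentDatabase(), test_01040, key) "
           f"SETTINGS {settings_sql}")
    print(ddl)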
@@ -1,25 +1,25 @@
none
Received exception from server:
-Code: 57. Error: Received from localhost:9000. Error: There was an error on [localhost:9000]: Code: 57, e.displayText() = Error: Table default.throw already exists
+Code: 57. Error: Received from localhost:9000. Error: There was an error on [localhost:9000]: Code: 57. Error: Table default.throw already exists. (TABLE_ALREADY_EXISTS)
Received exception from server:
-Code: 159. Error: Received from localhost:9000. Error: Watching task <task> is executing longer than distributed_ddl_task_timeout (=8) seconds. There are 1 unfinished hosts (0 of them are currently active), they are going to execute the query in background.
+Code: 159. Error: Received from localhost:9000. Error: Watching task <task> is executing longer than distributed_ddl_task_timeout (=8) seconds. There are 1 unfinished hosts (0 of them are currently active), they are going to execute the query in background.(TIMEOUT_EXCEEDED)
throw
localhost 9000 0 0 0
-localhost 9000 57 Code: 57, e.displayText() = Error: Table default.throw already exists. 0 0
+localhost 9000 57 Code: 57. Error: Table default.throw already exists. (TABLE_ALREADY_EXISTS) 0 0
Received exception from server:
-Code: 57. Error: Received from localhost:9000. Error: There was an error on [localhost:9000]: Code: 57, e.displayText() = Error: Table default.throw already exists
+Code: 57. Error: Received from localhost:9000. Error: There was an error on [localhost:9000]: Code: 57. Error: Table default.throw already exists. (TABLE_ALREADY_EXISTS)
localhost 9000 0 1 0
Received exception from server:
-Code: 159. Error: Received from localhost:9000. Error: Watching task <task> is executing longer than distributed_ddl_task_timeout (=8) seconds. There are 1 unfinished hosts (0 of them are currently active), they are going to execute the query in background.
+Code: 159. Error: Received from localhost:9000. Error: Watching task <task> is executing longer than distributed_ddl_task_timeout (=8) seconds. There are 1 unfinished hosts (0 of them are currently active), they are going to execute the query in background.(TIMEOUT_EXCEEDED)
null_status_on_timeout
localhost 9000 0 0 0
-localhost 9000 57 Code: 57, e.displayText() = Error: Table default.null_status already exists. 0 0
+localhost 9000 57 Code: 57. Error: Table default.null_status already exists. (TABLE_ALREADY_EXISTS) 0 0
Received exception from server:
-Code: 57. Error: Received from localhost:9000. Error: There was an error on [localhost:9000]: Code: 57, e.displayText() = Error: Table default.null_status already exists
+Code: 57. Error: Received from localhost:9000. Error: There was an error on [localhost:9000]: Code: 57. Error: Table default.null_status already exists. (TABLE_ALREADY_EXISTS)
localhost 9000 0 1 0
localhost 1 \N \N 1 0
never_throw
localhost 9000 0 0 0
-localhost 9000 57 Code: 57, e.displayText() = Error: Table default.never_throw already exists. 0 0
+localhost 9000 57 Code: 57. Error: Table default.never_throw already exists. (TABLE_ALREADY_EXISTS) 0 0
localhost 9000 0 1 0
localhost 1 \N \N 1 0
@@ -115,6 +115,7 @@ GROUP BY WITH TOTALS LIMIT
2 0

4 0
+GROUP BY (compound)
GROUP BY sharding_key, ...
0 0
1 0
@@ -123,6 +124,15 @@ GROUP BY sharding_key, ...
GROUP BY ..., sharding_key
0 0
1 0
+0 0
+1 0
+sharding_key (compound)
+1 2 3
+1 2 3
+1 2 6
+1 2
+1 2
+2
window functions
0 0
1 0
@@ -97,6 +97,7 @@ select 'GROUP BY WITH TOTALS LIMIT';
select count(), * from dist_01247 group by number with totals limit 1;

-- GROUP BY (compound)
+select 'GROUP BY (compound)';
drop table if exists dist_01247;
drop table if exists data_01247;
create table data_01247 engine=Memory() as select number key, 0 value from numbers(2);
@@ -106,6 +107,13 @@ select * from dist_01247 group by key, value;
select 'GROUP BY ..., sharding_key';
select * from dist_01247 group by value, key;

+-- sharding_key (compound)
+select 'sharding_key (compound)';
+select k1, k2, sum(v) from remote('127.{1,2}', view(select 1 k1, 2 k2, 3 v), cityHash64(k1, k2)) group by k1, k2; -- optimization applied
+select k1, any(k2), sum(v) from remote('127.{1,2}', view(select 1 k1, 2 k2, 3 v), cityHash64(k1, k2)) group by k1; -- optimization does not applied
+select distinct k1, k2 from remote('127.{1,2}', view(select 1 k1, 2 k2, 3 v), cityHash64(k1, k2)); -- optimization applied
+select distinct on (k1) k2 from remote('127.{1,2}', view(select 1 k1, 2 k2, 3 v), cityHash64(k1, k2)); -- optimization does not applied
+
-- window functions
select 'window functions';
select key, sum(sum(value)) over (rows unbounded preceding) from dist_01247 group by key settings allow_experimental_window_functions=1;
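The "optimization applied / not applied" comments above follow one rule: shards can aggregate independently only when the GROUP BY (or DISTINCT) columns cover the whole sharding key. A tiny illustrative predicate capturing that rule:

    def optimization_applies(group_by_keys, sharding_keys):
        # If some sharding-key column is not grouped on, equal groups
        # may live on different shards and must be merged centrally.
        return set(sharding_keys) <= set(group_by_keys)

    print(optimization_applies({"k1", "k2"}, {"k1", "k2"}))  # True: group by k1, k2
    print(optimization_applies({"k1"}, {"k1", "k2"}))        # False: group by k1 only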
@@ -0,0 +1 @@
+0
@@ -0,0 +1,3 @@
+SELECT a FROM (SELECT ignore((SELECT 1)) AS a, a AS b);
+
+SELECT x FROM (SELECT dummy AS x, plus(ignore(ignore(ignore(ignore('-922337203.6854775808', ignore(NULL)), ArrLen = 256, ignore(100, Arr.C3, ignore(NULL), (SELECT 10.000100135803223, count(*) FROM system.time_zones) > NULL)))), dummy, 65535) AS dummy ORDER BY ignore(-2) ASC, identity(x) DESC NULLS FIRST) FORMAT Null; -- { serverError 47 }
@@ -1,3 +1,10 @@
+-- Leaf limits is unreliable w/ prefer_localhost_replica=1.
+-- Since in this case initial query and the query on the local node (to the
+-- underlying table) has the same counters, so if query on the remote node
+-- will be finished before local, then local node will already have some rows
+-- read, and leaf limit will fail.
+SET prefer_localhost_replica=0;
+
SELECT count() FROM (SELECT * FROM remote('127.0.0.1', system.numbers) LIMIT 100) SETTINGS max_rows_to_read_leaf=1; -- { serverError 158 }
SELECT count() FROM (SELECT * FROM remote('127.0.0.1', system.numbers) LIMIT 100) SETTINGS max_bytes_to_read_leaf=1; -- { serverError 307 }
SELECT count() FROM (SELECT * FROM remote('127.0.0.1', system.numbers) LIMIT 100) SETTINGS max_rows_to_read_leaf=100;
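A sketch of exercising the same leaf limits over the HTTP interface; the localhost:8123 endpoint and the simplified error handling are assumptions:

    import urllib.parse
    import urllib.request

    query = "SELECT count() FROM (SELECT * FROM remote('127.0.0.1', system.numbers) LIMIT 100)"
    params = urllib.parse.urlencode({
        "query": query,
        "prefer_localhost_replica": 0,
        "max_rows_to_read_leaf": 100,  # with a limit of 1 the server returns an error instead
    })
    with urllib.request.urlopen(f"http://localhost:8123/?{params}") as response:
        print(response.read().decode())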
@@ -2,33 +2,31 @@
-- (i.e. no .bin files and hence no sending is required)
set prefer_localhost_replica=0;

-set distributed_directory_monitor_sleep_time_ms=50;
-
drop table if exists data_01460;
drop table if exists dist_01460;

create table data_01460 as system.one engine=Null();
-create table dist_01460 as data_01460 engine=Distributed(test_shard_localhost, currentDatabase(), data_01460);
+create table dist_01460 as data_01460 engine=Distributed(test_shard_localhost, currentDatabase(), data_01460) settings monitor_sleep_time_ms=50;

select 'INSERT';
select value from system.metrics where metric = 'DistributedFilesToInsert';
insert into dist_01460 select * from system.one;
-select sleep(1) format Null; -- distributed_directory_monitor_sleep_time_ms
+select sleep(1) format Null; -- monitor_sleep_time_ms
select value from system.metrics where metric = 'DistributedFilesToInsert';

select 'STOP/START DISTRIBUTED SENDS';
system stop distributed sends dist_01460;
insert into dist_01460 select * from system.one;
-select sleep(1) format Null; -- distributed_directory_monitor_sleep_time_ms
+select sleep(1) format Null; -- monitor_sleep_time_ms
select value from system.metrics where metric = 'DistributedFilesToInsert';
system start distributed sends dist_01460;
-select sleep(1) format Null; -- distributed_directory_monitor_sleep_time_ms
+select sleep(1) format Null; -- monitor_sleep_time_ms
select value from system.metrics where metric = 'DistributedFilesToInsert';

select 'FLUSH DISTRIBUTED';
system stop distributed sends dist_01460;
insert into dist_01460 select * from system.one;
-select sleep(1) format Null; -- distributed_directory_monitor_sleep_time_ms
+select sleep(1) format Null; -- monitor_sleep_time_ms
select value from system.metrics where metric = 'DistributedFilesToInsert';
system flush distributed dist_01460;
select value from system.metrics where metric = 'DistributedFilesToInsert';
@@ -36,7 +34,7 @@ select value from system.metrics where metric = 'DistributedFilesToInsert';
select 'DROP TABLE';
system stop distributed sends dist_01460;
insert into dist_01460 select * from system.one;
-select sleep(1) format Null; -- distributed_directory_monitor_sleep_time_ms
+select sleep(1) format Null; -- monitor_sleep_time_ms
select value from system.metrics where metric = 'DistributedFilesToInsert';
drop table dist_01460;
select value from system.metrics where metric = 'DistributedFilesToInsert';
@@ -4,7 +4,7 @@ log_user 1
set timeout 5
match_max 100000

-spawn bash -c "$env(CLICKHOUSE_CLIENT_BINARY) $env(CLICKHOUSE_CLIENT_OPT)"
+spawn bash -c "$env(CLICKHOUSE_CLIENT_BINARY) --no-warnings $env(CLICKHOUSE_CLIENT_OPT)"
expect ":) "
send -- "\4"
expect eof
@@ -1,8 +1 @@
-Waiting for query to be started...
-Query started.
-Checking total_rows_approx.
-10
-10
-10
-10
-10
+"total_rows_to_read":"10"
@@ -1,23 +1,12 @@
#!/usr/bin/env bash

-# Check that total_rows_approx (via system.processes) includes all rows from
+# Check that total_rows_approx (via http headers) includes all rows from
# all parts at the query start.
#
# At some point total_rows_approx was accounted only when the query starts
# reading the part, and so total_rows_approx wasn't reliable, even for simple
# SELECT FROM MergeTree()
# It was fixed by take total_rows_approx into account as soon as possible.
-#
-# To check total_rows_approx this query starts the query in background,
-# that sleep's 1 second for each part, and by using max_threads=1 the query
-# reads parts sequentially and sleeps 1 second between parts.
-# Also the test spawns background process to check total_rows_approx for this
-# query.
-# It checks multiple times since at first few iterations the query may not
-# start yet (since there are 3 excessive sleep calls - 1 for primary key
-# analysis and 2 for partition pruning), and get only last 5 total_rows_approx
-# rows (one row is not enough since when the query finishes total_rows_approx
-# will be set to 10 anyway, regardless proper accounting).

CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
|
|||||||
|
|
||||||
$CLICKHOUSE_CLIENT -q "drop table if exists data_01882"
|
$CLICKHOUSE_CLIENT -q "drop table if exists data_01882"
|
||||||
$CLICKHOUSE_CLIENT -q "create table data_01882 (key Int) Engine=MergeTree() partition by key order by key as select * from numbers(10)"
|
$CLICKHOUSE_CLIENT -q "create table data_01882 (key Int) Engine=MergeTree() partition by key order by key as select * from numbers(10)"
|
||||||
QUERY_ID="$CLICKHOUSE_TEST_NAME-$(tr -cd '[:lower:]' < /dev/urandom | head -c10)"
|
# send_progress_in_http_headers will periodically send the progress
|
||||||
|
# but this is not stable, i.e. it can be dumped on query end,
|
||||||
function check_background_query()
|
# thus check few times to be sure that this is not coincidence.
|
||||||
{
|
for _ in {1..30}; do
|
||||||
echo "Waiting for query to be started..."
|
$CLICKHOUSE_CURL -vsS "${CLICKHOUSE_URL}&max_threads=1&default_format=Null&send_progress_in_http_headers=1&http_headers_progress_interval_ms=1" --data-binary @- <<< "select * from data_01882" |& {
|
||||||
while [[ $($CLICKHOUSE_CLIENT --param_query_id="$QUERY_ID" -q 'select count() from system.processes where query_id = {query_id:String}') != 1 ]]; do
|
grep -o -F '"total_rows_to_read":"10"'
|
||||||
sleep 0.01
|
} | {
|
||||||
done
|
# grep out final result
|
||||||
echo "Query started."
|
grep -v -F '"read_rows":"10"'
|
||||||
|
|
||||||
echo "Checking total_rows_approx."
|
|
||||||
# check total_rows_approx multiple times
|
|
||||||
# (to make test more reliable to what it covers)
|
|
||||||
local i=0
|
|
||||||
for ((i = 0; i < 20; ++i)); do
|
|
||||||
$CLICKHOUSE_CLIENT --param_query_id="$QUERY_ID" -q 'select total_rows_approx from system.processes where query_id = {query_id:String}'
|
|
||||||
(( ++i ))
|
|
||||||
sleep 1
|
|
||||||
done | tail -n5
|
|
||||||
}
|
}
|
||||||
check_background_query &
|
done | uniq
|
||||||
|
|
||||||
# this query will sleep 10 seconds in total, 1 seconds for each part (10 parts).
|
|
||||||
$CLICKHOUSE_CLIENT -q "select *, sleepEachRow(1) from data_01882" --max_threads=1 --format Null --query_id="$QUERY_ID" --max_block_size=1
|
|
||||||
|
|
||||||
wait
|
|
||||||
|
|
||||||
$CLICKHOUSE_CLIENT -q "drop table data_01882"
|
|
||||||
|
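
For context on what the rewritten test greps for: with send_progress_in_http_headers=1 the server periodically emits X-ClickHouse-Progress response headers whose JSON payload includes total_rows_to_read and read_rows. A minimal sketch of observing them directly, assuming a local server on the default HTTP port 8123 and the data_01882 table created by the test above:

    # -v prints response headers to stderr; each X-ClickHouse-Progress header
    # carries JSON like {"read_rows":"1","read_bytes":"...","total_rows_to_read":"10"}.
    curl -vsS 'http://localhost:8123/?max_threads=1&default_format=Null&send_progress_in_http_headers=1&http_headers_progress_interval_ms=1' \
        --data-binary 'select * from data_01882' 2>&1 | grep -F 'X-ClickHouse-Progress'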

@@ -13,4 +13,4 @@ yml
 yaml
 2
 ini
-Code: 347. Unknown format of '/config_default.ini' config
+Code: 347. Unknown format of '/config_default.ini' config. (CANNOT_LOAD_CONFIG)

50 tests/queries/0_stateless/01945_show_debug_warning.expect Executable file
@@ -0,0 +1,50 @@
+#!/usr/bin/expect -f
+
+# This is a test for system.warnings. Testing in interactive mode is necessary,
+# as we want to see certain warnings from client
+
+log_user 0
+set timeout 60
+match_max 100000
+
+# A default timeout action is to do nothing, change it to fail
+expect_after {
+    timeout {
+        exit 1
+    }
+}
+
+set basedir [file dirname $argv0]
+set Debug_type 0
+
+spawn bash -c "source $basedir/../shell_config.sh ; \$CLICKHOUSE_CLIENT_BINARY \$CLICKHOUSE_CLIENT_OPT --disable_suggestion"
+expect ":) "
+
+# Check debug type
+send -- "SELECT value FROM system.build_options WHERE name='BUILD_TYPE'\r"
+expect {
+    "Debug" {
+        set Debug_type 1
+        expect ":) "
+    }
+    "RelWithDebInfo"
+}
+
+send -- "q\r"
+expect eof
+
+if { $Debug_type > 0} {
+
+    spawn bash -c "source $basedir/../shell_config.sh ; \$CLICKHOUSE_CLIENT_BINARY \$CLICKHOUSE_CLIENT_OPT --disable_suggestion"
+    expect "Warnings:"
+    expect " * Server was built in debug mode. It will work slowly."
+    expect ":) "
+
+    # Check debug message in system.warnings
+    send -- "SELECT message FROM system.warnings WHERE message='Server was built in debug mode. It will work slowly.'\r"
+    expect "Server was built in debug mode. It will work slowly."
+    expect ":) "
+
+    send -- "q\r"
+    expect eof
+}
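
The warning the new test expects comes from the system.warnings table, which an interactive client session prints at startup. A minimal sketch of inspecting it directly, assuming a local server:

    # Lists every startup warning the server currently reports; on a debug
    # build this includes "Server was built in debug mode. It will work slowly."
    clickhouse-client --query "SELECT message FROM system.warnings"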

@@ -36,5 +36,5 @@ expect {
 }
 
 # Finish test
-send -- "\4"
+send -- "q\r"
 expect eof

6 tests/queries/0_stateless/01946_profile_sleep.reference Normal file
@@ -0,0 +1,6 @@
+{"'SLEEP #1 CHECK'":"SLEEP #1 CHECK","calls":"1","microseconds":"1000"}
+{"'SLEEP #2 CHECK'":"SLEEP #2 CHECK","calls":"1","microseconds":"1000"}
+{"'SLEEP #3 CHECK'":"SLEEP #3 CHECK","calls":"1","microseconds":"1000"}
+{"'SLEEP #4 CHECK'":"SLEEP #4 CHECK","calls":"2","microseconds":"2000"}
+{"'SLEEP #5 CHECK'":"SLEEP #5 CHECK","calls":"0","microseconds":"0"}
+{"'SLEEP #6 CHECK'":"SLEEP #6 CHECK","calls":"10","microseconds":"10000"}

65 tests/queries/0_stateless/01946_profile_sleep.sql Normal file
@@ -0,0 +1,65 @@
+SET log_queries=1;
+SET log_profile_events=true;
+
+SELECT 'SLEEP #1 TEST', sleep(0.001) FORMAT Null;
+SYSTEM FLUSH LOGS;
+SELECT 'SLEEP #1 CHECK', ProfileEvents['SleepFunctionCalls'] as calls, ProfileEvents['SleepFunctionMicroseconds'] as microseconds
+  FROM system.query_log
+  WHERE query like '%SELECT ''SLEEP #1 TEST''%'
+    AND type > 1
+    AND current_database = currentDatabase()
+    AND event_date >= yesterday()
+  FORMAT JSONEachRow;
+
+SELECT 'SLEEP #2 TEST', sleep(0.001) FROM numbers(2) FORMAT Null;
+SYSTEM FLUSH LOGS;
+SELECT 'SLEEP #2 CHECK', ProfileEvents['SleepFunctionCalls'] as calls, ProfileEvents['SleepFunctionMicroseconds'] as microseconds
+  FROM system.query_log
+  WHERE query like '%SELECT ''SLEEP #2 TEST''%'
+    AND type > 1
+    AND current_database = currentDatabase()
+    AND event_date >= yesterday()
+  FORMAT JSONEachRow;
+
+SELECT 'SLEEP #3 TEST', sleepEachRow(0.001) FORMAT Null;
+SYSTEM FLUSH LOGS;
+SELECT 'SLEEP #3 CHECK', ProfileEvents['SleepFunctionCalls'] as calls, ProfileEvents['SleepFunctionMicroseconds'] as microseconds
+  FROM system.query_log
+  WHERE query like '%SELECT ''SLEEP #3 TEST''%'
+    AND type > 1
+    AND current_database = currentDatabase()
+    AND event_date >= yesterday()
+  FORMAT JSONEachRow;
+
+SELECT 'SLEEP #4 TEST', sleepEachRow(0.001) FROM numbers(2) FORMAT Null;
+SYSTEM FLUSH LOGS;
+SELECT 'SLEEP #4 CHECK', ProfileEvents['SleepFunctionCalls'] as calls, ProfileEvents['SleepFunctionMicroseconds'] as microseconds
+  FROM system.query_log
+  WHERE query like '%SELECT ''SLEEP #4 TEST''%'
+    AND type > 1
+    AND current_database = currentDatabase()
+    AND event_date >= yesterday()
+  FORMAT JSONEachRow;
+
+
+CREATE VIEW sleep_view AS SELECT sleepEachRow(0.001) FROM system.numbers;
+SYSTEM FLUSH LOGS;
+SELECT 'SLEEP #5 CHECK', ProfileEvents['SleepFunctionCalls'] as calls, ProfileEvents['SleepFunctionMicroseconds'] as microseconds
+  FROM system.query_log
+  WHERE query like '%CREATE VIEW sleep_view AS%'
+    AND type > 1
+    AND current_database = currentDatabase()
+    AND event_date >= yesterday()
+  FORMAT JSONEachRow;
+
+SELECT 'SLEEP #6 TEST', sleepEachRow(0.001) FROM sleep_view LIMIT 10 FORMAT Null;
+SYSTEM FLUSH LOGS;
+SELECT 'SLEEP #6 CHECK', ProfileEvents['SleepFunctionCalls'] as calls, ProfileEvents['SleepFunctionMicroseconds'] as microseconds
+  FROM system.query_log
+  WHERE query like '%SELECT ''SLEEP #6 TEST''%'
+    AND type > 1
+    AND current_database = currentDatabase()
+    AND event_date >= yesterday()
+  FORMAT JSONEachRow;
+
+DROP TABLE sleep_view;
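
The checks above rely on ProfileEvents being a Map column in system.query_log, so individual counters are read with bracket syntax. A minimal sketch of the same lookup for recent queries, assuming log_profile_events is enabled as in the test:

    # type > 1 skips QueryStart entries, which have no final counters yet.
    clickhouse-client --query "
        SELECT query,
               ProfileEvents['SleepFunctionCalls'] AS calls,
               ProfileEvents['SleepFunctionMicroseconds'] AS microseconds
        FROM system.query_log
        WHERE type > 1 AND event_date >= yesterday()
        ORDER BY event_time DESC
        LIMIT 5"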

@@ -7,14 +7,9 @@ append_path(sys.path, ".")
 from helpers.common import Pool, join, run_scenario
 from helpers.argparser import argparser
 
-xfails = {
-    "kerberos/config/principal and realm specified/:": [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/26197")],
-}
-
 @TestModule
 @Name("clickhouse")
 @ArgumentParser(argparser)
-@XFails(xfails)
 def regression(self, local, clickhouse_binary_path, stress=None, parallel=None):
     """ClickHouse regression.
     """
@@ -29,13 +24,13 @@ def regression(self, local, clickhouse_binary_path, stress=None, parallel=None):
     try:
         run_scenario(pool, tasks, Feature(test=load("example.regression", "regression")), args)
         # run_scenario(pool, tasks, Feature(test=load("ldap.regression", "regression")), args)
-        # run_scenario(pool, tasks, Feature(test=load("rbac.regression", "regression")), args)
-        # run_scenario(pool, tasks, Feature(test=load("aes_encryption.regression", "regression")), args)
-        # run_scenario(pool, tasks, Feature(test=load("map_type.regression", "regression")), args)
-        # run_scenario(pool, tasks, Feature(test=load("window_functions.regression", "regression")), args)
-        # run_scenario(pool, tasks, Feature(test=load("datetime64_extended_range.regression", "regression")), args)
+        run_scenario(pool, tasks, Feature(test=load("rbac.regression", "regression")), args)
+        run_scenario(pool, tasks, Feature(test=load("aes_encryption.regression", "regression")), args)
+        run_scenario(pool, tasks, Feature(test=load("map_type.regression", "regression")), args)
+        run_scenario(pool, tasks, Feature(test=load("window_functions.regression", "regression")), args)
+        run_scenario(pool, tasks, Feature(test=load("datetime64_extended_range.regression", "regression")), args)
         run_scenario(pool, tasks, Feature(test=load("kerberos.regression", "regression")), args)
-        # run_scenario(pool, tasks, Feature(test=load("extended_precision_data_types.regression", "regression")), args)
+        run_scenario(pool, tasks, Feature(test=load("extended_precision_data_types.regression", "regression")), args)
     finally:
         join(tasks)
 
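
The hunk above re-enables the rbac, aes_encryption, map_type, window_functions, datetime64_extended_range and extended_precision_data_types TestFlows suites. A hypothetical invocation for running the regression against a local build, with flag names inferred from the regression() parameters above:

    # --local and --clickhouse-binary-path mirror the local/clickhouse_binary_path
    # arguments of regression(); adjust the binary path for your build.
    python3 regression.py --local --clickhouse-binary-path /usr/bin/clickhouse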