Merge branch 'master' into grant_by_replace

Caspian authored 2021-07-20 17:41:12 +08:00, committed by GitHub
commit 7c6dc56bcd
368 changed files with 8598 additions and 5239 deletions

View File

@ -2,25 +2,23 @@ I hereby agree to the terms of the CLA available at: https://yandex.ru/legal/cla
Changelog category (leave one):
- New Feature
- Bug Fix
- Improvement
- Performance Improvement
- Backward Incompatible Change
- Build/Testing/Packaging Improvement
- Documentation (changelog entry is not required)
- Other
- Not for changelog (changelog entry is not required)
Changelog entry (a user-readable short description of the changes that goes to CHANGELOG.md):
...
Detailed description / Documentation draft:
...
By adding documentation, you'll allow users to try your new feature immediately, rather than waiting for someone else to find time to document it later. Documentation is necessary for all features that affect user experience in any way. You can add a brief documentation draft above, or add documentation right into your patch as Markdown files in the [docs](https://github.com/ClickHouse/ClickHouse/tree/master/docs) folder.
If you are doing this for the first time, it's recommended to read the lightweight [Contributing to ClickHouse Documentation](https://github.com/ClickHouse/ClickHouse/tree/master/docs/README.md) guide first.

View File

@ -18,6 +18,8 @@
#define DATE_LUT_MAX (0xFFFFFFFFU - 86400)
#define DATE_LUT_MAX_DAY_NUM 0xFFFF
/// Max int value of Date32, DATE LUT cache size minus daynum_offset_epoch
#define DATE_LUT_MAX_EXTEND_DAY_NUM (DATE_LUT_SIZE - 16436)
/// A constant to add to time_t so every supported time point becomes non-negative and still has the same remainder of division by 3600.
/// If we treat "remainder of division" operation in the sense of modular arithmetic (not like in C++).
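For context (not part of the patch): a minimal standalone sketch of the point the comment above makes. C++ `%` yields a negative remainder for negative operands, so a time point before the epoch is shifted by a constant multiple of 3600, which makes it non-negative while leaving its position within the hour unchanged. The offset value below is a made-up multiple of 3600, not the real constant.
```cpp
#include <cassert>
#include <cstdint>

int main()
{
    const int64_t offset = 604800LL * 3600;  /// hypothetical offset; any multiple of 3600 works
    const int64_t t = -1234567;              /// a time point before the epoch

    assert(t % 3600 < 0);                    /// plain C++ remainder is negative here
    const int64_t shifted = t + offset;
    assert(shifted >= 0);                    /// now non-negative
    /// Same second within the hour, because offset % 3600 == 0.
    assert(shifted % 3600 == ((t % 3600) + 3600) % 3600);
    return 0;
}
```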
@ -270,6 +272,8 @@ public:
auto getOffsetAtStartOfEpoch() const { return offset_at_start_of_epoch; }
auto getTimeOffsetAtStartOfLUT() const { return offset_at_start_of_lut; }
auto getDayNumOffsetEpoch() const { return daynum_offset_epoch; }
/// All functions below are thread-safe; arguments are not checked.
inline ExtendedDayNum toDayNum(ExtendedDayNum d) const
@ -926,15 +930,17 @@ public:
{
if (unlikely(year < DATE_LUT_MIN_YEAR || year > DATE_LUT_MAX_YEAR || month < 1 || month > 12 || day_of_month < 1 || day_of_month > 31))
return LUTIndex(0);
return LUTIndex{years_months_lut[(year - DATE_LUT_MIN_YEAR) * 12 + month - 1] + day_of_month - 1};
auto year_lut_index = (year - DATE_LUT_MIN_YEAR) * 12 + month - 1;
UInt32 index = years_months_lut[year_lut_index].toUnderType() + day_of_month - 1;
/// When date is out of range, default value is DATE_LUT_SIZE - 1 (2283-11-11)
return LUTIndex{std::min(index, static_cast<UInt32>(DATE_LUT_SIZE - 1))};
}
/// Create DayNum from year, month, day of month.
inline ExtendedDayNum makeDayNum(Int16 year, UInt8 month, UInt8 day_of_month) const
inline ExtendedDayNum makeDayNum(Int16 year, UInt8 month, UInt8 day_of_month, Int32 default_error_day_num = 0) const
{
if (unlikely(year < DATE_LUT_MIN_YEAR || year > DATE_LUT_MAX_YEAR || month < 1 || month > 12 || day_of_month < 1 || day_of_month > 31))
return ExtendedDayNum(0);
return ExtendedDayNum(default_error_day_num);
return toDayNum(makeLUTIndex(year, month, day_of_month));
}
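For context (not part of the patch): a standalone sketch, not the real `DateLUTImpl`, of the behaviour the two changed functions now have. `makeLUTIndex` clamps an index computed past the end of the LUT to `DATE_LUT_SIZE - 1` instead of letting it run out of bounds, and `makeDayNum` lets the caller choose the value returned for syntactically invalid dates. The year bounds and LUT size below are assumptions used only for illustration.
```cpp
#include <algorithm>
#include <cstdint>
#include <iostream>

static constexpr uint32_t DATE_LUT_SIZE = 0x20000;   /// assumed LUT size

/// Stand-in for years_months_lut[...] + day_of_month - 1 followed by the new clamp.
uint32_t clampLUTIndex(uint32_t raw_index)
{
    return std::min(raw_index, DATE_LUT_SIZE - 1);    /// 2283-11-11 in the real LUT
}

int32_t makeDayNumSketch(int year, int month, int day, int32_t default_error_day_num = 0)
{
    if (year < 1925 || year > 2283 || month < 1 || month > 12 || day < 1 || day > 31)
        return default_error_day_num;                 /// caller-chosen error value, 0 by default
    return 1;                                         /// placeholder for the real LUT lookup
}

int main()
{
    std::cout << clampLUTIndex(DATE_LUT_SIZE + 100) << '\n';  /// 131071, not out of bounds
    std::cout << makeDayNumSketch(9999, 1, 1) << '\n';        /// 0
    std::cout << makeDayNumSketch(9999, 1, 1, -1) << '\n';    /// -1
}
```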
@ -1091,9 +1097,9 @@ public:
return lut[new_index].date + time;
}
inline NO_SANITIZE_UNDEFINED Time addWeeks(Time t, Int64 delta) const
inline NO_SANITIZE_UNDEFINED Time addWeeks(Time t, Int32 delta) const
{
return addDays(t, delta * 7);
return addDays(t, static_cast<Int64>(delta) * 7);
}
inline UInt8 saturateDayOfMonth(Int16 year, UInt8 month, UInt8 day_of_month) const
@ -1158,14 +1164,14 @@ public:
return toDayNum(addMonthsIndex(d, delta));
}
inline Time NO_SANITIZE_UNDEFINED addQuarters(Time t, Int64 delta) const
inline Time NO_SANITIZE_UNDEFINED addQuarters(Time t, Int32 delta) const
{
return addMonths(t, delta * 3);
return addMonths(t, static_cast<Int64>(delta) * 3);
}
inline ExtendedDayNum addQuarters(ExtendedDayNum d, Int64 delta) const
inline ExtendedDayNum addQuarters(ExtendedDayNum d, Int32 delta) const
{
return addMonths(d, delta * 3);
return addMonths(d, static_cast<Int64>(delta) * 3);
}
template <typename DateOrTime>
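For context (not part of the patch): the `delta` parameter is narrowed to `Int32`, and the cast back to `Int64` before the multiplication keeps the product out of 32-bit arithmetic. A standalone sketch of the difference:
```cpp
#include <cstdint>
#include <iostream>
#include <limits>

int main()
{
    const int32_t delta = 400'000'000;                        /// a large but valid Int32 number of weeks
    const int64_t days = static_cast<int64_t>(delta) * 7;     /// widen first, then multiply: 2800000000
    std::cout << days << '\n';
    /// Without the cast the multiplication is done in 32 bits, and 2'800'000'000
    /// exceeds INT32_MAX, i.e. it would be signed overflow.
    std::cout << std::numeric_limits<int32_t>::max() << '\n'; /// 2147483647
}
```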

View File

@ -70,6 +70,14 @@ public:
m_day = values.day_of_month;
}
explicit LocalDate(ExtendedDayNum day_num)
{
const auto & values = DateLUT::instance().getValues(day_num);
m_year = values.year;
m_month = values.month;
m_day = values.day_of_month;
}
LocalDate(unsigned short year_, unsigned char month_, unsigned char day_)
: m_year(year_), m_month(month_), m_day(day_)
{
@ -98,6 +106,12 @@ public:
return DayNum(lut.makeDayNum(m_year, m_month, m_day).toUnderType());
}
ExtendedDayNum getExtenedDayNum() const
{
const auto & lut = DateLUT::instance();
return ExtendedDayNum (lut.makeDayNum(m_year, m_month, m_day).toUnderType());
}
operator DayNum() const
{
return getDayNum();
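For context (not part of the patch): a standalone sketch, with hypothetical stand-in types rather than the ClickHouse headers, of why `LocalDate` gains an `ExtendedDayNum` constructor and `getExtenedDayNum()` (spelling as in the header): the 16-bit `DayNum` wraps around for dates past roughly 2149-06-06, while the 32-bit extended day number does not.
```cpp
#include <cstdint>
#include <iostream>

using DayNum = uint16_t;          /// days since 1970-01-01, as in the old API
using ExtendedDayNum = int32_t;   /// widened day number used by the new methods

int main()
{
    const ExtendedDayNum day_around_2200 = 84006;                 /// ~2200-01-01 as days since epoch
    const DayNum truncated = static_cast<DayNum>(day_around_2200);
    std::cout << day_around_2200 << " -> " << truncated << '\n';  /// 84006 -> 18470: wrapped
}
```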

contrib/rocksdb vendored

@ -1 +1 @@
Subproject commit 07c77549a20b63ff6981b400085eba36bb5c80c4
Subproject commit dac0e9a68080c837d6b6223921f3fc151abbfcdc

View File

@ -70,11 +70,6 @@ else()
endif()
endif()
set(BUILD_VERSION_CC rocksdb_build_version.cc)
add_library(rocksdb_build_version OBJECT ${BUILD_VERSION_CC})
target_include_directories(rocksdb_build_version PRIVATE "${ROCKSDB_SOURCE_DIR}/util")
include(CheckCCompilerFlag)
if(CMAKE_SYSTEM_PROCESSOR MATCHES "^(powerpc|ppc)64")
CHECK_C_COMPILER_FLAG("-mcpu=power9" HAS_POWER9)
@ -243,272 +238,293 @@ find_package(Threads REQUIRED)
# Main library source code
set(SOURCES
"${ROCKSDB_SOURCE_DIR}/cache/cache.cc"
"${ROCKSDB_SOURCE_DIR}/cache/clock_cache.cc"
"${ROCKSDB_SOURCE_DIR}/cache/lru_cache.cc"
"${ROCKSDB_SOURCE_DIR}/cache/sharded_cache.cc"
"${ROCKSDB_SOURCE_DIR}/db/arena_wrapped_db_iter.cc"
"${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_addition.cc"
"${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_builder.cc"
"${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_cache.cc"
"${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_garbage.cc"
"${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_meta.cc"
"${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_reader.cc"
"${ROCKSDB_SOURCE_DIR}/db/blob/blob_log_format.cc"
"${ROCKSDB_SOURCE_DIR}/db/blob/blob_log_sequential_reader.cc"
"${ROCKSDB_SOURCE_DIR}/db/blob/blob_log_writer.cc"
"${ROCKSDB_SOURCE_DIR}/db/builder.cc"
"${ROCKSDB_SOURCE_DIR}/db/c.cc"
"${ROCKSDB_SOURCE_DIR}/db/column_family.cc"
"${ROCKSDB_SOURCE_DIR}/db/compacted_db_impl.cc"
"${ROCKSDB_SOURCE_DIR}/db/compaction/compaction.cc"
"${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_iterator.cc"
"${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_picker.cc"
"${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_job.cc"
"${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_picker_fifo.cc"
"${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_picker_level.cc"
"${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_picker_universal.cc"
"${ROCKSDB_SOURCE_DIR}/db/compaction/sst_partitioner.cc"
"${ROCKSDB_SOURCE_DIR}/db/convenience.cc"
"${ROCKSDB_SOURCE_DIR}/db/db_filesnapshot.cc"
"${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl.cc"
"${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_write.cc"
"${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_compaction_flush.cc"
"${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_files.cc"
"${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_open.cc"
"${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_debug.cc"
"${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_experimental.cc"
"${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_readonly.cc"
"${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_secondary.cc"
"${ROCKSDB_SOURCE_DIR}/db/db_info_dumper.cc"
"${ROCKSDB_SOURCE_DIR}/db/db_iter.cc"
"${ROCKSDB_SOURCE_DIR}/db/dbformat.cc"
"${ROCKSDB_SOURCE_DIR}/db/error_handler.cc"
"${ROCKSDB_SOURCE_DIR}/db/event_helpers.cc"
"${ROCKSDB_SOURCE_DIR}/db/experimental.cc"
"${ROCKSDB_SOURCE_DIR}/db/external_sst_file_ingestion_job.cc"
"${ROCKSDB_SOURCE_DIR}/db/file_indexer.cc"
"${ROCKSDB_SOURCE_DIR}/db/flush_job.cc"
"${ROCKSDB_SOURCE_DIR}/db/flush_scheduler.cc"
"${ROCKSDB_SOURCE_DIR}/db/forward_iterator.cc"
"${ROCKSDB_SOURCE_DIR}/db/import_column_family_job.cc"
"${ROCKSDB_SOURCE_DIR}/db/internal_stats.cc"
"${ROCKSDB_SOURCE_DIR}/db/logs_with_prep_tracker.cc"
"${ROCKSDB_SOURCE_DIR}/db/log_reader.cc"
"${ROCKSDB_SOURCE_DIR}/db/log_writer.cc"
"${ROCKSDB_SOURCE_DIR}/db/malloc_stats.cc"
"${ROCKSDB_SOURCE_DIR}/db/memtable.cc"
"${ROCKSDB_SOURCE_DIR}/db/memtable_list.cc"
"${ROCKSDB_SOURCE_DIR}/db/merge_helper.cc"
"${ROCKSDB_SOURCE_DIR}/db/merge_operator.cc"
"${ROCKSDB_SOURCE_DIR}/db/output_validator.cc"
"${ROCKSDB_SOURCE_DIR}/db/periodic_work_scheduler.cc"
"${ROCKSDB_SOURCE_DIR}/db/range_del_aggregator.cc"
"${ROCKSDB_SOURCE_DIR}/db/range_tombstone_fragmenter.cc"
"${ROCKSDB_SOURCE_DIR}/db/repair.cc"
"${ROCKSDB_SOURCE_DIR}/db/snapshot_impl.cc"
"${ROCKSDB_SOURCE_DIR}/db/table_cache.cc"
"${ROCKSDB_SOURCE_DIR}/db/table_properties_collector.cc"
"${ROCKSDB_SOURCE_DIR}/db/transaction_log_impl.cc"
"${ROCKSDB_SOURCE_DIR}/db/trim_history_scheduler.cc"
"${ROCKSDB_SOURCE_DIR}/db/version_builder.cc"
"${ROCKSDB_SOURCE_DIR}/db/version_edit.cc"
"${ROCKSDB_SOURCE_DIR}/db/version_edit_handler.cc"
"${ROCKSDB_SOURCE_DIR}/db/version_set.cc"
"${ROCKSDB_SOURCE_DIR}/db/wal_edit.cc"
"${ROCKSDB_SOURCE_DIR}/db/wal_manager.cc"
"${ROCKSDB_SOURCE_DIR}/db/write_batch.cc"
"${ROCKSDB_SOURCE_DIR}/db/write_batch_base.cc"
"${ROCKSDB_SOURCE_DIR}/db/write_controller.cc"
"${ROCKSDB_SOURCE_DIR}/db/write_thread.cc"
"${ROCKSDB_SOURCE_DIR}/env/env.cc"
"${ROCKSDB_SOURCE_DIR}/env/env_chroot.cc"
"${ROCKSDB_SOURCE_DIR}/env/env_encryption.cc"
"${ROCKSDB_SOURCE_DIR}/env/env_hdfs.cc"
"${ROCKSDB_SOURCE_DIR}/env/file_system.cc"
"${ROCKSDB_SOURCE_DIR}/env/file_system_tracer.cc"
"${ROCKSDB_SOURCE_DIR}/env/mock_env.cc"
"${ROCKSDB_SOURCE_DIR}/file/delete_scheduler.cc"
"${ROCKSDB_SOURCE_DIR}/file/file_prefetch_buffer.cc"
"${ROCKSDB_SOURCE_DIR}/file/file_util.cc"
"${ROCKSDB_SOURCE_DIR}/file/filename.cc"
"${ROCKSDB_SOURCE_DIR}/file/random_access_file_reader.cc"
"${ROCKSDB_SOURCE_DIR}/file/read_write_util.cc"
"${ROCKSDB_SOURCE_DIR}/file/readahead_raf.cc"
"${ROCKSDB_SOURCE_DIR}/file/sequence_file_reader.cc"
"${ROCKSDB_SOURCE_DIR}/file/sst_file_manager_impl.cc"
"${ROCKSDB_SOURCE_DIR}/file/writable_file_writer.cc"
"${ROCKSDB_SOURCE_DIR}/logging/auto_roll_logger.cc"
"${ROCKSDB_SOURCE_DIR}/logging/event_logger.cc"
"${ROCKSDB_SOURCE_DIR}/logging/log_buffer.cc"
"${ROCKSDB_SOURCE_DIR}/memory/arena.cc"
"${ROCKSDB_SOURCE_DIR}/memory/concurrent_arena.cc"
"${ROCKSDB_SOURCE_DIR}/memory/jemalloc_nodump_allocator.cc"
"${ROCKSDB_SOURCE_DIR}/memory/memkind_kmem_allocator.cc"
"${ROCKSDB_SOURCE_DIR}/memtable/alloc_tracker.cc"
"${ROCKSDB_SOURCE_DIR}/memtable/hash_linklist_rep.cc"
"${ROCKSDB_SOURCE_DIR}/memtable/hash_skiplist_rep.cc"
"${ROCKSDB_SOURCE_DIR}/memtable/skiplistrep.cc"
"${ROCKSDB_SOURCE_DIR}/memtable/vectorrep.cc"
"${ROCKSDB_SOURCE_DIR}/memtable/write_buffer_manager.cc"
"${ROCKSDB_SOURCE_DIR}/monitoring/histogram.cc"
"${ROCKSDB_SOURCE_DIR}/monitoring/histogram_windowing.cc"
"${ROCKSDB_SOURCE_DIR}/monitoring/in_memory_stats_history.cc"
"${ROCKSDB_SOURCE_DIR}/monitoring/instrumented_mutex.cc"
"${ROCKSDB_SOURCE_DIR}/monitoring/iostats_context.cc"
"${ROCKSDB_SOURCE_DIR}/monitoring/perf_context.cc"
"${ROCKSDB_SOURCE_DIR}/monitoring/perf_level.cc"
"${ROCKSDB_SOURCE_DIR}/monitoring/persistent_stats_history.cc"
"${ROCKSDB_SOURCE_DIR}/monitoring/statistics.cc"
"${ROCKSDB_SOURCE_DIR}/monitoring/thread_status_impl.cc"
"${ROCKSDB_SOURCE_DIR}/monitoring/thread_status_updater.cc"
"${ROCKSDB_SOURCE_DIR}/monitoring/thread_status_util.cc"
"${ROCKSDB_SOURCE_DIR}/monitoring/thread_status_util_debug.cc"
"${ROCKSDB_SOURCE_DIR}/options/cf_options.cc"
"${ROCKSDB_SOURCE_DIR}/options/configurable.cc"
"${ROCKSDB_SOURCE_DIR}/options/customizable.cc"
"${ROCKSDB_SOURCE_DIR}/options/db_options.cc"
"${ROCKSDB_SOURCE_DIR}/options/options.cc"
"${ROCKSDB_SOURCE_DIR}/options/options_helper.cc"
"${ROCKSDB_SOURCE_DIR}/options/options_parser.cc"
"${ROCKSDB_SOURCE_DIR}/port/stack_trace.cc"
"${ROCKSDB_SOURCE_DIR}/table/adaptive/adaptive_table_factory.cc"
"${ROCKSDB_SOURCE_DIR}/table/block_based/binary_search_index_reader.cc"
"${ROCKSDB_SOURCE_DIR}/table/block_based/block.cc"
"${ROCKSDB_SOURCE_DIR}/table/block_based/block_based_filter_block.cc"
"${ROCKSDB_SOURCE_DIR}/table/block_based/block_based_table_builder.cc"
"${ROCKSDB_SOURCE_DIR}/table/block_based/block_based_table_factory.cc"
"${ROCKSDB_SOURCE_DIR}/table/block_based/block_based_table_iterator.cc"
"${ROCKSDB_SOURCE_DIR}/table/block_based/block_based_table_reader.cc"
"${ROCKSDB_SOURCE_DIR}/table/block_based/block_builder.cc"
"${ROCKSDB_SOURCE_DIR}/table/block_based/block_prefetcher.cc"
"${ROCKSDB_SOURCE_DIR}/table/block_based/block_prefix_index.cc"
"${ROCKSDB_SOURCE_DIR}/table/block_based/data_block_hash_index.cc"
"${ROCKSDB_SOURCE_DIR}/table/block_based/data_block_footer.cc"
"${ROCKSDB_SOURCE_DIR}/table/block_based/filter_block_reader_common.cc"
"${ROCKSDB_SOURCE_DIR}/table/block_based/filter_policy.cc"
"${ROCKSDB_SOURCE_DIR}/table/block_based/flush_block_policy.cc"
"${ROCKSDB_SOURCE_DIR}/table/block_based/full_filter_block.cc"
"${ROCKSDB_SOURCE_DIR}/table/block_based/hash_index_reader.cc"
"${ROCKSDB_SOURCE_DIR}/table/block_based/index_builder.cc"
"${ROCKSDB_SOURCE_DIR}/table/block_based/index_reader_common.cc"
"${ROCKSDB_SOURCE_DIR}/table/block_based/parsed_full_filter_block.cc"
"${ROCKSDB_SOURCE_DIR}/table/block_based/partitioned_filter_block.cc"
"${ROCKSDB_SOURCE_DIR}/table/block_based/partitioned_index_iterator.cc"
"${ROCKSDB_SOURCE_DIR}/table/block_based/partitioned_index_reader.cc"
"${ROCKSDB_SOURCE_DIR}/table/block_based/reader_common.cc"
"${ROCKSDB_SOURCE_DIR}/table/block_based/uncompression_dict_reader.cc"
"${ROCKSDB_SOURCE_DIR}/table/block_fetcher.cc"
"${ROCKSDB_SOURCE_DIR}/table/cuckoo/cuckoo_table_builder.cc"
"${ROCKSDB_SOURCE_DIR}/table/cuckoo/cuckoo_table_factory.cc"
"${ROCKSDB_SOURCE_DIR}/table/cuckoo/cuckoo_table_reader.cc"
"${ROCKSDB_SOURCE_DIR}/table/format.cc"
"${ROCKSDB_SOURCE_DIR}/table/get_context.cc"
"${ROCKSDB_SOURCE_DIR}/table/iterator.cc"
"${ROCKSDB_SOURCE_DIR}/table/merging_iterator.cc"
"${ROCKSDB_SOURCE_DIR}/table/meta_blocks.cc"
"${ROCKSDB_SOURCE_DIR}/table/persistent_cache_helper.cc"
"${ROCKSDB_SOURCE_DIR}/table/plain/plain_table_bloom.cc"
"${ROCKSDB_SOURCE_DIR}/table/plain/plain_table_builder.cc"
"${ROCKSDB_SOURCE_DIR}/table/plain/plain_table_factory.cc"
"${ROCKSDB_SOURCE_DIR}/table/plain/plain_table_index.cc"
"${ROCKSDB_SOURCE_DIR}/table/plain/plain_table_key_coding.cc"
"${ROCKSDB_SOURCE_DIR}/table/plain/plain_table_reader.cc"
"${ROCKSDB_SOURCE_DIR}/table/sst_file_dumper.cc"
"${ROCKSDB_SOURCE_DIR}/table/sst_file_reader.cc"
"${ROCKSDB_SOURCE_DIR}/table/sst_file_writer.cc"
"${ROCKSDB_SOURCE_DIR}/table/table_factory.cc"
"${ROCKSDB_SOURCE_DIR}/table/table_properties.cc"
"${ROCKSDB_SOURCE_DIR}/table/two_level_iterator.cc"
"${ROCKSDB_SOURCE_DIR}/test_util/sync_point.cc"
"${ROCKSDB_SOURCE_DIR}/test_util/sync_point_impl.cc"
"${ROCKSDB_SOURCE_DIR}/test_util/testutil.cc"
"${ROCKSDB_SOURCE_DIR}/test_util/transaction_test_util.cc"
"${ROCKSDB_SOURCE_DIR}/tools/block_cache_analyzer/block_cache_trace_analyzer.cc"
"${ROCKSDB_SOURCE_DIR}/tools/dump/db_dump_tool.cc"
"${ROCKSDB_SOURCE_DIR}/tools/io_tracer_parser_tool.cc"
"${ROCKSDB_SOURCE_DIR}/tools/ldb_cmd.cc"
"${ROCKSDB_SOURCE_DIR}/tools/ldb_tool.cc"
"${ROCKSDB_SOURCE_DIR}/tools/sst_dump_tool.cc"
"${ROCKSDB_SOURCE_DIR}/tools/trace_analyzer_tool.cc"
"${ROCKSDB_SOURCE_DIR}/trace_replay/trace_replay.cc"
"${ROCKSDB_SOURCE_DIR}/trace_replay/block_cache_tracer.cc"
"${ROCKSDB_SOURCE_DIR}/trace_replay/io_tracer.cc"
"${ROCKSDB_SOURCE_DIR}/util/coding.cc"
"${ROCKSDB_SOURCE_DIR}/util/compaction_job_stats_impl.cc"
"${ROCKSDB_SOURCE_DIR}/util/comparator.cc"
"${ROCKSDB_SOURCE_DIR}/util/compression_context_cache.cc"
"${ROCKSDB_SOURCE_DIR}/util/concurrent_task_limiter_impl.cc"
"${ROCKSDB_SOURCE_DIR}/util/crc32c.cc"
"${ROCKSDB_SOURCE_DIR}/util/dynamic_bloom.cc"
"${ROCKSDB_SOURCE_DIR}/util/hash.cc"
"${ROCKSDB_SOURCE_DIR}/util/murmurhash.cc"
"${ROCKSDB_SOURCE_DIR}/util/random.cc"
"${ROCKSDB_SOURCE_DIR}/util/rate_limiter.cc"
"${ROCKSDB_SOURCE_DIR}/util/slice.cc"
"${ROCKSDB_SOURCE_DIR}/util/file_checksum_helper.cc"
"${ROCKSDB_SOURCE_DIR}/util/status.cc"
"${ROCKSDB_SOURCE_DIR}/util/string_util.cc"
"${ROCKSDB_SOURCE_DIR}/util/thread_local.cc"
"${ROCKSDB_SOURCE_DIR}/util/threadpool_imp.cc"
"${ROCKSDB_SOURCE_DIR}/util/xxhash.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/backupable/backupable_db.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_compaction_filter.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_db.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_db_impl.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_db_impl_filesnapshot.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_dump_tool.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_file.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/cassandra/cassandra_compaction_filter.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/cassandra/format.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/cassandra/merge_operator.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/checkpoint/checkpoint_impl.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/compaction_filters/remove_emptyvalue_compactionfilter.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/debug.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/env_mirror.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/env_timed.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/fault_injection_env.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/fault_injection_fs.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/leveldb_options/leveldb_options.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/memory/memory_util.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/bytesxor.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/max.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/put.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/sortlist.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/string_append/stringappend.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/string_append/stringappend2.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/uint64add.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/object_registry.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/option_change_migration/option_change_migration.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/options/options_util.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/persistent_cache/block_cache_tier.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/persistent_cache/block_cache_tier_file.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/persistent_cache/block_cache_tier_metadata.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/persistent_cache/persistent_cache_tier.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/persistent_cache/volatile_tier_impl.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/simulator_cache/cache_simulator.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/simulator_cache/sim_cache.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/table_properties_collectors/compact_on_deletion_collector.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/trace/file_trace_reader_writer.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/lock_manager.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/point/point_lock_tracker.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/point/point_lock_manager.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/transactions/optimistic_transaction_db_impl.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/transactions/optimistic_transaction.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/transactions/pessimistic_transaction.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/transactions/pessimistic_transaction_db.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/transactions/snapshot_checker.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/transactions/transaction_base.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/transactions/transaction_db_mutex_impl.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/transactions/transaction_util.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_prepared_txn.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_prepared_txn_db.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_unprepared_txn.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_unprepared_txn_db.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/ttl/db_ttl_impl.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/write_batch_with_index/write_batch_with_index.cc"
"${ROCKSDB_SOURCE_DIR}/utilities/write_batch_with_index/write_batch_with_index_internal.cc"
$<TARGET_OBJECTS:rocksdb_build_version>)
${ROCKSDB_SOURCE_DIR}/cache/cache.cc
${ROCKSDB_SOURCE_DIR}/cache/cache_entry_roles.cc
${ROCKSDB_SOURCE_DIR}/cache/clock_cache.cc
${ROCKSDB_SOURCE_DIR}/cache/lru_cache.cc
${ROCKSDB_SOURCE_DIR}/cache/sharded_cache.cc
${ROCKSDB_SOURCE_DIR}/db/arena_wrapped_db_iter.cc
${ROCKSDB_SOURCE_DIR}/db/blob/blob_fetcher.cc
${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_addition.cc
${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_builder.cc
${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_cache.cc
${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_garbage.cc
${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_meta.cc
${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_reader.cc
${ROCKSDB_SOURCE_DIR}/db/blob/blob_garbage_meter.cc
${ROCKSDB_SOURCE_DIR}/db/blob/blob_log_format.cc
${ROCKSDB_SOURCE_DIR}/db/blob/blob_log_sequential_reader.cc
${ROCKSDB_SOURCE_DIR}/db/blob/blob_log_writer.cc
${ROCKSDB_SOURCE_DIR}/db/builder.cc
${ROCKSDB_SOURCE_DIR}/db/c.cc
${ROCKSDB_SOURCE_DIR}/db/column_family.cc
${ROCKSDB_SOURCE_DIR}/db/compaction/compaction.cc
${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_iterator.cc
${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_picker.cc
${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_job.cc
${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_picker_fifo.cc
${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_picker_level.cc
${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_picker_universal.cc
${ROCKSDB_SOURCE_DIR}/db/compaction/sst_partitioner.cc
${ROCKSDB_SOURCE_DIR}/db/convenience.cc
${ROCKSDB_SOURCE_DIR}/db/db_filesnapshot.cc
${ROCKSDB_SOURCE_DIR}/db/db_impl/compacted_db_impl.cc
${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl.cc
${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_write.cc
${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_compaction_flush.cc
${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_files.cc
${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_open.cc
${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_debug.cc
${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_experimental.cc
${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_readonly.cc
${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_secondary.cc
${ROCKSDB_SOURCE_DIR}/db/db_info_dumper.cc
${ROCKSDB_SOURCE_DIR}/db/db_iter.cc
${ROCKSDB_SOURCE_DIR}/db/dbformat.cc
${ROCKSDB_SOURCE_DIR}/db/error_handler.cc
${ROCKSDB_SOURCE_DIR}/db/event_helpers.cc
${ROCKSDB_SOURCE_DIR}/db/experimental.cc
${ROCKSDB_SOURCE_DIR}/db/external_sst_file_ingestion_job.cc
${ROCKSDB_SOURCE_DIR}/db/file_indexer.cc
${ROCKSDB_SOURCE_DIR}/db/flush_job.cc
${ROCKSDB_SOURCE_DIR}/db/flush_scheduler.cc
${ROCKSDB_SOURCE_DIR}/db/forward_iterator.cc
${ROCKSDB_SOURCE_DIR}/db/import_column_family_job.cc
${ROCKSDB_SOURCE_DIR}/db/internal_stats.cc
${ROCKSDB_SOURCE_DIR}/db/logs_with_prep_tracker.cc
${ROCKSDB_SOURCE_DIR}/db/log_reader.cc
${ROCKSDB_SOURCE_DIR}/db/log_writer.cc
${ROCKSDB_SOURCE_DIR}/db/malloc_stats.cc
${ROCKSDB_SOURCE_DIR}/db/memtable.cc
${ROCKSDB_SOURCE_DIR}/db/memtable_list.cc
${ROCKSDB_SOURCE_DIR}/db/merge_helper.cc
${ROCKSDB_SOURCE_DIR}/db/merge_operator.cc
${ROCKSDB_SOURCE_DIR}/db/output_validator.cc
${ROCKSDB_SOURCE_DIR}/db/periodic_work_scheduler.cc
${ROCKSDB_SOURCE_DIR}/db/range_del_aggregator.cc
${ROCKSDB_SOURCE_DIR}/db/range_tombstone_fragmenter.cc
${ROCKSDB_SOURCE_DIR}/db/repair.cc
${ROCKSDB_SOURCE_DIR}/db/snapshot_impl.cc
${ROCKSDB_SOURCE_DIR}/db/table_cache.cc
${ROCKSDB_SOURCE_DIR}/db/table_properties_collector.cc
${ROCKSDB_SOURCE_DIR}/db/transaction_log_impl.cc
${ROCKSDB_SOURCE_DIR}/db/trim_history_scheduler.cc
${ROCKSDB_SOURCE_DIR}/db/version_builder.cc
${ROCKSDB_SOURCE_DIR}/db/version_edit.cc
${ROCKSDB_SOURCE_DIR}/db/version_edit_handler.cc
${ROCKSDB_SOURCE_DIR}/db/version_set.cc
${ROCKSDB_SOURCE_DIR}/db/wal_edit.cc
${ROCKSDB_SOURCE_DIR}/db/wal_manager.cc
${ROCKSDB_SOURCE_DIR}/db/write_batch.cc
${ROCKSDB_SOURCE_DIR}/db/write_batch_base.cc
${ROCKSDB_SOURCE_DIR}/db/write_controller.cc
${ROCKSDB_SOURCE_DIR}/db/write_thread.cc
${ROCKSDB_SOURCE_DIR}/env/composite_env.cc
${ROCKSDB_SOURCE_DIR}/env/env.cc
${ROCKSDB_SOURCE_DIR}/env/env_chroot.cc
${ROCKSDB_SOURCE_DIR}/env/env_encryption.cc
${ROCKSDB_SOURCE_DIR}/env/env_hdfs.cc
${ROCKSDB_SOURCE_DIR}/env/file_system.cc
${ROCKSDB_SOURCE_DIR}/env/file_system_tracer.cc
${ROCKSDB_SOURCE_DIR}/env/fs_remap.cc
${ROCKSDB_SOURCE_DIR}/env/mock_env.cc
${ROCKSDB_SOURCE_DIR}/file/delete_scheduler.cc
${ROCKSDB_SOURCE_DIR}/file/file_prefetch_buffer.cc
${ROCKSDB_SOURCE_DIR}/file/file_util.cc
${ROCKSDB_SOURCE_DIR}/file/filename.cc
${ROCKSDB_SOURCE_DIR}/file/line_file_reader.cc
${ROCKSDB_SOURCE_DIR}/file/random_access_file_reader.cc
${ROCKSDB_SOURCE_DIR}/file/read_write_util.cc
${ROCKSDB_SOURCE_DIR}/file/readahead_raf.cc
${ROCKSDB_SOURCE_DIR}/file/sequence_file_reader.cc
${ROCKSDB_SOURCE_DIR}/file/sst_file_manager_impl.cc
${ROCKSDB_SOURCE_DIR}/file/writable_file_writer.cc
${ROCKSDB_SOURCE_DIR}/logging/auto_roll_logger.cc
${ROCKSDB_SOURCE_DIR}/logging/event_logger.cc
${ROCKSDB_SOURCE_DIR}/logging/log_buffer.cc
${ROCKSDB_SOURCE_DIR}/memory/arena.cc
${ROCKSDB_SOURCE_DIR}/memory/concurrent_arena.cc
${ROCKSDB_SOURCE_DIR}/memory/jemalloc_nodump_allocator.cc
${ROCKSDB_SOURCE_DIR}/memory/memkind_kmem_allocator.cc
${ROCKSDB_SOURCE_DIR}/memtable/alloc_tracker.cc
${ROCKSDB_SOURCE_DIR}/memtable/hash_linklist_rep.cc
${ROCKSDB_SOURCE_DIR}/memtable/hash_skiplist_rep.cc
${ROCKSDB_SOURCE_DIR}/memtable/skiplistrep.cc
${ROCKSDB_SOURCE_DIR}/memtable/vectorrep.cc
${ROCKSDB_SOURCE_DIR}/memtable/write_buffer_manager.cc
${ROCKSDB_SOURCE_DIR}/monitoring/histogram.cc
${ROCKSDB_SOURCE_DIR}/monitoring/histogram_windowing.cc
${ROCKSDB_SOURCE_DIR}/monitoring/in_memory_stats_history.cc
${ROCKSDB_SOURCE_DIR}/monitoring/instrumented_mutex.cc
${ROCKSDB_SOURCE_DIR}/monitoring/iostats_context.cc
${ROCKSDB_SOURCE_DIR}/monitoring/perf_context.cc
${ROCKSDB_SOURCE_DIR}/monitoring/perf_level.cc
${ROCKSDB_SOURCE_DIR}/monitoring/persistent_stats_history.cc
${ROCKSDB_SOURCE_DIR}/monitoring/statistics.cc
${ROCKSDB_SOURCE_DIR}/monitoring/thread_status_impl.cc
${ROCKSDB_SOURCE_DIR}/monitoring/thread_status_updater.cc
${ROCKSDB_SOURCE_DIR}/monitoring/thread_status_util.cc
${ROCKSDB_SOURCE_DIR}/monitoring/thread_status_util_debug.cc
${ROCKSDB_SOURCE_DIR}/options/cf_options.cc
${ROCKSDB_SOURCE_DIR}/options/configurable.cc
${ROCKSDB_SOURCE_DIR}/options/customizable.cc
${ROCKSDB_SOURCE_DIR}/options/db_options.cc
${ROCKSDB_SOURCE_DIR}/options/options.cc
${ROCKSDB_SOURCE_DIR}/options/options_helper.cc
${ROCKSDB_SOURCE_DIR}/options/options_parser.cc
${ROCKSDB_SOURCE_DIR}/port/stack_trace.cc
${ROCKSDB_SOURCE_DIR}/table/adaptive/adaptive_table_factory.cc
${ROCKSDB_SOURCE_DIR}/table/block_based/binary_search_index_reader.cc
${ROCKSDB_SOURCE_DIR}/table/block_based/block.cc
${ROCKSDB_SOURCE_DIR}/table/block_based/block_based_filter_block.cc
${ROCKSDB_SOURCE_DIR}/table/block_based/block_based_table_builder.cc
${ROCKSDB_SOURCE_DIR}/table/block_based/block_based_table_factory.cc
${ROCKSDB_SOURCE_DIR}/table/block_based/block_based_table_iterator.cc
${ROCKSDB_SOURCE_DIR}/table/block_based/block_based_table_reader.cc
${ROCKSDB_SOURCE_DIR}/table/block_based/block_builder.cc
${ROCKSDB_SOURCE_DIR}/table/block_based/block_prefetcher.cc
${ROCKSDB_SOURCE_DIR}/table/block_based/block_prefix_index.cc
${ROCKSDB_SOURCE_DIR}/table/block_based/data_block_hash_index.cc
${ROCKSDB_SOURCE_DIR}/table/block_based/data_block_footer.cc
${ROCKSDB_SOURCE_DIR}/table/block_based/filter_block_reader_common.cc
${ROCKSDB_SOURCE_DIR}/table/block_based/filter_policy.cc
${ROCKSDB_SOURCE_DIR}/table/block_based/flush_block_policy.cc
${ROCKSDB_SOURCE_DIR}/table/block_based/full_filter_block.cc
${ROCKSDB_SOURCE_DIR}/table/block_based/hash_index_reader.cc
${ROCKSDB_SOURCE_DIR}/table/block_based/index_builder.cc
${ROCKSDB_SOURCE_DIR}/table/block_based/index_reader_common.cc
${ROCKSDB_SOURCE_DIR}/table/block_based/parsed_full_filter_block.cc
${ROCKSDB_SOURCE_DIR}/table/block_based/partitioned_filter_block.cc
${ROCKSDB_SOURCE_DIR}/table/block_based/partitioned_index_iterator.cc
${ROCKSDB_SOURCE_DIR}/table/block_based/partitioned_index_reader.cc
${ROCKSDB_SOURCE_DIR}/table/block_based/reader_common.cc
${ROCKSDB_SOURCE_DIR}/table/block_based/uncompression_dict_reader.cc
${ROCKSDB_SOURCE_DIR}/table/block_fetcher.cc
${ROCKSDB_SOURCE_DIR}/table/cuckoo/cuckoo_table_builder.cc
${ROCKSDB_SOURCE_DIR}/table/cuckoo/cuckoo_table_factory.cc
${ROCKSDB_SOURCE_DIR}/table/cuckoo/cuckoo_table_reader.cc
${ROCKSDB_SOURCE_DIR}/table/format.cc
${ROCKSDB_SOURCE_DIR}/table/get_context.cc
${ROCKSDB_SOURCE_DIR}/table/iterator.cc
${ROCKSDB_SOURCE_DIR}/table/merging_iterator.cc
${ROCKSDB_SOURCE_DIR}/table/meta_blocks.cc
${ROCKSDB_SOURCE_DIR}/table/persistent_cache_helper.cc
${ROCKSDB_SOURCE_DIR}/table/plain/plain_table_bloom.cc
${ROCKSDB_SOURCE_DIR}/table/plain/plain_table_builder.cc
${ROCKSDB_SOURCE_DIR}/table/plain/plain_table_factory.cc
${ROCKSDB_SOURCE_DIR}/table/plain/plain_table_index.cc
${ROCKSDB_SOURCE_DIR}/table/plain/plain_table_key_coding.cc
${ROCKSDB_SOURCE_DIR}/table/plain/plain_table_reader.cc
${ROCKSDB_SOURCE_DIR}/table/sst_file_dumper.cc
${ROCKSDB_SOURCE_DIR}/table/sst_file_reader.cc
${ROCKSDB_SOURCE_DIR}/table/sst_file_writer.cc
${ROCKSDB_SOURCE_DIR}/table/table_factory.cc
${ROCKSDB_SOURCE_DIR}/table/table_properties.cc
${ROCKSDB_SOURCE_DIR}/table/two_level_iterator.cc
${ROCKSDB_SOURCE_DIR}/test_util/sync_point.cc
${ROCKSDB_SOURCE_DIR}/test_util/sync_point_impl.cc
${ROCKSDB_SOURCE_DIR}/test_util/testutil.cc
${ROCKSDB_SOURCE_DIR}/test_util/transaction_test_util.cc
${ROCKSDB_SOURCE_DIR}/tools/block_cache_analyzer/block_cache_trace_analyzer.cc
${ROCKSDB_SOURCE_DIR}/tools/dump/db_dump_tool.cc
${ROCKSDB_SOURCE_DIR}/tools/io_tracer_parser_tool.cc
${ROCKSDB_SOURCE_DIR}/tools/ldb_cmd.cc
${ROCKSDB_SOURCE_DIR}/tools/ldb_tool.cc
${ROCKSDB_SOURCE_DIR}/tools/sst_dump_tool.cc
${ROCKSDB_SOURCE_DIR}/tools/trace_analyzer_tool.cc
${ROCKSDB_SOURCE_DIR}/trace_replay/trace_replay.cc
${ROCKSDB_SOURCE_DIR}/trace_replay/block_cache_tracer.cc
${ROCKSDB_SOURCE_DIR}/trace_replay/io_tracer.cc
${ROCKSDB_SOURCE_DIR}/util/coding.cc
${ROCKSDB_SOURCE_DIR}/util/compaction_job_stats_impl.cc
${ROCKSDB_SOURCE_DIR}/util/comparator.cc
${ROCKSDB_SOURCE_DIR}/util/compression_context_cache.cc
${ROCKSDB_SOURCE_DIR}/util/concurrent_task_limiter_impl.cc
${ROCKSDB_SOURCE_DIR}/util/crc32c.cc
${ROCKSDB_SOURCE_DIR}/util/dynamic_bloom.cc
${ROCKSDB_SOURCE_DIR}/util/hash.cc
${ROCKSDB_SOURCE_DIR}/util/murmurhash.cc
${ROCKSDB_SOURCE_DIR}/util/random.cc
${ROCKSDB_SOURCE_DIR}/util/rate_limiter.cc
${ROCKSDB_SOURCE_DIR}/util/ribbon_config.cc
${ROCKSDB_SOURCE_DIR}/util/slice.cc
${ROCKSDB_SOURCE_DIR}/util/file_checksum_helper.cc
${ROCKSDB_SOURCE_DIR}/util/status.cc
${ROCKSDB_SOURCE_DIR}/util/string_util.cc
${ROCKSDB_SOURCE_DIR}/util/thread_local.cc
${ROCKSDB_SOURCE_DIR}/util/threadpool_imp.cc
${ROCKSDB_SOURCE_DIR}/util/xxhash.cc
${ROCKSDB_SOURCE_DIR}/utilities/backupable/backupable_db.cc
${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_compaction_filter.cc
${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_db.cc
${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_db_impl.cc
${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_db_impl_filesnapshot.cc
${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_dump_tool.cc
${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_file.cc
${ROCKSDB_SOURCE_DIR}/utilities/cassandra/cassandra_compaction_filter.cc
${ROCKSDB_SOURCE_DIR}/utilities/cassandra/format.cc
${ROCKSDB_SOURCE_DIR}/utilities/cassandra/merge_operator.cc
${ROCKSDB_SOURCE_DIR}/utilities/checkpoint/checkpoint_impl.cc
${ROCKSDB_SOURCE_DIR}/utilities/compaction_filters/remove_emptyvalue_compactionfilter.cc
${ROCKSDB_SOURCE_DIR}/utilities/debug.cc
${ROCKSDB_SOURCE_DIR}/utilities/env_mirror.cc
${ROCKSDB_SOURCE_DIR}/utilities/env_timed.cc
${ROCKSDB_SOURCE_DIR}/utilities/fault_injection_env.cc
${ROCKSDB_SOURCE_DIR}/utilities/fault_injection_fs.cc
${ROCKSDB_SOURCE_DIR}/utilities/leveldb_options/leveldb_options.cc
${ROCKSDB_SOURCE_DIR}/utilities/memory/memory_util.cc
${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/bytesxor.cc
${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/max.cc
${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/put.cc
${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/sortlist.cc
${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/string_append/stringappend.cc
${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/string_append/stringappend2.cc
${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/uint64add.cc
${ROCKSDB_SOURCE_DIR}/utilities/object_registry.cc
${ROCKSDB_SOURCE_DIR}/utilities/option_change_migration/option_change_migration.cc
${ROCKSDB_SOURCE_DIR}/utilities/options/options_util.cc
${ROCKSDB_SOURCE_DIR}/utilities/persistent_cache/block_cache_tier.cc
${ROCKSDB_SOURCE_DIR}/utilities/persistent_cache/block_cache_tier_file.cc
${ROCKSDB_SOURCE_DIR}/utilities/persistent_cache/block_cache_tier_metadata.cc
${ROCKSDB_SOURCE_DIR}/utilities/persistent_cache/persistent_cache_tier.cc
${ROCKSDB_SOURCE_DIR}/utilities/persistent_cache/volatile_tier_impl.cc
${ROCKSDB_SOURCE_DIR}/utilities/simulator_cache/cache_simulator.cc
${ROCKSDB_SOURCE_DIR}/utilities/simulator_cache/sim_cache.cc
${ROCKSDB_SOURCE_DIR}/utilities/table_properties_collectors/compact_on_deletion_collector.cc
${ROCKSDB_SOURCE_DIR}/utilities/trace/file_trace_reader_writer.cc
${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/lock_manager.cc
${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/point/point_lock_tracker.cc
${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/point/point_lock_manager.cc
${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/range_tree_lock_manager.cc
${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/range_tree_lock_tracker.cc
${ROCKSDB_SOURCE_DIR}/utilities/transactions/optimistic_transaction_db_impl.cc
${ROCKSDB_SOURCE_DIR}/utilities/transactions/optimistic_transaction.cc
${ROCKSDB_SOURCE_DIR}/utilities/transactions/pessimistic_transaction.cc
${ROCKSDB_SOURCE_DIR}/utilities/transactions/pessimistic_transaction_db.cc
${ROCKSDB_SOURCE_DIR}/utilities/transactions/snapshot_checker.cc
${ROCKSDB_SOURCE_DIR}/utilities/transactions/transaction_base.cc
${ROCKSDB_SOURCE_DIR}/utilities/transactions/transaction_db_mutex_impl.cc
${ROCKSDB_SOURCE_DIR}/utilities/transactions/transaction_util.cc
${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_prepared_txn.cc
${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_prepared_txn_db.cc
${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_unprepared_txn.cc
${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_unprepared_txn_db.cc
${ROCKSDB_SOURCE_DIR}/utilities/ttl/db_ttl_impl.cc
${ROCKSDB_SOURCE_DIR}/utilities/write_batch_with_index/write_batch_with_index.cc
${ROCKSDB_SOURCE_DIR}/utilities/write_batch_with_index/write_batch_with_index_internal.cc
${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/lib/locktree/concurrent_tree.cc
${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/lib/locktree/keyrange.cc
${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/lib/locktree/lock_request.cc
${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/lib/locktree/locktree.cc
${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/lib/locktree/manager.cc
${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/lib/locktree/range_buffer.cc
${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/lib/locktree/treenode.cc
${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/lib/locktree/txnid_set.cc
${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/lib/locktree/wfg.cc
${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/lib/standalone_port.cc
${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/lib/util/dbt.cc
${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/lib/util/memarena.cc
rocksdb_build_version.cc)
if(HAVE_SSE42 AND NOT MSVC)
set_source_files_properties(

View File

@ -1,3 +1,62 @@
const char* rocksdb_build_git_sha = "rocksdb_build_git_sha:0";
const char* rocksdb_build_git_date = "rocksdb_build_git_date:2000-01-01";
const char* rocksdb_build_compile_date = "2000-01-01";
// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
/// This file was edited for ClickHouse.
#include <memory>
#include "rocksdb/version.h"
#include "util/string_util.h"
// The build script may replace these values with real values based
// on whether or not GIT is available and the platform settings
static const std::string rocksdb_build_git_sha = "rocksdb_build_git_sha:0";
static const std::string rocksdb_build_git_tag = "rocksdb_build_git_tag:master";
static const std::string rocksdb_build_date = "rocksdb_build_date:2000-01-01";
namespace ROCKSDB_NAMESPACE {
static void AddProperty(std::unordered_map<std::string, std::string> *props, const std::string& name) {
size_t colon = name.find(":");
if (colon != std::string::npos && colon > 0 && colon < name.length() - 1) {
// If we found a "@:", then this property was a build-time substitution that failed. Skip it
size_t at = name.find("@", colon);
if (at != colon + 1) {
// Everything before the colon is the name, after is the value
(*props)[name.substr(0, colon)] = name.substr(colon + 1);
}
}
}
static std::unordered_map<std::string, std::string>* LoadPropertiesSet() {
auto * properties = new std::unordered_map<std::string, std::string>();
AddProperty(properties, rocksdb_build_git_sha);
AddProperty(properties, rocksdb_build_git_tag);
AddProperty(properties, rocksdb_build_date);
return properties;
}
const std::unordered_map<std::string, std::string>& GetRocksBuildProperties() {
static std::unique_ptr<std::unordered_map<std::string, std::string>> props(LoadPropertiesSet());
return *props;
}
std::string GetRocksVersionAsString(bool with_patch) {
std::string version = ToString(ROCKSDB_MAJOR) + "." + ToString(ROCKSDB_MINOR);
if (with_patch) {
return version + "." + ToString(ROCKSDB_PATCH);
} else {
return version;
}
}
std::string GetRocksBuildInfoAsString(const std::string& program, bool verbose) {
std::string info = program + " (RocksDB) " + GetRocksVersionAsString(true);
if (verbose) {
for (const auto& it : GetRocksBuildProperties()) {
info.append("\n ");
info.append(it.first);
info.append(": ");
info.append(it.second);
}
}
return info;
}
} // namespace ROCKSDB_NAMESPACE
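For context (not part of the patch): the parsing above stores each `name:value` string in a map and skips values that still contain the `@...@` placeholder of a build-time substitution that never ran. A standalone re-implementation sketch of that behaviour:
```cpp
#include <iostream>
#include <string>
#include <unordered_map>

static void AddProperty(std::unordered_map<std::string, std::string> * props, const std::string & name)
{
    size_t colon = name.find(':');
    if (colon != std::string::npos && colon > 0 && colon < name.length() - 1)
    {
        /// A value starting with '@' means the build-time substitution failed; skip it.
        size_t at = name.find('@', colon);
        if (at != colon + 1)
            (*props)[name.substr(0, colon)] = name.substr(colon + 1);
    }
}

int main()
{
    std::unordered_map<std::string, std::string> props;
    AddProperty(&props, "rocksdb_build_git_sha:0");           /// stored
    AddProperty(&props, "rocksdb_build_git_sha:@GIT_SHA@");   /// skipped, value keeps the placeholder
    for (const auto & kv : props)
        std::cout << kv.first << " = " << kv.second << '\n';
}
```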

View File

@ -115,6 +115,8 @@ set(S2_SRCS
add_library(s2 ${S2_SRCS})
set_property(TARGET s2 PROPERTY CXX_STANDARD 11)
if (OPENSSL_FOUND)
target_link_libraries(s2 PRIVATE ${OPENSSL_LIBRARIES})
endif()

View File

@ -27,7 +27,7 @@ RUN apt-get update \
# Special dpkg-deb (https://github.com/ClickHouse-Extras/dpkg) version which is able
# to compress files using pigz (https://zlib.net/pigz/) instead of gzip.
# Significantly increase deb packaging speed and compatible with old systems
RUN curl -O https://clickhouse-builds.s3.yandex.net/utils/1/dpkg-deb \
RUN curl -O https://clickhouse-datasets.s3.yandex.net/utils/1/dpkg-deb \
&& chmod +x dpkg-deb \
&& cp dpkg-deb /usr/bin

View File

@ -2,7 +2,7 @@
FROM yandex/clickhouse-deb-builder
RUN export CODENAME="$(lsb_release --codename --short | tr 'A-Z' 'a-z')" \
&& wget -nv -O /tmp/arrow-keyring.deb "https://apache.bintray.com/arrow/ubuntu/apache-arrow-archive-keyring-latest-${CODENAME}.deb" \
&& wget -nv -O /tmp/arrow-keyring.deb "https://apache.jfrog.io/artifactory/arrow/ubuntu/apache-arrow-apt-source-latest-${CODENAME}.deb" \
&& dpkg -i /tmp/arrow-keyring.deb
# Libraries from OS are only needed to test the "unbundled" build (that is not used in production).

View File

@ -27,7 +27,7 @@ RUN apt-get update \
# Special dpkg-deb (https://github.com/ClickHouse-Extras/dpkg) version which is able
# to compress files using pigz (https://zlib.net/pigz/) instead of gzip.
# Significantly increase deb packaging speed and compatible with old systems
RUN curl -O https://clickhouse-builds.s3.yandex.net/utils/1/dpkg-deb \
RUN curl -O https://clickhouse-datasets.s3.yandex.net/utils/1/dpkg-deb \
&& chmod +x dpkg-deb \
&& cp dpkg-deb /usr/bin

View File

@ -27,7 +27,7 @@ RUN apt-get update \
# Special dpkg-deb (https://github.com/ClickHouse-Extras/dpkg) version which is able
# to compress files using pigz (https://zlib.net/pigz/) instead of gzip.
# Significantly increase deb packaging speed and compatible with old systems
RUN curl -O https://clickhouse-builds.s3.yandex.net/utils/1/dpkg-deb \
RUN curl -O https://clickhouse-datasets.s3.yandex.net/utils/1/dpkg-deb \
&& chmod +x dpkg-deb \
&& cp dpkg-deb /usr/bin

View File

@ -76,6 +76,7 @@ RUN python3 -m pip install \
pytest \
pytest-timeout \
pytest-xdist \
pytest-repeat \
redis \
tzlocal \
urllib3 \

View File

@ -2,7 +2,7 @@ version: '2.3'
services:
postgres1:
image: postgres
command: ["postgres", "-c", "logging_collector=on", "-c", "log_directory=/postgres/logs", "-c", "log_filename=postgresql.log", "-c", "log_statement=all"]
command: ["postgres", "-c", "logging_collector=on", "-c", "log_directory=/postgres/logs", "-c", "log_filename=postgresql.log", "-c", "log_statement=all", "-c", "max_connections=200"]
restart: always
expose:
- ${POSTGRES_PORT}

View File

@ -58,11 +58,11 @@ function start()
echo "Cannot start clickhouse-server"
cat /var/log/clickhouse-server/stdout.log
tail -n1000 /var/log/clickhouse-server/stderr.log
tail -n1000 /var/log/clickhouse-server/clickhouse-server.log
tail -n100000 /var/log/clickhouse-server/clickhouse-server.log | grep -F -v -e '<Warning> RaftInstance:' -e '<Information> RaftInstance' | tail -n1000
break
fi
# use root to match with current uid
clickhouse start --user root >/var/log/clickhouse-server/stdout.log 2>/var/log/clickhouse-server/stderr.log
clickhouse start --user root >/var/log/clickhouse-server/stdout.log 2>>/var/log/clickhouse-server/stderr.log
sleep 0.5
counter=$((counter + 1))
done
@ -118,35 +118,35 @@ clickhouse-client --query "SELECT 'Server successfully started', 'OK'" >> /test_
[ -f /var/log/clickhouse-server/stderr.log ] || echo -e "Stderr log does not exist\tFAIL"
# Print Fatal log messages to stdout
zgrep -Fa " <Fatal> " /var/log/clickhouse-server/clickhouse-server.log
zgrep -Fa " <Fatal> " /var/log/clickhouse-server/clickhouse-server.log*
# Grep logs for sanitizer asserts, crashes and other critical errors
# Sanitizer asserts
zgrep -Fa "==================" /var/log/clickhouse-server/stderr.log >> /test_output/tmp
zgrep -Fa "WARNING" /var/log/clickhouse-server/stderr.log >> /test_output/tmp
zgrep -Fav "ASan doesn't fully support makecontext/swapcontext functions" > /dev/null \
zgrep -Fav "ASan doesn't fully support makecontext/swapcontext functions" /test_output/tmp > /dev/null \
&& echo -e 'Sanitizer assert (in stderr.log)\tFAIL' >> /test_output/test_results.tsv \
|| echo -e 'No sanitizer asserts\tOK' >> /test_output/test_results.tsv
rm -f /test_output/tmp
# OOM
zgrep -Fa " <Fatal> Application: Child process was terminated by signal 9" /var/log/clickhouse-server/clickhouse-server.log > /dev/null \
zgrep -Fa " <Fatal> Application: Child process was terminated by signal 9" /var/log/clickhouse-server/clickhouse-server.log* > /dev/null \
&& echo -e 'OOM killer (or signal 9) in clickhouse-server.log\tFAIL' >> /test_output/test_results.tsv \
|| echo -e 'No OOM messages in clickhouse-server.log\tOK' >> /test_output/test_results.tsv
# Logical errors
zgrep -Fa "Code: 49, e.displayText() = DB::Exception:" /var/log/clickhouse-server/clickhouse-server.log > /dev/null \
zgrep -Fa "Code: 49, e.displayText() = DB::Exception:" /var/log/clickhouse-server/clickhouse-server.log* > /dev/null \
&& echo -e 'Logical error thrown (see clickhouse-server.log)\tFAIL' >> /test_output/test_results.tsv \
|| echo -e 'No logical errors\tOK' >> /test_output/test_results.tsv
# Crash
zgrep -Fa "########################################" /var/log/clickhouse-server/clickhouse-server.log > /dev/null \
zgrep -Fa "########################################" /var/log/clickhouse-server/clickhouse-server.log* > /dev/null \
&& echo -e 'Killed by signal (in clickhouse-server.log)\tFAIL' >> /test_output/test_results.tsv \
|| echo -e 'Not crashed\tOK' >> /test_output/test_results.tsv
# It also checks for crash without stacktrace (printed by watchdog)
zgrep -Fa " <Fatal> " /var/log/clickhouse-server/clickhouse-server.log > /dev/null \
zgrep -Fa " <Fatal> " /var/log/clickhouse-server/clickhouse-server.log* > /dev/null \
&& echo -e 'Fatal message in clickhouse-server.log\tFAIL' >> /test_output/test_results.tsv \
|| echo -e 'No fatal messages in clickhouse-server.log\tOK' >> /test_output/test_results.tsv

View File

@ -123,7 +123,7 @@ For installing CMake and Ninja on Mac OS X first install Homebrew and then insta
/usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
brew install cmake ninja
Next, check the version of CMake: `cmake --version`. If it is below 3.3, you should install a newer version from the website: https://cmake.org/download/.
Next, check the version of CMake: `cmake --version`. If it is below 3.12, you should install a newer version from the website: https://cmake.org/download/.
## Optional External Libraries {#optional-external-libraries}

View File

@ -47,7 +47,7 @@ EXCHANGE TABLES new_table AND old_table;
### ReplicatedMergeTree in Atomic Database {#replicatedmergetree-in-atomic-database}
For [ReplicatedMergeTree](../table-engines/mergetree-family/replication.md#table_engines-replication) tables, it is recommended to not specify engine parameters - path in ZooKeeper and replica name. In this case, configuration parameters will be used [default_replica_path](../../operations/server-configuration-parameters/settings.md#default_replica_path) and [default_replica_name](../../operations/server-configuration-parameters/settings.md#default_replica_name). If you want to specify engine parameters explicitly, it is recommended to use {uuid} macros. This is useful so that unique paths are automatically generated for each table in ZooKeeper.
For [ReplicatedMergeTree](../table-engines/mergetree-family/replication.md#table_engines-replication) tables, it is recommended to not specify engine parameters - path in ZooKeeper and replica name. In this case, configuration parameters will be used [default_replica_path](../../operations/server-configuration-parameters/settings.md#default_replica_path) and [default_replica_name](../../operations/server-configuration-parameters/settings.md#default_replica_name). If you want to specify engine parameters explicitly, it is recommended to use `{uuid}` macros. This is useful so that unique paths are automatically generated for each table in ZooKeeper.
## See Also

View File

@ -22,4 +22,4 @@ You can also use the following database engines:
- [PostgreSQL](../../engines/database-engines/postgresql.md)
[Original article](https://clickhouse.tech/docs/en/database_engines/) <!--hide-->
- [Replicated](../../engines/database-engines/replicated.md)

View File

@ -82,6 +82,8 @@ MySQL DDL queries are converted into the corresponding ClickHouse DDL queries ([
- If `_sign` is not specified in the `SELECT` query, `WHERE _sign=1` is used by default. So the deleted rows are not included in the result set.
- The result includes columns comments in case they exist in MySQL database tables.
### Index Conversion {#index-conversion}
MySQL `PRIMARY KEY` and `INDEX` clauses are converted into `ORDER BY` tuples in ClickHouse tables.

View File

@ -0,0 +1,115 @@
# [experimental] Replicated {#replicated}
The engine is based on the [Atomic](../../engines/database-engines/atomic.md) engine. It supports replication of metadata via a DDL log that is written to ZooKeeper and executed on all replicas of a given database.
One ClickHouse server can have multiple replicated databases running and updating at the same time. But there can't be multiple replicas of the same replicated database.
## Creating a Database {#creating-a-database}
``` sql
CREATE DATABASE testdb ENGINE = Replicated('zoo_path', 'shard_name', 'replica_name') [SETTINGS ...]
```
**Engine Parameters**
- `zoo_path` — ZooKeeper path. The same ZooKeeper path corresponds to the same database.
- `shard_name` — Shard name. Database replicas are grouped into shards by `shard_name`.
- `replica_name` — Replica name. Replica names must be different for all replicas of the same shard.
!!! note "Warning"
For [ReplicatedMergeTree](../table-engines/mergetree-family/replication.md#table_engines-replication) tables, if no arguments are provided, the default arguments are used: `/clickhouse/tables/{uuid}/{shard}` and `{replica}`. These can be changed in the server settings [default_replica_path](../../operations/server-configuration-parameters/settings.md#default_replica_path) and [default_replica_name](../../operations/server-configuration-parameters/settings.md#default_replica_name). The macro `{uuid}` is expanded to the table's uuid, and `{shard}` and `{replica}` are expanded to values from the server config, not from the database engine arguments. But in the future, it will be possible to use the `shard_name` and `replica_name` of the Replicated database.
## Specifics and Recommendations {#specifics-and-recommendations}
DDL queries with `Replicated` database work in a similar way to [ON CLUSTER](../../sql-reference/distributed-ddl.md) queries, but with minor differences.
First, the DDL request tries to execute on the initiator (the host that originally received the request from the user). If the request is not fulfilled, the user immediately receives an error, and the other hosts do not try to fulfill it. If the request has been successfully completed on the initiator, then all other hosts will automatically retry until they complete it. The initiator will try to wait for the query to be completed on other hosts (no longer than [distributed_ddl_task_timeout](../../operations/settings/settings.md#distributed_ddl_task_timeout)) and will return a table with the query execution statuses on each host.
The behavior in case of errors is regulated by the [distributed_ddl_output_mode](../../operations/settings/settings.md#distributed_ddl_output_mode) setting, for a `Replicated` database it is better to set it to `null_status_on_timeout` — i.e. if some hosts did not have time to execute the request for [distributed_ddl_task_timeout](../../operations/settings/settings.md#distributed_ddl_task_timeout), then do not throw an exception, but show the `NULL` status for them in the table.
The [system.clusters](../../operations/system-tables/clusters.md) system table contains a cluster named like the replicated database, which consists of all replicas of the database. This cluster is updated automatically when creating/deleting replicas, and it can be used for [Distributed](../../engines/table-engines/special/distributed.md#distributed) tables.
When creating a new replica of the database, this replica creates tables by itself. If the replica has been unavailable for a long time and has lagged behind the replication log, it checks its local metadata against the current metadata in ZooKeeper, moves the extra tables with data to a separate non-replicated database (so as not to accidentally delete anything superfluous), creates the missing tables, and updates the table names if they have been renamed. The data is replicated at the `ReplicatedMergeTree` level, i.e. if the table is not replicated, the data will not be replicated (the database is responsible only for metadata).
## Usage Example {#usage-example}
Creating a cluster with three hosts:
``` sql
node1 :) CREATE DATABASE r ENGINE=Replicated('some/path/r','shard1','replica1');
node2 :) CREATE DATABASE r ENGINE=Replicated('some/path/r','shard1','other_replica');
node3 :) CREATE DATABASE r ENGINE=Replicated('some/path/r','other_shard','{replica}');
```
Running the DDL-query:
``` sql
CREATE TABLE r.rmt (n UInt64) ENGINE=ReplicatedMergeTree ORDER BY n;
```
``` text
┌─────hosts────────────┬──status─┬─error─┬─num_hosts_remaining─┬─num_hosts_active─┐
│ shard1|replica1 │ 0 │ │ 2 │ 0 │
│ shard1|other_replica │ 0 │ │ 1 │ 0 │
│ other_shard|r1 │ 0 │ │ 0 │ 0 │
└──────────────────────┴─────────┴───────┴─────────────────────┴──────────────────┘
```
Showing the system table:
``` sql
SELECT cluster, shard_num, replica_num, host_name, host_address, port, is_local
FROM system.clusters WHERE cluster='r';
```
``` text
┌─cluster─┬─shard_num─┬─replica_num─┬─host_name─┬─host_address─┬─port─┬─is_local─┐
│ r │ 1 │ 1 │ node3 │ 127.0.0.1 │ 9002 │ 0 │
│ r │ 2 │ 1 │ node2 │ 127.0.0.1 │ 9001 │ 0 │
│ r │ 2 │ 2 │ node1 │ 127.0.0.1 │ 9000 │ 1 │
└─────────┴───────────┴─────────────┴───────────┴──────────────┴──────┴──────────┘
```
Creating a distributed table and inserting the data:
``` sql
node2 :) CREATE TABLE r.d (n UInt64) ENGINE=Distributed('r','r','rmt', n % 2);
node3 :) INSERT INTO r.d SELECT * FROM numbers(10);
node1 :) SELECT materialize(hostName()) AS host, groupArray(n) FROM r.d GROUP BY host;
```
``` text
┌─hosts─┬─groupArray(n)─┐
│ node1 │ [1,3,5,7,9] │
│ node2 │ [0,2,4,6,8] │
└───────┴───────────────┘
```
Adding a replica on one more host:
``` sql
node4 :) CREATE DATABASE r ENGINE=Replicated('some/path/r','other_shard','r2');
```
The cluster configuration will look like this:
``` text
┌─cluster─┬─shard_num─┬─replica_num─┬─host_name─┬─host_address─┬─port─┬─is_local─┐
│ r │ 1 │ 1 │ node3 │ 127.0.0.1 │ 9002 │ 0 │
│ r │ 1 │ 2 │ node4 │ 127.0.0.1 │ 9003 │ 0 │
│ r │ 2 │ 1 │ node2 │ 127.0.0.1 │ 9001 │ 0 │
│ r │ 2 │ 2 │ node1 │ 127.0.0.1 │ 9000 │ 1 │
└─────────┴───────────┴─────────────┴───────────┴──────────────┴──────┴──────────┘
```
The distributed table will also get data from the new host:
```sql
node2 :) SELECT materialize(hostName()) AS host, groupArray(n) FROM r.d GROUP BY host;
```
```text
┌─hosts─┬─groupArray(n)─┐
│ node2 │ [1,3,5,7,9] │
│ node4 │ [0,2,4,6,8] │
└───────┴───────────────┘
```

View File

@ -76,7 +76,7 @@ For a description of parameters, see the [CREATE query description](../../../sql
- `SAMPLE BY` — An expression for sampling. Optional.
If a sampling expression is used, the primary key must contain it. Example: `SAMPLE BY intHash32(UserID) ORDER BY (CounterID, EventDate, intHash32(UserID))`.
If a sampling expression is used, the primary key must contain it. The result of a sampling expression must be an unsigned integer. Example: `SAMPLE BY intHash32(UserID) ORDER BY (CounterID, EventDate, intHash32(UserID))`.
- `TTL` — A list of rules specifying storage duration of rows and defining logic of automatic parts movement [between disks and volumes](#table_engine-mergetree-multiple-volumes). Optional.

View File

@ -37,6 +37,14 @@ Also, it accepts the following settings:
- `max_delay_to_insert` - max delay of inserting data into Distributed table in seconds, if there are a lot of pending bytes for async send. Default 60.
- `monitor_batch_inserts` - same as [distributed_directory_monitor_batch_inserts](../../../operations/settings/settings.md#distributed_directory_monitor_batch_inserts)
- `monitor_split_batch_on_failure` - same as [distributed_directory_monitor_split_batch_on_failure](../../../operations/settings/settings.md#distributed_directory_monitor_split_batch_on_failure)
- `monitor_sleep_time_ms` - same as [distributed_directory_monitor_sleep_time_ms](../../../operations/settings/settings.md#distributed_directory_monitor_sleep_time_ms)
- `monitor_max_sleep_time_ms` - same as [distributed_directory_monitor_max_sleep_time_ms](../../../operations/settings/settings.md#distributed_directory_monitor_max_sleep_time_ms)
!!! note "Note"
**Durability settings** (`fsync_...`):

View File

@ -1130,17 +1130,18 @@ The table below shows supported data types and how they match ClickHouse [data t
| `boolean`, `int`, `long`, `float`, `double` | [Int64](../sql-reference/data-types/int-uint.md), [UInt64](../sql-reference/data-types/int-uint.md) | `long` |
| `boolean`, `int`, `long`, `float`, `double` | [Float32](../sql-reference/data-types/float.md) | `float` |
| `boolean`, `int`, `long`, `float`, `double` | [Float64](../sql-reference/data-types/float.md) | `double` |
| `bytes`, `string`, `fixed`, `enum` | [String](../sql-reference/data-types/string.md) | `bytes` |
| `bytes`, `string`, `fixed`, `enum` | [String](../sql-reference/data-types/string.md) | `bytes` or `string` \* |
| `bytes`, `string`, `fixed` | [FixedString(N)](../sql-reference/data-types/fixedstring.md) | `fixed(N)` |
| `enum` | [Enum(8\|16)](../sql-reference/data-types/enum.md) | `enum` |
| `array(T)` | [Array(T)](../sql-reference/data-types/array.md) | `array(T)` |
| `union(null, T)`, `union(T, null)` | [Nullable(T)](../sql-reference/data-types/date.md) | `union(null, T)` |
| `null` | [Nullable(Nothing)](../sql-reference/data-types/special-data-types/nothing.md) | `null` |
| `int (date)` \* | [Date](../sql-reference/data-types/date.md) | `int (date)` \* |
| `long (timestamp-millis)` \* | [DateTime64(3)](../sql-reference/data-types/datetime.md) | `long (timestamp-millis)` \* |
| `long (timestamp-micros)` \* | [DateTime64(6)](../sql-reference/data-types/datetime.md) | `long (timestamp-micros)` \* |
| `int (date)` \** | [Date](../sql-reference/data-types/date.md) | `int (date)` \** |
| `long (timestamp-millis)` \** | [DateTime64(3)](../sql-reference/data-types/datetime.md) | `long (timestamp-millis)` \* |
| `long (timestamp-micros)` \** | [DateTime64(6)](../sql-reference/data-types/datetime.md) | `long (timestamp-micros)` \* |
\* [Avro logical types](https://avro.apache.org/docs/current/spec.html#Logical+Types)
\* `bytes` is the default, controlled by [output_format_avro_string_column_pattern](../operations/settings/settings.md#settings-output_format_avro_string_column_pattern)
\** [Avro logical types](https://avro.apache.org/docs/current/spec.html#Logical+Types)
Unsupported Avro data types: `record` (non-root), `map`
@ -1246,12 +1247,14 @@ The table below shows supported data types and how they match ClickHouse [data t
| `DOUBLE` | [Float64](../sql-reference/data-types/float.md) | `DOUBLE` |
| `DATE32` | [Date](../sql-reference/data-types/date.md) | `UINT16` |
| `DATE64`, `TIMESTAMP` | [DateTime](../sql-reference/data-types/datetime.md) | `UINT32` |
| `STRING`, `BINARY` | [String](../sql-reference/data-types/string.md) | `STRING` |
| — | [FixedString](../sql-reference/data-types/fixedstring.md) | `STRING` |
| `STRING`, `BINARY` | [String](../sql-reference/data-types/string.md) | `BINARY` |
| — | [FixedString](../sql-reference/data-types/fixedstring.md) | `BINARY` |
| `DECIMAL` | [Decimal](../sql-reference/data-types/decimal.md) | `DECIMAL` |
| `LIST` | [Array](../sql-reference/data-types/array.md) | `LIST` |
| `STRUCT` | [Tuple](../sql-reference/data-types/tuple.md) | `STRUCT` |
| `MAP` | [Map](../sql-reference/data-types/map.md) | `MAP` |
Arrays can be nested and can have a value of the `Nullable` type as an argument.
Arrays can be nested and can have a value of the `Nullable` type as an argument. `Tuple` and `Map` types can also be nested.
ClickHouse supports configurable precision of `Decimal` type. The `INSERT` query treats the Parquet `DECIMAL` type as the ClickHouse `Decimal128` type.
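The nested-type support described above can be sketched as follows; the table and column names are illustrative assumptions, not taken from the document:

```sql
-- Hypothetical table whose Tuple and Map columns map to Parquet STRUCT and MAP.
CREATE TABLE parquet_nested
(
    id    UInt64,
    point Tuple(x Float64, y Float64),
    tags  Map(String, Array(Nullable(String)))
)
ENGINE = MergeTree
ORDER BY id
```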
@ -1299,13 +1302,17 @@ The table below shows supported data types and how they match ClickHouse [data t
| `DOUBLE` | [Float64](../sql-reference/data-types/float.md) | `FLOAT64` |
| `DATE32` | [Date](../sql-reference/data-types/date.md) | `UINT16` |
| `DATE64`, `TIMESTAMP` | [DateTime](../sql-reference/data-types/datetime.md) | `UINT32` |
| `STRING`, `BINARY` | [String](../sql-reference/data-types/string.md) | `UTF8` |
| `STRING`, `BINARY` | [FixedString](../sql-reference/data-types/fixedstring.md) | `UTF8` |
| `STRING`, `BINARY` | [String](../sql-reference/data-types/string.md) | `BINARY` |
| `STRING`, `BINARY` | [FixedString](../sql-reference/data-types/fixedstring.md) | `BINARY` |
| `DECIMAL` | [Decimal](../sql-reference/data-types/decimal.md) | `DECIMAL` |
| `DECIMAL256` | [Decimal256](../sql-reference/data-types/decimal.md)| `DECIMAL256` |
| `LIST` | [Array](../sql-reference/data-types/array.md) | `LIST` |
| `STRUCT` | [Tuple](../sql-reference/data-types/tuple.md) | `STRUCT` |
| `MAP` | [Map](../sql-reference/data-types/map.md) | `MAP` |
Arrays can be nested and can have a value of the `Nullable` type as an argument.
Arrays can be nested and can have a value of the `Nullable` type as an argument. `Tuple` and `Map` types can also be nested.
The `DICTIONARY` type is supported for `INSERT` queries, and for `SELECT` queries there is an [output_format_arrow_low_cardinality_as_dictionary](../operations/settings/settings.md#output-format-arrow-low-cardinality-as-dictionary) setting that allows outputting the [LowCardinality](../sql-reference/data-types/lowcardinality.md) type as a `DICTIONARY` type.
ClickHouse supports configurable precision of the `Decimal` type. The `INSERT` query treats the Arrow `DECIMAL` type as the ClickHouse `Decimal128` type.
@ -1358,8 +1365,10 @@ The table below shows supported data types and how they match ClickHouse [data t
| `STRING`, `BINARY` | [String](../sql-reference/data-types/string.md) | `BINARY` |
| `DECIMAL` | [Decimal](../sql-reference/data-types/decimal.md) | `DECIMAL` |
| `LIST` | [Array](../sql-reference/data-types/array.md) | `LIST` |
| `STRUCT` | [Tuple](../sql-reference/data-types/tuple.md) | `STRUCT` |
| `MAP` | [Map](../sql-reference/data-types/map.md) | `MAP` |
Arrays can be nested and can have a value of the `Nullable` type as an argument.
Arrays can be nested and can have a value of the `Nullable` type as an argument. `Tuple` and `Map` types can also be nested.
ClickHouse supports configurable precision of the `Decimal` type. The `INSERT` query treats the ORC `DECIMAL` type as the ClickHouse `Decimal128` type.
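As one possible way to load such data, the `file` table function can read ORC files placed in the server's `user_files` directory; the file name, target table, and structure below are hypothetical:

```sql
-- Hypothetical example: read an ORC file and insert it into an existing table.
INSERT INTO target_table
SELECT *
FROM file('data.orc', 'ORC', 'id UInt64, name String, price Decimal(18, 4)')
```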

View File

@ -10,7 +10,7 @@ ClickHouse server use [ZooKeeper](https://zookeeper.apache.org/) coordination sy
!!! warning "Warning"
    This feature is currently in the pre-production stage. We test it in our CI and on small internal installations.
## Implemetation details
## Implementation details
ZooKeeper is one of the first well-known open-source coordination systems. It is implemented in Java and has quite a simple and powerful data model. ZooKeeper's coordination algorithm, ZAB (ZooKeeper Atomic Broadcast), doesn't provide linearizability guarantees for reads, because each ZooKeeper node serves reads locally. Unlike ZooKeeper, `clickhouse-keeper` is written in C++ and uses the [RAFT algorithm](https://raft.github.io/) [implementation](https://github.com/eBay/NuRaft). This algorithm provides linearizability for reads and writes and has several open-source implementations in different languages.

View File

@ -278,4 +278,16 @@ Possible values:
Default value: `0`.
## check_sample_column_is_correct {#check_sample_column_is_correct}
Enables the check that the column or expression used for sampling is correct at table creation.
Possible values:
- true — The sampling column or expression is checked at table creation.
- false — The sampling column or expression is not checked at table creation.
Default value: `true`.
By default, the ClickHouse server checks the sampling column or expression at table creation. If you already have tables with an incorrect sampling expression, set the value to `false` so that the server does not raise an exception at startup.
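A minimal sketch of how the setting might be used; the table definition is hypothetical and only illustrates disabling the check for a legacy signed sampling column:

```sql
CREATE TABLE legacy_sampled
(
    CounterID UInt32,
    EventDate Date,
    UserID    Int64
)
ENGINE = MergeTree
ORDER BY (CounterID, EventDate, UserID)
SAMPLE BY UserID  -- signed type: would fail the check if it were enabled
SETTINGS check_sample_column_is_correct = 0
```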
[Original article](https://clickhouse.tech/docs/en/operations/settings/merge_tree_settings/) <!--hide-->

View File

@ -1738,7 +1738,7 @@ Default value: 0.
## optimize_functions_to_subcolumns {#optimize-functions-to-subcolumns}
Enables or disables optimization by transforming some functions to reading subcolumns. This reduces the amount of data to read.
Enables or disables optimization by transforming some functions to reading subcolumns. This reduces the amount of data to read.
These functions can be transformed:
@ -1969,6 +1969,13 @@ Possible values: 32 (32 bytes) - 1073741824 (1 GiB)
Default value: 32768 (32 KiB)
## output_format_avro_string_column_pattern {#output_format_avro_string_column_pattern}
Regexp of column names of type String to output as Avro `string` (default is `bytes`).
RE2 syntax is supported.
Type: string
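A hedged sketch of applying the setting to a single query; the table and column names are assumptions, not taken from the document:

```sql
-- Columns matching the pattern are written as Avro string, the rest as bytes.
SELECT id, name, comment
FROM events
SETTINGS output_format_avro_string_column_pattern = 'name|comment'
FORMAT Avro
```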
## format_avro_schema_registry_url {#format_avro_schema_registry_url}
Sets [Confluent Schema Registry](https://docs.confluent.io/current/schema-registry/index.html) URL to use with [AvroConfluent](../../interfaces/formats.md#data-format-avro-confluent) format.
@ -3141,6 +3148,53 @@ SELECT
FROM fuse_tbl
```
## allow_experimental_database_replicated {#allow_experimental_database_replicated}
Allows creating databases with the [Replicated](../../engines/database-engines/replicated.md) engine.
Possible values:
- 0 — Disabled.
- 1 — Enabled.
Default value: `0`.
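For illustration, the setting has to be enabled before such a database can be created; the ZooKeeper path, shard, and replica names below are placeholders:

```sql
SET allow_experimental_database_replicated = 1;

CREATE DATABASE demo_replicated
ENGINE = Replicated('/clickhouse/databases/demo', 'shard1', 'replica1');
```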
## database_replicated_initial_query_timeout_sec {#database_replicated_initial_query_timeout_sec}
Sets how long the initial DDL query should wait for the Replicated database to process previous DDL queue entries, in seconds.
Possible values:
- Positive integer.
- 0 — Unlimited.
Default value: `300`.
## distributed_ddl_task_timeout {#distributed_ddl_task_timeout}
Sets the timeout for DDL query responses from all hosts in the cluster. If a DDL request has not been performed on all hosts, the response will contain a timeout error and the request will be executed in async mode. A negative value means an infinite timeout.
Possible values:
- Positive integer.
- 0 — Async mode.
- Negative integer — infinite timeout.
Default value: `180`.
## distributed_ddl_output_mode {#distributed_ddl_output_mode}
Sets format of distributed DDL query result.
Possible values:
- `throw` — Returns a result set with the query execution status for all hosts where the query has finished. If the query has failed on some hosts, the first exception is rethrown. If the query is not finished yet on some hosts and [distributed_ddl_task_timeout](#distributed_ddl_task_timeout) is exceeded, a `TIMEOUT_EXCEEDED` exception is thrown.
- `none` — Similar to `throw`, but the distributed DDL query returns no result set.
- `null_status_on_timeout` — Returns `NULL` as the execution status in some rows of the result set instead of throwing `TIMEOUT_EXCEEDED` if the query is not finished on the corresponding hosts.
- `never_throw` — Does not throw `TIMEOUT_EXCEEDED` and does not rethrow exceptions if the query has failed on some hosts.
Default value: `throw`.
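A sketch combining the two DDL settings above for a cluster-wide query; the cluster and table names are hypothetical:

```sql
SET distributed_ddl_task_timeout = 60;
SET distributed_ddl_output_mode = 'null_status_on_timeout';

-- Hosts that do not answer within 60 seconds get NULL status instead of an exception.
CREATE TABLE test_table ON CLUSTER my_cluster
(
    id UInt64
)
ENGINE = MergeTree
ORDER BY id;
```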
## flatten_nested {#flatten-nested}
Sets the data format of [nested](../../sql-reference/data-types/nested-data-structures/nested.md) columns.
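A sketch of the setting's effect, assuming a hypothetical table; with `flatten_nested = 0` the nested column stays a single array-of-tuples column instead of being split into separate `items.name` and `items.qty` arrays:

```sql
SET flatten_nested = 0;

CREATE TABLE nested_demo
(
    id    UInt64,
    items Nested(name String, qty UInt32)
)
ENGINE = MergeTree
ORDER BY id;
```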
@ -3220,3 +3274,14 @@ Default value: `1`.
**Usage**
If the setting is set to `0`, the table function does not make Nullable columns and inserts default values instead of NULL. This is also applicable for NULL values inside arrays.
## output_format_arrow_low_cardinality_as_dictionary {#output-format-arrow-low-cardinality-as-dictionary}
Allows converting the [LowCardinality](../../sql-reference/data-types/lowcardinality.md) type to the `DICTIONARY` type of the [Arrow](../../interfaces/formats.md#data-format-arrow) format for `SELECT` queries.
Possible values:
- 0 — The `LowCardinality` type is not converted to the `DICTIONARY` type.
- 1 — The `LowCardinality` type is converted to the `DICTIONARY` type.
Default value: `0`.
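A sketch of applying the setting to one query; the table and column names are assumptions:

```sql
-- status is assumed to be a LowCardinality(String) column; it is exported as an Arrow DICTIONARY.
SELECT id, status
FROM events
SETTINGS output_format_arrow_low_cardinality_as_dictionary = 1
FORMAT Arrow
```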

View File

@ -47,6 +47,7 @@ Settings:
- [low_cardinality_use_single_dictionary_for_part](../../operations/settings/settings.md#low_cardinality_use_single_dictionary_for_part)
- [low_cardinality_allow_in_native_format](../../operations/settings/settings.md#low_cardinality_allow_in_native_format)
- [allow_suspicious_low_cardinality_types](../../operations/settings/settings.md#allow_suspicious_low_cardinality_types)
- [output_format_arrow_low_cardinality_as_dictionary](../../operations/settings/settings.md#output-format-arrow-low-cardinality-as-dictionary)
Functions:
@ -57,5 +58,3 @@ Functions:
- [A Magical Mystery Tour of the LowCardinality Data Type](https://www.altinity.com/blog/2019/3/27/low-cardinality).
- [Reducing ClickHouse Storage Cost with the Low Cardinality Type Lessons from an Instana Engineer](https://www.instana.com/blog/reducing-clickhouse-storage-cost-with-the-low-cardinality-type-lessons-from-an-instana-engineer/).
- [String Optimization (video presentation in Russian)](https://youtu.be/rqf-ILRgBdY?list=PL0Z2YDlm0b3iwXCpEFiOOYmwXzVmjJfEt). [Slides in English](https://github.com/yandex/clickhouse-presentations/raw/master/meetup19/string_optimization.pdf).
[Original article](https://clickhouse.tech/docs/en/sql-reference/data-types/lowcardinality/) <!--hide-->

View File

@ -211,7 +211,7 @@ SELECT nullIf(1, 2);
## assumeNotNull {#assumenotnull}
Results in a value of type [Nullable](../../sql-reference/data-types/nullable.md) for a non- `Nullable`, if the value is not `NULL`.
Results in an equivalent non-`Nullable` value for a [Nullable](../../sql-reference/data-types/nullable.md) type. If the original value is `NULL`, the result is undetermined. See also the `ifNull` and `coalesce` functions.
``` sql
assumeNotNull(x)
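-- Hypothetical usage sketch: `y` is assumed to be a Nullable column of a table `t`.
-- SELECT assumeNotNull(y) FROM t WHERE y IS NOT NULL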

View File

@ -465,27 +465,29 @@ Result:
## CAST(x, T) {#type_conversion_function-cast}
Converts input value `x` to the `T` data type. Unlike to `reinterpret` function, type conversion is performed in a natural way.
The syntax `CAST(x AS t)` is also supported.
!!! note "Note"
If value `x` does not fit the bounds of type `T`, the function overflows. For example, `CAST(-1, 'UInt8')` returns `255`.
Converts an input value to the specified data type. Unlike the [reinterpret](#type_conversion_function-reinterpret) function, `CAST` tries to present the same value using the new data type. If the conversion cannot be done, an exception is raised.
Several syntax variants are supported.
**Syntax**
``` sql
CAST(x, T)
CAST(x AS t)
x::t
```
**Arguments**
- `x` — Any type.
- `T` — Destination type. [String](../../sql-reference/data-types/string.md).
- `x` — A value to convert. May be of any type.
- `T` — The name of the target data type. [String](../../sql-reference/data-types/string.md).
- `t` — The target data type.
**Returned value**
- Destination type value.
- Converted value.
!!! note "Note"
If the input value does not fit the bounds of the target type, the result overflows. For example, `CAST(-1, 'UInt8')` returns `255`.
**Examples**
@ -494,16 +496,16 @@ Query:
```sql
SELECT
CAST(toInt8(-1), 'UInt8') AS cast_int_to_uint,
CAST(toInt8(1), 'Float32') AS cast_int_to_float,
CAST('1', 'UInt32') AS cast_string_to_int;
CAST(1.5 AS Decimal(3,2)) AS cast_float_to_decimal,
'1'::Int32 AS cast_string_to_int;
```
Result:
```
┌─cast_int_to_uint─┬─cast_int_to_float─┬─cast_string_to_int─┐
│ 255 │ 1 │ 1 │
└──────────────────┴───────────────────┴────────────────────┘
┌─cast_int_to_uint─┬─cast_float_to_decimal─┬─cast_string_to_int─┐
│ 255 │ 1.50 │ 1 │
└──────────────────┴───────────────────────┴────────────────────┘
```
Query:

View File

@ -189,7 +189,7 @@ CREATE TABLE codec_example
dt Date CODEC(ZSTD),
ts DateTime CODEC(LZ4HC),
float_value Float32 CODEC(NONE),
double_value Float64 CODEC(LZ4HC(9))
double_value Float64 CODEC(LZ4HC(9)),
value Float32 CODEC(Delta, ZSTD)
)
ENGINE = <Engine>

View File

@ -128,7 +128,7 @@ Ninja - система запуска сборочных задач.
/usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
brew install cmake ninja
Проверьте версию CMake: `cmake --version`. Если версия меньше 3.3, то установите новую версию с сайта https://cmake.org/download/
Проверьте версию CMake: `cmake --version`. Если версия меньше 3.12, то установите новую версию с сайта https://cmake.org/download/
## Необязательные внешние библиотеки {#neobiazatelnye-vneshnie-biblioteki}

View File

@ -20,3 +20,5 @@ toc_title: "Введение"
- [PostgreSQL](../../engines/database-engines/postgresql.md)
- [Replicated](../../engines/database-engines/replicated.md)

View File

@ -1,3 +1,4 @@
---
toc_priority: 29
toc_title: MaterializeMySQL
@ -80,7 +81,9 @@ DDL-запросы в MySQL конвертируются в соответств
- Если в запросе `SELECT` напрямую не указан столбец `_version`, то используется модификатор [FINAL](../../sql-reference/statements/select/from.md#select-from-final). Таким образом, выбираются только строки с `MAX(_version)`.
- Если в запросе `SELECT` напрямую не указан столбец `_sign`, то по умолчанию используется `WHERE _sign=1`. Таким образом, удаленные строки не включаются в результирующий набор.
- Если в запросе `SELECT` напрямую не указан столбец `_sign`, то по умолчанию используется `WHERE _sign=1`. Таким образом, удаленные строки не включаются в результирующий набор.
- Результат включает комментарии к столбцам, если они существуют в таблицах базы данных MySQL.
### Конвертация индексов {#index-conversion}

View File

@ -0,0 +1,119 @@
# [экспериментальный] Replicated {#replicated}
Движок основан на движке [Atomic](../../engines/database-engines/atomic.md). Он поддерживает репликацию метаданных через журнал DDL, записываемый в ZooKeeper и выполняемый на всех репликах для данной базы данных.
На одном сервере ClickHouse может одновременно работать и обновляться несколько реплицированных баз данных. Но не может существовать нескольких реплик одной и той же реплицированной базы данных.
## Создание базы данных {#creating-a-database}
``` sql
CREATE DATABASE testdb ENGINE = Replicated('zoo_path', 'shard_name', 'replica_name') [SETTINGS ...]
```
**Параметры движка**
- `zoo_path` — путь в ZooKeeper. Один и тот же путь ZooKeeper соответствует одной и той же базе данных.
- `shard_name` — Имя шарда. Реплики базы данных группируются в шарды по имени.
- `replica_name` — Имя реплики. Имена реплик должны быть разными для всех реплик одного и того же шарда.
!!! note "Предупреждение"
Для таблиц [ReplicatedMergeTree](../table-engines/mergetree-family/replication.md#table_engines-replication) если аргументы не заданы, то используются аргументы по умолчанию: `/clickhouse/tables/{uuid}/{shard}` и `{replica}`. Они могут быть изменены в серверных настройках: [default_replica_path](../../operations/server-configuration-parameters/settings.md#default_replica_path) и [default_replica_name](../../operations/server-configuration-parameters/settings.md#default_replica_name). Макрос `{uuid}` раскрывается в `UUID` таблицы, `{shard}` и `{replica}` — в значения из конфига сервера. В будущем появится возможность использовать значения `shard_name` и `replica_name` аргументов движка базы данных `Replicated`.
## Особенности и рекомендации {#specifics-and-recommendations}
DDL-запросы с базой данных `Replicated` работают похожим образом на [ON CLUSTER](../../sql-reference/distributed-ddl.md) запросы, но с небольшими отличиями.
Сначала DDL-запрос пытается выполниться на инициаторе (том хосте, который изначально получил запрос от пользователя). Если запрос не выполнился, то пользователь сразу получает ошибку, другие хосты не пытаются его выполнить. Если запрос успешно выполнился на инициаторе, то все остальные хосты будут автоматически делать попытки выполнить его.
Инициатор попытается дождаться выполнения запроса на других хостах (не дольше [distributed_ddl_task_timeout](../../operations/settings/settings.md#distributed_ddl_task_timeout)) и вернёт таблицу со статусами выполнения запроса на каждом хосте.
Поведение в случае ошибок регулируется настройкой [distributed_ddl_output_mode](../../operations/settings/settings.md#distributed_ddl_output_mode), для `Replicated` лучше выставлять её в `null_status_on_timeout` — т.е. если какие-то хосты не успели выполнить запрос за [distributed_ddl_task_timeout](../../operations/settings/settings.md#distributed_ddl_task_timeout), то вместо исключения для них будет показан статус `NULL` в таблице.
В системной таблице [system.clusters](../../operations/system-tables/clusters.md) есть кластер с именем, как у реплицируемой базы, который состоит из всех реплик базы. Этот кластер обновляется автоматически при создании/удалении реплик, и его можно использовать для [Distributed](../../engines/table-engines/special/distributed.md#distributed) таблиц.
При создании новой реплики базы, эта реплика сама создаёт таблицы. Если реплика долго была недоступна и отстала от лога репликации — она сверяет свои локальные метаданные с актуальными метаданными в ZooKeeper, перекладывает лишние таблицы с данными в отдельную нереплицируемую базу (чтобы случайно не удалить что-нибудь лишнее), создаёт недостающие таблицы, обновляет имена таблиц, если были переименования. Данные реплицируются на уровне `ReplicatedMergeTree`, т.е. если таблица не реплицируемая, то данные реплицироваться не будут (база отвечает только за метаданные).
## Примеры использования {#usage-example}
Создадим реплицируемую базу на трех хостах:
``` sql
node1 :) CREATE DATABASE r ENGINE=Replicated('some/path/r','shard1','replica1');
node2 :) CREATE DATABASE r ENGINE=Replicated('some/path/r','shard1','other_replica');
node3 :) CREATE DATABASE r ENGINE=Replicated('some/path/r','other_shard','{replica}');
```
Выполним DDL-запрос на одном из хостов:
``` sql
CREATE TABLE r.rmt (n UInt64) ENGINE=ReplicatedMergeTree ORDER BY n;
```
Запрос выполнится на всех остальных хостах:
``` text
┌─────hosts────────────┬──status─┬─error─┬─num_hosts_remaining─┬─num_hosts_active─┐
│ shard1|replica1 │ 0 │ │ 2 │ 0 │
│ shard1|other_replica │ 0 │ │ 1 │ 0 │
│ other_shard|r1 │ 0 │ │ 0 │ 0 │
└──────────────────────┴─────────┴───────┴─────────────────────┴──────────────────┘
```
Кластер в системной таблице `system.clusters`:
``` sql
SELECT cluster, shard_num, replica_num, host_name, host_address, port, is_local
FROM system.clusters WHERE cluster='r';
```
``` text
┌─cluster─┬─shard_num─┬─replica_num─┬─host_name─┬─host_address─┬─port─┬─is_local─┐
│ r │ 1 │ 1 │ node3 │ 127.0.0.1 │ 9002 │ 0 │
│ r │ 2 │ 1 │ node2 │ 127.0.0.1 │ 9001 │ 0 │
│ r │ 2 │ 2 │ node1 │ 127.0.0.1 │ 9000 │ 1 │
└─────────┴───────────┴─────────────┴───────────┴──────────────┴──────┴──────────┘
```
Создадим распределенную таблицу и вставим в нее данные:
``` sql
node2 :) CREATE TABLE r.d (n UInt64) ENGINE=Distributed('r','r','rmt', n % 2);
node3 :) INSERT INTO r SELECT * FROM numbers(10);
node1 :) SELECT materialize(hostName()) AS host, groupArray(n) FROM r.d GROUP BY host;
```
``` text
┌─hosts─┬─groupArray(n)─┐
│ node1 │ [1,3,5,7,9] │
│ node2 │ [0,2,4,6,8] │
└───────┴───────────────┘
```
Добавление реплики:
``` sql
node4 :) CREATE DATABASE r ENGINE=Replicated('some/path/r','other_shard','r2');
```
Новая реплика автоматически создаст все таблицы, которые есть в базе, а старые реплики перезагрузят из ZooKeeper-а конфигурацию кластера:
``` text
┌─cluster─┬─shard_num─┬─replica_num─┬─host_name─┬─host_address─┬─port─┬─is_local─┐
│ r │ 1 │ 1 │ node3 │ 127.0.0.1 │ 9002 │ 0 │
│ r │ 1 │ 2 │ node4 │ 127.0.0.1 │ 9003 │ 0 │
│ r │ 2 │ 1 │ node2 │ 127.0.0.1 │ 9001 │ 0 │
│ r │ 2 │ 2 │ node1 │ 127.0.0.1 │ 9000 │ 1 │
└─────────┴───────────┴─────────────┴───────────┴──────────────┴──────┴──────────┘
```
Распределенная таблица также получит данные от нового хоста:
```sql
node2 :) SELECT materialize(hostName()) AS host, groupArray(n) FROM r.d GROUP BY host;
```
```text
┌─hosts─┬─groupArray(n)─┐
│ node2 │ [1,3,5,7,9] │
│ node4 │ [0,2,4,6,8] │
└───────┴───────────────┘
```

View File

@ -100,9 +100,9 @@ sudo ./clickhouse install
Для других операционных систем и архитектуры AArch64 сборки ClickHouse предоставляются в виде кросс-компилированного бинарного файла из последнего коммита ветки `master` (с задержкой в несколько часов).
- [macOS](https://builds.clickhouse.tech/master/macos/clickhouse) — `curl -O 'https://builds.clickhouse.tech/master/macos/clickhouse' && chmod a+x ./clickhouse`
- [AArch64](https://builds.clickhouse.tech/master/aarch64/clickhouse) — `curl -O 'https://builds.clickhouse.tech/master/aarch64/clickhouse' && chmod a+x ./clickhouse`
- [FreeBSD](https://builds.clickhouse.tech/master/freebsd/clickhouse) — `curl -O 'https://builds.clickhouse.tech/master/freebsd/clickhouse' && chmod a+x ./clickhouse`
- [macOS](https://builds.clickhouse.tech/master/macos/clickhouse) — `curl -O 'https://builds.clickhouse.tech/master/macos/clickhouse' && chmod a+x ./clickhouse`
- [FreeBSD](https://builds.clickhouse.tech/master/freebsd/clickhouse) — `curl -O 'https://builds.clickhouse.tech/master/freebsd/clickhouse' && chmod a+x ./clickhouse`
- [AArch64](https://builds.clickhouse.tech/master/aarch64/clickhouse) — `curl -O 'https://builds.clickhouse.tech/master/aarch64/clickhouse' && chmod a+x ./clickhouse`
После скачивания можно воспользоваться `clickhouse client` для подключения к серверу или `clickhouse local` для обработки локальных данных.

View File

@ -1165,12 +1165,14 @@ SELECT * FROM topic1_stream;
| `DOUBLE` | [Float64](../sql-reference/data-types/float.md) | `DOUBLE` |
| `DATE32` | [Date](../sql-reference/data-types/date.md) | `UINT16` |
| `DATE64`, `TIMESTAMP` | [DateTime](../sql-reference/data-types/datetime.md) | `UINT32` |
| `STRING`, `BINARY` | [String](../sql-reference/data-types/string.md) | `STRING` |
| — | [FixedString](../sql-reference/data-types/fixedstring.md) | `STRING` |
| `STRING`, `BINARY` | [String](../sql-reference/data-types/string.md) | `BINARY` |
| — | [FixedString](../sql-reference/data-types/fixedstring.md) | `BINARY` |
| `DECIMAL` | [Decimal](../sql-reference/data-types/decimal.md) | `DECIMAL` |
| `LIST` | [Array](../sql-reference/data-types/array.md) | `LIST` |
| `STRUCT` | [Tuple](../sql-reference/data-types/tuple.md) | `STRUCT` |
| `MAP` | [Map](../sql-reference/data-types/map.md) | `MAP` |
Массивы могут быть вложенными и иметь в качестве аргумента значение типа `Nullable`.
Массивы могут быть вложенными и иметь в качестве аргумента значение типа `Nullable`. Типы `Tuple` и `Map` также могут быть вложенными.
ClickHouse поддерживает настраиваемую точность для формата `Decimal`. При выполнении запроса `INSERT` ClickHouse обрабатывает тип данных Parquet `DECIMAL` как `Decimal128`.
@ -1218,12 +1220,17 @@ $ clickhouse-client --query="SELECT * FROM {some_table} FORMAT Parquet" > {some_
| `DOUBLE` | [Float64](../sql-reference/data-types/float.md) | `FLOAT64` |
| `DATE32` | [Date](../sql-reference/data-types/date.md) | `UINT16` |
| `DATE64`, `TIMESTAMP` | [DateTime](../sql-reference/data-types/datetime.md) | `UINT32` |
| `STRING`, `BINARY` | [String](../sql-reference/data-types/string.md) | `UTF8` |
| `STRING`, `BINARY` | [FixedString](../sql-reference/data-types/fixedstring.md) | `UTF8` |
| `STRING`, `BINARY` | [String](../sql-reference/data-types/string.md) | `BINARY` |
| `STRING`, `BINARY` | [FixedString](../sql-reference/data-types/fixedstring.md) | `BINARY` |
| `DECIMAL` | [Decimal](../sql-reference/data-types/decimal.md) | `DECIMAL` |
| `DECIMAL256` | [Decimal256](../sql-reference/data-types/decimal.md)| `DECIMAL256` |
| `LIST` | [Array](../sql-reference/data-types/array.md) | `LIST` |
| `STRUCT` | [Tuple](../sql-reference/data-types/tuple.md) | `STRUCT` |
| `MAP` | [Map](../sql-reference/data-types/map.md) | `MAP` |
Массивы могут быть вложенными и иметь в качестве аргумента значение типа `Nullable`.
Массивы могут быть вложенными и иметь в качестве аргумента значение типа `Nullable`. Типы `Tuple` и `Map` также могут быть вложенными.
Тип `DICTIONARY` поддерживается для запросов `INSERT`. Для запросов `SELECT` есть настройка [output_format_arrow_low_cardinality_as_dictionary](../operations/settings/settings.md#output-format-arrow-low-cardinality-as-dictionary), которая позволяет выводить тип [LowCardinality](../sql-reference/data-types/lowcardinality.md) как `DICTIONARY`.
ClickHouse поддерживает настраиваемую точность для формата `Decimal`. При выполнении запроса `INSERT` ClickHouse обрабатывает тип данных Arrow `DECIMAL` как `Decimal128`.
@ -1276,8 +1283,10 @@ $ clickhouse-client --query="SELECT * FROM {some_table} FORMAT Arrow" > {filenam
| `STRING`, `BINARY` | [String](../sql-reference/data-types/string.md) | `BINARY` |
| `DECIMAL` | [Decimal](../sql-reference/data-types/decimal.md) | `DECIMAL` |
| `LIST` | [Array](../sql-reference/data-types/array.md) | `LIST` |
| `STRUCT` | [Tuple](../sql-reference/data-types/tuple.md) | `STRUCT` |
| `MAP` | [Map](../sql-reference/data-types/map.md) | `MAP` |
Массивы могут быть вложенными и иметь в качестве аргумента значение типа `Nullable`.
Массивы могут быть вложенными и иметь в качестве аргумента значение типа `Nullable`. Типы `Tuple` и `Map` также могут быть вложенными.
ClickHouse поддерживает настраиваемую точность для формата `Decimal`. При выполнении запроса `INSERT` ClickHouse обрабатывает тип данных ORC `DECIMAL` как `Decimal128`.

View File

@ -2986,6 +2986,53 @@ SELECT
FROM fuse_tbl
```
## allow_experimental_database_replicated {#allow_experimental_database_replicated}
Позволяет создавать базы данных с движком [Replicated](../../engines/database-engines/replicated.md).
Возможные значения:
- 0 — Disabled.
- 1 — Enabled.
Значение по умолчанию: `0`.
## database_replicated_initial_query_timeout_sec {#database_replicated_initial_query_timeout_sec}
Устанавливает, как долго начальный DDL-запрос должен ждать, пока реплицированная база данных обработает предыдущие записи очереди DDL, в секундах.
Возможные значения:
- Положительное целое число.
- 0 — Не ограничено.
Значение по умолчанию: `300`.
## distributed_ddl_task_timeout {#distributed_ddl_task_timeout}
Устанавливает тайм-аут для ответов на DDL-запросы от всех хостов в кластере. Если DDL-запрос не был выполнен на всех хостах, ответ будет содержать ошибку тайм-аута, и запрос будет выполнен в асинхронном режиме.
Возможные значения:
- Положительное целое число.
- 0 — Асинхронный режим.
- Отрицательное число — бесконечный тайм-аут.
Значение по умолчанию: `180`.
## distributed_ddl_output_mode {#distributed_ddl_output_mode}
Задает формат результата распределенного DDL-запроса.
Возможные значения:
- `throw` — возвращает набор результатов со статусом выполнения запросов для всех хостов, где завершен запрос. Если запрос не выполнился на некоторых хостах, то будет выброшено исключение. Если запрос еще не закончен на некоторых хостах и таймаут [distributed_ddl_task_timeout](#distributed_ddl_task_timeout) превышен, то выбрасывается исключение `TIMEOUT_EXCEEDED`.
- `none` — идентично `throw`, но распределенный DDL-запрос не возвращает набор результатов.
- `null_status_on_timeout` — возвращает `NULL` в качестве статуса выполнения в некоторых строках набора результатов вместо выбрасывания `TIMEOUT_EXCEEDED`, если запрос не закончен на соответствующих хостах.
- `never_throw` — не выбрасывает исключение и `TIMEOUT_EXCEEDED`, если запрос не удался на некоторых хостах.
Значение по умолчанию: `throw`.
## flatten_nested {#flatten-nested}
Устанавливает формат данных у [вложенных](../../sql-reference/data-types/nested-data-structures/nested.md) столбцов.
@ -3066,3 +3113,14 @@ SETTINGS index_granularity = 8192 │
**Использование**
Если установлено значение `0`, то табличная функция не делает Nullable столбцы, а вместо NULL выставляет значения по умолчанию для скалярного типа. Это также применимо для значений NULL внутри массивов.
## output_format_arrow_low_cardinality_as_dictionary {#output-format-arrow-low-cardinality-as-dictionary}
Позволяет конвертировать тип [LowCardinality](../../sql-reference/data-types/lowcardinality.md) в тип `DICTIONARY` формата [Arrow](../../interfaces/formats.md#data-format-arrow) для запросов `SELECT`.
Возможные значения:
- 0 — тип `LowCardinality` не конвертируется в тип `DICTIONARY`.
- 1 — тип `LowCardinality` конвертируется в тип `DICTIONARY`.
Значение по умолчанию: `0`.

View File

@ -15,7 +15,7 @@ LowCardinality(data_type)
**Параметры**
- `data_type` — [String](string.md), [FixedString](fixedstring.md), [Date](date.md), [DateTime](datetime.md) и числа за исключением типа [Decimal](decimal.md). `LowCardinality` неэффективен для некоторых типов данных, см. описание настройки [allow_suspicious_low_cardinality_types](../../operations/settings/settings.md#allow_suspicious_low_cardinality_types).
- `data_type` — [String](string.md), [FixedString](fixedstring.md), [Date](date.md), [DateTime](datetime.md) и числа за исключением типа [Decimal](decimal.md). `LowCardinality` неэффективен для некоторых типов данных, см. описание настройки [allow_suspicious_low_cardinality_types](../../operations/settings/settings.md#allow_suspicious_low_cardinality_types).
## Описание {#lowcardinality-dscr}
@ -23,11 +23,11 @@ LowCardinality(data_type)
Эффективность использования типа данных `LowCarditality` зависит от разнообразия данных. Если словарь содержит менее 10 000 различных значений, ClickHouse в основном показывает более высокую эффективность чтения и хранения данных. Если же словарь содержит более 100 000 различных значений, ClickHouse может работать хуже, чем при использовании обычных типов данных.
При работе со строками, использование `LowCardinality` вместо [Enum](enum.md) обеспечивает большую гибкость в использовании и часто показывает такую же или более высокую эффективность.
При работе со строками использование `LowCardinality` вместо [Enum](enum.md) обеспечивает большую гибкость в использовании и часто показывает такую же или более высокую эффективность.
## Пример
Создать таблицу со столбцами типа `LowCardinality`:
Создание таблицы со столбцами типа `LowCardinality`:
```sql
CREATE TABLE lc_t
@ -43,18 +43,18 @@ ORDER BY id
Настройки:
- [low_cardinality_max_dictionary_size](../../operations/settings/settings.md#low_cardinality_max_dictionary_size)
- [low_cardinality_use_single_dictionary_for_part](../../operations/settings/settings.md#low_cardinality_use_single_dictionary_for_part)
- [low_cardinality_allow_in_native_format](../../operations/settings/settings.md#low_cardinality_allow_in_native_format)
- [allow_suspicious_low_cardinality_types](../../operations/settings/settings.md#allow_suspicious_low_cardinality_types)
- [low_cardinality_max_dictionary_size](../../operations/settings/settings.md#low_cardinality_max_dictionary_size)
- [low_cardinality_use_single_dictionary_for_part](../../operations/settings/settings.md#low_cardinality_use_single_dictionary_for_part)
- [low_cardinality_allow_in_native_format](../../operations/settings/settings.md#low_cardinality_allow_in_native_format)
- [allow_suspicious_low_cardinality_types](../../operations/settings/settings.md#allow_suspicious_low_cardinality_types)
- [output_format_arrow_low_cardinality_as_dictionary](../../operations/settings/settings.md#output-format-arrow-low-cardinality-as-dictionary)
Функции:
- [toLowCardinality](../functions/type-conversion-functions.md#tolowcardinality)
- [toLowCardinality](../functions/type-conversion-functions.md#tolowcardinality)
## Смотрите также
- [A Magical Mystery Tour of the LowCardinality Data Type](https://www.altinity.com/blog/2019/3/27/low-cardinality).
- [Reducing Clickhouse Storage Cost with the Low Cardinality Type Lessons from an Instana Engineer](https://www.instana.com/blog/reducing-clickhouse-storage-cost-with-the-low-cardinality-type-lessons-from-an-instana-engineer/).
- [String Optimization (video presentation in Russian)](https://youtu.be/rqf-ILRgBdY?list=PL0Z2YDlm0b3iwXCpEFiOOYmwXzVmjJfEt). [Slides in English](https://github.com/yandex/clickhouse-presentations/raw/master/meetup19/string_optimization.pdf).
- [A Magical Mystery Tour of the LowCardinality Data Type](https://www.altinity.com/blog/2019/3/27/low-cardinality).
- [Reducing Clickhouse Storage Cost with the Low Cardinality Type Lessons from an Instana Engineer](https://www.instana.com/blog/reducing-clickhouse-storage-cost-with-the-low-cardinality-type-lessons-from-an-instana-engineer/).
- [String Optimization (video presentation in Russian)](https://youtu.be/rqf-ILRgBdY?list=PL0Z2YDlm0b3iwXCpEFiOOYmwXzVmjJfEt). [Slides in English](https://github.com/yandex/clickhouse-presentations/raw/master/meetup19/string_optimization.pdf).

View File

@ -462,27 +462,29 @@ SELECT reinterpret(toInt8(-1), 'UInt8') as int_to_uint,
## CAST(x, T) {#type_conversion_function-cast}
Преобразует входное значение `x` в указанный тип данных `T`. В отличии от функции `reinterpret` использует внешнее представление значения `x`.
Поддерживается также синтаксис `CAST(x AS t)`.
!!! warning "Предупреждение"
Если значение `x` не может быть преобразовано к типу `T`, возникает переполнение. Например, `CAST(-1, 'UInt8')` возвращает 255.
Преобразует входное значение к указанному типу данных. В отличие от функции [reinterpret](#type_conversion_function-reinterpret) `CAST` пытается представить то же самое значение в новом типе данных. Если преобразование невозможно, то возникает исключение.
Поддерживается несколько вариантов синтаксиса.
**Синтаксис**
``` sql
CAST(x, T)
CAST(x AS t)
x::t
```
**Аргументы**
- `x` — любой тип данных.
- `T` — конечный тип данных. [String](../../sql-reference/data-types/string.md).
- `x` — значение, которое нужно преобразовать. Может быть любого типа.
- `T` — имя типа данных. [String](../../sql-reference/data-types/string.md).
- `t` — тип данных.
**Возвращаемое значение**
- Значение конечного типа данных.
- Преобразованное значение.
!!! note "Примечание"
Если входное значение выходит за границы нового типа, то результат переполняется. Например, `CAST(-1, 'UInt8')` возвращает `255`.
**Примеры**
@ -491,16 +493,16 @@ CAST(x, T)
```sql
SELECT
CAST(toInt8(-1), 'UInt8') AS cast_int_to_uint,
CAST(toInt8(1), 'Float32') AS cast_int_to_float,
CAST('1', 'UInt32') AS cast_string_to_int
CAST(1.5 AS Decimal(3,2)) AS cast_float_to_decimal,
'1'::Int32 AS cast_string_to_int;
```
Результат:
```
┌─cast_int_to_uint─┬─cast_int_to_float─┬─cast_string_to_int─┐
│ 255 │ 1 │ 1 │
└──────────────────┴───────────────────┴────────────────────┘
┌─cast_int_to_uint─┬─cast_float_to_decimal─┬─cast_string_to_int─┐
│ 255 │ 1.50 │ 1 │
└──────────────────┴───────────────────────┴────────────────────┘
```
Запрос:
@ -524,7 +526,7 @@ SELECT
Преобразование в FixedString(N) работает только для аргументов типа [String](../../sql-reference/data-types/string.md) или [FixedString](../../sql-reference/data-types/fixedstring.md).
Поддерживается преобразование к типу [Nullable](../../sql-reference/functions/type-conversion-functions.md) и обратно.
Поддерживается преобразование к типу [Nullable](../../sql-reference/data-types/nullable.md) и обратно.
**Примеры**

View File

@ -6,12 +6,12 @@ toc_title: Atomic
# Atomic {#atomic}
It is supports non-blocking `DROP` and `RENAME TABLE` queries and atomic `EXCHANGE TABLES t1 AND t2` queries. Atomic database engine is used by default.
它支持非阻塞 DROP 和 RENAME TABLE 查询以及原子 EXCHANGE TABLES t1 AND t2 查询。默认情况下使用Atomic数据库引擎。
## Creating a Database {#creating-a-database}
## 创建数据库 {#creating-a-database}
```sql
CREATE DATABASE test ENGINE = Atomic;
```
[Original article](https://clickhouse.tech/docs/en/engines/database_engines/atomic/) <!--hide-->
[原文](https://clickhouse.tech/docs/en/engines/database_engines/atomic/) <!--hide-->

View File

@ -1,4 +1,4 @@
# 折叠树 {#table_engine-collapsingmergetree}
# CollapsingMergeTree {#table_engine-collapsingmergetree}
该引擎继承于 [MergeTree](mergetree.md),并在数据块合并算法中添加了折叠行的逻辑。
@ -203,4 +203,4 @@ SELECT * FROM UAct FINAL
这种查询数据的方法是非常低效的。不要在大表中使用它。
[来源文章](https://clickhouse.tech/docs/en/operations/table_engines/collapsingmergetree/) <!--hide-->
[原文](https://clickhouse.tech/docs/en/operations/table_engines/collapsingmergetree/) <!--hide-->

View File

@ -3,7 +3,7 @@ toc_priority: 37
toc_title: "版本折叠MergeTree"
---
# 版本折叠MergeTree {#versionedcollapsingmergetree}
# VersionedCollapsingMergeTree {#versionedcollapsingmergetree}
这个引擎:

View File

@ -5,6 +5,6 @@ toc_title: 原生接口(TCP)
# 原生接口TCP{#native-interface-tcp}
原生接口用于[命令行客户端](cli.md)用于分布式查询处理期间的服务器间通信以及其他C++程序。可惜的是,原生的ClickHouse协议还没有正式的规范但它可以从ClickHouse[源代码](https://github.com/ClickHouse/ClickHouse/tree/master/src/Client)通过拦截和分析TCP流量进行反向工程。
原生接口协议用于[命令行客户端](cli.md)用于分布式查询处理期间的服务器间通信以及其他C++ 程序。不幸的是,原生ClickHouse协议还没有正式的规范但它可以从ClickHouse源代码[从这里开始](https://github.com/ClickHouse/ClickHouse/tree/master/src/Client)或通过拦截和分析TCP流量进行逆向工程。
[来源文章](https://clickhouse.tech/docs/zh/interfaces/tcp/) <!--hide-->
[原文](https://clickhouse.tech/docs/en/interfaces/tcp/) <!--hide-->

View File

@ -57,9 +57,9 @@ ClickHouse Web 界面 [Tabix](https://github.com/tabixio/tabix).
- 表格预览。
- 自动完成。
### ツ环板-ョツ嘉ッツ偲 {#clickhouse-cli}
### clickhouse-cli {#clickhouse-cli}
[ツ环板-ョツ嘉ッツ偲](https://github.com/hatarist/clickhouse-cli) 是ClickHouse的替代命令行客户端用Python 3编写。
[clickhouse-cli](https://github.com/hatarist/clickhouse-cli) 是ClickHouse的替代命令行客户端用Python 3编写。
特征:
@ -68,15 +68,15 @@ ClickHouse Web 界面 [Tabix](https://github.com/tabixio/tabix).
- 寻呼机支持数据输出。
- 自定义PostgreSQL类命令。
### ツ暗ェツ氾环催ツ団ツ法ツ人 {#clickhouse-flamegraph}
### clickhouse-flamegraph {#clickhouse-flamegraph}
[clickhouse-flamegraph](https://github.com/Slach/clickhouse-flamegraph) 是一个可视化的专业工具`system.trace_log`如[flamegraph](http://www.brendangregg.com/flamegraphs.html).
## 商业 {#shang-ye}
### ツ环板Softwareョツ嘉ッ {#holistics-software}
### Holistics {#holistics-software}
[整体学](https://www.holistics.io/) 在2019年被Gartner FrontRunners列为可用性最高排名第二的商业智能工具之一。 Holistics是一个基于SQL的全栈数据平台和商业智能工具用于设置您的分析流程。
[Holistics](https://www.holistics.io/) 在2019年被Gartner FrontRunners列为可用性最高排名第二的商业智能工具之一。 Holistics是一个基于SQL的全栈数据平台和商业智能工具用于设置您的分析流程。
特征:

View File

@ -5,9 +5,21 @@ toc_title: "操作"
# 操作 {#operations}
Clickhouse运维手册主要包含下面几部分
ClickHouse操作手册由以下主要部分组成
- 安装要求
- [安装要求](../operations/requirements.md)
- [监控](../operations/monitoring.md)
- [故障排除](../operations/troubleshooting.md)
- [使用建议](../operations/tips.md)
- [更新程序](../operations/update.md)
- [访问权限](../operations/access-rights.md)
- [数据备份](../operations/backup.md)
- [配置文件](../operations/configuration-files.md)
- [配额](../operations/quotas.md)
- [系统表](../operations/system-tables/index.md)
- [服务器配置参数](../operations/server-configuration-parameters/index.md)
- [如何用ClickHouse测试你的硬件](../operations/performance-test.md)
- [设置](../operations/settings/index.md)
- [实用工具](../operations/utilities/index.md)
[原始文章](https://clickhouse.tech/docs/en/operations/) <!--hide-->
[原文](https://clickhouse.tech/docs/en/operations/) <!--hide-->

View File

@ -26,6 +26,7 @@
#include <boost/algorithm/string/replace.hpp>
#include <Poco/String.h>
#include <Poco/Util/Application.h>
#include <Columns/ColumnString.h>
#include <common/find_symbols.h>
#include <common/LineReader.h>
#include <Common/ClickHouseRevision.h>
@ -301,26 +302,9 @@ private:
}
catch (const Exception & e)
{
bool print_stack_trace = config().getBool("stacktrace", false);
bool print_stack_trace = config().getBool("stacktrace", false) && e.code() != ErrorCodes::NETWORK_ERROR;
std::string text = e.displayText();
/** If exception is received from server, then stack trace is embedded in message.
* If exception is thrown on client, then stack trace is in separate field.
*/
auto embedded_stack_trace_pos = text.find("Stack trace");
if (std::string::npos != embedded_stack_trace_pos && !print_stack_trace)
text.resize(embedded_stack_trace_pos);
std::cerr << "Code: " << e.code() << ". " << text << std::endl << std::endl;
/// Don't print the stack trace on the client if it was logged on the server.
/// Also don't print the stack trace in case of network errors.
if (print_stack_trace && e.code() != ErrorCodes::NETWORK_ERROR && std::string::npos == embedded_stack_trace_pos)
{
std::cerr << "Stack trace:" << std::endl << e.getStackTraceString();
}
std::cerr << getExceptionMessage(e, print_stack_trace, true) << std::endl << std::endl;
/// If exception code isn't zero, we should return non-zero return code anyway.
return e.code() ? e.code() : -1;
@ -487,6 +471,52 @@ private:
}
#endif
/// Make query to get all server warnings
std::vector<String> loadWarningMessages()
{
std::vector<String> messages;
connection->sendQuery(connection_parameters.timeouts, "SELECT message FROM system.warnings", "" /* query_id */, QueryProcessingStage::Complete);
while (true)
{
Packet packet = connection->receivePacket();
switch (packet.type)
{
case Protocol::Server::Data:
if (packet.block)
{
const ColumnString & column = typeid_cast<const ColumnString &>(*packet.block.getByPosition(0).column);
size_t rows = packet.block.rows();
for (size_t i = 0; i < rows; ++i)
messages.emplace_back(column.getDataAt(i).toString());
}
continue;
case Protocol::Server::Progress:
continue;
case Protocol::Server::ProfileInfo:
continue;
case Protocol::Server::Totals:
continue;
case Protocol::Server::Extremes:
continue;
case Protocol::Server::Log:
continue;
case Protocol::Server::Exception:
packet.exception->rethrow();
return messages;
case Protocol::Server::EndOfStream:
return messages;
default:
throw Exception(ErrorCodes::UNKNOWN_PACKET_FROM_SERVER, "Unknown packet {} from server {}",
packet.type, connection->getDescription());
}
}
}
int mainImpl()
{
UseSSL use_ssl;
@ -565,6 +595,26 @@ private:
suggest->load(connection_parameters, config().getInt("suggestion_limit"));
}
/// Load Warnings at the beginning of connection
if (!config().has("no-warnings"))
{
try
{
std::vector<String> messages = loadWarningMessages();
if (!messages.empty())
{
std::cout << "Warnings:" << std::endl;
for (const auto & message : messages)
std::cout << "* " << message << std::endl;
std::cout << std::endl;
}
}
catch (...)
{
/// Ignore exception
}
}
/// Load command history if present.
if (config().has("history_file"))
history_file = config().getString("history_file");
@ -633,17 +683,10 @@ private:
}
catch (const Exception & e)
{
// We don't need to handle the test hints in the interactive
// mode.
std::cerr << std::endl
<< "Exception on client:" << std::endl
<< "Code: " << e.code() << ". " << e.displayText() << std::endl;
if (config().getBool("stacktrace", false))
std::cerr << "Stack trace:" << std::endl << e.getStackTraceString() << std::endl;
std::cerr << std::endl;
/// We don't need to handle the test hints in the interactive mode.
bool print_stack_trace = config().getBool("stacktrace", false);
std::cerr << "Exception on client:" << std::endl << getExceptionMessage(e, print_stack_trace, true) << std::endl << std::endl;
client_exception = std::make_unique<Exception>(e);
}
@ -940,18 +983,11 @@ private:
{
if (server_exception)
{
std::string text = server_exception->displayText();
auto embedded_stack_trace_pos = text.find("Stack trace");
if (std::string::npos != embedded_stack_trace_pos && !config().getBool("stacktrace", false))
{
text.resize(embedded_stack_trace_pos);
}
bool print_stack_trace = config().getBool("stacktrace", false);
std::cerr << "Received exception from server (version " << server_version << "):" << std::endl
<< "Code: " << server_exception->code() << ". " << text << std::endl;
<< getExceptionMessage(*server_exception, print_stack_trace, true) << std::endl;
if (is_interactive)
{
std::cerr << std::endl;
}
}
if (client_exception)
@ -1410,8 +1446,7 @@ private:
{
// Just report it, we'll terminate below.
fmt::print(stderr,
"Error while reconnecting to the server: Code: {}: {}\n",
getCurrentExceptionCode(),
"Error while reconnecting to the server: {}\n",
getCurrentExceptionMessage(true));
assert(!connection->isConnected());
@ -2529,6 +2564,7 @@ public:
("opentelemetry-traceparent", po::value<std::string>(), "OpenTelemetry traceparent header as described by W3C Trace Context recommendation")
("opentelemetry-tracestate", po::value<std::string>(), "OpenTelemetry tracestate header as described by W3C Trace Context recommendation")
("history_file", po::value<std::string>(), "path to history file")
("no-warnings", "disable warnings when client connects to server")
;
Settings cmd_settings;
@ -2596,8 +2632,7 @@ public:
}
catch (const Exception & e)
{
std::string text = e.displayText();
std::cerr << "Code: " << e.code() << ". " << text << std::endl;
std::cerr << getExceptionMessage(e, false) << std::endl;
std::cerr << "Table №" << i << std::endl << std::endl;
/// Avoid the case when error exit code can possibly overflow to normal (zero).
auto exit_code = e.code() % 256;
@ -2689,6 +2724,8 @@ public:
config().setBool("highlight", options["highlight"].as<bool>());
if (options.count("history_file"))
config().setString("history_file", options["history_file"].as<std::string>());
if (options.count("no-warnings"))
config().setBool("no-warnings", true);
if ((query_fuzzer_runs = options["query-fuzzer-runs"].as<int>()))
{
@ -2740,8 +2777,7 @@ int mainEntryClickHouseClient(int argc, char ** argv)
}
catch (const DB::Exception & e)
{
std::string text = e.displayText();
std::cerr << "Code: " << e.code() << ". " << text << std::endl;
std::cerr << DB::getExceptionMessage(e, false) << std::endl;
return 1;
}
catch (...)

View File

@ -433,7 +433,7 @@ void LocalServer::processQueries()
try
{
executeQuery(read_buf, write_buf, /* allow_into_outfile = */ true, context, {}, finalize_progress);
executeQuery(read_buf, write_buf, /* allow_into_outfile = */ true, context, {}, {}, finalize_progress);
}
catch (...)
{

View File

@ -613,10 +613,16 @@
}
/// Huge JS libraries should be loaded only if needed.
function loadJS(src) {
function loadJS(src, integrity) {
return new Promise((resolve, reject) => {
const script = document.createElement('script');
script.src = src;
if (integrity) {
script.crossOrigin = 'anonymous';
script.integrity = integrity;
} else {
console.warn('no integrity for', src)
}
script.addEventListener('load', function() { resolve(true); });
document.head.appendChild(script);
});
@ -627,10 +633,14 @@
if (load_dagre_promise) { return load_dagre_promise; }
load_dagre_promise = Promise.all([
loadJS('https://dagrejs.github.io/project/dagre/v0.8.5/dagre.min.js'),
loadJS('https://dagrejs.github.io/project/graphlib-dot/v0.6.4/graphlib-dot.min.js'),
loadJS('https://dagrejs.github.io/project/dagre-d3/v0.6.4/dagre-d3.min.js'),
loadJS('https://cdn.jsdelivr.net/npm/d3@7.0.0'),
loadJS('https://dagrejs.github.io/project/dagre/v0.8.5/dagre.min.js',
'sha384-2IH3T69EIKYC4c+RXZifZRvaH5SRUdacJW7j6HtE5rQbvLhKKdawxq6vpIzJ7j9M'),
loadJS('https://dagrejs.github.io/project/graphlib-dot/v0.6.4/graphlib-dot.min.js',
'sha384-Q7oatU+b+y0oTkSoiRH9wTLH6sROySROCILZso/AbMMm9uKeq++r8ujD4l4f+CWj'),
loadJS('https://dagrejs.github.io/project/dagre-d3/v0.6.4/dagre-d3.min.js',
'sha384-9N1ty7Yz7VKL3aJbOk+8ParYNW8G5W+MvxEfFL9G7CRYPmkHI9gJqyAfSI/8190W'),
loadJS('https://cdn.jsdelivr.net/npm/d3@7.0.0',
'sha384-S+Kf0r6YzKIhKA8d1k2/xtYv+j0xYUU3E7+5YLrcPVab6hBh/r1J6cq90OXhw80u'),
]);
return load_dagre_promise;

View File

@ -64,7 +64,12 @@ public:
std::lock_guard lock{mutex};
auto x = cache.get(params);
if (x)
return *x;
{
if ((*x)->getUser())
return *x;
/// No user, probably the user has been dropped while it was in the cache.
cache.remove(params);
}
auto res = std::shared_ptr<ContextAccess>(new ContextAccess(manager, params));
cache.add(params, res);
return res;

View File

@ -655,7 +655,7 @@ private:
for (auto & [lhs_childname, lhs_child] : *children)
{
if (!rhs.tryGetChild(lhs_childname))
lhs_child.flags |= rhs.flags & lhs_child.getAllGrantableFlags();
lhs_child.addGrantsRec(rhs.flags);
}
}
}
@ -673,7 +673,7 @@ private:
for (auto & [lhs_childname, lhs_child] : *children)
{
if (!rhs.tryGetChild(lhs_childname))
lhs_child.flags &= rhs.flags;
lhs_child.removeGrantsRec(~rhs.flags);
}
}
}
@ -1041,17 +1041,15 @@ void AccessRights::makeIntersection(const AccessRights & other)
auto helper = [](std::unique_ptr<Node> & root_node, const std::unique_ptr<Node> & other_root_node)
{
if (!root_node)
return;
if (!other_root_node)
{
if (other_root_node)
root_node = std::make_unique<Node>(*other_root_node);
root_node = nullptr;
return;
}
if (other_root_node)
{
root_node->makeIntersection(*other_root_node);
if (!root_node->flags && !root_node->children)
root_node = nullptr;
}
root_node->makeIntersection(*other_root_node);
if (!root_node->flags && !root_node->children)
root_node = nullptr;
};
helper(root, other.root);
helper(root_with_grant_option, other.root_with_grant_option);

View File

@ -163,11 +163,10 @@ void ContextAccess::setUser(const UserPtr & user_) const
if (!user)
{
/// User has been dropped.
auto nothing_granted = std::make_shared<AccessRights>();
access = nothing_granted;
access_with_implicit = nothing_granted;
subscription_for_user_change = {};
subscription_for_roles_changes = {};
access = nullptr;
access_with_implicit = nullptr;
enabled_roles = nullptr;
roles_info = nullptr;
enabled_row_policies = nullptr;
@ -252,32 +251,45 @@ String ContextAccess::getUserName() const
std::shared_ptr<const EnabledRolesInfo> ContextAccess::getRolesInfo() const
{
std::lock_guard lock{mutex};
return roles_info;
if (roles_info)
return roles_info;
static const auto no_roles = std::make_shared<EnabledRolesInfo>();
return no_roles;
}
std::shared_ptr<const EnabledRowPolicies> ContextAccess::getEnabledRowPolicies() const
{
std::lock_guard lock{mutex};
return enabled_row_policies;
if (enabled_row_policies)
return enabled_row_policies;
static const auto no_row_policies = std::make_shared<EnabledRowPolicies>();
return no_row_policies;
}
ASTPtr ContextAccess::getRowPolicyCondition(const String & database, const String & table_name, RowPolicy::ConditionType index, const ASTPtr & extra_condition) const
{
std::lock_guard lock{mutex};
return enabled_row_policies ? enabled_row_policies->getCondition(database, table_name, index, extra_condition) : nullptr;
if (enabled_row_policies)
return enabled_row_policies->getCondition(database, table_name, index, extra_condition);
return nullptr;
}
std::shared_ptr<const EnabledQuota> ContextAccess::getQuota() const
{
std::lock_guard lock{mutex};
return enabled_quota;
if (enabled_quota)
return enabled_quota;
static const auto unlimited_quota = EnabledQuota::getUnlimitedQuota();
return unlimited_quota;
}
std::optional<QuotaUsage> ContextAccess::getQuotaUsage() const
{
std::lock_guard lock{mutex};
return enabled_quota ? enabled_quota->getUsage() : std::optional<QuotaUsage>{};
if (enabled_quota)
return enabled_quota->getUsage();
return {};
}
@ -288,7 +300,7 @@ std::shared_ptr<const ContextAccess> ContextAccess::getFullAccess()
auto full_access = std::shared_ptr<ContextAccess>(new ContextAccess);
full_access->is_full_access = true;
full_access->access = std::make_shared<AccessRights>(AccessRights::getFullAccess());
full_access->enabled_quota = EnabledQuota::getUnlimitedQuota();
full_access->access_with_implicit = std::make_shared<AccessRights>(addImplicitAccessRights(*full_access->access));
return full_access;
}();
return res;
@ -298,28 +310,40 @@ std::shared_ptr<const ContextAccess> ContextAccess::getFullAccess()
std::shared_ptr<const Settings> ContextAccess::getDefaultSettings() const
{
std::lock_guard lock{mutex};
return enabled_settings ? enabled_settings->getSettings() : nullptr;
if (enabled_settings)
return enabled_settings->getSettings();
static const auto everything_by_default = std::make_shared<Settings>();
return everything_by_default;
}
std::shared_ptr<const SettingsConstraints> ContextAccess::getSettingsConstraints() const
{
std::lock_guard lock{mutex};
return enabled_settings ? enabled_settings->getConstraints() : nullptr;
if (enabled_settings)
return enabled_settings->getConstraints();
static const auto no_constraints = std::make_shared<SettingsConstraints>();
return no_constraints;
}
std::shared_ptr<const AccessRights> ContextAccess::getAccessRights() const
{
std::lock_guard lock{mutex};
return access;
if (access)
return access;
static const auto nothing_granted = std::make_shared<AccessRights>();
return nothing_granted;
}
std::shared_ptr<const AccessRights> ContextAccess::getAccessRightsWithImplicit() const
{
std::lock_guard lock{mutex};
return access_with_implicit;
if (access_with_implicit)
return access_with_implicit;
static const auto nothing_granted = std::make_shared<AccessRights>();
return nothing_granted;
}
@ -551,7 +575,7 @@ bool ContextAccess::checkAdminOptionImplHelper(const Container & role_ids, const
for (auto it = std::begin(role_ids); it != std::end(role_ids); ++it, ++i)
{
const UUID & role_id = *it;
if (info && info->enabled_roles_with_admin_option.count(role_id))
if (info->enabled_roles_with_admin_option.count(role_id))
continue;
if (throw_if_denied)
@ -560,7 +584,7 @@ bool ContextAccess::checkAdminOptionImplHelper(const Container & role_ids, const
if (!role_name)
role_name = "ID {" + toString(role_id) + "}";
if (info && info->enabled_roles.count(role_id))
if (info->enabled_roles.count(role_id))
show_error("Not enough privileges. "
"Role " + backQuote(*role_name) + " is granted, but without ADMIN option. "
"To execute this query it's necessary to have the role " + backQuoteIfNeed(*role_name) + " granted with ADMIN option.",

View File

@ -71,11 +71,9 @@ public:
String getUserName() const;
/// Returns information about current and enabled roles.
/// The function can return nullptr.
std::shared_ptr<const EnabledRolesInfo> getRolesInfo() const;
/// Returns information about enabled row policies.
/// The function can return nullptr.
std::shared_ptr<const EnabledRowPolicies> getEnabledRowPolicies() const;
/// Returns the row policy filter for a specified table.
@ -83,16 +81,13 @@ public:
ASTPtr getRowPolicyCondition(const String & database, const String & table_name, RowPolicy::ConditionType index, const ASTPtr & extra_condition = nullptr) const;
/// Returns the quota to track resource consumption.
/// The function returns nullptr if no tracking or limitation is needed.
std::shared_ptr<const EnabledQuota> getQuota() const;
std::optional<QuotaUsage> getQuotaUsage() const;
/// Returns the default settings, i.e. the settings to apply on user's login.
/// The function returns nullptr if it's no need to apply settings.
std::shared_ptr<const Settings> getDefaultSettings() const;
/// Returns the settings' constraints.
/// The function returns nullptr if there are no constraints.
std::shared_ptr<const SettingsConstraints> getSettingsConstraints() const;
/// Returns the current access rights.

View File

@ -12,8 +12,11 @@ size_t EnabledRowPolicies::Hash::operator()(const MixedConditionKey & key) const
}
EnabledRowPolicies::EnabledRowPolicies(const Params & params_)
: params(params_)
EnabledRowPolicies::EnabledRowPolicies() : params()
{
}
EnabledRowPolicies::EnabledRowPolicies(const Params & params_) : params(params_)
{
}

View File

@ -32,6 +32,7 @@ public:
friend bool operator >=(const Params & lhs, const Params & rhs) { return !(lhs < rhs); }
};
EnabledRowPolicies();
~EnabledRowPolicies();
using ConditionType = RowPolicy::ConditionType;

View File

@ -18,6 +18,8 @@ namespace ErrorCodes
}
SettingsConstraints::SettingsConstraints() = default;
SettingsConstraints::SettingsConstraints(const AccessControlManager & manager_) : manager(&manager_)
{
}
@ -199,10 +201,13 @@ bool SettingsConstraints::checkImpl(const Settings & current_settings, SettingCh
}
};
if (reaction == THROW_ON_VIOLATION)
manager->checkSettingNameIsAllowed(setting_name);
else if (!manager->isSettingNameAllowed(setting_name))
return false;
if (manager)
{
if (reaction == THROW_ON_VIOLATION)
manager->checkSettingNameIsAllowed(setting_name);
else if (!manager->isSettingNameAllowed(setting_name))
return false;
}
Field current_value, new_value;
if (current_settings.tryGet(setting_name, current_value))

View File

@ -51,6 +51,7 @@ class AccessControlManager;
class SettingsConstraints
{
public:
SettingsConstraints();
SettingsConstraints(const AccessControlManager & manager_);
SettingsConstraints(const SettingsConstraints & src);
SettingsConstraints & operator =(const SettingsConstraints & src);

View File

@ -0,0 +1,94 @@
#include <gtest/gtest.h>
#include <Access/AccessRights.h>
using namespace DB;
TEST(AccessRights, Union)
{
AccessRights lhs, rhs;
lhs.grant(AccessType::CREATE_TABLE, "db1", "tb1");
rhs.grant(AccessType::SELECT, "db2");
lhs.makeUnion(rhs);
ASSERT_EQ(lhs.toString(), "GRANT CREATE TABLE ON db1.tb1, GRANT SELECT ON db2.*");
lhs.clear();
rhs.clear();
rhs.grant(AccessType::SELECT, "db2");
lhs.grant(AccessType::CREATE_TABLE, "db1", "tb1");
lhs.makeUnion(rhs);
ASSERT_EQ(lhs.toString(), "GRANT CREATE TABLE ON db1.tb1, GRANT SELECT ON db2.*");
lhs = {};
rhs = {};
lhs.grant(AccessType::SELECT);
rhs.grant(AccessType::SELECT, "db1", "tb1");
lhs.makeUnion(rhs);
ASSERT_EQ(lhs.toString(), "GRANT SELECT ON *.*");
lhs = {};
rhs = {};
lhs.grant(AccessType::SELECT, "db1", "tb1", Strings{"col1", "col2"});
rhs.grant(AccessType::SELECT, "db1", "tb1", Strings{"col2", "col3"});
lhs.makeUnion(rhs);
ASSERT_EQ(lhs.toString(), "GRANT SELECT(col1, col2, col3) ON db1.tb1");
lhs = {};
rhs = {};
lhs.grant(AccessType::SELECT, "db1", "tb1", Strings{"col1", "col2"});
rhs.grantWithGrantOption(AccessType::SELECT, "db1", "tb1", Strings{"col2", "col3"});
lhs.makeUnion(rhs);
ASSERT_EQ(lhs.toString(), "GRANT SELECT(col1) ON db1.tb1, GRANT SELECT(col2, col3) ON db1.tb1 WITH GRANT OPTION");
lhs = {};
rhs = {};
lhs.grant(AccessType::INSERT);
rhs.grant(AccessType::ALL, "db1");
lhs.makeUnion(rhs);
ASSERT_EQ(lhs.toString(), "GRANT INSERT ON *.*, GRANT SHOW, SELECT, ALTER, CREATE DATABASE, CREATE TABLE, CREATE VIEW, CREATE DICTIONARY, DROP, TRUNCATE, OPTIMIZE, SYSTEM MERGES, SYSTEM TTL MERGES, SYSTEM FETCHES, SYSTEM MOVES, SYSTEM SENDS, SYSTEM REPLICATION QUEUES, SYSTEM DROP REPLICA, SYSTEM SYNC REPLICA, SYSTEM RESTART REPLICA, SYSTEM RESTORE REPLICA, SYSTEM FLUSH DISTRIBUTED, dictGet ON db1.*");
}
TEST(AccessRights, Intersection)
{
AccessRights lhs, rhs;
lhs.grant(AccessType::CREATE_TABLE, "db1", "tb1");
rhs.grant(AccessType::SELECT, "db2");
lhs.makeIntersection(rhs);
ASSERT_EQ(lhs.toString(), "GRANT USAGE ON *.*");
lhs.clear();
rhs.clear();
lhs.grant(AccessType::SELECT, "db2");
rhs.grant(AccessType::CREATE_TABLE, "db1", "tb1");
lhs.makeIntersection(rhs);
ASSERT_EQ(lhs.toString(), "GRANT USAGE ON *.*");
lhs = {};
rhs = {};
lhs.grant(AccessType::SELECT);
rhs.grant(AccessType::SELECT, "db1", "tb1");
lhs.makeIntersection(rhs);
ASSERT_EQ(lhs.toString(), "GRANT SELECT ON db1.tb1");
lhs = {};
rhs = {};
lhs.grant(AccessType::SELECT, "db1", "tb1", Strings{"col1", "col2"});
rhs.grant(AccessType::SELECT, "db1", "tb1", Strings{"col2", "col3"});
lhs.makeIntersection(rhs);
ASSERT_EQ(lhs.toString(), "GRANT SELECT(col2) ON db1.tb1");
lhs = {};
rhs = {};
lhs.grant(AccessType::SELECT, "db1", "tb1", Strings{"col1", "col2"});
rhs.grantWithGrantOption(AccessType::SELECT, "db1", "tb1", Strings{"col2", "col3"});
lhs.makeIntersection(rhs);
ASSERT_EQ(lhs.toString(), "GRANT SELECT(col2) ON db1.tb1");
lhs = {};
rhs = {};
lhs.grant(AccessType::INSERT);
rhs.grant(AccessType::ALL, "db1");
lhs.makeIntersection(rhs);
ASSERT_EQ(lhs.toString(), "GRANT INSERT ON db1.*");
}

View File

@ -3,6 +3,7 @@
#include <AggregateFunctions/AggregateFunctionSequenceMatch.h>
#include <DataTypes/DataTypeDate.h>
#include <DataTypes/DataTypeDate32.h>
#include <DataTypes/DataTypeDateTime.h>
#include <common/range.h>

View File

@ -459,6 +459,8 @@ public:
explicit FieldVisitorMax(const Field & rhs_) : rhs(rhs_) {}
bool operator() (Null &) const { throw Exception("Cannot compare Nulls", ErrorCodes::LOGICAL_ERROR); }
bool operator() (NegativeInfinity &) const { throw Exception("Cannot compare -Inf", ErrorCodes::LOGICAL_ERROR); }
bool operator() (PositiveInfinity &) const { throw Exception("Cannot compare +Inf", ErrorCodes::LOGICAL_ERROR); }
bool operator() (AggregateFunctionStateData &) const { throw Exception("Cannot compare AggregateFunctionStates", ErrorCodes::LOGICAL_ERROR); }
bool operator() (Array & x) const { return compareImpl<Array>(x); }
@ -494,6 +496,8 @@ public:
explicit FieldVisitorMin(const Field & rhs_) : rhs(rhs_) {}
bool operator() (Null &) const { throw Exception("Cannot compare Nulls", ErrorCodes::LOGICAL_ERROR); }
bool operator() (NegativeInfinity &) const { throw Exception("Cannot compare -Inf", ErrorCodes::LOGICAL_ERROR); }
bool operator() (PositiveInfinity &) const { throw Exception("Cannot compare +Inf", ErrorCodes::LOGICAL_ERROR); }
bool operator() (AggregateFunctionStateData &) const { throw Exception("Cannot sum AggregateFunctionStates", ErrorCodes::LOGICAL_ERROR); }
bool operator() (Array & x) const { return compareImpl<Array>(x); }

View File

@ -4,6 +4,7 @@
#include <AggregateFunctions/FactoryHelpers.h>
#include <DataTypes/DataTypeDate.h>
#include <DataTypes/DataTypeDate32.h>
#include <DataTypes/DataTypeDateTime.h>
#include <DataTypes/DataTypeTuple.h>
#include <DataTypes/DataTypeUUID.h>
@ -49,6 +50,8 @@ AggregateFunctionPtr createAggregateFunctionUniq(const std::string & name, const
return res;
else if (which.isDate())
return std::make_shared<AggregateFunctionUniq<DataTypeDate::FieldType, Data>>(argument_types);
else if (which.isDate32())
return std::make_shared<AggregateFunctionUniq<DataTypeDate32::FieldType, Data>>(argument_types);
else if (which.isDateTime())
return std::make_shared<AggregateFunctionUniq<DataTypeDateTime::FieldType, Data>>(argument_types);
else if (which.isStringOrFixedString())
@ -95,6 +98,8 @@ AggregateFunctionPtr createAggregateFunctionUniq(const std::string & name, const
return res;
else if (which.isDate())
return std::make_shared<AggregateFunctionUniq<DataTypeDate::FieldType, Data<DataTypeDate::FieldType>>>(argument_types);
else if (which.isDate32())
return std::make_shared<AggregateFunctionUniq<DataTypeDate32::FieldType, Data<DataTypeDate32::FieldType>>>(argument_types);
else if (which.isDateTime())
return std::make_shared<AggregateFunctionUniq<DataTypeDateTime::FieldType, Data<DataTypeDateTime::FieldType>>>(argument_types);
else if (which.isStringOrFixedString())

View File

@ -6,6 +6,7 @@
#include <Common/FieldVisitorConvertToNumber.h>
#include <DataTypes/DataTypeDate.h>
#include <DataTypes/DataTypeDate32.h>
#include <DataTypes/DataTypeDateTime.h>
#include <functional>
@ -51,6 +52,8 @@ namespace
return res;
else if (which.isDate())
return std::make_shared<typename WithK<K, HashValueType>::template AggregateFunction<DataTypeDate::FieldType>>(argument_types, params);
else if (which.isDate32())
return std::make_shared<typename WithK<K, HashValueType>::template AggregateFunction<DataTypeDate32::FieldType>>(argument_types, params);
else if (which.isDateTime())
return std::make_shared<typename WithK<K, HashValueType>::template AggregateFunction<DataTypeDateTime::FieldType>>(argument_types, params);
else if (which.isStringOrFixedString())

View File

@ -3,6 +3,7 @@
#include <AggregateFunctions/AggregateFunctionUniqUpTo.h>
#include <Common/FieldVisitorConvertToNumber.h>
#include <DataTypes/DataTypeDate.h>
#include <DataTypes/DataTypeDate32.h>
#include <DataTypes/DataTypeDateTime.h>
#include <DataTypes/DataTypeString.h>
#include <DataTypes/DataTypeFixedString.h>
@ -61,6 +62,8 @@ AggregateFunctionPtr createAggregateFunctionUniqUpTo(const std::string & name, c
return res;
else if (which.isDate())
return std::make_shared<AggregateFunctionUniqUpTo<DataTypeDate::FieldType>>(threshold, argument_types, params);
else if (which.isDate32())
return std::make_shared<AggregateFunctionUniqUpTo<DataTypeDate32::FieldType>>(threshold, argument_types, params);
else if (which.isDateTime())
return std::make_shared<AggregateFunctionUniqUpTo<DataTypeDateTime::FieldType>>(threshold, argument_types, params);
else if (which.isStringOrFixedString())

View File

@ -4,6 +4,7 @@
#include <AggregateFunctions/Helpers.h>
#include <Core/Settings.h>
#include <DataTypes/DataTypeDate.h>
#include <DataTypes/DataTypeDate32.h>
#include <DataTypes/DataTypeDateTime.h>
#include <common/range.h>

View File

@ -353,6 +353,11 @@ bool HedgedConnections::resumePacketReceiver(const HedgedConnections::ReplicaLoc
if (offset_states[location.offset].active_connection_count == 0 && !offset_states[location.offset].next_replica_in_process)
throw NetException("Receive timeout expired", ErrorCodes::SOCKET_TIMEOUT);
}
else if (std::holds_alternative<std::exception_ptr>(res))
{
finishProcessReplica(replica_state, true);
std::rethrow_exception(std::move(std::get<std::exception_ptr>(res)));
}
return false;
}
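resume() now returns a fourth alternative, a captured std::exception_ptr, and resumePacketReceiver() finishes the replica before rethrowing it on the caller's side instead of throwing inside the fiber. A small self-contained sketch of that hand-off (illustrative names, not the HedgedConnections/PacketReceiver API):

#include <exception>
#include <iostream>
#include <stdexcept>
#include <variant>

/// A resumable step that reports either a value or a captured exception; the caller decides
/// where (and whether) to rethrow.
using ResumeResult = std::variant<int, std::exception_ptr>;

ResumeResult resume_step(bool fail)
{
    if (fail)
        return std::make_exception_ptr(std::runtime_error("Receive timeout expired"));
    return 42;
}

int main()
{
    auto res = resume_step(true);
    if (std::holds_alternative<std::exception_ptr>(res))
    {
        try
        {
            /// Cleanup of the failed replica would happen here, then the exception is rethrown.
            std::rethrow_exception(std::get<std::exception_ptr>(res));
        }
        catch (const std::exception & e)
        {
            std::cout << "caught on the caller side: " << e.what() << '\n';
        }
    }
}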

View File

@ -31,7 +31,7 @@ public:
}
/// Resume packet receiving.
std::variant<int, Packet, Poco::Timespan> resume()
std::variant<int, Packet, Poco::Timespan, std::exception_ptr> resume()
{
/// If there is no pending data, check receive timeout.
if (!connection->hasReadPendingData() && !checkReceiveTimeout())
@ -43,7 +43,7 @@ public:
/// Resume fiber.
fiber = std::move(fiber).resume();
if (exception)
std::rethrow_exception(std::move(exception));
return std::move(exception);
if (is_read_in_process)
return epoll.getFileDescriptor();

View File

@ -546,97 +546,54 @@ namespace
{
/// The following function implements a slightly more general version
/// of getExtremes() than the implementation from ColumnVector.
/// of getExtremes() than the implementations provided by non-nullable IColumn classes.
/// It takes into account the possible presence of nullable values.
template <typename T>
void getExtremesFromNullableContent(const ColumnVector<T> & col, const NullMap & null_map, Field & min, Field & max)
void getExtremesWithNulls(const IColumn & nested_column, const NullMap & null_array, Field & min, Field & max, bool null_last = false)
{
const auto & data = col.getData();
size_t size = data.size();
if (size == 0)
size_t number_of_nulls = 0;
size_t n = null_array.size();
NullMap not_null_array(n);
for (auto i = 0ul; i < n; ++i)
{
min = Null();
max = Null();
return;
}
bool has_not_null = false;
bool has_not_nan = false;
T cur_min = 0;
T cur_max = 0;
for (size_t i = 0; i < size; ++i)
{
const T x = data[i];
if (null_map[i])
continue;
if (!has_not_null)
if (null_array[i])
{
cur_min = x;
cur_max = x;
has_not_null = true;
has_not_nan = !isNaN(x);
continue;
++number_of_nulls;
not_null_array[i] = 0;
}
if (isNaN(x))
continue;
if (!has_not_nan)
else
{
cur_min = x;
cur_max = x;
has_not_nan = true;
continue;
not_null_array[i] = 1;
}
if (x < cur_min)
cur_min = x;
else if (x > cur_max)
cur_max = x;
}
if (has_not_null)
if (number_of_nulls == 0)
{
min = cur_min;
max = cur_max;
nested_column.getExtremes(min, max);
}
else if (number_of_nulls == n)
{
min = PositiveInfinity();
max = PositiveInfinity();
}
else
{
auto filtered_column = nested_column.filter(not_null_array, -1);
filtered_column->getExtremes(min, max);
if (null_last)
max = PositiveInfinity();
}
}
}
void ColumnNullable::getExtremes(Field & min, Field & max) const
{
min = Null();
max = Null();
getExtremesWithNulls(getNestedColumn(), getNullMapData(), min, max);
}
const auto & null_map_data = getNullMapData();
if (const auto * col_i8 = typeid_cast<const ColumnInt8 *>(nested_column.get()))
getExtremesFromNullableContent<Int8>(*col_i8, null_map_data, min, max);
else if (const auto * col_i16 = typeid_cast<const ColumnInt16 *>(nested_column.get()))
getExtremesFromNullableContent<Int16>(*col_i16, null_map_data, min, max);
else if (const auto * col_i32 = typeid_cast<const ColumnInt32 *>(nested_column.get()))
getExtremesFromNullableContent<Int32>(*col_i32, null_map_data, min, max);
else if (const auto * col_i64 = typeid_cast<const ColumnInt64 *>(nested_column.get()))
getExtremesFromNullableContent<Int64>(*col_i64, null_map_data, min, max);
else if (const auto * col_u8 = typeid_cast<const ColumnUInt8 *>(nested_column.get()))
getExtremesFromNullableContent<UInt8>(*col_u8, null_map_data, min, max);
else if (const auto * col_u16 = typeid_cast<const ColumnUInt16 *>(nested_column.get()))
getExtremesFromNullableContent<UInt16>(*col_u16, null_map_data, min, max);
else if (const auto * col_u32 = typeid_cast<const ColumnUInt32 *>(nested_column.get()))
getExtremesFromNullableContent<UInt32>(*col_u32, null_map_data, min, max);
else if (const auto * col_u64 = typeid_cast<const ColumnUInt64 *>(nested_column.get()))
getExtremesFromNullableContent<UInt64>(*col_u64, null_map_data, min, max);
else if (const auto * col_f32 = typeid_cast<const ColumnFloat32 *>(nested_column.get()))
getExtremesFromNullableContent<Float32>(*col_f32, null_map_data, min, max);
else if (const auto * col_f64 = typeid_cast<const ColumnFloat64 *>(nested_column.get()))
getExtremesFromNullableContent<Float64>(*col_f64, null_map_data, min, max);
void ColumnNullable::getExtremesNullLast(Field & min, Field & max) const
{
getExtremesWithNulls(getNestedColumn(), getNullMapData(), min, max, true);
}
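A standalone sketch of the same null-handling logic, using plain vectors instead of IColumn (simplified, illustrative types): nulls are filtered out before computing extremes; if every value is null both extremes degenerate to +Inf, and with null_last the maximum is also forced to +Inf so that nulls sort after all real values, which is what the nullable minmax index relies on.

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iostream>
#include <limits>
#include <vector>

void get_extremes_with_nulls(const std::vector<double> & data, const std::vector<uint8_t> & null_map,
                             double & min, double & max, bool null_last = false)
{
    assert(data.size() == null_map.size());
    const double plus_inf = std::numeric_limits<double>::infinity();

    std::vector<double> not_null;                       /// analogue of filtering by the not-null mask
    for (size_t i = 0; i < data.size(); ++i)
        if (!null_map[i])
            not_null.push_back(data[i]);

    if (not_null.empty())                               /// every row is NULL
    {
        min = plus_inf;
        max = plus_inf;
        return;
    }

    auto [mn, mx] = std::minmax_element(not_null.begin(), not_null.end());
    min = *mn;
    bool has_nulls = not_null.size() != data.size();
    max = (null_last && has_nulls) ? plus_inf : *mx;    /// nulls sort last => max becomes +Inf
}

int main()
{
    double min = 0, max = 0;
    get_extremes_with_nulls({1.0, 2.0, 3.0}, {0, 1, 0}, min, max, /*null_last=*/ true);
    std::cout << min << ' ' << max << '\n';             /// prints "1 inf"
}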

View File

@ -111,6 +111,8 @@ public:
void updateWeakHash32(WeakHash32 & hash) const override;
void updateHashFast(SipHash & hash) const override;
void getExtremes(Field & min, Field & max) const override;
// Special version of getExtremes() for the nullable minmax index: nulls sort last (the maximum becomes +Inf when nulls are present)
void getExtremesNullLast(Field & min, Field & max) const;
MutableColumns scatter(ColumnIndex num_columns, const Selector & selector) const override
{

View File

@ -109,11 +109,23 @@ static DNSResolver::IPAddresses resolveIPAddressImpl(const std::string & host)
/// It should not affect client address checking, since client cannot connect from IPv6 address
/// if server has no IPv6 addresses.
flags |= Poco::Net::DNS::DNS_HINT_AI_ADDRCONFIG;
DNSResolver::IPAddresses addresses;
try
{
#if defined(ARCADIA_BUILD)
auto addresses = Poco::Net::DNS::hostByName(host, &Poco::Net::DNS::DEFAULT_DNS_TIMEOUT, flags).addresses();
addresses = Poco::Net::DNS::hostByName(host, &Poco::Net::DNS::DEFAULT_DNS_TIMEOUT, flags).addresses();
#else
auto addresses = Poco::Net::DNS::hostByName(host, flags).addresses();
addresses = Poco::Net::DNS::hostByName(host, flags).addresses();
#endif
}
catch (const Poco::Net::DNSException & e)
{
LOG_ERROR(&Poco::Logger::get("DNSResolver"), "Cannot resolve host ({}), error {}: {}.", host, e.code(), e.message());
addresses.clear();
}
if (addresses.empty())
throw Exception("Not found address of host: " + host, ErrorCodes::DNS_ERROR);

View File

@ -313,7 +313,7 @@ std::string getCurrentExceptionMessage(bool with_stacktrace, bool check_embedded
try
{
stream << "Poco::Exception. Code: " << ErrorCodes::POCO_EXCEPTION << ", e.code() = " << e.code()
<< ", e.displayText() = " << e.displayText()
<< ", " << e.displayText()
<< (with_stacktrace ? ", Stack trace (when copying this message, always include the lines below):\n\n" + getExceptionStackTraceString(e) : "")
<< (with_extra_info ? getExtraExceptionInfo(e) : "")
<< " (version " << VERSION_STRING << VERSION_OFFICIAL << ")";
@ -433,7 +433,12 @@ std::string getExceptionMessage(const Exception & e, bool with_stacktrace, bool
}
}
stream << "Code: " << e.code() << ", e.displayText() = " << text;
stream << "Code: " << e.code() << ". " << text;
if (!text.empty() && text.back() != '.')
stream << '.';
stream << " (" << ErrorCodes::getName(e.code()) << ")";
if (with_stacktrace && !has_embedded_stack_trace)
stream << ", Stack trace (when copying this message, always include the lines below):\n\n" << e.getStackTraceString();

View File

@ -26,6 +26,16 @@ public:
throw Exception("Cannot convert NULL to " + demangle(typeid(T).name()), ErrorCodes::CANNOT_CONVERT_TYPE);
}
T operator() (const NegativeInfinity &) const
{
throw Exception("Cannot convert -Inf to " + demangle(typeid(T).name()), ErrorCodes::CANNOT_CONVERT_TYPE);
}
T operator() (const PositiveInfinity &) const
{
throw Exception("Cannot convert +Inf to " + demangle(typeid(T).name()), ErrorCodes::CANNOT_CONVERT_TYPE);
}
T operator() (const String &) const
{
throw Exception("Cannot convert String to " + demangle(typeid(T).name()), ErrorCodes::CANNOT_CONVERT_TYPE);

View File

@ -25,6 +25,8 @@ static inline void writeQuoted(const DecimalField<T> & x, WriteBuffer & buf)
}
String FieldVisitorDump::operator() (const Null &) const { return "NULL"; }
String FieldVisitorDump::operator() (const NegativeInfinity &) const { return "-Inf"; }
String FieldVisitorDump::operator() (const PositiveInfinity &) const { return "+Inf"; }
String FieldVisitorDump::operator() (const UInt64 & x) const { return formatQuotedWithPrefix(x, "UInt64_"); }
String FieldVisitorDump::operator() (const Int64 & x) const { return formatQuotedWithPrefix(x, "Int64_"); }
String FieldVisitorDump::operator() (const Float64 & x) const { return formatQuotedWithPrefix(x, "Float64_"); }

View File

@ -10,6 +10,8 @@ class FieldVisitorDump : public StaticVisitor<String>
{
public:
String operator() (const Null & x) const;
String operator() (const NegativeInfinity & x) const;
String operator() (const PositiveInfinity & x) const;
String operator() (const UInt64 & x) const;
String operator() (const UInt128 & x) const;
String operator() (const UInt256 & x) const;

View File

@ -14,6 +14,18 @@ void FieldVisitorHash::operator() (const Null &) const
hash.update(type);
}
void FieldVisitorHash::operator() (const NegativeInfinity &) const
{
UInt8 type = Field::Types::NegativeInfinity;
hash.update(type);
}
void FieldVisitorHash::operator() (const PositiveInfinity &) const
{
UInt8 type = Field::Types::PositiveInfinity;
hash.update(type);
}
void FieldVisitorHash::operator() (const UInt64 & x) const
{
UInt8 type = Field::Types::UInt64;

View File

@ -16,6 +16,8 @@ public:
FieldVisitorHash(SipHash & hash_);
void operator() (const Null & x) const;
void operator() (const NegativeInfinity & x) const;
void operator() (const PositiveInfinity & x) const;
void operator() (const UInt64 & x) const;
void operator() (const UInt128 & x) const;
void operator() (const UInt256 & x) const;

View File

@ -22,6 +22,8 @@ bool FieldVisitorSum::operator() (UInt64 & x) const
bool FieldVisitorSum::operator() (Float64 & x) const { x += get<Float64>(rhs); return x != 0; }
bool FieldVisitorSum::operator() (Null &) const { throw Exception("Cannot sum Nulls", ErrorCodes::LOGICAL_ERROR); }
bool FieldVisitorSum::operator() (NegativeInfinity &) const { throw Exception("Cannot sum -Inf", ErrorCodes::LOGICAL_ERROR); }
bool FieldVisitorSum::operator() (PositiveInfinity &) const { throw Exception("Cannot sum +Inf", ErrorCodes::LOGICAL_ERROR); }
bool FieldVisitorSum::operator() (String &) const { throw Exception("Cannot sum Strings", ErrorCodes::LOGICAL_ERROR); }
bool FieldVisitorSum::operator() (Array &) const { throw Exception("Cannot sum Arrays", ErrorCodes::LOGICAL_ERROR); }
bool FieldVisitorSum::operator() (Tuple &) const { throw Exception("Cannot sum Tuples", ErrorCodes::LOGICAL_ERROR); }

View File

@ -21,6 +21,8 @@ public:
bool operator() (UInt64 & x) const;
bool operator() (Float64 & x) const;
bool operator() (Null &) const;
bool operator() (NegativeInfinity & x) const;
bool operator() (PositiveInfinity & x) const;
bool operator() (String &) const;
bool operator() (Array &) const;
bool operator() (Tuple &) const;

View File

@ -53,6 +53,8 @@ static String formatFloat(const Float64 x)
String FieldVisitorToString::operator() (const Null &) const { return "NULL"; }
String FieldVisitorToString::operator() (const NegativeInfinity &) const { return "-Inf"; }
String FieldVisitorToString::operator() (const PositiveInfinity &) const { return "+Inf"; }
String FieldVisitorToString::operator() (const UInt64 & x) const { return formatQuoted(x); }
String FieldVisitorToString::operator() (const Int64 & x) const { return formatQuoted(x); }
String FieldVisitorToString::operator() (const Float64 & x) const { return formatFloat(x); }

View File

@ -10,6 +10,8 @@ class FieldVisitorToString : public StaticVisitor<String>
{
public:
String operator() (const Null & x) const;
String operator() (const NegativeInfinity & x) const;
String operator() (const PositiveInfinity & x) const;
String operator() (const UInt64 & x) const;
String operator() (const UInt128 & x) const;
String operator() (const UInt256 & x) const;

View File

@ -7,6 +7,8 @@ namespace DB
{
void FieldVisitorWriteBinary::operator() (const Null &, WriteBuffer &) const { }
void FieldVisitorWriteBinary::operator() (const NegativeInfinity &, WriteBuffer &) const { }
void FieldVisitorWriteBinary::operator() (const PositiveInfinity &, WriteBuffer &) const { }
void FieldVisitorWriteBinary::operator() (const UInt64 & x, WriteBuffer & buf) const { writeVarUInt(x, buf); }
void FieldVisitorWriteBinary::operator() (const Int64 & x, WriteBuffer & buf) const { writeVarInt(x, buf); }
void FieldVisitorWriteBinary::operator() (const Float64 & x, WriteBuffer & buf) const { writeFloatBinary(x, buf); }

View File

@ -9,6 +9,8 @@ class FieldVisitorWriteBinary
{
public:
void operator() (const Null & x, WriteBuffer & buf) const;
void operator() (const NegativeInfinity & x, WriteBuffer & buf) const;
void operator() (const PositiveInfinity & x, WriteBuffer & buf) const;
void operator() (const UInt64 & x, WriteBuffer & buf) const;
void operator() (const UInt128 & x, WriteBuffer & buf) const;
void operator() (const UInt256 & x, WriteBuffer & buf) const;

View File

@ -26,8 +26,12 @@ public:
template <typename T, typename U>
bool operator() (const T & l, const U & r) const
{
if constexpr (std::is_same_v<T, Null> || std::is_same_v<U, Null>)
if constexpr (std::is_same_v<T, Null> || std::is_same_v<U, Null>
|| std::is_same_v<T, NegativeInfinity> || std::is_same_v<T, PositiveInfinity>
|| std::is_same_v<U, NegativeInfinity> || std::is_same_v<U, PositiveInfinity>)
{
return std::is_same_v<T, U>;
}
else
{
if constexpr (std::is_same_v<T, U>)
@ -77,6 +81,10 @@ public:
{
if constexpr (std::is_same_v<T, Null> || std::is_same_v<U, Null>)
return false;
else if constexpr (std::is_same_v<T, NegativeInfinity> || std::is_same_v<U, PositiveInfinity>)
return !std::is_same_v<T, U>;
else if constexpr (std::is_same_v<U, NegativeInfinity> || std::is_same_v<T, PositiveInfinity>)
return false;
else
{
if constexpr (std::is_same_v<T, U>)

View File

@ -224,7 +224,7 @@
M(PerfLocalMemoryReferences, "Local NUMA node memory reads") \
M(PerfLocalMemoryMisses, "Local NUMA node memory read misses") \
\
M(CreatedHTTPConnections, "Total amount of created HTTP connections (closed or opened).") \
M(CreatedHTTPConnections, "Total number of created HTTP connections (the counter increases every time a connection is created).") \
\
M(CannotWriteToWriteBufferDiscard, "Number of stack traces dropped by query profiler or signal handler because pipe is full or cannot write to pipe.") \
M(QueryProfilerSignalOverruns, "Number of times we drop processing of a signal due to overrun plus the number of signals that OS has not delivered due to overrun.") \
@ -248,6 +248,9 @@
M(S3WriteRequestsThrottling, "Number of 429 and 503 errors in POST, DELETE, PUT and PATCH requests to S3 storage.") \
M(S3WriteRequestsRedirects, "Number of redirects in POST, DELETE, PUT and PATCH requests to S3 storage.") \
M(QueryMemoryLimitExceeded, "Number of times when memory limit exceeded for query.") \
\
M(SleepFunctionCalls, "Number of times a sleep function (sleep, sleepEachRow) has been called.") \
M(SleepFunctionMicroseconds, "Time spent sleeping due to a sleep function call.") \
namespace ProfileEvents

View File

@ -375,9 +375,13 @@ void Block::setColumn(size_t position, ColumnWithTypeAndName && column)
throw Exception(ErrorCodes::POSITION_OUT_OF_BOUND, "Position {} out of bound in Block::setColumn(), max position {}",
position, toString(data.size()));
data[position].name = std::move(column.name);
data[position].type = std::move(column.type);
data[position].column = std::move(column.column);
if (data[position].name != column.name)
{
index_by_name.erase(data[position].name);
index_by_name.emplace(column.name, position);
}
data[position] = std::move(column);
}

View File

@ -62,6 +62,8 @@ void ExternalResultDescription::init(const Block & sample_block_)
types.emplace_back(ValueType::vtString, is_nullable);
else if (which.isDate())
types.emplace_back(ValueType::vtDate, is_nullable);
else if (which.isDate32())
types.emplace_back(ValueType::vtDate32, is_nullable);
else if (which.isDateTime())
types.emplace_back(ValueType::vtDateTime, is_nullable);
else if (which.isUUID())

View File

@ -26,6 +26,7 @@ struct ExternalResultDescription
vtEnum16,
vtString,
vtDate,
vtDate32,
vtDateTime,
vtUUID,
vtDateTime64,

View File

@ -455,6 +455,16 @@ inline void writeText(const Null &, WriteBuffer & buf)
writeText(std::string("NULL"), buf);
}
inline void writeText(const NegativeInfinity &, WriteBuffer & buf)
{
writeText(std::string("-Inf"), buf);
}
inline void writeText(const PositiveInfinity &, WriteBuffer & buf)
{
writeText(std::string("+Inf"), buf);
}
String toString(const Field & x)
{
return Field::dispatch(

View File

@ -218,6 +218,8 @@ template <> struct NearestFieldTypeImpl<Tuple> { using Type = Tuple; };
template <> struct NearestFieldTypeImpl<Map> { using Type = Map; };
template <> struct NearestFieldTypeImpl<bool> { using Type = UInt64; };
template <> struct NearestFieldTypeImpl<Null> { using Type = Null; };
template <> struct NearestFieldTypeImpl<NegativeInfinity> { using Type = NegativeInfinity; };
template <> struct NearestFieldTypeImpl<PositiveInfinity> { using Type = PositiveInfinity; };
template <> struct NearestFieldTypeImpl<AggregateFunctionStateData> { using Type = AggregateFunctionStateData; };
@ -269,6 +271,10 @@ public:
Int256 = 25,
Map = 26,
UUID = 27,
// Special types for index analysis
NegativeInfinity = 254,
PositiveInfinity = 255,
};
static const char * toString(Which which)
@ -276,6 +282,8 @@ public:
switch (which)
{
case Null: return "Null";
case NegativeInfinity: return "-Inf";
case PositiveInfinity: return "+Inf";
case UInt64: return "UInt64";
case UInt128: return "UInt128";
case UInt256: return "UInt256";
@ -404,7 +412,10 @@ public:
Types::Which getType() const { return which; }
const char * getTypeName() const { return Types::toString(which); }
bool isNull() const { return which == Types::Null; }
// Non-valued fields (Null and the infinities) are all reported as Null
bool isNull() const { return which == Types::Null || which == Types::NegativeInfinity || which == Types::PositiveInfinity; }
bool isNegativeInfinity() const { return which == Types::NegativeInfinity; }
bool isPositiveInfinity() const { return which == Types::PositiveInfinity; }
template <typename T>
@ -459,7 +470,10 @@ public:
switch (which)
{
case Types::Null: return false;
case Types::Null:
case Types::NegativeInfinity:
case Types::PositiveInfinity:
return false;
case Types::UInt64: return get<UInt64>() < rhs.get<UInt64>();
case Types::UInt128: return get<UInt128>() < rhs.get<UInt128>();
case Types::UInt256: return get<UInt256>() < rhs.get<UInt256>();
@ -496,7 +510,10 @@ public:
switch (which)
{
case Types::Null: return true;
case Types::Null:
case Types::NegativeInfinity:
case Types::PositiveInfinity:
return true;
case Types::UInt64: return get<UInt64>() <= rhs.get<UInt64>();
case Types::UInt128: return get<UInt128>() <= rhs.get<UInt128>();
case Types::UInt256: return get<UInt256>() <= rhs.get<UInt256>();
@ -533,8 +550,11 @@ public:
switch (which)
{
case Types::Null: return true;
case Types::UInt64: return get<UInt64>() == rhs.get<UInt64>();
case Types::Null:
case Types::NegativeInfinity:
case Types::PositiveInfinity:
return true;
case Types::UInt64: return get<UInt64>() == rhs.get<UInt64>();
case Types::Int64: return get<Int64>() == rhs.get<Int64>();
case Types::Float64:
{
@ -573,6 +593,8 @@ public:
switch (field.which)
{
case Types::Null: return f(field.template get<Null>());
case Types::NegativeInfinity: return f(field.template get<NegativeInfinity>());
case Types::PositiveInfinity: return f(field.template get<PositiveInfinity>());
// gcc 8.2.1
#if !defined(__clang__)
#pragma GCC diagnostic push
@ -731,6 +753,8 @@ using Row = std::vector<Field>;
template <> struct Field::TypeToEnum<Null> { static const Types::Which value = Types::Null; };
template <> struct Field::TypeToEnum<NegativeInfinity> { static const Types::Which value = Types::NegativeInfinity; };
template <> struct Field::TypeToEnum<PositiveInfinity> { static const Types::Which value = Types::PositiveInfinity; };
template <> struct Field::TypeToEnum<UInt64> { static const Types::Which value = Types::UInt64; };
template <> struct Field::TypeToEnum<UInt128> { static const Types::Which value = Types::UInt128; };
template <> struct Field::TypeToEnum<UInt256> { static const Types::Which value = Types::UInt256; };
@ -751,6 +775,8 @@ template <> struct Field::TypeToEnum<DecimalField<DateTime64>>{ static const Typ
template <> struct Field::TypeToEnum<AggregateFunctionStateData>{ static const Types::Which value = Types::AggregateFunctionState; };
template <> struct Field::EnumToType<Field::Types::Null> { using Type = Null; };
template <> struct Field::EnumToType<Field::Types::NegativeInfinity> { using Type = NegativeInfinity; };
template <> struct Field::EnumToType<Field::Types::PositiveInfinity> { using Type = PositiveInfinity; };
template <> struct Field::EnumToType<Field::Types::UInt64> { using Type = UInt64; };
template <> struct Field::EnumToType<Field::Types::UInt128> { using Type = UInt128; };
template <> struct Field::EnumToType<Field::Types::UInt256> { using Type = UInt256; };
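In the accurate comparison visitors, -Inf orders below every other value, +Inf orders above every other value, and each infinity equals only itself. A standalone model of that ordering, using a plain std::variant with an int payload rather than the ClickHouse Field and visitor types:

#include <cassert>
#include <variant>

struct NegInf {};
struct PosInf {};
using Value = std::variant<NegInf, int, PosInf>;

bool accurate_less(const Value & l, const Value & r)
{
    if (std::holds_alternative<NegInf>(l))
        return !std::holds_alternative<NegInf>(r);    /// -Inf is below everything except -Inf
    if (std::holds_alternative<PosInf>(r))
        return !std::holds_alternative<PosInf>(l);    /// everything except +Inf is below +Inf
    if (std::holds_alternative<PosInf>(l) || std::holds_alternative<NegInf>(r))
        return false;
    return std::get<int>(l) < std::get<int>(r);       /// both are ordinary values
}

int main()
{
    assert(accurate_less(NegInf{}, 0));
    assert(accurate_less(0, PosInf{}));
    assert(accurate_less(NegInf{}, PosInf{}));
    assert(!accurate_less(PosInf{}, PosInf{}));
    assert(accurate_less(1, 2));
}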

View File

@ -24,16 +24,15 @@ namespace ErrorCodes
}
MySQLClient::MySQLClient(const String & host_, UInt16 port_, const String & user_, const String & password_)
: host(host_), port(port_), user(user_), password(std::move(password_))
: host(host_), port(port_), user(user_), password(std::move(password_)),
client_capabilities(CLIENT_PROTOCOL_41 | CLIENT_PLUGIN_AUTH | CLIENT_SECURE_CONNECTION)
{
mysql_context.client_capabilities = CLIENT_PROTOCOL_41 | CLIENT_PLUGIN_AUTH | CLIENT_SECURE_CONNECTION;
}
MySQLClient::MySQLClient(MySQLClient && other)
: host(std::move(other.host)), port(other.port), user(std::move(other.user)), password(std::move(other.password))
, mysql_context(other.mysql_context)
, client_capabilities(other.client_capabilities)
{
mysql_context.sequence_id = 0;
}
void MySQLClient::connect()
@ -57,7 +56,8 @@ void MySQLClient::connect()
in = std::make_shared<ReadBufferFromPocoSocket>(*socket);
out = std::make_shared<WriteBufferFromPocoSocket>(*socket);
packet_endpoint = mysql_context.makeEndpoint(*in, *out);
packet_endpoint = MySQLProtocol::PacketEndpoint::create(*in, *out, sequence_id);
handshake();
}
@ -69,7 +69,7 @@ void MySQLClient::disconnect()
socket->close();
socket = nullptr;
connected = false;
mysql_context.sequence_id = 0;
sequence_id = 0;
}
/// https://dev.mysql.com/doc/internals/en/connection-phase-packets.html
@ -88,10 +88,10 @@ void MySQLClient::handshake()
String auth_plugin_data = native41.getAuthPluginData();
HandshakeResponse handshake_response(
mysql_context.client_capabilities, MAX_PACKET_LENGTH, charset_utf8, user, "", auth_plugin_data, mysql_native_password);
client_capabilities, MAX_PACKET_LENGTH, charset_utf8, user, "", auth_plugin_data, mysql_native_password);
packet_endpoint->sendPacket<HandshakeResponse>(handshake_response, true);
ResponsePacket packet_response(mysql_context.client_capabilities, true);
ResponsePacket packet_response(client_capabilities, true);
packet_endpoint->receivePacket(packet_response);
packet_endpoint->resetSequenceId();
@ -106,7 +106,7 @@ void MySQLClient::writeCommand(char command, String query)
WriteCommand write_command(command, query);
packet_endpoint->sendPacket<WriteCommand>(write_command, true);
ResponsePacket packet_response(mysql_context.client_capabilities);
ResponsePacket packet_response(client_capabilities);
packet_endpoint->receivePacket(packet_response);
switch (packet_response.getType())
{
@ -125,7 +125,7 @@ void MySQLClient::registerSlaveOnMaster(UInt32 slave_id)
RegisterSlave register_slave(slave_id);
packet_endpoint->sendPacket<RegisterSlave>(register_slave, true);
ResponsePacket packet_response(mysql_context.client_capabilities);
ResponsePacket packet_response(client_capabilities);
packet_endpoint->receivePacket(packet_response);
packet_endpoint->resetSequenceId();
if (packet_response.getType() == PACKET_ERR)

View File

@ -45,7 +45,9 @@ private:
String password;
bool connected = false;
MySQLWireContext mysql_context;
uint8_t sequence_id = 0;
uint32_t client_capabilities = 0;
const UInt8 charset_utf8 = 33;
const String mysql_native_password = "mysql_native_password";

View File

@ -68,15 +68,4 @@ String PacketEndpoint::packetToText(const String & payload)
}
MySQLProtocol::PacketEndpointPtr MySQLWireContext::makeEndpoint(WriteBuffer & out)
{
return MySQLProtocol::PacketEndpoint::create(out, sequence_id);
}
MySQLProtocol::PacketEndpointPtr MySQLWireContext::makeEndpoint(ReadBuffer & in, WriteBuffer & out)
{
return MySQLProtocol::PacketEndpoint::create(in, out, sequence_id);
}
}

View File

@ -58,14 +58,4 @@ using PacketEndpointPtr = std::shared_ptr<PacketEndpoint>;
}
struct MySQLWireContext
{
uint8_t sequence_id = 0;
uint32_t client_capabilities = 0;
size_t max_packet_size = 0;
MySQLProtocol::PacketEndpointPtr makeEndpoint(WriteBuffer & out);
MySQLProtocol::PacketEndpointPtr makeEndpoint(ReadBuffer & in, WriteBuffer & out);
};
}

View File

@ -6,6 +6,7 @@
#include <IO/WriteHelpers.h>
#include <IO/ReadBufferFromString.h>
#include <IO/WriteBufferFromString.h>
#include <sparsehash/dense_hash_map>
namespace DB
@ -161,18 +162,24 @@ NamesAndTypesList NamesAndTypesList::filter(const Names & names) const
NamesAndTypesList NamesAndTypesList::addTypes(const Names & names) const
{
std::unordered_map<std::string_view, const NameAndTypePair *> self_columns;
/// NOTE: It would be better to build this map once in `IStorage` than to rebuild it here on every call.
#if !defined(ARCADIA_BUILD)
google::dense_hash_map<StringRef, const DataTypePtr *, StringRefHash> types;
#else
google::sparsehash::dense_hash_map<StringRef, const DataTypePtr *, StringRefHash> types;
#endif
types.set_empty_key(StringRef());
for (const auto & column : *this)
self_columns[column.name] = &column;
types[column.name] = &column.type;
NamesAndTypesList res;
for (const String & name : names)
{
auto it = self_columns.find(name);
if (it == self_columns.end())
auto it = types.find(name);
if (it == types.end())
throw Exception("No column " + name, ErrorCodes::THERE_IS_NO_COLUMN);
res.emplace_back(*it->second);
res.emplace_back(name, *it->second);
}
return res;
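google::dense_hash_map replaces the std::unordered_map here (typically chosen for faster lookups); unlike the standard map it requires a dedicated empty key that must never be used as a real key, hence set_empty_key(StringRef()) above. A minimal usage sketch with std::string keys, assuming the sparsehash headers are installed:

#include <sparsehash/dense_hash_map>
#include <iostream>
#include <string>

int main()
{
    /// The empty key is reserved for internal bookkeeping and cannot be inserted later.
    google::dense_hash_map<std::string, int> type_sizes;
    type_sizes.set_empty_key(std::string());

    type_sizes["id"] = 8;
    type_sizes["name"] = 64;

    auto it = type_sizes.find("name");
    if (it != type_sizes.end())
        std::cout << it->first << " -> " << it->second << '\n';
}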

View File

@ -90,6 +90,9 @@ void insertPostgreSQLValue(
case ExternalResultDescription::ValueType::vtDate:
assert_cast<ColumnUInt16 &>(column).insertValue(UInt16{LocalDate{std::string(value)}.getDayNum()});
break;
case ExternalResultDescription::ValueType::vtDate32:
assert_cast<ColumnInt32 &>(column).insertValue(Int32{LocalDate{std::string(value)}.getExtenedDayNum()});
break;
case ExternalResultDescription::ValueType::vtDateTime:
{
ReadBufferFromString in(value);

View File

@ -7,7 +7,6 @@
#if USE_LIBPQXX
#include <Core/Block.h>
#include <DataStreams/IBlockInputStream.h>
#include <Core/ExternalResultDescription.h>
#include <Core/Field.h>

View File

@ -57,7 +57,7 @@ class IColumn;
M(Seconds, tcp_keep_alive_timeout, 0, "The time in seconds the connection needs to remain idle before TCP starts sending keepalive probes", 0) \
M(Milliseconds, hedged_connection_timeout_ms, DBMS_DEFAULT_HEDGED_CONNECTION_TIMEOUT_MS, "Connection timeout for establishing connection with replica for Hedged requests", 0) \
M(Milliseconds, receive_data_timeout_ms, DBMS_DEFAULT_RECEIVE_DATA_TIMEOUT_MS, "Connection timeout for receiving first packet of data or packet with positive progress from replica", 0) \
M(Bool, use_hedged_requests, false, "Use hedged requests for distributed queries", 0) \
M(Bool, use_hedged_requests, true, "Use hedged requests for distributed queries", 0) \
M(Bool, allow_changing_replica_until_first_data_packet, false, "Allow HedgedConnections to change replica until receiving first data packet", 0) \
M(Milliseconds, queue_max_wait_ms, 0, "The wait time in the request queue, if the number of concurrent requests exceeds the maximum.", 0) \
M(Milliseconds, connection_pool_max_wait_ms, 0, "The wait time when the connection pool is full.", 0) \
@ -526,6 +526,7 @@ class IColumn;
M(Bool, input_format_values_accurate_types_of_literals, true, "For Values format: when parsing and interpreting expressions using template, check actual type of literal to avoid possible overflow and precision issues.", 0) \
M(Bool, input_format_avro_allow_missing_fields, false, "For Avro/AvroConfluent format: when field is not found in schema use default value instead of error", 0) \
M(URI, format_avro_schema_registry_url, "", "For AvroConfluent format: Confluent Schema Registry URL.", 0) \
M(String, output_format_avro_string_column_pattern, "", "For Avro format: regexp of String columns to select as AVRO string.", 0) \
\
M(Bool, output_format_json_quote_64bit_integers, true, "Controls quoting of 64-bit integers in JSON output format.", 0) \
\

View File

@ -14,6 +14,8 @@ namespace DB
/// Data types for representing elementary values from a database in RAM.
struct Null {};
struct NegativeInfinity {};
struct PositiveInfinity {};
/// Ignore strange gcc warning https://gcc.gnu.org/bugzilla/show_bug.cgi?id=55776
#if !defined(__clang__)
@ -39,6 +41,7 @@ enum class TypeIndex
Float32,
Float64,
Date,
Date32,
DateTime,
DateTime64,
String,
@ -257,6 +260,7 @@ inline constexpr const char * getTypeName(TypeIndex idx)
case TypeIndex::Float32: return "Float32";
case TypeIndex::Float64: return "Float64";
case TypeIndex::Date: return "Date";
case TypeIndex::Date32: return "Date32";
case TypeIndex::DateTime: return "DateTime";
case TypeIndex::DateTime64: return "DateTime64";
case TypeIndex::String: return "String";

View File

@ -73,6 +73,7 @@ bool callOnBasicType(TypeIndex number, F && f)
switch (number)
{
case TypeIndex::Date: return f(TypePair<T, UInt16>());
case TypeIndex::Date32: return f(TypePair<T, Int32>());
case TypeIndex::DateTime: return f(TypePair<T, UInt32>());
case TypeIndex::DateTime64: return f(TypePair<T, DateTime64>());
default:
@ -142,6 +143,7 @@ inline bool callOnBasicTypes(TypeIndex type_num1, TypeIndex type_num2, F && f)
switch (type_num1)
{
case TypeIndex::Date: return callOnBasicType<UInt16, _int, _float, _decimal, _datetime>(type_num2, std::forward<F>(f));
case TypeIndex::Date32: return callOnBasicType<Int32, _int, _float, _decimal, _datetime>(type_num2, std::forward<F>(f));
case TypeIndex::DateTime: return callOnBasicType<UInt32, _int, _float, _decimal, _datetime>(type_num2, std::forward<F>(f));
case TypeIndex::DateTime64: return callOnBasicType<DateTime64, _int, _float, _decimal, _datetime>(type_num2, std::forward<F>(f));
default:
@ -154,6 +156,7 @@ inline bool callOnBasicTypes(TypeIndex type_num1, TypeIndex type_num2, F && f)
class DataTypeDate;
class DataTypeDate32;
class DataTypeString;
class DataTypeFixedString;
class DataTypeUUID;
@ -192,6 +195,7 @@ bool callOnIndexAndDataType(TypeIndex number, F && f, ExtraArgs && ... args)
case TypeIndex::Decimal256: return f(TypePair<DataTypeDecimal<Decimal256>, T>(), std::forward<ExtraArgs>(args)...);
case TypeIndex::Date: return f(TypePair<DataTypeDate, T>(), std::forward<ExtraArgs>(args)...);
case TypeIndex::Date32: return f(TypePair<DataTypeDate32, T>(), std::forward<ExtraArgs>(args)...);
case TypeIndex::DateTime: return f(TypePair<DataTypeDateTime, T>(), std::forward<ExtraArgs>(args)...);
case TypeIndex::DateTime64: return f(TypePair<DataTypeDateTime64, T>(), std::forward<ExtraArgs>(args)...);

View File

@ -6,7 +6,6 @@
#include <Core/ColumnWithTypeAndName.h>
#include <Core/Field.h>
#include <Core/NamesAndTypes.h>
#include <DataStreams/IBlockInputStream.h>
#include <DataTypes/IDataType.h>
#include <Functions/IFunction.h>
#include <IO/WriteBufferFromOStream.h>
@ -28,12 +27,6 @@ std::ostream & operator<< <Field>(std::ostream & stream, const Field & what)
return stream;
}
std::ostream & operator<<(std::ostream & stream, const IBlockInputStream & what)
{
stream << "IBlockInputStream(name = " << what.getName() << ")";
return stream;
}
std::ostream & operator<<(std::ostream & stream, const NameAndTypePair & what)
{
stream << "NameAndTypePair(name = " << what.name << ", type = " << what.type << ")";

View File

@ -10,9 +10,6 @@ class Field;
template <typename T, typename U = std::enable_if_t<std::is_same_v<T, Field>>>
std::ostream & operator<<(std::ostream & stream, const T & what);
class IBlockInputStream;
std::ostream & operator<<(std::ostream & stream, const IBlockInputStream & what);
struct NameAndTypePair;
std::ostream & operator<<(std::ostream & stream, const NameAndTypePair & what);

View File

@ -13,6 +13,7 @@
#include <DataTypes/DataTypesNumber.h>
#include <DataTypes/DataTypesDecimal.h>
#include <DataTypes/DataTypeDate.h>
#include <DataTypes/DataTypeDate32.h>
#include <DataTypes/DataTypeDateTime.h>
#include <DataTypes/DataTypeDateTime64.h>
#include <DataTypes/DataTypeEnum.h>

View File

@ -11,7 +11,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <DataStreams/IBlockInputStream.h>
#include <Processors/Sources/SourceWithProgress.h>
#include <Processors/Transforms/AggregatingTransform.h>

Some files were not shown because too many files have changed in this diff.