Revert "Merge branch 'master' into master"
This reverts commit0543736da1
, reversing changes made toe8e34ba786
.
This commit is contained in:
parent
0543736da1
commit
28279e13f2
.github/PULL_REQUEST_TEMPLATE.md (vendored), 6 changes
@@ -2,22 +2,24 @@ I hereby agree to the terms of the CLA available at: https://yandex.ru/legal/cla

Changelog category (leave one):
- New Feature
- Improvement
- Bug Fix
- Improvement
- Performance Improvement
- Backward Incompatible Change
- Build/Testing/Packaging Improvement
- Documentation (changelog entry is not required)
- Other
- Not for changelog (changelog entry is not required)

Changelog entry (a user-readable short description of the changes that goes to CHANGELOG.md):

...

Detailed description / Documentation draft:

...

...

By adding documentation, you'll allow users to try your new feature immediately, not when someone else will have time to document it later. Documentation is necessary for all features that affect user experience in any way. You can add brief documentation draft above, or add documentation right into your patch as Markdown files in [docs](https://github.com/ClickHouse/ClickHouse/tree/master/docs) folder.
.gitmodules (vendored), 6 changes

@@ -228,9 +228,3 @@
[submodule "contrib/libpqxx"]
	path = contrib/libpqxx
	url = https://github.com/ClickHouse-Extras/libpqxx.git
[submodule "contrib/sqlite-amalgamation"]
	path = contrib/sqlite-amalgamation
	url = https://github.com/azadkuh/sqlite-amalgamation
[submodule "contrib/s2geometry"]
	path = contrib/s2geometry
	url = https://github.com/ClickHouse-Extras/s2geometry.git
@@ -536,12 +536,10 @@ include (cmake/find/rapidjson.cmake)
include (cmake/find/fastops.cmake)
include (cmake/find/odbc.cmake)
include (cmake/find/nanodbc.cmake)
include (cmake/find/sqlite.cmake)
include (cmake/find/rocksdb.cmake)
include (cmake/find/libpqxx.cmake)
include (cmake/find/nuraft.cmake)
include (cmake/find/yaml-cpp.cmake)
include (cmake/find/s2geometry.cmake)

if(NOT USE_INTERNAL_PARQUET_LIBRARY)
    set (ENABLE_ORC OFF CACHE INTERNAL "")
@@ -1,24 +0,0 @@
option(ENABLE_S2_GEOMETRY "Enable S2 geometry library" ${ENABLE_LIBRARIES})

if (ENABLE_S2_GEOMETRY)
    if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/s2geometry")
        message (WARNING "submodule contrib/s2geometry is missing. to fix try run: \n git submodule update --init --recursive")
        set (ENABLE_S2_GEOMETRY 0)
        set (USE_S2_GEOMETRY 0)
    else()
        if (OPENSSL_FOUND)
            set (S2_GEOMETRY_LIBRARY s2)
            set (S2_GEOMETRY_INCLUDE_DIR ${ClickHouse_SOURCE_DIR}/contrib/s2geometry/src/s2)
            set (USE_S2_GEOMETRY 1)
        else()
            message (WARNING "S2 uses OpenSSL, but the latter is absent.")
        endif()
    endif()

    if (NOT USE_S2_GEOMETRY)
        message (${RECONFIGURE_MESSAGE_LEVEL} "Can't enable S2 geometry library")
    endif()
endif()

message (STATUS "Using s2geometry=${USE_S2_GEOMETRY} : ${S2_GEOMETRY_INCLUDE_DIR}")
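For context, ENABLE_S2_GEOMETRY above follows the usual cmake/find pattern and can be toggled at configure time. A minimal sketch, assuming a standard out-of-source build directory (paths are illustrative); per the script above, the option only sticks when the contrib/s2geometry submodule is present and OpenSSL is found:

```sh
# Illustrative only: configure ClickHouse with the S2 geometry library explicitly enabled.
mkdir -p build && cd build
cmake .. -DENABLE_S2_GEOMETRY=ON
# To exclude it instead:
# cmake .. -DENABLE_S2_GEOMETRY=OFF
```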
@@ -1,16 +0,0 @@
option(ENABLE_SQLITE "Enable sqlite" ${ENABLE_LIBRARIES})

if (NOT ENABLE_SQLITE)
    return()
endif()

if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/sqlite-amalgamation/sqlite3.c")
    message (WARNING "submodule contrib/sqlite3-amalgamation is missing. to fix try run: \n git submodule update --init --recursive")
    message (${RECONFIGURE_MESSAGE_LEVEL} "Can't find internal sqlite library")
    set (USE_SQLITE 0)
    return()
endif()

set (USE_SQLITE 1)
set (SQLITE_LIBRARY sqlite)
message (STATUS "Using sqlite=${USE_SQLITE}")
@@ -1,4 +1,4 @@
option(ENABLE_STATS "Enable StatsLib library" ${ENABLE_LIBRARIES})
option(ENABLE_STATS "Enalbe StatsLib library" ${ENABLE_LIBRARIES})

if (ENABLE_STATS)
    if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/stats")
contrib/CMakeLists.txt (vendored), 14 changes

@@ -1,4 +1,3 @@
# Third-party libraries may have substandard code.

# Put all targets defined here and in added subfolders under "contrib/" folder in GUI-based IDEs by default.
# Some of third-party projects may override CMAKE_FOLDER or FOLDER property of their targets, so they will
@@ -11,8 +10,10 @@ else ()
endif ()
unset (_current_dir_name)

set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -w")
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -w")
# Third-party libraries may have substandard code.
# Also remove a possible source of nondeterminism.
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -w -D__DATE__= -D__TIME__= -D__TIMESTAMP__=")
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -w -D__DATE__= -D__TIME__= -D__TIMESTAMP__=")

if (WITH_COVERAGE)
    set (WITHOUT_COVERAGE_LIST ${WITHOUT_COVERAGE})
@@ -328,10 +329,3 @@ endif()

add_subdirectory(fast_float)

if (USE_SQLITE)
    add_subdirectory(sqlite-cmake)
endif()

if (USE_S2_GEOMETRY)
    add_subdirectory(s2geometry-cmake)
endif()
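The -D__DATE__= -D__TIME__= -D__TIMESTAMP__= flags in the hunk above blank the compiler's date/time macros so that third-party objects do not change between otherwise identical builds. A small sketch of the effect, with a hypothetical file name and GCC/Clang-style flags (-w also silences the "redefining builtin macro" warning, mirroring the contrib settings):

```sh
# Illustrative only: with the date/time macros blanked, two compilations of the
# same source produce byte-identical object files; otherwise __DATE__/__TIME__
# would embed the build time into the binary.
cat > demo.c <<'EOF'
const char *built = __DATE__ " " __TIME__;
EOF
gcc -w -D__DATE__= -D__TIME__= -D__TIMESTAMP__= -c demo.c -o a.o
gcc -w -D__DATE__= -D__TIME__= -D__TIMESTAMP__= -c demo.c -o b.o
cmp a.o b.o && echo "reproducible"
```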
contrib/poco (vendored), 2 changes

@@ -1 +1 @@
Subproject commit 7351c4691b5d401f59e3959adfc5b4fa263b32da
Subproject commit 5994506908028612869fee627d68d8212dfe7c1e
contrib/rocksdb (vendored), 2 changes

@@ -1 +1 @@
Subproject commit dac0e9a68080c837d6b6223921f3fc151abbfcdc
Subproject commit 07c77549a20b63ff6981b400085eba36bb5c80c4
@@ -70,6 +70,11 @@ else()
    endif()
endif()

set(BUILD_VERSION_CC rocksdb_build_version.cc)
add_library(rocksdb_build_version OBJECT ${BUILD_VERSION_CC})

target_include_directories(rocksdb_build_version PRIVATE "${ROCKSDB_SOURCE_DIR}/util")

include(CheckCCompilerFlag)
if(CMAKE_SYSTEM_PROCESSOR MATCHES "^(powerpc|ppc)64")
    CHECK_C_COMPILER_FLAG("-mcpu=power9" HAS_POWER9)
@ -238,293 +243,272 @@ find_package(Threads REQUIRED)
|
||||
# Main library source code
|
||||
|
||||
set(SOURCES
|
||||
${ROCKSDB_SOURCE_DIR}/cache/cache.cc
|
||||
${ROCKSDB_SOURCE_DIR}/cache/cache_entry_roles.cc
|
||||
${ROCKSDB_SOURCE_DIR}/cache/clock_cache.cc
|
||||
${ROCKSDB_SOURCE_DIR}/cache/lru_cache.cc
|
||||
${ROCKSDB_SOURCE_DIR}/cache/sharded_cache.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/arena_wrapped_db_iter.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/blob/blob_fetcher.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_addition.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_builder.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_cache.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_garbage.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_meta.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_reader.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/blob/blob_garbage_meter.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/blob/blob_log_format.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/blob/blob_log_sequential_reader.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/blob/blob_log_writer.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/builder.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/c.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/column_family.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/compaction/compaction.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_iterator.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_picker.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_job.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_picker_fifo.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_picker_level.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_picker_universal.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/compaction/sst_partitioner.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/convenience.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/db_filesnapshot.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/db_impl/compacted_db_impl.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_write.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_compaction_flush.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_files.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_open.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_debug.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_experimental.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_readonly.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_secondary.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/db_info_dumper.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/db_iter.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/dbformat.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/error_handler.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/event_helpers.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/experimental.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/external_sst_file_ingestion_job.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/file_indexer.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/flush_job.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/flush_scheduler.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/forward_iterator.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/import_column_family_job.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/internal_stats.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/logs_with_prep_tracker.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/log_reader.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/log_writer.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/malloc_stats.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/memtable.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/memtable_list.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/merge_helper.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/merge_operator.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/output_validator.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/periodic_work_scheduler.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/range_del_aggregator.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/range_tombstone_fragmenter.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/repair.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/snapshot_impl.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/table_cache.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/table_properties_collector.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/transaction_log_impl.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/trim_history_scheduler.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/version_builder.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/version_edit.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/version_edit_handler.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/version_set.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/wal_edit.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/wal_manager.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/write_batch.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/write_batch_base.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/write_controller.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/write_thread.cc
|
||||
${ROCKSDB_SOURCE_DIR}/env/composite_env.cc
|
||||
${ROCKSDB_SOURCE_DIR}/env/env.cc
|
||||
${ROCKSDB_SOURCE_DIR}/env/env_chroot.cc
|
||||
${ROCKSDB_SOURCE_DIR}/env/env_encryption.cc
|
||||
${ROCKSDB_SOURCE_DIR}/env/env_hdfs.cc
|
||||
${ROCKSDB_SOURCE_DIR}/env/file_system.cc
|
||||
${ROCKSDB_SOURCE_DIR}/env/file_system_tracer.cc
|
||||
${ROCKSDB_SOURCE_DIR}/env/fs_remap.cc
|
||||
${ROCKSDB_SOURCE_DIR}/env/mock_env.cc
|
||||
${ROCKSDB_SOURCE_DIR}/file/delete_scheduler.cc
|
||||
${ROCKSDB_SOURCE_DIR}/file/file_prefetch_buffer.cc
|
||||
${ROCKSDB_SOURCE_DIR}/file/file_util.cc
|
||||
${ROCKSDB_SOURCE_DIR}/file/filename.cc
|
||||
${ROCKSDB_SOURCE_DIR}/file/line_file_reader.cc
|
||||
${ROCKSDB_SOURCE_DIR}/file/random_access_file_reader.cc
|
||||
${ROCKSDB_SOURCE_DIR}/file/read_write_util.cc
|
||||
${ROCKSDB_SOURCE_DIR}/file/readahead_raf.cc
|
||||
${ROCKSDB_SOURCE_DIR}/file/sequence_file_reader.cc
|
||||
${ROCKSDB_SOURCE_DIR}/file/sst_file_manager_impl.cc
|
||||
${ROCKSDB_SOURCE_DIR}/file/writable_file_writer.cc
|
||||
${ROCKSDB_SOURCE_DIR}/logging/auto_roll_logger.cc
|
||||
${ROCKSDB_SOURCE_DIR}/logging/event_logger.cc
|
||||
${ROCKSDB_SOURCE_DIR}/logging/log_buffer.cc
|
||||
${ROCKSDB_SOURCE_DIR}/memory/arena.cc
|
||||
${ROCKSDB_SOURCE_DIR}/memory/concurrent_arena.cc
|
||||
${ROCKSDB_SOURCE_DIR}/memory/jemalloc_nodump_allocator.cc
|
||||
${ROCKSDB_SOURCE_DIR}/memory/memkind_kmem_allocator.cc
|
||||
${ROCKSDB_SOURCE_DIR}/memtable/alloc_tracker.cc
|
||||
${ROCKSDB_SOURCE_DIR}/memtable/hash_linklist_rep.cc
|
||||
${ROCKSDB_SOURCE_DIR}/memtable/hash_skiplist_rep.cc
|
||||
${ROCKSDB_SOURCE_DIR}/memtable/skiplistrep.cc
|
||||
${ROCKSDB_SOURCE_DIR}/memtable/vectorrep.cc
|
||||
${ROCKSDB_SOURCE_DIR}/memtable/write_buffer_manager.cc
|
||||
${ROCKSDB_SOURCE_DIR}/monitoring/histogram.cc
|
||||
${ROCKSDB_SOURCE_DIR}/monitoring/histogram_windowing.cc
|
||||
${ROCKSDB_SOURCE_DIR}/monitoring/in_memory_stats_history.cc
|
||||
${ROCKSDB_SOURCE_DIR}/monitoring/instrumented_mutex.cc
|
||||
${ROCKSDB_SOURCE_DIR}/monitoring/iostats_context.cc
|
||||
${ROCKSDB_SOURCE_DIR}/monitoring/perf_context.cc
|
||||
${ROCKSDB_SOURCE_DIR}/monitoring/perf_level.cc
|
||||
${ROCKSDB_SOURCE_DIR}/monitoring/persistent_stats_history.cc
|
||||
${ROCKSDB_SOURCE_DIR}/monitoring/statistics.cc
|
||||
${ROCKSDB_SOURCE_DIR}/monitoring/thread_status_impl.cc
|
||||
${ROCKSDB_SOURCE_DIR}/monitoring/thread_status_updater.cc
|
||||
${ROCKSDB_SOURCE_DIR}/monitoring/thread_status_util.cc
|
||||
${ROCKSDB_SOURCE_DIR}/monitoring/thread_status_util_debug.cc
|
||||
${ROCKSDB_SOURCE_DIR}/options/cf_options.cc
|
||||
${ROCKSDB_SOURCE_DIR}/options/configurable.cc
|
||||
${ROCKSDB_SOURCE_DIR}/options/customizable.cc
|
||||
${ROCKSDB_SOURCE_DIR}/options/db_options.cc
|
||||
${ROCKSDB_SOURCE_DIR}/options/options.cc
|
||||
${ROCKSDB_SOURCE_DIR}/options/options_helper.cc
|
||||
${ROCKSDB_SOURCE_DIR}/options/options_parser.cc
|
||||
${ROCKSDB_SOURCE_DIR}/port/stack_trace.cc
|
||||
${ROCKSDB_SOURCE_DIR}/table/adaptive/adaptive_table_factory.cc
|
||||
${ROCKSDB_SOURCE_DIR}/table/block_based/binary_search_index_reader.cc
|
||||
${ROCKSDB_SOURCE_DIR}/table/block_based/block.cc
|
||||
${ROCKSDB_SOURCE_DIR}/table/block_based/block_based_filter_block.cc
|
||||
${ROCKSDB_SOURCE_DIR}/table/block_based/block_based_table_builder.cc
|
||||
${ROCKSDB_SOURCE_DIR}/table/block_based/block_based_table_factory.cc
|
||||
${ROCKSDB_SOURCE_DIR}/table/block_based/block_based_table_iterator.cc
|
||||
${ROCKSDB_SOURCE_DIR}/table/block_based/block_based_table_reader.cc
|
||||
${ROCKSDB_SOURCE_DIR}/table/block_based/block_builder.cc
|
||||
${ROCKSDB_SOURCE_DIR}/table/block_based/block_prefetcher.cc
|
||||
${ROCKSDB_SOURCE_DIR}/table/block_based/block_prefix_index.cc
|
||||
${ROCKSDB_SOURCE_DIR}/table/block_based/data_block_hash_index.cc
|
||||
${ROCKSDB_SOURCE_DIR}/table/block_based/data_block_footer.cc
|
||||
${ROCKSDB_SOURCE_DIR}/table/block_based/filter_block_reader_common.cc
|
||||
${ROCKSDB_SOURCE_DIR}/table/block_based/filter_policy.cc
|
||||
${ROCKSDB_SOURCE_DIR}/table/block_based/flush_block_policy.cc
|
||||
${ROCKSDB_SOURCE_DIR}/table/block_based/full_filter_block.cc
|
||||
${ROCKSDB_SOURCE_DIR}/table/block_based/hash_index_reader.cc
|
||||
${ROCKSDB_SOURCE_DIR}/table/block_based/index_builder.cc
|
||||
${ROCKSDB_SOURCE_DIR}/table/block_based/index_reader_common.cc
|
||||
${ROCKSDB_SOURCE_DIR}/table/block_based/parsed_full_filter_block.cc
|
||||
${ROCKSDB_SOURCE_DIR}/table/block_based/partitioned_filter_block.cc
|
||||
${ROCKSDB_SOURCE_DIR}/table/block_based/partitioned_index_iterator.cc
|
||||
${ROCKSDB_SOURCE_DIR}/table/block_based/partitioned_index_reader.cc
|
||||
${ROCKSDB_SOURCE_DIR}/table/block_based/reader_common.cc
|
||||
${ROCKSDB_SOURCE_DIR}/table/block_based/uncompression_dict_reader.cc
|
||||
${ROCKSDB_SOURCE_DIR}/table/block_fetcher.cc
|
||||
${ROCKSDB_SOURCE_DIR}/table/cuckoo/cuckoo_table_builder.cc
|
||||
${ROCKSDB_SOURCE_DIR}/table/cuckoo/cuckoo_table_factory.cc
|
||||
${ROCKSDB_SOURCE_DIR}/table/cuckoo/cuckoo_table_reader.cc
|
||||
${ROCKSDB_SOURCE_DIR}/table/format.cc
|
||||
${ROCKSDB_SOURCE_DIR}/table/get_context.cc
|
||||
${ROCKSDB_SOURCE_DIR}/table/iterator.cc
|
||||
${ROCKSDB_SOURCE_DIR}/table/merging_iterator.cc
|
||||
${ROCKSDB_SOURCE_DIR}/table/meta_blocks.cc
|
||||
${ROCKSDB_SOURCE_DIR}/table/persistent_cache_helper.cc
|
||||
${ROCKSDB_SOURCE_DIR}/table/plain/plain_table_bloom.cc
|
||||
${ROCKSDB_SOURCE_DIR}/table/plain/plain_table_builder.cc
|
||||
${ROCKSDB_SOURCE_DIR}/table/plain/plain_table_factory.cc
|
||||
${ROCKSDB_SOURCE_DIR}/table/plain/plain_table_index.cc
|
||||
${ROCKSDB_SOURCE_DIR}/table/plain/plain_table_key_coding.cc
|
||||
${ROCKSDB_SOURCE_DIR}/table/plain/plain_table_reader.cc
|
||||
${ROCKSDB_SOURCE_DIR}/table/sst_file_dumper.cc
|
||||
${ROCKSDB_SOURCE_DIR}/table/sst_file_reader.cc
|
||||
${ROCKSDB_SOURCE_DIR}/table/sst_file_writer.cc
|
||||
${ROCKSDB_SOURCE_DIR}/table/table_factory.cc
|
||||
${ROCKSDB_SOURCE_DIR}/table/table_properties.cc
|
||||
${ROCKSDB_SOURCE_DIR}/table/two_level_iterator.cc
|
||||
${ROCKSDB_SOURCE_DIR}/test_util/sync_point.cc
|
||||
${ROCKSDB_SOURCE_DIR}/test_util/sync_point_impl.cc
|
||||
${ROCKSDB_SOURCE_DIR}/test_util/testutil.cc
|
||||
${ROCKSDB_SOURCE_DIR}/test_util/transaction_test_util.cc
|
||||
${ROCKSDB_SOURCE_DIR}/tools/block_cache_analyzer/block_cache_trace_analyzer.cc
|
||||
${ROCKSDB_SOURCE_DIR}/tools/dump/db_dump_tool.cc
|
||||
${ROCKSDB_SOURCE_DIR}/tools/io_tracer_parser_tool.cc
|
||||
${ROCKSDB_SOURCE_DIR}/tools/ldb_cmd.cc
|
||||
${ROCKSDB_SOURCE_DIR}/tools/ldb_tool.cc
|
||||
${ROCKSDB_SOURCE_DIR}/tools/sst_dump_tool.cc
|
||||
${ROCKSDB_SOURCE_DIR}/tools/trace_analyzer_tool.cc
|
||||
${ROCKSDB_SOURCE_DIR}/trace_replay/trace_replay.cc
|
||||
${ROCKSDB_SOURCE_DIR}/trace_replay/block_cache_tracer.cc
|
||||
${ROCKSDB_SOURCE_DIR}/trace_replay/io_tracer.cc
|
||||
${ROCKSDB_SOURCE_DIR}/util/coding.cc
|
||||
${ROCKSDB_SOURCE_DIR}/util/compaction_job_stats_impl.cc
|
||||
${ROCKSDB_SOURCE_DIR}/util/comparator.cc
|
||||
${ROCKSDB_SOURCE_DIR}/util/compression_context_cache.cc
|
||||
${ROCKSDB_SOURCE_DIR}/util/concurrent_task_limiter_impl.cc
|
||||
${ROCKSDB_SOURCE_DIR}/util/crc32c.cc
|
||||
${ROCKSDB_SOURCE_DIR}/util/dynamic_bloom.cc
|
||||
${ROCKSDB_SOURCE_DIR}/util/hash.cc
|
||||
${ROCKSDB_SOURCE_DIR}/util/murmurhash.cc
|
||||
${ROCKSDB_SOURCE_DIR}/util/random.cc
|
||||
${ROCKSDB_SOURCE_DIR}/util/rate_limiter.cc
|
||||
${ROCKSDB_SOURCE_DIR}/util/ribbon_config.cc
|
||||
${ROCKSDB_SOURCE_DIR}/util/slice.cc
|
||||
${ROCKSDB_SOURCE_DIR}/util/file_checksum_helper.cc
|
||||
${ROCKSDB_SOURCE_DIR}/util/status.cc
|
||||
${ROCKSDB_SOURCE_DIR}/util/string_util.cc
|
||||
${ROCKSDB_SOURCE_DIR}/util/thread_local.cc
|
||||
${ROCKSDB_SOURCE_DIR}/util/threadpool_imp.cc
|
||||
${ROCKSDB_SOURCE_DIR}/util/xxhash.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/backupable/backupable_db.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_compaction_filter.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_db.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_db_impl.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_db_impl_filesnapshot.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_dump_tool.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_file.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/cassandra/cassandra_compaction_filter.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/cassandra/format.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/cassandra/merge_operator.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/checkpoint/checkpoint_impl.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/compaction_filters/remove_emptyvalue_compactionfilter.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/debug.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/env_mirror.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/env_timed.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/fault_injection_env.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/fault_injection_fs.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/leveldb_options/leveldb_options.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/memory/memory_util.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/bytesxor.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/max.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/put.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/sortlist.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/string_append/stringappend.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/string_append/stringappend2.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/uint64add.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/object_registry.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/option_change_migration/option_change_migration.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/options/options_util.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/persistent_cache/block_cache_tier.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/persistent_cache/block_cache_tier_file.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/persistent_cache/block_cache_tier_metadata.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/persistent_cache/persistent_cache_tier.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/persistent_cache/volatile_tier_impl.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/simulator_cache/cache_simulator.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/simulator_cache/sim_cache.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/table_properties_collectors/compact_on_deletion_collector.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/trace/file_trace_reader_writer.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/lock_manager.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/point/point_lock_tracker.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/point/point_lock_manager.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/range_tree_lock_manager.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/range_tree_lock_tracker.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/transactions/optimistic_transaction_db_impl.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/transactions/optimistic_transaction.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/transactions/pessimistic_transaction.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/transactions/pessimistic_transaction_db.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/transactions/snapshot_checker.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/transactions/transaction_base.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/transactions/transaction_db_mutex_impl.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/transactions/transaction_util.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_prepared_txn.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_prepared_txn_db.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_unprepared_txn.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_unprepared_txn_db.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/ttl/db_ttl_impl.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/write_batch_with_index/write_batch_with_index.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/write_batch_with_index/write_batch_with_index_internal.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/lib/locktree/concurrent_tree.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/lib/locktree/keyrange.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/lib/locktree/lock_request.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/lib/locktree/locktree.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/lib/locktree/manager.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/lib/locktree/range_buffer.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/lib/locktree/treenode.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/lib/locktree/txnid_set.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/lib/locktree/wfg.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/lib/standalone_port.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/lib/util/dbt.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/lib/util/memarena.cc
|
||||
rocksdb_build_version.cc)
|
||||
"${ROCKSDB_SOURCE_DIR}/cache/cache.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/cache/clock_cache.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/cache/lru_cache.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/cache/sharded_cache.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/db/arena_wrapped_db_iter.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_addition.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_builder.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_cache.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_garbage.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_meta.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_reader.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/db/blob/blob_log_format.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/db/blob/blob_log_sequential_reader.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/db/blob/blob_log_writer.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/db/builder.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/db/c.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/db/column_family.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/db/compacted_db_impl.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/db/compaction/compaction.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_iterator.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_picker.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_job.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_picker_fifo.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_picker_level.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_picker_universal.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/db/compaction/sst_partitioner.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/db/convenience.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/db/db_filesnapshot.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_write.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_compaction_flush.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_files.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_open.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_debug.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_experimental.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_readonly.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_secondary.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/db/db_info_dumper.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/db/db_iter.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/db/dbformat.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/db/error_handler.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/db/event_helpers.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/db/experimental.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/db/external_sst_file_ingestion_job.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/db/file_indexer.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/db/flush_job.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/db/flush_scheduler.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/db/forward_iterator.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/db/import_column_family_job.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/db/internal_stats.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/db/logs_with_prep_tracker.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/db/log_reader.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/db/log_writer.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/db/malloc_stats.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/db/memtable.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/db/memtable_list.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/db/merge_helper.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/db/merge_operator.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/db/output_validator.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/db/periodic_work_scheduler.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/db/range_del_aggregator.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/db/range_tombstone_fragmenter.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/db/repair.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/db/snapshot_impl.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/db/table_cache.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/db/table_properties_collector.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/db/transaction_log_impl.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/db/trim_history_scheduler.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/db/version_builder.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/db/version_edit.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/db/version_edit_handler.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/db/version_set.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/db/wal_edit.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/db/wal_manager.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/db/write_batch.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/db/write_batch_base.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/db/write_controller.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/db/write_thread.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/env/env.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/env/env_chroot.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/env/env_encryption.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/env/env_hdfs.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/env/file_system.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/env/file_system_tracer.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/env/mock_env.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/file/delete_scheduler.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/file/file_prefetch_buffer.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/file/file_util.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/file/filename.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/file/random_access_file_reader.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/file/read_write_util.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/file/readahead_raf.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/file/sequence_file_reader.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/file/sst_file_manager_impl.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/file/writable_file_writer.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/logging/auto_roll_logger.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/logging/event_logger.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/logging/log_buffer.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/memory/arena.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/memory/concurrent_arena.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/memory/jemalloc_nodump_allocator.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/memory/memkind_kmem_allocator.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/memtable/alloc_tracker.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/memtable/hash_linklist_rep.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/memtable/hash_skiplist_rep.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/memtable/skiplistrep.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/memtable/vectorrep.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/memtable/write_buffer_manager.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/monitoring/histogram.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/monitoring/histogram_windowing.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/monitoring/in_memory_stats_history.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/monitoring/instrumented_mutex.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/monitoring/iostats_context.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/monitoring/perf_context.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/monitoring/perf_level.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/monitoring/persistent_stats_history.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/monitoring/statistics.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/monitoring/thread_status_impl.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/monitoring/thread_status_updater.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/monitoring/thread_status_util.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/monitoring/thread_status_util_debug.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/options/cf_options.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/options/configurable.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/options/customizable.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/options/db_options.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/options/options.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/options/options_helper.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/options/options_parser.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/port/stack_trace.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/table/adaptive/adaptive_table_factory.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/table/block_based/binary_search_index_reader.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/table/block_based/block.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/table/block_based/block_based_filter_block.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/table/block_based/block_based_table_builder.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/table/block_based/block_based_table_factory.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/table/block_based/block_based_table_iterator.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/table/block_based/block_based_table_reader.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/table/block_based/block_builder.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/table/block_based/block_prefetcher.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/table/block_based/block_prefix_index.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/table/block_based/data_block_hash_index.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/table/block_based/data_block_footer.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/table/block_based/filter_block_reader_common.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/table/block_based/filter_policy.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/table/block_based/flush_block_policy.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/table/block_based/full_filter_block.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/table/block_based/hash_index_reader.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/table/block_based/index_builder.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/table/block_based/index_reader_common.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/table/block_based/parsed_full_filter_block.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/table/block_based/partitioned_filter_block.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/table/block_based/partitioned_index_iterator.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/table/block_based/partitioned_index_reader.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/table/block_based/reader_common.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/table/block_based/uncompression_dict_reader.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/table/block_fetcher.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/table/cuckoo/cuckoo_table_builder.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/table/cuckoo/cuckoo_table_factory.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/table/cuckoo/cuckoo_table_reader.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/table/format.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/table/get_context.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/table/iterator.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/table/merging_iterator.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/table/meta_blocks.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/table/persistent_cache_helper.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/table/plain/plain_table_bloom.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/table/plain/plain_table_builder.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/table/plain/plain_table_factory.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/table/plain/plain_table_index.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/table/plain/plain_table_key_coding.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/table/plain/plain_table_reader.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/table/sst_file_dumper.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/table/sst_file_reader.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/table/sst_file_writer.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/table/table_factory.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/table/table_properties.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/table/two_level_iterator.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/test_util/sync_point.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/test_util/sync_point_impl.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/test_util/testutil.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/test_util/transaction_test_util.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/tools/block_cache_analyzer/block_cache_trace_analyzer.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/tools/dump/db_dump_tool.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/tools/io_tracer_parser_tool.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/tools/ldb_cmd.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/tools/ldb_tool.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/tools/sst_dump_tool.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/tools/trace_analyzer_tool.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/trace_replay/trace_replay.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/trace_replay/block_cache_tracer.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/trace_replay/io_tracer.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/util/coding.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/util/compaction_job_stats_impl.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/util/comparator.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/util/compression_context_cache.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/util/concurrent_task_limiter_impl.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/util/crc32c.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/util/dynamic_bloom.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/util/hash.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/util/murmurhash.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/util/random.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/util/rate_limiter.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/util/slice.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/util/file_checksum_helper.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/util/status.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/util/string_util.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/util/thread_local.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/util/threadpool_imp.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/util/xxhash.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/utilities/backupable/backupable_db.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_compaction_filter.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_db.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_db_impl.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_db_impl_filesnapshot.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_dump_tool.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_file.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/utilities/cassandra/cassandra_compaction_filter.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/utilities/cassandra/format.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/utilities/cassandra/merge_operator.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/utilities/checkpoint/checkpoint_impl.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/utilities/compaction_filters/remove_emptyvalue_compactionfilter.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/utilities/debug.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/utilities/env_mirror.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/utilities/env_timed.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/utilities/fault_injection_env.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/utilities/fault_injection_fs.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/utilities/leveldb_options/leveldb_options.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/utilities/memory/memory_util.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/bytesxor.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/max.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/put.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/sortlist.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/string_append/stringappend.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/string_append/stringappend2.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/uint64add.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/utilities/object_registry.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/utilities/option_change_migration/option_change_migration.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/utilities/options/options_util.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/utilities/persistent_cache/block_cache_tier.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/utilities/persistent_cache/block_cache_tier_file.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/utilities/persistent_cache/block_cache_tier_metadata.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/utilities/persistent_cache/persistent_cache_tier.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/utilities/persistent_cache/volatile_tier_impl.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/utilities/simulator_cache/cache_simulator.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/utilities/simulator_cache/sim_cache.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/utilities/table_properties_collectors/compact_on_deletion_collector.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/utilities/trace/file_trace_reader_writer.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/lock_manager.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/point/point_lock_tracker.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/point/point_lock_manager.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/utilities/transactions/optimistic_transaction_db_impl.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/utilities/transactions/optimistic_transaction.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/utilities/transactions/pessimistic_transaction.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/utilities/transactions/pessimistic_transaction_db.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/utilities/transactions/snapshot_checker.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/utilities/transactions/transaction_base.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/utilities/transactions/transaction_db_mutex_impl.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/utilities/transactions/transaction_util.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_prepared_txn.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_prepared_txn_db.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_unprepared_txn.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_unprepared_txn_db.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/utilities/ttl/db_ttl_impl.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/utilities/write_batch_with_index/write_batch_with_index.cc"
|
||||
"${ROCKSDB_SOURCE_DIR}/utilities/write_batch_with_index/write_batch_with_index_internal.cc"
|
||||
$<TARGET_OBJECTS:rocksdb_build_version>)
|
||||
|
||||
if(HAVE_SSE42 AND NOT MSVC)
|
||||
set_source_files_properties(
|
||||
|
@@ -1,62 +1,3 @@
// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
/// This file was edited for ClickHouse.

#include <memory>

#include "rocksdb/version.h"
#include "util/string_util.h"

// The build script may replace these values with real values based
// on whether or not GIT is available and the platform settings
static const std::string rocksdb_build_git_sha = "rocksdb_build_git_sha:0";
static const std::string rocksdb_build_git_tag = "rocksdb_build_git_tag:master";
static const std::string rocksdb_build_date = "rocksdb_build_date:2000-01-01";

namespace ROCKSDB_NAMESPACE {
static void AddProperty(std::unordered_map<std::string, std::string> *props, const std::string& name) {
  size_t colon = name.find(":");
  if (colon != std::string::npos && colon > 0 && colon < name.length() - 1) {
    // If we found a "@:", then this property was a build-time substitution that failed. Skip it
    size_t at = name.find("@", colon);
    if (at != colon + 1) {
      // Everything before the colon is the name, after is the value
      (*props)[name.substr(0, colon)] = name.substr(colon + 1);
    }
  }
}

static std::unordered_map<std::string, std::string>* LoadPropertiesSet() {
  auto * properties = new std::unordered_map<std::string, std::string>();
  AddProperty(properties, rocksdb_build_git_sha);
  AddProperty(properties, rocksdb_build_git_tag);
  AddProperty(properties, rocksdb_build_date);
  return properties;
}

const std::unordered_map<std::string, std::string>& GetRocksBuildProperties() {
  static std::unique_ptr<std::unordered_map<std::string, std::string>> props(LoadPropertiesSet());
  return *props;
}

std::string GetRocksVersionAsString(bool with_patch) {
  std::string version = ToString(ROCKSDB_MAJOR) + "." + ToString(ROCKSDB_MINOR);
  if (with_patch) {
    return version + "." + ToString(ROCKSDB_PATCH);
  } else {
    return version;
  }
}

std::string GetRocksBuildInfoAsString(const std::string& program, bool verbose) {
  std::string info = program + " (RocksDB) " + GetRocksVersionAsString(true);
  if (verbose) {
    for (const auto& it : GetRocksBuildProperties()) {
      info.append("\n    ");
      info.append(it.first);
      info.append(": ");
      info.append(it.second);
    }
  }
  return info;
}
} // namespace ROCKSDB_NAMESPACE
const char* rocksdb_build_git_sha = "rocksdb_build_git_sha:0";
const char* rocksdb_build_git_date = "rocksdb_build_git_date:2000-01-01";
const char* rocksdb_build_compile_date = "2000-01-01";
contrib/s2geometry (vendored), 1 change

@@ -1 +0,0 @@
Subproject commit 20ea540d81f4575a3fc0aea585aac611bcd03ede
@ -1,128 +0,0 @@
|
||||
set(S2_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/s2geometry/src")
|
||||
|
||||
set(S2_SRCS
|
||||
"${S2_SOURCE_DIR}/s2/base/stringprintf.cc"
|
||||
"${S2_SOURCE_DIR}/s2/base/strtoint.cc"
|
||||
"${S2_SOURCE_DIR}/s2/encoded_s2cell_id_vector.cc"
|
||||
"${S2_SOURCE_DIR}/s2/encoded_s2point_vector.cc"
|
||||
"${S2_SOURCE_DIR}/s2/encoded_s2shape_index.cc"
|
||||
"${S2_SOURCE_DIR}/s2/encoded_string_vector.cc"
|
||||
"${S2_SOURCE_DIR}/s2/id_set_lexicon.cc"
|
||||
"${S2_SOURCE_DIR}/s2/mutable_s2shape_index.cc"
|
||||
"${S2_SOURCE_DIR}/s2/r2rect.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s1angle.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s1chord_angle.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s1interval.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2boolean_operation.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2builder.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2builder_graph.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2builderutil_closed_set_normalizer.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2builderutil_find_polygon_degeneracies.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2builderutil_lax_polygon_layer.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2builderutil_s2point_vector_layer.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2builderutil_s2polygon_layer.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2builderutil_s2polyline_layer.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2builderutil_s2polyline_vector_layer.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2builderutil_snap_functions.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2cap.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2cell.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2cell_id.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2cell_index.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2cell_union.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2centroids.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2closest_cell_query.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2closest_edge_query.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2closest_point_query.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2contains_vertex_query.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2convex_hull_query.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2coords.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2crossing_edge_query.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2debug.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2earth.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2edge_clipping.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2edge_crosser.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2edge_crossings.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2edge_distances.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2edge_tessellator.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2error.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2furthest_edge_query.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2latlng.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2latlng_rect.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2latlng_rect_bounder.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2lax_loop_shape.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2lax_polygon_shape.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2lax_polyline_shape.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2loop.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2loop_measures.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2measures.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2metrics.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2max_distance_targets.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2min_distance_targets.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2padded_cell.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2point_compression.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2point_region.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2pointutil.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2polygon.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2polyline.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2polyline_alignment.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2polyline_measures.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2polyline_simplifier.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2predicates.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2projections.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2r2rect.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2region.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2region_term_indexer.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2region_coverer.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2region_intersection.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2region_union.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2shape_index.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2shape_index_buffered_region.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2shape_index_measures.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2shape_measures.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2shapeutil_build_polygon_boundaries.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2shapeutil_coding.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2shapeutil_contains_brute_force.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2shapeutil_edge_iterator.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2shapeutil_get_reference_point.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2shapeutil_range_iterator.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2shapeutil_visit_crossing_edge_pairs.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2text_format.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2wedge_relations.cc"
|
||||
"${S2_SOURCE_DIR}/s2/strings/ostringstream.cc"
|
||||
"${S2_SOURCE_DIR}/s2/strings/serialize.cc"
|
||||
# ClickHouse doesn't use strings from abseil.
|
||||
# So, there is no duplicate symbols.
|
||||
"${S2_SOURCE_DIR}/s2/third_party/absl/base/dynamic_annotations.cc"
|
||||
"${S2_SOURCE_DIR}/s2/third_party/absl/base/internal/raw_logging.cc"
|
||||
"${S2_SOURCE_DIR}/s2/third_party/absl/base/internal/throw_delegate.cc"
|
||||
"${S2_SOURCE_DIR}/s2/third_party/absl/numeric/int128.cc"
|
||||
"${S2_SOURCE_DIR}/s2/third_party/absl/strings/ascii.cc"
|
||||
"${S2_SOURCE_DIR}/s2/third_party/absl/strings/match.cc"
|
||||
"${S2_SOURCE_DIR}/s2/third_party/absl/strings/numbers.cc"
|
||||
"${S2_SOURCE_DIR}/s2/third_party/absl/strings/str_cat.cc"
|
||||
"${S2_SOURCE_DIR}/s2/third_party/absl/strings/str_split.cc"
|
||||
"${S2_SOURCE_DIR}/s2/third_party/absl/strings/string_view.cc"
|
||||
"${S2_SOURCE_DIR}/s2/third_party/absl/strings/strip.cc"
|
||||
"${S2_SOURCE_DIR}/s2/third_party/absl/strings/internal/memutil.cc"
|
||||
"${S2_SOURCE_DIR}/s2/util/bits/bit-interleave.cc"
|
||||
"${S2_SOURCE_DIR}/s2/util/bits/bits.cc"
|
||||
"${S2_SOURCE_DIR}/s2/util/coding/coder.cc"
|
||||
"${S2_SOURCE_DIR}/s2/util/coding/varint.cc"
|
||||
"${S2_SOURCE_DIR}/s2/util/math/exactfloat/exactfloat.cc"
|
||||
"${S2_SOURCE_DIR}/s2/util/math/mathutil.cc"
|
||||
"${S2_SOURCE_DIR}/s2/util/units/length-units.cc"
|
||||
)

add_library(s2 ${S2_SRCS})

set_property(TARGET s2 PROPERTY CXX_STANDARD 11)

if (OPENSSL_FOUND)
    target_link_libraries(s2 PRIVATE ${OPENSSL_LIBRARIES})
endif()

target_include_directories(s2 SYSTEM BEFORE PUBLIC "${S2_SOURCE_DIR}/")

if(M_LIBRARY)
    target_link_libraries(s2 PRIVATE ${M_LIBRARY})
endif()
contrib/sqlite-amalgamation (vendored), 1 change

@@ -1 +0,0 @@
Subproject commit 9818baa5d027ffb26d57f810dc4c597d4946781c

@@ -1,6 +0,0 @@
set (LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/sqlite-amalgamation")

set(SRCS ${LIBRARY_DIR}/sqlite3.c)

add_library(sqlite ${SRCS})
target_include_directories(sqlite SYSTEM PUBLIC "${LIBRARY_DIR}")
@@ -27,7 +27,7 @@ RUN apt-get update \
# Special dpkg-deb (https://github.com/ClickHouse-Extras/dpkg) version which is able
# to compress files using pigz (https://zlib.net/pigz/) instead of gzip.
# Significantly increase deb packaging speed and compatible with old systems
RUN curl -O https://clickhouse-datasets.s3.yandex.net/utils/1/dpkg-deb \
RUN curl -O https://clickhouse-builds.s3.yandex.net/utils/1/dpkg-deb \
    && chmod +x dpkg-deb \
    && cp dpkg-deb /usr/bin
@@ -2,7 +2,7 @@
FROM yandex/clickhouse-deb-builder

RUN export CODENAME="$(lsb_release --codename --short | tr 'A-Z' 'a-z')" \
    && wget -nv -O /tmp/arrow-keyring.deb "https://apache.jfrog.io/artifactory/arrow/ubuntu/apache-arrow-apt-source-latest-${CODENAME}.deb" \
    && wget -nv -O /tmp/arrow-keyring.deb "https://apache.bintray.com/arrow/ubuntu/apache-arrow-archive-keyring-latest-${CODENAME}.deb" \
    && dpkg -i /tmp/arrow-keyring.deb

# Libraries from OS are only needed to test the "unbundled" build (that is not used in production).
@@ -27,7 +27,7 @@ RUN apt-get update \
# Special dpkg-deb (https://github.com/ClickHouse-Extras/dpkg) version which is able
# to compress files using pigz (https://zlib.net/pigz/) instead of gzip.
# Significantly increase deb packaging speed and compatible with old systems
RUN curl -O https://clickhouse-datasets.s3.yandex.net/utils/1/dpkg-deb \
RUN curl -O https://clickhouse-builds.s3.yandex.net/utils/1/dpkg-deb \
    && chmod +x dpkg-deb \
    && cp dpkg-deb /usr/bin
@@ -27,7 +27,7 @@ RUN apt-get update \
# Special dpkg-deb (https://github.com/ClickHouse-Extras/dpkg) version which is able
# to compress files using pigz (https://zlib.net/pigz/) instead of gzip.
# Significantly increase deb packaging speed and compatible with old systems
RUN curl -O https://clickhouse-datasets.s3.yandex.net/utils/1/dpkg-deb \
RUN curl -O https://clickhouse-builds.s3.yandex.net/utils/1/dpkg-deb \
    && chmod +x dpkg-deb \
    && cp dpkg-deb /usr/bin
@@ -378,16 +378,6 @@ function run_tests

    # needs pv
    01923_network_receive_time_metric_insert

    01889_sqlite_read_write

    # needs s2
    01849_geoToS2
    01851_s2_to_geo
    01852_s2_get_neighbours
    01853_s2_cells_intersect
    01854_s2_cap_contains
    01854_s2_cap_union
)

time clickhouse-test --hung-check -j 8 --order=random --use-skip-list \
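The names added to the skip list above are ordinary stateless tests; once the missing dependencies (pv, sqlite, s2) are available in the build, they can be run on their own. A hedged sketch, assuming clickhouse-test accepts test-name filters as positional arguments (flags mirror the invocation above, minus the skip list):

```sh
# Illustrative only: run just the S2-related tests by name filter.
time clickhouse-test --hung-check --order=random 01849_geoToS2 01851_s2_to_geo
```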
@@ -32,7 +32,7 @@ RUN rm -rf \
RUN apt-get clean

# Install MySQL ODBC driver
RUN curl 'https://downloads.mysql.com/archives/get/p/10/file/mysql-connector-odbc-8.0.21-linux-glibc2.12-x86-64bit.tar.gz' --location --output 'mysql-connector.tar.gz' && tar -xzf mysql-connector.tar.gz && cd mysql-connector-odbc-8.0.21-linux-glibc2.12-x86-64bit/lib && mv * /usr/local/lib && ln -s /usr/local/lib/libmyodbc8a.so /usr/lib/x86_64-linux-gnu/odbc/libmyodbc.so
RUN curl 'https://cdn.mysql.com//Downloads/Connector-ODBC/8.0/mysql-connector-odbc-8.0.21-linux-glibc2.12-x86-64bit.tar.gz' --output 'mysql-connector.tar.gz' && tar -xzf mysql-connector.tar.gz && cd mysql-connector-odbc-8.0.21-linux-glibc2.12-x86-64bit/lib && mv * /usr/local/lib && ln -s /usr/local/lib/libmyodbc8a.so /usr/lib/x86_64-linux-gnu/odbc/libmyodbc.so

# Unfortunately this is required for a single test for conversion data from zookeeper to clickhouse-keeper.
# ZooKeeper is not started by default, but consumes some space in containers.
@@ -49,3 +49,4 @@ RUN mkdir /zookeeper && chmod -R 777 /zookeeper

ENV TZ=Europe/Moscow
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
@@ -76,7 +76,6 @@ RUN python3 -m pip install \
    pytest \
    pytest-timeout \
    pytest-xdist \
    pytest-repeat \
    redis \
    tzlocal \
    urllib3 \
@@ -2,7 +2,7 @@ version: '2.3'
services:
  postgres1:
    image: postgres
    command: ["postgres", "-c", "logging_collector=on", "-c", "log_directory=/postgres/logs", "-c", "log_filename=postgresql.log", "-c", "log_statement=all", "-c", "max_connections=200"]
    command: ["postgres", "-c", "logging_collector=on", "-c", "log_directory=/postgres/logs", "-c", "log_filename=postgresql.log", "-c", "log_statement=all"]
    restart: always
    expose:
      - ${POSTGRES_PORT}
@@ -29,8 +29,7 @@ RUN apt-get update -y \
    unixodbc \
    wget \
    mysql-client=5.7* \
    postgresql-client \
    sqlite3
    postgresql-client

RUN pip3 install numpy scipy pandas
@@ -58,11 +58,11 @@ function start()
        echo "Cannot start clickhouse-server"
        cat /var/log/clickhouse-server/stdout.log
        tail -n1000 /var/log/clickhouse-server/stderr.log
        tail -n100000 /var/log/clickhouse-server/clickhouse-server.log | grep -F -v '<Warning> RaftInstance:' -e '<Information> RaftInstance' | tail -n1000
        tail -n1000 /var/log/clickhouse-server/clickhouse-server.log
        break
    fi
    # use root to match with current uid
    clickhouse start --user root >/var/log/clickhouse-server/stdout.log 2>>/var/log/clickhouse-server/stderr.log
    clickhouse start --user root >/var/log/clickhouse-server/stdout.log 2>/var/log/clickhouse-server/stderr.log
    sleep 0.5
    counter=$((counter + 1))
done
@@ -118,35 +118,35 @@ clickhouse-client --query "SELECT 'Server successfully started', 'OK'" >> /test_
[ -f /var/log/clickhouse-server/stderr.log ] || echo -e "Stderr log does not exist\tFAIL"

# Print Fatal log messages to stdout
zgrep -Fa " <Fatal> " /var/log/clickhouse-server/clickhouse-server.log*
zgrep -Fa " <Fatal> " /var/log/clickhouse-server/clickhouse-server.log

# Grep logs for sanitizer asserts, crashes and other critical errors

# Sanitizer asserts
zgrep -Fa "==================" /var/log/clickhouse-server/stderr.log >> /test_output/tmp
zgrep -Fa "WARNING" /var/log/clickhouse-server/stderr.log >> /test_output/tmp
zgrep -Fav "ASan doesn't fully support makecontext/swapcontext functions" /test_output/tmp > /dev/null \
zgrep -Fav "ASan doesn't fully support makecontext/swapcontext functions" > /dev/null \
    && echo -e 'Sanitizer assert (in stderr.log)\tFAIL' >> /test_output/test_results.tsv \
    || echo -e 'No sanitizer asserts\tOK' >> /test_output/test_results.tsv
rm -f /test_output/tmp

# OOM
zgrep -Fa " <Fatal> Application: Child process was terminated by signal 9" /var/log/clickhouse-server/clickhouse-server.log* > /dev/null \
zgrep -Fa " <Fatal> Application: Child process was terminated by signal 9" /var/log/clickhouse-server/clickhouse-server.log > /dev/null \
    && echo -e 'OOM killer (or signal 9) in clickhouse-server.log\tFAIL' >> /test_output/test_results.tsv \
    || echo -e 'No OOM messages in clickhouse-server.log\tOK' >> /test_output/test_results.tsv

# Logical errors
zgrep -Fa "Code: 49, e.displayText() = DB::Exception:" /var/log/clickhouse-server/clickhouse-server.log* > /dev/null \
zgrep -Fa "Code: 49, e.displayText() = DB::Exception:" /var/log/clickhouse-server/clickhouse-server.log > /dev/null \
    && echo -e 'Logical error thrown (see clickhouse-server.log)\tFAIL' >> /test_output/test_results.tsv \
    || echo -e 'No logical errors\tOK' >> /test_output/test_results.tsv

# Crash
zgrep -Fa "########################################" /var/log/clickhouse-server/clickhouse-server.log* > /dev/null \
zgrep -Fa "########################################" /var/log/clickhouse-server/clickhouse-server.log > /dev/null \
    && echo -e 'Killed by signal (in clickhouse-server.log)\tFAIL' >> /test_output/test_results.tsv \
    || echo -e 'Not crashed\tOK' >> /test_output/test_results.tsv

# It also checks for crash without stacktrace (printed by watchdog)
zgrep -Fa " <Fatal> " /var/log/clickhouse-server/clickhouse-server.log* > /dev/null \
zgrep -Fa " <Fatal> " /var/log/clickhouse-server/clickhouse-server.log > /dev/null \
    && echo -e 'Fatal message in clickhouse-server.log\tFAIL' >> /test_output/test_results.tsv \
    || echo -e 'No fatal messages in clickhouse-server.log\tOK' >> /test_output/test_results.tsv
@ -105,11 +105,11 @@ clickhouse-client -nmT < tests/queries/0_stateless/01521_dummy_test.sql | tee te
|
||||
|
||||
5) ensure everything is correct; if the test output is incorrect (due to some bug, for example), adjust the reference file using a text editor.
|
||||
|
||||
#### How to create a good test
|
||||
#### How to create good test
|
||||
|
||||
- A test should be
|
||||
- test should be
|
||||
- minimal - create only tables related to tested functionality, remove unrelated columns and parts of query
|
||||
- fast - should not take longer than a few seconds (better subseconds)
|
||||
- fast - should not take longer than few seconds (better subseconds)
|
||||
- correct - fails then feature is not working
|
||||
- deterministic
|
||||
- isolated / stateless
|
||||
@ -126,16 +126,6 @@ clickhouse-client -nmT < tests/queries/0_stateless/01521_dummy_test.sql | tee te
|
||||
- use other SQL files in the `0_stateless` folder as an example
|
||||
- ensure the feature / feature combination you want to test is not yet covered with existing tests
|
||||
|
||||
#### Test naming rules
|
||||
|
||||
It's important to name tests correctly, so one can turn off some subsets of tests in a clickhouse-test invocation.
|
||||
|
||||
| Tester flag| What should be in test name | When flag should be added |
|
||||
|---|---|---|
|
||||
| `--[no-]zookeeper`| "zookeeper" or "replica" | Test uses tables from ReplicatedMergeTree family |
|
||||
| `--[no-]shard` | "shard" or "distributed" or "global"| Test using connections to 127.0.0.2 or similar |
|
||||
| `--[no-]long` | "long" or "deadlock" or "race" | Test runs longer than 60 seconds |
|
||||
|
||||
#### Commit / push / create PR.
|
||||
|
||||
1) commit & push your changes
|
||||
|
@ -79,7 +79,6 @@ SELECT library_name, license_type, license_path FROM system.licenses ORDER BY li
|
||||
| re2 | BSD 3-clause | /contrib/re2/LICENSE |
|
||||
| replxx | BSD 3-clause | /contrib/replxx/LICENSE.md |
|
||||
| rocksdb | BSD 3-clause | /contrib/rocksdb/LICENSE.leveldb |
|
||||
| s2geometry | Apache | /contrib/s2geometry/LICENSE |
|
||||
| sentry-native | MIT | /contrib/sentry-native/LICENSE |
|
||||
| simdjson | Apache | /contrib/simdjson/LICENSE |
|
||||
| snappy | Public Domain | /contrib/snappy/COPYING |
|
||||
|
@ -123,7 +123,7 @@ For installing CMake and Ninja on Mac OS X first install Homebrew and then insta
|
||||
/usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
|
||||
brew install cmake ninja
|
||||
|
||||
Next, check the version of CMake: `cmake --version`. If it is below 3.12, you should install a newer version from the website: https://cmake.org/download/.
|
||||
Next, check the version of CMake: `cmake --version`. If it is below 3.3, you should install a newer version from the website: https://cmake.org/download/.
|
||||
|
||||
## Optional External Libraries {#optional-external-libraries}
|
||||
|
||||
|
@ -47,7 +47,7 @@ EXCHANGE TABLES new_table AND old_table;
|
||||
|
||||
### ReplicatedMergeTree in Atomic Database {#replicatedmergetree-in-atomic-database}
|
||||
|
||||
For [ReplicatedMergeTree](../table-engines/mergetree-family/replication.md#table_engines-replication) tables, it is recommended to not specify engine parameters - path in ZooKeeper and replica name. In this case, configuration parameters will be used [default_replica_path](../../operations/server-configuration-parameters/settings.md#default_replica_path) and [default_replica_name](../../operations/server-configuration-parameters/settings.md#default_replica_name). If you want to specify engine parameters explicitly, it is recommended to use `{uuid}` macros. This is useful so that unique paths are automatically generated for each table in ZooKeeper.
|
||||
For [ReplicatedMergeTree](../table-engines/mergetree-family/replication.md#table_engines-replication) tables, it is recommended to not specify engine parameters - path in ZooKeeper and replica name. In this case, configuration parameters will be used [default_replica_path](../../operations/server-configuration-parameters/settings.md#default_replica_path) and [default_replica_name](../../operations/server-configuration-parameters/settings.md#default_replica_name). If you want to specify engine parameters explicitly, it is recommended to use {uuid} macros. This is useful so that unique paths are automatically generated for each table in ZooKeeper.
|
||||
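A minimal sketch of a table definition that uses the `{uuid}` macro explicitly (the database, table, and column names here are illustrative and assume an Atomic database already exists):

``` sql
-- Illustrative sketch: explicit ZooKeeper path with the {uuid} macro.
CREATE TABLE db.hits_local
(
    EventDate Date,
    UserID UInt64
)
ENGINE = ReplicatedMergeTree('/clickhouse/tables/{uuid}/{shard}', '{replica}')
ORDER BY (EventDate, UserID);
```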
|
||||
## See Also
|
||||
|
||||
|
@ -22,4 +22,4 @@ You can also use the following database engines:
|
||||
|
||||
- [PostgreSQL](../../engines/database-engines/postgresql.md)
|
||||
|
||||
- [Replicated](../../engines/database-engines/replicated.md)
|
||||
[Original article](https://clickhouse.tech/docs/en/database_engines/) <!--hide-->
|
||||
|
@ -82,8 +82,6 @@ MySQL DDL queries are converted into the corresponding ClickHouse DDL queries ([
|
||||
|
||||
- If `_sign` is not specified in the `SELECT` query, `WHERE _sign=1` is used by default. So the deleted rows are not included into the result set.
|
||||
|
||||
- The result includes columns comments in case they exist in MySQL database tables.
|
||||
|
||||
### Index Conversion {#index-conversion}
|
||||
|
||||
MySQL `PRIMARY KEY` and `INDEX` clauses are converted into `ORDER BY` tuples in ClickHouse tables.
|
||||
|
@ -1,115 +0,0 @@
|
||||
# [experimental] Replicated {#replicated}
|
||||
|
||||
The engine is based on the [Atomic](../../engines/database-engines/atomic.md) engine. It supports replication of metadata via DDL log being written to ZooKeeper and executed on all of the replicas for a given database.
|
||||
|
||||
One ClickHouse server can have multiple replicated databases running and updating at the same time. But there can't be multiple replicas of the same replicated database.
|
||||
|
||||
## Creating a Database {#creating-a-database}
|
||||
``` sql
|
||||
CREATE DATABASE testdb ENGINE = Replicated('zoo_path', 'shard_name', 'replica_name') [SETTINGS ...]
|
||||
```
|
||||
|
||||
**Engine Parameters**
|
||||
|
||||
- `zoo_path` — ZooKeeper path. The same ZooKeeper path corresponds to the same database.
|
||||
- `shard_name` — Shard name. Database replicas are grouped into shards by `shard_name`.
|
||||
- `replica_name` — Replica name. Replica names must be different for all replicas of the same shard.
|
||||
|
||||
!!! note "Warning"
|
||||
For [ReplicatedMergeTree](../table-engines/mergetree-family/replication.md#table_engines-replication) tables if no arguments provided, then default arguments are used: `/clickhouse/tables/{uuid}/{shard}` and `{replica}`. These can be changed in the server settings [default_replica_path](../../operations/server-configuration-parameters/settings.md#default_replica_path) and [default_replica_name](../../operations/server-configuration-parameters/settings.md#default_replica_name). Macro `{uuid}` is unfolded to table's uuid, `{shard}` and `{replica}` are unfolded to values from server config, not from database engine arguments. But in the future, it will be possible to use `shard_name` and `replica_name` of Replicated database.
|
||||
|
||||
## Specifics and Recommendations {#specifics-and-recommendations}
|
||||
|
||||
DDL queries with `Replicated` database work in a similar way to [ON CLUSTER](../../sql-reference/distributed-ddl.md) queries, but with minor differences.
|
||||
|
||||
First, the DDL request tries to execute on the initiator (the host that originally received the request from the user). If the request is not fulfilled, then the user immediately receives an error, other hosts do not try to fulfill it. If the request has been successfully completed on the initiator, then all other hosts will automatically retry until they complete it. The initiator will try to wait for the query to be completed on other hosts (no longer than [distributed_ddl_task_timeout](../../operations/settings/settings.md#distributed_ddl_task_timeout)) and will return a table with the query execution statuses on each host.
|
||||
|
||||
The behavior in case of errors is regulated by the [distributed_ddl_output_mode](../../operations/settings/settings.md#distributed_ddl_output_mode) setting, for a `Replicated` database it is better to set it to `null_status_on_timeout` — i.e. if some hosts did not have time to execute the request for [distributed_ddl_task_timeout](../../operations/settings/settings.md#distributed_ddl_task_timeout), then do not throw an exception, but show the `NULL` status for them in the table.
|
||||
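For instance, a hedged sketch of the session settings recommended above (the values are illustrative):

``` sql
SET distributed_ddl_output_mode = 'null_status_on_timeout';
SET distributed_ddl_task_timeout = 300; -- seconds to wait for the other hosts
```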
|
||||
The [system.clusters](../../operations/system-tables/clusters.md) system table contains a cluster named like the replicated database, which consists of all replicas of the database. This cluster is updated automatically when creating/deleting replicas, and it can be used for [Distributed](../../engines/table-engines/special/distributed.md#distributed) tables.
|
||||
|
||||
When creating a new replica of the database, this replica creates tables by itself. If the replica has been unavailable for a long time and has lagged behind the replication log — it checks its local metadata with the current metadata in ZooKeeper, moves the extra tables with data to a separate non-replicated database (so as not to accidentally delete anything superfluous), creates the missing tables, updates the table names if they have been renamed. The data is replicated at the `ReplicatedMergeTree` level, i.e. if the table is not replicated, the data will not be replicated (the database is responsible only for metadata).
|
||||
|
||||
## Usage Example {#usage-example}
|
||||
|
||||
Creating a cluster with three hosts:
|
||||
|
||||
``` sql
|
||||
node1 :) CREATE DATABASE r ENGINE=Replicated('some/path/r','shard1','replica1');
|
||||
node2 :) CREATE DATABASE r ENGINE=Replicated('some/path/r','shard1','other_replica');
|
||||
node3 :) CREATE DATABASE r ENGINE=Replicated('some/path/r','other_shard','{replica}');
|
||||
```
|
||||
|
||||
Running the DDL-query:
|
||||
|
||||
``` sql
|
||||
CREATE TABLE r.rmt (n UInt64) ENGINE=ReplicatedMergeTree ORDER BY n;
|
||||
```
|
||||
|
||||
``` text
|
||||
┌─────hosts────────────┬──status─┬─error─┬─num_hosts_remaining─┬─num_hosts_active─┐
|
||||
│ shard1|replica1 │ 0 │ │ 2 │ 0 │
|
||||
│ shard1|other_replica │ 0 │ │ 1 │ 0 │
|
||||
│ other_shard|r1 │ 0 │ │ 0 │ 0 │
|
||||
└──────────────────────┴─────────┴───────┴─────────────────────┴──────────────────┘
|
||||
```
|
||||
|
||||
Showing the system table:
|
||||
|
||||
``` sql
|
||||
SELECT cluster, shard_num, replica_num, host_name, host_address, port, is_local
|
||||
FROM system.clusters WHERE cluster='r';
|
||||
```
|
||||
|
||||
``` text
|
||||
┌─cluster─┬─shard_num─┬─replica_num─┬─host_name─┬─host_address─┬─port─┬─is_local─┐
|
||||
│ r │ 1 │ 1 │ node3 │ 127.0.0.1 │ 9002 │ 0 │
|
||||
│ r │ 2 │ 1 │ node2 │ 127.0.0.1 │ 9001 │ 0 │
|
||||
│ r │ 2 │ 2 │ node1 │ 127.0.0.1 │ 9000 │ 1 │
|
||||
└─────────┴───────────┴─────────────┴───────────┴──────────────┴──────┴──────────┘
|
||||
```
|
||||
|
||||
Creating a distributed table and inserting the data:
|
||||
|
||||
``` sql
|
||||
node2 :) CREATE TABLE r.d (n UInt64) ENGINE=Distributed('r','r','rmt', n % 2);
|
||||
node3 :) INSERT INTO r.d SELECT * FROM numbers(10);
|
||||
node1 :) SELECT materialize(hostName()) AS host, groupArray(n) FROM r.d GROUP BY host;
|
||||
```
|
||||
|
||||
``` text
|
||||
┌─hosts─┬─groupArray(n)─┐
|
||||
│ node1 │ [1,3,5,7,9] │
|
||||
│ node2 │ [0,2,4,6,8] │
|
||||
└───────┴───────────────┘
|
||||
```
|
||||
|
||||
Adding a replica on one more host:
|
||||
|
||||
``` sql
|
||||
node4 :) CREATE DATABASE r ENGINE=Replicated('some/path/r','other_shard','r2');
|
||||
```
|
||||
|
||||
The cluster configuration will look like this:
|
||||
|
||||
``` text
|
||||
┌─cluster─┬─shard_num─┬─replica_num─┬─host_name─┬─host_address─┬─port─┬─is_local─┐
|
||||
│ r │ 1 │ 1 │ node3 │ 127.0.0.1 │ 9002 │ 0 │
|
||||
│ r │ 1 │ 2 │ node4 │ 127.0.0.1 │ 9003 │ 0 │
|
||||
│ r │ 2 │ 1 │ node2 │ 127.0.0.1 │ 9001 │ 0 │
|
||||
│ r │ 2 │ 2 │ node1 │ 127.0.0.1 │ 9000 │ 1 │
|
||||
└─────────┴───────────┴─────────────┴───────────┴──────────────┴──────┴──────────┘
|
||||
```
|
||||
|
||||
The distributed table will also get data from the new host:
|
||||
|
||||
```sql
|
||||
node2 :) SELECT materialize(hostName()) AS host, groupArray(n) FROM r.d GROUP BY host;
|
||||
```
|
||||
|
||||
```text
|
||||
┌─hosts─┬─groupArray(n)─┐
|
||||
│ node2 │ [1,3,5,7,9] │
|
||||
│ node4 │ [0,2,4,6,8] │
|
||||
└───────┴───────────────┘
|
||||
```
|
@ -1,6 +1,6 @@
|
||||
---
|
||||
toc_priority: 12
|
||||
toc_title: MaterializedPostgreSQL
|
||||
toc_title: MateriaziePostgreSQL
|
||||
---
|
||||
|
||||
# MaterializedPostgreSQL {#materialize-postgresql}
|
||||
|
@ -76,7 +76,7 @@ For a description of parameters, see the [CREATE query description](../../../sql
|
||||
|
||||
- `SAMPLE BY` — An expression for sampling. Optional.
|
||||
|
||||
If a sampling expression is used, the primary key must contain it. The result of sampling expression must be unsigned integer. Example: `SAMPLE BY intHash32(UserID) ORDER BY (CounterID, EventDate, intHash32(UserID))`.
|
||||
If a sampling expression is used, the primary key must contain it. Example: `SAMPLE BY intHash32(UserID) ORDER BY (CounterID, EventDate, intHash32(UserID))`.
|
||||
|
||||
- `TTL` — A list of rules specifying storage duration of rows and defining logic of automatic parts movement [between disks and volumes](#table_engine-mergetree-multiple-volumes). Optional.
|
||||
|
||||
|
@ -37,14 +37,6 @@ Also, it accepts the following settings:
|
||||
|
||||
- `max_delay_to_insert` - max delay of inserting data into Distributed table in seconds, if there are a lot of pending bytes for async send. Default 60.
|
||||
|
||||
- `monitor_batch_inserts` - same as [distributed_directory_monitor_batch_inserts](../../../operations/settings/settings.md#distributed_directory_monitor_batch_inserts)
|
||||
|
||||
- `monitor_split_batch_on_failure` - same as [distributed_directory_monitor_split_batch_on_failure](../../../operations/settings/settings.md#distributed_directory_monitor_split_batch_on_failure)
|
||||
|
||||
- `monitor_sleep_time_ms` - same as [distributed_directory_monitor_sleep_time_ms](../../../operations/settings/settings.md#distributed_directory_monitor_sleep_time_ms)
|
||||
|
||||
- `monitor_max_sleep_time_ms` - same as [distributed_directory_monitor_max_sleep_time_ms](../../../operations/settings/settings.md#distributed_directory_monitor_max_sleep_time_ms)
|
||||
|
||||
!!! note "Note"
|
||||
|
||||
**Durability settings** (`fsync_...`):
|
||||
|
@ -1130,18 +1130,17 @@ The table below shows supported data types and how they match ClickHouse [data t
|
||||
| `boolean`, `int`, `long`, `float`, `double` | [Int64](../sql-reference/data-types/int-uint.md), [UInt64](../sql-reference/data-types/int-uint.md) | `long` |
|
||||
| `boolean`, `int`, `long`, `float`, `double` | [Float32](../sql-reference/data-types/float.md) | `float` |
|
||||
| `boolean`, `int`, `long`, `float`, `double` | [Float64](../sql-reference/data-types/float.md) | `double` |
|
||||
| `bytes`, `string`, `fixed`, `enum` | [String](../sql-reference/data-types/string.md) | `bytes` or `string` \* |
|
||||
| `bytes`, `string`, `fixed`, `enum` | [String](../sql-reference/data-types/string.md) | `bytes` |
|
||||
| `bytes`, `string`, `fixed` | [FixedString(N)](../sql-reference/data-types/fixedstring.md) | `fixed(N)` |
|
||||
| `enum` | [Enum(8\|16)](../sql-reference/data-types/enum.md) | `enum` |
|
||||
| `array(T)` | [Array(T)](../sql-reference/data-types/array.md) | `array(T)` |
|
||||
| `union(null, T)`, `union(T, null)` | [Nullable(T)](../sql-reference/data-types/date.md) | `union(null, T)` |
|
||||
| `null` | [Nullable(Nothing)](../sql-reference/data-types/special-data-types/nothing.md) | `null` |
|
||||
| `int (date)` \** | [Date](../sql-reference/data-types/date.md) | `int (date)` \** |
|
||||
| `long (timestamp-millis)` \** | [DateTime64(3)](../sql-reference/data-types/datetime.md) | `long (timestamp-millis)` \* |
|
||||
| `long (timestamp-micros)` \** | [DateTime64(6)](../sql-reference/data-types/datetime.md) | `long (timestamp-micros)` \* |
|
||||
| `int (date)` \* | [Date](../sql-reference/data-types/date.md) | `int (date)` \* |
|
||||
| `long (timestamp-millis)` \* | [DateTime64(3)](../sql-reference/data-types/datetime.md) | `long (timestamp-millis)` \* |
|
||||
| `long (timestamp-micros)` \* | [DateTime64(6)](../sql-reference/data-types/datetime.md) | `long (timestamp-micros)` \* |
|
||||
|
||||
\* `bytes` is default, controlled by [output_format_avro_string_column_pattern](../operations/settings/settings.md#settings-output_format_avro_string_column_pattern)
|
||||
\** [Avro logical types](https://avro.apache.org/docs/current/spec.html#Logical+Types)
|
||||
\* [Avro logical types](https://avro.apache.org/docs/current/spec.html#Logical+Types)
|
||||
|
||||
Unsupported Avro data types: `record` (non-root), `map`
|
||||
|
||||
@ -1247,14 +1246,12 @@ The table below shows supported data types and how they match ClickHouse [data t
|
||||
| `DOUBLE` | [Float64](../sql-reference/data-types/float.md) | `DOUBLE` |
|
||||
| `DATE32` | [Date](../sql-reference/data-types/date.md) | `UINT16` |
|
||||
| `DATE64`, `TIMESTAMP` | [DateTime](../sql-reference/data-types/datetime.md) | `UINT32` |
|
||||
| `STRING`, `BINARY` | [String](../sql-reference/data-types/string.md) | `BINARY` |
|
||||
| — | [FixedString](../sql-reference/data-types/fixedstring.md) | `BINARY` |
|
||||
| `STRING`, `BINARY` | [String](../sql-reference/data-types/string.md) | `STRING` |
|
||||
| — | [FixedString](../sql-reference/data-types/fixedstring.md) | `STRING` |
|
||||
| `DECIMAL` | [Decimal](../sql-reference/data-types/decimal.md) | `DECIMAL` |
|
||||
| `LIST` | [Array](../sql-reference/data-types/array.md) | `LIST` |
|
||||
| `STRUCT` | [Tuple](../sql-reference/data-types/tuple.md) | `STRUCT` |
|
||||
| `MAP` | [Map](../sql-reference/data-types/map.md) | `MAP` |
|
||||
|
||||
Arrays can be nested and can have a value of the `Nullable` type as an argument. `Tuple` and `Map` types also can be nested.
|
||||
Arrays can be nested and can have a value of the `Nullable` type as an argument.
|
||||
|
||||
ClickHouse supports configurable precision of `Decimal` type. The `INSERT` query treats the Parquet `DECIMAL` type as the ClickHouse `Decimal128` type.
|
||||
|
||||
@ -1302,17 +1299,13 @@ The table below shows supported data types and how they match ClickHouse [data t
|
||||
| `DOUBLE` | [Float64](../sql-reference/data-types/float.md) | `FLOAT64` |
|
||||
| `DATE32` | [Date](../sql-reference/data-types/date.md) | `UINT16` |
|
||||
| `DATE64`, `TIMESTAMP` | [DateTime](../sql-reference/data-types/datetime.md) | `UINT32` |
|
||||
| `STRING`, `BINARY` | [String](../sql-reference/data-types/string.md) | `BINARY` |
|
||||
| `STRING`, `BINARY` | [FixedString](../sql-reference/data-types/fixedstring.md) | `BINARY` |
|
||||
| `STRING`, `BINARY` | [String](../sql-reference/data-types/string.md) | `UTF8` |
|
||||
| `STRING`, `BINARY` | [FixedString](../sql-reference/data-types/fixedstring.md) | `UTF8` |
|
||||
| `DECIMAL` | [Decimal](../sql-reference/data-types/decimal.md) | `DECIMAL` |
|
||||
| `DECIMAL256` | [Decimal256](../sql-reference/data-types/decimal.md)| `DECIMAL256` |
|
||||
| `LIST` | [Array](../sql-reference/data-types/array.md) | `LIST` |
|
||||
| `STRUCT` | [Tuple](../sql-reference/data-types/tuple.md) | `STRUCT` |
|
||||
| `MAP` | [Map](../sql-reference/data-types/map.md) | `MAP` |
|
||||
|
||||
Arrays can be nested and can have a value of the `Nullable` type as an argument. `Tuple` and `Map` types also can be nested.
|
||||
|
||||
The `DICTIONARY` type is supported for `INSERT` queries, and for `SELECT` queries there is an [output_format_arrow_low_cardinality_as_dictionary](../operations/settings/settings.md#output-format-arrow-low-cardinality-as-dictionary) setting that allows outputting the [LowCardinality](../sql-reference/data-types/lowcardinality.md) type as a `DICTIONARY` type.
|
||||
Arrays can be nested and can have a value of the `Nullable` type as an argument.
|
||||
|
||||
ClickHouse supports configurable precision of the `Decimal` type. The `INSERT` query treats the Arrow `DECIMAL` type as the ClickHouse `Decimal128` type.
|
||||
|
||||
@ -1365,10 +1358,8 @@ The table below shows supported data types and how they match ClickHouse [data t
|
||||
| `STRING`, `BINARY` | [String](../sql-reference/data-types/string.md) | `BINARY` |
|
||||
| `DECIMAL` | [Decimal](../sql-reference/data-types/decimal.md) | `DECIMAL` |
|
||||
| `LIST` | [Array](../sql-reference/data-types/array.md) | `LIST` |
|
||||
| `STRUCT` | [Tuple](../sql-reference/data-types/tuple.md) | `STRUCT` |
|
||||
| `MAP` | [Map](../sql-reference/data-types/map.md) | `MAP` |
|
||||
|
||||
Arrays can be nested and can have a value of the `Nullable` type as an argument. `Tuple` and `Map` types also can be nested.
|
||||
Arrays can be nested and can have a value of the `Nullable` type as an argument.
|
||||
|
||||
ClickHouse supports configurable precision of the `Decimal` type. The `INSERT` query treats the ORC `DECIMAL` type as the ClickHouse `Decimal128` type.
|
||||
|
||||
|
@ -157,6 +157,5 @@ toc_title: Adopters
|
||||
| <a href="https://signoz.io/" class="favicon">SigNoz</a> | Observability Platform | Main Product | — | — | [Source code](https://github.com/SigNoz/signoz) |
|
||||
| <a href="https://chelpipegroup.com/" class="favicon">ChelPipe Group</a> | Analytics | — | — | — | [Blog post, June 2021](https://vc.ru/trade/253172-tyazhelomu-proizvodstvu-user-friendly-sayt-internet-magazin-trub-dlya-chtpz) |
|
||||
| <a href="https://zagravagames.com/en/" class="favicon">Zagrava Trading</a> | — | — | — | — | [Job offer, May 2021](https://twitter.com/datastackjobs/status/1394707267082063874) |
|
||||
| <a href="https://beeline.ru/" class="favicon">Beeline</a> | Telecom | Data Platform | — | — | [Blog post, July 2021](https://habr.com/en/company/beeline/blog/567508/) |
|
||||
|
||||
[Original article](https://clickhouse.tech/docs/en/introduction/adopters/) <!--hide-->
|
||||
|
@ -10,7 +10,7 @@ ClickHouse server use [ZooKeeper](https://zookeeper.apache.org/) coordination sy
|
||||
!!! warning "Warning"
|
||||
This feature is currently in a pre-production stage. We test it in our CI and on small internal installations.
|
||||
|
||||
## Implementation details
|
||||
## Implemetation details
|
||||
|
||||
ZooKeeper is one of the first well-known open-source coordination systems. It's implemented in Java and has quite a simple and powerful data model. ZooKeeper's coordination algorithm, called ZAB (ZooKeeper Atomic Broadcast), doesn't provide linearizability guarantees for reads, because each ZooKeeper node serves reads locally. Unlike ZooKeeper, `clickhouse-keeper` is written in C++ and uses the [RAFT algorithm](https://raft.github.io/) [implementation](https://github.com/eBay/NuRaft). This algorithm provides linearizability for reads and writes and has several open-source implementations in different languages.
|
||||
|
||||
|
@ -278,16 +278,4 @@ Possible values:
|
||||
|
||||
Default value: `0`.
|
||||
|
||||
## check_sample_column_is_correct {#check_sample_column_is_correct}
|
||||
|
||||
Enables to check column for sampling or sampling expression is correct at table creation.
|
||||
|
||||
Possible values:
|
||||
|
||||
- true — Check column or sampling expression is correct at table creation.
|
||||
- false — Do not check column or sampling expression is correct at table creation.
|
||||
|
||||
Default value: `true`.
|
||||
|
||||
By default, the ClickHouse server checks the column for sampling or the sampling expression at table creation. If you already have tables with an incorrect sampling expression, set the value to `false` so that the ClickHouse server does not raise an exception while starting.
|
||||
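A hedged sketch of relaxing the check for a single table, assuming the setting is accepted in the table-level `SETTINGS` clause (the table and column names are illustrative):

``` sql
CREATE TABLE sampled_events
(
    EventDate Date,
    UserID UInt64
)
ENGINE = MergeTree
ORDER BY (EventDate, intHash32(UserID))
SAMPLE BY intHash32(UserID)
SETTINGS check_sample_column_is_correct = 0;
```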
[Original article](https://clickhouse.tech/docs/en/operations/settings/merge_tree_settings/) <!--hide-->
|
||||
|
@ -509,23 +509,6 @@ Possible values:
|
||||
|
||||
Default value: `ALL`.
|
||||
|
||||
## join_algorithm {#settings-join_algorithm}
|
||||
|
||||
Specifies [JOIN](../../sql-reference/statements/select/join.md) algorithm.
|
||||
|
||||
Possible values:
|
||||
|
||||
- `hash` — [Hash join algorithm](https://en.wikipedia.org/wiki/Hash_join) is used.
|
||||
- `partial_merge` — [Sort-merge algorithm](https://en.wikipedia.org/wiki/Sort-merge_join) is used.
|
||||
- `prefer_partial_merge` — ClickHouse always tries to use `merge` join if possible.
|
||||
- `auto` — ClickHouse tries to change `hash` join to `merge` join on the fly to avoid out of memory.
|
||||
|
||||
Default value: `hash`.
|
||||
|
||||
When using the `hash` algorithm, the right part of `JOIN` is loaded into RAM.
|
||||
|
||||
When using the `partial_merge` algorithm, ClickHouse sorts the data and dumps it to disk. The `merge` algorithm in ClickHouse differs a bit from the classic implementation. First ClickHouse sorts the right table by [join key](../../sql-reference/statements/select/join.md#select-join) in blocks and creates a min-max index for the sorted blocks. Then it sorts parts of the left table by `join key` and joins them over the right table. The min-max index is also used to skip unneeded right-table blocks.
|
||||
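A short sketch of switching the algorithm for a memory-heavy query (the table names are illustrative):

``` sql
SET join_algorithm = 'auto'; -- fall back to merge join if the hash table grows too large

SELECT l.id, r.value
FROM big_left AS l
INNER JOIN big_right AS r ON l.id = r.id;
```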
|
||||
## join_any_take_last_row {#settings-join_any_take_last_row}
|
||||
|
||||
Changes behaviour of join operations with `ANY` strictness.
|
||||
@ -1230,15 +1213,7 @@ Default value: `3`.
|
||||
|
||||
## output_format_json_quote_64bit_integers {#session_settings-output_format_json_quote_64bit_integers}
|
||||
|
||||
Controls quoting of 64-bit or bigger [integers](../../sql-reference/data-types/int-uint.md) (like `UInt64` or `Int128`) when they are output in a [JSON](../../interfaces/formats.md#json) format.
|
||||
Such integers are enclosed in quotes by default. This behavior is compatible with most JavaScript implementations.
|
||||
|
||||
Possible values:
|
||||
|
||||
- 0 — Integers are output without quotes.
|
||||
- 1 — Integers are enclosed in quotes.
|
||||
|
||||
Default value: 1.
|
||||
If the value is true, integers appear in quotes when using JSON\* Int64 and UInt64 formats (for compatibility with most JavaScript implementations); otherwise, integers are output without the quotes.
|
||||
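A minimal sketch showing the effect of the setting:

``` sql
SET output_format_json_quote_64bit_integers = 0;
SELECT toUInt64(42) AS x FORMAT JSONEachRow; -- {"x":42} instead of the default {"x":"42"}
```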
|
||||
## output_format_json_quote_denormals {#settings-output_format_json_quote_denormals}
|
||||
|
||||
@ -1986,13 +1961,6 @@ Possible values: 32 (32 bytes) - 1073741824 (1 GiB)
|
||||
|
||||
Default value: 32768 (32 KiB)
|
||||
|
||||
## output_format_avro_string_column_pattern {#output_format_avro_string_column_pattern}
|
||||
|
||||
Regexp of column names of type String to output as Avro `string` (default is `bytes`).
|
||||
RE2 syntax is supported.
|
||||
|
||||
Type: string
|
||||
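A hedged sketch (the column name and the pattern are illustrative):

``` sql
SET output_format_avro_string_column_pattern = '^user_name$';
SELECT 'alice' AS user_name FORMAT Avro; -- user_name is written as Avro string instead of bytes
```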
|
||||
## format_avro_schema_registry_url {#format_avro_schema_registry_url}
|
||||
|
||||
Sets [Confluent Schema Registry](https://docs.confluent.io/current/schema-registry/index.html) URL to use with [AvroConfluent](../../interfaces/formats.md#data-format-avro-confluent) format.
|
||||
@ -2022,16 +1990,6 @@ Possible values:
|
||||
|
||||
Default value: 16.
|
||||
|
||||
## merge_selecting_sleep_ms {#merge_selecting_sleep_ms}
|
||||
|
||||
Sleep time for merge selecting when no part is selected. A lower value triggers selecting tasks in `background_schedule_pool` more frequently, which results in a large number of requests to ZooKeeper in large-scale clusters.
|
||||
|
||||
Possible values:
|
||||
|
||||
- Any positive integer.
|
||||
|
||||
Default value: 5000
|
||||
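A hedged sketch of raising the sleep time for one table, assuming the setting is accepted in the table-level `SETTINGS` clause (the names are illustrative):

``` sql
CREATE TABLE events
(
    EventDate Date,
    id UInt64
)
ENGINE = MergeTree
ORDER BY id
SETTINGS merge_selecting_sleep_ms = 10000;
```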
|
||||
## parallel_distributed_insert_select {#parallel_distributed_insert_select}
|
||||
|
||||
Enables parallel distributed `INSERT ... SELECT` query.
|
||||
@ -3165,53 +3123,6 @@ SELECT
|
||||
FROM fuse_tbl
|
||||
```
|
||||
|
||||
## allow_experimental_database_replicated {#allow_experimental_database_replicated}
|
||||
|
||||
Enables creating databases with the [Replicated](../../engines/database-engines/replicated.md) engine.
|
||||
|
||||
Possible values:
|
||||
|
||||
- 0 — Disabled.
|
||||
- 1 — Enabled.
|
||||
|
||||
Default value: `0`.
|
||||
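A minimal sketch of enabling the setting before creating a replicated database (the path and names are illustrative):

``` sql
SET allow_experimental_database_replicated = 1;
CREATE DATABASE r ENGINE = Replicated('some/path/r', 'shard1', 'replica1');
```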
|
||||
## database_replicated_initial_query_timeout_sec {#database_replicated_initial_query_timeout_sec}
|
||||
|
||||
Sets how long the initial DDL query should wait for the Replicated database to process previous DDL queue entries, in seconds.
|
||||
|
||||
Possible values:
|
||||
|
||||
- Positive integer.
|
||||
- 0 — Unlimited.
|
||||
|
||||
Default value: `300`.
|
||||
|
||||
## distributed_ddl_task_timeout {#distributed_ddl_task_timeout}
|
||||
|
||||
Sets timeout for DDL query responses from all hosts in cluster. If a DDL request has not been performed on all hosts, a response will contain a timeout error and a request will be executed in an async mode. Negative value means infinite.
|
||||
|
||||
Possible values:
|
||||
|
||||
- Positive integer.
|
||||
- 0 — Async mode.
|
||||
- Negative integer — infinite timeout.
|
||||
|
||||
Default value: `180`.
|
||||
|
||||
## distributed_ddl_output_mode {#distributed_ddl_output_mode}
|
||||
|
||||
Sets format of distributed DDL query result.
|
||||
|
||||
Possible values:
|
||||
|
||||
- `throw` — Returns result set with query execution status for all hosts where query is finished. If query has failed on some hosts, then it will rethrow the first exception. If query is not finished yet on some hosts and [distributed_ddl_task_timeout](#distributed_ddl_task_timeout) exceeded, then it throws `TIMEOUT_EXCEEDED` exception.
|
||||
- `none` — Is similar to throw, but distributed DDL query returns no result set.
|
||||
- `null_status_on_timeout` — Returns `NULL` as execution status in some rows of result set instead of throwing `TIMEOUT_EXCEEDED` if query is not finished on the corresponding hosts.
|
||||
- `never_throw` — Do not throw `TIMEOUT_EXCEEDED` and do not rethrow exceptions if query has failed on some hosts.
|
||||
|
||||
Default value: `throw`.
|
||||
|
||||
## flatten_nested {#flatten-nested}
|
||||
|
||||
Sets the data format of a [nested](../../sql-reference/data-types/nested-data-structures/nested.md) columns.
|
||||
@ -3291,14 +3202,3 @@ Default value: `1`.
|
||||
**Usage**
|
||||
|
||||
If the setting is set to `0`, the table function does not make Nullable columns and inserts default values instead of NULL. This is also applicable for NULL values inside arrays.
|
||||
|
||||
## output_format_arrow_low_cardinality_as_dictionary {#output-format-arrow-low-cardinality-as-dictionary}
|
||||
|
||||
Allows converting the [LowCardinality](../../sql-reference/data-types/lowcardinality.md) type to the `DICTIONARY` type of the [Arrow](../../interfaces/formats.md#data-format-arrow) format for `SELECT` queries.
|
||||
|
||||
Possible values:
|
||||
|
||||
- 0 — The `LowCardinality` type is not converted to the `DICTIONARY` type.
|
||||
- 1 — The `LowCardinality` type is converted to the `DICTIONARY` type.
|
||||
|
||||
Default value: `0`.
|
||||
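A minimal sketch of the setting in action:

``` sql
SET output_format_arrow_low_cardinality_as_dictionary = 1;
SELECT toLowCardinality('value') AS s FORMAT Arrow; -- s is emitted as an Arrow DICTIONARY column
```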
|
@ -8,11 +8,12 @@ Columns:
|
||||
- `table` ([String](../../sql-reference/data-types/string.md)) — Table name.
|
||||
- `name` ([String](../../sql-reference/data-types/string.md)) — Index name.
|
||||
- `type` ([String](../../sql-reference/data-types/string.md)) — Index type.
|
||||
- `expr` ([String](../../sql-reference/data-types/string.md)) — Expression for the index calculation.
|
||||
- `granularity` ([UInt64](../../sql-reference/data-types/int-uint.md)) — The number of granules in the block.
|
||||
- `expr` ([String](../../sql-reference/data-types/string.md)) — Expression used to calculate the index.
|
||||
- `granularity` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Number of granules in the block.
|
||||
|
||||
**Example**
|
||||
|
||||
|
||||
```sql
|
||||
SELECT * FROM system.data_skipping_indices LIMIT 2 FORMAT Vertical;
|
||||
```
|
||||
|
@ -34,7 +34,7 @@ Input table:
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT medianDeterministic(val, 1) FROM t;
|
||||
SELECT medianDeterministic(val, 1) FROM t
|
||||
```
|
||||
|
||||
Result:
|
||||
|
@ -47,7 +47,6 @@ Settings:
|
||||
- [low_cardinality_use_single_dictionary_for_part](../../operations/settings/settings.md#low_cardinality_use_single_dictionary_for_part)
|
||||
- [low_cardinality_allow_in_native_format](../../operations/settings/settings.md#low_cardinality_allow_in_native_format)
|
||||
- [allow_suspicious_low_cardinality_types](../../operations/settings/settings.md#allow_suspicious_low_cardinality_types)
|
||||
- [output_format_arrow_low_cardinality_as_dictionary](../../operations/settings/settings.md#output-format-arrow-low-cardinality-as-dictionary)
|
||||
|
||||
Functions:
|
||||
|
||||
@ -58,3 +57,5 @@ Functions:
|
||||
- [A Magical Mystery Tour of the LowCardinality Data Type](https://www.altinity.com/blog/2019/3/27/low-cardinality).
|
||||
- [Reducing ClickHouse Storage Cost with the Low Cardinality Type – Lessons from an Instana Engineer](https://www.instana.com/blog/reducing-clickhouse-storage-cost-with-the-low-cardinality-type-lessons-from-an-instana-engineer/).
|
||||
- [String Optimization (video presentation in Russian)](https://youtu.be/rqf-ILRgBdY?list=PL0Z2YDlm0b3iwXCpEFiOOYmwXzVmjJfEt). [Slides in English](https://github.com/yandex/clickhouse-presentations/raw/master/meetup19/string_optimization.pdf).
|
||||
|
||||
[Original article](https://clickhouse.tech/docs/en/sql-reference/data-types/lowcardinality/) <!--hide-->
|
||||
|
@ -9,8 +9,11 @@ toc_title: Map(key, value)
|
||||
|
||||
**Parameters**
|
||||
|
||||
- `key` — The key part of the pair. [String](../../sql-reference/data-types/string.md), [Integer](../../sql-reference/data-types/int-uint.md), [LowCardinality](../../sql-reference/data-types/lowcardinality.md), or [FixedString](../../sql-reference/data-types/fixedstring.md).
|
||||
- `value` — The value part of the pair. [String](../../sql-reference/data-types/string.md), [Integer](../../sql-reference/data-types/int-uint.md), [Array](../../sql-reference/data-types/array.md), [LowCardinality](../../sql-reference/data-types/lowcardinality.md), or [FixedString](../../sql-reference/data-types/fixedstring.md).
|
||||
- `key` — The key part of the pair. [String](../../sql-reference/data-types/string.md) or [Integer](../../sql-reference/data-types/int-uint.md).
|
||||
- `value` — The value part of the pair. [String](../../sql-reference/data-types/string.md), [Integer](../../sql-reference/data-types/int-uint.md) or [Array](../../sql-reference/data-types/array.md).
|
||||
|
||||
!!! warning "Warning"
|
||||
Currently `Map` data type is an experimental feature. To work with it you must set `allow_experimental_map_type = 1`.
|
||||
|
||||
To get the value from an `a Map('key', 'value')` column, use the `a['key']` syntax. This lookup currently works with linear complexity.
|
||||
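A minimal usage sketch (the table and key names are illustrative):

``` sql
SET allow_experimental_map_type = 1;

CREATE TABLE table_map (a Map(String, UInt64)) ENGINE = Memory;
INSERT INTO table_map VALUES ({'key1': 1, 'key2': 10});
SELECT a['key2'] FROM table_map; -- returns 10
```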
|
||||
|
@ -211,7 +211,7 @@ SELECT nullIf(1, 2);
|
||||
|
||||
## assumeNotNull {#assumenotnull}
|
||||
|
||||
Results in an equivalent non-`Nullable` value for a [Nullable](../../sql-reference/data-types/nullable.md) type. In case the original value is `NULL` the result is undetermined. See also `ifNull` and `coalesce` functions.
|
||||
Results in a value of type [Nullable](../../sql-reference/data-types/nullable.md) for a non- `Nullable`, if the value is not `NULL`.
|
||||
|
||||
``` sql
|
||||
assumeNotNull(x)
|
||||
|
@ -195,41 +195,6 @@ Result:
|
||||
└────────────────────┘
|
||||
```
|
||||
|
||||
## h3ToGeo {#h3togeo}
|
||||
|
||||
Returns `(lon, lat)` that corresponds to the provided H3 index.
|
||||
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
h3ToGeo(h3Index)
|
||||
```
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `h3Index` — H3 Index. Type: [UInt64](../../../sql-reference/data-types/int-uint.md).
|
||||
|
||||
**Returned values**
|
||||
|
||||
- `lon` — Longitude. Type: [Float64](../../../sql-reference/data-types/float.md).
|
||||
- `lat` — Latitude. Type: [Float64](../../../sql-reference/data-types/float.md).
|
||||
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT h3ToGeo(644325524701193974) coordinates;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
┌─coordinates───────────────────────────┐
|
||||
│ (37.79506616830252,55.71290243145668) │
|
||||
└───────────────────────────────────────┘
|
||||
```
|
||||
## h3kRing {#h3kring}
|
||||
|
||||
Lists all the [H3](#h3index) hexagons within radius `k` of the given hexagon, in random order.
|
||||
|
@ -306,49 +306,3 @@ Result:
|
||||
└───────────────────────────────────────────────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## toJSONString {#tojsonstring}
|
||||
|
||||
Serializes a value to its JSON representation. Various data types and nested structures are supported.
|
||||
64-bit [integers](../../sql-reference/data-types/int-uint.md) or bigger (like `UInt64` or `Int128`) are enclosed in quotes by default. [output_format_json_quote_64bit_integers](../../operations/settings/settings.md#session_settings-output_format_json_quote_64bit_integers) controls this behavior.
|
||||
Special values `NaN` and `inf` are replaced with `null`. Enable [output_format_json_quote_denormals](../../operations/settings/settings.md#settings-output_format_json_quote_denormals) setting to show them.
|
||||
When serializing an [Enum](../../sql-reference/data-types/enum.md) value, the function outputs its name.
|
||||
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
toJSONString(value)
|
||||
```
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `value` — Value to serialize. Value may be of any data type.
|
||||
|
||||
**Returned value**
|
||||
|
||||
- JSON representation of the value.
|
||||
|
||||
Type: [String](../../sql-reference/data-types/string.md).
|
||||
|
||||
**Example**
|
||||
|
||||
The first example shows serialization of a [Map](../../sql-reference/data-types/map.md).
|
||||
The second example shows some special values wrapped into a [Tuple](../../sql-reference/data-types/tuple.md).
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT toJSONString(map('key1', 1, 'key2', 2));
|
||||
SELECT toJSONString(tuple(1.25, NULL, NaN, +inf, -inf, [])) SETTINGS output_format_json_quote_denormals = 1;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
{"key1":1,"key2":2}
|
||||
[1.25,null,"nan","inf","-inf",[]]
|
||||
```
|
||||
|
||||
**See Also**
|
||||
|
||||
- [output_format_json_quote_64bit_integers](../../operations/settings/settings.md#session_settings-output_format_json_quote_64bit_integers)
|
||||
- [output_format_json_quote_denormals](../../operations/settings/settings.md#settings-output_format_json_quote_denormals)
|
||||
|
@ -465,29 +465,27 @@ Result:
|
||||
|
||||
## CAST(x, T) {#type_conversion_function-cast}
|
||||
|
||||
Converts an input value to the specified data type. Unlike the [reinterpret](#type_conversion_function-reinterpret) function, `CAST` tries to present the same value using the new data type. If the conversion can not be done then an exception is raised.
|
||||
Several syntax variants are supported.
|
||||
Converts input value `x` to the `T` data type. Unlike to `reinterpret` function, type conversion is performed in a natural way.
|
||||
|
||||
The syntax `CAST(x AS t)` is also supported.
|
||||
|
||||
!!! note "Note"
|
||||
If value `x` does not fit the bounds of type `T`, the function overflows. For example, `CAST(-1, 'UInt8')` returns `255`.
|
||||
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
CAST(x, T)
|
||||
CAST(x AS t)
|
||||
x::t
|
||||
```
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `x` — A value to convert. May be of any type.
|
||||
- `T` — The name of the target data type. [String](../../sql-reference/data-types/string.md).
|
||||
- `t` — The target data type.
|
||||
- `x` — Any type.
|
||||
- `T` — Destination type. [String](../../sql-reference/data-types/string.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
- Converted value.
|
||||
|
||||
!!! note "Note"
|
||||
If the input value does not fit the bounds of the target type, the result overflows. For example, `CAST(-1, 'UInt8')` returns `255`.
|
||||
- Destination type value.
|
||||
|
||||
**Examples**
|
||||
|
||||
@ -496,16 +494,16 @@ Query:
|
||||
```sql
|
||||
SELECT
|
||||
CAST(toInt8(-1), 'UInt8') AS cast_int_to_uint,
|
||||
CAST(1.5 AS Decimal(3,2)) AS cast_float_to_decimal,
|
||||
'1'::Int32 AS cast_string_to_int;
|
||||
CAST(toInt8(1), 'Float32') AS cast_int_to_float,
|
||||
CAST('1', 'UInt32') AS cast_string_to_int;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
```
|
||||
┌─cast_int_to_uint─┬─cast_float_to_decimal─┬─cast_string_to_int─┐
|
||||
│ 255 │ 1.50 │ 1 │
|
||||
└──────────────────┴───────────────────────┴────────────────────┘
|
||||
┌─cast_int_to_uint─┬─cast_int_to_float─┬─cast_string_to_int─┐
|
||||
│ 255 │ 1 │ 1 │
|
||||
└──────────────────┴───────────────────┴────────────────────┘
|
||||
```
|
||||
|
||||
Query:
|
||||
|
@ -189,7 +189,7 @@ CREATE TABLE codec_example
|
||||
dt Date CODEC(ZSTD),
|
||||
ts DateTime CODEC(LZ4HC),
|
||||
float_value Float32 CODEC(NONE),
|
||||
double_value Float64 CODEC(LZ4HC(9)),
|
||||
double_value Float64 CODEC(LZ4HC(9))
|
||||
value Float32 CODEC(Delta, ZSTD)
|
||||
)
|
||||
ENGINE = <Engine>
|
||||
|
@ -36,23 +36,14 @@ Additional join types available in ClickHouse:
|
||||
- `LEFT ANY JOIN`, `RIGHT ANY JOIN` and `INNER ANY JOIN`, partially (for opposite side of `LEFT` and `RIGHT`) or completely (for `INNER` and `FULL`) disables the cartesian product for standard `JOIN` types.
|
||||
- `ASOF JOIN` and `LEFT ASOF JOIN`, joining sequences with a non-exact match. `ASOF JOIN` usage is described below.
|
||||
|
||||
## Settings {#join-settings}
|
||||
## Setting {#join-settings}
|
||||
|
||||
The default join type can be overridden using the [join_default_strictness](../../../operations/settings/settings.md#settings-join_default_strictness) setting.
|
||||
!!! note "Note"
|
||||
The default join type can be overridden using the [join_default_strictness](../../../operations/settings/settings.md#settings-join_default_strictness) setting.
|
||||
|
||||
The behavior of ClickHouse server for `ANY JOIN` operations depends on the [any_join_distinct_right_table_keys](../../../operations/settings/settings.md#any_join_distinct_right_table_keys) setting.
|
||||
Also the behavior of ClickHouse server for `ANY JOIN` operations depends on the [any_join_distinct_right_table_keys](../../../operations/settings/settings.md#any_join_distinct_right_table_keys) setting.
|
||||
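For example, a minimal sketch of overriding the default strictness for a session (the table names are illustrative):

``` sql
SET join_default_strictness = 'ANY';
SELECT * FROM t1 JOIN t2 USING (id); -- now executed as ANY JOIN
```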
|
||||
**See also**
|
||||
|
||||
- [join_algorithm](../../../operations/settings/settings.md#settings-join_algorithm)
|
||||
- [join_any_take_last_row](../../../operations/settings/settings.md#settings-join_any_take_last_row)
|
||||
- [join_use_nulls](../../../operations/settings/settings.md#join_use_nulls)
|
||||
- [partial_merge_join_optimizations](../../../operations/settings/settings.md#partial_merge_join_optimizations)
|
||||
- [partial_merge_join_rows_in_right_blocks](../../../operations/settings/settings.md#partial_merge_join_rows_in_right_blocks)
|
||||
- [join_on_disk_max_files_to_merge](../../../operations/settings/settings.md#join_on_disk_max_files_to_merge)
|
||||
- [any_join_distinct_right_table_keys](../../../operations/settings/settings.md#any_join_distinct_right_table_keys)
|
||||
|
||||
## ASOF JOIN Usage {#asof-join-usage}
|
||||
### ASOF JOIN Usage {#asof-join-usage}
|
||||
|
||||
`ASOF JOIN` is useful when you need to join records that have no exact match.
|
||||
|
||||
@ -102,7 +93,7 @@ For example, consider the following tables:
|
||||
!!! note "Note"
|
||||
`ASOF` join is **not** supported in the [Join](../../../engines/table-engines/special/join.md) table engine.
|
||||
|
||||
## Distributed JOIN {#global-join}
|
||||
## Distributed Join {#global-join}
|
||||
|
||||
There are two ways to execute join involving distributed tables:
|
||||
|
||||
@ -111,42 +102,6 @@ There are two ways to execute join involving distributed tables:
|
||||
|
||||
Be careful when using `GLOBAL`. For more information, see the [Distributed subqueries](../../../sql-reference/operators/in.md#select-distributed-subqueries) section.
|
||||
|
||||
## Implicit Type Conversion {#implicit-type-conversion}
|
||||
|
||||
`INNER JOIN`, `LEFT JOIN`, `RIGHT JOIN`, and `FULL JOIN` queries support implicit type conversion for "join keys". However, the query cannot be executed if the join keys from the left and the right tables cannot be converted to a single type (for example, there is no data type that can hold all values from both `UInt64` and `Int64`, or `String` and `Int32`).
|
||||
|
||||
**Example**
|
||||
|
||||
Consider the table `t_1`:
|
||||
```text
|
||||
┌─a─┬─b─┬─toTypeName(a)─┬─toTypeName(b)─┐
|
||||
│ 1 │ 1 │ UInt16 │ UInt8 │
|
||||
│ 2 │ 2 │ UInt16 │ UInt8 │
|
||||
└───┴───┴───────────────┴───────────────┘
|
||||
```
|
||||
and the table `t_2`:
|
||||
```text
|
||||
┌──a─┬────b─┬─toTypeName(a)─┬─toTypeName(b)───┐
|
||||
│ -1 │ 1 │ Int16 │ Nullable(Int64) │
|
||||
│ 1 │ -1 │ Int16 │ Nullable(Int64) │
|
||||
│ 1 │ 1 │ Int16 │ Nullable(Int64) │
|
||||
└────┴──────┴───────────────┴─────────────────┘
|
||||
```
|
||||
|
||||
The query
|
||||
```sql
|
||||
SELECT a, b, toTypeName(a), toTypeName(b) FROM t_1 FULL JOIN t_2 USING (a, b);
|
||||
```
|
||||
returns the set:
|
||||
```text
|
||||
┌──a─┬────b─┬─toTypeName(a)─┬─toTypeName(b)───┐
|
||||
│ 1 │ 1 │ Int32 │ Nullable(Int64) │
|
||||
│ 2 │ 2 │ Int32 │ Nullable(Int64) │
|
||||
│ -1 │ 1 │ Int32 │ Nullable(Int64) │
|
||||
│ 1 │ -1 │ Int32 │ Nullable(Int64) │
|
||||
└────┴──────┴───────────────┴─────────────────┘
|
||||
```
|
||||
|
||||
## Usage Recommendations {#usage-recommendations}
|
||||
|
||||
### Processing of Empty or NULL Cells {#processing-of-empty-or-null-cells}
|
||||
@ -184,9 +139,9 @@ If you need a `JOIN` for joining with dimension tables (these are relatively sma
|
||||
|
||||
### Memory Limitations {#memory-limitations}
|
||||
|
||||
By default, ClickHouse uses the [hash join](https://en.wikipedia.org/wiki/Hash_join) algorithm. ClickHouse takes the right_table and creates a hash table for it in RAM. If `join_algorithm = 'auto'` is enabled, then after some threshold of memory consumption, ClickHouse falls back to [merge](https://en.wikipedia.org/wiki/Sort-merge_join) join algorithm. For `JOIN` algorithms description see the [join_algorithm](../../../operations/settings/settings.md#settings-join_algorithm) setting.
|
||||
By default, ClickHouse uses the [hash join](https://en.wikipedia.org/wiki/Hash_join) algorithm. ClickHouse takes the `<right_table>` and creates a hash table for it in RAM. After some threshold of memory consumption, ClickHouse falls back to merge join algorithm.
|
||||
|
||||
If you need to restrict `JOIN` operation memory consumption use the following settings:
|
||||
If you need to restrict join operation memory consumption use the following settings:
|
||||
|
||||
- [max_rows_in_join](../../../operations/settings/query-complexity.md#settings-max_rows_in_join) — Limits number of rows in the hash table.
|
||||
- [max_bytes_in_join](../../../operations/settings/query-complexity.md#settings-max_bytes_in_join) — Limits size of the hash table.
|
||||
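A brief sketch of capping the hash-table memory for joins (the limits are illustrative):

``` sql
SET max_rows_in_join = 10000000;
SET max_bytes_in_join = 1000000000; -- roughly 1 GB for the right-table hash table
```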
|
@ -3,16 +3,6 @@ toc_priority: 76
|
||||
toc_title: Security Changelog
|
||||
---
|
||||
|
||||
## Fixed in ClickHouse 21.4.3.21, 2021-04-12 {#fixed-in-clickhouse-release-21-4-3-21-2021-04-12}
|
||||
|
||||
### CVE-2021-25263 {#cve-2021-25263}
|
||||
|
||||
An attacker that has CREATE DICTIONARY privilege, can read arbitary file outside permitted directory.
|
||||
|
||||
Fix has been pushed to versions 20.8.18.32-lts, 21.1.9.41-stable, 21.2.9.41-stable, 21.3.6.55-lts, 21.4.3.21-stable and later.
|
||||
|
||||
Credits: [Vyacheslav Egoshin](https://twitter.com/vegoshin)
|
||||
|
||||
## Fixed in ClickHouse Release 19.14.3.3, 2019-09-10 {#fixed-in-clickhouse-release-19-14-3-3-2019-09-10}
|
||||
|
||||
### CVE-2019-15024 {#cve-2019-15024}
|
||||
|
@ -1,125 +0,0 @@
|
||||
---
|
||||
toc_priority: 65
|
||||
toc_title: Сборка на Mac OS X
|
||||
---
|
||||
# Как собрать ClickHouse на Mac OS X {#how-to-build-clickhouse-on-mac-os-x}
|
||||
|
||||
Сборка должна запускаться с x86_64 (Intel) на macOS версии 10.15 (Catalina) и выше в последней версии компилятора Xcode's native AppleClang, Homebrew's vanilla Clang или в GCC-компиляторах.
|
||||
|
||||
## Установка Homebrew {#install-homebrew}
|
||||
|
||||
``` bash
|
||||
$ /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
|
||||
```
|
||||
|
||||
## Установка Xcode и инструментов командной строки {#install-xcode-and-command-line-tools}
|
||||
|
||||
1. Установите из App Store последнюю версию [Xcode](https://apps.apple.com/am/app/xcode/id497799835?mt=12).
|
||||
|
||||
2. Запустите ее, чтобы принять лицензионное соглашение. Необходимые компоненты установятся автоматически.
|
||||
|
||||
3. Затем убедитесь, что в системе выбрана последняя версия инструментов командной строки:
|
||||
|
||||
``` bash
|
||||
$ sudo rm -rf /Library/Developer/CommandLineTools
|
||||
$ sudo xcode-select --install
|
||||
```
|
||||
|
||||
4. Перезагрузитесь.
|
||||
|
||||
## Установка компиляторов, инструментов и библиотек {#install-required-compilers-tools-and-libraries}
|
||||
|
||||
``` bash
|
||||
$ brew update
|
||||
$ brew install cmake ninja libtool gettext llvm gcc
|
||||
```
|
||||
|
||||
## Просмотр исходников ClickHouse {#checkout-clickhouse-sources}
|
||||
|
||||
``` bash
|
||||
$ git clone --recursive git@github.com:ClickHouse/ClickHouse.git # or https://github.com/ClickHouse/ClickHouse.git
|
||||
```
|
||||
|
||||
## Сборка ClickHouse {#build-clickhouse}
|
||||
|
||||
Чтобы запустить сборку в компиляторе Xcode's native AppleClang:
|
||||
|
||||
``` bash
|
||||
$ cd ClickHouse
|
||||
$ rm -rf build
|
||||
$ mkdir build
|
||||
$ cd build
|
||||
$ cmake -DCMAKE_BUILD_TYPE=RelWithDebInfo -DENABLE_JEMALLOC=OFF ..
|
||||
$ cmake --build . --config RelWithDebInfo
|
||||
$ cd ..
|
||||
```
|
||||
|
||||
Чтобы запустить сборку в компиляторе Homebrew's vanilla Clang:
|
||||
|
||||
``` bash
|
||||
$ cd ClickHouse
|
||||
$ rm -rf build
|
||||
$ mkdir build
|
||||
$ cd build
|
||||
$ cmake -DCMAKE_C_COMPILER=$(brew --prefix llvm)/bin/clang -DCMAKE_CXX_COMPILER==$(brew --prefix llvm)/bin/clang++ -DCMAKE_BUILD_TYPE=RelWithDebInfo -DENABLE_JEMALLOC=OFF ..
|
||||
$ cmake -DCMAKE_C_COMPILER=$(brew --prefix llvm)/bin/clang -DCMAKE_CXX_COMPILER=$(brew --prefix llvm)/bin/clang++ -DCMAKE_BUILD_TYPE=RelWithDebInfo -DENABLE_JEMALLOC=OFF ..
|
||||
$ cmake --build . --config RelWithDebInfo
|
||||
$ cd ..
|
||||
```
|
||||
|
||||
Чтобы собрать с помощью компилятора Homebrew's vanilla GCC:
|
||||
|
||||
``` bash
|
||||
$ cd ClickHouse
|
||||
$ rm -rf build
|
||||
$ mkdir build
|
||||
$ cd build
|
||||
$ cmake -DCMAKE_C_COMPILER=$(brew --prefix gcc)/bin/gcc-10 -DCMAKE_CXX_COMPILER=$(brew --prefix gcc)/bin/g++-10 -DCMAKE_BUILD_TYPE=RelWithDebInfo -DENABLE_JEMALLOC=OFF ..
|
||||
$ cmake --build . --config RelWithDebInfo
|
||||
$ cd ..
|
||||
```
|
||||
|
||||
## Предупреждения {#caveats}
|
||||
|
||||
Если будете запускать `clickhouse-server`, убедитесь, что увеличили системную переменную `maxfiles`.
|
||||
|
||||
!!! info "Note"
|
||||
Вам понадобится команда `sudo`.
|
||||
|
||||
1. Создайте файл `/Library/LaunchDaemons/limit.maxfiles.plist` и поместите в него следующее:
|
||||
|
||||
``` xml
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN"
|
||||
"http://www.apple.com/DTDs/PropertyList-1.0.dtd">
|
||||
<plist version="1.0">
|
||||
<dict>
|
||||
<key>Label</key>
|
||||
<string>limit.maxfiles</string>
|
||||
<key>ProgramArguments</key>
|
||||
<array>
|
||||
<string>launchctl</string>
|
||||
<string>limit</string>
|
||||
<string>maxfiles</string>
|
||||
<string>524288</string>
|
||||
<string>524288</string>
|
||||
</array>
|
||||
<key>RunAtLoad</key>
|
||||
<true/>
|
||||
<key>ServiceIPC</key>
|
||||
<false/>
|
||||
</dict>
|
||||
</plist>
|
||||
```
|
||||
|
||||
2. Выполните команду:
|
||||
|
||||
``` bash
|
||||
$ sudo chown root:wheel /Library/LaunchDaemons/limit.maxfiles.plist
|
||||
```
|
||||
|
||||
3. Перезагрузитесь.
|
||||
|
||||
4. Чтобы проверить, как это работает, выполните команду `ulimit -n`.
|
||||
|
||||
[Original article](https://clickhouse.tech/docs/en/development/build_osx/) <!--hide-->
|
1
docs/ru/development/build-osx.md
Symbolic link
@ -0,0 +1 @@
|
||||
../../en/development/build-osx.md
|
@ -128,7 +128,7 @@ Ninja - система запуска сборочных задач.
|
||||
/usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
|
||||
brew install cmake ninja
|
||||
|
||||
Проверьте версию CMake: `cmake --version`. Если версия меньше 3.12, то установите новую версию с сайта https://cmake.org/download/
|
||||
Проверьте версию CMake: `cmake --version`. Если версия меньше 3.3, то установите новую версию с сайта https://cmake.org/download/
|
||||
|
||||
## Необязательные внешние библиотеки {#neobiazatelnye-vneshnie-biblioteki}
|
||||
|
||||
|
@ -20,5 +20,3 @@ toc_title: "Введение"
|
||||
|
||||
- [PostgreSQL](../../engines/database-engines/postgresql.md)
|
||||
|
||||
- [Replicated](../../engines/database-engines/replicated.md)
|
||||
|
||||
|
@ -1,4 +1,3 @@

---
toc_priority: 29
toc_title: MaterializeMySQL

@ -50,7 +49,6 @@ ENGINE = MaterializeMySQL('host:port', ['database' | database], 'user', 'passwor

| DATE, NEWDATE | [Date](../../sql-reference/data-types/date.md) |
| DATETIME, TIMESTAMP | [DateTime](../../sql-reference/data-types/datetime.md) |
| DATETIME2, TIMESTAMP2 | [DateTime64](../../sql-reference/data-types/datetime64.md) |
| ENUM | [Enum](../../sql-reference/data-types/enum.md) |
| STRING | [String](../../sql-reference/data-types/string.md) |
| VARCHAR, VAR_STRING | [String](../../sql-reference/data-types/string.md) |
| BLOB | [String](../../sql-reference/data-types/string.md) |

@ -83,8 +81,6 @@ DDL-запросы в MySQL конвертируются в соответств

- If the `_sign` column is not specified explicitly in a `SELECT` query, `WHERE _sign=1` is used by default, so deleted rows are not included in the result set.

- The result includes column comments if they exist in the MySQL database tables.

### Index conversion {#index-conversion}

MySQL `PRIMARY KEY` and `INDEX` clauses are converted into `ORDER BY` tuples in ClickHouse tables (a sketch of this mapping is shown below).
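For illustration only: the table, column, and index names below are hypothetical, and the exact engine variant and virtual columns are assumptions rather than anything stated on this page. Given a MySQL table declared roughly as `CREATE TABLE db.clicks (id BIGINT PRIMARY KEY, ts DATETIME, INDEX ix_ts (ts))`, the resulting ClickHouse table would derive its sorting key from the primary key and index columns, along these lines:

``` sql
-- Sketch of the resulting ClickHouse table; the MergeTree variant and the
-- _sign virtual column are illustrative assumptions, not generated output.
CREATE TABLE clicks
(
    id Int64,
    ts DateTime,
    _sign Int8
)
ENGINE = MergeTree
ORDER BY (id, ts);
```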
@ -1,119 +0,0 @@

# [experimental] Replicated {#replicated}

The engine is based on the [Atomic](../../engines/database-engines/atomic.md) engine. It supports replication of metadata via a DDL log that is written to ZooKeeper and executed on all replicas of the database.

One ClickHouse server can run and update several replicated databases at the same time, but there cannot be multiple replicas of the same replicated database.

## Creating a Database {#creating-a-database}

``` sql
CREATE DATABASE testdb ENGINE = Replicated('zoo_path', 'shard_name', 'replica_name') [SETTINGS ...]
```

**Engine parameters**

- `zoo_path` — path in ZooKeeper. The same ZooKeeper path corresponds to the same database.
- `shard_name` — shard name. Database replicas are grouped into shards by `shard_name`.
- `replica_name` — replica name. Replica names must be different for all replicas of the same shard.

!!! note "Warning"
    For [ReplicatedMergeTree](../table-engines/mergetree-family/replication.md#table_engines-replication) tables, if no arguments are provided, the default arguments `/clickhouse/tables/{uuid}/{shard}` and `{replica}` are used. They can be changed in the server settings [default_replica_path](../../operations/server-configuration-parameters/settings.md#default_replica_path) and [default_replica_name](../../operations/server-configuration-parameters/settings.md#default_replica_name). The `{uuid}` macro is expanded to the table's `UUID`, while `{shard}` and `{replica}` are expanded to values from the server config. In the future it will be possible to use the `shard_name` and `replica_name` arguments of the `Replicated` database engine here.

## Specifics and Recommendations {#specifics-and-recommendations}

DDL queries with a `Replicated` database work in a way similar to [ON CLUSTER](../../sql-reference/distributed-ddl.md) queries, but with minor differences.

First, the DDL query tries to execute on the initiator (the host that originally received the query from the user). If the query fails there, the user immediately receives an error and the other hosts do not try to execute it. If the query succeeds on the initiator, all the other hosts automatically retry it until they execute it.
The initiator tries to wait for the query to complete on the other hosts (no longer than [distributed_ddl_task_timeout](../../operations/settings/settings.md#distributed_ddl_task_timeout)) and returns a table with the query execution statuses on each host.

The behavior in case of errors is regulated by the [distributed_ddl_output_mode](../../operations/settings/settings.md#distributed_ddl_output_mode) setting; for `Replicated` databases it is better to set it to `null_status_on_timeout`: if some hosts did not manage to execute the query within [distributed_ddl_task_timeout](../../operations/settings/settings.md#distributed_ddl_task_timeout), then instead of an exception they get the `NULL` status in the result table (a minimal way to apply this is sketched below).
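A hedged sketch of applying that recommendation at the session level before issuing DDL against a `Replicated` database (the timeout value is illustrative, not taken from this page):

``` sql
-- Report NULL instead of throwing TIMEOUT_EXCEEDED for hosts that lag behind
SET distributed_ddl_output_mode = 'null_status_on_timeout';
-- Optionally bound how long the initiator waits for the other hosts (seconds)
SET distributed_ddl_task_timeout = 60;
```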
The [system.clusters](../../operations/system-tables/clusters.md) system table contains a cluster named after the replicated database, consisting of all replicas of that database. This cluster is updated automatically when replicas are created or dropped, and it can be used for [Distributed](../../engines/table-engines/special/distributed.md#distributed) tables.

When a new replica of the database is created, the replica creates the tables by itself. If the replica has been unavailable for a long time and has lagged behind the replication log, it checks its local metadata against the current metadata in ZooKeeper, moves extra tables with data to a separate non-replicated database (so as not to accidentally delete anything superfluous), creates the missing tables, and updates table names if they were renamed. Data is replicated at the `ReplicatedMergeTree` level, so if a table is not replicated, its data is not replicated either (the database is responsible only for metadata).

## Usage Example {#usage-example}

Create a replicated database on three hosts:
``` sql
node1 :) CREATE DATABASE r ENGINE=Replicated('some/path/r','shard1','replica1');
node2 :) CREATE DATABASE r ENGINE=Replicated('some/path/r','shard1','other_replica');
node3 :) CREATE DATABASE r ENGINE=Replicated('some/path/r','other_shard','{replica}');
```

Run a DDL query on one of the hosts:

``` sql
CREATE TABLE r.rmt (n UInt64) ENGINE=ReplicatedMergeTree ORDER BY n;
```

The query is executed on all the other hosts as well:

``` text
┌─────hosts────────────┬──status─┬─error─┬─num_hosts_remaining─┬─num_hosts_active─┐
│ shard1|replica1      │       0 │       │                   2 │                0 │
│ shard1|other_replica │       0 │       │                   1 │                0 │
│ other_shard|r1       │       0 │       │                   0 │                0 │
└──────────────────────┴─────────┴───────┴─────────────────────┴──────────────────┘
```

The cluster in the `system.clusters` system table:

``` sql
SELECT cluster, shard_num, replica_num, host_name, host_address, port, is_local
FROM system.clusters WHERE cluster='r';
```

``` text
┌─cluster─┬─shard_num─┬─replica_num─┬─host_name─┬─host_address─┬─port─┬─is_local─┐
│ r       │         1 │           1 │ node3     │ 127.0.0.1    │ 9002 │        0 │
│ r       │         2 │           1 │ node2     │ 127.0.0.1    │ 9001 │        0 │
│ r       │         2 │           2 │ node1     │ 127.0.0.1    │ 9000 │        1 │
└─────────┴───────────┴─────────────┴───────────┴──────────────┴──────┴──────────┘
```
Create a distributed table and insert data into it:

``` sql
node2 :) CREATE TABLE r.d (n UInt64) ENGINE=Distributed('r','r','rmt', n % 2);
node3 :) INSERT INTO r SELECT * FROM numbers(10);
node1 :) SELECT materialize(hostName()) AS host, groupArray(n) FROM r.d GROUP BY host;
```

``` text
┌─hosts─┬─groupArray(n)─┐
│ node1 │ [1,3,5,7,9]   │
│ node2 │ [0,2,4,6,8]   │
└───────┴───────────────┘
```

Adding a replica:

``` sql
node4 :) CREATE DATABASE r ENGINE=Replicated('some/path/r','other_shard','r2');
```

The new replica automatically creates all the tables that exist in the database, and the old replicas reload the cluster configuration from ZooKeeper:

``` text
┌─cluster─┬─shard_num─┬─replica_num─┬─host_name─┬─host_address─┬─port─┬─is_local─┐
│ r       │         1 │           1 │ node3     │ 127.0.0.1    │ 9002 │        0 │
│ r       │         1 │           2 │ node4     │ 127.0.0.1    │ 9003 │        0 │
│ r       │         2 │           1 │ node2     │ 127.0.0.1    │ 9001 │        0 │
│ r       │         2 │           2 │ node1     │ 127.0.0.1    │ 9000 │        1 │
└─────────┴───────────┴─────────────┴───────────┴──────────────┴──────┴──────────┘
```

The distributed table also receives data from the new host:

```sql
node2 :) SELECT materialize(hostName()) AS host, groupArray(n) FROM r.d GROUP BY host;
```

```text
┌─hosts─┬─groupArray(n)─┐
│ node2 │ [1,3,5,7,9]   │
│ node4 │ [0,2,4,6,8]   │
└───────┴───────────────┘
```
@ -100,9 +100,9 @@ sudo ./clickhouse install
|
||||
|
||||
Для других операционных систем и архитектуры AArch64 сборки ClickHouse предоставляются в виде кросс-компилированного бинарного файла из последнего коммита ветки `master` (с задержкой в несколько часов).
|
||||
|
||||
- [macOS](https://builds.clickhouse.tech/master/macos/clickhouse) — `curl -O 'https://builds.clickhouse.tech/master/macos/clickhouse' && chmod a+x ./clickhouse`
|
||||
- [FreeBSD](https://builds.clickhouse.tech/master/freebsd/clickhouse) — `curl -O 'https://builds.clickhouse.tech/master/freebsd/clickhouse' && chmod a+x ./clickhouse`
|
||||
- [AArch64](https://builds.clickhouse.tech/master/aarch64/clickhouse) — `curl -O 'https://builds.clickhouse.tech/master/aarch64/clickhouse' && chmod a+x ./clickhouse`
|
||||
- [macOS](https://builds.clickhouse.tech/master/macos/clickhouse) — `curl -O 'https://builds.clickhouse.tech/master/macos/clickhouse' && chmod a+x ./clickhouse`
|
||||
- [AArch64](https://builds.clickhouse.tech/master/aarch64/clickhouse) — `curl -O 'https://builds.clickhouse.tech/master/aarch64/clickhouse' && chmod a+x ./clickhouse`
|
||||
- [FreeBSD](https://builds.clickhouse.tech/master/freebsd/clickhouse) — `curl -O 'https://builds.clickhouse.tech/master/freebsd/clickhouse' && chmod a+x ./clickhouse`
|
||||
|
||||
После скачивания можно воспользоваться `clickhouse client` для подключения к серверу или `clickhouse local` для обработки локальных данных.
|
||||
|
||||
|
@ -1165,14 +1165,12 @@ SELECT * FROM topic1_stream;
|
||||
| `DOUBLE` | [Float64](../sql-reference/data-types/float.md) | `DOUBLE` |
|
||||
| `DATE32` | [Date](../sql-reference/data-types/date.md) | `UINT16` |
|
||||
| `DATE64`, `TIMESTAMP` | [DateTime](../sql-reference/data-types/datetime.md) | `UINT32` |
|
||||
| `STRING`, `BINARY` | [String](../sql-reference/data-types/string.md) | `BINARY` |
|
||||
| — | [FixedString](../sql-reference/data-types/fixedstring.md) | `BINARY` |
|
||||
| `STRING`, `BINARY` | [String](../sql-reference/data-types/string.md) | `STRING` |
|
||||
| — | [FixedString](../sql-reference/data-types/fixedstring.md) | `STRING` |
|
||||
| `DECIMAL` | [Decimal](../sql-reference/data-types/decimal.md) | `DECIMAL` |
|
||||
| `LIST` | [Array](../sql-reference/data-types/array.md) | `LIST` |
|
||||
| `STRUCT` | [Tuple](../sql-reference/data-types/tuple.md) | `STRUCT` |
|
||||
| `MAP` | [Map](../sql-reference/data-types/map.md) | `MAP` |
|
||||
|
||||
Массивы могут быть вложенными и иметь в качестве аргумента значение типа `Nullable`. Типы `Tuple` и `Map` также могут быть вложенными.
|
||||
Массивы могут быть вложенными и иметь в качестве аргумента значение типа `Nullable`.
|
||||
|
||||
ClickHouse поддерживает настраиваемую точность для формата `Decimal`. При выполнении запроса `INSERT` ClickHouse обрабатывает тип данных Parquet `DECIMAL` как `Decimal128`.
|
||||
|
||||
@ -1220,17 +1218,12 @@ $ clickhouse-client --query="SELECT * FROM {some_table} FORMAT Parquet" > {some_
|
||||
| `DOUBLE` | [Float64](../sql-reference/data-types/float.md) | `FLOAT64` |
|
||||
| `DATE32` | [Date](../sql-reference/data-types/date.md) | `UINT16` |
|
||||
| `DATE64`, `TIMESTAMP` | [DateTime](../sql-reference/data-types/datetime.md) | `UINT32` |
|
||||
| `STRING`, `BINARY` | [String](../sql-reference/data-types/string.md) | `BINARY` |
|
||||
| `STRING`, `BINARY` | [FixedString](../sql-reference/data-types/fixedstring.md) | `BINARY` |
|
||||
| `STRING`, `BINARY` | [String](../sql-reference/data-types/string.md) | `UTF8` |
|
||||
| `STRING`, `BINARY` | [FixedString](../sql-reference/data-types/fixedstring.md) | `UTF8` |
|
||||
| `DECIMAL` | [Decimal](../sql-reference/data-types/decimal.md) | `DECIMAL` |
|
||||
| `DECIMAL256` | [Decimal256](../sql-reference/data-types/decimal.md)| `DECIMAL256` |
|
||||
| `LIST` | [Array](../sql-reference/data-types/array.md) | `LIST` |
|
||||
| `STRUCT` | [Tuple](../sql-reference/data-types/tuple.md) | `STRUCT` |
|
||||
| `MAP` | [Map](../sql-reference/data-types/map.md) | `MAP` |
|
||||
|
||||
Массивы могут быть вложенными и иметь в качестве аргумента значение типа `Nullable`. Типы `Tuple` и `Map` также могут быть вложенными.
|
||||
|
||||
Тип `DICTIONARY` поддерживается для запросов `INSERT`. Для запросов `SELECT` есть настройка [output_format_arrow_low_cardinality_as_dictionary](../operations/settings/settings.md#output-format-arrow-low-cardinality-as-dictionary), которая позволяет выводить тип [LowCardinality](../sql-reference/data-types/lowcardinality.md) как `DICTIONARY`.
|
||||
Массивы могут быть вложенными и иметь в качестве аргумента значение типа `Nullable`.
|
||||
|
||||
ClickHouse поддерживает настраиваемую точность для формата `Decimal`. При выполнении запроса `INSERT` ClickHouse обрабатывает тип данных Arrow `DECIMAL` как `Decimal128`.
|
||||
|
||||
@ -1283,10 +1276,8 @@ $ clickhouse-client --query="SELECT * FROM {some_table} FORMAT Arrow" > {filenam
|
||||
| `STRING`, `BINARY` | [String](../sql-reference/data-types/string.md) | `BINARY` |
|
||||
| `DECIMAL` | [Decimal](../sql-reference/data-types/decimal.md) | `DECIMAL` |
|
||||
| `LIST` | [Array](../sql-reference/data-types/array.md) | `LIST` |
|
||||
| `STRUCT` | [Tuple](../sql-reference/data-types/tuple.md) | `STRUCT` |
|
||||
| `MAP` | [Map](../sql-reference/data-types/map.md) | `MAP` |
|
||||
|
||||
Массивы могут быть вложенными и иметь в качестве аргумента значение типа `Nullable`. Типы `Tuple` и `Map` также могут быть вложенными.
|
||||
Массивы могут быть вложенными и иметь в качестве аргумента значение типа `Nullable`.
|
||||
|
||||
ClickHouse поддерживает настраиваемую точность для формата `Decimal`. При выполнении запроса `INSERT` ClickHouse обрабатывает тип данных ORC `DECIMAL` как `Decimal128`.
|
||||
|
||||
|
@ -490,23 +490,6 @@ ClickHouse может парсить только базовый формат `Y

Default value: `ALL`.

## join_algorithm {#settings-join_algorithm}

Specifies which algorithm is used to execute a [JOIN](../../sql-reference/statements/select/join.md) query.

Possible values:

- `hash` — the [hash join](https://ru.wikipedia.org/wiki/Алгоритм_соединения_хешированием) algorithm is used.
- `partial_merge` — the [sort-merge join](https://ru.wikipedia.org/wiki/Алгоритм_соединения_слиянием_сортированных_списков) algorithm is used.
- `prefer_partial_merge` — the sort-merge join algorithm is used whenever possible.
- `auto` — the ClickHouse server tries to replace the `hash` algorithm with `merge` on the fly to avoid running out of memory.

Default value: `hash`.

With the `hash` algorithm, the right-hand side of the `JOIN` is loaded into RAM.

With the `partial_merge` algorithm, the server sorts the data and flushes it to disk. The `merge` algorithm in ClickHouse differs slightly from the classical implementation. First, ClickHouse sorts the right table by blocks based on the [join keys](../../sql-reference/statements/select/join.md#select-join) and builds min-max indexes for the sorted blocks. Then it sorts the parts of the left table based on the join keys and joins them against the right table. The min-max indexes are used to skip right-table blocks that do not participate in the given `JOIN` operation.
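A brief sketch of switching the algorithm for a memory-heavy join (`big_left`, `big_right`, and their columns are placeholder names, not from this page):

``` sql
-- Let the server fall back from hash join to merge join when memory runs short
SET join_algorithm = 'auto';

SELECT l.id, r.payload
FROM big_left AS l
INNER JOIN big_right AS r ON l.id = r.id;
```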
## join_any_take_last_row {#settings-join_any_take_last_row}

Changes the behavior of join operations performed with `ANY` strictness.
@ -1221,15 +1204,8 @@ load_balancing = round_robin

Works for the JSONEachRow and TSKV formats.

## output_format_json_quote_64bit_integers {#session_settings-output_format_json_quote_64bit_integers}

Controls quoting of 64-bit and wider [integers](../../sql-reference/data-types/int-uint.md) (such as `UInt64` or `Int128`) when they are output in the [JSON](../../interfaces/formats.md#json) formats.
By default such numbers are enclosed in quotes; this behavior matches most JavaScript implementations.

Possible values:

- 0 — numbers are output without quotes.
- 1 — numbers are output in quotes.

Default value: 1.
If true, then when the JSON\* formats are used, UInt64 and Int64 numbers are output in quotes (for compatibility with most JavaScript implementations); otherwise they are output without quotes.
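A quick sketch of the effect (the column alias and literal are illustrative):

``` sql
SET output_format_json_quote_64bit_integers = 0;
SELECT toUInt64(10000000000) AS big FORMAT JSONEachRow;
-- {"big":10000000000}

SET output_format_json_quote_64bit_integers = 1;
SELECT toUInt64(10000000000) AS big FORMAT JSONEachRow;
-- {"big":"10000000000"}
```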
## output_format_json_quote_denormals {#settings-output_format_json_quote_denormals}
|
||||
|
||||
@ -3003,53 +2979,6 @@ SELECT

FROM fuse_tbl
```

## allow_experimental_database_replicated {#allow_experimental_database_replicated}

Enables creating databases with the [Replicated](../../engines/database-engines/replicated.md) engine.

Possible values:

- 0 — Disabled.
- 1 — Enabled.

Default value: `0`.
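A hedged sketch of turning the engine on for the current session and creating such a database (the ZooKeeper path and macros are illustrative, not from this page):

``` sql
SET allow_experimental_database_replicated = 1;

CREATE DATABASE rdb
ENGINE = Replicated('/clickhouse/databases/rdb', '{shard}', '{replica}');
```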
## database_replicated_initial_query_timeout_sec {#database_replicated_initial_query_timeout_sec}

Sets how long (in seconds) an initial DDL query should wait for the replicated database to process previous entries of the DDL queue.

Possible values:

- Positive integer.
- 0 — Unlimited.

Default value: `300`.

## distributed_ddl_task_timeout {#distributed_ddl_task_timeout}

Sets the timeout for responses to DDL queries from all hosts in the cluster. If a DDL query has not been performed on all hosts, the response contains a timeout error and the query is executed in asynchronous mode.

Possible values:

- Positive integer.
- 0 — Asynchronous mode.
- Negative number — infinite timeout.

Default value: `180`.

## distributed_ddl_output_mode {#distributed_ddl_output_mode}

Sets the format of the result of a distributed DDL query.

Possible values:

- `throw` — returns a result set with the query execution status for all hosts where the query finished. If the query failed on some hosts, an exception is thrown. If the query has not yet finished on some hosts and the [distributed_ddl_task_timeout](#distributed_ddl_task_timeout) is exceeded, a `TIMEOUT_EXCEEDED` exception is thrown.
- `none` — same as `throw`, but the distributed DDL query returns no result set.
- `null_status_on_timeout` — returns `NULL` as the execution status in the corresponding rows of the result set instead of throwing `TIMEOUT_EXCEEDED` if the query has not finished on those hosts.
- `never_throw` — does not throw an exception or `TIMEOUT_EXCEEDED` if the query failed on some hosts.

Default value: `throw`.
## flatten_nested {#flatten-nested}

Sets the data format of [nested](../../sql-reference/data-types/nested-data-structures/nested.md) columns.

@ -3130,14 +3059,3 @@ SETTINGS index_granularity = 8192 │

**Usage**

If set to `0`, the table function does not make columns Nullable and inserts default values of the scalar type instead of NULL. This also applies to NULL values inside arrays.

## output_format_arrow_low_cardinality_as_dictionary {#output-format-arrow-low-cardinality-as-dictionary}

Allows converting the [LowCardinality](../../sql-reference/data-types/lowcardinality.md) type to the `DICTIONARY` type of the [Arrow](../../interfaces/formats.md#data-format-arrow) format for `SELECT` queries.

Possible values:

- 0 — the `LowCardinality` type is not converted to the `DICTIONARY` type.
- 1 — the `LowCardinality` type is converted to the `DICTIONARY` type.

Default value: `0`.
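A minimal sketch of exporting with dictionary encoding enabled, for example from `clickhouse-client` with the output redirected to a file (the table name is a placeholder):

``` sql
SET output_format_arrow_low_cardinality_as_dictionary = 1;
SELECT * FROM some_table FORMAT Arrow;
```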
@ -1,38 +0,0 @@

# system.data_skipping_indices {#system-data-skipping-indices}

Contains information about the existing data skipping indices in all tables.

Columns:

- `database` ([String](../../sql-reference/data-types/string.md)) — database name.
- `table` ([String](../../sql-reference/data-types/string.md)) — table name.
- `name` ([String](../../sql-reference/data-types/string.md)) — index name.
- `type` ([String](../../sql-reference/data-types/string.md)) — index type.
- `expr` ([String](../../sql-reference/data-types/string.md)) — expression used to calculate the index.
- `granularity` ([UInt64](../../sql-reference/data-types/int-uint.md)) — the number of granules in a data block.

**Example**

```sql
SELECT * FROM system.data_skipping_indices LIMIT 2 FORMAT Vertical;
```

```text
Row 1:
──────
database:    default
table:       user_actions
name:        clicks_idx
type:        minmax
expr:        clicks
granularity: 1

Row 2:
──────
database:    default
table:       users
name:        contacts_null_idx
type:        minmax
expr:        assumeNotNull(contacts_null)
granularity: 1
```
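For context, a sketch of how an entry like `clicks_idx` above could have been declared; only the index name, expression, type, and granularity come from the example rows, the table layout itself is assumed:

```sql
ALTER TABLE default.user_actions
    ADD INDEX clicks_idx clicks TYPE minmax GRANULARITY 1;
```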
@ -4,6 +4,7 @@

Functions:

- `median` — alias for [quantile](../../../sql-reference/aggregate-functions/reference/quantile.md#quantile).
- `medianDeterministic` — alias for [quantileDeterministic](../../../sql-reference/aggregate-functions/reference/quantiledeterministic.md#quantiledeterministic).
- `medianExact` — alias for [quantileExact](../../../sql-reference/aggregate-functions/reference/quantileexact.md#quantileexact).

@ -30,7 +31,7 @@

Query:

``` sql
SELECT medianDeterministic(val, 1) FROM t;
SELECT medianDeterministic(val, 1) FROM t
```

Result:

@ -40,3 +41,4 @@ SELECT medianDeterministic(val, 1) FROM t;

│ 1.5 │
└─────────────────────────────┘
```
@ -15,7 +15,7 @@ LowCardinality(data_type)
|
||||
|
||||
**Параметры**
|
||||
|
||||
- `data_type` — [String](string.md), [FixedString](fixedstring.md), [Date](date.md), [DateTime](datetime.md) и числа за исключением типа [Decimal](decimal.md). `LowCardinality` неэффективен для некоторых типов данных, см. описание настройки [allow_suspicious_low_cardinality_types](../../operations/settings/settings.md#allow_suspicious_low_cardinality_types).
|
||||
- `data_type` — [String](string.md), [FixedString](fixedstring.md), [Date](date.md), [DateTime](datetime.md) и числа за исключением типа [Decimal](decimal.md). `LowCardinality` неэффективен для некоторых типов данных, см. описание настройки [allow_suspicious_low_cardinality_types](../../operations/settings/settings.md#allow_suspicious_low_cardinality_types).
|
||||
|
||||
## Описание {#lowcardinality-dscr}
|
||||
|
||||
@ -23,11 +23,11 @@ LowCardinality(data_type)
|
||||
|
||||
Эффективность использования типа данных `LowCarditality` зависит от разнообразия данных. Если словарь содержит менее 10 000 различных значений, ClickHouse в основном показывает более высокую эффективность чтения и хранения данных. Если же словарь содержит более 100 000 различных значений, ClickHouse может работать хуже, чем при использовании обычных типов данных.
|
||||
|
||||
При работе со строками использование `LowCardinality` вместо [Enum](enum.md) обеспечивает большую гибкость в использовании и часто показывает такую же или более высокую эффективность.
|
||||
При работе со строками, использование `LowCardinality` вместо [Enum](enum.md) обеспечивает большую гибкость в использовании и часто показывает такую же или более высокую эффективность.
|
||||
|
||||
## Пример
|
||||
|
||||
Создание таблицы со столбцами типа `LowCardinality`:
|
||||
Создать таблицу со столбцами типа `LowCardinality`:
|
||||
|
||||
```sql
|
||||
CREATE TABLE lc_t
|
||||
@ -43,18 +43,18 @@ ORDER BY id
|
||||
|
||||
Настройки:
|
||||
|
||||
- [low_cardinality_max_dictionary_size](../../operations/settings/settings.md#low_cardinality_max_dictionary_size)
|
||||
- [low_cardinality_use_single_dictionary_for_part](../../operations/settings/settings.md#low_cardinality_use_single_dictionary_for_part)
|
||||
- [low_cardinality_allow_in_native_format](../../operations/settings/settings.md#low_cardinality_allow_in_native_format)
|
||||
- [allow_suspicious_low_cardinality_types](../../operations/settings/settings.md#allow_suspicious_low_cardinality_types)
|
||||
- [output_format_arrow_low_cardinality_as_dictionary](../../operations/settings/settings.md#output-format-arrow-low-cardinality-as-dictionary)
|
||||
- [low_cardinality_max_dictionary_size](../../operations/settings/settings.md#low_cardinality_max_dictionary_size)
|
||||
- [low_cardinality_use_single_dictionary_for_part](../../operations/settings/settings.md#low_cardinality_use_single_dictionary_for_part)
|
||||
- [low_cardinality_allow_in_native_format](../../operations/settings/settings.md#low_cardinality_allow_in_native_format)
|
||||
- [allow_suspicious_low_cardinality_types](../../operations/settings/settings.md#allow_suspicious_low_cardinality_types)
|
||||
|
||||
Функции:
|
||||
|
||||
- [toLowCardinality](../functions/type-conversion-functions.md#tolowcardinality)
|
||||
- [toLowCardinality](../functions/type-conversion-functions.md#tolowcardinality)
|
||||
|
||||
## Смотрите также
|
||||
|
||||
- [A Magical Mystery Tour of the LowCardinality Data Type](https://www.altinity.com/blog/2019/3/27/low-cardinality).
|
||||
- [Reducing Clickhouse Storage Cost with the Low Cardinality Type – Lessons from an Instana Engineer](https://www.instana.com/blog/reducing-clickhouse-storage-cost-with-the-low-cardinality-type-lessons-from-an-instana-engineer/).
|
||||
- [String Optimization (video presentation in Russian)](https://youtu.be/rqf-ILRgBdY?list=PL0Z2YDlm0b3iwXCpEFiOOYmwXzVmjJfEt). [Slides in English](https://github.com/yandex/clickhouse-presentations/raw/master/meetup19/string_optimization.pdf).
|
||||
- [A Magical Mystery Tour of the LowCardinality Data Type](https://www.altinity.com/blog/2019/3/27/low-cardinality).
|
||||
- [Reducing Clickhouse Storage Cost with the Low Cardinality Type – Lessons from an Instana Engineer](https://www.instana.com/blog/reducing-clickhouse-storage-cost-with-the-low-cardinality-type-lessons-from-an-instana-engineer/).
|
||||
- [String Optimization (video presentation in Russian)](https://youtu.be/rqf-ILRgBdY?list=PL0Z2YDlm0b3iwXCpEFiOOYmwXzVmjJfEt). [Slides in English](https://github.com/yandex/clickhouse-presentations/raw/master/meetup19/string_optimization.pdf).
|
||||
|
||||
|
@ -9,8 +9,11 @@ toc_title: Map(key, value)

**Parameters**

- `key` — the key. [String](../../sql-reference/data-types/string.md), [Integer](../../sql-reference/data-types/int-uint.md), [LowCardinality](../../sql-reference/data-types/lowcardinality.md), or [FixedString](../../sql-reference/data-types/fixedstring.md).
- `value` — the value. [String](../../sql-reference/data-types/string.md), [Integer](../../sql-reference/data-types/int-uint.md), [Array](../../sql-reference/data-types/array.md), [LowCardinality](../../sql-reference/data-types/lowcardinality.md), or [FixedString](../../sql-reference/data-types/fixedstring.md).
- `key` — the key. [String](../../sql-reference/data-types/string.md) or [Integer](../../sql-reference/data-types/int-uint.md).
- `value` — the value. [String](../../sql-reference/data-types/string.md), [Integer](../../sql-reference/data-types/int-uint.md), or [Array](../../sql-reference/data-types/array.md).

!!! warning "Warning"
    The `Map` data type is currently an experimental feature. To use it, enable the setting `allow_experimental_map_type = 1`.

To get the value from an `a Map('key', 'value')` column, use the `a['key']` syntax. This lookup currently works with linear complexity.
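A compact sketch putting those pieces together; the setting and the `a['key']` syntax come from this page, while the table name is a placeholder:

``` sql
SET allow_experimental_map_type = 1;

CREATE TABLE table_map (a Map(String, UInt64)) ENGINE = Memory;
INSERT INTO table_map VALUES ({'key1': 1, 'key2': 10});

-- Key lookup (currently linear in the number of entries)
SELECT a['key1'] FROM table_map;
```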
@ -306,51 +306,3 @@ SELECT JSONExtractKeysAndValuesRaw('{"a": [-100, 200.0], "b":{"c": {"d": "hello"
|
||||
│ [('d','"hello"'),('f','"world"')] │
|
||||
└───────────────────────────────────────────────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
|
||||
## toJSONString {#tojsonstring}
|
||||
|
||||
Сериализует значение в JSON представление. Поддерживаются различные типы данных и вложенные структуры.
|
||||
По умолчанию 64-битные [целые числа](../../sql-reference/data-types/int-uint.md) и более (например, `UInt64` или `Int128`) заключаются в кавычки. Настройка [output_format_json_quote_64bit_integers](../../operations/settings/settings.md#session_settings-output_format_json_quote_64bit_integers) управляет этим поведением.
|
||||
Специальные значения `NaN` и `inf` заменяются на `null`. Чтобы они отображались, включите настройку [output_format_json_quote_denormals](../../operations/settings/settings.md#settings-output_format_json_quote_denormals).
|
||||
Когда сериализуется значение [Enum](../../sql-reference/data-types/enum.md), то функция выводит его имя.
|
||||
|
||||
**Синтаксис**
|
||||
|
||||
``` sql
|
||||
toJSONString(value)
|
||||
```
|
||||
|
||||
**Аргументы**
|
||||
|
||||
- `value` — значение, которое необходимо сериализовать. Может быть любого типа.
|
||||
|
||||
**Возвращаемое значение**
|
||||
|
||||
- JSON представление значения.
|
||||
|
||||
Тип: [String](../../sql-reference/data-types/string.md).
|
||||
|
||||
**Пример**
|
||||
|
||||
Первый пример показывает сериализацию [Map](../../sql-reference/data-types/map.md).
|
||||
Во втором примере есть специальные значения, обернутые в [Tuple](../../sql-reference/data-types/tuple.md).
|
||||
|
||||
Запрос:
|
||||
|
||||
``` sql
|
||||
SELECT toJSONString(map('key1', 1, 'key2', 2));
|
||||
SELECT toJSONString(tuple(1.25, NULL, NaN, +inf, -inf, [])) SETTINGS output_format_json_quote_denormals = 1;
|
||||
```
|
||||
|
||||
Результат:
|
||||
|
||||
``` text
|
||||
{"key1":1,"key2":2}
|
||||
[1.25,null,"nan","inf","-inf",[]]
|
||||
```
|
||||
|
||||
**Смотрите также**
|
||||
|
||||
- [output_format_json_quote_64bit_integers](../../operations/settings/settings.md#session_settings-output_format_json_quote_64bit_integers)
|
||||
- [output_format_json_quote_denormals](../../operations/settings/settings.md#settings-output_format_json_quote_denormals)
|
||||
|
@ -462,29 +462,27 @@ SELECT reinterpret(toInt8(-1), 'UInt8') as int_to_uint,
|
||||
|
||||
## CAST(x, T) {#type_conversion_function-cast}
|
||||
|
||||
Преобразует входное значение к указанному типу данных. В отличие от функции [reinterpret](#type_conversion_function-reinterpret) `CAST` пытается представить то же самое значение в новом типе данных. Если преобразование невозможно, то возникает исключение.
|
||||
Поддерживается несколько вариантов синтаксиса.
|
||||
Преобразует входное значение `x` в указанный тип данных `T`. В отличии от функции `reinterpret` использует внешнее представление значения `x`.
|
||||
|
||||
Поддерживается также синтаксис `CAST(x AS t)`.
|
||||
|
||||
!!! warning "Предупреждение"
|
||||
Если значение `x` не может быть преобразовано к типу `T`, возникает переполнение. Например, `CAST(-1, 'UInt8')` возвращает 255.
|
||||
|
||||
**Синтаксис**
|
||||
|
||||
``` sql
|
||||
CAST(x, T)
|
||||
CAST(x AS t)
|
||||
x::t
|
||||
```
|
||||
|
||||
**Аргументы**
|
||||
|
||||
- `x` — значение, которое нужно преобразовать. Может быть любого типа.
|
||||
- `T` — имя типа данных. [String](../../sql-reference/data-types/string.md).
|
||||
- `t` — тип данных.
|
||||
- `x` — любой тип данных.
|
||||
- `T` — конечный тип данных. [String](../../sql-reference/data-types/string.md).
|
||||
|
||||
**Возвращаемое значение**
|
||||
|
||||
- Преобразованное значение.
|
||||
|
||||
!!! note "Примечание"
|
||||
Если входное значение выходит за границы нового типа, то результат переполняется. Например, `CAST(-1, 'UInt8')` возвращает `255`.
|
||||
- Значение конечного типа данных.
|
||||
|
||||
**Примеры**
|
||||
|
||||
@ -493,16 +491,16 @@ x::t
|
||||
```sql
|
||||
SELECT
|
||||
CAST(toInt8(-1), 'UInt8') AS cast_int_to_uint,
|
||||
CAST(1.5 AS Decimal(3,2)) AS cast_float_to_decimal,
|
||||
'1'::Int32 AS cast_string_to_int;
|
||||
CAST(toInt8(1), 'Float32') AS cast_int_to_float,
|
||||
CAST('1', 'UInt32') AS cast_string_to_int
|
||||
```
|
||||
|
||||
Результат:
|
||||
|
||||
```
|
||||
┌─cast_int_to_uint─┬─cast_float_to_decimal─┬─cast_string_to_int─┐
|
||||
│ 255 │ 1.50 │ 1 │
|
||||
└──────────────────┴───────────────────────┴────────────────────┘
|
||||
┌─cast_int_to_uint─┬─cast_int_to_float─┬─cast_string_to_int─┐
|
||||
│ 255 │ 1 │ 1 │
|
||||
└──────────────────┴───────────────────┴────────────────────┘
|
||||
```
|
||||
|
||||
Запрос:
|
||||
@ -526,7 +524,7 @@ SELECT
|
||||
|
||||
Преобразование в FixedString(N) работает только для аргументов типа [String](../../sql-reference/data-types/string.md) или [FixedString](../../sql-reference/data-types/fixedstring.md).
|
||||
|
||||
Поддерживается преобразование к типу [Nullable](../../sql-reference/data-types/nullable.md) и обратно.
|
||||
Поддерживается преобразование к типу [Nullable](../../sql-reference/functions/type-conversion-functions.md) и обратно.
|
||||
|
||||
**Примеры**
|
||||
|
||||
|
@ -17,7 +17,7 @@ toc_title: PARTITION
|
||||
- [CLEAR INDEX IN PARTITION](#alter_clear-index-partition) — очистить построенные вторичные индексы для заданной партиции;
|
||||
- [FREEZE PARTITION](#alter_freeze-partition) — создать резервную копию партиции;
|
||||
- [UNFREEZE PARTITION](#alter_unfreeze-partition) — удалить резервную копию партиции;
|
||||
- [FETCH PARTITION\|PART](#alter_fetch-partition) — скачать партицию/кусок с другого сервера;
|
||||
- [FETCH PARTITION](#alter_fetch-partition) — скачать партицию с другого сервера;
|
||||
- [MOVE PARTITION\|PART](#alter_move-partition) — переместить партицию/кускок на другой диск или том.
|
||||
- [UPDATE IN PARTITION](#update-in-partition) — обновить данные внутри партиции по условию.
|
||||
- [DELETE IN PARTITION](#delete-in-partition) — удалить данные внутри партиции по условию.
|
||||
@ -209,35 +209,29 @@ ALTER TABLE 'table_name' UNFREEZE [PARTITION 'part_expr'] WITH NAME 'backup_name
|
||||
|
||||
Удаляет с диска "замороженные" партиции с указанным именем. Если секция `PARTITION` опущена, запрос удаляет резервную копию всех партиций сразу.
|
||||
|
||||
## FETCH PARTITION\|PART {#alter_fetch-partition}
|
||||
## FETCH PARTITION {#alter_fetch-partition}
|
||||
|
||||
``` sql
|
||||
ALTER TABLE table_name FETCH PARTITION|PART partition_expr FROM 'path-in-zookeeper'
|
||||
ALTER TABLE table_name FETCH PARTITION partition_expr FROM 'path-in-zookeeper'
|
||||
```
|
||||
|
||||
Загружает партицию с другого сервера. Этот запрос работает только для реплицированных таблиц.
|
||||
|
||||
Запрос выполняет следующее:
|
||||
|
||||
1. Загружает партицию/кусок с указанного шарда. Путь к шарду задается в секции `FROM` (‘path-in-zookeeper’). Обратите внимание, нужно задавать путь к шарду в ZooKeeper.
|
||||
1. Загружает партицию с указанного шарда. Путь к шарду задается в секции `FROM` (‘path-in-zookeeper’). Обратите внимание, нужно задавать путь к шарду в ZooKeeper.
|
||||
2. Помещает загруженные данные в директорию `detached` таблицы `table_name`. Чтобы прикрепить эти данные к таблице, используйте запрос [ATTACH PARTITION\|PART](#alter_attach-partition).
|
||||
|
||||
Например:
|
||||
|
||||
1. FETCH PARTITION
|
||||
``` sql
|
||||
ALTER TABLE users FETCH PARTITION 201902 FROM '/clickhouse/tables/01-01/visits';
|
||||
ALTER TABLE users ATTACH PARTITION 201902;
|
||||
```
|
||||
2. FETCH PART
|
||||
``` sql
|
||||
ALTER TABLE users FETCH PART 201901_2_2_0 FROM '/clickhouse/tables/01-01/visits';
|
||||
ALTER TABLE users ATTACH PART 201901_2_2_0;
|
||||
```
|
||||
|
||||
Следует иметь в виду:
|
||||
|
||||
- Запрос `ALTER TABLE t FETCH PARTITION|PART` не реплицируется. Он загружает партицию в директорию `detached` только на локальном сервере.
|
||||
- Запрос `ALTER TABLE t FETCH PARTITION` не реплицируется. Он загружает партицию в директорию `detached` только на локальном сервере.
|
||||
- Запрос `ALTER TABLE t ATTACH` реплицируется — он добавляет данные в таблицу сразу на всех репликах. На одной из реплик данные будут добавлены из директории `detached`, а на других — из соседних реплик.
|
||||
|
||||
Перед загрузкой данных система проверяет, существует ли партиция и совпадает ли её структура со структурой таблицы. При этом автоматически выбирается наиболее актуальная реплика среди всех живых реплик.
|
||||
|
@ -282,7 +282,7 @@ GRANT INSERT(x,y) ON db.table TO john
|
||||
- `ALTER MATERIALIZE TTL`. Уровень: `TABLE`. Алиасы: `MATERIALIZE TTL`
|
||||
- `ALTER SETTINGS`. Уровень: `TABLE`. Алиасы: `ALTER SETTING`, `ALTER MODIFY SETTING`, `MODIFY SETTING`
|
||||
- `ALTER MOVE PARTITION`. Уровень: `TABLE`. Алиасы: `ALTER MOVE PART`, `MOVE PARTITION`, `MOVE PART`
|
||||
- `ALTER FETCH PARTITION`. Уровень: `TABLE`. Алиасы: `ALTER FETCH PART`, `FETCH PARTITION`, `FETCH PART`
|
||||
- `ALTER FETCH PARTITION`. Уровень: `TABLE`. Алиасы: `FETCH PARTITION`
|
||||
- `ALTER FREEZE PARTITION`. Уровень: `TABLE`. Алиасы: `FREEZE PARTITION`
|
||||
- `ALTER VIEW` Уровень: `GROUP`
|
||||
- `ALTER VIEW REFRESH `. Уровень: `VIEW`. Алиасы: `ALTER LIVE VIEW REFRESH`, `REFRESH VIEW`
|
||||
|
@ -4,7 +4,7 @@ toc_title: JOIN
|
||||
|
||||
# Секция JOIN {#select-join}
|
||||
|
||||
`JOIN` создаёт новую таблицу путем объединения столбцов из одной или нескольких таблиц с использованием общих для каждой из них значений. Это обычная операция в базах данных с поддержкой SQL, которая соответствует join из [реляционной алгебры](https://en.wikipedia.org/wiki/Relational_algebra#Joins_and_join-like_operators). Частный случай соединения одной таблицы часто называют self-join.
|
||||
Join создаёт новую таблицу путем объединения столбцов из одной или нескольких таблиц с использованием общих для каждой из них значений. Это обычная операция в базах данных с поддержкой SQL, которая соответствует join из [реляционной алгебры](https://en.wikipedia.org/wiki/Relational_algebra#Joins_and_join-like_operators). Частный случай соединения одной таблицы часто называют «self-join».
|
||||
|
||||
Синтаксис:
|
||||
|
||||
@ -38,21 +38,12 @@ FROM <left_table>
|
||||
|
||||
## Настройки {#join-settings}
|
||||
|
||||
Значение строгости по умолчанию может быть переопределено с помощью настройки [join_default_strictness](../../../operations/settings/settings.md#settings-join_default_strictness).
|
||||
!!! note "Примечание"
|
||||
Значение строгости по умолчанию может быть переопределено с помощью настройки [join_default_strictness](../../../operations/settings/settings.md#settings-join_default_strictness).
|
||||
|
||||
Поведение сервера ClickHouse для операций `ANY JOIN` зависит от параметра [any_join_distinct_right_table_keys](../../../operations/settings/settings.md#any_join_distinct_right_table_keys).
|
||||
|
||||
**См. также**
|
||||
|
||||
- [join_algorithm](../../../operations/settings/settings.md#settings-join_algorithm)
|
||||
- [join_any_take_last_row](../../../operations/settings/settings.md#settings-join_any_take_last_row)
|
||||
- [join_use_nulls](../../../operations/settings/settings.md#join_use_nulls)
|
||||
- [partial_merge_join_optimizations](../../../operations/settings/settings.md#partial_merge_join_optimizations)
|
||||
- [partial_merge_join_rows_in_right_blocks](../../../operations/settings/settings.md#partial_merge_join_rows_in_right_blocks)
|
||||
- [join_on_disk_max_files_to_merge](../../../operations/settings/settings.md#join_on_disk_max_files_to_merge)
|
||||
- [any_join_distinct_right_table_keys](../../../operations/settings/settings.md#any_join_distinct_right_table_keys)
|
||||
|
||||
## Использование ASOF JOIN {#asof-join-usage}
|
||||
### Использование ASOF JOIN {#asof-join-usage}
|
||||
|
||||
`ASOF JOIN` применим в том случае, когда необходимо объединять записи, которые не имеют точного совпадения.
|
||||
|
||||
@ -104,7 +95,7 @@ USING (equi_column1, ... equi_columnN, asof_column)
|
||||
|
||||
Чтобы задать значение строгости по умолчанию, используйте сессионный параметр [join_default_strictness](../../../operations/settings/settings.md#settings-join_default_strictness).
|
||||
|
||||
## Распределённый JOIN {#global-join}
|
||||
#### Распределённый join {#global-join}
|
||||
|
||||
Есть два пути для выполнения соединения с участием распределённых таблиц:
|
||||
|
||||
@ -113,42 +104,6 @@ USING (equi_column1, ... equi_columnN, asof_column)
|
||||
|
||||
Будьте аккуратны при использовании `GLOBAL`. За дополнительной информацией обращайтесь в раздел [Распределенные подзапросы](../../../sql-reference/operators/in.md#select-distributed-subqueries).
|
||||
|
||||
## Неявные преобразования типов {#implicit-type-conversion}
|
||||
|
||||
Запросы `INNER JOIN`, `LEFT JOIN`, `RIGHT JOIN` и `FULL JOIN` поддерживают неявные преобразования типов для ключей соединения. Однако запрос не может быть выполнен, если не существует типа, к которому можно привести значения ключей с обеих сторон (например, нет типа, который бы одновременно вмещал в себя значения `UInt64` и `Int64`, или `String` и `Int32`).
|
||||
|
||||
**Пример**
|
||||
|
||||
Рассмотрим таблицу `t_1`:
|
||||
```text
|
||||
┌─a─┬─b─┬─toTypeName(a)─┬─toTypeName(b)─┐
|
||||
│ 1 │ 1 │ UInt16 │ UInt8 │
|
||||
│ 2 │ 2 │ UInt16 │ UInt8 │
|
||||
└───┴───┴───────────────┴───────────────┘
|
||||
```
|
||||
и таблицу `t_2`:
|
||||
```text
|
||||
┌──a─┬────b─┬─toTypeName(a)─┬─toTypeName(b)───┐
|
||||
│ -1 │ 1 │ Int16 │ Nullable(Int64) │
|
||||
│ 1 │ -1 │ Int16 │ Nullable(Int64) │
|
||||
│ 1 │ 1 │ Int16 │ Nullable(Int64) │
|
||||
└────┴──────┴───────────────┴─────────────────┘
|
||||
```
|
||||
|
||||
Запрос
|
||||
```sql
|
||||
SELECT a, b, toTypeName(a), toTypeName(b) FROM t_1 FULL JOIN t_2 USING (a, b);
|
||||
```
|
||||
вернёт результат:
|
||||
```text
|
||||
┌──a─┬────b─┬─toTypeName(a)─┬─toTypeName(b)───┐
|
||||
│ 1 │ 1 │ Int32 │ Nullable(Int64) │
|
||||
│ 2 │ 2 │ Int32 │ Nullable(Int64) │
|
||||
│ -1 │ 1 │ Int32 │ Nullable(Int64) │
|
||||
│ 1 │ -1 │ Int32 │ Nullable(Int64) │
|
||||
└────┴──────┴───────────────┴─────────────────┘
|
||||
```
|
||||
|
||||
## Рекомендации по использованию {#usage-recommendations}
|
||||
|
||||
### Обработка пустых ячеек и NULL {#processing-of-empty-or-null-cells}
|
||||
@ -187,14 +142,12 @@ SELECT a, b, toTypeName(a), toTypeName(b) FROM t_1 FULL JOIN t_2 USING (a, b);

### Memory limitations {#memory-limitations}

By default, ClickHouse uses the [hash join](https://en.wikipedia.org/wiki/Hash_join) algorithm: it takes the right-hand table and builds a hash table for it in RAM. With `join_algorithm = 'auto'` enabled, ClickHouse switches to the [merge join](https://ru.wikipedia.org/wiki/Алгоритм_соединения_слиянием_сортированных_списков) algorithm once a certain memory consumption threshold is reached. The `JOIN` algorithms are described in the [join_algorithm](../../../operations/settings/settings.md#settings-join_algorithm) setting.
By default, ClickHouse uses the [hash join](https://en.wikipedia.org/wiki/Hash_join) algorithm: it takes the `<right_table>` and builds a hash table for it in RAM. After a certain memory consumption threshold, ClickHouse falls back to the merge join algorithm.

If you need to restrict memory consumption during a `JOIN` operation, use the following settings:

- [max_rows_in_join](../../../operations/settings/query-complexity.md#settings-max_rows_in_join) — limits the number of rows in the hash table.
- [max_bytes_in_join](../../../operations/settings/query-complexity.md#settings-max_bytes_in_join) — limits the size of the hash table.

When any of these limits is reached, ClickHouse acts as the [join_overflow_mode](../../../operations/settings/query-complexity.md#settings-join_overflow_mode) setting instructs (see the sketch below).
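A hedged sketch of capping `JOIN` memory for the session (the numeric limit is illustrative):

``` sql
SET max_bytes_in_join = 1000000000;   -- cap the in-memory hash table at roughly 1 GB
SET join_overflow_mode = 'break';     -- return a partial result instead of throwing when the cap is hit
```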
## Examples {#examples}

@ -5,17 +5,6 @@ toc_title: Security Changelog

# Security Changelog {#security-changelog}

## Fixed in release 21.4.3.21, 2021-04-12 {#fixed-in-clickhouse-release-21-4-3-21-2019-09-10}

### CVE-2021-25263 {#cve-2021-25263}

An attacker who is able to create dictionaries can read files on the ClickHouse server's file system.
The attacker can bypass the incorrect check of the dictionary file path and load part of an arbitrary file as a dictionary; by manipulating the file-parsing options, they can then fetch the next part of the file and read the whole file step by step.

The fix is available in versions 20.8.18.32-lts, 21.1.9.41-stable, 21.2.9.41-stable, 21.3.6.55-lts, 21.4.3.21-stable and later.

Credits: [Vyacheslav Egoshin](https://twitter.com/vegoshin)

## Fixed in release 19.14.3.3, 2019-09-10 {#ispravleno-v-relize-19-14-3-3-2019-09-10}

### CVE-2019-15024 {#cve-2019-15024}
@ -6,12 +6,12 @@ toc_title: Atomic
|
||||
|
||||
# Atomic {#atomic}
|
||||
|
||||
它支持非阻塞 DROP 和 RENAME TABLE 查询以及原子 EXCHANGE TABLES t1 AND t2 查询。默认情况下使用Atomic数据库引擎。
|
||||
It is supports non-blocking `DROP` and `RENAME TABLE` queries and atomic `EXCHANGE TABLES t1 AND t2` queries. Atomic database engine is used by default.
|
||||
|
||||
## 创建数据库 {#creating-a-database}
|
||||
## Creating a Database {#creating-a-database}
|
||||
|
||||
```sql
|
||||
CREATE DATABASE test ENGINE = Atomic;
|
||||
```
|
||||
|
||||
[原文](https://clickhouse.tech/docs/en/engines/database_engines/atomic/) <!--hide-->
|
||||
[Original article](https://clickhouse.tech/docs/en/engines/database_engines/atomic/) <!--hide-->
|
||||
|
@ -1,4 +1,4 @@
|
||||
# CollapsingMergeTree {#table_engine-collapsingmergetree}
|
||||
# 折叠树 {#table_engine-collapsingmergetree}
|
||||
|
||||
该引擎继承于 [MergeTree](mergetree.md),并在数据块合并算法中添加了折叠行的逻辑。
|
||||
|
||||
@ -203,4 +203,4 @@ SELECT * FROM UAct FINAL
|
||||
|
||||
这种查询数据的方法是非常低效的。不要在大表中使用它。
|
||||
|
||||
[原文](https://clickhouse.tech/docs/en/operations/table_engines/collapsingmergetree/) <!--hide-->
|
||||
[来源文章](https://clickhouse.tech/docs/en/operations/table_engines/collapsingmergetree/) <!--hide-->
|
||||
|
@ -3,7 +3,7 @@ toc_priority: 37
|
||||
toc_title: "版本折叠MergeTree"
|
||||
---
|
||||
|
||||
# VersionedCollapsingMergeTree {#versionedcollapsingmergetree}
|
||||
# 版本折叠MergeTree {#versionedcollapsingmergetree}
|
||||
|
||||
这个引擎:
|
||||
|
||||
|
@ -5,6 +5,6 @@ toc_title: 原生接口(TCP)
|
||||
|
||||
# 原生接口(TCP){#native-interface-tcp}
|
||||
|
||||
原生接口协议用于[命令行客户端](cli.md),用于分布式查询处理期间的服务器间通信,以及其他C++ 程序。不幸的是,原生ClickHouse协议还没有正式的规范,但它可以从ClickHouse源代码[从这里开始](https://github.com/ClickHouse/ClickHouse/tree/master/src/Client)或通过拦截和分析TCP流量进行逆向工程。
|
||||
原生接口用于[命令行客户端](cli.md),用于分布式查询处理期间的服务器间通信,以及其他C++程序。可惜的是,原生的ClickHouse协议还没有正式的规范,但它可以从ClickHouse[源代码](https://github.com/ClickHouse/ClickHouse/tree/master/src/Client)通过拦截和分析TCP流量进行反向工程。
|
||||
|
||||
[原文](https://clickhouse.tech/docs/en/interfaces/tcp/) <!--hide-->
|
||||
[来源文章](https://clickhouse.tech/docs/zh/interfaces/tcp/) <!--hide-->
|
||||
|
10
docs/zh/interfaces/third-party/gui.md
vendored
10
docs/zh/interfaces/third-party/gui.md
vendored
@ -57,9 +57,9 @@ ClickHouse Web 界面 [Tabix](https://github.com/tabixio/tabix).
|
||||
- 表格预览。
|
||||
- 自动完成。
|
||||
|
||||
### clickhouse-cli {#clickhouse-cli}
|
||||
### ツ环板-ョツ嘉ッツ偲 {#clickhouse-cli}
|
||||
|
||||
[clickhouse-cli](https://github.com/hatarist/clickhouse-cli) 是ClickHouse的替代命令行客户端,用Python 3编写。
|
||||
[ツ环板-ョツ嘉ッツ偲](https://github.com/hatarist/clickhouse-cli) 是ClickHouse的替代命令行客户端,用Python 3编写。
|
||||
|
||||
特征:
|
||||
|
||||
@ -68,15 +68,15 @@ ClickHouse Web 界面 [Tabix](https://github.com/tabixio/tabix).
|
||||
- 寻呼机支持数据输出。
|
||||
- 自定义PostgreSQL类命令。
|
||||
|
||||
### clickhouse-flamegraph {#clickhouse-flamegraph}
|
||||
### ツ暗ェツ氾环催ツ団ツ法ツ人 {#clickhouse-flamegraph}
|
||||
|
||||
[clickhouse-flamegraph](https://github.com/Slach/clickhouse-flamegraph) 是一个可视化的专业工具`system.trace_log`如[flamegraph](http://www.brendangregg.com/flamegraphs.html).
|
||||
|
||||
## 商业 {#shang-ye}
|
||||
|
||||
### Holistics {#holistics-software}
|
||||
### ツ环板Softwareョツ嘉ッ {#holistics-software}
|
||||
|
||||
[Holistics](https://www.holistics.io/) 在2019年被Gartner FrontRunners列为可用性最高排名第二的商业智能工具之一。 Holistics是一个基于SQL的全栈数据平台和商业智能工具,用于设置您的分析流程。
|
||||
[整体学](https://www.holistics.io/) 在2019年被Gartner FrontRunners列为可用性最高排名第二的商业智能工具之一。 Holistics是一个基于SQL的全栈数据平台和商业智能工具,用于设置您的分析流程。
|
||||
|
||||
特征:
|
||||
|
||||
|
@ -5,21 +5,9 @@ toc_title: "操作"
|
||||
|
||||
# 操作 {#operations}
|
||||
|
||||
ClickHouse操作手册由以下主要部分组成:
|
||||
Clickhouse运维手册主要包含下面几部分:
|
||||
|
||||
- [安装要求](../operations/requirements.md)
|
||||
- [监控](../operations/monitoring.md)
|
||||
- [故障排除](../operations/troubleshooting.md)
|
||||
- [使用建议](../operations/tips.md)
|
||||
- [更新程序](../operations/update.md)
|
||||
- [访问权限](../operations/access-rights.md)
|
||||
- [数据备份](../operations/backup.md)
|
||||
- [配置文件](../operations/configuration-files.md)
|
||||
- [配额](../operations/quotas.md)
|
||||
- [系统表](../operations/system-tables/index.md)
|
||||
- [服务器配置参数](../operations/server-configuration-parameters/index.md)
|
||||
- [如何用ClickHouse测试你的硬件](../operations/performance-test.md)
|
||||
- [设置](../operations/settings/index.md)
|
||||
- [实用工具](../operations/utilities/index.md)
|
||||
- 安装要求
|
||||
|
||||
[原文](https://clickhouse.tech/docs/en/operations/) <!--hide-->
|
||||
|
||||
[原始文章](https://clickhouse.tech/docs/en/operations/) <!--hide-->
|
||||
|
@ -1,8 +1,13 @@
|
||||
---
|
||||
machine_translated: true
|
||||
machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
|
||||
toc_priority: 42
|
||||
toc_title: mysql
|
||||
---
|
||||
|
||||
# mysql {#mysql}
|
||||
|
||||
允许对存储在远程MySQL服务器上的数据执行`SELECT`和`INSERT`查询。
|
||||
|
||||
**语法**
|
||||
允许 `SELECT` 要对存储在远程MySQL服务器上的数据执行的查询。
|
||||
|
||||
``` sql
|
||||
mysql('host:port', 'database', 'table', 'user', 'password'[, replace_query, 'on_duplicate_clause']);
|
||||
@ -10,44 +15,31 @@ mysql('host:port', 'database', 'table', 'user', 'password'[, replace_query, 'on_
|
||||
|
||||
**参数**
|
||||
|
||||
- `host:port` — MySQL服务器地址.
|
||||
- `host:port` — MySQL server address.
|
||||
|
||||
- `database` — 远程数据库名称.
|
||||
- `database` — Remote database name.
|
||||
|
||||
- `table` — 远程表名称.
|
||||
- `table` — Remote table name.
|
||||
|
||||
- `user` — MySQL用户.
|
||||
- `user` — MySQL user.
|
||||
|
||||
- `password` — 用户密码.
|
||||
- `password` — User password.
|
||||
|
||||
- `replace_query` — 将INSERT INTO` 查询转换为 `REPLACE INTO`的标志。如果 `replace_query=1`,查询被替换。
|
||||
- `replace_query` — Flag that converts `INSERT INTO` 查询到 `REPLACE INTO`. 如果 `replace_query=1`,查询被替换。
|
||||
|
||||
- `on_duplicate_clause` — 添加 `ON DUPLICATE KEY on_duplicate_clause` 表达式到 `INSERT` 查询。明确规定只能使用 `replace_query = 0` ,如果你同时设置replace_query = 1`和`on_duplicate_clause`,ClickHouse将产生异常。
|
||||
- `on_duplicate_clause` — The `ON DUPLICATE KEY on_duplicate_clause` 表达式被添加到 `INSERT` 查询。
|
||||
|
||||
示例:`INSERT INTO t (c1,c2) VALUES ('a', 2) ON DUPLICATE KEY UPDATE c2 = c2 + 1`
|
||||
Example: `INSERT INTO t (c1,c2) VALUES ('a', 2) ON DUPLICATE KEY UPDATE c2 = c2 + 1`, where `on_duplicate_clause` is `UPDATE c2 = c2 + 1`. See the MySQL documentation to find which `on_duplicate_clause` you can use with the `ON DUPLICATE KEY` clause.
|
||||
|
||||
`on_duplicate_clause`这里是`UPDATE c2 = c2 + 1`。请查阅MySQL文档,来找到可以和`ON DUPLICATE KEY`一起使用的 `on_duplicate_clause`子句。
|
||||
To specify `on_duplicate_clause` you need to pass `0` to the `replace_query` parameter. If you simultaneously pass `replace_query = 1` and `on_duplicate_clause`, ClickHouse generates an exception.
|
||||
|
||||
简单的 `WHERE` 子句如 `=, !=, >, >=, <, <=` 将即时在MySQL服务器上执行。其余的条件和 `LIMIT` 只有在对MySQL的查询完成后,才会在ClickHouse中执行采样约束。
|
||||
简单 `WHERE` 条款如 `=, !=, >, >=, <, <=` 当前在MySQL服务器上执行。
|
||||
|
||||
支持使用`|`并列进行多副本查询,示例如下:
|
||||
|
||||
```sql
|
||||
SELECT name FROM mysql(`mysql{1|2|3}:3306`, 'mysql_database', 'mysql_table', 'user', 'password');
|
||||
```
|
||||
|
||||
或
|
||||
|
||||
```sql
|
||||
SELECT name FROM mysql(`mysql1:3306|mysql2:3306|mysql3:3306`, 'mysql_database', 'mysql_table', 'user', 'password');
|
||||
```
|
||||
其余的条件和 `LIMIT` 只有在对MySQL的查询完成后,才会在ClickHouse中执行采样约束。
|
||||
|
||||
**返回值**
|
||||
|
||||
与原始MySQL表具有相同列的表对象。
|
||||
|
||||
!!! note "注意"
|
||||
在`INSERT`查询中为了区分`mysql(...)`与带有列名列表的表名的表函数,你必须使用关键字`FUNCTION`或`TABLE FUNCTION`。查看如下示例。
|
||||
与原始MySQL表具有相同列的table对象。
|
||||
|
||||
## 用法示例 {#usage-example}
|
||||
|
||||
@ -74,7 +66,7 @@ mysql> select * from test;
|
||||
1 row in set (0,00 sec)
|
||||
```
|
||||
|
||||
从ClickHouse中查询数据:
|
||||
从ClickHouse中选择数据:
|
||||
|
||||
``` sql
|
||||
SELECT * FROM mysql('localhost:3306', 'test', 'test', 'bayonet', '123')
|
||||
@ -86,21 +78,6 @@ SELECT * FROM mysql('localhost:3306', 'test', 'test', 'bayonet', '123')
|
||||
└────────┴──────────────┴───────┴────────────────┘
|
||||
```
|
||||
|
||||
替换和插入:
|
||||
|
||||
```sql
|
||||
INSERT INTO FUNCTION mysql('localhost:3306', 'test', 'test', 'bayonet', '123', 1) (int_id, float) VALUES (1, 3);
|
||||
INSERT INTO TABLE FUNCTION mysql('localhost:3306', 'test', 'test', 'bayonet', '123', 0, 'UPDATE int_id = int_id + 1') (int_id, float) VALUES (1, 4);
|
||||
SELECT * FROM mysql('localhost:3306', 'test', 'test', 'bayonet', '123');
|
||||
```
|
||||
|
||||
```text
|
||||
┌─int_id─┬─float─┐
|
||||
│ 1 │ 3 │
|
||||
│ 2 │ 4 │
|
||||
└────────┴───────┘
|
||||
```
|
||||
|
||||
## 另请参阅 {#see-also}
|
||||
|
||||
- [该 ‘MySQL’ 表引擎](../../engines/table-engines/integrations/mysql.md)
|
||||
|
@ -26,7 +26,6 @@
|
||||
#include <boost/algorithm/string/replace.hpp>
|
||||
#include <Poco/String.h>
|
||||
#include <Poco/Util/Application.h>
|
||||
#include <Columns/ColumnString.h>
|
||||
#include <common/find_symbols.h>
|
||||
#include <common/LineReader.h>
|
||||
#include <Common/ClickHouseRevision.h>
|
||||
@ -302,9 +301,26 @@ private:
|
||||
}
|
||||
catch (const Exception & e)
|
||||
{
|
||||
bool print_stack_trace = config().getBool("stacktrace", false) && e.code() != ErrorCodes::NETWORK_ERROR;
|
||||
bool print_stack_trace = config().getBool("stacktrace", false);
|
||||
|
||||
std::cerr << getExceptionMessage(e, print_stack_trace, true) << std::endl << std::endl;
|
||||
std::string text = e.displayText();
|
||||
|
||||
/** If exception is received from server, then stack trace is embedded in message.
|
||||
* If exception is thrown on client, then stack trace is in separate field.
|
||||
*/
|
||||
|
||||
auto embedded_stack_trace_pos = text.find("Stack trace");
|
||||
if (std::string::npos != embedded_stack_trace_pos && !print_stack_trace)
|
||||
text.resize(embedded_stack_trace_pos);
|
||||
|
||||
std::cerr << "Code: " << e.code() << ". " << text << std::endl << std::endl;
|
||||
|
||||
/// Don't print the stack trace on the client if it was logged on the server.
|
||||
/// Also don't print the stack trace in case of network errors.
|
||||
if (print_stack_trace && e.code() != ErrorCodes::NETWORK_ERROR && std::string::npos == embedded_stack_trace_pos)
|
||||
{
|
||||
std::cerr << "Stack trace:" << std::endl << e.getStackTraceString();
|
||||
}
|
||||
|
||||
/// If exception code isn't zero, we should return non-zero return code anyway.
|
||||
return e.code() ? e.code() : -1;
|
||||
@ -471,52 +487,6 @@ private:
|
||||
}
|
||||
#endif
|
||||
|
||||
/// Make query to get all server warnings
|
||||
std::vector<String> loadWarningMessages()
|
||||
{
|
||||
std::vector<String> messages;
|
||||
connection->sendQuery(connection_parameters.timeouts, "SELECT message FROM system.warnings", "" /* query_id */, QueryProcessingStage::Complete);
|
||||
while (true)
|
||||
{
|
||||
Packet packet = connection->receivePacket();
|
||||
switch (packet.type)
|
||||
{
|
||||
case Protocol::Server::Data:
|
||||
if (packet.block)
|
||||
{
|
||||
const ColumnString & column = typeid_cast<const ColumnString &>(*packet.block.getByPosition(0).column);
|
||||
|
||||
size_t rows = packet.block.rows();
|
||||
for (size_t i = 0; i < rows; ++i)
|
||||
messages.emplace_back(column.getDataAt(i).toString());
|
||||
}
|
||||
continue;
|
||||
|
||||
case Protocol::Server::Progress:
|
||||
continue;
|
||||
case Protocol::Server::ProfileInfo:
|
||||
continue;
|
||||
case Protocol::Server::Totals:
|
||||
continue;
|
||||
case Protocol::Server::Extremes:
|
||||
continue;
|
||||
case Protocol::Server::Log:
|
||||
continue;
|
||||
|
||||
case Protocol::Server::Exception:
|
||||
packet.exception->rethrow();
|
||||
return messages;
|
||||
|
||||
case Protocol::Server::EndOfStream:
|
||||
return messages;
|
||||
|
||||
default:
|
||||
throw Exception(ErrorCodes::UNKNOWN_PACKET_FROM_SERVER, "Unknown packet {} from server {}",
|
||||
packet.type, connection->getDescription());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
int mainImpl()
|
||||
{
|
||||
UseSSL use_ssl;
|
||||
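`loadWarningMessages()` above issues `SELECT message FROM system.warnings` and then drains every packet until `EndOfStream`, keeping `Data` rows, skipping progress and log packets, and rethrowing a server `Exception`. A self-contained sketch of that drain loop, modelled with a plain queue instead of a real `Connection` (all names below are illustrative):

```cpp
#include <iostream>
#include <queue>
#include <stdexcept>
#include <string>
#include <vector>

/// Illustrative stand-ins for the server packet types handled above.
enum class PacketType { Data, Progress, Log, Exception, EndOfStream };
struct Packet { PacketType type; std::string payload; };

/// Drain packets until EndOfStream: keep Data payloads, skip bookkeeping
/// packets, and turn a server-side Exception packet back into a C++ exception.
std::vector<std::string> drainMessages(std::queue<Packet> & packets)
{
    std::vector<std::string> messages;
    while (!packets.empty())
    {
        Packet packet = packets.front();
        packets.pop();
        switch (packet.type)
        {
            case PacketType::Data: messages.push_back(packet.payload); continue;
            case PacketType::Progress:
            case PacketType::Log: continue;
            case PacketType::Exception: throw std::runtime_error(packet.payload);
            case PacketType::EndOfStream: return messages;
        }
    }
    return messages;
}

int main()
{
    std::queue<Packet> packets;
    packets.push({PacketType::Data, "Server was built in debug mode."});
    packets.push({PacketType::Progress, ""});
    packets.push({PacketType::EndOfStream, ""});
    for (const auto & message : drainMessages(packets))
        std::cout << "* " << message << '\n';
}
```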
@ -595,26 +565,6 @@ private:
|
||||
suggest->load(connection_parameters, config().getInt("suggestion_limit"));
|
||||
}
|
||||
|
||||
/// Load Warnings at the beginning of connection
|
||||
if (!config().has("no-warnings"))
|
||||
{
|
||||
try
|
||||
{
|
||||
std::vector<String> messages = loadWarningMessages();
|
||||
if (!messages.empty())
|
||||
{
|
||||
std::cout << "Warnings:" << std::endl;
|
||||
for (const auto & message : messages)
|
||||
std::cout << "* " << message << std::endl;
|
||||
std::cout << std::endl;
|
||||
}
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
/// Ignore exception
|
||||
}
|
||||
}
|
||||
|
||||
/// Load command history if present.
|
||||
if (config().has("history_file"))
|
||||
history_file = config().getString("history_file");
|
||||
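The warning block above only runs when the `no-warnings` key is absent; that key is set from the `--no-warnings` switch registered further down in this diff. A minimal sketch, assuming only boost::program_options, of how such a switch becomes a boolean flag (the option name matches the diff, everything else is illustrative):

```cpp
#include <boost/program_options.hpp>
#include <iostream>

namespace po = boost::program_options;

int main(int argc, char ** argv)
{
    po::options_description desc("Options");
    desc.add_options()
        ("no-warnings", "disable warnings when client connects to server");

    po::variables_map options;
    po::store(po::parse_command_line(argc, argv, desc), options);
    po::notify(options);

    /// Presence of the switch becomes a boolean flag, mirroring
    /// config().setBool("no-warnings", true) later in this diff.
    bool no_warnings = options.count("no-warnings") > 0;
    std::cout << (no_warnings ? "warnings disabled\n" : "warnings enabled\n");
}
```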
@ -683,10 +633,17 @@ private:
|
||||
}
|
||||
catch (const Exception & e)
|
||||
{
|
||||
/// We don't need to handle the test hints in the interactive mode.
|
||||
// We don't need to handle the test hints in the interactive
|
||||
// mode.
|
||||
std::cerr << std::endl
|
||||
<< "Exception on client:" << std::endl
|
||||
<< "Code: " << e.code() << ". " << e.displayText() << std::endl;
|
||||
|
||||
if (config().getBool("stacktrace", false))
|
||||
std::cerr << "Stack trace:" << std::endl << e.getStackTraceString() << std::endl;
|
||||
|
||||
std::cerr << std::endl;
|
||||
|
||||
bool print_stack_trace = config().getBool("stacktrace", false);
|
||||
std::cerr << "Exception on client:" << std::endl << getExceptionMessage(e, print_stack_trace, true) << std::endl << std::endl;
|
||||
client_exception = std::make_unique<Exception>(e);
|
||||
}
|
||||
|
||||
@ -983,11 +940,18 @@ private:
|
||||
{
|
||||
if (server_exception)
|
||||
{
|
||||
bool print_stack_trace = config().getBool("stacktrace", false);
|
||||
std::string text = server_exception->displayText();
|
||||
auto embedded_stack_trace_pos = text.find("Stack trace");
|
||||
if (std::string::npos != embedded_stack_trace_pos && !config().getBool("stacktrace", false))
|
||||
{
|
||||
text.resize(embedded_stack_trace_pos);
|
||||
}
|
||||
std::cerr << "Received exception from server (version " << server_version << "):" << std::endl
|
||||
<< getExceptionMessage(*server_exception, print_stack_trace, true) << std::endl;
|
||||
<< "Code: " << server_exception->code() << ". " << text << std::endl;
|
||||
if (is_interactive)
|
||||
{
|
||||
std::cerr << std::endl;
|
||||
}
|
||||
}
|
||||
|
||||
if (client_exception)
|
||||
@ -1446,7 +1410,8 @@ private:
|
||||
{
|
||||
// Just report it, we'll terminate below.
|
||||
fmt::print(stderr,
|
||||
"Error while reconnecting to the server: {}\n",
|
||||
"Error while reconnecting to the server: Code: {}: {}\n",
|
||||
getCurrentExceptionCode(),
|
||||
getCurrentExceptionMessage(true));
|
||||
|
||||
assert(!connection->isConnected());
|
||||
@ -2564,7 +2529,6 @@ public:
|
||||
("opentelemetry-traceparent", po::value<std::string>(), "OpenTelemetry traceparent header as described by W3C Trace Context recommendation")
|
||||
("opentelemetry-tracestate", po::value<std::string>(), "OpenTelemetry tracestate header as described by W3C Trace Context recommendation")
|
||||
("history_file", po::value<std::string>(), "path to history file")
|
||||
("no-warnings", "disable warnings when client connects to server")
|
||||
;
|
||||
|
||||
Settings cmd_settings;
|
||||
@ -2632,7 +2596,8 @@ public:
|
||||
}
|
||||
catch (const Exception & e)
|
||||
{
|
||||
std::cerr << getExceptionMessage(e, false) << std::endl;
|
||||
std::string text = e.displayText();
|
||||
std::cerr << "Code: " << e.code() << ". " << text << std::endl;
|
||||
std::cerr << "Table №" << i << std::endl << std::endl;
|
||||
/// Avoid the case when error exit code can possibly overflow to normal (zero).
|
||||
auto exit_code = e.code() % 256;
|
||||
@ -2724,8 +2689,6 @@ public:
|
||||
config().setBool("highlight", options["highlight"].as<bool>());
|
||||
if (options.count("history_file"))
|
||||
config().setString("history_file", options["history_file"].as<std::string>());
|
||||
if (options.count("no-warnings"))
|
||||
config().setBool("no-warnings", true);
|
||||
|
||||
if ((query_fuzzer_runs = options["query-fuzzer-runs"].as<int>()))
|
||||
{
|
||||
@ -2777,7 +2740,8 @@ int mainEntryClickHouseClient(int argc, char ** argv)
|
||||
}
|
||||
catch (const DB::Exception & e)
|
||||
{
|
||||
std::cerr << DB::getExceptionMessage(e, false) << std::endl;
|
||||
std::string text = e.displayText();
|
||||
std::cerr << "Code: " << e.code() << ". " << text << std::endl;
|
||||
return 1;
|
||||
}
|
||||
catch (...)
|
||||
|
@ -433,7 +433,7 @@ void LocalServer::processQueries()
|
||||
|
||||
try
|
||||
{
|
||||
executeQuery(read_buf, write_buf, /* allow_into_outfile = */ true, context, {}, {}, finalize_progress);
|
||||
executeQuery(read_buf, write_buf, /* allow_into_outfile = */ true, context, {}, finalize_progress);
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
|
@ -477,6 +477,17 @@ int Server::main(const std::vector<std::string> & /*args*/)
|
||||
CurrentMetrics::set(CurrentMetrics::Revision, ClickHouseRevision::getVersionRevision());
|
||||
CurrentMetrics::set(CurrentMetrics::VersionInteger, ClickHouseRevision::getVersionInteger());
|
||||
|
||||
if (ThreadFuzzer::instance().isEffective())
|
||||
LOG_WARNING(log, "ThreadFuzzer is enabled. Application will run slowly and unstable.");
|
||||
|
||||
#if !defined(NDEBUG) || !defined(__OPTIMIZE__)
|
||||
LOG_WARNING(log, "Server was built in debug mode. It will work slowly.");
|
||||
#endif
|
||||
|
||||
#if defined(SANITIZER)
|
||||
LOG_WARNING(log, "Server was built with sanitizer. It will work slowly.");
|
||||
#endif
|
||||
|
||||
/** Context contains all that query execution is dependent:
|
||||
* settings, available functions, data types, aggregate functions, databases, ...
|
||||
*/
|
||||
@ -486,18 +497,6 @@ int Server::main(const std::vector<std::string> & /*args*/)
|
||||
global_context->makeGlobalContext();
|
||||
global_context->setApplicationType(Context::ApplicationType::SERVER);
|
||||
|
||||
#if !defined(NDEBUG) || !defined(__OPTIMIZE__)
|
||||
global_context->addWarningMessage("Server was built in debug mode. It will work slowly.");
|
||||
#endif
|
||||
|
||||
if (ThreadFuzzer::instance().isEffective())
|
||||
global_context->addWarningMessage("ThreadFuzzer is enabled. Application will run slowly and unstable.");
|
||||
|
||||
#if defined(SANITIZER)
|
||||
global_context->addWarningMessage("Server was built with sanitizer. It will work slowly.");
|
||||
#endif
|
||||
|
||||
|
||||
// Initialize global thread pool. Do it before we fetch configs from zookeeper
|
||||
// nodes (`from_zk`), because ZooKeeper interface uses the pool. We will
|
||||
// ignore `max_thread_pool_size` in configs we fetch from ZK, but oh well.
|
||||
@ -553,10 +552,8 @@ if (ThreadFuzzer::instance().isEffective())
|
||||
if (ptrace(PTRACE_TRACEME, 0, nullptr, nullptr) == -1)
|
||||
{
|
||||
/// Program is run under debugger. Modification of it's binary image is ok for breakpoints.
|
||||
global_context->addWarningMessage(
|
||||
fmt::format("Server is run under debugger and its binary image is modified (most likely with breakpoints).",
|
||||
calculated_binary_hash)
|
||||
);
|
||||
LOG_WARNING(log, "Server is run under debugger and its binary image is modified (most likely with breakpoints).",
|
||||
calculated_binary_hash);
|
||||
}
|
||||
else
|
||||
{
|
||||
@ -639,7 +636,7 @@ if (ThreadFuzzer::instance().isEffective())
|
||||
}
|
||||
else
|
||||
{
|
||||
global_context->addWarningMessage(message);
|
||||
LOG_WARNING(log, message);
|
||||
}
|
||||
}
|
||||
|
||||
|
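The Server.cpp hunks above swap the startup diagnostics between `global_context->addWarningMessage(...)`, which makes the text queryable through `system.warnings`, and plain `LOG_WARNING(log, ...)`. A minimal sketch of such a warning registry, assuming nothing about the real `Context` class beyond what the hunks show:

```cpp
#include <iostream>
#include <mutex>
#include <string>
#include <vector>

/// Illustrative registry: collect warnings once at startup, read them later
/// (for example to serve a system.warnings-style query). Not the real DB::Context.
class WarningRegistry
{
public:
    void addWarningMessage(const std::string & message)
    {
        std::lock_guard<std::mutex> lock(mutex);
        warnings.push_back(message);
    }

    std::vector<std::string> getWarnings() const
    {
        std::lock_guard<std::mutex> lock(mutex);
        return warnings;
    }

private:
    mutable std::mutex mutex;
    std::vector<std::string> warnings;
};

int main()
{
    WarningRegistry context;
    context.addWarningMessage("Server was built in debug mode. It will work slowly.");
    for (const auto & warning : context.getWarnings())
        std::cout << "* " << warning << '\n';
}
```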
@ -9,7 +9,7 @@
|
||||
Do not use any JavaScript or CSS frameworks or preprocessors.
|
||||
This HTML page should not require any build systems (node.js, npm, gulp, etc.)
|
||||
This HTML page should not be minified, instead it should be reasonably minimalistic by itself.
|
||||
This HTML page should not load any external resources on load.
|
||||
This HTML page should not load any external resources
|
||||
(CSS and JavaScript must be embedded directly to the page. No external fonts or images should be loaded).
|
||||
This UI should look as lightweight, clean and fast as possible.
|
||||
All UI elements must be aligned in pixel-perfect way.
|
||||
@ -343,18 +343,13 @@
|
||||
/// Save query in history only if it is different.
|
||||
let previous_query = '';
|
||||
|
||||
const current_url = new URL(window.location);
|
||||
|
||||
const server_address = current_url.searchParams.get('url');
|
||||
if (server_address) {
|
||||
document.getElementById('url').value = server_address;
|
||||
} else if (location.protocol != 'file:') {
|
||||
/// Substitute the address of the server where the page is served.
|
||||
/// Substitute the address of the server where the page is served.
|
||||
if (location.protocol != 'file:') {
|
||||
document.getElementById('url').value = location.origin;
|
||||
}
|
||||
|
||||
/// Substitute user name if it's specified in the query string
|
||||
const user_from_url = current_url.searchParams.get('user');
|
||||
let user_from_url = (new URL(window.location)).searchParams.get('user');
|
||||
if (user_from_url) {
|
||||
document.getElementById('user').value = user_from_url;
|
||||
}
|
||||
@ -366,9 +361,7 @@
|
||||
let user = document.getElementById('user').value;
|
||||
let password = document.getElementById('password').value;
|
||||
|
||||
let server_address = document.getElementById('url').value;
|
||||
|
||||
let url = server_address +
|
||||
let url = document.getElementById('url').value +
|
||||
/// Ask server to allow cross-domain requests.
|
||||
'?add_http_cors_header=1' +
|
||||
'&user=' + encodeURIComponent(user) +
|
||||
@ -397,18 +390,11 @@
|
||||
response: this.response.length > 100000 ? null : this.response /// Lower than the browser's limit.
|
||||
};
|
||||
let title = "ClickHouse Query: " + query;
|
||||
|
||||
let history_url = window.location.pathname + '?user=' + encodeURIComponent(user);
|
||||
if (server_address != location.origin) {
|
||||
/// Save server's address in URL if it's not identical to the address of the play UI.
|
||||
history_url += '&url=' + encodeURIComponent(server_address);
|
||||
}
|
||||
history_url += '#' + window.btoa(query);
|
||||
|
||||
let url = window.location.pathname + '?user=' + encodeURIComponent(user) + '#' + window.btoa(query);
|
||||
if (previous_query == '') {
|
||||
history.replaceState(state, title, history_url);
|
||||
history.replaceState(state, title, url);
|
||||
} else {
|
||||
history.pushState(state, title, history_url);
|
||||
history.pushState(state, title, url);
|
||||
}
|
||||
document.title = title;
|
||||
previous_query = query;
|
||||
@ -613,16 +599,10 @@
|
||||
}
|
||||
|
||||
/// Huge JS libraries should be loaded only if needed.
|
||||
function loadJS(src, integrity) {
|
||||
function loadJS(src) {
|
||||
return new Promise((resolve, reject) => {
|
||||
const script = document.createElement('script');
|
||||
script.src = src;
|
||||
if (integrity) {
|
||||
script.crossOrigin = 'anonymous';
|
||||
script.integrity = integrity;
|
||||
} else {
|
||||
console.warn('no integrity for', src)
|
||||
}
|
||||
script.addEventListener('load', function() { resolve(true); });
|
||||
document.head.appendChild(script);
|
||||
});
|
||||
@ -633,14 +613,10 @@
|
||||
if (load_dagre_promise) { return load_dagre_promise; }
|
||||
|
||||
load_dagre_promise = Promise.all([
|
||||
loadJS('https://dagrejs.github.io/project/dagre/v0.8.5/dagre.min.js',
|
||||
'sha384-2IH3T69EIKYC4c+RXZifZRvaH5SRUdacJW7j6HtE5rQbvLhKKdawxq6vpIzJ7j9M'),
|
||||
loadJS('https://dagrejs.github.io/project/graphlib-dot/v0.6.4/graphlib-dot.min.js',
|
||||
'sha384-Q7oatU+b+y0oTkSoiRH9wTLH6sROySROCILZso/AbMMm9uKeq++r8ujD4l4f+CWj'),
|
||||
loadJS('https://dagrejs.github.io/project/dagre-d3/v0.6.4/dagre-d3.min.js',
|
||||
'sha384-9N1ty7Yz7VKL3aJbOk+8ParYNW8G5W+MvxEfFL9G7CRYPmkHI9gJqyAfSI/8190W'),
|
||||
loadJS('https://cdn.jsdelivr.net/npm/d3@7.0.0',
|
||||
'sha384-S+Kf0r6YzKIhKA8d1k2/xtYv+j0xYUU3E7+5YLrcPVab6hBh/r1J6cq90OXhw80u'),
|
||||
loadJS('https://dagrejs.github.io/project/dagre/v0.8.5/dagre.min.js'),
|
||||
loadJS('https://dagrejs.github.io/project/graphlib-dot/v0.6.4/graphlib-dot.min.js'),
|
||||
loadJS('https://dagrejs.github.io/project/dagre-d3/v0.6.4/dagre-d3.min.js'),
|
||||
loadJS('https://cdn.jsdelivr.net/npm/d3@7.0.0'),
|
||||
]);
|
||||
|
||||
return load_dagre_promise;
|
||||
|
@ -64,12 +64,7 @@ public:
|
||||
std::lock_guard lock{mutex};
|
||||
auto x = cache.get(params);
|
||||
if (x)
|
||||
{
|
||||
if ((*x)->getUser())
|
||||
return *x;
|
||||
/// No user, probably the user has been dropped while it was in the cache.
|
||||
cache.remove(params);
|
||||
}
|
||||
return *x;
|
||||
auto res = std::shared_ptr<ContextAccess>(new ContextAccess(manager, params));
|
||||
cache.add(params, res);
|
||||
return res;
|
||||
|
@ -655,7 +655,7 @@ private:
|
||||
for (auto & [lhs_childname, lhs_child] : *children)
|
||||
{
|
||||
if (!rhs.tryGetChild(lhs_childname))
|
||||
lhs_child.addGrantsRec(rhs.flags);
|
||||
lhs_child.flags |= rhs.flags & lhs_child.getAllGrantableFlags();
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -673,7 +673,7 @@ private:
|
||||
for (auto & [lhs_childname, lhs_child] : *children)
|
||||
{
|
||||
if (!rhs.tryGetChild(lhs_childname))
|
||||
lhs_child.removeGrantsRec(~rhs.flags);
|
||||
lhs_child.flags &= rhs.flags;
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -1041,15 +1041,17 @@ void AccessRights::makeIntersection(const AccessRights & other)
|
||||
auto helper = [](std::unique_ptr<Node> & root_node, const std::unique_ptr<Node> & other_root_node)
|
||||
{
|
||||
if (!root_node)
|
||||
return;
|
||||
if (!other_root_node)
|
||||
{
|
||||
root_node = nullptr;
|
||||
if (other_root_node)
|
||||
root_node = std::make_unique<Node>(*other_root_node);
|
||||
return;
|
||||
}
|
||||
root_node->makeIntersection(*other_root_node);
|
||||
if (!root_node->flags && !root_node->children)
|
||||
root_node = nullptr;
|
||||
if (other_root_node)
|
||||
{
|
||||
root_node->makeIntersection(*other_root_node);
|
||||
if (!root_node->flags && !root_node->children)
|
||||
root_node = nullptr;
|
||||
}
|
||||
};
|
||||
helper(root, other.root);
|
||||
helper(root_with_grant_option, other.root_with_grant_option);
|
||||
|
@ -173,7 +173,6 @@ enum class AccessType
|
||||
M(MONGO, "", GLOBAL, SOURCES) \
|
||||
M(MYSQL, "", GLOBAL, SOURCES) \
|
||||
M(POSTGRES, "", GLOBAL, SOURCES) \
|
||||
M(SQLITE, "", GLOBAL, SOURCES) \
|
||||
M(ODBC, "", GLOBAL, SOURCES) \
|
||||
M(JDBC, "", GLOBAL, SOURCES) \
|
||||
M(HDFS, "", GLOBAL, SOURCES) \
|
||||
|
@ -163,10 +163,11 @@ void ContextAccess::setUser(const UserPtr & user_) const
|
||||
if (!user)
|
||||
{
|
||||
/// User has been dropped.
|
||||
auto nothing_granted = std::make_shared<AccessRights>();
|
||||
access = nothing_granted;
|
||||
access_with_implicit = nothing_granted;
|
||||
subscription_for_user_change = {};
|
||||
subscription_for_roles_changes = {};
|
||||
access = nullptr;
|
||||
access_with_implicit = nullptr;
|
||||
enabled_roles = nullptr;
|
||||
roles_info = nullptr;
|
||||
enabled_row_policies = nullptr;
|
||||
@ -251,45 +252,32 @@ String ContextAccess::getUserName() const
|
||||
std::shared_ptr<const EnabledRolesInfo> ContextAccess::getRolesInfo() const
|
||||
{
|
||||
std::lock_guard lock{mutex};
|
||||
if (roles_info)
|
||||
return roles_info;
|
||||
static const auto no_roles = std::make_shared<EnabledRolesInfo>();
|
||||
return no_roles;
|
||||
return roles_info;
|
||||
}
|
||||
|
||||
std::shared_ptr<const EnabledRowPolicies> ContextAccess::getEnabledRowPolicies() const
|
||||
{
|
||||
std::lock_guard lock{mutex};
|
||||
if (enabled_row_policies)
|
||||
return enabled_row_policies;
|
||||
static const auto no_row_policies = std::make_shared<EnabledRowPolicies>();
|
||||
return no_row_policies;
|
||||
return enabled_row_policies;
|
||||
}
|
||||
|
||||
ASTPtr ContextAccess::getRowPolicyCondition(const String & database, const String & table_name, RowPolicy::ConditionType index, const ASTPtr & extra_condition) const
|
||||
{
|
||||
std::lock_guard lock{mutex};
|
||||
if (enabled_row_policies)
|
||||
return enabled_row_policies->getCondition(database, table_name, index, extra_condition);
|
||||
return nullptr;
|
||||
return enabled_row_policies ? enabled_row_policies->getCondition(database, table_name, index, extra_condition) : nullptr;
|
||||
}
|
||||
|
||||
std::shared_ptr<const EnabledQuota> ContextAccess::getQuota() const
|
||||
{
|
||||
std::lock_guard lock{mutex};
|
||||
if (enabled_quota)
|
||||
return enabled_quota;
|
||||
static const auto unlimited_quota = EnabledQuota::getUnlimitedQuota();
|
||||
return unlimited_quota;
|
||||
return enabled_quota;
|
||||
}
|
||||
|
||||
|
||||
std::optional<QuotaUsage> ContextAccess::getQuotaUsage() const
|
||||
{
|
||||
std::lock_guard lock{mutex};
|
||||
if (enabled_quota)
|
||||
return enabled_quota->getUsage();
|
||||
return {};
|
||||
return enabled_quota ? enabled_quota->getUsage() : std::optional<QuotaUsage>{};
|
||||
}
|
||||
|
||||
|
||||
@ -300,7 +288,7 @@ std::shared_ptr<const ContextAccess> ContextAccess::getFullAccess()
|
||||
auto full_access = std::shared_ptr<ContextAccess>(new ContextAccess);
|
||||
full_access->is_full_access = true;
|
||||
full_access->access = std::make_shared<AccessRights>(AccessRights::getFullAccess());
|
||||
full_access->access_with_implicit = std::make_shared<AccessRights>(addImplicitAccessRights(*full_access->access));
|
||||
full_access->enabled_quota = EnabledQuota::getUnlimitedQuota();
|
||||
return full_access;
|
||||
}();
|
||||
return res;
|
||||
@ -310,40 +298,28 @@ std::shared_ptr<const ContextAccess> ContextAccess::getFullAccess()
|
||||
std::shared_ptr<const Settings> ContextAccess::getDefaultSettings() const
|
||||
{
|
||||
std::lock_guard lock{mutex};
|
||||
if (enabled_settings)
|
||||
return enabled_settings->getSettings();
|
||||
static const auto everything_by_default = std::make_shared<Settings>();
|
||||
return everything_by_default;
|
||||
return enabled_settings ? enabled_settings->getSettings() : nullptr;
|
||||
}
|
||||
|
||||
|
||||
std::shared_ptr<const SettingsConstraints> ContextAccess::getSettingsConstraints() const
|
||||
{
|
||||
std::lock_guard lock{mutex};
|
||||
if (enabled_settings)
|
||||
return enabled_settings->getConstraints();
|
||||
static const auto no_constraints = std::make_shared<SettingsConstraints>();
|
||||
return no_constraints;
|
||||
return enabled_settings ? enabled_settings->getConstraints() : nullptr;
|
||||
}
|
||||
|
||||
|
||||
std::shared_ptr<const AccessRights> ContextAccess::getAccessRights() const
|
||||
{
|
||||
std::lock_guard lock{mutex};
|
||||
if (access)
|
||||
return access;
|
||||
static const auto nothing_granted = std::make_shared<AccessRights>();
|
||||
return nothing_granted;
|
||||
return access;
|
||||
}
|
||||
|
||||
|
||||
std::shared_ptr<const AccessRights> ContextAccess::getAccessRightsWithImplicit() const
|
||||
{
|
||||
std::lock_guard lock{mutex};
|
||||
if (access_with_implicit)
|
||||
return access_with_implicit;
|
||||
static const auto nothing_granted = std::make_shared<AccessRights>();
|
||||
return nothing_granted;
|
||||
return access_with_implicit;
|
||||
}
|
||||
|
||||
|
||||
@ -575,7 +551,7 @@ bool ContextAccess::checkAdminOptionImplHelper(const Container & role_ids, const
|
||||
for (auto it = std::begin(role_ids); it != std::end(role_ids); ++it, ++i)
|
||||
{
|
||||
const UUID & role_id = *it;
|
||||
if (info->enabled_roles_with_admin_option.count(role_id))
|
||||
if (info && info->enabled_roles_with_admin_option.count(role_id))
|
||||
continue;
|
||||
|
||||
if (throw_if_denied)
|
||||
@ -584,7 +560,7 @@ bool ContextAccess::checkAdminOptionImplHelper(const Container & role_ids, const
|
||||
if (!role_name)
|
||||
role_name = "ID {" + toString(role_id) + "}";
|
||||
|
||||
if (info->enabled_roles.count(role_id))
|
||||
if (info && info->enabled_roles.count(role_id))
|
||||
show_error("Not enough privileges. "
|
||||
"Role " + backQuote(*role_name) + " is granted, but without ADMIN option. "
|
||||
"To execute this query it's necessary to have the role " + backQuoteIfNeed(*role_name) + " granted with ADMIN option.",
|
||||
|
@ -71,9 +71,11 @@ public:
|
||||
String getUserName() const;
|
||||
|
||||
/// Returns information about current and enabled roles.
|
||||
/// The function can return nullptr.
|
||||
std::shared_ptr<const EnabledRolesInfo> getRolesInfo() const;
|
||||
|
||||
/// Returns information about enabled row policies.
|
||||
/// The function can return nullptr.
|
||||
std::shared_ptr<const EnabledRowPolicies> getEnabledRowPolicies() const;
|
||||
|
||||
/// Returns the row policy filter for a specified table.
|
||||
@ -81,13 +83,16 @@ public:
|
||||
ASTPtr getRowPolicyCondition(const String & database, const String & table_name, RowPolicy::ConditionType index, const ASTPtr & extra_condition = nullptr) const;
|
||||
|
||||
/// Returns the quota to track resource consumption.
|
||||
/// The function returns nullptr if no tracking or limitation is needed.
|
||||
std::shared_ptr<const EnabledQuota> getQuota() const;
|
||||
std::optional<QuotaUsage> getQuotaUsage() const;
|
||||
|
||||
/// Returns the default settings, i.e. the settings to apply on user's login.
|
||||
/// The function returns nullptr if it's no need to apply settings.
|
||||
std::shared_ptr<const Settings> getDefaultSettings() const;
|
||||
|
||||
/// Returns the settings' constraints.
|
||||
/// The function returns nullptr if there are no constraints.
|
||||
std::shared_ptr<const SettingsConstraints> getSettingsConstraints() const;
|
||||
|
||||
/// Returns the current access rights.
|
||||
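The comments added in this header hunk state that several `ContextAccess` getters "can return nullptr" instead of handing back empty defaults, so callers must check the pointer before use. A small sketch of that caller-side pattern, using a generic shared_ptr rather than the real ClickHouse types:

```cpp
#include <iostream>
#include <memory>
#include <string>
#include <vector>

/// Stand-in for something like EnabledRolesInfo; the real type lives in Access/.
struct RolesInfo { std::vector<std::string> current_roles; };

/// Caller-side pattern for a getter that "can return nullptr":
/// fall back to an explicit empty case instead of dereferencing blindly.
void printRoles(const std::shared_ptr<const RolesInfo> & info)
{
    if (!info)
    {
        std::cout << "(no roles)\n";
        return;
    }
    for (const auto & role : info->current_roles)
        std::cout << role << '\n';
}

int main()
{
    printRoles(nullptr);   /// e.g. the user has been dropped
    printRoles(std::make_shared<const RolesInfo>(RolesInfo{{"admin"}}));
}
```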
|
@ -12,11 +12,8 @@ size_t EnabledRowPolicies::Hash::operator()(const MixedConditionKey & key) const
|
||||
}
|
||||
|
||||
|
||||
EnabledRowPolicies::EnabledRowPolicies() : params()
|
||||
{
|
||||
}
|
||||
|
||||
EnabledRowPolicies::EnabledRowPolicies(const Params & params_) : params(params_)
|
||||
EnabledRowPolicies::EnabledRowPolicies(const Params & params_)
|
||||
: params(params_)
|
||||
{
|
||||
}
|
||||
|
||||
|
@ -32,7 +32,6 @@ public:
|
||||
friend bool operator >=(const Params & lhs, const Params & rhs) { return !(lhs < rhs); }
|
||||
};
|
||||
|
||||
EnabledRowPolicies();
|
||||
~EnabledRowPolicies();
|
||||
|
||||
using ConditionType = RowPolicy::ConditionType;
|
||||
|
@ -18,8 +18,6 @@ namespace ErrorCodes
|
||||
}
|
||||
|
||||
|
||||
SettingsConstraints::SettingsConstraints() = default;
|
||||
|
||||
SettingsConstraints::SettingsConstraints(const AccessControlManager & manager_) : manager(&manager_)
|
||||
{
|
||||
}
|
||||
@ -201,13 +199,10 @@ bool SettingsConstraints::checkImpl(const Settings & current_settings, SettingCh
|
||||
}
|
||||
};
|
||||
|
||||
if (manager)
|
||||
{
|
||||
if (reaction == THROW_ON_VIOLATION)
|
||||
manager->checkSettingNameIsAllowed(setting_name);
|
||||
else if (!manager->isSettingNameAllowed(setting_name))
|
||||
return false;
|
||||
}
|
||||
if (reaction == THROW_ON_VIOLATION)
|
||||
manager->checkSettingNameIsAllowed(setting_name);
|
||||
else if (!manager->isSettingNameAllowed(setting_name))
|
||||
return false;
|
||||
|
||||
Field current_value, new_value;
|
||||
if (current_settings.tryGet(setting_name, current_value))
|
||||
|
@ -51,7 +51,6 @@ class AccessControlManager;
|
||||
class SettingsConstraints
|
||||
{
|
||||
public:
|
||||
SettingsConstraints();
|
||||
SettingsConstraints(const AccessControlManager & manager_);
|
||||
SettingsConstraints(const SettingsConstraints & src);
|
||||
SettingsConstraints & operator =(const SettingsConstraints & src);
|
||||
|
@ -1,94 +0,0 @@
|
||||
#include <gtest/gtest.h>
|
||||
#include <Access/AccessRights.h>
|
||||
|
||||
using namespace DB;
|
||||
|
||||
|
||||
TEST(AccessRights, Union)
|
||||
{
|
||||
AccessRights lhs, rhs;
|
||||
lhs.grant(AccessType::CREATE_TABLE, "db1", "tb1");
|
||||
rhs.grant(AccessType::SELECT, "db2");
|
||||
lhs.makeUnion(rhs);
|
||||
ASSERT_EQ(lhs.toString(), "GRANT CREATE TABLE ON db1.tb1, GRANT SELECT ON db2.*");
|
||||
|
||||
lhs.clear();
|
||||
rhs.clear();
|
||||
rhs.grant(AccessType::SELECT, "db2");
|
||||
lhs.grant(AccessType::CREATE_TABLE, "db1", "tb1");
|
||||
lhs.makeUnion(rhs);
|
||||
ASSERT_EQ(lhs.toString(), "GRANT CREATE TABLE ON db1.tb1, GRANT SELECT ON db2.*");
|
||||
|
||||
lhs = {};
|
||||
rhs = {};
|
||||
lhs.grant(AccessType::SELECT);
|
||||
rhs.grant(AccessType::SELECT, "db1", "tb1");
|
||||
lhs.makeUnion(rhs);
|
||||
ASSERT_EQ(lhs.toString(), "GRANT SELECT ON *.*");
|
||||
|
||||
lhs = {};
|
||||
rhs = {};
|
||||
lhs.grant(AccessType::SELECT, "db1", "tb1", Strings{"col1", "col2"});
|
||||
rhs.grant(AccessType::SELECT, "db1", "tb1", Strings{"col2", "col3"});
|
||||
lhs.makeUnion(rhs);
|
||||
ASSERT_EQ(lhs.toString(), "GRANT SELECT(col1, col2, col3) ON db1.tb1");
|
||||
|
||||
lhs = {};
|
||||
rhs = {};
|
||||
lhs.grant(AccessType::SELECT, "db1", "tb1", Strings{"col1", "col2"});
|
||||
rhs.grantWithGrantOption(AccessType::SELECT, "db1", "tb1", Strings{"col2", "col3"});
|
||||
lhs.makeUnion(rhs);
|
||||
ASSERT_EQ(lhs.toString(), "GRANT SELECT(col1) ON db1.tb1, GRANT SELECT(col2, col3) ON db1.tb1 WITH GRANT OPTION");
|
||||
|
||||
lhs = {};
|
||||
rhs = {};
|
||||
lhs.grant(AccessType::INSERT);
|
||||
rhs.grant(AccessType::ALL, "db1");
|
||||
lhs.makeUnion(rhs);
|
||||
ASSERT_EQ(lhs.toString(), "GRANT INSERT ON *.*, GRANT SHOW, SELECT, ALTER, CREATE DATABASE, CREATE TABLE, CREATE VIEW, CREATE DICTIONARY, DROP, TRUNCATE, OPTIMIZE, SYSTEM MERGES, SYSTEM TTL MERGES, SYSTEM FETCHES, SYSTEM MOVES, SYSTEM SENDS, SYSTEM REPLICATION QUEUES, SYSTEM DROP REPLICA, SYSTEM SYNC REPLICA, SYSTEM RESTART REPLICA, SYSTEM RESTORE REPLICA, SYSTEM FLUSH DISTRIBUTED, dictGet ON db1.*");
|
||||
}
|
||||
|
||||
|
||||
TEST(AccessRights, Intersection)
|
||||
{
|
||||
AccessRights lhs, rhs;
|
||||
lhs.grant(AccessType::CREATE_TABLE, "db1", "tb1");
|
||||
rhs.grant(AccessType::SELECT, "db2");
|
||||
lhs.makeIntersection(rhs);
|
||||
ASSERT_EQ(lhs.toString(), "GRANT USAGE ON *.*");
|
||||
|
||||
lhs.clear();
|
||||
rhs.clear();
|
||||
lhs.grant(AccessType::SELECT, "db2");
|
||||
rhs.grant(AccessType::CREATE_TABLE, "db1", "tb1");
|
||||
lhs.makeIntersection(rhs);
|
||||
ASSERT_EQ(lhs.toString(), "GRANT USAGE ON *.*");
|
||||
|
||||
lhs = {};
|
||||
rhs = {};
|
||||
lhs.grant(AccessType::SELECT);
|
||||
rhs.grant(AccessType::SELECT, "db1", "tb1");
|
||||
lhs.makeIntersection(rhs);
|
||||
ASSERT_EQ(lhs.toString(), "GRANT SELECT ON db1.tb1");
|
||||
|
||||
lhs = {};
|
||||
rhs = {};
|
||||
lhs.grant(AccessType::SELECT, "db1", "tb1", Strings{"col1", "col2"});
|
||||
rhs.grant(AccessType::SELECT, "db1", "tb1", Strings{"col2", "col3"});
|
||||
lhs.makeIntersection(rhs);
|
||||
ASSERT_EQ(lhs.toString(), "GRANT SELECT(col2) ON db1.tb1");
|
||||
|
||||
lhs = {};
|
||||
rhs = {};
|
||||
lhs.grant(AccessType::SELECT, "db1", "tb1", Strings{"col1", "col2"});
|
||||
rhs.grantWithGrantOption(AccessType::SELECT, "db1", "tb1", Strings{"col2", "col3"});
|
||||
lhs.makeIntersection(rhs);
|
||||
ASSERT_EQ(lhs.toString(), "GRANT SELECT(col2) ON db1.tb1");
|
||||
|
||||
lhs = {};
|
||||
rhs = {};
|
||||
lhs.grant(AccessType::INSERT);
|
||||
rhs.grant(AccessType::ALL, "db1");
|
||||
lhs.makeIntersection(rhs);
|
||||
ASSERT_EQ(lhs.toString(), "GRANT INSERT ON db1.*");
|
||||
}
|
@ -101,24 +101,6 @@ struct AggregateFunctionSumData
|
||||
{
|
||||
const auto * end = ptr + count;
|
||||
|
||||
if constexpr (
|
||||
(is_integer_v<T> && !is_big_int_v<T>)
|
||||
|| (IsDecimalNumber<T> && !std::is_same_v<T, Decimal256> && !std::is_same_v<T, Decimal128>))
|
||||
{
|
||||
/// For integers we can vectorize the operation if we replace the null check using a multiplication (by 0 for null, 1 for not null)
|
||||
/// https://quick-bench.com/q/MLTnfTvwC2qZFVeWHfOBR3U7a8I
|
||||
T local_sum{};
|
||||
while (ptr < end)
|
||||
{
|
||||
T multiplier = !*null_map;
|
||||
Impl::add(local_sum, *ptr * multiplier);
|
||||
++ptr;
|
||||
++null_map;
|
||||
}
|
||||
Impl::add(sum, local_sum);
|
||||
return;
|
||||
}
|
||||
|
||||
if constexpr (std::is_floating_point_v<T>)
|
||||
{
|
||||
constexpr size_t unroll_count = 128 / sizeof(T);
|
||||
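The integer branch above removes the per-element null check by multiplying each value with `!null` (0 for null, 1 for not null), which keeps the loop branch-free and lets the compiler auto-vectorize it, as the quick-bench link in the hunk suggests. A standalone sketch of the same trick, independent of the `Impl::add` machinery:

```cpp
#include <cstdint>
#include <iostream>
#include <vector>

/// Branch-free masked sum: a null element contributes value * 0, a non-null
/// element contributes value * 1, so the loop body has no data-dependent branch
/// and is a good candidate for auto-vectorization.
int64_t sumNotNull(const std::vector<int64_t> & data, const std::vector<uint8_t> & null_map)
{
    int64_t local_sum = 0;
    for (size_t i = 0; i < data.size(); ++i)
    {
        int64_t multiplier = !null_map[i];   /// 0 for null, 1 for not null
        local_sum += data[i] * multiplier;
    }
    return local_sum;
}

int main()
{
    std::vector<int64_t> data{1, 2, 3, 4};
    std::vector<uint8_t> null_map{0, 1, 0, 0};   /// the value 2 is NULL
    std::cout << sumNotNull(data, null_map) << '\n';   /// prints 8
}
```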
|
@ -459,8 +459,6 @@ public:
|
||||
explicit FieldVisitorMax(const Field & rhs_) : rhs(rhs_) {}
|
||||
|
||||
bool operator() (Null &) const { throw Exception("Cannot compare Nulls", ErrorCodes::LOGICAL_ERROR); }
|
||||
bool operator() (NegativeInfinity &) const { throw Exception("Cannot compare -Inf", ErrorCodes::LOGICAL_ERROR); }
|
||||
bool operator() (PositiveInfinity &) const { throw Exception("Cannot compare +Inf", ErrorCodes::LOGICAL_ERROR); }
|
||||
bool operator() (AggregateFunctionStateData &) const { throw Exception("Cannot compare AggregateFunctionStates", ErrorCodes::LOGICAL_ERROR); }
|
||||
|
||||
bool operator() (Array & x) const { return compareImpl<Array>(x); }
|
||||
@ -496,8 +494,6 @@ public:
|
||||
explicit FieldVisitorMin(const Field & rhs_) : rhs(rhs_) {}
|
||||
|
||||
bool operator() (Null &) const { throw Exception("Cannot compare Nulls", ErrorCodes::LOGICAL_ERROR); }
|
||||
bool operator() (NegativeInfinity &) const { throw Exception("Cannot compare -Inf", ErrorCodes::LOGICAL_ERROR); }
|
||||
bool operator() (PositiveInfinity &) const { throw Exception("Cannot compare +Inf", ErrorCodes::LOGICAL_ERROR); }
|
||||
bool operator() (AggregateFunctionStateData &) const { throw Exception("Cannot sum AggregateFunctionStates", ErrorCodes::LOGICAL_ERROR); }
|
||||
|
||||
bool operator() (Array & x) const { return compareImpl<Array>(x); }
|
||||
|
@ -76,10 +76,6 @@ add_headers_and_sources(clickhouse_common_io IO)
|
||||
add_headers_and_sources(clickhouse_common_io IO/S3)
|
||||
list (REMOVE_ITEM clickhouse_common_io_sources Common/malloc.cpp Common/new_delete.cpp)
|
||||
|
||||
if (USE_SQLITE)
|
||||
add_headers_and_sources(dbms Databases/SQLite)
|
||||
endif()
|
||||
|
||||
if(USE_RDKAFKA)
|
||||
add_headers_and_sources(dbms Storages/Kafka)
|
||||
endif()
|
||||
@ -419,11 +415,6 @@ if (USE_AWS_S3)
|
||||
target_include_directories (clickhouse_common_io SYSTEM BEFORE PUBLIC ${AWS_S3_INCLUDE_DIR})
|
||||
endif()
|
||||
|
||||
if (USE_S2_GEOMETRY)
|
||||
dbms_target_link_libraries (PUBLIC ${S2_GEOMETRY_LIBRARY})
|
||||
dbms_target_include_directories (SYSTEM BEFORE PUBLIC ${S2_GEOMETRY_INCLUDE_DIR})
|
||||
endif()
|
||||
|
||||
if (USE_BROTLI)
|
||||
target_link_libraries (clickhouse_common_io PRIVATE ${BROTLI_LIBRARY})
|
||||
target_include_directories (clickhouse_common_io SYSTEM BEFORE PRIVATE ${BROTLI_INCLUDE_DIR})
|
||||
@ -434,10 +425,6 @@ if (USE_AMQPCPP)
|
||||
dbms_target_include_directories (SYSTEM BEFORE PUBLIC ${AMQPCPP_INCLUDE_DIR})
|
||||
endif()
|
||||
|
||||
if (USE_SQLITE)
|
||||
dbms_target_link_libraries(PUBLIC sqlite)
|
||||
endif()
|
||||
|
||||
if (USE_CASSANDRA)
|
||||
dbms_target_link_libraries(PUBLIC ${CASSANDRA_LIBRARY})
|
||||
dbms_target_include_directories (SYSTEM BEFORE PUBLIC ${CASS_INCLUDE_DIR})
|
||||
|
@ -353,11 +353,6 @@ bool HedgedConnections::resumePacketReceiver(const HedgedConnections::ReplicaLoc
|
||||
if (offset_states[location.offset].active_connection_count == 0 && !offset_states[location.offset].next_replica_in_process)
|
||||
throw NetException("Receive timeout expired", ErrorCodes::SOCKET_TIMEOUT);
|
||||
}
|
||||
else if (std::holds_alternative<std::exception_ptr>(res))
|
||||
{
|
||||
finishProcessReplica(replica_state, true);
|
||||
std::rethrow_exception(std::move(std::get<std::exception_ptr>(res)));
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
@ -31,7 +31,7 @@ public:
|
||||
}
|
||||
|
||||
/// Resume packet receiving.
|
||||
std::variant<int, Packet, Poco::Timespan, std::exception_ptr> resume()
|
||||
std::variant<int, Packet, Poco::Timespan> resume()
|
||||
{
|
||||
/// If there is no pending data, check receive timeout.
|
||||
if (!connection->hasReadPendingData() && !checkReceiveTimeout())
|
||||
@ -43,7 +43,7 @@ public:
|
||||
/// Resume fiber.
|
||||
fiber = std::move(fiber).resume();
|
||||
if (exception)
|
||||
return std::move(exception);
|
||||
std::rethrow_exception(std::move(exception));
|
||||
|
||||
if (is_read_in_process)
|
||||
return epoll.getFileDescriptor();
|
||||
|
@ -546,54 +546,97 @@ namespace
|
||||
{
|
||||
|
||||
/// The following function implements a slightly more general version
|
||||
/// of getExtremes() than the implementation from Not-Null IColumns.
|
||||
/// of getExtremes() than the implementation from ColumnVector.
|
||||
/// It takes into account the possible presence of nullable values.
|
||||
void getExtremesWithNulls(const IColumn & nested_column, const NullMap & null_array, Field & min, Field & max, bool null_last = false)
|
||||
template <typename T>
|
||||
void getExtremesFromNullableContent(const ColumnVector<T> & col, const NullMap & null_map, Field & min, Field & max)
|
||||
{
|
||||
size_t number_of_nulls = 0;
|
||||
size_t n = null_array.size();
|
||||
NullMap not_null_array(n);
|
||||
for (auto i = 0ul; i < n; ++i)
|
||||
const auto & data = col.getData();
|
||||
size_t size = data.size();
|
||||
|
||||
if (size == 0)
|
||||
{
|
||||
if (null_array[i])
|
||||
min = Null();
|
||||
max = Null();
|
||||
return;
|
||||
}
|
||||
|
||||
bool has_not_null = false;
|
||||
bool has_not_nan = false;
|
||||
|
||||
T cur_min = 0;
|
||||
T cur_max = 0;
|
||||
|
||||
for (size_t i = 0; i < size; ++i)
|
||||
{
|
||||
const T x = data[i];
|
||||
|
||||
if (null_map[i])
|
||||
continue;
|
||||
|
||||
if (!has_not_null)
|
||||
{
|
||||
++number_of_nulls;
|
||||
not_null_array[i] = 0;
|
||||
cur_min = x;
|
||||
cur_max = x;
|
||||
has_not_null = true;
|
||||
has_not_nan = !isNaN(x);
|
||||
continue;
|
||||
}
|
||||
else
|
||||
|
||||
if (isNaN(x))
|
||||
continue;
|
||||
|
||||
if (!has_not_nan)
|
||||
{
|
||||
not_null_array[i] = 1;
|
||||
cur_min = x;
|
||||
cur_max = x;
|
||||
has_not_nan = true;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (x < cur_min)
|
||||
cur_min = x;
|
||||
else if (x > cur_max)
|
||||
cur_max = x;
|
||||
}
|
||||
if (number_of_nulls == 0)
|
||||
|
||||
if (has_not_null)
|
||||
{
|
||||
nested_column.getExtremes(min, max);
|
||||
}
|
||||
else if (number_of_nulls == n)
|
||||
{
|
||||
min = PositiveInfinity();
|
||||
max = PositiveInfinity();
|
||||
}
|
||||
else
|
||||
{
|
||||
auto filtered_column = nested_column.filter(not_null_array, -1);
|
||||
filtered_column->getExtremes(min, max);
|
||||
if (null_last)
|
||||
max = PositiveInfinity();
|
||||
min = cur_min;
|
||||
max = cur_max;
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
|
||||
void ColumnNullable::getExtremes(Field & min, Field & max) const
|
||||
{
|
||||
getExtremesWithNulls(getNestedColumn(), getNullMapData(), min, max);
|
||||
}
|
||||
min = Null();
|
||||
max = Null();
|
||||
|
||||
const auto & null_map_data = getNullMapData();
|
||||
|
||||
void ColumnNullable::getExtremesNullLast(Field & min, Field & max) const
|
||||
{
|
||||
getExtremesWithNulls(getNestedColumn(), getNullMapData(), min, max, true);
|
||||
if (const auto * col_i8 = typeid_cast<const ColumnInt8 *>(nested_column.get()))
|
||||
getExtremesFromNullableContent<Int8>(*col_i8, null_map_data, min, max);
|
||||
else if (const auto * col_i16 = typeid_cast<const ColumnInt16 *>(nested_column.get()))
|
||||
getExtremesFromNullableContent<Int16>(*col_i16, null_map_data, min, max);
|
||||
else if (const auto * col_i32 = typeid_cast<const ColumnInt32 *>(nested_column.get()))
|
||||
getExtremesFromNullableContent<Int32>(*col_i32, null_map_data, min, max);
|
||||
else if (const auto * col_i64 = typeid_cast<const ColumnInt64 *>(nested_column.get()))
|
||||
getExtremesFromNullableContent<Int64>(*col_i64, null_map_data, min, max);
|
||||
else if (const auto * col_u8 = typeid_cast<const ColumnUInt8 *>(nested_column.get()))
|
||||
getExtremesFromNullableContent<UInt8>(*col_u8, null_map_data, min, max);
|
||||
else if (const auto * col_u16 = typeid_cast<const ColumnUInt16 *>(nested_column.get()))
|
||||
getExtremesFromNullableContent<UInt16>(*col_u16, null_map_data, min, max);
|
||||
else if (const auto * col_u32 = typeid_cast<const ColumnUInt32 *>(nested_column.get()))
|
||||
getExtremesFromNullableContent<UInt32>(*col_u32, null_map_data, min, max);
|
||||
else if (const auto * col_u64 = typeid_cast<const ColumnUInt64 *>(nested_column.get()))
|
||||
getExtremesFromNullableContent<UInt64>(*col_u64, null_map_data, min, max);
|
||||
else if (const auto * col_f32 = typeid_cast<const ColumnFloat32 *>(nested_column.get()))
|
||||
getExtremesFromNullableContent<Float32>(*col_f32, null_map_data, min, max);
|
||||
else if (const auto * col_f64 = typeid_cast<const ColumnFloat64 *>(nested_column.get()))
|
||||
getExtremesFromNullableContent<Float64>(*col_f64, null_map_data, min, max);
|
||||
}
|
||||
|
||||
|
||||
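The `getExtremesFromNullableContent` overloads restored above scan the numeric payload once, skipping rows flagged in the null map, ignoring NaNs, and tracking a running minimum and maximum. A self-contained sketch of the same scan for one numeric type (the function name below is illustrative):

```cpp
#include <cmath>
#include <cstdint>
#include <iostream>
#include <optional>
#include <utility>
#include <vector>

/// Min/max over a nullable column: null rows are skipped via the null map,
/// NaNs are ignored, and an empty result is reported as std::nullopt.
std::optional<std::pair<double, double>>
getExtremes(const std::vector<double> & data, const std::vector<uint8_t> & null_map)
{
    std::optional<std::pair<double, double>> extremes;
    for (size_t i = 0; i < data.size(); ++i)
    {
        if (null_map[i] || std::isnan(data[i]))
            continue;
        if (!extremes)
            extremes.emplace(data[i], data[i]);
        else
        {
            if (data[i] < extremes->first) extremes->first = data[i];
            if (data[i] > extremes->second) extremes->second = data[i];
        }
    }
    return extremes;   /// nullopt means every row was NULL or NaN
}

int main()
{
    std::vector<double> data{3.0, 7.5, NAN, -1.0};
    std::vector<uint8_t> null_map{0, 1, 0, 0};   /// 7.5 is NULL
    if (auto e = getExtremes(data, null_map))
        std::cout << "min=" << e->first << " max=" << e->second << '\n';   /// min=-1 max=3
}
```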
|
@ -111,8 +111,6 @@ public:
|
||||
void updateWeakHash32(WeakHash32 & hash) const override;
|
||||
void updateHashFast(SipHash & hash) const override;
|
||||
void getExtremes(Field & min, Field & max) const override;
|
||||
// Special function for nullable minmax index
|
||||
void getExtremesNullLast(Field & min, Field & max) const;
|
||||
|
||||
MutableColumns scatter(ColumnIndex num_columns, const Selector & selector) const override
|
||||
{
|
||||
|
@ -109,23 +109,11 @@ static DNSResolver::IPAddresses resolveIPAddressImpl(const std::string & host)
|
||||
/// It should not affect client address checking, since client cannot connect from IPv6 address
|
||||
/// if server has no IPv6 addresses.
|
||||
flags |= Poco::Net::DNS::DNS_HINT_AI_ADDRCONFIG;
|
||||
|
||||
DNSResolver::IPAddresses addresses;
|
||||
|
||||
try
|
||||
{
|
||||
#if defined(ARCADIA_BUILD)
|
||||
addresses = Poco::Net::DNS::hostByName(host, &Poco::Net::DNS::DEFAULT_DNS_TIMEOUT, flags).addresses();
|
||||
auto addresses = Poco::Net::DNS::hostByName(host, &Poco::Net::DNS::DEFAULT_DNS_TIMEOUT, flags).addresses();
|
||||
#else
|
||||
addresses = Poco::Net::DNS::hostByName(host, flags).addresses();
|
||||
auto addresses = Poco::Net::DNS::hostByName(host, flags).addresses();
|
||||
#endif
|
||||
}
|
||||
catch (const Poco::Net::DNSException & e)
|
||||
{
|
||||
LOG_ERROR(&Poco::Logger::get("DNSResolver"), "Cannot resolve host ({}), error {}: {}.", host, e.code(), e.message());
|
||||
addresses.clear();
|
||||
}
|
||||
|
||||
if (addresses.empty())
|
||||
throw Exception("Not found address of host: " + host, ErrorCodes::DNS_ERROR);
|
||||
|
||||
|
@ -558,9 +558,6 @@
|
||||
M(588, DISTRIBUTED_BROKEN_BATCH_INFO) \
|
||||
M(589, DISTRIBUTED_BROKEN_BATCH_FILES) \
|
||||
M(590, CANNOT_SYSCONF) \
|
||||
M(591, SQLITE_ENGINE_ERROR) \
|
||||
M(592, DATA_ENCRYPTION_ERROR) \
|
||||
M(593, ZERO_COPY_REPLICATION_ERROR) \
|
||||
\
|
||||
M(998, POSTGRESQL_CONNECTION_FAILURE) \
|
||||
M(999, KEEPER_EXCEPTION) \
|
||||
|
Some files were not shown because too many files have changed in this diff.