Revert "Merge pull request #63279 from allmazz/mongodb_refactoring"

This reverts commit 14cb02a18c, reversing
changes made to a940a9ebbc.
This commit is contained in:
Alexey Milovidov 2024-09-22 22:28:24 +02:00
parent ea71d7f6d1
commit 2ae9c9ac16
103 changed files with 1552 additions and 6149 deletions

6
.gitmodules vendored
View File

@ -363,12 +363,6 @@
[submodule "contrib/double-conversion"]
path = contrib/double-conversion
url = https://github.com/ClickHouse/double-conversion.git
[submodule "contrib/mongo-cxx-driver"]
path = contrib/mongo-cxx-driver
url = https://github.com/ClickHouse/mongo-cxx-driver.git
[submodule "contrib/mongo-c-driver"]
path = contrib/mongo-c-driver
url = https://github.com/ClickHouse/mongo-c-driver.git
[submodule "contrib/numactl"]
path = contrib/numactl
url = https://github.com/ClickHouse/numactl.git

View File

@ -3,11 +3,7 @@ add_subdirectory (Data)
add_subdirectory (Data/ODBC)
add_subdirectory (Foundation)
add_subdirectory (JSON)
if (USE_MONGODB)
add_subdirectory(MongoDB)
endif()
add_subdirectory (MongoDB)
add_subdirectory (Net)
add_subdirectory (NetSSL_OpenSSL)
add_subdirectory (Redis)

View File

@ -18,4 +18,4 @@ set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
set (CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
set (USE_MUSL 1)
add_definitions(-DUSE_MUSL=1 -D__MUSL__=1)
add_definitions(-DUSE_MUSL=1)

View File

@ -160,12 +160,6 @@ add_contrib (datasketches-cpp-cmake datasketches-cpp)
add_contrib (incbin-cmake incbin)
add_contrib (sqids-cpp-cmake sqids-cpp)
option(USE_MONGODB "Enable MongoDB support" ${ENABLE_LIBRARIES})
if (USE_MONGODB)
add_contrib (mongo-c-driver-cmake mongo-c-driver) # requires: zlib
add_contrib (mongo-cxx-driver-cmake mongo-cxx-driver) # requires: libmongoc, libbson
endif()
option(ENABLE_NLP "Enable NLP functions support" ${ENABLE_LIBRARIES})
if (ENABLE_NLP)
add_contrib (libstemmer-c-cmake libstemmer_c)

@ -1 +0,0 @@
Subproject commit d55410c69183c90d18fd3b3f1d9db3d224fc8d52

View File

@ -1,152 +0,0 @@
option(USE_MONGODB "Enable MongoDB support" ${ENABLE_LIBRARIES})
if(NOT USE_MONGODB)
message(STATUS "Not using libmongoc and libbson")
return()
endif()
set(libbson_VERSION_MAJOR 1)
set(libbson_VERSION_MINOR 27)
set(libbson_VERSION_PATCH 0)
set(libbson_VERSION 1.27.0)
set(libmongoc_VERSION_MAJOR 1)
set(libmongoc_VERSION_MINOR 27)
set(libmongoc_VERSION_PATCH 0)
set(libmongoc_VERSION 1.27.0)
set(LIBBSON_SOURCES_ROOT "${ClickHouse_SOURCE_DIR}/contrib/mongo-c-driver/src")
set(LIBBSON_SOURCE_DIR "${LIBBSON_SOURCES_ROOT}/libbson/src")
file(GLOB_RECURSE LIBBSON_SOURCES "${LIBBSON_SOURCE_DIR}/*.c")
include(TestBigEndian)
test_big_endian(BSON_BIG_ENDIAN)
if(BSON_BIG_ENDIAN)
set(BSON_BYTE_ORDER 4321)
else()
set(BSON_BYTE_ORDER 1234)
endif()
set(BSON_OS 1)
set(BSON_EXTRA_ALIGN 1)
set(BSON_HAVE_SNPRINTF 1)
set(BSON_HAVE_TIMESPEC 1)
set(BSON_HAVE_GMTIME_R 1)
set(BSON_HAVE_RAND_R 1)
set(BSON_HAVE_STRINGS_H 1)
set(BSON_HAVE_STRLCPY 0)
set(BSON_HAVE_STRNLEN 1)
set(BSON_HAVE_STDBOOL_H 1)
set(BSON_HAVE_CLOCK_GETTIME 1)
# common settings
set(MONGOC_TRACE 0)
set(MONGOC_ENABLE_STATIC_BUILD 1)
set(MONGOC_ENABLE_DEBUG_ASSERTIONS 0)
set(MONGOC_ENABLE_MONGODB_AWS_AUTH 0)
set(MONGOC_ENABLE_SASL_CYRUS 0)
set(MONGOC_ENABLE_SASL 0)
set(MONGOC_ENABLE_SASL_SSPI 0)
set(MONGOC_HAVE_SASL_CLIENT_DONE 0)
set(MONGOC_ENABLE_SRV 0)
# DNS
set(MONGOC_HAVE_DNSAPI 0)
set(MONGOC_HAVE_RES_SEARCH 0)
set(MONGOC_HAVE_RES_NSEARCH 0)
set(MONGOC_HAVE_RES_NCLOSE 0)
set(MONGOC_HAVE_RES_NDESTROY 0)
set(MONGOC_ENABLE_COMPRESSION 1)
set(MONGOC_ENABLE_COMPRESSION_ZLIB 0)
set(MONGOC_ENABLE_COMPRESSION_SNAPPY 0)
set(MONGOC_ENABLE_COMPRESSION_ZSTD 1)
# SSL
set(MONGOC_ENABLE_CRYPTO 0)
set(MONGOC_ENABLE_CRYPTO_CNG 0)
set(MONGOC_ENABLE_CRYPTO_COMMON_CRYPTO 0)
set(MONGOC_ENABLE_CRYPTO_SYSTEM_PROFILE 0)
set(MONGOC_ENABLE_SSL 0)
set(MONGOC_ENABLE_SSL_OPENSSL 0)
set(MONGOC_ENABLE_SSL_SECURE_CHANNEL 0)
set(MONGOC_ENABLE_SSL_SECURE_TRANSPORT 0)
set(MONGOC_ENABLE_SSL_LIBRESSL 0)
set(MONGOC_ENABLE_CRYPTO_LIBCRYPTO 0)
set(MONGOC_ENABLE_CLIENT_SIDE_ENCRYPTION 0)
set(MONGOC_HAVE_ASN1_STRING_GET0_DATA 0)
if(ENABLE_SSL)
set(MONGOC_ENABLE_SSL 1)
set(MONGOC_ENABLE_CRYPTO 1)
set(MONGOC_ENABLE_SSL_OPENSSL 1)
set(MONGOC_ENABLE_CRYPTO_LIBCRYPTO 1)
set(MONGOC_HAVE_ASN1_STRING_GET0_DATA 1)
else()
message(WARNING "Building mongoc without SSL")
endif()
set(CMAKE_EXTRA_INCLUDE_FILES "sys/socket.h")
set(MONGOC_SOCKET_ARG2 "struct sockaddr")
set(MONGOC_HAVE_SOCKLEN 1)
set(MONGOC_SOCKET_ARG3 "socklen_t")
set(MONGOC_ENABLE_RDTSCP 0)
set(MONGOC_NO_AUTOMATIC_GLOBALS 1)
set(MONGOC_ENABLE_STATIC_INSTALL 0)
set(MONGOC_ENABLE_SHM_COUNTERS 0)
set(MONGOC_HAVE_SCHED_GETCPU 0)
set(MONGOC_HAVE_SS_FAMILY 0)
configure_file(
${LIBBSON_SOURCE_DIR}/bson/bson-config.h.in
${LIBBSON_SOURCE_DIR}/bson/bson-config.h
)
configure_file(
${LIBBSON_SOURCE_DIR}/bson/bson-version.h.in
${LIBBSON_SOURCE_DIR}/bson/bson-version.h
)
configure_file(
${LIBBSON_SOURCE_DIR}/bson/bson-version.h.in
${LIBBSON_SOURCE_DIR}/bson/bson-version.h
)
set(COMMON_SOURCE_DIR "${LIBBSON_SOURCES_ROOT}/common")
file(GLOB_RECURSE COMMON_SOURCES "${COMMON_SOURCE_DIR}/*.c")
configure_file(
${COMMON_SOURCE_DIR}/common-config.h.in
${COMMON_SOURCE_DIR}/common-config.h
)
add_library(_libbson ${LIBBSON_SOURCES} ${COMMON_SOURCES})
add_library(ch_contrib::libbson ALIAS _libbson)
target_include_directories(_libbson SYSTEM PUBLIC ${LIBBSON_SOURCE_DIR} ${COMMON_SOURCE_DIR})
target_compile_definitions(_libbson PRIVATE BSON_COMPILATION)
if(OS_LINUX)
target_compile_definitions(_libbson PRIVATE -D_GNU_SOURCE -D_POSIX_C_SOURCE=199309L -D_XOPEN_SOURCE=600)
elseif(OS_DARWIN)
target_compile_definitions(_libbson PRIVATE -D_DARWIN_C_SOURCE)
endif()
set(LIBMONGOC_SOURCE_DIR "${LIBBSON_SOURCES_ROOT}/libmongoc/src")
file(GLOB_RECURSE LIBMONGOC_SOURCES "${LIBMONGOC_SOURCE_DIR}/*.c")
set(UTF8PROC_SOURCE_DIR "${LIBBSON_SOURCES_ROOT}/utf8proc-2.8.0")
set(UTF8PROC_SOURCES "${UTF8PROC_SOURCE_DIR}/utf8proc.c")
set(UTHASH_SOURCE_DIR "${LIBBSON_SOURCES_ROOT}/uthash")
configure_file(
${LIBMONGOC_SOURCE_DIR}/mongoc/mongoc-config.h.in
${LIBMONGOC_SOURCE_DIR}/mongoc/mongoc-config.h
)
configure_file(
${LIBMONGOC_SOURCE_DIR}/mongoc/mongoc-version.h.in
${LIBMONGOC_SOURCE_DIR}/mongoc/mongoc-version.h
)
add_library(_libmongoc ${LIBMONGOC_SOURCES} ${COMMON_SOURCES} ${UTF8PROC_SOURCES})
add_library(ch_contrib::libmongoc ALIAS _libmongoc)
target_include_directories(_libmongoc SYSTEM PUBLIC ${LIBMONGOC_SOURCE_DIR} ${COMMON_SOURCE_DIR} ${UTF8PROC_SOURCE_DIR} ${UTHASH_SOURCE_DIR})
target_include_directories(_libmongoc SYSTEM PRIVATE ${LIBMONGOC_SOURCE_DIR}/mongoc ${UTHASH_SOURCE_DIR})
target_compile_definitions(_libmongoc PRIVATE MONGOC_COMPILATION)
target_link_libraries(_libmongoc ch_contrib::libbson ch_contrib::c-ares ch_contrib::zstd)
if(ENABLE_SSL)
target_link_libraries(_libmongoc OpenSSL::SSL)
endif()

@ -1 +0,0 @@
Subproject commit 3166bdb49b717ce1bc30f46cc2b274ab1de7005b

View File

@ -1,189 +0,0 @@
option(USE_MONGODB "Enable MongoDB support" ${ENABLE_LIBRARIES})
if(NOT USE_MONGODB)
message(STATUS "Not using mongocxx and bsoncxx")
return()
endif()
set(BSONCXX_SOURCES_DIR "${ClickHouse_SOURCE_DIR}/contrib/mongo-cxx-driver/src/bsoncxx")
set(BSONCXX_SOURCES
${BSONCXX_SOURCES_DIR}/lib/bsoncxx/v_noabi/bsoncxx/array/element.cpp
${BSONCXX_SOURCES_DIR}/lib/bsoncxx/v_noabi/bsoncxx/array/value.cpp
${BSONCXX_SOURCES_DIR}/lib/bsoncxx/v_noabi/bsoncxx/array/view.cpp
${BSONCXX_SOURCES_DIR}/lib/bsoncxx/v_noabi/bsoncxx/builder/core.cpp
${BSONCXX_SOURCES_DIR}/lib/bsoncxx/v_noabi/bsoncxx/decimal128.cpp
${BSONCXX_SOURCES_DIR}/lib/bsoncxx/v_noabi/bsoncxx/document/element.cpp
${BSONCXX_SOURCES_DIR}/lib/bsoncxx/v_noabi/bsoncxx/document/value.cpp
${BSONCXX_SOURCES_DIR}/lib/bsoncxx/v_noabi/bsoncxx/document/view.cpp
${BSONCXX_SOURCES_DIR}/lib/bsoncxx/v_noabi/bsoncxx/exception/error_code.cpp
${BSONCXX_SOURCES_DIR}/lib/bsoncxx/v_noabi/bsoncxx/json.cpp
${BSONCXX_SOURCES_DIR}/lib/bsoncxx/v_noabi/bsoncxx/oid.cpp
${BSONCXX_SOURCES_DIR}/lib/bsoncxx/v_noabi/bsoncxx/private/itoa.cpp
${BSONCXX_SOURCES_DIR}/lib/bsoncxx/v_noabi/bsoncxx/string/view_or_value.cpp
${BSONCXX_SOURCES_DIR}/lib/bsoncxx/v_noabi/bsoncxx/types.cpp
${BSONCXX_SOURCES_DIR}/lib/bsoncxx/v_noabi/bsoncxx/types/bson_value/value.cpp
${BSONCXX_SOURCES_DIR}/lib/bsoncxx/v_noabi/bsoncxx/types/bson_value/view.cpp
${BSONCXX_SOURCES_DIR}/lib/bsoncxx/v_noabi/bsoncxx/validate.cpp
)
set(BSONCXX_POLY_USE_IMPLS ON)
configure_file(
${BSONCXX_SOURCES_DIR}/lib/bsoncxx/v_noabi/bsoncxx/config/config.hpp.in
${BSONCXX_SOURCES_DIR}/lib/bsoncxx/v_noabi/bsoncxx/config/config.hpp
)
configure_file(
${BSONCXX_SOURCES_DIR}/lib/bsoncxx/v_noabi/bsoncxx/config/version.hpp.in
${BSONCXX_SOURCES_DIR}/lib/bsoncxx/v_noabi/bsoncxx/config/version.hpp
)
configure_file(
${BSONCXX_SOURCES_DIR}/lib/bsoncxx/v_noabi/bsoncxx/config/private/config.hh.in
${BSONCXX_SOURCES_DIR}/lib/bsoncxx/v_noabi/bsoncxx/config/private/config.hh
)
add_library(_bsoncxx ${BSONCXX_SOURCES})
add_library(ch_contrib::bsoncxx ALIAS _bsoncxx)
target_include_directories(_bsoncxx SYSTEM PUBLIC "${BSONCXX_SOURCES_DIR}/include/bsoncxx/v_noabi" ${BSONCXX_SOURCES_DIR}/lib/bsoncxx/v_noabi)
target_compile_definitions(_bsoncxx PUBLIC BSONCXX_STATIC)
target_link_libraries(_bsoncxx ch_contrib::libbson)
include(GenerateExportHeader)
generate_export_header(_bsoncxx
BASE_NAME BSONCXX
EXPORT_MACRO_NAME BSONCXX_API
NO_EXPORT_MACRO_NAME BSONCXX_PRIVATE
EXPORT_FILE_NAME ${BSONCXX_SOURCES_DIR}/lib/bsoncxx/v_noabi/bsoncxx/config/export.hpp
STATIC_DEFINE BSONCXX_STATIC
)
set(MONGOCXX_SOURCES_DIR "${ClickHouse_SOURCE_DIR}/contrib/mongo-cxx-driver/src/mongocxx")
set(MONGOCXX_SOURCES
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/bulk_write.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/change_stream.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/client.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/client_encryption.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/client_session.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/collection.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/cursor.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/database.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/events/command_failed_event.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/events/command_started_event.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/events/command_succeeded_event.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/events/heartbeat_failed_event.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/events/heartbeat_started_event.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/events/heartbeat_succeeded_event.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/events/server_changed_event.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/events/server_closed_event.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/events/server_description.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/events/server_opening_event.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/events/topology_changed_event.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/events/topology_closed_event.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/events/topology_description.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/events/topology_opening_event.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/exception/error_code.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/exception/operation_exception.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/exception/server_error_code.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/gridfs/bucket.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/gridfs/downloader.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/gridfs/uploader.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/hint.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/index_model.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/index_view.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/instance.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/logger.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/model/delete_many.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/model/delete_one.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/model/insert_one.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/model/replace_one.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/model/update_many.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/model/update_one.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/model/write.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/aggregate.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/apm.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/auto_encryption.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/bulk_write.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/change_stream.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/client.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/client_encryption.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/client_session.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/count.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/create_collection.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/data_key.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/delete.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/distinct.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/encrypt.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/estimated_document_count.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/find.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/find_one_and_delete.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/find_one_and_replace.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/find_one_and_update.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/gridfs/bucket.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/gridfs/upload.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/index.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/index_view.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/insert.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/pool.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/range.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/replace.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/rewrap_many_datakey.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/server_api.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/tls.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/transaction.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/update.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/pipeline.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/pool.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/private/conversions.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/private/libbson.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/private/libmongoc.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/private/numeric_casting.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/read_concern.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/read_preference.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/result/bulk_write.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/result/delete.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/result/gridfs/upload.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/result/insert_many.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/result/insert_one.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/result/replace_one.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/result/rewrap_many_datakey.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/result/update.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/search_index_model.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/search_index_view.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/uri.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/validation_criteria.cpp
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/write_concern.cpp
)
set(MONGOCXX_COMPILER_VERSION "${CMAKE_CXX_COMPILER_VERSION}")
set(MONGOCXX_COMPILER_ID "${CMAKE_CXX_COMPILER_ID}")
set(MONGOCXX_LINK_WITH_STATIC_MONGOC 1)
set(MONGOCXX_BUILD_STATIC 1)
if(ENABLE_SSL)
set(MONGOCXX_ENABLE_SSL 1)
endif()
configure_file(
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/config/config.hpp.in
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/config/config.hpp
)
configure_file(
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/config/version.hpp.in
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/config/version.hpp
)
configure_file(
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/config/private/config.hh.in
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/config/private/config.hh
)
add_library(_mongocxx ${MONGOCXX_SOURCES})
add_library(ch_contrib::mongocxx ALIAS _mongocxx)
target_include_directories(_mongocxx SYSTEM PUBLIC "${MONGOCXX_SOURCES_DIR}/include/mongocxx/v_noabi" ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi)
target_compile_definitions(_mongocxx PUBLIC MONGOCXX_STATIC)
target_link_libraries(_mongocxx ch_contrib::bsoncxx ch_contrib::libmongoc)
generate_export_header(_mongocxx
BASE_NAME MONGOCXX
EXPORT_MACRO_NAME MONGOCXX_API
NO_EXPORT_MACRO_NAME MONGOCXX_PRIVATE
EXPORT_FILE_NAME ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/config/export.hpp
STATIC_DEFINE MONGOCXX_STATIC
)

2
contrib/sysroot vendored

@ -1 +1 @@
Subproject commit 738138e665809a5b28c453983c5f48f23a340ed6
Subproject commit 5be834147d5b5dd77ca2b821f356982029320513

View File

@ -6,15 +6,7 @@ sidebar_label: MongoDB
# MongoDB
MongoDB engine is read-only table engine which allows to read data from remote [MongoDB](https://www.mongodb.com/) collection.
Only MongoDB v3.6+ servers are supported.
[Seed list(`mongodb**+srv**`)](https://www.mongodb.com/docs/manual/reference/glossary/#std-term-seed-list) is not yet supported.
:::note
If you're facing troubles, please report the issue, and try to use [the legacy implementation](../../../operations/server-configuration-parameters/settings.md#use_legacy_mongodb_integration).
Keep in mind that it is deprecated, and will be removed in next releases.
:::
MongoDB engine is read-only table engine which allows to read data (`SELECT` queries) from remote MongoDB collection. Engine supports only non-nested data types. `INSERT` queries are not supported.
## Creating a Table {#creating-a-table}
@ -48,145 +40,49 @@ If you are using the MongoDB Atlas cloud offering:
- connection url can be obtained from 'Atlas SQL' option
- use options: 'connectTimeoutMS=10000&ssl=true&authSource=admin'
```
:::
Also, you can simply pass a URI:
``` sql
ENGINE = MongoDB(uri, collection);
```
**Engine Parameters**
- `uri` — MongoDB server's connection URI
- `collection` — Remote collection name.
## Types mappings
| MongoDB | ClickHouse |
|--------------------|-----------------------------------------------------------------------|
| bool, int32, int64 | *any numeric type*, String |
| double | Float64, String |
| date | Date, Date32, DateTime, DateTime64, String |
| string | String, UUID |
| document | String(as JSON) |
| array | Array, String(as JSON) |
| oid | String |
| binary | String if in column, base64 encoded string if in an array or document |
| *any other* | String |
If key is not found in MongoDB document (for example, column name doesn't match), default value or `NULL` (if the column is nullable) will be inserted.
## Supported clauses
Only queries with simple expressions are supported (for example, `WHERE field = <constant> ORDER BY field2 LIMIT <constant>`).
Such expressions are translated to MongoDB query language and executed on the server side.
You can disable all these restriction, using [mongodb_throw_on_unsupported_query](../../../operations/settings/settings.md#mongodb_throw_on_unsupported_query).
In that case ClickHouse tries to convert query on best effort basis, but it can lead to full table scan and processing on ClickHouse side.
:::note
It's always better to explicitly set type of literal because Mongo requires strict typed filters.\
For example you want to filter by `Date`:
```sql
SELECT * FROM mongo_table WHERE date = '2024-01-01'
```
This will not work because Mongo will not cast string to `Date`, so you need to cast it manually:
```sql
SELECT * FROM mongo_table WHERE date = '2024-01-01'::Date OR date = toDate('2024-01-01')
```
This applied for `Date`, `Date32`, `DateTime`, `Bool`, `UUID`.
:::
## Usage Example {#usage-example}
Assuming MongoDB has [sample_mflix](https://www.mongodb.com/docs/atlas/sample-data/sample-mflix) dataset loaded
Create a table in ClickHouse which allows to read data from MongoDB collection:
``` sql
CREATE TABLE sample_mflix_table
CREATE TABLE mongo_table
(
_id String,
title String,
plot String,
genres Array(String),
directors Array(String),
writers Array(String),
released Date,
imdb String,
year String,
) ENGINE = MongoDB('mongodb+srv://<USERNAME>:<PASSWORD>@cluster0.cdojylq.mongodb.net/sample_mflix', 'movies');
key UInt64,
data String
) ENGINE = MongoDB('mongo1:27017', 'test', 'simple_table', 'testuser', 'clickhouse');
```
To read from an SSL secured MongoDB server:
``` sql
CREATE TABLE mongo_table_ssl
(
key UInt64,
data String
) ENGINE = MongoDB('mongo2:27017', 'test', 'simple_table', 'testuser', 'clickhouse', 'ssl=true');
```
Query:
``` sql
SELECT count() FROM sample_mflix_table
SELECT COUNT() FROM mongo_table;
```
``` text
┌─count()─┐
1. │ 21349
└─────────┘
┌─count()─┐
│ 4 │
└─────────┘
```
```SQL
-- JSONExtractString cannot be pushed down to MongoDB
SET mongodb_throw_on_unsupported_query = 0;
You can also adjust connection timeout:
-- Find all 'Back to the Future' sequels with rating > 7.5
SELECT title, plot, genres, directors, released FROM sample_mflix_table
WHERE title IN ('Back to the Future', 'Back to the Future Part II', 'Back to the Future Part III')
AND toFloat32(JSONExtractString(imdb, 'rating')) > 7.5
ORDER BY year
FORMAT Vertical;
``` sql
CREATE TABLE mongo_table
(
key UInt64,
data String
) ENGINE = MongoDB('mongo2:27017', 'test', 'simple_table', 'testuser', 'clickhouse', 'connectTimeoutMS=100000');
```
```text
Row 1:
──────
title: Back to the Future
plot: A young man is accidentally sent 30 years into the past in a time-traveling DeLorean invented by his friend, Dr. Emmett Brown, and must make sure his high-school-age parents unite in order to save his own existence.
genres: ['Adventure','Comedy','Sci-Fi']
directors: ['Robert Zemeckis']
released: 1985-07-03
Row 2:
──────
title: Back to the Future Part II
plot: After visiting 2015, Marty McFly must repeat his visit to 1955 to prevent disastrous changes to 1985... without interfering with his first trip.
genres: ['Action','Adventure','Comedy']
directors: ['Robert Zemeckis']
released: 1989-11-22
```
```SQL
-- Find top 3 movies based on Cormac McCarthy's books
SELECT title, toFloat32(JSONExtractString(imdb, 'rating')) as rating
FROM sample_mflix_table
WHERE arrayExists(x -> x like 'Cormac McCarthy%', writers)
ORDER BY rating DESC
LIMIT 3;
```
```text
┌─title──────────────────┬─rating─┐
1. │ No Country for Old Men │ 8.1 │
2. │ The Sunset Limited │ 7.4 │
3. │ The Road │ 7.3 │
└────────────────────────┴────────┘
```
## Troubleshooting
You can see the generated MongoDB query in DEBUG level logs.
Implementation details can be found in [mongocxx](https://github.com/mongodb/mongo-cxx-driver) and [mongoc](https://github.com/mongodb/mongo-c-driver) documentations.

View File

@ -3162,11 +3162,3 @@ Type: UInt64
Default value: 100
Zero means unlimited
## use_legacy_mongodb_integration
Use the legacy MongoDB integration implementation. Deprecated.
Type: Bool
Default value: `true`.

View File

@ -5682,11 +5682,3 @@ Default value: `0`.
Enable `IF NOT EXISTS` for `CREATE` statement by default. If either this setting or `IF NOT EXISTS` is specified and a table with the provided name already exists, no exception will be thrown.
Default value: `false`.
## mongodb_throw_on_unsupported_query
If enabled, MongoDB tables will return an error when a MongoDB query can't be built.
Not applied for the legacy implementation, or when 'allow_experimental_analyzer=0`.
Default value: `true`.

View File

@ -1680,7 +1680,7 @@ Setting fields:
The `table` or `where` fields cannot be used together with the `query` field. And either one of the `table` or `query` fields must be declared.
:::
#### MongoDB
#### Mongodb
Example of settings:
@ -1700,17 +1700,6 @@ Example of settings:
or
``` xml
<source>
<mongodb>
<uri>mongodb://localhost:27017/test?ssl=true</uri>
<collection>dictionary_source</collection>
</mongodb>
</source>
```
or
``` sql
SOURCE(MONGODB(
host 'localhost'
@ -1733,22 +1722,6 @@ Setting fields:
- `collection` Name of the collection.
- `options` - MongoDB connection string options (optional parameter).
or
``` sql
SOURCE(MONGODB(
uri 'mongodb://localhost:27017/clickhouse'
collection 'dictionary_source'
))
```
Setting fields:
- `uri` - URI for establish the connection.
- `collection` Name of the collection.
[More information about the engine](../../engines/table-engines/integrations/mongodb.md)
#### Redis
@ -2065,7 +2038,7 @@ Configuration fields:
| `expression` | [Expression](../../sql-reference/syntax.md#expressions) that ClickHouse executes on the value.<br/>The expression can be a column name in the remote SQL database. Thus, you can use it to create an alias for the remote column.<br/><br/>Default value: no expression. | No |
| <a name="hierarchical-dict-attr"></a> `hierarchical` | If `true`, the attribute contains the value of a parent key for the current key. See [Hierarchical Dictionaries](#hierarchical-dictionaries).<br/><br/>Default value: `false`. | No |
| `injective` | Flag that shows whether the `id -> attribute` image is [injective](https://en.wikipedia.org/wiki/Injective_function).<br/>If `true`, ClickHouse can automatically place after the `GROUP BY` clause the requests to dictionaries with injection. Usually it significantly reduces the amount of such requests.<br/><br/>Default value: `false`. | No |
| `is_object_id` | Flag that shows whether the query is executed for a MongoDB document by `ObjectID`.<br/><br/>Default value: `false`.
| `is_object_id` | Flag that shows whether the query is executed for a MongoDB document by `ObjectID`.<br/><br/>Default value: `false`.
## Hierarchical Dictionaries

View File

@ -39,18 +39,6 @@ If you are using the MongoDB Atlas cloud offering please add these options:
:::
Also, you can connect by URI:
``` sql
mongodb(uri, collection, structure)
```
**Arguments**
- `uri` — Connection string.
- `collection` — Remote collection name.
- `structure` — The schema for the ClickHouse table returned from this function.
**Returned Value**
A table object with the same columns as the original MongoDB table.
@ -88,16 +76,6 @@ SELECT * FROM mongodb(
)
```
or:
```sql
SELECT * FROM mongodb(
'mongodb://test_user:password@127.0.0.1:27017/test?connectionTimeoutMS=10000',
'my_collection',
'log_type String, host String, command String'
)
```
**See Also**
- [The `MongoDB` table engine](/docs/en/engines/table-engines/integrations/mongodb.md)

View File

@ -188,9 +188,9 @@ int mainEntryClickHouseFormat(int argc, char ** argv)
registerInterpreters();
registerFunctions();
registerAggregateFunctions();
registerTableFunctions(false);
registerTableFunctions();
registerDatabases();
registerStorages(false);
registerStorages();
registerFormats();
std::unordered_set<std::string> additional_names;

View File

@ -507,10 +507,10 @@ try
/// Don't initialize DateLUT
registerFunctions();
registerAggregateFunctions();
registerTableFunctions(server_settings.use_legacy_mongodb_integration);
registerTableFunctions();
registerDatabases();
registerStorages(server_settings.use_legacy_mongodb_integration);
registerDictionaries(server_settings.use_legacy_mongodb_integration);
registerStorages();
registerDictionaries();
registerDisks(/* global_skip_access_check= */ true);
registerFormats();

View File

@ -784,10 +784,10 @@ try
registerInterpreters();
registerFunctions();
registerAggregateFunctions();
registerTableFunctions(server_settings.use_legacy_mongodb_integration);
registerTableFunctions();
registerDatabases();
registerStorages(server_settings.use_legacy_mongodb_integration);
registerDictionaries(server_settings.use_legacy_mongodb_integration);
registerStorages();
registerDictionaries();
registerDisks(/* global_skip_access_check= */ false);
registerFormats();
registerRemoteFileMetadatas();

View File

@ -412,23 +412,10 @@ dbms_target_link_libraries (
PUBLIC
boost::system
clickhouse_common_io
Poco::MongoDB
Poco::Redis
)
if (USE_MONGODB)
dbms_target_link_libraries (PUBLIC Poco::MongoDB)
endif()
if (TARGET ch_contrib::mongocxx)
dbms_target_link_libraries(
PUBLIC
ch_contrib::libbson
ch_contrib::libmongoc
ch_contrib::bsoncxx
ch_contrib::mongocxx
)
endif ()
if (TARGET ch::mysqlxx)
dbms_target_link_libraries (PUBLIC ch::mysqlxx)
endif()

View File

@ -1,377 +0,0 @@
#pragma once
#include "config.h"
#if USE_MONGODB
#include <Common/Base64.h>
#include <DataTypes/FieldToDataType.h>
namespace DB
{
namespace ErrorCodes
{
extern const int TYPE_MISMATCH;
extern const int NOT_IMPLEMENTED;
}
namespace BSONCXXHelper
{
using bsoncxx::builder::basic::array;
using bsoncxx::builder::basic::document;
using bsoncxx::builder::basic::kvp;
using bsoncxx::builder::basic::make_document;
/// Converts a ClickHouse Field of the given data type into a BSON value.
/// Throws NOT_IMPLEMENTED for types without a BSON mapping.
/// NOTE: UInt64 is mapped to a BSON double because BSON has no unsigned 64-bit
/// integer type; values above 2^53 therefore lose precision.
static bsoncxx::types::bson_value::value fieldAsBSONValue(const Field & field, const DataTypePtr & type)
{
    switch (type->getTypeId())
    {
        case TypeIndex::String:
            return bsoncxx::types::b_string{field.safeGet<String>()};
        case TypeIndex::UInt8: {
            /// UInt8 doubles as ClickHouse's Bool; map it to a BSON bool in that case.
            if (isBool(type))
                return bsoncxx::types::b_bool{field.safeGet<UInt8>() != 0};
            return bsoncxx::types::b_int32{static_cast<Int32>(field.safeGet<UInt8 &>())};
        }
        case TypeIndex::UInt16:
            return bsoncxx::types::b_int32{static_cast<Int32>(field.safeGet<UInt16 &>())};
        case TypeIndex::UInt32:
            return bsoncxx::types::b_int64{static_cast<Int64>(field.safeGet<UInt32 &>())};
        case TypeIndex::UInt64:
            return bsoncxx::types::b_double{static_cast<Float64>(field.safeGet<UInt64 &>())};
        case TypeIndex::Int8:
            return bsoncxx::types::b_int32{static_cast<Int32>(field.safeGet<Int8 &>())};
        case TypeIndex::Int16:
            return bsoncxx::types::b_int32{static_cast<Int32>(field.safeGet<Int16 &>())};
        case TypeIndex::Int32:
            return bsoncxx::types::b_int32{static_cast<Int32>(field.safeGet<Int32 &>())};
        case TypeIndex::Int64:
            return bsoncxx::types::b_int64{field.safeGet<Int64 &>()};
        case TypeIndex::Float32:
            return bsoncxx::types::b_double{field.safeGet<Float32 &>()};
        case TypeIndex::Float64:
            return bsoncxx::types::b_double{field.safeGet<Float64 &>()};
        case TypeIndex::Date:
            /// Day number -> seconds since epoch. Widen to Int64 before multiplying:
            /// UInt16 promotes to int, and 65535 * 86400 overflows a 32-bit int (UB).
            return bsoncxx::types::b_date{std::chrono::seconds{static_cast<Int64>(field.safeGet<UInt16 &>()) * 86400}};
        case TypeIndex::Date32:
            /// Same widening: large Date32 day numbers * 86400 overflow Int32.
            return bsoncxx::types::b_date{std::chrono::seconds{static_cast<Int64>(field.safeGet<Int32 &>()) * 86400}};
        case TypeIndex::DateTime:
            return bsoncxx::types::b_date{std::chrono::seconds{field.safeGet<UInt32 &>()}};
        case TypeIndex::UUID:
            /// UUIDs are stored as their canonical string form.
            return bsoncxx::types::b_string{static_cast<String>(formatUUID(field.safeGet<UUID &>()))};
        case TypeIndex::Tuple: {
            /// BSON has no tuple type; serialize as an array, inferring each
            /// element's type from its Field value.
            auto arr = array();
            for (const auto & elem : field.safeGet<Tuple &>())
                arr.append(fieldAsBSONValue(elem, applyVisitor(FieldToDataType(), elem)));
            return arr.view();
        }
        case TypeIndex::Array: {
            auto arr = array();
            for (const auto & elem : field.safeGet<Array &>())
                arr.append(fieldAsBSONValue(elem, applyVisitor(FieldToDataType(), elem)));
            return arr.view();
        }
        default:
            throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Fields with type '{}' is not supported.", type->getPrettyName());
    }
}
/// Recursively converts a BSON element into a JSONBuilder item, so nested
/// MongoDB documents/arrays can be rendered with ClickHouse's own JSON writer.
/// Throws NOT_IMPLEMENTED for BSON types with no JSON representation here.
template <typename T>
static JSONBuilder::ItemPtr BSONElementAsJSON(const T & value)
{
    switch (value.type())
    {
        /// Symbols are a deprecated BSON type carrying a string payload,
        /// so both are read through get_string().
        case bsoncxx::type::k_string:
        case bsoncxx::type::k_symbol:
            return std::make_unique<JSONBuilder::JSONString>(std::string(value.get_string().value));
        case bsoncxx::type::k_oid:
            return std::make_unique<JSONBuilder::JSONString>(value.get_oid().value.to_string());
        case bsoncxx::type::k_binary:
        {
            /// Raw bytes are not valid JSON text; emit them base64-encoded.
            const auto & binary = value.get_binary();
            auto encoded = base64Encode(std::string(reinterpret_cast<const char *>(binary.bytes), binary.size));
            return std::make_unique<JSONBuilder::JSONString>(std::move(encoded));
        }
        case bsoncxx::type::k_bool:
            return std::make_unique<JSONBuilder::JSONBool>(value.get_bool());
        case bsoncxx::type::k_int32:
            return std::make_unique<JSONBuilder::JSONNumber<Int32>>(value.get_int32());
        case bsoncxx::type::k_int64:
            return std::make_unique<JSONBuilder::JSONNumber<Int64>>(value.get_int64());
        case bsoncxx::type::k_double:
            return std::make_unique<JSONBuilder::JSONNumber<Float64>>(value.get_double());
        case bsoncxx::type::k_date:
            /// BSON dates are milliseconds since epoch; timeToString expects seconds.
            return std::make_unique<JSONBuilder::JSONString>(DateLUT::instance().timeToString(value.get_date().to_int64() / 1000));
        case bsoncxx::type::k_timestamp:
            return std::make_unique<JSONBuilder::JSONString>(DateLUT::instance().timeToString(value.get_timestamp().timestamp));
        case bsoncxx::type::k_document:
        {
            auto json_map = std::make_unique<JSONBuilder::JSONMap>();
            for (const auto & child : value.get_document().value)
                json_map->add(std::string(child.key()), BSONElementAsJSON(child));
            return json_map;
        }
        case bsoncxx::type::k_array:
        {
            auto json_array = std::make_unique<JSONBuilder::JSONArray>();
            for (const auto & child : value.get_array().value)
                json_array->add(BSONElementAsJSON(child));
            return json_array;
        }
        case bsoncxx::type::k_regex:
        {
            /// Rendered as a one-entry map: {pattern: options}.
            auto regex_map = std::make_unique<JSONBuilder::JSONMap>();
            regex_map->add(std::string(value.get_regex().regex), std::string(value.get_regex().options));
            return regex_map;
        }
        case bsoncxx::type::k_dbpointer:
        {
            /// Rendered as a one-entry map: {oid: collection}.
            auto pointer_map = std::make_unique<JSONBuilder::JSONMap>();
            pointer_map->add(value.get_dbpointer().value.to_string(), std::string(value.get_dbpointer().collection));
            return pointer_map;
        }
        case bsoncxx::type::k_null:
            return std::make_unique<JSONBuilder::JSONNull>();
        default:
            throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Serialization BSON type '{}' is not supported", bsoncxx::to_string(value.type()));
    }
}
/// Converts a single BSON element to its string representation for a
/// ClickHouse String column.
/// Scalars are formatted directly; composite types (documents, arrays, regex,
/// dbpointers, symbols) are serialized through BSONElementAsJSON, because
/// bsoncxx::to_json would produce extended-JSON like {'$numberInt': '321'}.
/// Throws NOT_IMPLEMENTED for BSON types with no textual form here.
template <typename T>
static std::string BSONElementAsString(const T & value, const JSONBuilder::FormatSettings & json_format_settings)
{
    switch (value.type())
    {
        case bsoncxx::type::k_string:
            return std::string(value.get_string().value);
        case bsoncxx::type::k_oid:
            /// 24-char hex representation of the ObjectId.
            return value.get_oid().value.to_string();
        case bsoncxx::type::k_binary:
            /// Raw bytes are copied verbatim (not base64, unlike the JSON path).
            return std::string(reinterpret_cast<const char *>(value.get_binary().bytes), value.get_binary().size);
        case bsoncxx::type::k_bool:
            return value.get_bool().value ? "true" : "false";
        case bsoncxx::type::k_int32:
            return std::to_string(static_cast<Int64>(value.get_int32().value));
        case bsoncxx::type::k_int64:
            return std::to_string(value.get_int64().value);
        case bsoncxx::type::k_double:
            return std::to_string(value.get_double().value);
        case bsoncxx::type::k_decimal128:
            return value.get_decimal128().value.to_string();
        case bsoncxx::type::k_date:
            /// BSON dates are milliseconds since epoch; timeToString expects seconds.
            return DateLUT::instance().timeToString(value.get_date().to_int64() / 1000);
        case bsoncxx::type::k_timestamp:
            return DateLUT::instance().timeToString(value.get_timestamp().timestamp);
        // MongoDB's documents and arrays may not have strict types or be nested, so the most optimal solution is store their JSON representations.
        // bsoncxx::to_json function will return something like "'number': {'$numberInt': '321'}", this why we have to use own implementation.
        case bsoncxx::type::k_document:
        case bsoncxx::type::k_array:
        case bsoncxx::type::k_regex:
        case bsoncxx::type::k_dbpointer:
        case bsoncxx::type::k_symbol:
        {
            /// NOTE(review): k_symbol is routed through the JSON path, which reads it
            /// via get_string() — presumably intentional since symbols carry a string
            /// payload; confirm against bsoncxx's element accessor contract.
            WriteBufferFromOwnString buf;
            auto format_context = JSONBuilder::FormatContext{.out = buf};
            BSONElementAsJSON(value)->format(json_format_settings, format_context);
            return buf.str();
        }
        case bsoncxx::type::k_undefined:
            return "undefined";
        case bsoncxx::type::k_null:
            return "null";
        default:
            throw Exception(ErrorCodes::NOT_IMPLEMENTED, "BSON type {} is unserializable.", bsoncxx::to_string(value.type()));
    }
}
/// Casts a numeric-like BSON element (bool/int32/int64/double) to the numeric
/// type T requested by the target column. `name` is the column name, used only
/// in the error message. Throws TYPE_MISMATCH for any other BSON type.
template <typename T, typename T2>
static T BSONElementAsNumber(const T2 & value, const std::string & name)
{
    const auto bson_type = value.type();
    if (bson_type == bsoncxx::type::k_bool)
        return static_cast<T>(value.get_bool());
    if (bson_type == bsoncxx::type::k_int32)
        return static_cast<T>(value.get_int32());
    if (bson_type == bsoncxx::type::k_int64)
        return static_cast<T>(value.get_int64());
    if (bson_type == bsoncxx::type::k_double)
        return static_cast<T>(value.get_double());

    throw Exception(
        ErrorCodes::TYPE_MISMATCH,
        "Type mismatch, {} cannot be converted to number for column {}.",
        bsoncxx::to_string(value.type()),
        name);
}
/// Converts a BSON array into a ClickHouse Array field.
/// `dimensions` is the number of *remaining* nesting levels expected by the
/// column type: while it is positive, every element must itself be an array
/// and we recurse one level deeper. At the innermost level each element is
/// converted to `type`; BSON nulls are replaced by `default_value`.
/// `name` is the column name, used only for error messages.
/// Throws TYPE_MISMATCH on shape/type mismatches and NOT_IMPLEMENTED for
/// unsupported nested types.
static Array BSONArrayAsArray(
    size_t dimensions,
    const bsoncxx::types::b_array & array,
    const DataTypePtr & type,
    const Field & default_value,
    const std::string & name,
    const JSONBuilder::FormatSettings & json_format_settings)
{
    auto arr = Array();
    if (dimensions > 0)
    {
        --dimensions;
        for (auto const & elem : array.value)
        {
            if (elem.type() != bsoncxx::type::k_array)
                throw Exception(ErrorCodes::TYPE_MISMATCH, "Array {} have less dimensions then defined in the schema.", name);
            arr.emplace_back(BSONArrayAsArray(dimensions, elem.get_array(), type, default_value, name, json_format_settings));
        }
    }
    else
    {
        /// All date-like ClickHouse types require a BSON date; the check and
        /// the error message are identical, so factor them out.
        auto ensure_date = [&name](const auto & elem)
        {
            if (elem.type() != bsoncxx::type::k_date)
                throw Exception(
                    ErrorCodes::TYPE_MISMATCH,
                    "Type mismatch, expected date, got {} for column {}.",
                    bsoncxx::to_string(elem.type()),
                    name);
        };
        for (auto const & value : array.value)
        {
            if (value.type() == bsoncxx::type::k_null)
                arr.emplace_back(default_value);
            else
            {
                switch (type->getTypeId())
                {
                    case TypeIndex::Int8:
                        arr.emplace_back(BSONElementAsNumber<Int8, bsoncxx::array::element>(value, name));
                        break;
                    case TypeIndex::UInt8:
                        arr.emplace_back(BSONElementAsNumber<UInt8, bsoncxx::array::element>(value, name));
                        break;
                    case TypeIndex::Int16:
                        arr.emplace_back(BSONElementAsNumber<Int16, bsoncxx::array::element>(value, name));
                        break;
                    case TypeIndex::UInt16:
                        arr.emplace_back(BSONElementAsNumber<UInt16, bsoncxx::array::element>(value, name));
                        break;
                    case TypeIndex::Int32:
                        arr.emplace_back(BSONElementAsNumber<Int32, bsoncxx::array::element>(value, name));
                        break;
                    case TypeIndex::UInt32:
                        arr.emplace_back(BSONElementAsNumber<UInt32, bsoncxx::array::element>(value, name));
                        break;
                    case TypeIndex::Int64:
                        arr.emplace_back(BSONElementAsNumber<Int64, bsoncxx::array::element>(value, name));
                        break;
                    case TypeIndex::UInt64:
                        arr.emplace_back(BSONElementAsNumber<UInt64, bsoncxx::array::element>(value, name));
                        break;
                    case TypeIndex::Int128:
                        arr.emplace_back(BSONElementAsNumber<Int128, bsoncxx::array::element>(value, name));
                        break;
                    case TypeIndex::UInt128:
                        arr.emplace_back(BSONElementAsNumber<UInt128, bsoncxx::array::element>(value, name));
                        break;
                    case TypeIndex::Int256:
                        arr.emplace_back(BSONElementAsNumber<Int256, bsoncxx::array::element>(value, name));
                        break;
                    case TypeIndex::UInt256:
                        arr.emplace_back(BSONElementAsNumber<UInt256, bsoncxx::array::element>(value, name));
                        break;
                    case TypeIndex::Float32:
                        arr.emplace_back(BSONElementAsNumber<Float32, bsoncxx::array::element>(value, name));
                        break;
                    case TypeIndex::Float64:
                        arr.emplace_back(BSONElementAsNumber<Float64, bsoncxx::array::element>(value, name));
                        break;
                    /// Date and Date32 are both stored as a day number; the two
                    /// cases were byte-identical, so they share one body.
                    case TypeIndex::Date:
                    case TypeIndex::Date32: {
                        ensure_date(value);
                        /// BSON dates are milliseconds since epoch -> seconds -> day number.
                        arr.emplace_back(DateLUT::instance().toDayNum(value.get_date().to_int64() / 1000).toUnderType());
                        break;
                    }
                    case TypeIndex::DateTime: {
                        ensure_date(value);
                        arr.emplace_back(static_cast<UInt32>(value.get_date().to_int64() / 1000));
                        break;
                    }
                    case TypeIndex::DateTime64: {
                        ensure_date(value);
                        /// DateTime64 keeps millisecond precision.
                        arr.emplace_back(static_cast<Decimal64>(value.get_date().to_int64()));
                        break;
                    }
                    case TypeIndex::UUID: {
                        if (value.type() != bsoncxx::type::k_string)
                            throw Exception(
                                ErrorCodes::TYPE_MISMATCH,
                                "Type mismatch, expected string (UUID), got {} for column {}.",
                                bsoncxx::to_string(value.type()),
                                name);
                        arr.emplace_back(parse<UUID>(value.get_string().value.data()));
                        break;
                    }
                    case TypeIndex::String:
                        arr.emplace_back(BSONElementAsString(value, json_format_settings));
                        break;
                    default:
                        throw Exception(
                            ErrorCodes::NOT_IMPLEMENTED,
                            "Array {} has unsupported nested type {}.",
                            name,
                            type->getName());
                }
            }
        }
    }
    return arr;
}
/// Interprets a Field as a MongoDB ObjectId value.
/// Strings are converted directly via bsoncxx::oid; arrays and tuples are
/// converted element-wise (recursively) into BSON arrays of ObjectIds.
/// Throws TYPE_MISMATCH for any other Field type.
static bsoncxx::types::bson_value::value fieldAsOID(const Field & field)
{
    switch (field.getType())
    {
        case Field::Types::String:
            return bsoncxx::oid(field.safeGet<String &>());
        case Field::Types::Array: {
            auto oid_list = array();
            for (const auto & item : field.safeGet<Array &>())
                oid_list.append(fieldAsOID(item));
            return oid_list.view();
        }
        case Field::Types::Tuple: {
            /// Tuples have no BSON counterpart; emit them as arrays too.
            auto oid_list = array();
            for (const auto & item : field.safeGet<Tuple &>())
                oid_list.append(fieldAsOID(item));
            return oid_list.view();
        }
        default:
            throw Exception(ErrorCodes::TYPE_MISMATCH, "{} can't be converted to oid.", field.getType());
    }
}
}
}
#endif

View File

@ -35,7 +35,7 @@ void JSONArray::format(const FormatSettings & settings, FormatContext & context)
context.offset += settings.indent;
bool single_row = settings.solid || (settings.print_simple_arrays_in_single_row && isSimpleArray(values));
bool single_row = settings.print_simple_arrays_in_single_row && isSimpleArray(values);
bool first = true;
for (const auto & value : values)
@ -48,7 +48,7 @@ void JSONArray::format(const FormatSettings & settings, FormatContext & context)
writeChar('\n', context.out);
writeChar(' ', context.offset, context.out);
}
else if (!first && !settings.solid)
else if (!first)
writeChar(' ', context.out);
first = false;
@ -80,33 +80,20 @@ void JSONMap::format(const FormatSettings & settings, FormatContext & context)
writeChar(',', context.out);
first = false;
if (!settings.solid)
{
writeChar('\n', context.out);
writeChar(' ', context.offset, context.out);
}
writeChar('\n', context.out);
writeChar(' ', context.offset, context.out);
writeJSONString(value.key, context.out, settings.settings);
writeChar(':', context.out);
if (!settings.solid)
writeChar(' ', context.out);
writeChar(' ', context.out);
value.value->format(settings, context);
}
context.offset -= settings.indent;
if (!settings.solid)
{
writeChar('\n', context.out);
writeChar(' ', context.offset, context.out);
}
writeChar('\n', context.out);
writeChar(' ', context.offset, context.out);
writeChar('}', context.out);
}
void JSONNull::format(const FormatSettings &, FormatContext & context)
{
writeString("null", context.out);
}
}

View File

@ -13,7 +13,6 @@ struct FormatSettings
const DB::FormatSettings & settings;
size_t indent = 2;
bool print_simple_arrays_in_single_row = true;
bool solid = false; // the output will not contain spaces and line breaks
};
struct FormatContext
@ -112,10 +111,4 @@ private:
std::vector<Pair> values;
};
class JSONNull : public IItem
{
public:
void format(const FormatSettings & settings, FormatContext & context) override;
};
}

View File

@ -67,7 +67,6 @@
#cmakedefine01 USE_LIBARCHIVE
#cmakedefine01 USE_POCKETFFT
#cmakedefine01 USE_PROMETHEUS_PROTOBUFS
#cmakedefine01 USE_MONGODB
#cmakedefine01 USE_NUMACTL
/// This is needed for .incbin in assembly. For some reason, include paths don't work there in presence of LTO.

View File

@ -1,14 +0,0 @@
#pragma once
#include <Common/re2.h>
namespace DB
{
inline bool maskURIPassword(std::string * uri)
{
return RE2::Replace(uri, R"(([^:]+://[^:]*):([^@]*)@(.*))", "\\1:[HIDDEN]@\\3");
}
}

View File

@ -173,8 +173,7 @@ namespace DB
M(Double, gwp_asan_force_sample_probability, 0.0003, "Probability that an allocation from specific places will be sampled by GWP Asan (i.e. PODArray allocations)", 0) \
M(UInt64, config_reload_interval_ms, 2000, "How often clickhouse will reload config and check for new changes", 0) \
M(UInt64, memory_worker_period_ms, 0, "Tick period of background memory worker which corrects memory tracker memory usages and cleans up unused pages during higher memory usage. If set to 0, default value will be used depending on the memory usage source", 0) \
M(Bool, disable_insertion_and_mutation, false, "Disable all insert/alter/delete queries. This setting will be enabled if someone needs read-only nodes to prevent insertion and mutation affect reading performance.", 0) \
M(Bool, use_legacy_mongodb_integration, true, "Use the legacy MongoDB integration implementation. Deprecated.", 0)
M(Bool, disable_insertion_and_mutation, false, "Disable all insert/alter/delete queries. This setting will be enabled if someone needs read-only nodes to prevent insertion and mutation affect reading performance.", 0)
/// If you add a setting which can be updated at runtime, please update 'changeable_settings' map in StorageSystemServerSettings.cpp

View File

@ -918,7 +918,6 @@ namespace ErrorCodes
M(Bool, restore_replace_external_engines_to_null, false, "Replace all the external table engines to Null on restore. Useful for testing purposes", 0) \
M(Bool, restore_replace_external_table_functions_to_null, false, "Replace all table functions to Null on restore. Useful for testing purposes", 0) \
M(Bool, create_if_not_exists, false, "Enable IF NOT EXISTS for CREATE statements by default", 0) \
M(Bool, mongodb_throw_on_unsupported_query, true, "If enabled, MongoDB tables will return an error when a MongoDB query cannot be built. Otherwise, ClickHouse reads the full table and processes it locally. This option does not apply to the legacy implementation or when 'allow_experimental_analyzer=0'.", 0) \
\
\
/* ###################################### */ \

View File

@ -85,8 +85,7 @@ static std::initializer_list<std::pair<ClickHouseVersion, SettingsChangesHistory
{"parallel_replicas_local_plan", false, false, "Use local plan for local replica in a query with parallel replicas"},
{"join_to_sort_minimum_perkey_rows", 0, 40, "The lower limit of per-key average rows in the right table to determine whether to rerange the right table by key in left or inner join. This setting ensures that the optimization is not applied for sparse table keys"},
{"join_to_sort_maximum_table_rows", 0, 10000, "The maximum number of rows in the right table to determine whether to rerange the right table by key in left or inner join"},
{"allow_experimental_join_right_table_sorting", false, false, "If it is set to true, and the conditions of `join_to_sort_minimum_perkey_rows` and `join_to_sort_maximum_table_rows` are met, rerange the right table by key to improve the performance in left or inner hash join"},
{"mongodb_throw_on_unsupported_query", false, true, "New setting."},
{"allow_experimental_join_right_table_sorting", false, false, "If it is set to true, and the conditions of `join_to_sort_minimum_perkey_rows` and `join_to_sort_maximum_table_rows` are met, rerange the right table by key to improve the performance in left or inner hash join"}
}
},
{"24.8",

View File

@ -37,13 +37,10 @@ target_link_libraries(clickhouse_dictionaries
clickhouse_common_io
dbms
Poco::Data
Poco::MongoDB
Poco::Redis
)
if (USE_MONGODB)
target_link_libraries(clickhouse_dictionaries PRIVATE Poco::MongoDB)
endif()
target_link_libraries(clickhouse_dictionaries PUBLIC ch_contrib::abseil_swiss_tables)
if (TARGET ch_contrib::cassandra)

View File

@ -1,130 +1,183 @@
#include "config.h"
#include "DictionarySourceFactory.h"
#if USE_MONGODB
#include "MongoDBDictionarySource.h"
#include "DictionaryStructure.h"
#include <Common/logger_useful.h>
#include <Processors/Sources/MongoDBSource.h>
#include <Storages/NamedCollectionsHelpers.h>
#include <bsoncxx/builder/basic/array.hpp>
using bsoncxx::builder::basic::kvp;
using bsoncxx::builder::basic::make_document;
using bsoncxx::builder::basic::array;
#endif
#include <Storages/StorageMongoDBSocketFactory.h>
#include <Common/RemoteHostFilter.h>
#include "DictionarySourceFactory.h"
#include "DictionaryStructure.h"
namespace DB
{
namespace ErrorCodes
{
#if USE_MONGODB
extern const int UNSUPPORTED_METHOD;
extern const int LOGICAL_ERROR;
#else
extern const int SUPPORT_IS_DISABLED;
#endif
}
void registerDictionarySourceMongoDB(DictionarySourceFactory & factory)
{
#if USE_MONGODB
auto create_dictionary_source = [](
auto create_mongo_db_dictionary = [](
const DictionaryStructure & dict_struct,
const Poco::Util::AbstractConfiguration & config,
const std::string & root_config_prefix,
Block & sample_block,
ContextPtr context,
const std::string & /* default_database */,
bool /* created_from_ddl */)
bool created_from_ddl)
{
const auto config_prefix = root_config_prefix + ".mongodb";
auto configuration = std::make_shared<MongoDBConfiguration>();
if (auto named_collection = tryGetNamedCollectionWithOverrides(config, config_prefix, context))
auto named_collection = created_from_ddl ? tryGetNamedCollectionWithOverrides(config, config_prefix, context) : nullptr;
String host, username, password, database, method, options, collection;
UInt16 port;
if (named_collection)
{
if (named_collection->has("uri"))
{
validateNamedCollection(*named_collection, {"collection"}, {});
configuration->uri = std::make_unique<mongocxx::uri>(named_collection->get<String>("uri"));
}
else
{
validateNamedCollection(*named_collection, {"host", "db", "collection"}, {"port", "user", "password", "options"});
String user = named_collection->get<String>("user");
String auth_string;
if (!user.empty())
auth_string = fmt::format("{}:{}@", user, named_collection->get<String>("password"));
configuration->uri = std::make_unique<mongocxx::uri>(fmt::format("mongodb://{}{}:{}/{}?{}",
auth_string,
named_collection->get<String>("host"),
named_collection->getOrDefault<String>("port", "27017"),
named_collection->get<String>("db"),
named_collection->getOrDefault<String>("options", "")));
}
configuration->collection = named_collection->get<String>("collection");
validateNamedCollection(
*named_collection,
/* required_keys */{"collection"},
/* optional_keys */ValidateKeysMultiset<ExternalDatabaseEqualKeysSet>{
"host", "port", "user", "password", "db", "database", "uri", "name", "method", "options"});
host = named_collection->getOrDefault<String>("host", "");
port = static_cast<UInt16>(named_collection->getOrDefault<UInt64>("port", 0));
username = named_collection->getOrDefault<String>("user", "");
password = named_collection->getOrDefault<String>("password", "");
database = named_collection->getAnyOrDefault<String>({"db", "database"}, "");
method = named_collection->getOrDefault<String>("method", "");
collection = named_collection->getOrDefault<String>("collection", "");
options = named_collection->getOrDefault<String>("options", "");
}
else
{
configuration->collection = config.getString(config_prefix + ".collection");
auto uri_str = config.getString(config_prefix + ".uri", "");
if (!uri_str.empty())
configuration->uri = std::make_unique<mongocxx::uri>(uri_str);
else
{
String user = config.getString(config_prefix + ".user", "");
String auth_string;
if (!user.empty())
auth_string = fmt::format("{}:{}@", user, config.getString(config_prefix + ".password", ""));
configuration->uri = std::make_unique<mongocxx::uri>(fmt::format("mongodb://{}{}:{}/{}?{}",
auth_string,
config.getString(config_prefix + ".host"),
config.getString(config_prefix + ".port", "27017"),
config.getString(config_prefix + ".db"),
config.getString(config_prefix + ".options", "")));
}
host = config.getString(config_prefix + ".host", "");
port = config.getUInt(config_prefix + ".port", 0);
username = config.getString(config_prefix + ".user", "");
password = config.getString(config_prefix + ".password", "");
database = config.getString(config_prefix + ".db", "");
method = config.getString(config_prefix + ".method", "");
collection = config.getString(config_prefix + ".collection");
options = config.getString(config_prefix + ".options", "");
}
configuration->checkHosts(context);
if (created_from_ddl)
context->getRemoteHostFilter().checkHostAndPort(host, toString(port));
return std::make_unique<MongoDBDictionarySource>(dict_struct, std::move(configuration), std::move(sample_block));
return std::make_unique<MongoDBDictionarySource>(
dict_struct,
config.getString(config_prefix + ".uri", ""),
host,
port,
username,
password,
method,
database,
collection,
options,
sample_block);
};
#else
auto create_dictionary_source = [](
const DictionaryStructure & /* dict_struct */,
const Poco::Util::AbstractConfiguration & /* config */,
const std::string & /* root_config_prefix */,
Block & /* sample_block */,
ContextPtr /* context */,
const std::string & /* default_database */,
bool /* created_from_ddl */) -> DictionarySourcePtr
{
throw Exception(ErrorCodes::SUPPORT_IS_DISABLED,
"Dictionary source of type `mongodb` is disabled because ClickHouse was built without mongodb support.");
};
#endif
factory.registerSource("mongodb", create_dictionary_source);
factory.registerSource("mongodb", create_mongo_db_dictionary);
}
#if USE_MONGODB
}
#include <Common/logger_useful.h>
#include <Poco/MongoDB/Array.h>
#include <Poco/MongoDB/Connection.h>
#include <Poco/MongoDB/Cursor.h>
#include <Poco/MongoDB/Database.h>
#include <Poco/MongoDB/ObjectId.h>
#include <Poco/URI.h>
#include <Poco/Util/AbstractConfiguration.h>
// only after poco
// naming conflict:
// Poco/MongoDB/BSONWriter.h:54: void writeCString(const std::string & value);
// src/IO/WriteHelpers.h:146 #define writeCString(s, buf)
#include <IO/WriteHelpers.h>
namespace DB
{
namespace ErrorCodes
{
extern const int NOT_IMPLEMENTED;
extern const int UNSUPPORTED_METHOD;
extern const int MONGODB_CANNOT_AUTHENTICATE;
}
static const UInt64 max_block_size = 8192;
MongoDBDictionarySource::MongoDBDictionarySource(
const DictionaryStructure & dict_struct_,
std::shared_ptr<MongoDBConfiguration> configuration_,
Block sample_block_)
const std::string & uri_,
const std::string & host_,
UInt16 port_,
const std::string & user_,
const std::string & password_,
const std::string & method_,
const std::string & db_,
const std::string & collection_,
const std::string & options_,
const Block & sample_block_)
: dict_struct{dict_struct_}
, configuration{configuration_}
, uri{uri_}
, host{host_}
, port{port_}
, user{user_}
, password{password_}
, method{method_}
, db{db_}
, collection{collection_}
, options(options_)
, sample_block{sample_block_}
, connection{std::make_shared<Poco::MongoDB::Connection>()}
{
StorageMongoDBSocketFactory socket_factory;
if (!uri.empty())
{
// Connect with URI.
connection->connect(uri, socket_factory);
Poco::URI poco_uri(connection->uri());
// Parse database from URI. This is required for correctness -- the
// cursor is created using database name and collection name, so we have
// to specify them properly.
db = poco_uri.getPath();
// getPath() may return a leading slash, remove it.
if (!db.empty() && db[0] == '/')
{
db.erase(0, 1);
}
// Parse some other parts from URI, for logging and display purposes.
host = poco_uri.getHost();
port = poco_uri.getPort();
user = poco_uri.getUserInfo();
if (size_t separator = user.find(':'); separator != std::string::npos)
{
user.resize(separator);
}
}
else
{
// Connect with host/port/user/etc through constructing the uri
std::string uri_constructed("mongodb://" + host + ":" + std::to_string(port) + "/" + db + (options.empty() ? "" : "?" + options));
connection->connect(uri_constructed, socket_factory);
if (!user.empty())
{
Poco::MongoDB::Database poco_db(db);
if (!poco_db.authenticate(*connection, user, password, method.empty() ? Poco::MongoDB::Database::AUTH_SCRAM_SHA1 : method))
throw Exception(ErrorCodes::MONGODB_CANNOT_AUTHENTICATE, "Cannot authenticate in MongoDB, incorrect user or password");
}
}
}
MongoDBDictionarySource::MongoDBDictionarySource(const MongoDBDictionarySource & other)
: MongoDBDictionarySource{other.dict_struct, other.configuration, other.sample_block}
: MongoDBDictionarySource{
other.dict_struct, other.uri, other.host, other.port, other.user, other.password, other.method, other.db,
other.collection, other.options, other.sample_block
}
{
}
@ -132,7 +185,7 @@ MongoDBDictionarySource::~MongoDBDictionarySource() = default;
QueryPipeline MongoDBDictionarySource::loadAll()
{
return QueryPipeline(std::make_shared<MongoDBSource>(*configuration->uri, configuration->collection, make_document(), mongocxx::options::find(), sample_block, max_block_size));
return QueryPipeline(std::make_shared<MongoDBSource>(connection, db, collection, Poco::MongoDB::Document{}, sample_block, max_block_size));
}
QueryPipeline MongoDBDictionarySource::loadIds(const std::vector<UInt64> & ids)
@ -140,11 +193,19 @@ QueryPipeline MongoDBDictionarySource::loadIds(const std::vector<UInt64> & ids)
if (!dict_struct.id)
throw Exception(ErrorCodes::UNSUPPORTED_METHOD, "'id' is required for selective loading");
auto ids_array = array();
for (const auto & id : ids)
ids_array.append(static_cast<Int64>(id));
Poco::MongoDB::Document query;
return QueryPipeline(std::make_shared<MongoDBSource>(*configuration->uri, configuration->collection, make_document(kvp(dict_struct.id->name, make_document(kvp("$in", ids_array)))), mongocxx::options::find(), sample_block, max_block_size));
/** NOTE: While building array, Poco::MongoDB requires passing of different unused element names, along with values.
* In general, Poco::MongoDB is quite inefficient and bulky.
*/
Poco::MongoDB::Array::Ptr ids_array(new Poco::MongoDB::Array);
for (const UInt64 id : ids)
ids_array->add(DB::toString(id), static_cast<Int32>(id));
query.addNewDocument(dict_struct.id->name).add("$in", ids_array);
return QueryPipeline(std::make_shared<MongoDBSource>(connection, db, collection, query, sample_block, max_block_size));
}
@ -153,41 +214,68 @@ QueryPipeline MongoDBDictionarySource::loadKeys(const Columns & key_columns, con
if (!dict_struct.key)
throw Exception(ErrorCodes::UNSUPPORTED_METHOD, "'key' is required for selective loading");
if (key_columns.size() != dict_struct.key->size())
throw Exception(ErrorCodes::LOGICAL_ERROR, "The size of key_columns does not equal to the size of dictionary key");
Poco::MongoDB::Document query;
Poco::MongoDB::Array::Ptr keys_array(new Poco::MongoDB::Array);
auto keys = array();
for (const auto & row : requested_rows)
for (const auto row_idx : requested_rows)
{
auto key = array();
for (size_t i = 0; i < key_columns.size(); i++)
{
const auto & dict_key = dict_struct.key->at(i);
WhichDataType type(dict_key.type);
auto & key = keys_array->addNewDocument(DB::toString(row_idx));
if (isBool(dict_key.type))
key.append(make_document(kvp(dict_key.name, key_columns[i]->getBool(row))));
else if (type.isUInt())
key.append(make_document(kvp(dict_key.name, static_cast<Int64>(key_columns[i]->getUInt(row)))));
else if (type.isFloat64())
key.append(make_document(kvp(dict_key.name, key_columns[i]->getFloat64(row))));
else if (type.isInt())
key.append(make_document(kvp(dict_key.name, key_columns[i]->getInt(row))));
else if (type.isString())
key.append(make_document(kvp(dict_key.name, key_columns[i]->getDataAt(row).toString())));
else
throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected type '{}' of key in MongoDB dictionary", dict_key.type->getName());
const auto & key_attributes = *dict_struct.key;
for (size_t attribute_index = 0; attribute_index < key_attributes.size(); ++attribute_index)
{
const auto & key_attribute = key_attributes[attribute_index];
switch (key_attribute.underlying_type)
{
case AttributeUnderlyingType::UInt8:
case AttributeUnderlyingType::UInt16:
case AttributeUnderlyingType::UInt32:
case AttributeUnderlyingType::UInt64:
case AttributeUnderlyingType::Int8:
case AttributeUnderlyingType::Int16:
case AttributeUnderlyingType::Int32:
case AttributeUnderlyingType::Int64:
{
key.add(key_attribute.name, static_cast<Int32>(key_columns[attribute_index]->get64(row_idx)));
break;
}
case AttributeUnderlyingType::Float32:
case AttributeUnderlyingType::Float64:
{
key.add(key_attribute.name, key_columns[attribute_index]->getFloat64(row_idx));
break;
}
case AttributeUnderlyingType::String:
{
String loaded_str((*key_columns[attribute_index])[row_idx].safeGet<String>());
/// Convert string to ObjectID
if (key_attribute.is_object_id)
{
Poco::MongoDB::ObjectId::Ptr loaded_id(new Poco::MongoDB::ObjectId(loaded_str));
key.add(key_attribute.name, loaded_id);
}
else
{
key.add(key_attribute.name, loaded_str);
}
break;
}
default:
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Unsupported dictionary attribute type for MongoDB dictionary source");
}
}
keys.append(make_document(kvp("$and", key)));
}
return QueryPipeline(std::make_shared<MongoDBSource>(*configuration->uri, configuration->collection, make_document(kvp("$or", keys)), mongocxx::options::find(), sample_block, max_block_size));
/// If more than one key we should use $or
query.add("$or", keys_array);
return QueryPipeline(std::make_shared<MongoDBSource>(connection, db, collection, query, sample_block, max_block_size));
}
std::string MongoDBDictionarySource::toString() const
{
return fmt::format("MongoDB: {}", configuration->uri->to_string());
return fmt::format("MongoDB: {}.{},{}{}:{}", db, collection, (user.empty() ? " " : " " + user + '@'), host, port);
}
#endif
}

View File

@ -1,13 +1,23 @@
#pragma once
#include "config.h"
#include <Processors/Sources/MongoDBSource.h>
#include <Core/Block.h>
#if USE_MONGODB
#include "DictionaryStructure.h"
#include "IDictionarySource.h"
#include <Core/Block.h>
#include <Storages/StorageMongoDB.h>
namespace Poco
{
namespace Util
{
class AbstractConfiguration;
}
namespace MongoDB
{
class Connection;
}
}
namespace DB
{
@ -22,8 +32,16 @@ class MongoDBDictionarySource final : public IDictionarySource
public:
MongoDBDictionarySource(
const DictionaryStructure & dict_struct_,
std::shared_ptr<MongoDBConfiguration> configuration_,
Block sample_block_);
const std::string & uri_,
const std::string & host_,
UInt16 port_,
const std::string & user_,
const std::string & password_,
const std::string & method_,
const std::string & db_,
const std::string & collection_,
const std::string & options,
const Block & sample_block_);
MongoDBDictionarySource(const MongoDBDictionarySource & other);
@ -45,7 +63,7 @@ public:
/// @todo: for MongoDB, modification date can somehow be determined from the `_id` object field
bool isModified() const override { return true; }
/// Not yet supported
///Not yet supported
bool hasUpdateField() const override { return false; }
DictionarySourcePtr clone() const override { return std::make_shared<MongoDBDictionarySource>(*this); }
@ -54,9 +72,18 @@ public:
private:
const DictionaryStructure dict_struct;
const std::shared_ptr<MongoDBConfiguration> configuration;
const std::string uri;
std::string host;
UInt16 port;
std::string user;
const std::string password;
const std::string method;
std::string db;
const std::string collection;
const std::string options;
Block sample_block;
std::shared_ptr<Poco::MongoDB::Connection> connection;
};
}
#endif

View File

@ -1,305 +0,0 @@
#include "config.h"
#include "DictionarySourceFactory.h"
#if USE_MONGODB
#include <Common/RemoteHostFilter.h>
#include "MongoDBPocoLegacyDictionarySource.h"
#include "DictionaryStructure.h"
#include "registerDictionaries.h"
#include <Storages/StorageMongoDBPocoLegacySocketFactory.h>
#include <Storages/NamedCollectionsHelpers.h>
#endif
namespace DB
{
namespace ErrorCodes
{
#if USE_MONGODB
extern const int NOT_IMPLEMENTED;
extern const int UNSUPPORTED_METHOD;
extern const int MONGODB_CANNOT_AUTHENTICATE;
#else
extern const int SUPPORT_IS_DISABLED;
#endif
}
/// Registers the legacy (Poco-based) MongoDB dictionary source under the name "mongodb".
/// When ClickHouse is built without MongoDB support (USE_MONGODB undefined), a stub is
/// registered instead that throws SUPPORT_IS_DISABLED as soon as the source is used.
void registerDictionarySourceMongoDBPocoLegacy(DictionarySourceFactory & factory)
{
#if USE_MONGODB
    auto create_mongo_db_dictionary = [](
        const DictionaryStructure & dict_struct,
        const Poco::Util::AbstractConfiguration & config,
        const std::string & root_config_prefix,
        Block & sample_block,
        ContextPtr context,
        const std::string & /* default_database */,
        bool created_from_ddl)
    {
        const auto config_prefix = root_config_prefix + ".mongodb";
        /// Named collections are resolved only for dictionaries created via DDL queries.
        auto named_collection = created_from_ddl ? tryGetNamedCollectionWithOverrides(config, config_prefix, context) : nullptr;

        String host, username, password, database, method, options, collection;
        UInt16 port;
        if (named_collection)
        {
            /// Connection parameters come from the named collection; only "collection" is mandatory.
            validateNamedCollection(
                *named_collection,
                /* required_keys */{"collection"},
                /* optional_keys */ValidateKeysMultiset<ExternalDatabaseEqualKeysSet>{
                    "host", "port", "user", "password", "db", "database", "uri", "name", "method", "options"});

            host = named_collection->getOrDefault<String>("host", "");
            port = static_cast<UInt16>(named_collection->getOrDefault<UInt64>("port", 0));
            username = named_collection->getOrDefault<String>("user", "");
            password = named_collection->getOrDefault<String>("password", "");
            /// "db" and "database" are accepted as synonyms.
            database = named_collection->getAnyOrDefault<String>({"db", "database"}, "");
            method = named_collection->getOrDefault<String>("method", "");
            collection = named_collection->getOrDefault<String>("collection", "");
            options = named_collection->getOrDefault<String>("options", "");
        }
        else
        {
            /// Fall back to reading parameters directly from the dictionary config.
            /// Note: ".collection" has no default here, so a missing key throws.
            host = config.getString(config_prefix + ".host", "");
            port = config.getUInt(config_prefix + ".port", 0);
            username = config.getString(config_prefix + ".user", "");
            password = config.getString(config_prefix + ".password", "");
            database = config.getString(config_prefix + ".db", "");
            method = config.getString(config_prefix + ".method", "");
            collection = config.getString(config_prefix + ".collection");
            options = config.getString(config_prefix + ".options", "");
        }

        /// DDL-created dictionaries must pass the remote host filter so users cannot
        /// point a dictionary at arbitrary internal hosts.
        if (created_from_ddl)
            context->getRemoteHostFilter().checkHostAndPort(host, toString(port));

        return std::make_unique<MongoDBPocoLegacyDictionarySource>(dict_struct,
            config.getString(config_prefix + ".uri", ""),
            host,
            port,
            username,
            password,
            method,
            database,
            collection,
            options,
            sample_block);
    };
#else
    auto create_mongo_db_dictionary = [](
        const DictionaryStructure & /* dict_struct */,
        const Poco::Util::AbstractConfiguration & /* config */,
        const std::string & /* root_config_prefix */,
        Block & /* sample_block */,
        ContextPtr /* context */,
        const std::string & /* default_database */,
        bool /* created_from_ddl */) -> DictionarySourcePtr
    {
        throw Exception(ErrorCodes::SUPPORT_IS_DISABLED,
            "Dictionary source of type `mongodb` is disabled because ClickHouse was built without mongodb support.");
    };
#endif

    factory.registerSource("mongodb", create_mongo_db_dictionary);
}
}
#if USE_MONGODB
#include <Common/logger_useful.h>
#include <Poco/MongoDB/Array.h>
#include <Poco/MongoDB/Connection.h>
#include <Poco/MongoDB/Cursor.h>
#include <Poco/MongoDB/Database.h>
#include <Poco/MongoDB/ObjectId.h>
#include <Poco/URI.h>
#include <Poco/Util/AbstractConfiguration.h>
// only after poco
// naming conflict:
// Poco/MongoDB/BSONWriter.h:54: void writeCString(const std::string & value);
// src/IO/WriteHelpers.h:146 #define writeCString(s, buf)
#include <IO/WriteHelpers.h>
namespace DB
{
static const UInt64 max_block_size = 8192;
/// Establishes the MongoDB connection at construction time.
/// Either a complete connection `uri_` is supplied (host/port/db/user are then parsed back
/// out of it, since the cursor needs the database name and logging needs the rest), or the
/// URI is assembled from the individual host/port/db/options parameters.
MongoDBPocoLegacyDictionarySource::MongoDBPocoLegacyDictionarySource(
    const DictionaryStructure & dict_struct_,
    const std::string & uri_,
    const std::string & host_,
    UInt16 port_,
    const std::string & user_,
    const std::string & password_,
    const std::string & method_,
    const std::string & db_,
    const std::string & collection_,
    const std::string & options_,
    const Block & sample_block_)
    : dict_struct{dict_struct_}
    , uri{uri_}
    , host{host_}
    , port{port_}
    , user{user_}
    , password{password_}
    , method{method_}
    , db{db_}
    , collection{collection_}
    , options(options_)
    , sample_block{sample_block_}
    , connection{std::make_shared<Poco::MongoDB::Connection>()}
{
    StorageMongoDBPocoLegacySocketFactory socket_factory;
    if (!uri.empty())
    {
        // Connect with URI.
        connection->connect(uri, socket_factory);

        Poco::URI poco_uri(connection->uri());

        // Parse database from URI. This is required for correctness -- the
        // cursor is created using database name and collection name, so we have
        // to specify them properly.
        db = poco_uri.getPath();
        // getPath() may return a leading slash, remove it.
        if (!db.empty() && db[0] == '/')
        {
            db.erase(0, 1);
        }

        // Parse some other parts from URI, for logging and display purposes.
        host = poco_uri.getHost();
        port = poco_uri.getPort();
        user = poco_uri.getUserInfo();
        // Drop the ":password" tail of "user:password" so only the user name is kept.
        if (size_t separator = user.find(':'); separator != std::string::npos)
        {
            user.resize(separator);
        }
    }
    else
    {
        // Connect with host/port/user/etc through constructing the uri
        std::string uri_constructed("mongodb://" + host + ":" + std::to_string(port) + "/" + db + (options.empty() ? "" : "?" + options));
        connection->connect(uri_constructed, socket_factory);

        if (!user.empty())
        {
            Poco::MongoDB::Database poco_db(db);
            // Authenticate explicitly; defaults to SCRAM-SHA-1 when no method is configured.
            if (!poco_db.authenticate(*connection, user, password, method.empty() ? Poco::MongoDB::Database::AUTH_SCRAM_SHA1 : method))
                throw Exception(ErrorCodes::MONGODB_CANNOT_AUTHENTICATE, "Cannot authenticate in MongoDB, incorrect user or password");
        }
    }
}
/// Copy construction delegates to the main constructor with the other instance's
/// parameters, i.e. it opens a fresh connection rather than sharing the existing one.
MongoDBPocoLegacyDictionarySource::MongoDBPocoLegacyDictionarySource(const MongoDBPocoLegacyDictionarySource & other)
    : MongoDBPocoLegacyDictionarySource{
        other.dict_struct, other.uri, other.host, other.port, other.user, other.password, other.method, other.db,
        other.collection, other.options, other.sample_block
    }
{
}

MongoDBPocoLegacyDictionarySource::~MongoDBPocoLegacyDictionarySource() = default;
QueryPipeline MongoDBPocoLegacyDictionarySource::loadAll()
{
    /// An empty filter document selects every document in the collection.
    Poco::MongoDB::Document match_everything{};
    auto source = std::make_shared<MongoDBPocoLegacySource>(connection, db, collection, match_everything, sample_block, max_block_size);
    return QueryPipeline(std::move(source));
}
QueryPipeline MongoDBPocoLegacyDictionarySource::loadIds(const std::vector<UInt64> & ids)
{
    if (!dict_struct.id)
        throw Exception(ErrorCodes::UNSUPPORTED_METHOD, "'id' is required for selective loading");

    /** NOTE: While building the array, Poco::MongoDB requires passing distinct (unused)
      * element names along with the values. In general, Poco::MongoDB is quite inefficient and bulky.
      */
    Poco::MongoDB::Array::Ptr requested_ids(new Poco::MongoDB::Array);
    for (const UInt64 id : ids)
        requested_ids->add(DB::toString(id), static_cast<Int32>(id));

    /// Selector of the form { <id_column>: { "$in": [ ... ] } }.
    Poco::MongoDB::Document selector;
    selector.addNewDocument(dict_struct.id->name).add("$in", requested_ids);

    return QueryPipeline(std::make_shared<MongoDBPocoLegacySource>(connection, db, collection, selector, sample_block, max_block_size));
}
/// Loads only the rows whose composite keys are listed in `requested_rows`.
/// Builds a MongoDB query of the form { $or: [ {k1: v1, k2: v2, ...}, ... ] } with one
/// sub-document per requested row, then streams matching documents through a source.
QueryPipeline MongoDBPocoLegacyDictionarySource::loadKeys(const Columns & key_columns, const std::vector<size_t> & requested_rows)
{
    if (!dict_struct.key)
        throw Exception(ErrorCodes::UNSUPPORTED_METHOD, "'key' is required for selective loading");

    Poco::MongoDB::Document query;
    Poco::MongoDB::Array::Ptr keys_array(new Poco::MongoDB::Array);

    for (const auto row_idx : requested_rows)
    {
        /// One sub-document per requested row; the element name (row index) is required
        /// by Poco::MongoDB but otherwise unused.
        auto & key = keys_array->addNewDocument(DB::toString(row_idx));
        const auto & key_attributes = *dict_struct.key;
        for (size_t attribute_index = 0; attribute_index < key_attributes.size(); ++attribute_index)
        {
            const auto & key_attribute = key_attributes[attribute_index];
            switch (key_attribute.underlying_type)
            {
                case AttributeUnderlyingType::UInt8:
                case AttributeUnderlyingType::UInt16:
                case AttributeUnderlyingType::UInt32:
                case AttributeUnderlyingType::UInt64:
                case AttributeUnderlyingType::Int8:
                case AttributeUnderlyingType::Int16:
                case AttributeUnderlyingType::Int32:
                case AttributeUnderlyingType::Int64:
                {
                    /// Integral keys are sent as BSON Int32; values beyond 32 bits are truncated by the cast.
                    key.add(key_attribute.name, static_cast<Int32>(key_columns[attribute_index]->get64(row_idx)));
                    break;
                }
                case AttributeUnderlyingType::Float32:
                case AttributeUnderlyingType::Float64:
                {
                    key.add(key_attribute.name, key_columns[attribute_index]->getFloat64(row_idx));
                    break;
                }
                case AttributeUnderlyingType::String:
                {
                    String loaded_str((*key_columns[attribute_index])[row_idx].safeGet<String>());
                    /// Convert string to ObjectID
                    if (key_attribute.is_object_id)
                    {
                        Poco::MongoDB::ObjectId::Ptr loaded_id(new Poco::MongoDB::ObjectId(loaded_str));
                        key.add(key_attribute.name, loaded_id);
                    }
                    else
                    {
                        key.add(key_attribute.name, loaded_str);
                    }
                    break;
                }
                default:
                    throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Unsupported dictionary attribute type for MongoDB dictionary source");
            }
        }
    }

    /// If more than one key we should use $or
    query.add("$or", keys_array);

    return QueryPipeline(std::make_shared<MongoDBPocoLegacySource>(connection, db, collection, query, sample_block, max_block_size));
}
std::string MongoDBPocoLegacyDictionarySource::toString() const
{
    /// Human-readable description for logs: "MongoDB: db.collection, [user@]host:port".
    std::string user_part;
    if (user.empty())
        user_part = " ";
    else
        user_part = " " + user + '@';

    return fmt::format("MongoDB: {}.{},{}{}:{}", db, collection, user_part, host, port);
}
}
#endif

View File

@ -1,93 +0,0 @@
#pragma once

#include "config.h"

#if USE_MONGODB
#include <Processors/Sources/MongoDBPocoLegacySource.h>
#include <Core/Block.h>

#include "DictionaryStructure.h"
#include "IDictionarySource.h"

namespace Poco
{
namespace Util
{
    class AbstractConfiguration;
}

namespace MongoDB
{
    class Connection;
}
}

namespace DB
{
namespace ErrorCodes
{
    extern const int NOT_IMPLEMENTED;
}

/// Allows loading dictionaries from a MongoDB collection. Deprecated, will be removed soon.
class MongoDBPocoLegacyDictionarySource final : public IDictionarySource
{
public:
    /// Connects during construction. If `uri_` is non-empty it takes precedence over the
    /// individual host/port/db parameters (which are then re-parsed from the URI).
    MongoDBPocoLegacyDictionarySource(
        const DictionaryStructure & dict_struct_,
        const std::string & uri_,
        const std::string & host_,
        UInt16 port_,
        const std::string & user_,
        const std::string & password_,
        const std::string & method_,
        const std::string & db_,
        const std::string & collection_,
        const std::string & options,
        const Block & sample_block_);

    /// Opens a new connection with the same parameters (does not share `other`'s connection).
    MongoDBPocoLegacyDictionarySource(const MongoDBPocoLegacyDictionarySource & other);

    ~MongoDBPocoLegacyDictionarySource() override;

    QueryPipeline loadAll() override;

    QueryPipeline loadUpdatedAll() override
    {
        throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method loadUpdatedAll is unsupported for MongoDBDictionarySource");
    }

    bool supportsSelectiveLoad() const override { return true; }

    QueryPipeline loadIds(const std::vector<UInt64> & ids) override;

    QueryPipeline loadKeys(const Columns & key_columns, const std::vector<size_t> & requested_rows) override;

    /// @todo: for MongoDB, modification date can somehow be determined from the `_id` object field
    bool isModified() const override { return true; }

    /// Not yet supported.
    bool hasUpdateField() const override { return false; }

    DictionarySourcePtr clone() const override { return std::make_shared<MongoDBPocoLegacyDictionarySource>(*this); }

    std::string toString() const override;

private:
    const DictionaryStructure dict_struct;
    const std::string uri;
    /// host/port/user/db are non-const: when a URI is given, they are overwritten
    /// in the constructor with values parsed back out of the URI.
    std::string host;
    UInt16 port;
    std::string user;
    const std::string password;
    const std::string method;
    std::string db;
    const std::string collection;
    const std::string options;
    Block sample_block;

    std::shared_ptr<Poco::MongoDB::Connection> connection;
};

}
#endif

View File

@ -11,7 +11,6 @@ void registerDictionarySourceFile(DictionarySourceFactory & source_factory);
void registerDictionarySourceMysql(DictionarySourceFactory & source_factory);
void registerDictionarySourceClickHouse(DictionarySourceFactory & source_factory);
void registerDictionarySourceMongoDB(DictionarySourceFactory & source_factory);
void registerDictionarySourceMongoDBPocoLegacy(DictionarySourceFactory & source_factory);
void registerDictionarySourceCassandra(DictionarySourceFactory & source_factory);
void registerDictionarySourceRedis(DictionarySourceFactory & source_factory);
void registerDictionarySourceXDBC(DictionarySourceFactory & source_factory);
@ -36,7 +35,7 @@ void registerDictionaryPolygon(DictionaryFactory & factory);
void registerDictionaryDirect(DictionaryFactory & factory);
void registerDictionaries(bool use_legacy_mongodb_integration)
void registerDictionaries()
{
{
auto & source_factory = DictionarySourceFactory::instance();
@ -44,12 +43,7 @@ void registerDictionaries(bool use_legacy_mongodb_integration)
registerDictionarySourceFile(source_factory);
registerDictionarySourceMysql(source_factory);
registerDictionarySourceClickHouse(source_factory);
if (use_legacy_mongodb_integration)
registerDictionarySourceMongoDBPocoLegacy(source_factory);
else
registerDictionarySourceMongoDB(source_factory);
registerDictionarySourceMongoDB(source_factory);
registerDictionarySourceRedis(source_factory);
registerDictionarySourceCassandra(source_factory);
registerDictionarySourceXDBC(source_factory);

View File

@ -2,5 +2,5 @@
namespace DB
{
void registerDictionaries(bool use_legacy_mongodb_integration);
void registerDictionaries();
}

View File

@ -30,7 +30,7 @@ TEST(ConvertDictionaryAST, SimpleDictConfiguration)
{
if (!registered)
{
registerDictionaries(false);
registerDictionaries();
registered = true;
}
@ -103,7 +103,7 @@ TEST(ConvertDictionaryAST, TrickyAttributes)
{
if (!registered)
{
registerDictionaries(false);
registerDictionaries();
registered = true;
}
@ -147,7 +147,7 @@ TEST(ConvertDictionaryAST, ComplexKeyAndLayoutWithParams)
{
if (!registered)
{
registerDictionaries(false);
registerDictionaries();
registered = true;
}
@ -198,7 +198,7 @@ TEST(ConvertDictionaryAST, ComplexSource)
{
if (!registered)
{
registerDictionaries(false);
registerDictionaries();
registered = true;
}

View File

@ -29,10 +29,10 @@ extern "C" int LLVMFuzzerInitialize(int *, char ***)
registerInterpreters();
registerFunctions();
registerAggregateFunctions();
registerTableFunctions(false);
registerTableFunctions();
registerDatabases();
registerStorages(false);
registerDictionaries(false);
registerStorages();
registerDictionaries();
registerDisks(/* global_skip_access_check= */ true);
registerFormats();

View File

@ -2,7 +2,6 @@
#include <Poco/String.h>
#include <Common/SipHash.h>
#include <Common/maskURIPassword.h>
#include <IO/Operators.h>
namespace DB
@ -36,17 +35,6 @@ void ASTPair::formatImpl(const FormatSettings & settings, FormatState & state, F
/// SOURCE(CLICKHOUSE(host 'example01-01-1' port 9000 user 'default' password '[HIDDEN]' db 'default' table 'ids'))
settings.ostr << "'[HIDDEN]'";
}
else if (!settings.show_secrets && (first == "uri"))
{
// Hide password from URI in the defention of a dictionary
WriteBufferFromOwnString temp_buf;
FormatSettings tmp_settings(temp_buf, settings.one_line);
FormatState tmp_state;
second->formatImpl(tmp_settings, tmp_state, frame);
maskURIPassword(&temp_buf.str());
settings.ostr << temp_buf.str();
}
else
{
second->formatImpl(settings, state, frame);

View File

@ -2,7 +2,6 @@
#include <Common/KnownObjectNames.h>
#include <Common/re2.h>
#include <Common/maskURIPassword.h>
#include <Core/QualifiedTableName.h>
#include <base/defines.h>
#include <boost/algorithm/string/predicate.hpp>
@ -89,17 +88,13 @@ protected:
void findOrdinaryFunctionSecretArguments()
{
if ((function->name() == "mysql") || (function->name() == "postgresql"))
if ((function->name() == "mysql") || (function->name() == "postgresql") || (function->name() == "mongodb"))
{
/// mysql('host:port', 'database', 'table', 'user', 'password', ...)
/// postgresql('host:port', 'database', 'table', 'user', 'password', ...)
/// mongodb('host:port', 'database', 'collection', 'user', 'password', ...)
findMySQLFunctionSecretArguments();
}
else if (function->name() == "mongodb")
{
findMongoDBSecretArguments();
}
else if ((function->name() == "s3") || (function->name() == "cosn") || (function->name() == "oss") ||
(function->name() == "deltaLake") || (function->name() == "hudi") || (function->name() == "iceberg") ||
(function->name() == "gcs"))
@ -154,40 +149,6 @@ protected:
}
}
void findMongoDBSecretArguments()
{
String uri;
if (isNamedCollectionName(0))
{
/// MongoDB(named_collection, ..., password = 'password', ...)
if (findSecretNamedArgument("password", 1))
return;
/// MongoDB(named_collection, ..., uri = 'mongodb://username:password@127.0.0.1:27017', ...)
findNamedArgument(&uri, "uri", 1);
result.are_named = true;
result.start = 1;
}
else if (function->arguments->size() == 2)
{
tryGetStringFromArgument(0, &uri);
result.are_named = false;
result.start = 0;
}
else
{
// MongoDB('127.0.0.1:27017', 'database', 'collection', 'user, 'password'...)
markSecretArgument(4, false);
return;
}
chassert(result.count == 0);
maskURIPassword(&uri);
result.count = 1;
result.replacement = std::move(uri);
}
/// Returns the number of arguments excluding "headers" and "extra_credentials" (which should
/// always be at the end). Marks "headers" as secret, if found.
size_t excludeS3OrURLNestedMaps()
@ -463,7 +424,8 @@ protected:
/// ExternalDistributed('engine', 'host:port', 'database', 'table', 'user', 'password')
findExternalDistributedTableEngineSecretArguments();
}
else if ((engine_name == "MySQL") || (engine_name == "PostgreSQL") || (engine_name == "MaterializedPostgreSQL"))
else if ((engine_name == "MySQL") || (engine_name == "PostgreSQL") ||
(engine_name == "MaterializedPostgreSQL") || (engine_name == "MongoDB"))
{
/// MySQL('host:port', 'database', 'table', 'user', 'password', ...)
/// PostgreSQL('host:port', 'database', 'table', 'user', 'password', ...)
@ -471,10 +433,6 @@ protected:
/// MongoDB('host:port', 'database', 'collection', 'user', 'password', ...)
findMySQLFunctionSecretArguments();
}
else if (engine_name == "MongoDB")
{
findMongoDBSecretArguments();
}
else if ((engine_name == "S3") || (engine_name == "COSN") || (engine_name == "OSS") ||
(engine_name == "DeltaLake") || (engine_name == "Hudi") || (engine_name == "Iceberg") || (engine_name == "S3Queue"))
{
@ -633,15 +591,11 @@ protected:
/// Looks for a secret argument with a specified name. This function looks for arguments in format `key=value` where the key is specified.
/// If the argument is found, it is marked as a secret.
bool findSecretNamedArgument(const std::string_view & key, size_t start = 0)
void findSecretNamedArgument(const std::string_view & key, size_t start = 0)
{
ssize_t arg_idx = findNamedArgument(nullptr, key, start);
if (arg_idx >= 0)
{
markSecretArgument(arg_idx, /* argument_is_named= */ true);
return true;
}
return false;
}
};

View File

@ -1,580 +0,0 @@
#include "config.h"
#if USE_MONGODB
#include "MongoDBPocoLegacySource.h"
#include <string>
#include <vector>
#include <Poco/MongoDB/Array.h>
#include <Poco/MongoDB/Binary.h>
#include <Poco/MongoDB/Database.h>
#include <Poco/MongoDB/Connection.h>
#include <Poco/MongoDB/Cursor.h>
#include <Poco/MongoDB/OpMsgCursor.h>
#include <Poco/MongoDB/ObjectId.h>
#include <Columns/ColumnArray.h>
#include <Columns/ColumnNullable.h>
#include <Columns/ColumnString.h>
#include <Columns/ColumnsNumber.h>
#include <IO/ReadHelpers.h>
#include <Common/assert_cast.h>
#include <Common/quoteString.h>
#include "base/types.h"
#include <base/range.h>
#include <Poco/URI.h>
#include <DataTypes/DataTypeArray.h>
#include <DataTypes/DataTypeNullable.h>
// only after poco
// naming conflict:
// Poco/MongoDB/BSONWriter.h:54: void writeCString(const std::string & value);
// src/IO/WriteHelpers.h:146 #define writeCString(s, buf)
#include <IO/WriteHelpers.h>
namespace DB
{
namespace ErrorCodes
{
extern const int TYPE_MISMATCH;
extern const int UNKNOWN_TYPE;
extern const int MONGODB_ERROR;
extern const int BAD_ARGUMENTS;
}
namespace
{
using ValueType = ExternalResultDescription::ValueType;
using ObjectId = Poco::MongoDB::ObjectId;
using MongoArray = Poco::MongoDB::Array;
using MongoUUID = Poco::MongoDB::Binary::Ptr;
/// Converts a Poco::UUID into ClickHouse's UUID representation.
/// High 64 bits: time_low << 32 | time_mid << 16 | time_hi_and_version.
/// Low 64 bits: clock_seq << 48 | 48-bit node id (packed big-endian from the 6 node bytes).
UUID parsePocoUUID(const Poco::UUID & src)
{
    UUID uuid;

    std::array<Poco::UInt8, 6> src_node = src.getNode();
    UInt64 node = 0;
    node |= UInt64(src_node[0]) << 40;
    node |= UInt64(src_node[1]) << 32;
    node |= UInt64(src_node[2]) << 24;
    node |= UInt64(src_node[3]) << 16;
    node |= UInt64(src_node[4]) << 8;
    node |= src_node[5];

    UUIDHelpers::getHighBytes(uuid) = UInt64(src.getTimeLow()) << 32 | UInt32(src.getTimeMid() << 16 | src.getTimeHiAndVersion());
    UUIDHelpers::getLowBytes(uuid) = UInt64(src.getClockSeq()) << 48 | node;

    return uuid;
}
/// Extracts a numeric BSON element as a Field of numeric type T.
/// Accepts Int32/Int64/Float64/bool (converted via static_cast), NullValue (empty Field),
/// and String (parsed as T). Any other BSON type raises TYPE_MISMATCH.
template <typename T>
Field getNumber(const Poco::MongoDB::Element & value, const std::string & name)
{
    switch (value.type())
    {
        case Poco::MongoDB::ElementTraits<Int32>::TypeId:
            return static_cast<T>(static_cast<const Poco::MongoDB::ConcreteElement<Int32> &>(value).value());
        case Poco::MongoDB::ElementTraits<Poco::Int64>::TypeId:
            return static_cast<T>(static_cast<const Poco::MongoDB::ConcreteElement<Poco::Int64> &>(value).value());
        case Poco::MongoDB::ElementTraits<Float64>::TypeId:
            return static_cast<T>(static_cast<const Poco::MongoDB::ConcreteElement<Float64> &>(value).value());
        case Poco::MongoDB::ElementTraits<bool>::TypeId:
            return static_cast<T>(static_cast<const Poco::MongoDB::ConcreteElement<bool> &>(value).value());
        case Poco::MongoDB::ElementTraits<Poco::MongoDB::NullValue>::TypeId:
            /// BSON null becomes an empty (Null) Field.
            return Field();
        case Poco::MongoDB::ElementTraits<String>::TypeId:
            return parse<T>(static_cast<const Poco::MongoDB::ConcreteElement<String> &>(value).value());
        default:
            throw Exception(ErrorCodes::TYPE_MISMATCH, "Type mismatch, expected a number, got type id = {} for column {}",
                            toString(value.type()), name);
    }
}
/// Precomputes, for an Array column at `column_idx`, everything needed to parse nested
/// BSON arrays later: the number of array dimensions, the default value for nulls at the
/// innermost level, and an element-parser lambda chosen by the innermost element type.
/// The result is stored into `array_info[column_idx]`.
void prepareMongoDBArrayInfo(
    std::unordered_map<size_t, MongoDBPocoLegacyArrayInfo> & array_info, size_t column_idx, const DataTypePtr data_type)
{
    const auto * array_type = assert_cast<const DataTypeArray *>(data_type.get());
    auto nested = array_type->getNestedType();

    /// Walk down Array(Array(...)) to find the innermost element type and count dimensions.
    size_t count_dimensions = 1;
    while (isArray(nested))
    {
        ++count_dimensions;
        nested = assert_cast<const DataTypeArray *>(nested.get())->getNestedType();
    }

    /// Default value is taken before unwrapping Nullable, so it matches the stored element type.
    Field default_value = nested->getDefault();
    if (nested->isNullable())
        nested = assert_cast<const DataTypeNullable *>(nested.get())->getNestedType();

    WhichDataType which(nested);
    std::function<Field(const Poco::MongoDB::Element & value, const std::string & name)> parser;

    /// Pick a parser for the innermost element type. Numeric types go through getNumber<T>.
    if (which.isUInt8())
        parser = [](const Poco::MongoDB::Element & value, const std::string & name) -> Field { return getNumber<UInt8>(value, name); };
    else if (which.isUInt16())
        parser = [](const Poco::MongoDB::Element & value, const std::string & name) -> Field { return getNumber<UInt16>(value, name); };
    else if (which.isUInt32())
        parser = [](const Poco::MongoDB::Element & value, const std::string & name) -> Field { return getNumber<UInt32>(value, name); };
    else if (which.isUInt64())
        parser = [](const Poco::MongoDB::Element & value, const std::string & name) -> Field { return getNumber<UInt64>(value, name); };
    else if (which.isInt8())
        parser = [](const Poco::MongoDB::Element & value, const std::string & name) -> Field { return getNumber<Int8>(value, name); };
    else if (which.isInt16())
        parser = [](const Poco::MongoDB::Element & value, const std::string & name) -> Field { return getNumber<Int16>(value, name); };
    else if (which.isInt32())
        parser = [](const Poco::MongoDB::Element & value, const std::string & name) -> Field { return getNumber<Int32>(value, name); };
    else if (which.isInt64())
        parser = [](const Poco::MongoDB::Element & value, const std::string & name) -> Field { return getNumber<Int64>(value, name); };
    else if (which.isFloat32())
        parser = [](const Poco::MongoDB::Element & value, const std::string & name) -> Field { return getNumber<Float32>(value, name); };
    else if (which.isFloat64())
        parser = [](const Poco::MongoDB::Element & value, const std::string & name) -> Field { return getNumber<Float64>(value, name); };
    else if (which.isString() || which.isFixedString())
        /// Strings accept either a BSON ObjectId (stringified) or a BSON string.
        parser = [](const Poco::MongoDB::Element & value, const std::string & name) -> Field
        {
            if (value.type() == Poco::MongoDB::ElementTraits<ObjectId::Ptr>::TypeId)
            {
                String string_id = value.toString();
                return Field(string_id.data(), string_id.size());
            }
            else if (value.type() == Poco::MongoDB::ElementTraits<String>::TypeId)
            {
                String string = static_cast<const Poco::MongoDB::ConcreteElement<String> &>(value).value();
                return Field(string.data(), string.size());
            }

            throw Exception(ErrorCodes::TYPE_MISMATCH, "Type mismatch, expected String, got type id = {} for column {}",
                            toString(value.type()), name);
        };
    else if (which.isDate())
        /// Date: BSON Timestamp converted to a day number via the server DateLUT.
        parser = [](const Poco::MongoDB::Element & value, const std::string & name) -> Field
        {
            if (value.type() != Poco::MongoDB::ElementTraits<Poco::Timestamp>::TypeId)
                throw Exception(ErrorCodes::TYPE_MISMATCH, "Type mismatch, expected Timestamp, got type id = {} for column {}",
                                toString(value.type()), name);

            return static_cast<UInt16>(DateLUT::instance().toDayNum(
                static_cast<const Poco::MongoDB::ConcreteElement<Poco::Timestamp> &>(value).value().epochTime()));
        };
    else if (which.isDateTime())
        /// DateTime: BSON Timestamp converted to Unix epoch seconds.
        parser = [](const Poco::MongoDB::Element & value, const std::string & name) -> Field
        {
            if (value.type() != Poco::MongoDB::ElementTraits<Poco::Timestamp>::TypeId)
                throw Exception(ErrorCodes::TYPE_MISMATCH, "Type mismatch, expected Timestamp, got type id = {} for column {}",
                                toString(value.type()), name);

            return static_cast<UInt32>(static_cast<const Poco::MongoDB::ConcreteElement<Poco::Timestamp> &>(value).value().epochTime());
        };
    else if (which.isUUID())
        /// UUID accepts either a string form or a BSON binary UUID subtype.
        parser = [](const Poco::MongoDB::Element & value, const std::string & name) -> Field
        {
            if (value.type() == Poco::MongoDB::ElementTraits<String>::TypeId)
            {
                String string = static_cast<const Poco::MongoDB::ConcreteElement<String> &>(value).value();
                return parse<UUID>(string);
            }
            else if (value.type() == Poco::MongoDB::ElementTraits<MongoUUID>::TypeId)
            {
                const Poco::UUID & poco_uuid = static_cast<const Poco::MongoDB::ConcreteElement<MongoUUID> &>(value).value()->uuid();
                return parsePocoUUID(poco_uuid);
            }
            else
                throw Exception(ErrorCodes::TYPE_MISMATCH, "Type mismatch, expected String/UUID, got type id = {} for column {}",
                                toString(value.type()), name);
        };
    else
        throw Exception(ErrorCodes::BAD_ARGUMENTS, "Type conversion to {} is not supported", nested->getName());

    array_info[column_idx] = {count_dimensions, default_value, parser};
}
/// Appends a numeric BSON element to a ColumnVector<T>.
/// Accepts Int32/Int64/Float64/bool (cast to T), NullValue (inserts a default-constructed T),
/// and String (parsed as T). Any other BSON type raises TYPE_MISMATCH.
template <typename T>
void insertNumber(IColumn & column, const Poco::MongoDB::Element & value, const std::string & name)
{
    switch (value.type())
    {
        case Poco::MongoDB::ElementTraits<Int32>::TypeId:
            assert_cast<ColumnVector<T> &>(column).getData().push_back(
                static_cast<const Poco::MongoDB::ConcreteElement<Int32> &>(value).value());
            break;
        case Poco::MongoDB::ElementTraits<Poco::Int64>::TypeId:
            assert_cast<ColumnVector<T> &>(column).getData().push_back(
                static_cast<T>(static_cast<const Poco::MongoDB::ConcreteElement<Poco::Int64> &>(value).value()));
            break;
        case Poco::MongoDB::ElementTraits<Float64>::TypeId:
            assert_cast<ColumnVector<T> &>(column).getData().push_back(static_cast<T>(
                static_cast<const Poco::MongoDB::ConcreteElement<Float64> &>(value).value()));
            break;
        case Poco::MongoDB::ElementTraits<bool>::TypeId:
            assert_cast<ColumnVector<T> &>(column).getData().push_back(
                static_cast<const Poco::MongoDB::ConcreteElement<bool> &>(value).value());
            break;
        case Poco::MongoDB::ElementTraits<Poco::MongoDB::NullValue>::TypeId:
            /// BSON null: insert a value-initialized element (zero for numeric T).
            assert_cast<ColumnVector<T> &>(column).getData().emplace_back();
            break;
        case Poco::MongoDB::ElementTraits<String>::TypeId:
            assert_cast<ColumnVector<T> &>(column).getData().push_back(
                parse<T>(static_cast<const Poco::MongoDB::ConcreteElement<String> &>(value).value()));
            break;
        default:
            throw Exception(ErrorCodes::TYPE_MISMATCH, "Type mismatch, expected a number, got type id = {} for column {}",
                            toString(value.type()), name);
    }
}
/// Appends one BSON element `value` to `column`, converting according to the expected
/// ClickHouse value type. `name` is used only in error messages; `array_info`/`idx`
/// carry the precomputed metadata (dimensions, defaults, element parser) for Array columns.
void insertValue(
    IColumn & column,
    const ValueType type,
    const Poco::MongoDB::Element & value,
    const std::string & name,
    std::unordered_map<size_t, MongoDBPocoLegacyArrayInfo> & array_info,
    size_t idx)
{
    switch (type)
    {
        case ValueType::vtUInt8:
            insertNumber<UInt8>(column, value, name);
            break;
        case ValueType::vtUInt16:
            insertNumber<UInt16>(column, value, name);
            break;
        case ValueType::vtUInt32:
            insertNumber<UInt32>(column, value, name);
            break;
        case ValueType::vtUInt64:
            insertNumber<UInt64>(column, value, name);
            break;
        case ValueType::vtInt8:
            insertNumber<Int8>(column, value, name);
            break;
        case ValueType::vtInt16:
            insertNumber<Int16>(column, value, name);
            break;
        case ValueType::vtInt32:
            insertNumber<Int32>(column, value, name);
            break;
        case ValueType::vtInt64:
            insertNumber<Int64>(column, value, name);
            break;
        case ValueType::vtFloat32:
            insertNumber<Float32>(column, value, name);
            break;
        case ValueType::vtFloat64:
            insertNumber<Float64>(column, value, name);
            break;
        case ValueType::vtEnum8:
        case ValueType::vtEnum16:
        case ValueType::vtString:
        {
            /// Either a BSON ObjectId (stringified) or a plain BSON string.
            if (value.type() == Poco::MongoDB::ElementTraits<ObjectId::Ptr>::TypeId)
            {
                std::string string_id = value.toString();
                assert_cast<ColumnString &>(column).insertData(string_id.data(), string_id.size());
                break;
            }
            else if (value.type() == Poco::MongoDB::ElementTraits<String>::TypeId)
            {
                String string = static_cast<const Poco::MongoDB::ConcreteElement<String> &>(value).value();
                assert_cast<ColumnString &>(column).insertData(string.data(), string.size());
                break;
            }

            throw Exception(ErrorCodes::TYPE_MISMATCH, "Type mismatch, expected String, got type id = {} for column {}",
                            toString(value.type()), name);
        }
        case ValueType::vtDate:
        {
            if (value.type() != Poco::MongoDB::ElementTraits<Poco::Timestamp>::TypeId)
                throw Exception(ErrorCodes::TYPE_MISMATCH, "Type mismatch, expected Timestamp, got type id = {} for column {}",
                                toString(value.type()), name);

            /// Timestamp -> day number via DateLUT.
            assert_cast<ColumnUInt16 &>(column).getData().push_back(static_cast<UInt16>(DateLUT::instance().toDayNum(
                static_cast<const Poco::MongoDB::ConcreteElement<Poco::Timestamp> &>(value).value().epochTime())));
            break;
        }
        case ValueType::vtDateTime:
        {
            if (value.type() != Poco::MongoDB::ElementTraits<Poco::Timestamp>::TypeId)
                throw Exception(ErrorCodes::TYPE_MISMATCH, "Type mismatch, expected Timestamp, got type id = {} for column {}",
                                toString(value.type()), name);

            /// Timestamp -> Unix epoch seconds.
            assert_cast<ColumnUInt32 &>(column).getData().push_back(
                static_cast<UInt32>(static_cast<const Poco::MongoDB::ConcreteElement<Poco::Timestamp> &>(value).value().epochTime()));
            break;
        }
        case ValueType::vtUUID:
        {
            if (value.type() == Poco::MongoDB::ElementTraits<String>::TypeId)
            {
                String string = static_cast<const Poco::MongoDB::ConcreteElement<String> &>(value).value();
                assert_cast<ColumnUUID &>(column).getData().push_back(parse<UUID>(string));
            }
            else if (value.type() == Poco::MongoDB::ElementTraits<MongoUUID>::TypeId)
            {
                const Poco::UUID & poco_uuid = static_cast<const Poco::MongoDB::ConcreteElement<MongoUUID> &>(value).value()->uuid();
                UUID uuid = parsePocoUUID(poco_uuid);
                assert_cast<ColumnUUID &>(column).getData().push_back(uuid);
            }
            else
                throw Exception(ErrorCodes::TYPE_MISMATCH, "Type mismatch, expected String/UUID, got type id = {} for column {}",
                                toString(value.type()), name);
            break;
        }
        case ValueType::vtArray:
        {
            if (value.type() != Poco::MongoDB::ElementTraits<MongoArray::Ptr>::TypeId)
                throw Exception(ErrorCodes::TYPE_MISMATCH, "Type mismatch, expected Array, got type id = {} for column {}",
                                toString(value.type()), name);

            size_t expected_dimensions = array_info[idx].num_dimensions;
            const auto parse_value = array_info[idx].parser;
            /// dimensions[d] accumulates rows for nesting depth d; index 0 is unused.
            std::vector<Row> dimensions(expected_dimensions + 1);

            auto array = static_cast<const Poco::MongoDB::ConcreteElement<MongoArray::Ptr> &>(value).value();

            /// Iterative depth-first traversal of the nested BSON arrays. Each stack entry
            /// is (array element, index of the next child to visit); stack depth = dimension.
            std::vector<std::pair<const Poco::MongoDB::Element *, size_t>> arrays;
            arrays.emplace_back(&value, 0);

            while (!arrays.empty())
            {
                size_t dimension_idx = arrays.size() - 1;
                if (dimension_idx + 1 > expected_dimensions)
                    throw Exception(ErrorCodes::BAD_ARGUMENTS, "Got more dimensions than expected");

                auto [parent_ptr, child_idx] = arrays.back();
                auto parent = static_cast<const Poco::MongoDB::ConcreteElement<MongoArray::Ptr> &>(*parent_ptr).value();

                if (child_idx >= parent->size())
                {
                    /// Current array exhausted: pop and fold its collected values into the parent level.
                    arrays.pop_back();
                    if (dimension_idx == 0)
                        break;

                    dimensions[dimension_idx].emplace_back(Array(dimensions[dimension_idx + 1].begin(), dimensions[dimension_idx + 1].end()));
                    dimensions[dimension_idx + 1].clear();

                    continue;
                }

                Poco::MongoDB::Element::Ptr child = parent->get(static_cast<int>(child_idx));
                arrays.back().second += 1;

                if (child->type() == Poco::MongoDB::ElementTraits<MongoArray::Ptr>::TypeId)
                {
                    /// Descend into a nested array.
                    arrays.emplace_back(child.get(), 0);
                }
                else if (child->type() == Poco::MongoDB::ElementTraits<Poco::MongoDB::NullValue>::TypeId)
                {
                    /// Nulls: default value at the innermost level, empty array otherwise.
                    if (dimension_idx + 1 == expected_dimensions)
                        dimensions[dimension_idx + 1].emplace_back(array_info[idx].default_value);
                    else
                        dimensions[dimension_idx + 1].emplace_back(Array());
                }
                else if (dimension_idx + 1 == expected_dimensions)
                {
                    dimensions[dimension_idx + 1].emplace_back(parse_value(*child, name));
                }
                else
                {
                    throw Exception(ErrorCodes::BAD_ARGUMENTS,
                                    "Got less dimensions than expected. ({} instead of {})", dimension_idx + 1, expected_dimensions);
                }
            }

            assert_cast<ColumnArray &>(column).insert(Array(dimensions[1].begin(), dimensions[1].end()));
            break;
        }
        default:
            throw Exception(ErrorCodes::UNKNOWN_TYPE, "Value of unsupported type: {}", column.getName());
    }
}
/// Fill a cell for a missing/null document field by copying row 0 of the
/// sample column (which holds the column's default value).
void insertDefaultValue(IColumn & column, const IColumn & sample_column)
{
    column.insertFrom(sample_column, 0);
}
}
/// Decides whether to talk the legacy OP_QUERY protocol to this server.
/// Probes the server hello twice (first without OP_MSG, then with it) and
/// treats any reported maxWireVersion below 3.6 — or no answer at all — as "old".
bool isMongoDBWireProtocolOld(Poco::MongoDB::Connection & connection_, const std::string & database_name_)
{
    Poco::MongoDB::Database db(database_name_);

    for (const bool use_op_msg : {false, true})
    {
        Poco::MongoDB::Document::Ptr hello = db.queryServerHello(connection_, use_op_msg);
        if (hello->exists("maxWireVersion"))
            return hello->getInteger("maxWireVersion") < Poco::MongoDB::Database::WireVersion::VER_36;
    }

    /// The server answered neither probe with a wire version: assume legacy.
    return true;
}
/// Builds a projection from the sample block and creates either a legacy
/// OP_QUERY cursor or an OP_MSG "find" cursor, depending on the server's
/// wire protocol version (probed via isMongoDBWireProtocolOld).
MongoDBPocoLegacyCursor::MongoDBPocoLegacyCursor(
    const std::string & database,
    const std::string & collection,
    const Block & sample_block_to_select,
    const Poco::MongoDB::Document & query,
    Poco::MongoDB::Connection & connection)
    : is_wire_protocol_old(isMongoDBWireProtocolOld(connection, database))
{
    Poco::MongoDB::Document projection;

    /// Looks like selecting _id column is implicit by default.
    /// Suppress it explicitly unless the caller asked for it.
    if (!sample_block_to_select.has("_id"))
        projection.add("_id", 0);

    /// Request exactly the columns of the sample block.
    for (const auto & column : sample_block_to_select)
        projection.add(column.name, 1);

    if (is_wire_protocol_old)
    {
        /// Legacy OP_QUERY: selector + returnFieldSelector.
        old_cursor = std::make_unique<Poco::MongoDB::Cursor>(database, collection);
        old_cursor->query().selector() = query;
        old_cursor->query().returnFieldSelector() = projection;
    }
    else
    {
        /// Modern OP_MSG: a "find" command with filter/projection sub-documents.
        new_cursor = std::make_unique<Poco::MongoDB::OpMsgCursor>(database, collection);
        new_cursor->query().setCommandName(Poco::MongoDB::OpMsgMessage::CMD_FIND);
        new_cursor->query().body().addNewDocument("filter") = query;
        new_cursor->query().body().addNewDocument("projection") = projection;
    }
}
/// Fetches the next batch of documents from the server through whichever
/// cursor flavor was created, and remembers the server-side cursor id so the
/// caller can detect exhaustion (id == 0).
Poco::MongoDB::Document::Vector MongoDBPocoLegacyCursor::nextDocuments(Poco::MongoDB::Connection & connection)
{
    if (!is_wire_protocol_old)
    {
        auto response = new_cursor->next(connection);
        /// OP_MSG keeps the cursor id on the cursor object, not the response.
        cursor_id = new_cursor->cursorID();
        return std::move(response.documents());
    }

    auto response = old_cursor->next(connection);
    cursor_id = response.cursorID();
    return std::move(response.documents());
}
/// Server-side cursor id of the last fetched batch; 0 once the cursor is exhausted.
Int64 MongoDBPocoLegacyCursor::cursorID() const
{
    return cursor_id;
}
/// Source that streams documents from a MongoDB collection as Chunks shaped
/// like sample_block. Precomputes per-column array conversion metadata for
/// every Array-typed column.
MongoDBPocoLegacySource::MongoDBPocoLegacySource(
    std::shared_ptr<Poco::MongoDB::Connection> & connection_,
    const String & database_name_,
    const String & collection_name_,
    const Poco::MongoDB::Document & query_,
    const Block & sample_block,
    UInt64 max_block_size_)
    : ISource(sample_block.cloneEmpty())
    , connection(connection_)
    , cursor(database_name_, collection_name_, sample_block, query_, *connection_)
    , max_block_size{max_block_size_}
{
    description.init(sample_block);

    /// Only Array columns need dimension/parser metadata; see prepareMongoDBArrayInfo.
    for (const auto idx : collections::range(0, description.sample_block.columns()))
        if (description.types[idx].first == ExternalResultDescription::ValueType::vtArray)
            prepareMongoDBArrayInfo(array_info, idx, description.sample_block.getByPosition(idx).type);
}

MongoDBPocoLegacySource::~MongoDBPocoLegacySource() = default;
/// Pulls batches from the cursor until either max_block_size rows are
/// accumulated or the server reports the cursor exhausted (cursorID == 0).
/// Returns an empty Chunk when there is nothing left to read.
Chunk MongoDBPocoLegacySource::generate()
{
    if (all_read)
        return {};

    MutableColumns columns(description.sample_block.columns());
    const size_t size = columns.size();
    for (const auto i : collections::range(0, size))
        columns[i] = description.sample_block.getByPosition(i).column->cloneEmpty();

    size_t num_rows = 0;
    while (num_rows < max_block_size)
    {
        auto documents = cursor.nextDocuments(*connection);

        for (auto & document : documents)
        {
            /// A document of the shape {ok: 0, $err: ..., code: ...} is the
            /// server reporting an error in-band; surface it as an exception.
            if (document->exists("ok") && document->exists("$err")
                && document->exists("code") && document->getInteger("ok") == 0)
            {
                auto code = document->getInteger("code");
                const Poco::MongoDB::Element::Ptr value = document->get("$err");
                auto message = static_cast<const Poco::MongoDB::ConcreteElement<String> &>(*value).value();
                throw Exception(ErrorCodes::MONGODB_ERROR, "Got error from MongoDB: {}, code: {}", message, code);
            }
            ++num_rows;

            for (const auto idx : collections::range(0, size))
            {
                const auto & name = description.sample_block.getByPosition(idx).name;
                bool exists_in_current_document = document->exists(name);
                /// Field absent from this document: use the column default.
                if (!exists_in_current_document)
                {
                    insertDefaultValue(*columns[idx], *description.sample_block.getByPosition(idx).column);
                    continue;
                }

                const Poco::MongoDB::Element::Ptr value = document->get(name);
                /// BSON null is also treated as "use the default value".
                if (value.isNull() || value->type() == Poco::MongoDB::ElementTraits<Poco::MongoDB::NullValue>::TypeId)
                {
                    insertDefaultValue(*columns[idx], *description.sample_block.getByPosition(idx).column);
                }
                else
                {
                    bool is_nullable = description.types[idx].second;
                    if (is_nullable)
                    {
                        /// Write into the nested column and mark the row as not-null.
                        ColumnNullable & column_nullable = assert_cast<ColumnNullable &>(*columns[idx]);
                        insertValue(column_nullable.getNestedColumn(), description.types[idx].first, *value, name, array_info, idx);
                        column_nullable.getNullMapData().emplace_back(0);
                    }
                    else
                        insertValue(*columns[idx], description.types[idx].first, *value, name, array_info, idx);
                }
            }
        }

        /// Cursor id 0 means the server has no more batches for us.
        if (cursor.cursorID() == 0)
        {
            all_read = true;
            break;
        }
    }

    if (num_rows == 0)
        return {};

    return Chunk(std::move(columns), num_rows);
}
}
#endif

View File

@ -1,92 +0,0 @@
#pragma once
#include "config.h"
#if USE_MONGODB
#include <Poco/MongoDB/Element.h>
#include <Poco/MongoDB/Array.h>
#include <Core/Block.h>
#include <Processors/ISource.h>
#include <Core/ExternalResultDescription.h>
#include <Core/Field.h>
namespace Poco
{
namespace MongoDB
{
class Connection;
class Document;
class Cursor;
class OpMsgCursor;
}
}
namespace DB
{
/// Precomputed per-column metadata for converting BSON arrays into
/// ClickHouse Array columns.
struct MongoDBPocoLegacyArrayInfo
{
    size_t num_dimensions;  /// Expected nesting depth of the Array column.
    Field default_value;    /// Substituted for BSON nulls at the innermost level.
    /// Converts one innermost BSON element to a Field; `name` is used in error messages.
    std::function<Field(const Poco::MongoDB::Element & value, const std::string & name)> parser;
};
void authenticate(Poco::MongoDB::Connection & connection, const std::string & database, const std::string & user, const std::string & password);
bool isMongoDBWireProtocolOld(Poco::MongoDB::Connection & connection_, const std::string & database_name_);
/// Cursor over a MongoDB collection that hides the difference between the
/// legacy OP_QUERY protocol and OP_MSG. Deprecated, will be removed soon.
class MongoDBPocoLegacyCursor
{
public:
    /// Picks the protocol flavor by probing the server's wire version and
    /// builds a projection from the sample block's column names.
    MongoDBPocoLegacyCursor(
        const std::string & database,
        const std::string & collection,
        const Block & sample_block_to_select,
        const Poco::MongoDB::Document & query,
        Poco::MongoDB::Connection & connection);

    /// Fetches the next batch of documents from the server.
    Poco::MongoDB::Document::Vector nextDocuments(Poco::MongoDB::Connection & connection);

    /// Server-side cursor id of the last batch; 0 once exhausted.
    Int64 cursorID() const;

private:
    const bool is_wire_protocol_old;
    std::unique_ptr<Poco::MongoDB::Cursor> old_cursor;       /// Used when the wire protocol is pre-3.6.
    std::unique_ptr<Poco::MongoDB::OpMsgCursor> new_cursor;  /// Used for OP_MSG-capable servers.
    Int64 cursor_id = 0;
};
/// Converts MongoDB Cursor to a stream of Blocks. Deprecated, will be removed soon.
class MongoDBPocoLegacySource final : public ISource
{
public:
    /// `sample_block` defines the output columns; `query_` is the MongoDB filter;
    /// at most `max_block_size_` rows are emitted per generated Chunk.
    MongoDBPocoLegacySource(
        std::shared_ptr<Poco::MongoDB::Connection> & connection_,
        const String & database_name_,
        const String & collection_name_,
        const Poco::MongoDB::Document & query_,
        const Block & sample_block,
        UInt64 max_block_size_);

    ~MongoDBPocoLegacySource() override;

    String getName() const override { return "MongoDB"; }

private:
    Chunk generate() override;

    std::shared_ptr<Poco::MongoDB::Connection> connection;
    MongoDBPocoLegacyCursor cursor;
    const UInt64 max_block_size;          /// Upper bound on rows per Chunk.
    ExternalResultDescription description;
    bool all_read = false;                /// Set once the server cursor is exhausted.
    std::unordered_map<size_t, MongoDBPocoLegacyArrayInfo> array_info;  /// Keyed by column index; Array columns only.
};
#endif

View File

@ -1,190 +1,501 @@
#include "config.h"
#if USE_MONGODB
#include "MongoDBSource.h"
#include <string>
#include <vector>
#include <Poco/MongoDB/Array.h>
#include <Poco/MongoDB/Binary.h>
#include <Poco/MongoDB/Database.h>
#include <Poco/MongoDB/Connection.h>
#include <Poco/MongoDB/Cursor.h>
#include <Poco/MongoDB/OpMsgCursor.h>
#include <Poco/MongoDB/ObjectId.h>
#include <Columns/ColumnArray.h>
#include <Columns/ColumnNullable.h>
#include <Columns/ColumnDecimal.h>
#include <Columns/ColumnString.h>
#include <DataTypes/DataTypeNullable.h>
#include <DataTypes/DataTypeArray.h>
#include <Columns/ColumnsNumber.h>
#include <IO/ReadHelpers.h>
#include <Formats/FormatFactory.h>
#include <Common/assert_cast.h>
#include <Common/Exception.h>
#include <Common/BSONCXXHelper.h>
#include <Common/quoteString.h>
#include "base/types.h"
#include <base/range.h>
#include <Poco/URI.h>
#include <DataTypes/DataTypeArray.h>
#include <DataTypes/DataTypeNullable.h>
// only after poco
// naming conflict:
// Poco/MongoDB/BSONWriter.h:54: void writeCString(const std::string & value);
// src/IO/WriteHelpers.h:146 #define writeCString(s, buf)
#include <IO/WriteHelpers.h>
namespace DB
{
namespace ErrorCodes
{
extern const int TYPE_MISMATCH;
extern const int NOT_IMPLEMENTED;
extern const int TYPE_MISMATCH;
extern const int UNKNOWN_TYPE;
extern const int MONGODB_ERROR;
extern const int BAD_ARGUMENTS;
}
using BSONCXXHelper::BSONElementAsNumber;
using BSONCXXHelper::BSONArrayAsArray;
using BSONCXXHelper::BSONElementAsString;
void MongoDBSource::insertDefaultValue(IColumn & column, const IColumn & sample_column) { column.insertFrom(sample_column, 0); }
void MongoDBSource::insertValue(IColumn & column, const size_t & idx, const DataTypePtr & type, const std::string & name, const bsoncxx::document::element & value)
namespace
{
switch (type->getTypeId())
using ValueType = ExternalResultDescription::ValueType;
using ObjectId = Poco::MongoDB::ObjectId;
using MongoArray = Poco::MongoDB::Array;
using MongoUUID = Poco::MongoDB::Binary::Ptr;
UUID parsePocoUUID(const Poco::UUID & src)
{
case TypeIndex::Int8:
assert_cast<ColumnInt8 &>(column).insertValue(BSONElementAsNumber<Int8, bsoncxx::document::element>(value, name));
break;
case TypeIndex::UInt8:
assert_cast<ColumnUInt8 &>(column).insertValue(BSONElementAsNumber<UInt8, bsoncxx::document::element>(value, name));
break;
case TypeIndex::Int16:
assert_cast<ColumnInt16 &>(column).insertValue(BSONElementAsNumber<Int16, bsoncxx::document::element>(value, name));
break;
case TypeIndex::UInt16:
assert_cast<ColumnUInt16 &>(column).insertValue(BSONElementAsNumber<UInt16, bsoncxx::document::element>(value, name));
break;
case TypeIndex::Int32:
assert_cast<ColumnInt32 &>(column).insertValue(BSONElementAsNumber<Int32, bsoncxx::document::element>(value, name));
break;
case TypeIndex::UInt32:
assert_cast<ColumnUInt32 &>(column).insertValue(BSONElementAsNumber<UInt32, bsoncxx::document::element>(value, name));
break;
case TypeIndex::Int64:
assert_cast<ColumnInt64 &>(column).insertValue(BSONElementAsNumber<Int64, bsoncxx::document::element>(value, name));
break;
case TypeIndex::UInt64:
assert_cast<ColumnUInt64 &>(column).insertValue(BSONElementAsNumber<UInt64, bsoncxx::document::element>(value, name));
break;
case TypeIndex::Int128:
assert_cast<ColumnInt128 &>(column).insertValue(BSONElementAsNumber<Int128, bsoncxx::document::element>(value, name));
break;
case TypeIndex::UInt128:
assert_cast<ColumnUInt128 &>(column).insertValue(BSONElementAsNumber<UInt128, bsoncxx::document::element>(value, name));
break;
case TypeIndex::Int256:
assert_cast<ColumnInt256 &>(column).insertValue(BSONElementAsNumber<Int256, bsoncxx::document::element>(value, name));
break;
case TypeIndex::UInt256:
assert_cast<ColumnUInt256 &>(column).insertValue(BSONElementAsNumber<UInt256, bsoncxx::document::element>(value, name));
break;
case TypeIndex::Float32:
assert_cast<ColumnFloat32 &>(column).insertValue(BSONElementAsNumber<Float32, bsoncxx::document::element>(value, name));
break;
case TypeIndex::Float64:
assert_cast<ColumnFloat64 &>(column).insertValue(BSONElementAsNumber<Float64, bsoncxx::document::element>(value, name));
break;
case TypeIndex::Date:
{
if (value.type() != bsoncxx::type::k_date)
throw Exception(ErrorCodes::TYPE_MISMATCH, "Type mismatch, expected date, got {} for column {}",
bsoncxx::to_string(value.type()), name);
UUID uuid;
assert_cast<ColumnUInt16 &>(column).insertValue(DateLUT::instance().toDayNum(value.get_date().to_int64() / 1000).toUnderType());
break;
}
case TypeIndex::Date32:
{
if (value.type() != bsoncxx::type::k_date)
throw Exception(ErrorCodes::TYPE_MISMATCH, "Type mismatch, expected date, got {} for column {}",
bsoncxx::to_string(value.type()), name);
std::array<Poco::UInt8, 6> src_node = src.getNode();
UInt64 node = 0;
node |= UInt64(src_node[0]) << 40;
node |= UInt64(src_node[1]) << 32;
node |= UInt64(src_node[2]) << 24;
node |= UInt64(src_node[3]) << 16;
node |= UInt64(src_node[4]) << 8;
node |= src_node[5];
assert_cast<ColumnInt32 &>(column).insertValue(DateLUT::instance().toDayNum(value.get_date().to_int64() / 1000).toUnderType());
break;
}
case TypeIndex::DateTime:
{
if (value.type() != bsoncxx::type::k_date)
throw Exception(ErrorCodes::TYPE_MISMATCH, "Type mismatch, expected date, got {} for column {}",
bsoncxx::to_string(value.type()), name);
UUIDHelpers::getHighBytes(uuid) = UInt64(src.getTimeLow()) << 32 | UInt32(src.getTimeMid() << 16 | src.getTimeHiAndVersion());
UUIDHelpers::getLowBytes(uuid) = UInt64(src.getClockSeq()) << 48 | node;
assert_cast<ColumnUInt32 &>(column).insertValue(static_cast<UInt32>(value.get_date().to_int64() / 1000));
break;
}
case TypeIndex::DateTime64:
{
if (value.type() != bsoncxx::type::k_date)
throw Exception(ErrorCodes::TYPE_MISMATCH, "Type mismatch, expected date, got {} for column {}",
bsoncxx::to_string(value.type()), name);
assert_cast<ColumnDecimal<DateTime64> &>(column).insertValue(value.get_date().to_int64());
break;
}
case TypeIndex::UUID:
{
if (value.type() != bsoncxx::type::k_string)
throw Exception(ErrorCodes::TYPE_MISMATCH, "Type mismatch, expected string (UUID), got {} for column {}",
bsoncxx::to_string(value.type()), name);
assert_cast<ColumnUUID &>(column).insertValue(parse<UUID>(value.get_string().value.data()));
break;
}
case TypeIndex::String:
{
assert_cast<ColumnString &>(column).insert(BSONElementAsString(value, json_format_settings));
break;
}
case TypeIndex::Array:
{
if (value.type() != bsoncxx::type::k_array)
throw Exception(ErrorCodes::TYPE_MISMATCH, "Type mismatch, expected array, got {} for column {}",
bsoncxx::to_string(value.type()), name);
assert_cast<ColumnArray &>(column).insert(BSONArrayAsArray(arrays_info[idx].first, value.get_array(), arrays_info[idx].second.first, arrays_info[idx].second.second, name, json_format_settings));
break;
}
default:
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Column {} has unsupported type {}", name, type->getName());
return uuid;
}
/// Reads a BSON element as a number of type T, accepting Int32/Int64/Double/
/// Bool directly, parsing String contents, and mapping BSON null to an empty
/// Field. Throws TYPE_MISMATCH for any other BSON type.
template <typename T>
Field getNumber(const Poco::MongoDB::Element & value, const std::string & name)
{
    switch (value.type())
    {
        case Poco::MongoDB::ElementTraits<Int32>::TypeId:
            return static_cast<T>(static_cast<const Poco::MongoDB::ConcreteElement<Int32> &>(value).value());
        case Poco::MongoDB::ElementTraits<Poco::Int64>::TypeId:
            return static_cast<T>(static_cast<const Poco::MongoDB::ConcreteElement<Poco::Int64> &>(value).value());
        case Poco::MongoDB::ElementTraits<Float64>::TypeId:
            return static_cast<T>(static_cast<const Poco::MongoDB::ConcreteElement<Float64> &>(value).value());
        case Poco::MongoDB::ElementTraits<bool>::TypeId:
            return static_cast<T>(static_cast<const Poco::MongoDB::ConcreteElement<bool> &>(value).value());
        case Poco::MongoDB::ElementTraits<Poco::MongoDB::NullValue>::TypeId:
            return Field();  /// Null element -> empty Field (callers substitute defaults).
        case Poco::MongoDB::ElementTraits<String>::TypeId:
            return parse<T>(static_cast<const Poco::MongoDB::ConcreteElement<String> &>(value).value());
        default:
            throw Exception(ErrorCodes::TYPE_MISMATCH, "Type mismatch, expected a number, got type id = {} for column {}",
                toString(value.type()), name);
    }
}
/// Fills array_info[column_idx] for an Array-typed column: counts the nesting
/// depth, captures the innermost element's default value, and selects a parser
/// lambda that converts one BSON scalar element to a Field of the innermost type.
/// Throws BAD_ARGUMENTS for element types this reader cannot convert.
void prepareMongoDBArrayInfo(
    std::unordered_map<size_t, MongoDBArrayInfo> & array_info, size_t column_idx, const DataTypePtr data_type)
{
    const auto * array_type = assert_cast<const DataTypeArray *>(data_type.get());
    auto nested = array_type->getNestedType();

    /// Unwrap Array(Array(...)) to find the innermost element type and the depth.
    size_t count_dimensions = 1;
    while (isArray(nested))
    {
        ++count_dimensions;
        nested = assert_cast<const DataTypeArray *>(nested.get())->getNestedType();
    }

    /// The default is taken before stripping Nullable, so Nullable(T) defaults to NULL.
    Field default_value = nested->getDefault();
    if (nested->isNullable())
        nested = assert_cast<const DataTypeNullable *>(nested.get())->getNestedType();

    WhichDataType which(nested);
    std::function<Field(const Poco::MongoDB::Element & value, const std::string & name)> parser;

    if (which.isUInt8())
        parser = [](const Poco::MongoDB::Element & value, const std::string & name) -> Field { return getNumber<UInt8>(value, name); };
    else if (which.isUInt16())
        parser = [](const Poco::MongoDB::Element & value, const std::string & name) -> Field { return getNumber<UInt16>(value, name); };
    else if (which.isUInt32())
        parser = [](const Poco::MongoDB::Element & value, const std::string & name) -> Field { return getNumber<UInt32>(value, name); };
    else if (which.isUInt64())
        parser = [](const Poco::MongoDB::Element & value, const std::string & name) -> Field { return getNumber<UInt64>(value, name); };
    else if (which.isInt8())
        parser = [](const Poco::MongoDB::Element & value, const std::string & name) -> Field { return getNumber<Int8>(value, name); };
    else if (which.isInt16())
        parser = [](const Poco::MongoDB::Element & value, const std::string & name) -> Field { return getNumber<Int16>(value, name); };
    else if (which.isInt32())
        parser = [](const Poco::MongoDB::Element & value, const std::string & name) -> Field { return getNumber<Int32>(value, name); };
    else if (which.isInt64())
        parser = [](const Poco::MongoDB::Element & value, const std::string & name) -> Field { return getNumber<Int64>(value, name); };
    else if (which.isFloat32())
        parser = [](const Poco::MongoDB::Element & value, const std::string & name) -> Field { return getNumber<Float32>(value, name); };
    else if (which.isFloat64())
        parser = [](const Poco::MongoDB::Element & value, const std::string & name) -> Field { return getNumber<Float64>(value, name); };
    else if (which.isString() || which.isFixedString())
        parser = [](const Poco::MongoDB::Element & value, const std::string & name) -> Field
        {
            /// ObjectId elements are rendered via toString(); plain strings are copied.
            if (value.type() == Poco::MongoDB::ElementTraits<ObjectId::Ptr>::TypeId)
            {
                String string_id = value.toString();
                return Field(string_id.data(), string_id.size());
            }
            else if (value.type() == Poco::MongoDB::ElementTraits<String>::TypeId)
            {
                String string = static_cast<const Poco::MongoDB::ConcreteElement<String> &>(value).value();
                return Field(string.data(), string.size());
            }

            throw Exception(ErrorCodes::TYPE_MISMATCH, "Type mismatch, expected String, got type id = {} for column {}",
                toString(value.type()), name);
        };
    else if (which.isDate())
        parser = [](const Poco::MongoDB::Element & value, const std::string & name) -> Field
        {
            if (value.type() != Poco::MongoDB::ElementTraits<Poco::Timestamp>::TypeId)
                throw Exception(ErrorCodes::TYPE_MISMATCH, "Type mismatch, expected Timestamp, got type id = {} for column {}",
                    toString(value.type()), name);

            return static_cast<UInt16>(DateLUT::instance().toDayNum(
                static_cast<const Poco::MongoDB::ConcreteElement<Poco::Timestamp> &>(value).value().epochTime()));
        };
    else if (which.isDateTime())
        parser = [](const Poco::MongoDB::Element & value, const std::string & name) -> Field
        {
            if (value.type() != Poco::MongoDB::ElementTraits<Poco::Timestamp>::TypeId)
                throw Exception(ErrorCodes::TYPE_MISMATCH, "Type mismatch, expected Timestamp, got type id = {} for column {}",
                    toString(value.type()), name);

            return static_cast<UInt32>(static_cast<const Poco::MongoDB::ConcreteElement<Poco::Timestamp> &>(value).value().epochTime());
        };
    else if (which.isUUID())
        parser = [](const Poco::MongoDB::Element & value, const std::string & name) -> Field
        {
            /// UUIDs arrive either as their textual form or as a BSON binary subtype.
            if (value.type() == Poco::MongoDB::ElementTraits<String>::TypeId)
            {
                String string = static_cast<const Poco::MongoDB::ConcreteElement<String> &>(value).value();
                return parse<UUID>(string);
            }
            else if (value.type() == Poco::MongoDB::ElementTraits<MongoUUID>::TypeId)
            {
                const Poco::UUID & poco_uuid = static_cast<const Poco::MongoDB::ConcreteElement<MongoUUID> &>(value).value()->uuid();
                return parsePocoUUID(poco_uuid);
            }
            else
                throw Exception(ErrorCodes::TYPE_MISMATCH, "Type mismatch, expected String/UUID, got type id = {} for column {}",
                    toString(value.type()), name);
        };
    else
        throw Exception(ErrorCodes::BAD_ARGUMENTS, "Type conversion to {} is not supported", nested->getName());

    array_info[column_idx] = {count_dimensions, default_value, parser};
}
/// Appends one BSON element to a ColumnVector<T>, converting from Int32/Int64/
/// Double/Bool, parsing String, and inserting a default-constructed value for
/// BSON null. Throws TYPE_MISMATCH for other element types.
template <typename T>
void insertNumber(IColumn & column, const Poco::MongoDB::Element & value, const std::string & name)
{
    switch (value.type())
    {
        case Poco::MongoDB::ElementTraits<Int32>::TypeId:
            assert_cast<ColumnVector<T> &>(column).getData().push_back(
                static_cast<const Poco::MongoDB::ConcreteElement<Int32> &>(value).value());
            break;
        case Poco::MongoDB::ElementTraits<Poco::Int64>::TypeId:
            assert_cast<ColumnVector<T> &>(column).getData().push_back(
                static_cast<T>(static_cast<const Poco::MongoDB::ConcreteElement<Poco::Int64> &>(value).value()));
            break;
        case Poco::MongoDB::ElementTraits<Float64>::TypeId:
            assert_cast<ColumnVector<T> &>(column).getData().push_back(static_cast<T>(
                static_cast<const Poco::MongoDB::ConcreteElement<Float64> &>(value).value()));
            break;
        case Poco::MongoDB::ElementTraits<bool>::TypeId:
            assert_cast<ColumnVector<T> &>(column).getData().push_back(
                static_cast<const Poco::MongoDB::ConcreteElement<bool> &>(value).value());
            break;
        case Poco::MongoDB::ElementTraits<Poco::MongoDB::NullValue>::TypeId:
            /// Null becomes T{} (zero) in the numeric column.
            assert_cast<ColumnVector<T> &>(column).getData().emplace_back();
            break;
        case Poco::MongoDB::ElementTraits<String>::TypeId:
            assert_cast<ColumnVector<T> &>(column).getData().push_back(
                parse<T>(static_cast<const Poco::MongoDB::ConcreteElement<String> &>(value).value()));
            break;
        default:
            throw Exception(ErrorCodes::TYPE_MISMATCH, "Type mismatch, expected a number, got type id = {} for column {}",
                toString(value.type()), name);
    }
}
/// Appends one BSON element to `column`, converting it to the ClickHouse value
/// type from the external-table description. `array_info` (keyed by column
/// index) supplies precomputed metadata for Array columns. Throws
/// TYPE_MISMATCH/BAD_ARGUMENTS on conversion failures, UNKNOWN_TYPE for
/// unsupported column types.
void insertValue(
    IColumn & column,
    const ValueType type,
    const Poco::MongoDB::Element & value,
    const std::string & name,
    std::unordered_map<size_t, MongoDBArrayInfo> & array_info,
    size_t idx)
{
    switch (type)
    {
        /// All numeric targets funnel through insertNumber<T>.
        case ValueType::vtUInt8:
            insertNumber<UInt8>(column, value, name);
            break;
        case ValueType::vtUInt16:
            insertNumber<UInt16>(column, value, name);
            break;
        case ValueType::vtUInt32:
            insertNumber<UInt32>(column, value, name);
            break;
        case ValueType::vtUInt64:
            insertNumber<UInt64>(column, value, name);
            break;
        case ValueType::vtInt8:
            insertNumber<Int8>(column, value, name);
            break;
        case ValueType::vtInt16:
            insertNumber<Int16>(column, value, name);
            break;
        case ValueType::vtInt32:
            insertNumber<Int32>(column, value, name);
            break;
        case ValueType::vtInt64:
            insertNumber<Int64>(column, value, name);
            break;
        case ValueType::vtFloat32:
            insertNumber<Float32>(column, value, name);
            break;
        case ValueType::vtFloat64:
            insertNumber<Float64>(column, value, name);
            break;

        /// Enums and Strings both land in a ColumnString; ObjectId is stringified.
        case ValueType::vtEnum8:
        case ValueType::vtEnum16:
        case ValueType::vtString:
        {
            if (value.type() == Poco::MongoDB::ElementTraits<ObjectId::Ptr>::TypeId)
            {
                std::string string_id = value.toString();
                assert_cast<ColumnString &>(column).insertData(string_id.data(), string_id.size());
                break;
            }
            else if (value.type() == Poco::MongoDB::ElementTraits<String>::TypeId)
            {
                String string = static_cast<const Poco::MongoDB::ConcreteElement<String> &>(value).value();
                assert_cast<ColumnString &>(column).insertData(string.data(), string.size());
                break;
            }

            throw Exception(ErrorCodes::TYPE_MISMATCH, "Type mismatch, expected String, got type id = {} for column {}",
                toString(value.type()), name);
        }

        case ValueType::vtDate:
        {
            if (value.type() != Poco::MongoDB::ElementTraits<Poco::Timestamp>::TypeId)
                throw Exception(ErrorCodes::TYPE_MISMATCH, "Type mismatch, expected Timestamp, got type id = {} for column {}",
                    toString(value.type()), name);

            /// Timestamp -> day number via the server's DateLUT.
            assert_cast<ColumnUInt16 &>(column).getData().push_back(static_cast<UInt16>(DateLUT::instance().toDayNum(
                static_cast<const Poco::MongoDB::ConcreteElement<Poco::Timestamp> &>(value).value().epochTime())));
            break;
        }

        case ValueType::vtDateTime:
        {
            if (value.type() != Poco::MongoDB::ElementTraits<Poco::Timestamp>::TypeId)
                throw Exception(ErrorCodes::TYPE_MISMATCH, "Type mismatch, expected Timestamp, got type id = {} for column {}",
                    toString(value.type()), name);

            assert_cast<ColumnUInt32 &>(column).getData().push_back(
                static_cast<UInt32>(static_cast<const Poco::MongoDB::ConcreteElement<Poco::Timestamp> &>(value).value().epochTime()));
            break;
        }

        case ValueType::vtUUID:
        {
            /// UUIDs arrive either as their textual form or as a BSON binary subtype.
            if (value.type() == Poco::MongoDB::ElementTraits<String>::TypeId)
            {
                String string = static_cast<const Poco::MongoDB::ConcreteElement<String> &>(value).value();
                assert_cast<ColumnUUID &>(column).getData().push_back(parse<UUID>(string));
            }
            else if (value.type() == Poco::MongoDB::ElementTraits<MongoUUID>::TypeId)
            {
                const Poco::UUID & poco_uuid = static_cast<const Poco::MongoDB::ConcreteElement<MongoUUID> &>(value).value()->uuid();
                UUID uuid = parsePocoUUID(poco_uuid);
                assert_cast<ColumnUUID &>(column).getData().push_back(uuid);
            }
            else
                throw Exception(ErrorCodes::TYPE_MISMATCH, "Type mismatch, expected String/UUID, got type id = {} for column {}",
                    toString(value.type()), name);
            break;
        }

        case ValueType::vtArray:
        {
            if (value.type() != Poco::MongoDB::ElementTraits<MongoArray::Ptr>::TypeId)
                throw Exception(ErrorCodes::TYPE_MISMATCH, "Type mismatch, expected Array, got type id = {} for column {}",
                    toString(value.type()), name);

            size_t expected_dimensions = array_info[idx].num_dimensions;
            const auto parse_value = array_info[idx].parser;
            /// dimensions[d] accumulates elements at nesting level d; level 1 is the outermost.
            std::vector<Row> dimensions(expected_dimensions + 1);

            auto array = static_cast<const Poco::MongoDB::ConcreteElement<MongoArray::Ptr> &>(value).value();

            /// Iterative depth-first walk: each stack entry is (array element, next child index).
            std::vector<std::pair<const Poco::MongoDB::Element *, size_t>> arrays;
            arrays.emplace_back(&value, 0);

            while (!arrays.empty())
            {
                size_t dimension_idx = arrays.size() - 1;

                if (dimension_idx + 1 > expected_dimensions)
                    throw Exception(ErrorCodes::BAD_ARGUMENTS, "Got more dimensions than expected");

                auto [parent_ptr, child_idx] = arrays.back();
                auto parent = static_cast<const Poco::MongoDB::ConcreteElement<MongoArray::Ptr> &>(*parent_ptr).value();

                /// Current array exhausted: fold its collected children into the level above.
                if (child_idx >= parent->size())
                {
                    arrays.pop_back();

                    if (dimension_idx == 0)
                        break;

                    dimensions[dimension_idx].emplace_back(Array(dimensions[dimension_idx + 1].begin(), dimensions[dimension_idx + 1].end()));
                    dimensions[dimension_idx + 1].clear();

                    continue;
                }

                Poco::MongoDB::Element::Ptr child = parent->get(static_cast<int>(child_idx));
                arrays.back().second += 1;

                if (child->type() == Poco::MongoDB::ElementTraits<MongoArray::Ptr>::TypeId)
                {
                    /// Descend into a nested array.
                    arrays.emplace_back(child.get(), 0);
                }
                else if (child->type() == Poco::MongoDB::ElementTraits<Poco::MongoDB::NullValue>::TypeId)
                {
                    /// Null: default scalar at the innermost level, empty array otherwise.
                    if (dimension_idx + 1 == expected_dimensions)
                        dimensions[dimension_idx + 1].emplace_back(array_info[idx].default_value);
                    else
                        dimensions[dimension_idx + 1].emplace_back(Array());
                }
                else if (dimension_idx + 1 == expected_dimensions)
                {
                    dimensions[dimension_idx + 1].emplace_back(parse_value(*child, name));
                }
                else
                {
                    throw Exception(ErrorCodes::BAD_ARGUMENTS,
                        "Got less dimensions than expected. ({} instead of {})", dimension_idx + 1, expected_dimensions);
                }
            }

            assert_cast<ColumnArray &>(column).insert(Array(dimensions[1].begin(), dimensions[1].end()));
            break;
        }

        default:
            throw Exception(ErrorCodes::UNKNOWN_TYPE, "Value of unsupported type: {}", column.getName());
    }
}
/// Fill a cell for a missing/null field by copying row 0 of the sample column.
void insertDefaultValue(IColumn & column, const IColumn & sample_column) { column.insertFrom(sample_column, 0); }
}
/// Decides whether to talk the legacy OP_QUERY protocol to this server.
/// Probes the server hello twice (first without OP_MSG, then with it) and
/// treats any reported maxWireVersion below 3.6 — or no answer at all — as "old".
bool isMongoDBWireProtocolOld(Poco::MongoDB::Connection & connection_, const std::string & database_name_)
{
    Poco::MongoDB::Database db(database_name_);

    for (const bool use_op_msg : {false, true})
    {
        Poco::MongoDB::Document::Ptr hello = db.queryServerHello(connection_, use_op_msg);
        if (hello->exists("maxWireVersion"))
            return hello->getInteger("maxWireVersion") < Poco::MongoDB::Database::WireVersion::VER_36;
    }

    /// The server answered neither probe with a wire version: assume legacy.
    return true;
}
/// Builds a projection from the sample block and creates either a legacy
/// OP_QUERY cursor or an OP_MSG "find" cursor, depending on the server's
/// wire protocol version (probed via isMongoDBWireProtocolOld).
MongoDBCursor::MongoDBCursor(
    const std::string & database,
    const std::string & collection,
    const Block & sample_block_to_select,
    const Poco::MongoDB::Document & query,
    Poco::MongoDB::Connection & connection)
    : is_wire_protocol_old(isMongoDBWireProtocolOld(connection, database))
{
    Poco::MongoDB::Document projection;

    /// Looks like selecting _id column is implicit by default.
    /// Suppress it explicitly unless the caller asked for it.
    if (!sample_block_to_select.has("_id"))
        projection.add("_id", 0);

    /// Request exactly the columns of the sample block.
    for (const auto & column : sample_block_to_select)
        projection.add(column.name, 1);

    if (is_wire_protocol_old)
    {
        /// Legacy OP_QUERY: selector + returnFieldSelector.
        old_cursor = std::make_unique<Poco::MongoDB::Cursor>(database, collection);
        old_cursor->query().selector() = query;
        old_cursor->query().returnFieldSelector() = projection;
    }
    else
    {
        /// Modern OP_MSG: a "find" command with filter/projection sub-documents.
        new_cursor = std::make_unique<Poco::MongoDB::OpMsgCursor>(database, collection);
        new_cursor->query().setCommandName(Poco::MongoDB::OpMsgMessage::CMD_FIND);
        new_cursor->query().body().addNewDocument("filter") = query;
        new_cursor->query().body().addNewDocument("projection") = projection;
    }
}
/// Fetches the next batch of documents from the server through whichever
/// cursor flavor was created, and remembers the server-side cursor id so the
/// caller can detect exhaustion (id == 0).
Poco::MongoDB::Document::Vector MongoDBCursor::nextDocuments(Poco::MongoDB::Connection & connection)
{
    if (!is_wire_protocol_old)
    {
        auto response = new_cursor->next(connection);
        /// OP_MSG keeps the cursor id on the cursor object, not the response.
        cursor_id = new_cursor->cursorID();
        return std::move(response.documents());
    }

    auto response = old_cursor->next(connection);
    cursor_id = response.cursorID();
    return std::move(response.documents());
}
/// Server-side cursor id of the last fetched batch; 0 once the cursor is exhausted.
Int64 MongoDBCursor::cursorID() const
{
    return cursor_id;
}
MongoDBSource::MongoDBSource(
const mongocxx::uri & uri,
const std::string & collection_name,
const bsoncxx::document::view_or_value & query,
const mongocxx::options::find & options,
const Block & sample_block_,
const UInt64 & max_block_size_)
: ISource{sample_block_}
, client{uri}
, database{client.database(uri.database())}
, collection{database.collection(collection_name)}
, cursor{collection.find(query, options)}
, sample_block{sample_block_}
std::shared_ptr<Poco::MongoDB::Connection> & connection_,
const String & database_name_,
const String & collection_name_,
const Poco::MongoDB::Document & query_,
const Block & sample_block,
UInt64 max_block_size_)
: ISource(sample_block.cloneEmpty())
, connection(connection_)
, cursor(database_name_, collection_name_, sample_block, query_, *connection_)
, max_block_size{max_block_size_}
{
for (const auto & idx : collections::range(0, sample_block.columns()))
{
auto & sample_column = sample_block.getByPosition(idx);
description.init(sample_block);
/// If default value for column was not provided, use default from data type.
if (sample_column.column->empty())
sample_column.column = sample_column.type->createColumnConstWithDefaultValue(1)->convertToFullColumnIfConst();
if (sample_column.type->getTypeId() == TypeIndex::Array)
{
auto type = assert_cast<const DataTypeArray &>(*sample_column.type).getNestedType();
size_t dimensions = 0;
while (type->getTypeId() == TypeIndex::Array)
{
type = assert_cast<const DataTypeArray &>(*type).getNestedType();
++dimensions;
}
if (type->isNullable())
{
type = assert_cast<const DataTypeNullable &>(*type).getNestedType();
arrays_info[idx] = {std::move(dimensions), {std::move(type), Null()}};
}
else
arrays_info[idx] = {std::move(dimensions), {std::move(type), type->getDefault()}};
}
}
for (const auto idx : collections::range(0, description.sample_block.columns()))
if (description.types[idx].first == ExternalResultDescription::ValueType::vtArray)
prepareMongoDBArrayInfo(array_info, idx, description.sample_block.getByPosition(idx).type);
}
@ -195,45 +506,72 @@ Chunk MongoDBSource::generate()
if (all_read)
return {};
auto columns = sample_block.cloneEmptyColumns();
size_t size = columns.size();
MutableColumns columns(description.sample_block.columns());
const size_t size = columns.size();
for (const auto i : collections::range(0, size))
columns[i] = description.sample_block.getByPosition(i).column->cloneEmpty();
size_t num_rows = 0;
for (const auto & doc : cursor)
while (num_rows < max_block_size)
{
for (auto idx : collections::range(0, size))
auto documents = cursor.nextDocuments(*connection);
for (auto & document : documents)
{
auto & sample_column = sample_block.getByPosition(idx);
auto value = doc[sample_column.name];
if (value && value.type() != bsoncxx::type::k_null)
if (document->exists("ok") && document->exists("$err")
&& document->exists("code") && document->getInteger("ok") == 0)
{
if (sample_column.type->isNullable())
{
auto & column_nullable = assert_cast<ColumnNullable &>(*columns[idx]);
const auto & type_nullable = assert_cast<const DataTypeNullable &>(*sample_column.type);
auto code = document->getInteger("code");
const Poco::MongoDB::Element::Ptr value = document->get("$err");
auto message = static_cast<const Poco::MongoDB::ConcreteElement<String> &>(*value).value();
throw Exception(ErrorCodes::MONGODB_ERROR, "Got error from MongoDB: {}, code: {}", message, code);
}
++num_rows;
insertValue(column_nullable.getNestedColumn(), idx, type_nullable.getNestedType(), sample_column.name, value);
column_nullable.getNullMapData().emplace_back(0);
for (const auto idx : collections::range(0, size))
{
const auto & name = description.sample_block.getByPosition(idx).name;
bool exists_in_current_document = document->exists(name);
if (!exists_in_current_document)
{
insertDefaultValue(*columns[idx], *description.sample_block.getByPosition(idx).column);
continue;
}
const Poco::MongoDB::Element::Ptr value = document->get(name);
if (value.isNull() || value->type() == Poco::MongoDB::ElementTraits<Poco::MongoDB::NullValue>::TypeId)
{
insertDefaultValue(*columns[idx], *description.sample_block.getByPosition(idx).column);
}
else
insertValue(*columns[idx], idx, sample_column.type, sample_column.name, value);
{
bool is_nullable = description.types[idx].second;
if (is_nullable)
{
ColumnNullable & column_nullable = assert_cast<ColumnNullable &>(*columns[idx]);
insertValue(column_nullable.getNestedColumn(), description.types[idx].first, *value, name, array_info, idx);
column_nullable.getNullMapData().emplace_back(0);
}
else
insertValue(*columns[idx], description.types[idx].first, *value, name, array_info, idx);
}
}
else
insertDefaultValue(*columns[idx], *sample_column.column);
}
if (++num_rows == max_block_size)
if (cursor.cursorID() == 0)
{
all_read = true;
break;
}
}
if (num_rows < max_block_size)
all_read = true;
if (num_rows == 0)
return {};
return Chunk(std::move(columns), std::move(num_rows));
return Chunk(std::move(columns), num_rows);
}
}
#endif

View File

@ -1,54 +1,87 @@
#pragma once
#include "config.h"
#include <Poco/MongoDB/Element.h>
#include <Poco/MongoDB/Array.h>
#if USE_MONGODB
#include <Core/Block.h>
#include <Processors/ISource.h>
#include <Interpreters/Context.h>
#include <Common/JSONBuilder.h>
#include <Core/ExternalResultDescription.h>
#include <mongocxx/client.hpp>
#include <mongocxx/collection.hpp>
#include <mongocxx/cursor.hpp>
#include <mongocxx/database.hpp>
#include <Core/Field.h>
namespace Poco
{
namespace MongoDB
{
class Connection;
class Document;
class Cursor;
class OpMsgCursor;
}
}
namespace DB
{
/// Creates MongoDB connection and cursor, converts it to a stream of blocks
struct MongoDBArrayInfo
{
size_t num_dimensions;
Field default_value;
std::function<Field(const Poco::MongoDB::Element & value, const std::string & name)> parser;
};
void authenticate(Poco::MongoDB::Connection & connection, const std::string & database, const std::string & user, const std::string & password);
bool isMongoDBWireProtocolOld(Poco::MongoDB::Connection & connection_, const std::string & database_name_);
class MongoDBCursor
{
public:
MongoDBCursor(
const std::string & database,
const std::string & collection,
const Block & sample_block_to_select,
const Poco::MongoDB::Document & query,
Poco::MongoDB::Connection & connection);
Poco::MongoDB::Document::Vector nextDocuments(Poco::MongoDB::Connection & connection);
Int64 cursorID() const;
private:
const bool is_wire_protocol_old;
std::unique_ptr<Poco::MongoDB::Cursor> old_cursor;
std::unique_ptr<Poco::MongoDB::OpMsgCursor> new_cursor;
Int64 cursor_id = 0;
};
/// Converts MongoDB Cursor to a stream of Blocks
class MongoDBSource final : public ISource
{
public:
MongoDBSource(
const mongocxx::uri & uri,
const std::string & collection_name,
const bsoncxx::document::view_or_value & query,
const mongocxx::options::find & options,
const Block & sample_block_,
const UInt64 & max_block_size_);
std::shared_ptr<Poco::MongoDB::Connection> & connection_,
const String & database_name_,
const String & collection_name_,
const Poco::MongoDB::Document & query_,
const Block & sample_block,
UInt64 max_block_size_);
~MongoDBSource() override;
String getName() const override { return "MongoDB"; }
private:
static void insertDefaultValue(IColumn & column, const IColumn & sample_column);
void insertValue(IColumn & column, const size_t & idx, const DataTypePtr & type, const std::string & name, const bsoncxx::document::element & value);
Chunk generate() override;
mongocxx::client client;
mongocxx::database database;
mongocxx::collection collection;
mongocxx::cursor cursor;
Block sample_block;
std::unordered_map<size_t, std::pair<size_t, std::pair<DataTypePtr, Field>>> arrays_info;
std::shared_ptr<Poco::MongoDB::Connection> connection;
MongoDBCursor cursor;
const UInt64 max_block_size;
JSONBuilder::FormatSettings json_format_settings = {{}, 0, true, true};
ExternalResultDescription description;
bool all_read = false;
std::unordered_map<size_t, MongoDBArrayInfo> array_info;
};
}
#endif

View File

@ -1,37 +1,26 @@
#include "config.h"
#if USE_MONGODB
#include <memory>
#include <Analyzer/ColumnNode.h>
#include <Analyzer/ConstantNode.h>
#include <Analyzer/FunctionNode.h>
#include <Analyzer/QueryNode.h>
#include <Analyzer/TableNode.h>
#include <Analyzer/JoinNode.h>
#include <Analyzer/SortNode.h>
#include <Formats/BSONTypes.h>
#include <Interpreters/evaluateConstantExpression.h>
#include <Parsers/ASTIdentifier.h>
#include <Processors/Sources/MongoDBSource.h>
#include <QueryPipeline/Pipe.h>
#include <Storages/NamedCollectionsHelpers.h>
#include <Storages/StorageFactory.h>
#include <Storages/StorageMongoDB.h>
#include <Storages/StorageMongoDBSocketFactory.h>
#include <Storages/StorageFactory.h>
#include <Storages/checkAndGetLiteralArgument.h>
#include <Common/parseAddress.h>
#include <Common/ErrorCodes.h>
#include <Common/BSONCXXHelper.h>
#include <Storages/NamedCollectionsHelpers.h>
#include <Poco/MongoDB/Connection.h>
#include <Poco/MongoDB/Cursor.h>
#include <Poco/MongoDB/Database.h>
#include <Interpreters/evaluateConstantExpression.h>
#include <Common/RemoteHostFilter.h>
#include <Core/Settings.h>
#include <Core/Joins.h>
#include <Interpreters/Context.h>
#include <Common/parseAddress.h>
#include <Common/NamedCollections/NamedCollections.h>
#include <IO/Operators.h>
#include <Parsers/ASTLiteral.h>
#include <QueryPipeline/Pipe.h>
#include <Processors/Sources/MongoDBSource.h>
#include <Processors/Sinks/SinkToStorage.h>
#include <base/range.h>
#include <bsoncxx/json.hpp>
using bsoncxx::builder::basic::document;
using bsoncxx::builder::basic::make_document;
using bsoncxx::builder::basic::make_array;
using bsoncxx::builder::basic::kvp;
using bsoncxx::to_json;
#include <DataTypes/DataTypeArray.h>
namespace DB
{
@ -39,27 +28,27 @@ namespace DB
namespace ErrorCodes
{
extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
extern const int NOT_IMPLEMENTED;
extern const int MONGODB_CANNOT_AUTHENTICATE;
}
namespace Setting
{
extern const SettingsBool allow_experimental_analyzer;
extern const SettingsBool mongodb_throw_on_unsupported_query;
}
using BSONCXXHelper::fieldAsBSONValue;
using BSONCXXHelper::fieldAsOID;
StorageMongoDB::StorageMongoDB(
const StorageID & table_id_,
MongoDBConfiguration configuration_,
const std::string & host_,
uint16_t port_,
const std::string & database_name_,
const std::string & collection_name_,
const std::string & username_,
const std::string & password_,
const std::string & options_,
const ColumnsDescription & columns_,
const ConstraintsDescription & constraints_,
const String & comment)
: IStorage{table_id_}
, configuration{std::move(configuration_)}
, log(getLogger("StorageMongoDB (" + table_id_.table_name + ")"))
: IStorage(table_id_)
, database_name(database_name_)
, collection_name(collection_name_)
, username(username_)
, password(password_)
, uri("mongodb://" + host_ + ":" + std::to_string(port_) + "/" + database_name_ + "?" + options_)
{
StorageInMemoryMetadata storage_metadata;
storage_metadata.setColumns(columns_);
@ -68,15 +57,175 @@ StorageMongoDB::StorageMongoDB(
setInMemoryMetadata(storage_metadata);
}
void StorageMongoDB::connectIfNotConnected()
{
std::lock_guard lock{connection_mutex};
if (!connection)
{
StorageMongoDBSocketFactory factory;
connection = std::make_shared<Poco::MongoDB::Connection>(uri, factory);
}
if (!authenticated)
{
Poco::URI poco_uri(uri);
auto query_params = poco_uri.getQueryParameters();
auto auth_source = std::find_if(query_params.begin(), query_params.end(),
[&](const std::pair<std::string, std::string> & param) { return param.first == "authSource"; });
auto auth_db = database_name;
if (auth_source != query_params.end())
auth_db = auth_source->second;
if (!username.empty() && !password.empty())
{
Poco::MongoDB::Database poco_db(auth_db);
if (!poco_db.authenticate(*connection, username, password, Poco::MongoDB::Database::AUTH_SCRAM_SHA1))
throw Exception(ErrorCodes::MONGODB_CANNOT_AUTHENTICATE, "Cannot authenticate in MongoDB, incorrect user or password");
}
authenticated = true;
}
}
class StorageMongoDBSink : public SinkToStorage
{
public:
explicit StorageMongoDBSink(
const std::string & collection_name_,
const std::string & db_name_,
const StorageMetadataPtr & metadata_snapshot_,
std::shared_ptr<Poco::MongoDB::Connection> connection_)
: SinkToStorage(metadata_snapshot_->getSampleBlock())
, collection_name(collection_name_)
, db_name(db_name_)
, metadata_snapshot{metadata_snapshot_}
, connection(connection_)
, is_wire_protocol_old(isMongoDBWireProtocolOld(*connection_, db_name))
{
}
String getName() const override { return "StorageMongoDBSink"; }
void consume(Chunk & chunk) override
{
Poco::MongoDB::Database db(db_name);
Poco::MongoDB::Document::Vector documents;
auto block = getHeader().cloneWithColumns(chunk.getColumns());
size_t num_rows = block.rows();
size_t num_cols = block.columns();
const auto columns = block.getColumns();
const auto data_types = block.getDataTypes();
const auto data_names = block.getNames();
documents.reserve(num_rows);
for (const auto i : collections::range(0, num_rows))
{
Poco::MongoDB::Document::Ptr document = new Poco::MongoDB::Document();
for (const auto j : collections::range(0, num_cols))
{
insertValueIntoMongoDB(*document, data_names[j], *data_types[j], *columns[j], i);
}
documents.push_back(std::move(document));
}
if (is_wire_protocol_old)
{
Poco::SharedPtr<Poco::MongoDB::InsertRequest> insert_request = db.createInsertRequest(collection_name);
insert_request->documents() = std::move(documents);
connection->sendRequest(*insert_request);
}
else
{
Poco::SharedPtr<Poco::MongoDB::OpMsgMessage> insert_request = db.createOpMsgMessage(collection_name);
insert_request->setCommandName(Poco::MongoDB::OpMsgMessage::CMD_INSERT);
insert_request->documents() = std::move(documents);
connection->sendRequest(*insert_request);
}
}
private:
void insertValueIntoMongoDB(
Poco::MongoDB::Document & document,
const std::string & name,
const IDataType & data_type,
const IColumn & column,
size_t idx)
{
WhichDataType which(data_type);
if (which.isArray())
{
const ColumnArray & column_array = assert_cast<const ColumnArray &>(column);
const ColumnArray::Offsets & offsets = column_array.getOffsets();
size_t offset = offsets[idx - 1];
size_t next_offset = offsets[idx];
const IColumn & nested_column = column_array.getData();
const auto * array_type = assert_cast<const DataTypeArray *>(&data_type);
const DataTypePtr & nested_type = array_type->getNestedType();
Poco::MongoDB::Array::Ptr array = new Poco::MongoDB::Array();
for (size_t i = 0; i + offset < next_offset; ++i)
{
insertValueIntoMongoDB(*array, Poco::NumberFormatter::format(i), *nested_type, nested_column, i + offset);
}
document.add(name, array);
return;
}
/// MongoDB does not support UInt64 type, so just cast it to Int64
if (which.isNativeUInt())
document.add(name, static_cast<Poco::Int64>(column.getUInt(idx)));
else if (which.isNativeInt())
document.add(name, static_cast<Poco::Int64>(column.getInt(idx)));
else if (which.isFloat32())
document.add(name, static_cast<Float64>(column.getFloat32(idx)));
else if (which.isFloat64())
document.add(name, column.getFloat64(idx));
else if (which.isDate())
document.add(name, Poco::Timestamp(DateLUT::instance().fromDayNum(DayNum(column.getUInt(idx))) * 1000000));
else if (which.isDateTime())
document.add(name, Poco::Timestamp(column.getUInt(idx) * 1000000));
else
{
WriteBufferFromOwnString ostr;
data_type.getDefaultSerialization()->serializeText(column, idx, ostr, FormatSettings{});
document.add(name, ostr.str());
}
}
String collection_name;
String db_name;
StorageMetadataPtr metadata_snapshot;
std::shared_ptr<Poco::MongoDB::Connection> connection;
const bool is_wire_protocol_old;
};
Pipe StorageMongoDB::read(
const Names & column_names,
const StorageSnapshotPtr & storage_snapshot,
SelectQueryInfo & query_info,
ContextPtr context,
SelectQueryInfo & /*query_info*/,
ContextPtr /*context*/,
QueryProcessingStage::Enum /*processed_stage*/,
size_t max_block_size,
size_t /*num_streams*/)
{
connectIfNotConnected();
storage_snapshot->check(column_names);
Block sample_block;
@ -86,329 +235,79 @@ Pipe StorageMongoDB::read(
sample_block.insert({ column_data.type, column_data.name });
}
auto options = mongocxx::options::find{};
return Pipe(std::make_shared<MongoDBSource>(*configuration.uri, configuration.collection, buildMongoDBQuery(context, options, query_info, sample_block),
std::move(options), sample_block, max_block_size));
return Pipe(std::make_shared<MongoDBSource>(connection, database_name, collection_name, Poco::MongoDB::Document{}, sample_block, max_block_size));
}
MongoDBConfiguration StorageMongoDB::getConfiguration(ASTs engine_args, ContextPtr context)
SinkToStoragePtr StorageMongoDB::write(const ASTPtr & /* query */, const StorageMetadataPtr & metadata_snapshot, ContextPtr /* context */, bool /*async_insert*/)
{
MongoDBConfiguration configuration;
connectIfNotConnected();
return std::make_shared<StorageMongoDBSink>(collection_name, database_name, metadata_snapshot, connection);
}
StorageMongoDB::Configuration StorageMongoDB::getConfiguration(ASTs engine_args, ContextPtr context)
{
Configuration configuration;
if (auto named_collection = tryGetNamedCollectionWithOverrides(engine_args, context))
{
if (named_collection->has("uri"))
{
validateNamedCollection(*named_collection, {"collection"}, {"uri"});
configuration.uri = std::make_unique<mongocxx::uri>(named_collection->get<String>("uri"));
}
else
{
validateNamedCollection(*named_collection, {"host", "port", "user", "password", "database", "collection"}, {"options"});
String user = named_collection->get<String>("user");
String auth_string;
if (!user.empty())
auth_string = fmt::format("{}:{}@", user, named_collection->get<String>("password"));
configuration.uri = std::make_unique<mongocxx::uri>(fmt::format("mongodb://{}{}:{}/{}?{}",
auth_string,
named_collection->get<String>("host"),
named_collection->get<String>("port"),
named_collection->get<String>("database"),
named_collection->getOrDefault<String>("options", "")));
}
configuration.collection = named_collection->get<String>("collection");
validateNamedCollection(
*named_collection,
ValidateKeysMultiset<MongoDBEqualKeysSet>{"host", "port", "user", "username", "password", "database", "db", "collection", "table"},
{"options"});
configuration.host = named_collection->getAny<String>({"host", "hostname"});
configuration.port = static_cast<UInt16>(named_collection->get<UInt64>("port"));
configuration.username = named_collection->getAny<String>({"user", "username"});
configuration.password = named_collection->get<String>("password");
configuration.database = named_collection->getAny<String>({"database", "db"});
configuration.table = named_collection->getAny<String>({"collection", "table"});
configuration.options = named_collection->getOrDefault<String>("options", "");
}
else
{
if (engine_args.size() < 5 || engine_args.size() > 6)
throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH,
"Storage MongoDB requires from 5 to 6 parameters: "
"MongoDB('host:port', database, collection, 'user', 'password' [, 'options']).");
for (auto & engine_arg : engine_args)
engine_arg = evaluateConstantExpressionOrIdentifierAsLiteral(engine_arg, context);
if (engine_args.size() == 5 || engine_args.size() == 6)
{
configuration.collection = checkAndGetLiteralArgument<String>(engine_args[2], "collection");
/// 27017 is the default MongoDB port.
auto parsed_host_port = parseAddress(checkAndGetLiteralArgument<String>(engine_args[0], "host:port"), 27017);
String options;
if (engine_args.size() == 6)
options = checkAndGetLiteralArgument<String>(engine_args[5], "options");
configuration.host = parsed_host_port.first;
configuration.port = parsed_host_port.second;
configuration.database = checkAndGetLiteralArgument<String>(engine_args[1], "database");
configuration.table = checkAndGetLiteralArgument<String>(engine_args[2], "table");
configuration.username = checkAndGetLiteralArgument<String>(engine_args[3], "username");
configuration.password = checkAndGetLiteralArgument<String>(engine_args[4], "password");
String user = checkAndGetLiteralArgument<String>(engine_args[3], "user");
String auth_string;
if (!user.empty())
auth_string = fmt::format("{}:{}@", user, checkAndGetLiteralArgument<String>(engine_args[4], "password"));
auto parsed_host_port = parseAddress(checkAndGetLiteralArgument<String>(engine_args[0], "host:port"), 27017);
configuration.uri = std::make_unique<mongocxx::uri>(fmt::format("mongodb://{}{}:{}/{}?{}",
auth_string,
parsed_host_port.first,
parsed_host_port.second,
checkAndGetLiteralArgument<String>(engine_args[1], "database"),
options));
}
else if (engine_args.size() == 2)
{
configuration.collection = checkAndGetLiteralArgument<String>(engine_args[1], "database");
configuration.uri = std::make_unique<mongocxx::uri>(checkAndGetLiteralArgument<String>(engine_args[0], "host"));
}
else
throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH,
"Storage MongoDB requires 2 or from to 5 to 6 parameters: "
"MongoDB('host:port', 'database', 'collection', 'user', 'password' [, 'options']) or MongoDB('uri', 'collection').");
if (engine_args.size() >= 6)
configuration.options = checkAndGetLiteralArgument<String>(engine_args[5], "database");
}
configuration.checkHosts(context);
context->getRemoteHostFilter().checkHostAndPort(configuration.host, toString(configuration.port));
return configuration;
}
std::string mongoFuncName(const std::string & func)
{
if (func == "equals")
return "$eq";
if (func == "notEquals")
return "$ne";
if (func == "greaterThan" || func == "greater")
return "$gt";
if (func == "lessThan" || func == "less")
return "$lt";
if (func == "greaterOrEquals")
return "$gte";
if (func == "lessOrEquals")
return "$lte";
if (func == "in")
return "$in";
if (func == "notIn")
return "$nin";
if (func == "lessThan")
return "$lt";
if (func == "and")
return "$and";
if (func == "or")
return "$or";
return "";
}
template <typename OnError>
std::optional<bsoncxx::document::value> StorageMongoDB::visitWhereFunction(
const ContextPtr & context,
const FunctionNode * func,
const JoinNode * join_node,
OnError on_error)
{
if (func->getArguments().getNodes().empty())
return {};
if (const auto & column = func->getArguments().getNodes().at(0)->as<ColumnNode>())
{
// Skip unknown columns, which don't belong to the table.
const auto & table = column->getColumnSource()->as<TableNode>();
if (!table)
return {};
// Skip columns from other tables in JOIN queries.
if (table->getStorage()->getStorageID() != this->getStorageID())
return {};
if (join_node && column->getColumnSource() != join_node->getLeftTableExpression())
return {};
// Only these function can have exactly one argument and be passed to MongoDB.
if (func->getFunctionName() == "isNull")
return make_document(kvp(column->getColumnName(), make_document(kvp("$eq", bsoncxx::types::b_null{}))));
if (func->getFunctionName() == "isNotNull")
return make_document(kvp(column->getColumnName(), make_document(kvp("$ne", bsoncxx::types::b_null{}))));
if (func->getFunctionName() == "empty")
return make_document(kvp(column->getColumnName(), make_document(kvp("$in", make_array(bsoncxx::types::b_null{}, "")))));
if (func->getFunctionName() == "notEmpty")
return make_document(kvp(column->getColumnName(), make_document(kvp("$nin", make_array(bsoncxx::types::b_null{}, "")))));
auto func_name = mongoFuncName(func->getFunctionName());
if (func_name.empty())
{
on_error(func);
return {};
}
if (func->getArguments().getNodes().size() == 2)
{
const auto & value = func->getArguments().getNodes().at(1);
if (const auto & const_value = value->as<ConstantNode>())
{
std::optional<bsoncxx::types::bson_value::value> func_value{};
if (column->getColumnName() == "_id")
func_value = fieldAsOID(const_value->getValue());
else
func_value = fieldAsBSONValue(const_value->getValue(), const_value->getResultType());
if (func_name == "$in" && func_value->view().type() != bsoncxx::v_noabi::type::k_array)
func_name = "$eq";
if (func_name == "$nin" && func_value->view().type() != bsoncxx::v_noabi::type::k_array)
func_name = "$ne";
return make_document(kvp(column->getColumnName(), make_document(kvp(func_name, std::move(*func_value)))));
}
if (const auto & func_value = value->as<FunctionNode>())
if (const auto & res_value = visitWhereFunction(context, func_value, join_node, on_error); res_value.has_value())
return make_document(kvp(column->getColumnName(), make_document(kvp(func_name, *res_value))));
}
}
else
{
auto arr = bsoncxx::builder::basic::array{};
for (const auto & elem : func->getArguments().getNodes())
{
if (const auto & elem_func = elem->as<FunctionNode>())
if (const auto & res_value = visitWhereFunction(context, elem_func, join_node, on_error); res_value.has_value())
arr.append(*res_value);
}
if (!arr.view().empty())
{
auto func_name = mongoFuncName(func->getFunctionName());
if (func_name.empty())
{
on_error(func);
return {};
}
return make_document(kvp(func_name, arr));
}
}
on_error(func);
return {};
}
bsoncxx::document::value StorageMongoDB::buildMongoDBQuery(const ContextPtr & context, mongocxx::options::find & options, const SelectQueryInfo & query, const Block & sample_block)
{
document projection{};
for (const auto & column : sample_block)
projection.append(kvp(column.name, 1));
LOG_DEBUG(log, "MongoDB projection has built: '{}'", bsoncxx::to_json(projection));
options.projection(projection.extract());
bool throw_on_error = context->getSettingsRef()[Setting::mongodb_throw_on_unsupported_query];
if (!context->getSettingsRef()[Setting::allow_experimental_analyzer])
{
if (throw_on_error)
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "MongoDB storage does not support 'allow_experimental_analyzer = 0' setting");
return make_document();
}
const auto & query_tree = query.query_tree->as<QueryNode &>();
if (throw_on_error)
{
if (query_tree.hasHaving())
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "HAVING section is not supported. You can disable this error with 'SET mongodb_throw_on_unsupported_query=0', but this may cause poor performance, and is highly not recommended");
if (query_tree.hasGroupBy())
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "GROUP BY section is not supported. You can disable this error with 'SET mongodb_throw_on_unsupported_query=0', but this may cause poor performance, and is highly not recommended");
if (query_tree.hasWindow())
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "WINDOW section is not supported. You can disable this error with 'SET mongodb_throw_on_unsupported_query=0', but this may cause poor performance, and is highly not recommended");
if (query_tree.hasPrewhere())
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "PREWHERE section is not supported. You can disable this error with 'SET mongodb_throw_on_unsupported_query=0', but this may cause poor performance, and is highly not recommended");
if (query_tree.hasLimitBy())
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "LIMIT BY section is not supported. You can disable this error with 'SET mongodb_throw_on_unsupported_query=0', but this may cause poor performance, and is highly not recommended");
if (query_tree.hasOffset())
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "OFFSET section is not supported. You can disable this error with 'SET mongodb_throw_on_unsupported_query=0', but this may cause poor performance, and is highly not recommended");
}
auto on_error = [&] (const auto * node)
{
/// Reset limit, because if we omit ORDER BY, it should not be applied
options.limit(0);
if (throw_on_error)
throw Exception(ErrorCodes::NOT_IMPLEMENTED,
"Only simple queries are supported, failed to convert expression '{}' to MongoDB query. "
"You can disable this restriction with 'SET mongodb_throw_on_unsupported_query=0', to read the full table and process on ClickHouse side (this may cause poor performance)", node->formatASTForErrorMessage());
LOG_WARNING(log, "Failed to build MongoDB query for '{}'", node ? node->formatASTForErrorMessage() : "<unknown>");
};
if (query_tree.hasLimit())
{
if (const auto & limit = query_tree.getLimit()->as<ConstantNode>())
options.limit(limit->getValue().safeGet<UInt64>());
else
on_error(query_tree.getLimit().get());
}
if (query_tree.hasOrderBy())
{
document sort{};
for (const auto & child : query_tree.getOrderByNode()->getChildren())
{
if (const auto * sort_node = child->as<SortNode>())
{
if (sort_node->withFill() || sort_node->hasFillTo() || sort_node->hasFillFrom() || sort_node->hasFillStep())
on_error(sort_node);
if (const auto & column = sort_node->getExpression()->as<ColumnNode>())
sort.append(kvp(column->getColumnName(), sort_node->getSortDirection() == SortDirection::ASCENDING ? 1 : -1));
else
on_error(sort_node);
}
else
on_error(sort_node);
}
if (!sort.view().empty())
{
LOG_DEBUG(log, "MongoDB sort has built: '{}'", bsoncxx::to_json(sort));
options.sort(sort.extract());
}
}
if (query_tree.hasWhere())
{
const auto & join_tree = query_tree.getJoinTree();
const auto * join_node = join_tree->as<JoinNode>();
bool allow_where = true;
if (join_node)
{
if (join_node->getKind() == JoinKind::Left)
allow_where = join_node->getLeftTableExpression()->isEqual(*query.table_expression);
else if (join_node->getKind() == JoinKind::Right)
allow_where = join_node->getRightTableExpression()->isEqual(*query.table_expression);
else
allow_where = (join_node->getKind() == JoinKind::Inner);
}
if (allow_where)
{
std::optional<bsoncxx::document::value> filter{};
if (const auto & func = query_tree.getWhere()->as<FunctionNode>())
filter = visitWhereFunction(context, func, join_node, on_error);
else if (const auto & const_expr = query_tree.getWhere()->as<ConstantNode>())
{
if (const_expr->hasSourceExpression())
{
if (const auto & func_expr = const_expr->getSourceExpression()->as<FunctionNode>())
filter = visitWhereFunction(context, func_expr, join_node, on_error);
}
}
if (filter.has_value())
{
LOG_DEBUG(log, "MongoDB query has built: '{}'.", bsoncxx::to_json(*filter));
return std::move(*filter);
}
}
else
on_error(join_node);
}
return make_document();
}
void registerStorageMongoDB(StorageFactory & factory)
{
factory.registerStorage("MongoDB", [](const StorageFactory::Arguments & args)
{
auto configuration = StorageMongoDB::getConfiguration(args.engine_args, args.getLocalContext());
return std::make_shared<StorageMongoDB>(
args.table_id,
StorageMongoDB::getConfiguration(args.engine_args, args.getLocalContext()),
configuration.host,
configuration.port,
configuration.database,
configuration.table,
configuration.username,
configuration.password,
configuration.options,
args.columns,
args.constraints,
args.comment);
@ -419,4 +318,3 @@ void registerStorageMongoDB(StorageFactory & factory)
}
}
#endif

View File

@ -1,56 +1,33 @@
#pragma once
#include "config.h"
#include <Poco/MongoDB/Connection.h>
#if USE_MONGODB
#include <Common/RemoteHostFilter.h>
#include <Analyzer/JoinNode.h>
#include <Interpreters/Context.h>
#include <Storages/IStorage.h>
#include <Storages/SelectQueryInfo.h>
#include <mongocxx/instance.hpp>
#include <mongocxx/client.hpp>
namespace DB
{
inline mongocxx::instance inst{};
struct MongoDBConfiguration
{
std::unique_ptr<mongocxx::uri> uri;
String collection;
void checkHosts(const ContextPtr & context) const
{
// Because domain records will be resolved inside the driver, we can't check IPs for our restrictions.
for (const auto & host : uri->hosts())
context->getRemoteHostFilter().checkHostAndPort(host.name, toString(host.port));
}
};
/** Implements storage in the MongoDB database.
* Use ENGINE = MongoDB(host:port, database, collection, user, password [, options]);
* MongoDB(uri, collection);
* Read only.
* One stream only.
/* Implements storage in the MongoDB database.
* Use ENGINE = MongoDB(host:port, database, collection, user, password [, options]);
* Read only.
*/
class StorageMongoDB final : public IStorage
{
public:
static MongoDBConfiguration getConfiguration(ASTs engine_args, ContextPtr context);
StorageMongoDB(
const StorageID & table_id_,
MongoDBConfiguration configuration_,
const std::string & host_,
uint16_t port_,
const std::string & database_name_,
const std::string & collection_name_,
const std::string & username_,
const std::string & password_,
const std::string & options_,
const ColumnsDescription & columns_,
const ConstraintsDescription & constraints_,
const String & comment);
std::string getName() const override { return "MongoDB"; }
bool isRemote() const override { return true; }
Pipe read(
const Names & column_names,
@ -61,23 +38,37 @@ public:
size_t max_block_size,
size_t num_streams) override;
SinkToStoragePtr write(
const ASTPtr & query,
const StorageMetadataPtr & /*metadata_snapshot*/,
ContextPtr context,
bool async_insert) override;
struct Configuration
{
std::string host;
UInt16 port;
std::string username;
std::string password;
std::string database;
std::string table;
std::string options;
};
static Configuration getConfiguration(ASTs engine_args, ContextPtr context);
private:
template <typename OnError>
std::optional<bsoncxx::document::value> visitWhereFunction(
const ContextPtr & context,
const FunctionNode * func,
const JoinNode * join_node,
OnError on_error);
void connectIfNotConnected();
bsoncxx::document::value buildMongoDBQuery(
const ContextPtr & context,
mongocxx::options::find & options,
const SelectQueryInfo & query,
const Block & sample_block);
const std::string database_name;
const std::string collection_name;
const std::string username;
const std::string password;
const std::string uri;
const MongoDBConfiguration configuration;
LoggerPtr log;
std::shared_ptr<Poco::MongoDB::Connection> connection;
bool authenticated = false;
std::mutex connection_mutex; /// Protects the variables `connection` and `authenticated`.
};
}
#endif

View File

@ -1,327 +0,0 @@
#include "config.h"
#if USE_MONGODB
#include <Storages/StorageMongoDBPocoLegacy.h>
#include <Storages/StorageMongoDBPocoLegacySocketFactory.h>
#include <Storages/StorageFactory.h>
#include <Storages/checkAndGetLiteralArgument.h>
#include <Storages/NamedCollectionsHelpers.h>
#include <Poco/MongoDB/Connection.h>
#include <Poco/MongoDB/Cursor.h>
#include <Poco/MongoDB/Database.h>
#include <Poco/URI.h>
#include <Interpreters/evaluateConstantExpression.h>
#include <Core/Settings.h>
#include <Interpreters/Context.h>
#include <Common/parseAddress.h>
#include <Common/NamedCollections/NamedCollections.h>
#include <Common/RemoteHostFilter.h>
#include <IO/Operators.h>
#include <QueryPipeline/Pipe.h>
#include <Processors/Sources/MongoDBPocoLegacySource.h>
#include <base/range.h>
#include <unordered_set>
#include <Parsers/ASTLiteral.h>
#include <Processors/Sinks/SinkToStorage.h>
#include <DataTypes/DataTypeArray.h>
namespace DB
{
namespace ErrorCodes
{
extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
extern const int MONGODB_CANNOT_AUTHENTICATE;
}
/// Constructs the legacy (Poco-based) MongoDB storage.
/// The connection URI is assembled from host/port/database/options; the actual
/// connection is established lazily in connectIfNotConnected().
/// Fix: corrected the misspelled, user-facing deprecation warning
/// ("integartion" -> "integration") and its grammar.
StorageMongoDBPocoLegacy::StorageMongoDBPocoLegacy(
    const StorageID & table_id_,
    const std::string & host_,
    uint16_t port_,
    const std::string & database_name_,
    const std::string & collection_name_,
    const std::string & username_,
    const std::string & password_,
    const std::string & options_,
    const ColumnsDescription & columns_,
    const ConstraintsDescription & constraints_,
    const String & comment)
    : IStorage(table_id_)
    , database_name(database_name_)
    , collection_name(collection_name_)
    , username(username_)
    , password(password_)
    , uri("mongodb://" + host_ + ":" + std::to_string(port_) + "/" + database_name_ + "?" + options_)
{
    LOG_WARNING(getLogger("StorageMongoDB (" + table_id_.table_name + ")"), "The deprecated MongoDB integration implementation is used, it will be removed in future releases.");

    StorageInMemoryMetadata storage_metadata;
    storage_metadata.setColumns(columns_);
    storage_metadata.setConstraints(constraints_);
    storage_metadata.setComment(comment);
    setInMemoryMetadata(storage_metadata);
}
/// Lazily establishes the MongoDB connection and authenticates on first use.
/// Safe to call repeatedly; both read() and write() call it, so the whole body
/// is guarded by connection_mutex.
void StorageMongoDBPocoLegacy::connectIfNotConnected()
{
    std::lock_guard lock{connection_mutex};
    if (!connection)
    {
        StorageMongoDBPocoLegacySocketFactory factory;
        connection = std::make_shared<Poco::MongoDB::Connection>(uri, factory);
    }

    if (!authenticated)
    {
        Poco::URI poco_uri(uri);
        auto query_params = poco_uri.getQueryParameters();

        /// The "authSource" URI option, when present, names the database to
        /// authenticate against instead of the target database.
        auto auth_source = std::find_if(query_params.begin(), query_params.end(),
            [&](const std::pair<std::string, std::string> & param) { return param.first == "authSource"; });
        auto auth_db = database_name;
        if (auth_source != query_params.end())
            auth_db = auth_source->second;

        /// Authentication is only attempted when both credentials are non-empty
        /// (SCRAM-SHA-1); otherwise the connection is used unauthenticated.
        if (!username.empty() && !password.empty())
        {
            Poco::MongoDB::Database poco_db(auth_db);
            if (!poco_db.authenticate(*connection, username, password, Poco::MongoDB::Database::AUTH_SCRAM_SHA1))
                throw Exception(ErrorCodes::MONGODB_CANNOT_AUTHENTICATE, "Cannot authenticate in MongoDB, incorrect user or password");
        }

        authenticated = true;
    }
}
/// Sink that converts ClickHouse blocks into BSON documents and inserts them
/// into a MongoDB collection, choosing the wire-protocol form at construction.
class StorageMongoDBLegacySink : public SinkToStorage
{
public:
    explicit StorageMongoDBLegacySink(
        const std::string & collection_name_,
        const std::string & db_name_,
        const StorageMetadataPtr & metadata_snapshot_,
        std::shared_ptr<Poco::MongoDB::Connection> connection_)
        : SinkToStorage(metadata_snapshot_->getSampleBlock())
        , collection_name(collection_name_)
        , db_name(db_name_)
        , metadata_snapshot{metadata_snapshot_}
        , connection(connection_)
        /// Probed once per sink: old servers need OP_INSERT, newer ones OP_MSG.
        , is_wire_protocol_old(isMongoDBWireProtocolOld(*connection_, db_name))
    {
    }

    String getName() const override { return "StorageMongoDBLegacySink"; }

    /// Converts every row of the chunk into a BSON document and sends one
    /// insert request for the whole chunk.
    void consume(Chunk & chunk) override
    {
        Poco::MongoDB::Document::Vector documents;

        auto block = getHeader().cloneWithColumns(chunk.getColumns());

        size_t num_rows = block.rows();
        size_t num_cols = block.columns();

        const auto columns = block.getColumns();
        const auto data_types = block.getDataTypes();
        const auto data_names = block.getNames();

        documents.reserve(num_rows);

        for (const auto i : collections::range(0, num_rows))
        {
            Poco::MongoDB::Document::Ptr document = new Poco::MongoDB::Document();

            for (const auto j : collections::range(0, num_cols))
            {
                insertValueIntoMongoDB(*document, data_names[j], *data_types[j], *columns[j], i);
            }

            documents.push_back(std::move(document));
        }

        Poco::MongoDB::Database db(db_name);
        if (is_wire_protocol_old)
        {
            /// Legacy OP_INSERT request for pre-OP_MSG servers.
            Poco::SharedPtr<Poco::MongoDB::InsertRequest> insert_request = db.createInsertRequest(collection_name);
            insert_request->documents() = std::move(documents);
            connection->sendRequest(*insert_request);
        }
        else
        {
            /// Modern OP_MSG "insert" command.
            Poco::SharedPtr<Poco::MongoDB::OpMsgMessage> insert_request = db.createOpMsgMessage(collection_name);
            insert_request->setCommandName(Poco::MongoDB::OpMsgMessage::CMD_INSERT);
            insert_request->documents() = std::move(documents);
            connection->sendRequest(*insert_request);
        }
    }

private:
    /// Appends a single cell (column value at row `idx`) to `document` under `name`.
    /// Arrays recurse element by element; scalar types are mapped to the nearest
    /// BSON type, everything else falls back to its text serialization.
    void insertValueIntoMongoDB(
        Poco::MongoDB::Document & document,
        const std::string & name,
        const IDataType & data_type,
        const IColumn & column,
        size_t idx)
    {
        WhichDataType which(data_type);

        if (which.isArray())
        {
            const ColumnArray & column_array = assert_cast<const ColumnArray &>(column);
            const ColumnArray::Offsets & offsets = column_array.getOffsets();

            /// NOTE(review): offsets[idx - 1] for idx == 0 relies on the offsets
            /// container tolerating index -1 (padded array) — confirm upstream.
            size_t offset = offsets[idx - 1];
            size_t next_offset = offsets[idx];

            const IColumn & nested_column = column_array.getData();

            const auto * array_type = assert_cast<const DataTypeArray *>(&data_type);
            const DataTypePtr & nested_type = array_type->getNestedType();

            /// BSON arrays are documents keyed by the decimal element index.
            Poco::MongoDB::Array::Ptr array = new Poco::MongoDB::Array();
            for (size_t i = 0; i + offset < next_offset; ++i)
            {
                insertValueIntoMongoDB(*array, Poco::NumberFormatter::format(i), *nested_type, nested_column, i + offset);
            }

            document.add(name, array);
            return;
        }

        /// MongoDB does not support UInt64 type, so just cast it to Int64
        if (which.isNativeUInt())
            document.add(name, static_cast<Poco::Int64>(column.getUInt(idx)));
        else if (which.isNativeInt())
            document.add(name, static_cast<Poco::Int64>(column.getInt(idx)));
        else if (which.isFloat32())
            document.add(name, static_cast<Float64>(column.getFloat32(idx)));
        else if (which.isFloat64())
            document.add(name, column.getFloat64(idx));
        else if (which.isDate())
            /// Day number -> seconds since epoch -> microseconds for Poco::Timestamp.
            document.add(name, Poco::Timestamp(DateLUT::instance().fromDayNum(DayNum(column.getUInt(idx))) * 1000000));
        else if (which.isDateTime())
            document.add(name, Poco::Timestamp(column.getUInt(idx) * 1000000));
        else
        {
            /// Fallback: store the value as its default text serialization.
            WriteBufferFromOwnString ostr;
            data_type.getDefaultSerialization()->serializeText(column, idx, ostr, FormatSettings{});
            document.add(name, ostr.str());
        }
    }

    String collection_name;
    String db_name;
    StorageMetadataPtr metadata_snapshot;
    std::shared_ptr<Poco::MongoDB::Connection> connection;
    const bool is_wire_protocol_old;
};
/// Reads the requested columns from the whole collection.
/// query_info is ignored — no filter pushdown; the source receives an empty
/// MongoDB document as the query, i.e. a full collection scan.
Pipe StorageMongoDBPocoLegacy::read(
    const Names & column_names,
    const StorageSnapshotPtr & storage_snapshot,
    SelectQueryInfo & /*query_info*/,
    ContextPtr /*context*/,
    QueryProcessingStage::Enum /*processed_stage*/,
    size_t max_block_size,
    size_t /*num_streams*/)
{
    connectIfNotConnected();

    storage_snapshot->check(column_names);

    /// Build a header block containing only the requested physical columns.
    Block sample_block;
    for (const String & column_name : column_names)
    {
        auto column_data = storage_snapshot->metadata->getColumns().getPhysical(column_name);
        sample_block.insert({ column_data.type, column_data.name });
    }

    return Pipe(std::make_shared<MongoDBPocoLegacySource>(connection, database_name, collection_name, Poco::MongoDB::Document{}, sample_block, max_block_size));
}
/// Returns a sink that inserts blocks into the MongoDB collection,
/// ensuring the connection is established first.
SinkToStoragePtr StorageMongoDBPocoLegacy::write(const ASTPtr & /* query */, const StorageMetadataPtr & metadata_snapshot, ContextPtr /* context */, bool /*async_insert*/)
{
    connectIfNotConnected();
    return std::make_shared<StorageMongoDBLegacySink>(collection_name, database_name, metadata_snapshot, connection);
}
/** Parses engine arguments into a Configuration.
  * Two forms are accepted:
  *   - a named collection with keys host/port/user/password/database/collection
  *     and an optional "options" key;
  *   - positional: MongoDB('host:port', database, collection, 'user', 'password' [, 'options']).
  * The resulting host/port pair is checked against the remote host filter.
  * Fix: the argument-name hint for the 6th positional argument was "database";
  * it now correctly says "options" so error messages point at the right argument.
  */
StorageMongoDBPocoLegacy::Configuration StorageMongoDBPocoLegacy::getConfiguration(ASTs engine_args, ContextPtr context)
{
    Configuration configuration;

    if (auto named_collection = tryGetNamedCollectionWithOverrides(engine_args, context))
    {
        validateNamedCollection(
            *named_collection,
            ValidateKeysMultiset<MongoDBEqualKeysSet>{"host", "port", "user", "username", "password", "database", "db", "collection", "table"},
            {"options"});

        configuration.host = named_collection->getAny<String>({"host", "hostname"});
        configuration.port = static_cast<UInt16>(named_collection->get<UInt64>("port"));
        configuration.username = named_collection->getAny<String>({"user", "username"});
        configuration.password = named_collection->get<String>("password");
        configuration.database = named_collection->getAny<String>({"database", "db"});
        configuration.table = named_collection->getAny<String>({"collection", "table"});
        configuration.options = named_collection->getOrDefault<String>("options", "");
    }
    else
    {
        if (engine_args.size() < 5 || engine_args.size() > 6)
            throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH,
                            "Storage MongoDB requires from 5 to 6 parameters: "
                            "MongoDB('host:port', database, collection, 'user', 'password' [, 'options']).");

        for (auto & engine_arg : engine_args)
            engine_arg = evaluateConstantExpressionOrIdentifierAsLiteral(engine_arg, context);

        /// 27017 is the default MongoDB port.
        auto parsed_host_port = parseAddress(checkAndGetLiteralArgument<String>(engine_args[0], "host:port"), 27017);

        configuration.host = parsed_host_port.first;
        configuration.port = parsed_host_port.second;
        configuration.database = checkAndGetLiteralArgument<String>(engine_args[1], "database");
        configuration.table = checkAndGetLiteralArgument<String>(engine_args[2], "table");
        configuration.username = checkAndGetLiteralArgument<String>(engine_args[3], "username");
        configuration.password = checkAndGetLiteralArgument<String>(engine_args[4], "password");

        if (engine_args.size() >= 6)
            configuration.options = checkAndGetLiteralArgument<String>(engine_args[5], "options");
    }

    context->getRemoteHostFilter().checkHostAndPort(configuration.host, toString(configuration.port));

    return configuration;
}
/// Registers the legacy "MongoDB" table engine in the storage factory.
void registerStorageMongoDBPocoLegacy(StorageFactory & factory)
{
    factory.registerStorage("MongoDB", [](const StorageFactory::Arguments & args)
    {
        auto configuration = StorageMongoDBPocoLegacy::getConfiguration(args.engine_args, args.getLocalContext());
        return std::make_shared<StorageMongoDBPocoLegacy>(
            args.table_id,
            configuration.host,
            configuration.port,
            configuration.database,
            configuration.table,
            configuration.username,
            configuration.password,
            configuration.options,
            args.columns,
            args.constraints,
            args.comment);
    },
    {
        /// Access to this engine is governed by the MONGO source access type.
        .source_access_type = AccessType::MONGO,
    });
}
}
#endif

View File

@ -1,79 +0,0 @@
#pragma once
#include "config.h"
#if USE_MONGODB
#include <Poco/MongoDB/Connection.h>
#include <Storages/IStorage.h>
namespace DB
{
/* Implements storage over a MongoDB collection (legacy Poco-based client).
 * Use ENGINE = MongoDB(host:port, database, collection, user, password [, options]);
 * Supports reading the whole collection and inserting rows.
 */
/// Deprecated, will be removed soon.
class StorageMongoDBPocoLegacy final : public IStorage
{
public:
    StorageMongoDBPocoLegacy(
        const StorageID & table_id_,
        const std::string & host_,
        uint16_t port_,
        const std::string & database_name_,
        const std::string & collection_name_,
        const std::string & username_,
        const std::string & password_,
        const std::string & options_,
        const ColumnsDescription & columns_,
        const ConstraintsDescription & constraints_,
        const String & comment);

    std::string getName() const override { return "MongoDB"; }

    /// Streams documents of the collection as blocks of the requested columns.
    Pipe read(
        const Names & column_names,
        const StorageSnapshotPtr & storage_snapshot,
        SelectQueryInfo & query_info,
        ContextPtr context,
        QueryProcessingStage::Enum processed_stage,
        size_t max_block_size,
        size_t num_streams) override;

    /// Returns a sink that inserts rows into the collection.
    SinkToStoragePtr write(
        const ASTPtr & query,
        const StorageMetadataPtr & /*metadata_snapshot*/,
        ContextPtr context,
        bool async_insert) override;

    /// Connection parameters parsed from engine arguments or a named collection.
    struct Configuration
    {
        std::string host;
        UInt16 port;
        std::string username;
        std::string password;
        std::string database;
        std::string table;
        std::string options;   /// extra URI query options, e.g. "ssl=true"
    };
    static Configuration getConfiguration(ASTs engine_args, ContextPtr context);

private:
    /// Lazily connects and authenticates; safe to call repeatedly.
    void connectIfNotConnected();

    const std::string database_name;
    const std::string collection_name;
    const std::string username;
    const std::string password;
    const std::string uri;   /// "mongodb://host:port/database?options"

    std::shared_ptr<Poco::MongoDB::Connection> connection;
    bool authenticated = false;
    std::mutex connection_mutex; /// Protects the variables `connection` and `authenticated`.
};
}
#endif

View File

@ -1,10 +1,9 @@
#include "config.h"
#if USE_MONGODB
#include "StorageMongoDBPocoLegacySocketFactory.h"
#include "StorageMongoDBSocketFactory.h"
#include <Common/Exception.h>
#include "config.h"
#include <Poco/Net/IPAddress.h>
#include <Poco/Net/SocketAddress.h>
@ -18,15 +17,15 @@ namespace DB
namespace ErrorCodes
{
extern const int FEATURE_IS_NOT_ENABLED_AT_BUILD_TIME;
extern const int FEATURE_IS_NOT_ENABLED_AT_BUILD_TIME;
}
Poco::Net::StreamSocket StorageMongoDBPocoLegacySocketFactory::createSocket(const std::string & host, int port, Poco::Timespan connectTimeout, bool secure)
Poco::Net::StreamSocket StorageMongoDBSocketFactory::createSocket(const std::string & host, int port, Poco::Timespan connectTimeout, bool secure)
{
return secure ? createSecureSocket(host, port, connectTimeout) : createPlainSocket(host, port, connectTimeout);
}
Poco::Net::StreamSocket StorageMongoDBPocoLegacySocketFactory::createPlainSocket(const std::string & host, int port, Poco::Timespan connectTimeout)
Poco::Net::StreamSocket StorageMongoDBSocketFactory::createPlainSocket(const std::string & host, int port, Poco::Timespan connectTimeout)
{
Poco::Net::SocketAddress address(host, port);
Poco::Net::StreamSocket socket;
@ -37,7 +36,7 @@ Poco::Net::StreamSocket StorageMongoDBPocoLegacySocketFactory::createPlainSocket
}
Poco::Net::StreamSocket StorageMongoDBPocoLegacySocketFactory::createSecureSocket(const std::string & host [[maybe_unused]], int port [[maybe_unused]], Poco::Timespan connectTimeout [[maybe_unused]])
Poco::Net::StreamSocket StorageMongoDBSocketFactory::createSecureSocket(const std::string & host [[maybe_unused]], int port [[maybe_unused]], Poco::Timespan connectTimeout [[maybe_unused]])
{
#if USE_SSL
Poco::Net::SocketAddress address(host, port);
@ -54,4 +53,3 @@ Poco::Net::StreamSocket StorageMongoDBPocoLegacySocketFactory::createSecureSocke
}
}
#endif

View File

@ -1,16 +1,12 @@
#pragma once
#include "config.h"
#if USE_MONGODB
#include <Poco/MongoDB/Connection.h>
namespace DB
{
/// Deprecated, will be removed soon.
class StorageMongoDBPocoLegacySocketFactory : public Poco::MongoDB::Connection::SocketFactory
class StorageMongoDBSocketFactory : public Poco::MongoDB::Connection::SocketFactory
{
public:
Poco::Net::StreamSocket createSocket(const std::string & host, int port, Poco::Timespan connectTimeout, bool secure) override;
@ -21,4 +17,3 @@ private:
};
}
#endif

View File

@ -1,5 +1,5 @@
#include <Storages/StorageFactory.h>
#include <Storages/registerStorages.h>
#include <Storages/StorageFactory.h>
#include "config.h"
@ -64,11 +64,7 @@ void registerStorageJDBC(StorageFactory & factory);
void registerStorageMySQL(StorageFactory & factory);
#endif
#if USE_MONGODB
void registerStorageMongoDB(StorageFactory & factory);
void registerStorageMongoDBPocoLegacy(StorageFactory & factory);
#endif
void registerStorageRedis(StorageFactory & factory);
@ -109,7 +105,7 @@ void registerStorageKeeperMap(StorageFactory & factory);
void registerStorageObjectStorage(StorageFactory & factory);
void registerStorages(bool use_legacy_mongodb_integration [[maybe_unused]])
void registerStorages()
{
auto & factory = StorageFactory::instance();
@ -171,13 +167,7 @@ void registerStorages(bool use_legacy_mongodb_integration [[maybe_unused]])
registerStorageMySQL(factory);
#endif
#if USE_MONGODB
if (use_legacy_mongodb_integration)
registerStorageMongoDBPocoLegacy(factory);
else
registerStorageMongoDB(factory);
#endif
registerStorageMongoDB(factory);
registerStorageRedis(factory);
#if USE_RDKAFKA

View File

@ -2,5 +2,5 @@
namespace DB
{
void registerStorages(bool use_legacy_mongodb_integration);
void registerStorages();
}

View File

@ -1,6 +1,3 @@
#include "config.h"
#if USE_MONGODB
#include <Storages/StorageMongoDB.h>
#include <Common/Exception.h>
@ -46,7 +43,7 @@ private:
ColumnsDescription getActualTableStructure(ContextPtr context, bool is_insert_query) const override;
void parseArguments(const ASTPtr & ast_function, ContextPtr context) override;
std::shared_ptr<MongoDBConfiguration> configuration;
std::optional<StorageMongoDB::Configuration> configuration;
String structure;
};
@ -55,8 +52,14 @@ StoragePtr TableFunctionMongoDB::executeImpl(const ASTPtr & /*ast_function*/,
{
auto columns = getActualTableStructure(context, is_insert_query);
auto storage = std::make_shared<StorageMongoDB>(
StorageID(getDatabaseName(), table_name),
std::move(*configuration),
StorageID(configuration->database, table_name),
configuration->host,
configuration->port,
configuration->database,
configuration->table,
configuration->username,
configuration->password,
configuration->options,
columns,
ConstraintsDescription(),
String{});
@ -77,89 +80,49 @@ void TableFunctionMongoDB::parseArguments(const ASTPtr & ast_function, ContextPt
ASTs & args = func_args.arguments->children;
if (args.size() == 6 || args.size() == 7)
{
ASTs main_arguments(args.begin(), args.begin() + 5);
for (size_t i = 5; i < args.size(); ++i)
{
if (const auto * ast_func = typeid_cast<const ASTFunction *>(args[i].get()))
{
const auto * args_expr = assert_cast<const ASTExpressionList *>(ast_func->arguments.get());
auto function_args = args_expr->children;
if (function_args.size() != 2)
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Expected key-value defined argument");
auto arg_name = function_args[0]->as<ASTIdentifier>()->name();
if (arg_name == "structure")
structure = checkAndGetLiteralArgument<String>(function_args[1], "structure");
else if (arg_name == "options")
main_arguments.push_back(function_args[1]);
}
else if (i == 5)
{
structure = checkAndGetLiteralArgument<String>(args[i], "structure");
}
else if (i == 6)
{
main_arguments.push_back(args[i]);
}
}
configuration = std::make_shared<MongoDBConfiguration>(StorageMongoDB::getConfiguration(main_arguments, context));
}
else if (args.size() == 3)
{
ASTs main_arguments(args.begin(), args.begin() + 2);
for (size_t i = 2; i < args.size(); ++i)
{
if (const auto * ast_func = typeid_cast<const ASTFunction *>(args[i].get()))
{
const auto * args_expr = assert_cast<const ASTExpressionList *>(ast_func->arguments.get());
auto function_args = args_expr->children;
if (function_args.size() != 2)
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Expected key-value defined argument");
auto arg_name = function_args[0]->as<ASTIdentifier>()->name();
if (arg_name == "structure")
structure = checkAndGetLiteralArgument<String>(function_args[1], "structure");
}
else if (i == 2)
{
structure = checkAndGetLiteralArgument<String>(args[i], "structure");
}
}
configuration = std::make_shared<MongoDBConfiguration>(StorageMongoDB::getConfiguration(main_arguments, context));
}
else
if (args.size() < 6 || args.size() > 7)
{
throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH,
"Table function 'mongodb' requires 3 or from 6 to 7 parameters: "
"mongodb('host:port', database, collection, user, password, structure, [, options]) or mongodb(uri, collection, structure).");
"Table function 'mongodb' requires from 6 to 7 parameters: "
"mongodb('host:port', database, collection, 'user', 'password', structure, [, 'options'])");
}
ASTs main_arguments(args.begin(), args.begin() + 5);
for (size_t i = 5; i < args.size(); ++i)
{
if (const auto * ast_func = typeid_cast<const ASTFunction *>(args[i].get()))
{
const auto * args_expr = assert_cast<const ASTExpressionList *>(ast_func->arguments.get());
auto function_args = args_expr->children;
if (function_args.size() != 2)
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Expected key-value defined argument");
auto arg_name = function_args[0]->as<ASTIdentifier>()->name();
if (arg_name == "structure")
structure = checkAndGetLiteralArgument<String>(function_args[1], "structure");
else if (arg_name == "options")
main_arguments.push_back(function_args[1]);
}
else if (i == 5)
{
structure = checkAndGetLiteralArgument<String>(args[i], "structure");
}
else if (i == 6)
{
main_arguments.push_back(args[i]);
}
}
configuration = StorageMongoDB::getConfiguration(main_arguments, context);
}
}
void registerTableFunctionMongoDB(TableFunctionFactory & factory)
{
factory.registerFunction<TableFunctionMongoDB>(
{
.documentation =
{
.description = "Allows get data from MongoDB collection.",
.examples = {
{"Fetch collection by URI", "SELECT * FROM mongodb('mongodb://root:clickhouse@localhost:27017/database', 'example_collection', 'key UInt64, data String')", ""},
{"Fetch collection over TLS", "SELECT * FROM mongodb('localhost:27017', 'database', 'example_collection', 'root', 'clickhouse', 'key UInt64, data String', 'tls=true')", ""},
},
.categories = {"Integration"},
},
});
factory.registerFunction<TableFunctionMongoDB>();
}
}
#endif

View File

@ -1,133 +0,0 @@
#include "config.h"
#if USE_MONGODB
#include <Storages/StorageMongoDBPocoLegacy.h>
#include <Common/Exception.h>
#include <Interpreters/Context.h>
#include <Parsers/ASTFunction.h>
#include <Parsers/ASTIdentifier.h>
#include <TableFunctions/TableFunctionFactory.h>
#include <Interpreters/parseColumnsListForTableFunction.h>
#include <TableFunctions/registerTableFunctions.h>
#include <Storages/checkAndGetLiteralArgument.h>
#include <Storages/ColumnsDescription.h>
namespace DB
{
namespace ErrorCodes
{
extern const int BAD_ARGUMENTS;
extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
}
namespace
{
/// Deprecated, will be removed soon.
/// Table function mongodb('host:port', database, collection, 'user', 'password',
/// structure [, 'options']), backed by the legacy Poco MongoDB storage.
class TableFunctionMongoDBPocoLegacy : public ITableFunction
{
public:
    static constexpr auto name = "mongodb";
    std::string getName() const override { return name; }

private:
    /// Creates and starts a StorageMongoDBPocoLegacy from the parsed configuration.
    StoragePtr executeImpl(
        const ASTPtr & ast_function, ContextPtr context,
        const std::string & table_name, ColumnsDescription cached_columns, bool is_insert_query) const override;

    const char * getStorageTypeName() const override { return "MongoDB"; }

    /// Parses the user-provided `structure` string into a columns description.
    ColumnsDescription getActualTableStructure(ContextPtr context, bool is_insert_query) const override;
    /// Fills `configuration` and `structure` from the function arguments.
    void parseArguments(const ASTPtr & ast_function, ContextPtr context) override;

    std::optional<StorageMongoDBPocoLegacy::Configuration> configuration;  /// set by parseArguments()
    String structure;  /// column list as text, e.g. "key UInt64, data String"
};
/// Builds the storage for the table function. parseArguments() must have run
/// already, so `configuration` is populated.
StoragePtr TableFunctionMongoDBPocoLegacy::executeImpl(const ASTPtr & /*ast_function*/,
    ContextPtr context, const String & table_name, ColumnsDescription /*cached_columns*/, bool is_insert_query) const
{
    auto columns = getActualTableStructure(context, is_insert_query);
    auto storage = std::make_shared<StorageMongoDBPocoLegacy>(
        StorageID(configuration->database, table_name),
        configuration->host,
        configuration->port,
        configuration->database,
        configuration->table,
        configuration->username,
        configuration->password,
        configuration->options,
        columns,
        ConstraintsDescription(),
        String{});  /// no comment for a table-function-created table
    storage->startup();
    return storage;
}
/// Parses the `structure` argument (a textual column list) into columns.
ColumnsDescription TableFunctionMongoDBPocoLegacy::getActualTableStructure(ContextPtr context, bool /*is_insert_query*/) const
{
    return parseColumnsListFromString(structure, context);
}
/// Extracts `structure` and the storage configuration from the 6-7 arguments.
/// The first 5 arguments are positional (host:port, database, collection, user,
/// password). The 6th/7th may be positional (structure, then options) or
/// key-value pairs (structure=..., options=...). The "options" value, when
/// present, is appended to the argument list handed to getConfiguration().
void TableFunctionMongoDBPocoLegacy::parseArguments(const ASTPtr & ast_function, ContextPtr context)
{
    const auto & func_args = ast_function->as<ASTFunction &>();
    if (!func_args.arguments)
        throw Exception(ErrorCodes::BAD_ARGUMENTS, "Table function 'mongodb' must have arguments.");
    ASTs & args = func_args.arguments->children;
    if (args.size() < 6 || args.size() > 7)
    {
        throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH,
                        "Table function 'mongodb' requires from 6 to 7 parameters: "
                        "mongodb('host:port', database, collection, 'user', 'password', structure, [, 'options'])");
    }

    ASTs main_arguments(args.begin(), args.begin() + 5);

    for (size_t i = 5; i < args.size(); ++i)
    {
        if (const auto * ast_func = typeid_cast<const ASTFunction *>(args[i].get()))
        {
            /// Key-value form: key = value.
            const auto * args_expr = assert_cast<const ASTExpressionList *>(ast_func->arguments.get());
            auto function_args = args_expr->children;
            if (function_args.size() != 2)
                throw Exception(ErrorCodes::BAD_ARGUMENTS, "Expected key-value defined argument");

            auto arg_name = function_args[0]->as<ASTIdentifier>()->name();
            if (arg_name == "structure")
                structure = checkAndGetLiteralArgument<String>(function_args[1], "structure");
            else if (arg_name == "options")
                main_arguments.push_back(function_args[1]);
        }
        else if (i == 5)
        {
            /// Positional form: 6th argument is the structure.
            structure = checkAndGetLiteralArgument<String>(args[i], "structure");
        }
        else if (i == 6)
        {
            /// Positional form: optional 7th argument is the options string.
            main_arguments.push_back(args[i]);
        }
    }

    configuration = StorageMongoDBPocoLegacy::getConfiguration(main_arguments, context);
}
}
/// Registers the legacy `mongodb` table function in the factory.
void registerTableFunctionMongoDBPocoLegacy(TableFunctionFactory & factory)
{
    factory.registerFunction<TableFunctionMongoDBPocoLegacy>();
}
}
#endif

View File

@ -1,9 +1,10 @@
#include "registerTableFunctions.h"
#include <TableFunctions/TableFunctionFactory.h>
namespace DB
{
void registerTableFunctions(bool use_legacy_mongodb_integration [[maybe_unused]])
void registerTableFunctions()
{
auto & factory = TableFunctionFactory::instance();
@ -22,12 +23,7 @@ void registerTableFunctions(bool use_legacy_mongodb_integration [[maybe_unused]]
registerTableFunctionValues(factory);
registerTableFunctionInput(factory);
registerTableFunctionGenerate(factory);
#if USE_MONGODB
if (use_legacy_mongodb_integration)
registerTableFunctionMongoDBPocoLegacy(factory);
else
registerTableFunctionMongoDB(factory);
#endif
registerTableFunctionMongoDB(factory);
registerTableFunctionRedis(factory);
registerTableFunctionMergeTreeIndex(factory);
registerTableFunctionFuzzQuery(factory);

View File

@ -20,10 +20,7 @@ void registerTableFunctionURLCluster(TableFunctionFactory & factory);
void registerTableFunctionValues(TableFunctionFactory & factory);
void registerTableFunctionInput(TableFunctionFactory & factory);
void registerTableFunctionGenerate(TableFunctionFactory & factory);
#if USE_MONGODB
void registerTableFunctionMongoDB(TableFunctionFactory & factory);
void registerTableFunctionMongoDBPocoLegacy(TableFunctionFactory & factory);
#endif
void registerTableFunctionRedis(TableFunctionFactory & factory);
void registerTableFunctionMergeTreeIndex(TableFunctionFactory & factory);
void registerTableFunctionFuzzQuery(TableFunctionFactory & factory);
@ -73,6 +70,6 @@ void registerDataLakeTableFunctions(TableFunctionFactory & factory);
void registerTableFunctionTimeSeries(TableFunctionFactory & factory);
void registerTableFunctions(bool use_legacy_mongodb_integration [[maybe_unused]]);
void registerTableFunctions();
}

View File

@ -182,9 +182,6 @@ endif()
if (TARGET ch_contrib::prometheus_protobufs)
set(USE_PROMETHEUS_PROTOBUFS 1)
endif()
if (TARGET ch_contrib::mongocxx)
set(USE_MONGODB 1)
endif()
if (TARGET ch_contrib::numactl)
set(USE_NUMACTL 1)
endif()

View File

@ -1,7 +1,7 @@
version: '2.3'
services:
mongo1:
image: mongo:6.0
image: mongo:5.0
restart: always
environment:
MONGO_INITDB_ROOT_USERNAME: root
@ -10,20 +10,8 @@ services:
- ${MONGO_EXTERNAL_PORT:-27017}:${MONGO_INTERNAL_PORT:-27017}
command: --profile=2 --verbose
mongo_no_cred:
image: mongo:6.0
mongo2:
image: mongo:5.0
restart: always
ports:
- ${MONGO_NO_CRED_EXTERNAL_PORT:-27017}:${MONGO_NO_CRED_INTERNAL_PORT:-27017}
mongo_secure:
image: mongo:6.0
restart: always
environment:
MONGO_INITDB_ROOT_USERNAME: root
MONGO_INITDB_ROOT_PASSWORD: clickhouse
volumes:
- ${MONGO_SECURE_CONFIG_DIR:-}:/mongo/
ports:
- ${MONGO_SECURE_EXTERNAL_PORT:-27017}:${MONGO_SECURE_INTERNAL_PORT:-27017}
command: --config /mongo/mongo_secure.conf --profile=2 --verbose

View File

@ -0,0 +1,13 @@
# Docker Compose definition of a TLS-configured MongoDB instance for integration tests.
version: '2.3'
services:
  mongo1:
    image: mongo:3.6
    restart: always
    environment:
      # Root credentials expected by the tests.
      MONGO_INITDB_ROOT_USERNAME: root
      MONGO_INITDB_ROOT_PASSWORD: clickhouse
    volumes:
      # MONGO_CONFIG_PATH is supplied by the test harness and must contain mongo_secure.conf.
      - ${MONGO_CONFIG_PATH:-}:/mongo/
    ports:
      # Host port is chosen by the harness; in-container port defaults to 27017.
      - ${MONGO_EXTERNAL_PORT:-27017}:${MONGO_INTERNAL_PORT:-27017}
    # Start with the secure config; --profile=2 and --verbose aid test debugging.
    command: --config /mongo/mongo_secure.conf --profile=2 --verbose

View File

@ -544,6 +544,7 @@ class ClickHouseCluster:
self.with_hdfs = False
self.with_kerberized_hdfs = False
self.with_mongo = False
self.with_mongo_secure = False
self.with_net_trics = False
self.with_redis = False
self.with_cassandra = False
@ -623,10 +624,8 @@ class ClickHouseCluster:
# available when with_mongo == True
self.mongo_host = "mongo1"
self._mongo_port = 0
self.mongo_no_cred_host = "mongo_no_cred"
self.mongo_no_cred_host = "mongo2"
self._mongo_no_cred_port = 0
self.mongo_secure_host = "mongo_secure"
self._mongo_secure_port = 0
# available when with_cassandra == True
self.cassandra_host = "cassandra1"
@ -838,13 +837,6 @@ class ClickHouseCluster:
self._mongo_no_cred_port = self.port_pool.get_port()
return self._mongo_no_cred_port
@property
def mongo_secure_port(self):
if self._mongo_secure_port:
return self._mongo_secure_port
self._mongo_secure_port = get_free_port()
return self._mongo_secure_port
@property
def redis_port(self):
if self._redis_port:
@ -1455,6 +1447,29 @@ class ClickHouseCluster:
]
return self.base_nats_cmd
def setup_mongo_secure_cmd(self, instance, env_variables, docker_compose_yml_dir):
self.with_mongo = self.with_mongo_secure = True
env_variables["MONGO_HOST"] = self.mongo_host
env_variables["MONGO_EXTERNAL_PORT"] = str(self.mongo_port)
env_variables["MONGO_INTERNAL_PORT"] = "27017"
env_variables["MONGO_CONFIG_PATH"] = HELPERS_DIR
self.base_cmd.extend(
[
"--file",
p.join(docker_compose_yml_dir, "docker_compose_mongo_secure.yml"),
]
)
self.base_mongo_cmd = [
"docker-compose",
"--env-file",
instance.env_file,
"--project-name",
self.project_name,
"--file",
p.join(docker_compose_yml_dir, "docker_compose_mongo_secure.yml"),
]
return self.base_mongo_cmd
def setup_mongo_cmd(self, instance, env_variables, docker_compose_yml_dir):
self.with_mongo = True
env_variables["MONGO_HOST"] = self.mongo_host
@ -1462,11 +1477,6 @@ class ClickHouseCluster:
env_variables["MONGO_INTERNAL_PORT"] = "27017"
env_variables["MONGO_NO_CRED_EXTERNAL_PORT"] = str(self.mongo_no_cred_port)
env_variables["MONGO_NO_CRED_INTERNAL_PORT"] = "27017"
env_variables["MONGO_SECURE_EXTERNAL_PORT"] = str(self.mongo_secure_port)
env_variables["MONGO_SECURE_INTERNAL_PORT"] = "27017"
env_variables["MONGO_SECURE_CONFIG_DIR"] = (
instance.path + "/" + "mongo_secure_config"
)
self.base_cmd.extend(
["--file", p.join(docker_compose_yml_dir, "docker_compose_mongo.yml")]
)
@ -1699,6 +1709,7 @@ class ClickHouseCluster:
with_hdfs=False,
with_kerberized_hdfs=False,
with_mongo=False,
with_mongo_secure=False,
with_nginx=False,
with_redis=False,
with_minio=False,
@ -1802,7 +1813,7 @@ class ClickHouseCluster:
or with_kerberized_hdfs
or with_kerberos_kdc
or with_kerberized_kafka,
with_mongo=with_mongo,
with_mongo=with_mongo or with_mongo_secure,
with_redis=with_redis,
with_minio=with_minio,
with_azurite=with_azurite,
@ -1977,10 +1988,21 @@ class ClickHouseCluster:
)
)
if with_mongo and not self.with_mongo:
cmds.append(
self.setup_mongo_cmd(instance, env_variables, docker_compose_yml_dir)
)
if (with_mongo or with_mongo_secure) and not (
self.with_mongo or self.with_mongo_secure
):
if with_mongo_secure:
cmds.append(
self.setup_mongo_secure_cmd(
instance, env_variables, docker_compose_yml_dir
)
)
else:
cmds.append(
self.setup_mongo_cmd(
instance, env_variables, docker_compose_yml_dir
)
)
if with_coredns and not self.with_coredns:
cmds.append(
@ -2604,9 +2626,7 @@ class ClickHouseCluster:
while time.time() - start < timeout:
try:
connection.list_database_names()
logging.debug(
f"Connected to Mongo dbs: {connection.list_database_names()}"
)
logging.debug(f"Connected to Mongo dbs: {connection.database_names()}")
return
except Exception as ex:
logging.debug("Can't connect to Mongo " + str(ex))
@ -3060,7 +3080,7 @@ class ClickHouseCluster:
logging.debug("Setup Mongo")
run_and_check(self.base_mongo_cmd + common_opts)
self.up_called = True
self.wait_mongo_to_start(30)
self.wait_mongo_to_start(30, secure=self.with_mongo_secure)
if self.with_coredns and self.base_coredns_cmd:
logging.debug("Setup coredns")
@ -3507,9 +3527,6 @@ class ClickHouseInstance:
self.with_kerberized_hdfs = with_kerberized_hdfs
self.with_secrets = with_secrets
self.with_mongo = with_mongo
self.mongo_secure_config_dir = p.abspath(
p.join(base_path, "mongo_secure_config")
)
self.with_redis = with_redis
self.with_minio = with_minio
self.with_azurite = with_azurite
@ -4637,12 +4654,6 @@ class ClickHouseInstance:
dirs_exist_ok=True,
)
if self.with_mongo and os.path.exists(self.mongo_secure_config_dir):
shutil.copytree(
self.mongo_secure_config_dir,
p.abspath(p.join(self.path, "mongo_secure_config")),
)
if self.with_coredns:
shutil.copytree(
self.coredns_config_dir, p.abspath(p.join(self.path, "coredns_config"))

View File

@ -170,7 +170,6 @@ class SourceMongo(ExternalSource):
user,
password,
secure=False,
legacy=False,
):
ExternalSource.__init__(
self,
@ -183,15 +182,8 @@ class SourceMongo(ExternalSource):
password,
)
self.secure = secure
self.legacy = legacy
def get_source_str(self, table_name):
options = ""
if self.secure and self.legacy:
options = "<options>ssl=true</options>"
if self.secure and not self.legacy:
options = "<options>tls=true&amp;tlsAllowInvalidCertificates=true</options>"
return """
<mongodb>
<host>{host}</host>
@ -208,7 +200,7 @@ class SourceMongo(ExternalSource):
user=self.user,
password=self.password,
tbl=table_name,
options=options,
options="<options>ssl=true</options>" if self.secure else "",
)
def prepare(self, structure, table_name, cluster):
@ -260,15 +252,9 @@ class SourceMongoURI(SourceMongo):
return layout.name == "flat"
def get_source_str(self, table_name):
options = ""
if self.secure and self.legacy:
options = "ssl=true"
if self.secure and not self.legacy:
options = "tls=true&amp;tlsAllowInvalidCertificates=true"
return """
<mongodb>
<uri>mongodb://{user}:{password}@{host}:{port}/test?{options}</uri>
<uri>mongodb://{user}:{password}@{host}:{port}/test{options}</uri>
<collection>{tbl}</collection>
</mongodb>
""".format(
@ -277,7 +263,7 @@ class SourceMongoURI(SourceMongo):
user=self.user,
password=self.password,
tbl=table_name,
options=options,
options="?ssl=true" if self.secure else "",
)

View File

@ -0,0 +1,49 @@
-----BEGIN PRIVATE KEY-----
MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQC3uaPiZMfjPBBE
yDEYJsJIoriu0SaC80uTmPM7bFpnOOXOBvbT4wD2q+uVaLQifKtPTgZAkP5Y3rX8
S5TOzaLsNp68S1Ja/EzxQUolOSgb4A948TTiUTrTjfMxsPRhmxXTjozWV8CFtL9P
Lg6H+55oyQOJedWYe1kSWRJQayXSweBK5qjOPi2qDF/xdFRQuMivpBUar/b/E9GQ
RKpIaoqMYsl/WF/tReb4N658UxkVlFdR8s48UoA9LfJLMPr4N+QDTfvtcT2bYlpT
4a9b6IXa9BQKCw3AKfTqEPO1XunH//iLNkt1bLtqgZNyT/tY0tLY3EKMXIDuRBVn
KCbfVJ1RAgMBAAECggEAJFCjXiqBgB7tMEtJuPZgTK8tRhC9RgEFHUWMPmCqdeC/
O7wQqc0i8Z8Fz+CESpTN370Sa0y9mZ9b5WSjI0VuQLaDJcDVpHpeUwmOuFDV5ryh
EkzLITjhIdPbECVkCK7433o7yFpMCaGydtopsSNBKoEhG9ljKOKotoG4pwCm10N5
K9Qepj82OjRhLkpmuiMFb4/vvOm5dglYmkq5+n/fdUYFtrYr3NvMSCTlietPHDgV
Wb3strvk1g9ARWfa2j7Q6moF2sbyob9zVLoRiD9VgmNB60v7QAJxDctVkbOoDgKp
uN2fkxTHwlOPAO6Zhgnie11jnZr1711TFxmEfMkSKQKBgQDqpB8m0hSJsWLKWxQK
yx+5Xgs+Cr8gb0AYHJQ87obj2XqwXLpBSMrkzTn6vIGRv+NMSfiM/755RUm5aJPY
om+7F68JEIL26ZA7bIfjHhV5o9fvpo+6N6cJyR08Q/KkF8Tej9K4qQec0W/jtKeZ
KAJ1k7/BBuN82iTtEJ3GWBaaRwKBgQDIcwQrGlyyXqnBK25gl/E1Ng+V3p/2sy98
1BpEshxen4KorHEXCJArydELtvK/ll6agil6QebrJN5dtYOOgvcDTu1mQjdUPN3C
VXpSQ0L8XxfyTNYQTWON9wJGL1pzlTiyHvlSrQFsFWMUoxrqndWIIRtrXjap1npp
HDrcqy2/pwKBgB5fHhUlTjlAd7wfq+l1v2Z8ENJ4C6NEIzS7xkhYy6cEiIf5iLZY
mMKi+eVFrzPRdbdzP7Poipwh5tgT/EcnR3UdLK/srjcNpni6pKA2TatQFOxVT/dX
qsxudtVNKkQpO3dfgHQclPqsdWIxCRye/CqB9Gkk3h9UEUGKTBHXZx2TAoGAF0tG
cLvfidr2Xzxs10zQ+x4NMZ1teX3ZRuhfJRyNr3FZ/cAMZGDaYDxTzsiz7Q/Mbqgx
qcN+0lS2gq1VXHpbukaxz/Bh/agVHUBRtr2aSznBzqafOcXEi/roiL94A3aT4B85
WiJAyA60NPG/bwRojClMxm1sbNA/6XceYAaEioECgYEA3m88G3UwizfJAsfT5H5K
3HXNYzQ1XGrA8shI0kxeqfNP5qmTfH5q/K2VMWeShT3F/9Ytgc+H8c9XP1wKq7Zl
6AtmdDOeLzHkgwVK0p20/Wh2Qjw4ikJLdM+y8wnfMiwCXWQxoh1X905EwNtyBc2Z
9S3G5CXldFHC4NGdx0vetiE=
-----END PRIVATE KEY-----
-----BEGIN CERTIFICATE-----
MIIDazCCAlOgAwIBAgIUO9pfiBMsADdk9nBMHs10n8kaIr8wDQYJKoZIhvcNAQEL
BQAwRTELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM
GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDAeFw0yMjA0MTIwOTQxNDVaFw0yNTAx
MDUwOTQxNDVaMEUxCzAJBgNVBAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEw
HwYDVQQKDBhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQwggEiMA0GCSqGSIb3DQEB
AQUAA4IBDwAwggEKAoIBAQC3uaPiZMfjPBBEyDEYJsJIoriu0SaC80uTmPM7bFpn
OOXOBvbT4wD2q+uVaLQifKtPTgZAkP5Y3rX8S5TOzaLsNp68S1Ja/EzxQUolOSgb
4A948TTiUTrTjfMxsPRhmxXTjozWV8CFtL9PLg6H+55oyQOJedWYe1kSWRJQayXS
weBK5qjOPi2qDF/xdFRQuMivpBUar/b/E9GQRKpIaoqMYsl/WF/tReb4N658UxkV
lFdR8s48UoA9LfJLMPr4N+QDTfvtcT2bYlpT4a9b6IXa9BQKCw3AKfTqEPO1XunH
//iLNkt1bLtqgZNyT/tY0tLY3EKMXIDuRBVnKCbfVJ1RAgMBAAGjUzBRMB0GA1Ud
DgQWBBSx7Tx8W4c6wjW0qkeG7CAMLY7YkjAfBgNVHSMEGDAWgBSx7Tx8W4c6wjW0
qkeG7CAMLY7YkjAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQAb
/Up/LEIdwhiN/S3HolxY2D2BrTpKHLQuggBN4+gZlK5OksCkM46LYlP/ruHXCxbR
mQoRhmooj4TvkKyBwzvKq76O+OuRtBhXzRipnBbNTqFPLf9enJUrut8lsFrI+pdl
Nn4PSGGbFPpQ5vFRCktczwwYh0zLuZ/1DbFsbRWlDnZdvoWZdfV0qsvcBRK2DXDI
29xSfw897OpITIkaryZigQVsKv8TXhfsaq9PUuH0/z84S82QG5fR6FzULofgkylb
wXvwaSdcu3k4Lo8j77BEAEvlH8Ynja0eojx5Avl9h4iw/IOQKE4GAg56CzcequLv
clPlaBBWoD6yn+q4NhLF
-----END CERTIFICATE-----

View File

@ -1,6 +1,5 @@
net:
ssl:
mode: requireSSL
PEMKeyFile: /mongo/key.pem
CAFile: /mongo/cert.crt
PEMKeyFile: /mongo/mongo_cert.pem
allowConnectionsWithoutCertificates: true

View File

@ -1,3 +0,0 @@
<clickhouse>
<use_legacy_mongodb_integration>1</use_legacy_mongodb_integration>
</clickhouse>

View File

@ -1,3 +0,0 @@
<clickhouse>
<use_legacy_mongodb_integration>0</use_legacy_mongodb_integration>
</clickhouse>

View File

@ -1,24 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIEGzCCAwOgAwIBAgIUaoGlyuJAyvs6yowFXymfu7seEiUwDQYJKoZIhvcNAQEL
BQAwgZwxCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDbGlja0hvdXNlMRMwEQYDVQQH
DApDbGlja0hvdXNlMREwDwYDVQQKDAhQZXJzb25hbDETMBEGA1UECwwKQ2xpY2tI
b3VzZTEkMCIGCSqGSIb3DQEJARYVY2xpY2tob3VzZUBjbGlja2hvdXNlMRUwEwYD
VQQDDAxtb25nb19zZWN1cmUwHhcNMjQwNTI2MTYwMDMxWhcNMzQwNTI0MTYwMDMx
WjCBnDELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNsaWNrSG91c2UxEzARBgNVBAcM
CkNsaWNrSG91c2UxETAPBgNVBAoMCFBlcnNvbmFsMRMwEQYDVQQLDApDbGlja0hv
dXNlMSQwIgYJKoZIhvcNAQkBFhVjbGlja2hvdXNlQGNsaWNraG91c2UxFTATBgNV
BAMMDG1vbmdvX3NlY3VyZTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB
AJSeQfMG7xd0+kPehYlEsEw0Sm1DB05SXVCEzIX3DFD6XJrd8eeWwlzYaBatkcwj
+8yvqske02X/3YwpzJyFizAqJIoKql5c5Yii2xH1S9PFP0y+LoJre+eQziHyO33t
eeedeGNJ05Sm2ZAzjfMQ7Rdh6S+gdIO4Y102iQR5yr2aTrh7tu7XkNCjwKTqMMvz
SikP1Rft2J6ECim+MjYCCtH/4yXGeEJ5epU4t3y6Q23B2ZEhY+sqUdwgK9pu8oe4
mkZ1Qvwakc9Qg12owRSDjBBYrPvghXVpkJ2JkgKTrIAIz9tZ53eDVHNXbWMAotov
jEmRSoGIS1yzwmQ9PdxUwYcCAwEAAaNTMFEwHQYDVR0OBBYEFJyz3Kt5XBDg5cvI
0v1ioqejqX+CMB8GA1UdIwQYMBaAFJyz3Kt5XBDg5cvI0v1ioqejqX+CMA8GA1Ud
EwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAHAQFA5VMYvaQFnKtKfHg9TF
qfJ4uM3YsGdgsgmGWgflD1S4Z290H6Q2QvyZAEceTrlJxqArlWlVp5DAU6EeXjEh
QMAgdkJHF1Hg2jsZKPtdkb88UtuzwAME357T8NtEJSHzNE5QqYwlVM71JkWpdqvA
UUdOJbWhhJfowIf4tMmL1DUuIy2qYpoP/tEBXEw9uwpmZqb7KELwT3lRyOMaGFN7
RHVwbvJWlHiu83QDNaWz6ijQkWl3tCN6TWcFD1qc1x8GpMzjbsAAYbCx7fbHM2LD
9kGSCiyv5K0MLNK5u67RtUFfPHtyD8RA0TtxIZ4PEN/eFANKS2/5NEi1ZuZ5/Pk=
-----END CERTIFICATE-----

View File

@ -1,52 +0,0 @@
-----BEGIN PRIVATE KEY-----
MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCUnkHzBu8XdPpD
3oWJRLBMNEptQwdOUl1QhMyF9wxQ+lya3fHnlsJc2GgWrZHMI/vMr6rJHtNl/92M
KcychYswKiSKCqpeXOWIotsR9UvTxT9Mvi6Ca3vnkM4h8jt97XnnnXhjSdOUptmQ
M43zEO0XYekvoHSDuGNdNokEecq9mk64e7bu15DQo8Ck6jDL80opD9UX7diehAop
vjI2AgrR/+MlxnhCeXqVOLd8ukNtwdmRIWPrKlHcICvabvKHuJpGdUL8GpHPUINd
qMEUg4wQWKz74IV1aZCdiZICk6yACM/bWed3g1RzV21jAKLaL4xJkUqBiEtcs8Jk
PT3cVMGHAgMBAAECggEAAul6qiHchB+uQMCWyC5xTeRqAXR3tAv4Tj4fGJjkXY4Z
OrAjr9Kp38EvX1amgvUWV3FT3NMevDf5xd9OdzAA0g0uJIF+mAhYFW48i1FnQcHQ
mOf0zmiZR7l8o7ROb3JvooXHxW+ba/qjGPVwC801gJvruehgbOCRxh9DTRp7sH5K
BmcddhULhKBEQjWUmYNEM3A2axpdi3g1aYKERRLn8J0DXcItTwbxuxbNcs3erl8W
3yyv/JKmqnWF5sNyX3wEWuQcDEZZy+W7Hn4KPMxyU+WA5el5nJ8kFlxhpInmajwu
8Ytn6IEyThyXutVomosVBuP16QORl2Nad0hnQO9toQKBgQDDgiehXr3k2wfVaVOD
PocW4leXausIU2XcCn6FxTG9vLUDMPANw0MxgenC2nrjaUU9J9UjdRYgMcFGWrl4
E27wEn5e0nZ/Y7F2cfhuOc9vNmZ+eHm2KQRyfAjIVL5Hpldqk2jXyCnLBNeWGHSw
kPQMU+FLqmrOFUvXlD2my+OSHwKBgQDCmgS9r+xFh4BCB9dY6eyQJF/jYmAQHs26
80WJ6gAhbUw1O71uDtS9/3PZVXwwNCOHrcc49BPrpJdxGPHGvd2Q5y+j5LDDbQSZ
aLTiCZ2B0RM5Bd2dXD8gEHN4WCX7pJ/o4kDi4zONBmp5mg/tFfer5z5IU/1P7Wak
1Mu0JIHzmQKBgDNaNoqeVgaMuYwGtFbez6DlJtiwzrdLIJAheYYte5k4vdruub8D
sNyKIRp7RJgDCJq9obBEiuE98GRIZDrz78nDMco6QcHIL87KtNRO/vtZMKa7gkyk
jXR8u9nS2H/9YyytN3amLsQSq4XTOqM+D7xFNAIp6w/ibB9d4quzFj1FAoGBAKTE
x/LcO897NWuzO/D6z+QUCGR87R15F3SNenmVedrTskz4ciH3yMW+v5ZrPSWLX/IH
f8GHWD6TM+780eoW5L1GIh5BCjHN4rEJ6O3iekxqfD4x6zzL2F8Lztk8uZxh/Uuw
FoSFHybvIcQoYAe8K+KPfzq6cqb0OY6i5n920dkxAoGAJkw6ADqsJfH3NR+bQfgF
oEA1KqriMxyEJm44Y7E80C+iF4iNALF+Er9TSnr4mDxX5e/dW9d1YeS9o0nOfkpF
MaBmJfxqo4QQJLPRaxYQ2Jhfn7irir4BroxeNXQgNNhgSuKIvkfRyGYwl7P0AT4v
8H8rkZGneMD3gLB5MfnRhGk=
-----END PRIVATE KEY-----
-----BEGIN CERTIFICATE-----
MIIEGzCCAwOgAwIBAgIUaoGlyuJAyvs6yowFXymfu7seEiUwDQYJKoZIhvcNAQEL
BQAwgZwxCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDbGlja0hvdXNlMRMwEQYDVQQH
DApDbGlja0hvdXNlMREwDwYDVQQKDAhQZXJzb25hbDETMBEGA1UECwwKQ2xpY2tI
b3VzZTEkMCIGCSqGSIb3DQEJARYVY2xpY2tob3VzZUBjbGlja2hvdXNlMRUwEwYD
VQQDDAxtb25nb19zZWN1cmUwHhcNMjQwNTI2MTYwMDMxWhcNMzQwNTI0MTYwMDMx
WjCBnDELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNsaWNrSG91c2UxEzARBgNVBAcM
CkNsaWNrSG91c2UxETAPBgNVBAoMCFBlcnNvbmFsMRMwEQYDVQQLDApDbGlja0hv
dXNlMSQwIgYJKoZIhvcNAQkBFhVjbGlja2hvdXNlQGNsaWNraG91c2UxFTATBgNV
BAMMDG1vbmdvX3NlY3VyZTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB
AJSeQfMG7xd0+kPehYlEsEw0Sm1DB05SXVCEzIX3DFD6XJrd8eeWwlzYaBatkcwj
+8yvqske02X/3YwpzJyFizAqJIoKql5c5Yii2xH1S9PFP0y+LoJre+eQziHyO33t
eeedeGNJ05Sm2ZAzjfMQ7Rdh6S+gdIO4Y102iQR5yr2aTrh7tu7XkNCjwKTqMMvz
SikP1Rft2J6ECim+MjYCCtH/4yXGeEJ5epU4t3y6Q23B2ZEhY+sqUdwgK9pu8oe4
mkZ1Qvwakc9Qg12owRSDjBBYrPvghXVpkJ2JkgKTrIAIz9tZ53eDVHNXbWMAotov
jEmRSoGIS1yzwmQ9PdxUwYcCAwEAAaNTMFEwHQYDVR0OBBYEFJyz3Kt5XBDg5cvI
0v1ioqejqX+CMB8GA1UdIwQYMBaAFJyz3Kt5XBDg5cvI0v1ioqejqX+CMA8GA1Ud
EwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAHAQFA5VMYvaQFnKtKfHg9TF
qfJ4uM3YsGdgsgmGWgflD1S4Z290H6Q2QvyZAEceTrlJxqArlWlVp5DAU6EeXjEh
QMAgdkJHF1Hg2jsZKPtdkb88UtuzwAME357T8NtEJSHzNE5QqYwlVM71JkWpdqvA
UUdOJbWhhJfowIf4tMmL1DUuIy2qYpoP/tEBXEw9uwpmZqb7KELwT3lRyOMaGFN7
RHVwbvJWlHiu83QDNaWz6ijQkWl3tCN6TWcFD1qc1x8GpMzjbsAAYbCx7fbHM2LD
9kGSCiyv5K0MLNK5u67RtUFfPHtyD8RA0TtxIZ4PEN/eFANKS2/5NEi1ZuZ5/Pk=
-----END CERTIFICATE-----

View File

@ -1,52 +0,0 @@
-----BEGIN PRIVATE KEY-----
MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCUnkHzBu8XdPpD
3oWJRLBMNEptQwdOUl1QhMyF9wxQ+lya3fHnlsJc2GgWrZHMI/vMr6rJHtNl/92M
KcychYswKiSKCqpeXOWIotsR9UvTxT9Mvi6Ca3vnkM4h8jt97XnnnXhjSdOUptmQ
M43zEO0XYekvoHSDuGNdNokEecq9mk64e7bu15DQo8Ck6jDL80opD9UX7diehAop
vjI2AgrR/+MlxnhCeXqVOLd8ukNtwdmRIWPrKlHcICvabvKHuJpGdUL8GpHPUINd
qMEUg4wQWKz74IV1aZCdiZICk6yACM/bWed3g1RzV21jAKLaL4xJkUqBiEtcs8Jk
PT3cVMGHAgMBAAECggEAAul6qiHchB+uQMCWyC5xTeRqAXR3tAv4Tj4fGJjkXY4Z
OrAjr9Kp38EvX1amgvUWV3FT3NMevDf5xd9OdzAA0g0uJIF+mAhYFW48i1FnQcHQ
mOf0zmiZR7l8o7ROb3JvooXHxW+ba/qjGPVwC801gJvruehgbOCRxh9DTRp7sH5K
BmcddhULhKBEQjWUmYNEM3A2axpdi3g1aYKERRLn8J0DXcItTwbxuxbNcs3erl8W
3yyv/JKmqnWF5sNyX3wEWuQcDEZZy+W7Hn4KPMxyU+WA5el5nJ8kFlxhpInmajwu
8Ytn6IEyThyXutVomosVBuP16QORl2Nad0hnQO9toQKBgQDDgiehXr3k2wfVaVOD
PocW4leXausIU2XcCn6FxTG9vLUDMPANw0MxgenC2nrjaUU9J9UjdRYgMcFGWrl4
E27wEn5e0nZ/Y7F2cfhuOc9vNmZ+eHm2KQRyfAjIVL5Hpldqk2jXyCnLBNeWGHSw
kPQMU+FLqmrOFUvXlD2my+OSHwKBgQDCmgS9r+xFh4BCB9dY6eyQJF/jYmAQHs26
80WJ6gAhbUw1O71uDtS9/3PZVXwwNCOHrcc49BPrpJdxGPHGvd2Q5y+j5LDDbQSZ
aLTiCZ2B0RM5Bd2dXD8gEHN4WCX7pJ/o4kDi4zONBmp5mg/tFfer5z5IU/1P7Wak
1Mu0JIHzmQKBgDNaNoqeVgaMuYwGtFbez6DlJtiwzrdLIJAheYYte5k4vdruub8D
sNyKIRp7RJgDCJq9obBEiuE98GRIZDrz78nDMco6QcHIL87KtNRO/vtZMKa7gkyk
jXR8u9nS2H/9YyytN3amLsQSq4XTOqM+D7xFNAIp6w/ibB9d4quzFj1FAoGBAKTE
x/LcO897NWuzO/D6z+QUCGR87R15F3SNenmVedrTskz4ciH3yMW+v5ZrPSWLX/IH
f8GHWD6TM+780eoW5L1GIh5BCjHN4rEJ6O3iekxqfD4x6zzL2F8Lztk8uZxh/Uuw
FoSFHybvIcQoYAe8K+KPfzq6cqb0OY6i5n920dkxAoGAJkw6ADqsJfH3NR+bQfgF
oEA1KqriMxyEJm44Y7E80C+iF4iNALF+Er9TSnr4mDxX5e/dW9d1YeS9o0nOfkpF
MaBmJfxqo4QQJLPRaxYQ2Jhfn7irir4BroxeNXQgNNhgSuKIvkfRyGYwl7P0AT4v
8H8rkZGneMD3gLB5MfnRhGk=
-----END PRIVATE KEY-----
-----BEGIN CERTIFICATE-----
MIIEGzCCAwOgAwIBAgIUaoGlyuJAyvs6yowFXymfu7seEiUwDQYJKoZIhvcNAQEL
BQAwgZwxCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDbGlja0hvdXNlMRMwEQYDVQQH
DApDbGlja0hvdXNlMREwDwYDVQQKDAhQZXJzb25hbDETMBEGA1UECwwKQ2xpY2tI
b3VzZTEkMCIGCSqGSIb3DQEJARYVY2xpY2tob3VzZUBjbGlja2hvdXNlMRUwEwYD
VQQDDAxtb25nb19zZWN1cmUwHhcNMjQwNTI2MTYwMDMxWhcNMzQwNTI0MTYwMDMx
WjCBnDELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNsaWNrSG91c2UxEzARBgNVBAcM
CkNsaWNrSG91c2UxETAPBgNVBAoMCFBlcnNvbmFsMRMwEQYDVQQLDApDbGlja0hv
dXNlMSQwIgYJKoZIhvcNAQkBFhVjbGlja2hvdXNlQGNsaWNraG91c2UxFTATBgNV
BAMMDG1vbmdvX3NlY3VyZTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB
AJSeQfMG7xd0+kPehYlEsEw0Sm1DB05SXVCEzIX3DFD6XJrd8eeWwlzYaBatkcwj
+8yvqske02X/3YwpzJyFizAqJIoKql5c5Yii2xH1S9PFP0y+LoJre+eQziHyO33t
eeedeGNJ05Sm2ZAzjfMQ7Rdh6S+gdIO4Y102iQR5yr2aTrh7tu7XkNCjwKTqMMvz
SikP1Rft2J6ECim+MjYCCtH/4yXGeEJ5epU4t3y6Q23B2ZEhY+sqUdwgK9pu8oe4
mkZ1Qvwakc9Qg12owRSDjBBYrPvghXVpkJ2JkgKTrIAIz9tZ53eDVHNXbWMAotov
jEmRSoGIS1yzwmQ9PdxUwYcCAwEAAaNTMFEwHQYDVR0OBBYEFJyz3Kt5XBDg5cvI
0v1ioqejqX+CMB8GA1UdIwQYMBaAFJyz3Kt5XBDg5cvI0v1ioqejqX+CMA8GA1Ud
EwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAHAQFA5VMYvaQFnKtKfHg9TF
qfJ4uM3YsGdgsgmGWgflD1S4Z290H6Q2QvyZAEceTrlJxqArlWlVp5DAU6EeXjEh
QMAgdkJHF1Hg2jsZKPtdkb88UtuzwAME357T8NtEJSHzNE5QqYwlVM71JkWpdqvA
UUdOJbWhhJfowIf4tMmL1DUuIy2qYpoP/tEBXEw9uwpmZqb7KELwT3lRyOMaGFN7
RHVwbvJWlHiu83QDNaWz6ijQkWl3tCN6TWcFD1qc1x8GpMzjbsAAYbCx7fbHM2LD
9kGSCiyv5K0MLNK5u67RtUFfPHtyD8RA0TtxIZ4PEN/eFANKS2/5NEi1ZuZ5/Pk=
-----END CERTIFICATE-----

View File

@ -22,28 +22,22 @@ def secure_connection(request):
return request.param
@pytest.fixture(scope="module")
def legacy(request):
return request.param
@pytest.fixture(scope="module")
def cluster(secure_connection):
return ClickHouseCluster(__file__)
@pytest.fixture(scope="module")
def source(secure_connection, legacy, cluster):
def source(secure_connection, cluster):
return SourceMongo(
"MongoDB",
"localhost",
cluster.mongo_secure_port if secure_connection else cluster.mongo_port,
"mongo_secure" if secure_connection else "mongo1",
27017,
cluster.mongo_port,
cluster.mongo_host,
"27017",
"root",
"clickhouse",
secure=secure_connection,
legacy=legacy,
)
@ -70,24 +64,18 @@ def ranged_tester(source):
@pytest.fixture(scope="module")
def main_config(secure_connection, legacy):
if legacy:
main_config = [os.path.join("configs", "mongo", "legacy.xml")]
else:
main_config = [os.path.join("configs", "mongo", "new.xml")]
def main_config(secure_connection):
main_config = []
if secure_connection:
main_config.append(os.path.join("configs", "disable_ssl_verification.xml"))
else:
main_config.append(os.path.join("configs", "ssl_verification.xml"))
return main_config
@pytest.fixture(scope="module")
def started_cluster(
secure_connection,
legacy,
cluster,
main_config,
simple_tester,
@ -97,13 +85,12 @@ def started_cluster(
SOURCE = SourceMongo(
"MongoDB",
"localhost",
27017,
"mongo_secure" if secure_connection else "mongo1",
27017,
cluster.mongo_port,
cluster.mongo_host,
"27017",
"root",
"clickhouse",
secure=secure_connection,
legacy=legacy,
)
dictionaries = simple_tester.list_dictionaries()
@ -112,6 +99,7 @@ def started_cluster(
main_configs=main_config,
dictionaries=dictionaries,
with_mongo=True,
with_mongo_secure=secure_connection,
)
try:
@ -128,32 +116,24 @@ def started_cluster(
@pytest.mark.parametrize("secure_connection", [False], indirect=["secure_connection"])
@pytest.mark.parametrize("legacy", [False, True], indirect=["legacy"])
@pytest.mark.parametrize("layout_name", sorted(LAYOUTS_SIMPLE))
def test_simple(secure_connection, legacy, started_cluster, layout_name, simple_tester):
def test_simple(secure_connection, started_cluster, layout_name, simple_tester):
simple_tester.execute(layout_name, started_cluster.instances["node"])
@pytest.mark.parametrize("secure_connection", [False], indirect=["secure_connection"])
@pytest.mark.parametrize("legacy", [False, True], indirect=["legacy"])
@pytest.mark.parametrize("layout_name", sorted(LAYOUTS_COMPLEX))
def test_complex(
secure_connection, legacy, started_cluster, layout_name, complex_tester
):
def test_complex(secure_connection, started_cluster, layout_name, complex_tester):
complex_tester.execute(layout_name, started_cluster.instances["node"])
@pytest.mark.parametrize("secure_connection", [False], indirect=["secure_connection"])
@pytest.mark.parametrize("legacy", [False, True], indirect=["legacy"])
@pytest.mark.parametrize("layout_name", sorted(LAYOUTS_RANGED))
def test_ranged(secure_connection, legacy, started_cluster, layout_name, ranged_tester):
def test_ranged(secure_connection, started_cluster, layout_name, ranged_tester):
ranged_tester.execute(layout_name, started_cluster.instances["node"])
@pytest.mark.parametrize("secure_connection", [True], indirect=["secure_connection"])
@pytest.mark.parametrize("legacy", [False, True], indirect=["legacy"])
@pytest.mark.parametrize("layout_name", sorted(LAYOUTS_SIMPLE))
def test_simple_ssl(
secure_connection, legacy, started_cluster, layout_name, simple_tester
):
def test_simple_ssl(secure_connection, started_cluster, layout_name, simple_tester):
simple_tester.execute(layout_name, started_cluster.instances["node"])

View File

@ -16,28 +16,22 @@ def secure_connection(request):
return request.param
@pytest.fixture(scope="module")
def legacy(request):
return request.param
@pytest.fixture(scope="module")
def cluster(secure_connection):
return ClickHouseCluster(__file__)
@pytest.fixture(scope="module")
def source(secure_connection, legacy, cluster):
def source(secure_connection, cluster):
return SourceMongoURI(
"MongoDB",
"localhost",
cluster.mongo_secure_port if secure_connection else cluster.mongo_port,
"mongo_secure" if secure_connection else "mongo1",
27017,
cluster.mongo_port,
cluster.mongo_host,
"27017",
"root",
"clickhouse",
secure=secure_connection,
legacy=legacy,
)
@ -50,22 +44,17 @@ def simple_tester(source):
@pytest.fixture(scope="module")
def main_config(secure_connection, legacy):
if legacy:
main_config = [os.path.join("configs", "mongo", "legacy.xml")]
else:
main_config = [os.path.join("configs", "mongo", "new.xml")]
def main_config(secure_connection):
main_config = []
if secure_connection:
main_config.append(os.path.join("configs", "disable_ssl_verification.xml"))
else:
main_config.append(os.path.join("configs", "ssl_verification.xml"))
return main_config
@pytest.fixture(scope="module")
def started_cluster(secure_connection, legacy, cluster, main_config, simple_tester):
def started_cluster(secure_connection, cluster, main_config, simple_tester):
dictionaries = simple_tester.list_dictionaries()
node = cluster.add_instance(
@ -73,6 +62,7 @@ def started_cluster(secure_connection, legacy, cluster, main_config, simple_test
main_configs=main_config,
dictionaries=dictionaries,
with_mongo=True,
with_mongo_secure=secure_connection,
)
try:
cluster.start()
@ -84,16 +74,12 @@ def started_cluster(secure_connection, legacy, cluster, main_config, simple_test
# See comment in SourceMongoURI
@pytest.mark.parametrize("secure_connection", [False], indirect=["secure_connection"])
@pytest.mark.parametrize("legacy", [False, True], indirect=["legacy"])
@pytest.mark.parametrize("layout_name", ["flat"])
def test_simple(secure_connection, legacy, started_cluster, simple_tester, layout_name):
def test_simple(secure_connection, started_cluster, simple_tester, layout_name):
simple_tester.execute(layout_name, started_cluster.instances["uri_node"])
@pytest.mark.parametrize("secure_connection", [True], indirect=["secure_connection"])
@pytest.mark.parametrize("legacy", [False, True], indirect=["legacy"])
@pytest.mark.parametrize("layout_name", ["flat"])
def test_simple_ssl(
secure_connection, legacy, started_cluster, simple_tester, layout_name
):
def test_simple_ssl(secure_connection, started_cluster, simple_tester, layout_name):
simple_tester.execute(layout_name, started_cluster.instances["uri_node"])

View File

@ -1,3 +0,0 @@
<clickhouse>
<use_legacy_mongodb_integration>0</use_legacy_mongodb_integration>
</clickhouse>

View File

@ -8,10 +8,5 @@
<database>test</database>
<collection>simple_table</collection>
</mongo1>
<mongo1_uri>
<uri>mongodb://root:clickhouse@mongo1:27017/test</uri>
<collection>simple_table_uri</collection>
</mongo1_uri>
</named_collections>
</clickhouse>

View File

@ -1,24 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIEGzCCAwOgAwIBAgIUaoGlyuJAyvs6yowFXymfu7seEiUwDQYJKoZIhvcNAQEL
BQAwgZwxCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDbGlja0hvdXNlMRMwEQYDVQQH
DApDbGlja0hvdXNlMREwDwYDVQQKDAhQZXJzb25hbDETMBEGA1UECwwKQ2xpY2tI
b3VzZTEkMCIGCSqGSIb3DQEJARYVY2xpY2tob3VzZUBjbGlja2hvdXNlMRUwEwYD
VQQDDAxtb25nb19zZWN1cmUwHhcNMjQwNTI2MTYwMDMxWhcNMzQwNTI0MTYwMDMx
WjCBnDELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNsaWNrSG91c2UxEzARBgNVBAcM
CkNsaWNrSG91c2UxETAPBgNVBAoMCFBlcnNvbmFsMRMwEQYDVQQLDApDbGlja0hv
dXNlMSQwIgYJKoZIhvcNAQkBFhVjbGlja2hvdXNlQGNsaWNraG91c2UxFTATBgNV
BAMMDG1vbmdvX3NlY3VyZTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB
AJSeQfMG7xd0+kPehYlEsEw0Sm1DB05SXVCEzIX3DFD6XJrd8eeWwlzYaBatkcwj
+8yvqske02X/3YwpzJyFizAqJIoKql5c5Yii2xH1S9PFP0y+LoJre+eQziHyO33t
eeedeGNJ05Sm2ZAzjfMQ7Rdh6S+gdIO4Y102iQR5yr2aTrh7tu7XkNCjwKTqMMvz
SikP1Rft2J6ECim+MjYCCtH/4yXGeEJ5epU4t3y6Q23B2ZEhY+sqUdwgK9pu8oe4
mkZ1Qvwakc9Qg12owRSDjBBYrPvghXVpkJ2JkgKTrIAIz9tZ53eDVHNXbWMAotov
jEmRSoGIS1yzwmQ9PdxUwYcCAwEAAaNTMFEwHQYDVR0OBBYEFJyz3Kt5XBDg5cvI
0v1ioqejqX+CMB8GA1UdIwQYMBaAFJyz3Kt5XBDg5cvI0v1ioqejqX+CMA8GA1Ud
EwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAHAQFA5VMYvaQFnKtKfHg9TF
qfJ4uM3YsGdgsgmGWgflD1S4Z290H6Q2QvyZAEceTrlJxqArlWlVp5DAU6EeXjEh
QMAgdkJHF1Hg2jsZKPtdkb88UtuzwAME357T8NtEJSHzNE5QqYwlVM71JkWpdqvA
UUdOJbWhhJfowIf4tMmL1DUuIy2qYpoP/tEBXEw9uwpmZqb7KELwT3lRyOMaGFN7
RHVwbvJWlHiu83QDNaWz6ijQkWl3tCN6TWcFD1qc1x8GpMzjbsAAYbCx7fbHM2LD
9kGSCiyv5K0MLNK5u67RtUFfPHtyD8RA0TtxIZ4PEN/eFANKS2/5NEi1ZuZ5/Pk=
-----END CERTIFICATE-----

View File

@ -1,52 +0,0 @@
-----BEGIN PRIVATE KEY-----
MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCUnkHzBu8XdPpD
3oWJRLBMNEptQwdOUl1QhMyF9wxQ+lya3fHnlsJc2GgWrZHMI/vMr6rJHtNl/92M
KcychYswKiSKCqpeXOWIotsR9UvTxT9Mvi6Ca3vnkM4h8jt97XnnnXhjSdOUptmQ
M43zEO0XYekvoHSDuGNdNokEecq9mk64e7bu15DQo8Ck6jDL80opD9UX7diehAop
vjI2AgrR/+MlxnhCeXqVOLd8ukNtwdmRIWPrKlHcICvabvKHuJpGdUL8GpHPUINd
qMEUg4wQWKz74IV1aZCdiZICk6yACM/bWed3g1RzV21jAKLaL4xJkUqBiEtcs8Jk
PT3cVMGHAgMBAAECggEAAul6qiHchB+uQMCWyC5xTeRqAXR3tAv4Tj4fGJjkXY4Z
OrAjr9Kp38EvX1amgvUWV3FT3NMevDf5xd9OdzAA0g0uJIF+mAhYFW48i1FnQcHQ
mOf0zmiZR7l8o7ROb3JvooXHxW+ba/qjGPVwC801gJvruehgbOCRxh9DTRp7sH5K
BmcddhULhKBEQjWUmYNEM3A2axpdi3g1aYKERRLn8J0DXcItTwbxuxbNcs3erl8W
3yyv/JKmqnWF5sNyX3wEWuQcDEZZy+W7Hn4KPMxyU+WA5el5nJ8kFlxhpInmajwu
8Ytn6IEyThyXutVomosVBuP16QORl2Nad0hnQO9toQKBgQDDgiehXr3k2wfVaVOD
PocW4leXausIU2XcCn6FxTG9vLUDMPANw0MxgenC2nrjaUU9J9UjdRYgMcFGWrl4
E27wEn5e0nZ/Y7F2cfhuOc9vNmZ+eHm2KQRyfAjIVL5Hpldqk2jXyCnLBNeWGHSw
kPQMU+FLqmrOFUvXlD2my+OSHwKBgQDCmgS9r+xFh4BCB9dY6eyQJF/jYmAQHs26
80WJ6gAhbUw1O71uDtS9/3PZVXwwNCOHrcc49BPrpJdxGPHGvd2Q5y+j5LDDbQSZ
aLTiCZ2B0RM5Bd2dXD8gEHN4WCX7pJ/o4kDi4zONBmp5mg/tFfer5z5IU/1P7Wak
1Mu0JIHzmQKBgDNaNoqeVgaMuYwGtFbez6DlJtiwzrdLIJAheYYte5k4vdruub8D
sNyKIRp7RJgDCJq9obBEiuE98GRIZDrz78nDMco6QcHIL87KtNRO/vtZMKa7gkyk
jXR8u9nS2H/9YyytN3amLsQSq4XTOqM+D7xFNAIp6w/ibB9d4quzFj1FAoGBAKTE
x/LcO897NWuzO/D6z+QUCGR87R15F3SNenmVedrTskz4ciH3yMW+v5ZrPSWLX/IH
f8GHWD6TM+780eoW5L1GIh5BCjHN4rEJ6O3iekxqfD4x6zzL2F8Lztk8uZxh/Uuw
FoSFHybvIcQoYAe8K+KPfzq6cqb0OY6i5n920dkxAoGAJkw6ADqsJfH3NR+bQfgF
oEA1KqriMxyEJm44Y7E80C+iF4iNALF+Er9TSnr4mDxX5e/dW9d1YeS9o0nOfkpF
MaBmJfxqo4QQJLPRaxYQ2Jhfn7irir4BroxeNXQgNNhgSuKIvkfRyGYwl7P0AT4v
8H8rkZGneMD3gLB5MfnRhGk=
-----END PRIVATE KEY-----
-----BEGIN CERTIFICATE-----
MIIEGzCCAwOgAwIBAgIUaoGlyuJAyvs6yowFXymfu7seEiUwDQYJKoZIhvcNAQEL
BQAwgZwxCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDbGlja0hvdXNlMRMwEQYDVQQH
DApDbGlja0hvdXNlMREwDwYDVQQKDAhQZXJzb25hbDETMBEGA1UECwwKQ2xpY2tI
b3VzZTEkMCIGCSqGSIb3DQEJARYVY2xpY2tob3VzZUBjbGlja2hvdXNlMRUwEwYD
VQQDDAxtb25nb19zZWN1cmUwHhcNMjQwNTI2MTYwMDMxWhcNMzQwNTI0MTYwMDMx
WjCBnDELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNsaWNrSG91c2UxEzARBgNVBAcM
CkNsaWNrSG91c2UxETAPBgNVBAoMCFBlcnNvbmFsMRMwEQYDVQQLDApDbGlja0hv
dXNlMSQwIgYJKoZIhvcNAQkBFhVjbGlja2hvdXNlQGNsaWNraG91c2UxFTATBgNV
BAMMDG1vbmdvX3NlY3VyZTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB
AJSeQfMG7xd0+kPehYlEsEw0Sm1DB05SXVCEzIX3DFD6XJrd8eeWwlzYaBatkcwj
+8yvqske02X/3YwpzJyFizAqJIoKql5c5Yii2xH1S9PFP0y+LoJre+eQziHyO33t
eeedeGNJ05Sm2ZAzjfMQ7Rdh6S+gdIO4Y102iQR5yr2aTrh7tu7XkNCjwKTqMMvz
SikP1Rft2J6ECim+MjYCCtH/4yXGeEJ5epU4t3y6Q23B2ZEhY+sqUdwgK9pu8oe4
mkZ1Qvwakc9Qg12owRSDjBBYrPvghXVpkJ2JkgKTrIAIz9tZ53eDVHNXbWMAotov
jEmRSoGIS1yzwmQ9PdxUwYcCAwEAAaNTMFEwHQYDVR0OBBYEFJyz3Kt5XBDg5cvI
0v1ioqejqX+CMB8GA1UdIwQYMBaAFJyz3Kt5XBDg5cvI0v1ioqejqX+CMA8GA1Ud
EwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAHAQFA5VMYvaQFnKtKfHg9TF
qfJ4uM3YsGdgsgmGWgflD1S4Z290H6Q2QvyZAEceTrlJxqArlWlVp5DAU6EeXjEh
QMAgdkJHF1Hg2jsZKPtdkb88UtuzwAME357T8NtEJSHzNE5QqYwlVM71JkWpdqvA
UUdOJbWhhJfowIf4tMmL1DUuIy2qYpoP/tEBXEw9uwpmZqb7KELwT3lRyOMaGFN7
RHVwbvJWlHiu83QDNaWz6ijQkWl3tCN6TWcFD1qc1x8GpMzjbsAAYbCx7fbHM2LD
9kGSCiyv5K0MLNK5u67RtUFfPHtyD8RA0TtxIZ4PEN/eFANKS2/5NEi1ZuZ5/Pk=
-----END CERTIFICATE-----

View File

@ -1,6 +0,0 @@
net:
ssl:
mode: requireSSL
PEMKeyFile: /mongo/key.pem
CAFile: /mongo/cert.crt
allowConnectionsWithoutCertificates: true

File diff suppressed because it is too large Load Diff

View File

@ -1,3 +0,0 @@
<clickhouse>
<use_legacy_mongodb_integration>1</use_legacy_mongodb_integration>
</clickhouse>

View File

@ -1,12 +0,0 @@
<clickhouse>
<named_collections>
<mongo1>
<user>root</user>
<password>clickhouse</password>
<host>mongo1</host>
<port>27017</port>
<database>test</database>
<collection>simple_table</collection>
</mongo1>
</named_collections>
</clickhouse>

View File

@ -1,9 +0,0 @@
<clickhouse>
<users>
<default>
<password></password>
<profile>default</profile>
<named_collection_control>1</named_collection_control>
</default>
</users>
</clickhouse>

View File

@ -1,24 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIEGzCCAwOgAwIBAgIUaoGlyuJAyvs6yowFXymfu7seEiUwDQYJKoZIhvcNAQEL
BQAwgZwxCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDbGlja0hvdXNlMRMwEQYDVQQH
DApDbGlja0hvdXNlMREwDwYDVQQKDAhQZXJzb25hbDETMBEGA1UECwwKQ2xpY2tI
b3VzZTEkMCIGCSqGSIb3DQEJARYVY2xpY2tob3VzZUBjbGlja2hvdXNlMRUwEwYD
VQQDDAxtb25nb19zZWN1cmUwHhcNMjQwNTI2MTYwMDMxWhcNMzQwNTI0MTYwMDMx
WjCBnDELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNsaWNrSG91c2UxEzARBgNVBAcM
CkNsaWNrSG91c2UxETAPBgNVBAoMCFBlcnNvbmFsMRMwEQYDVQQLDApDbGlja0hv
dXNlMSQwIgYJKoZIhvcNAQkBFhVjbGlja2hvdXNlQGNsaWNraG91c2UxFTATBgNV
BAMMDG1vbmdvX3NlY3VyZTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB
AJSeQfMG7xd0+kPehYlEsEw0Sm1DB05SXVCEzIX3DFD6XJrd8eeWwlzYaBatkcwj
+8yvqske02X/3YwpzJyFizAqJIoKql5c5Yii2xH1S9PFP0y+LoJre+eQziHyO33t
eeedeGNJ05Sm2ZAzjfMQ7Rdh6S+gdIO4Y102iQR5yr2aTrh7tu7XkNCjwKTqMMvz
SikP1Rft2J6ECim+MjYCCtH/4yXGeEJ5epU4t3y6Q23B2ZEhY+sqUdwgK9pu8oe4
mkZ1Qvwakc9Qg12owRSDjBBYrPvghXVpkJ2JkgKTrIAIz9tZ53eDVHNXbWMAotov
jEmRSoGIS1yzwmQ9PdxUwYcCAwEAAaNTMFEwHQYDVR0OBBYEFJyz3Kt5XBDg5cvI
0v1ioqejqX+CMB8GA1UdIwQYMBaAFJyz3Kt5XBDg5cvI0v1ioqejqX+CMA8GA1Ud
EwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAHAQFA5VMYvaQFnKtKfHg9TF
qfJ4uM3YsGdgsgmGWgflD1S4Z290H6Q2QvyZAEceTrlJxqArlWlVp5DAU6EeXjEh
QMAgdkJHF1Hg2jsZKPtdkb88UtuzwAME357T8NtEJSHzNE5QqYwlVM71JkWpdqvA
UUdOJbWhhJfowIf4tMmL1DUuIy2qYpoP/tEBXEw9uwpmZqb7KELwT3lRyOMaGFN7
RHVwbvJWlHiu83QDNaWz6ijQkWl3tCN6TWcFD1qc1x8GpMzjbsAAYbCx7fbHM2LD
9kGSCiyv5K0MLNK5u67RtUFfPHtyD8RA0TtxIZ4PEN/eFANKS2/5NEi1ZuZ5/Pk=
-----END CERTIFICATE-----

View File

@ -1,52 +0,0 @@
-----BEGIN PRIVATE KEY-----
MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCUnkHzBu8XdPpD
3oWJRLBMNEptQwdOUl1QhMyF9wxQ+lya3fHnlsJc2GgWrZHMI/vMr6rJHtNl/92M
KcychYswKiSKCqpeXOWIotsR9UvTxT9Mvi6Ca3vnkM4h8jt97XnnnXhjSdOUptmQ
M43zEO0XYekvoHSDuGNdNokEecq9mk64e7bu15DQo8Ck6jDL80opD9UX7diehAop
vjI2AgrR/+MlxnhCeXqVOLd8ukNtwdmRIWPrKlHcICvabvKHuJpGdUL8GpHPUINd
qMEUg4wQWKz74IV1aZCdiZICk6yACM/bWed3g1RzV21jAKLaL4xJkUqBiEtcs8Jk
PT3cVMGHAgMBAAECggEAAul6qiHchB+uQMCWyC5xTeRqAXR3tAv4Tj4fGJjkXY4Z
OrAjr9Kp38EvX1amgvUWV3FT3NMevDf5xd9OdzAA0g0uJIF+mAhYFW48i1FnQcHQ
mOf0zmiZR7l8o7ROb3JvooXHxW+ba/qjGPVwC801gJvruehgbOCRxh9DTRp7sH5K
BmcddhULhKBEQjWUmYNEM3A2axpdi3g1aYKERRLn8J0DXcItTwbxuxbNcs3erl8W
3yyv/JKmqnWF5sNyX3wEWuQcDEZZy+W7Hn4KPMxyU+WA5el5nJ8kFlxhpInmajwu
8Ytn6IEyThyXutVomosVBuP16QORl2Nad0hnQO9toQKBgQDDgiehXr3k2wfVaVOD
PocW4leXausIU2XcCn6FxTG9vLUDMPANw0MxgenC2nrjaUU9J9UjdRYgMcFGWrl4
E27wEn5e0nZ/Y7F2cfhuOc9vNmZ+eHm2KQRyfAjIVL5Hpldqk2jXyCnLBNeWGHSw
kPQMU+FLqmrOFUvXlD2my+OSHwKBgQDCmgS9r+xFh4BCB9dY6eyQJF/jYmAQHs26
80WJ6gAhbUw1O71uDtS9/3PZVXwwNCOHrcc49BPrpJdxGPHGvd2Q5y+j5LDDbQSZ
aLTiCZ2B0RM5Bd2dXD8gEHN4WCX7pJ/o4kDi4zONBmp5mg/tFfer5z5IU/1P7Wak
1Mu0JIHzmQKBgDNaNoqeVgaMuYwGtFbez6DlJtiwzrdLIJAheYYte5k4vdruub8D
sNyKIRp7RJgDCJq9obBEiuE98GRIZDrz78nDMco6QcHIL87KtNRO/vtZMKa7gkyk
jXR8u9nS2H/9YyytN3amLsQSq4XTOqM+D7xFNAIp6w/ibB9d4quzFj1FAoGBAKTE
x/LcO897NWuzO/D6z+QUCGR87R15F3SNenmVedrTskz4ciH3yMW+v5ZrPSWLX/IH
f8GHWD6TM+780eoW5L1GIh5BCjHN4rEJ6O3iekxqfD4x6zzL2F8Lztk8uZxh/Uuw
FoSFHybvIcQoYAe8K+KPfzq6cqb0OY6i5n920dkxAoGAJkw6ADqsJfH3NR+bQfgF
oEA1KqriMxyEJm44Y7E80C+iF4iNALF+Er9TSnr4mDxX5e/dW9d1YeS9o0nOfkpF
MaBmJfxqo4QQJLPRaxYQ2Jhfn7irir4BroxeNXQgNNhgSuKIvkfRyGYwl7P0AT4v
8H8rkZGneMD3gLB5MfnRhGk=
-----END PRIVATE KEY-----
-----BEGIN CERTIFICATE-----
MIIEGzCCAwOgAwIBAgIUaoGlyuJAyvs6yowFXymfu7seEiUwDQYJKoZIhvcNAQEL
BQAwgZwxCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDbGlja0hvdXNlMRMwEQYDVQQH
DApDbGlja0hvdXNlMREwDwYDVQQKDAhQZXJzb25hbDETMBEGA1UECwwKQ2xpY2tI
b3VzZTEkMCIGCSqGSIb3DQEJARYVY2xpY2tob3VzZUBjbGlja2hvdXNlMRUwEwYD
VQQDDAxtb25nb19zZWN1cmUwHhcNMjQwNTI2MTYwMDMxWhcNMzQwNTI0MTYwMDMx
WjCBnDELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNsaWNrSG91c2UxEzARBgNVBAcM
CkNsaWNrSG91c2UxETAPBgNVBAoMCFBlcnNvbmFsMRMwEQYDVQQLDApDbGlja0hv
dXNlMSQwIgYJKoZIhvcNAQkBFhVjbGlja2hvdXNlQGNsaWNraG91c2UxFTATBgNV
BAMMDG1vbmdvX3NlY3VyZTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB
AJSeQfMG7xd0+kPehYlEsEw0Sm1DB05SXVCEzIX3DFD6XJrd8eeWwlzYaBatkcwj
+8yvqske02X/3YwpzJyFizAqJIoKql5c5Yii2xH1S9PFP0y+LoJre+eQziHyO33t
eeedeGNJ05Sm2ZAzjfMQ7Rdh6S+gdIO4Y102iQR5yr2aTrh7tu7XkNCjwKTqMMvz
SikP1Rft2J6ECim+MjYCCtH/4yXGeEJ5epU4t3y6Q23B2ZEhY+sqUdwgK9pu8oe4
mkZ1Qvwakc9Qg12owRSDjBBYrPvghXVpkJ2JkgKTrIAIz9tZ53eDVHNXbWMAotov
jEmRSoGIS1yzwmQ9PdxUwYcCAwEAAaNTMFEwHQYDVR0OBBYEFJyz3Kt5XBDg5cvI
0v1ioqejqX+CMB8GA1UdIwQYMBaAFJyz3Kt5XBDg5cvI0v1ioqejqX+CMA8GA1Ud
EwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAHAQFA5VMYvaQFnKtKfHg9TF
qfJ4uM3YsGdgsgmGWgflD1S4Z290H6Q2QvyZAEceTrlJxqArlWlVp5DAU6EeXjEh
QMAgdkJHF1Hg2jsZKPtdkb88UtuzwAME357T8NtEJSHzNE5QqYwlVM71JkWpdqvA
UUdOJbWhhJfowIf4tMmL1DUuIy2qYpoP/tEBXEw9uwpmZqb7KELwT3lRyOMaGFN7
RHVwbvJWlHiu83QDNaWz6ijQkWl3tCN6TWcFD1qc1x8GpMzjbsAAYbCx7fbHM2LD
9kGSCiyv5K0MLNK5u67RtUFfPHtyD8RA0TtxIZ4PEN/eFANKS2/5NEi1ZuZ5/Pk=
-----END CERTIFICATE-----

View File

@ -1,52 +0,0 @@
-----BEGIN PRIVATE KEY-----
MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCUnkHzBu8XdPpD
3oWJRLBMNEptQwdOUl1QhMyF9wxQ+lya3fHnlsJc2GgWrZHMI/vMr6rJHtNl/92M
KcychYswKiSKCqpeXOWIotsR9UvTxT9Mvi6Ca3vnkM4h8jt97XnnnXhjSdOUptmQ
M43zEO0XYekvoHSDuGNdNokEecq9mk64e7bu15DQo8Ck6jDL80opD9UX7diehAop
vjI2AgrR/+MlxnhCeXqVOLd8ukNtwdmRIWPrKlHcICvabvKHuJpGdUL8GpHPUINd
qMEUg4wQWKz74IV1aZCdiZICk6yACM/bWed3g1RzV21jAKLaL4xJkUqBiEtcs8Jk
PT3cVMGHAgMBAAECggEAAul6qiHchB+uQMCWyC5xTeRqAXR3tAv4Tj4fGJjkXY4Z
OrAjr9Kp38EvX1amgvUWV3FT3NMevDf5xd9OdzAA0g0uJIF+mAhYFW48i1FnQcHQ
mOf0zmiZR7l8o7ROb3JvooXHxW+ba/qjGPVwC801gJvruehgbOCRxh9DTRp7sH5K
BmcddhULhKBEQjWUmYNEM3A2axpdi3g1aYKERRLn8J0DXcItTwbxuxbNcs3erl8W
3yyv/JKmqnWF5sNyX3wEWuQcDEZZy+W7Hn4KPMxyU+WA5el5nJ8kFlxhpInmajwu
8Ytn6IEyThyXutVomosVBuP16QORl2Nad0hnQO9toQKBgQDDgiehXr3k2wfVaVOD
PocW4leXausIU2XcCn6FxTG9vLUDMPANw0MxgenC2nrjaUU9J9UjdRYgMcFGWrl4
E27wEn5e0nZ/Y7F2cfhuOc9vNmZ+eHm2KQRyfAjIVL5Hpldqk2jXyCnLBNeWGHSw
kPQMU+FLqmrOFUvXlD2my+OSHwKBgQDCmgS9r+xFh4BCB9dY6eyQJF/jYmAQHs26
80WJ6gAhbUw1O71uDtS9/3PZVXwwNCOHrcc49BPrpJdxGPHGvd2Q5y+j5LDDbQSZ
aLTiCZ2B0RM5Bd2dXD8gEHN4WCX7pJ/o4kDi4zONBmp5mg/tFfer5z5IU/1P7Wak
1Mu0JIHzmQKBgDNaNoqeVgaMuYwGtFbez6DlJtiwzrdLIJAheYYte5k4vdruub8D
sNyKIRp7RJgDCJq9obBEiuE98GRIZDrz78nDMco6QcHIL87KtNRO/vtZMKa7gkyk
jXR8u9nS2H/9YyytN3amLsQSq4XTOqM+D7xFNAIp6w/ibB9d4quzFj1FAoGBAKTE
x/LcO897NWuzO/D6z+QUCGR87R15F3SNenmVedrTskz4ciH3yMW+v5ZrPSWLX/IH
f8GHWD6TM+780eoW5L1GIh5BCjHN4rEJ6O3iekxqfD4x6zzL2F8Lztk8uZxh/Uuw
FoSFHybvIcQoYAe8K+KPfzq6cqb0OY6i5n920dkxAoGAJkw6ADqsJfH3NR+bQfgF
oEA1KqriMxyEJm44Y7E80C+iF4iNALF+Er9TSnr4mDxX5e/dW9d1YeS9o0nOfkpF
MaBmJfxqo4QQJLPRaxYQ2Jhfn7irir4BroxeNXQgNNhgSuKIvkfRyGYwl7P0AT4v
8H8rkZGneMD3gLB5MfnRhGk=
-----END PRIVATE KEY-----
-----BEGIN CERTIFICATE-----
MIIEGzCCAwOgAwIBAgIUaoGlyuJAyvs6yowFXymfu7seEiUwDQYJKoZIhvcNAQEL
BQAwgZwxCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDbGlja0hvdXNlMRMwEQYDVQQH
DApDbGlja0hvdXNlMREwDwYDVQQKDAhQZXJzb25hbDETMBEGA1UECwwKQ2xpY2tI
b3VzZTEkMCIGCSqGSIb3DQEJARYVY2xpY2tob3VzZUBjbGlja2hvdXNlMRUwEwYD
VQQDDAxtb25nb19zZWN1cmUwHhcNMjQwNTI2MTYwMDMxWhcNMzQwNTI0MTYwMDMx
WjCBnDELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNsaWNrSG91c2UxEzARBgNVBAcM
CkNsaWNrSG91c2UxETAPBgNVBAoMCFBlcnNvbmFsMRMwEQYDVQQLDApDbGlja0hv
dXNlMSQwIgYJKoZIhvcNAQkBFhVjbGlja2hvdXNlQGNsaWNraG91c2UxFTATBgNV
BAMMDG1vbmdvX3NlY3VyZTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB
AJSeQfMG7xd0+kPehYlEsEw0Sm1DB05SXVCEzIX3DFD6XJrd8eeWwlzYaBatkcwj
+8yvqske02X/3YwpzJyFizAqJIoKql5c5Yii2xH1S9PFP0y+LoJre+eQziHyO33t
eeedeGNJ05Sm2ZAzjfMQ7Rdh6S+gdIO4Y102iQR5yr2aTrh7tu7XkNCjwKTqMMvz
SikP1Rft2J6ECim+MjYCCtH/4yXGeEJ5epU4t3y6Q23B2ZEhY+sqUdwgK9pu8oe4
mkZ1Qvwakc9Qg12owRSDjBBYrPvghXVpkJ2JkgKTrIAIz9tZ53eDVHNXbWMAotov
jEmRSoGIS1yzwmQ9PdxUwYcCAwEAAaNTMFEwHQYDVR0OBBYEFJyz3Kt5XBDg5cvI
0v1ioqejqX+CMB8GA1UdIwQYMBaAFJyz3Kt5XBDg5cvI0v1ioqejqX+CMA8GA1Ud
EwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAHAQFA5VMYvaQFnKtKfHg9TF
qfJ4uM3YsGdgsgmGWgflD1S4Z290H6Q2QvyZAEceTrlJxqArlWlVp5DAU6EeXjEh
QMAgdkJHF1Hg2jsZKPtdkb88UtuzwAME357T8NtEJSHzNE5QqYwlVM71JkWpdqvA
UUdOJbWhhJfowIf4tMmL1DUuIy2qYpoP/tEBXEw9uwpmZqb7KELwT3lRyOMaGFN7
RHVwbvJWlHiu83QDNaWz6ijQkWl3tCN6TWcFD1qc1x8GpMzjbsAAYbCx7fbHM2LD
9kGSCiyv5K0MLNK5u67RtUFfPHtyD8RA0TtxIZ4PEN/eFANKS2/5NEi1ZuZ5/Pk=
-----END CERTIFICATE-----

View File

@ -1,6 +0,0 @@
net:
ssl:
mode: requireSSL
PEMKeyFile: /mongo/key.pem
CAFile: /mongo/cert.crt
allowConnectionsWithoutCertificates: true

View File

@ -1,509 +0,0 @@
import pymongo
from uuid import UUID
import pytest
from helpers.client import QueryRuntimeException
from helpers.cluster import ClickHouseCluster
import datetime
@pytest.fixture(scope="module")
def started_cluster(request):
try:
cluster = ClickHouseCluster(__file__)
node = cluster.add_instance(
"node",
main_configs=[
"mongo_secure_config/config.d/ssl_conf.xml",
"configs/named_collections.xml",
"configs/feature_flag.xml",
],
user_configs=["configs/users.xml"],
with_mongo=True,
)
cluster.start()
yield cluster
finally:
cluster.shutdown()
def get_mongo_connection(started_cluster, secure=False, with_credentials=True):
connection_str = ""
if with_credentials:
connection_str = "mongodb://root:clickhouse@localhost:{}".format(
started_cluster.mongo_secure_port if secure else started_cluster.mongo_port
)
else:
connection_str = "mongodb://localhost:{}".format(
started_cluster.mongo_no_cred_port
)
if secure:
connection_str += "/?tls=true&tlsAllowInvalidCertificates=true"
return pymongo.MongoClient(connection_str)
@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"])
def test_uuid(started_cluster):
mongo_connection = get_mongo_connection(started_cluster)
db = mongo_connection["test"]
db.add_user("root", "clickhouse")
mongo_table = db["uuid_table"]
mongo_table.insert({"key": 0, "data": UUID("f0e77736-91d1-48ce-8f01-15123ca1c7ed")})
node = started_cluster.instances["node"]
node.query(
"CREATE TABLE uuid_mongo_table(key UInt64, data UUID) ENGINE = MongoDB('mongo1:27017', 'test', 'uuid_table', 'root', 'clickhouse')"
)
assert node.query("SELECT COUNT() FROM uuid_mongo_table") == "1\n"
assert (
node.query("SELECT data from uuid_mongo_table where key = 0")
== "f0e77736-91d1-48ce-8f01-15123ca1c7ed\n"
)
node.query("DROP TABLE uuid_mongo_table")
mongo_table.drop()
@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"])
def test_simple_select(started_cluster):
mongo_connection = get_mongo_connection(started_cluster)
db = mongo_connection["test"]
db.add_user("root", "clickhouse")
simple_mongo_table = db["simple_table"]
data = []
for i in range(0, 100):
data.append({"key": i, "data": hex(i * i)})
simple_mongo_table.insert_many(data)
node = started_cluster.instances["node"]
node.query(
"CREATE TABLE simple_mongo_table(key UInt64, data String) ENGINE = MongoDB('mongo1:27017', 'test', 'simple_table', 'root', 'clickhouse')"
)
assert node.query("SELECT COUNT() FROM simple_mongo_table") == "100\n"
assert (
node.query("SELECT sum(key) FROM simple_mongo_table")
== str(sum(range(0, 100))) + "\n"
)
assert (
node.query("SELECT data from simple_mongo_table where key = 42")
== hex(42 * 42) + "\n"
)
node.query("DROP TABLE simple_mongo_table")
simple_mongo_table.drop()
@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"])
def test_simple_select_from_view(started_cluster):
mongo_connection = get_mongo_connection(started_cluster)
db = mongo_connection["test"]
db.add_user("root", "clickhouse")
simple_mongo_table = db["simple_table"]
data = []
for i in range(0, 100):
data.append({"key": i, "data": hex(i * i)})
simple_mongo_table.insert_many(data)
simple_mongo_table_view = db.create_collection(
"simple_table_view", viewOn="simple_table"
)
node = started_cluster.instances["node"]
node.query(
"CREATE TABLE simple_mongo_table(key UInt64, data String) ENGINE = MongoDB('mongo1:27017', 'test', 'simple_table_view', 'root', 'clickhouse')"
)
assert node.query("SELECT COUNT() FROM simple_mongo_table") == "100\n"
assert (
node.query("SELECT sum(key) FROM simple_mongo_table")
== str(sum(range(0, 100))) + "\n"
)
assert (
node.query("SELECT data from simple_mongo_table where key = 42")
== hex(42 * 42) + "\n"
)
node.query("DROP TABLE simple_mongo_table")
simple_mongo_table_view.drop()
simple_mongo_table.drop()
@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"])
def test_arrays(started_cluster):
mongo_connection = get_mongo_connection(started_cluster)
db = mongo_connection["test"]
db.add_user("root", "clickhouse")
arrays_mongo_table = db["arrays_table"]
data = []
for i in range(0, 100):
data.append(
{
"key": i,
"arr_int64": [-(i + 1), -(i + 2), -(i + 3)],
"arr_int32": [-(i + 1), -(i + 2), -(i + 3)],
"arr_int16": [-(i + 1), -(i + 2), -(i + 3)],
"arr_int8": [-(i + 1), -(i + 2), -(i + 3)],
"arr_uint64": [i + 1, i + 2, i + 3],
"arr_uint32": [i + 1, i + 2, i + 3],
"arr_uint16": [i + 1, i + 2, i + 3],
"arr_uint8": [i + 1, i + 2, i + 3],
"arr_float32": [i + 1.125, i + 2.5, i + 3.750],
"arr_float64": [i + 1.125, i + 2.5, i + 3.750],
"arr_date": [
datetime.datetime(2002, 10, 27),
datetime.datetime(2024, 1, 8),
],
"arr_datetime": [
datetime.datetime(2023, 3, 31, 6, 3, 12),
datetime.datetime(1999, 2, 28, 12, 46, 34),
],
"arr_string": [str(i + 1), str(i + 2), str(i + 3)],
"arr_uuid": [
"f0e77736-91d1-48ce-8f01-15123ca1c7ed",
"93376a07-c044-4281-a76e-ad27cf6973c5",
],
"arr_mongo_uuid": [
UUID("f0e77736-91d1-48ce-8f01-15123ca1c7ed"),
UUID("93376a07-c044-4281-a76e-ad27cf6973c5"),
],
"arr_arr_bool": [
[True, False, True],
[True],
[],
None,
[False],
[None],
],
"arr_empty": [],
"arr_null": None,
"arr_nullable": None,
}
)
arrays_mongo_table.insert_many(data)
node = started_cluster.instances["node"]
node.query(
"CREATE TABLE arrays_mongo_table("
"key UInt64,"
"arr_int64 Array(Int64),"
"arr_int32 Array(Int32),"
"arr_int16 Array(Int16),"
"arr_int8 Array(Int8),"
"arr_uint64 Array(UInt64),"
"arr_uint32 Array(UInt32),"
"arr_uint16 Array(UInt16),"
"arr_uint8 Array(UInt8),"
"arr_float32 Array(Float32),"
"arr_float64 Array(Float64),"
"arr_date Array(Date),"
"arr_datetime Array(DateTime),"
"arr_string Array(String),"
"arr_uuid Array(UUID),"
"arr_mongo_uuid Array(UUID),"
"arr_arr_bool Array(Array(Bool)),"
"arr_empty Array(UInt64),"
"arr_null Array(UInt64),"
"arr_arr_null Array(Array(UInt64)),"
"arr_nullable Array(Nullable(UInt64))"
") ENGINE = MongoDB('mongo1:27017', 'test', 'arrays_table', 'root', 'clickhouse')"
)
assert node.query("SELECT COUNT() FROM arrays_mongo_table") == "100\n"
for column_name in ["arr_int64", "arr_int32", "arr_int16", "arr_int8"]:
assert (
node.query(f"SELECT {column_name} FROM arrays_mongo_table WHERE key = 42")
== "[-43,-44,-45]\n"
)
for column_name in ["arr_uint64", "arr_uint32", "arr_uint16", "arr_uint8"]:
assert (
node.query(f"SELECT {column_name} FROM arrays_mongo_table WHERE key = 42")
== "[43,44,45]\n"
)
for column_name in ["arr_float32", "arr_float64"]:
assert (
node.query(f"SELECT {column_name} FROM arrays_mongo_table WHERE key = 42")
== "[43.125,44.5,45.75]\n"
)
assert (
node.query(f"SELECT arr_date FROM arrays_mongo_table WHERE key = 42")
== "['2002-10-27','2024-01-08']\n"
)
assert (
node.query(f"SELECT arr_datetime FROM arrays_mongo_table WHERE key = 42")
== "['2023-03-31 06:03:12','1999-02-28 12:46:34']\n"
)
assert (
node.query(f"SELECT arr_string FROM arrays_mongo_table WHERE key = 42")
== "['43','44','45']\n"
)
assert (
node.query(f"SELECT arr_uuid FROM arrays_mongo_table WHERE key = 42")
== "['f0e77736-91d1-48ce-8f01-15123ca1c7ed','93376a07-c044-4281-a76e-ad27cf6973c5']\n"
)
assert (
node.query(f"SELECT arr_mongo_uuid FROM arrays_mongo_table WHERE key = 42")
== "['f0e77736-91d1-48ce-8f01-15123ca1c7ed','93376a07-c044-4281-a76e-ad27cf6973c5']\n"
)
assert (
node.query(f"SELECT arr_arr_bool FROM arrays_mongo_table WHERE key = 42")
== "[[true,false,true],[true],[],[],[false],[false]]\n"
)
assert (
node.query(f"SELECT arr_empty FROM arrays_mongo_table WHERE key = 42") == "[]\n"
)
assert (
node.query(f"SELECT arr_null FROM arrays_mongo_table WHERE key = 42") == "[]\n"
)
assert (
node.query(f"SELECT arr_arr_null FROM arrays_mongo_table WHERE key = 42")
== "[]\n"
)
assert (
node.query(f"SELECT arr_nullable FROM arrays_mongo_table WHERE key = 42")
== "[]\n"
)
# Test INSERT SELECT
node.query("INSERT INTO arrays_mongo_table SELECT * FROM arrays_mongo_table")
assert node.query("SELECT COUNT() FROM arrays_mongo_table") == "200\n"
assert node.query("SELECT COUNT(DISTINCT *) FROM arrays_mongo_table") == "100\n"
node.query("DROP TABLE arrays_mongo_table")
arrays_mongo_table.drop()
@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"])
def test_complex_data_type(started_cluster):
mongo_connection = get_mongo_connection(started_cluster)
db = mongo_connection["test"]
db.add_user("root", "clickhouse")
incomplete_mongo_table = db["complex_table"]
data = []
for i in range(0, 100):
data.append({"key": i, "data": hex(i * i), "dict": {"a": i, "b": str(i)}})
incomplete_mongo_table.insert_many(data)
node = started_cluster.instances["node"]
node.query(
"CREATE TABLE incomplete_mongo_table(key UInt64, data String) ENGINE = MongoDB('mongo1:27017', 'test', 'complex_table', 'root', 'clickhouse')"
)
assert node.query("SELECT COUNT() FROM incomplete_mongo_table") == "100\n"
assert (
node.query("SELECT sum(key) FROM incomplete_mongo_table")
== str(sum(range(0, 100))) + "\n"
)
assert (
node.query("SELECT data from incomplete_mongo_table where key = 42")
== hex(42 * 42) + "\n"
)
node.query("DROP TABLE incomplete_mongo_table")
incomplete_mongo_table.drop()
@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"])
def test_incorrect_data_type(started_cluster):
mongo_connection = get_mongo_connection(started_cluster)
db = mongo_connection["test"]
db.add_user("root", "clickhouse")
strange_mongo_table = db["strange_table"]
data = []
for i in range(0, 100):
data.append({"key": i, "data": hex(i * i), "aaaa": "Hello"})
strange_mongo_table.insert_many(data)
node = started_cluster.instances["node"]
node.query(
"CREATE TABLE strange_mongo_table(key String, data String) ENGINE = MongoDB('mongo1:27017', 'test', 'strange_table', 'root', 'clickhouse')"
)
with pytest.raises(QueryRuntimeException):
node.query("SELECT COUNT() FROM strange_mongo_table")
with pytest.raises(QueryRuntimeException):
node.query("SELECT uniq(key) FROM strange_mongo_table")
node.query(
"CREATE TABLE strange_mongo_table2(key UInt64, data String, bbbb String) ENGINE = MongoDB('mongo1:27017', 'test', 'strange_table', 'root', 'clickhouse')"
)
node.query("DROP TABLE strange_mongo_table")
node.query("DROP TABLE strange_mongo_table2")
strange_mongo_table.drop()
@pytest.mark.parametrize("started_cluster", [True], indirect=["started_cluster"])
def test_secure_connection(started_cluster):
mongo_connection = get_mongo_connection(started_cluster, secure=True)
db = mongo_connection["test"]
db.add_user("root", "clickhouse")
simple_mongo_table = db["simple_table"]
data = []
for i in range(0, 100):
data.append({"key": i, "data": hex(i * i)})
simple_mongo_table.insert_many(data)
node = started_cluster.instances["node"]
node.query(
"CREATE TABLE simple_mongo_table(key UInt64, data String) ENGINE = MongoDB('mongo_secure:27017', 'test', 'simple_table', 'root', 'clickhouse', 'ssl=true')"
)
assert node.query("SELECT COUNT() FROM simple_mongo_table") == "100\n"
assert (
node.query("SELECT sum(key) FROM simple_mongo_table")
== str(sum(range(0, 100))) + "\n"
)
assert (
node.query("SELECT data from simple_mongo_table where key = 42")
== hex(42 * 42) + "\n"
)
node.query("DROP TABLE simple_mongo_table")
simple_mongo_table.drop()
@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"])
def test_predefined_connection_configuration(started_cluster):
mongo_connection = get_mongo_connection(started_cluster)
db = mongo_connection["test"]
db.add_user("root", "clickhouse")
simple_mongo_table = db["simple_table"]
data = []
for i in range(0, 100):
data.append({"key": i, "data": hex(i * i)})
simple_mongo_table.insert_many(data)
node = started_cluster.instances["node"]
node.query("drop table if exists simple_mongo_table")
node.query(
"create table simple_mongo_table(key UInt64, data String) engine = MongoDB(mongo1)"
)
assert node.query("SELECT count() FROM simple_mongo_table") == "100\n"
simple_mongo_table.drop()
@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"])
def test_no_credentials(started_cluster):
mongo_connection = get_mongo_connection(started_cluster, with_credentials=False)
db = mongo_connection["test"]
simple_mongo_table = db["simple_table"]
data = []
for i in range(0, 100):
data.append({"key": i, "data": hex(i * i)})
simple_mongo_table.insert_many(data)
node = started_cluster.instances["node"]
node.query(
f"create table simple_mongo_table_2(key UInt64, data String) engine = MongoDB('mongo_no_cred:27017', 'test', 'simple_table', '', '')"
)
assert node.query("SELECT count() FROM simple_mongo_table_2") == "100\n"
simple_mongo_table.drop()
node.query("DROP TABLE IF EXISTS simple_mongo_table_2")
@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"])
def test_auth_source(started_cluster):
mongo_connection = get_mongo_connection(started_cluster, with_credentials=False)
admin_db = mongo_connection["admin"]
admin_db.add_user(
"root",
"clickhouse",
roles=[{"role": "userAdminAnyDatabase", "db": "admin"}, "readWriteAnyDatabase"],
)
simple_mongo_table_admin = admin_db["simple_table"]
data = []
for i in range(0, 50):
data.append({"key": i, "data": hex(i * i)})
simple_mongo_table_admin.insert_many(data)
db = mongo_connection["test"]
simple_mongo_table = db["simple_table"]
data = []
for i in range(0, 100):
data.append({"key": i, "data": hex(i * i)})
simple_mongo_table.insert_many(data)
node = started_cluster.instances["node"]
node.query(
"create table simple_mongo_table_fail(key UInt64, data String) engine = MongoDB('mongo_no_cred:27017', 'test', 'simple_table', 'root', 'clickhouse')"
)
node.query_and_get_error("SELECT count() FROM simple_mongo_table_fail")
node.query(
"create table simple_mongo_table_ok(key UInt64, data String) engine = MongoDB('mongo_no_cred:27017', 'test', 'simple_table', 'root', 'clickhouse', 'authSource=admin')"
)
assert node.query("SELECT count() FROM simple_mongo_table_ok") == "100\n"
simple_mongo_table.drop()
simple_mongo_table_admin.drop()
node.query("DROP TABLE IF EXISTS simple_mongo_table_ok")
node.query("DROP TABLE IF EXISTS simple_mongo_table_fail")
@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"])
def test_missing_columns(started_cluster):
mongo_connection = get_mongo_connection(started_cluster)
db = mongo_connection["test"]
db.add_user("root", "clickhouse")
simple_mongo_table = db["simple_table"]
data = []
for i in range(0, 10):
data.append({"key": i, "data": hex(i * i)})
for i in range(0, 10):
data.append({"key": i})
simple_mongo_table.insert_many(data)
node = started_cluster.instances["node"]
node.query("drop table if exists simple_mongo_table")
node.query(
"create table simple_mongo_table(key UInt64, data Nullable(String)) engine = MongoDB(mongo1)"
)
result = node.query("SELECT count() FROM simple_mongo_table WHERE isNull(data)")
assert result == "10\n"
simple_mongo_table.drop()
node.query("DROP TABLE IF EXISTS simple_mongo_table")
@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"])
def test_simple_insert_select(started_cluster):
mongo_connection = get_mongo_connection(started_cluster)
db = mongo_connection["test"]
db.add_user("root", "clickhouse")
simple_mongo_table = db["simple_table"]
node = started_cluster.instances["node"]
node.query("DROP TABLE IF EXISTS simple_mongo_table")
node.query(
"CREATE TABLE simple_mongo_table(key UInt64, data String) ENGINE = MongoDB('mongo1:27017', 'test', 'simple_table', 'root', 'clickhouse')"
)
node.query(
"INSERT INTO simple_mongo_table SELECT number, 'kek' || toString(number) FROM numbers(10)"
)
assert (
node.query("SELECT data from simple_mongo_table where key = 7").strip()
== "kek7"
)
node.query("INSERT INTO simple_mongo_table(key) SELECT 12")
assert int(node.query("SELECT count() from simple_mongo_table")) == 11
assert (
node.query("SELECT data from simple_mongo_table where key = 12").strip() == ""
)
node.query("DROP TABLE simple_mongo_table")
simple_mongo_table.drop()

View File

@ -1,3 +0,0 @@
<clickhouse>
<use_legacy_mongodb_integration>0</use_legacy_mongodb_integration>
</clickhouse>

View File

@ -1,16 +0,0 @@
<clickhouse>
<named_collections>
<mongo1>
<user>root</user>
<password>clickhouse</password>
<host>mongo1</host>
<port>27017</port>
<database>test</database>
<collection>simple_table</collection>
</mongo1>
<mongo1_uri>
<uri>mongodb://root:clickhouse@mongo1:27017/test</uri>
<collection>simple_table_uri</collection>
</mongo1_uri>
</named_collections>
</clickhouse>

View File

@ -1,24 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIEGzCCAwOgAwIBAgIUaoGlyuJAyvs6yowFXymfu7seEiUwDQYJKoZIhvcNAQEL
BQAwgZwxCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDbGlja0hvdXNlMRMwEQYDVQQH
DApDbGlja0hvdXNlMREwDwYDVQQKDAhQZXJzb25hbDETMBEGA1UECwwKQ2xpY2tI
b3VzZTEkMCIGCSqGSIb3DQEJARYVY2xpY2tob3VzZUBjbGlja2hvdXNlMRUwEwYD
VQQDDAxtb25nb19zZWN1cmUwHhcNMjQwNTI2MTYwMDMxWhcNMzQwNTI0MTYwMDMx
WjCBnDELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNsaWNrSG91c2UxEzARBgNVBAcM
CkNsaWNrSG91c2UxETAPBgNVBAoMCFBlcnNvbmFsMRMwEQYDVQQLDApDbGlja0hv
dXNlMSQwIgYJKoZIhvcNAQkBFhVjbGlja2hvdXNlQGNsaWNraG91c2UxFTATBgNV
BAMMDG1vbmdvX3NlY3VyZTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB
AJSeQfMG7xd0+kPehYlEsEw0Sm1DB05SXVCEzIX3DFD6XJrd8eeWwlzYaBatkcwj
+8yvqske02X/3YwpzJyFizAqJIoKql5c5Yii2xH1S9PFP0y+LoJre+eQziHyO33t
eeedeGNJ05Sm2ZAzjfMQ7Rdh6S+gdIO4Y102iQR5yr2aTrh7tu7XkNCjwKTqMMvz
SikP1Rft2J6ECim+MjYCCtH/4yXGeEJ5epU4t3y6Q23B2ZEhY+sqUdwgK9pu8oe4
mkZ1Qvwakc9Qg12owRSDjBBYrPvghXVpkJ2JkgKTrIAIz9tZ53eDVHNXbWMAotov
jEmRSoGIS1yzwmQ9PdxUwYcCAwEAAaNTMFEwHQYDVR0OBBYEFJyz3Kt5XBDg5cvI
0v1ioqejqX+CMB8GA1UdIwQYMBaAFJyz3Kt5XBDg5cvI0v1ioqejqX+CMA8GA1Ud
EwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAHAQFA5VMYvaQFnKtKfHg9TF
qfJ4uM3YsGdgsgmGWgflD1S4Z290H6Q2QvyZAEceTrlJxqArlWlVp5DAU6EeXjEh
QMAgdkJHF1Hg2jsZKPtdkb88UtuzwAME357T8NtEJSHzNE5QqYwlVM71JkWpdqvA
UUdOJbWhhJfowIf4tMmL1DUuIy2qYpoP/tEBXEw9uwpmZqb7KELwT3lRyOMaGFN7
RHVwbvJWlHiu83QDNaWz6ijQkWl3tCN6TWcFD1qc1x8GpMzjbsAAYbCx7fbHM2LD
9kGSCiyv5K0MLNK5u67RtUFfPHtyD8RA0TtxIZ4PEN/eFANKS2/5NEi1ZuZ5/Pk=
-----END CERTIFICATE-----

View File

@ -1,52 +0,0 @@
-----BEGIN PRIVATE KEY-----
MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCUnkHzBu8XdPpD
3oWJRLBMNEptQwdOUl1QhMyF9wxQ+lya3fHnlsJc2GgWrZHMI/vMr6rJHtNl/92M
KcychYswKiSKCqpeXOWIotsR9UvTxT9Mvi6Ca3vnkM4h8jt97XnnnXhjSdOUptmQ
M43zEO0XYekvoHSDuGNdNokEecq9mk64e7bu15DQo8Ck6jDL80opD9UX7diehAop
vjI2AgrR/+MlxnhCeXqVOLd8ukNtwdmRIWPrKlHcICvabvKHuJpGdUL8GpHPUINd
qMEUg4wQWKz74IV1aZCdiZICk6yACM/bWed3g1RzV21jAKLaL4xJkUqBiEtcs8Jk
PT3cVMGHAgMBAAECggEAAul6qiHchB+uQMCWyC5xTeRqAXR3tAv4Tj4fGJjkXY4Z
OrAjr9Kp38EvX1amgvUWV3FT3NMevDf5xd9OdzAA0g0uJIF+mAhYFW48i1FnQcHQ
mOf0zmiZR7l8o7ROb3JvooXHxW+ba/qjGPVwC801gJvruehgbOCRxh9DTRp7sH5K
BmcddhULhKBEQjWUmYNEM3A2axpdi3g1aYKERRLn8J0DXcItTwbxuxbNcs3erl8W
3yyv/JKmqnWF5sNyX3wEWuQcDEZZy+W7Hn4KPMxyU+WA5el5nJ8kFlxhpInmajwu
8Ytn6IEyThyXutVomosVBuP16QORl2Nad0hnQO9toQKBgQDDgiehXr3k2wfVaVOD
PocW4leXausIU2XcCn6FxTG9vLUDMPANw0MxgenC2nrjaUU9J9UjdRYgMcFGWrl4
E27wEn5e0nZ/Y7F2cfhuOc9vNmZ+eHm2KQRyfAjIVL5Hpldqk2jXyCnLBNeWGHSw
kPQMU+FLqmrOFUvXlD2my+OSHwKBgQDCmgS9r+xFh4BCB9dY6eyQJF/jYmAQHs26
80WJ6gAhbUw1O71uDtS9/3PZVXwwNCOHrcc49BPrpJdxGPHGvd2Q5y+j5LDDbQSZ
aLTiCZ2B0RM5Bd2dXD8gEHN4WCX7pJ/o4kDi4zONBmp5mg/tFfer5z5IU/1P7Wak
1Mu0JIHzmQKBgDNaNoqeVgaMuYwGtFbez6DlJtiwzrdLIJAheYYte5k4vdruub8D
sNyKIRp7RJgDCJq9obBEiuE98GRIZDrz78nDMco6QcHIL87KtNRO/vtZMKa7gkyk
jXR8u9nS2H/9YyytN3amLsQSq4XTOqM+D7xFNAIp6w/ibB9d4quzFj1FAoGBAKTE
x/LcO897NWuzO/D6z+QUCGR87R15F3SNenmVedrTskz4ciH3yMW+v5ZrPSWLX/IH
f8GHWD6TM+780eoW5L1GIh5BCjHN4rEJ6O3iekxqfD4x6zzL2F8Lztk8uZxh/Uuw
FoSFHybvIcQoYAe8K+KPfzq6cqb0OY6i5n920dkxAoGAJkw6ADqsJfH3NR+bQfgF
oEA1KqriMxyEJm44Y7E80C+iF4iNALF+Er9TSnr4mDxX5e/dW9d1YeS9o0nOfkpF
MaBmJfxqo4QQJLPRaxYQ2Jhfn7irir4BroxeNXQgNNhgSuKIvkfRyGYwl7P0AT4v
8H8rkZGneMD3gLB5MfnRhGk=
-----END PRIVATE KEY-----
-----BEGIN CERTIFICATE-----
MIIEGzCCAwOgAwIBAgIUaoGlyuJAyvs6yowFXymfu7seEiUwDQYJKoZIhvcNAQEL
BQAwgZwxCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDbGlja0hvdXNlMRMwEQYDVQQH
DApDbGlja0hvdXNlMREwDwYDVQQKDAhQZXJzb25hbDETMBEGA1UECwwKQ2xpY2tI
b3VzZTEkMCIGCSqGSIb3DQEJARYVY2xpY2tob3VzZUBjbGlja2hvdXNlMRUwEwYD
VQQDDAxtb25nb19zZWN1cmUwHhcNMjQwNTI2MTYwMDMxWhcNMzQwNTI0MTYwMDMx
WjCBnDELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNsaWNrSG91c2UxEzARBgNVBAcM
CkNsaWNrSG91c2UxETAPBgNVBAoMCFBlcnNvbmFsMRMwEQYDVQQLDApDbGlja0hv
dXNlMSQwIgYJKoZIhvcNAQkBFhVjbGlja2hvdXNlQGNsaWNraG91c2UxFTATBgNV
BAMMDG1vbmdvX3NlY3VyZTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB
AJSeQfMG7xd0+kPehYlEsEw0Sm1DB05SXVCEzIX3DFD6XJrd8eeWwlzYaBatkcwj
+8yvqske02X/3YwpzJyFizAqJIoKql5c5Yii2xH1S9PFP0y+LoJre+eQziHyO33t
eeedeGNJ05Sm2ZAzjfMQ7Rdh6S+gdIO4Y102iQR5yr2aTrh7tu7XkNCjwKTqMMvz
SikP1Rft2J6ECim+MjYCCtH/4yXGeEJ5epU4t3y6Q23B2ZEhY+sqUdwgK9pu8oe4
mkZ1Qvwakc9Qg12owRSDjBBYrPvghXVpkJ2JkgKTrIAIz9tZ53eDVHNXbWMAotov
jEmRSoGIS1yzwmQ9PdxUwYcCAwEAAaNTMFEwHQYDVR0OBBYEFJyz3Kt5XBDg5cvI
0v1ioqejqX+CMB8GA1UdIwQYMBaAFJyz3Kt5XBDg5cvI0v1ioqejqX+CMA8GA1Ud
EwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAHAQFA5VMYvaQFnKtKfHg9TF
qfJ4uM3YsGdgsgmGWgflD1S4Z290H6Q2QvyZAEceTrlJxqArlWlVp5DAU6EeXjEh
QMAgdkJHF1Hg2jsZKPtdkb88UtuzwAME357T8NtEJSHzNE5QqYwlVM71JkWpdqvA
UUdOJbWhhJfowIf4tMmL1DUuIy2qYpoP/tEBXEw9uwpmZqb7KELwT3lRyOMaGFN7
RHVwbvJWlHiu83QDNaWz6ijQkWl3tCN6TWcFD1qc1x8GpMzjbsAAYbCx7fbHM2LD
9kGSCiyv5K0MLNK5u67RtUFfPHtyD8RA0TtxIZ4PEN/eFANKS2/5NEi1ZuZ5/Pk=
-----END CERTIFICATE-----

View File

@ -1,6 +0,0 @@
net:
ssl:
mode: requireSSL
PEMKeyFile: /mongo/key.pem
CAFile: /mongo/cert.crt
allowConnectionsWithoutCertificates: true

View File

@ -10,14 +10,14 @@ from helpers.cluster import ClickHouseCluster
def started_cluster(request):
try:
cluster = ClickHouseCluster(__file__)
cluster.add_instance(
node = cluster.add_instance(
"node",
with_mongo=True,
main_configs=[
"configs/named_collections.xml",
"configs/feature_flag.xml",
"configs_secure/config.d/ssl_conf.xml",
],
user_configs=["configs/users.xml"],
with_mongo_secure=request.param,
)
cluster.start()
yield cluster
@ -26,33 +26,34 @@ def started_cluster(request):
def get_mongo_connection(started_cluster, secure=False, with_credentials=True):
if secure:
return pymongo.MongoClient(
"mongodb://root:clickhouse@localhost:{}/?tls=true&tlsAllowInvalidCertificates=true&tlsAllowInvalidHostnames=true".format(
started_cluster.mongo_secure_port
)
)
connection_str = ""
if with_credentials:
return pymongo.MongoClient(
"mongodb://root:clickhouse@localhost:{}".format(started_cluster.mongo_port)
connection_str = "mongodb://root:clickhouse@localhost:{}".format(
started_cluster.mongo_port
)
return pymongo.MongoClient(
"mongodb://localhost:{}".format(started_cluster.mongo_no_cred_port)
)
else:
connection_str = "mongodb://localhost:{}".format(
started_cluster.mongo_no_cred_port
)
if secure:
connection_str += "/?tls=true&tlsAllowInvalidCertificates=true"
return pymongo.MongoClient(connection_str)
@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"])
def test_simple_select(started_cluster):
mongo_connection = get_mongo_connection(started_cluster)
db = mongo_connection["test"]
db.add_user("root", "clickhouse")
simple_mongo_table = db["simple_table"]
data = []
for i in range(0, 100):
data.append({"key": i, "data": hex(i * i)})
simple_mongo_table.insert_many(data)
node = started_cluster.instances["node"]
for i in range(0, 100):
node.query(
"INSERT INTO FUNCTION mongodb('mongo1:27017', 'test', 'simple_table', 'root', 'clickhouse', structure='key UInt64, data String') (key, data) VALUES ({}, '{}')".format(
i, hex(i * i)
)
)
assert (
node.query(
"SELECT COUNT() FROM mongodb('mongo1:27017', 'test', 'simple_table', 'root', 'clickhouse', structure='key UInt64, data String')"
@ -74,52 +75,14 @@ def test_simple_select(started_cluster):
assert (
node.query(
"SELECT data FROM mongodb('mongo1:27017', 'test', 'simple_table', 'root', 'clickhouse', structure='key UInt64, data String') WHERE key = 42"
)
== hex(42 * 42) + "\n"
)
simple_mongo_table.drop()
def test_simple_select_uri(started_cluster):
mongo_connection = get_mongo_connection(started_cluster)
db = mongo_connection["test"]
db.add_user("root", "clickhouse")
simple_mongo_table = db["simple_table"]
data = []
for i in range(0, 100):
data.append({"key": i, "data": hex(i * i)})
simple_mongo_table.insert_many(data)
node = started_cluster.instances["node"]
assert (
node.query(
"SELECT COUNT() FROM mongodb('mongodb://root:clickhouse@mongo1:27017/test', 'simple_table', structure='key UInt64, data String')"
)
== "100\n"
)
assert (
node.query(
"SELECT sum(key) FROM mongodb('mongodb://root:clickhouse@mongo1:27017/test', 'simple_table', structure='key UInt64, data String')"
)
== str(sum(range(0, 100))) + "\n"
)
assert (
node.query(
"SELECT sum(key) FROM mongodb('mongodb://root:clickhouse@mongo1:27017/test', 'simple_table', 'key UInt64, data String')"
)
== str(sum(range(0, 100))) + "\n"
)
assert (
node.query(
"SELECT data FROM mongodb('mongodb://root:clickhouse@mongo1:27017/test', 'simple_table', structure='key UInt64, data String') WHERE key = 42"
"SELECT data from mongodb('mongo1:27017', 'test', 'simple_table', 'root', 'clickhouse', structure='key UInt64, data String') where key = 42"
)
== hex(42 * 42) + "\n"
)
simple_mongo_table.drop()
@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"])
def test_complex_data_type(started_cluster):
mongo_connection = get_mongo_connection(started_cluster)
db = mongo_connection["test"]
@ -134,49 +97,27 @@ def test_complex_data_type(started_cluster):
assert (
node.query(
"""
SELECT COUNT()
FROM mongodb('mongo1:27017',
'test',
'complex_table',
'root',
'clickhouse',
structure='key UInt64, data String, dict Map(UInt64, String)')"""
"SELECT COUNT() FROM mongodb('mongo1:27017', 'test', 'complex_table', 'root', 'clickhouse', structure='key UInt64, data String, dict Map(UInt64, String)')"
)
== "100\n"
)
assert (
node.query(
"""
SELECT sum(key)
FROM mongodb('mongo1:27017',
'test',
'complex_table',
'root',
'clickhouse',
structure='key UInt64, data String, dict Map(UInt64, String)')"""
"SELECT sum(key) FROM mongodb('mongo1:27017', 'test', 'complex_table', 'root', 'clickhouse', structure='key UInt64, data String, dict Map(UInt64, String)')"
)
== str(sum(range(0, 100))) + "\n"
)
assert (
node.query(
"""
SELECT data
FROM mongodb('mongo1:27017',
'test',
'complex_table',
'root',
'clickhouse',
structure='key UInt64, data String, dict Map(UInt64, String)')
WHERE key = 42
"""
"SELECT data from mongodb('mongo1:27017', 'test', 'complex_table', 'root', 'clickhouse', structure='key UInt64, data String, dict Map(UInt64, String)') where key = 42"
)
== hex(42 * 42) + "\n"
)
incomplete_mongo_table.drop()
@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"])
def test_incorrect_data_type(started_cluster):
mongo_connection = get_mongo_connection(started_cluster)
db = mongo_connection["test"]
@ -197,6 +138,7 @@ def test_incorrect_data_type(started_cluster):
strange_mongo_table.drop()
@pytest.mark.parametrize("started_cluster", [True], indirect=["started_cluster"])
def test_secure_connection(started_cluster):
mongo_connection = get_mongo_connection(started_cluster, secure=True)
db = mongo_connection["test"]
@ -211,63 +153,35 @@ def test_secure_connection(started_cluster):
assert (
node.query(
"""SELECT COUNT()
FROM mongodb('mongo_secure:27017',
'test',
'simple_table',
'root',
'clickhouse',
structure='key UInt64, data String',
options='tls=true&tlsAllowInvalidCertificates=true&tlsAllowInvalidHostnames=true')"""
"SELECT COUNT() FROM mongodb('mongo1:27017', 'test', 'simple_table', 'root', 'clickhouse', structure='key UInt64, data String', options='ssl=true')"
)
== "100\n"
)
assert (
node.query(
"""SELECT sum(key)
FROM mongodb('mongo_secure:27017',
'test',
'simple_table',
'root',
'clickhouse',
structure='key UInt64, data String',
options='tls=true&tlsAllowInvalidCertificates=true&tlsAllowInvalidHostnames=true')"""
"SELECT sum(key) FROM mongodb('mongo1:27017', 'test', 'simple_table', 'root', 'clickhouse', structure='key UInt64, data String', options='ssl=true')"
)
== str(sum(range(0, 100))) + "\n"
)
assert (
node.query(
"""SELECT sum(key)
FROM mongodb('mongo_secure:27017',
'test',
'simple_table',
'root',
'clickhouse',
'key UInt64, data String',
'tls=true&tlsAllowInvalidCertificates=true&tlsAllowInvalidHostnames=true')"""
"SELECT sum(key) FROM mongodb('mongo1:27017', 'test', 'simple_table', 'root', 'clickhouse', 'key UInt64, data String', 'ssl=true')"
)
== str(sum(range(0, 100))) + "\n"
)
assert (
node.query(
"""SELECT data
FROM mongodb('mongo_secure:27017',
'test',
'simple_table',
'root',
'clickhouse',
'key UInt64, data String',
'tls=true&tlsAllowInvalidCertificates=true&tlsAllowInvalidHostnames=true')
WHERE key = 42"""
"SELECT data from mongodb('mongo1:27017', 'test', 'simple_table', 'root', 'clickhouse', structure='key UInt64, data String', options='ssl=true') where key = 42"
)
== hex(42 * 42) + "\n"
)
simple_mongo_table.drop()
def test_secure_connection_with_validation(started_cluster):
mongo_connection = get_mongo_connection(started_cluster, secure=True)
@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"])
def test_predefined_connection_configuration(started_cluster):
mongo_connection = get_mongo_connection(started_cluster)
db = mongo_connection["test"]
db.add_user("root", "clickhouse")
simple_mongo_table = db["simple_table"]
@ -277,73 +191,16 @@ def test_secure_connection_with_validation(started_cluster):
simple_mongo_table.insert_many(data)
node = started_cluster.instances["node"]
with pytest.raises(QueryRuntimeException):
node.query(
"""SELECT COUNT() FROM mongodb('mongo_secure:27017',
'test',
'simple_table',
'root',
'clickhouse',
structure='key UInt64, data String',
options='tls=true')"""
)
simple_mongo_table.drop()
def test_secure_connection_uri(started_cluster):
mongo_connection = get_mongo_connection(started_cluster, secure=True)
db = mongo_connection["test"]
db.add_user("root", "clickhouse")
simple_mongo_table = db["simple_table"]
data = []
for i in range(0, 100):
data.append({"key": i, "data": hex(i * i)})
simple_mongo_table.insert_many(data)
node = started_cluster.instances["node"]
assert (
node.query(
"""SELECT COUNT()
FROM mongodb('mongodb://root:clickhouse@mongo_secure:27017/test?tls=true&tlsAllowInvalidCertificates=true&tlsAllowInvalidHostnames=true',
'simple_table',
'key UInt64, data String')"""
"SELECT count() FROM mongodb('mongo1:27017', 'test', 'simple_table', 'root', 'clickhouse', structure='key UInt64, data String')"
)
== "100\n"
)
assert (
node.query(
"""SELECT sum(key)
FROM mongodb('mongodb://root:clickhouse@mongo_secure:27017/test?tls=true&tlsAllowInvalidCertificates=true&tlsAllowInvalidHostnames=true',
'simple_table',
'key UInt64, data String')"""
)
== str(sum(range(0, 100))) + "\n"
)
assert (
node.query(
"""SELECT sum(key)
FROM mongodb('mongodb://root:clickhouse@mongo_secure:27017/test?tls=true&tlsAllowInvalidCertificates=true&tlsAllowInvalidHostnames=true',
'simple_table',
'key UInt64, data String')"""
)
== str(sum(range(0, 100))) + "\n"
)
assert (
node.query(
"""SELECT data
FROM mongodb('mongodb://root:clickhouse@mongo_secure:27017/test?tls=true&tlsAllowInvalidCertificates=true&tlsAllowInvalidHostnames=true',
'simple_table',
'key UInt64, data String')
WHERE key = 42"""
)
== hex(42 * 42) + "\n"
)
simple_mongo_table.drop()
@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"])
def test_no_credentials(started_cluster):
mongo_connection = get_mongo_connection(started_cluster, with_credentials=False)
db = mongo_connection["test"]
@ -356,13 +213,14 @@ def test_no_credentials(started_cluster):
node = started_cluster.instances["node"]
assert (
node.query(
"SELECT count() FROM mongodb('mongo_no_cred:27017', 'test', 'simple_table', '', '', structure='key UInt64, data String')"
"SELECT count() FROM mongodb('mongo2:27017', 'test', 'simple_table', '', '', structure='key UInt64, data String')"
)
== "100\n"
)
simple_mongo_table.drop()
@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"])
def test_auth_source(started_cluster):
mongo_connection = get_mongo_connection(started_cluster, with_credentials=False)
admin_db = mongo_connection["admin"]
@ -384,21 +242,21 @@ def test_auth_source(started_cluster):
simple_mongo_table.insert_many(data)
node = started_cluster.instances["node"]
with pytest.raises(QueryRuntimeException):
node.query(
"SELECT count() FROM mongodb('mongo_no_cred:27017', 'test', 'simple_table', 'root', 'clickhouse', structure='key UInt64, data String')"
)
node.query_and_get_error(
"SELECT count() FROM mongodb('mongo2:27017', 'test', 'simple_table', 'root', 'clickhouse', structure='key UInt64, data String')"
)
assert (
node.query(
"SELECT count() FROM mongodb('mongo_no_cred:27017', 'test', 'simple_table', 'root', 'clickhouse', structure='key UInt64, data String', options='authSource=admin')"
"SELECT count() FROM mongodb('mongo2:27017', 'test', 'simple_table', 'root', 'clickhouse', structure='key UInt64, data String', options='authSource=admin')"
)
== "100\n"
)
simple_mongo_table.drop()
@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"])
def test_missing_columns(started_cluster):
mongo_connection = get_mongo_connection(started_cluster)
db = mongo_connection["test"]

View File

@ -1,3 +0,0 @@
<clickhouse>
<use_legacy_mongodb_integration>1</use_legacy_mongodb_integration>
</clickhouse>

View File

@ -1,9 +0,0 @@
<clickhouse>
<users>
<default>
<password></password>
<profile>default</profile>
<named_collection_control>1</named_collection_control>
</default>
</users>
</clickhouse>

View File

@ -1,24 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIEGzCCAwOgAwIBAgIUaoGlyuJAyvs6yowFXymfu7seEiUwDQYJKoZIhvcNAQEL
BQAwgZwxCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDbGlja0hvdXNlMRMwEQYDVQQH
DApDbGlja0hvdXNlMREwDwYDVQQKDAhQZXJzb25hbDETMBEGA1UECwwKQ2xpY2tI
b3VzZTEkMCIGCSqGSIb3DQEJARYVY2xpY2tob3VzZUBjbGlja2hvdXNlMRUwEwYD
VQQDDAxtb25nb19zZWN1cmUwHhcNMjQwNTI2MTYwMDMxWhcNMzQwNTI0MTYwMDMx
WjCBnDELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNsaWNrSG91c2UxEzARBgNVBAcM
CkNsaWNrSG91c2UxETAPBgNVBAoMCFBlcnNvbmFsMRMwEQYDVQQLDApDbGlja0hv
dXNlMSQwIgYJKoZIhvcNAQkBFhVjbGlja2hvdXNlQGNsaWNraG91c2UxFTATBgNV
BAMMDG1vbmdvX3NlY3VyZTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB
AJSeQfMG7xd0+kPehYlEsEw0Sm1DB05SXVCEzIX3DFD6XJrd8eeWwlzYaBatkcwj
+8yvqske02X/3YwpzJyFizAqJIoKql5c5Yii2xH1S9PFP0y+LoJre+eQziHyO33t
eeedeGNJ05Sm2ZAzjfMQ7Rdh6S+gdIO4Y102iQR5yr2aTrh7tu7XkNCjwKTqMMvz
SikP1Rft2J6ECim+MjYCCtH/4yXGeEJ5epU4t3y6Q23B2ZEhY+sqUdwgK9pu8oe4
mkZ1Qvwakc9Qg12owRSDjBBYrPvghXVpkJ2JkgKTrIAIz9tZ53eDVHNXbWMAotov
jEmRSoGIS1yzwmQ9PdxUwYcCAwEAAaNTMFEwHQYDVR0OBBYEFJyz3Kt5XBDg5cvI
0v1ioqejqX+CMB8GA1UdIwQYMBaAFJyz3Kt5XBDg5cvI0v1ioqejqX+CMA8GA1Ud
EwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAHAQFA5VMYvaQFnKtKfHg9TF
qfJ4uM3YsGdgsgmGWgflD1S4Z290H6Q2QvyZAEceTrlJxqArlWlVp5DAU6EeXjEh
QMAgdkJHF1Hg2jsZKPtdkb88UtuzwAME357T8NtEJSHzNE5QqYwlVM71JkWpdqvA
UUdOJbWhhJfowIf4tMmL1DUuIy2qYpoP/tEBXEw9uwpmZqb7KELwT3lRyOMaGFN7
RHVwbvJWlHiu83QDNaWz6ijQkWl3tCN6TWcFD1qc1x8GpMzjbsAAYbCx7fbHM2LD
9kGSCiyv5K0MLNK5u67RtUFfPHtyD8RA0TtxIZ4PEN/eFANKS2/5NEi1ZuZ5/Pk=
-----END CERTIFICATE-----

View File

@ -1,8 +0,0 @@
<clickhouse>
<openSSL>
<client>
<!-- For self-signed certificate -->
<verificationMode>none</verificationMode>
</client>
</openSSL>
</clickhouse>

View File

@ -1,52 +0,0 @@
-----BEGIN PRIVATE KEY-----
MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCUnkHzBu8XdPpD
3oWJRLBMNEptQwdOUl1QhMyF9wxQ+lya3fHnlsJc2GgWrZHMI/vMr6rJHtNl/92M
KcychYswKiSKCqpeXOWIotsR9UvTxT9Mvi6Ca3vnkM4h8jt97XnnnXhjSdOUptmQ
M43zEO0XYekvoHSDuGNdNokEecq9mk64e7bu15DQo8Ck6jDL80opD9UX7diehAop
vjI2AgrR/+MlxnhCeXqVOLd8ukNtwdmRIWPrKlHcICvabvKHuJpGdUL8GpHPUINd
qMEUg4wQWKz74IV1aZCdiZICk6yACM/bWed3g1RzV21jAKLaL4xJkUqBiEtcs8Jk
PT3cVMGHAgMBAAECggEAAul6qiHchB+uQMCWyC5xTeRqAXR3tAv4Tj4fGJjkXY4Z
OrAjr9Kp38EvX1amgvUWV3FT3NMevDf5xd9OdzAA0g0uJIF+mAhYFW48i1FnQcHQ
mOf0zmiZR7l8o7ROb3JvooXHxW+ba/qjGPVwC801gJvruehgbOCRxh9DTRp7sH5K
BmcddhULhKBEQjWUmYNEM3A2axpdi3g1aYKERRLn8J0DXcItTwbxuxbNcs3erl8W
3yyv/JKmqnWF5sNyX3wEWuQcDEZZy+W7Hn4KPMxyU+WA5el5nJ8kFlxhpInmajwu
8Ytn6IEyThyXutVomosVBuP16QORl2Nad0hnQO9toQKBgQDDgiehXr3k2wfVaVOD
PocW4leXausIU2XcCn6FxTG9vLUDMPANw0MxgenC2nrjaUU9J9UjdRYgMcFGWrl4
E27wEn5e0nZ/Y7F2cfhuOc9vNmZ+eHm2KQRyfAjIVL5Hpldqk2jXyCnLBNeWGHSw
kPQMU+FLqmrOFUvXlD2my+OSHwKBgQDCmgS9r+xFh4BCB9dY6eyQJF/jYmAQHs26
80WJ6gAhbUw1O71uDtS9/3PZVXwwNCOHrcc49BPrpJdxGPHGvd2Q5y+j5LDDbQSZ
aLTiCZ2B0RM5Bd2dXD8gEHN4WCX7pJ/o4kDi4zONBmp5mg/tFfer5z5IU/1P7Wak
1Mu0JIHzmQKBgDNaNoqeVgaMuYwGtFbez6DlJtiwzrdLIJAheYYte5k4vdruub8D
sNyKIRp7RJgDCJq9obBEiuE98GRIZDrz78nDMco6QcHIL87KtNRO/vtZMKa7gkyk
jXR8u9nS2H/9YyytN3amLsQSq4XTOqM+D7xFNAIp6w/ibB9d4quzFj1FAoGBAKTE
x/LcO897NWuzO/D6z+QUCGR87R15F3SNenmVedrTskz4ciH3yMW+v5ZrPSWLX/IH
f8GHWD6TM+780eoW5L1GIh5BCjHN4rEJ6O3iekxqfD4x6zzL2F8Lztk8uZxh/Uuw
FoSFHybvIcQoYAe8K+KPfzq6cqb0OY6i5n920dkxAoGAJkw6ADqsJfH3NR+bQfgF
oEA1KqriMxyEJm44Y7E80C+iF4iNALF+Er9TSnr4mDxX5e/dW9d1YeS9o0nOfkpF
MaBmJfxqo4QQJLPRaxYQ2Jhfn7irir4BroxeNXQgNNhgSuKIvkfRyGYwl7P0AT4v
8H8rkZGneMD3gLB5MfnRhGk=
-----END PRIVATE KEY-----
-----BEGIN CERTIFICATE-----
MIIEGzCCAwOgAwIBAgIUaoGlyuJAyvs6yowFXymfu7seEiUwDQYJKoZIhvcNAQEL
BQAwgZwxCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDbGlja0hvdXNlMRMwEQYDVQQH
DApDbGlja0hvdXNlMREwDwYDVQQKDAhQZXJzb25hbDETMBEGA1UECwwKQ2xpY2tI
b3VzZTEkMCIGCSqGSIb3DQEJARYVY2xpY2tob3VzZUBjbGlja2hvdXNlMRUwEwYD
VQQDDAxtb25nb19zZWN1cmUwHhcNMjQwNTI2MTYwMDMxWhcNMzQwNTI0MTYwMDMx
WjCBnDELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNsaWNrSG91c2UxEzARBgNVBAcM
CkNsaWNrSG91c2UxETAPBgNVBAoMCFBlcnNvbmFsMRMwEQYDVQQLDApDbGlja0hv
dXNlMSQwIgYJKoZIhvcNAQkBFhVjbGlja2hvdXNlQGNsaWNraG91c2UxFTATBgNV
BAMMDG1vbmdvX3NlY3VyZTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB
AJSeQfMG7xd0+kPehYlEsEw0Sm1DB05SXVCEzIX3DFD6XJrd8eeWwlzYaBatkcwj
+8yvqske02X/3YwpzJyFizAqJIoKql5c5Yii2xH1S9PFP0y+LoJre+eQziHyO33t
eeedeGNJ05Sm2ZAzjfMQ7Rdh6S+gdIO4Y102iQR5yr2aTrh7tu7XkNCjwKTqMMvz
SikP1Rft2J6ECim+MjYCCtH/4yXGeEJ5epU4t3y6Q23B2ZEhY+sqUdwgK9pu8oe4
mkZ1Qvwakc9Qg12owRSDjBBYrPvghXVpkJ2JkgKTrIAIz9tZ53eDVHNXbWMAotov
jEmRSoGIS1yzwmQ9PdxUwYcCAwEAAaNTMFEwHQYDVR0OBBYEFJyz3Kt5XBDg5cvI
0v1ioqejqX+CMB8GA1UdIwQYMBaAFJyz3Kt5XBDg5cvI0v1ioqejqX+CMA8GA1Ud
EwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAHAQFA5VMYvaQFnKtKfHg9TF
qfJ4uM3YsGdgsgmGWgflD1S4Z290H6Q2QvyZAEceTrlJxqArlWlVp5DAU6EeXjEh
QMAgdkJHF1Hg2jsZKPtdkb88UtuzwAME357T8NtEJSHzNE5QqYwlVM71JkWpdqvA
UUdOJbWhhJfowIf4tMmL1DUuIy2qYpoP/tEBXEw9uwpmZqb7KELwT3lRyOMaGFN7
RHVwbvJWlHiu83QDNaWz6ijQkWl3tCN6TWcFD1qc1x8GpMzjbsAAYbCx7fbHM2LD
9kGSCiyv5K0MLNK5u67RtUFfPHtyD8RA0TtxIZ4PEN/eFANKS2/5NEi1ZuZ5/Pk=
-----END CERTIFICATE-----

View File

@ -1,6 +0,0 @@
net:
ssl:
mode: requireSSL
PEMKeyFile: /mongo/key.pem
CAFile: /mongo/cert.crt
allowConnectionsWithoutCertificates: true

View File

@ -1,277 +0,0 @@
import pymongo
import pytest
from helpers.client import QueryRuntimeException
from helpers.cluster import ClickHouseCluster
@pytest.fixture(scope="module")
def started_cluster(request):
try:
cluster = ClickHouseCluster(__file__)
node = cluster.add_instance(
"node",
with_mongo=True,
main_configs=[
"mongo_secure_config/config.d/ssl_conf.xml",
"configs/feature_flag.xml",
],
user_configs=["configs/users.xml"],
)
cluster.start()
yield cluster
finally:
cluster.shutdown()
def get_mongo_connection(started_cluster, secure=False, with_credentials=True):
connection_str = ""
if with_credentials:
connection_str = "mongodb://root:clickhouse@localhost:{}".format(
started_cluster.mongo_secure_port if secure else started_cluster.mongo_port
)
else:
connection_str = "mongodb://localhost:{}".format(
started_cluster.mongo_no_cred_port
)
if secure:
connection_str += "/?tls=true&tlsAllowInvalidCertificates=true"
return pymongo.MongoClient(connection_str)
@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"])
def test_simple_select(started_cluster):
mongo_connection = get_mongo_connection(started_cluster)
db = mongo_connection["test"]
db.add_user("root", "clickhouse")
simple_mongo_table = db["simple_table"]
node = started_cluster.instances["node"]
for i in range(0, 100):
node.query(
"INSERT INTO FUNCTION mongodb('mongo1:27017', 'test', 'simple_table', 'root', 'clickhouse', structure='key UInt64, data String') (key, data) VALUES ({}, '{}')".format(
i, hex(i * i)
)
)
assert (
node.query(
"SELECT COUNT() FROM mongodb('mongo1:27017', 'test', 'simple_table', 'root', 'clickhouse', structure='key UInt64, data String')"
)
== "100\n"
)
assert (
node.query(
"SELECT sum(key) FROM mongodb('mongo1:27017', 'test', 'simple_table', 'root', 'clickhouse', structure='key UInt64, data String')"
)
== str(sum(range(0, 100))) + "\n"
)
assert (
node.query(
"SELECT sum(key) FROM mongodb('mongo1:27017', 'test', 'simple_table', 'root', 'clickhouse', 'key UInt64, data String')"
)
== str(sum(range(0, 100))) + "\n"
)
assert (
node.query(
"SELECT data from mongodb('mongo1:27017', 'test', 'simple_table', 'root', 'clickhouse', structure='key UInt64, data String') where key = 42"
)
== hex(42 * 42) + "\n"
)
simple_mongo_table.drop()
@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"])
def test_complex_data_type(started_cluster):
mongo_connection = get_mongo_connection(started_cluster)
db = mongo_connection["test"]
db.add_user("root", "clickhouse")
incomplete_mongo_table = db["complex_table"]
data = []
for i in range(0, 100):
data.append({"key": i, "data": hex(i * i), "dict": {"a": i, "b": str(i)}})
incomplete_mongo_table.insert_many(data)
node = started_cluster.instances["node"]
assert (
node.query(
"SELECT COUNT() FROM mongodb('mongo1:27017', 'test', 'complex_table', 'root', 'clickhouse', structure='key UInt64, data String, dict Map(UInt64, String)')"
)
== "100\n"
)
assert (
node.query(
"SELECT sum(key) FROM mongodb('mongo1:27017', 'test', 'complex_table', 'root', 'clickhouse', structure='key UInt64, data String, dict Map(UInt64, String)')"
)
== str(sum(range(0, 100))) + "\n"
)
assert (
node.query(
"SELECT data from mongodb('mongo1:27017', 'test', 'complex_table', 'root', 'clickhouse', structure='key UInt64, data String, dict Map(UInt64, String)') where key = 42"
)
== hex(42 * 42) + "\n"
)
incomplete_mongo_table.drop()
@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"])
def test_incorrect_data_type(started_cluster):
mongo_connection = get_mongo_connection(started_cluster)
db = mongo_connection["test"]
db.add_user("root", "clickhouse")
strange_mongo_table = db["strange_table"]
data = []
for i in range(0, 100):
data.append({"key": i, "data": hex(i * i), "aaaa": "Hello"})
strange_mongo_table.insert_many(data)
node = started_cluster.instances["node"]
with pytest.raises(QueryRuntimeException):
node.query(
"SELECT aaaa FROM mongodb('mongo1:27017', 'test', 'strange_table', 'root', 'clickhouse', structure='key UInt64, data String')"
)
strange_mongo_table.drop()
@pytest.mark.parametrize("started_cluster", [True], indirect=["started_cluster"])
def test_secure_connection(started_cluster):
mongo_connection = get_mongo_connection(started_cluster, secure=True)
db = mongo_connection["test"]
db.add_user("root", "clickhouse")
simple_mongo_table = db["simple_table"]
data = []
for i in range(0, 100):
data.append({"key": i, "data": hex(i * i)})
simple_mongo_table.insert_many(data)
node = started_cluster.instances["node"]
assert (
node.query(
"SELECT COUNT() FROM mongodb('mongo_secure:27017', 'test', 'simple_table', 'root', 'clickhouse', structure='key UInt64, data String', options='ssl=true')"
)
== "100\n"
)
assert (
node.query(
"SELECT sum(key) FROM mongodb('mongo_secure:27017', 'test', 'simple_table', 'root', 'clickhouse', structure='key UInt64, data String', options='ssl=true')"
)
== str(sum(range(0, 100))) + "\n"
)
assert (
node.query(
"SELECT sum(key) FROM mongodb('mongo_secure:27017', 'test', 'simple_table', 'root', 'clickhouse', 'key UInt64, data String', 'ssl=true')"
)
== str(sum(range(0, 100))) + "\n"
)
assert (
node.query(
"SELECT data from mongodb('mongo_secure:27017', 'test', 'simple_table', 'root', 'clickhouse', structure='key UInt64, data String', options='ssl=true') where key = 42"
)
== hex(42 * 42) + "\n"
)
simple_mongo_table.drop()
@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"])
def test_predefined_connection_configuration(started_cluster):
mongo_connection = get_mongo_connection(started_cluster)
db = mongo_connection["test"]
db.add_user("root", "clickhouse")
simple_mongo_table = db["simple_table"]
data = []
for i in range(0, 100):
data.append({"key": i, "data": hex(i * i)})
simple_mongo_table.insert_many(data)
node = started_cluster.instances["node"]
assert (
node.query(
"SELECT count() FROM mongodb('mongo1:27017', 'test', 'simple_table', 'root', 'clickhouse', structure='key UInt64, data String')"
)
== "100\n"
)
simple_mongo_table.drop()
@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"])
def test_no_credentials(started_cluster):
mongo_connection = get_mongo_connection(started_cluster, with_credentials=False)
db = mongo_connection["test"]
simple_mongo_table = db["simple_table"]
data = []
for i in range(0, 100):
data.append({"key": i, "data": hex(i * i)})
simple_mongo_table.insert_many(data)
node = started_cluster.instances["node"]
assert (
node.query(
"SELECT count() FROM mongodb('mongo_no_cred:27017', 'test', 'simple_table', '', '', structure='key UInt64, data String')"
)
== "100\n"
)
simple_mongo_table.drop()
@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"])
def test_auth_source(started_cluster):
mongo_connection = get_mongo_connection(started_cluster, with_credentials=False)
admin_db = mongo_connection["admin"]
admin_db.add_user(
"root",
"clickhouse",
roles=[{"role": "userAdminAnyDatabase", "db": "admin"}, "readWriteAnyDatabase"],
)
simple_mongo_table = admin_db["simple_table"]
data = []
for i in range(0, 50):
data.append({"key": i, "data": hex(i * i)})
simple_mongo_table.insert_many(data)
db = mongo_connection["test"]
simple_mongo_table = db["simple_table"]
data = []
for i in range(0, 100):
data.append({"key": i, "data": hex(i * i)})
simple_mongo_table.insert_many(data)
node = started_cluster.instances["node"]
node.query_and_get_error(
"SELECT count() FROM mongodb('mongo_no_cred:27017', 'test', 'simple_table', 'root', 'clickhouse', structure='key UInt64, data String')"
)
assert (
node.query(
"SELECT count() FROM mongodb('mongo_no_cred:27017', 'test', 'simple_table', 'root', 'clickhouse', structure='key UInt64, data String', options='authSource=admin')"
)
== "100\n"
)
simple_mongo_table.drop()
@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"])
def test_missing_columns(started_cluster):
mongo_connection = get_mongo_connection(started_cluster)
db = mongo_connection["test"]
db.add_user("root", "clickhouse")
simple_mongo_table = db["simple_table"]
data = []
for i in range(0, 10):
data.append({"key": i, "data": hex(i * i)})
for i in range(0, 10):
data.append({"key": i})
simple_mongo_table.insert_many(data)
node = started_cluster.instances["node"]
result = node.query(
"SELECT count() FROM mongodb('mongo1:27017', 'test', 'simple_table', 'root', 'clickhouse', structure='key UInt64, data Nullable(String)') WHERE isNull(data)"
)
assert result == "10\n"
simple_mongo_table.drop()

View File

@ -9,6 +9,7 @@ generate_series
input
jdbc
merge
mongodb
null
numbers
numbers_mt

View File

@ -2,6 +2,5 @@
-- Please help shorten this list down to zero elements.
SELECT name FROM system.table_functions WHERE length(description) < 10
AND name NOT IN (
'cosn', 'oss', 'hdfs', 'hdfsCluster', 'hive', 'mysql', 'postgresql', 's3', 's3Cluster', 'sqlite', 'urlCluster', -- these functions are not enabled in fast test
'mongodb' -- will be removed when `use_legacy_mongodb_integration` setting will be purged will with the old implementation
'cosn', 'oss', 'hdfs', 'hdfsCluster', 'hive', 'mysql', 'postgresql', 's3', 's3Cluster', 'sqlite', 'urlCluster' -- these functions are not enabled in fast test
) ORDER BY name;

Some files were not shown because too many files have changed in this diff Show More