diff --git a/.gitmodules b/.gitmodules index bd61c52a5e0..3aa2e4e8ea9 100644 --- a/.gitmodules +++ b/.gitmodules @@ -363,12 +363,6 @@ [submodule "contrib/double-conversion"] path = contrib/double-conversion url = https://github.com/ClickHouse/double-conversion.git -[submodule "contrib/mongo-cxx-driver"] - path = contrib/mongo-cxx-driver - url = https://github.com/ClickHouse/mongo-cxx-driver.git -[submodule "contrib/mongo-c-driver"] - path = contrib/mongo-c-driver - url = https://github.com/ClickHouse/mongo-c-driver.git [submodule "contrib/numactl"] path = contrib/numactl url = https://github.com/ClickHouse/numactl.git diff --git a/base/poco/CMakeLists.txt b/base/poco/CMakeLists.txt index 434e24cf334..82c48b5b622 100644 --- a/base/poco/CMakeLists.txt +++ b/base/poco/CMakeLists.txt @@ -3,11 +3,7 @@ add_subdirectory (Data) add_subdirectory (Data/ODBC) add_subdirectory (Foundation) add_subdirectory (JSON) - -if (USE_MONGODB) - add_subdirectory(MongoDB) -endif() - +add_subdirectory (MongoDB) add_subdirectory (Net) add_subdirectory (NetSSL_OpenSSL) add_subdirectory (Redis) diff --git a/cmake/linux/toolchain-x86_64-musl.cmake b/cmake/linux/toolchain-x86_64-musl.cmake index 26c159edd85..fa7b3eaf0d1 100644 --- a/cmake/linux/toolchain-x86_64-musl.cmake +++ b/cmake/linux/toolchain-x86_64-musl.cmake @@ -18,4 +18,4 @@ set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}") set (CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}") set (USE_MUSL 1) -add_definitions(-DUSE_MUSL=1 -D__MUSL__=1) +add_definitions(-DUSE_MUSL=1) diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index b102b2919d9..c36ace61396 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -160,12 +160,6 @@ add_contrib (datasketches-cpp-cmake datasketches-cpp) add_contrib (incbin-cmake incbin) add_contrib (sqids-cpp-cmake sqids-cpp) -option(USE_MONGODB "Enable MongoDB support" ${ENABLE_LIBRARIES}) -if (USE_MONGODB) - add_contrib (mongo-c-driver-cmake mongo-c-driver) # requires: zlib - add_contrib (mongo-cxx-driver-cmake mongo-cxx-driver) # requires: libmongoc, libbson -endif() - option(ENABLE_NLP "Enable NLP functions support" ${ENABLE_LIBRARIES}) if (ENABLE_NLP) add_contrib (libstemmer-c-cmake libstemmer_c) diff --git a/contrib/mongo-c-driver b/contrib/mongo-c-driver deleted file mode 160000 index d55410c6918..00000000000 --- a/contrib/mongo-c-driver +++ /dev/null @@ -1 +0,0 @@ -Subproject commit d55410c69183c90d18fd3b3f1d9db3d224fc8d52 diff --git a/contrib/mongo-c-driver-cmake/CMakeLists.txt b/contrib/mongo-c-driver-cmake/CMakeLists.txt deleted file mode 100644 index 60712074479..00000000000 --- a/contrib/mongo-c-driver-cmake/CMakeLists.txt +++ /dev/null @@ -1,152 +0,0 @@ -option(USE_MONGODB "Enable MongoDB support" ${ENABLE_LIBRARIES}) -if(NOT USE_MONGODB) - message(STATUS "Not using libmongoc and libbson") - return() -endif() - -set(libbson_VERSION_MAJOR 1) -set(libbson_VERSION_MINOR 27) -set(libbson_VERSION_PATCH 0) -set(libbson_VERSION 1.27.0) -set(libmongoc_VERSION_MAJOR 1) -set(libmongoc_VERSION_MINOR 27) -set(libmongoc_VERSION_PATCH 0) -set(libmongoc_VERSION 1.27.0) - -set(LIBBSON_SOURCES_ROOT "${ClickHouse_SOURCE_DIR}/contrib/mongo-c-driver/src") -set(LIBBSON_SOURCE_DIR "${LIBBSON_SOURCES_ROOT}/libbson/src") -file(GLOB_RECURSE LIBBSON_SOURCES "${LIBBSON_SOURCE_DIR}/*.c") - -include(TestBigEndian) -test_big_endian(BSON_BIG_ENDIAN) -if(BSON_BIG_ENDIAN) - set(BSON_BYTE_ORDER 4321) -else() - set(BSON_BYTE_ORDER 1234) -endif() - -set(BSON_OS 1) -set(BSON_EXTRA_ALIGN 1) 
-set(BSON_HAVE_SNPRINTF 1) -set(BSON_HAVE_TIMESPEC 1) -set(BSON_HAVE_GMTIME_R 1) -set(BSON_HAVE_RAND_R 1) -set(BSON_HAVE_STRINGS_H 1) -set(BSON_HAVE_STRLCPY 0) -set(BSON_HAVE_STRNLEN 1) -set(BSON_HAVE_STDBOOL_H 1) -set(BSON_HAVE_CLOCK_GETTIME 1) - - -# common settings -set(MONGOC_TRACE 0) -set(MONGOC_ENABLE_STATIC_BUILD 1) -set(MONGOC_ENABLE_DEBUG_ASSERTIONS 0) -set(MONGOC_ENABLE_MONGODB_AWS_AUTH 0) -set(MONGOC_ENABLE_SASL_CYRUS 0) -set(MONGOC_ENABLE_SASL 0) -set(MONGOC_ENABLE_SASL_SSPI 0) -set(MONGOC_HAVE_SASL_CLIENT_DONE 0) -set(MONGOC_ENABLE_SRV 0) - -# DNS -set(MONGOC_HAVE_DNSAPI 0) -set(MONGOC_HAVE_RES_SEARCH 0) -set(MONGOC_HAVE_RES_NSEARCH 0) -set(MONGOC_HAVE_RES_NCLOSE 0) -set(MONGOC_HAVE_RES_NDESTROY 0) - -set(MONGOC_ENABLE_COMPRESSION 1) -set(MONGOC_ENABLE_COMPRESSION_ZLIB 0) -set(MONGOC_ENABLE_COMPRESSION_SNAPPY 0) -set(MONGOC_ENABLE_COMPRESSION_ZSTD 1) - -# SSL -set(MONGOC_ENABLE_CRYPTO 0) -set(MONGOC_ENABLE_CRYPTO_CNG 0) -set(MONGOC_ENABLE_CRYPTO_COMMON_CRYPTO 0) -set(MONGOC_ENABLE_CRYPTO_SYSTEM_PROFILE 0) -set(MONGOC_ENABLE_SSL 0) -set(MONGOC_ENABLE_SSL_OPENSSL 0) -set(MONGOC_ENABLE_SSL_SECURE_CHANNEL 0) -set(MONGOC_ENABLE_SSL_SECURE_TRANSPORT 0) -set(MONGOC_ENABLE_SSL_LIBRESSL 0) -set(MONGOC_ENABLE_CRYPTO_LIBCRYPTO 0) -set(MONGOC_ENABLE_CLIENT_SIDE_ENCRYPTION 0) -set(MONGOC_HAVE_ASN1_STRING_GET0_DATA 0) -if(ENABLE_SSL) - set(MONGOC_ENABLE_SSL 1) - set(MONGOC_ENABLE_CRYPTO 1) - set(MONGOC_ENABLE_SSL_OPENSSL 1) - set(MONGOC_ENABLE_CRYPTO_LIBCRYPTO 1) - set(MONGOC_HAVE_ASN1_STRING_GET0_DATA 1) -else() - message(WARNING "Building mongoc without SSL") -endif() - -set(CMAKE_EXTRA_INCLUDE_FILES "sys/socket.h") -set(MONGOC_SOCKET_ARG2 "struct sockaddr") -set(MONGOC_HAVE_SOCKLEN 1) -set(MONGOC_SOCKET_ARG3 "socklen_t") - -set(MONGOC_ENABLE_RDTSCP 0) -set(MONGOC_NO_AUTOMATIC_GLOBALS 1) -set(MONGOC_ENABLE_STATIC_INSTALL 0) -set(MONGOC_ENABLE_SHM_COUNTERS 0) -set(MONGOC_HAVE_SCHED_GETCPU 0) -set(MONGOC_HAVE_SS_FAMILY 0) - -configure_file( - ${LIBBSON_SOURCE_DIR}/bson/bson-config.h.in - ${LIBBSON_SOURCE_DIR}/bson/bson-config.h -) -configure_file( - ${LIBBSON_SOURCE_DIR}/bson/bson-version.h.in - ${LIBBSON_SOURCE_DIR}/bson/bson-version.h -) - -configure_file( - ${LIBBSON_SOURCE_DIR}/bson/bson-version.h.in - ${LIBBSON_SOURCE_DIR}/bson/bson-version.h -) - -set(COMMON_SOURCE_DIR "${LIBBSON_SOURCES_ROOT}/common") -file(GLOB_RECURSE COMMON_SOURCES "${COMMON_SOURCE_DIR}/*.c") -configure_file( - ${COMMON_SOURCE_DIR}/common-config.h.in - ${COMMON_SOURCE_DIR}/common-config.h -) -add_library(_libbson ${LIBBSON_SOURCES} ${COMMON_SOURCES}) -add_library(ch_contrib::libbson ALIAS _libbson) -target_include_directories(_libbson SYSTEM PUBLIC ${LIBBSON_SOURCE_DIR} ${COMMON_SOURCE_DIR}) -target_compile_definitions(_libbson PRIVATE BSON_COMPILATION) -if(OS_LINUX) - target_compile_definitions(_libbson PRIVATE -D_GNU_SOURCE -D_POSIX_C_SOURCE=199309L -D_XOPEN_SOURCE=600) -elseif(OS_DARWIN) - target_compile_definitions(_libbson PRIVATE -D_DARWIN_C_SOURCE) -endif() - - -set(LIBMONGOC_SOURCE_DIR "${LIBBSON_SOURCES_ROOT}/libmongoc/src") -file(GLOB_RECURSE LIBMONGOC_SOURCES "${LIBMONGOC_SOURCE_DIR}/*.c") -set(UTF8PROC_SOURCE_DIR "${LIBBSON_SOURCES_ROOT}/utf8proc-2.8.0") -set(UTF8PROC_SOURCES "${UTF8PROC_SOURCE_DIR}/utf8proc.c") -set(UTHASH_SOURCE_DIR "${LIBBSON_SOURCES_ROOT}/uthash") - -configure_file( - ${LIBMONGOC_SOURCE_DIR}/mongoc/mongoc-config.h.in - ${LIBMONGOC_SOURCE_DIR}/mongoc/mongoc-config.h -) -configure_file( - ${LIBMONGOC_SOURCE_DIR}/mongoc/mongoc-version.h.in - 
${LIBMONGOC_SOURCE_DIR}/mongoc/mongoc-version.h -) -add_library(_libmongoc ${LIBMONGOC_SOURCES} ${COMMON_SOURCES} ${UTF8PROC_SOURCES}) -add_library(ch_contrib::libmongoc ALIAS _libmongoc) -target_include_directories(_libmongoc SYSTEM PUBLIC ${LIBMONGOC_SOURCE_DIR} ${COMMON_SOURCE_DIR} ${UTF8PROC_SOURCE_DIR} ${UTHASH_SOURCE_DIR}) -target_include_directories(_libmongoc SYSTEM PRIVATE ${LIBMONGOC_SOURCE_DIR}/mongoc ${UTHASH_SOURCE_DIR}) -target_compile_definitions(_libmongoc PRIVATE MONGOC_COMPILATION) -target_link_libraries(_libmongoc ch_contrib::libbson ch_contrib::c-ares ch_contrib::zstd) -if(ENABLE_SSL) - target_link_libraries(_libmongoc OpenSSL::SSL) -endif() diff --git a/contrib/mongo-cxx-driver b/contrib/mongo-cxx-driver deleted file mode 160000 index 3166bdb49b7..00000000000 --- a/contrib/mongo-cxx-driver +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 3166bdb49b717ce1bc30f46cc2b274ab1de7005b diff --git a/contrib/mongo-cxx-driver-cmake/CMakeLists.txt b/contrib/mongo-cxx-driver-cmake/CMakeLists.txt deleted file mode 100644 index 23ee4204b8b..00000000000 --- a/contrib/mongo-cxx-driver-cmake/CMakeLists.txt +++ /dev/null @@ -1,189 +0,0 @@ -option(USE_MONGODB "Enable MongoDB support" ${ENABLE_LIBRARIES}) - -if(NOT USE_MONGODB) - message(STATUS "Not using mongocxx and bsoncxx") - return() -endif() - -set(BSONCXX_SOURCES_DIR "${ClickHouse_SOURCE_DIR}/contrib/mongo-cxx-driver/src/bsoncxx") -set(BSONCXX_SOURCES - ${BSONCXX_SOURCES_DIR}/lib/bsoncxx/v_noabi/bsoncxx/array/element.cpp - ${BSONCXX_SOURCES_DIR}/lib/bsoncxx/v_noabi/bsoncxx/array/value.cpp - ${BSONCXX_SOURCES_DIR}/lib/bsoncxx/v_noabi/bsoncxx/array/view.cpp - ${BSONCXX_SOURCES_DIR}/lib/bsoncxx/v_noabi/bsoncxx/builder/core.cpp - ${BSONCXX_SOURCES_DIR}/lib/bsoncxx/v_noabi/bsoncxx/decimal128.cpp - ${BSONCXX_SOURCES_DIR}/lib/bsoncxx/v_noabi/bsoncxx/document/element.cpp - ${BSONCXX_SOURCES_DIR}/lib/bsoncxx/v_noabi/bsoncxx/document/value.cpp - ${BSONCXX_SOURCES_DIR}/lib/bsoncxx/v_noabi/bsoncxx/document/view.cpp - ${BSONCXX_SOURCES_DIR}/lib/bsoncxx/v_noabi/bsoncxx/exception/error_code.cpp - ${BSONCXX_SOURCES_DIR}/lib/bsoncxx/v_noabi/bsoncxx/json.cpp - ${BSONCXX_SOURCES_DIR}/lib/bsoncxx/v_noabi/bsoncxx/oid.cpp - ${BSONCXX_SOURCES_DIR}/lib/bsoncxx/v_noabi/bsoncxx/private/itoa.cpp - ${BSONCXX_SOURCES_DIR}/lib/bsoncxx/v_noabi/bsoncxx/string/view_or_value.cpp - ${BSONCXX_SOURCES_DIR}/lib/bsoncxx/v_noabi/bsoncxx/types.cpp - ${BSONCXX_SOURCES_DIR}/lib/bsoncxx/v_noabi/bsoncxx/types/bson_value/value.cpp - ${BSONCXX_SOURCES_DIR}/lib/bsoncxx/v_noabi/bsoncxx/types/bson_value/view.cpp - ${BSONCXX_SOURCES_DIR}/lib/bsoncxx/v_noabi/bsoncxx/validate.cpp -) -set(BSONCXX_POLY_USE_IMPLS ON) - -configure_file( - ${BSONCXX_SOURCES_DIR}/lib/bsoncxx/v_noabi/bsoncxx/config/config.hpp.in - ${BSONCXX_SOURCES_DIR}/lib/bsoncxx/v_noabi/bsoncxx/config/config.hpp -) -configure_file( - ${BSONCXX_SOURCES_DIR}/lib/bsoncxx/v_noabi/bsoncxx/config/version.hpp.in - ${BSONCXX_SOURCES_DIR}/lib/bsoncxx/v_noabi/bsoncxx/config/version.hpp -) -configure_file( - ${BSONCXX_SOURCES_DIR}/lib/bsoncxx/v_noabi/bsoncxx/config/private/config.hh.in - ${BSONCXX_SOURCES_DIR}/lib/bsoncxx/v_noabi/bsoncxx/config/private/config.hh -) - -add_library(_bsoncxx ${BSONCXX_SOURCES}) -add_library(ch_contrib::bsoncxx ALIAS _bsoncxx) -target_include_directories(_bsoncxx SYSTEM PUBLIC "${BSONCXX_SOURCES_DIR}/include/bsoncxx/v_noabi" ${BSONCXX_SOURCES_DIR}/lib/bsoncxx/v_noabi) -target_compile_definitions(_bsoncxx PUBLIC BSONCXX_STATIC) -target_link_libraries(_bsoncxx ch_contrib::libbson) - 
-include(GenerateExportHeader) -generate_export_header(_bsoncxx - BASE_NAME BSONCXX - EXPORT_MACRO_NAME BSONCXX_API - NO_EXPORT_MACRO_NAME BSONCXX_PRIVATE - EXPORT_FILE_NAME ${BSONCXX_SOURCES_DIR}/lib/bsoncxx/v_noabi/bsoncxx/config/export.hpp - STATIC_DEFINE BSONCXX_STATIC -) - - - -set(MONGOCXX_SOURCES_DIR "${ClickHouse_SOURCE_DIR}/contrib/mongo-cxx-driver/src/mongocxx") -set(MONGOCXX_SOURCES - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/bulk_write.cpp - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/change_stream.cpp - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/client.cpp - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/client_encryption.cpp - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/client_session.cpp - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/collection.cpp - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/cursor.cpp - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/database.cpp - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/events/command_failed_event.cpp - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/events/command_started_event.cpp - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/events/command_succeeded_event.cpp - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/events/heartbeat_failed_event.cpp - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/events/heartbeat_started_event.cpp - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/events/heartbeat_succeeded_event.cpp - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/events/server_changed_event.cpp - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/events/server_closed_event.cpp - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/events/server_description.cpp - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/events/server_opening_event.cpp - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/events/topology_changed_event.cpp - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/events/topology_closed_event.cpp - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/events/topology_description.cpp - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/events/topology_opening_event.cpp - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/exception/error_code.cpp - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/exception/operation_exception.cpp - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/exception/server_error_code.cpp - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/gridfs/bucket.cpp - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/gridfs/downloader.cpp - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/gridfs/uploader.cpp - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/hint.cpp - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/index_model.cpp - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/index_view.cpp - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/instance.cpp - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/logger.cpp - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/model/delete_many.cpp - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/model/delete_one.cpp - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/model/insert_one.cpp - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/model/replace_one.cpp - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/model/update_many.cpp - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/model/update_one.cpp - 
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/model/write.cpp - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/aggregate.cpp - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/apm.cpp - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/auto_encryption.cpp - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/bulk_write.cpp - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/change_stream.cpp - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/client.cpp - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/client_encryption.cpp - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/client_session.cpp - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/count.cpp - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/create_collection.cpp - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/data_key.cpp - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/delete.cpp - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/distinct.cpp - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/encrypt.cpp - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/estimated_document_count.cpp - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/find.cpp - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/find_one_and_delete.cpp - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/find_one_and_replace.cpp - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/find_one_and_update.cpp - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/gridfs/bucket.cpp - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/gridfs/upload.cpp - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/index.cpp - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/index_view.cpp - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/insert.cpp - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/pool.cpp - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/range.cpp - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/replace.cpp - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/rewrap_many_datakey.cpp - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/server_api.cpp - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/tls.cpp - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/transaction.cpp - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/options/update.cpp - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/pipeline.cpp - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/pool.cpp - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/private/conversions.cpp - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/private/libbson.cpp - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/private/libmongoc.cpp - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/private/numeric_casting.cpp - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/read_concern.cpp - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/read_preference.cpp - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/result/bulk_write.cpp - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/result/delete.cpp - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/result/gridfs/upload.cpp - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/result/insert_many.cpp - 
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/result/insert_one.cpp - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/result/replace_one.cpp - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/result/rewrap_many_datakey.cpp - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/result/update.cpp - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/search_index_model.cpp - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/search_index_view.cpp - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/uri.cpp - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/validation_criteria.cpp - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/write_concern.cpp -) -set(MONGOCXX_COMPILER_VERSION "${CMAKE_CXX_COMPILER_VERSION}") -set(MONGOCXX_COMPILER_ID "${CMAKE_CXX_COMPILER_ID}") -set(MONGOCXX_LINK_WITH_STATIC_MONGOC 1) -set(MONGOCXX_BUILD_STATIC 1) -if(ENABLE_SSL) - set(MONGOCXX_ENABLE_SSL 1) -endif() - -configure_file( - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/config/config.hpp.in - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/config/config.hpp -) -configure_file( - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/config/version.hpp.in - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/config/version.hpp -) -configure_file( - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/config/private/config.hh.in - ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/config/private/config.hh -) - -add_library(_mongocxx ${MONGOCXX_SOURCES}) -add_library(ch_contrib::mongocxx ALIAS _mongocxx) -target_include_directories(_mongocxx SYSTEM PUBLIC "${MONGOCXX_SOURCES_DIR}/include/mongocxx/v_noabi" ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi) -target_compile_definitions(_mongocxx PUBLIC MONGOCXX_STATIC) -target_link_libraries(_mongocxx ch_contrib::bsoncxx ch_contrib::libmongoc) - -generate_export_header(_mongocxx - BASE_NAME MONGOCXX - EXPORT_MACRO_NAME MONGOCXX_API - NO_EXPORT_MACRO_NAME MONGOCXX_PRIVATE - EXPORT_FILE_NAME ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/config/export.hpp - STATIC_DEFINE MONGOCXX_STATIC -) diff --git a/contrib/sysroot b/contrib/sysroot index 738138e6658..5be834147d5 160000 --- a/contrib/sysroot +++ b/contrib/sysroot @@ -1 +1 @@ -Subproject commit 738138e665809a5b28c453983c5f48f23a340ed6 +Subproject commit 5be834147d5b5dd77ca2b821f356982029320513 diff --git a/docs/en/engines/table-engines/integrations/mongodb.md b/docs/en/engines/table-engines/integrations/mongodb.md index 999863eaa1a..5bb3bc752f5 100644 --- a/docs/en/engines/table-engines/integrations/mongodb.md +++ b/docs/en/engines/table-engines/integrations/mongodb.md @@ -6,15 +6,7 @@ sidebar_label: MongoDB # MongoDB -MongoDB engine is read-only table engine which allows to read data from remote [MongoDB](https://www.mongodb.com/) collection. - -Only MongoDB v3.6+ servers are supported. -[Seed list(`mongodb**+srv**`)](https://www.mongodb.com/docs/manual/reference/glossary/#std-term-seed-list) is not yet supported. - -:::note -If you're facing troubles, please report the issue, and try to use [the legacy implementation](../../../operations/server-configuration-parameters/settings.md#use_legacy_mongodb_integration). -Keep in mind that it is deprecated, and will be removed in next releases. -::: +MongoDB engine is read-only table engine which allows to read data (`SELECT` queries) from remote MongoDB collection. Engine supports only non-nested data types. `INSERT` queries are not supported. 
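In practice the restored contract above amounts to the following (a minimal sketch that reuses the `mongo_table` definition from the restored usage example further down; the filter value is illustrative):

```sql
-- Supported: plain SELECT over the engine's flat columns
SELECT key, data FROM mongo_table WHERE key = 42;

-- Not supported by the legacy engine: any write
-- INSERT INTO mongo_table VALUES (1, 'new row');   -- returns an error
```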
## Creating a Table {#creating-a-table} @@ -48,145 +40,49 @@ If you are using the MongoDB Atlas cloud offering: - connection url can be obtained from 'Atlas SQL' option - use options: 'connectTimeoutMS=10000&ssl=true&authSource=admin' ``` -::: - -Also, you can simply pass a URI: - -``` sql -ENGINE = MongoDB(uri, collection); -``` - -**Engine Parameters** - -- `uri` — MongoDB server's connection URI - -- `collection` — Remote collection name. - - -## Types mappings - -| MongoDB | ClickHouse | -|--------------------|-----------------------------------------------------------------------| -| bool, int32, int64 | *any numeric type*, String | -| double | Float64, String | -| date | Date, Date32, DateTime, DateTime64, String | -| string | String, UUID | -| document | String(as JSON) | -| array | Array, String(as JSON) | -| oid | String | -| binary | String if in column, base64 encoded string if in an array or document | -| *any other* | String | - -If key is not found in MongoDB document (for example, column name doesn't match), default value or `NULL` (if the column is nullable) will be inserted. - -## Supported clauses - -Only queries with simple expressions are supported (for example, `WHERE field = ORDER BY field2 LIMIT `). -Such expressions are translated to MongoDB query language and executed on the server side. -You can disable all these restriction, using [mongodb_throw_on_unsupported_query](../../../operations/settings/settings.md#mongodb_throw_on_unsupported_query). -In that case ClickHouse tries to convert query on best effort basis, but it can lead to full table scan and processing on ClickHouse side. - -:::note -It's always better to explicitly set type of literal because Mongo requires strict typed filters.\ -For example you want to filter by `Date`: - -```sql -SELECT * FROM mongo_table WHERE date = '2024-01-01' -``` - -This will not work because Mongo will not cast string to `Date`, so you need to cast it manually: - -```sql -SELECT * FROM mongo_table WHERE date = '2024-01-01'::Date OR date = toDate('2024-01-01') -``` - -This applied for `Date`, `Date32`, `DateTime`, `Bool`, `UUID`. ::: - ## Usage Example {#usage-example} - -Assuming MongoDB has [sample_mflix](https://www.mongodb.com/docs/atlas/sample-data/sample-mflix) dataset loaded - Create a table in ClickHouse which allows to read data from MongoDB collection: ``` sql -CREATE TABLE sample_mflix_table +CREATE TABLE mongo_table ( - _id String, - title String, - plot String, - genres Array(String), - directors Array(String), - writers Array(String), - released Date, - imdb String, - year String, -) ENGINE = MongoDB('mongodb+srv://:@cluster0.cdojylq.mongodb.net/sample_mflix', 'movies'); + key UInt64, + data String +) ENGINE = MongoDB('mongo1:27017', 'test', 'simple_table', 'testuser', 'clickhouse'); +``` + +To read from an SSL secured MongoDB server: + +``` sql +CREATE TABLE mongo_table_ssl +( + key UInt64, + data String +) ENGINE = MongoDB('mongo2:27017', 'test', 'simple_table', 'testuser', 'clickhouse', 'ssl=true'); ``` Query: ``` sql -SELECT count() FROM sample_mflix_table +SELECT COUNT() FROM mongo_table; ``` ``` text - ┌─count()─┐ -1. 
│ 21349 │ - └─────────┘ +┌─count()─┐ +│ 4 │ +└─────────┘ ``` -```SQL --- JSONExtractString cannot be pushed down to MongoDB -SET mongodb_throw_on_unsupported_query = 0; +You can also adjust connection timeout: --- Find all 'Back to the Future' sequels with rating > 7.5 -SELECT title, plot, genres, directors, released FROM sample_mflix_table -WHERE title IN ('Back to the Future', 'Back to the Future Part II', 'Back to the Future Part III') - AND toFloat32(JSONExtractString(imdb, 'rating')) > 7.5 -ORDER BY year -FORMAT Vertical; +``` sql +CREATE TABLE mongo_table +( + key UInt64, + data String +) ENGINE = MongoDB('mongo2:27017', 'test', 'simple_table', 'testuser', 'clickhouse', 'connectTimeoutMS=100000'); ``` - -```text -Row 1: -────── -title: Back to the Future -plot: A young man is accidentally sent 30 years into the past in a time-traveling DeLorean invented by his friend, Dr. Emmett Brown, and must make sure his high-school-age parents unite in order to save his own existence. -genres: ['Adventure','Comedy','Sci-Fi'] -directors: ['Robert Zemeckis'] -released: 1985-07-03 - -Row 2: -────── -title: Back to the Future Part II -plot: After visiting 2015, Marty McFly must repeat his visit to 1955 to prevent disastrous changes to 1985... without interfering with his first trip. -genres: ['Action','Adventure','Comedy'] -directors: ['Robert Zemeckis'] -released: 1989-11-22 -``` - -```SQL --- Find top 3 movies based on Cormac McCarthy's books -SELECT title, toFloat32(JSONExtractString(imdb, 'rating')) as rating -FROM sample_mflix_table -WHERE arrayExists(x -> x like 'Cormac McCarthy%', writers) -ORDER BY rating DESC -LIMIT 3; -``` - -```text - ┌─title──────────────────┬─rating─┐ -1. │ No Country for Old Men │ 8.1 │ -2. │ The Sunset Limited │ 7.4 │ -3. │ The Road │ 7.3 │ - └────────────────────────┴────────┘ -``` - -## Troubleshooting -You can see the generated MongoDB query in DEBUG level logs. - -Implementation details can be found in [mongocxx](https://github.com/mongodb/mongo-cxx-driver) and [mongoc](https://github.com/mongodb/mongo-c-driver) documentations. diff --git a/docs/en/operations/server-configuration-parameters/settings.md b/docs/en/operations/server-configuration-parameters/settings.md index 79407d46ce0..014141aa33b 100644 --- a/docs/en/operations/server-configuration-parameters/settings.md +++ b/docs/en/operations/server-configuration-parameters/settings.md @@ -3162,11 +3162,3 @@ Type: UInt64 Default value: 100 Zero means unlimited - -## use_legacy_mongodb_integration - -Use the legacy MongoDB integration implementation. Deprecated. - -Type: Bool - -Default value: `true`. diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md index 9b53ddaa4da..fdd053342a8 100644 --- a/docs/en/operations/settings/settings.md +++ b/docs/en/operations/settings/settings.md @@ -5682,11 +5682,3 @@ Default value: `0`. Enable `IF NOT EXISTS` for `CREATE` statement by default. If either this setting or `IF NOT EXISTS` is specified and a table with the provided name already exists, no exception will be thrown. Default value: `false`. - -## mongodb_throw_on_unsupported_query - -If enabled, MongoDB tables will return an error when a MongoDB query can't be built. - -Not applied for the legacy implementation, or when 'allow_experimental_analyzer=0`. - -Default value: `true`. 
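For context on the two settings whose documentation is removed above: `use_legacy_mongodb_integration` was a server-level setting, so it was toggled in the server configuration rather than per query. A minimal sketch, assuming the usual `config.xml` layout:

```xml
<!-- Server setting documented above; it disappears together with this revert.
     true selected the legacy Poco-based integration, false the mongocxx-based one. -->
<clickhouse>
    <use_legacy_mongodb_integration>true</use_legacy_mongodb_integration>
</clickhouse>
```

The per-query counterpart was switched off with `SET mongodb_throw_on_unsupported_query = 0`, exactly as the removed engine documentation above does before running a query that cannot be pushed down to MongoDB.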
diff --git a/docs/en/sql-reference/dictionaries/index.md b/docs/en/sql-reference/dictionaries/index.md
index 9f6c9868ae6..c6aedb63961 100644
--- a/docs/en/sql-reference/dictionaries/index.md
+++ b/docs/en/sql-reference/dictionaries/index.md
@@ -1680,7 +1680,7 @@ Setting fields:
 The `table` or `where` fields cannot be used together with the `query` field. And either one of the `table` or `query` fields must be declared.
 :::
 
-#### MongoDB
+#### Mongodb
 
 Example of settings:
 
@@ -1700,17 +1700,6 @@ Example of settings:
 
 or
 
-``` xml
-<source>
-    <mongodb>
-        <uri>mongodb://localhost:27017/test?ssl=true</uri>
-        <collection>dictionary_source</collection>
-    </mongodb>
-</source>
-```
-
-or
-
 ``` sql
 SOURCE(MONGODB(
     host 'localhost'
@@ -1733,22 +1722,6 @@ Setting fields:
 - `collection` – Name of the collection.
 - `options` - MongoDB connection string options (optional parameter).
 
-or
-
-``` sql
-SOURCE(MONGODB(
-    uri 'mongodb://localhost:27017/clickhouse'
-    collection 'dictionary_source'
-))
-```
-
-Setting fields:
-
-- `uri` - URI for establish the connection.
-- `collection` – Name of the collection.
-
-[More information about the engine](../../engines/table-engines/integrations/mongodb.md)
-
 #### Redis
 
@@ -2065,7 +2038,7 @@ Configuration fields:
 | `expression`   | [Expression](../../sql-reference/syntax.md#expressions) that ClickHouse executes on the value.<br/>The expression can be a column name in the remote SQL database. Thus, you can use it to create an alias for the remote column.<br/><br/>Default value: no expression. | No |
 | `hierarchical` | If `true`, the attribute contains the value of a parent key for the current key. See [Hierarchical Dictionaries](#hierarchical-dictionaries).<br/><br/>Default value: `false`. | No |
 | `injective`    | Flag that shows whether the `id -> attribute` image is [injective](https://en.wikipedia.org/wiki/Injective_function).<br/>If `true`, ClickHouse can automatically place after the `GROUP BY` clause the requests to dictionaries with injection. Usually it significantly reduces the amount of such requests.<br/><br/>Default value: `false`. | No |
-| `is_object_id` | Flag that shows whether the query is executed for a MongoDB document by `ObjectID`.<br/><br/>Default value: `false`.
+| `is_object_id` | Flag that shows whether the query is executed for a MongoDB document by `ObjectID`.<br/><br/>
Default value: `false`. ## Hierarchical Dictionaries diff --git a/docs/en/sql-reference/table-functions/mongodb.md b/docs/en/sql-reference/table-functions/mongodb.md index 6b677f4aa9c..a483414c0d4 100644 --- a/docs/en/sql-reference/table-functions/mongodb.md +++ b/docs/en/sql-reference/table-functions/mongodb.md @@ -39,18 +39,6 @@ If you are using the MongoDB Atlas cloud offering please add these options: ::: -Also, you can connect by URI: -``` sql -mongodb(uri, collection, structure) -``` -**Arguments** - -- `uri` — Connection string. - -- `collection` — Remote collection name. - -- `structure` — The schema for the ClickHouse table returned from this function. - **Returned Value** A table object with the same columns as the original MongoDB table. @@ -88,16 +76,6 @@ SELECT * FROM mongodb( ) ``` -or: - -```sql -SELECT * FROM mongodb( - 'mongodb://test_user:password@127.0.0.1:27017/test?connectionTimeoutMS=10000', - 'my_collection', - 'log_type String, host String, command String' -) -``` - **See Also** - [The `MongoDB` table engine](/docs/en/engines/table-engines/integrations/mongodb.md) diff --git a/programs/format/Format.cpp b/programs/format/Format.cpp index 900d9cb8e01..522b9a74cff 100644 --- a/programs/format/Format.cpp +++ b/programs/format/Format.cpp @@ -188,9 +188,9 @@ int mainEntryClickHouseFormat(int argc, char ** argv) registerInterpreters(); registerFunctions(); registerAggregateFunctions(); - registerTableFunctions(false); + registerTableFunctions(); registerDatabases(); - registerStorages(false); + registerStorages(); registerFormats(); std::unordered_set additional_names; diff --git a/programs/local/LocalServer.cpp b/programs/local/LocalServer.cpp index 00d4ee1ca65..f678194849d 100644 --- a/programs/local/LocalServer.cpp +++ b/programs/local/LocalServer.cpp @@ -507,10 +507,10 @@ try /// Don't initialize DateLUT registerFunctions(); registerAggregateFunctions(); - registerTableFunctions(server_settings.use_legacy_mongodb_integration); + registerTableFunctions(); registerDatabases(); - registerStorages(server_settings.use_legacy_mongodb_integration); - registerDictionaries(server_settings.use_legacy_mongodb_integration); + registerStorages(); + registerDictionaries(); registerDisks(/* global_skip_access_check= */ true); registerFormats(); diff --git a/programs/server/Server.cpp b/programs/server/Server.cpp index 9cf0e08e0ef..f0c9719051f 100644 --- a/programs/server/Server.cpp +++ b/programs/server/Server.cpp @@ -784,10 +784,10 @@ try registerInterpreters(); registerFunctions(); registerAggregateFunctions(); - registerTableFunctions(server_settings.use_legacy_mongodb_integration); + registerTableFunctions(); registerDatabases(); - registerStorages(server_settings.use_legacy_mongodb_integration); - registerDictionaries(server_settings.use_legacy_mongodb_integration); + registerStorages(); + registerDictionaries(); registerDisks(/* global_skip_access_check= */ false); registerFormats(); registerRemoteFileMetadatas(); diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index b8e55b85212..f0410eee9fe 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -412,23 +412,10 @@ dbms_target_link_libraries ( PUBLIC boost::system clickhouse_common_io + Poco::MongoDB Poco::Redis ) -if (USE_MONGODB) - dbms_target_link_libraries (PUBLIC Poco::MongoDB) -endif() - -if (TARGET ch_contrib::mongocxx) - dbms_target_link_libraries( - PUBLIC - ch_contrib::libbson - ch_contrib::libmongoc - ch_contrib::bsoncxx - ch_contrib::mongocxx - ) -endif () - if (TARGET ch::mysqlxx) 
dbms_target_link_libraries (PUBLIC ch::mysqlxx) endif() diff --git a/src/Common/BSONCXXHelper.h b/src/Common/BSONCXXHelper.h deleted file mode 100644 index 70fb89722ea..00000000000 --- a/src/Common/BSONCXXHelper.h +++ /dev/null @@ -1,377 +0,0 @@ -#pragma once - -#include "config.h" - -#if USE_MONGODB -#include -#include - -namespace DB -{ - -namespace ErrorCodes -{ -extern const int TYPE_MISMATCH; -extern const int NOT_IMPLEMENTED; -} - -namespace BSONCXXHelper -{ - -using bsoncxx::builder::basic::array; -using bsoncxx::builder::basic::document; -using bsoncxx::builder::basic::kvp; -using bsoncxx::builder::basic::make_document; - -static bsoncxx::types::bson_value::value fieldAsBSONValue(const Field & field, const DataTypePtr & type) -{ - switch (type->getTypeId()) - { - case TypeIndex::String: - return bsoncxx::types::b_string{field.safeGet()}; - case TypeIndex::UInt8: { - if (isBool(type)) - return bsoncxx::types::b_bool{field.safeGet() != 0}; - return bsoncxx::types::b_int32{static_cast(field.safeGet())}; - } - case TypeIndex::UInt16: - return bsoncxx::types::b_int32{static_cast(field.safeGet())}; - case TypeIndex::UInt32: - return bsoncxx::types::b_int64{static_cast(field.safeGet())}; - case TypeIndex::UInt64: - return bsoncxx::types::b_double{static_cast(field.safeGet())}; - case TypeIndex::Int8: - return bsoncxx::types::b_int32{static_cast(field.safeGet())}; - case TypeIndex::Int16: - return bsoncxx::types::b_int32{static_cast(field.safeGet())}; - case TypeIndex::Int32: - return bsoncxx::types::b_int32{static_cast(field.safeGet())}; - case TypeIndex::Int64: - return bsoncxx::types::b_int64{field.safeGet()}; - case TypeIndex::Float32: - return bsoncxx::types::b_double{field.safeGet()}; - case TypeIndex::Float64: - return bsoncxx::types::b_double{field.safeGet()}; - case TypeIndex::Date: - return bsoncxx::types::b_date{std::chrono::seconds{field.safeGet() * 86400}}; - case TypeIndex::Date32: - return bsoncxx::types::b_date{std::chrono::seconds{field.safeGet() * 86400}}; - case TypeIndex::DateTime: - return bsoncxx::types::b_date{std::chrono::seconds{field.safeGet()}}; - case TypeIndex::UUID: - return bsoncxx::types::b_string{static_cast(formatUUID(field.safeGet()))}; - case TypeIndex::Tuple: { - auto arr = array(); - for (const auto & elem : field.safeGet()) - arr.append(fieldAsBSONValue(elem, applyVisitor(FieldToDataType(), elem))); - return arr.view(); - } - case TypeIndex::Array: { - auto arr = array(); - for (const auto & elem : field.safeGet()) - arr.append(fieldAsBSONValue(elem, applyVisitor(FieldToDataType(), elem))); - return arr.view(); - } - default: - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Fields with type '{}' is not supported.", type->getPrettyName()); - } -} - -template -static JSONBuilder::ItemPtr BSONElementAsJSON(const T & value) -{ - switch (value.type()) - { - case bsoncxx::type::k_string: - return std::make_unique(std::string(value.get_string().value)); - case bsoncxx::type::k_symbol: - return std::make_unique(std::string(value.get_string().value)); - case bsoncxx::type::k_oid: - return std::make_unique(value.get_oid().value.to_string()); - case bsoncxx::type::k_binary: - return std::make_unique( - base64Encode(std::string(reinterpret_cast(value.get_binary().bytes), value.get_binary().size))); - case bsoncxx::type::k_bool: - return std::make_unique(value.get_bool()); - case bsoncxx::type::k_int32: - return std::make_unique>(value.get_int32()); - case bsoncxx::type::k_int64: - return std::make_unique>(value.get_int64()); - case bsoncxx::type::k_double: - 
return std::make_unique>(value.get_double()); - case bsoncxx::type::k_date: - return std::make_unique(DateLUT::instance().timeToString(value.get_date().to_int64() / 1000)); - case bsoncxx::type::k_timestamp: - return std::make_unique(DateLUT::instance().timeToString(value.get_timestamp().timestamp)); - case bsoncxx::type::k_document: - { - auto doc = std::make_unique(); - for (const auto & elem : value.get_document().value) - doc->add(std::string(elem.key()), BSONElementAsJSON(elem)); - return doc; - } - case bsoncxx::type::k_array: - { - auto arr = std::make_unique(); - for (const auto & elem : value.get_array().value) - arr->add(BSONElementAsJSON(elem)); - return arr; - } - case bsoncxx::type::k_regex: - { - auto doc = std::make_unique(); - doc->add(std::string(value.get_regex().regex), std::string(value.get_regex().options)); - return doc; - } - case bsoncxx::type::k_dbpointer: - { - auto doc = std::make_unique(); - doc->add(value.get_dbpointer().value.to_string(), std::string(value.get_dbpointer().collection)); - return doc; - } - case bsoncxx::type::k_null: - return std::make_unique(); - - default: - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Serialization BSON type '{}' is not supported", bsoncxx::to_string(value.type())); - } -} - -template -static std::string BSONElementAsString(const T & value, const JSONBuilder::FormatSettings & json_format_settings) -{ - switch (value.type()) - { - case bsoncxx::type::k_string: - return std::string(value.get_string().value); - case bsoncxx::type::k_oid: - return value.get_oid().value.to_string(); - case bsoncxx::type::k_binary: - return std::string(reinterpret_cast(value.get_binary().bytes), value.get_binary().size); - case bsoncxx::type::k_bool: - return value.get_bool().value ? "true" : "false"; - case bsoncxx::type::k_int32: - return std::to_string(static_cast(value.get_int32().value)); - case bsoncxx::type::k_int64: - return std::to_string(value.get_int64().value); - case bsoncxx::type::k_double: - return std::to_string(value.get_double().value); - case bsoncxx::type::k_decimal128: - return value.get_decimal128().value.to_string(); - case bsoncxx::type::k_date: - return DateLUT::instance().timeToString(value.get_date().to_int64() / 1000); - case bsoncxx::type::k_timestamp: - return DateLUT::instance().timeToString(value.get_timestamp().timestamp); - // MongoDB's documents and arrays may not have strict types or be nested, so the most optimal solution is store their JSON representations. - // bsoncxx::to_json function will return something like "'number': {'$numberInt': '321'}", this why we have to use own implementation. 
- case bsoncxx::type::k_document: - case bsoncxx::type::k_array: - case bsoncxx::type::k_regex: - case bsoncxx::type::k_dbpointer: - case bsoncxx::type::k_symbol: - { - WriteBufferFromOwnString buf; - auto format_context = JSONBuilder::FormatContext{.out = buf}; - BSONElementAsJSON(value)->format(json_format_settings, format_context); - return buf.str(); - } - case bsoncxx::type::k_undefined: - return "undefined"; - case bsoncxx::type::k_null: - return "null"; - default: - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "BSON type {} is unserializable.", bsoncxx::to_string(value.type())); - } -} - -template -static T BSONElementAsNumber(const T2 & value, const std::string & name) -{ - switch (value.type()) - { - case bsoncxx::type::k_bool: - return static_cast(value.get_bool()); - case bsoncxx::type::k_int32: - return static_cast(value.get_int32()); - case bsoncxx::type::k_int64: - return static_cast(value.get_int64()); - case bsoncxx::type::k_double: - return static_cast(value.get_double()); - default: - throw Exception( - ErrorCodes::TYPE_MISMATCH, - "Type mismatch, {} cannot be converted to number for column {}.", - bsoncxx::to_string(value.type()), - name); - } -} - -static Array BSONArrayAsArray( - size_t dimensions, - const bsoncxx::types::b_array & array, - const DataTypePtr & type, - const Field & default_value, - const std::string & name, - const JSONBuilder::FormatSettings & json_format_settings) -{ - auto arr = Array(); - if (dimensions > 0) - { - --dimensions; - for (auto const & elem : array.value) - { - if (elem.type() != bsoncxx::type::k_array) - throw Exception(ErrorCodes::TYPE_MISMATCH, "Array {} have less dimensions then defined in the schema.", name); - - arr.emplace_back(BSONArrayAsArray(dimensions, elem.get_array(), type, default_value, name, json_format_settings)); - } - } - else - { - for (auto const & value : array.value) - { - if (value.type() == bsoncxx::type::k_null) - arr.emplace_back(default_value); - else - { - switch (type->getTypeId()) - { - case TypeIndex::Int8: - arr.emplace_back(BSONElementAsNumber(value, name)); - break; - case TypeIndex::UInt8: - arr.emplace_back(BSONElementAsNumber(value, name)); - break; - case TypeIndex::Int16: - arr.emplace_back(BSONElementAsNumber(value, name)); - break; - case TypeIndex::UInt16: - arr.emplace_back(BSONElementAsNumber(value, name)); - break; - case TypeIndex::Int32: - arr.emplace_back(BSONElementAsNumber(value, name)); - break; - case TypeIndex::UInt32: - arr.emplace_back(BSONElementAsNumber(value, name)); - break; - case TypeIndex::Int64: - arr.emplace_back(BSONElementAsNumber(value, name)); - break; - case TypeIndex::UInt64: - arr.emplace_back(BSONElementAsNumber(value, name)); - break; - case TypeIndex::Int128: - arr.emplace_back(BSONElementAsNumber(value, name)); - break; - case TypeIndex::UInt128: - arr.emplace_back(BSONElementAsNumber(value, name)); - break; - case TypeIndex::Int256: - arr.emplace_back(BSONElementAsNumber(value, name)); - break; - case TypeIndex::UInt256: - arr.emplace_back(BSONElementAsNumber(value, name)); - break; - case TypeIndex::Float32: - arr.emplace_back(BSONElementAsNumber(value, name)); - break; - case TypeIndex::Float64: - arr.emplace_back(BSONElementAsNumber(value, name)); - break; - case TypeIndex::Date: { - if (value.type() != bsoncxx::type::k_date) - throw Exception( - ErrorCodes::TYPE_MISMATCH, - "Type mismatch, expected date, got {} for column {}.", - bsoncxx::to_string(value.type()), - name); - - arr.emplace_back(DateLUT::instance().toDayNum(value.get_date().to_int64() / 
1000).toUnderType()); - break; - } - case TypeIndex::Date32: { - if (value.type() != bsoncxx::type::k_date) - throw Exception( - ErrorCodes::TYPE_MISMATCH, - "Type mismatch, expected date, got {} for column {}.", - bsoncxx::to_string(value.type()), - name); - - arr.emplace_back(DateLUT::instance().toDayNum(value.get_date().to_int64() / 1000).toUnderType()); - break; - } - case TypeIndex::DateTime: { - if (value.type() != bsoncxx::type::k_date) - throw Exception( - ErrorCodes::TYPE_MISMATCH, - "Type mismatch, expected date, got {} for column {}.", - bsoncxx::to_string(value.type()), - name); - - arr.emplace_back(static_cast(value.get_date().to_int64() / 1000)); - break; - } - case TypeIndex::DateTime64: { - if (value.type() != bsoncxx::type::k_date) - throw Exception( - ErrorCodes::TYPE_MISMATCH, - "Type mismatch, expected date, got {} for column {}.", - bsoncxx::to_string(value.type()), - name); - - arr.emplace_back(static_cast(value.get_date().to_int64())); - break; - } - case TypeIndex::UUID: { - if (value.type() != bsoncxx::type::k_string) - throw Exception( - ErrorCodes::TYPE_MISMATCH, - "Type mismatch, expected string (UUID), got {} for column {}.", - bsoncxx::to_string(value.type()), - name); - - arr.emplace_back(parse(value.get_string().value.data())); - break; - } - case TypeIndex::String: - arr.emplace_back(BSONElementAsString(value, json_format_settings)); - break; - default: - throw Exception( - ErrorCodes::NOT_IMPLEMENTED, - "Array {} has unsupported nested type {}.", - name, - type->getName()); - } - } - } - } - return arr; -} - -static bsoncxx::types::bson_value::value fieldAsOID(const Field & field) -{ - switch (field.getType()) - { - case Field::Types::String: - return bsoncxx::oid(field.safeGet()); - case Field::Types::Array: { - auto arr = array(); - for (const auto & elem : field.safeGet()) - arr.append(fieldAsOID(elem)); - return arr.view(); - } - case Field::Types::Tuple: { - auto tuple = array(); - for (const auto & elem : field.safeGet()) - tuple.append(fieldAsOID(elem)); - return tuple.view(); - } - default: - throw Exception(ErrorCodes::TYPE_MISMATCH, "{} can't be converted to oid.", field.getType()); - } -} -} - -} -#endif diff --git a/src/Common/JSONBuilder.cpp b/src/Common/JSONBuilder.cpp index e797671d571..30bec88003e 100644 --- a/src/Common/JSONBuilder.cpp +++ b/src/Common/JSONBuilder.cpp @@ -35,7 +35,7 @@ void JSONArray::format(const FormatSettings & settings, FormatContext & context) context.offset += settings.indent; - bool single_row = settings.solid || (settings.print_simple_arrays_in_single_row && isSimpleArray(values)); + bool single_row = settings.print_simple_arrays_in_single_row && isSimpleArray(values); bool first = true; for (const auto & value : values) @@ -48,7 +48,7 @@ void JSONArray::format(const FormatSettings & settings, FormatContext & context) writeChar('\n', context.out); writeChar(' ', context.offset, context.out); } - else if (!first && !settings.solid) + else if (!first) writeChar(' ', context.out); first = false; @@ -80,33 +80,20 @@ void JSONMap::format(const FormatSettings & settings, FormatContext & context) writeChar(',', context.out); first = false; - if (!settings.solid) - { - writeChar('\n', context.out); - writeChar(' ', context.offset, context.out); - } + writeChar('\n', context.out); + writeChar(' ', context.offset, context.out); writeJSONString(value.key, context.out, settings.settings); writeChar(':', context.out); - if (!settings.solid) - writeChar(' ', context.out); - + writeChar(' ', context.out); 
value.value->format(settings, context); } context.offset -= settings.indent; - if (!settings.solid) - { - writeChar('\n', context.out); - writeChar(' ', context.offset, context.out); - } + writeChar('\n', context.out); + writeChar(' ', context.offset, context.out); writeChar('}', context.out); } -void JSONNull::format(const FormatSettings &, FormatContext & context) -{ - writeString("null", context.out); -} - } diff --git a/src/Common/JSONBuilder.h b/src/Common/JSONBuilder.h index c69d3b2530b..1690ae7bb77 100644 --- a/src/Common/JSONBuilder.h +++ b/src/Common/JSONBuilder.h @@ -13,7 +13,6 @@ struct FormatSettings const DB::FormatSettings & settings; size_t indent = 2; bool print_simple_arrays_in_single_row = true; - bool solid = false; // the output will not contain spaces and line breaks }; struct FormatContext @@ -112,10 +111,4 @@ private: std::vector values; }; -class JSONNull : public IItem -{ -public: - void format(const FormatSettings & settings, FormatContext & context) override; -}; - } diff --git a/src/Common/config.h.in b/src/Common/config.h.in index 86ac054a62c..2e3b8d84366 100644 --- a/src/Common/config.h.in +++ b/src/Common/config.h.in @@ -67,7 +67,6 @@ #cmakedefine01 USE_LIBARCHIVE #cmakedefine01 USE_POCKETFFT #cmakedefine01 USE_PROMETHEUS_PROTOBUFS -#cmakedefine01 USE_MONGODB #cmakedefine01 USE_NUMACTL /// This is needed for .incbin in assembly. For some reason, include paths don't work there in presence of LTO. diff --git a/src/Common/maskURIPassword.h b/src/Common/maskURIPassword.h deleted file mode 100644 index 56623dbebcd..00000000000 --- a/src/Common/maskURIPassword.h +++ /dev/null @@ -1,14 +0,0 @@ -#pragma once - -#include - - -namespace DB -{ - -inline bool maskURIPassword(std::string * uri) -{ - return RE2::Replace(uri, R"(([^:]+://[^:]*):([^@]*)@(.*))", "\\1:[HIDDEN]@\\3"); -} - -} diff --git a/src/Core/ServerSettings.h b/src/Core/ServerSettings.h index f3059c5370b..13f8373f1ce 100644 --- a/src/Core/ServerSettings.h +++ b/src/Core/ServerSettings.h @@ -173,8 +173,7 @@ namespace DB M(Double, gwp_asan_force_sample_probability, 0.0003, "Probability that an allocation from specific places will be sampled by GWP Asan (i.e. PODArray allocations)", 0) \ M(UInt64, config_reload_interval_ms, 2000, "How often clickhouse will reload config and check for new changes", 0) \ M(UInt64, memory_worker_period_ms, 0, "Tick period of background memory worker which corrects memory tracker memory usages and cleans up unused pages during higher memory usage. If set to 0, default value will be used depending on the memory usage source", 0) \ - M(Bool, disable_insertion_and_mutation, false, "Disable all insert/alter/delete queries. This setting will be enabled if someone needs read-only nodes to prevent insertion and mutation affect reading performance.", 0) \ - M(Bool, use_legacy_mongodb_integration, true, "Use the legacy MongoDB integration implementation. Deprecated.", 0) + M(Bool, disable_insertion_and_mutation, false, "Disable all insert/alter/delete queries. 
This setting will be enabled if someone needs read-only nodes to prevent insertion and mutation affect reading performance.", 0) /// If you add a setting which can be updated at runtime, please update 'changeable_settings' map in StorageSystemServerSettings.cpp diff --git a/src/Core/Settings.cpp b/src/Core/Settings.cpp index af279bb0712..d0ce90e6fdd 100644 --- a/src/Core/Settings.cpp +++ b/src/Core/Settings.cpp @@ -918,7 +918,6 @@ namespace ErrorCodes M(Bool, restore_replace_external_engines_to_null, false, "Replace all the external table engines to Null on restore. Useful for testing purposes", 0) \ M(Bool, restore_replace_external_table_functions_to_null, false, "Replace all table functions to Null on restore. Useful for testing purposes", 0) \ M(Bool, create_if_not_exists, false, "Enable IF NOT EXISTS for CREATE statements by default", 0) \ - M(Bool, mongodb_throw_on_unsupported_query, true, "If enabled, MongoDB tables will return an error when a MongoDB query cannot be built. Otherwise, ClickHouse reads the full table and processes it locally. This option does not apply to the legacy implementation or when 'allow_experimental_analyzer=0'.", 0) \ \ \ /* ###################################### */ \ diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index c349aebf27a..560f144866b 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -85,8 +85,7 @@ static std::initializer_list -#include #include - -#include - -using bsoncxx::builder::basic::kvp; -using bsoncxx::builder::basic::make_document; -using bsoncxx::builder::basic::array; -#endif +#include +#include +#include "DictionarySourceFactory.h" +#include "DictionaryStructure.h" namespace DB { -namespace ErrorCodes -{ - #if USE_MONGODB - extern const int UNSUPPORTED_METHOD; - extern const int LOGICAL_ERROR; - #else - extern const int SUPPORT_IS_DISABLED; - #endif -} - void registerDictionarySourceMongoDB(DictionarySourceFactory & factory) { - #if USE_MONGODB - auto create_dictionary_source = []( + auto create_mongo_db_dictionary = []( const DictionaryStructure & dict_struct, const Poco::Util::AbstractConfiguration & config, const std::string & root_config_prefix, Block & sample_block, ContextPtr context, const std::string & /* default_database */, - bool /* created_from_ddl */) + bool created_from_ddl) { const auto config_prefix = root_config_prefix + ".mongodb"; - auto configuration = std::make_shared(); - if (auto named_collection = tryGetNamedCollectionWithOverrides(config, config_prefix, context)) + auto named_collection = created_from_ddl ? 
tryGetNamedCollectionWithOverrides(config, config_prefix, context) : nullptr; + + String host, username, password, database, method, options, collection; + UInt16 port; + if (named_collection) { - if (named_collection->has("uri")) - { - validateNamedCollection(*named_collection, {"collection"}, {}); - configuration->uri = std::make_unique(named_collection->get("uri")); - } - else - { - validateNamedCollection(*named_collection, {"host", "db", "collection"}, {"port", "user", "password", "options"}); - String user = named_collection->get("user"); - String auth_string; - if (!user.empty()) - auth_string = fmt::format("{}:{}@", user, named_collection->get("password")); - configuration->uri = std::make_unique(fmt::format("mongodb://{}{}:{}/{}?{}", - auth_string, - named_collection->get("host"), - named_collection->getOrDefault("port", "27017"), - named_collection->get("db"), - named_collection->getOrDefault("options", ""))); - } - configuration->collection = named_collection->get("collection"); + validateNamedCollection( + *named_collection, + /* required_keys */{"collection"}, + /* optional_keys */ValidateKeysMultiset{ + "host", "port", "user", "password", "db", "database", "uri", "name", "method", "options"}); + + host = named_collection->getOrDefault("host", ""); + port = static_cast(named_collection->getOrDefault("port", 0)); + username = named_collection->getOrDefault("user", ""); + password = named_collection->getOrDefault("password", ""); + database = named_collection->getAnyOrDefault({"db", "database"}, ""); + method = named_collection->getOrDefault("method", ""); + collection = named_collection->getOrDefault("collection", ""); + options = named_collection->getOrDefault("options", ""); } else { - configuration->collection = config.getString(config_prefix + ".collection"); - auto uri_str = config.getString(config_prefix + ".uri", ""); - if (!uri_str.empty()) - configuration->uri = std::make_unique(uri_str); - else - { - String user = config.getString(config_prefix + ".user", ""); - String auth_string; - if (!user.empty()) - auth_string = fmt::format("{}:{}@", user, config.getString(config_prefix + ".password", "")); - configuration->uri = std::make_unique(fmt::format("mongodb://{}{}:{}/{}?{}", - auth_string, - config.getString(config_prefix + ".host"), - config.getString(config_prefix + ".port", "27017"), - config.getString(config_prefix + ".db"), - config.getString(config_prefix + ".options", ""))); - } + host = config.getString(config_prefix + ".host", ""); + port = config.getUInt(config_prefix + ".port", 0); + username = config.getString(config_prefix + ".user", ""); + password = config.getString(config_prefix + ".password", ""); + database = config.getString(config_prefix + ".db", ""); + method = config.getString(config_prefix + ".method", ""); + collection = config.getString(config_prefix + ".collection"); + options = config.getString(config_prefix + ".options", ""); } - configuration->checkHosts(context); + if (created_from_ddl) + context->getRemoteHostFilter().checkHostAndPort(host, toString(port)); - return std::make_unique(dict_struct, std::move(configuration), std::move(sample_block)); + return std::make_unique( + dict_struct, + config.getString(config_prefix + ".uri", ""), + host, + port, + username, + password, + method, + database, + collection, + options, + sample_block); }; - #else - auto create_dictionary_source = []( - const DictionaryStructure & /* dict_struct */, - const Poco::Util::AbstractConfiguration & /* config */, - const std::string & /* root_config_prefix */, - 
Block & /* sample_block */, - ContextPtr /* context */, - const std::string & /* default_database */, - bool /* created_from_ddl */) -> DictionarySourcePtr - { - throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, - "Dictionary source of type `mongodb` is disabled because ClickHouse was built without mongodb support."); - }; - #endif - factory.registerSource("mongodb", create_dictionary_source); + factory.registerSource("mongodb", create_mongo_db_dictionary); } -#if USE_MONGODB +} + +#include +#include +#include +#include +#include +#include +#include +#include + +// only after poco +// naming conflict: +// Poco/MongoDB/BSONWriter.h:54: void writeCString(const std::string & value); +// src/IO/WriteHelpers.h:146 #define writeCString(s, buf) +#include + + +namespace DB +{ +namespace ErrorCodes +{ + extern const int NOT_IMPLEMENTED; + extern const int UNSUPPORTED_METHOD; + extern const int MONGODB_CANNOT_AUTHENTICATE; +} + + static const UInt64 max_block_size = 8192; MongoDBDictionarySource::MongoDBDictionarySource( const DictionaryStructure & dict_struct_, - std::shared_ptr configuration_, - Block sample_block_) + const std::string & uri_, + const std::string & host_, + UInt16 port_, + const std::string & user_, + const std::string & password_, + const std::string & method_, + const std::string & db_, + const std::string & collection_, + const std::string & options_, + const Block & sample_block_) : dict_struct{dict_struct_} - , configuration{configuration_} + , uri{uri_} + , host{host_} + , port{port_} + , user{user_} + , password{password_} + , method{method_} + , db{db_} + , collection{collection_} + , options(options_) , sample_block{sample_block_} + , connection{std::make_shared()} { + + StorageMongoDBSocketFactory socket_factory; + if (!uri.empty()) + { + // Connect with URI. + connection->connect(uri, socket_factory); + + Poco::URI poco_uri(connection->uri()); + + // Parse database from URI. This is required for correctness -- the + // cursor is created using database name and collection name, so we have + // to specify them properly. + db = poco_uri.getPath(); + // getPath() may return a leading slash, remove it. + if (!db.empty() && db[0] == '/') + { + db.erase(0, 1); + } + + // Parse some other parts from URI, for logging and display purposes. + host = poco_uri.getHost(); + port = poco_uri.getPort(); + user = poco_uri.getUserInfo(); + if (size_t separator = user.find(':'); separator != std::string::npos) + { + user.resize(separator); + } + } + else + { + // Connect with host/port/user/etc through constructing the uri + std::string uri_constructed("mongodb://" + host + ":" + std::to_string(port) + "/" + db + (options.empty() ? "" : "?" + options)); + connection->connect(uri_constructed, socket_factory); + + if (!user.empty()) + { + Poco::MongoDB::Database poco_db(db); + if (!poco_db.authenticate(*connection, user, password, method.empty() ? 
Poco::MongoDB::Database::AUTH_SCRAM_SHA1 : method)) + throw Exception(ErrorCodes::MONGODB_CANNOT_AUTHENTICATE, "Cannot authenticate in MongoDB, incorrect user or password"); + } + } } + MongoDBDictionarySource::MongoDBDictionarySource(const MongoDBDictionarySource & other) - : MongoDBDictionarySource{other.dict_struct, other.configuration, other.sample_block} + : MongoDBDictionarySource{ + other.dict_struct, other.uri, other.host, other.port, other.user, other.password, other.method, other.db, + other.collection, other.options, other.sample_block + } { } @@ -132,7 +185,7 @@ MongoDBDictionarySource::~MongoDBDictionarySource() = default; QueryPipeline MongoDBDictionarySource::loadAll() { - return QueryPipeline(std::make_shared(*configuration->uri, configuration->collection, make_document(), mongocxx::options::find(), sample_block, max_block_size)); + return QueryPipeline(std::make_shared(connection, db, collection, Poco::MongoDB::Document{}, sample_block, max_block_size)); } QueryPipeline MongoDBDictionarySource::loadIds(const std::vector & ids) @@ -140,11 +193,19 @@ QueryPipeline MongoDBDictionarySource::loadIds(const std::vector & ids) if (!dict_struct.id) throw Exception(ErrorCodes::UNSUPPORTED_METHOD, "'id' is required for selective loading"); - auto ids_array = array(); - for (const auto & id : ids) - ids_array.append(static_cast(id)); + Poco::MongoDB::Document query; - return QueryPipeline(std::make_shared(*configuration->uri, configuration->collection, make_document(kvp(dict_struct.id->name, make_document(kvp("$in", ids_array)))), mongocxx::options::find(), sample_block, max_block_size)); + /** NOTE: While building array, Poco::MongoDB requires passing of different unused element names, along with values. + * In general, Poco::MongoDB is quite inefficient and bulky. 
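The constructor above takes either a complete MongoDB URI (in which case the database name is recovered from the URI path and host/port/user are re-parsed only for logging) or discrete host/port/db settings, in which case the URI is assembled by hand and the user is authenticated explicitly. A condensed sketch of those two paths, using Poco's stock SocketFactory (plain TCP only) in place of the socket factory used here; the helper name connectToMongo is illustrative:

    // Condensed sketch of the two connection paths used by the constructor above.
    // Poco's default SocketFactory stands in for the ClickHouse socket factory.
    #include <Poco/MongoDB/Connection.h>
    #include <Poco/MongoDB/Database.h>
    #include <Poco/URI.h>
    #include <cstdint>
    #include <memory>
    #include <stdexcept>
    #include <string>

    std::shared_ptr<Poco::MongoDB::Connection> connectToMongo(
        const std::string & uri, std::string & host, std::uint16_t & port, std::string & user,
        const std::string & password, const std::string & method, std::string & db, const std::string & options)
    {
        auto connection = std::make_shared<Poco::MongoDB::Connection>();
        Poco::MongoDB::Connection::SocketFactory socket_factory;

        if (!uri.empty())
        {
            /// Full URI given: connect with it, then recover db/host/port/user
            /// because the cursor is created from database and collection names.
            connection->connect(uri, socket_factory);

            Poco::URI poco_uri(uri);
            db = poco_uri.getPath();
            if (!db.empty() && db[0] == '/')
                db.erase(0, 1);        /// getPath() keeps the leading slash.

            host = poco_uri.getHost();
            port = poco_uri.getPort();
            user = poco_uri.getUserInfo();
            if (auto sep = user.find(':'); sep != std::string::npos)
                user.resize(sep);      /// Drop the password part of user:password.
        }
        else
        {
            /// Discrete settings given: assemble the URI and authenticate explicitly.
            connection->connect(
                "mongodb://" + host + ":" + std::to_string(port) + "/" + db
                    + (options.empty() ? "" : "?" + options),
                socket_factory);

            if (!user.empty())
            {
                Poco::MongoDB::Database poco_db(db);
                if (!poco_db.authenticate(*connection, user, password,
                        method.empty() ? Poco::MongoDB::Database::AUTH_SCRAM_SHA1 : method))
                    throw std::runtime_error("MongoDB authentication failed for user " + user);
            }
        }
        return connection;
    }
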
+ */ + + Poco::MongoDB::Array::Ptr ids_array(new Poco::MongoDB::Array); + for (const UInt64 id : ids) + ids_array->add(DB::toString(id), static_cast(id)); + + query.addNewDocument(dict_struct.id->name).add("$in", ids_array); + + return QueryPipeline(std::make_shared(connection, db, collection, query, sample_block, max_block_size)); } @@ -153,41 +214,68 @@ QueryPipeline MongoDBDictionarySource::loadKeys(const Columns & key_columns, con if (!dict_struct.key) throw Exception(ErrorCodes::UNSUPPORTED_METHOD, "'key' is required for selective loading"); - if (key_columns.size() != dict_struct.key->size()) - throw Exception(ErrorCodes::LOGICAL_ERROR, "The size of key_columns does not equal to the size of dictionary key"); + Poco::MongoDB::Document query; + Poco::MongoDB::Array::Ptr keys_array(new Poco::MongoDB::Array); - auto keys = array(); - for (const auto & row : requested_rows) + for (const auto row_idx : requested_rows) { - auto key = array(); - for (size_t i = 0; i < key_columns.size(); i++) - { - const auto & dict_key = dict_struct.key->at(i); - WhichDataType type(dict_key.type); + auto & key = keys_array->addNewDocument(DB::toString(row_idx)); - if (isBool(dict_key.type)) - key.append(make_document(kvp(dict_key.name, key_columns[i]->getBool(row)))); - else if (type.isUInt()) - key.append(make_document(kvp(dict_key.name, static_cast(key_columns[i]->getUInt(row))))); - else if (type.isFloat64()) - key.append(make_document(kvp(dict_key.name, key_columns[i]->getFloat64(row)))); - else if (type.isInt()) - key.append(make_document(kvp(dict_key.name, key_columns[i]->getInt(row)))); - else if (type.isString()) - key.append(make_document(kvp(dict_key.name, key_columns[i]->getDataAt(row).toString()))); - else - throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected type '{}' of key in MongoDB dictionary", dict_key.type->getName()); + const auto & key_attributes = *dict_struct.key; + for (size_t attribute_index = 0; attribute_index < key_attributes.size(); ++attribute_index) + { + const auto & key_attribute = key_attributes[attribute_index]; + + switch (key_attribute.underlying_type) + { + case AttributeUnderlyingType::UInt8: + case AttributeUnderlyingType::UInt16: + case AttributeUnderlyingType::UInt32: + case AttributeUnderlyingType::UInt64: + case AttributeUnderlyingType::Int8: + case AttributeUnderlyingType::Int16: + case AttributeUnderlyingType::Int32: + case AttributeUnderlyingType::Int64: + { + key.add(key_attribute.name, static_cast(key_columns[attribute_index]->get64(row_idx))); + break; + } + case AttributeUnderlyingType::Float32: + case AttributeUnderlyingType::Float64: + { + key.add(key_attribute.name, key_columns[attribute_index]->getFloat64(row_idx)); + break; + } + case AttributeUnderlyingType::String: + { + String loaded_str((*key_columns[attribute_index])[row_idx].safeGet()); + /// Convert string to ObjectID + if (key_attribute.is_object_id) + { + Poco::MongoDB::ObjectId::Ptr loaded_id(new Poco::MongoDB::ObjectId(loaded_str)); + key.add(key_attribute.name, loaded_id); + } + else + { + key.add(key_attribute.name, loaded_str); + } + break; + } + default: + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Unsupported dictionary attribute type for MongoDB dictionary source"); + } } - keys.append(make_document(kvp("$and", key))); } - return QueryPipeline(std::make_shared(*configuration->uri, configuration->collection, make_document(kvp("$or", keys)), mongocxx::options::find(), sample_block, max_block_size)); + /// If more than one key we should use $or + query.add("$or", keys_array); + + 
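loadKeys above turns every requested row into its own sub-document inside a Poco::MongoDB::Array and wraps them all in a single "$or", so one round trip covers every key. A sketch of the filter shape it produces, for a hypothetical composite key (id UInt64, name String) and two requested rows; the field names and values are placeholders:

    // Sketch only: the {"$or": [...]} filter shape built by loadKeys above.
    #include <Poco/MongoDB/Array.h>
    #include <Poco/MongoDB/Document.h>
    #include <Poco/Types.h>
    #include <string>

    Poco::MongoDB::Document buildKeysFilter()
    {
        Poco::MongoDB::Document query;
        Poco::MongoDB::Array::Ptr keys_array(new Poco::MongoDB::Array);

        /// One sub-document per requested row; the array element names
        /// ("0", "1", ...) are required by the API but ignored by the server.
        auto & key0 = keys_array->addNewDocument("0");
        key0.add("id", Poco::Int64(1));
        key0.add("name", std::string("alice"));

        auto & key1 = keys_array->addNewDocument("1");
        key1.add("id", Poco::Int64(2));
        key1.add("name", std::string("bob"));

        /// Serializes to {"$or": [{"id": 1, "name": "alice"}, {"id": 2, "name": "bob"}]}.
        query.add("$or", keys_array);
        return query;
    }
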
return QueryPipeline(std::make_shared(connection, db, collection, query, sample_block, max_block_size)); } std::string MongoDBDictionarySource::toString() const { - return fmt::format("MongoDB: {}", configuration->uri->to_string()); + return fmt::format("MongoDB: {}.{},{}{}:{}", db, collection, (user.empty() ? " " : " " + user + '@'), host, port); } -#endif } diff --git a/src/Dictionaries/MongoDBDictionarySource.h b/src/Dictionaries/MongoDBDictionarySource.h index 45f22e8908f..6d93bc6c090 100644 --- a/src/Dictionaries/MongoDBDictionarySource.h +++ b/src/Dictionaries/MongoDBDictionarySource.h @@ -1,13 +1,23 @@ #pragma once -#include "config.h" +#include +#include -#if USE_MONGODB #include "DictionaryStructure.h" #include "IDictionarySource.h" -#include -#include +namespace Poco +{ +namespace Util +{ + class AbstractConfiguration; +} + +namespace MongoDB +{ + class Connection; +} +} namespace DB { @@ -22,8 +32,16 @@ class MongoDBDictionarySource final : public IDictionarySource public: MongoDBDictionarySource( const DictionaryStructure & dict_struct_, - std::shared_ptr configuration_, - Block sample_block_); + const std::string & uri_, + const std::string & host_, + UInt16 port_, + const std::string & user_, + const std::string & password_, + const std::string & method_, + const std::string & db_, + const std::string & collection_, + const std::string & options, + const Block & sample_block_); MongoDBDictionarySource(const MongoDBDictionarySource & other); @@ -45,7 +63,7 @@ public: /// @todo: for MongoDB, modification date can somehow be determined from the `_id` object field bool isModified() const override { return true; } - /// Not yet supported + ///Not yet supported bool hasUpdateField() const override { return false; } DictionarySourcePtr clone() const override { return std::make_shared(*this); } @@ -54,9 +72,18 @@ public: private: const DictionaryStructure dict_struct; - const std::shared_ptr configuration; + const std::string uri; + std::string host; + UInt16 port; + std::string user; + const std::string password; + const std::string method; + std::string db; + const std::string collection; + const std::string options; Block sample_block; + + std::shared_ptr connection; }; } -#endif diff --git a/src/Dictionaries/MongoDBPocoLegacyDictionarySource.cpp b/src/Dictionaries/MongoDBPocoLegacyDictionarySource.cpp deleted file mode 100644 index 4495215d826..00000000000 --- a/src/Dictionaries/MongoDBPocoLegacyDictionarySource.cpp +++ /dev/null @@ -1,305 +0,0 @@ -#include "config.h" - -#include "DictionarySourceFactory.h" -#if USE_MONGODB -#include -#include "MongoDBPocoLegacyDictionarySource.h" -#include "DictionaryStructure.h" -#include "registerDictionaries.h" -#include -#include -#endif - -namespace DB -{ - -namespace ErrorCodes -{ -#if USE_MONGODB -extern const int NOT_IMPLEMENTED; -extern const int UNSUPPORTED_METHOD; -extern const int MONGODB_CANNOT_AUTHENTICATE; -#else -extern const int SUPPORT_IS_DISABLED; -#endif -} - -void registerDictionarySourceMongoDBPocoLegacy(DictionarySourceFactory & factory) -{ - #if USE_MONGODB - auto create_mongo_db_dictionary = []( - const DictionaryStructure & dict_struct, - const Poco::Util::AbstractConfiguration & config, - const std::string & root_config_prefix, - Block & sample_block, - ContextPtr context, - const std::string & /* default_database */, - bool created_from_ddl) - { - const auto config_prefix = root_config_prefix + ".mongodb"; - auto named_collection = created_from_ddl ? 
tryGetNamedCollectionWithOverrides(config, config_prefix, context) : nullptr; - - String host, username, password, database, method, options, collection; - UInt16 port; - if (named_collection) - { - validateNamedCollection( - *named_collection, - /* required_keys */{"collection"}, - /* optional_keys */ValidateKeysMultiset{ - "host", "port", "user", "password", "db", "database", "uri", "name", "method", "options"}); - - host = named_collection->getOrDefault("host", ""); - port = static_cast(named_collection->getOrDefault("port", 0)); - username = named_collection->getOrDefault("user", ""); - password = named_collection->getOrDefault("password", ""); - database = named_collection->getAnyOrDefault({"db", "database"}, ""); - method = named_collection->getOrDefault("method", ""); - collection = named_collection->getOrDefault("collection", ""); - options = named_collection->getOrDefault("options", ""); - } - else - { - host = config.getString(config_prefix + ".host", ""); - port = config.getUInt(config_prefix + ".port", 0); - username = config.getString(config_prefix + ".user", ""); - password = config.getString(config_prefix + ".password", ""); - database = config.getString(config_prefix + ".db", ""); - method = config.getString(config_prefix + ".method", ""); - collection = config.getString(config_prefix + ".collection"); - options = config.getString(config_prefix + ".options", ""); - } - - if (created_from_ddl) - context->getRemoteHostFilter().checkHostAndPort(host, toString(port)); - - return std::make_unique(dict_struct, - config.getString(config_prefix + ".uri", ""), - host, - port, - username, - password, - method, - database, - collection, - options, - sample_block); - }; - #else - auto create_mongo_db_dictionary = []( - const DictionaryStructure & /* dict_struct */, - const Poco::Util::AbstractConfiguration & /* config */, - const std::string & /* root_config_prefix */, - Block & /* sample_block */, - ContextPtr /* context */, - const std::string & /* default_database */, - bool /* created_from_ddl */) -> DictionarySourcePtr - { - throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, - "Dictionary source of type `mongodb` is disabled because ClickHouse was built without mongodb support."); - }; - #endif - - factory.registerSource("mongodb", create_mongo_db_dictionary); -} - -} - -#if USE_MONGODB -#include -#include -#include -#include -#include -#include -#include -#include - -// only after poco -// naming conflict: -// Poco/MongoDB/BSONWriter.h:54: void writeCString(const std::string & value); -// src/IO/WriteHelpers.h:146 #define writeCString(s, buf) -#include - - -namespace DB -{ -static const UInt64 max_block_size = 8192; - - -MongoDBPocoLegacyDictionarySource::MongoDBPocoLegacyDictionarySource( - const DictionaryStructure & dict_struct_, - const std::string & uri_, - const std::string & host_, - UInt16 port_, - const std::string & user_, - const std::string & password_, - const std::string & method_, - const std::string & db_, - const std::string & collection_, - const std::string & options_, - const Block & sample_block_) - : dict_struct{dict_struct_} - , uri{uri_} - , host{host_} - , port{port_} - , user{user_} - , password{password_} - , method{method_} - , db{db_} - , collection{collection_} - , options(options_) - , sample_block{sample_block_} - , connection{std::make_shared()} -{ - - StorageMongoDBPocoLegacySocketFactory socket_factory; - if (!uri.empty()) - { - // Connect with URI. 
- connection->connect(uri, socket_factory); - - Poco::URI poco_uri(connection->uri()); - - // Parse database from URI. This is required for correctness -- the - // cursor is created using database name and collection name, so we have - // to specify them properly. - db = poco_uri.getPath(); - // getPath() may return a leading slash, remove it. - if (!db.empty() && db[0] == '/') - { - db.erase(0, 1); - } - - // Parse some other parts from URI, for logging and display purposes. - host = poco_uri.getHost(); - port = poco_uri.getPort(); - user = poco_uri.getUserInfo(); - if (size_t separator = user.find(':'); separator != std::string::npos) - { - user.resize(separator); - } - } - else - { - // Connect with host/port/user/etc through constructing the uri - std::string uri_constructed("mongodb://" + host + ":" + std::to_string(port) + "/" + db + (options.empty() ? "" : "?" + options)); - connection->connect(uri_constructed, socket_factory); - - if (!user.empty()) - { - Poco::MongoDB::Database poco_db(db); - if (!poco_db.authenticate(*connection, user, password, method.empty() ? Poco::MongoDB::Database::AUTH_SCRAM_SHA1 : method)) - throw Exception(ErrorCodes::MONGODB_CANNOT_AUTHENTICATE, "Cannot authenticate in MongoDB, incorrect user or password"); - } - } -} - - -MongoDBPocoLegacyDictionarySource::MongoDBPocoLegacyDictionarySource(const MongoDBPocoLegacyDictionarySource & other) - : MongoDBPocoLegacyDictionarySource{ - other.dict_struct, other.uri, other.host, other.port, other.user, other.password, other.method, other.db, - other.collection, other.options, other.sample_block - } -{ -} - -MongoDBPocoLegacyDictionarySource::~MongoDBPocoLegacyDictionarySource() = default; - -QueryPipeline MongoDBPocoLegacyDictionarySource::loadAll() -{ - return QueryPipeline(std::make_shared(connection, db, collection, Poco::MongoDB::Document{}, sample_block, max_block_size)); -} - -QueryPipeline MongoDBPocoLegacyDictionarySource::loadIds(const std::vector & ids) -{ - if (!dict_struct.id) - throw Exception(ErrorCodes::UNSUPPORTED_METHOD, "'id' is required for selective loading"); - - Poco::MongoDB::Document query; - - /** NOTE: While building array, Poco::MongoDB requires passing of different unused element names, along with values. - * In general, Poco::MongoDB is quite inefficient and bulky. 
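As the note above says, Poco::MongoDB::Array::add() insists on a per-element name even though MongoDB ignores it for arrays; loadIds simply reuses the id rendered as text. A minimal sketch of the resulting {"id": {"$in": [...]}} filter, where the field name "id" stands in for dict_struct.id->name:

    // Sketch: builds {"id": {"$in": [42, 43, ...]}} the way loadIds does.
    // The element name passed to add() is mandatory but meaningless to the server,
    // so the stringified value is as good a name as any.
    #include <Poco/MongoDB/Array.h>
    #include <Poco/MongoDB/Document.h>
    #include <Poco/NumberFormatter.h>
    #include <Poco/Types.h>
    #include <vector>

    Poco::MongoDB::Document buildIdsFilter(const std::vector<Poco::Int64> & ids)
    {
        Poco::MongoDB::Document query;
        Poco::MongoDB::Array::Ptr ids_array(new Poco::MongoDB::Array);

        for (auto id : ids)
            ids_array->add(Poco::NumberFormatter::format(id), id);

        /// addNewDocument returns the nested document, so "$in" lands inside "id".
        query.addNewDocument("id").add("$in", ids_array);
        return query;
    }
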
- */ - - Poco::MongoDB::Array::Ptr ids_array(new Poco::MongoDB::Array); - for (const UInt64 id : ids) - ids_array->add(DB::toString(id), static_cast(id)); - - query.addNewDocument(dict_struct.id->name).add("$in", ids_array); - - return QueryPipeline(std::make_shared(connection, db, collection, query, sample_block, max_block_size)); -} - - -QueryPipeline MongoDBPocoLegacyDictionarySource::loadKeys(const Columns & key_columns, const std::vector & requested_rows) -{ - if (!dict_struct.key) - throw Exception(ErrorCodes::UNSUPPORTED_METHOD, "'key' is required for selective loading"); - - Poco::MongoDB::Document query; - Poco::MongoDB::Array::Ptr keys_array(new Poco::MongoDB::Array); - - for (const auto row_idx : requested_rows) - { - auto & key = keys_array->addNewDocument(DB::toString(row_idx)); - - const auto & key_attributes = *dict_struct.key; - for (size_t attribute_index = 0; attribute_index < key_attributes.size(); ++attribute_index) - { - const auto & key_attribute = key_attributes[attribute_index]; - - switch (key_attribute.underlying_type) - { - case AttributeUnderlyingType::UInt8: - case AttributeUnderlyingType::UInt16: - case AttributeUnderlyingType::UInt32: - case AttributeUnderlyingType::UInt64: - case AttributeUnderlyingType::Int8: - case AttributeUnderlyingType::Int16: - case AttributeUnderlyingType::Int32: - case AttributeUnderlyingType::Int64: - { - key.add(key_attribute.name, static_cast(key_columns[attribute_index]->get64(row_idx))); - break; - } - case AttributeUnderlyingType::Float32: - case AttributeUnderlyingType::Float64: - { - key.add(key_attribute.name, key_columns[attribute_index]->getFloat64(row_idx)); - break; - } - case AttributeUnderlyingType::String: - { - String loaded_str((*key_columns[attribute_index])[row_idx].safeGet()); - /// Convert string to ObjectID - if (key_attribute.is_object_id) - { - Poco::MongoDB::ObjectId::Ptr loaded_id(new Poco::MongoDB::ObjectId(loaded_str)); - key.add(key_attribute.name, loaded_id); - } - else - { - key.add(key_attribute.name, loaded_str); - } - break; - } - default: - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Unsupported dictionary attribute type for MongoDB dictionary source"); - } - } - } - - /// If more than one key we should use $or - query.add("$or", keys_array); - - return QueryPipeline(std::make_shared(connection, db, collection, query, sample_block, max_block_size)); -} - -std::string MongoDBPocoLegacyDictionarySource::toString() const -{ - return fmt::format("MongoDB: {}.{},{}{}:{}", db, collection, (user.empty() ? " " : " " + user + '@'), host, port); -} - -} -#endif diff --git a/src/Dictionaries/MongoDBPocoLegacyDictionarySource.h b/src/Dictionaries/MongoDBPocoLegacyDictionarySource.h deleted file mode 100644 index 95dc1194981..00000000000 --- a/src/Dictionaries/MongoDBPocoLegacyDictionarySource.h +++ /dev/null @@ -1,93 +0,0 @@ -#pragma once - -#include "config.h" - -#if USE_MONGODB -#include -#include - -#include "DictionaryStructure.h" -#include "IDictionarySource.h" - -namespace Poco -{ -namespace Util -{ - class AbstractConfiguration; -} - -namespace MongoDB -{ - class Connection; -} -} - -namespace DB -{ -namespace ErrorCodes -{ - extern const int NOT_IMPLEMENTED; -} - -/// Allows loading dictionaries from a MongoDB collection. Deprecated, will be removed soon. 
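When a String key part is flagged with is_object_id, the loadKeys code above wraps it in Poco::MongoDB::ObjectId so the server compares it against a real BSON ObjectId (typically the collection's _id) rather than a plain string. Roughly, with a placeholder 24-character hex value:

    // Sketch mirroring the is_object_id branch above: a key part is added either
    // as a plain string or as a BSON ObjectId built from a 24-character hex string.
    #include <Poco/MongoDB/Document.h>
    #include <Poco/MongoDB/ObjectId.h>
    #include <string>

    void addKeyPart(Poco::MongoDB::Document & key, const std::string & name,
                    const std::string & value, bool is_object_id)
    {
        if (is_object_id)
            key.add(name, Poco::MongoDB::ObjectId::Ptr(new Poco::MongoDB::ObjectId(value)));
        else
            key.add(name, value);
    }

    /// Usage: addKeyPart(key, "_id", "507f1f77bcf86cd799439011", /*is_object_id=*/ true);
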
-class MongoDBPocoLegacyDictionarySource final : public IDictionarySource -{ -public: - MongoDBPocoLegacyDictionarySource( - const DictionaryStructure & dict_struct_, - const std::string & uri_, - const std::string & host_, - UInt16 port_, - const std::string & user_, - const std::string & password_, - const std::string & method_, - const std::string & db_, - const std::string & collection_, - const std::string & options, - const Block & sample_block_); - - MongoDBPocoLegacyDictionarySource(const MongoDBPocoLegacyDictionarySource & other); - - ~MongoDBPocoLegacyDictionarySource() override; - - QueryPipeline loadAll() override; - - QueryPipeline loadUpdatedAll() override - { - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method loadUpdatedAll is unsupported for MongoDBDictionarySource"); - } - - bool supportsSelectiveLoad() const override { return true; } - - QueryPipeline loadIds(const std::vector & ids) override; - - QueryPipeline loadKeys(const Columns & key_columns, const std::vector & requested_rows) override; - - /// @todo: for MongoDB, modification date can somehow be determined from the `_id` object field - bool isModified() const override { return true; } - - ///Not yet supported - bool hasUpdateField() const override { return false; } - - DictionarySourcePtr clone() const override { return std::make_shared(*this); } - - std::string toString() const override; - -private: - const DictionaryStructure dict_struct; - const std::string uri; - std::string host; - UInt16 port; - std::string user; - const std::string password; - const std::string method; - std::string db; - const std::string collection; - const std::string options; - Block sample_block; - - std::shared_ptr connection; -}; - -} -#endif diff --git a/src/Dictionaries/registerDictionaries.cpp b/src/Dictionaries/registerDictionaries.cpp index fdecd540f13..1dcf76e6f64 100644 --- a/src/Dictionaries/registerDictionaries.cpp +++ b/src/Dictionaries/registerDictionaries.cpp @@ -11,7 +11,6 @@ void registerDictionarySourceFile(DictionarySourceFactory & source_factory); void registerDictionarySourceMysql(DictionarySourceFactory & source_factory); void registerDictionarySourceClickHouse(DictionarySourceFactory & source_factory); void registerDictionarySourceMongoDB(DictionarySourceFactory & source_factory); -void registerDictionarySourceMongoDBPocoLegacy(DictionarySourceFactory & source_factory); void registerDictionarySourceCassandra(DictionarySourceFactory & source_factory); void registerDictionarySourceRedis(DictionarySourceFactory & source_factory); void registerDictionarySourceXDBC(DictionarySourceFactory & source_factory); @@ -36,7 +35,7 @@ void registerDictionaryPolygon(DictionaryFactory & factory); void registerDictionaryDirect(DictionaryFactory & factory); -void registerDictionaries(bool use_legacy_mongodb_integration) +void registerDictionaries() { { auto & source_factory = DictionarySourceFactory::instance(); @@ -44,12 +43,7 @@ void registerDictionaries(bool use_legacy_mongodb_integration) registerDictionarySourceFile(source_factory); registerDictionarySourceMysql(source_factory); registerDictionarySourceClickHouse(source_factory); - - if (use_legacy_mongodb_integration) - registerDictionarySourceMongoDBPocoLegacy(source_factory); - else - registerDictionarySourceMongoDB(source_factory); - + registerDictionarySourceMongoDB(source_factory); registerDictionarySourceRedis(source_factory); registerDictionarySourceCassandra(source_factory); registerDictionarySourceXDBC(source_factory); diff --git 
a/src/Dictionaries/registerDictionaries.h b/src/Dictionaries/registerDictionaries.h index 4f82f7b5d29..e8480277c2c 100644 --- a/src/Dictionaries/registerDictionaries.h +++ b/src/Dictionaries/registerDictionaries.h @@ -2,5 +2,5 @@ namespace DB { -void registerDictionaries(bool use_legacy_mongodb_integration); +void registerDictionaries(); } diff --git a/src/Dictionaries/tests/gtest_dictionary_configuration.cpp b/src/Dictionaries/tests/gtest_dictionary_configuration.cpp index 1f803567cf8..08aad663a8c 100644 --- a/src/Dictionaries/tests/gtest_dictionary_configuration.cpp +++ b/src/Dictionaries/tests/gtest_dictionary_configuration.cpp @@ -30,7 +30,7 @@ TEST(ConvertDictionaryAST, SimpleDictConfiguration) { if (!registered) { - registerDictionaries(false); + registerDictionaries(); registered = true; } @@ -103,7 +103,7 @@ TEST(ConvertDictionaryAST, TrickyAttributes) { if (!registered) { - registerDictionaries(false); + registerDictionaries(); registered = true; } @@ -147,7 +147,7 @@ TEST(ConvertDictionaryAST, ComplexKeyAndLayoutWithParams) { if (!registered) { - registerDictionaries(false); + registerDictionaries(); registered = true; } @@ -198,7 +198,7 @@ TEST(ConvertDictionaryAST, ComplexSource) { if (!registered) { - registerDictionaries(false); + registerDictionaries(); registered = true; } diff --git a/src/Interpreters/fuzzers/execute_query_fuzzer.cpp b/src/Interpreters/fuzzers/execute_query_fuzzer.cpp index 908ac6322d0..c29efae1e7d 100644 --- a/src/Interpreters/fuzzers/execute_query_fuzzer.cpp +++ b/src/Interpreters/fuzzers/execute_query_fuzzer.cpp @@ -29,10 +29,10 @@ extern "C" int LLVMFuzzerInitialize(int *, char ***) registerInterpreters(); registerFunctions(); registerAggregateFunctions(); - registerTableFunctions(false); + registerTableFunctions(); registerDatabases(); - registerStorages(false); - registerDictionaries(false); + registerStorages(); + registerDictionaries(); registerDisks(/* global_skip_access_check= */ true); registerFormats(); diff --git a/src/Parsers/ASTFunctionWithKeyValueArguments.cpp b/src/Parsers/ASTFunctionWithKeyValueArguments.cpp index abb2e42a5de..a5467bef363 100644 --- a/src/Parsers/ASTFunctionWithKeyValueArguments.cpp +++ b/src/Parsers/ASTFunctionWithKeyValueArguments.cpp @@ -2,7 +2,6 @@ #include #include -#include #include namespace DB @@ -36,17 +35,6 @@ void ASTPair::formatImpl(const FormatSettings & settings, FormatState & state, F /// SOURCE(CLICKHOUSE(host 'example01-01-1' port 9000 user 'default' password '[HIDDEN]' db 'default' table 'ids')) settings.ostr << "'[HIDDEN]'"; } - else if (!settings.show_secrets && (first == "uri")) - { - // Hide password from URI in the defention of a dictionary - WriteBufferFromOwnString temp_buf; - FormatSettings tmp_settings(temp_buf, settings.one_line); - FormatState tmp_state; - second->formatImpl(tmp_settings, tmp_state, frame); - - maskURIPassword(&temp_buf.str()); - settings.ostr << temp_buf.str(); - } else { second->formatImpl(settings, state, frame); diff --git a/src/Parsers/FunctionSecretArgumentsFinder.h b/src/Parsers/FunctionSecretArgumentsFinder.h index fa1355937ee..01dbf38e97f 100644 --- a/src/Parsers/FunctionSecretArgumentsFinder.h +++ b/src/Parsers/FunctionSecretArgumentsFinder.h @@ -2,7 +2,6 @@ #include #include -#include #include #include #include @@ -89,17 +88,13 @@ protected: void findOrdinaryFunctionSecretArguments() { - if ((function->name() == "mysql") || (function->name() == "postgresql")) + if ((function->name() == "mysql") || (function->name() == "postgresql") || (function->name() == 
"mongodb")) { /// mysql('host:port', 'database', 'table', 'user', 'password', ...) /// postgresql('host:port', 'database', 'table', 'user', 'password', ...) /// mongodb('host:port', 'database', 'collection', 'user', 'password', ...) findMySQLFunctionSecretArguments(); } - else if (function->name() == "mongodb") - { - findMongoDBSecretArguments(); - } else if ((function->name() == "s3") || (function->name() == "cosn") || (function->name() == "oss") || (function->name() == "deltaLake") || (function->name() == "hudi") || (function->name() == "iceberg") || (function->name() == "gcs")) @@ -154,40 +149,6 @@ protected: } } - void findMongoDBSecretArguments() - { - String uri; - - if (isNamedCollectionName(0)) - { - /// MongoDB(named_collection, ..., password = 'password', ...) - if (findSecretNamedArgument("password", 1)) - return; - - /// MongoDB(named_collection, ..., uri = 'mongodb://username:password@127.0.0.1:27017', ...) - findNamedArgument(&uri, "uri", 1); - result.are_named = true; - result.start = 1; - } - else if (function->arguments->size() == 2) - { - tryGetStringFromArgument(0, &uri); - result.are_named = false; - result.start = 0; - } - else - { - // MongoDB('127.0.0.1:27017', 'database', 'collection', 'user, 'password'...) - markSecretArgument(4, false); - return; - } - - chassert(result.count == 0); - maskURIPassword(&uri); - result.count = 1; - result.replacement = std::move(uri); - } - /// Returns the number of arguments excluding "headers" and "extra_credentials" (which should /// always be at the end). Marks "headers" as secret, if found. size_t excludeS3OrURLNestedMaps() @@ -463,7 +424,8 @@ protected: /// ExternalDistributed('engine', 'host:port', 'database', 'table', 'user', 'password') findExternalDistributedTableEngineSecretArguments(); } - else if ((engine_name == "MySQL") || (engine_name == "PostgreSQL") || (engine_name == "MaterializedPostgreSQL")) + else if ((engine_name == "MySQL") || (engine_name == "PostgreSQL") || + (engine_name == "MaterializedPostgreSQL") || (engine_name == "MongoDB")) { /// MySQL('host:port', 'database', 'table', 'user', 'password', ...) /// PostgreSQL('host:port', 'database', 'table', 'user', 'password', ...) @@ -471,10 +433,6 @@ protected: /// MongoDB('host:port', 'database', 'collection', 'user', 'password', ...) findMySQLFunctionSecretArguments(); } - else if (engine_name == "MongoDB") - { - findMongoDBSecretArguments(); - } else if ((engine_name == "S3") || (engine_name == "COSN") || (engine_name == "OSS") || (engine_name == "DeltaLake") || (engine_name == "Hudi") || (engine_name == "Iceberg") || (engine_name == "S3Queue")) { @@ -633,15 +591,11 @@ protected: /// Looks for a secret argument with a specified name. This function looks for arguments in format `key=value` where the key is specified. /// If the argument is found, it is marked as a secret. 
- bool findSecretNamedArgument(const std::string_view & key, size_t start = 0) + void findSecretNamedArgument(const std::string_view & key, size_t start = 0) { ssize_t arg_idx = findNamedArgument(nullptr, key, start); if (arg_idx >= 0) - { markSecretArgument(arg_idx, /* argument_is_named= */ true); - return true; - } - return false; } }; diff --git a/src/Processors/Sources/MongoDBPocoLegacySource.cpp b/src/Processors/Sources/MongoDBPocoLegacySource.cpp deleted file mode 100644 index c1775b1a252..00000000000 --- a/src/Processors/Sources/MongoDBPocoLegacySource.cpp +++ /dev/null @@ -1,580 +0,0 @@ -#include "config.h" - -#if USE_MONGODB -#include "MongoDBPocoLegacySource.h" - -#include -#include - -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include "base/types.h" -#include -#include - -#include -#include - -// only after poco -// naming conflict: -// Poco/MongoDB/BSONWriter.h:54: void writeCString(const std::string & value); -// src/IO/WriteHelpers.h:146 #define writeCString(s, buf) -#include - -namespace DB -{ - -namespace ErrorCodes -{ - extern const int TYPE_MISMATCH; - extern const int UNKNOWN_TYPE; - extern const int MONGODB_ERROR; - extern const int BAD_ARGUMENTS; -} - -namespace -{ - using ValueType = ExternalResultDescription::ValueType; - using ObjectId = Poco::MongoDB::ObjectId; - using MongoArray = Poco::MongoDB::Array; - using MongoUUID = Poco::MongoDB::Binary::Ptr; - - - UUID parsePocoUUID(const Poco::UUID & src) - { - UUID uuid; - - std::array src_node = src.getNode(); - UInt64 node = 0; - node |= UInt64(src_node[0]) << 40; - node |= UInt64(src_node[1]) << 32; - node |= UInt64(src_node[2]) << 24; - node |= UInt64(src_node[3]) << 16; - node |= UInt64(src_node[4]) << 8; - node |= src_node[5]; - - UUIDHelpers::getHighBytes(uuid) = UInt64(src.getTimeLow()) << 32 | UInt32(src.getTimeMid() << 16 | src.getTimeHiAndVersion()); - UUIDHelpers::getLowBytes(uuid) = UInt64(src.getClockSeq()) << 48 | node; - - return uuid; - } - - template - Field getNumber(const Poco::MongoDB::Element & value, const std::string & name) - { - switch (value.type()) - { - case Poco::MongoDB::ElementTraits::TypeId: - return static_cast(static_cast &>(value).value()); - case Poco::MongoDB::ElementTraits::TypeId: - return static_cast(static_cast &>(value).value()); - case Poco::MongoDB::ElementTraits::TypeId: - return static_cast(static_cast &>(value).value()); - case Poco::MongoDB::ElementTraits::TypeId: - return static_cast(static_cast &>(value).value()); - case Poco::MongoDB::ElementTraits::TypeId: - return Field(); - case Poco::MongoDB::ElementTraits::TypeId: - return parse(static_cast &>(value).value()); - default: - throw Exception(ErrorCodes::TYPE_MISMATCH, "Type mismatch, expected a number, got type id = {} for column {}", - toString(value.type()), name); - } - } - - void prepareMongoDBArrayInfo( - std::unordered_map & array_info, size_t column_idx, const DataTypePtr data_type) - { - const auto * array_type = assert_cast(data_type.get()); - auto nested = array_type->getNestedType(); - - size_t count_dimensions = 1; - while (isArray(nested)) - { - ++count_dimensions; - nested = assert_cast(nested.get())->getNestedType(); - } - - Field default_value = nested->getDefault(); - if (nested->isNullable()) - nested = assert_cast(nested.get())->getNestedType(); - - WhichDataType which(nested); - std::function parser; - - if (which.isUInt8()) - parser = [](const Poco::MongoDB::Element & value, const std::string & 
name) -> Field { return getNumber(value, name); }; - else if (which.isUInt16()) - parser = [](const Poco::MongoDB::Element & value, const std::string & name) -> Field { return getNumber(value, name); }; - else if (which.isUInt32()) - parser = [](const Poco::MongoDB::Element & value, const std::string & name) -> Field { return getNumber(value, name); }; - else if (which.isUInt64()) - parser = [](const Poco::MongoDB::Element & value, const std::string & name) -> Field { return getNumber(value, name); }; - else if (which.isInt8()) - parser = [](const Poco::MongoDB::Element & value, const std::string & name) -> Field { return getNumber(value, name); }; - else if (which.isInt16()) - parser = [](const Poco::MongoDB::Element & value, const std::string & name) -> Field { return getNumber(value, name); }; - else if (which.isInt32()) - parser = [](const Poco::MongoDB::Element & value, const std::string & name) -> Field { return getNumber(value, name); }; - else if (which.isInt64()) - parser = [](const Poco::MongoDB::Element & value, const std::string & name) -> Field { return getNumber(value, name); }; - else if (which.isFloat32()) - parser = [](const Poco::MongoDB::Element & value, const std::string & name) -> Field { return getNumber(value, name); }; - else if (which.isFloat64()) - parser = [](const Poco::MongoDB::Element & value, const std::string & name) -> Field { return getNumber(value, name); }; - else if (which.isString() || which.isFixedString()) - parser = [](const Poco::MongoDB::Element & value, const std::string & name) -> Field - { - if (value.type() == Poco::MongoDB::ElementTraits::TypeId) - { - String string_id = value.toString(); - return Field(string_id.data(), string_id.size()); - } - else if (value.type() == Poco::MongoDB::ElementTraits::TypeId) - { - String string = static_cast &>(value).value(); - return Field(string.data(), string.size()); - } - - throw Exception(ErrorCodes::TYPE_MISMATCH, "Type mismatch, expected String, got type id = {} for column {}", - toString(value.type()), name); - }; - else if (which.isDate()) - parser = [](const Poco::MongoDB::Element & value, const std::string & name) -> Field - { - if (value.type() != Poco::MongoDB::ElementTraits::TypeId) - throw Exception(ErrorCodes::TYPE_MISMATCH, "Type mismatch, expected Timestamp, got type id = {} for column {}", - toString(value.type()), name); - - return static_cast(DateLUT::instance().toDayNum( - static_cast &>(value).value().epochTime())); - }; - else if (which.isDateTime()) - parser = [](const Poco::MongoDB::Element & value, const std::string & name) -> Field - { - if (value.type() != Poco::MongoDB::ElementTraits::TypeId) - throw Exception(ErrorCodes::TYPE_MISMATCH, "Type mismatch, expected Timestamp, got type id = {} for column {}", - toString(value.type()), name); - - return static_cast(static_cast &>(value).value().epochTime()); - }; - else if (which.isUUID()) - parser = [](const Poco::MongoDB::Element & value, const std::string & name) -> Field - { - if (value.type() == Poco::MongoDB::ElementTraits::TypeId) - { - String string = static_cast &>(value).value(); - return parse(string); - } - else if (value.type() == Poco::MongoDB::ElementTraits::TypeId) - { - const Poco::UUID & poco_uuid = static_cast &>(value).value()->uuid(); - return parsePocoUUID(poco_uuid); - } - else - throw Exception(ErrorCodes::TYPE_MISMATCH, "Type mismatch, expected String/UUID, got type id = {} for column {}", - toString(value.type()), name); - }; - else - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Type conversion to {} is not 
supported", nested->getName()); - - array_info[column_idx] = {count_dimensions, default_value, parser}; - } - - template - void insertNumber(IColumn & column, const Poco::MongoDB::Element & value, const std::string & name) - { - switch (value.type()) - { - case Poco::MongoDB::ElementTraits::TypeId: - assert_cast &>(column).getData().push_back( - static_cast &>(value).value()); - break; - case Poco::MongoDB::ElementTraits::TypeId: - assert_cast &>(column).getData().push_back( - static_cast(static_cast &>(value).value())); - break; - case Poco::MongoDB::ElementTraits::TypeId: - assert_cast &>(column).getData().push_back(static_cast( - static_cast &>(value).value())); - break; - case Poco::MongoDB::ElementTraits::TypeId: - assert_cast &>(column).getData().push_back( - static_cast &>(value).value()); - break; - case Poco::MongoDB::ElementTraits::TypeId: - assert_cast &>(column).getData().emplace_back(); - break; - case Poco::MongoDB::ElementTraits::TypeId: - assert_cast &>(column).getData().push_back( - parse(static_cast &>(value).value())); - break; - default: - throw Exception(ErrorCodes::TYPE_MISMATCH, "Type mismatch, expected a number, got type id = {} for column {}", - toString(value.type()), name); - } - } - - void insertValue( - IColumn & column, - const ValueType type, - const Poco::MongoDB::Element & value, - const std::string & name, - std::unordered_map & array_info, - size_t idx) - { - switch (type) - { - case ValueType::vtUInt8: - insertNumber(column, value, name); - break; - case ValueType::vtUInt16: - insertNumber(column, value, name); - break; - case ValueType::vtUInt32: - insertNumber(column, value, name); - break; - case ValueType::vtUInt64: - insertNumber(column, value, name); - break; - case ValueType::vtInt8: - insertNumber(column, value, name); - break; - case ValueType::vtInt16: - insertNumber(column, value, name); - break; - case ValueType::vtInt32: - insertNumber(column, value, name); - break; - case ValueType::vtInt64: - insertNumber(column, value, name); - break; - case ValueType::vtFloat32: - insertNumber(column, value, name); - break; - case ValueType::vtFloat64: - insertNumber(column, value, name); - break; - - case ValueType::vtEnum8: - case ValueType::vtEnum16: - case ValueType::vtString: - { - if (value.type() == Poco::MongoDB::ElementTraits::TypeId) - { - std::string string_id = value.toString(); - assert_cast(column).insertData(string_id.data(), string_id.size()); - break; - } - else if (value.type() == Poco::MongoDB::ElementTraits::TypeId) - { - String string = static_cast &>(value).value(); - assert_cast(column).insertData(string.data(), string.size()); - break; - } - - throw Exception(ErrorCodes::TYPE_MISMATCH, "Type mismatch, expected String, got type id = {} for column {}", - toString(value.type()), name); - } - - case ValueType::vtDate: - { - if (value.type() != Poco::MongoDB::ElementTraits::TypeId) - throw Exception(ErrorCodes::TYPE_MISMATCH, "Type mismatch, expected Timestamp, got type id = {} for column {}", - toString(value.type()), name); - - assert_cast(column).getData().push_back(static_cast(DateLUT::instance().toDayNum( - static_cast &>(value).value().epochTime()))); - break; - } - - case ValueType::vtDateTime: - { - if (value.type() != Poco::MongoDB::ElementTraits::TypeId) - throw Exception(ErrorCodes::TYPE_MISMATCH, "Type mismatch, expected Timestamp, got type id = {} for column {}", - toString(value.type()), name); - - assert_cast(column).getData().push_back( - static_cast(static_cast &>(value).value().epochTime())); - break; - } - case 
ValueType::vtUUID: - { - if (value.type() == Poco::MongoDB::ElementTraits::TypeId) - { - String string = static_cast &>(value).value(); - assert_cast(column).getData().push_back(parse(string)); - } - else if (value.type() == Poco::MongoDB::ElementTraits::TypeId) - { - const Poco::UUID & poco_uuid = static_cast &>(value).value()->uuid(); - UUID uuid = parsePocoUUID(poco_uuid); - assert_cast(column).getData().push_back(uuid); - } - else - throw Exception(ErrorCodes::TYPE_MISMATCH, "Type mismatch, expected String/UUID, got type id = {} for column {}", - toString(value.type()), name); - break; - } - case ValueType::vtArray: - { - if (value.type() != Poco::MongoDB::ElementTraits::TypeId) - throw Exception(ErrorCodes::TYPE_MISMATCH, "Type mismatch, expected Array, got type id = {} for column {}", - toString(value.type()), name); - - size_t expected_dimensions = array_info[idx].num_dimensions; - const auto parse_value = array_info[idx].parser; - std::vector dimensions(expected_dimensions + 1); - - auto array = static_cast &>(value).value(); - - std::vector> arrays; - arrays.emplace_back(&value, 0); - - while (!arrays.empty()) - { - size_t dimension_idx = arrays.size() - 1; - - if (dimension_idx + 1 > expected_dimensions) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Got more dimensions than expected"); - - auto [parent_ptr, child_idx] = arrays.back(); - auto parent = static_cast &>(*parent_ptr).value(); - - if (child_idx >= parent->size()) - { - arrays.pop_back(); - - if (dimension_idx == 0) - break; - - dimensions[dimension_idx].emplace_back(Array(dimensions[dimension_idx + 1].begin(), dimensions[dimension_idx + 1].end())); - dimensions[dimension_idx + 1].clear(); - - continue; - } - - Poco::MongoDB::Element::Ptr child = parent->get(static_cast(child_idx)); - arrays.back().second += 1; - - if (child->type() == Poco::MongoDB::ElementTraits::TypeId) - { - arrays.emplace_back(child.get(), 0); - } - else if (child->type() == Poco::MongoDB::ElementTraits::TypeId) - { - if (dimension_idx + 1 == expected_dimensions) - dimensions[dimension_idx + 1].emplace_back(array_info[idx].default_value); - else - dimensions[dimension_idx + 1].emplace_back(Array()); - } - else if (dimension_idx + 1 == expected_dimensions) - { - dimensions[dimension_idx + 1].emplace_back(parse_value(*child, name)); - } - else - { - throw Exception(ErrorCodes::BAD_ARGUMENTS, - "Got less dimensions than expected. 
({} instead of {})", dimension_idx + 1, expected_dimensions); - } - } - - assert_cast(column).insert(Array(dimensions[1].begin(), dimensions[1].end())); - break; - - } - default: - throw Exception(ErrorCodes::UNKNOWN_TYPE, "Value of unsupported type: {}", column.getName()); - } - } - - void insertDefaultValue(IColumn & column, const IColumn & sample_column) { column.insertFrom(sample_column, 0); } -} - - -bool isMongoDBWireProtocolOld(Poco::MongoDB::Connection & connection_, const std::string & database_name_) -{ - Poco::MongoDB::Database db(database_name_); - Poco::MongoDB::Document::Ptr doc = db.queryServerHello(connection_, false); - - if (doc->exists("maxWireVersion")) - { - auto wire_version = doc->getInteger("maxWireVersion"); - return wire_version < Poco::MongoDB::Database::WireVersion::VER_36; - } - - doc = db.queryServerHello(connection_, true); - if (doc->exists("maxWireVersion")) - { - auto wire_version = doc->getInteger("maxWireVersion"); - return wire_version < Poco::MongoDB::Database::WireVersion::VER_36; - } - - return true; -} - - -MongoDBPocoLegacyCursor::MongoDBPocoLegacyCursor( - const std::string & database, - const std::string & collection, - const Block & sample_block_to_select, - const Poco::MongoDB::Document & query, - Poco::MongoDB::Connection & connection) - : is_wire_protocol_old(isMongoDBWireProtocolOld(connection, database)) -{ - Poco::MongoDB::Document projection; - - /// Looks like selecting _id column is implicit by default. - if (!sample_block_to_select.has("_id")) - projection.add("_id", 0); - - for (const auto & column : sample_block_to_select) - projection.add(column.name, 1); - - if (is_wire_protocol_old) - { - old_cursor = std::make_unique(database, collection); - old_cursor->query().selector() = query; - old_cursor->query().returnFieldSelector() = projection; - } - else - { - new_cursor = std::make_unique(database, collection); - new_cursor->query().setCommandName(Poco::MongoDB::OpMsgMessage::CMD_FIND); - new_cursor->query().body().addNewDocument("filter") = query; - new_cursor->query().body().addNewDocument("projection") = projection; - } -} - -Poco::MongoDB::Document::Vector MongoDBPocoLegacyCursor::nextDocuments(Poco::MongoDB::Connection & connection) -{ - if (is_wire_protocol_old) - { - auto response = old_cursor->next(connection); - cursor_id = response.cursorID(); - return std::move(response.documents()); - } - else - { - auto response = new_cursor->next(connection); - cursor_id = new_cursor->cursorID(); - return std::move(response.documents()); - } -} - -Int64 MongoDBPocoLegacyCursor::cursorID() const -{ - return cursor_id; -} - - -MongoDBPocoLegacySource::MongoDBPocoLegacySource( - std::shared_ptr & connection_, - const String & database_name_, - const String & collection_name_, - const Poco::MongoDB::Document & query_, - const Block & sample_block, - UInt64 max_block_size_) - : ISource(sample_block.cloneEmpty()) - , connection(connection_) - , cursor(database_name_, collection_name_, sample_block, query_, *connection_) - , max_block_size{max_block_size_} -{ - description.init(sample_block); - - for (const auto idx : collections::range(0, description.sample_block.columns())) - if (description.types[idx].first == ExternalResultDescription::ValueType::vtArray) - prepareMongoDBArrayInfo(array_info, idx, description.sample_block.getByPosition(idx).type); -} - - -MongoDBPocoLegacySource::~MongoDBPocoLegacySource() = default; - -Chunk MongoDBPocoLegacySource::generate() -{ - if (all_read) - return {}; - - MutableColumns 
columns(description.sample_block.columns()); - const size_t size = columns.size(); - - for (const auto i : collections::range(0, size)) - columns[i] = description.sample_block.getByPosition(i).column->cloneEmpty(); - - size_t num_rows = 0; - while (num_rows < max_block_size) - { - auto documents = cursor.nextDocuments(*connection); - - for (auto & document : documents) - { - if (document->exists("ok") && document->exists("$err") - && document->exists("code") && document->getInteger("ok") == 0) - { - auto code = document->getInteger("code"); - const Poco::MongoDB::Element::Ptr value = document->get("$err"); - auto message = static_cast &>(*value).value(); - throw Exception(ErrorCodes::MONGODB_ERROR, "Got error from MongoDB: {}, code: {}", message, code); - } - ++num_rows; - - for (const auto idx : collections::range(0, size)) - { - const auto & name = description.sample_block.getByPosition(idx).name; - - bool exists_in_current_document = document->exists(name); - if (!exists_in_current_document) - { - insertDefaultValue(*columns[idx], *description.sample_block.getByPosition(idx).column); - continue; - } - - const Poco::MongoDB::Element::Ptr value = document->get(name); - - if (value.isNull() || value->type() == Poco::MongoDB::ElementTraits::TypeId) - { - insertDefaultValue(*columns[idx], *description.sample_block.getByPosition(idx).column); - } - else - { - bool is_nullable = description.types[idx].second; - if (is_nullable) - { - ColumnNullable & column_nullable = assert_cast(*columns[idx]); - insertValue(column_nullable.getNestedColumn(), description.types[idx].first, *value, name, array_info, idx); - column_nullable.getNullMapData().emplace_back(0); - } - else - insertValue(*columns[idx], description.types[idx].first, *value, name, array_info, idx); - } - } - } - - if (cursor.cursorID() == 0) - { - all_read = true; - break; - } - } - - if (num_rows == 0) - return {}; - - return Chunk(std::move(columns), num_rows); -} - -} -#endif diff --git a/src/Processors/Sources/MongoDBPocoLegacySource.h b/src/Processors/Sources/MongoDBPocoLegacySource.h deleted file mode 100644 index 0c9f2c7cc9f..00000000000 --- a/src/Processors/Sources/MongoDBPocoLegacySource.h +++ /dev/null @@ -1,92 +0,0 @@ -#pragma once - -#include "config.h" - -#if USE_MONGODB -#include -#include - -#include -#include -#include - -#include - - -namespace Poco -{ -namespace MongoDB -{ - class Connection; - class Document; - class Cursor; - class OpMsgCursor; -} -} - -namespace DB -{ - -struct MongoDBPocoLegacyArrayInfo -{ - size_t num_dimensions; - Field default_value; - std::function parser; -}; - -void authenticate(Poco::MongoDB::Connection & connection, const std::string & database, const std::string & user, const std::string & password); - -bool isMongoDBWireProtocolOld(Poco::MongoDB::Connection & connection_, const std::string & database_name_); - -/// Deprecated, will be removed soon. -class MongoDBPocoLegacyCursor -{ -public: - MongoDBPocoLegacyCursor( - const std::string & database, - const std::string & collection, - const Block & sample_block_to_select, - const Poco::MongoDB::Document & query, - Poco::MongoDB::Connection & connection); - - Poco::MongoDB::Document::Vector nextDocuments(Poco::MongoDB::Connection & connection); - - Int64 cursorID() const; - -private: - const bool is_wire_protocol_old; - std::unique_ptr old_cursor; - std::unique_ptr new_cursor; - Int64 cursor_id = 0; -}; - -/// Converts MongoDB Cursor to a stream of Blocks. Deprecated, will be removed soon. 
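isMongoDBWireProtocolOld above asks the server for its hello document and treats anything below wire version 3.6 as "old", which is what decides whether the cursor is built on the legacy OP_QUERY request or on an OP_MSG find command. A condensed sketch of that decision, using the same (vendored) Poco MongoDB calls as the code above, with error handling omitted:

    // Condensed sketch of the wire-version probe and cursor choice shown above.
    #include <Poco/MongoDB/Connection.h>
    #include <Poco/MongoDB/Cursor.h>
    #include <Poco/MongoDB/Database.h>
    #include <Poco/MongoDB/Document.h>
    #include <Poco/MongoDB/OpMsgCursor.h>
    #include <Poco/MongoDB/OpMsgMessage.h>
    #include <memory>
    #include <string>

    bool wireProtocolIsOld(Poco::MongoDB::Connection & connection, const std::string & db_name)
    {
        Poco::MongoDB::Database db(db_name);
        /// Try the modern "hello" command first, then the legacy form.
        for (bool legacy_hello : {false, true})
        {
            Poco::MongoDB::Document::Ptr doc = db.queryServerHello(connection, legacy_hello);
            if (doc->exists("maxWireVersion"))
                return doc->getInteger("maxWireVersion") < Poco::MongoDB::Database::WireVersion::VER_36;
        }
        return true; /// No wire version reported: assume an old server.
    }

    void makeCursor(Poco::MongoDB::Connection & connection, const std::string & db, const std::string & collection)
    {
        if (wireProtocolIsOld(connection, db))
        {
            /// Pre-3.6 servers: legacy OP_QUERY cursor.
            auto old_cursor = std::make_unique<Poco::MongoDB::Cursor>(db, collection);
            (void)old_cursor;
        }
        else
        {
            /// 3.6+ servers: OP_MSG cursor driving a "find" command.
            auto new_cursor = std::make_unique<Poco::MongoDB::OpMsgCursor>(db, collection);
            new_cursor->query().setCommandName(Poco::MongoDB::OpMsgMessage::CMD_FIND);
        }
    }
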
-class MongoDBPocoLegacySource final : public ISource -{ -public: - MongoDBPocoLegacySource( - std::shared_ptr & connection_, - const String & database_name_, - const String & collection_name_, - const Poco::MongoDB::Document & query_, - const Block & sample_block, - UInt64 max_block_size_); - - ~MongoDBPocoLegacySource() override; - - String getName() const override { return "MongoDB"; } - -private: - Chunk generate() override; - - std::shared_ptr connection; - MongoDBPocoLegacyCursor cursor; - const UInt64 max_block_size; - ExternalResultDescription description; - bool all_read = false; - - std::unordered_map array_info; -}; - -} -#endif diff --git a/src/Processors/Sources/MongoDBSource.cpp b/src/Processors/Sources/MongoDBSource.cpp index f593f36a318..e00a541b300 100644 --- a/src/Processors/Sources/MongoDBSource.cpp +++ b/src/Processors/Sources/MongoDBSource.cpp @@ -1,190 +1,501 @@ -#include "config.h" - -#if USE_MONGODB #include "MongoDBSource.h" +#include #include +#include +#include +#include +#include +#include +#include +#include + #include #include -#include #include -#include -#include +#include #include -#include #include -#include -#include +#include +#include "base/types.h" #include +#include + +#include +#include + +// only after poco +// naming conflict: +// Poco/MongoDB/BSONWriter.h:54: void writeCString(const std::string & value); +// src/IO/WriteHelpers.h:146 #define writeCString(s, buf) +#include namespace DB { namespace ErrorCodes { -extern const int TYPE_MISMATCH; -extern const int NOT_IMPLEMENTED; + extern const int TYPE_MISMATCH; + extern const int UNKNOWN_TYPE; + extern const int MONGODB_ERROR; + extern const int BAD_ARGUMENTS; } -using BSONCXXHelper::BSONElementAsNumber; -using BSONCXXHelper::BSONArrayAsArray; -using BSONCXXHelper::BSONElementAsString; - -void MongoDBSource::insertDefaultValue(IColumn & column, const IColumn & sample_column) { column.insertFrom(sample_column, 0); } - -void MongoDBSource::insertValue(IColumn & column, const size_t & idx, const DataTypePtr & type, const std::string & name, const bsoncxx::document::element & value) +namespace { - switch (type->getTypeId()) + using ValueType = ExternalResultDescription::ValueType; + using ObjectId = Poco::MongoDB::ObjectId; + using MongoArray = Poco::MongoDB::Array; + using MongoUUID = Poco::MongoDB::Binary::Ptr; + + + UUID parsePocoUUID(const Poco::UUID & src) { - case TypeIndex::Int8: - assert_cast(column).insertValue(BSONElementAsNumber(value, name)); - break; - case TypeIndex::UInt8: - assert_cast(column).insertValue(BSONElementAsNumber(value, name)); - break; - case TypeIndex::Int16: - assert_cast(column).insertValue(BSONElementAsNumber(value, name)); - break; - case TypeIndex::UInt16: - assert_cast(column).insertValue(BSONElementAsNumber(value, name)); - break; - case TypeIndex::Int32: - assert_cast(column).insertValue(BSONElementAsNumber(value, name)); - break; - case TypeIndex::UInt32: - assert_cast(column).insertValue(BSONElementAsNumber(value, name)); - break; - case TypeIndex::Int64: - assert_cast(column).insertValue(BSONElementAsNumber(value, name)); - break; - case TypeIndex::UInt64: - assert_cast(column).insertValue(BSONElementAsNumber(value, name)); - break; - case TypeIndex::Int128: - assert_cast(column).insertValue(BSONElementAsNumber(value, name)); - break; - case TypeIndex::UInt128: - assert_cast(column).insertValue(BSONElementAsNumber(value, name)); - break; - case TypeIndex::Int256: - assert_cast(column).insertValue(BSONElementAsNumber(value, name)); - break; - case 
TypeIndex::UInt256: - assert_cast(column).insertValue(BSONElementAsNumber(value, name)); - break; - case TypeIndex::Float32: - assert_cast(column).insertValue(BSONElementAsNumber(value, name)); - break; - case TypeIndex::Float64: - assert_cast(column).insertValue(BSONElementAsNumber(value, name)); - break; - case TypeIndex::Date: - { - if (value.type() != bsoncxx::type::k_date) - throw Exception(ErrorCodes::TYPE_MISMATCH, "Type mismatch, expected date, got {} for column {}", - bsoncxx::to_string(value.type()), name); + UUID uuid; - assert_cast(column).insertValue(DateLUT::instance().toDayNum(value.get_date().to_int64() / 1000).toUnderType()); - break; - } - case TypeIndex::Date32: - { - if (value.type() != bsoncxx::type::k_date) - throw Exception(ErrorCodes::TYPE_MISMATCH, "Type mismatch, expected date, got {} for column {}", - bsoncxx::to_string(value.type()), name); + std::array src_node = src.getNode(); + UInt64 node = 0; + node |= UInt64(src_node[0]) << 40; + node |= UInt64(src_node[1]) << 32; + node |= UInt64(src_node[2]) << 24; + node |= UInt64(src_node[3]) << 16; + node |= UInt64(src_node[4]) << 8; + node |= src_node[5]; - assert_cast(column).insertValue(DateLUT::instance().toDayNum(value.get_date().to_int64() / 1000).toUnderType()); - break; - } - case TypeIndex::DateTime: - { - if (value.type() != bsoncxx::type::k_date) - throw Exception(ErrorCodes::TYPE_MISMATCH, "Type mismatch, expected date, got {} for column {}", - bsoncxx::to_string(value.type()), name); + UUIDHelpers::getHighBytes(uuid) = UInt64(src.getTimeLow()) << 32 | UInt32(src.getTimeMid() << 16 | src.getTimeHiAndVersion()); + UUIDHelpers::getLowBytes(uuid) = UInt64(src.getClockSeq()) << 48 | node; - assert_cast(column).insertValue(static_cast(value.get_date().to_int64() / 1000)); - break; - } - case TypeIndex::DateTime64: - { - if (value.type() != bsoncxx::type::k_date) - throw Exception(ErrorCodes::TYPE_MISMATCH, "Type mismatch, expected date, got {} for column {}", - bsoncxx::to_string(value.type()), name); - - assert_cast &>(column).insertValue(value.get_date().to_int64()); - break; - } - case TypeIndex::UUID: - { - if (value.type() != bsoncxx::type::k_string) - throw Exception(ErrorCodes::TYPE_MISMATCH, "Type mismatch, expected string (UUID), got {} for column {}", - bsoncxx::to_string(value.type()), name); - - assert_cast(column).insertValue(parse(value.get_string().value.data())); - break; - } - case TypeIndex::String: - { - assert_cast(column).insert(BSONElementAsString(value, json_format_settings)); - break; - } - case TypeIndex::Array: - { - if (value.type() != bsoncxx::type::k_array) - throw Exception(ErrorCodes::TYPE_MISMATCH, "Type mismatch, expected array, got {} for column {}", - bsoncxx::to_string(value.type()), name); - - assert_cast(column).insert(BSONArrayAsArray(arrays_info[idx].first, value.get_array(), arrays_info[idx].second.first, arrays_info[idx].second.second, name, json_format_settings)); - break; - } - default: - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Column {} has unsupported type {}", name, type->getName()); + return uuid; } + + template + Field getNumber(const Poco::MongoDB::Element & value, const std::string & name) + { + switch (value.type()) + { + case Poco::MongoDB::ElementTraits::TypeId: + return static_cast(static_cast &>(value).value()); + case Poco::MongoDB::ElementTraits::TypeId: + return static_cast(static_cast &>(value).value()); + case Poco::MongoDB::ElementTraits::TypeId: + return static_cast(static_cast &>(value).value()); + case Poco::MongoDB::ElementTraits::TypeId: + 
return static_cast(static_cast &>(value).value()); + case Poco::MongoDB::ElementTraits::TypeId: + return Field(); + case Poco::MongoDB::ElementTraits::TypeId: + return parse(static_cast &>(value).value()); + default: + throw Exception(ErrorCodes::TYPE_MISMATCH, "Type mismatch, expected a number, got type id = {} for column {}", + toString(value.type()), name); + } + } + + void prepareMongoDBArrayInfo( + std::unordered_map & array_info, size_t column_idx, const DataTypePtr data_type) + { + const auto * array_type = assert_cast(data_type.get()); + auto nested = array_type->getNestedType(); + + size_t count_dimensions = 1; + while (isArray(nested)) + { + ++count_dimensions; + nested = assert_cast(nested.get())->getNestedType(); + } + + Field default_value = nested->getDefault(); + if (nested->isNullable()) + nested = assert_cast(nested.get())->getNestedType(); + + WhichDataType which(nested); + std::function parser; + + if (which.isUInt8()) + parser = [](const Poco::MongoDB::Element & value, const std::string & name) -> Field { return getNumber(value, name); }; + else if (which.isUInt16()) + parser = [](const Poco::MongoDB::Element & value, const std::string & name) -> Field { return getNumber(value, name); }; + else if (which.isUInt32()) + parser = [](const Poco::MongoDB::Element & value, const std::string & name) -> Field { return getNumber(value, name); }; + else if (which.isUInt64()) + parser = [](const Poco::MongoDB::Element & value, const std::string & name) -> Field { return getNumber(value, name); }; + else if (which.isInt8()) + parser = [](const Poco::MongoDB::Element & value, const std::string & name) -> Field { return getNumber(value, name); }; + else if (which.isInt16()) + parser = [](const Poco::MongoDB::Element & value, const std::string & name) -> Field { return getNumber(value, name); }; + else if (which.isInt32()) + parser = [](const Poco::MongoDB::Element & value, const std::string & name) -> Field { return getNumber(value, name); }; + else if (which.isInt64()) + parser = [](const Poco::MongoDB::Element & value, const std::string & name) -> Field { return getNumber(value, name); }; + else if (which.isFloat32()) + parser = [](const Poco::MongoDB::Element & value, const std::string & name) -> Field { return getNumber(value, name); }; + else if (which.isFloat64()) + parser = [](const Poco::MongoDB::Element & value, const std::string & name) -> Field { return getNumber(value, name); }; + else if (which.isString() || which.isFixedString()) + parser = [](const Poco::MongoDB::Element & value, const std::string & name) -> Field + { + if (value.type() == Poco::MongoDB::ElementTraits::TypeId) + { + String string_id = value.toString(); + return Field(string_id.data(), string_id.size()); + } + else if (value.type() == Poco::MongoDB::ElementTraits::TypeId) + { + String string = static_cast &>(value).value(); + return Field(string.data(), string.size()); + } + + throw Exception(ErrorCodes::TYPE_MISMATCH, "Type mismatch, expected String, got type id = {} for column {}", + toString(value.type()), name); + }; + else if (which.isDate()) + parser = [](const Poco::MongoDB::Element & value, const std::string & name) -> Field + { + if (value.type() != Poco::MongoDB::ElementTraits::TypeId) + throw Exception(ErrorCodes::TYPE_MISMATCH, "Type mismatch, expected Timestamp, got type id = {} for column {}", + toString(value.type()), name); + + return static_cast(DateLUT::instance().toDayNum( + static_cast &>(value).value().epochTime())); + }; + else if (which.isDateTime()) + parser = [](const 
Poco::MongoDB::Element & value, const std::string & name) -> Field + { + if (value.type() != Poco::MongoDB::ElementTraits::TypeId) + throw Exception(ErrorCodes::TYPE_MISMATCH, "Type mismatch, expected Timestamp, got type id = {} for column {}", + toString(value.type()), name); + + return static_cast(static_cast &>(value).value().epochTime()); + }; + else if (which.isUUID()) + parser = [](const Poco::MongoDB::Element & value, const std::string & name) -> Field + { + if (value.type() == Poco::MongoDB::ElementTraits::TypeId) + { + String string = static_cast &>(value).value(); + return parse(string); + } + else if (value.type() == Poco::MongoDB::ElementTraits::TypeId) + { + const Poco::UUID & poco_uuid = static_cast &>(value).value()->uuid(); + return parsePocoUUID(poco_uuid); + } + else + throw Exception(ErrorCodes::TYPE_MISMATCH, "Type mismatch, expected String/UUID, got type id = {} for column {}", + toString(value.type()), name); + + }; + else + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Type conversion to {} is not supported", nested->getName()); + + array_info[column_idx] = {count_dimensions, default_value, parser}; + } + + template + void insertNumber(IColumn & column, const Poco::MongoDB::Element & value, const std::string & name) + { + switch (value.type()) + { + case Poco::MongoDB::ElementTraits::TypeId: + assert_cast &>(column).getData().push_back( + static_cast &>(value).value()); + break; + case Poco::MongoDB::ElementTraits::TypeId: + assert_cast &>(column).getData().push_back( + static_cast(static_cast &>(value).value())); + break; + case Poco::MongoDB::ElementTraits::TypeId: + assert_cast &>(column).getData().push_back(static_cast( + static_cast &>(value).value())); + break; + case Poco::MongoDB::ElementTraits::TypeId: + assert_cast &>(column).getData().push_back( + static_cast &>(value).value()); + break; + case Poco::MongoDB::ElementTraits::TypeId: + assert_cast &>(column).getData().emplace_back(); + break; + case Poco::MongoDB::ElementTraits::TypeId: + assert_cast &>(column).getData().push_back( + parse(static_cast &>(value).value())); + break; + default: + throw Exception(ErrorCodes::TYPE_MISMATCH, "Type mismatch, expected a number, got type id = {} for column {}", + toString(value.type()), name); + } + } + + void insertValue( + IColumn & column, + const ValueType type, + const Poco::MongoDB::Element & value, + const std::string & name, + std::unordered_map & array_info, + size_t idx) + { + switch (type) + { + case ValueType::vtUInt8: + insertNumber(column, value, name); + break; + case ValueType::vtUInt16: + insertNumber(column, value, name); + break; + case ValueType::vtUInt32: + insertNumber(column, value, name); + break; + case ValueType::vtUInt64: + insertNumber(column, value, name); + break; + case ValueType::vtInt8: + insertNumber(column, value, name); + break; + case ValueType::vtInt16: + insertNumber(column, value, name); + break; + case ValueType::vtInt32: + insertNumber(column, value, name); + break; + case ValueType::vtInt64: + insertNumber(column, value, name); + break; + case ValueType::vtFloat32: + insertNumber(column, value, name); + break; + case ValueType::vtFloat64: + insertNumber(column, value, name); + break; + + case ValueType::vtEnum8: + case ValueType::vtEnum16: + case ValueType::vtString: + { + if (value.type() == Poco::MongoDB::ElementTraits::TypeId) + { + std::string string_id = value.toString(); + assert_cast(column).insertData(string_id.data(), string_id.size()); + break; + } + else if (value.type() == Poco::MongoDB::ElementTraits::TypeId) + { 
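
Editor note: prepareMongoDBArrayInfo above chooses one std::function parser per array column, keyed on the nested ClickHouse type, and the array branch of insertValue then reuses that parser for every element. A simplified, hypothetical sketch of the same dispatch pattern follows; strings stand in for BSON elements and none of the names below come from the patch.

#include <functional>
#include <iostream>
#include <stdexcept>
#include <string>

// Pick the conversion once, reuse it for every element of the column.
using Parser = std::function<double(const std::string & raw, const std::string & column)>;

Parser makeParser(const std::string & nested_type)
{
    if (nested_type == "Float64")
        return [](const std::string & raw, const std::string &) { return std::stod(raw); };
    if (nested_type == "Int64")
        return [](const std::string & raw, const std::string &) { return double(std::stoll(raw)); };
    throw std::runtime_error("Type conversion to " + nested_type + " is not supported");
}

int main()
{
    Parser parse = makeParser("Int64");          // chosen once per column, as in the patch
    std::cout << parse("42", "scores") << '\n';  // 42
    return 0;
}
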
+ String string = static_cast &>(value).value(); + assert_cast(column).insertData(string.data(), string.size()); + break; + } + + throw Exception(ErrorCodes::TYPE_MISMATCH, "Type mismatch, expected String, got type id = {} for column {}", + toString(value.type()), name); + } + + case ValueType::vtDate: + { + if (value.type() != Poco::MongoDB::ElementTraits::TypeId) + throw Exception(ErrorCodes::TYPE_MISMATCH, "Type mismatch, expected Timestamp, got type id = {} for column {}", + toString(value.type()), name); + + assert_cast(column).getData().push_back(static_cast(DateLUT::instance().toDayNum( + static_cast &>(value).value().epochTime()))); + break; + } + + case ValueType::vtDateTime: + { + if (value.type() != Poco::MongoDB::ElementTraits::TypeId) + throw Exception(ErrorCodes::TYPE_MISMATCH, "Type mismatch, expected Timestamp, got type id = {} for column {}", + toString(value.type()), name); + + assert_cast(column).getData().push_back( + static_cast(static_cast &>(value).value().epochTime())); + break; + } + case ValueType::vtUUID: + { + if (value.type() == Poco::MongoDB::ElementTraits::TypeId) + { + String string = static_cast &>(value).value(); + assert_cast(column).getData().push_back(parse(string)); + } + else if (value.type() == Poco::MongoDB::ElementTraits::TypeId) + { + const Poco::UUID & poco_uuid = static_cast &>(value).value()->uuid(); + UUID uuid = parsePocoUUID(poco_uuid); + assert_cast(column).getData().push_back(uuid); + } + else + throw Exception(ErrorCodes::TYPE_MISMATCH, "Type mismatch, expected String/UUID, got type id = {} for column {}", + toString(value.type()), name); + break; + } + case ValueType::vtArray: + { + if (value.type() != Poco::MongoDB::ElementTraits::TypeId) + throw Exception(ErrorCodes::TYPE_MISMATCH, "Type mismatch, expected Array, got type id = {} for column {}", + toString(value.type()), name); + + size_t expected_dimensions = array_info[idx].num_dimensions; + const auto parse_value = array_info[idx].parser; + std::vector dimensions(expected_dimensions + 1); + + auto array = static_cast &>(value).value(); + + std::vector> arrays; + arrays.emplace_back(&value, 0); + + while (!arrays.empty()) + { + size_t dimension_idx = arrays.size() - 1; + + if (dimension_idx + 1 > expected_dimensions) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Got more dimensions than expected"); + + auto [parent_ptr, child_idx] = arrays.back(); + auto parent = static_cast &>(*parent_ptr).value(); + + if (child_idx >= parent->size()) + { + arrays.pop_back(); + + if (dimension_idx == 0) + break; + + dimensions[dimension_idx].emplace_back(Array(dimensions[dimension_idx + 1].begin(), dimensions[dimension_idx + 1].end())); + dimensions[dimension_idx + 1].clear(); + + continue; + } + + Poco::MongoDB::Element::Ptr child = parent->get(static_cast(child_idx)); + arrays.back().second += 1; + + if (child->type() == Poco::MongoDB::ElementTraits::TypeId) + { + arrays.emplace_back(child.get(), 0); + } + else if (child->type() == Poco::MongoDB::ElementTraits::TypeId) + { + if (dimension_idx + 1 == expected_dimensions) + dimensions[dimension_idx + 1].emplace_back(array_info[idx].default_value); + else + dimensions[dimension_idx + 1].emplace_back(Array()); + } + else if (dimension_idx + 1 == expected_dimensions) + { + dimensions[dimension_idx + 1].emplace_back(parse_value(*child, name)); + } + else + { + throw Exception(ErrorCodes::BAD_ARGUMENTS, + "Got less dimensions than expected. 
({} instead of {})", dimension_idx + 1, expected_dimensions); + } + } + + assert_cast(column).insert(Array(dimensions[1].begin(), dimensions[1].end())); + break; + + } + default: + throw Exception(ErrorCodes::UNKNOWN_TYPE, "Value of unsupported type: {}", column.getName()); + } + } + + void insertDefaultValue(IColumn & column, const IColumn & sample_column) { column.insertFrom(sample_column, 0); } +} + + +bool isMongoDBWireProtocolOld(Poco::MongoDB::Connection & connection_, const std::string & database_name_) +{ + Poco::MongoDB::Database db(database_name_); + Poco::MongoDB::Document::Ptr doc = db.queryServerHello(connection_, false); + + if (doc->exists("maxWireVersion")) + { + auto wire_version = doc->getInteger("maxWireVersion"); + return wire_version < Poco::MongoDB::Database::WireVersion::VER_36; + } + + doc = db.queryServerHello(connection_, true); + if (doc->exists("maxWireVersion")) + { + auto wire_version = doc->getInteger("maxWireVersion"); + return wire_version < Poco::MongoDB::Database::WireVersion::VER_36; + } + + return true; +} + + +MongoDBCursor::MongoDBCursor( + const std::string & database, + const std::string & collection, + const Block & sample_block_to_select, + const Poco::MongoDB::Document & query, + Poco::MongoDB::Connection & connection) + : is_wire_protocol_old(isMongoDBWireProtocolOld(connection, database)) +{ + Poco::MongoDB::Document projection; + + /// Looks like selecting _id column is implicit by default. + if (!sample_block_to_select.has("_id")) + projection.add("_id", 0); + + for (const auto & column : sample_block_to_select) + projection.add(column.name, 1); + + if (is_wire_protocol_old) + { + old_cursor = std::make_unique(database, collection); + old_cursor->query().selector() = query; + old_cursor->query().returnFieldSelector() = projection; + } + else + { + new_cursor = std::make_unique(database, collection); + new_cursor->query().setCommandName(Poco::MongoDB::OpMsgMessage::CMD_FIND); + new_cursor->query().body().addNewDocument("filter") = query; + new_cursor->query().body().addNewDocument("projection") = projection; + } +} + +Poco::MongoDB::Document::Vector MongoDBCursor::nextDocuments(Poco::MongoDB::Connection & connection) +{ + if (is_wire_protocol_old) + { + auto response = old_cursor->next(connection); + cursor_id = response.cursorID(); + return std::move(response.documents()); + } + else + { + auto response = new_cursor->next(connection); + cursor_id = new_cursor->cursorID(); + return std::move(response.documents()); + } +} + +Int64 MongoDBCursor::cursorID() const +{ + return cursor_id; } MongoDBSource::MongoDBSource( - const mongocxx::uri & uri, - const std::string & collection_name, - const bsoncxx::document::view_or_value & query, - const mongocxx::options::find & options, - const Block & sample_block_, - const UInt64 & max_block_size_) - : ISource{sample_block_} - , client{uri} - , database{client.database(uri.database())} - , collection{database.collection(collection_name)} - , cursor{collection.find(query, options)} - , sample_block{sample_block_} + std::shared_ptr & connection_, + const String & database_name_, + const String & collection_name_, + const Poco::MongoDB::Document & query_, + const Block & sample_block, + UInt64 max_block_size_) + : ISource(sample_block.cloneEmpty()) + , connection(connection_) + , cursor(database_name_, collection_name_, sample_block, query_, *connection_) , max_block_size{max_block_size_} { - for (const auto & idx : collections::range(0, sample_block.columns())) - { - auto & sample_column = 
sample_block.getByPosition(idx); + description.init(sample_block); - /// If default value for column was not provided, use default from data type. - if (sample_column.column->empty()) - sample_column.column = sample_column.type->createColumnConstWithDefaultValue(1)->convertToFullColumnIfConst(); - - if (sample_column.type->getTypeId() == TypeIndex::Array) - { - auto type = assert_cast(*sample_column.type).getNestedType(); - size_t dimensions = 0; - while (type->getTypeId() == TypeIndex::Array) - { - type = assert_cast(*type).getNestedType(); - ++dimensions; - } - if (type->isNullable()) - { - type = assert_cast(*type).getNestedType(); - arrays_info[idx] = {std::move(dimensions), {std::move(type), Null()}}; - } - else - arrays_info[idx] = {std::move(dimensions), {std::move(type), type->getDefault()}}; - } - } + for (const auto idx : collections::range(0, description.sample_block.columns())) + if (description.types[idx].first == ExternalResultDescription::ValueType::vtArray) + prepareMongoDBArrayInfo(array_info, idx, description.sample_block.getByPosition(idx).type); } @@ -195,45 +506,72 @@ Chunk MongoDBSource::generate() if (all_read) return {}; - auto columns = sample_block.cloneEmptyColumns(); - size_t size = columns.size(); + MutableColumns columns(description.sample_block.columns()); + const size_t size = columns.size(); + + for (const auto i : collections::range(0, size)) + columns[i] = description.sample_block.getByPosition(i).column->cloneEmpty(); size_t num_rows = 0; - for (const auto & doc : cursor) + while (num_rows < max_block_size) { - for (auto idx : collections::range(0, size)) + auto documents = cursor.nextDocuments(*connection); + + for (auto & document : documents) { - auto & sample_column = sample_block.getByPosition(idx); - auto value = doc[sample_column.name]; - - if (value && value.type() != bsoncxx::type::k_null) + if (document->exists("ok") && document->exists("$err") + && document->exists("code") && document->getInteger("ok") == 0) { - if (sample_column.type->isNullable()) - { - auto & column_nullable = assert_cast(*columns[idx]); - const auto & type_nullable = assert_cast(*sample_column.type); + auto code = document->getInteger("code"); + const Poco::MongoDB::Element::Ptr value = document->get("$err"); + auto message = static_cast &>(*value).value(); + throw Exception(ErrorCodes::MONGODB_ERROR, "Got error from MongoDB: {}, code: {}", message, code); + } + ++num_rows; - insertValue(column_nullable.getNestedColumn(), idx, type_nullable.getNestedType(), sample_column.name, value); - column_nullable.getNullMapData().emplace_back(0); + for (const auto idx : collections::range(0, size)) + { + const auto & name = description.sample_block.getByPosition(idx).name; + + bool exists_in_current_document = document->exists(name); + if (!exists_in_current_document) + { + insertDefaultValue(*columns[idx], *description.sample_block.getByPosition(idx).column); + continue; + } + + const Poco::MongoDB::Element::Ptr value = document->get(name); + + if (value.isNull() || value->type() == Poco::MongoDB::ElementTraits::TypeId) + { + insertDefaultValue(*columns[idx], *description.sample_block.getByPosition(idx).column); } else - insertValue(*columns[idx], idx, sample_column.type, sample_column.name, value); + { + bool is_nullable = description.types[idx].second; + if (is_nullable) + { + ColumnNullable & column_nullable = assert_cast(*columns[idx]); + insertValue(column_nullable.getNestedColumn(), description.types[idx].first, *value, name, array_info, idx); + 
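
Editor note: MongoDBSource::generate() above accumulates rows page by page. It pulls documents with cursor.nextDocuments(), fills missing fields with column defaults, and stops once max_block_size rows have been collected or cursorID() returns 0. The following is a toy sketch of that batching loop; FakeCursor is a made-up stand-in for MongoDBCursor and is not part of the patch.

#include <cstddef>
#include <iostream>
#include <vector>

// Stand-in for MongoDBCursor: returns one "page" of documents per call.
struct FakeCursor
{
    std::vector<std::vector<int>> pages;
    size_t next = 0;
    std::vector<int> nextDocuments() { return next < pages.size() ? pages[next++] : std::vector<int>{}; }
    bool exhausted() const { return next >= pages.size(); }   // analogous to cursorID() == 0
};

std::vector<int> readBlock(FakeCursor & cursor, size_t max_block_size)
{
    std::vector<int> block;
    while (block.size() < max_block_size)
    {
        for (int doc : cursor.nextDocuments())
            block.push_back(doc);
        if (cursor.exhausted())
            break;
    }
    return block;
}

int main()
{
    FakeCursor cursor;
    cursor.pages = {{1, 2, 3}, {4, 5}, {6}};
    while (true)
    {
        auto block = readBlock(cursor, 4);
        if (block.empty())
            break;
        std::cout << "block of " << block.size() << " rows\n";
    }
    return 0;
}

As in the patch, the size check runs between pages, so a block may overshoot max_block_size by part of one page.
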
column_nullable.getNullMapData().emplace_back(0); + } + else + insertValue(*columns[idx], description.types[idx].first, *value, name, array_info, idx); + } } - else - insertDefaultValue(*columns[idx], *sample_column.column); } - if (++num_rows == max_block_size) + if (cursor.cursorID() == 0) + { + all_read = true; break; + } } - if (num_rows < max_block_size) - all_read = true; if (num_rows == 0) return {}; - return Chunk(std::move(columns), std::move(num_rows)); + return Chunk(std::move(columns), num_rows); } } -#endif diff --git a/src/Processors/Sources/MongoDBSource.h b/src/Processors/Sources/MongoDBSource.h index 5e64563a239..fa02dce81f8 100644 --- a/src/Processors/Sources/MongoDBSource.h +++ b/src/Processors/Sources/MongoDBSource.h @@ -1,54 +1,87 @@ #pragma once -#include "config.h" +#include +#include -#if USE_MONGODB +#include #include -#include -#include +#include -#include -#include -#include -#include +#include + + +namespace Poco +{ +namespace MongoDB +{ + class Connection; + class Document; + class Cursor; + class OpMsgCursor; +} +} namespace DB { -/// Creates MongoDB connection and cursor, converts it to a stream of blocks +struct MongoDBArrayInfo +{ + size_t num_dimensions; + Field default_value; + std::function parser; +}; + +void authenticate(Poco::MongoDB::Connection & connection, const std::string & database, const std::string & user, const std::string & password); + +bool isMongoDBWireProtocolOld(Poco::MongoDB::Connection & connection_, const std::string & database_name_); + +class MongoDBCursor +{ +public: + MongoDBCursor( + const std::string & database, + const std::string & collection, + const Block & sample_block_to_select, + const Poco::MongoDB::Document & query, + Poco::MongoDB::Connection & connection); + + Poco::MongoDB::Document::Vector nextDocuments(Poco::MongoDB::Connection & connection); + + Int64 cursorID() const; + +private: + const bool is_wire_protocol_old; + std::unique_ptr old_cursor; + std::unique_ptr new_cursor; + Int64 cursor_id = 0; +}; + +/// Converts MongoDB Cursor to a stream of Blocks class MongoDBSource final : public ISource { public: MongoDBSource( - const mongocxx::uri & uri, - const std::string & collection_name, - const bsoncxx::document::view_or_value & query, - const mongocxx::options::find & options, - const Block & sample_block_, - const UInt64 & max_block_size_); + std::shared_ptr & connection_, + const String & database_name_, + const String & collection_name_, + const Poco::MongoDB::Document & query_, + const Block & sample_block, + UInt64 max_block_size_); ~MongoDBSource() override; String getName() const override { return "MongoDB"; } private: - static void insertDefaultValue(IColumn & column, const IColumn & sample_column); - void insertValue(IColumn & column, const size_t & idx, const DataTypePtr & type, const std::string & name, const bsoncxx::document::element & value); - Chunk generate() override; - mongocxx::client client; - mongocxx::database database; - mongocxx::collection collection; - mongocxx::cursor cursor; - - Block sample_block; - std::unordered_map>> arrays_info; + std::shared_ptr connection; + MongoDBCursor cursor; const UInt64 max_block_size; - - JSONBuilder::FormatSettings json_format_settings = {{}, 0, true, true}; + ExternalResultDescription description; bool all_read = false; + + std::unordered_map array_info; }; } -#endif diff --git a/src/Storages/StorageMongoDB.cpp b/src/Storages/StorageMongoDB.cpp index 64971d0c8cd..d964cd33728 100644 --- a/src/Storages/StorageMongoDB.cpp +++ 
b/src/Storages/StorageMongoDB.cpp @@ -1,37 +1,26 @@ -#include "config.h" - -#if USE_MONGODB -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include #include +#include +#include #include -#include -#include -#include +#include + +#include +#include +#include +#include +#include #include -#include +#include +#include +#include +#include +#include +#include +#include +#include +#include -#include - -using bsoncxx::builder::basic::document; -using bsoncxx::builder::basic::make_document; -using bsoncxx::builder::basic::make_array; -using bsoncxx::builder::basic::kvp; -using bsoncxx::to_json; +#include namespace DB { @@ -39,27 +28,27 @@ namespace DB namespace ErrorCodes { extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; - extern const int NOT_IMPLEMENTED; + extern const int MONGODB_CANNOT_AUTHENTICATE; } -namespace Setting -{ - extern const SettingsBool allow_experimental_analyzer; - extern const SettingsBool mongodb_throw_on_unsupported_query; -} - -using BSONCXXHelper::fieldAsBSONValue; -using BSONCXXHelper::fieldAsOID; - StorageMongoDB::StorageMongoDB( const StorageID & table_id_, - MongoDBConfiguration configuration_, + const std::string & host_, + uint16_t port_, + const std::string & database_name_, + const std::string & collection_name_, + const std::string & username_, + const std::string & password_, + const std::string & options_, const ColumnsDescription & columns_, const ConstraintsDescription & constraints_, const String & comment) - : IStorage{table_id_} - , configuration{std::move(configuration_)} - , log(getLogger("StorageMongoDB (" + table_id_.table_name + ")")) + : IStorage(table_id_) + , database_name(database_name_) + , collection_name(collection_name_) + , username(username_) + , password(password_) + , uri("mongodb://" + host_ + ":" + std::to_string(port_) + "/" + database_name_ + "?" 
+ options_) { StorageInMemoryMetadata storage_metadata; storage_metadata.setColumns(columns_); @@ -68,15 +57,175 @@ StorageMongoDB::StorageMongoDB( setInMemoryMetadata(storage_metadata); } + +void StorageMongoDB::connectIfNotConnected() +{ + std::lock_guard lock{connection_mutex}; + if (!connection) + { + StorageMongoDBSocketFactory factory; + connection = std::make_shared(uri, factory); + } + + if (!authenticated) + { + Poco::URI poco_uri(uri); + auto query_params = poco_uri.getQueryParameters(); + auto auth_source = std::find_if(query_params.begin(), query_params.end(), + [&](const std::pair & param) { return param.first == "authSource"; }); + auto auth_db = database_name; + if (auth_source != query_params.end()) + auth_db = auth_source->second; + + if (!username.empty() && !password.empty()) + { + Poco::MongoDB::Database poco_db(auth_db); + if (!poco_db.authenticate(*connection, username, password, Poco::MongoDB::Database::AUTH_SCRAM_SHA1)) + throw Exception(ErrorCodes::MONGODB_CANNOT_AUTHENTICATE, "Cannot authenticate in MongoDB, incorrect user or password"); + } + + authenticated = true; + } +} + + +class StorageMongoDBSink : public SinkToStorage +{ +public: + explicit StorageMongoDBSink( + const std::string & collection_name_, + const std::string & db_name_, + const StorageMetadataPtr & metadata_snapshot_, + std::shared_ptr connection_) + : SinkToStorage(metadata_snapshot_->getSampleBlock()) + , collection_name(collection_name_) + , db_name(db_name_) + , metadata_snapshot{metadata_snapshot_} + , connection(connection_) + , is_wire_protocol_old(isMongoDBWireProtocolOld(*connection_, db_name)) + { + } + + String getName() const override { return "StorageMongoDBSink"; } + + void consume(Chunk & chunk) override + { + Poco::MongoDB::Database db(db_name); + Poco::MongoDB::Document::Vector documents; + + auto block = getHeader().cloneWithColumns(chunk.getColumns()); + + size_t num_rows = block.rows(); + size_t num_cols = block.columns(); + + const auto columns = block.getColumns(); + const auto data_types = block.getDataTypes(); + const auto data_names = block.getNames(); + + documents.reserve(num_rows); + + for (const auto i : collections::range(0, num_rows)) + { + Poco::MongoDB::Document::Ptr document = new Poco::MongoDB::Document(); + + for (const auto j : collections::range(0, num_cols)) + { + insertValueIntoMongoDB(*document, data_names[j], *data_types[j], *columns[j], i); + } + + documents.push_back(std::move(document)); + } + + if (is_wire_protocol_old) + { + Poco::SharedPtr insert_request = db.createInsertRequest(collection_name); + insert_request->documents() = std::move(documents); + connection->sendRequest(*insert_request); + } + else + { + Poco::SharedPtr insert_request = db.createOpMsgMessage(collection_name); + insert_request->setCommandName(Poco::MongoDB::OpMsgMessage::CMD_INSERT); + insert_request->documents() = std::move(documents); + connection->sendRequest(*insert_request); + } + } + +private: + + void insertValueIntoMongoDB( + Poco::MongoDB::Document & document, + const std::string & name, + const IDataType & data_type, + const IColumn & column, + size_t idx) + { + WhichDataType which(data_type); + + if (which.isArray()) + { + const ColumnArray & column_array = assert_cast(column); + const ColumnArray::Offsets & offsets = column_array.getOffsets(); + + size_t offset = offsets[idx - 1]; + size_t next_offset = offsets[idx]; + + const IColumn & nested_column = column_array.getData(); + + const auto * array_type = assert_cast(&data_type); + const DataTypePtr & nested_type = 
array_type->getNestedType(); + + Poco::MongoDB::Array::Ptr array = new Poco::MongoDB::Array(); + for (size_t i = 0; i + offset < next_offset; ++i) + { + insertValueIntoMongoDB(*array, Poco::NumberFormatter::format(i), *nested_type, nested_column, i + offset); + } + + document.add(name, array); + return; + } + + /// MongoDB does not support UInt64 type, so just cast it to Int64 + if (which.isNativeUInt()) + document.add(name, static_cast(column.getUInt(idx))); + else if (which.isNativeInt()) + document.add(name, static_cast(column.getInt(idx))); + else if (which.isFloat32()) + document.add(name, static_cast(column.getFloat32(idx))); + else if (which.isFloat64()) + document.add(name, column.getFloat64(idx)); + else if (which.isDate()) + document.add(name, Poco::Timestamp(DateLUT::instance().fromDayNum(DayNum(column.getUInt(idx))) * 1000000)); + else if (which.isDateTime()) + document.add(name, Poco::Timestamp(column.getUInt(idx) * 1000000)); + else + { + WriteBufferFromOwnString ostr; + data_type.getDefaultSerialization()->serializeText(column, idx, ostr, FormatSettings{}); + document.add(name, ostr.str()); + } + } + + String collection_name; + String db_name; + StorageMetadataPtr metadata_snapshot; + std::shared_ptr connection; + + const bool is_wire_protocol_old; +}; + + Pipe StorageMongoDB::read( const Names & column_names, const StorageSnapshotPtr & storage_snapshot, - SelectQueryInfo & query_info, - ContextPtr context, + SelectQueryInfo & /*query_info*/, + ContextPtr /*context*/, QueryProcessingStage::Enum /*processed_stage*/, size_t max_block_size, size_t /*num_streams*/) { + connectIfNotConnected(); + storage_snapshot->check(column_names); Block sample_block; @@ -86,329 +235,79 @@ Pipe StorageMongoDB::read( sample_block.insert({ column_data.type, column_data.name }); } - auto options = mongocxx::options::find{}; - - return Pipe(std::make_shared(*configuration.uri, configuration.collection, buildMongoDBQuery(context, options, query_info, sample_block), - std::move(options), sample_block, max_block_size)); + return Pipe(std::make_shared(connection, database_name, collection_name, Poco::MongoDB::Document{}, sample_block, max_block_size)); } -MongoDBConfiguration StorageMongoDB::getConfiguration(ASTs engine_args, ContextPtr context) +SinkToStoragePtr StorageMongoDB::write(const ASTPtr & /* query */, const StorageMetadataPtr & metadata_snapshot, ContextPtr /* context */, bool /*async_insert*/) { - MongoDBConfiguration configuration; + connectIfNotConnected(); + return std::make_shared(collection_name, database_name, metadata_snapshot, connection); +} + +StorageMongoDB::Configuration StorageMongoDB::getConfiguration(ASTs engine_args, ContextPtr context) +{ + Configuration configuration; + if (auto named_collection = tryGetNamedCollectionWithOverrides(engine_args, context)) { - if (named_collection->has("uri")) - { - validateNamedCollection(*named_collection, {"collection"}, {"uri"}); - configuration.uri = std::make_unique(named_collection->get("uri")); - } - else - { - validateNamedCollection(*named_collection, {"host", "port", "user", "password", "database", "collection"}, {"options"}); - String user = named_collection->get("user"); - String auth_string; - if (!user.empty()) - auth_string = fmt::format("{}:{}@", user, named_collection->get("password")); - configuration.uri = std::make_unique(fmt::format("mongodb://{}{}:{}/{}?{}", - auth_string, - named_collection->get("host"), - named_collection->get("port"), - named_collection->get("database"), - named_collection->getOrDefault("options", 
""))); - } - configuration.collection = named_collection->get("collection"); + validateNamedCollection( + *named_collection, + ValidateKeysMultiset{"host", "port", "user", "username", "password", "database", "db", "collection", "table"}, + {"options"}); + + configuration.host = named_collection->getAny({"host", "hostname"}); + configuration.port = static_cast(named_collection->get("port")); + configuration.username = named_collection->getAny({"user", "username"}); + configuration.password = named_collection->get("password"); + configuration.database = named_collection->getAny({"database", "db"}); + configuration.table = named_collection->getAny({"collection", "table"}); + configuration.options = named_collection->getOrDefault("options", ""); } else { + if (engine_args.size() < 5 || engine_args.size() > 6) + throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, + "Storage MongoDB requires from 5 to 6 parameters: " + "MongoDB('host:port', database, collection, 'user', 'password' [, 'options'])."); + for (auto & engine_arg : engine_args) engine_arg = evaluateConstantExpressionOrIdentifierAsLiteral(engine_arg, context); - if (engine_args.size() == 5 || engine_args.size() == 6) - { - configuration.collection = checkAndGetLiteralArgument(engine_args[2], "collection"); + /// 27017 is the default MongoDB port. + auto parsed_host_port = parseAddress(checkAndGetLiteralArgument(engine_args[0], "host:port"), 27017); - String options; - if (engine_args.size() == 6) - options = checkAndGetLiteralArgument(engine_args[5], "options"); + configuration.host = parsed_host_port.first; + configuration.port = parsed_host_port.second; + configuration.database = checkAndGetLiteralArgument(engine_args[1], "database"); + configuration.table = checkAndGetLiteralArgument(engine_args[2], "table"); + configuration.username = checkAndGetLiteralArgument(engine_args[3], "username"); + configuration.password = checkAndGetLiteralArgument(engine_args[4], "password"); - String user = checkAndGetLiteralArgument(engine_args[3], "user"); - String auth_string; - if (!user.empty()) - auth_string = fmt::format("{}:{}@", user, checkAndGetLiteralArgument(engine_args[4], "password")); - auto parsed_host_port = parseAddress(checkAndGetLiteralArgument(engine_args[0], "host:port"), 27017); - configuration.uri = std::make_unique(fmt::format("mongodb://{}{}:{}/{}?{}", - auth_string, - parsed_host_port.first, - parsed_host_port.second, - checkAndGetLiteralArgument(engine_args[1], "database"), - options)); - } - else if (engine_args.size() == 2) - { - configuration.collection = checkAndGetLiteralArgument(engine_args[1], "database"); - configuration.uri = std::make_unique(checkAndGetLiteralArgument(engine_args[0], "host")); - } - else - throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, - "Storage MongoDB requires 2 or from to 5 to 6 parameters: " - "MongoDB('host:port', 'database', 'collection', 'user', 'password' [, 'options']) or MongoDB('uri', 'collection')."); + if (engine_args.size() >= 6) + configuration.options = checkAndGetLiteralArgument(engine_args[5], "database"); } - configuration.checkHosts(context); + context->getRemoteHostFilter().checkHostAndPort(configuration.host, toString(configuration.port)); return configuration; } -std::string mongoFuncName(const std::string & func) -{ - if (func == "equals") - return "$eq"; - if (func == "notEquals") - return "$ne"; - if (func == "greaterThan" || func == "greater") - return "$gt"; - if (func == "lessThan" || func == "less") - return "$lt"; - if (func == 
"greaterOrEquals") - return "$gte"; - if (func == "lessOrEquals") - return "$lte"; - if (func == "in") - return "$in"; - if (func == "notIn") - return "$nin"; - if (func == "lessThan") - return "$lt"; - if (func == "and") - return "$and"; - if (func == "or") - return "$or"; - - return ""; -} - -template -std::optional StorageMongoDB::visitWhereFunction( - const ContextPtr & context, - const FunctionNode * func, - const JoinNode * join_node, - OnError on_error) -{ - if (func->getArguments().getNodes().empty()) - return {}; - - if (const auto & column = func->getArguments().getNodes().at(0)->as()) - { - // Skip unknown columns, which don't belong to the table. - const auto & table = column->getColumnSource()->as(); - if (!table) - return {}; - - // Skip columns from other tables in JOIN queries. - if (table->getStorage()->getStorageID() != this->getStorageID()) - return {}; - if (join_node && column->getColumnSource() != join_node->getLeftTableExpression()) - return {}; - - // Only these function can have exactly one argument and be passed to MongoDB. - if (func->getFunctionName() == "isNull") - return make_document(kvp(column->getColumnName(), make_document(kvp("$eq", bsoncxx::types::b_null{})))); - if (func->getFunctionName() == "isNotNull") - return make_document(kvp(column->getColumnName(), make_document(kvp("$ne", bsoncxx::types::b_null{})))); - if (func->getFunctionName() == "empty") - return make_document(kvp(column->getColumnName(), make_document(kvp("$in", make_array(bsoncxx::types::b_null{}, ""))))); - if (func->getFunctionName() == "notEmpty") - return make_document(kvp(column->getColumnName(), make_document(kvp("$nin", make_array(bsoncxx::types::b_null{}, ""))))); - - auto func_name = mongoFuncName(func->getFunctionName()); - if (func_name.empty()) - { - on_error(func); - return {}; - } - - if (func->getArguments().getNodes().size() == 2) - { - const auto & value = func->getArguments().getNodes().at(1); - - if (const auto & const_value = value->as()) - { - std::optional func_value{}; - if (column->getColumnName() == "_id") - func_value = fieldAsOID(const_value->getValue()); - else - func_value = fieldAsBSONValue(const_value->getValue(), const_value->getResultType()); - - if (func_name == "$in" && func_value->view().type() != bsoncxx::v_noabi::type::k_array) - func_name = "$eq"; - if (func_name == "$nin" && func_value->view().type() != bsoncxx::v_noabi::type::k_array) - func_name = "$ne"; - - return make_document(kvp(column->getColumnName(), make_document(kvp(func_name, std::move(*func_value))))); - } - - if (const auto & func_value = value->as()) - if (const auto & res_value = visitWhereFunction(context, func_value, join_node, on_error); res_value.has_value()) - return make_document(kvp(column->getColumnName(), make_document(kvp(func_name, *res_value)))); - } - } - else - { - auto arr = bsoncxx::builder::basic::array{}; - for (const auto & elem : func->getArguments().getNodes()) - { - if (const auto & elem_func = elem->as()) - if (const auto & res_value = visitWhereFunction(context, elem_func, join_node, on_error); res_value.has_value()) - arr.append(*res_value); - } - if (!arr.view().empty()) - { - auto func_name = mongoFuncName(func->getFunctionName()); - if (func_name.empty()) - { - on_error(func); - return {}; - } - return make_document(kvp(func_name, arr)); - } - } - - on_error(func); - return {}; -} - -bsoncxx::document::value StorageMongoDB::buildMongoDBQuery(const ContextPtr & context, mongocxx::options::find & options, const SelectQueryInfo & query, const Block & 
sample_block) -{ - document projection{}; - for (const auto & column : sample_block) - projection.append(kvp(column.name, 1)); - LOG_DEBUG(log, "MongoDB projection has built: '{}'", bsoncxx::to_json(projection)); - options.projection(projection.extract()); - - bool throw_on_error = context->getSettingsRef()[Setting::mongodb_throw_on_unsupported_query]; - - if (!context->getSettingsRef()[Setting::allow_experimental_analyzer]) - { - if (throw_on_error) - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "MongoDB storage does not support 'allow_experimental_analyzer = 0' setting"); - return make_document(); - } - - const auto & query_tree = query.query_tree->as(); - - if (throw_on_error) - { - if (query_tree.hasHaving()) - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "HAVING section is not supported. You can disable this error with 'SET mongodb_throw_on_unsupported_query=0', but this may cause poor performance, and is highly not recommended"); - if (query_tree.hasGroupBy()) - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "GROUP BY section is not supported. You can disable this error with 'SET mongodb_throw_on_unsupported_query=0', but this may cause poor performance, and is highly not recommended"); - if (query_tree.hasWindow()) - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "WINDOW section is not supported. You can disable this error with 'SET mongodb_throw_on_unsupported_query=0', but this may cause poor performance, and is highly not recommended"); - if (query_tree.hasPrewhere()) - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "PREWHERE section is not supported. You can disable this error with 'SET mongodb_throw_on_unsupported_query=0', but this may cause poor performance, and is highly not recommended"); - if (query_tree.hasLimitBy()) - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "LIMIT BY section is not supported. You can disable this error with 'SET mongodb_throw_on_unsupported_query=0', but this may cause poor performance, and is highly not recommended"); - if (query_tree.hasOffset()) - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "OFFSET section is not supported. You can disable this error with 'SET mongodb_throw_on_unsupported_query=0', but this may cause poor performance, and is highly not recommended"); - } - - auto on_error = [&] (const auto * node) - { - /// Reset limit, because if we omit ORDER BY, it should not be applied - options.limit(0); - - if (throw_on_error) - throw Exception(ErrorCodes::NOT_IMPLEMENTED, - "Only simple queries are supported, failed to convert expression '{}' to MongoDB query. " - "You can disable this restriction with 'SET mongodb_throw_on_unsupported_query=0', to read the full table and process on ClickHouse side (this may cause poor performance)", node->formatASTForErrorMessage()); - LOG_WARNING(log, "Failed to build MongoDB query for '{}'", node ? node->formatASTForErrorMessage() : ""); - }; - - - if (query_tree.hasLimit()) - { - if (const auto & limit = query_tree.getLimit()->as()) - options.limit(limit->getValue().safeGet()); - else - on_error(query_tree.getLimit().get()); - } - - if (query_tree.hasOrderBy()) - { - document sort{}; - for (const auto & child : query_tree.getOrderByNode()->getChildren()) - { - if (const auto * sort_node = child->as()) - { - if (sort_node->withFill() || sort_node->hasFillTo() || sort_node->hasFillFrom() || sort_node->hasFillStep()) - on_error(sort_node); - - if (const auto & column = sort_node->getExpression()->as()) - sort.append(kvp(column->getColumnName(), sort_node->getSortDirection() == SortDirection::ASCENDING ? 
1 : -1)); - else - on_error(sort_node); - } - else - on_error(sort_node); - } - if (!sort.view().empty()) - { - LOG_DEBUG(log, "MongoDB sort has built: '{}'", bsoncxx::to_json(sort)); - options.sort(sort.extract()); - } - } - - if (query_tree.hasWhere()) - { - const auto & join_tree = query_tree.getJoinTree(); - const auto * join_node = join_tree->as(); - bool allow_where = true; - if (join_node) - { - if (join_node->getKind() == JoinKind::Left) - allow_where = join_node->getLeftTableExpression()->isEqual(*query.table_expression); - else if (join_node->getKind() == JoinKind::Right) - allow_where = join_node->getRightTableExpression()->isEqual(*query.table_expression); - else - allow_where = (join_node->getKind() == JoinKind::Inner); - } - - if (allow_where) - { - std::optional filter{}; - if (const auto & func = query_tree.getWhere()->as()) - filter = visitWhereFunction(context, func, join_node, on_error); - - else if (const auto & const_expr = query_tree.getWhere()->as()) - { - if (const_expr->hasSourceExpression()) - { - if (const auto & func_expr = const_expr->getSourceExpression()->as()) - filter = visitWhereFunction(context, func_expr, join_node, on_error); - } - } - - if (filter.has_value()) - { - LOG_DEBUG(log, "MongoDB query has built: '{}'.", bsoncxx::to_json(*filter)); - return std::move(*filter); - } - } - else - on_error(join_node); - } - - return make_document(); -} - void registerStorageMongoDB(StorageFactory & factory) { factory.registerStorage("MongoDB", [](const StorageFactory::Arguments & args) { + auto configuration = StorageMongoDB::getConfiguration(args.engine_args, args.getLocalContext()); + return std::make_shared( args.table_id, - StorageMongoDB::getConfiguration(args.engine_args, args.getLocalContext()), + configuration.host, + configuration.port, + configuration.database, + configuration.table, + configuration.username, + configuration.password, + configuration.options, args.columns, args.constraints, args.comment); @@ -419,4 +318,3 @@ void registerStorageMongoDB(StorageFactory & factory) } } -#endif diff --git a/src/Storages/StorageMongoDB.h b/src/Storages/StorageMongoDB.h index 15052c67e76..36090d4584e 100644 --- a/src/Storages/StorageMongoDB.h +++ b/src/Storages/StorageMongoDB.h @@ -1,56 +1,33 @@ #pragma once -#include "config.h" +#include -#if USE_MONGODB -#include - -#include -#include #include -#include - -#include -#include namespace DB { - -inline mongocxx::instance inst{}; - -struct MongoDBConfiguration -{ - std::unique_ptr uri; - String collection; - - void checkHosts(const ContextPtr & context) const - { - // Because domain records will be resolved inside the driver, we can't check IPs for our restrictions. - for (const auto & host : uri->hosts()) - context->getRemoteHostFilter().checkHostAndPort(host.name, toString(host.port)); - } -}; - -/** Implements storage in the MongoDB database. - * Use ENGINE = MongoDB(host:port, database, collection, user, password [, options]); - * MongoDB(uri, collection); - * Read only. - * One stream only. +/* Implements storage in the MongoDB database. + * Use ENGINE = MongoDB(host:port, database, collection, user, password [, options]); + * Read only. 
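
Editor note: the removed buildMongoDBQuery/mongoFuncName code above pushed simple WHERE, ORDER BY and LIMIT clauses down to MongoDB by translating ClickHouse function names into MongoDB operators. A table-driven sketch of just that name mapping is shown here for illustration; the map contents mirror the removed if-chain, but the helper itself is hypothetical and not part of the patch.

#include <iostream>
#include <string>
#include <unordered_map>

// Map ClickHouse comparison/logic function names to MongoDB operators.
// An empty result means the expression cannot be pushed down.
std::string mongoOperator(const std::string & func)
{
    static const std::unordered_map<std::string, std::string> mapping =
    {
        {"equals", "$eq"}, {"notEquals", "$ne"},
        {"greaterThan", "$gt"}, {"greater", "$gt"},
        {"lessThan", "$lt"}, {"less", "$lt"},
        {"greaterOrEquals", "$gte"}, {"lessOrEquals", "$lte"},
        {"in", "$in"}, {"notIn", "$nin"},
        {"and", "$and"}, {"or", "$or"},
    };
    auto it = mapping.find(func);
    return it == mapping.end() ? "" : it->second;
}

int main()
{
    std::cout << mongoOperator("greaterOrEquals") << '\n';  // $gte
    std::cout << mongoOperator("like") << '\n';             // (empty: falls back to reading the full table)
    return 0;
}
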
*/ + class StorageMongoDB final : public IStorage { public: - static MongoDBConfiguration getConfiguration(ASTs engine_args, ContextPtr context); - StorageMongoDB( const StorageID & table_id_, - MongoDBConfiguration configuration_, + const std::string & host_, + uint16_t port_, + const std::string & database_name_, + const std::string & collection_name_, + const std::string & username_, + const std::string & password_, + const std::string & options_, const ColumnsDescription & columns_, const ConstraintsDescription & constraints_, const String & comment); std::string getName() const override { return "MongoDB"; } - bool isRemote() const override { return true; } Pipe read( const Names & column_names, @@ -61,23 +38,37 @@ public: size_t max_block_size, size_t num_streams) override; + SinkToStoragePtr write( + const ASTPtr & query, + const StorageMetadataPtr & /*metadata_snapshot*/, + ContextPtr context, + bool async_insert) override; + + struct Configuration + { + std::string host; + UInt16 port; + std::string username; + std::string password; + std::string database; + std::string table; + std::string options; + }; + + static Configuration getConfiguration(ASTs engine_args, ContextPtr context); + private: - template - std::optional visitWhereFunction( - const ContextPtr & context, - const FunctionNode * func, - const JoinNode * join_node, - OnError on_error); + void connectIfNotConnected(); - bsoncxx::document::value buildMongoDBQuery( - const ContextPtr & context, - mongocxx::options::find & options, - const SelectQueryInfo & query, - const Block & sample_block); + const std::string database_name; + const std::string collection_name; + const std::string username; + const std::string password; + const std::string uri; - const MongoDBConfiguration configuration; - LoggerPtr log; + std::shared_ptr connection; + bool authenticated = false; + std::mutex connection_mutex; /// Protects the variables `connection` and `authenticated`. }; } -#endif diff --git a/src/Storages/StorageMongoDBPocoLegacy.cpp b/src/Storages/StorageMongoDBPocoLegacy.cpp deleted file mode 100644 index 04f73cb0510..00000000000 --- a/src/Storages/StorageMongoDBPocoLegacy.cpp +++ /dev/null @@ -1,327 +0,0 @@ -#include "config.h" - -#if USE_MONGODB -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -namespace DB -{ - -namespace ErrorCodes -{ - extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; - extern const int MONGODB_CANNOT_AUTHENTICATE; -} - -StorageMongoDBPocoLegacy::StorageMongoDBPocoLegacy( - const StorageID & table_id_, - const std::string & host_, - uint16_t port_, - const std::string & database_name_, - const std::string & collection_name_, - const std::string & username_, - const std::string & password_, - const std::string & options_, - const ColumnsDescription & columns_, - const ConstraintsDescription & constraints_, - const String & comment) - : IStorage(table_id_) - , database_name(database_name_) - , collection_name(collection_name_) - , username(username_) - , password(password_) - , uri("mongodb://" + host_ + ":" + std::to_string(port_) + "/" + database_name_ + "?" 
+ options_) -{ - LOG_WARNING(getLogger("StorageMongoDB (" + table_id_.table_name + ")"), "The deprecated MongoDB integartion implementation is used, this will be removed in next releases."); - - StorageInMemoryMetadata storage_metadata; - storage_metadata.setColumns(columns_); - storage_metadata.setConstraints(constraints_); - storage_metadata.setComment(comment); - setInMemoryMetadata(storage_metadata); -} - - -void StorageMongoDBPocoLegacy::connectIfNotConnected() -{ - std::lock_guard lock{connection_mutex}; - if (!connection) - { - StorageMongoDBPocoLegacySocketFactory factory; - connection = std::make_shared(uri, factory); - } - - if (!authenticated) - { - Poco::URI poco_uri(uri); - auto query_params = poco_uri.getQueryParameters(); - auto auth_source = std::find_if(query_params.begin(), query_params.end(), - [&](const std::pair & param) { return param.first == "authSource"; }); - auto auth_db = database_name; - if (auth_source != query_params.end()) - auth_db = auth_source->second; - - if (!username.empty() && !password.empty()) - { - Poco::MongoDB::Database poco_db(auth_db); - if (!poco_db.authenticate(*connection, username, password, Poco::MongoDB::Database::AUTH_SCRAM_SHA1)) - throw Exception(ErrorCodes::MONGODB_CANNOT_AUTHENTICATE, "Cannot authenticate in MongoDB, incorrect user or password"); - } - - authenticated = true; - } -} - -class StorageMongoDBLegacySink : public SinkToStorage -{ -public: - explicit StorageMongoDBLegacySink( - const std::string & collection_name_, - const std::string & db_name_, - const StorageMetadataPtr & metadata_snapshot_, - std::shared_ptr connection_) - : SinkToStorage(metadata_snapshot_->getSampleBlock()) - , collection_name(collection_name_) - , db_name(db_name_) - , metadata_snapshot{metadata_snapshot_} - , connection(connection_) - , is_wire_protocol_old(isMongoDBWireProtocolOld(*connection_, db_name)) - { - } - - String getName() const override { return "StorageMongoDBLegacySink"; } - - void consume(Chunk & chunk) override - { - Poco::MongoDB::Database db(db_name); - Poco::MongoDB::Document::Vector documents; - - auto block = getHeader().cloneWithColumns(chunk.getColumns()); - - size_t num_rows = block.rows(); - size_t num_cols = block.columns(); - - const auto columns = block.getColumns(); - const auto data_types = block.getDataTypes(); - const auto data_names = block.getNames(); - - documents.reserve(num_rows); - - for (const auto i : collections::range(0, num_rows)) - { - Poco::MongoDB::Document::Ptr document = new Poco::MongoDB::Document(); - - for (const auto j : collections::range(0, num_cols)) - { - insertValueIntoMongoDB(*document, data_names[j], *data_types[j], *columns[j], i); - } - - documents.push_back(std::move(document)); - } - - if (is_wire_protocol_old) - { - Poco::SharedPtr insert_request = db.createInsertRequest(collection_name); - insert_request->documents() = std::move(documents); - connection->sendRequest(*insert_request); - } - else - { - Poco::SharedPtr insert_request = db.createOpMsgMessage(collection_name); - insert_request->setCommandName(Poco::MongoDB::OpMsgMessage::CMD_INSERT); - insert_request->documents() = std::move(documents); - connection->sendRequest(*insert_request); - } - } - -private: - - void insertValueIntoMongoDB( - Poco::MongoDB::Document & document, - const std::string & name, - const IDataType & data_type, - const IColumn & column, - size_t idx) - { - WhichDataType which(data_type); - - if (which.isArray()) - { - const ColumnArray & column_array = assert_cast(column); - const ColumnArray::Offsets & 
offsets = column_array.getOffsets(); - - size_t offset = offsets[idx - 1]; - size_t next_offset = offsets[idx]; - - const IColumn & nested_column = column_array.getData(); - - const auto * array_type = assert_cast(&data_type); - const DataTypePtr & nested_type = array_type->getNestedType(); - - Poco::MongoDB::Array::Ptr array = new Poco::MongoDB::Array(); - for (size_t i = 0; i + offset < next_offset; ++i) - { - insertValueIntoMongoDB(*array, Poco::NumberFormatter::format(i), *nested_type, nested_column, i + offset); - } - - document.add(name, array); - return; - } - - /// MongoDB does not support UInt64 type, so just cast it to Int64 - if (which.isNativeUInt()) - document.add(name, static_cast(column.getUInt(idx))); - else if (which.isNativeInt()) - document.add(name, static_cast(column.getInt(idx))); - else if (which.isFloat32()) - document.add(name, static_cast(column.getFloat32(idx))); - else if (which.isFloat64()) - document.add(name, column.getFloat64(idx)); - else if (which.isDate()) - document.add(name, Poco::Timestamp(DateLUT::instance().fromDayNum(DayNum(column.getUInt(idx))) * 1000000)); - else if (which.isDateTime()) - document.add(name, Poco::Timestamp(column.getUInt(idx) * 1000000)); - else - { - WriteBufferFromOwnString ostr; - data_type.getDefaultSerialization()->serializeText(column, idx, ostr, FormatSettings{}); - document.add(name, ostr.str()); - } - } - - String collection_name; - String db_name; - StorageMetadataPtr metadata_snapshot; - std::shared_ptr connection; - - const bool is_wire_protocol_old; -}; - -Pipe StorageMongoDBPocoLegacy::read( - const Names & column_names, - const StorageSnapshotPtr & storage_snapshot, - SelectQueryInfo & /*query_info*/, - ContextPtr /*context*/, - QueryProcessingStage::Enum /*processed_stage*/, - size_t max_block_size, - size_t /*num_streams*/) -{ - connectIfNotConnected(); - - storage_snapshot->check(column_names); - - Block sample_block; - for (const String & column_name : column_names) - { - auto column_data = storage_snapshot->metadata->getColumns().getPhysical(column_name); - sample_block.insert({ column_data.type, column_data.name }); - } - - return Pipe(std::make_shared(connection, database_name, collection_name, Poco::MongoDB::Document{}, sample_block, max_block_size)); -} - - -SinkToStoragePtr StorageMongoDBPocoLegacy::write(const ASTPtr & /* query */, const StorageMetadataPtr & metadata_snapshot, ContextPtr /* context */, bool /*async_insert*/) -{ - connectIfNotConnected(); - return std::make_shared(collection_name, database_name, metadata_snapshot, connection); -} - -StorageMongoDBPocoLegacy::Configuration StorageMongoDBPocoLegacy::getConfiguration(ASTs engine_args, ContextPtr context) -{ - Configuration configuration; - - if (auto named_collection = tryGetNamedCollectionWithOverrides(engine_args, context)) - { - validateNamedCollection( - *named_collection, - ValidateKeysMultiset{"host", "port", "user", "username", "password", "database", "db", "collection", "table"}, - {"options"}); - - configuration.host = named_collection->getAny({"host", "hostname"}); - configuration.port = static_cast(named_collection->get("port")); - configuration.username = named_collection->getAny({"user", "username"}); - configuration.password = named_collection->get("password"); - configuration.database = named_collection->getAny({"database", "db"}); - configuration.table = named_collection->getAny({"collection", "table"}); - configuration.options = named_collection->getOrDefault("options", ""); - } - else - { - if (engine_args.size() < 5 || 
engine_args.size() > 6) - throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, - "Storage MongoDB requires from 5 to 6 parameters: " - "MongoDB('host:port', database, collection, 'user', 'password' [, 'options'])."); - - for (auto & engine_arg : engine_args) - engine_arg = evaluateConstantExpressionOrIdentifierAsLiteral(engine_arg, context); - - /// 27017 is the default MongoDB port. - auto parsed_host_port = parseAddress(checkAndGetLiteralArgument(engine_args[0], "host:port"), 27017); - - configuration.host = parsed_host_port.first; - configuration.port = parsed_host_port.second; - configuration.database = checkAndGetLiteralArgument(engine_args[1], "database"); - configuration.table = checkAndGetLiteralArgument(engine_args[2], "table"); - configuration.username = checkAndGetLiteralArgument(engine_args[3], "username"); - configuration.password = checkAndGetLiteralArgument(engine_args[4], "password"); - - if (engine_args.size() >= 6) - configuration.options = checkAndGetLiteralArgument(engine_args[5], "database"); - } - - context->getRemoteHostFilter().checkHostAndPort(configuration.host, toString(configuration.port)); - - return configuration; -} - - -void registerStorageMongoDBPocoLegacy(StorageFactory & factory) -{ - factory.registerStorage("MongoDB", [](const StorageFactory::Arguments & args) - { - auto configuration = StorageMongoDBPocoLegacy::getConfiguration(args.engine_args, args.getLocalContext()); - - return std::make_shared( - args.table_id, - configuration.host, - configuration.port, - configuration.database, - configuration.table, - configuration.username, - configuration.password, - configuration.options, - args.columns, - args.constraints, - args.comment); - }, - { - .source_access_type = AccessType::MONGO, - }); -} - -} -#endif diff --git a/src/Storages/StorageMongoDBPocoLegacy.h b/src/Storages/StorageMongoDBPocoLegacy.h deleted file mode 100644 index a5814ccd5dd..00000000000 --- a/src/Storages/StorageMongoDBPocoLegacy.h +++ /dev/null @@ -1,79 +0,0 @@ -#pragma once - -#include "config.h" - -#if USE_MONGODB -#include - -#include - -namespace DB -{ -/* Implements storage in the MongoDB database. - * Use ENGINE = MongoDB(host:port, database, collection, user, password [, options]); - * Read only. - */ - -/// Deprecated, will be removed soon. 
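
Editor note: both connectIfNotConnected implementations above authenticate against the database named by the authSource query parameter when the URI carries one, and otherwise against the database from the URI path. The sketch below shows that lookup with a hand-rolled query-string parser; it is a stand-in for Poco::URI::getQueryParameters() and is not part of the patch.

#include <iostream>
#include <string>

// Return the database to authenticate against: the authSource query
// parameter if present, otherwise the database named in the URI itself.
std::string authDatabase(const std::string & uri, const std::string & default_db)
{
    auto query_pos = uri.find('?');
    if (query_pos == std::string::npos)
        return default_db;

    std::string query = uri.substr(query_pos + 1);
    size_t pos = 0;
    while (pos < query.size())
    {
        size_t end = query.find('&', pos);
        if (end == std::string::npos)
            end = query.size();
        std::string param = query.substr(pos, end - pos);
        const std::string key = "authSource=";
        if (param.compare(0, key.size(), key) == 0)
            return param.substr(key.size());
        pos = end + 1;
    }
    return default_db;
}

int main()
{
    std::cout << authDatabase("mongodb://host:27017/db?tls=true&authSource=admin", "db") << '\n';  // admin
    std::cout << authDatabase("mongodb://host:27017/db?tls=true", "db") << '\n';                   // db
    return 0;
}
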
-class StorageMongoDBPocoLegacy final : public IStorage -{ -public: - StorageMongoDBPocoLegacy( - const StorageID & table_id_, - const std::string & host_, - uint16_t port_, - const std::string & database_name_, - const std::string & collection_name_, - const std::string & username_, - const std::string & password_, - const std::string & options_, - const ColumnsDescription & columns_, - const ConstraintsDescription & constraints_, - const String & comment); - - std::string getName() const override { return "MongoDB"; } - - Pipe read( - const Names & column_names, - const StorageSnapshotPtr & storage_snapshot, - SelectQueryInfo & query_info, - ContextPtr context, - QueryProcessingStage::Enum processed_stage, - size_t max_block_size, - size_t num_streams) override; - - SinkToStoragePtr write( - const ASTPtr & query, - const StorageMetadataPtr & /*metadata_snapshot*/, - ContextPtr context, - bool async_insert) override; - - struct Configuration - { - std::string host; - UInt16 port; - std::string username; - std::string password; - std::string database; - std::string table; - std::string options; - }; - - static Configuration getConfiguration(ASTs engine_args, ContextPtr context); - -private: - void connectIfNotConnected(); - - const std::string database_name; - const std::string collection_name; - const std::string username; - const std::string password; - const std::string uri; - - std::shared_ptr connection; - bool authenticated = false; - std::mutex connection_mutex; /// Protects the variables `connection` and `authenticated`. -}; - -} -#endif diff --git a/src/Storages/StorageMongoDBPocoLegacySocketFactory.cpp b/src/Storages/StorageMongoDBSocketFactory.cpp similarity index 57% rename from src/Storages/StorageMongoDBPocoLegacySocketFactory.cpp rename to src/Storages/StorageMongoDBSocketFactory.cpp index bcfe995dcc6..ef03689e08e 100644 --- a/src/Storages/StorageMongoDBPocoLegacySocketFactory.cpp +++ b/src/Storages/StorageMongoDBSocketFactory.cpp @@ -1,10 +1,9 @@ -#include "config.h" - -#if USE_MONGODB -#include "StorageMongoDBPocoLegacySocketFactory.h" +#include "StorageMongoDBSocketFactory.h" #include +#include "config.h" + #include #include @@ -18,15 +17,15 @@ namespace DB namespace ErrorCodes { -extern const int FEATURE_IS_NOT_ENABLED_AT_BUILD_TIME; + extern const int FEATURE_IS_NOT_ENABLED_AT_BUILD_TIME; } -Poco::Net::StreamSocket StorageMongoDBPocoLegacySocketFactory::createSocket(const std::string & host, int port, Poco::Timespan connectTimeout, bool secure) +Poco::Net::StreamSocket StorageMongoDBSocketFactory::createSocket(const std::string & host, int port, Poco::Timespan connectTimeout, bool secure) { return secure ? 
createSecureSocket(host, port, connectTimeout) : createPlainSocket(host, port, connectTimeout); } -Poco::Net::StreamSocket StorageMongoDBPocoLegacySocketFactory::createPlainSocket(const std::string & host, int port, Poco::Timespan connectTimeout) +Poco::Net::StreamSocket StorageMongoDBSocketFactory::createPlainSocket(const std::string & host, int port, Poco::Timespan connectTimeout) { Poco::Net::SocketAddress address(host, port); Poco::Net::StreamSocket socket; @@ -37,7 +36,7 @@ Poco::Net::StreamSocket StorageMongoDBPocoLegacySocketFactory::createPlainSocket } -Poco::Net::StreamSocket StorageMongoDBPocoLegacySocketFactory::createSecureSocket(const std::string & host [[maybe_unused]], int port [[maybe_unused]], Poco::Timespan connectTimeout [[maybe_unused]]) +Poco::Net::StreamSocket StorageMongoDBSocketFactory::createSecureSocket(const std::string & host [[maybe_unused]], int port [[maybe_unused]], Poco::Timespan connectTimeout [[maybe_unused]]) { #if USE_SSL Poco::Net::SocketAddress address(host, port); @@ -54,4 +53,3 @@ Poco::Net::StreamSocket StorageMongoDBPocoLegacySocketFactory::createSecureSocke } } -#endif diff --git a/src/Storages/StorageMongoDBPocoLegacySocketFactory.h b/src/Storages/StorageMongoDBSocketFactory.h similarity index 72% rename from src/Storages/StorageMongoDBPocoLegacySocketFactory.h rename to src/Storages/StorageMongoDBSocketFactory.h index ee6ee8faa29..2dc9cbddf8a 100644 --- a/src/Storages/StorageMongoDBPocoLegacySocketFactory.h +++ b/src/Storages/StorageMongoDBSocketFactory.h @@ -1,16 +1,12 @@ #pragma once -#include "config.h" - -#if USE_MONGODB #include namespace DB { -/// Deprecated, will be removed soon. -class StorageMongoDBPocoLegacySocketFactory : public Poco::MongoDB::Connection::SocketFactory +class StorageMongoDBSocketFactory : public Poco::MongoDB::Connection::SocketFactory { public: Poco::Net::StreamSocket createSocket(const std::string & host, int port, Poco::Timespan connectTimeout, bool secure) override; @@ -21,4 +17,3 @@ private: }; } -#endif diff --git a/src/Storages/registerStorages.cpp b/src/Storages/registerStorages.cpp index cfd406ccbe2..4ed74763810 100644 --- a/src/Storages/registerStorages.cpp +++ b/src/Storages/registerStorages.cpp @@ -1,5 +1,5 @@ -#include #include +#include #include "config.h" @@ -64,11 +64,7 @@ void registerStorageJDBC(StorageFactory & factory); void registerStorageMySQL(StorageFactory & factory); #endif -#if USE_MONGODB void registerStorageMongoDB(StorageFactory & factory); -void registerStorageMongoDBPocoLegacy(StorageFactory & factory); -#endif - void registerStorageRedis(StorageFactory & factory); @@ -109,7 +105,7 @@ void registerStorageKeeperMap(StorageFactory & factory); void registerStorageObjectStorage(StorageFactory & factory); -void registerStorages(bool use_legacy_mongodb_integration [[maybe_unused]]) +void registerStorages() { auto & factory = StorageFactory::instance(); @@ -171,13 +167,7 @@ void registerStorages(bool use_legacy_mongodb_integration [[maybe_unused]]) registerStorageMySQL(factory); #endif - #if USE_MONGODB - if (use_legacy_mongodb_integration) - registerStorageMongoDBPocoLegacy(factory); - else - registerStorageMongoDB(factory); - #endif - + registerStorageMongoDB(factory); registerStorageRedis(factory); #if USE_RDKAFKA diff --git a/src/Storages/registerStorages.h b/src/Storages/registerStorages.h index 330855a49d0..d44b934ff9f 100644 --- a/src/Storages/registerStorages.h +++ b/src/Storages/registerStorages.h @@ -2,5 +2,5 @@ namespace DB { -void registerStorages(bool 
use_legacy_mongodb_integration); +void registerStorages(); } diff --git a/src/TableFunctions/TableFunctionMongoDB.cpp b/src/TableFunctions/TableFunctionMongoDB.cpp index e13427c1557..94279d1bf6d 100644 --- a/src/TableFunctions/TableFunctionMongoDB.cpp +++ b/src/TableFunctions/TableFunctionMongoDB.cpp @@ -1,6 +1,3 @@ -#include "config.h" - -#if USE_MONGODB #include #include @@ -46,7 +43,7 @@ private: ColumnsDescription getActualTableStructure(ContextPtr context, bool is_insert_query) const override; void parseArguments(const ASTPtr & ast_function, ContextPtr context) override; - std::shared_ptr configuration; + std::optional configuration; String structure; }; @@ -55,8 +52,14 @@ StoragePtr TableFunctionMongoDB::executeImpl(const ASTPtr & /*ast_function*/, { auto columns = getActualTableStructure(context, is_insert_query); auto storage = std::make_shared( - StorageID(getDatabaseName(), table_name), - std::move(*configuration), + StorageID(configuration->database, table_name), + configuration->host, + configuration->port, + configuration->database, + configuration->table, + configuration->username, + configuration->password, + configuration->options, columns, ConstraintsDescription(), String{}); @@ -77,89 +80,49 @@ void TableFunctionMongoDB::parseArguments(const ASTPtr & ast_function, ContextPt ASTs & args = func_args.arguments->children; - if (args.size() == 6 || args.size() == 7) - { - ASTs main_arguments(args.begin(), args.begin() + 5); - - for (size_t i = 5; i < args.size(); ++i) - { - if (const auto * ast_func = typeid_cast(args[i].get())) - { - const auto * args_expr = assert_cast(ast_func->arguments.get()); - auto function_args = args_expr->children; - if (function_args.size() != 2) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Expected key-value defined argument"); - - auto arg_name = function_args[0]->as()->name(); - - if (arg_name == "structure") - structure = checkAndGetLiteralArgument(function_args[1], "structure"); - else if (arg_name == "options") - main_arguments.push_back(function_args[1]); - } - else if (i == 5) - { - structure = checkAndGetLiteralArgument(args[i], "structure"); - } - else if (i == 6) - { - main_arguments.push_back(args[i]); - } - } - - configuration = std::make_shared(StorageMongoDB::getConfiguration(main_arguments, context)); - } - else if (args.size() == 3) - { - ASTs main_arguments(args.begin(), args.begin() + 2); - - for (size_t i = 2; i < args.size(); ++i) - { - if (const auto * ast_func = typeid_cast(args[i].get())) - { - const auto * args_expr = assert_cast(ast_func->arguments.get()); - auto function_args = args_expr->children; - if (function_args.size() != 2) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Expected key-value defined argument"); - - auto arg_name = function_args[0]->as()->name(); - - if (arg_name == "structure") - structure = checkAndGetLiteralArgument(function_args[1], "structure"); - } - else if (i == 2) - { - structure = checkAndGetLiteralArgument(args[i], "structure"); - } - } - - configuration = std::make_shared(StorageMongoDB::getConfiguration(main_arguments, context)); - } - else + if (args.size() < 6 || args.size() > 7) { throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, - "Table function 'mongodb' requires 3 or from 6 to 7 parameters: " - "mongodb('host:port', database, collection, user, password, structure, [, options]) or mongodb(uri, collection, structure)."); + "Table function 'mongodb' requires from 6 to 7 parameters: " + "mongodb('host:port', database, collection, 'user', 'password', structure, [, 
'options'])"); } + + ASTs main_arguments(args.begin(), args.begin() + 5); + + for (size_t i = 5; i < args.size(); ++i) + { + if (const auto * ast_func = typeid_cast(args[i].get())) + { + const auto * args_expr = assert_cast(ast_func->arguments.get()); + auto function_args = args_expr->children; + if (function_args.size() != 2) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Expected key-value defined argument"); + + auto arg_name = function_args[0]->as()->name(); + + if (arg_name == "structure") + structure = checkAndGetLiteralArgument(function_args[1], "structure"); + else if (arg_name == "options") + main_arguments.push_back(function_args[1]); + } + else if (i == 5) + { + structure = checkAndGetLiteralArgument(args[i], "structure"); + } + else if (i == 6) + { + main_arguments.push_back(args[i]); + } + } + + configuration = StorageMongoDB::getConfiguration(main_arguments, context); } } void registerTableFunctionMongoDB(TableFunctionFactory & factory) { - factory.registerFunction( - { - .documentation = - { - .description = "Allows get data from MongoDB collection.", - .examples = { - {"Fetch collection by URI", "SELECT * FROM mongodb('mongodb://root:clickhouse@localhost:27017/database', 'example_collection', 'key UInt64, data String')", ""}, - {"Fetch collection over TLS", "SELECT * FROM mongodb('localhost:27017', 'database', 'example_collection', 'root', 'clickhouse', 'key UInt64, data String', 'tls=true')", ""}, - }, - .categories = {"Integration"}, - }, - }); + factory.registerFunction(); } } -#endif diff --git a/src/TableFunctions/TableFunctionMongoDBPocoLegacy.cpp b/src/TableFunctions/TableFunctionMongoDBPocoLegacy.cpp deleted file mode 100644 index dc1df7fcad8..00000000000 --- a/src/TableFunctions/TableFunctionMongoDBPocoLegacy.cpp +++ /dev/null @@ -1,133 +0,0 @@ -#include "config.h" - -#if USE_MONGODB -#include - -#include - -#include - -#include -#include - -#include -#include -#include -#include -#include - - -namespace DB -{ - -namespace ErrorCodes -{ - extern const int BAD_ARGUMENTS; - extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; -} - -namespace -{ - -/// Deprecated, will be removed soon. 
-class TableFunctionMongoDBPocoLegacy : public ITableFunction -{ -public: - static constexpr auto name = "mongodb"; - - std::string getName() const override { return name; } - -private: - StoragePtr executeImpl( - const ASTPtr & ast_function, ContextPtr context, - const std::string & table_name, ColumnsDescription cached_columns, bool is_insert_query) const override; - - const char * getStorageTypeName() const override { return "MongoDB"; } - - ColumnsDescription getActualTableStructure(ContextPtr context, bool is_insert_query) const override; - void parseArguments(const ASTPtr & ast_function, ContextPtr context) override; - - std::optional configuration; - String structure; -}; - -StoragePtr TableFunctionMongoDBPocoLegacy::executeImpl(const ASTPtr & /*ast_function*/, - ContextPtr context, const String & table_name, ColumnsDescription /*cached_columns*/, bool is_insert_query) const -{ - auto columns = getActualTableStructure(context, is_insert_query); - auto storage = std::make_shared( - StorageID(configuration->database, table_name), - configuration->host, - configuration->port, - configuration->database, - configuration->table, - configuration->username, - configuration->password, - configuration->options, - columns, - ConstraintsDescription(), - String{}); - storage->startup(); - return storage; -} - -ColumnsDescription TableFunctionMongoDBPocoLegacy::getActualTableStructure(ContextPtr context, bool /*is_insert_query*/) const -{ - return parseColumnsListFromString(structure, context); -} - -void TableFunctionMongoDBPocoLegacy::parseArguments(const ASTPtr & ast_function, ContextPtr context) -{ - const auto & func_args = ast_function->as(); - if (!func_args.arguments) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Table function 'mongodb' must have arguments."); - - ASTs & args = func_args.arguments->children; - - if (args.size() < 6 || args.size() > 7) - { - throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, - "Table function 'mongodb' requires from 6 to 7 parameters: " - "mongodb('host:port', database, collection, 'user', 'password', structure, [, 'options'])"); - } - - ASTs main_arguments(args.begin(), args.begin() + 5); - - for (size_t i = 5; i < args.size(); ++i) - { - if (const auto * ast_func = typeid_cast(args[i].get())) - { - const auto * args_expr = assert_cast(ast_func->arguments.get()); - auto function_args = args_expr->children; - if (function_args.size() != 2) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Expected key-value defined argument"); - - auto arg_name = function_args[0]->as()->name(); - - if (arg_name == "structure") - structure = checkAndGetLiteralArgument(function_args[1], "structure"); - else if (arg_name == "options") - main_arguments.push_back(function_args[1]); - } - else if (i == 5) - { - structure = checkAndGetLiteralArgument(args[i], "structure"); - } - else if (i == 6) - { - main_arguments.push_back(args[i]); - } - } - - configuration = StorageMongoDBPocoLegacy::getConfiguration(main_arguments, context); -} - -} - -void registerTableFunctionMongoDBPocoLegacy(TableFunctionFactory & factory) -{ - factory.registerFunction(); -} - -} -#endif diff --git a/src/TableFunctions/registerTableFunctions.cpp b/src/TableFunctions/registerTableFunctions.cpp index fbe2c7c59ed..0b21de00f86 100644 --- a/src/TableFunctions/registerTableFunctions.cpp +++ b/src/TableFunctions/registerTableFunctions.cpp @@ -1,9 +1,10 @@ #include "registerTableFunctions.h" #include + namespace DB { -void registerTableFunctions(bool use_legacy_mongodb_integration [[maybe_unused]]) 
+void registerTableFunctions() { auto & factory = TableFunctionFactory::instance(); @@ -22,12 +23,7 @@ void registerTableFunctions(bool use_legacy_mongodb_integration [[maybe_unused]] registerTableFunctionValues(factory); registerTableFunctionInput(factory); registerTableFunctionGenerate(factory); -#if USE_MONGODB - if (use_legacy_mongodb_integration) - registerTableFunctionMongoDBPocoLegacy(factory); - else - registerTableFunctionMongoDB(factory); -#endif + registerTableFunctionMongoDB(factory); registerTableFunctionRedis(factory); registerTableFunctionMergeTreeIndex(factory); registerTableFunctionFuzzQuery(factory); diff --git a/src/TableFunctions/registerTableFunctions.h b/src/TableFunctions/registerTableFunctions.h index e22ba7346fa..1dd6341b67e 100644 --- a/src/TableFunctions/registerTableFunctions.h +++ b/src/TableFunctions/registerTableFunctions.h @@ -20,10 +20,7 @@ void registerTableFunctionURLCluster(TableFunctionFactory & factory); void registerTableFunctionValues(TableFunctionFactory & factory); void registerTableFunctionInput(TableFunctionFactory & factory); void registerTableFunctionGenerate(TableFunctionFactory & factory); -#if USE_MONGODB void registerTableFunctionMongoDB(TableFunctionFactory & factory); -void registerTableFunctionMongoDBPocoLegacy(TableFunctionFactory & factory); -#endif void registerTableFunctionRedis(TableFunctionFactory & factory); void registerTableFunctionMergeTreeIndex(TableFunctionFactory & factory); void registerTableFunctionFuzzQuery(TableFunctionFactory & factory); @@ -73,6 +70,6 @@ void registerDataLakeTableFunctions(TableFunctionFactory & factory); void registerTableFunctionTimeSeries(TableFunctionFactory & factory); -void registerTableFunctions(bool use_legacy_mongodb_integration [[maybe_unused]]); +void registerTableFunctions(); } diff --git a/src/configure_config.cmake b/src/configure_config.cmake index c67f8d290b3..5a1aa179e01 100644 --- a/src/configure_config.cmake +++ b/src/configure_config.cmake @@ -182,9 +182,6 @@ endif() if (TARGET ch_contrib::prometheus_protobufs) set(USE_PROMETHEUS_PROTOBUFS 1) endif() -if (TARGET ch_contrib::mongocxx) - set(USE_MONGODB 1) -endif() if (TARGET ch_contrib::numactl) set(USE_NUMACTL 1) endif() diff --git a/tests/integration/compose/docker_compose_mongo.yml b/tests/integration/compose/docker_compose_mongo.yml index fbbfac17aeb..9a6eae6ca8c 100644 --- a/tests/integration/compose/docker_compose_mongo.yml +++ b/tests/integration/compose/docker_compose_mongo.yml @@ -1,7 +1,7 @@ version: '2.3' services: mongo1: - image: mongo:6.0 + image: mongo:5.0 restart: always environment: MONGO_INITDB_ROOT_USERNAME: root @@ -10,20 +10,8 @@ services: - ${MONGO_EXTERNAL_PORT:-27017}:${MONGO_INTERNAL_PORT:-27017} command: --profile=2 --verbose - mongo_no_cred: - image: mongo:6.0 + mongo2: + image: mongo:5.0 restart: always ports: - ${MONGO_NO_CRED_EXTERNAL_PORT:-27017}:${MONGO_NO_CRED_INTERNAL_PORT:-27017} - - mongo_secure: - image: mongo:6.0 - restart: always - environment: - MONGO_INITDB_ROOT_USERNAME: root - MONGO_INITDB_ROOT_PASSWORD: clickhouse - volumes: - - ${MONGO_SECURE_CONFIG_DIR:-}:/mongo/ - ports: - - ${MONGO_SECURE_EXTERNAL_PORT:-27017}:${MONGO_SECURE_INTERNAL_PORT:-27017} - command: --config /mongo/mongo_secure.conf --profile=2 --verbose diff --git a/tests/integration/compose/docker_compose_mongo_secure.yml b/tests/integration/compose/docker_compose_mongo_secure.yml new file mode 100644 index 00000000000..193e5d26568 --- /dev/null +++ b/tests/integration/compose/docker_compose_mongo_secure.yml @@ -0,0 +1,13 @@ 
+version: '2.3' +services: + mongo1: + image: mongo:3.6 + restart: always + environment: + MONGO_INITDB_ROOT_USERNAME: root + MONGO_INITDB_ROOT_PASSWORD: clickhouse + volumes: + - ${MONGO_CONFIG_PATH:-}:/mongo/ + ports: + - ${MONGO_EXTERNAL_PORT:-27017}:${MONGO_INTERNAL_PORT:-27017} + command: --config /mongo/mongo_secure.conf --profile=2 --verbose diff --git a/tests/integration/helpers/cluster.py b/tests/integration/helpers/cluster.py index 6aea1b59ae1..ee0a75753d2 100644 --- a/tests/integration/helpers/cluster.py +++ b/tests/integration/helpers/cluster.py @@ -544,6 +544,7 @@ class ClickHouseCluster: self.with_hdfs = False self.with_kerberized_hdfs = False self.with_mongo = False + self.with_mongo_secure = False self.with_net_trics = False self.with_redis = False self.with_cassandra = False @@ -623,10 +624,8 @@ class ClickHouseCluster: # available when with_mongo == True self.mongo_host = "mongo1" self._mongo_port = 0 - self.mongo_no_cred_host = "mongo_no_cred" + self.mongo_no_cred_host = "mongo2" self._mongo_no_cred_port = 0 - self.mongo_secure_host = "mongo_secure" - self._mongo_secure_port = 0 # available when with_cassandra == True self.cassandra_host = "cassandra1" @@ -838,13 +837,6 @@ class ClickHouseCluster: self._mongo_no_cred_port = self.port_pool.get_port() return self._mongo_no_cred_port - @property - def mongo_secure_port(self): - if self._mongo_secure_port: - return self._mongo_secure_port - self._mongo_secure_port = get_free_port() - return self._mongo_secure_port - @property def redis_port(self): if self._redis_port: @@ -1455,6 +1447,29 @@ class ClickHouseCluster: ] return self.base_nats_cmd + def setup_mongo_secure_cmd(self, instance, env_variables, docker_compose_yml_dir): + self.with_mongo = self.with_mongo_secure = True + env_variables["MONGO_HOST"] = self.mongo_host + env_variables["MONGO_EXTERNAL_PORT"] = str(self.mongo_port) + env_variables["MONGO_INTERNAL_PORT"] = "27017" + env_variables["MONGO_CONFIG_PATH"] = HELPERS_DIR + self.base_cmd.extend( + [ + "--file", + p.join(docker_compose_yml_dir, "docker_compose_mongo_secure.yml"), + ] + ) + self.base_mongo_cmd = [ + "docker-compose", + "--env-file", + instance.env_file, + "--project-name", + self.project_name, + "--file", + p.join(docker_compose_yml_dir, "docker_compose_mongo_secure.yml"), + ] + return self.base_mongo_cmd + def setup_mongo_cmd(self, instance, env_variables, docker_compose_yml_dir): self.with_mongo = True env_variables["MONGO_HOST"] = self.mongo_host @@ -1462,11 +1477,6 @@ class ClickHouseCluster: env_variables["MONGO_INTERNAL_PORT"] = "27017" env_variables["MONGO_NO_CRED_EXTERNAL_PORT"] = str(self.mongo_no_cred_port) env_variables["MONGO_NO_CRED_INTERNAL_PORT"] = "27017" - env_variables["MONGO_SECURE_EXTERNAL_PORT"] = str(self.mongo_secure_port) - env_variables["MONGO_SECURE_INTERNAL_PORT"] = "27017" - env_variables["MONGO_SECURE_CONFIG_DIR"] = ( - instance.path + "/" + "mongo_secure_config" - ) self.base_cmd.extend( ["--file", p.join(docker_compose_yml_dir, "docker_compose_mongo.yml")] ) @@ -1699,6 +1709,7 @@ class ClickHouseCluster: with_hdfs=False, with_kerberized_hdfs=False, with_mongo=False, + with_mongo_secure=False, with_nginx=False, with_redis=False, with_minio=False, @@ -1802,7 +1813,7 @@ class ClickHouseCluster: or with_kerberized_hdfs or with_kerberos_kdc or with_kerberized_kafka, - with_mongo=with_mongo, + with_mongo=with_mongo or with_mongo_secure, with_redis=with_redis, with_minio=with_minio, with_azurite=with_azurite, @@ -1977,10 +1988,21 @@ class ClickHouseCluster: ) ) - if with_mongo 
and not self.with_mongo: - cmds.append( - self.setup_mongo_cmd(instance, env_variables, docker_compose_yml_dir) - ) + if (with_mongo or with_mongo_secure) and not ( + self.with_mongo or self.with_mongo_secure + ): + if with_mongo_secure: + cmds.append( + self.setup_mongo_secure_cmd( + instance, env_variables, docker_compose_yml_dir + ) + ) + else: + cmds.append( + self.setup_mongo_cmd( + instance, env_variables, docker_compose_yml_dir + ) + ) if with_coredns and not self.with_coredns: cmds.append( @@ -2604,9 +2626,7 @@ class ClickHouseCluster: while time.time() - start < timeout: try: connection.list_database_names() - logging.debug( - f"Connected to Mongo dbs: {connection.list_database_names()}" - ) + logging.debug(f"Connected to Mongo dbs: {connection.database_names()}") return except Exception as ex: logging.debug("Can't connect to Mongo " + str(ex)) @@ -3060,7 +3080,7 @@ class ClickHouseCluster: logging.debug("Setup Mongo") run_and_check(self.base_mongo_cmd + common_opts) self.up_called = True - self.wait_mongo_to_start(30) + self.wait_mongo_to_start(30, secure=self.with_mongo_secure) if self.with_coredns and self.base_coredns_cmd: logging.debug("Setup coredns") @@ -3507,9 +3527,6 @@ class ClickHouseInstance: self.with_kerberized_hdfs = with_kerberized_hdfs self.with_secrets = with_secrets self.with_mongo = with_mongo - self.mongo_secure_config_dir = p.abspath( - p.join(base_path, "mongo_secure_config") - ) self.with_redis = with_redis self.with_minio = with_minio self.with_azurite = with_azurite @@ -4637,12 +4654,6 @@ class ClickHouseInstance: dirs_exist_ok=True, ) - if self.with_mongo and os.path.exists(self.mongo_secure_config_dir): - shutil.copytree( - self.mongo_secure_config_dir, - p.abspath(p.join(self.path, "mongo_secure_config")), - ) - if self.with_coredns: shutil.copytree( self.coredns_config_dir, p.abspath(p.join(self.path, "coredns_config")) diff --git a/tests/integration/helpers/external_sources.py b/tests/integration/helpers/external_sources.py index 8b5e78a5269..033a2f84fa2 100644 --- a/tests/integration/helpers/external_sources.py +++ b/tests/integration/helpers/external_sources.py @@ -170,7 +170,6 @@ class SourceMongo(ExternalSource): user, password, secure=False, - legacy=False, ): ExternalSource.__init__( self, @@ -183,15 +182,8 @@ class SourceMongo(ExternalSource): password, ) self.secure = secure - self.legacy = legacy def get_source_str(self, table_name): - options = "" - if self.secure and self.legacy: - options = "ssl=true" - if self.secure and not self.legacy: - options = "tls=true&tlsAllowInvalidCertificates=true" - return """ {host} @@ -208,7 +200,7 @@ class SourceMongo(ExternalSource): user=self.user, password=self.password, tbl=table_name, - options=options, + options="ssl=true" if self.secure else "", ) def prepare(self, structure, table_name, cluster): @@ -260,15 +252,9 @@ class SourceMongoURI(SourceMongo): return layout.name == "flat" def get_source_str(self, table_name): - options = "" - if self.secure and self.legacy: - options = "ssl=true" - if self.secure and not self.legacy: - options = "tls=true&tlsAllowInvalidCertificates=true" - return """ - mongodb://{user}:{password}@{host}:{port}/test?{options} + mongodb://{user}:{password}@{host}:{port}/test{options} {tbl} """.format( @@ -277,7 +263,7 @@ class SourceMongoURI(SourceMongo): user=self.user, password=self.password, tbl=table_name, - options=options, + options="?ssl=true" if self.secure else "", ) diff --git a/tests/integration/helpers/mongo_cert.pem b/tests/integration/helpers/mongo_cert.pem new 
file mode 100644 index 00000000000..b9c3b83e473 --- /dev/null +++ b/tests/integration/helpers/mongo_cert.pem @@ -0,0 +1,49 @@ +-----BEGIN PRIVATE KEY----- +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQC3uaPiZMfjPBBE +yDEYJsJIoriu0SaC80uTmPM7bFpnOOXOBvbT4wD2q+uVaLQifKtPTgZAkP5Y3rX8 +S5TOzaLsNp68S1Ja/EzxQUolOSgb4A948TTiUTrTjfMxsPRhmxXTjozWV8CFtL9P +Lg6H+55oyQOJedWYe1kSWRJQayXSweBK5qjOPi2qDF/xdFRQuMivpBUar/b/E9GQ +RKpIaoqMYsl/WF/tReb4N658UxkVlFdR8s48UoA9LfJLMPr4N+QDTfvtcT2bYlpT +4a9b6IXa9BQKCw3AKfTqEPO1XunH//iLNkt1bLtqgZNyT/tY0tLY3EKMXIDuRBVn +KCbfVJ1RAgMBAAECggEAJFCjXiqBgB7tMEtJuPZgTK8tRhC9RgEFHUWMPmCqdeC/ +O7wQqc0i8Z8Fz+CESpTN370Sa0y9mZ9b5WSjI0VuQLaDJcDVpHpeUwmOuFDV5ryh +EkzLITjhIdPbECVkCK7433o7yFpMCaGydtopsSNBKoEhG9ljKOKotoG4pwCm10N5 +K9Qepj82OjRhLkpmuiMFb4/vvOm5dglYmkq5+n/fdUYFtrYr3NvMSCTlietPHDgV +Wb3strvk1g9ARWfa2j7Q6moF2sbyob9zVLoRiD9VgmNB60v7QAJxDctVkbOoDgKp +uN2fkxTHwlOPAO6Zhgnie11jnZr1711TFxmEfMkSKQKBgQDqpB8m0hSJsWLKWxQK +yx+5Xgs+Cr8gb0AYHJQ87obj2XqwXLpBSMrkzTn6vIGRv+NMSfiM/755RUm5aJPY +om+7F68JEIL26ZA7bIfjHhV5o9fvpo+6N6cJyR08Q/KkF8Tej9K4qQec0W/jtKeZ +KAJ1k7/BBuN82iTtEJ3GWBaaRwKBgQDIcwQrGlyyXqnBK25gl/E1Ng+V3p/2sy98 +1BpEshxen4KorHEXCJArydELtvK/ll6agil6QebrJN5dtYOOgvcDTu1mQjdUPN3C +VXpSQ0L8XxfyTNYQTWON9wJGL1pzlTiyHvlSrQFsFWMUoxrqndWIIRtrXjap1npp +HDrcqy2/pwKBgB5fHhUlTjlAd7wfq+l1v2Z8ENJ4C6NEIzS7xkhYy6cEiIf5iLZY +mMKi+eVFrzPRdbdzP7Poipwh5tgT/EcnR3UdLK/srjcNpni6pKA2TatQFOxVT/dX +qsxudtVNKkQpO3dfgHQclPqsdWIxCRye/CqB9Gkk3h9UEUGKTBHXZx2TAoGAF0tG +cLvfidr2Xzxs10zQ+x4NMZ1teX3ZRuhfJRyNr3FZ/cAMZGDaYDxTzsiz7Q/Mbqgx +qcN+0lS2gq1VXHpbukaxz/Bh/agVHUBRtr2aSznBzqafOcXEi/roiL94A3aT4B85 +WiJAyA60NPG/bwRojClMxm1sbNA/6XceYAaEioECgYEA3m88G3UwizfJAsfT5H5K +3HXNYzQ1XGrA8shI0kxeqfNP5qmTfH5q/K2VMWeShT3F/9Ytgc+H8c9XP1wKq7Zl +6AtmdDOeLzHkgwVK0p20/Wh2Qjw4ikJLdM+y8wnfMiwCXWQxoh1X905EwNtyBc2Z +9S3G5CXldFHC4NGdx0vetiE= +-----END PRIVATE KEY----- +-----BEGIN CERTIFICATE----- +MIIDazCCAlOgAwIBAgIUO9pfiBMsADdk9nBMHs10n8kaIr8wDQYJKoZIhvcNAQEL +BQAwRTELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM +GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDAeFw0yMjA0MTIwOTQxNDVaFw0yNTAx +MDUwOTQxNDVaMEUxCzAJBgNVBAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEw +HwYDVQQKDBhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQwggEiMA0GCSqGSIb3DQEB +AQUAA4IBDwAwggEKAoIBAQC3uaPiZMfjPBBEyDEYJsJIoriu0SaC80uTmPM7bFpn +OOXOBvbT4wD2q+uVaLQifKtPTgZAkP5Y3rX8S5TOzaLsNp68S1Ja/EzxQUolOSgb +4A948TTiUTrTjfMxsPRhmxXTjozWV8CFtL9PLg6H+55oyQOJedWYe1kSWRJQayXS +weBK5qjOPi2qDF/xdFRQuMivpBUar/b/E9GQRKpIaoqMYsl/WF/tReb4N658UxkV +lFdR8s48UoA9LfJLMPr4N+QDTfvtcT2bYlpT4a9b6IXa9BQKCw3AKfTqEPO1XunH +//iLNkt1bLtqgZNyT/tY0tLY3EKMXIDuRBVnKCbfVJ1RAgMBAAGjUzBRMB0GA1Ud +DgQWBBSx7Tx8W4c6wjW0qkeG7CAMLY7YkjAfBgNVHSMEGDAWgBSx7Tx8W4c6wjW0 +qkeG7CAMLY7YkjAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQAb +/Up/LEIdwhiN/S3HolxY2D2BrTpKHLQuggBN4+gZlK5OksCkM46LYlP/ruHXCxbR +mQoRhmooj4TvkKyBwzvKq76O+OuRtBhXzRipnBbNTqFPLf9enJUrut8lsFrI+pdl +Nn4PSGGbFPpQ5vFRCktczwwYh0zLuZ/1DbFsbRWlDnZdvoWZdfV0qsvcBRK2DXDI +29xSfw897OpITIkaryZigQVsKv8TXhfsaq9PUuH0/z84S82QG5fR6FzULofgkylb +wXvwaSdcu3k4Lo8j77BEAEvlH8Ynja0eojx5Avl9h4iw/IOQKE4GAg56CzcequLv +clPlaBBWoD6yn+q4NhLF +-----END CERTIFICATE----- diff --git a/tests/integration/test_dictionaries_all_layouts_separate_sources/mongo_secure_config/mongo_secure.conf b/tests/integration/helpers/mongo_secure.conf similarity index 57% rename from tests/integration/test_dictionaries_all_layouts_separate_sources/mongo_secure_config/mongo_secure.conf rename to tests/integration/helpers/mongo_secure.conf index 42d9853c6eb..1128b16b546 100644 --- 
a/tests/integration/test_dictionaries_all_layouts_separate_sources/mongo_secure_config/mongo_secure.conf +++ b/tests/integration/helpers/mongo_secure.conf @@ -1,6 +1,5 @@ net: ssl: mode: requireSSL - PEMKeyFile: /mongo/key.pem - CAFile: /mongo/cert.crt + PEMKeyFile: /mongo/mongo_cert.pem allowConnectionsWithoutCertificates: true diff --git a/tests/integration/test_dictionaries_all_layouts_separate_sources/configs/mongo/legacy.xml b/tests/integration/test_dictionaries_all_layouts_separate_sources/configs/mongo/legacy.xml deleted file mode 100644 index 4ee05db9d1e..00000000000 --- a/tests/integration/test_dictionaries_all_layouts_separate_sources/configs/mongo/legacy.xml +++ /dev/null @@ -1,3 +0,0 @@ - - 1 - diff --git a/tests/integration/test_dictionaries_all_layouts_separate_sources/configs/mongo/new.xml b/tests/integration/test_dictionaries_all_layouts_separate_sources/configs/mongo/new.xml deleted file mode 100644 index eb2d328060a..00000000000 --- a/tests/integration/test_dictionaries_all_layouts_separate_sources/configs/mongo/new.xml +++ /dev/null @@ -1,3 +0,0 @@ - - 0 - diff --git a/tests/integration/test_dictionaries_all_layouts_separate_sources/mongo_secure_config/cert.crt b/tests/integration/test_dictionaries_all_layouts_separate_sources/mongo_secure_config/cert.crt deleted file mode 100644 index 94249109d41..00000000000 --- a/tests/integration/test_dictionaries_all_layouts_separate_sources/mongo_secure_config/cert.crt +++ /dev/null @@ -1,24 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIEGzCCAwOgAwIBAgIUaoGlyuJAyvs6yowFXymfu7seEiUwDQYJKoZIhvcNAQEL -BQAwgZwxCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDbGlja0hvdXNlMRMwEQYDVQQH -DApDbGlja0hvdXNlMREwDwYDVQQKDAhQZXJzb25hbDETMBEGA1UECwwKQ2xpY2tI -b3VzZTEkMCIGCSqGSIb3DQEJARYVY2xpY2tob3VzZUBjbGlja2hvdXNlMRUwEwYD -VQQDDAxtb25nb19zZWN1cmUwHhcNMjQwNTI2MTYwMDMxWhcNMzQwNTI0MTYwMDMx -WjCBnDELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNsaWNrSG91c2UxEzARBgNVBAcM -CkNsaWNrSG91c2UxETAPBgNVBAoMCFBlcnNvbmFsMRMwEQYDVQQLDApDbGlja0hv -dXNlMSQwIgYJKoZIhvcNAQkBFhVjbGlja2hvdXNlQGNsaWNraG91c2UxFTATBgNV -BAMMDG1vbmdvX3NlY3VyZTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB -AJSeQfMG7xd0+kPehYlEsEw0Sm1DB05SXVCEzIX3DFD6XJrd8eeWwlzYaBatkcwj -+8yvqske02X/3YwpzJyFizAqJIoKql5c5Yii2xH1S9PFP0y+LoJre+eQziHyO33t -eeedeGNJ05Sm2ZAzjfMQ7Rdh6S+gdIO4Y102iQR5yr2aTrh7tu7XkNCjwKTqMMvz -SikP1Rft2J6ECim+MjYCCtH/4yXGeEJ5epU4t3y6Q23B2ZEhY+sqUdwgK9pu8oe4 -mkZ1Qvwakc9Qg12owRSDjBBYrPvghXVpkJ2JkgKTrIAIz9tZ53eDVHNXbWMAotov -jEmRSoGIS1yzwmQ9PdxUwYcCAwEAAaNTMFEwHQYDVR0OBBYEFJyz3Kt5XBDg5cvI -0v1ioqejqX+CMB8GA1UdIwQYMBaAFJyz3Kt5XBDg5cvI0v1ioqejqX+CMA8GA1Ud -EwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAHAQFA5VMYvaQFnKtKfHg9TF -qfJ4uM3YsGdgsgmGWgflD1S4Z290H6Q2QvyZAEceTrlJxqArlWlVp5DAU6EeXjEh -QMAgdkJHF1Hg2jsZKPtdkb88UtuzwAME357T8NtEJSHzNE5QqYwlVM71JkWpdqvA -UUdOJbWhhJfowIf4tMmL1DUuIy2qYpoP/tEBXEw9uwpmZqb7KELwT3lRyOMaGFN7 -RHVwbvJWlHiu83QDNaWz6ijQkWl3tCN6TWcFD1qc1x8GpMzjbsAAYbCx7fbHM2LD -9kGSCiyv5K0MLNK5u67RtUFfPHtyD8RA0TtxIZ4PEN/eFANKS2/5NEi1ZuZ5/Pk= ------END CERTIFICATE----- diff --git a/tests/integration/test_dictionaries_all_layouts_separate_sources/mongo_secure_config/key.pem b/tests/integration/test_dictionaries_all_layouts_separate_sources/mongo_secure_config/key.pem deleted file mode 100644 index 9444d19a3d2..00000000000 --- a/tests/integration/test_dictionaries_all_layouts_separate_sources/mongo_secure_config/key.pem +++ /dev/null @@ -1,52 +0,0 @@ ------BEGIN PRIVATE KEY----- -MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCUnkHzBu8XdPpD -3oWJRLBMNEptQwdOUl1QhMyF9wxQ+lya3fHnlsJc2GgWrZHMI/vMr6rJHtNl/92M 
-KcychYswKiSKCqpeXOWIotsR9UvTxT9Mvi6Ca3vnkM4h8jt97XnnnXhjSdOUptmQ -M43zEO0XYekvoHSDuGNdNokEecq9mk64e7bu15DQo8Ck6jDL80opD9UX7diehAop -vjI2AgrR/+MlxnhCeXqVOLd8ukNtwdmRIWPrKlHcICvabvKHuJpGdUL8GpHPUINd -qMEUg4wQWKz74IV1aZCdiZICk6yACM/bWed3g1RzV21jAKLaL4xJkUqBiEtcs8Jk -PT3cVMGHAgMBAAECggEAAul6qiHchB+uQMCWyC5xTeRqAXR3tAv4Tj4fGJjkXY4Z -OrAjr9Kp38EvX1amgvUWV3FT3NMevDf5xd9OdzAA0g0uJIF+mAhYFW48i1FnQcHQ -mOf0zmiZR7l8o7ROb3JvooXHxW+ba/qjGPVwC801gJvruehgbOCRxh9DTRp7sH5K -BmcddhULhKBEQjWUmYNEM3A2axpdi3g1aYKERRLn8J0DXcItTwbxuxbNcs3erl8W -3yyv/JKmqnWF5sNyX3wEWuQcDEZZy+W7Hn4KPMxyU+WA5el5nJ8kFlxhpInmajwu -8Ytn6IEyThyXutVomosVBuP16QORl2Nad0hnQO9toQKBgQDDgiehXr3k2wfVaVOD -PocW4leXausIU2XcCn6FxTG9vLUDMPANw0MxgenC2nrjaUU9J9UjdRYgMcFGWrl4 -E27wEn5e0nZ/Y7F2cfhuOc9vNmZ+eHm2KQRyfAjIVL5Hpldqk2jXyCnLBNeWGHSw -kPQMU+FLqmrOFUvXlD2my+OSHwKBgQDCmgS9r+xFh4BCB9dY6eyQJF/jYmAQHs26 -80WJ6gAhbUw1O71uDtS9/3PZVXwwNCOHrcc49BPrpJdxGPHGvd2Q5y+j5LDDbQSZ -aLTiCZ2B0RM5Bd2dXD8gEHN4WCX7pJ/o4kDi4zONBmp5mg/tFfer5z5IU/1P7Wak -1Mu0JIHzmQKBgDNaNoqeVgaMuYwGtFbez6DlJtiwzrdLIJAheYYte5k4vdruub8D -sNyKIRp7RJgDCJq9obBEiuE98GRIZDrz78nDMco6QcHIL87KtNRO/vtZMKa7gkyk -jXR8u9nS2H/9YyytN3amLsQSq4XTOqM+D7xFNAIp6w/ibB9d4quzFj1FAoGBAKTE -x/LcO897NWuzO/D6z+QUCGR87R15F3SNenmVedrTskz4ciH3yMW+v5ZrPSWLX/IH -f8GHWD6TM+780eoW5L1GIh5BCjHN4rEJ6O3iekxqfD4x6zzL2F8Lztk8uZxh/Uuw -FoSFHybvIcQoYAe8K+KPfzq6cqb0OY6i5n920dkxAoGAJkw6ADqsJfH3NR+bQfgF -oEA1KqriMxyEJm44Y7E80C+iF4iNALF+Er9TSnr4mDxX5e/dW9d1YeS9o0nOfkpF -MaBmJfxqo4QQJLPRaxYQ2Jhfn7irir4BroxeNXQgNNhgSuKIvkfRyGYwl7P0AT4v -8H8rkZGneMD3gLB5MfnRhGk= ------END PRIVATE KEY----- ------BEGIN CERTIFICATE----- -MIIEGzCCAwOgAwIBAgIUaoGlyuJAyvs6yowFXymfu7seEiUwDQYJKoZIhvcNAQEL -BQAwgZwxCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDbGlja0hvdXNlMRMwEQYDVQQH -DApDbGlja0hvdXNlMREwDwYDVQQKDAhQZXJzb25hbDETMBEGA1UECwwKQ2xpY2tI -b3VzZTEkMCIGCSqGSIb3DQEJARYVY2xpY2tob3VzZUBjbGlja2hvdXNlMRUwEwYD -VQQDDAxtb25nb19zZWN1cmUwHhcNMjQwNTI2MTYwMDMxWhcNMzQwNTI0MTYwMDMx -WjCBnDELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNsaWNrSG91c2UxEzARBgNVBAcM -CkNsaWNrSG91c2UxETAPBgNVBAoMCFBlcnNvbmFsMRMwEQYDVQQLDApDbGlja0hv -dXNlMSQwIgYJKoZIhvcNAQkBFhVjbGlja2hvdXNlQGNsaWNraG91c2UxFTATBgNV -BAMMDG1vbmdvX3NlY3VyZTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB -AJSeQfMG7xd0+kPehYlEsEw0Sm1DB05SXVCEzIX3DFD6XJrd8eeWwlzYaBatkcwj -+8yvqske02X/3YwpzJyFizAqJIoKql5c5Yii2xH1S9PFP0y+LoJre+eQziHyO33t -eeedeGNJ05Sm2ZAzjfMQ7Rdh6S+gdIO4Y102iQR5yr2aTrh7tu7XkNCjwKTqMMvz -SikP1Rft2J6ECim+MjYCCtH/4yXGeEJ5epU4t3y6Q23B2ZEhY+sqUdwgK9pu8oe4 -mkZ1Qvwakc9Qg12owRSDjBBYrPvghXVpkJ2JkgKTrIAIz9tZ53eDVHNXbWMAotov -jEmRSoGIS1yzwmQ9PdxUwYcCAwEAAaNTMFEwHQYDVR0OBBYEFJyz3Kt5XBDg5cvI -0v1ioqejqX+CMB8GA1UdIwQYMBaAFJyz3Kt5XBDg5cvI0v1ioqejqX+CMA8GA1Ud -EwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAHAQFA5VMYvaQFnKtKfHg9TF -qfJ4uM3YsGdgsgmGWgflD1S4Z290H6Q2QvyZAEceTrlJxqArlWlVp5DAU6EeXjEh -QMAgdkJHF1Hg2jsZKPtdkb88UtuzwAME357T8NtEJSHzNE5QqYwlVM71JkWpdqvA -UUdOJbWhhJfowIf4tMmL1DUuIy2qYpoP/tEBXEw9uwpmZqb7KELwT3lRyOMaGFN7 -RHVwbvJWlHiu83QDNaWz6ijQkWl3tCN6TWcFD1qc1x8GpMzjbsAAYbCx7fbHM2LD -9kGSCiyv5K0MLNK5u67RtUFfPHtyD8RA0TtxIZ4PEN/eFANKS2/5NEi1ZuZ5/Pk= ------END CERTIFICATE----- diff --git a/tests/integration/test_dictionaries_all_layouts_separate_sources/mongo_secure_config/mongo_cert.pem b/tests/integration/test_dictionaries_all_layouts_separate_sources/mongo_secure_config/mongo_cert.pem deleted file mode 100644 index 9444d19a3d2..00000000000 --- a/tests/integration/test_dictionaries_all_layouts_separate_sources/mongo_secure_config/mongo_cert.pem +++ /dev/null @@ -1,52 +0,0 @@ ------BEGIN PRIVATE KEY----- 
-MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCUnkHzBu8XdPpD -3oWJRLBMNEptQwdOUl1QhMyF9wxQ+lya3fHnlsJc2GgWrZHMI/vMr6rJHtNl/92M -KcychYswKiSKCqpeXOWIotsR9UvTxT9Mvi6Ca3vnkM4h8jt97XnnnXhjSdOUptmQ -M43zEO0XYekvoHSDuGNdNokEecq9mk64e7bu15DQo8Ck6jDL80opD9UX7diehAop -vjI2AgrR/+MlxnhCeXqVOLd8ukNtwdmRIWPrKlHcICvabvKHuJpGdUL8GpHPUINd -qMEUg4wQWKz74IV1aZCdiZICk6yACM/bWed3g1RzV21jAKLaL4xJkUqBiEtcs8Jk -PT3cVMGHAgMBAAECggEAAul6qiHchB+uQMCWyC5xTeRqAXR3tAv4Tj4fGJjkXY4Z -OrAjr9Kp38EvX1amgvUWV3FT3NMevDf5xd9OdzAA0g0uJIF+mAhYFW48i1FnQcHQ -mOf0zmiZR7l8o7ROb3JvooXHxW+ba/qjGPVwC801gJvruehgbOCRxh9DTRp7sH5K -BmcddhULhKBEQjWUmYNEM3A2axpdi3g1aYKERRLn8J0DXcItTwbxuxbNcs3erl8W -3yyv/JKmqnWF5sNyX3wEWuQcDEZZy+W7Hn4KPMxyU+WA5el5nJ8kFlxhpInmajwu -8Ytn6IEyThyXutVomosVBuP16QORl2Nad0hnQO9toQKBgQDDgiehXr3k2wfVaVOD -PocW4leXausIU2XcCn6FxTG9vLUDMPANw0MxgenC2nrjaUU9J9UjdRYgMcFGWrl4 -E27wEn5e0nZ/Y7F2cfhuOc9vNmZ+eHm2KQRyfAjIVL5Hpldqk2jXyCnLBNeWGHSw -kPQMU+FLqmrOFUvXlD2my+OSHwKBgQDCmgS9r+xFh4BCB9dY6eyQJF/jYmAQHs26 -80WJ6gAhbUw1O71uDtS9/3PZVXwwNCOHrcc49BPrpJdxGPHGvd2Q5y+j5LDDbQSZ -aLTiCZ2B0RM5Bd2dXD8gEHN4WCX7pJ/o4kDi4zONBmp5mg/tFfer5z5IU/1P7Wak -1Mu0JIHzmQKBgDNaNoqeVgaMuYwGtFbez6DlJtiwzrdLIJAheYYte5k4vdruub8D -sNyKIRp7RJgDCJq9obBEiuE98GRIZDrz78nDMco6QcHIL87KtNRO/vtZMKa7gkyk -jXR8u9nS2H/9YyytN3amLsQSq4XTOqM+D7xFNAIp6w/ibB9d4quzFj1FAoGBAKTE -x/LcO897NWuzO/D6z+QUCGR87R15F3SNenmVedrTskz4ciH3yMW+v5ZrPSWLX/IH -f8GHWD6TM+780eoW5L1GIh5BCjHN4rEJ6O3iekxqfD4x6zzL2F8Lztk8uZxh/Uuw -FoSFHybvIcQoYAe8K+KPfzq6cqb0OY6i5n920dkxAoGAJkw6ADqsJfH3NR+bQfgF -oEA1KqriMxyEJm44Y7E80C+iF4iNALF+Er9TSnr4mDxX5e/dW9d1YeS9o0nOfkpF -MaBmJfxqo4QQJLPRaxYQ2Jhfn7irir4BroxeNXQgNNhgSuKIvkfRyGYwl7P0AT4v -8H8rkZGneMD3gLB5MfnRhGk= ------END PRIVATE KEY----- ------BEGIN CERTIFICATE----- -MIIEGzCCAwOgAwIBAgIUaoGlyuJAyvs6yowFXymfu7seEiUwDQYJKoZIhvcNAQEL -BQAwgZwxCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDbGlja0hvdXNlMRMwEQYDVQQH -DApDbGlja0hvdXNlMREwDwYDVQQKDAhQZXJzb25hbDETMBEGA1UECwwKQ2xpY2tI -b3VzZTEkMCIGCSqGSIb3DQEJARYVY2xpY2tob3VzZUBjbGlja2hvdXNlMRUwEwYD -VQQDDAxtb25nb19zZWN1cmUwHhcNMjQwNTI2MTYwMDMxWhcNMzQwNTI0MTYwMDMx -WjCBnDELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNsaWNrSG91c2UxEzARBgNVBAcM -CkNsaWNrSG91c2UxETAPBgNVBAoMCFBlcnNvbmFsMRMwEQYDVQQLDApDbGlja0hv -dXNlMSQwIgYJKoZIhvcNAQkBFhVjbGlja2hvdXNlQGNsaWNraG91c2UxFTATBgNV -BAMMDG1vbmdvX3NlY3VyZTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB -AJSeQfMG7xd0+kPehYlEsEw0Sm1DB05SXVCEzIX3DFD6XJrd8eeWwlzYaBatkcwj -+8yvqske02X/3YwpzJyFizAqJIoKql5c5Yii2xH1S9PFP0y+LoJre+eQziHyO33t -eeedeGNJ05Sm2ZAzjfMQ7Rdh6S+gdIO4Y102iQR5yr2aTrh7tu7XkNCjwKTqMMvz -SikP1Rft2J6ECim+MjYCCtH/4yXGeEJ5epU4t3y6Q23B2ZEhY+sqUdwgK9pu8oe4 -mkZ1Qvwakc9Qg12owRSDjBBYrPvghXVpkJ2JkgKTrIAIz9tZ53eDVHNXbWMAotov -jEmRSoGIS1yzwmQ9PdxUwYcCAwEAAaNTMFEwHQYDVR0OBBYEFJyz3Kt5XBDg5cvI -0v1ioqejqX+CMB8GA1UdIwQYMBaAFJyz3Kt5XBDg5cvI0v1ioqejqX+CMA8GA1Ud -EwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAHAQFA5VMYvaQFnKtKfHg9TF -qfJ4uM3YsGdgsgmGWgflD1S4Z290H6Q2QvyZAEceTrlJxqArlWlVp5DAU6EeXjEh -QMAgdkJHF1Hg2jsZKPtdkb88UtuzwAME357T8NtEJSHzNE5QqYwlVM71JkWpdqvA -UUdOJbWhhJfowIf4tMmL1DUuIy2qYpoP/tEBXEw9uwpmZqb7KELwT3lRyOMaGFN7 -RHVwbvJWlHiu83QDNaWz6ijQkWl3tCN6TWcFD1qc1x8GpMzjbsAAYbCx7fbHM2LD -9kGSCiyv5K0MLNK5u67RtUFfPHtyD8RA0TtxIZ4PEN/eFANKS2/5NEi1ZuZ5/Pk= ------END CERTIFICATE----- diff --git a/tests/integration/test_dictionaries_all_layouts_separate_sources/test_mongo.py b/tests/integration/test_dictionaries_all_layouts_separate_sources/test_mongo.py index d1246b7ad1d..973dbfc0429 100644 --- a/tests/integration/test_dictionaries_all_layouts_separate_sources/test_mongo.py +++ 
b/tests/integration/test_dictionaries_all_layouts_separate_sources/test_mongo.py @@ -22,28 +22,22 @@ def secure_connection(request): return request.param -@pytest.fixture(scope="module") -def legacy(request): - return request.param - - @pytest.fixture(scope="module") def cluster(secure_connection): return ClickHouseCluster(__file__) @pytest.fixture(scope="module") -def source(secure_connection, legacy, cluster): +def source(secure_connection, cluster): return SourceMongo( "MongoDB", "localhost", - cluster.mongo_secure_port if secure_connection else cluster.mongo_port, - "mongo_secure" if secure_connection else "mongo1", - 27017, + cluster.mongo_port, + cluster.mongo_host, + "27017", "root", "clickhouse", secure=secure_connection, - legacy=legacy, ) @@ -70,24 +64,18 @@ def ranged_tester(source): @pytest.fixture(scope="module") -def main_config(secure_connection, legacy): - if legacy: - main_config = [os.path.join("configs", "mongo", "legacy.xml")] - else: - main_config = [os.path.join("configs", "mongo", "new.xml")] - +def main_config(secure_connection): + main_config = [] if secure_connection: main_config.append(os.path.join("configs", "disable_ssl_verification.xml")) else: main_config.append(os.path.join("configs", "ssl_verification.xml")) - return main_config @pytest.fixture(scope="module") def started_cluster( secure_connection, - legacy, cluster, main_config, simple_tester, @@ -97,13 +85,12 @@ def started_cluster( SOURCE = SourceMongo( "MongoDB", "localhost", - 27017, - "mongo_secure" if secure_connection else "mongo1", - 27017, + cluster.mongo_port, + cluster.mongo_host, + "27017", "root", "clickhouse", secure=secure_connection, - legacy=legacy, ) dictionaries = simple_tester.list_dictionaries() @@ -112,6 +99,7 @@ def started_cluster( main_configs=main_config, dictionaries=dictionaries, with_mongo=True, + with_mongo_secure=secure_connection, ) try: @@ -128,32 +116,24 @@ def started_cluster( @pytest.mark.parametrize("secure_connection", [False], indirect=["secure_connection"]) -@pytest.mark.parametrize("legacy", [False, True], indirect=["legacy"]) @pytest.mark.parametrize("layout_name", sorted(LAYOUTS_SIMPLE)) -def test_simple(secure_connection, legacy, started_cluster, layout_name, simple_tester): +def test_simple(secure_connection, started_cluster, layout_name, simple_tester): simple_tester.execute(layout_name, started_cluster.instances["node"]) @pytest.mark.parametrize("secure_connection", [False], indirect=["secure_connection"]) -@pytest.mark.parametrize("legacy", [False, True], indirect=["legacy"]) @pytest.mark.parametrize("layout_name", sorted(LAYOUTS_COMPLEX)) -def test_complex( - secure_connection, legacy, started_cluster, layout_name, complex_tester -): +def test_complex(secure_connection, started_cluster, layout_name, complex_tester): complex_tester.execute(layout_name, started_cluster.instances["node"]) @pytest.mark.parametrize("secure_connection", [False], indirect=["secure_connection"]) -@pytest.mark.parametrize("legacy", [False, True], indirect=["legacy"]) @pytest.mark.parametrize("layout_name", sorted(LAYOUTS_RANGED)) -def test_ranged(secure_connection, legacy, started_cluster, layout_name, ranged_tester): +def test_ranged(secure_connection, started_cluster, layout_name, ranged_tester): ranged_tester.execute(layout_name, started_cluster.instances["node"]) @pytest.mark.parametrize("secure_connection", [True], indirect=["secure_connection"]) -@pytest.mark.parametrize("legacy", [False, True], indirect=["legacy"]) @pytest.mark.parametrize("layout_name", 
sorted(LAYOUTS_SIMPLE)) -def test_simple_ssl( - secure_connection, legacy, started_cluster, layout_name, simple_tester -): +def test_simple_ssl(secure_connection, started_cluster, layout_name, simple_tester): simple_tester.execute(layout_name, started_cluster.instances["node"]) diff --git a/tests/integration/test_dictionaries_all_layouts_separate_sources/test_mongo_uri.py b/tests/integration/test_dictionaries_all_layouts_separate_sources/test_mongo_uri.py index 700d4b550c1..22541432259 100644 --- a/tests/integration/test_dictionaries_all_layouts_separate_sources/test_mongo_uri.py +++ b/tests/integration/test_dictionaries_all_layouts_separate_sources/test_mongo_uri.py @@ -16,28 +16,22 @@ def secure_connection(request): return request.param -@pytest.fixture(scope="module") -def legacy(request): - return request.param - - @pytest.fixture(scope="module") def cluster(secure_connection): return ClickHouseCluster(__file__) @pytest.fixture(scope="module") -def source(secure_connection, legacy, cluster): +def source(secure_connection, cluster): return SourceMongoURI( "MongoDB", "localhost", - cluster.mongo_secure_port if secure_connection else cluster.mongo_port, - "mongo_secure" if secure_connection else "mongo1", - 27017, + cluster.mongo_port, + cluster.mongo_host, + "27017", "root", "clickhouse", secure=secure_connection, - legacy=legacy, ) @@ -50,22 +44,17 @@ def simple_tester(source): @pytest.fixture(scope="module") -def main_config(secure_connection, legacy): - if legacy: - main_config = [os.path.join("configs", "mongo", "legacy.xml")] - else: - main_config = [os.path.join("configs", "mongo", "new.xml")] - +def main_config(secure_connection): + main_config = [] if secure_connection: main_config.append(os.path.join("configs", "disable_ssl_verification.xml")) else: main_config.append(os.path.join("configs", "ssl_verification.xml")) - return main_config @pytest.fixture(scope="module") -def started_cluster(secure_connection, legacy, cluster, main_config, simple_tester): +def started_cluster(secure_connection, cluster, main_config, simple_tester): dictionaries = simple_tester.list_dictionaries() node = cluster.add_instance( @@ -73,6 +62,7 @@ def started_cluster(secure_connection, legacy, cluster, main_config, simple_test main_configs=main_config, dictionaries=dictionaries, with_mongo=True, + with_mongo_secure=secure_connection, ) try: cluster.start() @@ -84,16 +74,12 @@ def started_cluster(secure_connection, legacy, cluster, main_config, simple_test # See comment in SourceMongoURI @pytest.mark.parametrize("secure_connection", [False], indirect=["secure_connection"]) -@pytest.mark.parametrize("legacy", [False, True], indirect=["legacy"]) @pytest.mark.parametrize("layout_name", ["flat"]) -def test_simple(secure_connection, legacy, started_cluster, simple_tester, layout_name): +def test_simple(secure_connection, started_cluster, simple_tester, layout_name): simple_tester.execute(layout_name, started_cluster.instances["uri_node"]) @pytest.mark.parametrize("secure_connection", [True], indirect=["secure_connection"]) -@pytest.mark.parametrize("legacy", [False, True], indirect=["legacy"]) @pytest.mark.parametrize("layout_name", ["flat"]) -def test_simple_ssl( - secure_connection, legacy, started_cluster, simple_tester, layout_name -): +def test_simple_ssl(secure_connection, started_cluster, simple_tester, layout_name): simple_tester.execute(layout_name, started_cluster.instances["uri_node"]) diff --git a/tests/integration/test_storage_mongodb/configs/feature_flag.xml 
b/tests/integration/test_storage_mongodb/configs/feature_flag.xml deleted file mode 100644 index eb2d328060a..00000000000 --- a/tests/integration/test_storage_mongodb/configs/feature_flag.xml +++ /dev/null @@ -1,3 +0,0 @@ - - 0 - diff --git a/tests/integration/test_storage_mongodb/configs/named_collections.xml b/tests/integration/test_storage_mongodb/configs/named_collections.xml index f6cd367d7ce..5f7db390982 100644 --- a/tests/integration/test_storage_mongodb/configs/named_collections.xml +++ b/tests/integration/test_storage_mongodb/configs/named_collections.xml @@ -8,10 +8,5 @@ test simple_table - - mongodb://root:clickhouse@mongo1:27017/test - simple_table_uri - - diff --git a/tests/integration/test_dictionaries_all_layouts_separate_sources/mongo_secure_config/config.d/ssl_conf.xml b/tests/integration/test_storage_mongodb/configs_secure/config.d/ssl_conf.xml similarity index 100% rename from tests/integration/test_dictionaries_all_layouts_separate_sources/mongo_secure_config/config.d/ssl_conf.xml rename to tests/integration/test_storage_mongodb/configs_secure/config.d/ssl_conf.xml diff --git a/tests/integration/test_storage_mongodb/mongo_secure_config/cert.crt b/tests/integration/test_storage_mongodb/mongo_secure_config/cert.crt deleted file mode 100644 index 94249109d41..00000000000 --- a/tests/integration/test_storage_mongodb/mongo_secure_config/cert.crt +++ /dev/null @@ -1,24 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIEGzCCAwOgAwIBAgIUaoGlyuJAyvs6yowFXymfu7seEiUwDQYJKoZIhvcNAQEL -BQAwgZwxCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDbGlja0hvdXNlMRMwEQYDVQQH -DApDbGlja0hvdXNlMREwDwYDVQQKDAhQZXJzb25hbDETMBEGA1UECwwKQ2xpY2tI -b3VzZTEkMCIGCSqGSIb3DQEJARYVY2xpY2tob3VzZUBjbGlja2hvdXNlMRUwEwYD -VQQDDAxtb25nb19zZWN1cmUwHhcNMjQwNTI2MTYwMDMxWhcNMzQwNTI0MTYwMDMx -WjCBnDELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNsaWNrSG91c2UxEzARBgNVBAcM -CkNsaWNrSG91c2UxETAPBgNVBAoMCFBlcnNvbmFsMRMwEQYDVQQLDApDbGlja0hv -dXNlMSQwIgYJKoZIhvcNAQkBFhVjbGlja2hvdXNlQGNsaWNraG91c2UxFTATBgNV -BAMMDG1vbmdvX3NlY3VyZTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB -AJSeQfMG7xd0+kPehYlEsEw0Sm1DB05SXVCEzIX3DFD6XJrd8eeWwlzYaBatkcwj -+8yvqske02X/3YwpzJyFizAqJIoKql5c5Yii2xH1S9PFP0y+LoJre+eQziHyO33t -eeedeGNJ05Sm2ZAzjfMQ7Rdh6S+gdIO4Y102iQR5yr2aTrh7tu7XkNCjwKTqMMvz -SikP1Rft2J6ECim+MjYCCtH/4yXGeEJ5epU4t3y6Q23B2ZEhY+sqUdwgK9pu8oe4 -mkZ1Qvwakc9Qg12owRSDjBBYrPvghXVpkJ2JkgKTrIAIz9tZ53eDVHNXbWMAotov -jEmRSoGIS1yzwmQ9PdxUwYcCAwEAAaNTMFEwHQYDVR0OBBYEFJyz3Kt5XBDg5cvI -0v1ioqejqX+CMB8GA1UdIwQYMBaAFJyz3Kt5XBDg5cvI0v1ioqejqX+CMA8GA1Ud -EwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAHAQFA5VMYvaQFnKtKfHg9TF -qfJ4uM3YsGdgsgmGWgflD1S4Z290H6Q2QvyZAEceTrlJxqArlWlVp5DAU6EeXjEh -QMAgdkJHF1Hg2jsZKPtdkb88UtuzwAME357T8NtEJSHzNE5QqYwlVM71JkWpdqvA -UUdOJbWhhJfowIf4tMmL1DUuIy2qYpoP/tEBXEw9uwpmZqb7KELwT3lRyOMaGFN7 -RHVwbvJWlHiu83QDNaWz6ijQkWl3tCN6TWcFD1qc1x8GpMzjbsAAYbCx7fbHM2LD -9kGSCiyv5K0MLNK5u67RtUFfPHtyD8RA0TtxIZ4PEN/eFANKS2/5NEi1ZuZ5/Pk= ------END CERTIFICATE----- diff --git a/tests/integration/test_storage_mongodb/mongo_secure_config/key.pem b/tests/integration/test_storage_mongodb/mongo_secure_config/key.pem deleted file mode 100644 index 9444d19a3d2..00000000000 --- a/tests/integration/test_storage_mongodb/mongo_secure_config/key.pem +++ /dev/null @@ -1,52 +0,0 @@ ------BEGIN PRIVATE KEY----- -MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCUnkHzBu8XdPpD -3oWJRLBMNEptQwdOUl1QhMyF9wxQ+lya3fHnlsJc2GgWrZHMI/vMr6rJHtNl/92M -KcychYswKiSKCqpeXOWIotsR9UvTxT9Mvi6Ca3vnkM4h8jt97XnnnXhjSdOUptmQ -M43zEO0XYekvoHSDuGNdNokEecq9mk64e7bu15DQo8Ck6jDL80opD9UX7diehAop 
-vjI2AgrR/+MlxnhCeXqVOLd8ukNtwdmRIWPrKlHcICvabvKHuJpGdUL8GpHPUINd -qMEUg4wQWKz74IV1aZCdiZICk6yACM/bWed3g1RzV21jAKLaL4xJkUqBiEtcs8Jk -PT3cVMGHAgMBAAECggEAAul6qiHchB+uQMCWyC5xTeRqAXR3tAv4Tj4fGJjkXY4Z -OrAjr9Kp38EvX1amgvUWV3FT3NMevDf5xd9OdzAA0g0uJIF+mAhYFW48i1FnQcHQ -mOf0zmiZR7l8o7ROb3JvooXHxW+ba/qjGPVwC801gJvruehgbOCRxh9DTRp7sH5K -BmcddhULhKBEQjWUmYNEM3A2axpdi3g1aYKERRLn8J0DXcItTwbxuxbNcs3erl8W -3yyv/JKmqnWF5sNyX3wEWuQcDEZZy+W7Hn4KPMxyU+WA5el5nJ8kFlxhpInmajwu -8Ytn6IEyThyXutVomosVBuP16QORl2Nad0hnQO9toQKBgQDDgiehXr3k2wfVaVOD -PocW4leXausIU2XcCn6FxTG9vLUDMPANw0MxgenC2nrjaUU9J9UjdRYgMcFGWrl4 -E27wEn5e0nZ/Y7F2cfhuOc9vNmZ+eHm2KQRyfAjIVL5Hpldqk2jXyCnLBNeWGHSw -kPQMU+FLqmrOFUvXlD2my+OSHwKBgQDCmgS9r+xFh4BCB9dY6eyQJF/jYmAQHs26 -80WJ6gAhbUw1O71uDtS9/3PZVXwwNCOHrcc49BPrpJdxGPHGvd2Q5y+j5LDDbQSZ -aLTiCZ2B0RM5Bd2dXD8gEHN4WCX7pJ/o4kDi4zONBmp5mg/tFfer5z5IU/1P7Wak -1Mu0JIHzmQKBgDNaNoqeVgaMuYwGtFbez6DlJtiwzrdLIJAheYYte5k4vdruub8D -sNyKIRp7RJgDCJq9obBEiuE98GRIZDrz78nDMco6QcHIL87KtNRO/vtZMKa7gkyk -jXR8u9nS2H/9YyytN3amLsQSq4XTOqM+D7xFNAIp6w/ibB9d4quzFj1FAoGBAKTE -x/LcO897NWuzO/D6z+QUCGR87R15F3SNenmVedrTskz4ciH3yMW+v5ZrPSWLX/IH -f8GHWD6TM+780eoW5L1GIh5BCjHN4rEJ6O3iekxqfD4x6zzL2F8Lztk8uZxh/Uuw -FoSFHybvIcQoYAe8K+KPfzq6cqb0OY6i5n920dkxAoGAJkw6ADqsJfH3NR+bQfgF -oEA1KqriMxyEJm44Y7E80C+iF4iNALF+Er9TSnr4mDxX5e/dW9d1YeS9o0nOfkpF -MaBmJfxqo4QQJLPRaxYQ2Jhfn7irir4BroxeNXQgNNhgSuKIvkfRyGYwl7P0AT4v -8H8rkZGneMD3gLB5MfnRhGk= ------END PRIVATE KEY----- ------BEGIN CERTIFICATE----- -MIIEGzCCAwOgAwIBAgIUaoGlyuJAyvs6yowFXymfu7seEiUwDQYJKoZIhvcNAQEL -BQAwgZwxCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDbGlja0hvdXNlMRMwEQYDVQQH -DApDbGlja0hvdXNlMREwDwYDVQQKDAhQZXJzb25hbDETMBEGA1UECwwKQ2xpY2tI -b3VzZTEkMCIGCSqGSIb3DQEJARYVY2xpY2tob3VzZUBjbGlja2hvdXNlMRUwEwYD -VQQDDAxtb25nb19zZWN1cmUwHhcNMjQwNTI2MTYwMDMxWhcNMzQwNTI0MTYwMDMx -WjCBnDELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNsaWNrSG91c2UxEzARBgNVBAcM -CkNsaWNrSG91c2UxETAPBgNVBAoMCFBlcnNvbmFsMRMwEQYDVQQLDApDbGlja0hv -dXNlMSQwIgYJKoZIhvcNAQkBFhVjbGlja2hvdXNlQGNsaWNraG91c2UxFTATBgNV -BAMMDG1vbmdvX3NlY3VyZTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB -AJSeQfMG7xd0+kPehYlEsEw0Sm1DB05SXVCEzIX3DFD6XJrd8eeWwlzYaBatkcwj -+8yvqske02X/3YwpzJyFizAqJIoKql5c5Yii2xH1S9PFP0y+LoJre+eQziHyO33t -eeedeGNJ05Sm2ZAzjfMQ7Rdh6S+gdIO4Y102iQR5yr2aTrh7tu7XkNCjwKTqMMvz -SikP1Rft2J6ECim+MjYCCtH/4yXGeEJ5epU4t3y6Q23B2ZEhY+sqUdwgK9pu8oe4 -mkZ1Qvwakc9Qg12owRSDjBBYrPvghXVpkJ2JkgKTrIAIz9tZ53eDVHNXbWMAotov -jEmRSoGIS1yzwmQ9PdxUwYcCAwEAAaNTMFEwHQYDVR0OBBYEFJyz3Kt5XBDg5cvI -0v1ioqejqX+CMB8GA1UdIwQYMBaAFJyz3Kt5XBDg5cvI0v1ioqejqX+CMA8GA1Ud -EwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAHAQFA5VMYvaQFnKtKfHg9TF -qfJ4uM3YsGdgsgmGWgflD1S4Z290H6Q2QvyZAEceTrlJxqArlWlVp5DAU6EeXjEh -QMAgdkJHF1Hg2jsZKPtdkb88UtuzwAME357T8NtEJSHzNE5QqYwlVM71JkWpdqvA -UUdOJbWhhJfowIf4tMmL1DUuIy2qYpoP/tEBXEw9uwpmZqb7KELwT3lRyOMaGFN7 -RHVwbvJWlHiu83QDNaWz6ijQkWl3tCN6TWcFD1qc1x8GpMzjbsAAYbCx7fbHM2LD -9kGSCiyv5K0MLNK5u67RtUFfPHtyD8RA0TtxIZ4PEN/eFANKS2/5NEi1ZuZ5/Pk= ------END CERTIFICATE----- diff --git a/tests/integration/test_storage_mongodb/mongo_secure_config/mongo_secure.conf b/tests/integration/test_storage_mongodb/mongo_secure_config/mongo_secure.conf deleted file mode 100644 index 42d9853c6eb..00000000000 --- a/tests/integration/test_storage_mongodb/mongo_secure_config/mongo_secure.conf +++ /dev/null @@ -1,6 +0,0 @@ -net: - ssl: - mode: requireSSL - PEMKeyFile: /mongo/key.pem - CAFile: /mongo/cert.crt - allowConnectionsWithoutCertificates: true diff --git a/tests/integration/test_storage_mongodb/test.py b/tests/integration/test_storage_mongodb/test.py index 
ec098d7ac54..6e2d15b03be 100644 --- a/tests/integration/test_storage_mongodb/test.py +++ b/tests/integration/test_storage_mongodb/test.py @@ -1,26 +1,26 @@ -import datetime -import json - -import bson import pymongo -import pytest +from uuid import UUID +import pytest from helpers.client import QueryRuntimeException + from helpers.cluster import ClickHouseCluster +import datetime @pytest.fixture(scope="module") def started_cluster(request): try: cluster = ClickHouseCluster(__file__) - cluster.add_instance( + node = cluster.add_instance( "node", main_configs=[ + "configs_secure/config.d/ssl_conf.xml", "configs/named_collections.xml", - "configs/feature_flag.xml", ], user_configs=["configs/users.xml"], with_mongo=True, + with_mongo_secure=request.param, ) cluster.start() yield cluster @@ -29,27 +29,25 @@ def started_cluster(request): def get_mongo_connection(started_cluster, secure=False, with_credentials=True): - if secure: - return pymongo.MongoClient( - "mongodb://root:clickhouse@localhost:{}/?tls=true&tlsAllowInvalidCertificates=true&tlsAllowInvalidHostnames=true".format( - started_cluster.mongo_secure_port - ) - ) + connection_str = "" if with_credentials: - return pymongo.MongoClient( - "mongodb://root:clickhouse@localhost:{}".format(started_cluster.mongo_port) + connection_str = "mongodb://root:clickhouse@localhost:{}".format( + started_cluster.mongo_port ) - - return pymongo.MongoClient( - "mongodb://localhost:{}".format(started_cluster.mongo_no_cred_port) - ) + else: + connection_str = "mongodb://localhost:{}".format( + started_cluster.mongo_no_cred_port + ) + if secure: + connection_str += "/?tls=true&tlsAllowInvalidCertificates=true" + return pymongo.MongoClient(connection_str) +@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"]) def test_simple_select(started_cluster): mongo_connection = get_mongo_connection(started_cluster) db = mongo_connection["test"] - db.command("dropAllUsersFromDatabase") - db.command("createUser", "root", pwd="clickhouse", roles=["readWrite"]) + db.add_user("root", "clickhouse") simple_mongo_table = db["simple_table"] data = [] for i in range(0, 100): @@ -58,7 +56,7 @@ def test_simple_select(started_cluster): node = started_cluster.instances["node"] node.query( - "CREATE OR REPLACE TABLE simple_mongo_table(key UInt64, data String) ENGINE = MongoDB('mongo1:27017', 'test', 'simple_table', 'root', 'clickhouse')" + "CREATE TABLE simple_mongo_table(key UInt64, data String) ENGINE = MongoDB('mongo1:27017', 'test', 'simple_table', 'root', 'clickhouse')" ) assert node.query("SELECT COUNT() FROM simple_mongo_table") == "100\n" @@ -66,50 +64,42 @@ def test_simple_select(started_cluster): node.query("SELECT sum(key) FROM simple_mongo_table") == str(sum(range(0, 100))) + "\n" ) + assert ( node.query("SELECT data from simple_mongo_table where key = 42") == hex(42 * 42) + "\n" ) - node.query("DROP TABLE simple_mongo_table") simple_mongo_table.drop() -def test_simple_select_uri(started_cluster): +@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"]) +def test_uuid(started_cluster): mongo_connection = get_mongo_connection(started_cluster) db = mongo_connection["test"] - db.command("dropAllUsersFromDatabase") - db.command("createUser", "root", pwd="clickhouse", roles=["readWrite"]) - simple_mongo_table = db["simple_table_uri"] - data = [] - for i in range(0, 100): - data.append({"key": i, "data": hex(i * i)}) - simple_mongo_table.insert_many(data) + db.add_user("root", "clickhouse") + mongo_table = 
db["uuid_table"] + mongo_table.insert({"key": 0, "data": UUID("f0e77736-91d1-48ce-8f01-15123ca1c7ed")}) node = started_cluster.instances["node"] node.query( - "CREATE OR REPLACE TABLE simple_table_uri(key UInt64, data String) ENGINE = MongoDB('mongodb://root:clickhouse@mongo1:27017/test', 'simple_table_uri')" + "CREATE TABLE uuid_mongo_table(key UInt64, data UUID) ENGINE = MongoDB('mongo1:27017', 'test', 'uuid_table', 'root', 'clickhouse')" ) - assert node.query("SELECT COUNT() FROM simple_table_uri") == "100\n" + assert node.query("SELECT COUNT() FROM uuid_mongo_table") == "1\n" assert ( - node.query("SELECT sum(key) FROM simple_table_uri") - == str(sum(range(0, 100))) + "\n" + node.query("SELECT data from uuid_mongo_table where key = 0") + == "f0e77736-91d1-48ce-8f01-15123ca1c7ed\n" ) - assert ( - node.query("SELECT data from simple_table_uri where key = 42") - == hex(42 * 42) + "\n" - ) - - node.query("DROP TABLE simple_table_uri") - simple_mongo_table.drop() + node.query("DROP TABLE uuid_mongo_table") + mongo_table.drop() +@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"]) def test_simple_select_from_view(started_cluster): mongo_connection = get_mongo_connection(started_cluster) db = mongo_connection["test"] - db.command("dropAllUsersFromDatabase") - db.command("createUser", "root", pwd="clickhouse", roles=["readWrite"]) + db.add_user("root", "clickhouse") simple_mongo_table = db["simple_table"] data = [] for i in range(0, 100): @@ -121,7 +111,7 @@ def test_simple_select_from_view(started_cluster): node = started_cluster.instances["node"] node.query( - "CREATE OR REPLACE TABLE simple_mongo_table(key UInt64, data String) ENGINE = MongoDB('mongo1:27017', 'test', 'simple_table_view', 'root', 'clickhouse')" + "CREATE TABLE simple_mongo_table(key UInt64, data String) ENGINE = MongoDB('mongo1:27017', 'test', 'simple_table_view', 'root', 'clickhouse')" ) assert node.query("SELECT COUNT() FROM simple_mongo_table") == "100\n" @@ -129,21 +119,21 @@ def test_simple_select_from_view(started_cluster): node.query("SELECT sum(key) FROM simple_mongo_table") == str(sum(range(0, 100))) + "\n" ) + assert ( node.query("SELECT data from simple_mongo_table where key = 42") == hex(42 * 42) + "\n" ) - node.query("DROP TABLE simple_mongo_table") simple_mongo_table_view.drop() simple_mongo_table.drop() +@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"]) def test_arrays(started_cluster): mongo_connection = get_mongo_connection(started_cluster) db = mongo_connection["test"] - db.command("dropAllUsersFromDatabase") - db.command("createUser", "root", pwd="clickhouse", roles=["readWrite"]) + db.add_user("root", "clickhouse") arrays_mongo_table = db["arrays_table"] data = [] for i in range(0, 100): @@ -162,36 +152,28 @@ def test_arrays(started_cluster): "arr_float64": [i + 1.125, i + 2.5, i + 3.750], "arr_date": [ datetime.datetime(2002, 10, 27), - datetime.datetime(2024, 1, 8, 23, 59, 59), - ], - "arr_date32": [ - datetime.datetime(2002, 10, 27), - datetime.datetime(2024, 1, 8, 23, 59, 59), + datetime.datetime(2024, 1, 8), ], "arr_datetime": [ datetime.datetime(2023, 3, 31, 6, 3, 12), datetime.datetime(1999, 2, 28, 12, 46, 34), ], - "arr_datetime64": [ - datetime.datetime(2023, 3, 31, 6, 3, 12), - datetime.datetime(1999, 2, 28, 12, 46, 34), - ], "arr_string": [str(i + 1), str(i + 2), str(i + 3)], "arr_uuid": [ "f0e77736-91d1-48ce-8f01-15123ca1c7ed", "93376a07-c044-4281-a76e-ad27cf6973c5", ], + "arr_mongo_uuid": [ + 
UUID("f0e77736-91d1-48ce-8f01-15123ca1c7ed"), + UUID("93376a07-c044-4281-a76e-ad27cf6973c5"), + ], "arr_arr_bool": [ [True, False, True], [True], - [None], + [], + None, [False], - ], - "arr_arr_bool_nullable": [ - [True, False, True], - [True, None], [None], - [False], ], "arr_empty": [], "arr_null": None, @@ -203,7 +185,7 @@ def test_arrays(started_cluster): node = started_cluster.instances["node"] node.query( - "CREATE OR REPLACE TABLE arrays_mongo_table(" + "CREATE TABLE arrays_mongo_table(" "key UInt64," "arr_int64 Array(Int64)," "arr_int32 Array(Int32)," @@ -216,13 +198,11 @@ def test_arrays(started_cluster): "arr_float32 Array(Float32)," "arr_float64 Array(Float64)," "arr_date Array(Date)," - "arr_date32 Array(Date32)," "arr_datetime Array(DateTime)," - "arr_datetime64 Array(DateTime64)," "arr_string Array(String)," "arr_uuid Array(UUID)," + "arr_mongo_uuid Array(UUID)," "arr_arr_bool Array(Array(Bool))," - "arr_arr_bool_nullable Array(Array(Nullable(Bool)))," "arr_empty Array(UInt64)," "arr_null Array(UInt64)," "arr_arr_null Array(Array(UInt64))," @@ -254,60 +234,65 @@ def test_arrays(started_cluster): node.query(f"SELECT arr_date FROM arrays_mongo_table WHERE key = 42") == "['2002-10-27','2024-01-08']\n" ) - assert ( - node.query(f"SELECT arr_date32 FROM arrays_mongo_table WHERE key = 42") - == "['2002-10-27','2024-01-08']\n" - ) + assert ( node.query(f"SELECT arr_datetime FROM arrays_mongo_table WHERE key = 42") == "['2023-03-31 06:03:12','1999-02-28 12:46:34']\n" ) - assert ( - node.query(f"SELECT arr_datetime64 FROM arrays_mongo_table WHERE key = 42") - == "['2023-03-31 06:03:12.000','1999-02-28 12:46:34.000']\n" - ) + assert ( node.query(f"SELECT arr_string FROM arrays_mongo_table WHERE key = 42") == "['43','44','45']\n" ) + assert ( node.query(f"SELECT arr_uuid FROM arrays_mongo_table WHERE key = 42") == "['f0e77736-91d1-48ce-8f01-15123ca1c7ed','93376a07-c044-4281-a76e-ad27cf6973c5']\n" ) + + assert ( + node.query(f"SELECT arr_mongo_uuid FROM arrays_mongo_table WHERE key = 42") + == "['f0e77736-91d1-48ce-8f01-15123ca1c7ed','93376a07-c044-4281-a76e-ad27cf6973c5']\n" + ) + assert ( node.query(f"SELECT arr_arr_bool FROM arrays_mongo_table WHERE key = 42") - == "[[true,false,true],[true],[false],[false]]\n" - ) - assert ( - node.query( - f"SELECT arr_arr_bool_nullable FROM arrays_mongo_table WHERE key = 42" - ) - == "[[true,false,true],[true,NULL],[NULL],[false]]\n" + == "[[true,false,true],[true],[],[],[false],[false]]\n" ) + assert ( node.query(f"SELECT arr_empty FROM arrays_mongo_table WHERE key = 42") == "[]\n" ) + assert ( node.query(f"SELECT arr_null FROM arrays_mongo_table WHERE key = 42") == "[]\n" ) + assert ( node.query(f"SELECT arr_arr_null FROM arrays_mongo_table WHERE key = 42") == "[]\n" ) + assert ( node.query(f"SELECT arr_nullable FROM arrays_mongo_table WHERE key = 42") == "[]\n" ) + # Test INSERT SELECT + node.query("INSERT INTO arrays_mongo_table SELECT * FROM arrays_mongo_table") + + assert node.query("SELECT COUNT() FROM arrays_mongo_table") == "200\n" + assert node.query("SELECT COUNT(DISTINCT *) FROM arrays_mongo_table") == "100\n" + node.query("DROP TABLE arrays_mongo_table") arrays_mongo_table.drop() +@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"]) def test_complex_data_type(started_cluster): mongo_connection = get_mongo_connection(started_cluster) db = mongo_connection["test"] - db.command("dropAllUsersFromDatabase") - db.command("createUser", "root", pwd="clickhouse", roles=["readWrite"]) + db.add_user("root", 
"clickhouse") incomplete_mongo_table = db["complex_table"] data = [] for i in range(0, 100): @@ -316,7 +301,7 @@ def test_complex_data_type(started_cluster): node = started_cluster.instances["node"] node.query( - "CREATE OR REPLACE TABLE incomplete_mongo_table(key UInt64, data String) ENGINE = MongoDB('mongo1:27017', 'test', 'complex_table', 'root', 'clickhouse')" + "CREATE TABLE incomplete_mongo_table(key UInt64, data String) ENGINE = MongoDB('mongo1:27017', 'test', 'complex_table', 'root', 'clickhouse')" ) assert node.query("SELECT COUNT() FROM incomplete_mongo_table") == "100\n" @@ -324,28 +309,60 @@ def test_complex_data_type(started_cluster): node.query("SELECT sum(key) FROM incomplete_mongo_table") == str(sum(range(0, 100))) + "\n" ) + assert ( node.query("SELECT data from incomplete_mongo_table where key = 42") == hex(42 * 42) + "\n" ) - node.query("DROP TABLE incomplete_mongo_table") incomplete_mongo_table.drop() +@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"]) +def test_incorrect_data_type(started_cluster): + mongo_connection = get_mongo_connection(started_cluster) + db = mongo_connection["test"] + db.add_user("root", "clickhouse") + strange_mongo_table = db["strange_table"] + data = [] + for i in range(0, 100): + data.append({"key": i, "data": hex(i * i), "aaaa": "Hello"}) + strange_mongo_table.insert_many(data) + + node = started_cluster.instances["node"] + node.query( + "CREATE TABLE strange_mongo_table(key String, data String) ENGINE = MongoDB('mongo1:27017', 'test', 'strange_table', 'root', 'clickhouse')" + ) + + with pytest.raises(QueryRuntimeException): + node.query("SELECT COUNT() FROM strange_mongo_table") + + with pytest.raises(QueryRuntimeException): + node.query("SELECT uniq(key) FROM strange_mongo_table") + + node.query( + "CREATE TABLE strange_mongo_table2(key UInt64, data String, bbbb String) ENGINE = MongoDB('mongo1:27017', 'test', 'strange_table', 'root', 'clickhouse')" + ) + + node.query("DROP TABLE strange_mongo_table") + node.query("DROP TABLE strange_mongo_table2") + strange_mongo_table.drop() + + +@pytest.mark.parametrize("started_cluster", [True], indirect=["started_cluster"]) def test_secure_connection(started_cluster): mongo_connection = get_mongo_connection(started_cluster, secure=True) db = mongo_connection["test"] - db.command("dropAllUsersFromDatabase") - db.command("createUser", "root", pwd="clickhouse", roles=["readWrite"]) + db.add_user("root", "clickhouse") simple_mongo_table = db["simple_table"] data = [] for i in range(0, 100): data.append({"key": i, "data": hex(i * i)}) simple_mongo_table.insert_many(data) + node = started_cluster.instances["node"] node.query( - "CREATE OR REPLACE TABLE simple_mongo_table(key UInt64, data String) ENGINE = MongoDB('mongo_secure:27017', 'test', 'simple_table', 'root', 'clickhouse', 'tls=true&tlsAllowInvalidCertificates=true&tlsAllowInvalidHostnames=true')" + "CREATE TABLE simple_mongo_table(key UInt64, data String) ENGINE = MongoDB('mongo1:27017', 'test', 'simple_table', 'root', 'clickhouse', 'ssl=true')" ) assert node.query("SELECT COUNT() FROM simple_mongo_table") == "100\n" @@ -353,92 +370,20 @@ def test_secure_connection(started_cluster): node.query("SELECT sum(key) FROM simple_mongo_table") == str(sum(range(0, 100))) + "\n" ) + assert ( node.query("SELECT data from simple_mongo_table where key = 42") == hex(42 * 42) + "\n" ) - node.query("DROP TABLE simple_mongo_table") simple_mongo_table.drop() -def test_secure_connection_with_validation(started_cluster): - mongo_connection 
= get_mongo_connection(started_cluster, secure=True) - db = mongo_connection["test"] - db.command("dropAllUsersFromDatabase") - db.command("createUser", "root", pwd="clickhouse", roles=["readWrite"]) - simple_mongo_table = db["simple_table"] - data = [] - for i in range(0, 100): - data.append({"key": i, "data": hex(i * i)}) - simple_mongo_table.insert_many(data) - - node = started_cluster.instances["node"] - node.query( - "CREATE OR REPLACE TABLE simple_mongo_table(key UInt64, data String) ENGINE = MongoDB('mongo_secure:27017', 'test', 'simple_table', 'root', 'clickhouse', 'tls=true')" - ) - - with pytest.raises(QueryRuntimeException): - node.query("SELECT COUNT() FROM simple_mongo_table") - - node.query("DROP TABLE simple_mongo_table") - simple_mongo_table.drop() - - -def test_secure_connection_uri(started_cluster): - mongo_connection = get_mongo_connection(started_cluster, secure=True) - db = mongo_connection["test"] - simple_mongo_table = db["test_secure_connection_uri"] - data = [] - for i in range(0, 100): - data.append({"key": i, "data": hex(i * i)}) - simple_mongo_table.insert_many(data) - node = started_cluster.instances["node"] - node.query( - "CREATE OR REPLACE TABLE test_secure_connection_uri(key UInt64, data String) ENGINE = MongoDB('mongodb://root:clickhouse@mongo_secure:27017/test?tls=true&tlsAllowInvalidCertificates=true&tlsAllowInvalidHostnames=true', 'test_secure_connection_uri')" - ) - - assert node.query("SELECT COUNT() FROM test_secure_connection_uri") == "100\n" - assert ( - node.query("SELECT sum(key) FROM test_secure_connection_uri") - == str(sum(range(0, 100))) + "\n" - ) - assert ( - node.query("SELECT data from test_secure_connection_uri where key = 42") - == hex(42 * 42) + "\n" - ) - - node.query("DROP TABLE test_secure_connection_uri") - simple_mongo_table.drop() - - -def test_secure_connection_uri_with_validation(started_cluster): - mongo_connection = get_mongo_connection(started_cluster, secure=True) - db = mongo_connection["test"] - simple_mongo_table = db["test_secure_connection_uri"] - data = [] - for i in range(0, 100): - data.append({"key": i, "data": hex(i * i)}) - simple_mongo_table.insert_many(data) - - node = started_cluster.instances["node"] - node.query( - "CREATE OR REPLACE TABLE test_secure_connection_uri(key UInt64, data String) ENGINE = MongoDB('mongodb://root:clickhouse@mongo_secure:27017/test?tls=true', 'test_secure_connection_uri')" - ) - - with pytest.raises(QueryRuntimeException): - node.query("SELECT COUNT() FROM test_secure_connection_uri") - - node.query("DROP TABLE test_secure_connection_uri") - - simple_mongo_table.drop() - - +@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"]) def test_predefined_connection_configuration(started_cluster): mongo_connection = get_mongo_connection(started_cluster) db = mongo_connection["test"] - db.command("dropAllUsersFromDatabase") - db.command("createUser", "root", pwd="clickhouse", roles=["readWrite"]) + db.add_user("root", "clickhouse") simple_mongo_table = db["simple_table"] data = [] for i in range(0, 100): @@ -446,38 +391,15 @@ def test_predefined_connection_configuration(started_cluster): simple_mongo_table.insert_many(data) node = started_cluster.instances["node"] + node.query("drop table if exists simple_mongo_table") node.query( - "CREATE OR REPLACE TABLE simple_mongo_table(key UInt64, data String) ENGINE = MongoDB(mongo1)" + "create table simple_mongo_table(key UInt64, data String) engine = MongoDB(mongo1)" ) - assert node.query("SELECT count() FROM 
simple_mongo_table") == "100\n" - - node.query("DROP TABLE simple_mongo_table") - simple_mongo_table.drop() - - -def test_predefined_connection_configuration_uri(started_cluster): - mongo_connection = get_mongo_connection(started_cluster) - db = mongo_connection["test"] - db.command("dropAllUsersFromDatabase") - db.command("createUser", "root", pwd="clickhouse", roles=["readWrite"]) - simple_mongo_table = db["simple_table_uri"] - data = [] - for i in range(0, 100): - data.append({"key": i, "data": hex(i * i)}) - simple_mongo_table.insert_many(data) - - node = started_cluster.instances["node"] - node.query( - "CREATE OR REPLACE TABLE simple_table_uri(key UInt64, data String) ENGINE = MongoDB(mongo1_uri)" - ) - - assert node.query("SELECT count() FROM simple_table_uri") == "100\n" - - node.query("DROP TABLE simple_table_uri") simple_mongo_table.drop() +@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"]) def test_no_credentials(started_cluster): mongo_connection = get_mongo_connection(started_cluster, with_credentials=False) db = mongo_connection["test"] @@ -488,44 +410,21 @@ def test_no_credentials(started_cluster): simple_mongo_table.insert_many(data) node = started_cluster.instances["node"] + node.query("drop table if exists simple_mongo_table_2") node.query( - "CREATE OR REPLACE TABLE simple_table(key UInt64, data String) ENGINE = MongoDB('mongo_no_cred:27017', 'test', 'simple_table', '', '')" + "create table simple_mongo_table_2(key UInt64, data String) engine = MongoDB('mongo2:27017', 'test', 'simple_table', '', '')" ) - - assert node.query("SELECT count() FROM simple_table") == "100\n" - - node.query("DROP TABLE simple_table") - simple_mongo_table.drop() - - -def test_no_credentials_uri(started_cluster): - mongo_connection = get_mongo_connection(started_cluster, with_credentials=False) - db = mongo_connection["test"] - simple_mongo_table = db["simple_table_uri"] - data = [] - for i in range(0, 100): - data.append({"key": i, "data": hex(i * i)}) - simple_mongo_table.insert_many(data) - - node = started_cluster.instances["node"] - node.query( - "CREATE OR REPLACE TABLE simple_table_uri(key UInt64, data String) ENGINE = MongoDB('mongodb://mongo_no_cred:27017/test', 'simple_table_uri')" - ) - - assert node.query("SELECT count() FROM simple_table_uri") == "100\n" - - node.query("DROP TABLE simple_table_uri") + assert node.query("SELECT count() FROM simple_mongo_table_2") == "100\n" simple_mongo_table.drop() +@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"]) def test_auth_source(started_cluster): mongo_connection = get_mongo_connection(started_cluster, with_credentials=False) admin_db = mongo_connection["admin"] - admin_db.command("dropAllUsersFromDatabase") - admin_db.command( - "createUser", + admin_db.add_user( "root", - pwd="clickhouse", + "clickhouse", roles=[{"role": "userAdminAnyDatabase", "db": "admin"}, "readWriteAnyDatabase"], ) simple_mongo_table = admin_db["simple_table"] @@ -541,27 +440,25 @@ def test_auth_source(started_cluster): simple_mongo_table.insert_many(data) node = started_cluster.instances["node"] + node.query("drop table if exists simple_mongo_table_fail") node.query( - "CREATE OR REPLACE TABLE simple_mongo_table_fail(key UInt64, data String) ENGINE = MongoDB('mongo_no_cred:27017', 'test', 'simple_table', 'root', 'clickhouse')" + "create table simple_mongo_table_fail(key UInt64, data String) engine = MongoDB('mongo2:27017', 'test', 'simple_table', 'root', 'clickhouse')" ) - with 
pytest.raises(QueryRuntimeException): - node.query("SELECT count() FROM simple_mongo_table_fail") + node.query_and_get_error("SELECT count() FROM simple_mongo_table_fail") + node.query("drop table if exists simple_mongo_table_ok") node.query( - "CREATE OR REPLACE TABLE simple_mongo_table_ok(key UInt64, data String) ENGINE = MongoDB('mongo_no_cred:27017', 'test', 'simple_table', 'root', 'clickhouse', 'authSource=admin')" + "create table simple_mongo_table_ok(key UInt64, data String) engine = MongoDB('mongo2:27017', 'test', 'simple_table', 'root', 'clickhouse', 'authSource=admin')" ) assert node.query("SELECT count() FROM simple_mongo_table_ok") == "100\n" - - node.query("DROP TABLE simple_mongo_table_fail") - node.query("DROP TABLE simple_mongo_table_ok") simple_mongo_table.drop() +@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"]) def test_missing_columns(started_cluster): mongo_connection = get_mongo_connection(started_cluster) db = mongo_connection["test"] - db.command("dropAllUsersFromDatabase") - db.command("createUser", "root", pwd="clickhouse", roles=["readWrite"]) + db.add_user("root", "clickhouse") simple_mongo_table = db["simple_table"] data = [] for i in range(0, 10): @@ -571,707 +468,40 @@ def test_missing_columns(started_cluster): simple_mongo_table.insert_many(data) node = started_cluster.instances["node"] + node.query("drop table if exists simple_mongo_table") node.query( - """CREATE OR REPLACE TABLE simple_mongo_table( - key UInt64, - data Nullable(String), - not_exists Int64, - not_exists_nullable Nullable(Int64) - ) ENGINE = MongoDB(mongo1)""" + "create table simple_mongo_table(key UInt64, data Nullable(String)) engine = MongoDB(mongo1)" ) - - assert ( - node.query("SELECT count() FROM simple_mongo_table WHERE isNull(data)") - == "10\n" - ) - assert ( - node.query("SELECT count() FROM simple_mongo_table WHERE isNull(not_exists)") - == "0\n" - ) - - node.query("DROP TABLE IF EXISTS simple_mongo_table") + result = node.query("SELECT count() FROM simple_mongo_table WHERE isNull(data)") + assert result == "10\n" simple_mongo_table.drop() -def test_string_casting(started_cluster): +@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"]) +def test_simple_insert_select(started_cluster): mongo_connection = get_mongo_connection(started_cluster) db = mongo_connection["test"] - db.command("dropAllUsersFromDatabase") - db.command("createUser", "root", pwd="clickhouse", roles=["readWrite"]) - string_mongo_table = db["strings_table"] - data = { - "k_boolT": True, - "k_boolF": False, - "k_int32P": 100, - "k_int32N": -100, - "k_int64P": bson.int64.Int64(100), - "k_int64N": bson.int64.Int64(-100), - "k_doubleP": 6.66, - "k_doubleN": -6.66, - "k_date": datetime.datetime(1999, 2, 28, 0, 0, 0), - "k_timestamp": datetime.datetime(1999, 2, 28, 12, 46, 34), - "k_string": "ClickHouse", - "k_document": { - "Hello": "world!", - "meow123": True, - "number": 321, - "doc": {"Hello": "world!"}, - "arr": [{"Hello": "world!"}, 1, "c"], - }, - "k_array": [ - "Hello", - "world!", - {"cat": "meow!"}, - [1, 2, 3], - ], - } - string_mongo_table.insert_one(data) + db.add_user("root", "clickhouse") + simple_mongo_table = db["simple_table"] node = started_cluster.instances["node"] + node.query("DROP TABLE IF EXISTS simple_mongo_table") node.query( - """CREATE OR REPLACE TABLE strings_table ( - _id String, - k_boolT String, - k_boolF String, - k_int32P String, - k_int32N String, - k_int64P String, - k_int64N String, - k_doubleP String, - k_doubleN String, 
- k_date String, - k_timestamp String, - k_string String, - k_document String, - k_array String - ) ENGINE = MongoDB('mongo1:27017', 'test', 'strings_table', 'root', 'clickhouse')""" - ) - - assert node.query("SELECT COUNT() FROM strings_table") == "1\n" - assert node.query("SELECT _id FROM strings_table") != "" - assert node.query("SELECT k_boolT FROM strings_table") == "true\n" - assert node.query("SELECT k_boolF FROM strings_table") == "false\n" - assert node.query("SELECT k_int32P FROM strings_table") == "100\n" - assert node.query("SELECT k_int32N FROM strings_table") == "-100\n" - assert node.query("SELECT k_int64P FROM strings_table") == "100\n" - assert node.query("SELECT k_int64N FROM strings_table") == "-100\n" - assert node.query("SELECT k_doubleP FROM strings_table") == "6.660000\n" - assert node.query("SELECT k_doubleN FROM strings_table") == "-6.660000\n" - assert node.query("SELECT k_date FROM strings_table") == "1999-02-28 00:00:00\n" - assert ( - node.query("SELECT k_timestamp FROM strings_table") == "1999-02-28 12:46:34\n" - ) - assert node.query("SELECT k_string FROM strings_table") == "ClickHouse\n" - assert json.dumps( - json.loads(node.query("SELECT k_document FROM strings_table")) - ) == json.dumps(data["k_document"]) - assert json.dumps( - json.loads(node.query("SELECT k_array FROM strings_table")) - ) == json.dumps(data["k_array"]) - - node.query("DROP TABLE strings_table") - string_mongo_table.drop() - - -def test_dates_casting(started_cluster): - mongo_connection = get_mongo_connection(started_cluster) - db = mongo_connection["test"] - db.command("dropAllUsersFromDatabase") - db.command("createUser", "root", pwd="clickhouse", roles=["readWrite"]) - dates_mongo_table = db["dates_table"] - data = { - "k_dateTime": datetime.datetime(1999, 2, 28, 11, 23, 16), - "k_dateTime64": datetime.datetime(1999, 2, 28, 11, 23, 16), - "k_date": datetime.datetime(1999, 2, 28, 11, 23, 16), - "k_date32": datetime.datetime(1999, 2, 28, 11, 23, 16), - } - dates_mongo_table.insert_one(data) - - node = started_cluster.instances["node"] - node.query( - """CREATE OR REPLACE TABLE dates_table ( - k_dateTime DateTime, - k_dateTime64 DateTime64, - k_date Date, - k_date32 Date32 - ) ENGINE = MongoDB('mongo1:27017', 'test', 'dates_table', 'root', 'clickhouse')""" - ) - - assert node.query("SELECT COUNT() FROM dates_table") == "1\n" - assert ( - node.query("SELECT * FROM dates_table") - == "1999-02-28 11:23:16\t1999-02-28 11:23:16.000\t1999-02-28\t1999-02-28\n" - ) - - node.query("DROP TABLE dates_table") - dates_mongo_table.drop() - - -def test_order_by(started_cluster): - mongo_connection = get_mongo_connection(started_cluster) - db = mongo_connection["test"] - db.command("dropAllUsersFromDatabase") - db.command("createUser", "root", pwd="clickhouse", roles=["readWrite"]) - sort_mongo_table = db["sort_table"] - data = [] - for i in range(1, 31): - for d in range(1, 31): - data.append( - { - "keyInt": i, - "keyFloat": i + (d * 0.001), - "keyDateTime": datetime.datetime(1999, 12, i, 11, 23, 16), - "keyDate": datetime.datetime(1999, 12, i, 11, 23, 16), - } - ) - sort_mongo_table.insert_many(data) - - node = started_cluster.instances["node"] - node.query( - """CREATE OR REPLACE TABLE sort_table ( - keyInt Int, - keyFloat Float64, - keyDateTime DateTime, - keyDate Date - ) ENGINE = MongoDB('mongo1:27017', 'test', 'sort_table', 'root', 'clickhouse')""" - ) - - assert node.query("SELECT COUNT() FROM sort_table") == "900\n" - assert node.query("SELECT keyInt FROM sort_table ORDER BY keyInt LIMIT 1") 
== "1\n" - assert ( - node.query("SELECT keyInt FROM sort_table ORDER BY keyInt DESC LIMIT 1") - == "30\n" - ) - assert ( - node.query( - "SELECT keyInt, keyFloat FROM sort_table ORDER BY keyInt, keyFloat DESC LIMIT 1" - ) - == "1\t1.03\n" - ) - assert ( - node.query( - "SELECT keyDateTime FROM sort_table ORDER BY keyDateTime DESC LIMIT 1" - ) - == "1999-12-30 11:23:16\n" - ) - assert ( - node.query("SELECT keyDate FROM sort_table ORDER BY keyDate DESC LIMIT 1") - == "1999-12-30\n" - ) - - with pytest.raises(QueryRuntimeException): - node.query("SELECT * FROM sort_table ORDER BY keyInt WITH FILL") - with pytest.raises(QueryRuntimeException): - node.query("SELECT * FROM sort_table ORDER BY keyInt WITH FILL TO sort_table") - with pytest.raises(QueryRuntimeException): - node.query("SELECT * FROM sort_table ORDER BY keyInt WITH FILL FROM sort_table") - with pytest.raises(QueryRuntimeException): - node.query("SELECT * FROM sort_table ORDER BY keyInt WITH FILL STEP 1") - - node.query("DROP TABLE sort_table") - sort_mongo_table.drop() - - -def test_where(started_cluster): - mongo_connection = get_mongo_connection(started_cluster) - db = mongo_connection["test"] - db.command("dropAllUsersFromDatabase") - db.command("createUser", "root", pwd="clickhouse", roles=["readWrite"]) - where_mongo_table = db["where_table"] - data = [] - for i in range(1, 3): - for d in range(1, 3): - data.append( - { - "id": str(i) + str(d), - "keyInt": i, - "keyFloat": i + (d * 0.001), - "keyString": str(d) + "string", - "keyDateTime": datetime.datetime(1999, d, i, 11, 23, 16), - "keyDate": datetime.datetime(1999, d, i, 11, 23, 16), - "keyNull": None, - } - ) - where_mongo_table.insert_many(data) - - node = started_cluster.instances["node"] - node.query( - """CREATE OR REPLACE TABLE where_table ( - id String, - keyInt Int, - keyFloat Float64, - keyString String, - keyDateTime DateTime, - keyDate Date, - keyNull Nullable(UInt8), - keyNotExists Nullable(Int) - ) ENGINE = MongoDB('mongo1:27017', 'test', 'where_table', 'root', 'clickhouse')""" - ) - - assert node.query("SELECT COUNT() FROM where_table") == "4\n" - - assert ( - node.query("SELECT keyString FROM where_table WHERE id = '11'") == "1string\n" - ) - assert ( - node.query( - "SELECT keyString FROM where_table WHERE id != '11' ORDER BY keyFloat" - ) - == "2string\n1string\n2string\n" - ) - assert ( - node.query( - "SELECT keyString FROM where_table WHERE id = '11' AND keyString = '1string'" - ) - == "1string\n" - ) - assert ( - node.query("SELECT id FROM where_table WHERE keyInt = 1 AND keyFloat = 1.001") - == "11\n" - ) - assert ( - node.query("SELECT id FROM where_table WHERE keyInt = 0 OR keyFloat = 1.001") - == "11\n" - ) - - assert ( - node.query("SELECT id FROM where_table WHERE keyInt BETWEEN 1 AND 2") - == "11\n12\n21\n22\n" - ) - assert node.query("SELECT id FROM where_table WHERE keyInt > 10") == "" - assert ( - node.query("SELECT id FROM where_table WHERE keyInt < 10.1 ORDER BY keyFloat") - == "11\n12\n21\n22\n" - ) - - assert node.query("SELECT id FROM where_table WHERE id IN ('11')") == "11\n" - assert node.query("SELECT id FROM where_table WHERE id IN ['11']") == "11\n" - assert node.query("SELECT id FROM where_table WHERE id IN ('11', 100)") == "11\n" - assert ( - node.query( - "SELECT id FROM where_table WHERE id IN ('11', '22') ORDER BY keyFloat" - ) - == "11\n22\n" - ) - assert ( - node.query( - "SELECT id FROM where_table WHERE id IN ['11', '22'] ORDER BY keyFloat" - ) - == "11\n22\n" - ) - - assert ( - node.query( - "SELECT id FROM where_table 
WHERE id NOT IN ('11') ORDER BY keyFloat" - ) - == "12\n21\n22\n" - ) - assert ( - node.query( - "SELECT id FROM where_table WHERE id NOT IN ['11'] ORDER BY keyFloat" - ) - == "12\n21\n22\n" - ) - assert ( - node.query( - "SELECT id FROM where_table WHERE id NOT IN ('11', 100) ORDER BY keyFloat" - ) - == "12\n21\n22\n" - ) - assert ( - node.query("SELECT id FROM where_table WHERE id NOT IN ('11') AND id IN ('12')") - == "12\n" - ) - assert ( - node.query("SELECT id FROM where_table WHERE id NOT IN ['11'] AND id IN ('12')") - == "12\n" - ) - - with pytest.raises(QueryRuntimeException): - assert ( - node.query( - "SELECT id FROM where_table WHERE id NOT IN ['11', 100] ORDER BY keyFloat" - ) - == "12\n21\n22\n" - ) - - assert node.query("SELECT id FROM where_table WHERE keyDateTime > now()") == "" - assert ( - node.query( - "SELECT keyInt FROM where_table WHERE keyDateTime < now() AND keyString = '1string' AND keyInt IS NOT NULL ORDER BY keyInt" - ) - == "1\n2\n" - ) - - assert node.query("SELECT count() FROM where_table WHERE isNotNull(id)") == "4\n" - assert ( - node.query("SELECT count() FROM where_table WHERE isNotNull(keyNull)") == "0\n" - ) - assert node.query("SELECT count() FROM where_table WHERE isNull(keyNull)") == "4\n" - assert ( - node.query("SELECT count() FROM where_table WHERE isNotNull(keyNotExists)") - == "0\n" - ) - assert ( - node.query("SELECT count() FROM where_table WHERE isNull(keyNotExists)") - == "4\n" - ) - assert node.query("SELECT count() FROM where_table WHERE keyNotExists = 0") == "0\n" - assert ( - node.query("SELECT count() FROM where_table WHERE keyNotExists != 0") == "0\n" - ) - - with pytest.raises(QueryRuntimeException): - node.query("SELECT * FROM where_table WHERE keyInt = keyFloat") - with pytest.raises(QueryRuntimeException): - node.query("SELECT * FROM where_table WHERE equals(keyInt, keyFloat)") - - node.query("DROP TABLE where_table") - where_mongo_table.drop() - - -def test_defaults(started_cluster): - mongo_connection = get_mongo_connection(started_cluster) - db = mongo_connection["test"] - db.command("dropAllUsersFromDatabase") - db.command("createUser", "root", pwd="clickhouse", roles=["readWrite"]) - defaults_mongo_table = db["defaults_table"] - defaults_mongo_table.insert_one({"key": "key"}) - - node = started_cluster.instances["node"] - node.query( - """ - CREATE OR REPLACE TABLE defaults_table( - _id String, - k_int64 Int64, - k_int32 Int32, - k_int16 Int16, - k_int8 Int8, - k_uint64 UInt64, - k_uint32 UInt32, - k_uint16 UInt16, - k_uint8 UInt8, - k_float32 Float32, - k_float64 Float64, - k_date Date, - k_date32 Date32, - k_datetime DateTime, - k_datetime64 DateTime64, - k_string String, - k_uuid UUID, - k_arr Array(Bool) - ) ENGINE = MongoDB('mongo1:27017', 'test', 'defaults_table', 'root', 'clickhouse') - """ - ) - - assert node.query("SELECT COUNT() FROM defaults_table") == "1\n" - - assert ( - node.query( - "SELECT k_int64, k_int32, k_int16, k_int8, k_uint64, k_uint32, k_uint16, k_uint8, k_float32, k_float64 FROM defaults_table" - ) - == "0\t0\t0\t0\t0\t0\t0\t0\t0\t0\n" - ) - assert ( - node.query( - "SELECT k_date, k_date32, k_datetime, k_datetime64, k_string, k_uuid, k_arr FROM defaults_table" - ) - == "1970-01-01\t1900-01-01\t1970-01-01 00:00:00\t1970-01-01 00:00:00.000\t\t00000000-0000-0000-0000-000000000000\t[]\n" - ) - - node.query("DROP TABLE defaults_table") - defaults_mongo_table.drop() - - -def test_nulls(started_cluster): - mongo_connection = get_mongo_connection(started_cluster) - db = mongo_connection["test"] - 
db.command("dropAllUsersFromDatabase") - db.command("createUser", "root", pwd="clickhouse", roles=["readWrite"]) - nulls_mongo_table = db["nulls_table"] - nulls_mongo_table.insert_one({"key": "key"}) - - node = started_cluster.instances["node"] - node.query( - """ - CREATE OR REPLACE TABLE nulls_table( - _id String, - k_int64 Nullable(Int64), - k_int32 Nullable(Int32), - k_int16 Nullable(Int16), - k_int8 Nullable(Int8), - k_uint64 Nullable(UInt64), - k_uint32 Nullable(UInt32), - k_uint16 Nullable(UInt16), - k_uint8 Nullable(UInt8), - k_float32 Nullable(Float32), - k_float64 Nullable(Float64), - k_date Nullable(Date), - k_date32 Nullable(Date32), - k_datetime Nullable(DateTime), - k_datetime64 Nullable(DateTime64), - k_string Nullable(String), - k_uuid Nullable(UUID) - ) ENGINE = MongoDB('mongo1:27017', 'test', 'nulls_table', 'root', 'clickhouse') - """ - ) - - assert node.query("SELECT COUNT() FROM nulls_table") == "1\n" - - assert ( - node.query( - "SELECT k_int64, k_int32, k_int16, k_int8, k_uint64, k_uint32, k_uint16, k_uint8, k_float32, k_float64 FROM nulls_table" - ) - == "\\N\t\\N\t\\N\t\\N\t\\N\t\\N\t\\N\t\\N\t\\N\t\\N\n" - ) - assert ( - node.query( - "SELECT k_date, k_date32, k_datetime, k_datetime64, k_string, k_uuid FROM nulls_table" - ) - == "\\N\t\\N\t\\N\t\\N\t\\N\t\\N\n" - ) - - node.query("DROP TABLE nulls_table") - nulls_mongo_table.drop() - - -def test_oid(started_cluster): - mongo_connection = get_mongo_connection(started_cluster) - db = mongo_connection["test"] - db.command("dropAllUsersFromDatabase") - db.command("createUser", "root", pwd="clickhouse", roles=["readWrite"]) - oid_mongo_table = db["oid_table"] - inserted_result = oid_mongo_table.insert_many( - [ - {"key": "a"}, - {"key": "b"}, - {"key": "c"}, - {"key": "d"}, - {"key": "e"}, - ] - ) - oid = inserted_result.inserted_ids - - node = started_cluster.instances["node"] - node.query( - """ - CREATE OR REPLACE TABLE oid_table( - _id String, - key String - ) ENGINE = MongoDB('mongo1:27017', 'test', 'oid_table', 'root', 'clickhouse') - """ - ) - - assert node.query("SELECT COUNT() FROM oid_table") == "5\n" - - assert node.query(f"SELECT key FROM oid_table WHERE _id = '{oid[0]}'") == "a\n" - assert ( - node.query(f"SELECT * FROM oid_table WHERE _id = '{oid[2]}'") - == f"{oid[2]}\tc\n" - ) - assert node.query(f"SELECT COUNT() FROM oid_table WHERE _id != '{oid[0]}'") == "4\n" - - assert ( - node.query( - f"SELECT key FROM oid_table WHERE _id in ('{oid[0]}', '{oid[1]}') ORDER BY key" - ) - == "a\nb\n" - ) - assert ( - node.query( - f"SELECT key FROM oid_table WHERE _id in ['{oid[0]}', '{oid[1]}'] ORDER BY key" - ) - == "a\nb\n" - ) - assert ( - node.query(f"SELECT key FROM oid_table WHERE _id in ('{oid[0]}') ORDER BY key") - == "a\n" - ) - assert ( - node.query(f"SELECT key FROM oid_table WHERE _id in ['{oid[1]}'] ORDER BY key") - == "b\n" - ) - - with pytest.raises(QueryRuntimeException): - node.query("SELECT * FROM oid_table WHERE _id = 'invalidOID'") - with pytest.raises(QueryRuntimeException): - node.query("SELECT * FROM oid_table WHERE _id = 123123") - with pytest.raises(QueryRuntimeException): - node.query("SELECT * FROM oid_table WHERE _id in (123123, 123)") - - node.query("DROP TABLE oid_table") - oid_mongo_table.drop() - - -def test_uuid(started_cluster): - mongo_connection = get_mongo_connection(started_cluster) - db = mongo_connection["test"] - db.command("dropAllUsersFromDatabase") - db.command("createUser", "root", pwd="clickhouse", roles=["readWrite"]) - uuid_mongo_table = db["uuid_table"] - 
uuid_mongo_table.insert_many( - [ - {"isValid": 0, "kUUID": "bad_uuid"}, - {"isValid": 1, "kUUID": "f0e77736-91d1-48ce-8f01-15123ca1c7ed"}, - ] - ) - - node = started_cluster.instances["node"] - node.query( - """ - CREATE OR REPLACE TABLE uuid_table( - isValid UInt8, - kUUID UUID - ) ENGINE = MongoDB('mongo1:27017', 'test', 'uuid_table', 'root', 'clickhouse') - """ - ) - - assert ( - node.query(f"SELECT kUUID FROM uuid_table WHERE isValid = 1") - == "f0e77736-91d1-48ce-8f01-15123ca1c7ed\n" - ) - - with pytest.raises(QueryRuntimeException): - node.query("SELECT * FROM uuid_table WHERE isValid = 0") - with pytest.raises(QueryRuntimeException): - node.query("SELECT * FROM uuid_table") - - node.query("DROP TABLE uuid_table") - uuid_mongo_table.drop() - - -def test_no_fail_on_unsupported_clauses(started_cluster): - mongo_connection = get_mongo_connection(started_cluster) - db = mongo_connection["test"] - db.command("dropAllUsersFromDatabase") - db.command("createUser", "root", pwd="clickhouse", roles=["readWrite"]) - unsupported_clauses_table = db["unsupported_clauses"] - - node = started_cluster.instances["node"] - node.query( - """ - CREATE OR REPLACE TABLE unsupported_clauses( - a UInt64, - b UInt64 - ) ENGINE = MongoDB('mongo1:27017', 'test', 'unsupported_clauses', 'root', 'clickhouse') - """ - ) - - node.query( - f"SELECT * FROM unsupported_clauses WHERE a > rand() SETTINGS mongodb_throw_on_unsupported_query = 0" + "CREATE TABLE simple_mongo_table(key UInt64, data String) ENGINE = MongoDB('mongo1:27017', 'test', 'simple_table', 'root', 'clickhouse')" ) node.query( - f"SELECT * FROM unsupported_clauses WHERE a / 1000 > 0 SETTINGS mongodb_throw_on_unsupported_query = 0" - ) - node.query( - f"SELECT * FROM unsupported_clauses WHERE toFloat64(a) < 6.66 > rand() SETTINGS mongodb_throw_on_unsupported_query = 0" - ) - node.query( - f"SELECT * FROM unsupported_clauses ORDER BY a, b LIMIT 2 BY a SETTINGS mongodb_throw_on_unsupported_query = 0" + "INSERT INTO simple_mongo_table SELECT number, 'kek' || toString(number) FROM numbers(10)" ) - node.query("DROP TABLE unsupported_clauses") - unsupported_clauses_table.drop() - - -def test_password_masking(started_cluster): - node = started_cluster.instances["node"] - - node.query( - """ - CREATE OR REPLACE TABLE mongodb_uri_password_masking (_id String) - ENGINE = MongoDB('mongodb://testuser:mypassword@127.0.0.1:27017/example', 'test_clickhouse'); - """ - ) assert ( - node.query( - """ - SELECT replaceAll(create_table_query, currentDatabase(), 'default') FROM system.tables - WHERE table = 'mongodb_uri_password_masking' AND database = currentDatabase(); - """ - ) - == "CREATE TABLE default.mongodb_uri_password_masking (`_id` String) ENGINE = MongoDB(\\'mongodb://testuser:[HIDDEN]@127.0.0.1:27017/example\\', \\'test_clickhouse\\')\n" + node.query("SELECT data from simple_mongo_table where key = 7").strip() + == "kek7" ) + node.query("INSERT INTO simple_mongo_table(key) SELECT 12") + assert int(node.query("SELECT count() from simple_mongo_table")) == 11 assert ( - node.query( - """ - SELECT replaceAll(engine_full, currentDatabase(), 'default') FROM system.tables - WHERE table = 'mongodb_uri_password_masking' AND database = currentDatabase(); - """ - ) - == "MongoDB(\\'mongodb://testuser:[HIDDEN]@127.0.0.1:27017/example\\', \\'test_clickhouse\\')\n" + node.query("SELECT data from simple_mongo_table where key = 12").strip() == "" ) - node.query("DROP TABLE IF EXISTS mongodb_uri_password_masking;") - node.query( - """ - CREATE OR REPLACE DICTIONARY 
mongodb_dictionary_uri_password_masking (_id String) - PRIMARY KEY _id - SOURCE(MONGODB(uri 'mongodb://testuser:mypassword@127.0.0.1:27017/example' collection 'test_clickhouse')) - LAYOUT(FLAT()) - LIFETIME(0); - """ - ) - assert ( - node.query( - """ - SELECT replaceAll(create_table_query, currentDatabase(), 'default') FROM system.tables - WHERE table = 'mongodb_dictionary_uri_password_masking' AND database = currentDatabase();""" - ) - == "CREATE DICTIONARY default.mongodb_dictionary_uri_password_masking (`_id` String) PRIMARY KEY _id SOURCE(MONGODB(URI \\'mongodb://testuser:[HIDDEN]@127.0.0.1:27017/example\\' COLLECTION \\'test_clickhouse\\')) LIFETIME(MIN 0 MAX 0) LAYOUT(FLAT())\n" - ) - node.query("DROP DICTIONARY IF EXISTS mongodb_dictionary_uri_password_masking;") - - node.query( - """ - CREATE TABLE mongodb_password_masking (_id String) - ENGINE = MongoDB('127.0.0.1:27017', 'example', 'test_clickhouse', 'testuser', 'mypassword'); - """ - ) - assert ( - node.query( - """ - SELECT replaceAll(create_table_query, currentDatabase(), 'default') FROM system.tables - WHERE table = 'mongodb_password_masking' AND database = currentDatabase(); - """ - ) - == "CREATE TABLE default.mongodb_password_masking (`_id` String) ENGINE = MongoDB(\\'127.0.0.1:27017\\', \\'example\\', \\'test_clickhouse\\', \\'testuser\\', \\'[HIDDEN]\\')\n" - ) - assert ( - node.query( - """ - SELECT replaceAll(engine_full, currentDatabase(), 'default') FROM system.tables - WHERE table = 'mongodb_password_masking' AND database = currentDatabase(); - """ - ) - == "MongoDB(\\'127.0.0.1:27017\\', \\'example\\', \\'test_clickhouse\\', \\'testuser\\', \\'[HIDDEN]\\')\n" - ) - node.query("DROP TABLE IF EXISTS mongodb_password_masking;") - - node.query( - """ - CREATE OR REPLACE DICTIONARY mongodb_dictionary_password_masking (_id String) - PRIMARY KEY _id - SOURCE(MONGODB( - host '127.0.0.1' - port 27017 - user 'testuser' - password 'mypassword' - db 'example' - collection 'test_clickhouse' - options 'ssl=true' - )) - LAYOUT(FLAT()) - LIFETIME(0); - """ - ) - assert ( - node.query( - """ - SELECT replaceAll(create_table_query, currentDatabase(), 'default') FROM system.tables - WHERE table = 'mongodb_dictionary_password_masking' AND database = currentDatabase(); - """ - ) - == "CREATE DICTIONARY default.mongodb_dictionary_password_masking (`_id` String) PRIMARY KEY _id SOURCE(MONGODB(HOST \\'127.0.0.1\\' PORT 27017 USER \\'testuser\\' PASSWORD \\'[HIDDEN]\\' DB \\'example\\' COLLECTION \\'test_clickhouse\\' OPTIONS \\'ssl=true\\')) LIFETIME(MIN 0 MAX 0) LAYOUT(FLAT())\n" - ) - node.query("DROP DICTIONARY IF EXISTS mongodb_dictionary_password_masking;") + node.query("DROP TABLE simple_mongo_table") + simple_mongo_table.drop() diff --git a/tests/integration/test_storage_mongodb_legacy/__init__.py b/tests/integration/test_storage_mongodb_legacy/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/tests/integration/test_storage_mongodb_legacy/configs/feature_flag.xml b/tests/integration/test_storage_mongodb_legacy/configs/feature_flag.xml deleted file mode 100644 index 4ee05db9d1e..00000000000 --- a/tests/integration/test_storage_mongodb_legacy/configs/feature_flag.xml +++ /dev/null @@ -1,3 +0,0 @@ - - 1 - diff --git a/tests/integration/test_storage_mongodb_legacy/configs/named_collections.xml b/tests/integration/test_storage_mongodb_legacy/configs/named_collections.xml deleted file mode 100644 index 5f7db390982..00000000000 --- 
a/tests/integration/test_storage_mongodb_legacy/configs/named_collections.xml +++ /dev/null @@ -1,12 +0,0 @@ - - - - root - clickhouse - mongo1 - 27017 - test - simple_table - - - diff --git a/tests/integration/test_storage_mongodb_legacy/configs/users.xml b/tests/integration/test_storage_mongodb_legacy/configs/users.xml deleted file mode 100644 index 4b6ba057ecb..00000000000 --- a/tests/integration/test_storage_mongodb_legacy/configs/users.xml +++ /dev/null @@ -1,9 +0,0 @@ - - - - - default - 1 - - - diff --git a/tests/integration/test_storage_mongodb_legacy/mongo_secure_config/cert.crt b/tests/integration/test_storage_mongodb_legacy/mongo_secure_config/cert.crt deleted file mode 100644 index 94249109d41..00000000000 --- a/tests/integration/test_storage_mongodb_legacy/mongo_secure_config/cert.crt +++ /dev/null @@ -1,24 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIEGzCCAwOgAwIBAgIUaoGlyuJAyvs6yowFXymfu7seEiUwDQYJKoZIhvcNAQEL -BQAwgZwxCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDbGlja0hvdXNlMRMwEQYDVQQH -DApDbGlja0hvdXNlMREwDwYDVQQKDAhQZXJzb25hbDETMBEGA1UECwwKQ2xpY2tI -b3VzZTEkMCIGCSqGSIb3DQEJARYVY2xpY2tob3VzZUBjbGlja2hvdXNlMRUwEwYD -VQQDDAxtb25nb19zZWN1cmUwHhcNMjQwNTI2MTYwMDMxWhcNMzQwNTI0MTYwMDMx -WjCBnDELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNsaWNrSG91c2UxEzARBgNVBAcM -CkNsaWNrSG91c2UxETAPBgNVBAoMCFBlcnNvbmFsMRMwEQYDVQQLDApDbGlja0hv -dXNlMSQwIgYJKoZIhvcNAQkBFhVjbGlja2hvdXNlQGNsaWNraG91c2UxFTATBgNV -BAMMDG1vbmdvX3NlY3VyZTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB -AJSeQfMG7xd0+kPehYlEsEw0Sm1DB05SXVCEzIX3DFD6XJrd8eeWwlzYaBatkcwj -+8yvqske02X/3YwpzJyFizAqJIoKql5c5Yii2xH1S9PFP0y+LoJre+eQziHyO33t -eeedeGNJ05Sm2ZAzjfMQ7Rdh6S+gdIO4Y102iQR5yr2aTrh7tu7XkNCjwKTqMMvz -SikP1Rft2J6ECim+MjYCCtH/4yXGeEJ5epU4t3y6Q23B2ZEhY+sqUdwgK9pu8oe4 -mkZ1Qvwakc9Qg12owRSDjBBYrPvghXVpkJ2JkgKTrIAIz9tZ53eDVHNXbWMAotov -jEmRSoGIS1yzwmQ9PdxUwYcCAwEAAaNTMFEwHQYDVR0OBBYEFJyz3Kt5XBDg5cvI -0v1ioqejqX+CMB8GA1UdIwQYMBaAFJyz3Kt5XBDg5cvI0v1ioqejqX+CMA8GA1Ud -EwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAHAQFA5VMYvaQFnKtKfHg9TF -qfJ4uM3YsGdgsgmGWgflD1S4Z290H6Q2QvyZAEceTrlJxqArlWlVp5DAU6EeXjEh -QMAgdkJHF1Hg2jsZKPtdkb88UtuzwAME357T8NtEJSHzNE5QqYwlVM71JkWpdqvA -UUdOJbWhhJfowIf4tMmL1DUuIy2qYpoP/tEBXEw9uwpmZqb7KELwT3lRyOMaGFN7 -RHVwbvJWlHiu83QDNaWz6ijQkWl3tCN6TWcFD1qc1x8GpMzjbsAAYbCx7fbHM2LD -9kGSCiyv5K0MLNK5u67RtUFfPHtyD8RA0TtxIZ4PEN/eFANKS2/5NEi1ZuZ5/Pk= ------END CERTIFICATE----- diff --git a/tests/integration/test_storage_mongodb_legacy/mongo_secure_config/key.pem b/tests/integration/test_storage_mongodb_legacy/mongo_secure_config/key.pem deleted file mode 100644 index 9444d19a3d2..00000000000 --- a/tests/integration/test_storage_mongodb_legacy/mongo_secure_config/key.pem +++ /dev/null @@ -1,52 +0,0 @@ ------BEGIN PRIVATE KEY----- -MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCUnkHzBu8XdPpD -3oWJRLBMNEptQwdOUl1QhMyF9wxQ+lya3fHnlsJc2GgWrZHMI/vMr6rJHtNl/92M -KcychYswKiSKCqpeXOWIotsR9UvTxT9Mvi6Ca3vnkM4h8jt97XnnnXhjSdOUptmQ -M43zEO0XYekvoHSDuGNdNokEecq9mk64e7bu15DQo8Ck6jDL80opD9UX7diehAop -vjI2AgrR/+MlxnhCeXqVOLd8ukNtwdmRIWPrKlHcICvabvKHuJpGdUL8GpHPUINd -qMEUg4wQWKz74IV1aZCdiZICk6yACM/bWed3g1RzV21jAKLaL4xJkUqBiEtcs8Jk -PT3cVMGHAgMBAAECggEAAul6qiHchB+uQMCWyC5xTeRqAXR3tAv4Tj4fGJjkXY4Z -OrAjr9Kp38EvX1amgvUWV3FT3NMevDf5xd9OdzAA0g0uJIF+mAhYFW48i1FnQcHQ -mOf0zmiZR7l8o7ROb3JvooXHxW+ba/qjGPVwC801gJvruehgbOCRxh9DTRp7sH5K -BmcddhULhKBEQjWUmYNEM3A2axpdi3g1aYKERRLn8J0DXcItTwbxuxbNcs3erl8W -3yyv/JKmqnWF5sNyX3wEWuQcDEZZy+W7Hn4KPMxyU+WA5el5nJ8kFlxhpInmajwu -8Ytn6IEyThyXutVomosVBuP16QORl2Nad0hnQO9toQKBgQDDgiehXr3k2wfVaVOD 
-PocW4leXausIU2XcCn6FxTG9vLUDMPANw0MxgenC2nrjaUU9J9UjdRYgMcFGWrl4 -E27wEn5e0nZ/Y7F2cfhuOc9vNmZ+eHm2KQRyfAjIVL5Hpldqk2jXyCnLBNeWGHSw -kPQMU+FLqmrOFUvXlD2my+OSHwKBgQDCmgS9r+xFh4BCB9dY6eyQJF/jYmAQHs26 -80WJ6gAhbUw1O71uDtS9/3PZVXwwNCOHrcc49BPrpJdxGPHGvd2Q5y+j5LDDbQSZ -aLTiCZ2B0RM5Bd2dXD8gEHN4WCX7pJ/o4kDi4zONBmp5mg/tFfer5z5IU/1P7Wak -1Mu0JIHzmQKBgDNaNoqeVgaMuYwGtFbez6DlJtiwzrdLIJAheYYte5k4vdruub8D -sNyKIRp7RJgDCJq9obBEiuE98GRIZDrz78nDMco6QcHIL87KtNRO/vtZMKa7gkyk -jXR8u9nS2H/9YyytN3amLsQSq4XTOqM+D7xFNAIp6w/ibB9d4quzFj1FAoGBAKTE -x/LcO897NWuzO/D6z+QUCGR87R15F3SNenmVedrTskz4ciH3yMW+v5ZrPSWLX/IH -f8GHWD6TM+780eoW5L1GIh5BCjHN4rEJ6O3iekxqfD4x6zzL2F8Lztk8uZxh/Uuw -FoSFHybvIcQoYAe8K+KPfzq6cqb0OY6i5n920dkxAoGAJkw6ADqsJfH3NR+bQfgF -oEA1KqriMxyEJm44Y7E80C+iF4iNALF+Er9TSnr4mDxX5e/dW9d1YeS9o0nOfkpF -MaBmJfxqo4QQJLPRaxYQ2Jhfn7irir4BroxeNXQgNNhgSuKIvkfRyGYwl7P0AT4v -8H8rkZGneMD3gLB5MfnRhGk= ------END PRIVATE KEY----- ------BEGIN CERTIFICATE----- -MIIEGzCCAwOgAwIBAgIUaoGlyuJAyvs6yowFXymfu7seEiUwDQYJKoZIhvcNAQEL -BQAwgZwxCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDbGlja0hvdXNlMRMwEQYDVQQH -DApDbGlja0hvdXNlMREwDwYDVQQKDAhQZXJzb25hbDETMBEGA1UECwwKQ2xpY2tI -b3VzZTEkMCIGCSqGSIb3DQEJARYVY2xpY2tob3VzZUBjbGlja2hvdXNlMRUwEwYD -VQQDDAxtb25nb19zZWN1cmUwHhcNMjQwNTI2MTYwMDMxWhcNMzQwNTI0MTYwMDMx -WjCBnDELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNsaWNrSG91c2UxEzARBgNVBAcM -CkNsaWNrSG91c2UxETAPBgNVBAoMCFBlcnNvbmFsMRMwEQYDVQQLDApDbGlja0hv -dXNlMSQwIgYJKoZIhvcNAQkBFhVjbGlja2hvdXNlQGNsaWNraG91c2UxFTATBgNV -BAMMDG1vbmdvX3NlY3VyZTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB -AJSeQfMG7xd0+kPehYlEsEw0Sm1DB05SXVCEzIX3DFD6XJrd8eeWwlzYaBatkcwj -+8yvqske02X/3YwpzJyFizAqJIoKql5c5Yii2xH1S9PFP0y+LoJre+eQziHyO33t -eeedeGNJ05Sm2ZAzjfMQ7Rdh6S+gdIO4Y102iQR5yr2aTrh7tu7XkNCjwKTqMMvz -SikP1Rft2J6ECim+MjYCCtH/4yXGeEJ5epU4t3y6Q23B2ZEhY+sqUdwgK9pu8oe4 -mkZ1Qvwakc9Qg12owRSDjBBYrPvghXVpkJ2JkgKTrIAIz9tZ53eDVHNXbWMAotov -jEmRSoGIS1yzwmQ9PdxUwYcCAwEAAaNTMFEwHQYDVR0OBBYEFJyz3Kt5XBDg5cvI -0v1ioqejqX+CMB8GA1UdIwQYMBaAFJyz3Kt5XBDg5cvI0v1ioqejqX+CMA8GA1Ud -EwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAHAQFA5VMYvaQFnKtKfHg9TF -qfJ4uM3YsGdgsgmGWgflD1S4Z290H6Q2QvyZAEceTrlJxqArlWlVp5DAU6EeXjEh -QMAgdkJHF1Hg2jsZKPtdkb88UtuzwAME357T8NtEJSHzNE5QqYwlVM71JkWpdqvA -UUdOJbWhhJfowIf4tMmL1DUuIy2qYpoP/tEBXEw9uwpmZqb7KELwT3lRyOMaGFN7 -RHVwbvJWlHiu83QDNaWz6ijQkWl3tCN6TWcFD1qc1x8GpMzjbsAAYbCx7fbHM2LD -9kGSCiyv5K0MLNK5u67RtUFfPHtyD8RA0TtxIZ4PEN/eFANKS2/5NEi1ZuZ5/Pk= ------END CERTIFICATE----- diff --git a/tests/integration/test_storage_mongodb_legacy/mongo_secure_config/mongo_cert.pem b/tests/integration/test_storage_mongodb_legacy/mongo_secure_config/mongo_cert.pem deleted file mode 100644 index 9444d19a3d2..00000000000 --- a/tests/integration/test_storage_mongodb_legacy/mongo_secure_config/mongo_cert.pem +++ /dev/null @@ -1,52 +0,0 @@ ------BEGIN PRIVATE KEY----- -MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCUnkHzBu8XdPpD -3oWJRLBMNEptQwdOUl1QhMyF9wxQ+lya3fHnlsJc2GgWrZHMI/vMr6rJHtNl/92M -KcychYswKiSKCqpeXOWIotsR9UvTxT9Mvi6Ca3vnkM4h8jt97XnnnXhjSdOUptmQ -M43zEO0XYekvoHSDuGNdNokEecq9mk64e7bu15DQo8Ck6jDL80opD9UX7diehAop -vjI2AgrR/+MlxnhCeXqVOLd8ukNtwdmRIWPrKlHcICvabvKHuJpGdUL8GpHPUINd -qMEUg4wQWKz74IV1aZCdiZICk6yACM/bWed3g1RzV21jAKLaL4xJkUqBiEtcs8Jk -PT3cVMGHAgMBAAECggEAAul6qiHchB+uQMCWyC5xTeRqAXR3tAv4Tj4fGJjkXY4Z -OrAjr9Kp38EvX1amgvUWV3FT3NMevDf5xd9OdzAA0g0uJIF+mAhYFW48i1FnQcHQ -mOf0zmiZR7l8o7ROb3JvooXHxW+ba/qjGPVwC801gJvruehgbOCRxh9DTRp7sH5K -BmcddhULhKBEQjWUmYNEM3A2axpdi3g1aYKERRLn8J0DXcItTwbxuxbNcs3erl8W -3yyv/JKmqnWF5sNyX3wEWuQcDEZZy+W7Hn4KPMxyU+WA5el5nJ8kFlxhpInmajwu 
-8Ytn6IEyThyXutVomosVBuP16QORl2Nad0hnQO9toQKBgQDDgiehXr3k2wfVaVOD -PocW4leXausIU2XcCn6FxTG9vLUDMPANw0MxgenC2nrjaUU9J9UjdRYgMcFGWrl4 -E27wEn5e0nZ/Y7F2cfhuOc9vNmZ+eHm2KQRyfAjIVL5Hpldqk2jXyCnLBNeWGHSw -kPQMU+FLqmrOFUvXlD2my+OSHwKBgQDCmgS9r+xFh4BCB9dY6eyQJF/jYmAQHs26 -80WJ6gAhbUw1O71uDtS9/3PZVXwwNCOHrcc49BPrpJdxGPHGvd2Q5y+j5LDDbQSZ -aLTiCZ2B0RM5Bd2dXD8gEHN4WCX7pJ/o4kDi4zONBmp5mg/tFfer5z5IU/1P7Wak -1Mu0JIHzmQKBgDNaNoqeVgaMuYwGtFbez6DlJtiwzrdLIJAheYYte5k4vdruub8D -sNyKIRp7RJgDCJq9obBEiuE98GRIZDrz78nDMco6QcHIL87KtNRO/vtZMKa7gkyk -jXR8u9nS2H/9YyytN3amLsQSq4XTOqM+D7xFNAIp6w/ibB9d4quzFj1FAoGBAKTE -x/LcO897NWuzO/D6z+QUCGR87R15F3SNenmVedrTskz4ciH3yMW+v5ZrPSWLX/IH -f8GHWD6TM+780eoW5L1GIh5BCjHN4rEJ6O3iekxqfD4x6zzL2F8Lztk8uZxh/Uuw -FoSFHybvIcQoYAe8K+KPfzq6cqb0OY6i5n920dkxAoGAJkw6ADqsJfH3NR+bQfgF -oEA1KqriMxyEJm44Y7E80C+iF4iNALF+Er9TSnr4mDxX5e/dW9d1YeS9o0nOfkpF -MaBmJfxqo4QQJLPRaxYQ2Jhfn7irir4BroxeNXQgNNhgSuKIvkfRyGYwl7P0AT4v -8H8rkZGneMD3gLB5MfnRhGk= ------END PRIVATE KEY----- ------BEGIN CERTIFICATE----- -MIIEGzCCAwOgAwIBAgIUaoGlyuJAyvs6yowFXymfu7seEiUwDQYJKoZIhvcNAQEL -BQAwgZwxCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDbGlja0hvdXNlMRMwEQYDVQQH -DApDbGlja0hvdXNlMREwDwYDVQQKDAhQZXJzb25hbDETMBEGA1UECwwKQ2xpY2tI -b3VzZTEkMCIGCSqGSIb3DQEJARYVY2xpY2tob3VzZUBjbGlja2hvdXNlMRUwEwYD -VQQDDAxtb25nb19zZWN1cmUwHhcNMjQwNTI2MTYwMDMxWhcNMzQwNTI0MTYwMDMx -WjCBnDELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNsaWNrSG91c2UxEzARBgNVBAcM -CkNsaWNrSG91c2UxETAPBgNVBAoMCFBlcnNvbmFsMRMwEQYDVQQLDApDbGlja0hv -dXNlMSQwIgYJKoZIhvcNAQkBFhVjbGlja2hvdXNlQGNsaWNraG91c2UxFTATBgNV -BAMMDG1vbmdvX3NlY3VyZTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB -AJSeQfMG7xd0+kPehYlEsEw0Sm1DB05SXVCEzIX3DFD6XJrd8eeWwlzYaBatkcwj -+8yvqske02X/3YwpzJyFizAqJIoKql5c5Yii2xH1S9PFP0y+LoJre+eQziHyO33t -eeedeGNJ05Sm2ZAzjfMQ7Rdh6S+gdIO4Y102iQR5yr2aTrh7tu7XkNCjwKTqMMvz -SikP1Rft2J6ECim+MjYCCtH/4yXGeEJ5epU4t3y6Q23B2ZEhY+sqUdwgK9pu8oe4 -mkZ1Qvwakc9Qg12owRSDjBBYrPvghXVpkJ2JkgKTrIAIz9tZ53eDVHNXbWMAotov -jEmRSoGIS1yzwmQ9PdxUwYcCAwEAAaNTMFEwHQYDVR0OBBYEFJyz3Kt5XBDg5cvI -0v1ioqejqX+CMB8GA1UdIwQYMBaAFJyz3Kt5XBDg5cvI0v1ioqejqX+CMA8GA1Ud -EwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAHAQFA5VMYvaQFnKtKfHg9TF -qfJ4uM3YsGdgsgmGWgflD1S4Z290H6Q2QvyZAEceTrlJxqArlWlVp5DAU6EeXjEh -QMAgdkJHF1Hg2jsZKPtdkb88UtuzwAME357T8NtEJSHzNE5QqYwlVM71JkWpdqvA -UUdOJbWhhJfowIf4tMmL1DUuIy2qYpoP/tEBXEw9uwpmZqb7KELwT3lRyOMaGFN7 -RHVwbvJWlHiu83QDNaWz6ijQkWl3tCN6TWcFD1qc1x8GpMzjbsAAYbCx7fbHM2LD -9kGSCiyv5K0MLNK5u67RtUFfPHtyD8RA0TtxIZ4PEN/eFANKS2/5NEi1ZuZ5/Pk= ------END CERTIFICATE----- diff --git a/tests/integration/test_storage_mongodb_legacy/mongo_secure_config/mongo_secure.conf b/tests/integration/test_storage_mongodb_legacy/mongo_secure_config/mongo_secure.conf deleted file mode 100644 index 42d9853c6eb..00000000000 --- a/tests/integration/test_storage_mongodb_legacy/mongo_secure_config/mongo_secure.conf +++ /dev/null @@ -1,6 +0,0 @@ -net: - ssl: - mode: requireSSL - PEMKeyFile: /mongo/key.pem - CAFile: /mongo/cert.crt - allowConnectionsWithoutCertificates: true diff --git a/tests/integration/test_storage_mongodb_legacy/test.py b/tests/integration/test_storage_mongodb_legacy/test.py deleted file mode 100644 index c6e1c22379d..00000000000 --- a/tests/integration/test_storage_mongodb_legacy/test.py +++ /dev/null @@ -1,509 +0,0 @@ -import pymongo -from uuid import UUID - -import pytest -from helpers.client import QueryRuntimeException - -from helpers.cluster import ClickHouseCluster -import datetime - - -@pytest.fixture(scope="module") -def started_cluster(request): - try: - cluster = ClickHouseCluster(__file__) - node = 
cluster.add_instance( - "node", - main_configs=[ - "mongo_secure_config/config.d/ssl_conf.xml", - "configs/named_collections.xml", - "configs/feature_flag.xml", - ], - user_configs=["configs/users.xml"], - with_mongo=True, - ) - cluster.start() - yield cluster - finally: - cluster.shutdown() - - -def get_mongo_connection(started_cluster, secure=False, with_credentials=True): - connection_str = "" - if with_credentials: - connection_str = "mongodb://root:clickhouse@localhost:{}".format( - started_cluster.mongo_secure_port if secure else started_cluster.mongo_port - ) - else: - connection_str = "mongodb://localhost:{}".format( - started_cluster.mongo_no_cred_port - ) - if secure: - connection_str += "/?tls=true&tlsAllowInvalidCertificates=true" - return pymongo.MongoClient(connection_str) - - -@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"]) -def test_uuid(started_cluster): - mongo_connection = get_mongo_connection(started_cluster) - db = mongo_connection["test"] - db.add_user("root", "clickhouse") - mongo_table = db["uuid_table"] - mongo_table.insert({"key": 0, "data": UUID("f0e77736-91d1-48ce-8f01-15123ca1c7ed")}) - - node = started_cluster.instances["node"] - node.query( - "CREATE TABLE uuid_mongo_table(key UInt64, data UUID) ENGINE = MongoDB('mongo1:27017', 'test', 'uuid_table', 'root', 'clickhouse')" - ) - - assert node.query("SELECT COUNT() FROM uuid_mongo_table") == "1\n" - assert ( - node.query("SELECT data from uuid_mongo_table where key = 0") - == "f0e77736-91d1-48ce-8f01-15123ca1c7ed\n" - ) - node.query("DROP TABLE uuid_mongo_table") - mongo_table.drop() - - -@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"]) -def test_simple_select(started_cluster): - mongo_connection = get_mongo_connection(started_cluster) - db = mongo_connection["test"] - db.add_user("root", "clickhouse") - simple_mongo_table = db["simple_table"] - data = [] - for i in range(0, 100): - data.append({"key": i, "data": hex(i * i)}) - simple_mongo_table.insert_many(data) - - node = started_cluster.instances["node"] - node.query( - "CREATE TABLE simple_mongo_table(key UInt64, data String) ENGINE = MongoDB('mongo1:27017', 'test', 'simple_table', 'root', 'clickhouse')" - ) - - assert node.query("SELECT COUNT() FROM simple_mongo_table") == "100\n" - assert ( - node.query("SELECT sum(key) FROM simple_mongo_table") - == str(sum(range(0, 100))) + "\n" - ) - - assert ( - node.query("SELECT data from simple_mongo_table where key = 42") - == hex(42 * 42) + "\n" - ) - node.query("DROP TABLE simple_mongo_table") - simple_mongo_table.drop() - - -@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"]) -def test_simple_select_from_view(started_cluster): - mongo_connection = get_mongo_connection(started_cluster) - db = mongo_connection["test"] - db.add_user("root", "clickhouse") - simple_mongo_table = db["simple_table"] - data = [] - for i in range(0, 100): - data.append({"key": i, "data": hex(i * i)}) - simple_mongo_table.insert_many(data) - simple_mongo_table_view = db.create_collection( - "simple_table_view", viewOn="simple_table" - ) - - node = started_cluster.instances["node"] - node.query( - "CREATE TABLE simple_mongo_table(key UInt64, data String) ENGINE = MongoDB('mongo1:27017', 'test', 'simple_table_view', 'root', 'clickhouse')" - ) - - assert node.query("SELECT COUNT() FROM simple_mongo_table") == "100\n" - assert ( - node.query("SELECT sum(key) FROM simple_mongo_table") - == str(sum(range(0, 100))) + "\n" - ) - - assert ( - 
node.query("SELECT data from simple_mongo_table where key = 42") - == hex(42 * 42) + "\n" - ) - node.query("DROP TABLE simple_mongo_table") - simple_mongo_table_view.drop() - simple_mongo_table.drop() - - -@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"]) -def test_arrays(started_cluster): - mongo_connection = get_mongo_connection(started_cluster) - db = mongo_connection["test"] - db.add_user("root", "clickhouse") - arrays_mongo_table = db["arrays_table"] - data = [] - for i in range(0, 100): - data.append( - { - "key": i, - "arr_int64": [-(i + 1), -(i + 2), -(i + 3)], - "arr_int32": [-(i + 1), -(i + 2), -(i + 3)], - "arr_int16": [-(i + 1), -(i + 2), -(i + 3)], - "arr_int8": [-(i + 1), -(i + 2), -(i + 3)], - "arr_uint64": [i + 1, i + 2, i + 3], - "arr_uint32": [i + 1, i + 2, i + 3], - "arr_uint16": [i + 1, i + 2, i + 3], - "arr_uint8": [i + 1, i + 2, i + 3], - "arr_float32": [i + 1.125, i + 2.5, i + 3.750], - "arr_float64": [i + 1.125, i + 2.5, i + 3.750], - "arr_date": [ - datetime.datetime(2002, 10, 27), - datetime.datetime(2024, 1, 8), - ], - "arr_datetime": [ - datetime.datetime(2023, 3, 31, 6, 3, 12), - datetime.datetime(1999, 2, 28, 12, 46, 34), - ], - "arr_string": [str(i + 1), str(i + 2), str(i + 3)], - "arr_uuid": [ - "f0e77736-91d1-48ce-8f01-15123ca1c7ed", - "93376a07-c044-4281-a76e-ad27cf6973c5", - ], - "arr_mongo_uuid": [ - UUID("f0e77736-91d1-48ce-8f01-15123ca1c7ed"), - UUID("93376a07-c044-4281-a76e-ad27cf6973c5"), - ], - "arr_arr_bool": [ - [True, False, True], - [True], - [], - None, - [False], - [None], - ], - "arr_empty": [], - "arr_null": None, - "arr_nullable": None, - } - ) - - arrays_mongo_table.insert_many(data) - - node = started_cluster.instances["node"] - node.query( - "CREATE TABLE arrays_mongo_table(" - "key UInt64," - "arr_int64 Array(Int64)," - "arr_int32 Array(Int32)," - "arr_int16 Array(Int16)," - "arr_int8 Array(Int8)," - "arr_uint64 Array(UInt64)," - "arr_uint32 Array(UInt32)," - "arr_uint16 Array(UInt16)," - "arr_uint8 Array(UInt8)," - "arr_float32 Array(Float32)," - "arr_float64 Array(Float64)," - "arr_date Array(Date)," - "arr_datetime Array(DateTime)," - "arr_string Array(String)," - "arr_uuid Array(UUID)," - "arr_mongo_uuid Array(UUID)," - "arr_arr_bool Array(Array(Bool))," - "arr_empty Array(UInt64)," - "arr_null Array(UInt64)," - "arr_arr_null Array(Array(UInt64))," - "arr_nullable Array(Nullable(UInt64))" - ") ENGINE = MongoDB('mongo1:27017', 'test', 'arrays_table', 'root', 'clickhouse')" - ) - - assert node.query("SELECT COUNT() FROM arrays_mongo_table") == "100\n" - - for column_name in ["arr_int64", "arr_int32", "arr_int16", "arr_int8"]: - assert ( - node.query(f"SELECT {column_name} FROM arrays_mongo_table WHERE key = 42") - == "[-43,-44,-45]\n" - ) - - for column_name in ["arr_uint64", "arr_uint32", "arr_uint16", "arr_uint8"]: - assert ( - node.query(f"SELECT {column_name} FROM arrays_mongo_table WHERE key = 42") - == "[43,44,45]\n" - ) - - for column_name in ["arr_float32", "arr_float64"]: - assert ( - node.query(f"SELECT {column_name} FROM arrays_mongo_table WHERE key = 42") - == "[43.125,44.5,45.75]\n" - ) - - assert ( - node.query(f"SELECT arr_date FROM arrays_mongo_table WHERE key = 42") - == "['2002-10-27','2024-01-08']\n" - ) - - assert ( - node.query(f"SELECT arr_datetime FROM arrays_mongo_table WHERE key = 42") - == "['2023-03-31 06:03:12','1999-02-28 12:46:34']\n" - ) - - assert ( - node.query(f"SELECT arr_string FROM arrays_mongo_table WHERE key = 42") - == "['43','44','45']\n" - ) - - assert ( - 
node.query(f"SELECT arr_uuid FROM arrays_mongo_table WHERE key = 42") - == "['f0e77736-91d1-48ce-8f01-15123ca1c7ed','93376a07-c044-4281-a76e-ad27cf6973c5']\n" - ) - - assert ( - node.query(f"SELECT arr_mongo_uuid FROM arrays_mongo_table WHERE key = 42") - == "['f0e77736-91d1-48ce-8f01-15123ca1c7ed','93376a07-c044-4281-a76e-ad27cf6973c5']\n" - ) - - assert ( - node.query(f"SELECT arr_arr_bool FROM arrays_mongo_table WHERE key = 42") - == "[[true,false,true],[true],[],[],[false],[false]]\n" - ) - - assert ( - node.query(f"SELECT arr_empty FROM arrays_mongo_table WHERE key = 42") == "[]\n" - ) - - assert ( - node.query(f"SELECT arr_null FROM arrays_mongo_table WHERE key = 42") == "[]\n" - ) - - assert ( - node.query(f"SELECT arr_arr_null FROM arrays_mongo_table WHERE key = 42") - == "[]\n" - ) - - assert ( - node.query(f"SELECT arr_nullable FROM arrays_mongo_table WHERE key = 42") - == "[]\n" - ) - - # Test INSERT SELECT - node.query("INSERT INTO arrays_mongo_table SELECT * FROM arrays_mongo_table") - - assert node.query("SELECT COUNT() FROM arrays_mongo_table") == "200\n" - assert node.query("SELECT COUNT(DISTINCT *) FROM arrays_mongo_table") == "100\n" - - node.query("DROP TABLE arrays_mongo_table") - arrays_mongo_table.drop() - - -@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"]) -def test_complex_data_type(started_cluster): - mongo_connection = get_mongo_connection(started_cluster) - db = mongo_connection["test"] - db.add_user("root", "clickhouse") - incomplete_mongo_table = db["complex_table"] - data = [] - for i in range(0, 100): - data.append({"key": i, "data": hex(i * i), "dict": {"a": i, "b": str(i)}}) - incomplete_mongo_table.insert_many(data) - - node = started_cluster.instances["node"] - node.query( - "CREATE TABLE incomplete_mongo_table(key UInt64, data String) ENGINE = MongoDB('mongo1:27017', 'test', 'complex_table', 'root', 'clickhouse')" - ) - - assert node.query("SELECT COUNT() FROM incomplete_mongo_table") == "100\n" - assert ( - node.query("SELECT sum(key) FROM incomplete_mongo_table") - == str(sum(range(0, 100))) + "\n" - ) - - assert ( - node.query("SELECT data from incomplete_mongo_table where key = 42") - == hex(42 * 42) + "\n" - ) - node.query("DROP TABLE incomplete_mongo_table") - incomplete_mongo_table.drop() - - -@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"]) -def test_incorrect_data_type(started_cluster): - mongo_connection = get_mongo_connection(started_cluster) - db = mongo_connection["test"] - db.add_user("root", "clickhouse") - strange_mongo_table = db["strange_table"] - data = [] - for i in range(0, 100): - data.append({"key": i, "data": hex(i * i), "aaaa": "Hello"}) - strange_mongo_table.insert_many(data) - - node = started_cluster.instances["node"] - node.query( - "CREATE TABLE strange_mongo_table(key String, data String) ENGINE = MongoDB('mongo1:27017', 'test', 'strange_table', 'root', 'clickhouse')" - ) - - with pytest.raises(QueryRuntimeException): - node.query("SELECT COUNT() FROM strange_mongo_table") - - with pytest.raises(QueryRuntimeException): - node.query("SELECT uniq(key) FROM strange_mongo_table") - - node.query( - "CREATE TABLE strange_mongo_table2(key UInt64, data String, bbbb String) ENGINE = MongoDB('mongo1:27017', 'test', 'strange_table', 'root', 'clickhouse')" - ) - - node.query("DROP TABLE strange_mongo_table") - node.query("DROP TABLE strange_mongo_table2") - strange_mongo_table.drop() - - -@pytest.mark.parametrize("started_cluster", [True], indirect=["started_cluster"]) 
-def test_secure_connection(started_cluster): - mongo_connection = get_mongo_connection(started_cluster, secure=True) - db = mongo_connection["test"] - db.add_user("root", "clickhouse") - simple_mongo_table = db["simple_table"] - data = [] - for i in range(0, 100): - data.append({"key": i, "data": hex(i * i)}) - simple_mongo_table.insert_many(data) - - node = started_cluster.instances["node"] - node.query( - "CREATE TABLE simple_mongo_table(key UInt64, data String) ENGINE = MongoDB('mongo_secure:27017', 'test', 'simple_table', 'root', 'clickhouse', 'ssl=true')" - ) - - assert node.query("SELECT COUNT() FROM simple_mongo_table") == "100\n" - assert ( - node.query("SELECT sum(key) FROM simple_mongo_table") - == str(sum(range(0, 100))) + "\n" - ) - - assert ( - node.query("SELECT data from simple_mongo_table where key = 42") - == hex(42 * 42) + "\n" - ) - node.query("DROP TABLE simple_mongo_table") - simple_mongo_table.drop() - - -@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"]) -def test_predefined_connection_configuration(started_cluster): - mongo_connection = get_mongo_connection(started_cluster) - db = mongo_connection["test"] - db.add_user("root", "clickhouse") - simple_mongo_table = db["simple_table"] - data = [] - for i in range(0, 100): - data.append({"key": i, "data": hex(i * i)}) - simple_mongo_table.insert_many(data) - - node = started_cluster.instances["node"] - node.query("drop table if exists simple_mongo_table") - node.query( - "create table simple_mongo_table(key UInt64, data String) engine = MongoDB(mongo1)" - ) - assert node.query("SELECT count() FROM simple_mongo_table") == "100\n" - simple_mongo_table.drop() - - -@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"]) -def test_no_credentials(started_cluster): - mongo_connection = get_mongo_connection(started_cluster, with_credentials=False) - db = mongo_connection["test"] - simple_mongo_table = db["simple_table"] - data = [] - for i in range(0, 100): - data.append({"key": i, "data": hex(i * i)}) - simple_mongo_table.insert_many(data) - - node = started_cluster.instances["node"] - node.query( - f"create table simple_mongo_table_2(key UInt64, data String) engine = MongoDB('mongo_no_cred:27017', 'test', 'simple_table', '', '')" - ) - assert node.query("SELECT count() FROM simple_mongo_table_2") == "100\n" - simple_mongo_table.drop() - node.query("DROP TABLE IF EXISTS simple_mongo_table_2") - - -@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"]) -def test_auth_source(started_cluster): - mongo_connection = get_mongo_connection(started_cluster, with_credentials=False) - admin_db = mongo_connection["admin"] - admin_db.add_user( - "root", - "clickhouse", - roles=[{"role": "userAdminAnyDatabase", "db": "admin"}, "readWriteAnyDatabase"], - ) - simple_mongo_table_admin = admin_db["simple_table"] - data = [] - for i in range(0, 50): - data.append({"key": i, "data": hex(i * i)}) - simple_mongo_table_admin.insert_many(data) - - db = mongo_connection["test"] - simple_mongo_table = db["simple_table"] - data = [] - for i in range(0, 100): - data.append({"key": i, "data": hex(i * i)}) - simple_mongo_table.insert_many(data) - - node = started_cluster.instances["node"] - node.query( - "create table simple_mongo_table_fail(key UInt64, data String) engine = MongoDB('mongo_no_cred:27017', 'test', 'simple_table', 'root', 'clickhouse')" - ) - node.query_and_get_error("SELECT count() FROM simple_mongo_table_fail") - node.query( - "create table 
simple_mongo_table_ok(key UInt64, data String) engine = MongoDB('mongo_no_cred:27017', 'test', 'simple_table', 'root', 'clickhouse', 'authSource=admin')" - ) - assert node.query("SELECT count() FROM simple_mongo_table_ok") == "100\n" - simple_mongo_table.drop() - simple_mongo_table_admin.drop() - node.query("DROP TABLE IF EXISTS simple_mongo_table_ok") - node.query("DROP TABLE IF EXISTS simple_mongo_table_fail") - - -@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"]) -def test_missing_columns(started_cluster): - mongo_connection = get_mongo_connection(started_cluster) - db = mongo_connection["test"] - db.add_user("root", "clickhouse") - simple_mongo_table = db["simple_table"] - data = [] - for i in range(0, 10): - data.append({"key": i, "data": hex(i * i)}) - for i in range(0, 10): - data.append({"key": i}) - simple_mongo_table.insert_many(data) - - node = started_cluster.instances["node"] - node.query("drop table if exists simple_mongo_table") - node.query( - "create table simple_mongo_table(key UInt64, data Nullable(String)) engine = MongoDB(mongo1)" - ) - result = node.query("SELECT count() FROM simple_mongo_table WHERE isNull(data)") - assert result == "10\n" - simple_mongo_table.drop() - node.query("DROP TABLE IF EXISTS simple_mongo_table") - - -@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"]) -def test_simple_insert_select(started_cluster): - mongo_connection = get_mongo_connection(started_cluster) - db = mongo_connection["test"] - db.add_user("root", "clickhouse") - simple_mongo_table = db["simple_table"] - - node = started_cluster.instances["node"] - node.query("DROP TABLE IF EXISTS simple_mongo_table") - node.query( - "CREATE TABLE simple_mongo_table(key UInt64, data String) ENGINE = MongoDB('mongo1:27017', 'test', 'simple_table', 'root', 'clickhouse')" - ) - node.query( - "INSERT INTO simple_mongo_table SELECT number, 'kek' || toString(number) FROM numbers(10)" - ) - - assert ( - node.query("SELECT data from simple_mongo_table where key = 7").strip() - == "kek7" - ) - node.query("INSERT INTO simple_mongo_table(key) SELECT 12") - assert int(node.query("SELECT count() from simple_mongo_table")) == 11 - assert ( - node.query("SELECT data from simple_mongo_table where key = 12").strip() == "" - ) - - node.query("DROP TABLE simple_mongo_table") - simple_mongo_table.drop() diff --git a/tests/integration/test_table_function_mongodb/configs/feature_flag.xml b/tests/integration/test_table_function_mongodb/configs/feature_flag.xml deleted file mode 100644 index eb2d328060a..00000000000 --- a/tests/integration/test_table_function_mongodb/configs/feature_flag.xml +++ /dev/null @@ -1,3 +0,0 @@ - - 0 - diff --git a/tests/integration/test_table_function_mongodb/configs/named_collections.xml b/tests/integration/test_table_function_mongodb/configs/named_collections.xml deleted file mode 100644 index 1079ab996d4..00000000000 --- a/tests/integration/test_table_function_mongodb/configs/named_collections.xml +++ /dev/null @@ -1,16 +0,0 @@ - - - - root - clickhouse - mongo1 - 27017 - test - simple_table - - - mongodb://root:clickhouse@mongo1:27017/test - simple_table_uri - - - diff --git a/tests/integration/test_storage_mongodb_legacy/mongo_secure_config/config.d/ssl_conf.xml b/tests/integration/test_table_function_mongodb/configs_secure/config.d/ssl_conf.xml similarity index 100% rename from tests/integration/test_storage_mongodb_legacy/mongo_secure_config/config.d/ssl_conf.xml rename to 
tests/integration/test_table_function_mongodb/configs_secure/config.d/ssl_conf.xml diff --git a/tests/integration/test_table_function_mongodb/mongo_secure_config/cert.crt b/tests/integration/test_table_function_mongodb/mongo_secure_config/cert.crt deleted file mode 100644 index 94249109d41..00000000000 --- a/tests/integration/test_table_function_mongodb/mongo_secure_config/cert.crt +++ /dev/null @@ -1,24 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIEGzCCAwOgAwIBAgIUaoGlyuJAyvs6yowFXymfu7seEiUwDQYJKoZIhvcNAQEL -BQAwgZwxCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDbGlja0hvdXNlMRMwEQYDVQQH -DApDbGlja0hvdXNlMREwDwYDVQQKDAhQZXJzb25hbDETMBEGA1UECwwKQ2xpY2tI -b3VzZTEkMCIGCSqGSIb3DQEJARYVY2xpY2tob3VzZUBjbGlja2hvdXNlMRUwEwYD -VQQDDAxtb25nb19zZWN1cmUwHhcNMjQwNTI2MTYwMDMxWhcNMzQwNTI0MTYwMDMx -WjCBnDELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNsaWNrSG91c2UxEzARBgNVBAcM -CkNsaWNrSG91c2UxETAPBgNVBAoMCFBlcnNvbmFsMRMwEQYDVQQLDApDbGlja0hv -dXNlMSQwIgYJKoZIhvcNAQkBFhVjbGlja2hvdXNlQGNsaWNraG91c2UxFTATBgNV -BAMMDG1vbmdvX3NlY3VyZTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB -AJSeQfMG7xd0+kPehYlEsEw0Sm1DB05SXVCEzIX3DFD6XJrd8eeWwlzYaBatkcwj -+8yvqske02X/3YwpzJyFizAqJIoKql5c5Yii2xH1S9PFP0y+LoJre+eQziHyO33t -eeedeGNJ05Sm2ZAzjfMQ7Rdh6S+gdIO4Y102iQR5yr2aTrh7tu7XkNCjwKTqMMvz -SikP1Rft2J6ECim+MjYCCtH/4yXGeEJ5epU4t3y6Q23B2ZEhY+sqUdwgK9pu8oe4 -mkZ1Qvwakc9Qg12owRSDjBBYrPvghXVpkJ2JkgKTrIAIz9tZ53eDVHNXbWMAotov -jEmRSoGIS1yzwmQ9PdxUwYcCAwEAAaNTMFEwHQYDVR0OBBYEFJyz3Kt5XBDg5cvI -0v1ioqejqX+CMB8GA1UdIwQYMBaAFJyz3Kt5XBDg5cvI0v1ioqejqX+CMA8GA1Ud -EwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAHAQFA5VMYvaQFnKtKfHg9TF -qfJ4uM3YsGdgsgmGWgflD1S4Z290H6Q2QvyZAEceTrlJxqArlWlVp5DAU6EeXjEh -QMAgdkJHF1Hg2jsZKPtdkb88UtuzwAME357T8NtEJSHzNE5QqYwlVM71JkWpdqvA -UUdOJbWhhJfowIf4tMmL1DUuIy2qYpoP/tEBXEw9uwpmZqb7KELwT3lRyOMaGFN7 -RHVwbvJWlHiu83QDNaWz6ijQkWl3tCN6TWcFD1qc1x8GpMzjbsAAYbCx7fbHM2LD -9kGSCiyv5K0MLNK5u67RtUFfPHtyD8RA0TtxIZ4PEN/eFANKS2/5NEi1ZuZ5/Pk= ------END CERTIFICATE----- diff --git a/tests/integration/test_table_function_mongodb/mongo_secure_config/key.pem b/tests/integration/test_table_function_mongodb/mongo_secure_config/key.pem deleted file mode 100644 index 9444d19a3d2..00000000000 --- a/tests/integration/test_table_function_mongodb/mongo_secure_config/key.pem +++ /dev/null @@ -1,52 +0,0 @@ ------BEGIN PRIVATE KEY----- -MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCUnkHzBu8XdPpD -3oWJRLBMNEptQwdOUl1QhMyF9wxQ+lya3fHnlsJc2GgWrZHMI/vMr6rJHtNl/92M -KcychYswKiSKCqpeXOWIotsR9UvTxT9Mvi6Ca3vnkM4h8jt97XnnnXhjSdOUptmQ -M43zEO0XYekvoHSDuGNdNokEecq9mk64e7bu15DQo8Ck6jDL80opD9UX7diehAop -vjI2AgrR/+MlxnhCeXqVOLd8ukNtwdmRIWPrKlHcICvabvKHuJpGdUL8GpHPUINd -qMEUg4wQWKz74IV1aZCdiZICk6yACM/bWed3g1RzV21jAKLaL4xJkUqBiEtcs8Jk -PT3cVMGHAgMBAAECggEAAul6qiHchB+uQMCWyC5xTeRqAXR3tAv4Tj4fGJjkXY4Z -OrAjr9Kp38EvX1amgvUWV3FT3NMevDf5xd9OdzAA0g0uJIF+mAhYFW48i1FnQcHQ -mOf0zmiZR7l8o7ROb3JvooXHxW+ba/qjGPVwC801gJvruehgbOCRxh9DTRp7sH5K -BmcddhULhKBEQjWUmYNEM3A2axpdi3g1aYKERRLn8J0DXcItTwbxuxbNcs3erl8W -3yyv/JKmqnWF5sNyX3wEWuQcDEZZy+W7Hn4KPMxyU+WA5el5nJ8kFlxhpInmajwu -8Ytn6IEyThyXutVomosVBuP16QORl2Nad0hnQO9toQKBgQDDgiehXr3k2wfVaVOD -PocW4leXausIU2XcCn6FxTG9vLUDMPANw0MxgenC2nrjaUU9J9UjdRYgMcFGWrl4 -E27wEn5e0nZ/Y7F2cfhuOc9vNmZ+eHm2KQRyfAjIVL5Hpldqk2jXyCnLBNeWGHSw -kPQMU+FLqmrOFUvXlD2my+OSHwKBgQDCmgS9r+xFh4BCB9dY6eyQJF/jYmAQHs26 -80WJ6gAhbUw1O71uDtS9/3PZVXwwNCOHrcc49BPrpJdxGPHGvd2Q5y+j5LDDbQSZ -aLTiCZ2B0RM5Bd2dXD8gEHN4WCX7pJ/o4kDi4zONBmp5mg/tFfer5z5IU/1P7Wak -1Mu0JIHzmQKBgDNaNoqeVgaMuYwGtFbez6DlJtiwzrdLIJAheYYte5k4vdruub8D -sNyKIRp7RJgDCJq9obBEiuE98GRIZDrz78nDMco6QcHIL87KtNRO/vtZMKa7gkyk 
-jXR8u9nS2H/9YyytN3amLsQSq4XTOqM+D7xFNAIp6w/ibB9d4quzFj1FAoGBAKTE -x/LcO897NWuzO/D6z+QUCGR87R15F3SNenmVedrTskz4ciH3yMW+v5ZrPSWLX/IH -f8GHWD6TM+780eoW5L1GIh5BCjHN4rEJ6O3iekxqfD4x6zzL2F8Lztk8uZxh/Uuw -FoSFHybvIcQoYAe8K+KPfzq6cqb0OY6i5n920dkxAoGAJkw6ADqsJfH3NR+bQfgF -oEA1KqriMxyEJm44Y7E80C+iF4iNALF+Er9TSnr4mDxX5e/dW9d1YeS9o0nOfkpF -MaBmJfxqo4QQJLPRaxYQ2Jhfn7irir4BroxeNXQgNNhgSuKIvkfRyGYwl7P0AT4v -8H8rkZGneMD3gLB5MfnRhGk= ------END PRIVATE KEY----- ------BEGIN CERTIFICATE----- -MIIEGzCCAwOgAwIBAgIUaoGlyuJAyvs6yowFXymfu7seEiUwDQYJKoZIhvcNAQEL -BQAwgZwxCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDbGlja0hvdXNlMRMwEQYDVQQH -DApDbGlja0hvdXNlMREwDwYDVQQKDAhQZXJzb25hbDETMBEGA1UECwwKQ2xpY2tI -b3VzZTEkMCIGCSqGSIb3DQEJARYVY2xpY2tob3VzZUBjbGlja2hvdXNlMRUwEwYD -VQQDDAxtb25nb19zZWN1cmUwHhcNMjQwNTI2MTYwMDMxWhcNMzQwNTI0MTYwMDMx -WjCBnDELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNsaWNrSG91c2UxEzARBgNVBAcM -CkNsaWNrSG91c2UxETAPBgNVBAoMCFBlcnNvbmFsMRMwEQYDVQQLDApDbGlja0hv -dXNlMSQwIgYJKoZIhvcNAQkBFhVjbGlja2hvdXNlQGNsaWNraG91c2UxFTATBgNV -BAMMDG1vbmdvX3NlY3VyZTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB -AJSeQfMG7xd0+kPehYlEsEw0Sm1DB05SXVCEzIX3DFD6XJrd8eeWwlzYaBatkcwj -+8yvqske02X/3YwpzJyFizAqJIoKql5c5Yii2xH1S9PFP0y+LoJre+eQziHyO33t -eeedeGNJ05Sm2ZAzjfMQ7Rdh6S+gdIO4Y102iQR5yr2aTrh7tu7XkNCjwKTqMMvz -SikP1Rft2J6ECim+MjYCCtH/4yXGeEJ5epU4t3y6Q23B2ZEhY+sqUdwgK9pu8oe4 -mkZ1Qvwakc9Qg12owRSDjBBYrPvghXVpkJ2JkgKTrIAIz9tZ53eDVHNXbWMAotov -jEmRSoGIS1yzwmQ9PdxUwYcCAwEAAaNTMFEwHQYDVR0OBBYEFJyz3Kt5XBDg5cvI -0v1ioqejqX+CMB8GA1UdIwQYMBaAFJyz3Kt5XBDg5cvI0v1ioqejqX+CMA8GA1Ud -EwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAHAQFA5VMYvaQFnKtKfHg9TF -qfJ4uM3YsGdgsgmGWgflD1S4Z290H6Q2QvyZAEceTrlJxqArlWlVp5DAU6EeXjEh -QMAgdkJHF1Hg2jsZKPtdkb88UtuzwAME357T8NtEJSHzNE5QqYwlVM71JkWpdqvA -UUdOJbWhhJfowIf4tMmL1DUuIy2qYpoP/tEBXEw9uwpmZqb7KELwT3lRyOMaGFN7 -RHVwbvJWlHiu83QDNaWz6ijQkWl3tCN6TWcFD1qc1x8GpMzjbsAAYbCx7fbHM2LD -9kGSCiyv5K0MLNK5u67RtUFfPHtyD8RA0TtxIZ4PEN/eFANKS2/5NEi1ZuZ5/Pk= ------END CERTIFICATE----- diff --git a/tests/integration/test_table_function_mongodb/mongo_secure_config/mongo_secure.conf b/tests/integration/test_table_function_mongodb/mongo_secure_config/mongo_secure.conf deleted file mode 100644 index 42d9853c6eb..00000000000 --- a/tests/integration/test_table_function_mongodb/mongo_secure_config/mongo_secure.conf +++ /dev/null @@ -1,6 +0,0 @@ -net: - ssl: - mode: requireSSL - PEMKeyFile: /mongo/key.pem - CAFile: /mongo/cert.crt - allowConnectionsWithoutCertificates: true diff --git a/tests/integration/test_table_function_mongodb/test.py b/tests/integration/test_table_function_mongodb/test.py index 449e9d90672..3b6ace9d11b 100644 --- a/tests/integration/test_table_function_mongodb/test.py +++ b/tests/integration/test_table_function_mongodb/test.py @@ -10,14 +10,14 @@ from helpers.cluster import ClickHouseCluster def started_cluster(request): try: cluster = ClickHouseCluster(__file__) - cluster.add_instance( + node = cluster.add_instance( "node", with_mongo=True, main_configs=[ - "configs/named_collections.xml", - "configs/feature_flag.xml", + "configs_secure/config.d/ssl_conf.xml", ], user_configs=["configs/users.xml"], + with_mongo_secure=request.param, ) cluster.start() yield cluster @@ -26,33 +26,34 @@ def started_cluster(request): def get_mongo_connection(started_cluster, secure=False, with_credentials=True): - if secure: - return pymongo.MongoClient( - "mongodb://root:clickhouse@localhost:{}/?tls=true&tlsAllowInvalidCertificates=true&tlsAllowInvalidHostnames=true".format( - started_cluster.mongo_secure_port - ) - ) + connection_str = "" if 
with_credentials: - return pymongo.MongoClient( - "mongodb://root:clickhouse@localhost:{}".format(started_cluster.mongo_port) + connection_str = "mongodb://root:clickhouse@localhost:{}".format( + started_cluster.mongo_port ) - - return pymongo.MongoClient( - "mongodb://localhost:{}".format(started_cluster.mongo_no_cred_port) - ) + else: + connection_str = "mongodb://localhost:{}".format( + started_cluster.mongo_no_cred_port + ) + if secure: + connection_str += "/?tls=true&tlsAllowInvalidCertificates=true" + return pymongo.MongoClient(connection_str) +@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"]) def test_simple_select(started_cluster): mongo_connection = get_mongo_connection(started_cluster) db = mongo_connection["test"] db.add_user("root", "clickhouse") simple_mongo_table = db["simple_table"] - data = [] - for i in range(0, 100): - data.append({"key": i, "data": hex(i * i)}) - simple_mongo_table.insert_many(data) node = started_cluster.instances["node"] + for i in range(0, 100): + node.query( + "INSERT INTO FUNCTION mongodb('mongo1:27017', 'test', 'simple_table', 'root', 'clickhouse', structure='key UInt64, data String') (key, data) VALUES ({}, '{}')".format( + i, hex(i * i) + ) + ) assert ( node.query( "SELECT COUNT() FROM mongodb('mongo1:27017', 'test', 'simple_table', 'root', 'clickhouse', structure='key UInt64, data String')" @@ -74,52 +75,14 @@ def test_simple_select(started_cluster): assert ( node.query( - "SELECT data FROM mongodb('mongo1:27017', 'test', 'simple_table', 'root', 'clickhouse', structure='key UInt64, data String') WHERE key = 42" - ) - == hex(42 * 42) + "\n" - ) - simple_mongo_table.drop() - - -def test_simple_select_uri(started_cluster): - mongo_connection = get_mongo_connection(started_cluster) - db = mongo_connection["test"] - db.add_user("root", "clickhouse") - simple_mongo_table = db["simple_table"] - data = [] - for i in range(0, 100): - data.append({"key": i, "data": hex(i * i)}) - simple_mongo_table.insert_many(data) - - node = started_cluster.instances["node"] - assert ( - node.query( - "SELECT COUNT() FROM mongodb('mongodb://root:clickhouse@mongo1:27017/test', 'simple_table', structure='key UInt64, data String')" - ) - == "100\n" - ) - assert ( - node.query( - "SELECT sum(key) FROM mongodb('mongodb://root:clickhouse@mongo1:27017/test', 'simple_table', structure='key UInt64, data String')" - ) - == str(sum(range(0, 100))) + "\n" - ) - assert ( - node.query( - "SELECT sum(key) FROM mongodb('mongodb://root:clickhouse@mongo1:27017/test', 'simple_table', 'key UInt64, data String')" - ) - == str(sum(range(0, 100))) + "\n" - ) - - assert ( - node.query( - "SELECT data FROM mongodb('mongodb://root:clickhouse@mongo1:27017/test', 'simple_table', structure='key UInt64, data String') WHERE key = 42" + "SELECT data from mongodb('mongo1:27017', 'test', 'simple_table', 'root', 'clickhouse', structure='key UInt64, data String') where key = 42" ) == hex(42 * 42) + "\n" ) simple_mongo_table.drop() +@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"]) def test_complex_data_type(started_cluster): mongo_connection = get_mongo_connection(started_cluster) db = mongo_connection["test"] @@ -134,49 +97,27 @@ def test_complex_data_type(started_cluster): assert ( node.query( - """ - SELECT COUNT() - FROM mongodb('mongo1:27017', - 'test', - 'complex_table', - 'root', - 'clickhouse', - structure='key UInt64, data String, dict Map(UInt64, String)')""" + "SELECT COUNT() FROM mongodb('mongo1:27017', 'test', 'complex_table', 
'root', 'clickhouse', structure='key UInt64, data String, dict Map(UInt64, String)')" ) == "100\n" ) assert ( node.query( - """ - SELECT sum(key) - FROM mongodb('mongo1:27017', - 'test', - 'complex_table', - 'root', - 'clickhouse', - structure='key UInt64, data String, dict Map(UInt64, String)')""" + "SELECT sum(key) FROM mongodb('mongo1:27017', 'test', 'complex_table', 'root', 'clickhouse', structure='key UInt64, data String, dict Map(UInt64, String)')" ) == str(sum(range(0, 100))) + "\n" ) assert ( node.query( - """ - SELECT data - FROM mongodb('mongo1:27017', - 'test', - 'complex_table', - 'root', - 'clickhouse', - structure='key UInt64, data String, dict Map(UInt64, String)') - WHERE key = 42 - """ + "SELECT data from mongodb('mongo1:27017', 'test', 'complex_table', 'root', 'clickhouse', structure='key UInt64, data String, dict Map(UInt64, String)') where key = 42" ) == hex(42 * 42) + "\n" ) incomplete_mongo_table.drop() +@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"]) def test_incorrect_data_type(started_cluster): mongo_connection = get_mongo_connection(started_cluster) db = mongo_connection["test"] @@ -197,6 +138,7 @@ def test_incorrect_data_type(started_cluster): strange_mongo_table.drop() +@pytest.mark.parametrize("started_cluster", [True], indirect=["started_cluster"]) def test_secure_connection(started_cluster): mongo_connection = get_mongo_connection(started_cluster, secure=True) db = mongo_connection["test"] @@ -211,63 +153,35 @@ def test_secure_connection(started_cluster): assert ( node.query( - """SELECT COUNT() - FROM mongodb('mongo_secure:27017', - 'test', - 'simple_table', - 'root', - 'clickhouse', - structure='key UInt64, data String', - options='tls=true&tlsAllowInvalidCertificates=true&tlsAllowInvalidHostnames=true')""" + "SELECT COUNT() FROM mongodb('mongo1:27017', 'test', 'simple_table', 'root', 'clickhouse', structure='key UInt64, data String', options='ssl=true')" ) == "100\n" ) assert ( node.query( - """SELECT sum(key) - FROM mongodb('mongo_secure:27017', - 'test', - 'simple_table', - 'root', - 'clickhouse', - structure='key UInt64, data String', - options='tls=true&tlsAllowInvalidCertificates=true&tlsAllowInvalidHostnames=true')""" + "SELECT sum(key) FROM mongodb('mongo1:27017', 'test', 'simple_table', 'root', 'clickhouse', structure='key UInt64, data String', options='ssl=true')" ) == str(sum(range(0, 100))) + "\n" ) assert ( node.query( - """SELECT sum(key) - FROM mongodb('mongo_secure:27017', - 'test', - 'simple_table', - 'root', - 'clickhouse', - 'key UInt64, data String', - 'tls=true&tlsAllowInvalidCertificates=true&tlsAllowInvalidHostnames=true')""" + "SELECT sum(key) FROM mongodb('mongo1:27017', 'test', 'simple_table', 'root', 'clickhouse', 'key UInt64, data String', 'ssl=true')" ) == str(sum(range(0, 100))) + "\n" ) assert ( node.query( - """SELECT data - FROM mongodb('mongo_secure:27017', - 'test', - 'simple_table', - 'root', - 'clickhouse', - 'key UInt64, data String', - 'tls=true&tlsAllowInvalidCertificates=true&tlsAllowInvalidHostnames=true') - WHERE key = 42""" + "SELECT data from mongodb('mongo1:27017', 'test', 'simple_table', 'root', 'clickhouse', structure='key UInt64, data String', options='ssl=true') where key = 42" ) == hex(42 * 42) + "\n" ) simple_mongo_table.drop() -def test_secure_connection_with_validation(started_cluster): - mongo_connection = get_mongo_connection(started_cluster, secure=True) +@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"]) +def 
test_predefined_connection_configuration(started_cluster): + mongo_connection = get_mongo_connection(started_cluster) db = mongo_connection["test"] db.add_user("root", "clickhouse") simple_mongo_table = db["simple_table"] @@ -277,73 +191,16 @@ def test_secure_connection_with_validation(started_cluster): simple_mongo_table.insert_many(data) node = started_cluster.instances["node"] - with pytest.raises(QueryRuntimeException): - node.query( - """SELECT COUNT() FROM mongodb('mongo_secure:27017', - 'test', - 'simple_table', - 'root', - 'clickhouse', - structure='key UInt64, data String', - options='tls=true')""" - ) - - simple_mongo_table.drop() - - -def test_secure_connection_uri(started_cluster): - mongo_connection = get_mongo_connection(started_cluster, secure=True) - db = mongo_connection["test"] - db.add_user("root", "clickhouse") - simple_mongo_table = db["simple_table"] - data = [] - for i in range(0, 100): - data.append({"key": i, "data": hex(i * i)}) - simple_mongo_table.insert_many(data) - - node = started_cluster.instances["node"] - assert ( node.query( - """SELECT COUNT() - FROM mongodb('mongodb://root:clickhouse@mongo_secure:27017/test?tls=true&tlsAllowInvalidCertificates=true&tlsAllowInvalidHostnames=true', - 'simple_table', - 'key UInt64, data String')""" + "SELECT count() FROM mongodb('mongo1:27017', 'test', 'simple_table', 'root', 'clickhouse', structure='key UInt64, data String')" ) == "100\n" ) - assert ( - node.query( - """SELECT sum(key) - FROM mongodb('mongodb://root:clickhouse@mongo_secure:27017/test?tls=true&tlsAllowInvalidCertificates=true&tlsAllowInvalidHostnames=true', - 'simple_table', - 'key UInt64, data String')""" - ) - == str(sum(range(0, 100))) + "\n" - ) - assert ( - node.query( - """SELECT sum(key) - FROM mongodb('mongodb://root:clickhouse@mongo_secure:27017/test?tls=true&tlsAllowInvalidCertificates=true&tlsAllowInvalidHostnames=true', - 'simple_table', - 'key UInt64, data String')""" - ) - == str(sum(range(0, 100))) + "\n" - ) - - assert ( - node.query( - """SELECT data - FROM mongodb('mongodb://root:clickhouse@mongo_secure:27017/test?tls=true&tlsAllowInvalidCertificates=true&tlsAllowInvalidHostnames=true', - 'simple_table', - 'key UInt64, data String') - WHERE key = 42""" - ) - == hex(42 * 42) + "\n" - ) simple_mongo_table.drop() +@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"]) def test_no_credentials(started_cluster): mongo_connection = get_mongo_connection(started_cluster, with_credentials=False) db = mongo_connection["test"] @@ -356,13 +213,14 @@ def test_no_credentials(started_cluster): node = started_cluster.instances["node"] assert ( node.query( - "SELECT count() FROM mongodb('mongo_no_cred:27017', 'test', 'simple_table', '', '', structure='key UInt64, data String')" + "SELECT count() FROM mongodb('mongo2:27017', 'test', 'simple_table', '', '', structure='key UInt64, data String')" ) == "100\n" ) simple_mongo_table.drop() +@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"]) def test_auth_source(started_cluster): mongo_connection = get_mongo_connection(started_cluster, with_credentials=False) admin_db = mongo_connection["admin"] @@ -384,21 +242,21 @@ def test_auth_source(started_cluster): simple_mongo_table.insert_many(data) node = started_cluster.instances["node"] - with pytest.raises(QueryRuntimeException): - node.query( - "SELECT count() FROM mongodb('mongo_no_cred:27017', 'test', 'simple_table', 'root', 'clickhouse', structure='key UInt64, data String')" - ) + + 
node.query_and_get_error( + "SELECT count() FROM mongodb('mongo2:27017', 'test', 'simple_table', 'root', 'clickhouse', structure='key UInt64, data String')" + ) assert ( node.query( - "SELECT count() FROM mongodb('mongo_no_cred:27017', 'test', 'simple_table', 'root', 'clickhouse', structure='key UInt64, data String', options='authSource=admin')" + "SELECT count() FROM mongodb('mongo2:27017', 'test', 'simple_table', 'root', 'clickhouse', structure='key UInt64, data String', options='authSource=admin')" ) == "100\n" ) - simple_mongo_table.drop() +@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"]) def test_missing_columns(started_cluster): mongo_connection = get_mongo_connection(started_cluster) db = mongo_connection["test"] diff --git a/tests/integration/test_table_function_mongodb_legacy/__init__.py b/tests/integration/test_table_function_mongodb_legacy/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/tests/integration/test_table_function_mongodb_legacy/configs/feature_flag.xml b/tests/integration/test_table_function_mongodb_legacy/configs/feature_flag.xml deleted file mode 100644 index 4ee05db9d1e..00000000000 --- a/tests/integration/test_table_function_mongodb_legacy/configs/feature_flag.xml +++ /dev/null @@ -1,3 +0,0 @@ - - 1 - diff --git a/tests/integration/test_table_function_mongodb_legacy/configs/users.xml b/tests/integration/test_table_function_mongodb_legacy/configs/users.xml deleted file mode 100644 index 4b6ba057ecb..00000000000 --- a/tests/integration/test_table_function_mongodb_legacy/configs/users.xml +++ /dev/null @@ -1,9 +0,0 @@ - - - - - default - 1 - - - diff --git a/tests/integration/test_table_function_mongodb_legacy/mongo_secure_config/cert.crt b/tests/integration/test_table_function_mongodb_legacy/mongo_secure_config/cert.crt deleted file mode 100644 index 94249109d41..00000000000 --- a/tests/integration/test_table_function_mongodb_legacy/mongo_secure_config/cert.crt +++ /dev/null @@ -1,24 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIEGzCCAwOgAwIBAgIUaoGlyuJAyvs6yowFXymfu7seEiUwDQYJKoZIhvcNAQEL -BQAwgZwxCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDbGlja0hvdXNlMRMwEQYDVQQH -DApDbGlja0hvdXNlMREwDwYDVQQKDAhQZXJzb25hbDETMBEGA1UECwwKQ2xpY2tI -b3VzZTEkMCIGCSqGSIb3DQEJARYVY2xpY2tob3VzZUBjbGlja2hvdXNlMRUwEwYD -VQQDDAxtb25nb19zZWN1cmUwHhcNMjQwNTI2MTYwMDMxWhcNMzQwNTI0MTYwMDMx -WjCBnDELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNsaWNrSG91c2UxEzARBgNVBAcM -CkNsaWNrSG91c2UxETAPBgNVBAoMCFBlcnNvbmFsMRMwEQYDVQQLDApDbGlja0hv -dXNlMSQwIgYJKoZIhvcNAQkBFhVjbGlja2hvdXNlQGNsaWNraG91c2UxFTATBgNV -BAMMDG1vbmdvX3NlY3VyZTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB -AJSeQfMG7xd0+kPehYlEsEw0Sm1DB05SXVCEzIX3DFD6XJrd8eeWwlzYaBatkcwj -+8yvqske02X/3YwpzJyFizAqJIoKql5c5Yii2xH1S9PFP0y+LoJre+eQziHyO33t -eeedeGNJ05Sm2ZAzjfMQ7Rdh6S+gdIO4Y102iQR5yr2aTrh7tu7XkNCjwKTqMMvz -SikP1Rft2J6ECim+MjYCCtH/4yXGeEJ5epU4t3y6Q23B2ZEhY+sqUdwgK9pu8oe4 -mkZ1Qvwakc9Qg12owRSDjBBYrPvghXVpkJ2JkgKTrIAIz9tZ53eDVHNXbWMAotov -jEmRSoGIS1yzwmQ9PdxUwYcCAwEAAaNTMFEwHQYDVR0OBBYEFJyz3Kt5XBDg5cvI -0v1ioqejqX+CMB8GA1UdIwQYMBaAFJyz3Kt5XBDg5cvI0v1ioqejqX+CMA8GA1Ud -EwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAHAQFA5VMYvaQFnKtKfHg9TF -qfJ4uM3YsGdgsgmGWgflD1S4Z290H6Q2QvyZAEceTrlJxqArlWlVp5DAU6EeXjEh -QMAgdkJHF1Hg2jsZKPtdkb88UtuzwAME357T8NtEJSHzNE5QqYwlVM71JkWpdqvA -UUdOJbWhhJfowIf4tMmL1DUuIy2qYpoP/tEBXEw9uwpmZqb7KELwT3lRyOMaGFN7 -RHVwbvJWlHiu83QDNaWz6ijQkWl3tCN6TWcFD1qc1x8GpMzjbsAAYbCx7fbHM2LD -9kGSCiyv5K0MLNK5u67RtUFfPHtyD8RA0TtxIZ4PEN/eFANKS2/5NEi1ZuZ5/Pk= ------END CERTIFICATE----- diff --git 
a/tests/integration/test_table_function_mongodb_legacy/mongo_secure_config/config.d/ssl_conf.xml b/tests/integration/test_table_function_mongodb_legacy/mongo_secure_config/config.d/ssl_conf.xml deleted file mode 100644 index 3efe98e7045..00000000000 --- a/tests/integration/test_table_function_mongodb_legacy/mongo_secure_config/config.d/ssl_conf.xml +++ /dev/null @@ -1,8 +0,0 @@ - - - - - none - - - diff --git a/tests/integration/test_table_function_mongodb_legacy/mongo_secure_config/key.pem b/tests/integration/test_table_function_mongodb_legacy/mongo_secure_config/key.pem deleted file mode 100644 index 9444d19a3d2..00000000000 --- a/tests/integration/test_table_function_mongodb_legacy/mongo_secure_config/key.pem +++ /dev/null @@ -1,52 +0,0 @@ ------BEGIN PRIVATE KEY----- -MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCUnkHzBu8XdPpD -3oWJRLBMNEptQwdOUl1QhMyF9wxQ+lya3fHnlsJc2GgWrZHMI/vMr6rJHtNl/92M -KcychYswKiSKCqpeXOWIotsR9UvTxT9Mvi6Ca3vnkM4h8jt97XnnnXhjSdOUptmQ -M43zEO0XYekvoHSDuGNdNokEecq9mk64e7bu15DQo8Ck6jDL80opD9UX7diehAop -vjI2AgrR/+MlxnhCeXqVOLd8ukNtwdmRIWPrKlHcICvabvKHuJpGdUL8GpHPUINd -qMEUg4wQWKz74IV1aZCdiZICk6yACM/bWed3g1RzV21jAKLaL4xJkUqBiEtcs8Jk -PT3cVMGHAgMBAAECggEAAul6qiHchB+uQMCWyC5xTeRqAXR3tAv4Tj4fGJjkXY4Z -OrAjr9Kp38EvX1amgvUWV3FT3NMevDf5xd9OdzAA0g0uJIF+mAhYFW48i1FnQcHQ -mOf0zmiZR7l8o7ROb3JvooXHxW+ba/qjGPVwC801gJvruehgbOCRxh9DTRp7sH5K -BmcddhULhKBEQjWUmYNEM3A2axpdi3g1aYKERRLn8J0DXcItTwbxuxbNcs3erl8W -3yyv/JKmqnWF5sNyX3wEWuQcDEZZy+W7Hn4KPMxyU+WA5el5nJ8kFlxhpInmajwu -8Ytn6IEyThyXutVomosVBuP16QORl2Nad0hnQO9toQKBgQDDgiehXr3k2wfVaVOD -PocW4leXausIU2XcCn6FxTG9vLUDMPANw0MxgenC2nrjaUU9J9UjdRYgMcFGWrl4 -E27wEn5e0nZ/Y7F2cfhuOc9vNmZ+eHm2KQRyfAjIVL5Hpldqk2jXyCnLBNeWGHSw -kPQMU+FLqmrOFUvXlD2my+OSHwKBgQDCmgS9r+xFh4BCB9dY6eyQJF/jYmAQHs26 -80WJ6gAhbUw1O71uDtS9/3PZVXwwNCOHrcc49BPrpJdxGPHGvd2Q5y+j5LDDbQSZ -aLTiCZ2B0RM5Bd2dXD8gEHN4WCX7pJ/o4kDi4zONBmp5mg/tFfer5z5IU/1P7Wak -1Mu0JIHzmQKBgDNaNoqeVgaMuYwGtFbez6DlJtiwzrdLIJAheYYte5k4vdruub8D -sNyKIRp7RJgDCJq9obBEiuE98GRIZDrz78nDMco6QcHIL87KtNRO/vtZMKa7gkyk -jXR8u9nS2H/9YyytN3amLsQSq4XTOqM+D7xFNAIp6w/ibB9d4quzFj1FAoGBAKTE -x/LcO897NWuzO/D6z+QUCGR87R15F3SNenmVedrTskz4ciH3yMW+v5ZrPSWLX/IH -f8GHWD6TM+780eoW5L1GIh5BCjHN4rEJ6O3iekxqfD4x6zzL2F8Lztk8uZxh/Uuw -FoSFHybvIcQoYAe8K+KPfzq6cqb0OY6i5n920dkxAoGAJkw6ADqsJfH3NR+bQfgF -oEA1KqriMxyEJm44Y7E80C+iF4iNALF+Er9TSnr4mDxX5e/dW9d1YeS9o0nOfkpF -MaBmJfxqo4QQJLPRaxYQ2Jhfn7irir4BroxeNXQgNNhgSuKIvkfRyGYwl7P0AT4v -8H8rkZGneMD3gLB5MfnRhGk= ------END PRIVATE KEY----- ------BEGIN CERTIFICATE----- -MIIEGzCCAwOgAwIBAgIUaoGlyuJAyvs6yowFXymfu7seEiUwDQYJKoZIhvcNAQEL -BQAwgZwxCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDbGlja0hvdXNlMRMwEQYDVQQH -DApDbGlja0hvdXNlMREwDwYDVQQKDAhQZXJzb25hbDETMBEGA1UECwwKQ2xpY2tI -b3VzZTEkMCIGCSqGSIb3DQEJARYVY2xpY2tob3VzZUBjbGlja2hvdXNlMRUwEwYD -VQQDDAxtb25nb19zZWN1cmUwHhcNMjQwNTI2MTYwMDMxWhcNMzQwNTI0MTYwMDMx -WjCBnDELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNsaWNrSG91c2UxEzARBgNVBAcM -CkNsaWNrSG91c2UxETAPBgNVBAoMCFBlcnNvbmFsMRMwEQYDVQQLDApDbGlja0hv -dXNlMSQwIgYJKoZIhvcNAQkBFhVjbGlja2hvdXNlQGNsaWNraG91c2UxFTATBgNV -BAMMDG1vbmdvX3NlY3VyZTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB -AJSeQfMG7xd0+kPehYlEsEw0Sm1DB05SXVCEzIX3DFD6XJrd8eeWwlzYaBatkcwj -+8yvqske02X/3YwpzJyFizAqJIoKql5c5Yii2xH1S9PFP0y+LoJre+eQziHyO33t -eeedeGNJ05Sm2ZAzjfMQ7Rdh6S+gdIO4Y102iQR5yr2aTrh7tu7XkNCjwKTqMMvz -SikP1Rft2J6ECim+MjYCCtH/4yXGeEJ5epU4t3y6Q23B2ZEhY+sqUdwgK9pu8oe4 -mkZ1Qvwakc9Qg12owRSDjBBYrPvghXVpkJ2JkgKTrIAIz9tZ53eDVHNXbWMAotov -jEmRSoGIS1yzwmQ9PdxUwYcCAwEAAaNTMFEwHQYDVR0OBBYEFJyz3Kt5XBDg5cvI 
-0v1ioqejqX+CMB8GA1UdIwQYMBaAFJyz3Kt5XBDg5cvI0v1ioqejqX+CMA8GA1Ud -EwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAHAQFA5VMYvaQFnKtKfHg9TF -qfJ4uM3YsGdgsgmGWgflD1S4Z290H6Q2QvyZAEceTrlJxqArlWlVp5DAU6EeXjEh -QMAgdkJHF1Hg2jsZKPtdkb88UtuzwAME357T8NtEJSHzNE5QqYwlVM71JkWpdqvA -UUdOJbWhhJfowIf4tMmL1DUuIy2qYpoP/tEBXEw9uwpmZqb7KELwT3lRyOMaGFN7 -RHVwbvJWlHiu83QDNaWz6ijQkWl3tCN6TWcFD1qc1x8GpMzjbsAAYbCx7fbHM2LD -9kGSCiyv5K0MLNK5u67RtUFfPHtyD8RA0TtxIZ4PEN/eFANKS2/5NEi1ZuZ5/Pk= ------END CERTIFICATE----- diff --git a/tests/integration/test_table_function_mongodb_legacy/mongo_secure_config/mongo_secure.conf b/tests/integration/test_table_function_mongodb_legacy/mongo_secure_config/mongo_secure.conf deleted file mode 100644 index 42d9853c6eb..00000000000 --- a/tests/integration/test_table_function_mongodb_legacy/mongo_secure_config/mongo_secure.conf +++ /dev/null @@ -1,6 +0,0 @@ -net: - ssl: - mode: requireSSL - PEMKeyFile: /mongo/key.pem - CAFile: /mongo/cert.crt - allowConnectionsWithoutCertificates: true diff --git a/tests/integration/test_table_function_mongodb_legacy/test.py b/tests/integration/test_table_function_mongodb_legacy/test.py deleted file mode 100644 index a3dcf84193e..00000000000 --- a/tests/integration/test_table_function_mongodb_legacy/test.py +++ /dev/null @@ -1,277 +0,0 @@ -import pymongo - -import pytest -from helpers.client import QueryRuntimeException - -from helpers.cluster import ClickHouseCluster - - -@pytest.fixture(scope="module") -def started_cluster(request): - try: - cluster = ClickHouseCluster(__file__) - node = cluster.add_instance( - "node", - with_mongo=True, - main_configs=[ - "mongo_secure_config/config.d/ssl_conf.xml", - "configs/feature_flag.xml", - ], - user_configs=["configs/users.xml"], - ) - cluster.start() - yield cluster - finally: - cluster.shutdown() - - -def get_mongo_connection(started_cluster, secure=False, with_credentials=True): - connection_str = "" - if with_credentials: - connection_str = "mongodb://root:clickhouse@localhost:{}".format( - started_cluster.mongo_secure_port if secure else started_cluster.mongo_port - ) - else: - connection_str = "mongodb://localhost:{}".format( - started_cluster.mongo_no_cred_port - ) - if secure: - connection_str += "/?tls=true&tlsAllowInvalidCertificates=true" - return pymongo.MongoClient(connection_str) - - -@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"]) -def test_simple_select(started_cluster): - mongo_connection = get_mongo_connection(started_cluster) - db = mongo_connection["test"] - db.add_user("root", "clickhouse") - simple_mongo_table = db["simple_table"] - - node = started_cluster.instances["node"] - for i in range(0, 100): - node.query( - "INSERT INTO FUNCTION mongodb('mongo1:27017', 'test', 'simple_table', 'root', 'clickhouse', structure='key UInt64, data String') (key, data) VALUES ({}, '{}')".format( - i, hex(i * i) - ) - ) - assert ( - node.query( - "SELECT COUNT() FROM mongodb('mongo1:27017', 'test', 'simple_table', 'root', 'clickhouse', structure='key UInt64, data String')" - ) - == "100\n" - ) - assert ( - node.query( - "SELECT sum(key) FROM mongodb('mongo1:27017', 'test', 'simple_table', 'root', 'clickhouse', structure='key UInt64, data String')" - ) - == str(sum(range(0, 100))) + "\n" - ) - assert ( - node.query( - "SELECT sum(key) FROM mongodb('mongo1:27017', 'test', 'simple_table', 'root', 'clickhouse', 'key UInt64, data String')" - ) - == str(sum(range(0, 100))) + "\n" - ) - - assert ( - node.query( - "SELECT data from mongodb('mongo1:27017', 'test', 'simple_table', 
'root', 'clickhouse', structure='key UInt64, data String') where key = 42" - ) - == hex(42 * 42) + "\n" - ) - simple_mongo_table.drop() - - -@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"]) -def test_complex_data_type(started_cluster): - mongo_connection = get_mongo_connection(started_cluster) - db = mongo_connection["test"] - db.add_user("root", "clickhouse") - incomplete_mongo_table = db["complex_table"] - data = [] - for i in range(0, 100): - data.append({"key": i, "data": hex(i * i), "dict": {"a": i, "b": str(i)}}) - incomplete_mongo_table.insert_many(data) - - node = started_cluster.instances["node"] - - assert ( - node.query( - "SELECT COUNT() FROM mongodb('mongo1:27017', 'test', 'complex_table', 'root', 'clickhouse', structure='key UInt64, data String, dict Map(UInt64, String)')" - ) - == "100\n" - ) - assert ( - node.query( - "SELECT sum(key) FROM mongodb('mongo1:27017', 'test', 'complex_table', 'root', 'clickhouse', structure='key UInt64, data String, dict Map(UInt64, String)')" - ) - == str(sum(range(0, 100))) + "\n" - ) - - assert ( - node.query( - "SELECT data from mongodb('mongo1:27017', 'test', 'complex_table', 'root', 'clickhouse', structure='key UInt64, data String, dict Map(UInt64, String)') where key = 42" - ) - == hex(42 * 42) + "\n" - ) - incomplete_mongo_table.drop() - - -@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"]) -def test_incorrect_data_type(started_cluster): - mongo_connection = get_mongo_connection(started_cluster) - db = mongo_connection["test"] - db.add_user("root", "clickhouse") - strange_mongo_table = db["strange_table"] - data = [] - for i in range(0, 100): - data.append({"key": i, "data": hex(i * i), "aaaa": "Hello"}) - strange_mongo_table.insert_many(data) - - node = started_cluster.instances["node"] - - with pytest.raises(QueryRuntimeException): - node.query( - "SELECT aaaa FROM mongodb('mongo1:27017', 'test', 'strange_table', 'root', 'clickhouse', structure='key UInt64, data String')" - ) - - strange_mongo_table.drop() - - -@pytest.mark.parametrize("started_cluster", [True], indirect=["started_cluster"]) -def test_secure_connection(started_cluster): - mongo_connection = get_mongo_connection(started_cluster, secure=True) - db = mongo_connection["test"] - db.add_user("root", "clickhouse") - simple_mongo_table = db["simple_table"] - data = [] - for i in range(0, 100): - data.append({"key": i, "data": hex(i * i)}) - simple_mongo_table.insert_many(data) - - node = started_cluster.instances["node"] - - assert ( - node.query( - "SELECT COUNT() FROM mongodb('mongo_secure:27017', 'test', 'simple_table', 'root', 'clickhouse', structure='key UInt64, data String', options='ssl=true')" - ) - == "100\n" - ) - assert ( - node.query( - "SELECT sum(key) FROM mongodb('mongo_secure:27017', 'test', 'simple_table', 'root', 'clickhouse', structure='key UInt64, data String', options='ssl=true')" - ) - == str(sum(range(0, 100))) + "\n" - ) - assert ( - node.query( - "SELECT sum(key) FROM mongodb('mongo_secure:27017', 'test', 'simple_table', 'root', 'clickhouse', 'key UInt64, data String', 'ssl=true')" - ) - == str(sum(range(0, 100))) + "\n" - ) - - assert ( - node.query( - "SELECT data from mongodb('mongo_secure:27017', 'test', 'simple_table', 'root', 'clickhouse', structure='key UInt64, data String', options='ssl=true') where key = 42" - ) - == hex(42 * 42) + "\n" - ) - simple_mongo_table.drop() - - -@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"]) -def 
test_predefined_connection_configuration(started_cluster): - mongo_connection = get_mongo_connection(started_cluster) - db = mongo_connection["test"] - db.add_user("root", "clickhouse") - simple_mongo_table = db["simple_table"] - data = [] - for i in range(0, 100): - data.append({"key": i, "data": hex(i * i)}) - simple_mongo_table.insert_many(data) - - node = started_cluster.instances["node"] - assert ( - node.query( - "SELECT count() FROM mongodb('mongo1:27017', 'test', 'simple_table', 'root', 'clickhouse', structure='key UInt64, data String')" - ) - == "100\n" - ) - simple_mongo_table.drop() - - -@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"]) -def test_no_credentials(started_cluster): - mongo_connection = get_mongo_connection(started_cluster, with_credentials=False) - db = mongo_connection["test"] - simple_mongo_table = db["simple_table"] - data = [] - for i in range(0, 100): - data.append({"key": i, "data": hex(i * i)}) - simple_mongo_table.insert_many(data) - - node = started_cluster.instances["node"] - assert ( - node.query( - "SELECT count() FROM mongodb('mongo_no_cred:27017', 'test', 'simple_table', '', '', structure='key UInt64, data String')" - ) - == "100\n" - ) - simple_mongo_table.drop() - - -@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"]) -def test_auth_source(started_cluster): - mongo_connection = get_mongo_connection(started_cluster, with_credentials=False) - admin_db = mongo_connection["admin"] - admin_db.add_user( - "root", - "clickhouse", - roles=[{"role": "userAdminAnyDatabase", "db": "admin"}, "readWriteAnyDatabase"], - ) - simple_mongo_table = admin_db["simple_table"] - data = [] - for i in range(0, 50): - data.append({"key": i, "data": hex(i * i)}) - simple_mongo_table.insert_many(data) - db = mongo_connection["test"] - simple_mongo_table = db["simple_table"] - data = [] - for i in range(0, 100): - data.append({"key": i, "data": hex(i * i)}) - simple_mongo_table.insert_many(data) - - node = started_cluster.instances["node"] - - node.query_and_get_error( - "SELECT count() FROM mongodb('mongo_no_cred:27017', 'test', 'simple_table', 'root', 'clickhouse', structure='key UInt64, data String')" - ) - - assert ( - node.query( - "SELECT count() FROM mongodb('mongo_no_cred:27017', 'test', 'simple_table', 'root', 'clickhouse', structure='key UInt64, data String', options='authSource=admin')" - ) - == "100\n" - ) - simple_mongo_table.drop() - - -@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"]) -def test_missing_columns(started_cluster): - mongo_connection = get_mongo_connection(started_cluster) - db = mongo_connection["test"] - db.add_user("root", "clickhouse") - simple_mongo_table = db["simple_table"] - data = [] - for i in range(0, 10): - data.append({"key": i, "data": hex(i * i)}) - for i in range(0, 10): - data.append({"key": i}) - simple_mongo_table.insert_many(data) - - node = started_cluster.instances["node"] - result = node.query( - "SELECT count() FROM mongodb('mongo1:27017', 'test', 'simple_table', 'root', 'clickhouse', structure='key UInt64, data Nullable(String)') WHERE isNull(data)" - ) - assert result == "10\n" - simple_mongo_table.drop() diff --git a/tests/queries/0_stateless/02414_all_new_table_functions_must_be_documented.reference b/tests/queries/0_stateless/02414_all_new_table_functions_must_be_documented.reference index 7ec1e15415a..57ffeca9d43 100644 --- a/tests/queries/0_stateless/02414_all_new_table_functions_must_be_documented.reference +++ 
b/tests/queries/0_stateless/02414_all_new_table_functions_must_be_documented.reference @@ -9,6 +9,7 @@ generate_series input jdbc merge +mongodb null numbers numbers_mt diff --git a/tests/queries/0_stateless/02414_all_new_table_functions_must_be_documented.sql b/tests/queries/0_stateless/02414_all_new_table_functions_must_be_documented.sql index 5fc06e19110..ef339b760aa 100644 --- a/tests/queries/0_stateless/02414_all_new_table_functions_must_be_documented.sql +++ b/tests/queries/0_stateless/02414_all_new_table_functions_must_be_documented.sql @@ -2,6 +2,5 @@ -- Please help shorten this list down to zero elements. SELECT name FROM system.table_functions WHERE length(description) < 10 AND name NOT IN ( - 'cosn', 'oss', 'hdfs', 'hdfsCluster', 'hive', 'mysql', 'postgresql', 's3', 's3Cluster', 'sqlite', 'urlCluster', -- these functions are not enabled in fast test - 'mongodb' -- will be removed when `use_legacy_mongodb_integration` setting will be purged will with the old implementation + 'cosn', 'oss', 'hdfs', 'hdfsCluster', 'hive', 'mysql', 'postgresql', 's3', 's3Cluster', 'sqlite', 'urlCluster' -- these functions are not enabled in fast test ) ORDER BY name; diff --git a/tests/queries/0_stateless/02888_system_tables_with_inaccessible_table_function.sql b/tests/queries/0_stateless/02888_system_tables_with_inaccessible_table_function.sql index 1727ae3dc85..14768a95006 100644 --- a/tests/queries/0_stateless/02888_system_tables_with_inaccessible_table_function.sql +++ b/tests/queries/0_stateless/02888_system_tables_with_inaccessible_table_function.sql @@ -8,7 +8,7 @@ CREATE DATABASE {CLICKHOUSE_DATABASE:Identifier}; CREATE TABLE {CLICKHOUSE_DATABASE:Identifier}.tablefunc01 (x int) AS postgresql('127.121.0.1:5432', 'postgres_db', 'postgres_table', 'postgres_user', '124444'); CREATE TABLE {CLICKHOUSE_DATABASE:Identifier}.tablefunc02 (x int) AS mysql('127.123.0.1:3306', 'mysql_db', 'mysql_table', 'mysql_user','123123'); CREATE TABLE {CLICKHOUSE_DATABASE:Identifier}.tablefunc03 (a int) AS sqlite('db_path', 'table_name'); -CREATE TABLE {CLICKHOUSE_DATABASE:Identifier}.tablefunc04 (a int) AS mongodb('127.0.0.1:27017','test', 'my_collection', 'test_user', 'password', 'a Int'); +CREATE TABLE {CLICKHOUSE_DATABASE:Identifier}.tablefunc04 (a int) AS mongodb('127.0.0.1:27017','test', 'my_collection', 'test_user', 'password', 'a Int'); CREATE TABLE {CLICKHOUSE_DATABASE:Identifier}.tablefunc05 (a int) AS redis('127.0.0.1:6379', 'key', 'key UInt32'); CREATE TABLE {CLICKHOUSE_DATABASE:Identifier}.tablefunc06 (a int) AS s3('http://some_addr:9000/cloud-storage-01/data.tsv', 'M9O7o0SX5I4udXhWxI12', '9ijqzmVN83fzD9XDkEAAAAAAAA', 'TSV'); diff --git a/utils/check-style/aspell-ignore/en/aspell-dict.txt b/utils/check-style/aspell-ignore/en/aspell-dict.txt index daf55272fcb..6d8d8ddde3e 100644 --- a/utils/check-style/aspell-ignore/en/aspell-dict.txt +++ b/utils/check-style/aspell-ignore/en/aspell-dict.txt @@ -583,9 +583,6 @@ MinHash MinIO MinMax MindsDB -mongoc -mongocxx -Mongo Mongodb Monotonicity MsgPack @@ -2086,7 +2083,6 @@ minimalistic mininum miniselect minmap -mflix minmax mins misconfiguration diff --git a/utils/check-style/codespell-ignore-words.list b/utils/check-style/codespell-ignore-words.list index 9593f98ff5d..27e08de80ee 100644 --- a/utils/check-style/codespell-ignore-words.list +++ b/utils/check-style/codespell-ignore-words.list @@ -32,4 +32,3 @@ nam ubuntu toolchain vie -nin
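For reference, a minimal sketch (not part of the patch) of the pattern the restored integration tests above rely on: seed a MongoDB collection with pymongo, then read it back through the mongodb() table function with an explicit structure. The mongo1:27017 host, the root/clickhouse credentials, the test database, and the node fixture object with its .query() method are taken from the tests in this diff and are assumptions of the sketch, not guarantees of any particular environment.

import pymongo

def count_simple_table(node, mongo_port=27017):
    # Seed the collection the same way the tests above do
    # (assumes a MongoDB reachable on localhost:mongo_port with root/clickhouse credentials).
    client = pymongo.MongoClient(
        "mongodb://root:clickhouse@localhost:{}".format(mongo_port)
    )
    simple_table = client["test"]["simple_table"]
    simple_table.insert_many([{"key": i, "data": hex(i * i)} for i in range(100)])

    # Read it back through ClickHouse; `node` is assumed to be a ClickHouseCluster
    # instance object exposing .query(), as in the fixtures above.
    return node.query(
        "SELECT count() FROM mongodb('mongo1:27017', 'test', 'simple_table', "
        "'root', 'clickhouse', structure='key UInt64, data String')"
    )

The explicit structure argument is what tells the table function which column names and types to map the BSON documents onto, which is why every query in the tests above passes it.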