diff --git a/CHANGELOG.md b/CHANGELOG.md index b5a9928e8ff..f59e58846d3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,8 @@ +# ClickHouse release 1.1.54381, 2018-05-14 + +## Bug fixes: +* Fixed a nodes leak in ZooKeeper when ClickHouse loses connection to ZooKeeper server. + # ClickHouse release 1.1.54380, 2018-04-21 ## New features: diff --git a/CHANGELOG_RU.md b/CHANGELOG_RU.md index d6b0c1e1ddb..c05d70b0eef 100644 --- a/CHANGELOG_RU.md +++ b/CHANGELOG_RU.md @@ -1,3 +1,8 @@ +# ClickHouse release 1.1.54381, 2018-05-14 + +## Исправление ошибок: +* Исправлена ошибка, приводящая к "утеканию" метаданных в ZooKeeper при потере соединения с сервером ZooKeeper. + # ClickHouse release 1.1.54380, 2018-04-21 ## Новые возможности: diff --git a/ci/README.md b/ci/README.md index 6eeb35c1c25..733cbce80c9 100644 --- a/ci/README.md +++ b/ci/README.md @@ -1,4 +1,4 @@ -### Build and test ClickHouse on various plaforms +## Build and test ClickHouse on various plaforms Quick and dirty scripts. @@ -13,17 +13,23 @@ Another example, check build on ARM 64: ./run-with-docker.sh multiarch/ubuntu-core:arm64-bionic jobs/quick-build/run.sh ``` -Look at `default_config` and `jobs/quick-build/config` +Another example, check build on FreeBSD: +``` +./prepare-vagrant-image-freebsd.sh +./run-with-vagrant.sh freebsd jobs/quick-build/run.sh +``` + +Look at `default_config` and `jobs/quick-build/run.sh` Various possible options. We are not going to automate testing all of them. -##### CPU architectures: +#### CPU architectures: - x86_64; - AArch64. x86_64 is the main CPU architecture. We also have minimal support for AArch64. -##### Operating systems: +#### Operating systems: - Linux; - FreeBSD. @@ -31,7 +37,7 @@ We also target Mac OS X, but it's more difficult to test. Linux is the main. FreeBSD is also supported as production OS. Mac OS is intended only for development and have minimal support: client should work, server should just start. 
-##### Linux distributions: +#### Linux distributions: For build: - Ubuntu Bionic; - Ubuntu Trusty. @@ -42,83 +48,83 @@ For run: We should support almost any Linux to run ClickHouse. That's why we test also on old distributions. -##### How to obtain sources: +#### How to obtain sources: - use sources from local working copy; - clone sources from github; - download source tarball. -##### Compilers: +#### Compilers: - gcc-7; - gcc-8; - clang-6; - clang-svn. -##### Compiler installation: +#### Compiler installation: - from OS packages; - build from sources. -##### C++ standard library implementation: +#### C++ standard library implementation: - libc++; - libstdc++ with C++11 ABI; - libstdc++ with old ABI. When building with clang, libc++ is used. When building with gcc, we choose libstdc++ with C++11 ABI. -##### Linkers: +#### Linkers: - ldd; - gold; When building with clang on x86_64, ldd is used. Otherwise we use gold. -##### Build types: +#### Build types: - RelWithDebInfo; - Debug; - ASan; - TSan. -##### Build types, extra: +#### Build types, extra: - -g0 for quick build; - enable test coverage; - debug tcmalloc. -##### What to build: +#### What to build: - only `clickhouse` target; - all targets; - debian packages; We also have intent to build RPM and simple tgz packages. -##### Where to get third-party libraries: +#### Where to get third-party libraries: - from contrib directory (submodules); - from OS packages. The only production option is to use libraries from contrib directory. Using libraries from OS packages is discouraged, but we also support this option. -##### Linkage types: +#### Linkage types: - static; - shared; Static linking is the only option for production usage. We also have support for shared linking, but it is indended only for developers. -##### Make tools: +#### Make tools: - make; - ninja. -##### Installation options: +#### Installation options: - run built `clickhouse` binary directly; - install from packages. 
-##### How to obtain packages: +#### How to obtain packages: - build them; - download from repository. -##### Sanity checks: +#### Sanity checks: - check that clickhouse binary has no dependencies on unexpected shared libraries; - check that source code have no style violations. -##### Tests: +#### Tests: - Functional tests; - Integration tests; - Unit tests; @@ -127,10 +133,10 @@ We also have support for shared linking, but it is indended only for developers. - Tests for external dictionaries (should be moved to integration tests); - Jepsen like tests for quorum inserts (not yet available in opensource). -##### Tests extra: +#### Tests extra: - Run functional tests with Valgrind. -##### Static analyzers: +#### Static analyzers: - CppCheck; - clang-tidy; - Coverity. diff --git a/ci/build-clang-from-sources.sh b/ci/build-clang-from-sources.sh index 64898c5fdc3..7e3793c8148 100755 --- a/ci/build-clang-from-sources.sh +++ b/ci/build-clang-from-sources.sh @@ -4,8 +4,8 @@ set -e -x source default-config # TODO Non debian systems -$SUDO apt-get install -y subversion -apt-cache search cmake3 | grep -P '^cmake3 ' && $SUDO apt-get -y install cmake3 || $SUDO apt-get -y install cmake +./install-os-packages.sh svn +./install-os-packages.sh cmake mkdir "${WORKSPACE}/llvm" diff --git a/ci/build-gcc-from-sources.sh b/ci/build-gcc-from-sources.sh index b41ac0365bd..0734b22335a 100755 --- a/ci/build-gcc-from-sources.sh +++ b/ci/build-gcc-from-sources.sh @@ -3,7 +3,7 @@ set -e -x source default-config -$SUDO apt-get install -y curl +./install-os-packages.sh curl if [[ "${GCC_SOURCES_VERSION}" == "latest" ]]; then GCC_SOURCES_VERSION=$(curl -sSL https://ftpmirror.gnu.org/gcc/ | grep -oE 'gcc-[0-9]+(\.[0-9]+)+' | sort -Vr | head -n1) diff --git a/ci/check-syntax.sh b/ci/check-syntax.sh index c5043ff512c..e95e38346d6 100755 --- a/ci/check-syntax.sh +++ b/ci/check-syntax.sh @@ -3,7 +3,7 @@ set -e -x source default-config -$SUDO apt-get install -y jq +./install-os-packages.sh jq [[ -d 
"${WORKSPACE}/sources" ]] || die "Run get-sources.sh first" diff --git a/ci/default-config b/ci/default-config index 7837b1fe57d..26e82ddcceb 100644 --- a/ci/default-config +++ b/ci/default-config @@ -9,7 +9,7 @@ SCRIPTPATH=$(pwd) WORKSPACE=${SCRIPTPATH}/workspace PROJECT_ROOT=$(cd $SCRIPTPATH/.. && pwd) -# All scripts take no arguments. All arguments must be in config. +# Almost all scripts take no arguments. Arguments should be in config. # get-sources SOURCES_METHOD=local # clone, local, tarball @@ -44,7 +44,7 @@ DOCKER_UBUNTU_TAG_ARCH=arm64 # How the architecture is named in Docker DOCKER_UBUNTU_QEMU_VER=v2.9.1 DOCKER_UBUNTU_REPO=multiarch/ubuntu-core -THREADS=$(grep -c ^processor /proc/cpuinfo || nproc || sysctl -a | grep -F 'hw.ncpu') +THREADS=$(grep -c ^processor /proc/cpuinfo || nproc || sysctl -a | grep -F 'hw.ncpu' | grep -oE '[0-9]+') # All scripts should return 0 in case of success, 1 in case of permanent error, # 2 in case of temporary error, any other code in case of permanent error. @@ -55,7 +55,7 @@ function die { [[ $EUID -ne 0 ]] && SUDO=sudo -command -v apt-get && $SUDO apt-get update +./install-os-packages.sh prepare # Configuration parameters may be overriden with CONFIG environment variable pointing to config file. 
[[ -n "$CONFIG" ]] && source $CONFIG diff --git a/ci/get-sources.sh b/ci/get-sources.sh index f09f8c3c812..ee57b0ec27d 100755 --- a/ci/get-sources.sh +++ b/ci/get-sources.sh @@ -4,12 +4,12 @@ set -e -x source default-config if [[ "$SOURCES_METHOD" == "clone" ]]; then - $SUDO apt-get install -y git + ./install-os-packages.sh git SOURCES_DIR="${WORKSPACE}/sources" mkdir -p "${SOURCES_DIR}" git clone --recursive --branch "$SOURCES_BRANCH" "$SOURCES_CLONE_URL" "${SOURCES_DIR}" pushd "${SOURCES_DIR}" - git checkout "$SOURCES_COMMIT" + git checkout --recurse-submodules "$SOURCES_COMMIT" popd elif [[ "$SOURCES_METHOD" == "local" ]]; then ln -f -s "${PROJECT_ROOT}" "${WORKSPACE}/sources" diff --git a/ci/install-compiler-from-packages.sh b/ci/install-compiler-from-packages.sh index c46f09219e7..53909435a06 100755 --- a/ci/install-compiler-from-packages.sh +++ b/ci/install-compiler-from-packages.sh @@ -3,27 +3,20 @@ set -e -x source default-config -# TODO Non debian systems # TODO Install from PPA on older Ubuntu -if [ -f '/etc/lsb-release' ]; then - source /etc/lsb-release - if [[ "$DISTRIB_ID" == "Ubuntu" ]]; then - if [[ "$COMPILER" == "gcc" ]]; then - $SUDO apt-get -y install gcc-${COMPILER_PACKAGE_VERSION} g++-${COMPILER_PACKAGE_VERSION} - export CC=gcc-${COMPILER_PACKAGE_VERSION} - export CXX=g++-${COMPILER_PACKAGE_VERSION} - elif [[ "$COMPILER" == "clang" ]]; then - [[ $(uname -m) == "x86_64" ]] && LLD="lld-${COMPILER_PACKAGE_VERSION}" - $SUDO apt-get -y install clang-${COMPILER_PACKAGE_VERSION} "$LLD" libc++-dev libc++abi-dev - export CC=clang-${COMPILER_PACKAGE_VERSION} - export CXX=clang++-${COMPILER_PACKAGE_VERSION} - else - die "Unknown compiler specified" - fi - else - die "Unknown Linux variant" +./install-os-packages.sh ${COMPILER}-${COMPILER_PACKAGE_VERSION} + +if [[ "$COMPILER" == "gcc" ]]; then + if command -v gcc-${COMPILER_PACKAGE_VERSION}; then export CC=gcc-${COMPILER_PACKAGE_VERSION} CXX=g++-${COMPILER_PACKAGE_VERSION}; + elif command -v 
gcc${COMPILER_PACKAGE_VERSION}; then export CC=gcc${COMPILER_PACKAGE_VERSION} CXX=g++${COMPILER_PACKAGE_VERSION}; + elif command -v gcc; then export CC=gcc CXX=g++; + fi +elif [[ "$COMPILER" == "clang" ]]; then + if command -v clang-${COMPILER_PACKAGE_VERSION}; then export CC=clang-${COMPILER_PACKAGE_VERSION} CXX=clang++-${COMPILER_PACKAGE_VERSION}; + elif command -v clang${COMPILER_PACKAGE_VERSION}; then export CC=clang${COMPILER_PACKAGE_VERSION} CXX=clang++${COMPILER_PACKAGE_VERSION}; + elif command -v clang; then export CC=clang CXX=clang++; fi else - die "Unknown OS" + die "Unknown compiler specified" fi diff --git a/ci/install-libraries.sh b/ci/install-libraries.sh index 7070083d57e..4868221b342 100755 --- a/ci/install-libraries.sh +++ b/ci/install-libraries.sh @@ -3,10 +3,12 @@ set -e -x source default-config -# TODO Non-debian systems - -$SUDO apt-get -y install libssl-dev libicu-dev libreadline-dev libmysqlclient-dev unixodbc-dev +./install-os-packages.sh libssl-dev +./install-os-packages.sh libicu-dev +./install-os-packages.sh libreadline-dev +./install-os-packages.sh libmariadbclient-dev +./install-os-packages.sh libunixodbc-dev if [[ "$ENABLE_EMBEDDED_COMPILER" == 1 && "$USE_LLVM_LIBRARIES_FROM_SYSTEM" == 1 ]]; then - $SUDO apt-get -y install liblld-5.0-dev libclang-5.0-dev + ./install-os-packages.sh llvm-libs-5.0 fi diff --git a/ci/install-os-packages.sh b/ci/install-os-packages.sh new file mode 100755 index 00000000000..e3e7e88044a --- /dev/null +++ b/ci/install-os-packages.sh @@ -0,0 +1,120 @@ +#!/usr/bin/env bash +set -e -x + +# Dispatches package installation on various OS and distributives + +WHAT=$1 + +[[ $EUID -ne 0 ]] && SUDO=sudo + +command -v apt-get && PACKAGE_MANAGER=apt +command -v yum && PACKAGE_MANAGER=yum +command -v pkg && PACKAGE_MANAGER=pkg + + +case $PACKAGE_MANAGER in + apt) + case $WHAT in + prepare) + $SUDO apt-get update + ;; + svn) + $SUDO apt-get install -y subversion + ;; + gcc*) + $SUDO apt-get install -y $WHAT ${WHAT/cc/++} 
+ ;; + clang*) + $SUDO apt-get install -y $WHAT libc++-dev libc++abi-dev + [[ $(uname -m) == "x86_64" ]] && $SUDO apt-get install -y ${WHAT/clang/lld} || true + ;; + git) + $SUDO apt-get install -y git + ;; + cmake) + $SUDO apt-get install -y cmake3 || $SUDO apt-get install -y cmake + ;; + curl) + $SUDO apt-get install -y curl + ;; + jq) + $SUDO apt-get install -y jq + ;; + libssl-dev) + $SUDO apt-get install -y libssl-dev + ;; + libicu-dev) + $SUDO apt-get install -y libicu-dev + ;; + libreadline-dev) + $SUDO apt-get install -y libreadline-dev + ;; + libunixodbc-dev) + $SUDO apt-get install -y unixodbc-dev + ;; + libmariadbclient-dev) + $SUDO apt-get install -y libmariadbclient-dev + ;; + llvm-libs*) + $SUDO apt-get install -y ${WHAT/llvm-libs/liblld}-dev ${WHAT/llvm-libs/libclang}-dev + ;; + qemu-user-static) + $SUDO apt-get install -y qemu-user-static + ;; + vagrant-virtualbox) + $SUDO apt-get install -y vagrant virtualbox + ;; + *) + echo "Unknown package"; exit 1; + ;; + esac + ;; + pkg) + case $WHAT in + prepare) + ;; + svn) + $SUDO pkg install -y subversion + ;; + gcc*) + $SUDO pkg install -y ${WHAT/-/} + ;; + clang*) + $SUDO pkg install -y clang-devel + ;; + git) + $SUDO pkg install -y git + ;; + cmake) + $SUDO pkg install -y cmake + ;; + curl) + $SUDO pkg install -y curl + ;; + jq) + $SUDO pkg install -y jq + ;; + libssl-dev) + $SUDO pkg install -y openssl + ;; + libicu-dev) + $SUDO pkg install -y icu + ;; + libreadline-dev) + $SUDO pkg install -y readline + ;; + libunixodbc-dev) + $SUDO pkg install -y unixODBC libltdl + ;; + libmariadbclient-dev) + $SUDO pkg install -y mariadb102-client + ;; + *) + echo "Unknown package"; exit 1; + ;; + esac + ;; + *) + echo "Unknown distributive"; exit 1; + ;; +esac diff --git a/ci/jobs/quick-build/README.md b/ci/jobs/quick-build/README.md new file mode 100644 index 00000000000..803acae0f93 --- /dev/null +++ b/ci/jobs/quick-build/README.md @@ -0,0 +1,5 @@ +## Build with debug mode and without many libraries + +This job 
is intended as first check that build is not broken on wide variety of platforms. + +Results of this build are not intended for production usage. diff --git a/ci/jobs/quick-build/config b/ci/jobs/quick-build/config deleted file mode 100644 index c45d9690c7a..00000000000 --- a/ci/jobs/quick-build/config +++ /dev/null @@ -1,12 +0,0 @@ -SOURCES_METHOD=local -COMPILER=clang -COMPILER_INSTALL_METHOD=packages -COMPILER_PACKAGE_VERSION=6.0 -USE_LLVM_LIBRARIES_FROM_SYSTEM=0 -BUILD_METHOD=normal -BUILD_TARGETS=clickhouse -BUILD_TYPE=Debug -ENABLE_EMBEDDED_COMPILER=0 -CMAKE_FLAGS="-D CMAKE_C_FLAGS_ADD=-g0 -D CMAKE_CXX_FLAGS_ADD=-g0 -D ENABLE_TCMALLOC=0 -D ENABLE_CAPNP=0 -D ENABLE_RDKAFKA=0 -D ENABLE_UNWIND=0 -D ENABLE_ICU=0" - -# TODO it doesn't build with -D ENABLE_NETSSL=0 -D ENABLE_MONGODB=0 -D ENABLE_MYSQL=0 -D ENABLE_DATA_ODBC=0 diff --git a/ci/jobs/quick-build/run.sh b/ci/jobs/quick-build/run.sh index 0872b685e7c..5fe57457645 100755 --- a/ci/jobs/quick-build/run.sh +++ b/ci/jobs/quick-build/run.sh @@ -7,11 +7,24 @@ set -e -x # or: # ./run-with-docker.sh ubuntu:bionic jobs/quick-build/run.sh -CONFIG="$(dirname $0)"/config cd "$(dirname $0)"/../.. . default-config +SOURCES_METHOD=local +COMPILER=clang +COMPILER_INSTALL_METHOD=packages +COMPILER_PACKAGE_VERSION=6.0 +USE_LLVM_LIBRARIES_FROM_SYSTEM=0 +BUILD_METHOD=normal +BUILD_TARGETS=clickhouse +BUILD_TYPE=Debug +ENABLE_EMBEDDED_COMPILER=0 + +CMAKE_FLAGS="-D CMAKE_C_FLAGS_ADD=-g0 -D CMAKE_CXX_FLAGS_ADD=-g0 -D ENABLE_TCMALLOC=0 -D ENABLE_CAPNP=0 -D ENABLE_RDKAFKA=0 -D ENABLE_UNWIND=0 -D ENABLE_ICU=0 -D ENABLE_POCO_MONGODB=0 -D ENABLE_POCO_NETSSL=0 -D ENABLE_POCO_ODBC=0 -D ENABLE_MYSQL=0" + +[[ $(uname) == "FreeBSD" ]] && COMPILER_PACKAGE_VERSION=devel && export COMPILER_PATH=/usr/local/bin + . get-sources.sh . prepare-toolchain.sh . 
install-libraries.sh diff --git a/ci/prepare-docker-image-ubuntu.sh b/ci/prepare-docker-image-ubuntu.sh index 1b3d3bd18f6..2880d7fc1e6 100755 --- a/ci/prepare-docker-image-ubuntu.sh +++ b/ci/prepare-docker-image-ubuntu.sh @@ -6,7 +6,7 @@ source default-config ./check-docker.sh # http://fl47l1n3.net/2015/12/24/binfmt/ -$SUDO apt-get -y install qemu-user-static +./install-os-packages.sh qemu-user-static pushd docker-multiarch diff --git a/ci/prepare-toolchain.sh b/ci/prepare-toolchain.sh index f90cb4fca4d..4718a854860 100755 --- a/ci/prepare-toolchain.sh +++ b/ci/prepare-toolchain.sh @@ -3,11 +3,10 @@ set -e -x source default-config -# TODO Non debian systems -apt-cache search cmake3 | grep -P '^cmake3 ' && $SUDO apt-get -y install cmake3 || $SUDO apt-get -y install cmake +./install-os-packages.sh cmake if [[ "$COMPILER_INSTALL_METHOD" == "packages" ]]; then - . install-compiler-from-packages.sh; + . install-compiler-from-packages.sh elif [[ "$COMPILER_INSTALL_METHOD" == "sources" ]]; then . 
install-compiler-from-sources.sh else diff --git a/ci/prepare-vagrant-image-freebsd.sh b/ci/prepare-vagrant-image-freebsd.sh index 81d021ca31f..16c5e58c7c5 100755 --- a/ci/prepare-vagrant-image-freebsd.sh +++ b/ci/prepare-vagrant-image-freebsd.sh @@ -3,11 +3,10 @@ set -e -x source default-config -$SUDO apt-get -y install vagrant virtualbox +./install-os-packages.sh vagrant-virtualbox pushd "vagrant-freebsd" vagrant up vagrant ssh-config > vagrant-ssh ssh -F vagrant-ssh default 'uname -a' -scp -F vagrant-ssh -r ../../ci default:~ popd diff --git a/ci/run-with-docker.sh b/ci/run-with-docker.sh index 238907bb5dd..158961dd5da 100755 --- a/ci/run-with-docker.sh +++ b/ci/run-with-docker.sh @@ -1,6 +1,9 @@ #!/usr/bin/env bash set -e -x +mkdir -p /var/cache/ccache +DOCKER_ENV+=" --mount=type=bind,source=/var/cache/ccache,destination=/ccache -e CCACHE_DIR=/ccache " + PROJECT_ROOT="$(cd "$(dirname "$0")/.."; pwd -P)" [[ -n "$CONFIG" ]] && DOCKER_ENV="--env=CONFIG" docker run -t --network=host --mount=type=bind,source=${PROJECT_ROOT},destination=/ClickHouse --workdir=/ClickHouse/ci $DOCKER_ENV "$1" "$2" diff --git a/ci/run-with-vagrant.sh b/ci/run-with-vagrant.sh new file mode 100755 index 00000000000..620d38071eb --- /dev/null +++ b/ci/run-with-vagrant.sh @@ -0,0 +1,14 @@ +#!/usr/bin/env bash +set -e -x + +[[ -r "vagrant-${1}/vagrant-ssh" ]] || die "Run prepare-vagrant-image-... first." 
+ +pushd vagrant-$1 + +shopt -s extglob + +vagrant ssh -c "mkdir -p ClickHouse" +scp -q -F vagrant-ssh -r ../../!(*build*) default:~/ClickHouse +vagrant ssh -c "cd ClickHouse/ci; $2" + +popd diff --git a/cmake/find_execinfo.cmake b/cmake/find_execinfo.cmake index 6d7428a166f..05dd72dbb3d 100644 --- a/cmake/find_execinfo.cmake +++ b/cmake/find_execinfo.cmake @@ -1,6 +1,9 @@ if (ARCH_FREEBSD) find_library (EXECINFO_LIBRARY execinfo) + find_library (ELF_LIBRARY elf) message (STATUS "Using execinfo: ${EXECINFO_LIBRARY}") + message (STATUS "Using elf: ${ELF_LIBRARY}") else () set (EXECINFO_LIBRARY "") + set (ELF_LIBRARY "") endif () diff --git a/cmake/find_llvm.cmake b/cmake/find_llvm.cmake index a2006e37c64..22195c85f2f 100644 --- a/cmake/find_llvm.cmake +++ b/cmake/find_llvm.cmake @@ -1,5 +1,5 @@ option (ENABLE_EMBEDDED_COMPILER "Set to TRUE to enable support for 'compile' option for query execution" 1) -option (USE_INTERNAL_LLVM_LIBRARY "Use bundled or system LLVM library. Default: system library for quicker developer builds." 0) +option (USE_INTERNAL_LLVM_LIBRARY "Use bundled or system LLVM library. Default: system library for quicker developer builds." 
${APPLE}) if (ENABLE_EMBEDDED_COMPILER) if (USE_INTERNAL_LLVM_LIBRARY AND NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/llvm/llvm/CMakeLists.txt") diff --git a/cmake/find_poco.cmake b/cmake/find_poco.cmake index b46e722c94b..e09c7428720 100644 --- a/cmake/find_poco.cmake +++ b/cmake/find_poco.cmake @@ -8,8 +8,21 @@ if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/poco/CMakeLists.txt") set (MISSING_INTERNAL_POCO_LIBRARY 1) endif () +set (POCO_COMPONENTS Net XML SQL Data) +if (NOT DEFINED ENABLE_POCO_NETSSL OR ENABLE_POCO_NETSSL) + list (APPEND POCO_COMPONENTS Crypto NetSSL) +endif () +if (NOT DEFINED ENABLE_POCO_MONGODB OR ENABLE_POCO_MONGODB) + list (APPEND POCO_COMPONENTS MongoDB) +endif () +# TODO: after new poco release with SQL library rename ENABLE_POCO_ODBC -> ENABLE_POCO_SQLODBC +if (NOT DEFINED ENABLE_POCO_ODBC OR ENABLE_POCO_ODBC) + list (APPEND POCO_COMPONENTS DataODBC) + #list (APPEND POCO_COMPONENTS SQLODBC) # future +endif () + if (NOT USE_INTERNAL_POCO_LIBRARY) - find_package (Poco COMPONENTS Net NetSSL XML SQL Data Crypto DataODBC MongoDB) + find_package (Poco COMPONENTS ${POCO_COMPONENTS}) endif () if (Poco_INCLUDE_DIRS AND Poco_Foundation_LIBRARY) @@ -46,13 +59,12 @@ elseif (NOT MISSING_INTERNAL_POCO_LIBRARY) "${ClickHouse_SOURCE_DIR}/contrib/poco/Util/include/" ) - if (NOT DEFINED POCO_ENABLE_MONGODB OR POCO_ENABLE_MONGODB) - set (Poco_MongoDB_FOUND 1) + if (NOT DEFINED ENABLE_POCO_MONGODB OR ENABLE_POCO_MONGODB) + set (USE_POCO_MONGODB 1) set (Poco_MongoDB_LIBRARY PocoMongoDB) set (Poco_MongoDB_INCLUDE_DIRS "${ClickHouse_SOURCE_DIR}/contrib/poco/MongoDB/include/") endif () - if (EXISTS "${ClickHouse_SOURCE_DIR}/contrib/poco/SQL/ODBC/include/") set (Poco_SQL_FOUND 1) set (Poco_SQL_LIBRARY PocoSQL) @@ -60,8 +72,8 @@ elseif (NOT MISSING_INTERNAL_POCO_LIBRARY) "${ClickHouse_SOURCE_DIR}/contrib/poco/SQL/include" "${ClickHouse_SOURCE_DIR}/contrib/poco/Data/include" ) - if (ODBC_FOUND) - set (Poco_SQLODBC_FOUND 1) + if ((NOT DEFINED ENABLE_POCO_ODBC OR 
ENABLE_POCO_ODBC) AND ODBC_FOUND) + set (USE_POCO_SQLODBC 1) set (Poco_SQLODBC_INCLUDE_DIRS "${ClickHouse_SOURCE_DIR}/contrib/poco/SQL/ODBC/include/" "${ClickHouse_SOURCE_DIR}/contrib/poco/Data/ODBC/include/" @@ -73,8 +85,8 @@ elseif (NOT MISSING_INTERNAL_POCO_LIBRARY) set (Poco_Data_FOUND 1) set (Poco_Data_INCLUDE_DIRS "${ClickHouse_SOURCE_DIR}/contrib/poco/Data/include") set (Poco_Data_LIBRARY PocoData) - if (ODBC_FOUND) - set (Poco_DataODBC_FOUND 1) + if ((NOT DEFINED ENABLE_POCO_ODBC OR ENABLE_POCO_ODBC) AND ODBC_FOUND) + set (USE_POCO_DATAODBC 1) set (Poco_DataODBC_INCLUDE_DIRS "${ClickHouse_SOURCE_DIR}/contrib/poco/Data/ODBC/include/" ${ODBC_INCLUDE_DIRECTORIES} @@ -84,8 +96,8 @@ elseif (NOT MISSING_INTERNAL_POCO_LIBRARY) endif () # TODO! fix internal ssl - if (OPENSSL_FOUND AND NOT USE_INTERNAL_SSL_LIBRARY) - set (Poco_NetSSL_FOUND 1) + if (OPENSSL_FOUND AND NOT USE_INTERNAL_SSL_LIBRARY AND (NOT DEFINED ENABLE_POCO_NETSSL OR ENABLE_POCO_NETSSL)) + set (USE_POCO_NETSSL 1) set (Poco_NetSSL_LIBRARY PocoNetSSL) set (Poco_Crypto_LIBRARY PocoCrypto) endif () @@ -103,7 +115,7 @@ elseif (NOT MISSING_INTERNAL_POCO_LIBRARY) set (Poco_XML_LIBRARY PocoXML) endif () -message(STATUS "Using Poco: ${Poco_INCLUDE_DIRS} : ${Poco_Foundation_LIBRARY},${Poco_Util_LIBRARY},${Poco_Net_LIBRARY},${Poco_NetSSL_LIBRARY},${Poco_XML_LIBRARY},${Poco_Data_LIBRARY},${Poco_DataODBC_LIBRARY},${Poco_MongoDB_LIBRARY}; MongoDB=${Poco_MongoDB_FOUND}, DataODBC=${Poco_DataODBC_FOUND}, NetSSL=${Poco_NetSSL_FOUND}") +message(STATUS "Using Poco: ${Poco_INCLUDE_DIRS} : ${Poco_Foundation_LIBRARY},${Poco_Util_LIBRARY},${Poco_Net_LIBRARY},${Poco_NetSSL_LIBRARY},${Poco_XML_LIBRARY},${Poco_Data_LIBRARY},${Poco_DataODBC_LIBRARY},${Poco_MongoDB_LIBRARY}; MongoDB=${USE_POCO_MONGODB}, DataODBC=${Poco_DataODBC_FOUND}, NetSSL=${USE_POCO_NETSSL}") # How to make sutable poco: # use branch: diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index 104db478ef0..2966d5b26f8 100644 --- 
a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -128,7 +128,7 @@ if (USE_INTERNAL_POCO_LIBRARY) set (_save ${ENABLE_TESTS}) set (ENABLE_TESTS 0) set (CMAKE_DISABLE_FIND_PACKAGE_ZLIB 1) - if (USE_INTERNAL_SSL_LIBRARY) + if (USE_INTERNAL_SSL_LIBRARY OR (DEFINED ENABLE_POCO_NETSSL AND NOT ENABLE_POCO_NETSSL)) set (DISABLE_INTERNAL_OPENSSL 1 CACHE INTERNAL "") set (ENABLE_NETSSL 0 CACHE INTERNAL "") # TODO! set (ENABLE_CRYPTO 0 CACHE INTERNAL "") # TODO! @@ -141,7 +141,8 @@ if (USE_INTERNAL_POCO_LIBRARY) set (ENABLE_TESTS ${_save}) set (CMAKE_CXX_FLAGS ${save_CMAKE_CXX_FLAGS}) set (CMAKE_C_FLAGS ${save_CMAKE_C_FLAGS}) - if (OPENSSL_FOUND AND TARGET Crypto) + + if (OPENSSL_FOUND AND TARGET Crypto AND (NOT DEFINED ENABLE_POCO_NETSSL OR ENABLE_POCO_NETSSL)) # Bug in poco https://github.com/pocoproject/poco/pull/2100 found on macos target_include_directories(Crypto PUBLIC ${OPENSSL_INCLUDE_DIR}) endif () diff --git a/contrib/llvm b/contrib/llvm index 6b3975cf38d..163def21781 160000 --- a/contrib/llvm +++ b/contrib/llvm @@ -1 +1 @@ -Subproject commit 6b3975cf38d5c9436e1311b7e54ad93ef1a9aa9c +Subproject commit 163def217817c90fb982a6daf384744d8472b92b diff --git a/contrib/poco b/contrib/poco index 2d5a158303a..3a2d0a833a2 160000 --- a/contrib/poco +++ b/contrib/poco @@ -1 +1 @@ -Subproject commit 2d5a158303adf9d47b980cdcfdb26cee1460704e +Subproject commit 3a2d0a833a22ef5e1164a9ada54e3253cb038904 diff --git a/dbms/CMakeLists.txt b/dbms/CMakeLists.txt index da199163994..bb83e67cd0a 100644 --- a/dbms/CMakeLists.txt +++ b/dbms/CMakeLists.txt @@ -104,9 +104,7 @@ if (USE_EMBEDDED_COMPILER) if (TERMCAP_LIBRARY) list(APPEND REQUIRED_LLVM_LIBRARIES ${TERMCAP_LIBRARY}) endif () - if (LTDL_LIBRARY) - list(APPEND REQUIRED_LLVM_LIBRARIES ${LTDL_LIBRARY}) - endif () + list(APPEND REQUIRED_LLVM_LIBRARIES ${CMAKE_DL_LIBS}) target_link_libraries (dbms ${REQUIRED_LLVM_LIBRARIES}) target_include_directories (dbms BEFORE PUBLIC ${LLVM_INCLUDE_DIRS}) @@ -149,6 +147,7 @@ 
target_link_libraries (clickhouse_common_io ${Poco_Data_LIBRARY} ${ZLIB_LIBRARIES} ${EXECINFO_LIBRARY} + ${ELF_LIBRARY} ${Boost_SYSTEM_LIBRARY} ${CMAKE_DL_LIBS} ) @@ -172,7 +171,7 @@ if (NOT USE_INTERNAL_BOOST_LIBRARY) target_include_directories (clickhouse_common_io BEFORE PUBLIC ${Boost_INCLUDE_DIRS}) endif () -if (Poco_SQLODBC_FOUND) +if (USE_POCO_SQLODBC) target_link_libraries (clickhouse_common_io ${Poco_SQL_LIBRARY}) target_link_libraries (dbms ${Poco_SQLODBC_LIBRARY} ${Poco_SQL_LIBRARY}) if (NOT USE_INTERNAL_POCO_LIBRARY) @@ -186,7 +185,7 @@ if (Poco_Data_FOUND AND NOT USE_INTERNAL_POCO_LIBRARY) target_include_directories (dbms PRIVATE ${Poco_Data_INCLUDE_DIRS}) endif() -if (Poco_DataODBC_FOUND) +if (USE_POCO_DATAODBC) target_link_libraries (clickhouse_common_io ${Poco_Data_LIBRARY}) target_link_libraries (dbms ${Poco_DataODBC_LIBRARY}) if (NOT USE_INTERNAL_POCO_LIBRARY) @@ -194,12 +193,11 @@ if (Poco_DataODBC_FOUND) endif() endif() - -if (Poco_MongoDB_FOUND) +if (USE_POCO_MONGODB) target_link_libraries (dbms ${Poco_MongoDB_LIBRARY}) endif() -if (Poco_NetSSL_FOUND) +if (USE_POCO_NETSSL) target_link_libraries (clickhouse_common_io ${Poco_NetSSL_LIBRARY}) endif() diff --git a/dbms/src/Client/Connection.cpp b/dbms/src/Client/Connection.cpp index 16f5bde4f71..9cd6e29986d 100644 --- a/dbms/src/Client/Connection.cpp +++ b/dbms/src/Client/Connection.cpp @@ -21,7 +21,7 @@ #include #include -#if Poco_NetSSL_FOUND +#if USE_POCO_NETSSL #include #endif @@ -57,7 +57,7 @@ void Connection::connect() if (static_cast(secure)) { -#if Poco_NetSSL_FOUND +#if USE_POCO_NETSSL socket = std::make_unique(); #else throw Exception{"tcp_secure protocol is disabled because poco library was built without NetSSL support.", ErrorCodes::SUPPORT_IS_DISABLED}; diff --git a/dbms/src/Common/Config/ConfigProcessor.cpp b/dbms/src/Common/Config/ConfigProcessor.cpp index 54f9ca58823..e25c5faa412 100644 --- a/dbms/src/Common/Config/ConfigProcessor.cpp +++ b/dbms/src/Common/Config/ConfigProcessor.cpp 
@@ -369,15 +369,17 @@ ConfigProcessor::Files ConfigProcessor::getConfigMergeFiles(const std::string & Files files; Poco::Path merge_dir_path(config_path); - merge_dir_path.setExtension("d"); + std::set merge_dirs; - std::vector merge_dirs; - merge_dirs.push_back(merge_dir_path.toString()); - if (merge_dir_path.getBaseName() != "conf") - { - merge_dir_path.setBaseName("conf"); - merge_dirs.push_back(merge_dir_path.toString()); - } + /// Add path_to_config/config_name.d dir + merge_dir_path.setExtension("d"); + merge_dirs.insert(merge_dir_path.toString()); + /// Add path_to_config/conf.d dir + merge_dir_path.setBaseName("conf"); + merge_dirs.insert(merge_dir_path.toString()); + /// Add path_to_config/config.d dir + merge_dir_path.setBaseName("config"); + merge_dirs.insert(merge_dir_path.toString()); for (const std::string & merge_dir_name : merge_dirs) { diff --git a/dbms/src/Common/config.h.in b/dbms/src/Common/config.h.in index f4d155de2c8..f037a62d36e 100644 --- a/dbms/src/Common/config.h.in +++ b/dbms/src/Common/config.h.in @@ -10,7 +10,7 @@ #cmakedefine01 USE_CAPNP #cmakedefine01 USE_EMBEDDED_COMPILER #cmakedefine01 LLVM_HAS_RTTI -#cmakedefine01 Poco_SQLODBC_FOUND -#cmakedefine01 Poco_DataODBC_FOUND -#cmakedefine01 Poco_MongoDB_FOUND -#cmakedefine01 Poco_NetSSL_FOUND +#cmakedefine01 USE_POCO_SQLODBC +#cmakedefine01 USE_POCO_DATAODBC +#cmakedefine01 USE_POCO_MONGODB +#cmakedefine01 USE_POCO_NETSSL diff --git a/dbms/src/Common/config_build.cpp.in b/dbms/src/Common/config_build.cpp.in index fee7c868384..9e1114668a6 100644 --- a/dbms/src/Common/config_build.cpp.in +++ b/dbms/src/Common/config_build.cpp.in @@ -35,10 +35,10 @@ const char * auto_config_build[] "USE_VECTORCLASS", "@USE_VECTORCLASS@", "USE_RDKAFKA", "@USE_RDKAFKA@", "USE_CAPNP", "@USE_CAPNP@", - "USE_Poco_SQLODBC", "@Poco_SQLODBC_FOUND@", - "USE_Poco_DataODBC", "@Poco_DataODBC_FOUND@", - "USE_Poco_MongoDB", "@Poco_MongoDB_FOUND@", - "USE_Poco_NetSSL", "@Poco_NetSSL_FOUND@", + "USE_POCO_SQLODBC", 
"@USE_POCO_SQLODBC@", + "USE_POCO_DATAODBC", "@USE_POCO_DATAODBC@", + "USE_POCO_MONGODB", "@USE_POCO_MONGODB@", + "USE_POCO_NETSSL", "@USE_POCO_NETSSL@", nullptr, nullptr }; diff --git a/dbms/src/Dictionaries/DictionarySourceFactory.cpp b/dbms/src/Dictionaries/DictionarySourceFactory.cpp index e77a1189233..963a51c7923 100644 --- a/dbms/src/Dictionaries/DictionarySourceFactory.cpp +++ b/dbms/src/Dictionaries/DictionarySourceFactory.cpp @@ -16,10 +16,10 @@ #include #include -#if Poco_MongoDB_FOUND +#if USE_POCO_MONGODB #include #endif -#if Poco_SQLODBC_FOUND || Poco_DataODBC_FOUND +#if USE_POCO_SQLODBC || USE_POCO_DATAODBC #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" #include @@ -88,7 +88,7 @@ Block createSampleBlock(const DictionaryStructure & dict_struct) DictionarySourceFactory::DictionarySourceFactory() : log(&Poco::Logger::get("DictionarySourceFactory")) { -#if Poco_SQLODBC_FOUND || Poco_DataODBC_FOUND +#if USE_POCO_SQLODBC || USE_POCO_DATAODBC Poco::Data::ODBC::Connector::registerConnector(); #endif } @@ -139,7 +139,7 @@ DictionarySourcePtr DictionarySourceFactory::create( } else if ("mongodb" == source_type) { -#if Poco_MongoDB_FOUND +#if USE_POCO_MONGODB return std::make_unique(dict_struct, config, config_prefix + ".mongodb", sample_block); #else throw Exception{"Dictionary source of type `mongodb` is disabled because poco library was built without mongodb support.", @@ -148,7 +148,7 @@ DictionarySourcePtr DictionarySourceFactory::create( } else if ("odbc" == source_type) { -#if Poco_SQLODBC_FOUND || Poco_DataODBC_FOUND +#if USE_POCO_SQLODBC || USE_POCO_DATAODBC return std::make_unique(dict_struct, config, config_prefix + ".odbc", sample_block, context); #else throw Exception{"Dictionary source of type `odbc` is disabled because poco library was built without ODBC support.", @@ -168,7 +168,7 @@ DictionarySourcePtr DictionarySourceFactory::create( if (dict_struct.has_expressions) throw Exception{"Dictionary source of type 
`http` does not support attribute expressions", ErrorCodes::LOGICAL_ERROR}; -#if Poco_NetSSL_FOUND +#if USE_POCO_NETSSL // Used for https queries std::call_once(ssl_init_once, SSLInit); #endif diff --git a/dbms/src/Dictionaries/MongoDBBlockInputStream.cpp b/dbms/src/Dictionaries/MongoDBBlockInputStream.cpp index 20c2d655d85..02fc75976ab 100644 --- a/dbms/src/Dictionaries/MongoDBBlockInputStream.cpp +++ b/dbms/src/Dictionaries/MongoDBBlockInputStream.cpp @@ -1,5 +1,5 @@ #include -#if Poco_MongoDB_FOUND +#if USE_POCO_MONGODB #include #include diff --git a/dbms/src/Dictionaries/MongoDBDictionarySource.cpp b/dbms/src/Dictionaries/MongoDBDictionarySource.cpp index 348e415b201..340d5b81a5b 100644 --- a/dbms/src/Dictionaries/MongoDBDictionarySource.cpp +++ b/dbms/src/Dictionaries/MongoDBDictionarySource.cpp @@ -1,5 +1,5 @@ #include -#if Poco_MongoDB_FOUND +#if USE_POCO_MONGODB #include #pragma GCC diagnostic push diff --git a/dbms/src/Dictionaries/MySQLBlockInputStream.h b/dbms/src/Dictionaries/MySQLBlockInputStream.h index 6e72f4eb3cf..9e760cd28f8 100644 --- a/dbms/src/Dictionaries/MySQLBlockInputStream.h +++ b/dbms/src/Dictionaries/MySQLBlockInputStream.h @@ -21,7 +21,7 @@ public: String getName() const override { return "MySQL"; } - Block getHeader() const override { return description.sample_block; }; + Block getHeader() const override { return description.sample_block; } private: Block readImpl() override; diff --git a/dbms/src/Functions/FunctionsMiscellaneous.cpp b/dbms/src/Functions/FunctionsMiscellaneous.cpp index d599e9bf1ec..ca35888a4b9 100644 --- a/dbms/src/Functions/FunctionsMiscellaneous.cpp +++ b/dbms/src/Functions/FunctionsMiscellaneous.cpp @@ -755,7 +755,7 @@ public: tuple = typeid_cast(materialized_tuple.get()); } - if (tuple) + if (tuple && type_tuple->getElements().size() != 1) { const Columns & tuple_columns = tuple->getColumns(); const DataTypes & tuple_types = type_tuple->getElements(); diff --git a/dbms/src/IO/HTTPCommon.cpp 
b/dbms/src/IO/HTTPCommon.cpp index 7ca9058896f..84ee03b679d 100644 --- a/dbms/src/IO/HTTPCommon.cpp +++ b/dbms/src/IO/HTTPCommon.cpp @@ -1,7 +1,7 @@ #include #include -#if Poco_NetSSL_FOUND +#if USE_POCO_NETSSL #include #include #include @@ -30,7 +30,7 @@ std::once_flag ssl_init_once; void SSLInit() { // http://stackoverflow.com/questions/18315472/https-request-in-c-using-poco -#if Poco_NetSSL_FOUND +#if USE_POCO_NETSSL Poco::Net::initializeSSL(); #endif } diff --git a/dbms/src/IO/ReadWriteBufferFromHTTP.cpp b/dbms/src/IO/ReadWriteBufferFromHTTP.cpp index 0ec26f684f1..dfd3cfbdbde 100644 --- a/dbms/src/IO/ReadWriteBufferFromHTTP.cpp +++ b/dbms/src/IO/ReadWriteBufferFromHTTP.cpp @@ -9,7 +9,7 @@ #include #include -#if Poco_NetSSL_FOUND +#if USE_POCO_NETSSL #include #endif @@ -36,7 +36,7 @@ ReadWriteBufferFromHTTP::ReadWriteBufferFromHTTP(const Poco::URI & uri, session { std::unique_ptr( -#if Poco_NetSSL_FOUND +#if USE_POCO_NETSSL is_ssl ? new Poco::Net::HTTPSClientSession : #endif new Poco::Net::HTTPClientSession) diff --git a/dbms/src/Interpreters/DNSCacheUpdater.h b/dbms/src/Interpreters/DNSCacheUpdater.h index 4c1939d2f8e..ad57f37b5f6 100644 --- a/dbms/src/Interpreters/DNSCacheUpdater.h +++ b/dbms/src/Interpreters/DNSCacheUpdater.h @@ -1,5 +1,8 @@ #pragma once + #include +#include +#include namespace DB diff --git a/dbms/src/Interpreters/ExpressionAnalyzer.cpp b/dbms/src/Interpreters/ExpressionAnalyzer.cpp index 378b4d805b1..c7810666da8 100644 --- a/dbms/src/Interpreters/ExpressionAnalyzer.cpp +++ b/dbms/src/Interpreters/ExpressionAnalyzer.cpp @@ -32,6 +32,8 @@ #include #include #include +#include +#include #include #include @@ -58,7 +60,7 @@ #include #include #include -#include "ProjectionManipulation.h" +#include namespace DB @@ -1645,82 +1647,72 @@ void ExpressionAnalyzer::makeExplicitSet(const ASTFunction * node, const Block & if (args.children.size() != 2) throw Exception("Wrong number of arguments passed to function in", 
ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH); - const ASTPtr & arg = args.children.at(1); - - DataTypes set_element_types; const ASTPtr & left_arg = args.children.at(0); + const ASTPtr & right_arg = args.children.at(1); - const ASTFunction * left_arg_tuple = typeid_cast(left_arg.get()); - - /** NOTE If tuple in left hand side specified non-explicitly - * Example: identity((a, b)) IN ((1, 2), (3, 4)) - * instead of (a, b)) IN ((1, 2), (3, 4)) - * then set creation doesn't work correctly. - */ - if (left_arg_tuple && left_arg_tuple->name == "tuple") + auto getTupleTypeFromAst = [this](const ASTPtr & node) -> DataTypePtr { - for (const auto & arg : left_arg_tuple->arguments->children) - set_element_types.push_back(sample_block.getByName(arg->getColumnName()).type); - } - else - { - DataTypePtr left_type = sample_block.getByName(left_arg->getColumnName()).type; - set_element_types.push_back(left_type); - } - - /// The case `x in (1, 2)` distinguishes from the case `x in 1` (also `x in (1)`). - bool single_value = false; - ASTPtr elements_ast = arg; - - if (ASTFunction * set_func = typeid_cast(arg.get())) - { - if (set_func->name == "tuple") + auto ast_function = typeid_cast(node.get()); + if (ast_function && ast_function->name == "tuple" && !ast_function->arguments->children.empty()) { - if (set_func->arguments->children.empty()) - { - /// Empty set. - elements_ast = set_func->arguments; - } - else - { - /// Distinguish the case `(x, y) in ((1, 2), (3, 4))` from the case `(x, y) in (1, 2)`. - ASTFunction * any_element = typeid_cast(set_func->arguments->children.at(0).get()); - if (set_element_types.size() >= 2 && (!any_element || any_element->name != "tuple")) - single_value = true; - else - elements_ast = set_func->arguments; - } + /// Won't parse all values of outer tuple. 
+ auto element = ast_function->arguments->children.at(0); + std::pair value_raw = evaluateConstantExpression(element, context); + return std::make_shared(DataTypes({value_raw.second})); } - else - { - if (set_element_types.size() >= 2) - throw Exception("Incorrect type of 2nd argument for function " + node->name - + ". Must be subquery or set of " + toString(set_element_types.size()) + "-element tuples.", - ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); - single_value = true; - } - } - else if (typeid_cast(arg.get())) - { - single_value = true; - } - else - { - throw Exception("Incorrect type of 2nd argument for function " + node->name + ". Must be subquery or set of values.", - ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); - } + return evaluateConstantExpression(node, context).second; + }; - if (single_value) + const DataTypePtr & left_arg_type = sample_block.getByName(left_arg->getColumnName()).type; + const DataTypePtr & right_arg_type = getTupleTypeFromAst(right_arg); + + std::function getTupleDepth; + getTupleDepth = [&getTupleDepth](const DataTypePtr & type) -> size_t + { + if (auto tuple_type = typeid_cast(type.get())) + return 1 + (tuple_type->getElements().empty() ? 0 : getTupleDepth(tuple_type->getElements().at(0))); + + return 0; + }; + + size_t left_tuple_depth = getTupleDepth(left_arg_type); + size_t right_tuple_depth = getTupleDepth(right_arg_type); + + DataTypes set_element_types = {left_arg_type}; + auto left_tuple_type = typeid_cast(left_arg_type.get()); + if (left_tuple_type && left_tuple_type->getElements().size() != 1) + set_element_types = left_tuple_type->getElements(); + + ASTPtr elements_ast = nullptr; + + /// 1 in 1; (1, 2) in (1, 2); identity(tuple(tuple(tuple(1)))) in tuple(tuple(tuple(1))); etc. + if (left_tuple_depth == right_tuple_depth) { ASTPtr exp_list = std::make_shared(); - exp_list->children.push_back(elements_ast); + exp_list->children.push_back(right_arg); elements_ast = exp_list; } + /// 1 in (1, 2); (1, 2) in ((1, 2), (3, 4)); etc. 
+ else if (left_tuple_depth + 1 == right_tuple_depth) + { + ASTFunction * set_func = typeid_cast(right_arg.get()); + + if (!set_func || set_func->name != "tuple") + throw Exception("Incorrect type of 2nd argument for function " + node->name + + ". Must be subquery or set of elements with type " + left_arg_type->getName() + ".", + ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); + + elements_ast = set_func->arguments; + } + else + throw Exception("Invalid types for IN function: " + + left_arg_type->getName() + " and " + right_arg_type->getName() + ".", + ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); SetPtr set = std::make_shared(SizeLimits(settings.max_rows_in_set, settings.max_bytes_in_set, settings.set_overflow_mode)); set->createFromAST(set_element_types, elements_ast, context, create_ordered_set); - prepared_sets[arg.get()] = std::move(set); + prepared_sets[right_arg.get()] = std::move(set); } @@ -2089,8 +2081,10 @@ void ExpressionAnalyzer::getActionsImpl(const ASTPtr & ast, bool no_subqueries, /// If the function has an argument-lambda expression, you need to determine its type before the recursive call. bool has_lambda_arguments = false; - for (auto & child : node->arguments->children) + for (size_t arg = 0; arg < node->arguments->children.size(); ++arg) { + auto & child = node->arguments->children[arg]; + ASTFunction * lambda = typeid_cast(child.get()); if (lambda && lambda->name == "lambda") { @@ -2108,7 +2102,7 @@ void ExpressionAnalyzer::getActionsImpl(const ASTPtr & ast, bool no_subqueries, /// Select the name in the next cycle. 
argument_names.emplace_back(); } - else if (prepared_sets.count(child.get())) + else if (prepared_sets.count(child.get()) && functionIsInOrGlobalInOperator(node->name) && arg == 1) { ColumnWithTypeAndName column; column.type = std::make_shared(); @@ -2204,9 +2198,9 @@ void ExpressionAnalyzer::getActionsImpl(const ASTPtr & ast, bool no_subqueries, Names captured; Names required = lambda_actions->getRequiredColumns(); - for (size_t j = 0; j < required.size(); ++j) - if (findColumn(required[j], lambda_arguments) == lambda_arguments.end()) - captured.push_back(required[j]); + for (const auto & required_arg : required) + if (findColumn(required_arg, lambda_arguments) == lambda_arguments.end()) + captured.push_back(required_arg); /// We can not name `getColumnName()`, /// because it does not uniquely define the expression (the types of arguments can be different). @@ -2226,9 +2220,9 @@ void ExpressionAnalyzer::getActionsImpl(const ASTPtr & ast, bool no_subqueries, if (only_consts) { - for (size_t i = 0; i < argument_names.size(); ++i) + for (const auto & argument_name : argument_names) { - if (!actions_stack.getSampleBlock().has(argument_names[i])) + if (!actions_stack.getSampleBlock().has(argument_name)) { arguments_present = false; break; diff --git a/dbms/src/Interpreters/Set.cpp b/dbms/src/Interpreters/Set.cpp index 925479e05e1..9fb95128ad1 100644 --- a/dbms/src/Interpreters/Set.cpp +++ b/dbms/src/Interpreters/Set.cpp @@ -208,6 +208,7 @@ void Set::createFromAST(const DataTypes & types, ASTPtr node, const Context & co MutableColumns columns = header.cloneEmptyColumns(); + DataTypePtr tuple_type; Row tuple_values; ASTExpressionList & list = typeid_cast(*node); for (auto & elem : list.children) @@ -221,10 +222,22 @@ void Set::createFromAST(const DataTypes & types, ASTPtr node, const Context & co } else if (ASTFunction * func = typeid_cast(elem.get())) { + Field function_result; + const TupleBackend * tuple = nullptr; if (func->name != "tuple") - throw 
Exception("Incorrect element of set. Must be tuple.", ErrorCodes::INCORRECT_ELEMENT_OF_SET); + { + if (!tuple_type) + tuple_type = std::make_shared(types); - size_t tuple_size = func->arguments->children.size(); + function_result = extractValueFromNode(elem, *tuple_type, context); + if (function_result.getType() != Field::Types::Tuple) + throw Exception("Invalid type of set. Expected tuple, got " + String(function_result.getTypeName()), + ErrorCodes::INCORRECT_ELEMENT_OF_SET); + + tuple = &function_result.get().t; + } + + size_t tuple_size = tuple ? tuple->size() : func->arguments->children.size(); if (tuple_size != num_columns) throw Exception("Incorrect size of tuple in set: " + toString(tuple_size) + " instead of " + toString(num_columns), ErrorCodes::INCORRECT_ELEMENT_OF_SET); @@ -235,7 +248,8 @@ void Set::createFromAST(const DataTypes & types, ASTPtr node, const Context & co size_t i = 0; for (; i < tuple_size; ++i) { - Field value = extractValueFromNode(func->arguments->children[i], *types[i], context); + Field value = tuple ? (*tuple)[i] + : extractValueFromNode(func->arguments->children[i], *types[i], context); /// If at least one of the elements of the tuple has an impossible (outside the range of the type) value, then the entire tuple too. if (value.isNull()) diff --git a/dbms/src/Interpreters/Settings.h b/dbms/src/Interpreters/Settings.h index 0d0e47c598c..ff275938ad2 100644 --- a/dbms/src/Interpreters/Settings.h +++ b/dbms/src/Interpreters/Settings.h @@ -87,6 +87,8 @@ struct Settings \ M(SettingBool, merge_tree_uniform_read_distribution, true, "Distribute read from MergeTree over threads evenly, ensuring stable average execution time of each thread within one read operation.") \ \ + M(SettingUInt64, mysql_max_rows_to_insert, 65536, "The maximum number of rows in MySQL batch insertion of the MySQL storage engine") \ + \ M(SettingUInt64, optimize_min_equality_disjunction_chain_length, 3, "The minimum length of the expression `expr = x1 OR ... 
expr = xN` for optimization ") \ \ M(SettingUInt64, min_bytes_to_use_direct_io, 0, "The minimum number of bytes for input/output operations is bypassing the page cache. 0 - disabled.") \ diff --git a/dbms/src/Server/Compiler-5.0.0/CMakeLists.txt b/dbms/src/Server/Compiler-5.0.0/CMakeLists.txt index 076eef6921d..5a29d3bd8ae 100644 --- a/dbms/src/Server/Compiler-5.0.0/CMakeLists.txt +++ b/dbms/src/Server/Compiler-5.0.0/CMakeLists.txt @@ -12,9 +12,8 @@ llvm_map_components_to_libnames(REQUIRED_LLVM_LIBRARIES all) if (TERMCAP_LIBRARY) list(APPEND REQUIRED_LLVM_LIBRARIES ${TERMCAP_LIBRARY}) endif () -if (LTDL_LIBRARY) - list(APPEND REQUIRED_LLVM_LIBRARIES ${LTDL_LIBRARY}) -endif () +list(APPEND REQUIRED_LLVM_LIBRARIES ${CMAKE_DL_LIBS}) + message(STATUS "Using LLVM ${LLVM_VERSION}: ${LLVM_INCLUDE_DIRS} : ${REQUIRED_LLVM_LIBRARIES}") diff --git a/dbms/src/Server/Compiler-6.0.0/CMakeLists.txt b/dbms/src/Server/Compiler-6.0.0/CMakeLists.txt index 23c7ea61c31..d7123ea3f07 100644 --- a/dbms/src/Server/Compiler-6.0.0/CMakeLists.txt +++ b/dbms/src/Server/Compiler-6.0.0/CMakeLists.txt @@ -12,9 +12,7 @@ llvm_map_components_to_libnames(REQUIRED_LLVM_LIBRARIES all) if (TERMCAP_LIBRARY) list(APPEND REQUIRED_LLVM_LIBRARIES ${TERMCAP_LIBRARY}) endif () -if (LTDL_LIBRARY) - list(APPEND REQUIRED_LLVM_LIBRARIES ${LTDL_LIBRARY}) -endif () +list(APPEND REQUIRED_LLVM_LIBRARIES ${CMAKE_DL_LIBS}) message(STATUS "Using LLVM ${LLVM_VERSION}: ${LLVM_INCLUDE_DIRS} : ${REQUIRED_LLVM_LIBRARIES}") diff --git a/dbms/src/Server/Compiler-7.0.0/CMakeLists.txt b/dbms/src/Server/Compiler-7.0.0/CMakeLists.txt index 809f8604366..15f6234dfa9 100644 --- a/dbms/src/Server/Compiler-7.0.0/CMakeLists.txt +++ b/dbms/src/Server/Compiler-7.0.0/CMakeLists.txt @@ -12,9 +12,8 @@ llvm_map_components_to_libnames(REQUIRED_LLVM_LIBRARIES all) if (TERMCAP_LIBRARY) list(APPEND REQUIRED_LLVM_LIBRARIES ${TERMCAP_LIBRARY}) endif () -if (LTDL_LIBRARY) - list(APPEND REQUIRED_LLVM_LIBRARIES ${LTDL_LIBRARY}) -endif () +list(APPEND 
REQUIRED_LLVM_LIBRARIES ${CMAKE_DL_LIBS}) + message(STATUS "Using LLVM ${LLVM_VERSION}: ${LLVM_INCLUDE_DIRS} : ${REQUIRED_LLVM_LIBRARIES}") diff --git a/dbms/src/Server/Server.cpp b/dbms/src/Server/Server.cpp index 918ea19c92e..6cc6f09799b 100644 --- a/dbms/src/Server/Server.cpp +++ b/dbms/src/Server/Server.cpp @@ -26,6 +26,7 @@ #include #include #include +#include #include #include #include @@ -38,12 +39,9 @@ #include "StatusFile.h" #include "TCPHandlerFactory.h" -#if Poco_NetSSL_FOUND +#if USE_POCO_NETSSL #include #include -#include - - #endif namespace CurrentMetrics @@ -431,7 +429,7 @@ int Server::main(const std::vector & /*args*/) /// HTTPS if (config().has("https_port")) { -#if Poco_NetSSL_FOUND +#if USE_POCO_NETSSL std::call_once(ssl_init_once, SSLInit); Poco::Net::SecureServerSocket socket; @@ -471,7 +469,7 @@ int Server::main(const std::vector & /*args*/) /// TCP with SSL if (config().has("tcp_port_secure")) { -#if Poco_NetSSL_FOUND +#if USE_POCO_NETSSL Poco::Net::SecureServerSocket socket; auto address = socket_bind_listen(socket, listen_host, config().getInt("tcp_port_secure"), /* secure = */ true); socket.setReceiveTimeout(settings.receive_timeout); diff --git a/dbms/src/Storages/StorageMySQL.cpp b/dbms/src/Storages/StorageMySQL.cpp index 3c63ebe4ca5..cd9e0295de0 100644 --- a/dbms/src/Storages/StorageMySQL.cpp +++ b/dbms/src/Storages/StorageMySQL.cpp @@ -1,12 +1,22 @@ #include #if USE_MYSQL + #include #include #include #include +#include +#include +#include +#include +#include +#include #include +#include +#include #include +#include namespace DB @@ -15,20 +25,26 @@ namespace DB namespace ErrorCodes { extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; + extern const int BAD_ARGUMENTS; } -StorageMySQL::StorageMySQL( - const std::string & name, +StorageMySQL::StorageMySQL(const std::string & name, mysqlxx::Pool && pool, const std::string & remote_database_name, const std::string & remote_table_name, - const ColumnsDescription & columns_) + const bool 
replace_query, + const std::string & on_duplicate_clause, + const ColumnsDescription & columns_, + const Context & context) : IStorage{columns_} , name(name) , remote_database_name(remote_database_name) , remote_table_name(remote_table_name) + , replace_query{replace_query} + , on_duplicate_clause{on_duplicate_clause} , pool(std::move(pool)) + , context(context) { } @@ -56,18 +72,132 @@ BlockInputStreams StorageMySQL::read( } +class StorageMySQLBlockOutputStream : public IBlockOutputStream +{ +public: + explicit StorageMySQLBlockOutputStream(const StorageMySQL & storage, + const std::string & remote_database_name, + const std::string & remote_table_name , + const mysqlxx::PoolWithFailover::Entry & entry, + const size_t & mysql_max_rows_to_insert) + : storage{storage} + , remote_database_name{remote_database_name} + , remote_table_name{remote_table_name} + , entry{entry} + , max_batch_rows{mysql_max_rows_to_insert} + { + } + + Block getHeader() const override { return storage.getSampleBlock(); } + + void write(const Block & block) override + { + auto blocks = splitBlocks(block, max_batch_rows); + mysqlxx::Transaction trans(entry); + try + { + for (const Block & batch_data : blocks) + { + writeBlockData(batch_data); + } + trans.commit(); + } + catch(...) + { + trans.rollback(); + throw; + } + } + + void writeBlockData(const Block & block) + { + WriteBufferFromOwnString sqlbuf; + sqlbuf << (storage.replace_query ? "REPLACE" : "INSERT") << " INTO "; + sqlbuf << backQuoteIfNeed(remote_database_name) << "." 
<< backQuoteIfNeed(remote_table_name); + sqlbuf << " ( " << dumpNamesWithBackQuote(block) << " ) VALUES "; + + auto writer = FormatFactory().getOutput("Values", sqlbuf, storage.getSampleBlock(), storage.context); + writer->write(block); + + if (!storage.on_duplicate_clause.empty()) + sqlbuf << " ON DUPLICATE KEY " << storage.on_duplicate_clause; + + sqlbuf << ";"; + + auto query = this->entry->query(sqlbuf.str()); + query.execute(); + } + + Blocks splitBlocks(const Block & block, const size_t & max_rows) const + { + /// Avoid Excessive copy when block is small enough + if (block.rows() <= max_rows) + return Blocks{std::move(block)}; + + const size_t splited_block_size = ceil(block.rows() * 1.0 / max_rows); + Blocks splitted_blocks(splited_block_size); + + for (size_t idx = 0; idx < splited_block_size; ++idx) + splitted_blocks[idx] = block.cloneEmpty(); + + const size_t columns = block.columns(); + const size_t rows = block.rows(); + size_t offsets = 0; + size_t limits = max_batch_rows; + for (size_t idx = 0; idx < splited_block_size; ++idx) + { + /// For last batch, limits should be the remain size + if (idx == splited_block_size - 1) limits = rows - offsets; + for (size_t col_idx = 0; col_idx < columns; ++col_idx) + { + splitted_blocks[idx].getByPosition(col_idx).column = block.getByPosition(col_idx).column->cut(offsets, limits); + } + offsets += max_batch_rows; + } + + return splitted_blocks; + } + + std::string dumpNamesWithBackQuote(const Block & block) const + { + WriteBufferFromOwnString out; + for (auto it = block.begin(); it != block.end(); ++it) + { + if (it != block.begin()) + out << ", "; + out << backQuoteIfNeed(it->name); + } + return out.str(); + } + + +private: + const StorageMySQL & storage; + std::string remote_database_name; + std::string remote_table_name; + mysqlxx::PoolWithFailover::Entry entry; + size_t max_batch_rows; +}; + + +BlockOutputStreamPtr StorageMySQL::write( + const ASTPtr & /*query*/, const Settings & settings) +{ + return 
std::make_shared(*this, remote_database_name, remote_table_name, pool.Get(), settings.mysql_max_rows_to_insert); +} + void registerStorageMySQL(StorageFactory & factory) { factory.registerStorage("MySQL", [](const StorageFactory::Arguments & args) { ASTs & engine_args = args.engine_args; - if (engine_args.size() != 5) + if (engine_args.size() < 5 || engine_args.size() > 7) throw Exception( - "Storage MySQL requires exactly 5 parameters: MySQL('host:port', database, table, 'user', 'password').", + "Storage MySQL requires 5-7 parameters: MySQL('host:port', database, table, 'user', 'password'[, replace_query, 'on_duplicate_clause']).", ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH); - for (size_t i = 0; i < 5; ++i) + for (size_t i = 0; i < engine_args.size(); ++i) engine_args[i] = evaluateConstantExpressionOrIdentifierAsLiteral(engine_args[i], args.local_context); /// 3306 is the default MySQL port. @@ -80,12 +210,27 @@ void registerStorageMySQL(StorageFactory & factory) mysqlxx::Pool pool(remote_database, parsed_host_port.first, username, password, parsed_host_port.second); + bool replace_query = false; + std::string on_duplicate_clause; + if (engine_args.size() >= 6) + replace_query = static_cast(*engine_args[5]).value.safeGet() > 0; + if (engine_args.size() == 7) + on_duplicate_clause = static_cast(*engine_args[6]).value.safeGet(); + + if (replace_query && !on_duplicate_clause.empty()) + throw Exception( + "Only one of 'replace_query' and 'on_duplicate_clause' can be specified, or none of them", + ErrorCodes::BAD_ARGUMENTS); + return StorageMySQL::create( args.table_name, std::move(pool), remote_database, remote_table, - args.columns); + replace_query, + on_duplicate_clause, + args.columns, + args.context); }); } diff --git a/dbms/src/Storages/StorageMySQL.h b/dbms/src/Storages/StorageMySQL.h index 9e2b233283e..52197d54ae0 100644 --- a/dbms/src/Storages/StorageMySQL.h +++ b/dbms/src/Storages/StorageMySQL.h @@ -24,7 +24,10 @@ public: mysqlxx::Pool && pool, const 
std::string & remote_database_name, const std::string & remote_table_name, - const ColumnsDescription & columns); + const bool replace_query, + const std::string & on_duplicate_clause, + const ColumnsDescription & columns, + const Context & context); std::string getName() const override { return "MySQL"; } std::string getTableName() const override { return name; } @@ -37,14 +40,20 @@ public: size_t max_block_size, unsigned num_streams) override; + BlockOutputStreamPtr write(const ASTPtr & query, const Settings & settings) override; + private: + friend class StorageMySQLBlockOutputStream; std::string name; std::string remote_database_name; std::string remote_table_name; + bool replace_query; + std::string on_duplicate_clause; mysqlxx::Pool pool; + const Context & context; }; } diff --git a/dbms/src/Storages/registerStorages.cpp b/dbms/src/Storages/registerStorages.cpp index 6f140d92562..651146eee99 100644 --- a/dbms/src/Storages/registerStorages.cpp +++ b/dbms/src/Storages/registerStorages.cpp @@ -23,7 +23,7 @@ void registerStorageJoin(StorageFactory & factory); void registerStorageView(StorageFactory & factory); void registerStorageMaterializedView(StorageFactory & factory); -#if Poco_SQLODBC_FOUND || Poco_DataODBC_FOUND +#if USE_POCO_SQLODBC || USE_POCO_DATAODBC void registerStorageODBC(StorageFactory & factory); #endif @@ -56,7 +56,7 @@ void registerStorages() registerStorageView(factory); registerStorageMaterializedView(factory); - #if Poco_SQLODBC_FOUND || Poco_DataODBC_FOUND + #if USE_POCO_SQLODBC || USE_POCO_DATAODBC registerStorageODBC(factory); #endif diff --git a/dbms/src/TableFunctions/CMakeLists.txt b/dbms/src/TableFunctions/CMakeLists.txt index 4fef8cf3978..b4ba1191ba0 100644 --- a/dbms/src/TableFunctions/CMakeLists.txt +++ b/dbms/src/TableFunctions/CMakeLists.txt @@ -7,12 +7,12 @@ list(REMOVE_ITEM clickhouse_table_functions_headers ITableFunction.h TableFuncti add_library(clickhouse_table_functions ${clickhouse_table_functions_sources}) 
target_link_libraries(clickhouse_table_functions clickhouse_storages_system dbms ${Poco_Foundation_LIBRARY}) -if (Poco_SQLODBC_FOUND) +if (USE_POCO_SQLODBC) target_link_libraries (clickhouse_table_functions ${Poco_SQLODBC_LIBRARY}) target_include_directories (clickhouse_table_functions PRIVATE ${ODBC_INCLUDE_DIRECTORIES} ${Poco_SQLODBC_INCLUDE_DIRS}) endif () -if (Poco_DataODBC_FOUND) +if (USE_POCO_DATAODBC) target_link_libraries (clickhouse_table_functions ${Poco_DataODBC_LIBRARY}) target_include_directories (clickhouse_table_functions PRIVATE ${ODBC_INCLUDE_DIRECTORIES} ${Poco_DataODBC_INCLUDE_DIRS}) endif () diff --git a/dbms/src/TableFunctions/TableFunctionMySQL.cpp b/dbms/src/TableFunctions/TableFunctionMySQL.cpp index 1f7839ada86..cccfb76dd80 100644 --- a/dbms/src/TableFunctions/TableFunctionMySQL.cpp +++ b/dbms/src/TableFunctions/TableFunctionMySQL.cpp @@ -29,8 +29,8 @@ namespace DB namespace ErrorCodes { - extern const int LOGICAL_ERROR; extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; + extern const int BAD_ARGUMENTS; } @@ -89,11 +89,11 @@ StoragePtr TableFunctionMySQL::executeImpl(const ASTPtr & ast_function, const Co ASTs & args = typeid_cast(*args_func.arguments).children; - if (args.size() != 5) - throw Exception("Table function 'mysql' requires exactly 5 arguments: host:port, database name, table name, username and password", + if (args.size() < 5 || args.size() > 7) + throw Exception("Table function 'mysql' requires 5-7 parameters: MySQL('host:port', database, table, 'user', 'password'[, replace_query, 'on_duplicate_clause']).", ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH); - for (size_t i = 0; i < 5; ++i) + for (size_t i = 0; i < args.size(); ++i) args[i] = evaluateConstantExpressionOrIdentifierAsLiteral(args[i], context); std::string host_port = static_cast(*args[0]).value.safeGet(); @@ -102,6 +102,18 @@ StoragePtr TableFunctionMySQL::executeImpl(const ASTPtr & ast_function, const Co std::string user_name = static_cast(*args[3]).value.safeGet(); 
std::string password = static_cast(*args[4]).value.safeGet(); + bool replace_query = false; + std::string on_duplicate_clause; + if (args.size() >= 6) + replace_query = static_cast(*args[5]).value.safeGet() > 0; + if (args.size() == 7) + on_duplicate_clause = static_cast(*args[6]).value.safeGet(); + + if (replace_query && !on_duplicate_clause.empty()) + throw Exception( + "Only one of 'replace_query' and 'on_duplicate_clause' can be specified, or none of them", + ErrorCodes::BAD_ARGUMENTS); + /// 3306 is the default MySQL port number auto parsed_host_port = parseAddress(host_port, 3306); @@ -152,7 +164,10 @@ StoragePtr TableFunctionMySQL::executeImpl(const ASTPtr & ast_function, const Co std::move(pool), database_name, table_name, - ColumnsDescription{columns}); + replace_query, + on_duplicate_clause, + ColumnsDescription{columns}, + context); res->startup(); return res; diff --git a/dbms/src/TableFunctions/TableFunctionODBC.cpp b/dbms/src/TableFunctions/TableFunctionODBC.cpp index 333ab0e9c6b..75f73146485 100644 --- a/dbms/src/TableFunctions/TableFunctionODBC.cpp +++ b/dbms/src/TableFunctions/TableFunctionODBC.cpp @@ -1,6 +1,6 @@ #include -#if Poco_SQLODBC_FOUND || Poco_DataODBC_FOUND +#if USE_POCO_SQLODBC || USE_POCO_DATAODBC #include #include diff --git a/dbms/src/TableFunctions/TableFunctionODBC.h b/dbms/src/TableFunctions/TableFunctionODBC.h index eb06a8c5097..ce0ded30555 100644 --- a/dbms/src/TableFunctions/TableFunctionODBC.h +++ b/dbms/src/TableFunctions/TableFunctionODBC.h @@ -1,7 +1,7 @@ #pragma once #include -#if Poco_SQLODBC_FOUND || Poco_DataODBC_FOUND +#if USE_POCO_SQLODBC || USE_POCO_DATAODBC #include diff --git a/dbms/src/TableFunctions/registerTableFunctions.cpp b/dbms/src/TableFunctions/registerTableFunctions.cpp index 776ea491921..0858b44cbb0 100644 --- a/dbms/src/TableFunctions/registerTableFunctions.cpp +++ b/dbms/src/TableFunctions/registerTableFunctions.cpp @@ -13,7 +13,7 @@ void registerTableFunctionNumbers(TableFunctionFactory & factory); 
void registerTableFunctionCatBoostPool(TableFunctionFactory & factory); void registerTableFunctionFile(TableFunctionFactory & factory); -#if Poco_SQLODBC_FOUND || Poco_DataODBC_FOUND +#if USE_POCO_SQLODBC || USE_POCO_DATAODBC void registerTableFunctionODBC(TableFunctionFactory & factory); #endif @@ -33,7 +33,7 @@ void registerTableFunctions() registerTableFunctionCatBoostPool(factory); registerTableFunctionFile(factory); -#if Poco_SQLODBC_FOUND || Poco_DataODBC_FOUND +#if USE_POCO_SQLODBC || USE_POCO_DATAODBC registerTableFunctionODBC(factory); #endif diff --git a/dbms/tests/integration/README.md b/dbms/tests/integration/README.md index cc704022e79..bf0d184f134 100644 --- a/dbms/tests/integration/README.md +++ b/dbms/tests/integration/README.md @@ -14,7 +14,7 @@ Don't use Docker from your system repository. * [pip](https://pypi.python.org/pypi/pip). To install: `sudo apt-get install python-pip` * [py.test](https://docs.pytest.org/) testing framework. To install: `sudo -H pip install pytest` -* [docker-compose](https://docs.docker.com/compose/) and additional python libraries. To install: `sudo -H pip install docker-compose docker dicttoxml kazoo` +* [docker-compose](https://docs.docker.com/compose/) and additional python libraries. To install: `sudo -H pip install docker-compose docker dicttoxml kazoo PyMySQL` If you want to run the tests under a non-privileged user, you must add this user to `docker` group: `sudo usermod -aG docker $USER` and re-login. 
(You must close all your sessions (for example, restart your computer)) diff --git a/dbms/tests/integration/helpers/cluster.py b/dbms/tests/integration/helpers/cluster.py index 52003b1d010..4242fa8fa62 100644 --- a/dbms/tests/integration/helpers/cluster.py +++ b/dbms/tests/integration/helpers/cluster.py @@ -48,15 +48,17 @@ class ClickHouseCluster: self.base_cmd = ['docker-compose', '--project-directory', self.base_dir, '--project-name', self.project_name] self.base_zookeeper_cmd = None + self.base_mysql_cmd = [] self.pre_zookeeper_commands = [] self.instances = {} self.with_zookeeper = False - + self.with_mysql = False + self.docker_client = None self.is_up = False - def add_instance(self, name, config_dir=None, main_configs=[], user_configs=[], macroses={}, with_zookeeper=False, + def add_instance(self, name, config_dir=None, main_configs=[], user_configs=[], macroses={}, with_zookeeper=False, with_mysql=False, clickhouse_path_dir=None, hostname=None): """Add an instance to the cluster. 
@@ -75,7 +77,7 @@ class ClickHouseCluster: instance = ClickHouseInstance( self, self.base_dir, name, config_dir, main_configs, user_configs, macroses, with_zookeeper, - self.zookeeper_config_path, self.base_configs_dir, self.server_bin_path, clickhouse_path_dir, hostname=hostname) + self.zookeeper_config_path, with_mysql, self.base_configs_dir, self.server_bin_path, clickhouse_path_dir, hostname=hostname) self.instances[name] = instance self.base_cmd.extend(['--file', instance.docker_compose_path]) @@ -84,6 +86,12 @@ class ClickHouseCluster: self.base_cmd.extend(['--file', p.join(HELPERS_DIR, 'docker_compose_zookeeper.yml')]) self.base_zookeeper_cmd = ['docker-compose', '--project-directory', self.base_dir, '--project-name', self.project_name, '--file', p.join(HELPERS_DIR, 'docker_compose_zookeeper.yml')] + + if with_mysql and not self.with_mysql: + self.with_mysql = True + self.base_cmd.extend(['--file', p.join(HELPERS_DIR, 'docker_compose_mysql.yml')]) + self.base_mysql_cmd = ['docker-compose', '--project-directory', self.base_dir, '--project-name', + self.project_name, '--file', p.join(HELPERS_DIR, 'docker_compose_mysql.yml')] return instance @@ -124,6 +132,9 @@ class ClickHouseCluster: for command in self.pre_zookeeper_commands: self.run_kazoo_commands_with_retries(command, repeats=5) + if self.with_mysql and self.base_mysql_cmd: + subprocess.check_call(self.base_mysql_cmd + ['up', '-d', '--no-recreate']) + # Uncomment for debugging #print ' '.join(self.base_cmd + ['up', '--no-recreate']) @@ -138,6 +149,7 @@ class ClickHouseCluster: instance.client = Client(instance.ip_address, command=self.client_bin_path) + self.is_up = True @@ -201,7 +213,7 @@ services: class ClickHouseInstance: def __init__( self, cluster, base_path, name, custom_config_dir, custom_main_configs, custom_user_configs, macroses, - with_zookeeper, zookeeper_config_path, base_configs_dir, server_bin_path, clickhouse_path_dir, hostname=None): + with_zookeeper, zookeeper_config_path, with_mysql, 
base_configs_dir, server_bin_path, clickhouse_path_dir, hostname=None): self.name = name self.base_cmd = cluster.base_cmd[:] @@ -220,6 +232,8 @@ class ClickHouseInstance: self.base_configs_dir = base_configs_dir self.server_bin_path = server_bin_path + self.with_mysql = with_mysql + self.path = p.join(self.cluster.instances_dir, name) self.docker_compose_path = p.join(self.path, 'docker_compose.yml') @@ -269,7 +283,6 @@ class ClickHouseInstance: while True: status = self.get_docker_handle().status - if status == 'exited': raise Exception("Instance `{}' failed to start. Container status: {}".format(self.name, status)) @@ -356,9 +369,15 @@ class ClickHouseInstance: logs_dir = p.abspath(p.join(self.path, 'logs')) os.mkdir(logs_dir) - depends_on = '[]' + depends_on = [] + + if self.with_mysql: + depends_on.append("mysql1") + if self.with_zookeeper: - depends_on = '["zoo1", "zoo2", "zoo3"]' + depends_on.append("zoo1") + depends_on.append("zoo2") + depends_on.append("zoo3") with open(self.docker_compose_path, 'w') as docker_compose: docker_compose.write(DOCKER_COMPOSE_TEMPLATE.format( @@ -370,7 +389,7 @@ class ClickHouseInstance: config_d_dir=config_d_dir, db_dir=db_dir, logs_dir=logs_dir, - depends_on=depends_on)) + depends_on=str(depends_on))) def destroy_dir(self): diff --git a/dbms/tests/integration/helpers/docker_compose_mysql.yml b/dbms/tests/integration/helpers/docker_compose_mysql.yml new file mode 100644 index 00000000000..6106b588f76 --- /dev/null +++ b/dbms/tests/integration/helpers/docker_compose_mysql.yml @@ -0,0 +1,9 @@ +version: '2' +services: + mysql1: + image: mysql:5.7 + restart: always + environment: + MYSQL_ROOT_PASSWORD: clickhouse + ports: + - 3308:3306 diff --git a/dbms/tests/integration/test_cluster_copier/configs/config-copier.xml b/dbms/tests/integration/test_cluster_copier/configs/config-copier.xml new file mode 100644 index 00000000000..1248d295c09 --- /dev/null +++ b/dbms/tests/integration/test_cluster_copier/configs/config-copier.xml @@ -0,0 
+1,11 @@ + + + trace + /var/log/clickhouse-server/copier/log.log + /var/log/clickhouse-server/copier/log.err.log + 1000M + 10 + /var/log/clickhouse-server/copier/stderr + /var/log/clickhouse-server/copier/stdout + + \ No newline at end of file diff --git a/dbms/tests/integration/test_cluster_copier/test.py b/dbms/tests/integration/test_cluster_copier/test.py index 1bc06fda310..a19fa8231cf 100644 --- a/dbms/tests/integration/test_cluster_copier/test.py +++ b/dbms/tests/integration/test_cluster_copier/test.py @@ -183,7 +183,7 @@ def execute_task(task, cmd_options): copiers_exec_ids = [] cmd = ['/usr/bin/clickhouse', 'copier', - '--config', '/etc/clickhouse-server/config-preprocessed.xml', + '--config', '/etc/clickhouse-server/config-copier.xml', '--task-path', zk_task_path, '--base-dir', '/var/log/clickhouse-server/copier'] cmd += cmd_options diff --git a/dbms/tests/integration/test_storage_mysql/__init__.py b/dbms/tests/integration/test_storage_mysql/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/dbms/tests/integration/test_storage_mysql/configs/remote_servers.xml b/dbms/tests/integration/test_storage_mysql/configs/remote_servers.xml new file mode 100644 index 00000000000..de8e5865f12 --- /dev/null +++ b/dbms/tests/integration/test_storage_mysql/configs/remote_servers.xml @@ -0,0 +1,12 @@ + + + + + + node1 + 9000 + + + + + diff --git a/dbms/tests/integration/test_storage_mysql/test.py b/dbms/tests/integration/test_storage_mysql/test.py new file mode 100644 index 00000000000..97aca105c74 --- /dev/null +++ b/dbms/tests/integration/test_storage_mysql/test.py @@ -0,0 +1,98 @@ +from contextlib import contextmanager + +import pytest + +## sudo -H pip install PyMySQL +import pymysql.cursors + +from helpers.cluster import ClickHouseCluster + +cluster = ClickHouseCluster(__file__) + +node1 = cluster.add_instance('node1', main_configs=['configs/remote_servers.xml'], with_mysql = True) +create_table_sql_template = """ + CREATE TABLE 
`clickhouse`.`{}` ( + `id` int(11) NOT NULL, + `name` varchar(50) NOT NULL, + `age` int NOT NULL default 0, + `money` int NOT NULL default 0, + PRIMARY KEY (`id`)) ENGINE=InnoDB; + """ + +@pytest.fixture(scope="module") +def started_cluster(): + try: + cluster.start() + + conn = get_mysql_conn() + ## create mysql db and table + create_mysql_db(conn, 'clickhouse') + yield cluster + + finally: + cluster.shutdown() + + +def test_insert_select(started_cluster): + table_name = 'test_insert_select' + conn = get_mysql_conn() + create_mysql_table(conn, table_name) + + node1.query(''' +CREATE TABLE {}(id UInt32, name String, age UInt32, money UInt32) ENGINE = MySQL('mysql1:3306', 'clickhouse', '{}', 'root', 'clickhouse'); +'''.format(table_name, table_name)) + node1.query("INSERT INTO {}(id, name, money) select number, concat('name_', toString(number)), 3 from numbers(10000) ".format(table_name)) + assert node1.query("SELECT count() FROM {}".format(table_name)).rstrip() == '10000' + assert node1.query("SELECT sum(money) FROM {}".format(table_name)).rstrip() == '30000' + conn.close() + + +def test_replace_select(started_cluster): + table_name = 'test_replace_select' + conn = get_mysql_conn() + create_mysql_table(conn, table_name) + + node1.query(''' +CREATE TABLE {}(id UInt32, name String, age UInt32, money UInt32) ENGINE = MySQL('mysql1:3306', 'clickhouse', '{}', 'root', 'clickhouse', 1); +'''.format(table_name, table_name)) + node1.query("INSERT INTO {}(id, name, money) select number, concat('name_', toString(number)), 3 from numbers(10000) ".format(table_name)) + node1.query("INSERT INTO {}(id, name, money) select number, concat('name_', toString(number)), 3 from numbers(10000) ".format(table_name)) + assert node1.query("SELECT count() FROM {}".format(table_name)).rstrip() == '10000' + assert node1.query("SELECT sum(money) FROM {}".format(table_name)).rstrip() == '30000' + conn.close() + + +def test_insert_on_duplicate_select(started_cluster): + table_name = 
'test_insert_on_duplicate_select' + conn = get_mysql_conn() + create_mysql_table(conn, table_name) + + node1.query(''' +CREATE TABLE {}(id UInt32, name String, age UInt32, money UInt32) ENGINE = MySQL('mysql1:3306', 'clickhouse', '{}', 'root', 'clickhouse', 0, 'update money = money + values(money)'); +'''.format(table_name, table_name)) + node1.query("INSERT INTO {}(id, name, money) select number, concat('name_', toString(number)), 3 from numbers(10000) ".format(table_name)) + node1.query("INSERT INTO {}(id, name, money) select number, concat('name_', toString(number)), 3 from numbers(10000) ".format(table_name)) + assert node1.query("SELECT count() FROM {}".format(table_name)).rstrip() == '10000' + assert node1.query("SELECT sum(money) FROM {}".format(table_name)).rstrip() == '60000' + conn.close() + + +def get_mysql_conn(): + conn = pymysql.connect(user='root', password='clickhouse', host='127.0.0.1', port=3308) + return conn + +def create_mysql_db(conn, name): + with conn.cursor() as cursor: + cursor.execute( + "CREATE DATABASE {} DEFAULT CHARACTER SET 'utf8'".format(name)) + +def create_mysql_table(conn, tableName): + with conn.cursor() as cursor: + cursor.execute(create_table_sql_template.format(tableName)) + + +if __name__ == '__main__': + with contextmanager(started_cluster)() as cluster: + for name, instance in cluster.instances.items(): + print name, instance.ip_address + raw_input("Cluster created, press any key to destroy...") diff --git a/dbms/tests/queries/0_stateless/00626_in_syntax.reference b/dbms/tests/queries/0_stateless/00626_in_syntax.reference new file mode 100644 index 00000000000..3e4db78953a --- /dev/null +++ b/dbms/tests/queries/0_stateless/00626_in_syntax.reference @@ -0,0 +1,38 @@ +1 +1 +1 +1 +1 +1 +- +1 +1 +1 +1 +1 +1 +1 +- +0 +0 +1 +0 +1 +1 +- +0 +1 +1 +1 +0 +1 +0 +1 +1 +0 +- +1 +1 +1 +- +(1,2) ((1,2),(3,4)) 1 1 diff --git a/dbms/tests/queries/0_stateless/00626_in_syntax.sql b/dbms/tests/queries/0_stateless/00626_in_syntax.sql new file 
mode 100644 index 00000000000..e4777f7fe61 --- /dev/null +++ b/dbms/tests/queries/0_stateless/00626_in_syntax.sql @@ -0,0 +1,44 @@ +select (1, 2) in tuple((1, 2)); +select (1, 2) in ((1, 2), (3, 4)); +select ((1, 2), (3, 4)) in ((1, 2), (3, 4)); +select ((1, 2), (3, 4)) in (((1, 2), (3, 4))); +select ((1, 2), (3, 4)) in tuple(((1, 2), (3, 4))); +select ((1, 2), (3, 4)) in (((1, 2), (3, 4)), ((5, 6), (7, 8))); + +select '-'; +select 1 in 1; +select 1 in tuple(1); +select tuple(1) in tuple(1); +select tuple(1) in tuple(tuple(1)); +select tuple(tuple(1)) in tuple(tuple(1)); +select tuple(tuple(1)) in tuple(tuple(tuple(1))); +select tuple(tuple(tuple(1))) in tuple(tuple(tuple(1))); + +select '-'; +select 1 in Null; +select 1 in tuple(Null); +select 1 in tuple(Null, 1); +select tuple(1) in tuple(tuple(Null)); +select tuple(1) in tuple(tuple(Null), tuple(1)); +select tuple(tuple(Null), tuple(1)) in tuple(tuple(Null), tuple(1)); + +select '-'; +select 1 in (1 + 1, 1 - 1); +select 1 in (0 + 1, 1, toInt8(sin(5))); +select (0 + 1, 1, toInt8(sin(5))) in (0 + 1, 1, toInt8(sin(5))); +select identity(tuple(1)) in (tuple(1), tuple(2)); +select identity(tuple(1)) in (tuple(0), tuple(2)); +select identity(tuple(1)) in (identity(tuple(1)), tuple(2)); +select identity(tuple(1)) in (identity(tuple(0)), tuple(2)); +select identity(tuple(1)) in (identity(tuple(1)), identity(tuple(2))); +select identity(tuple(1)) in (identity(tuple(1)), identity(identity(tuple(2)))); +select identity(tuple(1)) in (identity(tuple(0)), identity(identity(tuple(2)))); + +select '-'; +select identity((1, 2)) in (1, 2); +select identity((1, 2)) in ((1, 2), (3, 4)); +select identity((1, 2)) in ((1, 2), identity((3, 4))); + +select '-'; +select (1,2) as x, ((1,2),(3,4)) as y, 1 in x, x in y; + diff --git a/debian/pbuilder-hooks/A00ccache b/debian/pbuilder-hooks/A00ccache index a5d1d33b428..b8bf8d579c0 100755 --- a/debian/pbuilder-hooks/A00ccache +++ b/debian/pbuilder-hooks/A00ccache @@ -12,5 +12,6 @@ if [ -n 
"$CCACHE_DIR" ]; then chmod -R a+rwx $CCACHE_DIR || true fi +df -h ccache --show-stats ccache -M ${CCACHE_SIZE:=32G} diff --git a/docs/en/operations/server_settings/settings.md b/docs/en/operations/server_settings/settings.md index 39858c27e72..99419345600 100644 --- a/docs/en/operations/server_settings/settings.md +++ b/docs/en/operations/server_settings/settings.md @@ -657,6 +657,16 @@ The uncompressed cache is advantageous for very short queries in individual case 8589934592 ``` +## user_files_path + +The directory with user files. Used in the [file()](../../table_functions/file.md#table_functions-file) table function. + +**Example** + +```xml +/var/lib/clickhouse/user_files/ +``` + ## users_config diff --git a/docs/en/table_engines/mysql.md b/docs/en/table_engines/mysql.md index 42a0e2d0c1b..c9b90d2e253 100644 --- a/docs/en/table_engines/mysql.md +++ b/docs/en/table_engines/mysql.md @@ -4,13 +4,16 @@ The MySQL engine allows you to perform SELECT queries on data that is stored on a remote MySQL server. -The engine takes 4 parameters: the server address (host and port); the name of the database; the name of the table; the user's name; the user's password. Example: +The engine takes 5-7 parameters: the server address (host and port); the name of the database; the name of the table; the user's name; the user's password; whether to use a replace query; the on duplicate clause. Example: ```text -MySQL('host:port', 'database', 'table', 'user', 'password'); +MySQL('host:port', 'database', 'table', 'user', 'password'[, replace_query, 'on_duplicate_clause']); ``` At this time, simple WHERE clauses such as ```=, !=, >, >=, <, <=``` are executed on the MySQL server. The rest of the conditions and the LIMIT sampling constraint are executed in ClickHouse only after the query to MySQL finishes. +If `replace_query` is set to 1, then an `INSERT INTO` query to this table is transformed to `REPLACE INTO`.
+If `on_duplicate_clause` is specified, e.g. `update impression = values(impression) + impression`, then `on_duplicate_clause` is appended to the end of the MySQL `INSERT` query. +Note that only one of `replace_query` and `on_duplicate_clause` can be specified, or neither of them. diff --git a/docs/en/table_functions/file.md b/docs/en/table_functions/file.md new file mode 100644 index 00000000000..2760f7e56c2 --- /dev/null +++ b/docs/en/table_functions/file.md @@ -0,0 +1,18 @@ + + +# file + +`file(path, format, structure)` - returns a table created from the file at `path`, parsed according to the given `format`, with the columns specified in `structure`. + +path - a relative path to a file from [user_files_path](../operations/server_settings/settings.md#user_files_path). + +format - file [format](../formats/index.md). + +structure - table structure in 'UserID UInt64, URL String' format. Determines column names and types. + +**Example** + +```sql +-- getting the first 10 lines of a table that contains 3 columns of UInt32 type from a CSV file +SELECT * FROM file('test.csv', 'CSV', 'column1 UInt32, column2 UInt32, column3 UInt32') LIMIT 10 +``` diff --git a/docs/mkdocs_en.yml b/docs/mkdocs_en.yml index 08209f90550..566557dd095 100644 --- a/docs/mkdocs_en.yml +++ b/docs/mkdocs_en.yml @@ -122,9 +122,10 @@ pages: - 'Table functions': - 'Introduction': 'table_functions/index.md' - - 'remote': 'table_functions/remote.md' + - 'file': 'table_functions/file.md' - 'merge': 'table_functions/merge.md' - 'numbers': 'table_functions/numbers.md' + - 'remote': 'table_functions/remote.md' - 'Formats': - 'Introduction': 'formats/index.md' diff --git a/docs/mkdocs_ru.yml b/docs/mkdocs_ru.yml index 2e8eae30640..8207ebe5f53 100644 --- a/docs/mkdocs_ru.yml +++ b/docs/mkdocs_ru.yml @@ -122,9 +122,10 @@ pages: - 'Табличные функции': - 'Введение': 'table_functions/index.md' - - 'remote': 'table_functions/remote.md' + - 'file': 'table_functions/file.md' - 'merge': 'table_functions/merge.md' - 'numbers': 'table_functions/numbers.md' + - 
'remote': 'table_functions/remote.md' - 'Форматы': - 'Введение': 'formats/index.md' diff --git a/docs/ru/operations/server_settings/settings.md b/docs/ru/operations/server_settings/settings.md index a96f3df3666..9398803c4f5 100644 --- a/docs/ru/operations/server_settings/settings.md +++ b/docs/ru/operations/server_settings/settings.md @@ -660,6 +660,16 @@ ClickHouse проверит условия `min_part_size` и `min_part_size_rat 8589934592 ``` +## user_files_path + +Каталог с пользовательскими файлами. Используется в табличной функции [file()](../../table_functions/file.md#table_functions-file). + +**Пример** + +```xml +/var/lib/clickhouse/user_files/ +``` + ## users_config diff --git a/docs/ru/table_functions/file.md b/docs/ru/table_functions/file.md new file mode 100644 index 00000000000..88bb201eb9b --- /dev/null +++ b/docs/ru/table_functions/file.md @@ -0,0 +1,18 @@ + + +# file + +`file(path, format, structure)` - возвращает таблицу со столбцами, указанными в structure, созданную из файла path типа format. + +path - относительный путь до файла от [user_files_path](../operations/server_settings/settings.md#user_files_path). + +format - [формат](../formats/index.md) файла. + +structure - структура таблицы в форме 'UserID UInt64, URL String'. Определяет имена и типы столбцов. 
+ +**Пример** + +```sql +-- получение первых 10 строк таблицы, состоящей из трёх колонок типа UInt32 из CSV файла +SELECT * FROM file('test.csv', 'CSV', 'column1 UInt32, column2 UInt32, column3 UInt32') LIMIT 10 +``` diff --git a/libs/libdaemon/CMakeLists.txt b/libs/libdaemon/CMakeLists.txt index 6f31c4e5b38..a0e4e7d2733 100644 --- a/libs/libdaemon/CMakeLists.txt +++ b/libs/libdaemon/CMakeLists.txt @@ -17,4 +17,4 @@ endif () target_include_directories (daemon PUBLIC include) target_include_directories (daemon PRIVATE ${ClickHouse_SOURCE_DIR}/libs/libpocoext/include) -target_link_libraries (daemon clickhouse_common_io clickhouse_common_config ${EXECINFO_LIBRARY}) +target_link_libraries (daemon clickhouse_common_io clickhouse_common_config ${EXECINFO_LIBRARY} ${ELF_LIBRARY}) diff --git a/libs/libdaemon/src/BaseDaemon.cpp b/libs/libdaemon/src/BaseDaemon.cpp index 01fd377b66e..0d2b53fd9bc 100644 --- a/libs/libdaemon/src/BaseDaemon.cpp +++ b/libs/libdaemon/src/BaseDaemon.cpp @@ -700,7 +700,7 @@ void BaseDaemon::buildLoggers(Poco::Util::AbstractConfiguration & config) return; config_logger = current_logger; - bool is_daemon = config.getBool("application.runAsDaemon", true); + bool is_daemon = config.getBool("application.runAsDaemon", false); // Split log and error log. Poco::AutoPtr split = new SplitterChannel; @@ -883,7 +883,7 @@ void BaseDaemon::initialize(Application & self) config().add(map_config, PRIO_APPLICATION - 100); } - bool is_daemon = config().getBool("application.runAsDaemon", true); + bool is_daemon = config().getBool("application.runAsDaemon", false); if (is_daemon) { @@ -943,29 +943,30 @@ void BaseDaemon::initialize(Application & self) if (!log_path.empty()) log_path = Poco::Path(log_path).setFileName("").toString(); - if (is_daemon) + /** Redirect stdout, stderr to separate files in the log directory (or in the specified file). 
+ * Some libraries write to stderr in case of errors in debug mode, + * and this output makes sense even if the program is run in daemon mode. + * We have to do it before buildLoggers, for errors on logger initialization will be written to these files. + * If logger.stderr is specified then stderr will be forcibly redirected to that file. + */ + if ((!log_path.empty() && is_daemon) || config().has("logger.stderr")) { - /** Redirect stdout, stderr to separate files in the log directory. - * Some libraries write to stderr in case of errors in debug mode, - * and this output makes sense even if the program is run in daemon mode. - * We have to do it before buildLoggers, for errors on logger initialization will be written to these files. - */ - if (!log_path.empty()) - { - std::string stdout_path = log_path + "/stdout"; - if (!freopen(stdout_path.c_str(), "a+", stdout)) - throw Poco::OpenFileException("Cannot attach stdout to " + stdout_path); - - std::string stderr_path = log_path + "/stderr"; - if (!freopen(stderr_path.c_str(), "a+", stderr)) - throw Poco::OpenFileException("Cannot attach stderr to " + stderr_path); - } - - /// Create pid file. - if (is_daemon && config().has("pid")) - pid.seed(config().getString("pid")); + std::string stderr_path = config().getString("logger.stderr", log_path + "/stderr"); + if (!freopen(stderr_path.c_str(), "a+", stderr)) + throw Poco::OpenFileException("Cannot attach stderr to " + stderr_path); } + if ((!log_path.empty() && is_daemon) || config().has("logger.stdout")) + { + std::string stdout_path = config().getString("logger.stdout", log_path + "/stdout"); + if (!freopen(stdout_path.c_str(), "a+", stdout)) + throw Poco::OpenFileException("Cannot attach stdout to " + stdout_path); + } + + /// Create pid file. + if (is_daemon && config().has("pid")) + pid.seed(config().getString("pid")); + /// Change path for logging. if (!log_path.empty()) {