Merge branch 'master' of github.com:ClickHouse/ClickHouse into pipe_reading

commit f27e69a31f by BoloniniD, 2021-07-15 17:37:37 +03:00
414 changed files with 8762 additions and 2839 deletions

.gitmodules

@@ -193,7 +193,7 @@
 	url = https://github.com/danlark1/miniselect
 [submodule "contrib/rocksdb"]
 	path = contrib/rocksdb
 	url = https://github.com/ClickHouse-Extras/rocksdb.git
 [submodule "contrib/xz"]
 	path = contrib/xz
 	url = https://github.com/xz-mirror/xz
@@ -228,3 +228,9 @@
[submodule "contrib/libpqxx"] [submodule "contrib/libpqxx"]
path = contrib/libpqxx path = contrib/libpqxx
url = https://github.com/ClickHouse-Extras/libpqxx.git url = https://github.com/ClickHouse-Extras/libpqxx.git
[submodule "contrib/sqlite-amalgamation"]
path = contrib/sqlite-amalgamation
url = https://github.com/azadkuh/sqlite-amalgamation
[submodule "contrib/s2geometry"]
path = contrib/s2geometry
url = https://github.com/ClickHouse-Extras/s2geometry.git

CMakeLists.txt

@@ -536,10 +536,12 @@ include (cmake/find/rapidjson.cmake)
 include (cmake/find/fastops.cmake)
 include (cmake/find/odbc.cmake)
 include (cmake/find/nanodbc.cmake)
+include (cmake/find/sqlite.cmake)
 include (cmake/find/rocksdb.cmake)
 include (cmake/find/libpqxx.cmake)
 include (cmake/find/nuraft.cmake)
 include (cmake/find/yaml-cpp.cmake)
+include (cmake/find/s2geometry.cmake)
 if(NOT USE_INTERNAL_PARQUET_LIBRARY)
     set (ENABLE_ORC OFF CACHE INTERNAL "")

cmake/autogenerated_versions.txt

@@ -2,11 +2,11 @@
 # NOTE: has nothing common with DBMS_TCP_PROTOCOL_VERSION,
 # only DBMS_TCP_PROTOCOL_VERSION should be incremented on protocol changes.
-SET(VERSION_REVISION 54453)
+SET(VERSION_REVISION 54454)
 SET(VERSION_MAJOR 21)
-SET(VERSION_MINOR 8)
+SET(VERSION_MINOR 9)
 SET(VERSION_PATCH 1)
-SET(VERSION_GITHASH fb895056568e26200629c7d19626e92d2dedc70d)
-SET(VERSION_DESCRIBE v21.8.1.1-prestable)
-SET(VERSION_STRING 21.8.1.1)
+SET(VERSION_GITHASH f48c5af90c2ad51955d1ee3b6b05d006b03e4238)
+SET(VERSION_DESCRIBE v21.9.1.1-prestable)
+SET(VERSION_STRING 21.9.1.1)
 # end of autochange

cmake/find/s2geometry.cmake (new file)

@@ -0,0 +1,24 @@
option(ENABLE_S2_GEOMETRY "Enable S2 geometry library" ${ENABLE_LIBRARIES})

if (ENABLE_S2_GEOMETRY)
    if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/s2geometry")
        message (WARNING "submodule contrib/s2geometry is missing. to fix try run: \n git submodule update --init --recursive")
        set (ENABLE_S2_GEOMETRY 0)
        set (USE_S2_GEOMETRY 0)
    else()
        if (OPENSSL_FOUND)
            set (S2_GEOMETRY_LIBRARY s2)
            set (S2_GEOMETRY_INCLUDE_DIR ${ClickHouse_SOURCE_DIR}/contrib/s2geometry/src/s2)
            set (USE_S2_GEOMETRY 1)
        else()
            message (WARNING "S2 uses OpenSSL, but the latter is absent.")
        endif()
    endif()

    if (NOT USE_S2_GEOMETRY)
        message (${RECONFIGURE_MESSAGE_LEVEL} "Can't enable S2 geometry library")
    endif()
endif()

message (STATUS "Using s2geometry=${USE_S2_GEOMETRY} : ${S2_GEOMETRY_INCLUDE_DIR}")

cmake/find/sqlite.cmake (new file)

@@ -0,0 +1,16 @@
option(ENABLE_SQLITE "Enable sqlite" ${ENABLE_LIBRARIES})

if (NOT ENABLE_SQLITE)
    return()
endif()

if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/sqlite-amalgamation/sqlite3.c")
    message (WARNING "submodule contrib/sqlite3-amalgamation is missing. to fix try run: \n git submodule update --init --recursive")
    message (${RECONFIGURE_MESSAGE_LEVEL} "Can't find internal sqlite library")
    set (USE_SQLITE 0)
    return()
endif()

set (USE_SQLITE 1)
set (SQLITE_LIBRARY sqlite)
message (STATUS "Using sqlite=${USE_SQLITE}")

cmake/find/stats.cmake

@@ -1,4 +1,4 @@
-option(ENABLE_STATS "Enalbe StatsLib library" ${ENABLE_LIBRARIES})
+option(ENABLE_STATS "Enable StatsLib library" ${ENABLE_LIBRARIES})
 if (ENABLE_STATS)
     if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/stats")

contrib/CMakeLists.txt

@@ -1,3 +1,4 @@
+# Third-party libraries may have substandard code.
 # Put all targets defined here and in added subfolders under "contrib/" folder in GUI-based IDEs by default.
 # Some of third-party projects may override CMAKE_FOLDER or FOLDER property of their targets, so they will
@@ -10,10 +11,8 @@ else ()
 endif ()
 unset (_current_dir_name)
-# Third-party libraries may have substandard code.
-# Also remove a possible source of nondeterminism.
-set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -w -D__DATE__= -D__TIME__= -D__TIMESTAMP__=")
-set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -w -D__DATE__= -D__TIME__= -D__TIMESTAMP__=")
+set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -w")
+set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -w")
 if (WITH_COVERAGE)
     set (WITHOUT_COVERAGE_LIST ${WITHOUT_COVERAGE})
@@ -329,3 +328,10 @@ endif()
 add_subdirectory(fast_float)
+
+if (USE_SQLITE)
+    add_subdirectory(sqlite-cmake)
+endif()
+
+if (USE_S2_GEOMETRY)
+    add_subdirectory(s2geometry-cmake)
+endif()

contrib/h3 (submodule)

@@ -1 +1 @@
-Subproject commit e209086ae1b5477307f545a0f6111780edc59940
+Subproject commit c7f46cfd71fb60e2fefc90e28abe81657deff735

contrib/h3-cmake/CMakeLists.txt

@@ -3,21 +3,22 @@ set(H3_BINARY_DIR "${ClickHouse_BINARY_DIR}/contrib/h3/src/h3lib")
 set(SRCS
     "${H3_SOURCE_DIR}/lib/algos.c"
-    "${H3_SOURCE_DIR}/lib/baseCells.c"
-    "${H3_SOURCE_DIR}/lib/bbox.c"
     "${H3_SOURCE_DIR}/lib/coordijk.c"
-    "${H3_SOURCE_DIR}/lib/faceijk.c"
-    "${H3_SOURCE_DIR}/lib/geoCoord.c"
-    "${H3_SOURCE_DIR}/lib/h3Index.c"
-    "${H3_SOURCE_DIR}/lib/h3UniEdge.c"
-    "${H3_SOURCE_DIR}/lib/linkedGeo.c"
-    "${H3_SOURCE_DIR}/lib/localij.c"
-    "${H3_SOURCE_DIR}/lib/mathExtensions.c"
+    "${H3_SOURCE_DIR}/lib/bbox.c"
     "${H3_SOURCE_DIR}/lib/polygon.c"
+    "${H3_SOURCE_DIR}/lib/h3Index.c"
     "${H3_SOURCE_DIR}/lib/vec2d.c"
     "${H3_SOURCE_DIR}/lib/vec3d.c"
     "${H3_SOURCE_DIR}/lib/vertex.c"
+    "${H3_SOURCE_DIR}/lib/linkedGeo.c"
+    "${H3_SOURCE_DIR}/lib/localij.c"
+    "${H3_SOURCE_DIR}/lib/latLng.c"
+    "${H3_SOURCE_DIR}/lib/directedEdge.c"
+    "${H3_SOURCE_DIR}/lib/mathExtensions.c"
+    "${H3_SOURCE_DIR}/lib/iterators.c"
     "${H3_SOURCE_DIR}/lib/vertexGraph.c"
+    "${H3_SOURCE_DIR}/lib/faceijk.c"
+    "${H3_SOURCE_DIR}/lib/baseCells.c"
 )

 configure_file("${H3_SOURCE_DIR}/include/h3api.h.in" "${H3_BINARY_DIR}/include/h3api.h")

contrib/poco (submodule)

@@ -1 +1 @@
-Subproject commit 5994506908028612869fee627d68d8212dfe7c1e
+Subproject commit 7351c4691b5d401f59e3959adfc5b4fa263b32da

contrib/s2geometry (new submodule)

@@ -0,0 +1 @@
Subproject commit 20ea540d81f4575a3fc0aea585aac611bcd03ede

contrib/s2geometry-cmake/CMakeLists.txt (new file)

@@ -0,0 +1,126 @@
set(S2_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/s2geometry/src")
set(S2_SRCS
"${S2_SOURCE_DIR}/s2/base/stringprintf.cc"
"${S2_SOURCE_DIR}/s2/base/strtoint.cc"
"${S2_SOURCE_DIR}/s2/encoded_s2cell_id_vector.cc"
"${S2_SOURCE_DIR}/s2/encoded_s2point_vector.cc"
"${S2_SOURCE_DIR}/s2/encoded_s2shape_index.cc"
"${S2_SOURCE_DIR}/s2/encoded_string_vector.cc"
"${S2_SOURCE_DIR}/s2/id_set_lexicon.cc"
"${S2_SOURCE_DIR}/s2/mutable_s2shape_index.cc"
"${S2_SOURCE_DIR}/s2/r2rect.cc"
"${S2_SOURCE_DIR}/s2/s1angle.cc"
"${S2_SOURCE_DIR}/s2/s1chord_angle.cc"
"${S2_SOURCE_DIR}/s2/s1interval.cc"
"${S2_SOURCE_DIR}/s2/s2boolean_operation.cc"
"${S2_SOURCE_DIR}/s2/s2builder.cc"
"${S2_SOURCE_DIR}/s2/s2builder_graph.cc"
"${S2_SOURCE_DIR}/s2/s2builderutil_closed_set_normalizer.cc"
"${S2_SOURCE_DIR}/s2/s2builderutil_find_polygon_degeneracies.cc"
"${S2_SOURCE_DIR}/s2/s2builderutil_lax_polygon_layer.cc"
"${S2_SOURCE_DIR}/s2/s2builderutil_s2point_vector_layer.cc"
"${S2_SOURCE_DIR}/s2/s2builderutil_s2polygon_layer.cc"
"${S2_SOURCE_DIR}/s2/s2builderutil_s2polyline_layer.cc"
"${S2_SOURCE_DIR}/s2/s2builderutil_s2polyline_vector_layer.cc"
"${S2_SOURCE_DIR}/s2/s2builderutil_snap_functions.cc"
"${S2_SOURCE_DIR}/s2/s2cap.cc"
"${S2_SOURCE_DIR}/s2/s2cell.cc"
"${S2_SOURCE_DIR}/s2/s2cell_id.cc"
"${S2_SOURCE_DIR}/s2/s2cell_index.cc"
"${S2_SOURCE_DIR}/s2/s2cell_union.cc"
"${S2_SOURCE_DIR}/s2/s2centroids.cc"
"${S2_SOURCE_DIR}/s2/s2closest_cell_query.cc"
"${S2_SOURCE_DIR}/s2/s2closest_edge_query.cc"
"${S2_SOURCE_DIR}/s2/s2closest_point_query.cc"
"${S2_SOURCE_DIR}/s2/s2contains_vertex_query.cc"
"${S2_SOURCE_DIR}/s2/s2convex_hull_query.cc"
"${S2_SOURCE_DIR}/s2/s2coords.cc"
"${S2_SOURCE_DIR}/s2/s2crossing_edge_query.cc"
"${S2_SOURCE_DIR}/s2/s2debug.cc"
"${S2_SOURCE_DIR}/s2/s2earth.cc"
"${S2_SOURCE_DIR}/s2/s2edge_clipping.cc"
"${S2_SOURCE_DIR}/s2/s2edge_crosser.cc"
"${S2_SOURCE_DIR}/s2/s2edge_crossings.cc"
"${S2_SOURCE_DIR}/s2/s2edge_distances.cc"
"${S2_SOURCE_DIR}/s2/s2edge_tessellator.cc"
"${S2_SOURCE_DIR}/s2/s2error.cc"
"${S2_SOURCE_DIR}/s2/s2furthest_edge_query.cc"
"${S2_SOURCE_DIR}/s2/s2latlng.cc"
"${S2_SOURCE_DIR}/s2/s2latlng_rect.cc"
"${S2_SOURCE_DIR}/s2/s2latlng_rect_bounder.cc"
"${S2_SOURCE_DIR}/s2/s2lax_loop_shape.cc"
"${S2_SOURCE_DIR}/s2/s2lax_polygon_shape.cc"
"${S2_SOURCE_DIR}/s2/s2lax_polyline_shape.cc"
"${S2_SOURCE_DIR}/s2/s2loop.cc"
"${S2_SOURCE_DIR}/s2/s2loop_measures.cc"
"${S2_SOURCE_DIR}/s2/s2measures.cc"
"${S2_SOURCE_DIR}/s2/s2metrics.cc"
"${S2_SOURCE_DIR}/s2/s2max_distance_targets.cc"
"${S2_SOURCE_DIR}/s2/s2min_distance_targets.cc"
"${S2_SOURCE_DIR}/s2/s2padded_cell.cc"
"${S2_SOURCE_DIR}/s2/s2point_compression.cc"
"${S2_SOURCE_DIR}/s2/s2point_region.cc"
"${S2_SOURCE_DIR}/s2/s2pointutil.cc"
"${S2_SOURCE_DIR}/s2/s2polygon.cc"
"${S2_SOURCE_DIR}/s2/s2polyline.cc"
"${S2_SOURCE_DIR}/s2/s2polyline_alignment.cc"
"${S2_SOURCE_DIR}/s2/s2polyline_measures.cc"
"${S2_SOURCE_DIR}/s2/s2polyline_simplifier.cc"
"${S2_SOURCE_DIR}/s2/s2predicates.cc"
"${S2_SOURCE_DIR}/s2/s2projections.cc"
"${S2_SOURCE_DIR}/s2/s2r2rect.cc"
"${S2_SOURCE_DIR}/s2/s2region.cc"
"${S2_SOURCE_DIR}/s2/s2region_term_indexer.cc"
"${S2_SOURCE_DIR}/s2/s2region_coverer.cc"
"${S2_SOURCE_DIR}/s2/s2region_intersection.cc"
"${S2_SOURCE_DIR}/s2/s2region_union.cc"
"${S2_SOURCE_DIR}/s2/s2shape_index.cc"
"${S2_SOURCE_DIR}/s2/s2shape_index_buffered_region.cc"
"${S2_SOURCE_DIR}/s2/s2shape_index_measures.cc"
"${S2_SOURCE_DIR}/s2/s2shape_measures.cc"
"${S2_SOURCE_DIR}/s2/s2shapeutil_build_polygon_boundaries.cc"
"${S2_SOURCE_DIR}/s2/s2shapeutil_coding.cc"
"${S2_SOURCE_DIR}/s2/s2shapeutil_contains_brute_force.cc"
"${S2_SOURCE_DIR}/s2/s2shapeutil_edge_iterator.cc"
"${S2_SOURCE_DIR}/s2/s2shapeutil_get_reference_point.cc"
"${S2_SOURCE_DIR}/s2/s2shapeutil_range_iterator.cc"
"${S2_SOURCE_DIR}/s2/s2shapeutil_visit_crossing_edge_pairs.cc"
"${S2_SOURCE_DIR}/s2/s2text_format.cc"
"${S2_SOURCE_DIR}/s2/s2wedge_relations.cc"
"${S2_SOURCE_DIR}/s2/strings/ostringstream.cc"
"${S2_SOURCE_DIR}/s2/strings/serialize.cc"
# ClickHouse doesn't use strings from abseil.
    # So there are no duplicate symbols.
"${S2_SOURCE_DIR}/s2/third_party/absl/base/dynamic_annotations.cc"
"${S2_SOURCE_DIR}/s2/third_party/absl/base/internal/raw_logging.cc"
"${S2_SOURCE_DIR}/s2/third_party/absl/base/internal/throw_delegate.cc"
"${S2_SOURCE_DIR}/s2/third_party/absl/numeric/int128.cc"
"${S2_SOURCE_DIR}/s2/third_party/absl/strings/ascii.cc"
"${S2_SOURCE_DIR}/s2/third_party/absl/strings/match.cc"
"${S2_SOURCE_DIR}/s2/third_party/absl/strings/numbers.cc"
"${S2_SOURCE_DIR}/s2/third_party/absl/strings/str_cat.cc"
"${S2_SOURCE_DIR}/s2/third_party/absl/strings/str_split.cc"
"${S2_SOURCE_DIR}/s2/third_party/absl/strings/string_view.cc"
"${S2_SOURCE_DIR}/s2/third_party/absl/strings/strip.cc"
"${S2_SOURCE_DIR}/s2/third_party/absl/strings/internal/memutil.cc"
"${S2_SOURCE_DIR}/s2/util/bits/bit-interleave.cc"
"${S2_SOURCE_DIR}/s2/util/bits/bits.cc"
"${S2_SOURCE_DIR}/s2/util/coding/coder.cc"
"${S2_SOURCE_DIR}/s2/util/coding/varint.cc"
"${S2_SOURCE_DIR}/s2/util/math/exactfloat/exactfloat.cc"
"${S2_SOURCE_DIR}/s2/util/math/mathutil.cc"
"${S2_SOURCE_DIR}/s2/util/units/length-units.cc"
)
add_library(s2 ${S2_SRCS})

if (OPENSSL_FOUND)
    target_link_libraries(s2 PRIVATE ${OPENSSL_LIBRARIES})
endif()

target_include_directories(s2 SYSTEM BEFORE PUBLIC "${S2_SOURCE_DIR}/")

if(M_LIBRARY)
    target_link_libraries(s2 PRIVATE ${M_LIBRARY})
endif()

contrib/sqlite-amalgamation (new submodule)

@@ -0,0 +1 @@
Subproject commit 9818baa5d027ffb26d57f810dc4c597d4946781c

contrib/sqlite-cmake/CMakeLists.txt (new file)

@@ -0,0 +1,6 @@
set (LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/sqlite-amalgamation")
set(SRCS ${LIBRARY_DIR}/sqlite3.c)
add_library(sqlite ${SRCS})
target_include_directories(sqlite SYSTEM PUBLIC "${LIBRARY_DIR}")

debian/changelog

@@ -1,5 +1,5 @@
-clickhouse (21.8.1.1) unstable; urgency=low
+clickhouse (21.9.1.1) unstable; urgency=low

   * Modified source code

- -- clickhouse-release <clickhouse-release@yandex-team.ru> Mon, 28 Jun 2021 00:50:15 +0300
+ -- clickhouse-release <clickhouse-release@yandex-team.ru> Sat, 10 Jul 2021 08:22:49 +0300

docker/client/Dockerfile

@@ -1,7 +1,7 @@
 FROM ubuntu:18.04

 ARG repository="deb https://repo.clickhouse.tech/deb/stable/ main/"
-ARG version=21.8.1.*
+ARG version=21.9.1.*

 RUN apt-get update \
     && apt-get install --yes --no-install-recommends \

docker/server/Dockerfile

@@ -1,7 +1,7 @@
 FROM ubuntu:20.04

 ARG repository="deb https://repo.clickhouse.tech/deb/stable/ main/"
-ARG version=21.8.1.*
+ARG version=21.9.1.*
 ARG gosu_ver=1.10

 # set non-empty deb_location_url url to create a docker image

docker/test/Dockerfile

@@ -1,7 +1,7 @@
 FROM ubuntu:18.04

 ARG repository="deb https://repo.clickhouse.tech/deb/stable/ main/"
-ARG version=21.8.1.*
+ARG version=21.9.1.*

 RUN apt-get update && \
     apt-get install -y apt-transport-https dirmngr && \

docker/test/fasttest/run.sh

@@ -378,6 +378,16 @@ function run_tests
         # needs pv
         01923_network_receive_time_metric_insert
+
+        01889_sqlite_read_write
+
+        # needs s2
+        01849_geoToS2
+        01851_s2_to_geo
+        01852_s2_get_neighbours
+        01853_s2_cells_intersect
+        01854_s2_cap_contains
+        01854_s2_cap_union
     )

     time clickhouse-test --hung-check -j 8 --order=random --use-skip-list \


@@ -32,7 +32,7 @@ RUN rm -rf \
 RUN apt-get clean

 # Install MySQL ODBC driver
-RUN curl 'https://cdn.mysql.com//Downloads/Connector-ODBC/8.0/mysql-connector-odbc-8.0.21-linux-glibc2.12-x86-64bit.tar.gz' --output 'mysql-connector.tar.gz' && tar -xzf mysql-connector.tar.gz && cd mysql-connector-odbc-8.0.21-linux-glibc2.12-x86-64bit/lib && mv * /usr/local/lib && ln -s /usr/local/lib/libmyodbc8a.so /usr/lib/x86_64-linux-gnu/odbc/libmyodbc.so
+RUN curl 'https://downloads.mysql.com/archives/get/p/10/file/mysql-connector-odbc-8.0.21-linux-glibc2.12-x86-64bit.tar.gz' --location --output 'mysql-connector.tar.gz' && tar -xzf mysql-connector.tar.gz && cd mysql-connector-odbc-8.0.21-linux-glibc2.12-x86-64bit/lib && mv * /usr/local/lib && ln -s /usr/local/lib/libmyodbc8a.so /usr/lib/x86_64-linux-gnu/odbc/libmyodbc.so

 # Unfortunately this is required for a single test for conversion data from zookeeper to clickhouse-keeper.
 # ZooKeeper is not started by default, but consumes some space in containers.
@@ -49,4 +49,3 @@ RUN mkdir /zookeeper && chmod -R 777 /zookeeper
 ENV TZ=Europe/Moscow
 RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone

docker/test/stateless/Dockerfile

@@ -29,7 +29,8 @@ RUN apt-get update -y \
     unixodbc \
     wget \
     mysql-client=5.7* \
-    postgresql-client
+    postgresql-client \
+    sqlite3

 RUN pip3 install numpy scipy pandas

docker/test/stress/run.sh

@@ -118,7 +118,7 @@ clickhouse-client --query "SELECT 'Server successfully started', 'OK'" >> /test_
 [ -f /var/log/clickhouse-server/stderr.log ] || echo -e "Stderr log does not exist\tFAIL"

 # Print Fatal log messages to stdout
-zgrep -Fa " <Fatal> " /var/log/clickhouse-server/clickhouse-server.log
+zgrep -Fa " <Fatal> " /var/log/clickhouse-server/clickhouse-server.log*

 # Grep logs for sanitizer asserts, crashes and other critical errors
@@ -131,22 +131,22 @@ zgrep -Fav "ASan doesn't fully support makecontext/swapcontext functions" > /dev
 rm -f /test_output/tmp

 # OOM
-zgrep -Fa " <Fatal> Application: Child process was terminated by signal 9" /var/log/clickhouse-server/clickhouse-server.log > /dev/null \
+zgrep -Fa " <Fatal> Application: Child process was terminated by signal 9" /var/log/clickhouse-server/clickhouse-server.log* > /dev/null \
     && echo -e 'OOM killer (or signal 9) in clickhouse-server.log\tFAIL' >> /test_output/test_results.tsv \
     || echo -e 'No OOM messages in clickhouse-server.log\tOK' >> /test_output/test_results.tsv

 # Logical errors
-zgrep -Fa "Code: 49, e.displayText() = DB::Exception:" /var/log/clickhouse-server/clickhouse-server.log > /dev/null \
+zgrep -Fa "Code: 49, e.displayText() = DB::Exception:" /var/log/clickhouse-server/clickhouse-server.log* > /dev/null \
     && echo -e 'Logical error thrown (see clickhouse-server.log)\tFAIL' >> /test_output/test_results.tsv \
     || echo -e 'No logical errors\tOK' >> /test_output/test_results.tsv

 # Crash
-zgrep -Fa "########################################" /var/log/clickhouse-server/clickhouse-server.log > /dev/null \
+zgrep -Fa "########################################" /var/log/clickhouse-server/clickhouse-server.log* > /dev/null \
     && echo -e 'Killed by signal (in clickhouse-server.log)\tFAIL' >> /test_output/test_results.tsv \
     || echo -e 'Not crashed\tOK' >> /test_output/test_results.tsv

 # It also checks for crash without stacktrace (printed by watchdog)
-zgrep -Fa " <Fatal> " /var/log/clickhouse-server/clickhouse-server.log > /dev/null \
+zgrep -Fa " <Fatal> " /var/log/clickhouse-server/clickhouse-server.log* > /dev/null \
     && echo -e 'Fatal message in clickhouse-server.log\tFAIL' >> /test_output/test_results.tsv \
     || echo -e 'No fatal messages in clickhouse-server.log\tOK' >> /test_output/test_results.tsv

docs/en/development/tests.md

@@ -105,11 +105,11 @@ clickhouse-client -nmT < tests/queries/0_stateless/01521_dummy_test.sql | tee te
 5) ensure everything is correct, if the test output is incorrect (due to some bug for example), adjust the reference file using text editor.

-#### How to create good test
+#### How to create a good test

-- test should be
+- A test should be
     - minimal - create only tables related to tested functionality, remove unrelated columns and parts of query
-    - fast - should not take longer than few seconds (better subseconds)
+    - fast - should not take longer than a few seconds (better subseconds)
     - correct - fails when the feature is not working
     - deterministic
     - isolated / stateless
@@ -126,6 +126,16 @@ clickhouse-client -nmT < tests/queries/0_stateless/01521_dummy_test.sql | tee te
 - use other SQL files in the `0_stateless` folder as an example
 - ensure the feature / feature combination you want to test is not yet covered with existing tests

+#### Test naming rules
+
+It's important to name tests correctly, so one could turn some subset of tests off in a clickhouse-test invocation.
+
+| Tester flag | What should be in the test name | When the flag should be added |
+|---|---|---|
+| `--[no-]zookeeper` | "zookeeper" or "replica" | Test uses tables from the ReplicatedMergeTree family |
+| `--[no-]shard` | "shard" or "distributed" or "global" | Test uses connections to 127.0.0.2 or similar |
+| `--[no-]long` | "long" or "deadlock" or "race" | Test runs longer than 60 seconds |

 #### Commit / push / create PR.

 1) commit & push your changes

docs/en/development/build.md

@@ -134,10 +134,10 @@ $ ./release
 ## Faster builds for development

-Normally all tools of the ClickHouse bundle, such as `clickhouse-server`, `clickhouse-client` etc., are linked into a single static executable, `clickhouse`. This executable must be re-linked on every change, which might be slow. Two common ways to improve linking time are to use `lld` linker, and use the 'split' build configuration, which builds a separate binary for every tool, and further splits the code into several shared libraries. To enable these tweaks, pass the following flags to `cmake`:
+Normally all tools of the ClickHouse bundle, such as `clickhouse-server`, `clickhouse-client` etc., are linked into a single static executable, `clickhouse`. This executable must be re-linked on every change, which might be slow. One common way to improve build time is to use the 'split' build configuration, which builds a separate binary for every tool, and further splits the code into several shared libraries. To enable this tweak, pass the following flags to `cmake`:

 ```
--DCMAKE_C_FLAGS="--ld-path=lld" -DCMAKE_CXX_FLAGS="--ld-path=lld" -DUSE_STATIC_LIBRARIES=0 -DSPLIT_SHARED_LIBRARIES=1 -DCLICKHOUSE_SPLIT_BINARY=1
+-DUSE_STATIC_LIBRARIES=0 -DSPLIT_SHARED_LIBRARIES=1 -DCLICKHOUSE_SPLIT_BINARY=1
 ```

 ## You Don't Have to Build ClickHouse {#you-dont-have-to-build-clickhouse}

docs/en/development/contrib.md

@@ -79,6 +79,7 @@ SELECT library_name, license_type, license_path FROM system.licenses ORDER BY li
 | re2 | BSD 3-clause | /contrib/re2/LICENSE |
 | replxx | BSD 3-clause | /contrib/replxx/LICENSE.md |
 | rocksdb | BSD 3-clause | /contrib/rocksdb/LICENSE.leveldb |
+| s2geometry | Apache | /contrib/s2geometry/LICENSE |
 | sentry-native | MIT | /contrib/sentry-native/LICENSE |
 | simdjson | Apache | /contrib/simdjson/LICENSE |
 | snappy | Public Domain | /contrib/snappy/COPYING |

docs/en/engines/database-engines/materialized-postgresql.md

@@ -1,6 +1,6 @@
 ---
 toc_priority: 12
-toc_title: MateriaziePostgreSQL
+toc_title: MaterializedPostgreSQL
 ---

 # MaterializedPostgreSQL {#materialize-postgresql}

docs/en/interfaces/formats.md

@@ -1246,12 +1246,14 @@ The table below shows supported data types and how they match ClickHouse [data t
 | `DOUBLE` | [Float64](../sql-reference/data-types/float.md) | `DOUBLE` |
 | `DATE32` | [Date](../sql-reference/data-types/date.md) | `UINT16` |
 | `DATE64`, `TIMESTAMP` | [DateTime](../sql-reference/data-types/datetime.md) | `UINT32` |
-| `STRING`, `BINARY` | [String](../sql-reference/data-types/string.md) | `STRING` |
-| — | [FixedString](../sql-reference/data-types/fixedstring.md) | `STRING` |
+| `STRING`, `BINARY` | [String](../sql-reference/data-types/string.md) | `BINARY` |
+| — | [FixedString](../sql-reference/data-types/fixedstring.md) | `BINARY` |
 | `DECIMAL` | [Decimal](../sql-reference/data-types/decimal.md) | `DECIMAL` |
 | `LIST` | [Array](../sql-reference/data-types/array.md) | `LIST` |
+| `STRUCT` | [Tuple](../sql-reference/data-types/tuple.md) | `STRUCT` |
+| `MAP` | [Map](../sql-reference/data-types/map.md) | `MAP` |

-Arrays can be nested and can have a value of the `Nullable` type as an argument.
+Arrays can be nested and can have a value of the `Nullable` type as an argument. `Tuple` and `Map` types also can be nested.

 ClickHouse supports configurable precision of `Decimal` type. The `INSERT` query treats the Parquet `DECIMAL` type as the ClickHouse `Decimal128` type.
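A minimal sketch of the nested-array behavior described above (table and file names are illustrative):

```sql
-- Nested Arrays with Nullable values round-trip through the Parquet format.
CREATE TABLE parquet_nested (arr Array(Array(Nullable(UInt32)))) ENGINE = Memory;
INSERT INTO parquet_nested VALUES ([[1, NULL], [3]]);
SELECT * FROM parquet_nested INTO OUTFILE 'nested.parquet' FORMAT Parquet;
```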
@@ -1299,13 +1301,17 @@ The table below shows supported data types and how they match ClickHouse [data t
 | `DOUBLE` | [Float64](../sql-reference/data-types/float.md) | `FLOAT64` |
 | `DATE32` | [Date](../sql-reference/data-types/date.md) | `UINT16` |
 | `DATE64`, `TIMESTAMP` | [DateTime](../sql-reference/data-types/datetime.md) | `UINT32` |
-| `STRING`, `BINARY` | [String](../sql-reference/data-types/string.md) | `UTF8` |
-| `STRING`, `BINARY` | [FixedString](../sql-reference/data-types/fixedstring.md) | `UTF8` |
+| `STRING`, `BINARY` | [String](../sql-reference/data-types/string.md) | `BINARY` |
+| `STRING`, `BINARY` | [FixedString](../sql-reference/data-types/fixedstring.md) | `BINARY` |
 | `DECIMAL` | [Decimal](../sql-reference/data-types/decimal.md) | `DECIMAL` |
 | `DECIMAL256` | [Decimal256](../sql-reference/data-types/decimal.md) | `DECIMAL256` |
 | `LIST` | [Array](../sql-reference/data-types/array.md) | `LIST` |
+| `STRUCT` | [Tuple](../sql-reference/data-types/tuple.md) | `STRUCT` |
+| `MAP` | [Map](../sql-reference/data-types/map.md) | `MAP` |

-Arrays can be nested and can have a value of the `Nullable` type as an argument.
+Arrays can be nested and can have a value of the `Nullable` type as an argument. `Tuple` and `Map` types also can be nested.
+
+The `DICTIONARY` type is supported for `INSERT` queries, and for `SELECT` queries there is an [output_format_arrow_low_cardinality_as_dictionary](../operations/settings/settings.md#output-format-arrow-low-cardinality-as-dictionary) setting that allows to output [LowCardinality](../sql-reference/data-types/lowcardinality.md) type as a `DICTIONARY` type.

 ClickHouse supports configurable precision of the `Decimal` type. The `INSERT` query treats the Arrow `DECIMAL` type as the ClickHouse `Decimal128` type.
@@ -1358,8 +1364,10 @@ The table below shows supported data types and how they match ClickHouse [data t
 | `STRING`, `BINARY` | [String](../sql-reference/data-types/string.md) | `BINARY` |
 | `DECIMAL` | [Decimal](../sql-reference/data-types/decimal.md) | `DECIMAL` |
 | `LIST` | [Array](../sql-reference/data-types/array.md) | `LIST` |
+| `STRUCT` | [Tuple](../sql-reference/data-types/tuple.md) | `STRUCT` |
+| `MAP` | [Map](../sql-reference/data-types/map.md) | `MAP` |

-Arrays can be nested and can have a value of the `Nullable` type as an argument.
+Arrays can be nested and can have a value of the `Nullable` type as an argument. `Tuple` and `Map` types also can be nested.

 ClickHouse supports configurable precision of the `Decimal` type. The `INSERT` query treats the ORC `DECIMAL` type as the ClickHouse `Decimal128` type.

docs/en/introduction/adopters.md

@@ -157,5 +157,6 @@ toc_title: Adopters
 | <a href="https://signoz.io/" class="favicon">SigNoz</a> | Observability Platform | Main Product | — | — | [Source code](https://github.com/SigNoz/signoz) |
 | <a href="https://chelpipegroup.com/" class="favicon">ChelPipe Group</a> | Analytics | — | — | — | [Blog post, June 2021](https://vc.ru/trade/253172-tyazhelomu-proizvodstvu-user-friendly-sayt-internet-magazin-trub-dlya-chtpz) |
 | <a href="https://zagravagames.com/en/" class="favicon">Zagrava Trading</a> | — | — | — | — | [Job offer, May 2021](https://twitter.com/datastackjobs/status/1394707267082063874) |
+| <a href="https://beeline.ru/" class="favicon">Beeline</a> | Telecom | Data Platform | — | — | [Blog post, July 2021](https://habr.com/en/company/beeline/blog/567508/) |

 [Original article](https://clickhouse.tech/docs/en/introduction/adopters/) <!--hide-->

docs/en/operations/clickhouse-keeper.md (new file)

@@ -0,0 +1,114 @@
---
toc_priority: 66
toc_title: ClickHouse Keeper
---
# [pre-production] clickhouse-keeper
ClickHouse server uses the [ZooKeeper](https://zookeeper.apache.org/) coordination system for data [replication](../engines/table-engines/mergetree-family/replication.md) and [distributed DDL](../sql-reference/distributed-ddl.md) query execution. ClickHouse Keeper is an alternative coordination system compatible with ZooKeeper.

!!! warning "Warning"
    This feature is currently in the pre-production stage. We test it in our CI and on small internal installations.

## Implementation details

ZooKeeper is one of the first well-known open-source coordination systems. It is implemented in Java and has quite a simple and powerful data model. ZooKeeper's coordination algorithm, ZAB (ZooKeeper Atomic Broadcast), doesn't provide linearizability guarantees for reads, because each ZooKeeper node serves reads locally. Unlike ZooKeeper, `clickhouse-keeper` is written in C++ and uses the [RAFT algorithm](https://raft.github.io/) [implementation](https://github.com/eBay/NuRaft). This algorithm provides linearizability for reads and writes and has several open-source implementations in different languages.

By default, `clickhouse-keeper` provides the same guarantees as ZooKeeper (linearizable writes, non-linearizable reads). It has a compatible client-server protocol, so any standard ZooKeeper client can be used to interact with `clickhouse-keeper`. Snapshots and logs have a format incompatible with ZooKeeper, but the `clickhouse-keeper-converter` tool can convert ZooKeeper data to a `clickhouse-keeper` snapshot. The interserver protocol in `clickhouse-keeper` is also incompatible with ZooKeeper, so a mixed ZooKeeper / clickhouse-keeper cluster is impossible.
## Configuration
`clickhouse-keeper` can be used as a standalone replacement for ZooKeeper or as an internal part of the `clickhouse-server`; in both cases the configuration is almost the same `.xml` file. The main `clickhouse-keeper` configuration tag is `<keeper_server>`. Keeper configuration has the following parameters:
- `tcp_port` — the port for a client to connect (default for ZooKeeper is `2181`)
- `tcp_port_secure` — the secure port for a client to connect
- `server_id` — unique server id, each participant of the clickhouse-keeper cluster must have a unique number (1, 2, 3, and so on)
- `log_storage_path` — path to coordination logs; it is better to store logs on a non-busy device (same as for ZooKeeper)
- `snapshot_storage_path` — path to coordination snapshots
Other common parameters are inherited from clickhouse-server config (`listen_host`, `logger` and so on).
Internal coordination settings are located in `<keeper_server>.<coordination_settings>` section:
- `operation_timeout_ms` — timeout for a single client operation
- `session_timeout_ms` — timeout for client session
- `dead_session_check_period_ms` — how often clickhouse-keeper checks dead sessions and removes them
- `heart_beat_interval_ms` — how often a clickhouse-keeper leader will send heartbeats to followers
- `election_timeout_lower_bound_ms` — if a follower doesn't receive heartbeats from the leader in this interval, then it can initiate leader election
- `election_timeout_upper_bound_ms` — if a follower doesn't receive heartbeats from the leader in this interval, then it must initiate leader election
- `rotate_log_storage_interval` — how many logs to store in a single file
- `reserved_log_items` — how many coordination logs to store before compaction
- `snapshot_distance` — how often clickhouse-keeper will create new snapshots (in the number of logs)
- `snapshots_to_keep` — how many snapshots to keep
- `stale_log_gap` — the threshold at which the leader considers a follower stale and sends a snapshot to it instead of logs
- `force_sync` — call `fsync` on each write to coordination log
- `raft_logs_level` — text logging level about coordination (trace, debug, and so on)
- `shutdown_timeout` — wait to finish internal connections and shutdown
- `startup_timeout` — if the server doesn't connect to other quorum participants in the specified timeout it will terminate
Quorum configuration is located in the `<keeper_server>.<raft_configuration>` section and contains a description of the servers. The only parameter for the whole quorum is `secure`, which enables an encrypted connection for communication between quorum participants. The main parameters for each `<server>` are:

- `id` — server_id in the quorum
- `hostname` — hostname of the machine where this server is placed
- `port` — port on which this server listens for connections
Examples of configuration for quorum with three nodes can be found in [integration tests](https://github.com/ClickHouse/ClickHouse/tree/master/tests/integration) with `test_keeper_` prefix. Example configuration for server #1:
```xml
<keeper_server>
    <tcp_port>2181</tcp_port>
    <server_id>1</server_id>
    <log_storage_path>/var/lib/clickhouse/coordination/log</log_storage_path>
    <snapshot_storage_path>/var/lib/clickhouse/coordination/snapshots</snapshot_storage_path>

    <coordination_settings>
        <operation_timeout_ms>10000</operation_timeout_ms>
        <session_timeout_ms>30000</session_timeout_ms>
        <raft_logs_level>trace</raft_logs_level>
    </coordination_settings>

    <raft_configuration>
        <server>
            <id>1</id>
            <hostname>zoo1</hostname>
            <port>9444</port>
        </server>
        <server>
            <id>2</id>
            <hostname>zoo2</hostname>
            <port>9444</port>
        </server>
        <server>
            <id>3</id>
            <hostname>zoo3</hostname>
            <port>9444</port>
        </server>
    </raft_configuration>
</keeper_server>
```
## How to run
`clickhouse-keeper` is bundled into the `clickhouse-server` package, so just add the `<keeper_server>` configuration and start clickhouse-server as always. If you want to run a standalone `clickhouse-keeper`, you can start it in a similar way with:
```bash
clickhouse-keeper --config /etc/your_path_to_config/config.xml --daemon
```
## [experimental] Migration from ZooKeeper
Seamless migration from ZooKeeper to `clickhouse-keeper` is impossible: you have to stop your ZooKeeper cluster, convert the data, and start `clickhouse-keeper`. The `clickhouse-keeper-converter` tool converts ZooKeeper logs and snapshots to a `clickhouse-keeper` snapshot. It works only with ZooKeeper > 3.4. Steps for migration:
1. Stop all ZooKeeper nodes.
2. [optional, but recommended] Find the ZooKeeper leader node, then start and stop it again. This forces ZooKeeper to create a consistent snapshot.
3. Run `clickhouse-keeper-converter` on the leader, for example:
```bash
clickhouse-keeper-converter --zookeeper-logs-dir /var/lib/zookeeper/version-2 --zookeeper-snapshots-dir /var/lib/zookeeper/version-2 --output-dir /path/to/clickhouse/keeper/snapshots
```
4. Copy the snapshot to `clickhouse-server` nodes with a configured `keeper`, or start `clickhouse-keeper` instead of ZooKeeper. The snapshot must persist only on the leader node; the leader will sync it automatically to the other nodes.

docs/en/operations/configuration-files.md

@@ -22,6 +22,23 @@ Some settings specified in the main configuration file can be overridden in other
 The config can also define “substitutions”. If an element has the `incl` attribute, the corresponding substitution from the file will be used as the value. By default, the path to the file with substitutions is `/etc/metrika.xml`. This can be changed in the [include_from](../operations/server-configuration-parameters/settings.md#server_configuration_parameters-include_from) element in the server config. The substitution values are specified in `/yandex/substitution_name` elements in this file. If a substitution specified in `incl` does not exist, it is recorded in the log. To prevent ClickHouse from logging missing substitutions, specify the `optional="true"` attribute (for example, settings for [macros](../operations/server-configuration-parameters/settings.md)).
+If you want to replace an entire element with a substitution, use `include` as the element name.
+
+XML substitution example:
+
+```xml
+<yandex>
+    <!-- Appends XML subtree found at `/profiles-in-zookeeper` ZK path to `<profiles>` element. -->
+    <profiles from_zk="/profiles-in-zookeeper" />
+
+    <users>
+        <!-- Replaces `include` element with the subtree found at `/users-in-zookeeper` ZK path. -->
+        <include from_zk="/users-in-zookeeper" />
+        <include from_zk="/other-users-in-zookeeper" />
+    </users>
+</yandex>
+```
 Substitutions can also be performed from ZooKeeper. To do this, specify the attribute `from_zk = "/path/to/node"`. The element value is replaced with the contents of the node at `/path/to/node` in ZooKeeper. You can also put an entire XML subtree on the ZooKeeper node and it will be fully inserted into the source element.

 ## User Settings {#user-settings}

@@ -32,6 +49,8 @@ Users configuration can be splitted into separate files similar to `config.xml`
 Directory name is defined as `users_config` setting without `.xml` postfix concatenated with `.d`.
 Directory `users.d` is used by default, as `users_config` defaults to `users.xml`.
+
+Note that configuration files are first merged taking into account [Override](#override) settings, and includes are processed after that.
 ## XML example {#example}

 For example, you can have separate config file for each user like this:

docs/en/operations/settings/settings.md

@@ -1213,7 +1213,15 @@ Default value: `3`.
 ## output_format_json_quote_64bit_integers {#session_settings-output_format_json_quote_64bit_integers}

-If the value is true, integers appear in quotes when using JSON\* Int64 and UInt64 formats (for compatibility with most JavaScript implementations); otherwise, integers are output without the quotes.
+Controls quoting of 64-bit or bigger [integers](../../sql-reference/data-types/int-uint.md) (like `UInt64` or `Int128`) when they are output in a [JSON](../../interfaces/formats.md#json) format.
+Such integers are enclosed in quotes by default. This behavior is compatible with most JavaScript implementations.
+
+Possible values:
+
+- 0 — Integers are output without quotes.
+- 1 — Integers are enclosed in quotes.
+
+Default value: 1.
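For instance, a minimal sketch:

```sql
-- Default (1): 64-bit integers are quoted in JSON output.
SELECT toUInt64(42) AS x FORMAT JSONEachRow; -- {"x":"42"}

SET output_format_json_quote_64bit_integers = 0;
SELECT toUInt64(42) AS x FORMAT JSONEachRow; -- {"x":42}
```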
 ## output_format_json_quote_denormals {#settings-output_format_json_quote_denormals}

@@ -1990,6 +1998,16 @@ Possible values:
 Default value: 16.

+## merge_selecting_sleep_ms {#merge_selecting_sleep_ms}
+
+Sleep time for merge selecting when no part is selected. A lower setting triggers selecting tasks in `background_schedule_pool` frequently, which results in a large number of requests to ZooKeeper in large-scale clusters.
+
+Possible values:
+
+- Any positive integer.
+
+Default value: `5000`.
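Since this is a MergeTree-level setting, it can also be set per table; a minimal sketch (the table definition is illustrative):

```sql
-- Select merges less often to reduce ZooKeeper load on a large cluster.
CREATE TABLE t_slow_select (x UInt64) ENGINE = MergeTree ORDER BY x
SETTINGS merge_selecting_sleep_ms = 10000;
```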
 ## parallel_distributed_insert_select {#parallel_distributed_insert_select}

 Enables parallel distributed `INSERT ... SELECT` query.
@@ -3202,3 +3220,14 @@ Default value: `1`.
 **Usage**

 If the setting is set to `0`, the table function does not make Nullable columns and inserts default values instead of NULL. This is also applicable for NULL values inside arrays.
+
+## output_format_arrow_low_cardinality_as_dictionary {#output-format-arrow-low-cardinality-as-dictionary}
+
+Allows to convert the [LowCardinality](../../sql-reference/data-types/lowcardinality.md) type to the `DICTIONARY` type of the [Arrow](../../interfaces/formats.md#data-format-arrow) format for `SELECT` queries.
+
+Possible values:
+
+- 0 — The `LowCardinality` type is not converted to the `DICTIONARY` type.
+- 1 — The `LowCardinality` type is converted to the `DICTIONARY` type.
+
+Default value: `0`.
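For instance, a minimal sketch (the output file name is illustrative):

```sql
SET output_format_arrow_low_cardinality_as_dictionary = 1;
-- The LowCardinality(String) column is emitted as an Arrow DICTIONARY column.
SELECT toLowCardinality('example') AS s INTO OUTFILE 'lc.arrow' FORMAT Arrow;
```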

docs/en/operations/system-tables/data_skipping_indices.md

@@ -8,12 +8,11 @@ Columns:
 - `table` ([String](../../sql-reference/data-types/string.md)) — Table name.
 - `name` ([String](../../sql-reference/data-types/string.md)) — Index name.
 - `type` ([String](../../sql-reference/data-types/string.md)) — Index type.
-- `expr` ([String](../../sql-reference/data-types/string.md)) — Expression used to calculate the index.
-- `granularity` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Number of granules in the block.
+- `expr` ([String](../../sql-reference/data-types/string.md)) — Expression for the index calculation.
+- `granularity` ([UInt64](../../sql-reference/data-types/int-uint.md)) — The number of granules in the block.

 **Example**

 ```sql
 SELECT * FROM system.data_skipping_indices LIMIT 2 FORMAT Vertical;
 ```

docs/en/sql-reference/aggregate-functions/reference/median.md

@@ -34,7 +34,7 @@ Input table:
 Query:

 ``` sql
-SELECT medianDeterministic(val, 1) FROM t
+SELECT medianDeterministic(val, 1) FROM t;
 ```

 Result:
docs/en/sql-reference/data-types/lowcardinality.md

@@ -47,6 +47,7 @@ Settings:
 - [low_cardinality_use_single_dictionary_for_part](../../operations/settings/settings.md#low_cardinality_use_single_dictionary_for_part)
 - [low_cardinality_allow_in_native_format](../../operations/settings/settings.md#low_cardinality_allow_in_native_format)
 - [allow_suspicious_low_cardinality_types](../../operations/settings/settings.md#allow_suspicious_low_cardinality_types)
+- [output_format_arrow_low_cardinality_as_dictionary](../../operations/settings/settings.md#output-format-arrow-low-cardinality-as-dictionary)

 Functions:

@@ -57,5 +58,3 @@ Functions:
 - [A Magical Mystery Tour of the LowCardinality Data Type](https://www.altinity.com/blog/2019/3/27/low-cardinality).
 - [Reducing ClickHouse Storage Cost with the Low Cardinality Type Lessons from an Instana Engineer](https://www.instana.com/blog/reducing-clickhouse-storage-cost-with-the-low-cardinality-type-lessons-from-an-instana-engineer/).
 - [String Optimization (video presentation in Russian)](https://youtu.be/rqf-ILRgBdY?list=PL0Z2YDlm0b3iwXCpEFiOOYmwXzVmjJfEt). [Slides in English](https://github.com/yandex/clickhouse-presentations/raw/master/meetup19/string_optimization.pdf).
-
-[Original article](https://clickhouse.tech/docs/en/sql-reference/data-types/lowcardinality/) <!--hide-->

docs/en/sql-reference/data-types/map.md

@@ -12,9 +12,6 @@ toc_title: Map(key, value)
 - `key` — The key part of the pair. [String](../../sql-reference/data-types/string.md) or [Integer](../../sql-reference/data-types/int-uint.md).
 - `value` — The value part of the pair. [String](../../sql-reference/data-types/string.md), [Integer](../../sql-reference/data-types/int-uint.md) or [Array](../../sql-reference/data-types/array.md).

-!!! warning "Warning"
-    Currently `Map` data type is an experimental feature. To work with it you must set `allow_experimental_map_type = 1`.
-
 To get the value from an `a Map('key', 'value')` column, use `a['key']` syntax. This lookup works now with a linear complexity.

 **Examples**
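A minimal sketch of the syntax described above (the table name is illustrative):

```sql
CREATE TABLE table_map (a Map(String, UInt64)) ENGINE = Memory;
INSERT INTO table_map VALUES ({'key1': 1, 'key2': 10});
SELECT a['key2'] FROM table_map; -- returns 10
```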

docs/en/sql-reference/functions/ext-dict-functions.md

@@ -12,7 +12,7 @@ For information on connecting and configuring external dictionaries, see [Extern
 ## dictGet, dictGetOrDefault, dictGetOrNull {#dictget}

 Retrieves values from an external dictionary.

 ``` sql
 dictGet('dict_name', attr_names, id_expr)
@@ -24,7 +24,7 @@ dictGetOrNull('dict_name', attr_name, id_expr)
 - `dict_name` — Name of the dictionary. [String literal](../../sql-reference/syntax.md#syntax-string-literal).
 - `attr_names` — Name of the column of the dictionary, [String literal](../../sql-reference/syntax.md#syntax-string-literal), or tuple of column names, [Tuple](../../sql-reference/data-types/tuple.md)([String literal](../../sql-reference/syntax.md#syntax-string-literal)).
-- `id_expr` — Key value. [Expression](../../sql-reference/syntax.md#syntax-expressions) returning a [UInt64](../../sql-reference/data-types/int-uint.md) or [Tuple](../../sql-reference/data-types/tuple.md)-type value depending on the dictionary configuration.
+- `id_expr` — Key value. [Expression](../../sql-reference/syntax.md#syntax-expressions) returning dictionary key-type value or [Tuple](../../sql-reference/data-types/tuple.md)-type value depending on the dictionary configuration.
 - `default_value_expr` — Values returned if the dictionary does not contain a row with the `id_expr` key. [Expression](../../sql-reference/syntax.md#syntax-expressions) or [Tuple](../../sql-reference/data-types/tuple.md)([Expression](../../sql-reference/syntax.md#syntax-expressions)), returning the value (or values) in the data types configured for the `attr_names` attribute.

 **Returned value**
@@ -138,7 +138,7 @@ Configure the external dictionary:
             <name>c2</name>
             <type>String</type>
             <null_value></null_value>
         </attribute>
     </structure>
     <lifetime>0</lifetime>
 </dictionary>
@@ -237,7 +237,7 @@ dictHas('dict_name', id_expr)
 **Arguments**

 - `dict_name` — Name of the dictionary. [String literal](../../sql-reference/syntax.md#syntax-string-literal).
-- `id_expr` — Key value. [Expression](../../sql-reference/syntax.md#syntax-expressions) returning a [UInt64](../../sql-reference/data-types/int-uint.md) or [Tuple](../../sql-reference/data-types/tuple.md)-type value depending on the dictionary configuration.
+- `id_expr` — Key value. [Expression](../../sql-reference/syntax.md#syntax-expressions) returning dictionary key-type value or [Tuple](../../sql-reference/data-types/tuple.md)-type value depending on the dictionary configuration.

 **Returned value**
@@ -292,16 +292,16 @@ Type: `UInt8`.
 Returns first-level children as an array of indexes. It is the inverse transformation for [dictGetHierarchy](#dictgethierarchy).

 **Syntax**

 ``` sql
 dictGetChildren(dict_name, key)
 ```

 **Arguments**

 - `dict_name` — Name of the dictionary. [String literal](../../sql-reference/syntax.md#syntax-string-literal).
 - `key` — Key value. [Expression](../../sql-reference/syntax.md#syntax-expressions) returning a [UInt64](../../sql-reference/data-types/int-uint.md)-type value.

 **Returned values**
@@ -339,7 +339,7 @@ SELECT dictGetChildren('hierarchy_flat_dictionary', number) FROM system.numbers
 ## dictGetDescendant {#dictgetdescendant}

 Returns all descendants as if [dictGetChildren](#dictgetchildren) function was applied `level` times recursively.

 **Syntax**

@@ -347,9 +347,9 @@ Returns all descendants as if [dictGetChildren](#dictgetchildren) function was a
 dictGetDescendants(dict_name, key, level)
 ```

 **Arguments**

 - `dict_name` — Name of the dictionary. [String literal](../../sql-reference/syntax.md#syntax-string-literal).
 - `key` — Key value. [Expression](../../sql-reference/syntax.md#syntax-expressions) returning a [UInt64](../../sql-reference/data-types/int-uint.md)-type value.
 - `level` — Hierarchy level. If `level = 0` returns all descendants to the end. [UInt8](../../sql-reference/data-types/int-uint.md).
View File

@ -195,6 +195,41 @@ Result:
└────────────────────┘ └────────────────────┘
``` ```
## h3ToGeo {#h3togeo}
Returns `(lon, lat)` that corresponds to the provided H3 index.
**Syntax**
``` sql
h3ToGeo(h3Index)
```
**Arguments**
- `h3Index` — H3 Index. Type: [UInt64](../../../sql-reference/data-types/int-uint.md).
**Returned values**
- `lon` — Longitude. Type: [Float64](../../../sql-reference/data-types/float.md).
- `lat` — Latitude. Type: [Float64](../../../sql-reference/data-types/float.md).
**Example**
Query:
``` sql
SELECT h3ToGeo(644325524701193974) coordinates;
```
Result:
``` text
┌─coordinates───────────────────────────┐
│ (37.79506616830252,55.71290243145668) │
└───────────────────────────────────────┘
```
## h3kRing {#h3kring} ## h3kRing {#h3kring}
Lists all the [H3](#h3index) hexagons in the raduis of `k` from the given hexagon in random order. Lists all the [H3](#h3index) hexagons in the raduis of `k` from the given hexagon in random order.
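A minimal sketch (the index value is illustrative):

```sql
-- All hexagons within k = 1 of the given H3 index.
SELECT arrayJoin(h3kRing(644325529233966508, 1)) AS h3index;
```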

View File

@ -306,3 +306,49 @@ Result:
└───────────────────────────────────────────────────────────────────────────────────────────────────────┘
```
## toJSONString {#tojsonstring}
Serializes a value to its JSON representation. Various data types and nested structures are supported.
64-bit [integers](../../sql-reference/data-types/int-uint.md) or bigger (like `UInt64` or `Int128`) are enclosed in quotes by default. The [output_format_json_quote_64bit_integers](../../operations/settings/settings.md#session_settings-output_format_json_quote_64bit_integers) setting controls this behavior.
Special values `NaN` and `inf` are replaced with `null`. Enable the [output_format_json_quote_denormals](../../operations/settings/settings.md#settings-output_format_json_quote_denormals) setting to show them.
When serializing an [Enum](../../sql-reference/data-types/enum.md) value, the function outputs its name.
**Syntax**
``` sql
toJSONString(value)
```
**Arguments**
- `value` — Value to serialize. Value may be of any data type.
**Returned value**
- JSON representation of the value.
Type: [String](../../sql-reference/data-types/string.md).
**Example**
The first example shows serialization of a [Map](../../sql-reference/data-types/map.md).
The second example shows some special values wrapped into a [Tuple](../../sql-reference/data-types/tuple.md).
Query:
``` sql
SELECT toJSONString(map('key1', 1, 'key2', 2));
SELECT toJSONString(tuple(1.25, NULL, NaN, +inf, -inf, [])) SETTINGS output_format_json_quote_denormals = 1;
```
Result:
``` text
{"key1":1,"key2":2}
[1.25,null,"nan","inf","-inf",[]]
```
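Since `Enum` values are serialized by name (see above), here is one more illustrative sketch; the enum type is hypothetical:
``` sql
SELECT toJSONString(CAST('apple', 'Enum(\'apple\' = 1, \'pear\' = 2)'));
-- expected to produce the quoted name: "apple"
```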
**See Also**
- [output_format_json_quote_64bit_integers](../../operations/settings/settings.md#session_settings-output_format_json_quote_64bit_integers)
- [output_format_json_quote_denormals](../../operations/settings/settings.md#settings-output_format_json_quote_denormals)

View File

@ -87,6 +87,8 @@ Result:
└───────┴───────┘
```
Note: the names are implementation specific and are subject to change. You should not assume specific column names after applying `untuple`.
Example of using an `EXCEPT` expression:
Query:

View File

@ -1 +0,0 @@
../../en/development/build-osx.md

View File

@ -0,0 +1,125 @@
---
toc_priority: 65
toc_title: Build on Mac OS X
---
# How to Build ClickHouse on Mac OS X {#how-to-build-clickhouse-on-mac-os-x}
The build should work on x86_64 (Intel) machines running macOS 10.15 (Catalina) and higher with the latest Xcode's native AppleClang, Homebrew's vanilla Clang, or GCC compilers.
## Install Homebrew {#install-homebrew}
``` bash
$ /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
```
## Install Xcode and Command Line Tools {#install-xcode-and-command-line-tools}
1. Install the latest [Xcode](https://apps.apple.com/am/app/xcode/id497799835?mt=12) from the App Store.
2. Open it at least once to accept the end-user license agreement; the required components will be installed automatically.
3. Then make sure that the latest Command Line Tools are installed and selected in the system:
``` bash
$ sudo rm -rf /Library/Developer/CommandLineTools
$ sudo xcode-select --install
```
4. Reboot.
## Install Required Compilers, Tools, and Libraries {#install-required-compilers-tools-and-libraries}
``` bash
$ brew update
$ brew install cmake ninja libtool gettext llvm gcc
```
## Checkout ClickHouse Sources {#checkout-clickhouse-sources}
``` bash
$ git clone --recursive git@github.com:ClickHouse/ClickHouse.git # or https://github.com/ClickHouse/ClickHouse.git
```
## Build ClickHouse {#build-clickhouse}
To build using Xcode's native AppleClang compiler:
``` bash
$ cd ClickHouse
$ rm -rf build
$ mkdir build
$ cd build
$ cmake -DCMAKE_BUILD_TYPE=RelWithDebInfo -DENABLE_JEMALLOC=OFF ..
$ cmake --build . --config RelWithDebInfo
$ cd ..
```
To build using Homebrew's vanilla Clang compiler:
``` bash
$ cd ClickHouse
$ rm -rf build
$ mkdir build
$ cd build
$ cmake -DCMAKE_C_COMPILER=$(brew --prefix llvm)/bin/clang -DCMAKE_CXX_COMPILER=$(brew --prefix llvm)/bin/clang++ -DCMAKE_BUILD_TYPE=RelWithDebInfo -DENABLE_JEMALLOC=OFF ..
$ cmake --build . --config RelWithDebInfo
$ cd ..
```
To build using Homebrew's vanilla GCC compiler:
``` bash
$ cd ClickHouse
$ rm -rf build
$ mkdir build
$ cd build
$ cmake -DCMAKE_C_COMPILER=$(brew --prefix gcc)/bin/gcc-10 -DCMAKE_CXX_COMPILER=$(brew --prefix gcc)/bin/g++-10 -DCMAKE_BUILD_TYPE=RelWithDebInfo -DENABLE_JEMALLOC=OFF ..
$ cmake --build . --config RelWithDebInfo
$ cd ..
```
## Caveats {#caveats}
If you intend to run `clickhouse-server`, make sure to increase the system's `maxfiles` variable.
!!! info "Note"
    You'll need to use `sudo`.
1. Create the `/Library/LaunchDaemons/limit.maxfiles.plist` file with the following content:
``` xml
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN"
"http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>Label</key>
<string>limit.maxfiles</string>
<key>ProgramArguments</key>
<array>
<string>launchctl</string>
<string>limit</string>
<string>maxfiles</string>
<string>524288</string>
<string>524288</string>
</array>
<key>RunAtLoad</key>
<true/>
<key>ServiceIPC</key>
<false/>
</dict>
</plist>
```
2. Execute the following command:
``` bash
$ sudo chown root:wheel /Library/LaunchDaemons/limit.maxfiles.plist
```
3. Reboot.
4. To check if it's working, run `ulimit -n`.
[Original article](https://clickhouse.tech/docs/en/development/build_osx/) <!--hide-->

View File

@ -49,6 +49,7 @@ ENGINE = MaterializeMySQL('host:port', ['database' | database], 'user', 'passwor
| DATE, NEWDATE | [Date](../../sql-reference/data-types/date.md) |
| DATETIME, TIMESTAMP | [DateTime](../../sql-reference/data-types/datetime.md) |
| DATETIME2, TIMESTAMP2 | [DateTime64](../../sql-reference/data-types/datetime64.md) |
| ENUM | [Enum](../../sql-reference/data-types/enum.md) |
| STRING | [String](../../sql-reference/data-types/string.md) |
| VARCHAR, VAR_STRING | [String](../../sql-reference/data-types/string.md) |
| BLOB | [String](../../sql-reference/data-types/string.md) |

View File

@ -100,9 +100,9 @@ sudo ./clickhouse install
For other operating systems and the AArch64 architecture, ClickHouse builds are provided as a cross-compiled binary from the latest commit of the `master` branch (with a few hours' delay).
- [macOS](https://builds.clickhouse.tech/master/macos/clickhouse) — `curl -O 'https://builds.clickhouse.tech/master/macos/clickhouse' && chmod a+x ./clickhouse`
- [FreeBSD](https://builds.clickhouse.tech/master/freebsd/clickhouse) — `curl -O 'https://builds.clickhouse.tech/master/freebsd/clickhouse' && chmod a+x ./clickhouse`
- [AArch64](https://builds.clickhouse.tech/master/aarch64/clickhouse) — `curl -O 'https://builds.clickhouse.tech/master/aarch64/clickhouse' && chmod a+x ./clickhouse`
After downloading, you can use `clickhouse client` to connect to the server or `clickhouse local` to process local data.

View File

@ -1165,12 +1165,14 @@ SELECT * FROM topic1_stream;
| `DOUBLE` | [Float64](../sql-reference/data-types/float.md) | `DOUBLE` |
| `DATE32` | [Date](../sql-reference/data-types/date.md) | `UINT16` |
| `DATE64`, `TIMESTAMP` | [DateTime](../sql-reference/data-types/datetime.md) | `UINT32` |
| `STRING`, `BINARY` | [String](../sql-reference/data-types/string.md) | `BINARY` |
| — | [FixedString](../sql-reference/data-types/fixedstring.md) | `BINARY` |
| `DECIMAL` | [Decimal](../sql-reference/data-types/decimal.md) | `DECIMAL` |
| `LIST` | [Array](../sql-reference/data-types/array.md) | `LIST` |
| `STRUCT` | [Tuple](../sql-reference/data-types/tuple.md) | `STRUCT` |
| `MAP` | [Map](../sql-reference/data-types/map.md) | `MAP` |
Arrays can be nested and can have a value of the `Nullable` type as an argument. `Tuple` and `Map` types can also be nested.
ClickHouse supports configurable precision for the `Decimal` format. The `INSERT` query treats the Parquet `DECIMAL` data type as the ClickHouse `Decimal128` type.
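A short sketch of reading such a file from SQL (the file name and structure are hypothetical; `file()` reads relative to the server's `user_files_path`):
``` sql
SELECT * FROM file('data.parquet', 'Parquet', 'id UInt64, name String') LIMIT 10;
```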
@ -1218,12 +1220,17 @@ $ clickhouse-client --query="SELECT * FROM {some_table} FORMAT Parquet" > {some_
| `DOUBLE` | [Float64](../sql-reference/data-types/float.md) | `FLOAT64` |
| `DATE32` | [Date](../sql-reference/data-types/date.md) | `UINT16` |
| `DATE64`, `TIMESTAMP` | [DateTime](../sql-reference/data-types/datetime.md) | `UINT32` |
| `STRING`, `BINARY` | [String](../sql-reference/data-types/string.md) | `BINARY` |
| `STRING`, `BINARY` | [FixedString](../sql-reference/data-types/fixedstring.md) | `BINARY` |
| `DECIMAL` | [Decimal](../sql-reference/data-types/decimal.md) | `DECIMAL` |
| `DECIMAL256` | [Decimal256](../sql-reference/data-types/decimal.md) | `DECIMAL256` |
| `LIST` | [Array](../sql-reference/data-types/array.md) | `LIST` |
| `STRUCT` | [Tuple](../sql-reference/data-types/tuple.md) | `STRUCT` |
| `MAP` | [Map](../sql-reference/data-types/map.md) | `MAP` |
Arrays can be nested and can have a value of the `Nullable` type as an argument. `Tuple` and `Map` types can also be nested.
The `DICTIONARY` type is supported for `INSERT` queries. For `SELECT` queries, there is the [output_format_arrow_low_cardinality_as_dictionary](../operations/settings/settings.md#output-format-arrow-low-cardinality-as-dictionary) setting that allows outputting the [LowCardinality](../sql-reference/data-types/lowcardinality.md) type as `DICTIONARY`.
ClickHouse supports configurable precision for the `Decimal` format. The `INSERT` query treats the Arrow `DECIMAL` data type as the ClickHouse `Decimal128` type.
@ -1276,8 +1283,10 @@ $ clickhouse-client --query="SELECT * FROM {some_table} FORMAT Arrow" > {filenam
| `STRING`, `BINARY` | [String](../sql-reference/data-types/string.md) | `BINARY` |
| `DECIMAL` | [Decimal](../sql-reference/data-types/decimal.md) | `DECIMAL` |
| `LIST` | [Array](../sql-reference/data-types/array.md) | `LIST` |
| `STRUCT` | [Tuple](../sql-reference/data-types/tuple.md) | `STRUCT` |
| `MAP` | [Map](../sql-reference/data-types/map.md) | `MAP` |
Arrays can be nested and can have a value of the `Nullable` type as an argument. `Tuple` and `Map` types can also be nested.
ClickHouse supports configurable precision for the `Decimal` format. The `INSERT` query treats the ORC `DECIMAL` data type as the ClickHouse `Decimal128` type.

View File

@ -1204,8 +1204,15 @@ load_balancing = round_robin
Works for the JSONEachRow and TSKV formats.
## output_format_json_quote_64bit_integers {#session_settings-output_format_json_quote_64bit_integers}
Controls quoting of 64-bit or bigger [integers](../../sql-reference/data-types/int-uint.md) (like `UInt64` or `Int128`) when they are output in [JSON](../../interfaces/formats.md#json) formats.
By default, such numbers are enclosed in quotes. This behavior is compatible with most JavaScript implementations.
Possible values:
- 0 — Integers are output without quotes.
- 1 — Integers are enclosed in quotes.
Default value: 1.
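A quick sketch of the effect (the output line is shown as a comment and is illustrative):
``` sql
SELECT toUInt64(42) AS x
SETTINGS output_format_json_quote_64bit_integers = 0
FORMAT JSON;
-- the "data" section contains "x": 42; with the setting at 1 it would be "x": "42"
```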
## output_format_json_quote_denormals {#settings-output_format_json_quote_denormals}
@ -3059,3 +3066,14 @@ SETTINGS index_granularity = 8192 │
**Usage**
If set to `0`, the table function does not make Nullable columns and inserts default values for the scalar type instead of NULL. This also applies to NULL values inside arrays.
## output_format_arrow_low_cardinality_as_dictionary {#output-format-arrow-low-cardinality-as-dictionary}
Allows converting the [LowCardinality](../../sql-reference/data-types/lowcardinality.md) type to the `DICTIONARY` type of the [Arrow](../../interfaces/formats.md#data-format-arrow) format for `SELECT` queries.
Possible values:
- 0 — The `LowCardinality` type is not converted to the `DICTIONARY` type.
- 1 — The `LowCardinality` type is converted to the `DICTIONARY` type.
Default value: `0`.
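A minimal sketch of using the setting (note that `FORMAT Arrow` produces binary output, so in practice it is piped to a file):
``` sql
SELECT toLowCardinality('a') AS lc
SETTINGS output_format_arrow_low_cardinality_as_dictionary = 1
FORMAT Arrow;
```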

View File

@ -0,0 +1,38 @@
# system.data_skipping_indices {#system-data-skipping-indices}
Contains information about existing data skipping indices in all the tables.
Columns:
- `database` ([String](../../sql-reference/data-types/string.md)) — Database name.
- `table` ([String](../../sql-reference/data-types/string.md)) — Table name.
- `name` ([String](../../sql-reference/data-types/string.md)) — Index name.
- `type` ([String](../../sql-reference/data-types/string.md)) — Index type.
- `expr` ([String](../../sql-reference/data-types/string.md)) — Expression used to calculate the index.
- `granularity` ([UInt64](../../sql-reference/data-types/int-uint.md)) — The number of granules in the data block.
**Example**
```sql
SELECT * FROM system.data_skipping_indices LIMIT 2 FORMAT Vertical;
```
```text
Row 1:
──────
database: default
table: user_actions
name: clicks_idx
type: minmax
expr: clicks
granularity: 1
Row 2:
──────
database: default
table: users
name: contacts_null_idx
type: minmax
expr: assumeNotNull(contacts_null)
granularity: 1
```

View File

@ -4,7 +4,6 @@
Functions:
- `median` — Alias for [quantile](../../../sql-reference/aggregate-functions/reference/quantile.md#quantile).
- `medianDeterministic` — Alias for [quantileDeterministic](../../../sql-reference/aggregate-functions/reference/quantiledeterministic.md#quantiledeterministic).
- `medianExact` — Alias for [quantileExact](../../../sql-reference/aggregate-functions/reference/quantileexact.md#quantileexact).
@ -31,7 +30,7 @@
Query:
``` sql
SELECT medianDeterministic(val, 1) FROM t;
```
Result:
@ -41,4 +40,3 @@ SELECT medianDeterministic(val, 1) FROM t
│ 1.5 │
└─────────────────────────────┘
```

View File

@ -15,7 +15,7 @@ LowCardinality(data_type)
**Parameters**
- `data_type` — [String](string.md), [FixedString](fixedstring.md), [Date](date.md), [DateTime](datetime.md), and numbers excluding the [Decimal](decimal.md) type. `LowCardinality` is not efficient for some data types, see the [allow_suspicious_low_cardinality_types](../../operations/settings/settings.md#allow_suspicious_low_cardinality_types) setting description.
## Description {#lowcardinality-dscr}
@ -23,11 +23,11 @@ LowCardinality(data_type)
The efficiency of the `LowCardinality` data type depends on data diversity. If a dictionary contains less than 10,000 distinct values, ClickHouse mostly shows higher efficiency of data reading and storing. If a dictionary contains more than 100,000 distinct values, ClickHouse can perform worse in comparison with using ordinary data types.
When working with strings, using `LowCardinality` instead of [Enum](enum.md) provides more flexibility in use and often reveals the same or higher efficiency.
## Example
Creating a table with `LowCardinality` columns:
```sql
CREATE TABLE lc_t
@ -43,18 +43,18 @@ ORDER BY id
Settings:
- [low_cardinality_max_dictionary_size](../../operations/settings/settings.md#low_cardinality_max_dictionary_size)
- [low_cardinality_use_single_dictionary_for_part](../../operations/settings/settings.md#low_cardinality_use_single_dictionary_for_part)
- [low_cardinality_allow_in_native_format](../../operations/settings/settings.md#low_cardinality_allow_in_native_format)
- [allow_suspicious_low_cardinality_types](../../operations/settings/settings.md#allow_suspicious_low_cardinality_types)
- [output_format_arrow_low_cardinality_as_dictionary](../../operations/settings/settings.md#output-format-arrow-low-cardinality-as-dictionary)
Functions:
- [toLowCardinality](../functions/type-conversion-functions.md#tolowcardinality)
## See Also
- [A Magical Mystery Tour of the LowCardinality Data Type](https://www.altinity.com/blog/2019/3/27/low-cardinality).
- [Reducing Clickhouse Storage Cost with the Low Cardinality Type Lessons from an Instana Engineer](https://www.instana.com/blog/reducing-clickhouse-storage-cost-with-the-low-cardinality-type-lessons-from-an-instana-engineer/).
- [String Optimization (video presentation in Russian)](https://youtu.be/rqf-ILRgBdY?list=PL0Z2YDlm0b3iwXCpEFiOOYmwXzVmjJfEt). [Slides in English](https://github.com/yandex/clickhouse-presentations/raw/master/meetup19/string_optimization.pdf).

View File

@ -12,9 +12,6 @@ toc_title: Map(key, value)
- `key` — The key part of the pair. [String](../../sql-reference/data-types/string.md) or [Integer](../../sql-reference/data-types/int-uint.md).
- `value` — The value part of the pair. [String](../../sql-reference/data-types/string.md), [Integer](../../sql-reference/data-types/int-uint.md), or [Array](../../sql-reference/data-types/array.md).
!!! warning "Warning"
    Currently, the `Map` data type is an experimental feature. To use it, set `allow_experimental_map_type = 1`.
To get the value from an `a Map('key', 'value')` column, use the `a['key']` syntax. This lookup currently works with linear complexity.
**Examples**
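A short sketch of the `a['key']` lookup described above (the table name is illustrative):
``` sql
CREATE TABLE table_map (a Map(String, UInt64)) ENGINE = Memory;
INSERT INTO table_map VALUES ({'key1':1, 'key2':10});
SELECT a['key2'] FROM table_map;
```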

View File

@ -306,3 +306,51 @@ SELECT JSONExtractKeysAndValuesRaw('{"a": [-100, 200.0], "b":{"c": {"d": "hello"
│ [('d','"hello"'),('f','"world"')] │ │ [('d','"hello"'),('f','"world"')] │
└───────────────────────────────────────────────────────────────────────────────────────────────────────┘ └───────────────────────────────────────────────────────────────────────────────────────────────────────┘
``` ```
## toJSONString {#tojsonstring}
Serializes a value to its JSON representation. Various data types and nested structures are supported.
64-bit [integers](../../sql-reference/data-types/int-uint.md) or bigger (like `UInt64` or `Int128`) are enclosed in quotes by default. The [output_format_json_quote_64bit_integers](../../operations/settings/settings.md#session_settings-output_format_json_quote_64bit_integers) setting controls this behavior.
Special values `NaN` and `inf` are replaced with `null`. Enable the [output_format_json_quote_denormals](../../operations/settings/settings.md#settings-output_format_json_quote_denormals) setting to show them.
When serializing an [Enum](../../sql-reference/data-types/enum.md) value, the function outputs its name.
**Syntax**
``` sql
toJSONString(value)
```
**Arguments**
- `value` — Value to serialize. Value may be of any data type.
**Returned value**
- JSON representation of the value.
Type: [String](../../sql-reference/data-types/string.md).
**Example**
The first example shows serialization of a [Map](../../sql-reference/data-types/map.md).
The second example shows some special values wrapped into a [Tuple](../../sql-reference/data-types/tuple.md).
Query:
``` sql
SELECT toJSONString(map('key1', 1, 'key2', 2));
SELECT toJSONString(tuple(1.25, NULL, NaN, +inf, -inf, [])) SETTINGS output_format_json_quote_denormals = 1;
```
Result:
``` text
{"key1":1,"key2":2}
[1.25,null,"nan","inf","-inf",[]]
```
**See Also**
- [output_format_json_quote_64bit_integers](../../operations/settings/settings.md#session_settings-output_format_json_quote_64bit_integers)
- [output_format_json_quote_denormals](../../operations/settings/settings.md#settings-output_format_json_quote_denormals)

View File

@ -17,7 +17,7 @@ toc_title: PARTITION
- [CLEAR INDEX IN PARTITION](#alter_clear-index-partition) — reset the built secondary indexes for the specified partition;
- [FREEZE PARTITION](#alter_freeze-partition) — create a backup of a partition;
- [UNFREEZE PARTITION](#alter_unfreeze-partition) — remove a backup of a partition;
- [FETCH PARTITION\|PART](#alter_fetch-partition) — download a partition/part from another server;
- [MOVE PARTITION\|PART](#alter_move-partition) — move a partition/part to another disk or volume.
- [UPDATE IN PARTITION](#update-in-partition) — update data in a partition by condition.
- [DELETE IN PARTITION](#delete-in-partition) — delete data in a partition by condition.
@ -209,29 +209,35 @@ ALTER TABLE 'table_name' UNFREEZE [PARTITION 'part_expr'] WITH NAME 'backup_name
Removes "frozen" partitions with the specified name from the disk. If the `PARTITION` clause is omitted, the query removes the backup of all partitions at once.
## FETCH PARTITION\|PART {#alter_fetch-partition}
``` sql
ALTER TABLE table_name FETCH PARTITION|PART partition_expr FROM 'path-in-zookeeper'
```
Downloads a partition from another server. This query only works for replicated tables.
The query does the following:
1. Downloads the partition/part from the specified shard. The shard's path is specified in the `FROM` clause (path-in-zookeeper). Note that you must specify the path to the shard in ZooKeeper.
2. Puts the downloaded data into the `detached` directory of the `table_name` table. Use the [ATTACH PARTITION\|PART](#alter_attach-partition) query to attach the data to the table.
For example:
1. FETCH PARTITION
``` sql
ALTER TABLE users FETCH PARTITION 201902 FROM '/clickhouse/tables/01-01/visits';
ALTER TABLE users ATTACH PARTITION 201902;
```
2. FETCH PART
``` sql
ALTER TABLE users FETCH PART 201901_2_2_0 FROM '/clickhouse/tables/01-01/visits';
ALTER TABLE users ATTACH PART 201901_2_2_0;
```
Note that:
- The `ALTER TABLE t FETCH PARTITION|PART` query is not replicated. It places the partition or part into the `detached` directory only on the local server.
- The `ALTER TABLE t ATTACH` query is replicated. It adds the data to the table on all replicas: on one of the replicas the data is added from the `detached` directory, on the others from the neighboring replicas.
Before downloading, the system checks if the partition exists and whether its structure matches the table structure. The most appropriate replica is selected automatically from the healthy replicas.

View File

@ -282,7 +282,7 @@ GRANT INSERT(x,y) ON db.table TO john
- `ALTER MATERIALIZE TTL`. Level: `TABLE`. Aliases: `MATERIALIZE TTL`
- `ALTER SETTINGS`. Level: `TABLE`. Aliases: `ALTER SETTING`, `ALTER MODIFY SETTING`, `MODIFY SETTING`
- `ALTER MOVE PARTITION`. Level: `TABLE`. Aliases: `ALTER MOVE PART`, `MOVE PARTITION`, `MOVE PART`
- `ALTER FETCH PARTITION`. Level: `TABLE`. Aliases: `ALTER FETCH PART`, `FETCH PARTITION`, `FETCH PART`
- `ALTER FREEZE PARTITION`. Level: `TABLE`. Aliases: `FREEZE PARTITION`
- `ALTER VIEW`. Level: `GROUP`
- `ALTER VIEW REFRESH`. Level: `VIEW`. Aliases: `ALTER LIVE VIEW REFRESH`, `REFRESH VIEW`

View File

@ -57,9 +57,9 @@ ClickHouse Web 界面 [Tabix](https://github.com/tabixio/tabix).
- Table preview.
- Autocompletion.
### clickhouse-cli {#clickhouse-cli}
[clickhouse-cli](https://github.com/hatarist/clickhouse-cli) is an alternative command-line client for ClickHouse, written in Python 3.
Features:
@ -68,15 +68,15 @@ ClickHouse Web 界面 [Tabix](https://github.com/tabixio/tabix).
- Pager support for the data output.
- Custom PostgreSQL-like commands.
### clickhouse-flamegraph {#clickhouse-flamegraph}
[clickhouse-flamegraph](https://github.com/Slach/clickhouse-flamegraph) is a specialized tool to visualize `system.trace_log` as a [flamegraph](http://www.brendangregg.com/flamegraphs.html).
## Commercial {#shang-ye}
### Holistics {#holistics-software}
[Holistics](https://www.holistics.io/) was listed by Gartner's FrontRunners in 2019 as one of the top two business intelligence tools ranked for usability. Holistics is a full-stack data platform and business intelligence tool built on SQL for setting up analytics processes.
Features:

View File

@ -1,13 +1,8 @@
---
machine_translated: true
machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
toc_priority: 42
toc_title: mysql
---
# mysql {#mysql}
Allows `SELECT` and `INSERT` queries to be performed on data that is stored on a remote MySQL server.
**Syntax**
``` sql
mysql('host:port', 'database', 'table', 'user', 'password'[, replace_query, 'on_duplicate_clause']);
@ -15,31 +10,44 @@ mysql('host:port', 'database', 'table', 'user', 'password'[, replace_query, 'on_
**Parameters**
- `host:port` — MySQL server address.
- `database` — Remote database name.
- `table` — Remote table name.
- `user` — MySQL user.
- `password` — User password.
- `replace_query` — Flag that converts an `INSERT INTO` query to `REPLACE INTO`. If `replace_query=1`, the query is replaced.
- `on_duplicate_clause` — Adds the `ON DUPLICATE KEY on_duplicate_clause` expression to the `INSERT` query. It can be specified only with `replace_query = 0`; if you simultaneously set `replace_query = 1` and `on_duplicate_clause`, ClickHouse generates an exception.
Example: `INSERT INTO t (c1,c2) VALUES ('a', 2) ON DUPLICATE KEY UPDATE c2 = c2 + 1`
Here `on_duplicate_clause` is `UPDATE c2 = c2 + 1`. See the MySQL documentation to find which `on_duplicate_clause` you can use with the `ON DUPLICATE KEY` clause.
Simple `WHERE` clauses such as `=, !=, >, >=, <, <=` are executed on the MySQL server right away. The rest of the conditions and the `LIMIT` sampling constraint are executed in ClickHouse only after the query to MySQL finishes.
Multiple replicas can be queried by listing their addresses separated by `|`, for example:
```sql
SELECT name FROM mysql(`mysql{1|2|3}:3306`, 'mysql_database', 'mysql_table', 'user', 'password');
```
```sql
SELECT name FROM mysql(`mysql1:3306|mysql2:3306|mysql3:3306`, 'mysql_database', 'mysql_table', 'user', 'password');
```
**Returned value**
A table object with the same columns as the original MySQL table.
!!! note "Note"
    In an `INSERT` query, to distinguish the table function `mysql(...)` from a table name with a list of column names, you must use the keywords `FUNCTION` or `TABLE FUNCTION`. See the examples below.
## Usage Example {#usage-example}
@ -66,7 +74,7 @@ mysql> select * from test;
1 row in set (0,00 sec)
```
Selecting data from ClickHouse:
``` sql
SELECT * FROM mysql('localhost:3306', 'test', 'test', 'bayonet', '123')
@ -78,6 +86,21 @@ SELECT * FROM mysql('localhost:3306', 'test', 'test', 'bayonet', '123')
└────────┴──────────────┴───────┴────────────────┘
```
Replacement and insertion:
```sql
INSERT INTO FUNCTION mysql('localhost:3306', 'test', 'test', 'bayonet', '123', 1) (int_id, float) VALUES (1, 3);
INSERT INTO TABLE FUNCTION mysql('localhost:3306', 'test', 'test', 'bayonet', '123', 0, 'UPDATE int_id = int_id + 1') (int_id, float) VALUES (1, 4);
SELECT * FROM mysql('localhost:3306', 'test', 'test', 'bayonet', '123');
```
```text
┌─int_id─┬─float─┐
│ 1 │ 3 │
│ 2 │ 4 │
└────────┴───────┘
```
## See Also {#see-also}
- [The 'MySQL' table engine](../../engines/table-engines/integrations/mysql.md)

View File

@ -430,6 +430,7 @@ private:
{TokenType::ClosingRoundBracket, Replxx::Color::BROWN},
{TokenType::OpeningSquareBracket, Replxx::Color::BROWN},
{TokenType::ClosingSquareBracket, Replxx::Color::BROWN},
{TokenType::DoubleColon, Replxx::Color::BROWN},
{TokenType::OpeningCurlyBrace, Replxx::Color::INTENSE},
{TokenType::ClosingCurlyBrace, Replxx::Color::INTENSE},

View File

@ -388,24 +388,32 @@ void LocalServer::processQueries()
/// Use the same query_id (and thread group) for all queries
CurrentThread::QueryScope query_scope_holder(context);
/// Set progress show
need_render_progress = config().getBool("progress", false);
std::function<void()> finalize_progress;
if (need_render_progress)
{
/// Set progress callback, which can be run from multiple threads.
context->setProgressCallback([&](const Progress & value)
{
/// Write progress only if progress was updated
if (progress_indication.updateProgress(value))
progress_indication.writeProgress();
});
/// Set finalizing callback for progress, which is called right before finalizing query output.
finalize_progress = [&]()
{
progress_indication.clearProgressOutput();
};
/// Set callback for file processing progress.
progress_indication.setFileProgressCallback(context);
}
bool echo_queries = config().hasOption("echo") || config().hasOption("verbose");
if (need_render_progress)
progress_indication.setFileProgressCallback(context);
std::exception_ptr exception;
for (const auto & query : queries)
@ -425,7 +433,7 @@ void LocalServer::processQueries()
try
{
executeQuery(read_buf, write_buf, /* allow_into_outfile = */ true, context, {}, finalize_progress);
}
catch (...)
{

View File

@ -477,17 +477,6 @@ int Server::main(const std::vector<std::string> & /*args*/)
CurrentMetrics::set(CurrentMetrics::Revision, ClickHouseRevision::getVersionRevision());
CurrentMetrics::set(CurrentMetrics::VersionInteger, ClickHouseRevision::getVersionInteger());
if (ThreadFuzzer::instance().isEffective())
LOG_WARNING(log, "ThreadFuzzer is enabled. Application will run slowly and unstable.");
#if !defined(NDEBUG) || !defined(__OPTIMIZE__)
LOG_WARNING(log, "Server was built in debug mode. It will work slowly.");
#endif
#if defined(SANITIZER)
LOG_WARNING(log, "Server was built with sanitizer. It will work slowly.");
#endif
/** Context contains all that query execution is dependent:
* settings, available functions, data types, aggregate functions, databases, ...
*/
@ -497,6 +486,18 @@ int Server::main(const std::vector<std::string> & /*args*/)
global_context->makeGlobalContext();
global_context->setApplicationType(Context::ApplicationType::SERVER);
#if !defined(NDEBUG) || !defined(__OPTIMIZE__)
global_context->addWarningMessage("Server was built in debug mode. It will work slowly.");
#endif
if (ThreadFuzzer::instance().isEffective())
global_context->addWarningMessage("ThreadFuzzer is enabled. Application will run slowly and unstable.");
#if defined(SANITIZER)
global_context->addWarningMessage("Server was built with sanitizer. It will work slowly.");
#endif
// Initialize global thread pool. Do it before we fetch configs from zookeeper
// nodes (`from_zk`), because ZooKeeper interface uses the pool. We will
// ignore `max_thread_pool_size` in configs we fetch from ZK, but oh well.
@ -552,8 +553,10 @@ int Server::main(const std::vector<std::string> & /*args*/)
if (ptrace(PTRACE_TRACEME, 0, nullptr, nullptr) == -1)
{
/// Program is run under debugger. Modification of its binary image is ok for breakpoints.
global_context->addWarningMessage(
fmt::format("Server is run under debugger and its binary image is modified (most likely with breakpoints).",
calculated_binary_hash)
);
}
else
{
@ -636,7 +639,7 @@ int Server::main(const std::vector<std::string> & /*args*/)
}
else
{
global_context->addWarningMessage(message);
}
}

View File

@ -9,7 +9,7 @@
Do not use any JavaScript or CSS frameworks or preprocessors.
This HTML page should not require any build systems (node.js, npm, gulp, etc.)
This HTML page should not be minified, instead it should be reasonably minimalistic by itself.
This HTML page should not load any external resources on load.
(CSS and JavaScript must be embedded directly to the page. No external fonts or images should be loaded).
This UI should look as lightweight, clean and fast as possible.
All UI elements must be aligned in pixel-perfect way.
@ -343,13 +343,18 @@
/// Save query in history only if it is different.
let previous_query = '';
const current_url = new URL(window.location);
if (location.protocol != 'file:') {
const server_address = current_url.searchParams.get('url');
if (server_address) {
document.getElementById('url').value = server_address;
} else if (location.protocol != 'file:') {
/// Substitute the address of the server where the page is served.
document.getElementById('url').value = location.origin;
}
/// Substitute user name if it's specified in the query string
const user_from_url = current_url.searchParams.get('user');
if (user_from_url) {
document.getElementById('user').value = user_from_url;
}
@ -361,7 +366,9 @@
let user = document.getElementById('user').value;
let password = document.getElementById('password').value;
let server_address = document.getElementById('url').value;
let url = server_address +
/// Ask server to allow cross-domain requests.
'?add_http_cors_header=1' +
'&user=' + encodeURIComponent(user) +
@ -390,11 +397,18 @@
response: this.response.length > 100000 ? null : this.response /// Lower than the browser's limit.
};
let title = "ClickHouse Query: " + query;
let url = window.location.pathname + '?user=' + encodeURIComponent(user) + '#' + window.btoa(query);
let history_url = window.location.pathname + '?user=' + encodeURIComponent(user);
if (server_address != location.origin) {
/// Save server's address in URL if it's not identical to the address of the play UI.
history_url += '&url=' + encodeURIComponent(server_address);
}
history_url += '#' + window.btoa(query);
if (previous_query == '') {
history.replaceState(state, title, history_url);
} else {
history.pushState(state, title, history_url);
}
document.title = title;
previous_query = query;

View File

@ -173,6 +173,7 @@ enum class AccessType
M(MONGO, "", GLOBAL, SOURCES) \
M(MYSQL, "", GLOBAL, SOURCES) \
M(POSTGRES, "", GLOBAL, SOURCES) \
M(SQLITE, "", GLOBAL, SOURCES) \
M(ODBC, "", GLOBAL, SOURCES) \
M(JDBC, "", GLOBAL, SOURCES) \
M(HDFS, "", GLOBAL, SOURCES) \

View File

@ -9,6 +9,14 @@
#include <AggregateFunctions/IAggregateFunction.h>
#if !defined(ARCADIA_BUILD)
# include <Common/config.h>
#endif
#if USE_EMBEDDED_COMPILER
# include <llvm/IR/IRBuilder.h>
# include <DataTypes/Native.h>
#endif
namespace DB
{
@ -21,6 +29,21 @@ struct AggregateFunctionGroupBitOrData
T value = 0;
static const char * name() { return "groupBitOr"; }
void update(T x) { value |= x; }
#if USE_EMBEDDED_COMPILER
static void compileCreate(llvm::IRBuilderBase & builder, llvm::Value * value_ptr)
{
auto type = toNativeType<T>(builder);
builder.CreateStore(llvm::Constant::getNullValue(type), value_ptr);
}
static llvm::Value* compileUpdate(llvm::IRBuilderBase & builder, llvm::Value * lhs, llvm::Value * rhs)
{
return builder.CreateOr(lhs, rhs);
}
#endif
};
template <typename T>
@ -29,6 +52,21 @@ struct AggregateFunctionGroupBitAndData
T value = -1; /// Two's complement arithmetic, sign extension.
static const char * name() { return "groupBitAnd"; }
void update(T x) { value &= x; }
#if USE_EMBEDDED_COMPILER
static void compileCreate(llvm::IRBuilderBase & builder, llvm::Value * value_ptr)
{
auto type = toNativeType<T>(builder);
builder.CreateStore(llvm::ConstantInt::get(type, -1), value_ptr);
}
static llvm::Value* compileUpdate(llvm::IRBuilderBase & builder, llvm::Value * lhs, llvm::Value * rhs)
{
return builder.CreateAnd(lhs, rhs);
}
#endif
};
template <typename T>
@ -37,6 +75,21 @@ struct AggregateFunctionGroupBitXorData
T value = 0;
static const char * name() { return "groupBitXor"; }
void update(T x) { value ^= x; }
#if USE_EMBEDDED_COMPILER
static void compileCreate(llvm::IRBuilderBase & builder, llvm::Value * value_ptr)
{
auto type = toNativeType<T>(builder);
builder.CreateStore(llvm::Constant::getNullValue(type), value_ptr);
}
static llvm::Value* compileUpdate(llvm::IRBuilderBase & builder, llvm::Value * lhs, llvm::Value * rhs)
{
return builder.CreateXor(lhs, rhs);
}
#endif
};
@ -45,7 +98,7 @@ template <typename T, typename Data>
class AggregateFunctionBitwise final : public IAggregateFunctionDataHelper<Data, AggregateFunctionBitwise<T, Data>>
{
public:
explicit AggregateFunctionBitwise(const DataTypePtr & type)
: IAggregateFunctionDataHelper<Data, AggregateFunctionBitwise<T, Data>>({type}, {}) {}
String getName() const override { return Data::name(); }
@ -81,6 +134,68 @@ public:
{
assert_cast<ColumnVector<T> &>(to).getData().push_back(this->data(place).value);
}
#if USE_EMBEDDED_COMPILER
bool isCompilable() const override
{
auto return_type = getReturnType();
return canBeNativeType(*return_type);
}
void compileCreate(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr) const override
{
llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);
auto * return_type = toNativeType(b, getReturnType());
auto * value_ptr = b.CreatePointerCast(aggregate_data_ptr, return_type->getPointerTo());
Data::compileCreate(builder, value_ptr);
}
void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypes &, const std::vector<llvm::Value *> & argument_values) const override
{
llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);
auto * return_type = toNativeType(b, getReturnType());
auto * value_ptr = b.CreatePointerCast(aggregate_data_ptr, return_type->getPointerTo());
auto * value = b.CreateLoad(return_type, value_ptr);
const auto & argument_value = argument_values[0];
auto * result_value = Data::compileUpdate(builder, value, argument_value);
b.CreateStore(result_value, value_ptr);
}
void compileMerge(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_dst_ptr, llvm::Value * aggregate_data_src_ptr) const override
{
llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);
auto * return_type = toNativeType(b, getReturnType());
auto * value_dst_ptr = b.CreatePointerCast(aggregate_data_dst_ptr, return_type->getPointerTo());
auto * value_dst = b.CreateLoad(return_type, value_dst_ptr);
auto * value_src_ptr = b.CreatePointerCast(aggregate_data_src_ptr, return_type->getPointerTo());
auto * value_src = b.CreateLoad(return_type, value_src_ptr);
auto * result_value = Data::compileUpdate(builder, value_dst, value_src);
b.CreateStore(result_value, value_dst_ptr);
}
llvm::Value * compileGetResult(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr) const override
{
llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);
auto * return_type = toNativeType(b, getReturnType());
auto * value_ptr = b.CreatePointerCast(aggregate_data_ptr, return_type->getPointerTo());
return b.CreateLoad(return_type, value_ptr);
}
#endif
};

View File

@ -101,6 +101,24 @@ struct AggregateFunctionSumData
{
const auto * end = ptr + count;
if constexpr (
(is_integer_v<T> && !is_big_int_v<T>)
|| (IsDecimalNumber<T> && !std::is_same_v<T, Decimal256> && !std::is_same_v<T, Decimal128>))
{
/// For integers we can vectorize the operation if we replace the null check using a multiplication (by 0 for null, 1 for not null)
/// https://quick-bench.com/q/MLTnfTvwC2qZFVeWHfOBR3U7a8I
T local_sum{};
while (ptr < end)
{
T multiplier = !*null_map;
Impl::add(local_sum, *ptr * multiplier);
++ptr;
++null_map;
}
Impl::add(sum, local_sum);
return;
}
if constexpr (std::is_floating_point_v<T>)
{
constexpr size_t unroll_count = 128 / sizeof(T);

View File

@ -76,6 +76,10 @@ add_headers_and_sources(clickhouse_common_io IO)
add_headers_and_sources(clickhouse_common_io IO/S3)
list (REMOVE_ITEM clickhouse_common_io_sources Common/malloc.cpp Common/new_delete.cpp)
if (USE_SQLITE)
add_headers_and_sources(dbms Databases/SQLite)
endif()
if(USE_RDKAFKA)
add_headers_and_sources(dbms Storages/Kafka)
endif()
@ -415,6 +419,11 @@ if (USE_AWS_S3)
target_include_directories (clickhouse_common_io SYSTEM BEFORE PUBLIC ${AWS_S3_INCLUDE_DIR})
endif()
if (USE_S2_GEOMETRY)
dbms_target_link_libraries (PUBLIC ${S2_GEOMETRY_LIBRARY})
dbms_target_include_directories (SYSTEM BEFORE PUBLIC ${S2_GEOMETRY_INCLUDE_DIR})
endif()
if (USE_BROTLI)
target_link_libraries (clickhouse_common_io PRIVATE ${BROTLI_LIBRARY})
target_include_directories (clickhouse_common_io SYSTEM BEFORE PRIVATE ${BROTLI_INCLUDE_DIR})
@ -425,6 +434,10 @@ if (USE_AMQPCPP)
dbms_target_include_directories (SYSTEM BEFORE PUBLIC ${AMQPCPP_INCLUDE_DIR})
endif()
if (USE_SQLITE)
dbms_target_link_libraries(PUBLIC sqlite)
endif()
if (USE_CASSANDRA)
dbms_target_link_libraries(PUBLIC ${CASSANDRA_LIBRARY})
dbms_target_include_directories (SYSTEM BEFORE PUBLIC ${CASS_INCLUDE_DIR})

View File

@ -298,11 +298,19 @@ void ConfigProcessor::doIncludesRecursive(
{
const auto * subst = attributes->getNamedItem(attr_name);
attr_nodes[attr_name] = subst;
substs_count += static_cast<size_t>(subst != nullptr);
}
if (substs_count > 1) /// only one substitution is allowed
throw Poco::Exception("More than one substitution attribute is set for element <" + node->nodeName() + ">");
if (node->nodeName() == "include")
{
if (node->hasChildNodes())
throw Poco::Exception("<include> element must have no children");
if (substs_count == 0)
throw Poco::Exception("No substitution attributes set for element <include>, must have exactly one");
}
/// Replace the original contents, not add to it.
bool replace = attributes->getNamedItem("replace");
@ -320,37 +328,57 @@ void ConfigProcessor::doIncludesRecursive(
else if (throw_on_bad_incl) else if (throw_on_bad_incl)
throw Poco::Exception(error_msg + name); throw Poco::Exception(error_msg + name);
else else
{
if (node->nodeName() == "include")
node->parentNode()->removeChild(node);
LOG_WARNING(log, "{}{}", error_msg, name); LOG_WARNING(log, "{}{}", error_msg, name);
}
} }
else else
{ {
Element & element = dynamic_cast<Element &>(*node); /// Replace the whole node not just contents.
if (node->nodeName() == "include")
for (const auto & attr_name : SUBSTITUTION_ATTRS)
element.removeAttribute(attr_name);
if (replace)
{ {
while (Node * child = node->firstChild()) const NodeListPtr children = node_to_include->childNodes();
node->removeChild(child); for (size_t i = 0, size = children->length(); i < size; ++i)
{
NodePtr new_node = config->importNode(children->item(i), true);
node->parentNode()->insertBefore(new_node, node);
}
element.removeAttribute("replace"); node->parentNode()->removeChild(node);
} }
else
const NodeListPtr children = node_to_include->childNodes();
for (size_t i = 0, size = children->length(); i < size; ++i)
{ {
NodePtr new_node = config->importNode(children->item(i), true); Element & element = dynamic_cast<Element &>(*node);
node->appendChild(new_node);
}
const NamedNodeMapPtr from_attrs = node_to_include->attributes(); for (const auto & attr_name : SUBSTITUTION_ATTRS)
for (size_t i = 0, size = from_attrs->length(); i < size; ++i) element.removeAttribute(attr_name);
{
element.setAttributeNode(dynamic_cast<Attr *>(config->importNode(from_attrs->item(i), true)));
}
included_something = true; if (replace)
{
while (Node * child = node->firstChild())
node->removeChild(child);
element.removeAttribute("replace");
}
const NodeListPtr children = node_to_include->childNodes();
for (size_t i = 0, size = children->length(); i < size; ++i)
{
NodePtr new_node = config->importNode(children->item(i), true);
node->appendChild(new_node);
}
const NamedNodeMapPtr from_attrs = node_to_include->attributes();
for (size_t i = 0, size = from_attrs->length(); i < size; ++i)
{
element.setAttributeNode(dynamic_cast<Attr *>(config->importNode(from_attrs->item(i), true)));
}
included_something = true;
}
} }
}; };

View File

@@ -10,16 +10,10 @@ namespace fs = std::filesystem;
 namespace DB
 {

-/// Checks if file exists without throwing an exception but with message in console.
-bool safeFsExists(const auto & path)
+bool safeFsExists(const String & path)
 {
     std::error_code ec;
-    bool res = fs::exists(path, ec);
-    if (ec)
-    {
-        std::cerr << "Can't check '" << path << "': [" << ec.value() << "] " << ec.message() << std::endl;
-    }
-    return res;
+    return fs::exists(path, ec);
 };

 bool configReadClient(Poco::Util::LayeredConfiguration & config, const std::string & home_path)

@ -558,6 +558,9 @@
M(588, DISTRIBUTED_BROKEN_BATCH_INFO) \ M(588, DISTRIBUTED_BROKEN_BATCH_INFO) \
M(589, DISTRIBUTED_BROKEN_BATCH_FILES) \ M(589, DISTRIBUTED_BROKEN_BATCH_FILES) \
M(590, CANNOT_SYSCONF) \ M(590, CANNOT_SYSCONF) \
M(591, SQLITE_ENGINE_ERROR) \
M(592, DATA_ENCRYPTION_ERROR) \
M(593, ZERO_COPY_REPLICATION_ERROR) \
\ \
M(998, POSTGRESQL_CONNECTION_FAILURE) \ M(998, POSTGRESQL_CONNECTION_FAILURE) \
M(999, KEEPER_EXCEPTION) \ M(999, KEEPER_EXCEPTION) \

View File

@@ -237,7 +237,12 @@ public:
    // 1. Always memcpy 8 times bytes
    // 2. Use switch case extension to generate fast dispatching table
    // 3. Funcs are named callables that can be force_inlined
+   //
    // NOTE: It relies on Little Endianness
+   //
+   // NOTE: It requires padded to 8 bytes keys (IOW you cannot pass
+   // std::string here, but you can pass i.e. ColumnString::getDataAt()),
+   // since it copies 8 bytes at a time.
    template <typename Self, typename KeyHolder, typename Func>
    static auto ALWAYS_INLINE dispatch(Self & self, KeyHolder && key_holder, Func && func)
    {

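The new NOTE matters because the dispatch path reads keys in whole 8-byte chunks. A hedged standalone illustration of why the padding is required (not the actual hash-table code):

    #include <cstdint>
    #include <cstring>

    /// Loads the next 8 bytes of a key with one memcpy, which compilers turn
    /// into a single unaligned load. This is only safe if the underlying
    /// buffer is padded to a multiple of 8 bytes; reading the tail of an
    /// unpadded std::string this way could touch memory past the allocation.
    uint64_t load8(const char * pos)
    {
        uint64_t chunk;
        std::memcpy(&chunk, pos, sizeof(chunk));
        return chunk;
    }
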
@@ -22,10 +22,6 @@
     M(WriteBufferFromFileDescriptorWrite, "Number of writes (write/pwrite) to a file descriptor. Does not include sockets.") \
     M(WriteBufferFromFileDescriptorWriteFailed, "Number of times the write (write/pwrite) to a file descriptor have failed.") \
     M(WriteBufferFromFileDescriptorWriteBytes, "Number of bytes written to file descriptors. If the file is compressed, this will show compressed data size.") \
-    M(ReadBufferAIORead, "") \
-    M(ReadBufferAIOReadBytes, "") \
-    M(WriteBufferAIOWrite, "") \
-    M(WriteBufferAIOWriteBytes, "") \
     M(ReadCompressedBytes, "Number of bytes (the number of bytes before decompression) read from compressed sources (files, network).") \
     M(CompressedReadBufferBlocks, "Number of compressed blocks (the blocks of data that are compressed independent of each other) read from compressed sources (files, network).") \
     M(CompressedReadBufferBytes, "Number of uncompressed bytes (the number of bytes after decompression) read from compressed sources (files, network).") \
@@ -34,6 +30,10 @@
     M(UncompressedCacheWeightLost, "") \
     M(MMappedFileCacheHits, "") \
     M(MMappedFileCacheMisses, "") \
+    M(AIOWrite, "Number of writes with Linux or FreeBSD AIO interface") \
+    M(AIOWriteBytes, "Number of bytes written with Linux or FreeBSD AIO interface") \
+    M(AIORead, "Number of reads with Linux or FreeBSD AIO interface") \
+    M(AIOReadBytes, "Number of bytes read with Linux or FreeBSD AIO interface") \
     M(IOBufferAllocs, "") \
     M(IOBufferAllocBytes, "") \
     M(ArenaAllocChunks, "") \
@@ -43,8 +43,8 @@
     M(MarkCacheHits, "") \
     M(MarkCacheMisses, "") \
     M(CreatedReadBufferOrdinary, "") \
-    M(CreatedReadBufferAIO, "") \
-    M(CreatedReadBufferAIOFailed, "") \
+    M(CreatedReadBufferDirectIO, "") \
+    M(CreatedReadBufferDirectIOFailed, "") \
     M(CreatedReadBufferMMap, "") \
     M(CreatedReadBufferMMapFailed, "") \
     M(DiskReadElapsedMicroseconds, "Total time spent waiting for read syscall. This include reads from page cache.") \

@@ -4,9 +4,6 @@
 #include <Common/UnicodeBar.h>
 #include <Databases/DatabaseMemory.h>

-/// FIXME: progress bar in clickhouse-local needs to be cleared after query execution
-/// - same as it is now in clickhouse-client. Also there is no writeFinalProgress call
-/// in clickhouse-local.
 namespace DB
 {

@@ -45,6 +45,8 @@ struct ZooKeeperRequest : virtual Request
     /// If the request was sent and we didn't get the response and the error happens, then we cannot be sure was it processed or not.
     bool probably_sent = false;

+    bool restored_from_zookeeper_log = false;
+
     ZooKeeperRequest() = default;
     ZooKeeperRequest(const ZooKeeperRequest &) = default;
     virtual ~ZooKeeperRequest() override = default;
@@ -172,6 +174,9 @@ struct ZooKeeperCloseResponse final : ZooKeeperResponse
 struct ZooKeeperCreateRequest final : public CreateRequest, ZooKeeperRequest
 {
+    /// used only during restore from zookeeper log
+    int32_t parent_cversion = -1;
+
     ZooKeeperCreateRequest() = default;
     explicit ZooKeeperCreateRequest(const CreateRequest & base) : CreateRequest(base) {}
@@ -183,9 +188,6 @@ struct ZooKeeperCreateRequest final : public CreateRequest, ZooKeeperRequest
     bool isReadRequest() const override { return false; }

     size_t bytesSize() const override { return CreateRequest::bytesSize() + sizeof(xid) + sizeof(has_watch); }
-
-    /// During recovery from log we don't rehash ACLs
-    bool need_to_hash_acls = true;
 };

 struct ZooKeeperCreateResponse final : CreateResponse, ZooKeeperResponse
@@ -362,8 +364,6 @@ struct ZooKeeperSetACLRequest final : SetACLRequest, ZooKeeperRequest
     bool isReadRequest() const override { return false; }

     size_t bytesSize() const override { return SetACLRequest::bytesSize() + sizeof(xid); }
-
-    bool need_to_hash_acls = true;
 };

 struct ZooKeeperSetACLResponse final : SetACLResponse, ZooKeeperResponse

@@ -47,13 +47,13 @@ CompressedReadBufferFromFile::CompressedReadBufferFromFile(std::unique_ptr<ReadB
 CompressedReadBufferFromFile::CompressedReadBufferFromFile(
     const std::string & path,
     size_t estimated_size,
-    size_t aio_threshold,
+    size_t direct_io_threshold,
     size_t mmap_threshold,
     MMappedFileCache * mmap_cache,
     size_t buf_size,
     bool allow_different_codecs_)
     : BufferWithOwnMemory<ReadBuffer>(0)
-    , p_file_in(createReadBufferFromFileBase(path, estimated_size, aio_threshold, mmap_threshold, mmap_cache, buf_size))
+    , p_file_in(createReadBufferFromFileBase(path, estimated_size, direct_io_threshold, mmap_threshold, mmap_cache, buf_size))
     , file_in(*p_file_in)
 {
     compressed_in = &file_in;

@@ -33,7 +33,7 @@ public:
     CompressedReadBufferFromFile(std::unique_ptr<ReadBufferFromFileBase> buf, bool allow_different_codecs_ = false);

     CompressedReadBufferFromFile(
-        const std::string & path, size_t estimated_size, size_t aio_threshold, size_t mmap_threshold, MMappedFileCache * mmap_cache,
+        const std::string & path, size_t estimated_size, size_t direct_io_threshold, size_t mmap_threshold, MMappedFileCache * mmap_cache,
         size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE, bool allow_different_codecs_ = false);

     void seek(size_t offset_in_compressed_file, size_t offset_in_decompressed_block);

@@ -267,13 +267,12 @@ struct KeeperStorageCreateRequest final : public KeeperStorageRequest
         }
         else
         {
             auto & session_auth_ids = storage.session_and_auth[session_id];

             KeeperStorage::Node created_node;

             Coordination::ACLs node_acls;
-            if (!fixupACL(request.acls, session_auth_ids, node_acls, request.need_to_hash_acls))
+            if (!fixupACL(request.acls, session_auth_ids, node_acls, !request.restored_from_zookeeper_log))
             {
                 response.error = Coordination::Error::ZINVALIDACL;
                 return {response_ptr, {}};
@@ -307,16 +306,28 @@ struct KeeperStorageCreateRequest final : public KeeperStorageRequest
                 path_created += seq_num_str.str();
             }

+            int32_t parent_cversion = request.parent_cversion;
             auto child_path = getBaseName(path_created);
             int64_t prev_parent_zxid;
-            container.updateValue(parent_path, [child_path, zxid, &prev_parent_zxid] (KeeperStorage::Node & parent)
+            int32_t prev_parent_cversion;
+            container.updateValue(parent_path, [child_path, zxid, &prev_parent_zxid,
+                                                parent_cversion, &prev_parent_cversion] (KeeperStorage::Node & parent)
             {
+                parent.children.insert(child_path);
+                prev_parent_cversion = parent.stat.cversion;
+                prev_parent_zxid = parent.stat.pzxid;
+
                 /// Increment sequential number even if node is not sequential
                 ++parent.seq_num;
-                parent.children.insert(child_path);
-                ++parent.stat.cversion;
-                prev_parent_zxid = parent.stat.pzxid;
-                parent.stat.pzxid = zxid;
+                if (parent_cversion == -1)
+                    ++parent.stat.cversion;
+                else if (parent_cversion > parent.stat.cversion)
+                    parent.stat.cversion = parent_cversion;
+
+                if (zxid > parent.stat.pzxid)
+                    parent.stat.pzxid = zxid;
+
                 ++parent.stat.numChildren;
             });
@@ -326,7 +337,7 @@ struct KeeperStorageCreateRequest final : public KeeperStorageRequest
             if (request.is_ephemeral)
                 ephemerals[session_id].emplace(path_created);

-            undo = [&storage, prev_parent_zxid, session_id, path_created, is_ephemeral = request.is_ephemeral, parent_path, child_path, acl_id]
+            undo = [&storage, prev_parent_zxid, prev_parent_cversion, session_id, path_created, is_ephemeral = request.is_ephemeral, parent_path, child_path, acl_id]
             {
                 storage.container.erase(path_created);
                 storage.acl_map.removeUsage(acl_id);
@@ -334,11 +345,11 @@ struct KeeperStorageCreateRequest final : public KeeperStorageRequest
                 if (is_ephemeral)
                     storage.ephemerals[session_id].erase(path_created);

-                storage.container.updateValue(parent_path, [child_path, prev_parent_zxid] (KeeperStorage::Node & undo_parent)
+                storage.container.updateValue(parent_path, [child_path, prev_parent_zxid, prev_parent_cversion] (KeeperStorage::Node & undo_parent)
                 {
-                    --undo_parent.stat.cversion;
                     --undo_parent.stat.numChildren;
                     --undo_parent.seq_num;
+                    undo_parent.stat.cversion = prev_parent_cversion;
                     undo_parent.stat.pzxid = prev_parent_zxid;
                     undo_parent.children.erase(child_path);
                 });
@@ -394,6 +405,24 @@ struct KeeperStorageGetRequest final : public KeeperStorageRequest
     }
 };

+namespace
+{
+    /// Garbage required to apply log to "fuzzy" zookeeper snapshot
+    void updateParentPzxid(const std::string & child_path, int64_t zxid, KeeperStorage::Container & container)
+    {
+        auto parent_path = parentPath(child_path);
+        auto parent_it = container.find(parent_path);
+        if (parent_it != container.end())
+        {
+            container.updateValue(parent_path, [zxid](KeeperStorage::Node & parent)
+            {
+                if (parent.stat.pzxid < zxid)
+                    parent.stat.pzxid = zxid;
+            });
+        }
+    }
+}
+
 struct KeeperStorageRemoveRequest final : public KeeperStorageRequest
 {
     bool checkAuth(KeeperStorage & storage, int64_t session_id) const override
@@ -412,7 +441,7 @@ struct KeeperStorageRemoveRequest final : public KeeperStorageRequest
     }

     using KeeperStorageRequest::KeeperStorageRequest;
-    std::pair<Coordination::ZooKeeperResponsePtr, Undo> process(KeeperStorage & storage, int64_t /*zxid*/, int64_t /*session_id*/) const override
+    std::pair<Coordination::ZooKeeperResponsePtr, Undo> process(KeeperStorage & storage, int64_t zxid, int64_t /*session_id*/) const override
     {
         auto & container = storage.container;
         auto & ephemerals = storage.ephemerals;
@@ -425,6 +454,8 @@ struct KeeperStorageRemoveRequest final : public KeeperStorageRequest
         auto it = container.find(request.path);
         if (it == container.end())
         {
+            if (request.restored_from_zookeeper_log)
+                updateParentPzxid(request.path, zxid, container);
             response.error = Coordination::Error::ZNONODE;
         }
         else if (request.version != -1 && request.version != it->value.stat.version)
@@ -437,6 +468,9 @@ struct KeeperStorageRemoveRequest final : public KeeperStorageRequest
         }
         else
         {
+            if (request.restored_from_zookeeper_log)
+                updateParentPzxid(request.path, zxid, container);
+
             auto prev_node = it->value;
             if (prev_node.stat.ephemeralOwner != 0)
             {
@@ -719,7 +753,7 @@ struct KeeperStorageSetACLRequest final : public KeeperStorageRequest
         auto & session_auth_ids = storage.session_and_auth[session_id];

         Coordination::ACLs node_acls;
-        if (!fixupACL(request.acls, session_auth_ids, node_acls, request.need_to_hash_acls))
+        if (!fixupACL(request.acls, session_auth_ids, node_acls, !request.restored_from_zookeeper_log))
         {
             response.error = Coordination::Error::ZINVALIDACL;
             return {response_ptr, {}};

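The pattern behind updateParentPzxid and the `zxid > parent.stat.pzxid` guard above is what makes log application idempotent: replaying a record against a "fuzzy" snapshot that already saw it must never move state backwards. The invariant in isolation (illustrative sketch only):

    #include <algorithm>
    #include <cstdint>

    /// pzxid only ever moves forward, so applying the same transaction twice,
    /// or applying an older one after a newer one, is a no-op.
    void updatePzxid(int64_t & pzxid, int64_t zxid)
    {
        pzxid = std::max(pzxid, zxid);
    }
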
@@ -174,7 +174,22 @@ void deserializeKeeperStorageFromSnapshot(KeeperStorage & storage, const std::st
     LOG_INFO(log, "Deserializing data from snapshot");
     int64_t zxid_from_nodes = deserializeStorageData(storage, reader, log);
-    storage.zxid = std::max(zxid, zxid_from_nodes);
+    /// In ZooKeeper, snapshots can contain an inconsistent state of storage. They call
+    /// this inconsistent state "fuzzy". It's guaranteed that a snapshot contains all
+    /// records up to the zxid from the snapshot name, and also some records from the future.
+    /// But it doesn't mean that we just have some state of storage from the future (like zxid + 100 log records).
+    /// We have an incorrect state of storage where some random log entries from the future were applied.
+    ///
+    /// In ZooKeeper they say that their transaction log is idempotent and can be applied to a "fuzzy" state as is.
+    /// That's true, but there is no general invariant which produces this property. They just have ad-hoc "if"s which detect
+    /// "fuzzy" state inconsistencies and apply log records in a special way. Several examples:
+    /// https://github.com/apache/zookeeper/blob/master/zookeeper-server/src/main/java/org/apache/zookeeper/server/DataTree.java#L453-L463
+    /// https://github.com/apache/zookeeper/blob/master/zookeeper-server/src/main/java/org/apache/zookeeper/server/DataTree.java#L476-L480
+    /// https://github.com/apache/zookeeper/blob/master/zookeeper-server/src/main/java/org/apache/zookeeper/server/DataTree.java#L547-L549
+    if (zxid_from_nodes > zxid)
+        LOG_WARNING(log, "ZooKeeper snapshot was in an inconsistent (fuzzy) state. Will try to apply the log. ZooKeeper creates a non-fuzzy snapshot on restart, so you can just restart the ZooKeeper server to get a consistent version.");
+
+    storage.zxid = zxid;

     LOG_INFO(log, "Finished, snapshot ZXID {}", storage.zxid);
 }
@@ -210,16 +225,18 @@ void deserializeLogMagic(ReadBuffer & in)
     static constexpr int32_t LOG_HEADER = 1514884167; /// "ZKLG"
     if (magic_header != LOG_HEADER)
-        throw Exception(ErrorCodes::CORRUPTED_DATA ,"Incorrect magic header in file, expected {}, got {}", LOG_HEADER, magic_header);
+        throw Exception(ErrorCodes::CORRUPTED_DATA, "Incorrect magic header in file, expected {}, got {}", LOG_HEADER, magic_header);

     if (version != 2)
-        throw Exception(ErrorCodes::NOT_IMPLEMENTED,"Cannot deserialize ZooKeeper data other than version 2, got version {}", version);
+        throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Cannot deserialize ZooKeeper data other than version 2, got version {}", version);
 }

-/// For some reason zookeeper stores slightly different records in log then
-/// requests. For example:
-/// class CreateTxn {
+/// The ZooKeeper transaction log differs from requests. The main reason: records are stored
+/// in the log in some "finalized" state (for example, with concrete versions).
+///
+/// Example:
+/// class CreateTxn {
 ///      ustring path;
 ///      buffer data;
 ///      vector<org.apache.zookeeper.data.ACL> acl;
@@ -289,10 +306,9 @@ Coordination::ZooKeeperRequestPtr deserializeCreateTxn(ReadBuffer & in)
     Coordination::read(result->data, in);
     Coordination::read(result->acls, in);
     Coordination::read(result->is_ephemeral, in);
-    result->need_to_hash_acls = false;
-    /// How we should use it? It should just increment on request execution
-    int32_t parent_c_version;
-    Coordination::read(parent_c_version, in);
+    Coordination::read(result->parent_cversion, in);
+
+    result->restored_from_zookeeper_log = true;

     return result;
 }
@@ -300,6 +316,7 @@ Coordination::ZooKeeperRequestPtr deserializeDeleteTxn(ReadBuffer & in)
 {
     std::shared_ptr<Coordination::ZooKeeperRemoveRequest> result = std::make_shared<Coordination::ZooKeeperRemoveRequest>();
     Coordination::read(result->path, in);
+    result->restored_from_zookeeper_log = true;
     return result;
 }
@@ -309,6 +326,7 @@ Coordination::ZooKeeperRequestPtr deserializeSetTxn(ReadBuffer & in)
     Coordination::read(result->path, in);
     Coordination::read(result->data, in);
     Coordination::read(result->version, in);
+    result->restored_from_zookeeper_log = true;

     /// It stores version + 1 (which should be, not for request)
     result->version -= 1;
@@ -320,6 +338,7 @@ Coordination::ZooKeeperRequestPtr deserializeCheckVersionTxn(ReadBuffer & in)
     std::shared_ptr<Coordination::ZooKeeperCheckRequest> result = std::make_shared<Coordination::ZooKeeperCheckRequest>();
     Coordination::read(result->path, in);
     Coordination::read(result->version, in);
+    result->restored_from_zookeeper_log = true;
     return result;
 }
@@ -329,14 +348,19 @@ Coordination::ZooKeeperRequestPtr deserializeCreateSession(ReadBuffer & in)
     int32_t timeout;
     Coordination::read(timeout, in);
     result->session_timeout_ms = timeout;
+    result->restored_from_zookeeper_log = true;
     return result;
 }

-Coordination::ZooKeeperRequestPtr deserializeCloseSession(ReadBuffer & in)
+Coordination::ZooKeeperRequestPtr deserializeCloseSession(ReadBuffer & in, bool empty)
 {
     std::shared_ptr<Coordination::ZooKeeperCloseRequest> result = std::make_shared<Coordination::ZooKeeperCloseRequest>();
-    std::vector<std::string> data;
-    Coordination::read(data, in);
+    if (!empty)
+    {
+        std::vector<std::string> data;
+        Coordination::read(data, in);
+    }
+    result->restored_from_zookeeper_log = true;
     return result;
 }
@@ -356,14 +380,14 @@ Coordination::ZooKeeperRequestPtr deserializeSetACLTxn(ReadBuffer & in)
     Coordination::read(result->version, in);
     /// It stores version + 1 (which should be, not for request)
     result->version -= 1;
-    result->need_to_hash_acls = false;
+    result->restored_from_zookeeper_log = true;

     return result;
 }

 Coordination::ZooKeeperRequestPtr deserializeMultiTxn(ReadBuffer & in);

-Coordination::ZooKeeperRequestPtr deserializeTxnImpl(ReadBuffer & in, bool subtxn)
+Coordination::ZooKeeperRequestPtr deserializeTxnImpl(ReadBuffer & in, bool subtxn, int64_t txn_length = 0)
 {
     int32_t type;
     Coordination::read(type, in);
@@ -372,6 +396,11 @@ Coordination::ZooKeeperRequestPtr deserializeTxnImpl(ReadBuffer & in, bool subtx
     if (subtxn)
         Coordination::read(sub_txn_length, in);

+    bool empty_txn = !subtxn && txn_length == 32; /// Possible for old-style CloseTxn's
+    if (empty_txn && type != -11)
+        throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Empty non close-session transaction found");
+
     int64_t in_count_before = in.count();

     switch (type)
@@ -398,7 +427,7 @@ Coordination::ZooKeeperRequestPtr deserializeTxnImpl(ReadBuffer & in, bool subtx
             result = deserializeCreateSession(in);
             break;
         case -11:
-            result = deserializeCloseSession(in);
+            result = deserializeCloseSession(in, empty_txn);
             break;
         case -1:
             result = deserializeErrorTxn(in);
@@ -442,7 +471,7 @@ bool hasErrorsInMultiRequest(Coordination::ZooKeeperRequestPtr request)
     if (request == nullptr)
         return true;

-    for (const auto & subrequest : dynamic_cast<Coordination::ZooKeeperMultiRequest *>(request.get())->requests) //-V522
+    for (const auto & subrequest : dynamic_cast<Coordination::ZooKeeperMultiRequest *>(request.get())->requests) // -V522
         if (subrequest == nullptr)
             return true;
     return false;
@@ -470,7 +499,7 @@ bool deserializeTxn(KeeperStorage & storage, ReadBuffer & in, Poco::Logger * /*l
     int64_t time;
     Coordination::read(time, in);

-    Coordination::ZooKeeperRequestPtr request = deserializeTxnImpl(in, false);
+    Coordination::ZooKeeperRequestPtr request = deserializeTxnImpl(in, false, txn_len);

     /// Skip all other bytes
     int64_t bytes_read = in.count() - count_before;

@@ -436,7 +436,7 @@ Block Block::sortColumns() const
     Block sorted_block;

     /// std::unordered_map (index_by_name) cannot be used to guarantee the sort order
-    std::vector<decltype(index_by_name.begin())> sorted_index_by_name(index_by_name.size());
+    std::vector<IndexByName::const_iterator> sorted_index_by_name(index_by_name.size());
     {
         size_t i = 0;
         for (auto it = index_by_name.begin(); it != index_by_name.end(); ++it)

@@ -68,7 +68,7 @@ public:
             const_cast<const Block *>(this)->findByName(name));
     }

-    const ColumnWithTypeAndName* findByName(const std::string & name) const;
+    const ColumnWithTypeAndName * findByName(const std::string & name) const;

     ColumnWithTypeAndName & getByName(const std::string & name)
     {

@@ -26,13 +26,14 @@ namespace ErrorCodes
 MySQLClient::MySQLClient(const String & host_, UInt16 port_, const String & user_, const String & password_)
     : host(host_), port(port_), user(user_), password(std::move(password_))
 {
-    client_capability_flags = CLIENT_PROTOCOL_41 | CLIENT_PLUGIN_AUTH | CLIENT_SECURE_CONNECTION;
+    mysql_context.client_capabilities = CLIENT_PROTOCOL_41 | CLIENT_PLUGIN_AUTH | CLIENT_SECURE_CONNECTION;
 }

 MySQLClient::MySQLClient(MySQLClient && other)
     : host(std::move(other.host)), port(other.port), user(std::move(other.user)), password(std::move(other.password))
-    , client_capability_flags(other.client_capability_flags)
+    , mysql_context(other.mysql_context)
 {
+    mysql_context.sequence_id = 0;
 }

 void MySQLClient::connect()
@@ -56,7 +57,7 @@ void MySQLClient::connect()
     in = std::make_shared<ReadBufferFromPocoSocket>(*socket);
     out = std::make_shared<WriteBufferFromPocoSocket>(*socket);
-    packet_endpoint = std::make_shared<PacketEndpoint>(*in, *out, seq);
+    packet_endpoint = mysql_context.makeEndpoint(*in, *out);
     handshake();
 }
@@ -68,7 +69,7 @@ void MySQLClient::disconnect()
     socket->close();
     socket = nullptr;
     connected = false;
-    seq = 0;
+    mysql_context.sequence_id = 0;
 }

 /// https://dev.mysql.com/doc/internals/en/connection-phase-packets.html
@@ -87,10 +88,10 @@ void MySQLClient::handshake()
     String auth_plugin_data = native41.getAuthPluginData();

     HandshakeResponse handshake_response(
-        client_capability_flags, MAX_PACKET_LENGTH, charset_utf8, user, "", auth_plugin_data, mysql_native_password);
+        mysql_context.client_capabilities, MAX_PACKET_LENGTH, charset_utf8, user, "", auth_plugin_data, mysql_native_password);
     packet_endpoint->sendPacket<HandshakeResponse>(handshake_response, true);

-    ResponsePacket packet_response(client_capability_flags, true);
+    ResponsePacket packet_response(mysql_context.client_capabilities, true);
     packet_endpoint->receivePacket(packet_response);
     packet_endpoint->resetSequenceId();
@@ -105,7 +106,7 @@ void MySQLClient::writeCommand(char command, String query)
     WriteCommand write_command(command, query);
     packet_endpoint->sendPacket<WriteCommand>(write_command, true);

-    ResponsePacket packet_response(client_capability_flags);
+    ResponsePacket packet_response(mysql_context.client_capabilities);
     packet_endpoint->receivePacket(packet_response);
     switch (packet_response.getType())
     {
@@ -124,7 +125,7 @@ void MySQLClient::registerSlaveOnMaster(UInt32 slave_id)
     RegisterSlave register_slave(slave_id);
     packet_endpoint->sendPacket<RegisterSlave>(register_slave, true);

-    ResponsePacket packet_response(client_capability_flags);
+    ResponsePacket packet_response(mysql_context.client_capabilities);
     packet_endpoint->receivePacket(packet_response);
     packet_endpoint->resetSequenceId();
     if (packet_response.getType() == PACKET_ERR)

@@ -45,9 +45,7 @@ private:
     String password;
     bool connected = false;

-    UInt32 client_capability_flags = 0;
-    uint8_t seq = 0;
+    MySQLWireContext mysql_context;

     const UInt8 charset_utf8 = 33;
     const String mysql_native_password = "mysql_native_password";

@@ -68,4 +68,15 @@ String PacketEndpoint::packetToText(const String & payload)
 }

+MySQLProtocol::PacketEndpointPtr MySQLWireContext::makeEndpoint(WriteBuffer & out)
+{
+    return MySQLProtocol::PacketEndpoint::create(out, sequence_id);
+}
+
+MySQLProtocol::PacketEndpointPtr MySQLWireContext::makeEndpoint(ReadBuffer & in, WriteBuffer & out)
+{
+    return MySQLProtocol::PacketEndpoint::create(in, out, sequence_id);
+}
+
 }

@@ -5,6 +5,7 @@
 #include "IMySQLReadPacket.h"
 #include "IMySQLWritePacket.h"
 #include "IO/MySQLPacketPayloadReadBuffer.h"
+#include <common/shared_ptr_helper.h>

 namespace DB
 {
@@ -15,19 +16,13 @@

 /* Writes and reads packets, keeping sequence-id.
  * Throws ProtocolError, if packet with incorrect sequence-id was received.
  */
-class PacketEndpoint
+class PacketEndpoint : public shared_ptr_helper<PacketEndpoint>
 {
 public:
     uint8_t & sequence_id;
     ReadBuffer * in;
     WriteBuffer * out;

-    /// For writing.
-    PacketEndpoint(WriteBuffer & out_, uint8_t & sequence_id_);
-
-    /// For reading and writing.
-    PacketEndpoint(ReadBuffer & in_, WriteBuffer & out_, uint8_t & sequence_id_);
-
     MySQLPacketPayloadReadBuffer getPayload();

     void receivePacket(IMySQLReadPacket & packet);
@@ -48,8 +43,29 @@
     /// Converts packet to text. Is used for debug output.
     static String packetToText(const String & payload);

+protected:
+    /// For writing.
+    PacketEndpoint(WriteBuffer & out_, uint8_t & sequence_id_);
+
+    /// For reading and writing.
+    PacketEndpoint(ReadBuffer & in_, WriteBuffer & out_, uint8_t & sequence_id_);
+
+    friend struct shared_ptr_helper<PacketEndpoint>;
+};
+
+using PacketEndpointPtr = std::shared_ptr<PacketEndpoint>;
+
+}
+
+struct MySQLWireContext
+{
+    uint8_t sequence_id = 0;
+    uint32_t client_capabilities = 0;
+    size_t max_packet_size = 0;
+
+    MySQLProtocol::PacketEndpointPtr makeEndpoint(WriteBuffer & out);
+    MySQLProtocol::PacketEndpointPtr makeEndpoint(ReadBuffer & in, WriteBuffer & out);
 };

 }
-
-}

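The refactoring above centralizes per-connection wire state: endpoints now hold a reference to the context's sequence_id instead of each owning a counter. A hedged sketch of the ownership pattern (simplified names, not the real classes):

    #include <cstdint>
    #include <memory>

    struct Endpoint
    {
        uint8_t & sequence_id;                 /// refers to the context's counter
        explicit Endpoint(uint8_t & seq) : sequence_id(seq) {}
        void sendPacket() { ++sequence_id; }   /// every endpoint advances the same counter
    };

    struct WireContext
    {
        uint8_t sequence_id = 0;

        /// The context must outlive the endpoints it hands out, since they
        /// keep a reference into it.
        std::shared_ptr<Endpoint> makeEndpoint()
        {
            return std::make_shared<Endpoint>(sequence_id);
        }
    };
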
@@ -1,4 +1,7 @@
 #include "Connection.h"
+
+#if USE_LIBPQXX
+
 #include <common/logger_useful.h>

 namespace postgres
@@ -72,3 +75,5 @@ void Connection::connect()
     updateConnection();
 }
 }
+
+#endif

@@ -1,5 +1,11 @@
 #pragma once

+#if !defined(ARCADIA_BUILD)
+#include "config_core.h"
+#endif
+
+#if USE_LIBPQXX
+
 #include <pqxx/pqxx> // Y_IGNORE
 #include <Core/Types.h>
 #include <boost/noncopyable.hpp>
@@ -45,3 +51,5 @@ private:
     Poco::Logger * log;
 };
 }
+
+#endif

@@ -1,5 +1,11 @@
 #pragma once

+#if !defined(ARCADIA_BUILD)
+#include "config_core.h"
+#endif
+
+#if USE_LIBPQXX
+
 #include <pqxx/pqxx> // Y_IGNORE
 #include <Core/Types.h>
 #include <common/BorrowedObjectPool.h>
@@ -35,3 +41,5 @@ private:

 using ConnectionHolderPtr = std::unique_ptr<ConnectionHolder>;
 }
+
+#endif

@@ -1,4 +1,7 @@
 #include "PoolWithFailover.h"
+
+#if USE_LIBPQXX
+
 #include "Utils.h"
 #include <Common/parseRemoteDescription.h>
 #include <Common/Exception.h>
@@ -136,3 +139,5 @@ ConnectionHolderPtr PoolWithFailover::get()
     throw DB::Exception(DB::ErrorCodes::POSTGRESQL_CONNECTION_FAILURE, "Unable to connect to any of the replicas");
 }
 }
+
+#endif

@@ -1,5 +1,12 @@
 #pragma once

+#if !defined(ARCADIA_BUILD)
+#include "config_core.h"
+#endif
+
+#if USE_LIBPQXX
+
+
 #include "ConnectionHolder.h"
 #include <mutex>
 #include <Poco/Util/AbstractConfiguration.h>
@@ -63,3 +70,5 @@ private:

 using PoolWithFailoverPtr = std::shared_ptr<PoolWithFailover>;
 }
+
+#endif

@@ -1,4 +1,7 @@
 #include "Utils.h"
+
+#if USE_LIBPQXX
+
 #include <IO/Operators.h>

 namespace postgres
@@ -17,3 +20,5 @@ ConnectionInfo formatConnectionString(String dbname, String host, UInt16 port, S
 }
 }
+
+#endif

@@ -1,5 +1,11 @@
 #pragma once

+#if !defined(ARCADIA_BUILD)
+#include "config_core.h"
+#endif
+
+#if USE_LIBPQXX
+
 #include <pqxx/pqxx> // Y_IGNORE
 #include <Core/Types.h>
 #include "Connection.h"
@@ -15,3 +21,5 @@ namespace postgres
 {
 ConnectionInfo formatConnectionString(String dbname, String host, UInt16 port, String user, String password);
 }
+
+#endif

@@ -9,6 +9,7 @@
 #include <DataTypes/IDataType.h>
 #include <DataTypes/DataTypeNullable.h>
 #include <DataTypes/DataTypeArray.h>
+#include <DataTypes/DataTypeDateTime64.h>
 #include <DataTypes/DataTypesDecimal.h>
 #include <Interpreters/convertFieldToType.h>
 #include <IO/ReadHelpers.h>
@@ -99,7 +100,16 @@ void insertPostgreSQLValue(
             assert_cast<ColumnUInt32 &>(column).insertValue(time);
             break;
         }
-        case ExternalResultDescription::ValueType::vtDateTime64:[[fallthrough]];
+        case ExternalResultDescription::ValueType::vtDateTime64:
+        {
+            ReadBufferFromString in(value);
+            DateTime64 time = 0;
+            readDateTime64Text(time, 6, in, assert_cast<const DataTypeDateTime64 *>(data_type.get())->getTimeZone());
+            if (time < 0)
+                time = 0;
+            assert_cast<ColumnDecimal<Decimal64> &>(column).insertValue(time);
+            break;
+        }
         case ExternalResultDescription::ValueType::vtDecimal32: [[fallthrough]];
         case ExternalResultDescription::ValueType::vtDecimal64: [[fallthrough]];
         case ExternalResultDescription::ValueType::vtDecimal128: [[fallthrough]];
@@ -201,6 +211,18 @@ void preparePostgreSQLArrayInfo(
             ReadBufferFromString in(field);
             time_t time = 0;
             readDateTimeText(time, in, assert_cast<const DataTypeDateTime *>(nested.get())->getTimeZone());
+            if (time < 0)
+                time = 0;
+            return time;
+        };
+    else if (which.isDateTime64())
+        parser = [nested](std::string & field) -> Field
+        {
+            ReadBufferFromString in(field);
+            DateTime64 time = 0;
+            readDateTime64Text(time, 6, in, assert_cast<const DataTypeDateTime64 *>(nested.get())->getTimeZone());
+            if (time < 0)
+                time = 0;
             return time;
         };
     else if (which.isDecimal32())

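A DateTime64 with scale 6 is stored as one signed integer counting microseconds since the Unix epoch, so PostgreSQL timestamps before 1970 arrive as negative values; the hunks above clamp them to 0. A small illustration of that representation (scale of 6 assumed, illustrative only):

    #include <cstdint>

    /// 1970-01-01 00:00:01.000001 UTC -> seconds * 10^6 + microseconds.
    int64_t toDateTime64Scale6(int64_t epoch_seconds, int64_t micros)
    {
        return epoch_seconds * 1'000'000 + micros;
    }

    /// Pre-epoch timestamps come out negative; clamp like the code above does.
    int64_t clampToEpoch(int64_t value)
    {
        return value < 0 ? 0 : value;
    }
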
@ -108,7 +108,7 @@ class IColumn;
M(Bool, compile_expressions, true, "Compile some scalar functions and operators to native code.", 0) \ M(Bool, compile_expressions, true, "Compile some scalar functions and operators to native code.", 0) \
M(UInt64, min_count_to_compile_expression, 3, "The number of identical expressions before they are JIT-compiled", 0) \ M(UInt64, min_count_to_compile_expression, 3, "The number of identical expressions before they are JIT-compiled", 0) \
M(Bool, compile_aggregate_expressions, true, "Compile aggregate functions to native code.", 0) \ M(Bool, compile_aggregate_expressions, true, "Compile aggregate functions to native code.", 0) \
M(UInt64, min_count_to_compile_aggregate_expression, 3, "The number of identical aggregate expressions before they are JIT-compiled", 0) \ M(UInt64, min_count_to_compile_aggregate_expression, 0, "The number of identical aggregate expressions before they are JIT-compiled", 0) \
M(UInt64, group_by_two_level_threshold, 100000, "From what number of keys, a two-level aggregation starts. 0 - the threshold is not set.", 0) \ M(UInt64, group_by_two_level_threshold, 100000, "From what number of keys, a two-level aggregation starts. 0 - the threshold is not set.", 0) \
M(UInt64, group_by_two_level_threshold_bytes, 50000000, "From what size of the aggregation state in bytes, a two-level aggregation begins to be used. 0 - the threshold is not set. Two-level aggregation is used when at least one of the thresholds is triggered.", 0) \ M(UInt64, group_by_two_level_threshold_bytes, 50000000, "From what size of the aggregation state in bytes, a two-level aggregation begins to be used. 0 - the threshold is not set. Two-level aggregation is used when at least one of the thresholds is triggered.", 0) \
M(Bool, distributed_aggregation_memory_efficient, true, "Is the memory-saving mode of distributed aggregation enabled.", 0) \ M(Bool, distributed_aggregation_memory_efficient, true, "Is the memory-saving mode of distributed aggregation enabled.", 0) \
@ -482,6 +482,8 @@ class IColumn;
M(UInt64, limit, 0, "Limit on read rows from the most 'end' result for select query, default 0 means no limit length", 0) \ M(UInt64, limit, 0, "Limit on read rows from the most 'end' result for select query, default 0 means no limit length", 0) \
M(UInt64, offset, 0, "Offset on read rows from the most 'end' result for select query", 0) \ M(UInt64, offset, 0, "Offset on read rows from the most 'end' result for select query", 0) \
\ \
M(UInt64, function_range_max_elements_in_block, 500000000, "Maximum number of values generated by function 'range' per block of data (sum of array sizes for every row in a block, see also 'max_block_size' and 'min_insert_block_size_rows'). It is a safety threshold.", 0) \
\
/** Experimental functions */ \ /** Experimental functions */ \
M(Bool, allow_experimental_funnel_functions, false, "Enable experimental functions for funnel analysis.", 0) \ M(Bool, allow_experimental_funnel_functions, false, "Enable experimental functions for funnel analysis.", 0) \
\ \

View File

@ -13,5 +13,6 @@
#cmakedefine01 USE_LDAP #cmakedefine01 USE_LDAP
#cmakedefine01 USE_ROCKSDB #cmakedefine01 USE_ROCKSDB
#cmakedefine01 USE_LIBPQXX #cmakedefine01 USE_LIBPQXX
#cmakedefine01 USE_SQLITE
#cmakedefine01 USE_NURAFT #cmakedefine01 USE_NURAFT
#cmakedefine01 USE_KRB5 #cmakedefine01 USE_KRB5

View File

@@ -31,6 +31,10 @@ SRCS(
     MySQL/PacketsProtocolText.cpp
     MySQL/PacketsReplication.cpp
     NamesAndTypes.cpp
+    PostgreSQL/Connection.cpp
+    PostgreSQL/PoolWithFailover.cpp
+    PostgreSQL/Utils.cpp
+    PostgreSQL/insertPostgreSQLValue.cpp
     PostgreSQLProtocol.cpp
     QueryProcessingStage.cpp
     Settings.cpp

@@ -0,0 +1,163 @@
+#include "SQLiteBlockInputStream.h"
+
+#if USE_SQLITE
+#include <common/range.h>
+#include <common/logger_useful.h>
+#include <Common/assert_cast.h>
+
+#include <Columns/ColumnArray.h>
+#include <Columns/ColumnDecimal.h>
+#include <Columns/ColumnNullable.h>
+#include <Columns/ColumnString.h>
+#include <Columns/ColumnsNumber.h>
+
+#include <DataTypes/DataTypeNullable.h>
+
+
+namespace DB
+{
+namespace ErrorCodes
+{
+    extern const int SQLITE_ENGINE_ERROR;
+}
+
+SQLiteBlockInputStream::SQLiteBlockInputStream(
+    SQLitePtr sqlite_db_,
+    const String & query_str_,
+    const Block & sample_block,
+    const UInt64 max_block_size_)
+    : query_str(query_str_)
+    , max_block_size(max_block_size_)
+    , sqlite_db(std::move(sqlite_db_))
+{
+    description.init(sample_block);
+}
+
+
+void SQLiteBlockInputStream::readPrefix()
+{
+    sqlite3_stmt * compiled_stmt = nullptr;
+    int status = sqlite3_prepare_v2(sqlite_db.get(), query_str.c_str(), query_str.size() + 1, &compiled_stmt, nullptr);
+
+    if (status != SQLITE_OK)
+        throw Exception(ErrorCodes::SQLITE_ENGINE_ERROR,
+                        "Cannot prepare sqlite statement. Status: {}. Message: {}",
+                        status, sqlite3_errstr(status));
+
+    compiled_statement = std::unique_ptr<sqlite3_stmt, StatementDeleter>(compiled_stmt, StatementDeleter());
+}
+
+
+Block SQLiteBlockInputStream::readImpl()
+{
+    if (!compiled_statement)
+        return Block();
+
+    MutableColumns columns = description.sample_block.cloneEmptyColumns();
+    size_t num_rows = 0;
+
+    while (true)
+    {
+        int status = sqlite3_step(compiled_statement.get());
+
+        if (status == SQLITE_BUSY)
+        {
+            continue;
+        }
+        else if (status == SQLITE_DONE)
+        {
+            compiled_statement.reset();
+            break;
+        }
+        else if (status != SQLITE_ROW)
+        {
+            throw Exception(ErrorCodes::SQLITE_ENGINE_ERROR,
+                            "Expected SQLITE_ROW status, but got status {}. Error: {}, Message: {}",
+                            status, sqlite3_errstr(status), sqlite3_errmsg(sqlite_db.get()));
+        }
+
+        int column_count = sqlite3_column_count(compiled_statement.get());
+        for (const auto idx : collections::range(0, column_count))
+        {
+            const auto & sample = description.sample_block.getByPosition(idx);
+
+            if (sqlite3_column_type(compiled_statement.get(), idx) == SQLITE_NULL)
+            {
+                insertDefaultSQLiteValue(*columns[idx], *sample.column);
+                continue;
+            }
+
+            if (description.types[idx].second)
+            {
+                ColumnNullable & column_nullable = assert_cast<ColumnNullable &>(*columns[idx]);
+                insertValue(column_nullable.getNestedColumn(), description.types[idx].first, idx);
+                column_nullable.getNullMapData().emplace_back(0);
+            }
+            else
+            {
+                insertValue(*columns[idx], description.types[idx].first, idx);
+            }
+        }
+
+        if (++num_rows == max_block_size)
+            break;
+    }
+
+    return description.sample_block.cloneWithColumns(std::move(columns));
+}
+
+
+void SQLiteBlockInputStream::readSuffix()
+{
+    if (compiled_statement)
+        compiled_statement.reset();
+}
+
+
+void SQLiteBlockInputStream::insertValue(IColumn & column, const ExternalResultDescription::ValueType type, size_t idx)
+{
+    switch (type)
+    {
+        case ValueType::vtUInt8:
+            assert_cast<ColumnUInt8 &>(column).insertValue(sqlite3_column_int(compiled_statement.get(), idx));
+            break;
+        case ValueType::vtUInt16:
+            assert_cast<ColumnUInt16 &>(column).insertValue(sqlite3_column_int(compiled_statement.get(), idx));
+            break;
+        case ValueType::vtUInt32:
+            assert_cast<ColumnUInt32 &>(column).insertValue(sqlite3_column_int64(compiled_statement.get(), idx));
+            break;
+        case ValueType::vtUInt64:
+            /// There is no uint64 in sqlite3, only int and int64
+            assert_cast<ColumnUInt64 &>(column).insertValue(sqlite3_column_int64(compiled_statement.get(), idx));
+            break;
+        case ValueType::vtInt8:
+            assert_cast<ColumnInt8 &>(column).insertValue(sqlite3_column_int(compiled_statement.get(), idx));
+            break;
+        case ValueType::vtInt16:
+            assert_cast<ColumnInt16 &>(column).insertValue(sqlite3_column_int(compiled_statement.get(), idx));
+            break;
+        case ValueType::vtInt32:
+            assert_cast<ColumnInt32 &>(column).insertValue(sqlite3_column_int(compiled_statement.get(), idx));
+            break;
+        case ValueType::vtInt64:
+            assert_cast<ColumnInt64 &>(column).insertValue(sqlite3_column_int64(compiled_statement.get(), idx));
+            break;
+        case ValueType::vtFloat32:
+            assert_cast<ColumnFloat32 &>(column).insertValue(sqlite3_column_double(compiled_statement.get(), idx));
+            break;
+        case ValueType::vtFloat64:
+            assert_cast<ColumnFloat64 &>(column).insertValue(sqlite3_column_double(compiled_statement.get(), idx));
+            break;
+        default:
+            const char * data = reinterpret_cast<const char *>(sqlite3_column_text(compiled_statement.get(), idx));
+            int len = sqlite3_column_bytes(compiled_statement.get(), idx);
+            assert_cast<ColumnString &>(column).insertData(data, len);
+            break;
+    }
+}
+
+}
+
+#endif

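The stream above is a thin wrapper around SQLite's standard prepare/step/finalize cycle. A self-contained sketch of that cycle with the plain C API (links against libsqlite3; illustrative, not ClickHouse code):

    #include <sqlite3.h>
    #include <cstdio>

    int main()
    {
        sqlite3 * db = nullptr;
        if (sqlite3_open(":memory:", &db) != SQLITE_OK)
            return 1;

        sqlite3_stmt * stmt = nullptr;
        if (sqlite3_prepare_v2(db, "SELECT 1 + 1", -1, &stmt, nullptr) != SQLITE_OK)
        {
            std::fprintf(stderr, "prepare failed: %s\n", sqlite3_errmsg(db));
            sqlite3_close(db);
            return 1;
        }

        /// sqlite3_step returns SQLITE_ROW once per result row and SQLITE_DONE at the end.
        while (sqlite3_step(stmt) == SQLITE_ROW)
            std::printf("result = %d\n", sqlite3_column_int(stmt, 0));

        sqlite3_finalize(stmt);
        sqlite3_close(db);
        return 0;
    }
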
@@ -0,0 +1,62 @@
+#pragma once
+
+#if !defined(ARCADIA_BUILD)
+#include "config_core.h"
+#endif
+
+#if USE_SQLITE
+#include <Core/ExternalResultDescription.h>
+#include <DataStreams/IBlockInputStream.h>
+
+#include <sqlite3.h> // Y_IGNORE
+
+
+namespace DB
+{
+class SQLiteBlockInputStream : public IBlockInputStream
+{
+using SQLitePtr = std::shared_ptr<sqlite3>;
+
+public:
+    SQLiteBlockInputStream(SQLitePtr sqlite_db_,
+                           const String & query_str_,
+                           const Block & sample_block,
+                           UInt64 max_block_size_);
+
+    String getName() const override { return "SQLite"; }
+
+    Block getHeader() const override { return description.sample_block.cloneEmpty(); }
+
+private:
+    void insertDefaultSQLiteValue(IColumn & column, const IColumn & sample_column)
+    {
+        column.insertFrom(sample_column, 0);
+    }
+
+    using ValueType = ExternalResultDescription::ValueType;
+
+    struct StatementDeleter
+    {
+        void operator()(sqlite3_stmt * stmt) { sqlite3_finalize(stmt); }
+    };
+
+    void readPrefix() override;
+
+    Block readImpl() override;
+
+    void readSuffix() override;
+
+    void insertValue(IColumn & column, const ExternalResultDescription::ValueType type, size_t idx);
+
+    String query_str;
+    UInt64 max_block_size;
+
+    ExternalResultDescription description;
+
+    SQLitePtr sqlite_db;
+    std::unique_ptr<sqlite3_stmt, StatementDeleter> compiled_statement;
+};
+
+}
+
+#endif

@@ -41,6 +41,7 @@ SRCS(
     RemoteBlockOutputStream.cpp
     RemoteQueryExecutor.cpp
    RemoteQueryExecutorReadContext.cpp
+    SQLiteBlockInputStream.cpp
     SizeLimits.cpp
     SquashingBlockInputStream.cpp
     SquashingBlockOutputStream.cpp

@@ -1,11 +1,13 @@
 #include <Columns/ColumnArray.h>
 #include <Columns/ColumnConst.h>
 #include <Columns/ColumnTuple.h>
+#include <Columns/ColumnMap.h>
 #include <Columns/ColumnLowCardinality.h>

 #include <DataTypes/DataTypeLowCardinality.h>
 #include <DataTypes/DataTypeArray.h>
 #include <DataTypes/DataTypeTuple.h>
+#include <DataTypes/DataTypeMap.h>

 #include <Common/assert_cast.h>
@@ -39,6 +41,11 @@ DataTypePtr recursiveRemoveLowCardinality(const DataTypePtr & type)
         return std::make_shared<DataTypeTuple>(elements);
     }

+    if (const auto * map_type = typeid_cast<const DataTypeMap *>(type.get()))
+    {
+        return std::make_shared<DataTypeMap>(recursiveRemoveLowCardinality(map_type->getKeyType()), recursiveRemoveLowCardinality(map_type->getValueType()));
+    }
+
     if (const auto * low_cardinality_type = typeid_cast<const DataTypeLowCardinality *>(type.get()))
         return low_cardinality_type->getDictionaryType();
@@ -78,6 +85,16 @@ ColumnPtr recursiveRemoveLowCardinality(const ColumnPtr & column)
         return ColumnTuple::create(columns);
     }

+    if (const auto * column_map = typeid_cast<const ColumnMap *>(column.get()))
+    {
+        const auto & nested = column_map->getNestedColumnPtr();
+        auto nested_no_lc = recursiveRemoveLowCardinality(nested);
+        if (nested.get() == nested_no_lc.get())
+            return column;
+
+        return ColumnMap::create(nested_no_lc);
+    }
+
     if (const auto * column_low_cardinality = typeid_cast<const ColumnLowCardinality *>(column.get()))
         return column_low_cardinality->convertToFullColumn();

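The `nested.get() == nested_no_lc.get()` check above is an identity-preserving optimization: when the recursion changed nothing, the original column is handed back unchanged and no Map is rebuilt. The same pattern in standalone form (hypothetical example):

    #include <memory>
    #include <string>

    using StringPtr = std::shared_ptr<const std::string>;

    /// Returns the input pointer itself when there is nothing to strip, so the
    /// caller can compare pointers to detect "no change" and avoid rebuilding
    /// any structure that wraps the value.
    StringPtr stripPrefix(const StringPtr & s, const std::string & prefix)
    {
        if (s->compare(0, prefix.size(), prefix) != 0)
            return s;
        return std::make_shared<const std::string>(s->substr(prefix.size()));
    }
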
@@ -7,6 +7,7 @@
 #include <DataTypes/DataTypeMap.h>
 #include <DataTypes/DataTypeArray.h>
 #include <DataTypes/DataTypeTuple.h>
+#include <DataTypes/DataTypeLowCardinality.h>
 #include <DataTypes/DataTypeFactory.h>
 #include <DataTypes/Serializations/SerializationMap.h>
 #include <Parsers/IAST.h>
@@ -53,12 +54,24 @@ DataTypeMap::DataTypeMap(const DataTypePtr & key_type_, const DataTypePtr & valu

 void DataTypeMap::assertKeyType() const
 {
-    if (!key_type->isValueRepresentedByInteger()
+    bool type_error = false;
+    if (key_type->getTypeId() == TypeIndex::LowCardinality)
+    {
+        const auto & low_cardinality_data_type = assert_cast<const DataTypeLowCardinality &>(*key_type);
+        if (!isStringOrFixedString(*(low_cardinality_data_type.getDictionaryType())))
+            type_error = true;
+    }
+    else if (!key_type->isValueRepresentedByInteger()
         && !isStringOrFixedString(*key_type)
         && !WhichDataType(key_type).isNothing()
         && !WhichDataType(key_type).isUUID())
+    {
+        type_error = true;
+    }
+
+    if (type_error)
         throw Exception(ErrorCodes::BAD_ARGUMENTS,
-            "Type of Map key must be a type, that can be represented by integer or string or UUID,"
+            "Type of Map key must be a type, that can be represented by integer or String or FixedString (possibly LowCardinality) or UUID,"
             " but {} given", key_type->getName());
 }

@@ -42,11 +42,23 @@ public:
         return it;
     }

+    /// throws exception if value is not valid
     const StringRef & getNameForValue(const T & value) const
     {
         return findByValue(value)->second;
     }

+    /// returns false if value is not valid
+    bool getNameForValue(const T & value, StringRef & result) const
+    {
+        const auto it = value_to_name_map.find(value);
+        if (it == std::end(value_to_name_map))
+            return false;
+
+        result = it->second;
+        return true;
+    }
+
     T getValue(StringRef field_name, bool try_treat_as_id = false) const;

     template <typename TValues>

@@ -29,7 +29,7 @@ namespace ErrorCodes
 static inline bool typeIsSigned(const IDataType & type)
 {
     WhichDataType data_type(type);
-    return data_type.isNativeInt() || data_type.isFloat();
+    return data_type.isNativeInt() || data_type.isFloat() || data_type.isEnum();
 }
 
 static inline llvm::Type * toNativeType(llvm::IRBuilderBase & builder, const IDataType & type)
@@ -57,6 +57,10 @@ static inline llvm::Type * toNativeType(llvm::IRBuilderBase & builder, const IDa
         return builder.getFloatTy();
     else if (data_type.isFloat64())
         return builder.getDoubleTy();
+    else if (data_type.isEnum8())
+        return builder.getInt8Ty();
+    else if (data_type.isEnum16())
+        return builder.getInt16Ty();
 
     return nullptr;
 }
@@ -109,7 +113,7 @@ static inline bool canBeNativeType(const IDataType & type)
         return canBeNativeType(*data_type_nullable.getNestedType());
     }
 
-    return data_type.isNativeInt() || data_type.isNativeUInt() || data_type.isFloat() || data_type.isDate();
+    return data_type.isNativeInt() || data_type.isNativeUInt() || data_type.isFloat() || data_type.isDate() || data_type.isEnum();
 }
 
 static inline llvm::Type * toNativeType(llvm::IRBuilderBase & builder, const DataTypePtr & type)
@@ -266,7 +270,7 @@ static inline llvm::Constant * getColumnNativeValue(llvm::IRBuilderBase & builde
     {
         return llvm::ConstantInt::get(type, column.getUInt(index));
     }
-    else if (column_data_type.isNativeInt())
+    else if (column_data_type.isNativeInt() || column_data_type.isEnum())
     {
         return llvm::ConstantInt::get(type, column.getInt(index));
     }

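Together these hunks let the expression JIT treat Enum8/Enum16 columns as signed integers of the matching width, so enum values flow through the existing integer code paths. The core of the mapping as a standalone sketch (assumes LLVM headers; illustrative, not code from this diff):

    #include <llvm/IR/IRBuilder.h>

    /// Enum8 is stored as Int8 and Enum16 as Int16, so the JIT can reuse the
    /// signed-integer paths once the native width is chosen.
    llvm::Type * enumNativeType(llvm::IRBuilderBase & builder, bool is_enum8)
    {
        return is_enum8 ? builder.getInt8Ty() : builder.getInt16Ty();
    }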
View File

@@ -1,17 +1,17 @@
 #include <Databases/DatabaseFactory.h>
 
 #include <Databases/DatabaseAtomic.h>
-#include <Databases/DatabaseReplicated.h>
 #include <Databases/DatabaseDictionary.h>
 #include <Databases/DatabaseLazy.h>
 #include <Databases/DatabaseMemory.h>
 #include <Databases/DatabaseOrdinary.h>
+#include <Databases/DatabaseReplicated.h>
+#include <Interpreters/Context.h>
 #include <Parsers/ASTCreateQuery.h>
 #include <Parsers/ASTFunction.h>
 #include <Parsers/ASTIdentifier.h>
 #include <Parsers/ASTLiteral.h>
 #include <Parsers/formatAST.h>
-#include <Interpreters/Context.h>
 #include <Common/Macros.h>
 
 #include <filesystem>
@@ -40,6 +40,10 @@
 #include <Storages/PostgreSQL/MaterializedPostgreSQLSettings.h>
 #endif
 
+#if USE_SQLITE
+#include <Databases/SQLite/DatabaseSQLite.h>
+#endif
+
 namespace fs = std::filesystem;
 
 namespace DB
@@ -100,7 +104,7 @@ DatabasePtr DatabaseFactory::getImpl(const ASTCreateQuery & create, const String
     const UUID & uuid = create.uuid;
 
     bool engine_may_have_arguments = engine_name == "MySQL" || engine_name == "MaterializeMySQL" || engine_name == "Lazy" ||
-                                     engine_name == "Replicated" || engine_name == "PostgreSQL" || engine_name == "MaterializedPostgreSQL";
+                                     engine_name == "Replicated" || engine_name == "PostgreSQL" || engine_name == "MaterializedPostgreSQL" || engine_name == "SQLite";
 
     if (engine_define->engine->arguments && !engine_may_have_arguments)
         throw Exception("Database engine " + engine_name + " cannot have arguments", ErrorCodes::BAD_ARGUMENTS);
@@ -299,6 +303,22 @@ DatabasePtr DatabaseFactory::getImpl(const ASTCreateQuery & create, const String
     }
+#endif
+
+#if USE_SQLITE
+    else if (engine_name == "SQLite")
+    {
+        const ASTFunction * engine = engine_define->engine;
+
+        if (!engine->arguments || engine->arguments->children.size() != 1)
+            throw Exception("SQLite database requires 1 argument: database path", ErrorCodes::BAD_ARGUMENTS);
+
+        const auto & arguments = engine->arguments->children;
+
+        String database_path = safeGetLiteralValue<String>(arguments[0], "SQLite");
+
+        return std::make_shared<DatabaseSQLite>(context, engine_define, database_path);
+    }
 #endif
 
     throw Exception("Unknown database engine: " + engine_name, ErrorCodes::UNKNOWN_DATABASE_ENGINE);

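With this registration, `CREATE DATABASE sqlite_db ENGINE = SQLite('/path/to/file.db')` routes the single literal argument to DatabaseSQLite as the database path. The hunk leans on the factory's safeGetLiteralValue helper; a rough sketch of its conventional shape (an assumption about that helper, not a quote of it):

    #include <Common/Exception.h>
    #include <Parsers/ASTLiteral.h>

    namespace DB::ErrorCodes { extern const int BAD_ARGUMENTS; }

    /// Illustrative: reject non-literal engine arguments, then extract the typed value.
    template <typename ValueType>
    static ValueType safeGetLiteralValueSketch(const DB::ASTPtr & ast, const std::string & engine_name)
    {
        if (!ast || !ast->as<DB::ASTLiteral>())
            throw DB::Exception("Database engine " + engine_name + " requested literal argument.",
                                DB::ErrorCodes::BAD_ARGUMENTS);
        return ast->as<DB::ASTLiteral>()->value.safeGet<ValueType>();
    }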
View File

@@ -9,7 +9,7 @@
 #include <DataTypes/DataTypeArray.h>
 #include <DataTypes/DataTypesDecimal.h>
 #include <DataTypes/DataTypeDate.h>
-#include <DataTypes/DataTypeDateTime.h>
+#include <DataTypes/DataTypeDateTime64.h>
 #include <boost/algorithm/string/split.hpp>
 #include <boost/algorithm/string/trim.hpp>
 #include <Common/quoteString.h>
@@ -71,7 +71,7 @@ static DataTypePtr convertPostgreSQLDataType(String & type, const std::function<
     else if (type == "bigserial")
         res = std::make_shared<DataTypeUInt64>();
     else if (type.starts_with("timestamp"))
-        res = std::make_shared<DataTypeDateTime>();
+        res = std::make_shared<DataTypeDateTime64>(6);
     else if (type == "date")
         res = std::make_shared<DataTypeDate>();
     else if (type.starts_with("numeric"))

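PostgreSQL `timestamp` values carry microsecond precision; mapping them to plain DateTime (second resolution) silently truncated the fractional part, while DateTime64(6) preserves it. The scale argument counts decimal digits of sub-second precision:

    #include <DataTypes/DataTypeDateTime64.h>
    #include <memory>

    /// Scale 6 = ticks of 10^-6 s (microseconds), matching PostgreSQL timestamps.
    auto timestamp_type = std::make_shared<DB::DataTypeDateTime64>(6);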
View File

@@ -0,0 +1,215 @@
#include "DatabaseSQLite.h"
#if USE_SQLITE
#include <common/logger_useful.h>
#include <DataTypes/DataTypesNumber.h>
#include <DataTypes/DataTypeNullable.h>
#include <Databases/SQLite/fetchSQLiteTableStructure.h>
#include <Parsers/ASTCreateQuery.h>
#include <Parsers/ASTColumnDeclaration.h>
#include <Parsers/ASTFunction.h>
#include <Parsers/ASTIdentifier.h>
#include <Parsers/ASTLiteral.h>
#include <Interpreters/Context.h>
#include <Storages/StorageSQLite.h>
#include <Databases/SQLite/SQLiteUtils.h>
namespace DB
{
namespace ErrorCodes
{
extern const int SQLITE_ENGINE_ERROR;
extern const int UNKNOWN_TABLE;
}
DatabaseSQLite::DatabaseSQLite(
ContextPtr context_,
const ASTStorage * database_engine_define_,
const String & database_path_)
: IDatabase("SQLite")
, WithContext(context_->getGlobalContext())
, database_engine_define(database_engine_define_->clone())
, log(&Poco::Logger::get("DatabaseSQLite"))
{
sqlite_db = openSQLiteDB(database_path_, context_);
}
bool DatabaseSQLite::empty() const
{
std::lock_guard<std::mutex> lock(mutex);
return fetchTablesList().empty();
}
DatabaseTablesIteratorPtr DatabaseSQLite::getTablesIterator(ContextPtr local_context, const IDatabase::FilterByNameFunction &)
{
std::lock_guard<std::mutex> lock(mutex);
Tables tables;
auto table_names = fetchTablesList();
for (const auto & table_name : table_names)
tables[table_name] = fetchTable(table_name, local_context, true);
return std::make_unique<DatabaseTablesSnapshotIterator>(tables, database_name);
}
std::unordered_set<std::string> DatabaseSQLite::fetchTablesList() const
{
std::unordered_set<String> tables;
std::string query = "SELECT name FROM sqlite_master "
"WHERE type = 'table' AND name NOT LIKE 'sqlite_%'";
auto callback_get_data = [](void * res, int col_num, char ** data_by_col, char ** /* col_names */) -> int
{
for (int i = 0; i < col_num; ++i)
static_cast<std::unordered_set<std::string> *>(res)->insert(data_by_col[i]);
return 0;
};
char * err_message = nullptr;
int status = sqlite3_exec(sqlite_db.get(), query.c_str(), callback_get_data, &tables, &err_message);
if (status != SQLITE_OK)
{
String err_msg(err_message);
sqlite3_free(err_message);
throw Exception(ErrorCodes::SQLITE_ENGINE_ERROR,
"Cannot fetch sqlite database tables. Error status: {}. Message: {}",
status, err_msg);
}
return tables;
}
bool DatabaseSQLite::checkSQLiteTable(const String & table_name) const
{
const String query = fmt::format("SELECT name FROM sqlite_master WHERE type='table' AND name='{}';", table_name);
auto callback_get_data = [](void * res, int, char **, char **) -> int
{
*(static_cast<int *>(res)) += 1;
return 0;
};
int count = 0;
char * err_message = nullptr;
int status = sqlite3_exec(sqlite_db.get(), query.c_str(), callback_get_data, &count, &err_message);
if (status != SQLITE_OK)
{
String err_msg(err_message);
sqlite3_free(err_message);
throw Exception(ErrorCodes::SQLITE_ENGINE_ERROR,
"Cannot check sqlite table. Error status: {}. Message: {}",
status, err_msg);
}
return (count != 0);
}
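/// Illustrative aside, not from the original file: interpolating the table name
/// into the SQL string is only safe for trusted identifiers; binding it as a
/// parameter sidesteps quoting entirely, e.g.:
///
///     sqlite3_stmt * stmt = nullptr;
///     sqlite3_prepare_v2(db, "SELECT name FROM sqlite_master WHERE type='table' AND name=?;", -1, &stmt, nullptr);
///     sqlite3_bind_text(stmt, 1, table_name.c_str(), -1, SQLITE_TRANSIENT);
///     bool exists = sqlite3_step(stmt) == SQLITE_ROW;
///     sqlite3_finalize(stmt);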
bool DatabaseSQLite::isTableExist(const String & table_name, ContextPtr) const
{
std::lock_guard<std::mutex> lock(mutex);
return checkSQLiteTable(table_name);
}
StoragePtr DatabaseSQLite::tryGetTable(const String & table_name, ContextPtr local_context) const
{
std::lock_guard<std::mutex> lock(mutex);
return fetchTable(table_name, local_context, false);
}
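/// Returns nullptr when the table is absent or its structure cannot be fetched;
/// `table_checked` lets callers that have already verified existence skip the probe.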
StoragePtr DatabaseSQLite::fetchTable(const String & table_name, ContextPtr local_context, bool table_checked) const
{
if (!table_checked && !checkSQLiteTable(table_name))
return StoragePtr{};
auto columns = fetchSQLiteTableStructure(sqlite_db.get(), table_name);
if (!columns)
return StoragePtr{};
auto storage = StorageSQLite::create(
StorageID(database_name, table_name),
sqlite_db,
table_name,
ColumnsDescription{*columns},
ConstraintsDescription{},
local_context);
return storage;
}
ASTPtr DatabaseSQLite::getCreateDatabaseQuery() const
{
const auto & create_query = std::make_shared<ASTCreateQuery>();
create_query->database = getDatabaseName();
create_query->set(create_query->storage, database_engine_define);
return create_query;
}
ASTPtr DatabaseSQLite::getCreateTableQueryImpl(const String & table_name, ContextPtr local_context, bool throw_on_error) const
{
auto storage = fetchTable(table_name, local_context, false);
if (!storage)
{
if (throw_on_error)
throw Exception(ErrorCodes::UNKNOWN_TABLE, "SQLite table {}.{} does not exist",
database_name, table_name);
return nullptr;
}
auto create_table_query = std::make_shared<ASTCreateQuery>();
auto table_storage_define = database_engine_define->clone();
create_table_query->set(create_table_query->storage, table_storage_define);
auto columns_declare_list = std::make_shared<ASTColumns>();
auto columns_expression_list = std::make_shared<ASTExpressionList>();
columns_declare_list->set(columns_declare_list->columns, columns_expression_list);
create_table_query->set(create_table_query->columns_list, columns_declare_list);
/// init create query.
auto table_id = storage->getStorageID();
create_table_query->table = table_id.table_name;
create_table_query->database = table_id.database_name;
auto metadata_snapshot = storage->getInMemoryMetadataPtr();
for (const auto & column_type_and_name : metadata_snapshot->getColumns().getOrdinary())
{
const auto & column_declaration = std::make_shared<ASTColumnDeclaration>();
column_declaration->name = column_type_and_name.name;
column_declaration->type = getColumnDeclaration(column_type_and_name.type);
columns_expression_list->children.emplace_back(column_declaration);
}
ASTStorage * ast_storage = table_storage_define->as<ASTStorage>();
auto storage_engine_arguments = ast_storage->engine->arguments;
/// Add table_name to the engine arguments, so the cloned definition reads SQLite('<database_path>', '<table_name>').
storage_engine_arguments->children.insert(storage_engine_arguments->children.begin() + 1, std::make_shared<ASTLiteral>(table_id.table_name));
return create_table_query;
}
ASTPtr DatabaseSQLite::getColumnDeclaration(const DataTypePtr & data_type) const
{
WhichDataType which(data_type);
if (which.isNullable())
return makeASTFunction("Nullable", getColumnDeclaration(typeid_cast<const DataTypeNullable *>(data_type.get())->getNestedType()));
return std::make_shared<ASTIdentifier>(data_type->getName());
}
}
#endif

Some files were not shown because too many files have changed in this diff.