diff --git a/.clang-format b/.clang-format index 8416ba8ce72..c8b9672dc7d 100644 --- a/.clang-format +++ b/.clang-format @@ -52,7 +52,7 @@ IncludeCategories: ReflowComments: false AlignEscapedNewlinesLeft: false AlignEscapedNewlines: DontAlign -AlignTrailingComments: true +AlignTrailingComments: false # Not changed: AccessModifierOffset: -4 diff --git a/.clang-tidy b/.clang-tidy new file mode 100644 index 00000000000..3c222fbf8da --- /dev/null +++ b/.clang-tidy @@ -0,0 +1,205 @@ +Checks: '-*, + misc-throw-by-value-catch-by-reference, + misc-misplaced-const, + misc-unconventional-assign-operator, + misc-redundant-expression, + misc-static-assert, + misc-unconventional-assign-operator, + misc-uniqueptr-reset-release, + misc-unused-alias-decls, + misc-unused-parameters, + misc-unused-using-decls, + + modernize-avoid-bind, + modernize-loop-convert, + modernize-make-shared, + modernize-make-unique, + modernize-raw-string-literal, + modernize-redundant-void-arg, + modernize-replace-auto-ptr, + modernize-replace-random-shuffle, + modernize-use-bool-literals, + modernize-use-nullptr, + modernize-use-using, + modernize-use-equals-default, + modernize-use-equals-delete, + + performance-faster-string-find, + performance-for-range-copy, + performance-implicit-conversion-in-loop, + performance-inefficient-algorithm, + performance-inefficient-vector-operation, + performance-move-constructor-init, + performance-no-automatic-move, + performance-trivially-destructible, + performance-unnecessary-copy-initialization, + + readability-avoid-const-params-in-decls, + readability-const-return-type, + readability-container-size-empty, + readability-convert-member-functions-to-static, + readability-delete-null-pointer, + readability-deleted-default, + readability-make-member-function-const, + readability-misplaced-array-index, + readability-non-const-parameter, + readability-qualified-auto, + readability-redundant-access-specifiers, + readability-redundant-control-flow, + readability-redundant-function-ptr-dereference, + readability-redundant-smartptr-get, + readability-redundant-string-cstr, + readability-redundant-string-init, + readability-static-definition-in-anonymous-namespace, + readability-string-compare, + readability-uniqueptr-delete-release, + readability-redundant-member-init, + readability-simplify-subscript-expr, + readability-simplify-boolean-expr, + readability-inconsistent-declaration-parameter-name, + readability-identifier-naming, + + bugprone-undelegated-constructor, + bugprone-argument-comment, + bugprone-bad-signal-to-kill-thread, + bugprone-bool-pointer-implicit-conversion, + bugprone-copy-constructor-init, + bugprone-dangling-handle, + bugprone-forward-declaration-namespace, + bugprone-fold-init-type, + bugprone-inaccurate-erase, + bugprone-incorrect-roundings, + bugprone-infinite-loop, + bugprone-integer-division, + bugprone-macro-parentheses, + bugprone-macro-repeated-side-effects, + bugprone-misplaced-operator-in-strlen-in-alloc, + bugprone-misplaced-pointer-arithmetic-in-alloc, + bugprone-misplaced-widening-cast, + bugprone-move-forwarding-reference, + bugprone-multiple-statement-macro, + bugprone-parent-virtual-call, + bugprone-posix-return, + bugprone-reserved-identifier, + bugprone-signed-char-misuse, + bugprone-sizeof-container, + bugprone-sizeof-expression, + bugprone-string-constructor, + bugprone-string-integer-assignment, + bugprone-string-literal-with-embedded-nul, + bugprone-suspicious-enum-usage, + bugprone-suspicious-include, + bugprone-suspicious-memset-usage, + 
bugprone-suspicious-missing-comma, + bugprone-suspicious-string-compare, + bugprone-swapped-arguments, + bugprone-terminating-continue, + bugprone-throw-keyword-missing, + bugprone-too-small-loop-variable, + bugprone-undefined-memory-manipulation, + bugprone-unhandled-self-assignment, + bugprone-unused-raii, + bugprone-unused-return-value, + bugprone-use-after-move, + bugprone-virtual-near-miss, + + cert-dcl21-cpp, + cert-dcl50-cpp, + cert-env33-c, + cert-err34-c, + cert-err52-cpp, + cert-flp30-c, + cert-mem57-cpp, + cert-msc50-cpp, + cert-oop58-cpp, + + google-build-explicit-make-pair, + google-build-namespaces, + google-default-arguments, + google-explicit-constructor, + google-readability-casting, + google-readability-avoid-underscore-in-googletest-name, + google-runtime-int, + google-runtime-operator, + + hicpp-exception-baseclass, + + clang-analyzer-core.CallAndMessage, + clang-analyzer-core.DivideZero, + clang-analyzer-core.NonNullParamChecker, + clang-analyzer-core.NullDereference, + clang-analyzer-core.StackAddressEscape, + clang-analyzer-core.UndefinedBinaryOperatorResult, + clang-analyzer-core.VLASize, + clang-analyzer-core.uninitialized.ArraySubscript, + clang-analyzer-core.uninitialized.Assign, + clang-analyzer-core.uninitialized.Branch, + clang-analyzer-core.uninitialized.CapturedBlockVariable, + clang-analyzer-core.uninitialized.UndefReturn, + clang-analyzer-cplusplus.InnerPointer, + clang-analyzer-cplusplus.NewDelete, + clang-analyzer-cplusplus.NewDeleteLeaks, + clang-analyzer-cplusplus.PlacementNewChecker, + clang-analyzer-cplusplus.SelfAssignment, + clang-analyzer-deadcode.DeadStores, + clang-analyzer-optin.cplusplus.VirtualCall, + clang-analyzer-security.insecureAPI.UncheckedReturn, + clang-analyzer-security.insecureAPI.bcmp, + clang-analyzer-security.insecureAPI.bcopy, + clang-analyzer-security.insecureAPI.bzero, + clang-analyzer-security.insecureAPI.getpw, + clang-analyzer-security.insecureAPI.gets, + clang-analyzer-security.insecureAPI.mkstemp, + clang-analyzer-security.insecureAPI.mktemp, + clang-analyzer-security.insecureAPI.rand, + clang-analyzer-security.insecureAPI.strcpy, + clang-analyzer-unix.Malloc, + clang-analyzer-unix.MallocSizeof, + clang-analyzer-unix.MismatchedDeallocator, + clang-analyzer-unix.Vfork, + clang-analyzer-unix.cstring.BadSizeArg, + clang-analyzer-unix.cstring.NullArg, + + boost-use-to-string, +' +WarningsAsErrors: '*' + +CheckOptions: + - key: readability-identifier-naming.ClassCase + value: CamelCase + - key: readability-identifier-naming.EnumCase + value: CamelCase + - key: readability-identifier-naming.LocalVariableCase + value: lower_case + - key: readability-identifier-naming.StaticConstantCase + value: aNy_CasE + - key: readability-identifier-naming.MemberCase + value: lower_case + - key: readability-identifier-naming.PrivateMemberPrefix + value: '' + - key: readability-identifier-naming.ProtectedMemberPrefix + value: '' + - key: readability-identifier-naming.PublicMemberCase + value: lower_case + - key: readability-identifier-naming.MethodCase + value: camelBack + - key: readability-identifier-naming.PrivateMethodPrefix + value: '' + - key: readability-identifier-naming.ProtectedMethodPrefix + value: '' + - key: readability-identifier-naming.ParameterPackCase + value: lower_case + - key: readability-identifier-naming.StructCase + value: CamelCase + - key: readability-identifier-naming.TemplateTemplateParameterCase + value: CamelCase + - key: readability-identifier-naming.TemplateUsingCase + value: lower_case + - key: 
readability-identifier-naming.TypeTemplateParameterCase + value: CamelCase + - key: readability-identifier-naming.TypedefCase + value: CamelCase + - key: readability-identifier-naming.UnionCase + value: CamelCase + - key: readability-identifier-naming.UsingCase + value: CamelCase diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 8e502c0b36f..be66f21b838 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1,2 +1,3 @@ docs/* @ClickHouse/docs docs/zh/* @ClickHouse/docs-zh +website/* @ClickHouse/docs diff --git a/.github/ISSUE_TEMPLATE/backward-compatibility.md b/.github/ISSUE_TEMPLATE/backward-compatibility.md new file mode 100644 index 00000000000..8f87197e73d --- /dev/null +++ b/.github/ISSUE_TEMPLATE/backward-compatibility.md @@ -0,0 +1,27 @@ +--- +name: Backward compatibility issue +about: Create a report to help us improve ClickHouse +title: '' +labels: backward compatibility +assignees: '' + +--- + +(you don't have to strictly follow this form) + +**Describe the issue** +A clear and concise description of what works not as it is supposed to. + +**How to reproduce** +* Which ClickHouse server versions are incompatible +* Which interface to use, if matters +* Non-default settings, if any +* `CREATE TABLE` statements for all tables involved +* Sample data for all these tables, use [clickhouse-obfuscator](https://github.com/ClickHouse/ClickHouse/blob/master/programs/obfuscator/Obfuscator.cpp#L42-L80) if necessary +* Queries to run that lead to unexpected result + +**Error message and/or stacktrace** +If applicable, add screenshots to help explain your problem. + +**Additional context** +Add any other context about the problem here. diff --git a/.github/ISSUE_TEMPLATE/bug-report-or-unexpected-behaviour.md b/.github/ISSUE_TEMPLATE/bug-report-or-unexpected-behaviour.md deleted file mode 100644 index 542442e2856..00000000000 --- a/.github/ISSUE_TEMPLATE/bug-report-or-unexpected-behaviour.md +++ /dev/null @@ -1,30 +0,0 @@ ---- -name: Bug report or unexpected behaviour -about: Create a report to help us improve ClickHouse -title: '' -labels: bug -assignees: '' - ---- - -(you don't have to strictly follow this form) - -**Describe the bug or unexpected behaviour** -A clear and concise description of what works not as it is supposed to. - -**How to reproduce** -* Which ClickHouse server version to use -* Which interface to use, if matters -* Non-default settings, if any -* `CREATE TABLE` statements for all tables involved -* Sample data for all these tables, use [clickhouse-obfuscator](https://github.com/ClickHouse/ClickHouse/blob/master/dbms/programs/obfuscator/Obfuscator.cpp#L42-L80) if necessary -* Queries to run that lead to unexpected result - -**Expected behavior** -A clear and concise description of what you expected to happen. - -**Error message and/or stacktrace** -If applicable, add screenshots to help explain your problem. - -**Additional context** -Add any other context about the problem here. diff --git a/.github/ISSUE_TEMPLATE/bug-report.md b/.github/ISSUE_TEMPLATE/bug-report.md new file mode 100644 index 00000000000..1445af4b051 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug-report.md @@ -0,0 +1,30 @@ +--- +name: Bug report +about: Create a report to help us improve ClickHouse +title: '' +labels: bug +assignees: '' + +--- + +(you don't have to strictly follow this form) + +**Describe the bug** +A clear and concise description of what works not as it is supposed to. 
+ +**How to reproduce** +* Which ClickHouse server version to use +* Which interface to use, if matters +* Non-default settings, if any +* `CREATE TABLE` statements for all tables involved +* Sample data for all these tables, use [clickhouse-obfuscator](https://github.com/ClickHouse/ClickHouse/blob/master/programs/obfuscator/Obfuscator.cpp#L42-L80) if necessary +* Queries to run that lead to unexpected result + +**Expected behavior** +A clear and concise description of what you expected to happen. + +**Error message and/or stacktrace** +If applicable, add screenshots to help explain your problem. + +**Additional context** +Add any other context about the problem here. diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature-request.md similarity index 100% rename from .github/ISSUE_TEMPLATE/feature_request.md rename to .github/ISSUE_TEMPLATE/feature-request.md diff --git a/.github/ISSUE_TEMPLATE/performance-issue.md b/.github/ISSUE_TEMPLATE/performance-issue.md index 96c8cb77afb..d0e549039a6 100644 --- a/.github/ISSUE_TEMPLATE/performance-issue.md +++ b/.github/ISSUE_TEMPLATE/performance-issue.md @@ -17,7 +17,7 @@ What exactly works slower than expected? * Which interface to use, if matters * Non-default settings, if any * `CREATE TABLE` statements for all tables involved -* Sample data for all these tables, use [clickhouse-obfuscator](https://github.com/ClickHouse/ClickHouse/blob/master/dbms/programs/obfuscator/Obfuscator.cpp#L42-L80) if necessary +* Sample data for all these tables, use [clickhouse-obfuscator](https://github.com/ClickHouse/ClickHouse/blob/master/programs/obfuscator/Obfuscator.cpp#L42-L80) if necessary * Queries to run that lead to slow performance **Expected performance** diff --git a/.github/ISSUE_TEMPLATE/unexpected-behaviour.md b/.github/ISSUE_TEMPLATE/unexpected-behaviour.md new file mode 100644 index 00000000000..27ab217ca33 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/unexpected-behaviour.md @@ -0,0 +1,30 @@ +--- +name: Unexpected behaviour +about: Create a report to help us improve ClickHouse +title: '' +labels: unexpected behaviour +assignees: '' + +--- + +(you don't have to strictly follow this form) + +**Describe the unexpected behaviour** +A clear and concise description of what works not as it is supposed to. + +**How to reproduce** +* Which ClickHouse server version to use +* Which interface to use, if matters +* Non-default settings, if any +* `CREATE TABLE` statements for all tables involved +* Sample data for all these tables, use [clickhouse-obfuscator](https://github.com/ClickHouse/ClickHouse/blob/master/programs/obfuscator/Obfuscator.cpp#L42-L80) if necessary +* Queries to run that lead to unexpected result + +**Expected behavior** +A clear and concise description of what you expected to happen. + +**Error message and/or stacktrace** +If applicable, add screenshots to help explain your problem. + +**Additional context** +Add any other context about the problem here. diff --git a/.github/ISSUE_TEMPLATE/usability-issue.md b/.github/ISSUE_TEMPLATE/usability-issue.md new file mode 100644 index 00000000000..6a084a72619 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/usability-issue.md @@ -0,0 +1,30 @@ +--- +name: Usability issue +about: Create a report to help us improve ClickHouse +title: '' +labels: usability +assignees: '' + +--- + +(you don't have to strictly follow this form) + +**Describe the issue** +A clear and concise description of what works not as it is supposed to. 
+ +**How to reproduce** +* Which ClickHouse server version to use +* Which interface to use, if matters +* Non-default settings, if any +* `CREATE TABLE` statements for all tables involved +* Sample data for all these tables, use [clickhouse-obfuscator](https://github.com/ClickHouse/ClickHouse/blob/master/programs/obfuscator/Obfuscator.cpp#L42-L80) if necessary +* Queries to run that lead to unexpected result + +**Expected behavior** +A clear and concise description of what you expected to happen. + +**Error message and/or stacktrace** +If applicable, add screenshots to help explain your problem. + +**Additional context** +Add any other context about the problem here. diff --git a/.gitignore b/.gitignore index 1e6bb1716ec..6bd57911ac8 100644 --- a/.gitignore +++ b/.gitignore @@ -12,11 +12,15 @@ /build /build_* /build-* + /docs/build /docs/publish /docs/edit /docs/website -/docs/tools/venv/ +/docs/venv +/docs/tools/venv +/docs/tools/translate/venv +/docs/tools/translate/output.md /docs/en/single.md /docs/ru/single.md /docs/zh/single.md @@ -54,143 +58,9 @@ cmake-build-* __pycache__ *.pytest_cache -# ignore generated files -*-metrika-yandex - test.cpp -utils/compressor/compressor -utils/corrector_utf8/corrector_utf8 -utils/iotest/iotest -utils/iotest/iotest_aio -utils/iotest/iotest_nonblock -utils/config-processor/config-processor CPackConfig.cmake CPackSourceConfig.cmake -contrib/libpoco/Poco/ -contrib/libpoco/bin/ -contrib/libpoco/cmake_uninstall.cmake -contrib/libre2/re2_st/ -dbms/src/Client/clickhouse-benchmark -dbms/src/Client/clickhouse-client -dbms/src/Client/tests/test-connect -dbms/src/Common/tests/arena_with_free_lists -dbms/src/Common/tests/auto_array -dbms/src/Common/tests/compact_array -dbms/src/Common/tests/hash_table -dbms/src/Common/tests/hashes_test -dbms/src/Common/tests/int_hashes_perf -dbms/src/Common/tests/lru_cache -dbms/src/Common/tests/parallel_aggregation -dbms/src/Common/tests/parallel_aggregation2 -dbms/src/Common/tests/radix_sort -dbms/src/Common/tests/shell_command_test -dbms/src/Common/tests/simple_cache -dbms/src/Common/tests/sip_hash -dbms/src/Common/tests/sip_hash_perf -dbms/src/Common/tests/small_table -dbms/src/Core/tests/exception -dbms/src/Core/tests/field -dbms/src/Core/tests/rvo_test -dbms/src/Core/tests/string_pool -dbms/src/DataStreams/tests/aggregating_stream -dbms/src/DataStreams/tests/block_tab_separated_streams -dbms/src/DataStreams/tests/collapsing_sorted_stream -dbms/src/DataStreams/tests/expression_stream -dbms/src/DataStreams/tests/filter_stream -dbms/src/DataStreams/tests/filter_stream_hitlog -dbms/src/DataStreams/tests/fork_streams -dbms/src/DataStreams/tests/glue_streams -dbms/src/DataStreams/tests/json_streams -dbms/src/DataStreams/tests/native_streams -dbms/src/DataStreams/tests/sorting_stream -dbms/src/DataStreams/tests/tab_separated_streams -dbms/src/DataStreams/tests/union_stream -dbms/src/DataStreams/tests/union_stream2 -dbms/src/DataTypes/tests/data_type_string -dbms/src/DataTypes/tests/data_types_number_fixed -dbms/src/Functions/tests/functions_arithmetic -dbms/src/Functions/tests/logical_functions_performance -dbms/src/Functions/tests/number_traits -dbms/src/IO/tests/async_write -dbms/src/IO/tests/cached_compressed_read_buffer -dbms/src/IO/tests/compressed_buffer -dbms/src/IO/tests/hashing_read_buffer -dbms/src/IO/tests/hashing_write_buffer -dbms/src/IO/tests/io_and_exceptions -dbms/src/IO/tests/io_operators -dbms/src/IO/tests/mempbrk -dbms/src/IO/tests/o_direct_and_dirty_pages -dbms/src/IO/tests/parse_int_perf 
-dbms/src/IO/tests/parse_int_perf2 -dbms/src/IO/tests/read_buffer -dbms/src/IO/tests/read_buffer_aio -dbms/src/IO/tests/read_buffer_perf -dbms/src/IO/tests/read_escaped_string -dbms/src/IO/tests/read_float_perf -dbms/src/IO/tests/read_write_int -dbms/src/IO/tests/valid_utf8 -dbms/src/IO/tests/valid_utf8_perf -dbms/src/IO/tests/var_uint -dbms/src/IO/tests/write_buffer -dbms/src/IO/tests/write_buffer_aio -dbms/src/IO/tests/write_buffer_perf -dbms/src/Interpreters/tests/address_patterns -dbms/src/Interpreters/tests/aggregate -dbms/src/Interpreters/tests/compiler_test -dbms/src/Interpreters/tests/create_query -dbms/src/Interpreters/tests/expression -dbms/src/Interpreters/tests/expression_analyzer -dbms/src/Interpreters/tests/hash_map -dbms/src/Interpreters/tests/hash_map2 -dbms/src/Interpreters/tests/hash_map3 -dbms/src/Interpreters/tests/hash_map_string -dbms/src/Interpreters/tests/hash_map_string_2 -dbms/src/Interpreters/tests/hash_map_string_3 -dbms/src/Interpreters/tests/hash_map_string_small -dbms/src/Interpreters/tests/in_join_subqueries_preprocessor -dbms/src/Interpreters/tests/logical_expressions_optimizer -dbms/src/Interpreters/tests/select_query -dbms/src/Interpreters/tests/two_level_hash_map -dbms/src/Interpreters/tests/users -dbms/src/Parsers/tests/create_parser -dbms/src/Parsers/tests/select_parser -dbms/src/Server/clickhouse-server -dbms/src/Server/clickhouse-server.init -dbms/src/Storages/tests/hit_log -dbms/src/Storages/tests/merge_tree -dbms/src/Storages/tests/part_checker -dbms/src/Storages/tests/part_name -dbms/src/Storages/tests/pk_condition -dbms/src/Storages/tests/seek_speed_test -dbms/src/Storages/tests/storage_log -dbms/src/Storages/tests/system_numbers -libs/libcommon/src/revision.h -libs/libcommon/src/tests/date_lut2 -libs/libcommon/src/tests/date_lut3 -libs/libcommon/src/tests/date_lut4 -libs/libcommon/src/tests/date_lut_init -libs/libcommon/src/tests/multi_version -libs/libmysqlxx/src/tests/failover -libs/libmysqlxx/src/tests/mysqlxx_test -libs/libzkutil/src/tests/zkutil_expiration_test -libs/libzkutil/src/tests/zkutil_test -libs/libzkutil/src/tests/zkutil_test_async -libs/libzkutil/src/tests/zkutil_test_commands -libs/libzkutil/src/tests/zkutil_test_lock -libs/libzkutil/src/tests/zkutil_zookeeper_holder -utils/zookeeper-create-entry-to-download-part/zookeeper-create-entry-to-download-part -utils/zookeeper-dump-tree/zookeeper-dump-tree -utils/zookeeper-remove-by-list/zookeeper-remove-by-list -dbms/src/Storages/tests/remove_symlink_directory -libs/libcommon/src/tests/json_test -utils/compressor/zstd_test -utils/wikistat-loader/wikistat-loader -dbms/src/Common/tests/pod_array - -dbms/src/Server/data/* -dbms/src/Server/metadata/* -dbms/src/Server/status -config-9001.xml *-preprocessed.xml @@ -238,9 +108,6 @@ website/package-lock.json .DS_Store */.DS_Store -# Ignore files for locally disabled tests -/dbms/tests/queries/**/*.disabled - # cquery cache /.cquery-cache diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml deleted file mode 100644 index d9f207a06d4..00000000000 --- a/.gitlab-ci.yml +++ /dev/null @@ -1,41 +0,0 @@ -stages: - - builder - - build -variables: - GIT_SUBMODULE_STRATEGY: recursive -builder: - stage: builder - when: manual - services: - - docker:dind - script: - - docker info - - apk add --no-cache git curl binutils ca-certificates - - docker login -u gitlab -p nopasswd $CI_REGISTRY - - docker build -t yandex/clickhouse-builder ./docker/builder - - docker tag yandex/clickhouse-builder $CI_REGISTRY/yandex/clickhouse-builder - - docker push 
$CI_REGISTRY/yandex/clickhouse-builder - tags: - docker -build: - stage: build - when: manual - services: - - docker:dind - script: - - apk add --no-cache git curl binutils ca-certificates - - git submodule sync --recursive - - git submodule update --init --recursive - - docker info - - docker login -u gitlab -p nopasswd $CI_REGISTRY - - docker pull $CI_REGISTRY/yandex/clickhouse-builder - - docker run --rm --volumes-from "${HOSTNAME}-build" --workdir "${CI_PROJECT_DIR}" --env CI_PROJECT_DIR=${CI_PROJECT_DIR} $CI_REGISTRY/yandex/clickhouse-builder /build_gitlab_ci.sh - # You can upload your binary to nexus - - curl -v --keepalive-time 60 --keepalive --user "$NEXUS_USER:$NEXUS_PASSWORD" -XPUT "http://$NEXUS_HOST/repository/binaries/$CI_PROJECT_NAME" --upload-file ./dbms/src/Server/clickhouse - # Or download artifacts from gitlab - artifacts: - paths: - - ./dbms/src/Server/clickhouse - expire_in: 1 day - tags: - docker \ No newline at end of file diff --git a/.gitmodules b/.gitmodules index 29b2ada63ea..f7a16b84d37 100644 --- a/.gitmodules +++ b/.gitmodules @@ -13,7 +13,7 @@ url = https://github.com/edenhill/librdkafka.git [submodule "contrib/cctz"] path = contrib/cctz - url = https://github.com/google/cctz.git + url = https://github.com/ClickHouse-Extras/cctz.git [submodule "contrib/zlib-ng"] path = contrib/zlib-ng url = https://github.com/ClickHouse-Extras/zlib-ng.git @@ -110,16 +110,16 @@ branch = v1.25.0 [submodule "contrib/aws"] path = contrib/aws - url = https://github.com/aws/aws-sdk-cpp.git + url = https://github.com/ClickHouse-Extras/aws-sdk-cpp.git [submodule "aws-c-event-stream"] path = contrib/aws-c-event-stream - url = https://github.com/awslabs/aws-c-event-stream.git + url = https://github.com/ClickHouse-Extras/aws-c-event-stream.git [submodule "aws-c-common"] path = contrib/aws-c-common - url = https://github.com/awslabs/aws-c-common.git + url = https://github.com/ClickHouse-Extras/aws-c-common.git [submodule "aws-checksums"] path = contrib/aws-checksums - url = https://github.com/awslabs/aws-checksums.git + url = https://github.com/ClickHouse-Extras/aws-checksums.git [submodule "contrib/curl"] path = contrib/curl url = https://github.com/curl/curl.git @@ -148,3 +148,12 @@ path = contrib/avro url = https://github.com/ClickHouse-Extras/avro.git ignore = untracked +[submodule "contrib/msgpack-c"] + path = contrib/msgpack-c + url = https://github.com/msgpack/msgpack-c +[submodule "contrib/libcpuid"] + path = contrib/libcpuid + url = https://github.com/ClickHouse-Extras/libcpuid.git +[submodule "contrib/openldap"] + path = contrib/openldap + url = https://github.com/openldap/openldap.git diff --git a/CHANGELOG.md b/CHANGELOG.md index 7a8a2c93722..885b4fc656f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,8 +1,497 @@ +## ClickHouse release v20.3 + +### ClickHouse release v20.3.8.53, 2020-04-23 + +#### Bug Fix +* Fixed wrong behaviour of datetime functions for timezones that have switched between positive and negative offsets from UTC (e.g. Pacific/Kiritimati). This fixes [#7202](https://github.com/ClickHouse/ClickHouse/issues/7202) [#10369](https://github.com/ClickHouse/ClickHouse/pull/10369) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Fix possible segfault with `distributed_group_by_no_merge` enabled (introduced in 20.3.7.46 by [#10131](https://github.com/ClickHouse/ClickHouse/issues/10131)).
[#10399](https://github.com/ClickHouse/ClickHouse/pull/10399) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) +* Fix wrong flattening of `Array(Tuple(...))` data types. This fixes [#10259](https://github.com/ClickHouse/ClickHouse/issues/10259) [#10390](https://github.com/ClickHouse/ClickHouse/pull/10390) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Drop disks reservation in Aggregator. This fixes bug in disk space reservation, which may cause big external aggregation to fail even if it could be completed successfully [#10375](https://github.com/ClickHouse/ClickHouse/pull/10375) ([Azat Khuzhin](https://github.com/azat)) +* Fixed `DROP` vs `OPTIMIZE` race in `ReplicatedMergeTree`. `DROP` could leave some garbage in replica path in ZooKeeper if there was concurrent `OPTIMIZE` query. [#10312](https://github.com/ClickHouse/ClickHouse/pull/10312) ([tavplubix](https://github.com/tavplubix)) +* Fix bug when server cannot attach table after column default was altered. [#10441](https://github.com/ClickHouse/ClickHouse/pull/10441) ([alesapin](https://github.com/alesapin)) +* Do not remove metadata directory when attach database fails before loading tables. [#10442](https://github.com/ClickHouse/ClickHouse/pull/10442) ([Winter Zhang](https://github.com/zhang2014)) +* Fixed several bugs when some data was inserted with quorum, then deleted somehow (DROP PARTITION, TTL) and this led to stuck INSERTs or false-positive exceptions in SELECTs. This fixes [#9946](https://github.com/ClickHouse/ClickHouse/issues/9946) [#10188](https://github.com/ClickHouse/ClickHouse/pull/10188) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)) +* Fix possible `Pipeline stuck` error in `ConcatProcessor` which could have happened in remote query. [#10381](https://github.com/ClickHouse/ClickHouse/pull/10381) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) +* Fixed wrong behavior in HashTable that caused compilation error when trying to read HashMap from buffer. [#10386](https://github.com/ClickHouse/ClickHouse/pull/10386) ([palasonic1](https://github.com/palasonic1)) +* Allow to use `count(*)` with multiple JOINs. Fixes [#9853](https://github.com/ClickHouse/ClickHouse/issues/9853) [#10291](https://github.com/ClickHouse/ClickHouse/pull/10291) ([Artem Zuikov](https://github.com/4ertus2)) +* Prefer `fallback_to_stale_replicas` over `skip_unavailable_shards`, otherwise when both settings specified and there are no up-to-date replicas the query will fail (patch from @alex-zaitsev). Fixes: [#2564](https://github.com/ClickHouse/ClickHouse/issues/2564). [#10422](https://github.com/ClickHouse/ClickHouse/pull/10422) ([Azat Khuzhin](https://github.com/azat)) +* Fix the issue when a query with ARRAY JOIN, ORDER BY and LIMIT may return incomplete result. This fixes [#10226](https://github.com/ClickHouse/ClickHouse/issues/10226). Author: [Vadim Plakhtinskiy](https://github.com/VadimPlh). [#10427](https://github.com/ClickHouse/ClickHouse/pull/10427) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Check the number and type of arguments when creating BloomFilter index [#9623](https://github.com/ClickHouse/ClickHouse/issues/9623) [#10431](https://github.com/ClickHouse/ClickHouse/pull/10431) ([Winter Zhang](https://github.com/zhang2014)) + +#### Performance Improvement +* Improved performance of queries with explicitly defined sets on the right side of the `IN` operator and tuples on the left side. This fixes a performance regression in version 20.3.
[#9740](https://github.com/ClickHouse/ClickHouse/pull/9740), [#10385](https://github.com/ClickHouse/ClickHouse/pull/10385) ([Anton Popov](https://github.com/CurtizJ)) + +### ClickHouse release v20.3.7.46, 2020-04-17 + +#### Bug Fix + +* Fix `Logical error: CROSS JOIN has expressions` error for queries that mix comma joins and named joins. [#10311](https://github.com/ClickHouse/ClickHouse/pull/10311) ([Artem Zuikov](https://github.com/4ertus2)). +* Fix queries with `max_bytes_before_external_group_by`. [#10302](https://github.com/ClickHouse/ClickHouse/pull/10302) ([Artem Zuikov](https://github.com/4ertus2)). +* Fix move-to-prewhere optimization in presence of arrayJoin functions (in certain cases). This fixes [#10092](https://github.com/ClickHouse/ClickHouse/issues/10092). [#10195](https://github.com/ClickHouse/ClickHouse/pull/10195) ([alexey-milovidov](https://github.com/alexey-milovidov)). +* Add the ability to relax the restriction on non-deterministic functions usage in mutations with `allow_nondeterministic_mutations` setting. [#10186](https://github.com/ClickHouse/ClickHouse/pull/10186) ([filimonov](https://github.com/filimonov)). + +### ClickHouse release v20.3.6.40, 2020-04-16 + +#### New Feature + +* Added function `isConstant`. This function checks whether its argument is a constant expression and returns 1 or 0. It is intended for development, debugging and demonstration purposes. [#10198](https://github.com/ClickHouse/ClickHouse/pull/10198) ([alexey-milovidov](https://github.com/alexey-milovidov)). + +#### Bug Fix + +* Fix error `Pipeline stuck` with `max_rows_to_group_by` and `group_by_overflow_mode = 'break'`. [#10279](https://github.com/ClickHouse/ClickHouse/pull/10279) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Fix rare possible exception `Cannot drain connections: cancel first`. [#10239](https://github.com/ClickHouse/ClickHouse/pull/10239) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Fixed bug where ClickHouse would throw "Unknown function lambda." error message when user tries to run ALTER UPDATE/DELETE on tables with ENGINE = Replicated*. Check for nondeterministic functions now handles lambda expressions correctly. [#10237](https://github.com/ClickHouse/ClickHouse/pull/10237) ([Alexander Kazakov](https://github.com/Akazz)). +* Fixed "generateRandom" function for Date type. This fixes [#9973](https://github.com/ClickHouse/ClickHouse/issues/9973). Fix an edge case when dates with year 2106 are inserted to MergeTree tables with old-style partitioning but partitions are named with year 1970. [#10218](https://github.com/ClickHouse/ClickHouse/pull/10218) ([alexey-milovidov](https://github.com/alexey-milovidov)). +* Convert types if the table definition of a View does not correspond to the SELECT query. This fixes [#10180](https://github.com/ClickHouse/ClickHouse/issues/10180) and [#10022](https://github.com/ClickHouse/ClickHouse/issues/10022). [#10217](https://github.com/ClickHouse/ClickHouse/pull/10217) ([alexey-milovidov](https://github.com/alexey-milovidov)). +* Fix `parseDateTimeBestEffort` for strings in RFC-2822 when day of week is Tuesday or Thursday. This fixes [#10082](https://github.com/ClickHouse/ClickHouse/issues/10082). [#10214](https://github.com/ClickHouse/ClickHouse/pull/10214) ([alexey-milovidov](https://github.com/alexey-milovidov)). +* Fix column names of constants inside JOIN that may clash with names of constants outside of JOIN.
[#10207](https://github.com/ClickHouse/ClickHouse/pull/10207) ([alexey-milovidov](https://github.com/alexey-milovidov)). +* Fix possible infinite query execution when the query actually should stop on LIMIT, while reading from infinite source like `system.numbers` or `system.zeros`. [#10206](https://github.com/ClickHouse/ClickHouse/pull/10206) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Fix using the current database for access checking when the database isn't specified. [#10192](https://github.com/ClickHouse/ClickHouse/pull/10192) ([Vitaly Baranov](https://github.com/vitlibar)). +* Convert blocks if structure does not match on INSERT into Distributed(). [#10135](https://github.com/ClickHouse/ClickHouse/pull/10135) ([Azat Khuzhin](https://github.com/azat)). +* Fix possible incorrect result for extremes in processors pipeline. [#10131](https://github.com/ClickHouse/ClickHouse/pull/10131) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Fix some kinds of alters with compact parts. [#10130](https://github.com/ClickHouse/ClickHouse/pull/10130) ([Anton Popov](https://github.com/CurtizJ)). +* Fix incorrect `index_granularity_bytes` check while creating new replica. Fixes [#10098](https://github.com/ClickHouse/ClickHouse/issues/10098). [#10121](https://github.com/ClickHouse/ClickHouse/pull/10121) ([alesapin](https://github.com/alesapin)). +* Fix SIGSEGV on INSERT into Distributed table when its structure differs from the underlying tables. [#10105](https://github.com/ClickHouse/ClickHouse/pull/10105) ([Azat Khuzhin](https://github.com/azat)). +* Fix possible rows loss for queries with `JOIN` and `UNION ALL`. Fixes [#9826](https://github.com/ClickHouse/ClickHouse/issues/9826), [#10113](https://github.com/ClickHouse/ClickHouse/issues/10113). [#10099](https://github.com/ClickHouse/ClickHouse/pull/10099) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Fixed replicated tables startup when updating from an old ClickHouse version where `/table/replicas/replica_name/metadata` node doesn't exist. Fixes [#10037](https://github.com/ClickHouse/ClickHouse/issues/10037). [#10095](https://github.com/ClickHouse/ClickHouse/pull/10095) ([alesapin](https://github.com/alesapin)). +* Add some arguments check and support identifier arguments for MySQL Database Engine. [#10077](https://github.com/ClickHouse/ClickHouse/pull/10077) ([Winter Zhang](https://github.com/zhang2014)). +* Fix bug in clickhouse dictionary source from localhost clickhouse server. The bug may lead to memory corruption if types in dictionary and source are not compatible. [#10071](https://github.com/ClickHouse/ClickHouse/pull/10071) ([alesapin](https://github.com/alesapin)). +* Fix bug in `CHECK TABLE` query when the table contains skip indices. [#10068](https://github.com/ClickHouse/ClickHouse/pull/10068) ([alesapin](https://github.com/alesapin)). +* Fix error `Cannot clone block with columns because block has 0 columns ... While executing GroupingAggregatedTransform`. It happened when setting `distributed_aggregation_memory_efficient` was enabled, and distributed query read aggregating data with different levels from different shards (mixed single and two level aggregation). [#10063](https://github.com/ClickHouse/ClickHouse/pull/10063) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Fix a segmentation fault that could occur in GROUP BY over string keys containing trailing zero bytes ([#8636](https://github.com/ClickHouse/ClickHouse/issues/8636), [#8925](https://github.com/ClickHouse/ClickHouse/issues/8925)). [#10025](https://github.com/ClickHouse/ClickHouse/pull/10025) ([Alexander Kuzmenkov](https://github.com/akuzm)). +* Fix parallel distributed INSERT SELECT for remote table. This PR fixes the solution provided in [#9759](https://github.com/ClickHouse/ClickHouse/pull/9759). [#9999](https://github.com/ClickHouse/ClickHouse/pull/9999) ([Vitaly Baranov](https://github.com/vitlibar)). +* Fix the number of threads used for remote query execution (performance regression, since 20.3). This happened when query from `Distributed` table was executed simultaneously on local and remote shards. Fixes [#9965](https://github.com/ClickHouse/ClickHouse/issues/9965). [#9971](https://github.com/ClickHouse/ClickHouse/pull/9971) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Fix bug in which the necessary tables weren't retrieved at one of the processing stages of queries to some databases. Fixes [#9699](https://github.com/ClickHouse/ClickHouse/issues/9699). [#9949](https://github.com/ClickHouse/ClickHouse/pull/9949) ([achulkov2](https://github.com/achulkov2)). +* Fix 'Not found column in block' error when `JOIN` appears with `TOTALS`. Fixes [#9839](https://github.com/ClickHouse/ClickHouse/issues/9839). [#9939](https://github.com/ClickHouse/ClickHouse/pull/9939) ([Artem Zuikov](https://github.com/4ertus2)). +* Fix a bug with `ON CLUSTER` DDL queries freezing on server startup. [#9927](https://github.com/ClickHouse/ClickHouse/pull/9927) ([Gagan Arneja](https://github.com/garneja)). +* Fix parsing multiple hosts set in the CREATE USER command, e.g. `CREATE USER user6 HOST NAME REGEXP 'lo.?*host', NAME REGEXP 'lo*host'`. [#9924](https://github.com/ClickHouse/ClickHouse/pull/9924) ([Vitaly Baranov](https://github.com/vitlibar)). +* Fix `TRUNCATE` for Join table engine ([#9917](https://github.com/ClickHouse/ClickHouse/issues/9917)). [#9920](https://github.com/ClickHouse/ClickHouse/pull/9920) ([Amos Bird](https://github.com/amosbird)). +* Fix "scalar doesn't exist" error in ALTERs ([#9878](https://github.com/ClickHouse/ClickHouse/issues/9878)). [#9904](https://github.com/ClickHouse/ClickHouse/pull/9904) ([Amos Bird](https://github.com/amosbird)). +* Fix race condition between drop and optimize in `ReplicatedMergeTree`. [#9901](https://github.com/ClickHouse/ClickHouse/pull/9901) ([alesapin](https://github.com/alesapin)). +* Fix error with qualified names in `distributed_product_mode='local'`. Fixes [#4756](https://github.com/ClickHouse/ClickHouse/issues/4756). [#9891](https://github.com/ClickHouse/ClickHouse/pull/9891) ([Artem Zuikov](https://github.com/4ertus2)). +* Fix calculating grants for introspection functions from the setting 'allow_introspection_functions'. [#9840](https://github.com/ClickHouse/ClickHouse/pull/9840) ([Vitaly Baranov](https://github.com/vitlibar)). + +#### Build/Testing/Packaging Improvement + +* Fix integration test `test_settings_constraints`. [#9962](https://github.com/ClickHouse/ClickHouse/pull/9962) ([Vitaly Baranov](https://github.com/vitlibar)). +* Removed dependency on `clock_getres`. [#9833](https://github.com/ClickHouse/ClickHouse/pull/9833) ([alexey-milovidov](https://github.com/alexey-milovidov)). 
+ + +### ClickHouse release v20.3.5.21, 2020-03-27 + +#### Bug Fix + +* Fix 'Different expressions with the same alias' error when query has PREWHERE and WHERE on distributed table and `SET distributed_product_mode = 'local'`. [#9871](https://github.com/ClickHouse/ClickHouse/pull/9871) ([Artem Zuikov](https://github.com/4ertus2)). +* Fix mutations excessive memory consumption for tables with a composite primary key. This fixes [#9850](https://github.com/ClickHouse/ClickHouse/issues/9850). [#9860](https://github.com/ClickHouse/ClickHouse/pull/9860) ([alesapin](https://github.com/alesapin)). +* For INSERT queries shard now clamps the settings got from the initiator to the shard's constraints instead of throwing an exception. This fix allows to send INSERT queries to a shard with other constraints. This change improves fix [#9447](https://github.com/ClickHouse/ClickHouse/issues/9447). [#9852](https://github.com/ClickHouse/ClickHouse/pull/9852) ([Vitaly Baranov](https://github.com/vitlibar)). +* Fix 'COMMA to CROSS JOIN rewriter is not enabled or cannot rewrite query' error in case of subqueries with COMMA JOIN out of tables lists (i.e. in WHERE). Fixes [#9782](https://github.com/ClickHouse/ClickHouse/issues/9782). [#9830](https://github.com/ClickHouse/ClickHouse/pull/9830) ([Artem Zuikov](https://github.com/4ertus2)). +* Fix possible exception `Got 0 in totals chunk, expected 1` on client. It happened for queries with `JOIN` in case if right joined table had zero rows. Example: `select * from system.one t1 join system.one t2 on t1.dummy = t2.dummy limit 0 FORMAT TabSeparated;`. Fixes [#9777](https://github.com/ClickHouse/ClickHouse/issues/9777). [#9823](https://github.com/ClickHouse/ClickHouse/pull/9823) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Fix SIGSEGV with optimize_skip_unused_shards when type cannot be converted. [#9804](https://github.com/ClickHouse/ClickHouse/pull/9804) ([Azat Khuzhin](https://github.com/azat)). +* Fix broken `ALTER TABLE DELETE COLUMN` query for compact parts. [#9779](https://github.com/ClickHouse/ClickHouse/pull/9779) ([alesapin](https://github.com/alesapin)). +* Fix max_distributed_connections (w/ and w/o Processors). [#9673](https://github.com/ClickHouse/ClickHouse/pull/9673) ([Azat Khuzhin](https://github.com/azat)). +* Fixed a few cases when timezone of the function argument wasn't used properly. [#9574](https://github.com/ClickHouse/ClickHouse/pull/9574) ([Vasily Nemkov](https://github.com/Enmk)). + +#### Improvement + +* Remove order by stage from mutations because we read from a single ordered part in a single thread. Also add a check that the rows in a mutation are ordered in sorting key order and this order is not violated. [#9886](https://github.com/ClickHouse/ClickHouse/pull/9886) ([alesapin](https://github.com/alesapin)). + + +### ClickHouse release v20.3.4.10, 2020-03-20 + +#### Bug Fix +* This release also contains all bug fixes from 20.1.8.41 +* Fix missing `rows_before_limit_at_least` for queries over http (with processors pipeline). This fixes [#9730](https://github.com/ClickHouse/ClickHouse/issues/9730). [#9757](https://github.com/ClickHouse/ClickHouse/pull/9757) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) + + +### ClickHouse release v20.3.3.6, 2020-03-17 + +#### Bug Fix +* This release also contains all bug fixes from 20.1.7.38 +* Fix bug in replication that doesn't allow replication to work if the user has executed mutations on the previous version.
This fixes [#9645](https://github.com/ClickHouse/ClickHouse/issues/9645). [#9652](https://github.com/ClickHouse/ClickHouse/pull/9652) ([alesapin](https://github.com/alesapin)). It makes version 20.3 backward compatible again. +* Add setting `use_compact_format_in_distributed_parts_names` which allows to write files for `INSERT` queries into `Distributed` table in a more compact format. This fixes [#9647](https://github.com/ClickHouse/ClickHouse/issues/9647). [#9653](https://github.com/ClickHouse/ClickHouse/pull/9653) ([alesapin](https://github.com/alesapin)). It makes version 20.3 backward compatible again. + +### ClickHouse release v20.3.2.1, 2020-03-12 + +#### Backward Incompatible Change + +* Fixed the issue `file name too long` when sending data for `Distributed` tables for a large number of replicas. Fixed the issue that replica credentials were exposed in the server log. The format of directory name on disk was changed to `[shard{shard_index}[_replica{replica_index}]]`. [#8911](https://github.com/ClickHouse/ClickHouse/pull/8911) ([Mikhail Korotov](https://github.com/millb)) After you upgrade to the new version, you will not be able to downgrade without manual intervention, because old server version does not recognize the new directory format. If you want to downgrade, you have to manually rename the corresponding directories to the old format. This change is relevant only if you have used asynchronous `INSERT`s to `Distributed` tables. In the version 20.3.3 we will introduce a setting that will allow you to enable the new format gradually. +* Changed the format of replication log entries for mutation commands. You have to wait for old mutations to process before installing the new version. +* Implement simple memory profiler that dumps stacktraces to `system.trace_log` every N bytes over soft allocation limit [#8765](https://github.com/ClickHouse/ClickHouse/pull/8765) ([Ivan](https://github.com/abyss7)) [#9472](https://github.com/ClickHouse/ClickHouse/pull/9472) ([alexey-milovidov](https://github.com/alexey-milovidov)) The column of `system.trace_log` was renamed from `timer_type` to `trace_type`. This will require changes in third-party performance analysis and flamegraph processing tools. +* Use OS thread id everywhere instead of internal thread number. This fixes [#7477](https://github.com/ClickHouse/ClickHouse/issues/7477) Old `clickhouse-client` cannot receive logs that are sent from the server when the setting `send_logs_level` is enabled, because the names and types of the structured log messages were changed. On the other hand, different server versions can send logs with different types to each other. When you don't use the `send_logs_level` setting, you should not care. [#8954](https://github.com/ClickHouse/ClickHouse/pull/8954) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Remove `indexHint` function [#9542](https://github.com/ClickHouse/ClickHouse/pull/9542) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Remove `findClusterIndex`, `findClusterValue` functions. This fixes [#8641](https://github.com/ClickHouse/ClickHouse/issues/8641). If you were using these functions, send an email to `clickhouse-feedback@yandex-team.com` [#9543](https://github.com/ClickHouse/ClickHouse/pull/9543) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Now it's not allowed to create columns or add columns with `SELECT` subquery as default expression.
[#9481](https://github.com/ClickHouse/ClickHouse/pull/9481) ([alesapin](https://github.com/alesapin)) +* Require aliases for subqueries in JOIN. [#9274](https://github.com/ClickHouse/ClickHouse/pull/9274) ([Artem Zuikov](https://github.com/4ertus2)) +* Improved `ALTER MODIFY/ADD` queries logic. Now you cannot `ADD` column without type, `MODIFY` default expression doesn't change type of column and `MODIFY` type doesn't lose default expression value. Fixes [#8669](https://github.com/ClickHouse/ClickHouse/issues/8669). [#9227](https://github.com/ClickHouse/ClickHouse/pull/9227) ([alesapin](https://github.com/alesapin)) +* Require server to be restarted to apply the changes in logging configuration. This is a temporary workaround to avoid the bug where the server logs to a deleted log file (see [#8696](https://github.com/ClickHouse/ClickHouse/issues/8696)). [#8707](https://github.com/ClickHouse/ClickHouse/pull/8707) ([Alexander Kuzmenkov](https://github.com/akuzm)) +* The setting `experimental_use_processors` is enabled by default. This setting enables usage of the new query pipeline. This is internal refactoring and we expect no visible changes. If you see any issues, set it back to zero. [#8768](https://github.com/ClickHouse/ClickHouse/pull/8768) ([alexey-milovidov](https://github.com/alexey-milovidov)) + +#### New Feature +* Add `Avro` and `AvroConfluent` input/output formats [#8571](https://github.com/ClickHouse/ClickHouse/pull/8571) ([Andrew Onyshchuk](https://github.com/oandrew)) [#8957](https://github.com/ClickHouse/ClickHouse/pull/8957) ([Andrew Onyshchuk](https://github.com/oandrew)) [#8717](https://github.com/ClickHouse/ClickHouse/pull/8717) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Multi-threaded and non-blocking updates of expired keys in `cache` dictionaries (with optional permission to read old ones). [#8303](https://github.com/ClickHouse/ClickHouse/pull/8303) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)) +* Add query `ALTER ... MATERIALIZE TTL`. It runs mutation that forces to remove expired data by TTL and recalculates meta-information about TTL in all parts. [#8775](https://github.com/ClickHouse/ClickHouse/pull/8775) ([Anton Popov](https://github.com/CurtizJ)) +* Switch from HashJoin to MergeJoin (on disk) if needed [#9082](https://github.com/ClickHouse/ClickHouse/pull/9082) ([Artem Zuikov](https://github.com/4ertus2)) +* Added `MOVE PARTITION` command for `ALTER TABLE` [#4729](https://github.com/ClickHouse/ClickHouse/issues/4729) [#6168](https://github.com/ClickHouse/ClickHouse/pull/6168) ([Guillaume Tassery](https://github.com/YiuRULE)) +* Reloading storage configuration from configuration file on the fly. [#8594](https://github.com/ClickHouse/ClickHouse/pull/8594) ([Vladimir Chebotarev](https://github.com/excitoon)) +* Allowed to change `storage_policy` to a policy that is not less rich. [#8107](https://github.com/ClickHouse/ClickHouse/pull/8107) ([Vladimir Chebotarev](https://github.com/excitoon)) +* Added support for globs/wildcards for S3 storage and table function. [#8851](https://github.com/ClickHouse/ClickHouse/pull/8851) ([Vladimir Chebotarev](https://github.com/excitoon)) +* Implement `bitAnd`, `bitOr`, `bitXor`, `bitNot` for `FixedString(N)` datatype. [#9091](https://github.com/ClickHouse/ClickHouse/pull/9091) ([Guillaume Tassery](https://github.com/YiuRULE)) +* Added function `bitCount`. This fixes [#8702](https://github.com/ClickHouse/ClickHouse/issues/8702).
[#8708](https://github.com/ClickHouse/ClickHouse/pull/8708) ([alexey-milovidov](https://github.com/alexey-milovidov)) [#8749](https://github.com/ClickHouse/ClickHouse/pull/8749) ([ikopylov](https://github.com/ikopylov)) +* Add `generateRandom` table function to generate random rows with given schema. Allows to populate arbitrary test table with data. [#8994](https://github.com/ClickHouse/ClickHouse/pull/8994) ([Ilya Yatsishin](https://github.com/qoega)) +* `JSONEachRowFormat`: support special case when objects are enclosed in a top-level array. [#8860](https://github.com/ClickHouse/ClickHouse/pull/8860) ([Kruglov Pavel](https://github.com/Avogar)) +* Now it's possible to create a column with `DEFAULT` expression which depends on a column with default `ALIAS` expression. [#9489](https://github.com/ClickHouse/ClickHouse/pull/9489) ([alesapin](https://github.com/alesapin)) +* Allow to specify `--limit` more than the source data size in `clickhouse-obfuscator`. The data will repeat itself with different random seed. [#9155](https://github.com/ClickHouse/ClickHouse/pull/9155) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Added `groupArraySample` function (similar to `groupArray`) with reservoir sampling algorithm. [#8286](https://github.com/ClickHouse/ClickHouse/pull/8286) ([Amos Bird](https://github.com/amosbird)) +* Now you can monitor the size of update queue in `cache`/`complex_key_cache` dictionaries via system metrics. [#9413](https://github.com/ClickHouse/ClickHouse/pull/9413) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)) +* Allow to use CRLF as a line separator in CSV output format when the setting `output_format_csv_crlf_end_of_line` is set to 1 [#8934](https://github.com/ClickHouse/ClickHouse/pull/8934) [#8935](https://github.com/ClickHouse/ClickHouse/pull/8935) [#8963](https://github.com/ClickHouse/ClickHouse/pull/8963) ([Mikhail Korotov](https://github.com/millb)) +* Implement more functions of the [H3](https://github.com/uber/h3) API: `h3GetBaseCell`, `h3HexAreaM2`, `h3IndexesAreNeighbors`, `h3ToChildren`, `h3ToString` and `stringToH3` [#8938](https://github.com/ClickHouse/ClickHouse/pull/8938) ([Nico Mandery](https://github.com/nmandery)) +* New setting introduced: `max_parser_depth` to control maximum stack size and allow large complex queries. This fixes [#6681](https://github.com/ClickHouse/ClickHouse/issues/6681) and [#7668](https://github.com/ClickHouse/ClickHouse/issues/7668). [#8647](https://github.com/ClickHouse/ClickHouse/pull/8647) ([Maxim Smirnov](https://github.com/qMBQx8GH)) +* Add a `force_optimize_skip_unused_shards` setting to throw if skipping of unused shards is not possible [#8805](https://github.com/ClickHouse/ClickHouse/pull/8805) ([Azat Khuzhin](https://github.com/azat)) +* Allow to configure multiple disks/volumes for storing data to send in `Distributed` engine [#8756](https://github.com/ClickHouse/ClickHouse/pull/8756) ([Azat Khuzhin](https://github.com/azat)) +* Support storage policy (``) for storing temporary data. [#8750](https://github.com/ClickHouse/ClickHouse/pull/8750) ([Azat Khuzhin](https://github.com/azat)) +* Added `X-ClickHouse-Exception-Code` HTTP header that is set if exception was thrown before sending data. This implements [#4971](https://github.com/ClickHouse/ClickHouse/issues/4971). [#8786](https://github.com/ClickHouse/ClickHouse/pull/8786) ([Mikhail Korotov](https://github.com/millb)) +* Added function `ifNotFinite`. It is just syntactic sugar: `ifNotFinite(x, y) = isFinite(x) ? x : y`.
[#8710](https://github.com/ClickHouse/ClickHouse/pull/8710) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Added `last_successful_update_time` column in `system.dictionaries` table [#9394](https://github.com/ClickHouse/ClickHouse/pull/9394) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)) +* Add `blockSerializedSize` function (size on disk without compression) [#8952](https://github.com/ClickHouse/ClickHouse/pull/8952) ([Azat Khuzhin](https://github.com/azat)) +* Add function `moduloOrZero` [#9358](https://github.com/ClickHouse/ClickHouse/pull/9358) ([hcz](https://github.com/hczhcz)) +* Added system tables `system.zeros` and `system.zeros_mt` as well as table functions `zeros()` and `zeros_mt()`. Tables (and table functions) contain a single column with name `zero` and type `UInt8`. This column contains zeros. It is needed for test purposes as the fastest method to generate many rows. This fixes [#6604](https://github.com/ClickHouse/ClickHouse/issues/6604) [#9593](https://github.com/ClickHouse/ClickHouse/pull/9593) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) + +#### Experimental Feature +* Add new compact format of parts in `MergeTree`-family tables in which all columns are stored in one file. It helps to increase performance of small and frequent inserts. The old format (one file per column) is now called wide. Data storing format is controlled by settings `min_bytes_for_wide_part` and `min_rows_for_wide_part`. [#8290](https://github.com/ClickHouse/ClickHouse/pull/8290) ([Anton Popov](https://github.com/CurtizJ)) +* Support for S3 storage for `Log`, `TinyLog` and `StripeLog` tables. [#8862](https://github.com/ClickHouse/ClickHouse/pull/8862) ([Pavel Kovalenko](https://github.com/Jokser)) + +#### Bug Fix +* Fixed inconsistent whitespaces in log messages. [#9322](https://github.com/ClickHouse/ClickHouse/pull/9322) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Fix bug in which arrays of unnamed tuples were flattened as Nested structures on table creation. [#8866](https://github.com/ClickHouse/ClickHouse/pull/8866) ([achulkov2](https://github.com/achulkov2)) +* Fixed the issue when "Too many open files" error may happen if there are too many files matching glob pattern in `File` table or `file` table function. Now files are opened lazily. This fixes [#8857](https://github.com/ClickHouse/ClickHouse/issues/8857) [#8861](https://github.com/ClickHouse/ClickHouse/pull/8861) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* DROP TEMPORARY TABLE now drops only temporary table. [#8907](https://github.com/ClickHouse/ClickHouse/pull/8907) ([Vitaly Baranov](https://github.com/vitlibar)) +* Remove outdated partition when we shut down the server or DETACH/ATTACH a table. [#8602](https://github.com/ClickHouse/ClickHouse/pull/8602) ([Guillaume Tassery](https://github.com/YiuRULE)) +* Fix how the default disk calculates the free space from `data` subdirectory. Fixed the issue when the amount of free space is not calculated correctly if the `data` directory is mounted to a separate device (rare case). This fixes [#7441](https://github.com/ClickHouse/ClickHouse/issues/7441) [#9257](https://github.com/ClickHouse/ClickHouse/pull/9257) ([Mikhail Korotov](https://github.com/millb)) +* Allow comma (cross) join with IN () inside. [#9251](https://github.com/ClickHouse/ClickHouse/pull/9251) ([Artem Zuikov](https://github.com/4ertus2)) +* Allow to rewrite CROSS to INNER JOIN if there's [NOT] LIKE operator in WHERE section.
[#9229](https://github.com/ClickHouse/ClickHouse/pull/9229) ([Artem Zuikov](https://github.com/4ertus2)) +* Fix possible incorrect result after `GROUP BY` with enabled setting `distributed_aggregation_memory_efficient`. Fixes [#9134](https://github.com/ClickHouse/ClickHouse/issues/9134). [#9289](https://github.com/ClickHouse/ClickHouse/pull/9289) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) +* Found keys were counted as missed in metrics of cache dictionaries. [#9411](https://github.com/ClickHouse/ClickHouse/pull/9411) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)) +* Fix replication protocol incompatibility introduced in [#8598](https://github.com/ClickHouse/ClickHouse/issues/8598). [#9412](https://github.com/ClickHouse/ClickHouse/pull/9412) ([alesapin](https://github.com/alesapin)) +* Fixed race condition on `queue_task_handle` at the startup of `ReplicatedMergeTree` tables. [#9552](https://github.com/ClickHouse/ClickHouse/pull/9552) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* The token `NOT` didn't work in `SHOW TABLES NOT LIKE` query [#8727](https://github.com/ClickHouse/ClickHouse/issues/8727) [#8940](https://github.com/ClickHouse/ClickHouse/pull/8940) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Added range check to function `h3EdgeLengthM`. Without this check, buffer overflow is possible. [#8945](https://github.com/ClickHouse/ClickHouse/pull/8945) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Fixed up a bug in batched calculations of ternary logical OPs on multiple arguments (more than 10). [#8718](https://github.com/ClickHouse/ClickHouse/pull/8718) ([Alexander Kazakov](https://github.com/Akazz)) +* Fix error of PREWHERE optimization, which could lead to segfaults or `Inconsistent number of columns got from MergeTreeRangeReader` exception. [#9024](https://github.com/ClickHouse/ClickHouse/pull/9024) ([Anton Popov](https://github.com/CurtizJ)) +* Fix unexpected `Timeout exceeded while reading from socket` exception, which randomly happens on secure connection before timeout actually exceeded and when query profiler is enabled. Also add `connect_timeout_with_failover_secure_ms` setting (default 100ms), which is similar to `connect_timeout_with_failover_ms`, but is used for secure connections (because SSL handshake is slower than ordinary TCP connection) [#9026](https://github.com/ClickHouse/ClickHouse/pull/9026) ([tavplubix](https://github.com/tavplubix)) +* Fix bug with mutations finalization, when mutation may hang in state with `parts_to_do=0` and `is_done=0`. [#9022](https://github.com/ClickHouse/ClickHouse/pull/9022) ([alesapin](https://github.com/alesapin)) +* Use new ANY JOIN logic with `partial_merge_join` setting. It's possible to make `ANY|ALL|SEMI LEFT` and `ALL INNER` joins with `partial_merge_join=1` now. [#8932](https://github.com/ClickHouse/ClickHouse/pull/8932) ([Artem Zuikov](https://github.com/4ertus2)) +* Shard now clamps the settings got from the initiator to the shard's constraints instead of throwing an exception. This fix allows to send queries to a shard with other constraints. [#9447](https://github.com/ClickHouse/ClickHouse/pull/9447) ([Vitaly Baranov](https://github.com/vitlibar)) +* Fixed memory management problem in `MergeTreeReadPool`. [#8791](https://github.com/ClickHouse/ClickHouse/pull/8791) ([Vladimir Chebotarev](https://github.com/excitoon)) +* Fix `toDecimal*OrNull()` functions family when called with string `e`.
Fixes [#8312](https://github.com/ClickHouse/ClickHouse/issues/8312) [#8764](https://github.com/ClickHouse/ClickHouse/pull/8764) ([Artem Zuikov](https://github.com/4ertus2)) +* Make sure that `FORMAT Null` sends no data to the client. [#8767](https://github.com/ClickHouse/ClickHouse/pull/8767) ([Alexander Kuzmenkov](https://github.com/akuzm)) +* Fix a bug where the timestamp in `LiveViewBlockInputStream` was not updated. `LIVE VIEW` is an experimental feature. [#8644](https://github.com/ClickHouse/ClickHouse/pull/8644) ([vxider](https://github.com/Vxider)) [#8625](https://github.com/ClickHouse/ClickHouse/pull/8625) ([vxider](https://github.com/Vxider)) +* Fixed wrong behavior of `ALTER MODIFY TTL` which did not allow deleting old TTL expressions. [#8422](https://github.com/ClickHouse/ClickHouse/pull/8422) ([Vladimir Chebotarev](https://github.com/excitoon)) +* Fixed UBSan report in MergeTreeIndexSet. This fixes [#9250](https://github.com/ClickHouse/ClickHouse/issues/9250) [#9365](https://github.com/ClickHouse/ClickHouse/pull/9365) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Fixed the behaviour of `match` and `extract` functions when haystack has zero bytes. The behaviour was wrong when haystack was constant. This fixes [#9160](https://github.com/ClickHouse/ClickHouse/issues/9160) [#9163](https://github.com/ClickHouse/ClickHouse/pull/9163) ([alexey-milovidov](https://github.com/alexey-milovidov)) [#9345](https://github.com/ClickHouse/ClickHouse/pull/9345) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Avoid throwing from destructor in Apache Avro 3rd-party library. [#9066](https://github.com/ClickHouse/ClickHouse/pull/9066) ([Andrew Onyshchuk](https://github.com/oandrew)) +* Don't commit a batch polled from `Kafka` partially as it can lead to holes in data. [#8876](https://github.com/ClickHouse/ClickHouse/pull/8876) ([filimonov](https://github.com/filimonov)) +* Fix `joinGet` with nullable return types. https://github.com/ClickHouse/ClickHouse/issues/8919 [#9014](https://github.com/ClickHouse/ClickHouse/pull/9014) ([Amos Bird](https://github.com/amosbird)) +* Fix data incompatibility when compressed with `T64` codec. [#9016](https://github.com/ClickHouse/ClickHouse/pull/9016) ([Artem Zuikov](https://github.com/4ertus2)) Fix data type ids in `T64` compression codec that led to wrong (de)compression in affected versions. [#9033](https://github.com/ClickHouse/ClickHouse/pull/9033) ([Artem Zuikov](https://github.com/4ertus2)) +* Add setting `enable_early_constant_folding` and disable it in some cases that lead to errors. [#9010](https://github.com/ClickHouse/ClickHouse/pull/9010) ([Artem Zuikov](https://github.com/4ertus2)) +* Fix pushdown predicate optimizer with VIEW and enable the test [#9011](https://github.com/ClickHouse/ClickHouse/pull/9011) ([Winter Zhang](https://github.com/zhang2014)) +* Fix segfault in `Merge` tables, that can happen when reading from `File` storages [#9387](https://github.com/ClickHouse/ClickHouse/pull/9387) ([tavplubix](https://github.com/tavplubix)) +* Added a check for storage policy in `ATTACH PARTITION FROM`, `REPLACE PARTITION`, `MOVE TO TABLE`. Otherwise it could make data of a part inaccessible after restart and prevent ClickHouse from starting. [#9383](https://github.com/ClickHouse/ClickHouse/pull/9383) ([Vladimir Chebotarev](https://github.com/excitoon)) +* Fix alters if there is a TTL set for the table. 
[#8800](https://github.com/ClickHouse/ClickHouse/pull/8800) ([Anton Popov](https://github.com/CurtizJ)) +* Fix race condition that can happen when `SYSTEM RELOAD ALL DICTIONARIES` is executed while some dictionary is being modified/added/removed. [#8801](https://github.com/ClickHouse/ClickHouse/pull/8801) ([Vitaly Baranov](https://github.com/vitlibar)) +* In previous versions the `Memory` database engine used an empty data path, so tables were created in the `path` directory (e.g. `/var/lib/clickhouse/`), not in the data directory of the database (e.g. `/var/lib/clickhouse/db_name`). [#8753](https://github.com/ClickHouse/ClickHouse/pull/8753) ([tavplubix](https://github.com/tavplubix)) +* Fixed wrong log messages about missing default disk or policy. [#9530](https://github.com/ClickHouse/ClickHouse/pull/9530) ([Vladimir Chebotarev](https://github.com/excitoon)) +* Fix not(has()) for the bloom_filter index of array types. [#9407](https://github.com/ClickHouse/ClickHouse/pull/9407) ([achimbab](https://github.com/achimbab)) +* Allow first column(s) in a table with `Log` engine to be an alias [#9231](https://github.com/ClickHouse/ClickHouse/pull/9231) ([Ivan](https://github.com/abyss7)) +* Fix order of ranges while reading from a `MergeTree` table in one thread. It could lead to exceptions from `MergeTreeRangeReader` or wrong query results. [#9050](https://github.com/ClickHouse/ClickHouse/pull/9050) ([Anton Popov](https://github.com/CurtizJ)) +* Make `reinterpretAsFixedString` return `FixedString` instead of `String`. [#9052](https://github.com/ClickHouse/ClickHouse/pull/9052) ([Andrew Onyshchuk](https://github.com/oandrew)) +* Avoid extremely rare cases when the user can get a wrong error message (`Success` instead of detailed error description). [#9457](https://github.com/ClickHouse/ClickHouse/pull/9457) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Do not crash when using `Template` format with empty row template. [#8785](https://github.com/ClickHouse/ClickHouse/pull/8785) ([Alexander Kuzmenkov](https://github.com/akuzm)) +* Metadata files for system tables could be created in the wrong place [#8653](https://github.com/ClickHouse/ClickHouse/pull/8653) ([tavplubix](https://github.com/tavplubix)) Fixes [#8581](https://github.com/ClickHouse/ClickHouse/issues/8581). +* Fix data race on exception_ptr in cache dictionary [#8303](https://github.com/ClickHouse/ClickHouse/issues/8303). [#9379](https://github.com/ClickHouse/ClickHouse/pull/9379) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)) +* Do not throw an exception for query `ATTACH TABLE IF NOT EXISTS`. Previously it was thrown if the table already existed, despite the `IF NOT EXISTS` clause. [#8967](https://github.com/ClickHouse/ClickHouse/pull/8967) ([Anton Popov](https://github.com/CurtizJ)) +* Fixed missing closing paren in exception message. [#8811](https://github.com/ClickHouse/ClickHouse/pull/8811) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Avoid message `Possible deadlock avoided` at the startup of clickhouse-client in interactive mode. [#9455](https://github.com/ClickHouse/ClickHouse/pull/9455) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Fixed the issue when padding at the end of a base64 encoded value can be malformed. Update base64 library. 
This fixes [#9491](https://github.com/ClickHouse/ClickHouse/issues/9491), closes [#9492](https://github.com/ClickHouse/ClickHouse/issues/9492) [#9500](https://github.com/ClickHouse/ClickHouse/pull/9500) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Prevent losing data in `Kafka` in rare cases when an exception happens after reading the suffix but before commit. Fixes [#9378](https://github.com/ClickHouse/ClickHouse/issues/9378) [#9507](https://github.com/ClickHouse/ClickHouse/pull/9507) ([filimonov](https://github.com/filimonov)) +* Fixed exception in `DROP TABLE IF EXISTS` [#8663](https://github.com/ClickHouse/ClickHouse/pull/8663) ([Nikita Vasilev](https://github.com/nikvas0)) +* Fix crash when a user tries to `ALTER MODIFY SETTING` for the old-formatted `MergeTree` table engines family. [#9435](https://github.com/ClickHouse/ClickHouse/pull/9435) ([alesapin](https://github.com/alesapin)) +* Support for UInt64 numbers that don't fit in Int64 in JSON-related functions. Update SIMDJSON to master. This fixes [#9209](https://github.com/ClickHouse/ClickHouse/issues/9209) [#9344](https://github.com/ClickHouse/ClickHouse/pull/9344) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Fixed execution of inverted predicates when a non-strictly monotonic functional index is used. [#9223](https://github.com/ClickHouse/ClickHouse/pull/9223) ([Alexander Kazakov](https://github.com/Akazz)) +* Don't try to fold `IN` constant in `GROUP BY` [#8868](https://github.com/ClickHouse/ClickHouse/pull/8868) ([Amos Bird](https://github.com/amosbird)) +* Fix bug in `ALTER DELETE` mutations which leads to index corruption. This fixes [#9019](https://github.com/ClickHouse/ClickHouse/issues/9019) and [#8982](https://github.com/ClickHouse/ClickHouse/issues/8982). Additionally fix extremely rare race conditions in `ReplicatedMergeTree` `ALTER` queries. [#9048](https://github.com/ClickHouse/ClickHouse/pull/9048) ([alesapin](https://github.com/alesapin)) +* When the setting `compile_expressions` is enabled, you can get `unexpected column` in `LLVMExecutableFunction` when we use `Nullable` type [#8910](https://github.com/ClickHouse/ClickHouse/pull/8910) ([Guillaume Tassery](https://github.com/YiuRULE)) +* Multiple fixes for `Kafka` engine: 1) Fix duplicates that were appearing during consumer group rebalance. 2) Fix rare 'holes' that appeared when data was polled from several partitions with one poll and committed partially (now we always process / commit the whole polled block of messages). 3) Fix flushes by block size (before that only flushing by timeout was working properly). 4) Better subscription procedure (with assignment feedback). 5) Make tests work faster (with default intervals and timeouts). Because data was not flushed by block size before (as it should according to documentation), this PR may lead to some performance degradation with default settings (due to more frequent and smaller flushes which are less optimal). If you encounter a performance issue after this change, please increase `kafka_max_block_size` in the table to a bigger value (for example `CREATE TABLE ...Engine=Kafka ... SETTINGS ... kafka_max_block_size=524288`). Fixes [#7259](https://github.com/ClickHouse/ClickHouse/issues/7259) [#8917](https://github.com/ClickHouse/ClickHouse/pull/8917) ([filimonov](https://github.com/filimonov)) +* Fix `Parameter out of bound` exception in some queries after PREWHERE optimizations. 
[#8914](https://github.com/ClickHouse/ClickHouse/pull/8914) ([Baudouin Giard](https://github.com/bgiard)) +* Fixed the case of mixed-constness of arguments of function `arrayZip`. [#8705](https://github.com/ClickHouse/ClickHouse/pull/8705) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* When executing `CREATE` query, fold constant expressions in storage engine arguments. Replace empty database name with current database. Fixes [#6508](https://github.com/ClickHouse/ClickHouse/issues/6508), [#3492](https://github.com/ClickHouse/ClickHouse/issues/3492) [#9262](https://github.com/ClickHouse/ClickHouse/pull/9262) ([tavplubix](https://github.com/tavplubix)) +* Now it's not possible to create or add columns with simple cyclic aliases like `a DEFAULT b, b DEFAULT a`. [#9603](https://github.com/ClickHouse/ClickHouse/pull/9603) ([alesapin](https://github.com/alesapin)) +* Fixed a bug with double move which may corrupt original part. This is relevant if you use `ALTER TABLE MOVE` [#8680](https://github.com/ClickHouse/ClickHouse/pull/8680) ([Vladimir Chebotarev](https://github.com/excitoon)) +* Allow `interval` identifier to correctly parse without backticks. Fixed issue when a query cannot be executed even if the `interval` identifier is enclosed in backticks or double quotes. This fixes [#9124](https://github.com/ClickHouse/ClickHouse/issues/9124). [#9142](https://github.com/ClickHouse/ClickHouse/pull/9142) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Fixed fuzz test and incorrect behaviour of `bitTestAll`/`bitTestAny` functions. [#9143](https://github.com/ClickHouse/ClickHouse/pull/9143) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Fix possible crash/wrong number of rows in `LIMIT n WITH TIES` when there are a lot of rows equal to n'th row. [#9464](https://github.com/ClickHouse/ClickHouse/pull/9464) ([tavplubix](https://github.com/tavplubix)) +* Fix mutations with parts written with enabled `insert_quorum`. [#9463](https://github.com/ClickHouse/ClickHouse/pull/9463) ([alesapin](https://github.com/alesapin)) +* Fix data race at destruction of `Poco::HTTPServer`. It could happen when server is started and immediately shut down. [#9468](https://github.com/ClickHouse/ClickHouse/pull/9468) ([Anton Popov](https://github.com/CurtizJ)) +* Fix bug in which a misleading error message was shown when running `SHOW CREATE TABLE a_table_that_does_not_exist`. [#8899](https://github.com/ClickHouse/ClickHouse/pull/8899) ([achulkov2](https://github.com/achulkov2)) +* Fixed `Parameters are out of bound` exception in some rare cases when we have a constant in the `SELECT` clause when we have an `ORDER BY` and a `LIMIT` clause. [#8892](https://github.com/ClickHouse/ClickHouse/pull/8892) ([Guillaume Tassery](https://github.com/YiuRULE)) +* Fix mutations finalization, when already done mutation can have status `is_done=0`. [#9217](https://github.com/ClickHouse/ClickHouse/pull/9217) ([alesapin](https://github.com/alesapin)) +* Prevent from executing `ALTER ADD INDEX` for MergeTree tables with old syntax, because it doesn't work. [#8822](https://github.com/ClickHouse/ClickHouse/pull/8822) ([Mikhail Korotov](https://github.com/millb)) +* During server startup do not access table, which `LIVE VIEW` depends on, so server will be able to start. Also remove `LIVE VIEW` dependencies when detaching `LIVE VIEW`. `LIVE VIEW` is an experimental feature. 
[#8824](https://github.com/ClickHouse/ClickHouse/pull/8824) ([tavplubix](https://github.com/tavplubix)) +* Fix possible segfault in `MergeTreeRangeReader`, while executing `PREWHERE`. [#9106](https://github.com/ClickHouse/ClickHouse/pull/9106) ([Anton Popov](https://github.com/CurtizJ)) +* Fix possible mismatched checksums with column TTLs. [#9451](https://github.com/ClickHouse/ClickHouse/pull/9451) ([Anton Popov](https://github.com/CurtizJ)) +* Fixed a bug when parts were not being moved in background by TTL rules when there is only one volume. [#8672](https://github.com/ClickHouse/ClickHouse/pull/8672) ([Vladimir Chebotarev](https://github.com/excitoon)) +* Fixed the issue `Method createColumn() is not implemented for data type Set`. This fixes [#7799](https://github.com/ClickHouse/ClickHouse/issues/7799). [#8674](https://github.com/ClickHouse/ClickHouse/pull/8674) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Now we will try to finalize mutations more frequently. [#9427](https://github.com/ClickHouse/ClickHouse/pull/9427) ([alesapin](https://github.com/alesapin)) +* Fix `intDiv` by minus one constant [#9351](https://github.com/ClickHouse/ClickHouse/pull/9351) ([hcz](https://github.com/hczhcz)) +* Fix possible race condition in `BlockIO`. [#9356](https://github.com/ClickHouse/ClickHouse/pull/9356) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) +* Fix bug leading to server termination when trying to use/drop a `Kafka` table created with wrong parameters. [#9513](https://github.com/ClickHouse/ClickHouse/pull/9513) ([filimonov](https://github.com/filimonov)) +* Added workaround if OS returns wrong result for `timer_create` function. [#8837](https://github.com/ClickHouse/ClickHouse/pull/8837) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Fixed error in usage of `min_marks_for_seek` parameter. Fixed the error message when there is no sharding key in a Distributed table and we try to skip unused shards. [#8908](https://github.com/ClickHouse/ClickHouse/pull/8908) ([Azat Khuzhin](https://github.com/azat)) + +#### Improvement +* Implement `ALTER MODIFY/DROP` queries on top of mutations for the `ReplicatedMergeTree*` engines family. Now `ALTER` queries block only at the metadata update stage and don't block after that. [#8701](https://github.com/ClickHouse/ClickHouse/pull/8701) ([alesapin](https://github.com/alesapin)) +* Add ability to rewrite CROSS to INNER JOINs with `WHERE` section containing unqualified names. [#9512](https://github.com/ClickHouse/ClickHouse/pull/9512) ([Artem Zuikov](https://github.com/4ertus2)) +* Make `SHOW TABLES` and `SHOW DATABASES` queries support `WHERE` expressions and `FROM`/`IN` [#9076](https://github.com/ClickHouse/ClickHouse/pull/9076) ([sundyli](https://github.com/sundy-li)) +* Added a setting `deduplicate_blocks_in_dependent_materialized_views`. [#9070](https://github.com/ClickHouse/ClickHouse/pull/9070) ([urykhy](https://github.com/urykhy)) +* After recent changes MySQL client started to print binary strings in hex thereby making them not readable ([#9032](https://github.com/ClickHouse/ClickHouse/issues/9032)). The workaround in ClickHouse is to mark string columns as UTF-8, which is not always, but usually the case. 
[#9079](https://github.com/ClickHouse/ClickHouse/pull/9079) ([Yuriy Baranov](https://github.com/yurriy)) +* Add support of String and FixedString keys for `sumMap` [#8903](https://github.com/ClickHouse/ClickHouse/pull/8903) ([Baudouin Giard](https://github.com/bgiard)) +* Support string keys in SummingMergeTree maps [#8933](https://github.com/ClickHouse/ClickHouse/pull/8933) ([Baudouin Giard](https://github.com/bgiard)) +* Signal termination of thread to the thread pool even if the thread has thrown an exception [#8736](https://github.com/ClickHouse/ClickHouse/pull/8736) ([Ding Xiang Fei](https://github.com/dingxiangfei2009)) +* Allow to set `query_id` in `clickhouse-benchmark` [#9416](https://github.com/ClickHouse/ClickHouse/pull/9416) ([Anton Popov](https://github.com/CurtizJ)) +* Don't allow strange expressions in `ALTER TABLE ... PARTITION partition` query. This addresses [#7192](https://github.com/ClickHouse/ClickHouse/issues/7192) [#8835](https://github.com/ClickHouse/ClickHouse/pull/8835) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* The table `system.table_engines` now provides information about feature support (like `supports_ttl` or `supports_sort_order`). [#8830](https://github.com/ClickHouse/ClickHouse/pull/8830) ([Max Akhmedov](https://github.com/zlobober)) +* Enable `system.metric_log` by default. It will contain rows with values of ProfileEvents, CurrentMetrics collected with "collect_interval_milliseconds" interval (one second by default). The table is very small (usually on the order of megabytes) and collecting this data by default is reasonable. [#9225](https://github.com/ClickHouse/ClickHouse/pull/9225) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Initialize query profiler for all threads in a group, e.g. it allows to fully profile insert-queries. Fixes [#6964](https://github.com/ClickHouse/ClickHouse/issues/6964) [#8874](https://github.com/ClickHouse/ClickHouse/pull/8874) ([Ivan](https://github.com/abyss7)) +* Now temporary `LIVE VIEW` is created by `CREATE LIVE VIEW name WITH TIMEOUT [42] ...` instead of `CREATE TEMPORARY LIVE VIEW ...`, because the previous syntax was not consistent with `CREATE TEMPORARY TABLE ...` [#9131](https://github.com/ClickHouse/ClickHouse/pull/9131) ([tavplubix](https://github.com/tavplubix)) +* Add text_log.level configuration parameter to limit entries that go to the `system.text_log` table [#8809](https://github.com/ClickHouse/ClickHouse/pull/8809) ([Azat Khuzhin](https://github.com/azat)) +* Allow to put a downloaded part to disks/volumes according to TTL rules [#8598](https://github.com/ClickHouse/ClickHouse/pull/8598) ([Vladimir Chebotarev](https://github.com/excitoon)) +* For external MySQL dictionaries, allow sharing the MySQL connection pool among them. This option significantly reduces the number of connections to MySQL servers. [#9409](https://github.com/ClickHouse/ClickHouse/pull/9409) ([Clément Rodriguez](https://github.com/clemrodriguez)) +* Show nearest query execution time for quantiles in `clickhouse-benchmark` output instead of interpolated values. It's better to show values that correspond to the execution time of some queries. [#8712](https://github.com/ClickHouse/ClickHouse/pull/8712) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Possibility to add key & timestamp for the message when inserting data to Kafka. 
Fixes [#7198](https://github.com/ClickHouse/ClickHouse/issues/7198) [#8969](https://github.com/ClickHouse/ClickHouse/pull/8969) ([filimonov](https://github.com/filimonov)) +* If server is run from terminal, highlight thread number, query id and log priority by colors. This is for improved readability of correlated log messages for developers. [#8961](https://github.com/ClickHouse/ClickHouse/pull/8961) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Better exception message while loading tables for `Ordinary` database. [#9527](https://github.com/ClickHouse/ClickHouse/pull/9527) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Implement `arraySlice` for arrays with aggregate function states. This fixes [#9388](https://github.com/ClickHouse/ClickHouse/issues/9388) [#9391](https://github.com/ClickHouse/ClickHouse/pull/9391) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Allow constant functions and constant arrays to be used on the right side of IN operator. [#8813](https://github.com/ClickHouse/ClickHouse/pull/8813) ([Anton Popov](https://github.com/CurtizJ)) +* If zookeeper exception has happened while fetching data for system.replicas, display it in a separate column. This implements [#9137](https://github.com/ClickHouse/ClickHouse/issues/9137) [#9138](https://github.com/ClickHouse/ClickHouse/pull/9138) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Atomically remove MergeTree data parts on destroy. [#8402](https://github.com/ClickHouse/ClickHouse/pull/8402) ([Vladimir Chebotarev](https://github.com/excitoon)) +* Support row-level security for Distributed tables. [#8926](https://github.com/ClickHouse/ClickHouse/pull/8926) ([Ivan](https://github.com/abyss7)) +* Now we recognize suffix (like KB, KiB...) in settings values. [#8072](https://github.com/ClickHouse/ClickHouse/pull/8072) ([Mikhail Korotov](https://github.com/millb)) +* Prevent out of memory while constructing result of a large JOIN. [#8637](https://github.com/ClickHouse/ClickHouse/pull/8637) ([Artem Zuikov](https://github.com/4ertus2)) +* Added names of clusters to suggestions in interactive mode in `clickhouse-client`. [#8709](https://github.com/ClickHouse/ClickHouse/pull/8709) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Initialize query profiler for all threads in a group, e.g. it allows to fully profile insert-queries [#8820](https://github.com/ClickHouse/ClickHouse/pull/8820) ([Ivan](https://github.com/abyss7)) +* Added column `exception_code` in `system.query_log` table. [#8770](https://github.com/ClickHouse/ClickHouse/pull/8770) ([Mikhail Korotov](https://github.com/millb)) +* Enabled MySQL compatibility server on port `9004` in the default server configuration file. Fixed password generation command in the example in configuration. [#8771](https://github.com/ClickHouse/ClickHouse/pull/8771) ([Yuriy Baranov](https://github.com/yurriy)) +* Prevent abort on shutdown if the filesystem is readonly. This fixes [#9094](https://github.com/ClickHouse/ClickHouse/issues/9094) [#9100](https://github.com/ClickHouse/ClickHouse/pull/9100) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Better exception message when length is required in HTTP POST query. 
[#9453](https://github.com/ClickHouse/ClickHouse/pull/9453) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Add `_path` and `_file` virtual columns to `HDFS` and `File` engines and `hdfs` and `file` table functions [#8489](https://github.com/ClickHouse/ClickHouse/pull/8489) ([Olga Khvostikova](https://github.com/stavrolia)) +* Fix error `Cannot find column` while inserting into `MATERIALIZED VIEW` in case if new column was added to view's internal table. [#8766](https://github.com/ClickHouse/ClickHouse/pull/8766) [#8788](https://github.com/ClickHouse/ClickHouse/pull/8788) ([vzakaznikov](https://github.com/vzakaznikov)) [#8788](https://github.com/ClickHouse/ClickHouse/issues/8788) [#8806](https://github.com/ClickHouse/ClickHouse/pull/8806) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) [#8803](https://github.com/ClickHouse/ClickHouse/pull/8803) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) +* Fix progress over native client-server protocol, by send progress after final update (like logs). This may be relevant only to some third-party tools that are using native protocol. [#9495](https://github.com/ClickHouse/ClickHouse/pull/9495) ([Azat Khuzhin](https://github.com/azat)) +* Add a system metric tracking the number of client connections using MySQL protocol ([#9013](https://github.com/ClickHouse/ClickHouse/issues/9013)). [#9015](https://github.com/ClickHouse/ClickHouse/pull/9015) ([Eugene Klimov](https://github.com/Slach)) +* From now on, HTTP responses will have `X-ClickHouse-Timezone` header set to the same timezone value that `SELECT timezone()` would report. [#9493](https://github.com/ClickHouse/ClickHouse/pull/9493) ([Denis Glazachev](https://github.com/traceon)) + +#### Performance Improvement +* Improve performance of analysing index with IN [#9261](https://github.com/ClickHouse/ClickHouse/pull/9261) ([Anton Popov](https://github.com/CurtizJ)) +* Simpler and more efficient code in Logical Functions + code cleanups. A followup to [#8718](https://github.com/ClickHouse/ClickHouse/issues/8718) [#8728](https://github.com/ClickHouse/ClickHouse/pull/8728) ([Alexander Kazakov](https://github.com/Akazz)) +* Overall performance improvement (in range of 5%..200% for affected queries) by ensuring even more strict aliasing with C++20 features. [#9304](https://github.com/ClickHouse/ClickHouse/pull/9304) ([Amos Bird](https://github.com/amosbird)) +* More strict aliasing for inner loops of comparison functions. [#9327](https://github.com/ClickHouse/ClickHouse/pull/9327) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* More strict aliasing for inner loops of arithmetic functions. [#9325](https://github.com/ClickHouse/ClickHouse/pull/9325) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* A ~3 times faster implementation for ColumnVector::replicate(), via which ColumnConst::convertToFullColumn() is implemented. Also will be useful in tests when materializing constants. [#9293](https://github.com/ClickHouse/ClickHouse/pull/9293) ([Alexander Kazakov](https://github.com/Akazz)) +* Another minor performance improvement to `ColumnVector::replicate()` (this speeds up the `materialize` function and higher order functions) an even further improvement to [#9293](https://github.com/ClickHouse/ClickHouse/issues/9293) [#9442](https://github.com/ClickHouse/ClickHouse/pull/9442) ([Alexander Kazakov](https://github.com/Akazz)) +* Improved performance of `stochasticLinearRegression` aggregate function. This patch is contributed by Intel. 
[#8652](https://github.com/ClickHouse/ClickHouse/pull/8652) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Improve performance of `reinterpretAsFixedString` function. [#9342](https://github.com/ClickHouse/ClickHouse/pull/9342) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Do not send blocks to client for `Null` format in processors pipeline. [#8797](https://github.com/ClickHouse/ClickHouse/pull/8797) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) [#8767](https://github.com/ClickHouse/ClickHouse/pull/8767) ([Alexander Kuzmenkov](https://github.com/akuzm)) + +#### Build/Testing/Packaging Improvement +* Exception handling now works correctly on Windows Subsystem for Linux. See https://github.com/ClickHouse-Extras/libunwind/pull/3 This fixes [#6480](https://github.com/ClickHouse/ClickHouse/issues/6480) [#9564](https://github.com/ClickHouse/ClickHouse/pull/9564) ([sobolevsv](https://github.com/sobolevsv)) +* Replace `readline` with `replxx` for interactive line editing in `clickhouse-client` [#8416](https://github.com/ClickHouse/ClickHouse/pull/8416) ([Ivan](https://github.com/abyss7)) +* Better build time and less template instantiations in FunctionsComparison. [#9324](https://github.com/ClickHouse/ClickHouse/pull/9324) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Added integration with `clang-tidy` in CI. See also [#6044](https://github.com/ClickHouse/ClickHouse/issues/6044) [#9566](https://github.com/ClickHouse/ClickHouse/pull/9566) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Now we link ClickHouse in CI using `lld` even for `gcc`. [#9049](https://github.com/ClickHouse/ClickHouse/pull/9049) ([alesapin](https://github.com/alesapin)) +* Allow to randomize thread scheduling and insert glitches when `THREAD_FUZZER_*` environment variables are set. This helps testing. [#9459](https://github.com/ClickHouse/ClickHouse/pull/9459) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Enable secure sockets in stateless tests [#9288](https://github.com/ClickHouse/ClickHouse/pull/9288) ([tavplubix](https://github.com/tavplubix)) +* Make SPLIT_SHARED_LIBRARIES=OFF more robust [#9156](https://github.com/ClickHouse/ClickHouse/pull/9156) ([Azat Khuzhin](https://github.com/azat)) +* Make "performance_introspection_and_logging" test reliable to random server stuck. This may happen in CI environment. See also [#9515](https://github.com/ClickHouse/ClickHouse/issues/9515) [#9528](https://github.com/ClickHouse/ClickHouse/pull/9528) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Validate XML in style check. [#9550](https://github.com/ClickHouse/ClickHouse/pull/9550) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Fixed race condition in test `00738_lock_for_inner_table`. This test relied on sleep. [#9555](https://github.com/ClickHouse/ClickHouse/pull/9555) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Remove performance tests of type `once`. This is needed to run all performance tests in statistical comparison mode (more reliable). [#9557](https://github.com/ClickHouse/ClickHouse/pull/9557) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Added performance test for arithmetic functions. [#9326](https://github.com/ClickHouse/ClickHouse/pull/9326) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Added performance test for `sumMap` and `sumMapWithOverflow` aggregate functions. 
Follow-up for [#8933](https://github.com/ClickHouse/ClickHouse/issues/8933) [#8947](https://github.com/ClickHouse/ClickHouse/pull/8947) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Ensure style of ErrorCodes by style check. [#9370](https://github.com/ClickHouse/ClickHouse/pull/9370) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Add script for tests history. [#8796](https://github.com/ClickHouse/ClickHouse/pull/8796) ([alesapin](https://github.com/alesapin)) +* Add GCC warning `-Wsuggest-override` to locate and fix all places where `override` keyword must be used. [#8760](https://github.com/ClickHouse/ClickHouse/pull/8760) ([kreuzerkrieg](https://github.com/kreuzerkrieg)) +* Ignore weak symbol under Mac OS X because it must be defined [#9538](https://github.com/ClickHouse/ClickHouse/pull/9538) ([Deleted user](https://github.com/ghost)) +* Normalize running time of some queries in performance tests. This is done in preparation to run all the performance tests in comparison mode. [#9565](https://github.com/ClickHouse/ClickHouse/pull/9565) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Fix some tests to support pytest with query tests [#9062](https://github.com/ClickHouse/ClickHouse/pull/9062) ([Ivan](https://github.com/abyss7)) +* Enable SSL in build with MSan, so server will not fail at startup when running stateless tests [#9531](https://github.com/ClickHouse/ClickHouse/pull/9531) ([tavplubix](https://github.com/tavplubix)) +* Fix database substitution in test results [#9384](https://github.com/ClickHouse/ClickHouse/pull/9384) ([Ilya Yatsishin](https://github.com/qoega)) +* Build fixes for miscellaneous platforms [#9381](https://github.com/ClickHouse/ClickHouse/pull/9381) ([proller](https://github.com/proller)) [#8755](https://github.com/ClickHouse/ClickHouse/pull/8755) ([proller](https://github.com/proller)) [#8631](https://github.com/ClickHouse/ClickHouse/pull/8631) ([proller](https://github.com/proller)) +* Added disks section to stateless-with-coverage test docker image [#9213](https://github.com/ClickHouse/ClickHouse/pull/9213) ([Pavel Kovalenko](https://github.com/Jokser)) +* Get rid of in-source-tree files when building with GRPC [#9588](https://github.com/ClickHouse/ClickHouse/pull/9588) ([Amos Bird](https://github.com/amosbird)) +* Slightly faster build time by removing SessionCleaner from Context. Make the code of SessionCleaner more simple. [#9232](https://github.com/ClickHouse/ClickHouse/pull/9232) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Updated checking for hung queries in clickhouse-test script [#8858](https://github.com/ClickHouse/ClickHouse/pull/8858) ([Alexander Kazakov](https://github.com/Akazz)) +* Removed some useless files from repository. [#8843](https://github.com/ClickHouse/ClickHouse/pull/8843) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Changed type of math perftests from `once` to `loop`. [#8783](https://github.com/ClickHouse/ClickHouse/pull/8783) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) +* Add docker image which allows to build interactive code browser HTML report for our codebase. [#8781](https://github.com/ClickHouse/ClickHouse/pull/8781) ([alesapin](https://github.com/alesapin)) See [Woboq Code Browser](https://clickhouse-test-reports.s3.yandex.net/codebrowser/html_report///ClickHouse/dbms/index.html) +* Suppress some test failures under MSan. 
[#8780](https://github.com/ClickHouse/ClickHouse/pull/8780) ([Alexander Kuzmenkov](https://github.com/akuzm)) +* Speed up the "exception while insert" test. This test often times out in the debug-with-coverage build. [#8711](https://github.com/ClickHouse/ClickHouse/pull/8711) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Updated `libcxx` and `libcxxabi` to master. In preparation to [#9304](https://github.com/ClickHouse/ClickHouse/issues/9304) [#9308](https://github.com/ClickHouse/ClickHouse/pull/9308) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Fix flaky test `00910_zookeeper_test_alter_compression_codecs`. [#9525](https://github.com/ClickHouse/ClickHouse/pull/9525) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Clean up duplicated linker flags. Make sure the linker won't look up an unexpected symbol. [#9433](https://github.com/ClickHouse/ClickHouse/pull/9433) ([Amos Bird](https://github.com/amosbird)) +* Add `clickhouse-odbc` driver into test images. This allows to test interaction of ClickHouse with ClickHouse via its own ODBC driver. [#9348](https://github.com/ClickHouse/ClickHouse/pull/9348) ([filimonov](https://github.com/filimonov)) +* Fix several bugs in unit tests. [#9047](https://github.com/ClickHouse/ClickHouse/pull/9047) ([alesapin](https://github.com/alesapin)) +* Enable `-Wmissing-include-dirs` GCC warning to eliminate all non-existing includes - mostly as a result of CMake scripting errors [#8704](https://github.com/ClickHouse/ClickHouse/pull/8704) ([kreuzerkrieg](https://github.com/kreuzerkrieg)) +* Describe reasons if query profiler cannot work. This is intended for [#9049](https://github.com/ClickHouse/ClickHouse/issues/9049) [#9144](https://github.com/ClickHouse/ClickHouse/pull/9144) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Update OpenSSL to upstream master. Fixed the issue when TLS connections may fail with the message `OpenSSL SSL_read: error:14094438:SSL routines:ssl3_read_bytes:tlsv1 alert internal error` and `SSL Exception: error:2400006E:random number generator::error retrieving entropy`. The issue was present in version 20.1. [#8956](https://github.com/ClickHouse/ClickHouse/pull/8956) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Update Dockerfile for server [#8893](https://github.com/ClickHouse/ClickHouse/pull/8893) ([Ilya Mazaev](https://github.com/ne-ray)) +* Minor fixes in build-gcc-from-sources script [#8774](https://github.com/ClickHouse/ClickHouse/pull/8774) ([Michael Nacharov](https://github.com/mnach)) +* Replace `numbers` with `zeros` in perftests where the `number` column is not used. This will lead to cleaner test results. [#9600](https://github.com/ClickHouse/ClickHouse/pull/9600) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) +* Fix stack overflow issue when using initializer_list in Column constructors. [#9367](https://github.com/ClickHouse/ClickHouse/pull/9367) ([Deleted user](https://github.com/ghost)) +* Upgrade librdkafka to v1.3.0. Enable bundled `rdkafka` and `gsasl` libraries on Mac OS X. [#9000](https://github.com/ClickHouse/ClickHouse/pull/9000) ([Andrew Onyshchuk](https://github.com/oandrew)) +* Build fix on GCC 9.2.0 [#9306](https://github.com/ClickHouse/ClickHouse/pull/9306) ([vxider](https://github.com/Vxider)) + + ## ClickHouse release v20.1 +### ClickHouse release v20.1.10.70, 2020-04-17 + +#### Bug Fix + +* Fix rare possible exception `Cannot drain connections: cancel first`. 
[#10239](https://github.com/ClickHouse/ClickHouse/pull/10239) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Fixed bug where ClickHouse would throw `'Unknown function lambda.'` error message when a user tries to run `ALTER UPDATE/DELETE` on tables with `ENGINE = Replicated*`. Check for nondeterministic functions now handles lambda expressions correctly. [#10237](https://github.com/ClickHouse/ClickHouse/pull/10237) ([Alexander Kazakov](https://github.com/Akazz)). +* Fix `parseDateTimeBestEffort` for strings in RFC-2822 when day of week is Tuesday or Thursday. This fixes [#10082](https://github.com/ClickHouse/ClickHouse/issues/10082). [#10214](https://github.com/ClickHouse/ClickHouse/pull/10214) ([alexey-milovidov](https://github.com/alexey-milovidov)). +* Fix column names of constants inside `JOIN` that may clash with names of constants outside of `JOIN`. [#10207](https://github.com/ClickHouse/ClickHouse/pull/10207) ([alexey-milovidov](https://github.com/alexey-milovidov)). +* Fix possible infinite query execution when the query actually should stop on LIMIT, while reading from an infinite source like `system.numbers` or `system.zeros`. [#10206](https://github.com/ClickHouse/ClickHouse/pull/10206) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Fix move-to-prewhere optimization in presence of `arrayJoin` functions (in certain cases). This fixes [#10092](https://github.com/ClickHouse/ClickHouse/issues/10092). [#10195](https://github.com/ClickHouse/ClickHouse/pull/10195) ([alexey-milovidov](https://github.com/alexey-milovidov)). +* Add the ability to relax the restriction on non-deterministic functions usage in mutations with the `allow_nondeterministic_mutations` setting. [#10186](https://github.com/ClickHouse/ClickHouse/pull/10186) ([filimonov](https://github.com/filimonov)). +* Convert blocks if structure does not match on `INSERT` into a table with `Distributed` engine. [#10135](https://github.com/ClickHouse/ClickHouse/pull/10135) ([Azat Khuzhin](https://github.com/azat)). +* Fix `SIGSEGV` on `INSERT` into `Distributed` table when its structure differs from the underlying tables. [#10105](https://github.com/ClickHouse/ClickHouse/pull/10105) ([Azat Khuzhin](https://github.com/azat)). +* Fix possible loss of rows for queries with `JOIN` and `UNION ALL`. Fixes [#9826](https://github.com/ClickHouse/ClickHouse/issues/9826), [#10113](https://github.com/ClickHouse/ClickHouse/issues/10113). [#10099](https://github.com/ClickHouse/ClickHouse/pull/10099) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Add arguments check and support identifier arguments for MySQL Database Engine. [#10077](https://github.com/ClickHouse/ClickHouse/pull/10077) ([Winter Zhang](https://github.com/zhang2014)). +* Fix bug in clickhouse dictionary source from localhost clickhouse server. The bug may lead to memory corruption if types in the dictionary and the source are not compatible. [#10071](https://github.com/ClickHouse/ClickHouse/pull/10071) ([alesapin](https://github.com/alesapin)). +* Fix error `Cannot clone block with columns because block has 0 columns ... While executing GroupingAggregatedTransform`. It happened when the setting `distributed_aggregation_memory_efficient` was enabled, and a distributed query read aggregated data at different levels from different shards (mixed single and two level aggregation). [#10063](https://github.com/ClickHouse/ClickHouse/pull/10063) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). 
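+
+A minimal, hypothetical illustration of the scenario behind the fix above (the `hits_all` Distributed table name is an assumption used only for this sketch and is not taken from the PR):
+
+```sql
+-- With memory-efficient distributed aggregation enabled, a distributed GROUP BY
+-- like this could previously fail when shards returned mixed single- and
+-- two-level aggregation data.
+SET distributed_aggregation_memory_efficient = 1;
+SELECT UserID, count() FROM hits_all GROUP BY UserID;
+```
+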
+* Fix a segmentation fault that could occur in `GROUP BY` over string keys containing trailing zero bytes ([#8636](https://github.com/ClickHouse/ClickHouse/issues/8636), [#8925](https://github.com/ClickHouse/ClickHouse/issues/8925)). [#10025](https://github.com/ClickHouse/ClickHouse/pull/10025) ([Alexander Kuzmenkov](https://github.com/akuzm)). +* Fix bug in which the necessary tables weren't retrieved at one of the processing stages of queries to some databases. Fixes [#9699](https://github.com/ClickHouse/ClickHouse/issues/9699). [#9949](https://github.com/ClickHouse/ClickHouse/pull/9949) ([achulkov2](https://github.com/achulkov2)). +* Fix `'Not found column in block'` error when `JOIN` appears with `TOTALS`. Fixes [#9839](https://github.com/ClickHouse/ClickHouse/issues/9839). [#9939](https://github.com/ClickHouse/ClickHouse/pull/9939) ([Artem Zuikov](https://github.com/4ertus2)). +* Fix a bug with `ON CLUSTER` DDL queries freezing on server startup. [#9927](https://github.com/ClickHouse/ClickHouse/pull/9927) ([Gagan Arneja](https://github.com/garneja)). +* Fix `TRUNCATE` for Join table engine ([#9917](https://github.com/ClickHouse/ClickHouse/issues/9917)). [#9920](https://github.com/ClickHouse/ClickHouse/pull/9920) ([Amos Bird](https://github.com/amosbird)). +* Fix `'scalar doesn't exist'` error in ALTER queries ([#9878](https://github.com/ClickHouse/ClickHouse/issues/9878)). [#9904](https://github.com/ClickHouse/ClickHouse/pull/9904) ([Amos Bird](https://github.com/amosbird)). +* Fix race condition between drop and optimize in `ReplicatedMergeTree`. [#9901](https://github.com/ClickHouse/ClickHouse/pull/9901) ([alesapin](https://github.com/alesapin)). +* Fixed `DeleteOnDestroy` logic in `ATTACH PART` which could lead to automatic removal of the attached part and added a few tests. [#9410](https://github.com/ClickHouse/ClickHouse/pull/9410) ([Vladimir Chebotarev](https://github.com/excitoon)). + +#### Build/Testing/Packaging Improvement + +* Fix unit test `collapsing_sorted_stream`. [#9367](https://github.com/ClickHouse/ClickHouse/pull/9367) ([Deleted user](https://github.com/ghost)). + +### ClickHouse release v20.1.9.54, 2020-03-28 + +#### Bug Fix + +* Fix `'Different expressions with the same alias'` error when query has `PREWHERE` and `WHERE` on distributed table and `SET distributed_product_mode = 'local'`. [#9871](https://github.com/ClickHouse/ClickHouse/pull/9871) ([Artem Zuikov](https://github.com/4ertus2)). +* Fix excessive memory consumption of mutations for tables with a composite primary key. This fixes [#9850](https://github.com/ClickHouse/ClickHouse/issues/9850). [#9860](https://github.com/ClickHouse/ClickHouse/pull/9860) ([alesapin](https://github.com/alesapin)). +* For INSERT queries shard now clamps the settings got from the initiator to the shard's constraints instead of throwing an exception. This fix allows to send `INSERT` queries to a shard with other constraints. This change improves fix [#9447](https://github.com/ClickHouse/ClickHouse/issues/9447). [#9852](https://github.com/ClickHouse/ClickHouse/pull/9852) ([Vitaly Baranov](https://github.com/vitlibar)). +* Fix possible exception `Got 0 in totals chunk, expected 1` on client. It happened for queries with `JOIN` if the right joined table had zero rows. Example: `select * from system.one t1 join system.one t2 on t1.dummy = t2.dummy limit 0 FORMAT TabSeparated;`. Fixes [#9777](https://github.com/ClickHouse/ClickHouse/issues/9777). 
[#9823](https://github.com/ClickHouse/ClickHouse/pull/9823) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Fix `SIGSEGV` with `optimize_skip_unused_shards` when type cannot be converted. [#9804](https://github.com/ClickHouse/ClickHouse/pull/9804) ([Azat Khuzhin](https://github.com/azat)). +* Fixed a few cases when timezone of the function argument wasn't used properly. [#9574](https://github.com/ClickHouse/ClickHouse/pull/9574) ([Vasily Nemkov](https://github.com/Enmk)). + +#### Improvement + +* Remove `ORDER BY` stage from mutations because we read from a single ordered part in a single thread. Also add a check that the rows in a mutation are ordered by sorting key and this order is not violated. [#9886](https://github.com/ClickHouse/ClickHouse/pull/9886) ([alesapin](https://github.com/alesapin)). + +#### Build/Testing/Packaging Improvement + +* Clean up duplicated linker flags. Make sure the linker won't look up an unexpected symbol. [#9433](https://github.com/ClickHouse/ClickHouse/pull/9433) ([Amos Bird](https://github.com/amosbird)). + +### ClickHouse release v20.1.8.41, 2020-03-20 + +#### Bug Fix +* Fix possible permanent `Cannot schedule a task` error (due to unhandled exception in `ParallelAggregatingBlockInputStream::Handler::onFinish/onFinishThread`). This fixes [#6833](https://github.com/ClickHouse/ClickHouse/issues/6833). [#9154](https://github.com/ClickHouse/ClickHouse/pull/9154) ([Azat Khuzhin](https://github.com/azat)) +* Fix excessive memory consumption in `ALTER` queries (mutations). This fixes [#9533](https://github.com/ClickHouse/ClickHouse/issues/9533) and [#9670](https://github.com/ClickHouse/ClickHouse/issues/9670). [#9754](https://github.com/ClickHouse/ClickHouse/pull/9754) ([alesapin](https://github.com/alesapin)) +* Fix bug in backquoting in external dictionaries DDL. This fixes [#9619](https://github.com/ClickHouse/ClickHouse/issues/9619). [#9734](https://github.com/ClickHouse/ClickHouse/pull/9734) ([alesapin](https://github.com/alesapin)) + +### ClickHouse release v20.1.7.38, 2020-03-18 + +#### Bug Fix +* Fixed incorrect internal function names for `sumKahan` and `sumWithOverflow`. It led to an exception when using these functions in remote queries. [#9636](https://github.com/ClickHouse/ClickHouse/pull/9636) ([Azat Khuzhin](https://github.com/azat)). This issue was in all ClickHouse releases. +* Allow `ALTER ON CLUSTER` of `Distributed` tables with internal replication. This fixes [#3268](https://github.com/ClickHouse/ClickHouse/issues/3268). [#9617](https://github.com/ClickHouse/ClickHouse/pull/9617) ([shinoi2](https://github.com/shinoi2)). This issue was in all ClickHouse releases. +* Fix possible exceptions `Size of filter doesn't match size of column` and `Invalid number of rows in Chunk` in `MergeTreeRangeReader`. They could appear while executing `PREWHERE` in some cases. Fixes [#9132](https://github.com/ClickHouse/ClickHouse/issues/9132). [#9612](https://github.com/ClickHouse/ClickHouse/pull/9612) ([Anton Popov](https://github.com/CurtizJ)) +* Fixed the issue: timezone was not preserved if you write a simple arithmetic expression like `time + 1` (in contrast to an expression like `time + INTERVAL 1 SECOND`). This fixes [#5743](https://github.com/ClickHouse/ClickHouse/issues/5743). [#9323](https://github.com/ClickHouse/ClickHouse/pull/9323) ([alexey-milovidov](https://github.com/alexey-milovidov)). This issue was in all ClickHouse releases. 
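+
+A short, hypothetical illustration of the timezone issue described in the entry above (the query is an example sketch, not taken from the PR):
+
+```sql
+-- Before the fix, the plain arithmetic form `time + 1` lost the argument's timezone,
+-- unlike the equivalent `time + INTERVAL 1 SECOND` expression.
+WITH toDateTime('2020-01-01 00:00:00', 'Europe/Moscow') AS time
+SELECT time + 1, time + INTERVAL 1 SECOND;
+```
+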
+* Now it's not possible to create or add columns with simple cyclic aliases like `a DEFAULT b, b DEFAULT a`. [#9603](https://github.com/ClickHouse/ClickHouse/pull/9603) ([alesapin](https://github.com/alesapin)) +* Fixed the issue when padding at the end of base64 encoded value can be malformed. Update base64 library. This fixes [#9491](https://github.com/ClickHouse/ClickHouse/issues/9491), closes [#9492](https://github.com/ClickHouse/ClickHouse/issues/9492) [#9500](https://github.com/ClickHouse/ClickHouse/pull/9500) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Fix data race at destruction of `Poco::HTTPServer`. It could happen when server is started and immediately shut down. [#9468](https://github.com/ClickHouse/ClickHouse/pull/9468) ([Anton Popov](https://github.com/CurtizJ)) +* Fix possible crash/wrong number of rows in `LIMIT n WITH TIES` when there are a lot of rows equal to n'th row. [#9464](https://github.com/ClickHouse/ClickHouse/pull/9464) ([tavplubix](https://github.com/tavplubix)) +* Fix possible mismatched checksums with column TTLs. [#9451](https://github.com/ClickHouse/ClickHouse/pull/9451) ([Anton Popov](https://github.com/CurtizJ)) +* Fix crash when a user tries to `ALTER MODIFY SETTING` for old-formated `MergeTree` table engines family. [#9435](https://github.com/ClickHouse/ClickHouse/pull/9435) ([alesapin](https://github.com/alesapin)) +* Now we will try finalize mutations more frequently. [#9427](https://github.com/ClickHouse/ClickHouse/pull/9427) ([alesapin](https://github.com/alesapin)) +* Fix replication protocol incompatibility introduced in [#8598](https://github.com/ClickHouse/ClickHouse/issues/8598). [#9412](https://github.com/ClickHouse/ClickHouse/pull/9412) ([alesapin](https://github.com/alesapin)) +* Fix not(has()) for the bloom_filter index of array types. [#9407](https://github.com/ClickHouse/ClickHouse/pull/9407) ([achimbab](https://github.com/achimbab)) +* Fixed the behaviour of `match` and `extract` functions when haystack has zero bytes. The behaviour was wrong when haystack was constant. This fixes [#9160](https://github.com/ClickHouse/ClickHouse/issues/9160) [#9163](https://github.com/ClickHouse/ClickHouse/pull/9163) ([alexey-milovidov](https://github.com/alexey-milovidov)) [#9345](https://github.com/ClickHouse/ClickHouse/pull/9345) ([alexey-milovidov](https://github.com/alexey-milovidov)) + +#### Build/Testing/Packaging Improvement + +* Exception handling now works correctly on Windows Subsystem for Linux. See https://github.com/ClickHouse-Extras/libunwind/pull/3 This fixes [#6480](https://github.com/ClickHouse/ClickHouse/issues/6480) [#9564](https://github.com/ClickHouse/ClickHouse/pull/9564) ([sobolevsv](https://github.com/sobolevsv)) + + +### ClickHouse release v20.1.6.30, 2020-03-05 + +#### Bug Fix + +* Fix data incompatibility when compressed with `T64` codec. +[#9039](https://github.com/ClickHouse/ClickHouse/pull/9039) [(abyss7)](https://github.com/abyss7) +* Fix order of ranges while reading from MergeTree table in one thread. Fixes [#8964](https://github.com/ClickHouse/ClickHouse/issues/8964). +[#9050](https://github.com/ClickHouse/ClickHouse/pull/9050) [(CurtizJ)](https://github.com/CurtizJ) +* Fix possible segfault in `MergeTreeRangeReader`, while executing `PREWHERE`. Fixes [#9064](https://github.com/ClickHouse/ClickHouse/issues/9064). +[#9106](https://github.com/ClickHouse/ClickHouse/pull/9106) [(CurtizJ)](https://github.com/CurtizJ) +* Fix `reinterpretAsFixedString` to return `FixedString` instead of `String`. 
+[#9052](https://github.com/ClickHouse/ClickHouse/pull/9052) [(oandrew)](https://github.com/oandrew) +* Fix `joinGet` with nullable return types. Fixes [#8919](https://github.com/ClickHouse/ClickHouse/issues/8919) +[#9014](https://github.com/ClickHouse/ClickHouse/pull/9014) [(amosbird)](https://github.com/amosbird) +* Fix fuzz test and incorrect behaviour of bitTestAll/bitTestAny functions. +[#9143](https://github.com/ClickHouse/ClickHouse/pull/9143) [(alexey-milovidov)](https://github.com/alexey-milovidov) +* Fix the behaviour of match and extract functions when haystack has zero bytes. The behaviour was wrong when haystack was constant. Fixes [#9160](https://github.com/ClickHouse/ClickHouse/issues/9160) +[#9163](https://github.com/ClickHouse/ClickHouse/pull/9163) [(alexey-milovidov)](https://github.com/alexey-milovidov) +* Fixed execution of inverted predicates when a non-strictly monotonic functional index is used. Fixes [#9034](https://github.com/ClickHouse/ClickHouse/issues/9034) +[#9223](https://github.com/ClickHouse/ClickHouse/pull/9223) [(Akazz)](https://github.com/Akazz) +* Allow to rewrite `CROSS` to `INNER JOIN` if there's `[NOT] LIKE` operator in `WHERE` section. Fixes [#9191](https://github.com/ClickHouse/ClickHouse/issues/9191) +[#9229](https://github.com/ClickHouse/ClickHouse/pull/9229) [(4ertus2)](https://github.com/4ertus2) +* Allow first column(s) in a table with Log engine to be an alias. +[#9231](https://github.com/ClickHouse/ClickHouse/pull/9231) [(abyss7)](https://github.com/abyss7) +* Allow comma join with `IN()` inside. Fixes [#7314](https://github.com/ClickHouse/ClickHouse/issues/7314). +[#9251](https://github.com/ClickHouse/ClickHouse/pull/9251) [(4ertus2)](https://github.com/4ertus2) +* Improve `ALTER MODIFY/ADD` queries logic. Now you cannot `ADD` column without type, `MODIFY` default expression doesn't change type of column and `MODIFY` type doesn't lose the default expression value. Fixes [#8669](https://github.com/ClickHouse/ClickHouse/issues/8669). +[#9227](https://github.com/ClickHouse/ClickHouse/pull/9227) [(alesapin)](https://github.com/alesapin) +* Fix mutations finalization, when an already done mutation can have status is_done=0. +[#9217](https://github.com/ClickHouse/ClickHouse/pull/9217) [(alesapin)](https://github.com/alesapin) +* Support "Processors" pipeline for system.numbers and system.numbers_mt. This also fixes the bug when `max_execution_time` is not respected. +[#7796](https://github.com/ClickHouse/ClickHouse/pull/7796) [(KochetovNicolai)](https://github.com/KochetovNicolai) +* Fix wrong counting of `DictCacheKeysRequestedFound` metric. +[#9411](https://github.com/ClickHouse/ClickHouse/pull/9411) [(nikitamikhaylov)](https://github.com/nikitamikhaylov) +* Added a check for storage policy in `ATTACH PARTITION FROM`, `REPLACE PARTITION`, `MOVE TO TABLE` which otherwise could make data of a part inaccessible after restart and prevent ClickHouse from starting. +[#9383](https://github.com/ClickHouse/ClickHouse/pull/9383) [(excitoon)](https://github.com/excitoon) +* Fixed UBSan report in `MergeTreeIndexSet`. This fixes [#9250](https://github.com/ClickHouse/ClickHouse/issues/9250) +[#9365](https://github.com/ClickHouse/ClickHouse/pull/9365) [(alexey-milovidov)](https://github.com/alexey-milovidov) +* Fix possible data race in BlockIO. +[#9356](https://github.com/ClickHouse/ClickHouse/pull/9356) [(KochetovNicolai)](https://github.com/KochetovNicolai) +* Support for `UInt64` numbers that don't fit in Int64 in JSON-related functions. Update `SIMDJSON` to master. 
This fixes [#9209](https://github.com/ClickHouse/ClickHouse/issues/9209) +[#9344](https://github.com/ClickHouse/ClickHouse/pull/9344) [(alexey-milovidov)](https://github.com/alexey-milovidov) +* Fix the issue when the amount of free space is not calculated correctly if the data directory is mounted to a separate device. For default disk calculate the free space from data subdirectory. This fixes [#7441](https://github.com/ClickHouse/ClickHouse/issues/7441) +[#9257](https://github.com/ClickHouse/ClickHouse/pull/9257) [(millb)](https://github.com/millb) +* Fix the issue when TLS connections may fail with the message `OpenSSL SSL_read: error:14094438:SSL routines:ssl3_read_bytes:tlsv1 alert internal error and SSL Exception: error:2400006E:random number generator::error retrieving entropy.` Update OpenSSL to upstream master. +[#8956](https://github.com/ClickHouse/ClickHouse/pull/8956) [(alexey-milovidov)](https://github.com/alexey-milovidov) +* When executing `CREATE` query, fold constant expressions in storage engine arguments. Replace empty database name with current database. Fixes [#6508](https://github.com/ClickHouse/ClickHouse/issues/6508), [#3492](https://github.com/ClickHouse/ClickHouse/issues/3492). Also fix check for local address in ClickHouseDictionarySource. +[#9262](https://github.com/ClickHouse/ClickHouse/pull/9262) [(tabplubix)](https://github.com/tavplubix) +* Fix segfault in `StorageMerge`, which can happen when reading from StorageFile. +[#9387](https://github.com/ClickHouse/ClickHouse/pull/9387) [(tabplubix)](https://github.com/tavplubix) +* Prevent losing data in `Kafka` in rare cases when exception happens after reading suffix but before commit. Fixes [#9378](https://github.com/ClickHouse/ClickHouse/issues/9378). Related: [#7175](https://github.com/ClickHouse/ClickHouse/issues/7175) +[#9507](https://github.com/ClickHouse/ClickHouse/pull/9507) [(filimonov)](https://github.com/filimonov) +* Fix bug leading to server termination when trying to use / drop `Kafka` table created with wrong parameters. Fixes [#9494](https://github.com/ClickHouse/ClickHouse/issues/9494). Incorporates [#9507](https://github.com/ClickHouse/ClickHouse/issues/9507). +[#9513](https://github.com/ClickHouse/ClickHouse/pull/9513) [(filimonov)](https://github.com/filimonov) + +#### New Feature +* Add `deduplicate_blocks_in_dependent_materialized_views` option to control the behaviour of idempotent inserts into tables with materialized views. This new feature was added to the bugfix release by a special request from Altinity. +[#9070](https://github.com/ClickHouse/ClickHouse/pull/9070) [(urykhy)](https://github.com/urykhy) + ### ClickHouse release v20.1.2.4, 2020-01-22 -### Backward Incompatible Change +#### Backward Incompatible Change * Make the setting `merge_tree_uniform_read_distribution` obsolete. The server still recognizes this setting but it has no effect. [#8308](https://github.com/ClickHouse/ClickHouse/pull/8308) ([alexey-milovidov](https://github.com/alexey-milovidov)) * Changed return type of the function `greatCircleDistance` to `Float32` because now the result of calculation is `Float32`. [#7993](https://github.com/ClickHouse/ClickHouse/pull/7993) ([alexey-milovidov](https://github.com/alexey-milovidov)) * Now it's expected that query parameters are represented in "escaped" format. For example, to pass string `ab` you have to write `a\tb` or `a\b` and respectively, `a%5Ctb` or `a%5C%09b` in URL. This is needed to add the possibility to pass NULL as `\N`. 
This fixes [#7488](https://github.com/ClickHouse/ClickHouse/issues/7488). [#8517](https://github.com/ClickHouse/ClickHouse/pull/8517) ([alexey-milovidov](https://github.com/alexey-milovidov)) @@ -11,14 +500,14 @@ * Add new `ANY JOIN` logic for `StorageJoin` consistent with `JOIN` operation. To upgrade without changes in behaviour you need add `SETTINGS any_join_distinct_right_table_keys = 1` to Engine Join tables metadata or recreate these tables after upgrade. [#8400](https://github.com/ClickHouse/ClickHouse/pull/8400) ([Artem Zuikov](https://github.com/4ertus2)) * Require server to be restarted to apply the changes in logging configuration. This is a temporary workaround to avoid the bug where the server logs to a deleted log file (see [#8696](https://github.com/ClickHouse/ClickHouse/issues/8696)). [#8707](https://github.com/ClickHouse/ClickHouse/pull/8707) ([Alexander Kuzmenkov](https://github.com/akuzm)) -### New Feature +#### New Feature * Added information about part paths to `system.merges`. [#8043](https://github.com/ClickHouse/ClickHouse/pull/8043) ([Vladimir Chebotarev](https://github.com/excitoon)) * Add ability to execute `SYSTEM RELOAD DICTIONARY` query in `ON CLUSTER` mode. [#8288](https://github.com/ClickHouse/ClickHouse/pull/8288) ([Guillaume Tassery](https://github.com/YiuRULE)) * Add ability to execute `CREATE DICTIONARY` queries in `ON CLUSTER` mode. [#8163](https://github.com/ClickHouse/ClickHouse/pull/8163) ([alesapin](https://github.com/alesapin)) * Now user's profile in `users.xml` can inherit multiple profiles. [#8343](https://github.com/ClickHouse/ClickHouse/pull/8343) ([Mikhail f. Shiryaev](https://github.com/Felixoid)) * Added `system.stack_trace` table that allows to look at stack traces of all server threads. This is useful for developers to introspect server state. This fixes [#7576](https://github.com/ClickHouse/ClickHouse/issues/7576). [#8344](https://github.com/ClickHouse/ClickHouse/pull/8344) ([alexey-milovidov](https://github.com/alexey-milovidov)) * Add `DateTime64` datatype with configurable sub-second precision. [#7170](https://github.com/ClickHouse/ClickHouse/pull/7170) ([Vasily Nemkov](https://github.com/Enmk)) -* Add table function `clusterAllReplicas` which allows to query all the nodes in the cluster. [#8493](https://github.com/ClickHouse/ClickHouse/pull/8493) ([kiran sunkari](https://github.com/kiransunkari)) +* Add table function `clusterAllReplicas` which allows to query all the nodes in the cluster. [#8493](https://github.com/ClickHouse/ClickHouse/pull/8493) ([kiran sunkari](https://github.com/kiransunkari)) * Add aggregate function `categoricalInformationValue` which calculates the information value of a discrete feature. [#8117](https://github.com/ClickHouse/ClickHouse/pull/8117) ([hcz](https://github.com/hczhcz)) * Speed up parsing of data files in `CSV`, `TSV` and `JSONEachRow` format by doing it in parallel. [#7780](https://github.com/ClickHouse/ClickHouse/pull/7780) ([Alexander Kuzmenkov](https://github.com/akuzm)) * Add function `bankerRound` which performs banker's rounding. [#8112](https://github.com/ClickHouse/ClickHouse/pull/8112) ([hcz](https://github.com/hczhcz)) @@ -44,7 +533,7 @@ * Added support for brotli (`br`) compression in file-related storages and table functions. This fixes [#8156](https://github.com/ClickHouse/ClickHouse/issues/8156). [#8526](https://github.com/ClickHouse/ClickHouse/pull/8526) ([alexey-milovidov](https://github.com/alexey-milovidov)) * Add `groupBit*` functions for the `SimpleAggregationFunction` type. 
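For illustration, a minimal sketch of one such combination; the table and column names are made up, and this assumes the `SimpleAggregateFunction` data type accepts the `groupBit*` functions as described above.

```sql
-- Hypothetical sketch: a column that keeps a bitwise OR of inserted flags,
-- merged in the background by AggregatingMergeTree.
CREATE TABLE flags_agg
(
    key UInt64,
    flags SimpleAggregateFunction(groupBitOr, UInt64)
)
ENGINE = AggregatingMergeTree
ORDER BY key;

INSERT INTO flags_agg VALUES (1, 1), (1, 4);

-- Collapse the remaining partial states at query time.
SELECT key, groupBitOr(flags) AS flags
FROM flags_agg
GROUP BY key;
```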
[#8485](https://github.com/ClickHouse/ClickHouse/pull/8485) ([Guillaume Tassery](https://github.com/YiuRULE)) -### Bug Fix +#### Bug Fix * Fix rename of tables with `Distributed` engine. Fixes issue [#7868](https://github.com/ClickHouse/ClickHouse/issues/7868). [#8306](https://github.com/ClickHouse/ClickHouse/pull/8306) ([tavplubix](https://github.com/tavplubix)) * Now dictionaries support `EXPRESSION` for attributes in arbitrary string in non-ClickHouse SQL dialect. [#8098](https://github.com/ClickHouse/ClickHouse/pull/8098) ([alesapin](https://github.com/alesapin)) * Fix broken `INSERT SELECT FROM mysql(...)` query. This fixes [#8070](https://github.com/ClickHouse/ClickHouse/issues/8070) and [#7960](https://github.com/ClickHouse/ClickHouse/issues/7960). [#8234](https://github.com/ClickHouse/ClickHouse/pull/8234) ([tavplubix](https://github.com/tavplubix)) @@ -145,7 +634,7 @@ * Fix dictionary reload if it has `invalidate_query`, which stopped updates and some exception on previous update tries. [#8029](https://github.com/ClickHouse/ClickHouse/pull/8029) ([alesapin](https://github.com/alesapin)) * Fixed error in function `arrayReduce` that may lead to "double free" and error in aggregate function combinator `Resample` that may lead to memory leak. Added aggregate function `aggThrow`. This function can be used for testing purposes. [#8446](https://github.com/ClickHouse/ClickHouse/pull/8446) ([alexey-milovidov](https://github.com/alexey-milovidov)) -### Improvement +#### Improvement * Improved logging when working with `S3` table engine. [#8251](https://github.com/ClickHouse/ClickHouse/pull/8251) ([Grigory Pervakov](https://github.com/GrigoryPervakov)) * Printed help message when no arguments are passed when calling `clickhouse-local`. This fixes [#5335](https://github.com/ClickHouse/ClickHouse/issues/5335). [#8230](https://github.com/ClickHouse/ClickHouse/pull/8230) ([Andrey Nagorny](https://github.com/Melancholic)) * Add setting `mutations_sync` which allows to wait `ALTER UPDATE/DELETE` queries synchronously. [#8237](https://github.com/ClickHouse/ClickHouse/pull/8237) ([alesapin](https://github.com/alesapin)) @@ -193,7 +682,7 @@ * The settings `mark_cache_min_lifetime` is now obsolete and does nothing. In previous versions, mark cache can grow in memory larger than `mark_cache_size` to accomodate data within `mark_cache_min_lifetime` seconds. That was leading to confusion and higher memory usage than expected, that is especially bad on memory constrained systems. If you will see performance degradation after installing this release, you should increase the `mark_cache_size`. [#8484](https://github.com/ClickHouse/ClickHouse/pull/8484) ([alexey-milovidov](https://github.com/alexey-milovidov)) * Preparation to use `tid` everywhere. This is needed for [#7477](https://github.com/ClickHouse/ClickHouse/issues/7477). [#8276](https://github.com/ClickHouse/ClickHouse/pull/8276) ([alexey-milovidov](https://github.com/alexey-milovidov)) -### Performance Improvement +#### Performance Improvement * Performance optimizations in processors pipeline. [#7988](https://github.com/ClickHouse/ClickHouse/pull/7988) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) * Non-blocking updates of expired keys in cache dictionaries (with permission to read old ones). [#8303](https://github.com/ClickHouse/ClickHouse/pull/8303) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)) * Compile ClickHouse without `-fno-omit-frame-pointer` globally to spare one more register. 
[#8097](https://github.com/ClickHouse/ClickHouse/pull/8097) ([Amos Bird](https://github.com/amosbird)) @@ -218,7 +707,7 @@ * Parallel parsing data formats [#6553](https://github.com/ClickHouse/ClickHouse/pull/6553) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)) * Enable optimized parser of `Values` with expressions by default (`input_format_values_deduce_templates_of_expressions=1`). [#8231](https://github.com/ClickHouse/ClickHouse/pull/8231) ([tavplubix](https://github.com/tavplubix)) -### Build/Testing/Packaging Improvement +#### Build/Testing/Packaging Improvement * Build fixes for `ARM` and in minimal mode. [#8304](https://github.com/ClickHouse/ClickHouse/pull/8304) ([proller](https://github.com/proller)) * Add coverage file flush for `clickhouse-server` when std::atexit is not called. Also slightly improved logging in stateless tests with coverage. [#8267](https://github.com/ClickHouse/ClickHouse/pull/8267) ([alesapin](https://github.com/alesapin)) * Update LLVM library in contrib. Avoid using LLVM from OS packages. [#8258](https://github.com/ClickHouse/ClickHouse/pull/8258) ([alexey-milovidov](https://github.com/alexey-milovidov)) @@ -288,3246 +777,13 @@ * Update contrib/protobuf. [#8256](https://github.com/ClickHouse/ClickHouse/pull/8256) ([Matwey V. Kornilov](https://github.com/matwey)) * In preparation of switching to c++20 as a new year celebration. "May the C++ force be with ClickHouse." [#8447](https://github.com/ClickHouse/ClickHouse/pull/8447) ([Amos Bird](https://github.com/amosbird)) -### Experimental Feature +#### Experimental Feature * Added experimental setting `min_bytes_to_use_mmap_io`. It allows to read big files without copying data from kernel to userspace. The setting is disabled by default. Recommended threshold is about 64 MB, because mmap/munmap is slow. [#8520](https://github.com/ClickHouse/ClickHouse/pull/8520) ([alexey-milovidov](https://github.com/alexey-milovidov)) * Reworked quotas as a part of access control system. Added new table `system.quotas`, new functions `currentQuota`, `currentQuotaKey`, new SQL syntax `CREATE QUOTA`, `ALTER QUOTA`, `DROP QUOTA`, `SHOW QUOTA`. [#7257](https://github.com/ClickHouse/ClickHouse/pull/7257) ([Vitaly Baranov](https://github.com/vitlibar)) * Allow skipping unknown settings with warnings instead of throwing exceptions. [#7653](https://github.com/ClickHouse/ClickHouse/pull/7653) ([Vitaly Baranov](https://github.com/vitlibar)) * Reworked row policies as a part of access control system. Added new table `system.row_policies`, new function `currentRowPolicies()`, new SQL syntax `CREATE POLICY`, `ALTER POLICY`, `DROP POLICY`, `SHOW CREATE POLICY`, `SHOW POLICIES`. [#7808](https://github.com/ClickHouse/ClickHouse/pull/7808) ([Vitaly Baranov](https://github.com/vitlibar)) -### Security Fix +#### Security Fix * Fixed the possibility of reading directories structure in tables with `File` table engine. This fixes [#8536](https://github.com/ClickHouse/ClickHouse/issues/8536). [#8537](https://github.com/ClickHouse/ClickHouse/pull/8537) ([alexey-milovidov](https://github.com/alexey-milovidov)) -## ClickHouse release v19.17 - -### ClickHouse release v19.17.6.36, 2019-12-27 - -#### Bug Fix -* Fixed potential buffer overflow in decompress. Malicious user can pass fabricated compressed data that could cause read after buffer. This issue was found by Eldar Zaitov from Yandex information security team. 
[#8404](https://github.com/ClickHouse/ClickHouse/pull/8404) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Fixed possible server crash (`std::terminate`) when the server cannot send or write data in JSON or XML format with values of String data type (that require UTF-8 validation) or when compressing result data with Brotli algorithm or in some other rare cases. [#8384](https://github.com/ClickHouse/ClickHouse/pull/8384) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Fixed dictionaries with source from a clickhouse `VIEW`, now reading such dictionaries doesn't cause the error `There is no query`. [#8351](https://github.com/ClickHouse/ClickHouse/pull/8351) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) -* Fixed checking if a client host is allowed by host_regexp specified in users.xml. [#8241](https://github.com/ClickHouse/ClickHouse/pull/8241), [#8342](https://github.com/ClickHouse/ClickHouse/pull/8342) ([Vitaly Baranov](https://github.com/vitlibar)) -* `RENAME TABLE` for a distributed table now renames the folder containing inserted data before sending to shards. This fixes an issue with successive renames `tableA->tableB`, `tableC->tableA`. [#8306](https://github.com/ClickHouse/ClickHouse/pull/8306) ([tavplubix](https://github.com/tavplubix)) -* `range_hashed` external dictionaries created by DDL queries now allow ranges of arbitrary numeric types. [#8275](https://github.com/ClickHouse/ClickHouse/pull/8275) ([alesapin](https://github.com/alesapin)) -* Fixed `INSERT INTO table SELECT ... FROM mysql(...)` table function. [#8234](https://github.com/ClickHouse/ClickHouse/pull/8234) ([tavplubix](https://github.com/tavplubix)) -* Fixed segfault in `INSERT INTO TABLE FUNCTION file()` while inserting into a file which doesn't exist. Now in this case file would be created and then insert would be processed. [#8177](https://github.com/ClickHouse/ClickHouse/pull/8177) ([Olga Khvostikova](https://github.com/stavrolia)) -* Fixed bitmapAnd error when intersecting an aggregated bitmap and a scalar bitmap. [#8082](https://github.com/ClickHouse/ClickHouse/pull/8082) ([Yue Huang](https://github.com/moon03432)) -* Fixed segfault when `EXISTS` query was used without `TABLE` or `DICTIONARY` qualifier, just like `EXISTS t`. [#8213](https://github.com/ClickHouse/ClickHouse/pull/8213) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Fixed return type for functions `rand` and `randConstant` in case of nullable argument. Now functions always return `UInt32` and never `Nullable(UInt32)`. [#8204](https://github.com/ClickHouse/ClickHouse/pull/8204) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) -* Fixed `DROP DICTIONARY IF EXISTS db.dict`, now it doesn't throw exception if `db` doesn't exist. [#8185](https://github.com/ClickHouse/ClickHouse/pull/8185) ([Vitaly Baranov](https://github.com/vitlibar)) -* If a table wasn't completely dropped because of server crash, the server will try to restore and load it [#8176](https://github.com/ClickHouse/ClickHouse/pull/8176) ([tavplubix](https://github.com/tavplubix)) -* Fixed a trivial count query for a distributed table if there are more than two shard local table. 
[#8164](https://github.com/ClickHouse/ClickHouse/pull/8164) ([小路](https://github.com/nicelulu)) -* Fixed bug that lead to a data race in DB::BlockStreamProfileInfo::calculateRowsBeforeLimit() [#8143](https://github.com/ClickHouse/ClickHouse/pull/8143) ([Alexander Kazakov](https://github.com/Akazz)) -* Fixed `ALTER table MOVE part` executed immediately after merging the specified part, which could cause moving a part which the specified part merged into. Now it correctly moves the specified part. [#8104](https://github.com/ClickHouse/ClickHouse/pull/8104) ([Vladimir Chebotarev](https://github.com/excitoon)) -* Expressions for dictionaries can be specified as strings now. This is useful for calculation of attributes while extracting data from non-ClickHouse sources because it allows to use non-ClickHouse syntax for those expressions. [#8098](https://github.com/ClickHouse/ClickHouse/pull/8098) ([alesapin](https://github.com/alesapin)) -* Fixed a very rare race in `clickhouse-copier` because of an overflow in ZXid. [#8088](https://github.com/ClickHouse/ClickHouse/pull/8088) ([Ding Xiang Fei](https://github.com/dingxiangfei2009)) -* Fixed the bug when after the query failed (due to "Too many simultaneous queries" for example) it would not read external tables info, and the -next request would interpret this info as the beginning of the next query causing an error like `Unknown packet from client`. [#8084](https://github.com/ClickHouse/ClickHouse/pull/8084) ([Azat Khuzhin](https://github.com/azat)) -* Avoid null dereference after "Unknown packet X from server" [#8071](https://github.com/ClickHouse/ClickHouse/pull/8071) ([Azat Khuzhin](https://github.com/azat)) -* Restore support of all ICU locales, add the ability to apply collations for constant expressions and add language name to system.collations table. [#8051](https://github.com/ClickHouse/ClickHouse/pull/8051) ([alesapin](https://github.com/alesapin)) -* Number of streams for read from `StorageFile` and `StorageHDFS` is now limited, to avoid exceeding the memory limit. [#7981](https://github.com/ClickHouse/ClickHouse/pull/7981) ([alesapin](https://github.com/alesapin)) -* Fixed `CHECK TABLE` query for `*MergeTree` tables without key. [#7979](https://github.com/ClickHouse/ClickHouse/pull/7979) ([alesapin](https://github.com/alesapin)) -* Removed the mutation number from a part name in case there were no mutations. This removing improved the compatibility with older versions. [#8250](https://github.com/ClickHouse/ClickHouse/pull/8250) ([alesapin](https://github.com/alesapin)) -* Fixed the bug that mutations are skipped for some attached parts due to their data_version are larger than the table mutation version. [#7812](https://github.com/ClickHouse/ClickHouse/pull/7812) ([Zhichang Yu](https://github.com/yuzhichang)) -* Allow starting the server with redundant copies of parts after moving them to another device. [#7810](https://github.com/ClickHouse/ClickHouse/pull/7810) ([Vladimir Chebotarev](https://github.com/excitoon)) -* Fixed the error "Sizes of columns doesn't match" that might appear when using aggregate function columns. [#7790](https://github.com/ClickHouse/ClickHouse/pull/7790) ([Boris Granveaud](https://github.com/bgranvea)) -* Now an exception will be thrown in case of using WITH TIES alongside LIMIT BY. And now it's possible to use TOP with LIMIT BY. 
[#7637](https://github.com/ClickHouse/ClickHouse/pull/7637) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)) -* Fix dictionary reload if it has `invalidate_query`, which stopped updates and some exception on previous update tries. [#8029](https://github.com/ClickHouse/ClickHouse/pull/8029) ([alesapin](https://github.com/alesapin)) - -### ClickHouse release v19.17.4.11, 2019-11-22 - -#### Backward Incompatible Change -* Using column instead of AST to store scalar subquery results for better performance. Setting `enable_scalar_subquery_optimization` was added in 19.17 and it was enabled by default. It leads to errors like [this](https://github.com/ClickHouse/ClickHouse/issues/7851) during upgrade to 19.17.2 or 19.17.3 from previous versions. This setting was disabled by default in 19.17.4, to make possible upgrading from 19.16 and older versions without errors. [#7392](https://github.com/ClickHouse/ClickHouse/pull/7392) ([Amos Bird](https://github.com/amosbird)) - -#### New Feature -* Add the ability to create dictionaries with DDL queries. [#7360](https://github.com/ClickHouse/ClickHouse/pull/7360) ([alesapin](https://github.com/alesapin)) -* Make `bloom_filter` type of index supporting `LowCardinality` and `Nullable` [#7363](https://github.com/ClickHouse/ClickHouse/issues/7363) [#7561](https://github.com/ClickHouse/ClickHouse/pull/7561) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) -* Add function `isValidJSON` to check that passed string is a valid json. [#5910](https://github.com/ClickHouse/ClickHouse/issues/5910) [#7293](https://github.com/ClickHouse/ClickHouse/pull/7293) ([Vdimir](https://github.com/Vdimir)) -* Implement `arrayCompact` function [#7328](https://github.com/ClickHouse/ClickHouse/pull/7328) ([Memo](https://github.com/Joeywzr)) -* Created function `hex` for Decimal numbers. It works like `hex(reinterpretAsString())`, but doesn't delete last zero bytes. [#7355](https://github.com/ClickHouse/ClickHouse/pull/7355) ([Mikhail Korotov](https://github.com/millb)) -* Add `arrayFill` and `arrayReverseFill` functions, which replace elements by other elements in front/back of them in the array. [#7380](https://github.com/ClickHouse/ClickHouse/pull/7380) ([hcz](https://github.com/hczhcz)) -* Add `CRC32IEEE()`/`CRC64()` support [#7480](https://github.com/ClickHouse/ClickHouse/pull/7480) ([Azat Khuzhin](https://github.com/azat)) -* Implement `char` function similar to one in [mysql](https://dev.mysql.com/doc/refman/8.0/en/string-functions.html#function_char) [#7486](https://github.com/ClickHouse/ClickHouse/pull/7486) ([sundyli](https://github.com/sundy-li)) -* Add `bitmapTransform` function. It transforms an array of values in a bitmap to another array of values, the result is a new bitmap [#7598](https://github.com/ClickHouse/ClickHouse/pull/7598) ([Zhichang Yu](https://github.com/yuzhichang)) -* Implemented `javaHashUTF16LE()` function [#7651](https://github.com/ClickHouse/ClickHouse/pull/7651) ([achimbab](https://github.com/achimbab)) -* Add `_shard_num` virtual column for the Distributed engine [#7624](https://github.com/ClickHouse/ClickHouse/pull/7624) ([Azat Khuzhin](https://github.com/azat)) - -#### Experimental Feature -* Support for processors (new query execution pipeline) in `MergeTree`. 
[#7181](https://github.com/ClickHouse/ClickHouse/pull/7181) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) - -#### Bug Fix -* Fix incorrect float parsing in `Values` [#7817](https://github.com/ClickHouse/ClickHouse/issues/7817) [#7870](https://github.com/ClickHouse/ClickHouse/pull/7870) ([tavplubix](https://github.com/tavplubix)) -* Fix rare deadlock which can happen when trace_log is enabled. [#7838](https://github.com/ClickHouse/ClickHouse/pull/7838) ([filimonov](https://github.com/filimonov)) -* Prevent message duplication when producing Kafka table has any MVs selecting from it [#7265](https://github.com/ClickHouse/ClickHouse/pull/7265) ([Ivan](https://github.com/abyss7)) -* Support for `Array(LowCardinality(Nullable(String)))` in `IN`. Resolves [#7364](https://github.com/ClickHouse/ClickHouse/issues/7364) [#7366](https://github.com/ClickHouse/ClickHouse/pull/7366) ([achimbab](https://github.com/achimbab)) -* Add handling of `SQL_TINYINT` and `SQL_BIGINT`, and fix handling of `SQL_FLOAT` data source types in ODBC Bridge. [#7491](https://github.com/ClickHouse/ClickHouse/pull/7491) ([Denis Glazachev](https://github.com/traceon)) -* Fix aggregation (`avg` and quantiles) over empty decimal columns [#7431](https://github.com/ClickHouse/ClickHouse/pull/7431) ([Andrey Konyaev](https://github.com/akonyaev90)) -* Fix `INSERT` into Distributed with `MATERIALIZED` columns [#7377](https://github.com/ClickHouse/ClickHouse/pull/7377) ([Azat Khuzhin](https://github.com/azat)) -* Make `MOVE PARTITION` work if some parts of partition are already on destination disk or volume [#7434](https://github.com/ClickHouse/ClickHouse/pull/7434) ([Vladimir Chebotarev](https://github.com/excitoon)) -* Fixed bug with hardlinks failing to be created during mutations in `ReplicatedMergeTree` in multi-disk configurations. [#7558](https://github.com/ClickHouse/ClickHouse/pull/7558) ([Vladimir Chebotarev](https://github.com/excitoon)) -* Fixed a bug with a mutation on a MergeTree when whole part remains unchanged and best space is being found on another disk [#7602](https://github.com/ClickHouse/ClickHouse/pull/7602) ([Vladimir Chebotarev](https://github.com/excitoon)) -* Fixed bug with `keep_free_space_ratio` not being read from disks configuration [#7645](https://github.com/ClickHouse/ClickHouse/pull/7645) ([Vladimir Chebotarev](https://github.com/excitoon)) -* Fix bug with table contains only `Tuple` columns or columns with complex paths. Fixes [7541](https://github.com/ClickHouse/ClickHouse/issues/7541). [#7545](https://github.com/ClickHouse/ClickHouse/pull/7545) ([alesapin](https://github.com/alesapin)) -* Do not account memory for Buffer engine in max_memory_usage limit [#7552](https://github.com/ClickHouse/ClickHouse/pull/7552) ([Azat Khuzhin](https://github.com/azat)) -* Fix final mark usage in `MergeTree` tables ordered by `tuple()`. In rare cases it could lead to `Can't adjust last granule` error while select. [#7639](https://github.com/ClickHouse/ClickHouse/pull/7639) ([Anton Popov](https://github.com/CurtizJ)) -* Fix bug in mutations that have predicate with actions that require context (for example functions for json), which may lead to crashes or strange exceptions. 
[#7664](https://github.com/ClickHouse/ClickHouse/pull/7664) ([alesapin](https://github.com/alesapin)) -* Fix mismatch of database and table names escaping in `data/` and `shadow/` directories [#7575](https://github.com/ClickHouse/ClickHouse/pull/7575) ([Alexander Burmak](https://github.com/Alex-Burmak)) -* Support duplicated keys in RIGHT|FULL JOINs, e.g. ```ON t.x = u.x AND t.x = u.y```. Fix crash in this case. [#7586](https://github.com/ClickHouse/ClickHouse/pull/7586) ([Artem Zuikov](https://github.com/4ertus2)) -* Fix `Not found column in block` when joining on expression with RIGHT or FULL JOIN. [#7641](https://github.com/ClickHouse/ClickHouse/pull/7641) ([Artem Zuikov](https://github.com/4ertus2)) -* One more attempt to fix infinite loop in `PrettySpace` format [#7591](https://github.com/ClickHouse/ClickHouse/pull/7591) ([Olga Khvostikova](https://github.com/stavrolia)) -* Fix bug in `concat` function when all arguments were `FixedString` of the same size. [#7635](https://github.com/ClickHouse/ClickHouse/pull/7635) ([alesapin](https://github.com/alesapin)) -* Fixed exception in case of using 1 argument while defining S3, URL and HDFS storages. [#7618](https://github.com/ClickHouse/ClickHouse/pull/7618) ([Vladimir Chebotarev](https://github.com/excitoon)) -* Fix scope of the InterpreterSelectQuery for views with query [#7601](https://github.com/ClickHouse/ClickHouse/pull/7601) ([Azat Khuzhin](https://github.com/azat)) - -#### Improvement -* `Nullable` columns recognized and NULL-values handled correctly by ODBC-bridge [#7402](https://github.com/ClickHouse/ClickHouse/pull/7402) ([Vasily Nemkov](https://github.com/Enmk)) -* Write current batch for distributed send atomically [#7600](https://github.com/ClickHouse/ClickHouse/pull/7600) ([Azat Khuzhin](https://github.com/azat)) -* Throw an exception if we cannot detect table for column name in query. [#7358](https://github.com/ClickHouse/ClickHouse/pull/7358) ([Artem Zuikov](https://github.com/4ertus2)) -* Add `merge_max_block_size` setting to `MergeTreeSettings` [#7412](https://github.com/ClickHouse/ClickHouse/pull/7412) ([Artem Zuikov](https://github.com/4ertus2)) -* Queries with `HAVING` and without `GROUP BY` assume group by constant. So, `SELECT 1 HAVING 1` now returns a result. [#7496](https://github.com/ClickHouse/ClickHouse/pull/7496) ([Amos Bird](https://github.com/amosbird)) -* Support parsing `(X,)` as tuple similar to python. [#7501](https://github.com/ClickHouse/ClickHouse/pull/7501), [#7562](https://github.com/ClickHouse/ClickHouse/pull/7562) ([Amos Bird](https://github.com/amosbird)) -* Make `range` function behaviors almost like pythonic one. [#7518](https://github.com/ClickHouse/ClickHouse/pull/7518) ([sundyli](https://github.com/sundy-li)) -* Add `constraints` columns to table `system.settings` [#7553](https://github.com/ClickHouse/ClickHouse/pull/7553) ([Vitaly Baranov](https://github.com/vitlibar)) -* Better Null format for tcp handler, so that it's possible to use `select ignore() from table format Null` for perf measure via clickhouse-client [#7606](https://github.com/ClickHouse/ClickHouse/pull/7606) ([Amos Bird](https://github.com/amosbird)) -* Queries like `CREATE TABLE ... AS (SELECT (1, 2))` are parsed correctly [#7542](https://github.com/ClickHouse/ClickHouse/pull/7542) ([hcz](https://github.com/hczhcz)) - -#### Performance Improvement -* The performance of aggregation over short string keys is improved. 
[#6243](https://github.com/ClickHouse/ClickHouse/pull/6243) ([Alexander Kuzmenkov](https://github.com/akuzm), [Amos Bird](https://github.com/amosbird)) -* Run another pass of syntax/expression analysis to get potential optimizations after constant predicates are folded. [#7497](https://github.com/ClickHouse/ClickHouse/pull/7497) ([Amos Bird](https://github.com/amosbird)) -* Use storage meta info to evaluate trivial `SELECT count() FROM table;` [#7510](https://github.com/ClickHouse/ClickHouse/pull/7510) ([Amos Bird](https://github.com/amosbird), [alexey-milovidov](https://github.com/alexey-milovidov)) -* Vectorize processing `arrayReduce` similar to Aggregator `addBatch`. [#7608](https://github.com/ClickHouse/ClickHouse/pull/7608) ([Amos Bird](https://github.com/amosbird)) -* Minor improvements in performance of `Kafka` consumption [#7475](https://github.com/ClickHouse/ClickHouse/pull/7475) ([Ivan](https://github.com/abyss7)) - -#### Build/Testing/Packaging Improvement -* Add support for cross-compiling to the CPU architecture AARCH64. Refactor packager script. [#7370](https://github.com/ClickHouse/ClickHouse/pull/7370) [#7539](https://github.com/ClickHouse/ClickHouse/pull/7539) ([Ivan](https://github.com/abyss7)) -* Unpack darwin-x86_64 and linux-aarch64 toolchains into mounted Docker volume when building packages [#7534](https://github.com/ClickHouse/ClickHouse/pull/7534) ([Ivan](https://github.com/abyss7)) -* Update Docker Image for Binary Packager [#7474](https://github.com/ClickHouse/ClickHouse/pull/7474) ([Ivan](https://github.com/abyss7)) -* Fixed compile errors on MacOS Catalina [#7585](https://github.com/ClickHouse/ClickHouse/pull/7585) ([Ernest Poletaev](https://github.com/ernestp)) -* Some refactoring in query analysis logic: split complex class into several simple ones. [#7454](https://github.com/ClickHouse/ClickHouse/pull/7454) ([Artem Zuikov](https://github.com/4ertus2)) -* Fix build without submodules [#7295](https://github.com/ClickHouse/ClickHouse/pull/7295) ([proller](https://github.com/proller)) -* Better `add_globs` in CMake files [#7418](https://github.com/ClickHouse/ClickHouse/pull/7418) ([Amos Bird](https://github.com/amosbird)) -* Remove hardcoded paths in `unwind` target [#7460](https://github.com/ClickHouse/ClickHouse/pull/7460) ([Konstantin Podshumok](https://github.com/podshumok)) -* Allow to use mysql format without ssl [#7524](https://github.com/ClickHouse/ClickHouse/pull/7524) ([proller](https://github.com/proller)) - -#### Other -* Added ANTLR4 grammar for ClickHouse SQL dialect [#7595](https://github.com/ClickHouse/ClickHouse/issues/7595) [#7596](https://github.com/ClickHouse/ClickHouse/pull/7596) ([alexey-milovidov](https://github.com/alexey-milovidov)) - -## ClickHouse release v19.16 - -### ClickHouse release v19.16.2.2, 2019-10-30 - -#### Backward Incompatible Change -* Add missing arity validation for count/counIf. - [#7095](https://github.com/ClickHouse/ClickHouse/issues/7095) -[#7298](https://github.com/ClickHouse/ClickHouse/pull/7298) ([Vdimir](https://github.com/Vdimir)) -* Remove legacy `asterisk_left_columns_only` setting (it was disabled by default). - [#7335](https://github.com/ClickHouse/ClickHouse/pull/7335) ([Artem -Zuikov](https://github.com/4ertus2)) -* Format strings for Template data format are now specified in files. - [#7118](https://github.com/ClickHouse/ClickHouse/pull/7118) -([tavplubix](https://github.com/tavplubix)) - -#### New Feature -* Introduce uniqCombined64() to calculate cardinality greater than UINT_MAX. 
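A hedged usage sketch for the function named above; the data volume is arbitrary and only serves to contrast the 64-bit-hash variant with `uniqCombined`.

```sql
-- uniqCombined64 hashes with 64 bits, so the estimate stays usable for
-- cardinalities above UINT_MAX; uniqCombined is shown for comparison.
SELECT
    uniqCombined(number)   AS uniq32,
    uniqCombined64(number) AS uniq64
FROM numbers(10000000);
```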
- [#7213](https://github.com/ClickHouse/ClickHouse/pull/7213), -[#7222](https://github.com/ClickHouse/ClickHouse/pull/7222) ([Azat -Khuzhin](https://github.com/azat)) -* Support Bloom filter indexes on Array columns. - [#6984](https://github.com/ClickHouse/ClickHouse/pull/6984) -([achimbab](https://github.com/achimbab)) -* Add a function `getMacro(name)` that returns String with the value of corresponding `` - from server configuration. [#7240](https://github.com/ClickHouse/ClickHouse/pull/7240) -([alexey-milovidov](https://github.com/alexey-milovidov)) -* Set two configuration options for a dictionary based on an HTTP source: `credentials` and - `http-headers`. [#7092](https://github.com/ClickHouse/ClickHouse/pull/7092) ([Guillaume -Tassery](https://github.com/YiuRULE)) -* Add a new ProfileEvent `Merge` that counts the number of launched background merges. - [#7093](https://github.com/ClickHouse/ClickHouse/pull/7093) ([Mikhail -Korotov](https://github.com/millb)) -* Add fullHostName function that returns a fully qualified domain name. - [#7263](https://github.com/ClickHouse/ClickHouse/issues/7263) -[#7291](https://github.com/ClickHouse/ClickHouse/pull/7291) ([sundyli](https://github.com/sundy-li)) -* Add function `arraySplit` and `arrayReverseSplit` which split an array by "cut off" - conditions. They are useful in time sequence handling. -[#7294](https://github.com/ClickHouse/ClickHouse/pull/7294) ([hcz](https://github.com/hczhcz)) -* Add new functions that return the Array of all matched indices in multiMatch family of functions. - [#7299](https://github.com/ClickHouse/ClickHouse/pull/7299) ([Danila -Kutenin](https://github.com/danlark1)) -* Add a new database engine `Lazy` that is optimized for storing a large number of small -Log - tables. [#7171](https://github.com/ClickHouse/ClickHouse/pull/7171) ([Nikita -Vasilev](https://github.com/nikvas0)) -* Add aggregate functions groupBitmapAnd, -Or, -Xor for bitmap columns. [#7109](https://github.com/ClickHouse/ClickHouse/pull/7109) ([Zhichang -Yu](https://github.com/yuzhichang)) -* Add aggregate function combinators -OrNull and -OrDefault, which return null - or default values when there is nothing to aggregate. -[#7331](https://github.com/ClickHouse/ClickHouse/pull/7331) -([hcz](https://github.com/hczhcz)) -* Introduce CustomSeparated data format that supports custom escaping and - delimiter rules. [#7118](https://github.com/ClickHouse/ClickHouse/pull/7118) -([tavplubix](https://github.com/tavplubix)) -* Support Redis as source of external dictionary. [#4361](https://github.com/ClickHouse/ClickHouse/pull/4361) [#6962](https://github.com/ClickHouse/ClickHouse/pull/6962) ([comunodi](https://github.com/comunodi), [Anton -Popov](https://github.com/CurtizJ)) - -#### Bug Fix -* Fix wrong query result if it has `WHERE IN (SELECT ...)` section and `optimize_read_in_order` is - used. [#7371](https://github.com/ClickHouse/ClickHouse/pull/7371) ([Anton -Popov](https://github.com/CurtizJ)) -* Disabled MariaDB authentication plugin, which depends on files outside of project. - [#7140](https://github.com/ClickHouse/ClickHouse/pull/7140) ([Yuriy -Baranov](https://github.com/yurriy)) -* Fix exception `Cannot convert column ... because it is constant but values of constants are - different in source and result` which could rarely happen when functions `now()`, `today()`, -`yesterday()`, `randConstant()` are used. 
-[#7156](https://github.com/ClickHouse/ClickHouse/pull/7156) ([Nikolai -Kochetov](https://github.com/KochetovNicolai)) -* Fixed issue of using HTTP keep alive timeout instead of TCP keep alive timeout. - [#7351](https://github.com/ClickHouse/ClickHouse/pull/7351) ([Vasily -Nemkov](https://github.com/Enmk)) -* Fixed a segmentation fault in groupBitmapOr (issue [#7109](https://github.com/ClickHouse/ClickHouse/issues/7109)). - [#7289](https://github.com/ClickHouse/ClickHouse/pull/7289) ([Zhichang -Yu](https://github.com/yuzhichang)) -* For materialized views the commit for Kafka is called after all data were written. - [#7175](https://github.com/ClickHouse/ClickHouse/pull/7175) ([Ivan](https://github.com/abyss7)) -* Fixed wrong `duration_ms` value in `system.part_log` table. It was ten times off. - [#7172](https://github.com/ClickHouse/ClickHouse/pull/7172) ([Vladimir -Chebotarev](https://github.com/excitoon)) -* A quick fix to resolve crash in LIVE VIEW table and re-enabling all LIVE VIEW tests. - [#7201](https://github.com/ClickHouse/ClickHouse/pull/7201) -([vzakaznikov](https://github.com/vzakaznikov)) -* Serialize NULL values correctly in min/max indexes of MergeTree parts. - [#7234](https://github.com/ClickHouse/ClickHouse/pull/7234) ([Alexander -Kuzmenkov](https://github.com/akuzm)) -* Don't put virtual columns to .sql metadata when table is created as `CREATE TABLE AS`. - [#7183](https://github.com/ClickHouse/ClickHouse/pull/7183) ([Ivan](https://github.com/abyss7)) -* Fix segmentation fault in `ATTACH PART` query. - [#7185](https://github.com/ClickHouse/ClickHouse/pull/7185) -([alesapin](https://github.com/alesapin)) -* Fix wrong result for some queries given by the optimization of empty IN subqueries and empty - INNER/RIGHT JOIN. [#7284](https://github.com/ClickHouse/ClickHouse/pull/7284) ([Nikolai -Kochetov](https://github.com/KochetovNicolai)) -* Fixing AddressSanitizer error in the LIVE VIEW getHeader() method. - [#7271](https://github.com/ClickHouse/ClickHouse/pull/7271) -([vzakaznikov](https://github.com/vzakaznikov)) - -#### Improvement -* Add a message in case of queue_wait_max_ms wait takes place. - [#7390](https://github.com/ClickHouse/ClickHouse/pull/7390) ([Azat -Khuzhin](https://github.com/azat)) -* Made setting `s3_min_upload_part_size` table-level. - [#7059](https://github.com/ClickHouse/ClickHouse/pull/7059) ([Vladimir -Chebotarev](https://github.com/excitoon)) -* Check TTL in StorageFactory. [#7304](https://github.com/ClickHouse/ClickHouse/pull/7304) - ([sundyli](https://github.com/sundy-li)) -* Squash left-hand blocks in partial merge join (optimization). - [#7122](https://github.com/ClickHouse/ClickHouse/pull/7122) ([Artem -Zuikov](https://github.com/4ertus2)) -* Do not allow non-deterministic functions in mutations of Replicated table engines, because this - can introduce inconsistencies between replicas. -[#7247](https://github.com/ClickHouse/ClickHouse/pull/7247) ([Alexander -Kazakov](https://github.com/Akazz)) -* Disable memory tracker while converting exception stack trace to string. It can prevent the loss - of error messages of type `Memory limit exceeded` on server, which caused the `Attempt to read -after eof` exception on client. [#7264](https://github.com/ClickHouse/ClickHouse/pull/7264) -([Nikolai Kochetov](https://github.com/KochetovNicolai)) -* Miscellaneous format improvements. 
Resolves - [#6033](https://github.com/ClickHouse/ClickHouse/issues/6033), -[#2633](https://github.com/ClickHouse/ClickHouse/issues/2633), -[#6611](https://github.com/ClickHouse/ClickHouse/issues/6611), -[#6742](https://github.com/ClickHouse/ClickHouse/issues/6742) -[#7215](https://github.com/ClickHouse/ClickHouse/pull/7215) -([tavplubix](https://github.com/tavplubix)) -* ClickHouse ignores values on the right side of IN operator that are not convertible to the left - side type. Make it work properly for compound types -- Array and Tuple. -[#7283](https://github.com/ClickHouse/ClickHouse/pull/7283) ([Alexander -Kuzmenkov](https://github.com/akuzm)) -* Support missing inequalities for ASOF JOIN. It's possible to join less-or-equal variant and strict - greater and less variants for ASOF column in ON syntax. -[#7282](https://github.com/ClickHouse/ClickHouse/pull/7282) ([Artem -Zuikov](https://github.com/4ertus2)) -* Optimize partial merge join. [#7070](https://github.com/ClickHouse/ClickHouse/pull/7070) - ([Artem Zuikov](https://github.com/4ertus2)) -* Do not use more than 98K of memory in uniqCombined functions. - [#7236](https://github.com/ClickHouse/ClickHouse/pull/7236), -[#7270](https://github.com/ClickHouse/ClickHouse/pull/7270) ([Azat -Khuzhin](https://github.com/azat)) -* Flush parts of right-hand joining table on disk in PartialMergeJoin (if there is not enough - memory). Load data back when needed. [#7186](https://github.com/ClickHouse/ClickHouse/pull/7186) -([Artem Zuikov](https://github.com/4ertus2)) - -#### Performance Improvement -* Speed up joinGet with const arguments by avoiding data duplication. - [#7359](https://github.com/ClickHouse/ClickHouse/pull/7359) ([Amos -Bird](https://github.com/amosbird)) -* Return early if the subquery is empty. - [#7007](https://github.com/ClickHouse/ClickHouse/pull/7007) ([小路](https://github.com/nicelulu)) -* Optimize parsing of SQL expression in Values. - [#6781](https://github.com/ClickHouse/ClickHouse/pull/6781) -([tavplubix](https://github.com/tavplubix)) - -#### Build/Testing/Packaging Improvement -* Disable some contribs for cross-compilation to Mac OS. - [#7101](https://github.com/ClickHouse/ClickHouse/pull/7101) ([Ivan](https://github.com/abyss7)) -* Add missing linking with PocoXML for clickhouse_common_io. - [#7200](https://github.com/ClickHouse/ClickHouse/pull/7200) ([Azat -Khuzhin](https://github.com/azat)) -* Accept multiple test filter arguments in clickhouse-test. - [#7226](https://github.com/ClickHouse/ClickHouse/pull/7226) ([Alexander -Kuzmenkov](https://github.com/akuzm)) -* Enable musl and jemalloc for ARM. [#7300](https://github.com/ClickHouse/ClickHouse/pull/7300) - ([Amos Bird](https://github.com/amosbird)) -* Added `--client-option` parameter to `clickhouse-test` to pass additional parameters to client. - [#7277](https://github.com/ClickHouse/ClickHouse/pull/7277) ([Nikolai -Kochetov](https://github.com/KochetovNicolai)) -* Preserve existing configs on rpm package upgrade. - [#7103](https://github.com/ClickHouse/ClickHouse/pull/7103) -([filimonov](https://github.com/filimonov)) -* Fix errors detected by PVS. [#7153](https://github.com/ClickHouse/ClickHouse/pull/7153) ([Artem - Zuikov](https://github.com/4ertus2)) -* Fix build for Darwin. [#7149](https://github.com/ClickHouse/ClickHouse/pull/7149) - ([Ivan](https://github.com/abyss7)) -* glibc 2.29 compatibility. 
[#7142](https://github.com/ClickHouse/ClickHouse/pull/7142) ([Amos - Bird](https://github.com/amosbird)) -* Make sure dh_clean does not touch potential source files. - [#7205](https://github.com/ClickHouse/ClickHouse/pull/7205) ([Amos -Bird](https://github.com/amosbird)) -* Attempt to avoid conflict when updating from altinity rpm - it has config file packaged separately - in clickhouse-server-common. [#7073](https://github.com/ClickHouse/ClickHouse/pull/7073) -([filimonov](https://github.com/filimonov)) -* Optimize some header files for faster rebuilds. - [#7212](https://github.com/ClickHouse/ClickHouse/pull/7212), -[#7231](https://github.com/ClickHouse/ClickHouse/pull/7231) ([Alexander -Kuzmenkov](https://github.com/akuzm)) -* Add performance tests for Date and DateTime. [#7332](https://github.com/ClickHouse/ClickHouse/pull/7332) ([Vasily - Nemkov](https://github.com/Enmk)) -* Fix some tests that contained non-deterministic mutations. - [#7132](https://github.com/ClickHouse/ClickHouse/pull/7132) ([Alexander -Kazakov](https://github.com/Akazz)) -* Add build with MemorySanitizer to CI. [#7066](https://github.com/ClickHouse/ClickHouse/pull/7066) - ([Alexander Kuzmenkov](https://github.com/akuzm)) -* Avoid use of uninitialized values in MetricsTransmitter. - [#7158](https://github.com/ClickHouse/ClickHouse/pull/7158) ([Azat -Khuzhin](https://github.com/azat)) -* Fix some issues in Fields found by MemorySanitizer. - [#7135](https://github.com/ClickHouse/ClickHouse/pull/7135), -[#7179](https://github.com/ClickHouse/ClickHouse/pull/7179) ([Alexander -Kuzmenkov](https://github.com/akuzm)), [#7376](https://github.com/ClickHouse/ClickHouse/pull/7376) -([Amos Bird](https://github.com/amosbird)) -* Fix undefined behavior in murmurhash32. [#7388](https://github.com/ClickHouse/ClickHouse/pull/7388) ([Amos - Bird](https://github.com/amosbird)) -* Fix undefined behavior in StoragesInfoStream. [#7384](https://github.com/ClickHouse/ClickHouse/pull/7384) - ([tavplubix](https://github.com/tavplubix)) -* Fixed constant expressions folding for external database engines (MySQL, ODBC, JDBC). In previous - versions it wasn't working for multiple constant expressions and was not working at all for Date, -DateTime and UUID. This fixes [#7245](https://github.com/ClickHouse/ClickHouse/issues/7245) -[#7252](https://github.com/ClickHouse/ClickHouse/pull/7252) -([alexey-milovidov](https://github.com/alexey-milovidov)) -* Fixing ThreadSanitizer data race error in the LIVE VIEW when accessing no_users_thread variable. - [#7353](https://github.com/ClickHouse/ClickHouse/pull/7353) -([vzakaznikov](https://github.com/vzakaznikov)) -* Get rid of malloc symbols in libcommon - [#7134](https://github.com/ClickHouse/ClickHouse/pull/7134), -[#7065](https://github.com/ClickHouse/ClickHouse/pull/7065) ([Amos -Bird](https://github.com/amosbird)) -* Add global flag ENABLE_LIBRARIES for disabling all libraries. - [#7063](https://github.com/ClickHouse/ClickHouse/pull/7063) -([proller](https://github.com/proller)) - -#### Code cleanup -* Generalize configuration repository to prepare for DDL for Dictionaries. [#7155](https://github.com/ClickHouse/ClickHouse/pull/7155) - ([alesapin](https://github.com/alesapin)) -* Parser for dictionaries DDL without any semantic. - [#7209](https://github.com/ClickHouse/ClickHouse/pull/7209) -([alesapin](https://github.com/alesapin)) -* Split ParserCreateQuery into different smaller parsers. 
- [#7253](https://github.com/ClickHouse/ClickHouse/pull/7253) -([alesapin](https://github.com/alesapin)) -* Small refactoring and renaming near external dictionaries. - [#7111](https://github.com/ClickHouse/ClickHouse/pull/7111) -([alesapin](https://github.com/alesapin)) -* Refactor some code to prepare for role-based access control. [#7235](https://github.com/ClickHouse/ClickHouse/pull/7235) ([Vitaly - Baranov](https://github.com/vitlibar)) -* Some improvements in DatabaseOrdinary code. - [#7086](https://github.com/ClickHouse/ClickHouse/pull/7086) ([Nikita -Vasilev](https://github.com/nikvas0)) -* Do not use iterators in find() and emplace() methods of hash tables. -[#7026](https://github.com/ClickHouse/ClickHouse/pull/7026) ([Alexander -Kuzmenkov](https://github.com/akuzm)) -* Fix getMultipleValuesFromConfig in case when parameter root is not empty. [#7374](https://github.com/ClickHouse/ClickHouse/pull/7374) -([Mikhail Korotov](https://github.com/millb)) -* Remove some copy-paste (TemporaryFile and TemporaryFileStream) - [#7166](https://github.com/ClickHouse/ClickHouse/pull/7166) ([Artem -Zuikov](https://github.com/4ertus2)) -* Improved code readability a little bit (`MergeTreeData::getActiveContainingPart`). - [#7361](https://github.com/ClickHouse/ClickHouse/pull/7361) ([Vladimir -Chebotarev](https://github.com/excitoon)) -* Wait for all scheduled jobs, which are using local objects, if `ThreadPool::schedule(...)` throws - an exception. Rename `ThreadPool::schedule(...)` to `ThreadPool::scheduleOrThrowOnError(...)` and -fix comments to make obvious that it may throw. -[#7350](https://github.com/ClickHouse/ClickHouse/pull/7350) -([tavplubix](https://github.com/tavplubix)) - -## ClickHouse release 19.15 - -### ClickHouse release 19.15.4.10, 2019-10-31 - -#### Bug Fix -* Added handling of SQL_TINYINT and SQL_BIGINT, and fix handling of SQL_FLOAT data source types in ODBC Bridge. -[#7491](https://github.com/ClickHouse/ClickHouse/pull/7491) ([Denis Glazachev](https://github.com/traceon)) -* Allowed to have some parts on destination disk or volume in MOVE PARTITION. -[#7434](https://github.com/ClickHouse/ClickHouse/pull/7434) ([Vladimir Chebotarev](https://github.com/excitoon)) -* Fixed NULL-values in nullable columns through ODBC-bridge. -[#7402](https://github.com/ClickHouse/ClickHouse/pull/7402) ([Vasily Nemkov](https://github.com/Enmk)) -* Fixed INSERT into Distributed non local node with MATERIALIZED columns. -[#7377](https://github.com/ClickHouse/ClickHouse/pull/7377) ([Azat Khuzhin](https://github.com/azat)) -* Fixed function getMultipleValuesFromConfig. -[#7374](https://github.com/ClickHouse/ClickHouse/pull/7374) ([Mikhail Korotov](https://github.com/millb)) -* Fixed issue of using HTTP keep alive timeout instead of TCP keep alive timeout. -[#7351](https://github.com/ClickHouse/ClickHouse/pull/7351) ([Vasily Nemkov](https://github.com/Enmk)) -* Wait for all jobs to finish on exception (fixes rare segfaults). -[#7350](https://github.com/ClickHouse/ClickHouse/pull/7350) ([tavplubix](https://github.com/tavplubix)) -* Don't push to MVs when inserting into Kafka table. -[#7265](https://github.com/ClickHouse/ClickHouse/pull/7265) ([Ivan](https://github.com/abyss7)) -* Disable memory tracker for exception stack. -[#7264](https://github.com/ClickHouse/ClickHouse/pull/7264) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) -* Fixed bad code in transforming query for external database. 
-[#7252](https://github.com/ClickHouse/ClickHouse/pull/7252) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Avoid use of uninitialized values in MetricsTransmitter. -[#7158](https://github.com/ClickHouse/ClickHouse/pull/7158) ([Azat Khuzhin](https://github.com/azat)) -* Added example config with macros for tests ([alexey-milovidov](https://github.com/alexey-milovidov)) - -### ClickHouse release 19.15.3.6, 2019-10-09 - -#### Bug Fix -* Fixed bad_variant in hashed dictionary. -([alesapin](https://github.com/alesapin)) -* Fixed a bug with segmentation fault in ATTACH PART query. -([alesapin](https://github.com/alesapin)) -* Fixed time calculation in `MergeTreeData`. -([Vladimir Chebotarev](https://github.com/excitoon)) -* Commit to Kafka explicitly after the writing is finalized. -[#7175](https://github.com/ClickHouse/ClickHouse/pull/7175) ([Ivan](https://github.com/abyss7)) -* Serialize NULL values correctly in min/max indexes of MergeTree parts. -[#7234](https://github.com/ClickHouse/ClickHouse/pull/7234) ([Alexander Kuzmenkov](https://github.com/akuzm)) - -### ClickHouse release 19.15.2.2, 2019-10-01 - -#### New Feature -* Tiered storage: support for using multiple storage volumes for tables with MergeTree engine. It's possible to store fresh data on SSD and automatically move old data to HDD. ([example](https://clickhouse.github.io/clickhouse-presentations/meetup30/new_features/#12)). [#4918](https://github.com/ClickHouse/ClickHouse/pull/4918) ([Igr](https://github.com/ObjatieGroba)) [#6489](https://github.com/ClickHouse/ClickHouse/pull/6489) ([alesapin](https://github.com/alesapin)) -* Add table function `input` for reading incoming data in `INSERT SELECT` query. [#5450](https://github.com/ClickHouse/ClickHouse/pull/5450) ([palasonic1](https://github.com/palasonic1)) [#6832](https://github.com/ClickHouse/ClickHouse/pull/6832) ([Anton Popov](https://github.com/CurtizJ)) -* Add a `sparse_hashed` dictionary layout that is functionally equivalent to the `hashed` layout, but is more memory efficient. It uses about half as much memory at the cost of slower value retrieval. [#6894](https://github.com/ClickHouse/ClickHouse/pull/6894) ([Azat Khuzhin](https://github.com/azat)) -* Implement ability to define a list of users for access to dictionaries. Applies only to the currently connected database. [#6907](https://github.com/ClickHouse/ClickHouse/pull/6907) ([Guillaume Tassery](https://github.com/YiuRULE)) -* Add `LIMIT` option to `SHOW` query. [#6944](https://github.com/ClickHouse/ClickHouse/pull/6944) ([Philipp Malkovsky](https://github.com/malkfilipp)) -* Add `bitmapSubsetLimit(bitmap, range_start, limit)` function that returns a subset of the smallest `limit` values in the set that are no smaller than `range_start`. [#6957](https://github.com/ClickHouse/ClickHouse/pull/6957) ([Zhichang Yu](https://github.com/yuzhichang)) -* Add `bitmapMin` and `bitmapMax` functions. [#6970](https://github.com/ClickHouse/ClickHouse/pull/6970) ([Zhichang Yu](https://github.com/yuzhichang)) -* Add function `repeat` related to [issue-6648](https://github.com/yandex/ClickHouse/issues/6648) [#6999](https://github.com/ClickHouse/ClickHouse/pull/6999) ([flynn](https://github.com/ucasFL)) - -#### Experimental Feature -* Implement (in memory) Merge Join variant that does not change the current pipeline. Result is partially sorted by merge key. Set `partial_merge_join = 1` to use this feature. The Merge Join is still in development. 
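A minimal sketch of enabling the experimental merge join described above; the tables `t1`, `t2` and the key column `k` are placeholders.

```sql
-- Hypothetical sketch: switch the session to the partial merge join variant;
-- the joined tables are assumed to share a key column k.
SET partial_merge_join = 1;

SELECT count()
FROM t1
ANY LEFT JOIN t2 USING (k);
```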
[#6940](https://github.com/ClickHouse/ClickHouse/pull/6940) ([Artem Zuikov](https://github.com/4ertus2)) -* Add `S3` engine and table function. It is still in development (no authentication support yet). [#5596](https://github.com/ClickHouse/ClickHouse/pull/5596) ([Vladimir Chebotarev](https://github.com/excitoon)) - -#### Improvement -* Every message read from Kafka is inserted atomically. This resolves almost all known issues with Kafka engine. [#6950](https://github.com/ClickHouse/ClickHouse/pull/6950) ([Ivan](https://github.com/abyss7)) -* Improvements for failover of Distributed queries. Shorten recovery time, also it is now configurable and can be seen in `system.clusters`. [#6399](https://github.com/ClickHouse/ClickHouse/pull/6399) ([Vasily Nemkov](https://github.com/Enmk)) -* Support numeric values for Enums directly in `IN` section. #6766 [#6941](https://github.com/ClickHouse/ClickHouse/pull/6941) ([dimarub2000](https://github.com/dimarub2000)) -* Support (optional, disabled by default) redirects on URL storage. [#6914](https://github.com/ClickHouse/ClickHouse/pull/6914) ([maqroll](https://github.com/maqroll)) -* Add information message when client with an older version connects to a server. [#6893](https://github.com/ClickHouse/ClickHouse/pull/6893) ([Philipp Malkovsky](https://github.com/malkfilipp)) -* Remove maximum backoff sleep time limit for sending data in Distributed tables [#6895](https://github.com/ClickHouse/ClickHouse/pull/6895) ([Azat Khuzhin](https://github.com/azat)) -* Add ability to send profile events (counters) with cumulative values to graphite. It can be enabled under `` in server `config.xml`. [#6969](https://github.com/ClickHouse/ClickHouse/pull/6969) ([Azat Khuzhin](https://github.com/azat)) -* Add automatically cast type `T` to `LowCardinality(T)` while inserting data in column of type `LowCardinality(T)` in Native format via HTTP. [#6891](https://github.com/ClickHouse/ClickHouse/pull/6891) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) -* Add ability to use function `hex` without using `reinterpretAsString` for `Float32`, `Float64`. [#7024](https://github.com/ClickHouse/ClickHouse/pull/7024) ([Mikhail Korotov](https://github.com/millb)) - -#### Build/Testing/Packaging Improvement -* Add gdb-index to clickhouse binary with debug info. It will speed up startup time of `gdb`. [#6947](https://github.com/ClickHouse/ClickHouse/pull/6947) ([alesapin](https://github.com/alesapin)) -* Speed up deb packaging with patched dpkg-deb which uses `pigz`. [#6960](https://github.com/ClickHouse/ClickHouse/pull/6960) ([alesapin](https://github.com/alesapin)) -* Set `enable_fuzzing = 1` to enable libfuzzer instrumentation of all the project code. [#7042](https://github.com/ClickHouse/ClickHouse/pull/7042) ([kyprizel](https://github.com/kyprizel)) -* Add split build smoke test in CI. [#7061](https://github.com/ClickHouse/ClickHouse/pull/7061) ([alesapin](https://github.com/alesapin)) -* Add build with MemorySanitizer to CI. [#7066](https://github.com/ClickHouse/ClickHouse/pull/7066) ([Alexander Kuzmenkov](https://github.com/akuzm)) -* Replace `libsparsehash` with `sparsehash-c11` [#6965](https://github.com/ClickHouse/ClickHouse/pull/6965) ([Azat Khuzhin](https://github.com/azat)) - -#### Bug Fix -* Fixed performance degradation of index analysis on complex keys on large tables. This fixes #6924. 
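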
[#7075](https://github.com/ClickHouse/ClickHouse/pull/7075) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Fix logical error causing segfaults when selecting from an empty Kafka topic. [#6909](https://github.com/ClickHouse/ClickHouse/pull/6909) ([Ivan](https://github.com/abyss7)) -* Fix too early MySQL connection close in `MySQLBlockInputStream.cpp`. [#6882](https://github.com/ClickHouse/ClickHouse/pull/6882) ([Clément Rodriguez](https://github.com/clemrodriguez)) -* Returned support for very old Linux kernels (fix [#6841](https://github.com/ClickHouse/ClickHouse/issues/6841)) [#6853](https://github.com/ClickHouse/ClickHouse/pull/6853) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Fix possible data loss in `insert select` query in case of empty block in input stream. #6834 #6862 [#6911](https://github.com/ClickHouse/ClickHouse/pull/6911) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) -* Fix for function `arrayEnumerateUniqRanked` with empty arrays in params [#6928](https://github.com/ClickHouse/ClickHouse/pull/6928) ([proller](https://github.com/proller)) -* Fix complex queries with array joins and global subqueries. [#6934](https://github.com/ClickHouse/ClickHouse/pull/6934) ([Ivan](https://github.com/abyss7)) -* Fix `Unknown identifier` error in ORDER BY and GROUP BY with multiple JOINs [#7022](https://github.com/ClickHouse/ClickHouse/pull/7022) ([Artem Zuikov](https://github.com/4ertus2)) -* Fixed `MSan` warning while executing function with `LowCardinality` argument. [#7062](https://github.com/ClickHouse/ClickHouse/pull/7062) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) - -#### Backward Incompatible Change -* Changed serialization format of bitmap* aggregate function states to improve performance. Serialized states of bitmap* from previous versions cannot be read. [#6908](https://github.com/ClickHouse/ClickHouse/pull/6908) ([Zhichang Yu](https://github.com/yuzhichang)) - -## ClickHouse release 19.14 -### ClickHouse release 19.14.7.15, 2019-10-02 - -#### Bug Fix -* This release also contains all bug fixes from 19.11.12.69. -* Fixed compatibility for distributed queries between 19.14 and earlier versions. This fixes [#7068](https://github.com/ClickHouse/ClickHouse/issues/7068). [#7069](https://github.com/ClickHouse/ClickHouse/pull/7069) ([alexey-milovidov](https://github.com/alexey-milovidov)) - -### ClickHouse release 19.14.6.12, 2019-09-19 - -#### Bug Fix -* Fix for function `arrayEnumerateUniqRanked` with empty arrays in params. [#6928](https://github.com/ClickHouse/ClickHouse/pull/6928) ([proller](https://github.com/proller)) -* Fixed subquery name in queries with `ARRAY JOIN` and `GLOBAL IN subquery` with alias. Use subquery alias for external table name if it is specified. [#6934](https://github.com/ClickHouse/ClickHouse/pull/6934) ([Ivan](https://github.com/abyss7)) - -#### Build/Testing/Packaging Improvement -* Fix [flapping](https://clickhouse-test-reports.s3.yandex.net/6944/aab95fd5175a513413c7395a73a82044bdafb906/functional_stateless_tests_(debug).html) test `00715_fetch_merged_or_mutated_part_zookeeper` by rewriting it to a shell script because it needs to wait for mutations to apply. [#6977](https://github.com/ClickHouse/ClickHouse/pull/6977) ([Alexander Kazakov](https://github.com/Akazz)) -* Fixed UBSan and MemSan failure in function `groupUniqArray` with empty array argument. It was caused by placing an empty `PaddedPODArray` into the hash table zero cell because the constructor for the zero cell value was not called. 
[#6937](https://github.com/ClickHouse/ClickHouse/pull/6937) ([Amos Bird](https://github.com/amosbird)) - -### ClickHouse release 19.14.3.3, 2019-09-10 - -#### New Feature -* `WITH FILL` modifier for `ORDER BY`. (continuation of [#5069](https://github.com/ClickHouse/ClickHouse/issues/5069)) [#6610](https://github.com/ClickHouse/ClickHouse/pull/6610) ([Anton Popov](https://github.com/CurtizJ)) -* `WITH TIES` modifier for `LIMIT`. (continuation of [#5069](https://github.com/ClickHouse/ClickHouse/issues/5069)) [#6610](https://github.com/ClickHouse/ClickHouse/pull/6610) ([Anton Popov](https://github.com/CurtizJ)) -* Parse unquoted `NULL` literal as NULL (if setting `format_csv_unquoted_null_literal_as_null=1`). Initialize null fields with default values if data type of this field is not nullable (if setting `input_format_null_as_default=1`). [#5990](https://github.com/ClickHouse/ClickHouse/issues/5990) [#6055](https://github.com/ClickHouse/ClickHouse/pull/6055) ([tavplubix](https://github.com/tavplubix)) -* Support for wildcards in paths of table functions `file` and `hdfs`. If the path contains wildcards, the table will be readonly. Example of usage: `select * from hdfs('hdfs://hdfs1:9000/some_dir/another_dir/*/file{0..9}{0..9}')` and `select * from file('some_dir/{some_file,another_file,yet_another}.tsv', 'TSV', 'value UInt32')`. [#6092](https://github.com/ClickHouse/ClickHouse/pull/6092) ([Olga Khvostikova](https://github.com/stavrolia)) -* New `system.metric_log` table which stores values of `system.events` and `system.metrics` with specified time interval. [#6363](https://github.com/ClickHouse/ClickHouse/issues/6363) [#6467](https://github.com/ClickHouse/ClickHouse/pull/6467) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)) [#6530](https://github.com/ClickHouse/ClickHouse/pull/6530) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Allow to write ClickHouse text logs to `system.text_log` table. [#6037](https://github.com/ClickHouse/ClickHouse/issues/6037) [#6103](https://github.com/ClickHouse/ClickHouse/pull/6103) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)) [#6164](https://github.com/ClickHouse/ClickHouse/pull/6164) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Show private symbols in stack traces (this is done via parsing symbol tables of ELF files). Added information about file and line number in stack traces if debug info is present. Speedup symbol name lookup with indexing symbols present in program. Added new SQL functions for introspection: `demangle` and `addressToLine`. Renamed function `symbolizeAddress` to `addressToSymbol` for consistency. Function `addressToSymbol` will return mangled name for performance reasons and you have to apply `demangle`. Added setting `allow_introspection_functions` which is turned off by default. [#6201](https://github.com/ClickHouse/ClickHouse/pull/6201) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Table function `values` (the name is case-insensitive). It allows to read from `VALUES` list proposed in [#5984](https://github.com/ClickHouse/ClickHouse/issues/5984). Example: `SELECT * FROM VALUES('a UInt64, s String', (1, 'one'), (2, 'two'), (3, 'three'))`. [#6217](https://github.com/ClickHouse/ClickHouse/issues/6217). [#6209](https://github.com/ClickHouse/ClickHouse/pull/6209) ([dimarub2000](https://github.com/dimarub2000)) -* Added an ability to alter storage settings. Syntax: `ALTER TABLE MODIFY SETTING = `. 
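To illustrate the `MODIFY SETTING` syntax described above, here is a minimal sketch; the table name and value are hypothetical, and `ttl_only_drop_parts` is used only because it is a MergeTree-level option mentioned elsewhere in this changelog:

```sql
-- Hypothetical table; created only so the ALTER below has something to act on.
CREATE TABLE hits (event_date Date, user_id UInt64)
ENGINE = MergeTree()
ORDER BY (event_date, user_id);

-- Change a storage-level (MergeTree) setting in place.
ALTER TABLE hits MODIFY SETTING ttl_only_drop_parts = 1;
```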
[#6366](https://github.com/ClickHouse/ClickHouse/pull/6366) [#6669](https://github.com/ClickHouse/ClickHouse/pull/6669) [#6685](https://github.com/ClickHouse/ClickHouse/pull/6685) ([alesapin](https://github.com/alesapin)) -* Support for removing detached parts. Syntax: `ALTER TABLE DROP DETACHED PART ''`. [#6158](https://github.com/ClickHouse/ClickHouse/pull/6158) ([tavplubix](https://github.com/tavplubix)) -* Table constraints. Allows adding a constraint to the table definition which will be checked at insert. [#5273](https://github.com/ClickHouse/ClickHouse/pull/5273) ([Gleb Novikov](https://github.com/NanoBjorn)) [#6652](https://github.com/ClickHouse/ClickHouse/pull/6652) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Support for cascaded materialized views. [#6324](https://github.com/ClickHouse/ClickHouse/pull/6324) ([Amos Bird](https://github.com/amosbird)) -* Turn on query profiler by default to sample every query execution thread once a second. [#6283](https://github.com/ClickHouse/ClickHouse/pull/6283) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Input format `ORC`. [#6454](https://github.com/ClickHouse/ClickHouse/pull/6454) [#6703](https://github.com/ClickHouse/ClickHouse/pull/6703) ([akonyaev90](https://github.com/akonyaev90)) -* Added two new functions: `sigmoid` and `tanh` (that are useful for machine learning applications). [#6254](https://github.com/ClickHouse/ClickHouse/pull/6254) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Function `hasToken(haystack, token)`, `hasTokenCaseInsensitive(haystack, token)` to check if a given token is in the haystack. A token is a maximal-length substring between two non-alphanumeric ASCII characters (or boundaries of the haystack). The token must be a constant string. Supported by the tokenbf_v1 index specialization. [#6596](https://github.com/ClickHouse/ClickHouse/pull/6596), [#6662](https://github.com/ClickHouse/ClickHouse/pull/6662) ([Vasily Nemkov](https://github.com/Enmk)) -* New function `neighbor(value, offset[, default_value])`. Allows to reach the previous/next value within a column in a block of data. [#5925](https://github.com/ClickHouse/ClickHouse/pull/5925) ([Alex Krash](https://github.com/alex-krash)) [6685365ab8c5b74f9650492c88a012596eb1b0c6](https://github.com/ClickHouse/ClickHouse/commit/6685365ab8c5b74f9650492c88a012596eb1b0c6) [341e2e4587a18065c2da1ca888c73389f48ce36c](https://github.com/ClickHouse/ClickHouse/commit/341e2e4587a18065c2da1ca888c73389f48ce36c) [Alexey Milovidov](https://github.com/alexey-milovidov) -* Created a function `currentUser()`, returning the login of the authorized user. Added alias `user()` for compatibility with MySQL. [#6470](https://github.com/ClickHouse/ClickHouse/pull/6470) ([Alex Krash](https://github.com/alex-krash)) -* New aggregate functions `quantilesExactInclusive` and `quantilesExactExclusive` which were proposed in [#5885](https://github.com/ClickHouse/ClickHouse/issues/5885). [#6477](https://github.com/ClickHouse/ClickHouse/pull/6477) ([dimarub2000](https://github.com/dimarub2000)) -* Function `bitmapRange(bitmap, range_begin, range_end)` which returns a new set with the specified range (not including the `range_end`). [#6314](https://github.com/ClickHouse/ClickHouse/pull/6314) ([Zhichang Yu](https://github.com/yuzhichang)) -* Function `geohashesInBox(longitude_min, latitude_min, longitude_max, latitude_max, precision)` which creates an array of precision-long strings of geohash boxes covering the provided area. A usage sketch follows below.
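The sketch uses the signature quoted above; the coordinates and precision are arbitrary illustrative values:

```sql
-- Returns an array of geohash strings of the given precision covering the box.
SELECT geohashesInBox(24.48, 40.56, 24.56, 40.60, 4) AS boxes;
```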
[#6127](https://github.com/ClickHouse/ClickHouse/pull/6127) ([Vasily Nemkov](https://github.com/Enmk)) -* Implement support for INSERT query with `Kafka` tables. [#6012](https://github.com/ClickHouse/ClickHouse/pull/6012) ([Ivan](https://github.com/abyss7)) -* Added support for `_partition` and `_timestamp` virtual columns to Kafka engine. [#6400](https://github.com/ClickHouse/ClickHouse/pull/6400) ([Ivan](https://github.com/abyss7)) -* Possibility to remove sensitive data from `query_log`, server logs, process list with regexp-based rules. [#5710](https://github.com/ClickHouse/ClickHouse/pull/5710) ([filimonov](https://github.com/filimonov)) - -#### Experimental Feature -* Input and output data format `Template`. It allows to specify a custom format string for input and output. [#4354](https://github.com/ClickHouse/ClickHouse/issues/4354) [#6727](https://github.com/ClickHouse/ClickHouse/pull/6727) ([tavplubix](https://github.com/tavplubix)) -* Implementation of `LIVE VIEW` tables that were originally proposed in [#2898](https://github.com/ClickHouse/ClickHouse/pull/2898), prepared in [#3925](https://github.com/ClickHouse/ClickHouse/issues/3925), and then updated in [#5541](https://github.com/ClickHouse/ClickHouse/issues/5541). See [#5541](https://github.com/ClickHouse/ClickHouse/issues/5541) for detailed description. [#5541](https://github.com/ClickHouse/ClickHouse/issues/5541) ([vzakaznikov](https://github.com/vzakaznikov)) [#6425](https://github.com/ClickHouse/ClickHouse/pull/6425) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) [#6656](https://github.com/ClickHouse/ClickHouse/pull/6656) ([vzakaznikov](https://github.com/vzakaznikov)) Note that the `LIVE VIEW` feature may be removed in future versions. - -#### Bug Fix -* This release also contains all bug fixes from 19.13 and 19.11. -* Fix segmentation fault when the table has skip indices and vertical merge happens. [#6723](https://github.com/ClickHouse/ClickHouse/pull/6723) ([alesapin](https://github.com/alesapin)) -* Fix per-column TTL with non-trivial column defaults. Previously, in case of a force TTL merge with `OPTIMIZE ... FINAL` query, expired values were replaced by type defaults instead of user-specified column defaults. [#6796](https://github.com/ClickHouse/ClickHouse/pull/6796) ([Anton Popov](https://github.com/CurtizJ)) -* Fix Kafka messages duplication problem on normal server restart. [#6597](https://github.com/ClickHouse/ClickHouse/pull/6597) ([Ivan](https://github.com/abyss7)) -* Fixed infinite loop when reading Kafka messages. Do not pause/resume consumer on subscription at all - otherwise it may get paused indefinitely in some scenarios. [#6354](https://github.com/ClickHouse/ClickHouse/pull/6354) ([Ivan](https://github.com/abyss7)) -* Fix `Key expression contains comparison between inconvertible types` exception in `bitmapContains` function. [#6136](https://github.com/ClickHouse/ClickHouse/issues/6136) [#6146](https://github.com/ClickHouse/ClickHouse/issues/6146) [#6156](https://github.com/ClickHouse/ClickHouse/pull/6156) ([dimarub2000](https://github.com/dimarub2000)) -* Fix segfault with enabled `optimize_skip_unused_shards` and missing sharding key. [#6384](https://github.com/ClickHouse/ClickHouse/pull/6384) ([Anton Popov](https://github.com/CurtizJ)) -* Fixed wrong code in mutations that may lead to memory corruption. Fixed segfault with read of address `0x14c0` that may happen due to concurrent `DROP TABLE` and `SELECT` from `system.parts` or `system.parts_columns`.
Fixed race condition in preparation of mutation queries. Fixed deadlock caused by `OPTIMIZE` of Replicated tables and concurrent modification operations like ALTERs. [#6514](https://github.com/ClickHouse/ClickHouse/pull/6514) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Removed extra verbose logging in MySQL interface [#6389](https://github.com/ClickHouse/ClickHouse/pull/6389) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Return the ability to parse boolean settings from 'true' and 'false' in the configuration file. [#6278](https://github.com/ClickHouse/ClickHouse/pull/6278) ([alesapin](https://github.com/alesapin)) -* Fix crash in `quantile` and `median` function over `Nullable(Decimal128)`. [#6378](https://github.com/ClickHouse/ClickHouse/pull/6378) ([Artem Zuikov](https://github.com/4ertus2)) -* Fixed possible incomplete result returned by `SELECT` query with `WHERE` condition on a primary key that contains a conversion to a Float type. It was caused by incorrect checking of monotonicity in the `toFloat` function. [#6248](https://github.com/ClickHouse/ClickHouse/issues/6248) [#6374](https://github.com/ClickHouse/ClickHouse/pull/6374) ([dimarub2000](https://github.com/dimarub2000)) -* Check `max_expanded_ast_elements` setting for mutations. Clear mutations after `TRUNCATE TABLE`. [#6205](https://github.com/ClickHouse/ClickHouse/pull/6205) ([Winter Zhang](https://github.com/zhang2014)) -* Fix JOIN results for key columns when used with `join_use_nulls`. Attach Nulls instead of column defaults. [#6249](https://github.com/ClickHouse/ClickHouse/pull/6249) ([Artem Zuikov](https://github.com/4ertus2)) -* Fix for skip indices with vertical merge and alter. Fix for `Bad size of marks file` exception. [#6594](https://github.com/ClickHouse/ClickHouse/issues/6594) [#6713](https://github.com/ClickHouse/ClickHouse/pull/6713) ([alesapin](https://github.com/alesapin)) -* Fix rare crash in `ALTER MODIFY COLUMN` and vertical merge when one of merged/altered parts is empty (0 rows) [#6746](https://github.com/ClickHouse/ClickHouse/issues/6746) [#6780](https://github.com/ClickHouse/ClickHouse/pull/6780) ([alesapin](https://github.com/alesapin)) -* Fixed bug in conversion of `LowCardinality` types in `AggregateFunctionFactory`. This fixes [#6257](https://github.com/ClickHouse/ClickHouse/issues/6257). [#6281](https://github.com/ClickHouse/ClickHouse/pull/6281) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) -* Fix wrong behavior and possible segfaults in `topK` and `topKWeighted` aggregate functions. [#6404](https://github.com/ClickHouse/ClickHouse/pull/6404) ([Anton Popov](https://github.com/CurtizJ)) -* Fixed unsafe code around `getIdentifier` function. [#6401](https://github.com/ClickHouse/ClickHouse/issues/6401) [#6409](https://github.com/ClickHouse/ClickHouse/pull/6409) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Fixed bug in MySQL wire protocol (used while connecting to ClickHouse from a MySQL client). Caused by heap buffer overflow in `PacketPayloadWriteBuffer`. [#6212](https://github.com/ClickHouse/ClickHouse/pull/6212) ([Yuriy Baranov](https://github.com/yurriy)) -* Fixed memory leak in `bitmapSubsetInRange` function. [#6819](https://github.com/ClickHouse/ClickHouse/pull/6819) ([Zhichang Yu](https://github.com/yuzhichang)) -* Fix rare bug when a mutation is executed after a granularity change. [#6816](https://github.com/ClickHouse/ClickHouse/pull/6816) ([alesapin](https://github.com/alesapin)) -* Allow protobuf message with all fields by default.
[#6132](https://github.com/ClickHouse/ClickHouse/pull/6132) ([Vitaly Baranov](https://github.com/vitlibar)) -* Resolve a bug with `nullIf` function when we send a `NULL` argument on the second argument. [#6446](https://github.com/ClickHouse/ClickHouse/pull/6446) ([Guillaume Tassery](https://github.com/YiuRULE)) -* Fix rare bug with wrong memory allocation/deallocation in complex key cache dictionaries with string fields which leads to infinite memory consumption (looks like memory leak). Bug reproduces when string size was a power of two starting from eight (8, 16, 32, etc). [#6447](https://github.com/ClickHouse/ClickHouse/pull/6447) ([alesapin](https://github.com/alesapin)) -* Fixed Gorilla encoding on small sequences which caused exception `Cannot write after end of buffer`. [#6398](https://github.com/ClickHouse/ClickHouse/issues/6398) [#6444](https://github.com/ClickHouse/ClickHouse/pull/6444) ([Vasily Nemkov](https://github.com/Enmk)) -* Allow to use not nullable types in JOINs with `join_use_nulls` enabled. [#6705](https://github.com/ClickHouse/ClickHouse/pull/6705) ([Artem Zuikov](https://github.com/4ertus2)) -* Disable `Poco::AbstractConfiguration` substitutions in query in `clickhouse-client`. [#6706](https://github.com/ClickHouse/ClickHouse/pull/6706) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Avoid deadlock in `REPLACE PARTITION`. [#6677](https://github.com/ClickHouse/ClickHouse/pull/6677) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Using `arrayReduce` for constant arguments may lead to segfault. [#6242](https://github.com/ClickHouse/ClickHouse/issues/6242) [#6326](https://github.com/ClickHouse/ClickHouse/pull/6326) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Fix inconsistent parts which can appear if replica was restored after `DROP PARTITION`. [#6522](https://github.com/ClickHouse/ClickHouse/issues/6522) [#6523](https://github.com/ClickHouse/ClickHouse/pull/6523) ([tavplubix](https://github.com/tavplubix)) -* Fixed hang in `JSONExtractRaw` function. [#6195](https://github.com/ClickHouse/ClickHouse/issues/6195) [#6198](https://github.com/ClickHouse/ClickHouse/pull/6198) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Fix bug with incorrect skip indices serialization and aggregation with adaptive granularity. [#6594](https://github.com/ClickHouse/ClickHouse/issues/6594). [#6748](https://github.com/ClickHouse/ClickHouse/pull/6748) ([alesapin](https://github.com/alesapin)) -* Fix `WITH ROLLUP` and `WITH CUBE` modifiers of `GROUP BY` with two-level aggregation. [#6225](https://github.com/ClickHouse/ClickHouse/pull/6225) ([Anton Popov](https://github.com/CurtizJ)) -* Fix bug with writing secondary indices marks with adaptive granularity. [#6126](https://github.com/ClickHouse/ClickHouse/pull/6126) ([alesapin](https://github.com/alesapin)) -* Fix initialization order while server startup. Since `StorageMergeTree::background_task_handle` is initialized in `startup()` the `MergeTreeBlockOutputStream::write()` may try to use it before initialization. Just check if it is initialized. [#6080](https://github.com/ClickHouse/ClickHouse/pull/6080) ([Ivan](https://github.com/abyss7)) -* Clearing the data buffer from the previous read operation that was completed with an error. [#6026](https://github.com/ClickHouse/ClickHouse/pull/6026) ([Nikolay](https://github.com/bopohaa)) -* Fix bug with enabling adaptive granularity when creating a new replica for Replicated\*MergeTree table. 
[#6394](https://github.com/ClickHouse/ClickHouse/issues/6394) [#6452](https://github.com/ClickHouse/ClickHouse/pull/6452) ([alesapin](https://github.com/alesapin)) -* Fixed possible crash during server startup in case an exception happened in `libunwind` during an exception at access to the uninitialized `ThreadStatus` structure. [#6456](https://github.com/ClickHouse/ClickHouse/pull/6456) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)) -* Fix crash in `yandexConsistentHash` function. Found by fuzz test. [#6304](https://github.com/ClickHouse/ClickHouse/issues/6304) [#6305](https://github.com/ClickHouse/ClickHouse/pull/6305) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Fixed the possibility of hanging queries when server is overloaded and the global thread pool becomes near full. This has a higher chance of happening on clusters with a large number of shards (hundreds), because distributed queries allocate a thread per connection to each shard. For example, this issue may reproduce if a cluster of 330 shards is processing 30 concurrent distributed queries. This issue affects all versions starting from 19.2. [#6301](https://github.com/ClickHouse/ClickHouse/pull/6301) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Fixed logic of `arrayEnumerateUniqRanked` function. [#6423](https://github.com/ClickHouse/ClickHouse/pull/6423) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Fix segfault when decoding symbol table. [#6603](https://github.com/ClickHouse/ClickHouse/pull/6603) ([Amos Bird](https://github.com/amosbird)) -* Fixed irrelevant exception in cast of `LowCardinality(Nullable)` to not-Nullable column in case it doesn't contain Nulls (e.g. in a query like `SELECT CAST(CAST('Hello' AS LowCardinality(Nullable(String))) AS String)`. [#6094](https://github.com/ClickHouse/ClickHouse/issues/6094) [#6119](https://github.com/ClickHouse/ClickHouse/pull/6119) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) -* Removed extra quoting of description in `system.settings` table. [#6696](https://github.com/ClickHouse/ClickHouse/issues/6696) [#6699](https://github.com/ClickHouse/ClickHouse/pull/6699) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Avoid possible deadlock in `TRUNCATE` of Replicated table. [#6695](https://github.com/ClickHouse/ClickHouse/pull/6695) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Fix reading in order of sorting key. [#6189](https://github.com/ClickHouse/ClickHouse/pull/6189) ([Anton Popov](https://github.com/CurtizJ)) -* Fix `ALTER TABLE ... UPDATE` query for tables with `enable_mixed_granularity_parts=1`. [#6543](https://github.com/ClickHouse/ClickHouse/pull/6543) ([alesapin](https://github.com/alesapin)) -* Fix bug opened by [#4405](https://github.com/ClickHouse/ClickHouse/pull/4405) (since 19.4.0). Reproduces in queries to Distributed tables over MergeTree tables when we don't query any columns (`SELECT 1`). [#6236](https://github.com/ClickHouse/ClickHouse/pull/6236) ([alesapin](https://github.com/alesapin)) -* Fixed overflow in integer division of signed type to unsigned type. The behaviour was exactly as in the C or C++ language (integer promotion rules), which may be surprising. Please note that the overflow is still possible when dividing a large signed number by a large unsigned number or vice-versa (but that case is less usual). The issue existed in all server versions.
[#6214](https://github.com/ClickHouse/ClickHouse/issues/6214) [#6233](https://github.com/ClickHouse/ClickHouse/pull/6233) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Limit maximum sleep time for throttling when `max_execution_speed` or `max_execution_speed_bytes` is set. Fixed false errors like `Estimated query execution time (inf seconds) is too long`. [#5547](https://github.com/ClickHouse/ClickHouse/issues/5547) [#6232](https://github.com/ClickHouse/ClickHouse/pull/6232) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Fixed issues about using `MATERIALIZED` columns and aliases in `MaterializedView`. [#448](https://github.com/ClickHouse/ClickHouse/issues/448) [#3484](https://github.com/ClickHouse/ClickHouse/issues/3484) [#3450](https://github.com/ClickHouse/ClickHouse/issues/3450) [#2878](https://github.com/ClickHouse/ClickHouse/issues/2878) [#2285](https://github.com/ClickHouse/ClickHouse/issues/2285) [#3796](https://github.com/ClickHouse/ClickHouse/pull/3796) ([Amos Bird](https://github.com/amosbird)) [#6316](https://github.com/ClickHouse/ClickHouse/pull/6316) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Fix `FormatFactory` behaviour for input streams which are not implemented as processor. [#6495](https://github.com/ClickHouse/ClickHouse/pull/6495) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) -* Fixed typo. [#6631](https://github.com/ClickHouse/ClickHouse/pull/6631) ([Alex Ryndin](https://github.com/alexryndin)) -* Typo in the error message ( is -> are ). [#6839](https://github.com/ClickHouse/ClickHouse/pull/6839) ([Denis Zhuravlev](https://github.com/den-crane)) -* Fixed error while parsing of columns list from string if type contained a comma (this issue was relevant for `File`, `URL`, `HDFS` storages) [#6217](https://github.com/ClickHouse/ClickHouse/issues/6217). [#6209](https://github.com/ClickHouse/ClickHouse/pull/6209) ([dimarub2000](https://github.com/dimarub2000)) - -#### Security Fix -* This release also contains all bug security fixes from 19.13 and 19.11. -* Fixed the possibility of a fabricated query to cause server crash due to stack overflow in SQL parser. Fixed the possibility of stack overflow in Merge and Distributed tables, materialized views and conditions for row-level security that involve subqueries. [#6433](https://github.com/ClickHouse/ClickHouse/pull/6433) ([alexey-milovidov](https://github.com/alexey-milovidov)) - -#### Improvement -* Correct implementation of ternary logic for `AND/OR`. [#6048](https://github.com/ClickHouse/ClickHouse/pull/6048) ([Alexander Kazakov](https://github.com/Akazz)) -* Now values and rows with expired TTL will be removed after `OPTIMIZE ... FINAL` query from old parts without TTL infos or with outdated TTL infos, e.g. after `ALTER ... MODIFY TTL` query. Added queries `SYSTEM STOP/START TTL MERGES` to disallow/allow assign merges with TTL and filter expired values in all merges. [#6274](https://github.com/ClickHouse/ClickHouse/pull/6274) ([Anton Popov](https://github.com/CurtizJ)) -* Possibility to change the location of ClickHouse history file for client using `CLICKHOUSE_HISTORY_FILE` env. [#6840](https://github.com/ClickHouse/ClickHouse/pull/6840) ([filimonov](https://github.com/filimonov)) -* Remove `dry_run` flag from `InterpreterSelectQuery`. ... [#6375](https://github.com/ClickHouse/ClickHouse/pull/6375) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) -* Support `ASOF JOIN` with `ON` section. 
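A sketch of `ASOF JOIN` with an `ON` section, using hypothetical `events` and `quotes` tables; the single inequality condition selects the closest preceding match:

```sql
-- For every event, take the latest quote with quote_time <= event_time.
SELECT e.symbol, e.event_time, q.price
FROM events AS e
ASOF JOIN quotes AS q
    ON e.symbol = q.symbol AND e.event_time >= q.quote_time;
```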
[#6211](https://github.com/ClickHouse/ClickHouse/pull/6211) ([Artem Zuikov](https://github.com/4ertus2)) -* Better support of skip indexes for mutations and replication. Support for `MATERIALIZE/CLEAR INDEX ... IN PARTITION` query. `UPDATE x = x` recalculates all indices that use column `x`. [#5053](https://github.com/ClickHouse/ClickHouse/pull/5053) ([Nikita Vasilev](https://github.com/nikvas0)) -* Allow to `ATTACH` live views (for example, at the server startup) regardless of the `allow_experimental_live_view` setting. [#6754](https://github.com/ClickHouse/ClickHouse/pull/6754) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* For stack traces gathered by query profiler, do not include stack frames generated by the query profiler itself. [#6250](https://github.com/ClickHouse/ClickHouse/pull/6250) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Now table functions `values`, `file`, `url`, `hdfs` have support for ALIAS columns. [#6255](https://github.com/ClickHouse/ClickHouse/pull/6255) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Throw an exception if `config.d` file doesn't have the corresponding root element as the config file. [#6123](https://github.com/ClickHouse/ClickHouse/pull/6123) ([dimarub2000](https://github.com/dimarub2000)) -* Print extra info in exception message for `no space left on device`. [#6182](https://github.com/ClickHouse/ClickHouse/issues/6182), [#6252](https://github.com/ClickHouse/ClickHouse/issues/6252) [#6352](https://github.com/ClickHouse/ClickHouse/pull/6352) ([tavplubix](https://github.com/tavplubix)) -* When determining shards of a `Distributed` table to be covered by a read query (for `optimize_skip_unused_shards` = 1) ClickHouse now checks conditions from both `prewhere` and `where` clauses of select statement. [#6521](https://github.com/ClickHouse/ClickHouse/pull/6521) ([Alexander Kazakov](https://github.com/Akazz)) -* Enabled `SIMDJSON` for machines without AVX2 but with SSE 4.2 and PCLMUL instruction set. [#6285](https://github.com/ClickHouse/ClickHouse/issues/6285) [#6320](https://github.com/ClickHouse/ClickHouse/pull/6320) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* ClickHouse can work on filesystems without `O_DIRECT` support (such as ZFS and BtrFS) without additional tuning. [#4449](https://github.com/ClickHouse/ClickHouse/issues/4449) [#6730](https://github.com/ClickHouse/ClickHouse/pull/6730) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Support push down predicate for final subquery. [#6120](https://github.com/ClickHouse/ClickHouse/pull/6120) ([TCeason](https://github.com/TCeason)) [#6162](https://github.com/ClickHouse/ClickHouse/pull/6162) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Better `JOIN ON` keys extraction [#6131](https://github.com/ClickHouse/ClickHouse/pull/6131) ([Artem Zuikov](https://github.com/4ertus2)) -* Updated `SIMDJSON`. [#6285](https://github.com/ClickHouse/ClickHouse/issues/6285). [#6306](https://github.com/ClickHouse/ClickHouse/pull/6306) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Optimize selection of the smallest column for `SELECT count()` query. [#6344](https://github.com/ClickHouse/ClickHouse/pull/6344) ([Amos Bird](https://github.com/amosbird)) -* Added `strict` parameter in `windowFunnel()`. When `strict` is set, `windowFunnel()` applies conditions only to unique values. A usage sketch follows below.
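The sketch below assumes the new parameter is passed as a constant string, the same way as other `windowFunnel()` modes; the table and event names are hypothetical:

```sql
-- How far each user gets through the funnel within one hour (3600 s window);
-- 'strict' restricts matching to unique values of the matched conditions.
SELECT user_id,
       windowFunnel(3600, 'strict')(event_time,
           event = 'view', event = 'cart', event = 'purchase') AS level
FROM events
GROUP BY user_id;
```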
[#6548](https://github.com/ClickHouse/ClickHouse/pull/6548) ([achimbab](https://github.com/achimbab)) -* Safer interface of `mysqlxx::Pool`. [#6150](https://github.com/ClickHouse/ClickHouse/pull/6150) ([avasiliev](https://github.com/avasiliev)) -* Options line size when executing with `--help` option now corresponds with terminal size. [#6590](https://github.com/ClickHouse/ClickHouse/pull/6590) ([dimarub2000](https://github.com/dimarub2000)) -* Disable "read in order" optimization for aggregation without keys. [#6599](https://github.com/ClickHouse/ClickHouse/pull/6599) ([Anton Popov](https://github.com/CurtizJ)) -* HTTP status code for `INCORRECT_DATA` and `TYPE_MISMATCH` error codes was changed from default `500 Internal Server Error` to `400 Bad Request`. [#6271](https://github.com/ClickHouse/ClickHouse/pull/6271) ([Alexander Rodin](https://github.com/a-rodin)) -* Move Join object from `ExpressionAction` into `AnalyzedJoin`. `ExpressionAnalyzer` and `ExpressionAction` do not know about `Join` class anymore. Its logic is hidden by `AnalyzedJoin` iface. [#6801](https://github.com/ClickHouse/ClickHouse/pull/6801) ([Artem Zuikov](https://github.com/4ertus2)) -* Fixed possible deadlock of distributed queries when one of shards is localhost but the query is sent via network connection. [#6759](https://github.com/ClickHouse/ClickHouse/pull/6759) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Changed semantic of multiple tables `RENAME` to avoid possible deadlocks. [#6757](https://github.com/ClickHouse/ClickHouse/issues/6757). [#6756](https://github.com/ClickHouse/ClickHouse/pull/6756) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Rewritten MySQL compatibility server to prevent loading full packet payload in memory. Decreased memory consumption for each connection to approximately `2 * DBMS_DEFAULT_BUFFER_SIZE` (read/write buffers). [#5811](https://github.com/ClickHouse/ClickHouse/pull/5811) ([Yuriy Baranov](https://github.com/yurriy)) -* Move AST alias interpreting logic out of parser that doesn't have to know anything about query semantics. [#6108](https://github.com/ClickHouse/ClickHouse/pull/6108) ([Artem Zuikov](https://github.com/4ertus2)) -* Slightly more safe parsing of `NamesAndTypesList`. [#6408](https://github.com/ClickHouse/ClickHouse/issues/6408). [#6410](https://github.com/ClickHouse/ClickHouse/pull/6410) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* `clickhouse-copier`: Allow use `where_condition` from config with `partition_key` alias in query for checking partition existence (Earlier it was used only in reading data queries). [#6577](https://github.com/ClickHouse/ClickHouse/pull/6577) ([proller](https://github.com/proller)) -* Added optional message argument in `throwIf`. ([#5772](https://github.com/ClickHouse/ClickHouse/issues/5772)) [#6329](https://github.com/ClickHouse/ClickHouse/pull/6329) ([Vdimir](https://github.com/Vdimir)) -* Server exception got while sending insertion data is now being processed in client as well. [#5891](https://github.com/ClickHouse/ClickHouse/issues/5891) [#6711](https://github.com/ClickHouse/ClickHouse/pull/6711) ([dimarub2000](https://github.com/dimarub2000)) -* Added a metric `DistributedFilesToInsert` that shows the total number of files in filesystem that are selected to send to remote servers by Distributed tables. The number is summed across all shards. 
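One way to look at the new metric, assuming it is exposed through `system.metrics` like other current metrics:

```sql
-- Number of files queued on the local filesystem for sending to remote shards.
SELECT value
FROM system.metrics
WHERE metric = 'DistributedFilesToInsert';
```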
[#6600](https://github.com/ClickHouse/ClickHouse/pull/6600) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Move most of JOINs prepare logic from `ExpressionAction/ExpressionAnalyzer` to `AnalyzedJoin`. [#6785](https://github.com/ClickHouse/ClickHouse/pull/6785) ([Artem Zuikov](https://github.com/4ertus2)) -* Fix TSan [warning](https://clickhouse-test-reports.s3.yandex.net/6399/c1c1d1daa98e199e620766f1bd06a5921050a00d/functional_stateful_tests_(thread).html) 'lock-order-inversion'. [#6740](https://github.com/ClickHouse/ClickHouse/pull/6740) ([Vasily Nemkov](https://github.com/Enmk)) -* Better information messages about lack of Linux capabilities. Logging fatal errors with the "fatal" level, which will make them easier to find in `system.text_log`. [#6441](https://github.com/ClickHouse/ClickHouse/pull/6441) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* When dumping temporary data to disk to restrict memory usage during `GROUP BY`, `ORDER BY` was enabled, the free disk space was not checked. The fix adds a new setting `min_free_disk_space`: when the free disk space is smaller than the threshold, the query will stop and throw `ErrorCodes::NOT_ENOUGH_SPACE`. [#6678](https://github.com/ClickHouse/ClickHouse/pull/6678) ([Weiqing Xu](https://github.com/weiqxu)) [#6691](https://github.com/ClickHouse/ClickHouse/pull/6691) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Removed recursive rwlock by thread. It makes no sense, because threads are reused between queries. `SELECT` query may acquire a lock in one thread, hold a lock from another thread and exit from the first thread. At the same time, the first thread can be reused by a `DROP` query. This will lead to false "Attempt to acquire exclusive lock recursively" messages. [#6771](https://github.com/ClickHouse/ClickHouse/pull/6771) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Split `ExpressionAnalyzer.appendJoin()`. Prepare a place in `ExpressionAnalyzer` for `MergeJoin`. [#6524](https://github.com/ClickHouse/ClickHouse/pull/6524) ([Artem Zuikov](https://github.com/4ertus2)) -* Added `mysql_native_password` authentication plugin to MySQL compatibility server. [#6194](https://github.com/ClickHouse/ClickHouse/pull/6194) ([Yuriy Baranov](https://github.com/yurriy)) -* Fewer `clock_gettime` calls; fixed ABI compatibility between debug/release in `Allocator` (insignificant issue). [#6197](https://github.com/ClickHouse/ClickHouse/pull/6197) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Move `collectUsedColumns` from `ExpressionAnalyzer` to `SyntaxAnalyzer`. `SyntaxAnalyzer` makes `required_source_columns` itself now. [#6416](https://github.com/ClickHouse/ClickHouse/pull/6416) ([Artem Zuikov](https://github.com/4ertus2)) -* Add setting `joined_subquery_requires_alias` to require aliases for subselects and table functions in `FROM` when more than one table is present (i.e. queries with JOINs). [#6733](https://github.com/ClickHouse/ClickHouse/pull/6733) ([Artem Zuikov](https://github.com/4ertus2)) -* Extract `GetAggregatesVisitor` class from `ExpressionAnalyzer`. [#6458](https://github.com/ClickHouse/ClickHouse/pull/6458) ([Artem Zuikov](https://github.com/4ertus2)) -* `system.query_log`: change data type of `type` column to `Enum`. [#6265](https://github.com/ClickHouse/ClickHouse/pull/6265) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)) -* Static linking of `sha256_password` authentication plugin.
[#6512](https://github.com/ClickHouse/ClickHouse/pull/6512) ([Yuriy Baranov](https://github.com/yurriy)) -* Avoid extra dependency for the setting `compile` to work. In previous versions, the user may get errors like `cannot open crti.o`, `unable to find library -lc` etc. [#6309](https://github.com/ClickHouse/ClickHouse/pull/6309) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* More validation of the input that may come from malicious replica. [#6303](https://github.com/ClickHouse/ClickHouse/pull/6303) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Now `clickhouse-obfuscator` file is available in `clickhouse-client` package. In previous versions it was available as `clickhouse obfuscator` (with whitespace). [#5816](https://github.com/ClickHouse/ClickHouse/issues/5816) [#6609](https://github.com/ClickHouse/ClickHouse/pull/6609) ([dimarub2000](https://github.com/dimarub2000)) -* Fixed deadlock when we have at least two queries that read at least two tables in different order and another query that performs DDL operation on one of tables. Fixed another very rare deadlock. [#6764](https://github.com/ClickHouse/ClickHouse/pull/6764) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Added `os_thread_ids` column to `system.processes` and `system.query_log` for better debugging possibilities. [#6763](https://github.com/ClickHouse/ClickHouse/pull/6763) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* A workaround for PHP mysqlnd extension bugs which occur when `sha256_password` is used as a default authentication plugin (described in [#6031](https://github.com/ClickHouse/ClickHouse/issues/6031)). [#6113](https://github.com/ClickHouse/ClickHouse/pull/6113) ([Yuriy Baranov](https://github.com/yurriy)) -* Remove unneeded place with changed nullability columns. [#6693](https://github.com/ClickHouse/ClickHouse/pull/6693) ([Artem Zuikov](https://github.com/4ertus2)) -* Set default value of `queue_max_wait_ms` to zero, because current value (five seconds) makes no sense. There are rare circumstances when this setting has any use. Added settings `replace_running_query_max_wait_ms`, `kafka_max_wait_ms` and `connection_pool_max_wait_ms` for disambiguation. [#6692](https://github.com/ClickHouse/ClickHouse/pull/6692) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Extract `SelectQueryExpressionAnalyzer` from `ExpressionAnalyzer`. Keep the last one for non-select queries. [#6499](https://github.com/ClickHouse/ClickHouse/pull/6499) ([Artem Zuikov](https://github.com/4ertus2)) -* Removed duplicating input and output formats. [#6239](https://github.com/ClickHouse/ClickHouse/pull/6239) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) -* Allow user to override `poll_interval` and `idle_connection_timeout` settings on connection. [#6230](https://github.com/ClickHouse/ClickHouse/pull/6230) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* `MergeTree` now has an additional option `ttl_only_drop_parts` (disabled by default) to avoid partial pruning of parts, so that they are dropped completely when all the rows in a part are expired. [#6191](https://github.com/ClickHouse/ClickHouse/pull/6191) ([Sergi Vladykin](https://github.com/svladykin)) -* Type checks for set index functions. Throw an exception if a function got a wrong type. This fixes the fuzz test with UBSan.
[#6511](https://github.com/ClickHouse/ClickHouse/pull/6511) ([Nikita Vasilev](https://github.com/nikvas0)) - -#### Performance Improvement -* Optimize queries with `ORDER BY expressions` clause, where `expressions` have a coinciding prefix with the sorting key in `MergeTree` tables. This optimization is controlled by `optimize_read_in_order` setting. [#6054](https://github.com/ClickHouse/ClickHouse/pull/6054) [#6629](https://github.com/ClickHouse/ClickHouse/pull/6629) ([Anton Popov](https://github.com/CurtizJ)) -* Allow to use multiple threads during parts loading and removal. [#6372](https://github.com/ClickHouse/ClickHouse/issues/6372) [#6074](https://github.com/ClickHouse/ClickHouse/issues/6074) [#6438](https://github.com/ClickHouse/ClickHouse/pull/6438) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Implemented batch variant of updating aggregate function states. It may lead to performance benefits. [#6435](https://github.com/ClickHouse/ClickHouse/pull/6435) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Using `FastOps` library for functions `exp`, `log`, `sigmoid`, `tanh`. FastOps is a fast vector math library from Michael Parakhin (Yandex CTO). Improved performance of `exp` and `log` functions more than 6 times. The functions `exp` and `log` from `Float32` argument will return `Float32` (in previous versions they always returned `Float64`). Now `exp(nan)` may return `inf`. The result of `exp` and `log` functions may not be the nearest machine representable number to the true answer. [#6254](https://github.com/ClickHouse/ClickHouse/pull/6254) ([alexey-milovidov](https://github.com/alexey-milovidov)) Using Danila Kutenin's variant to make fastops work [#6317](https://github.com/ClickHouse/ClickHouse/pull/6317) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Disable consecutive key optimization for `UInt8/16`. [#6298](https://github.com/ClickHouse/ClickHouse/pull/6298) [#6701](https://github.com/ClickHouse/ClickHouse/pull/6701) ([akuzm](https://github.com/akuzm)) -* Improved performance of `simdjson` library by getting rid of dynamic allocation in `ParsedJson::Iterator`. [#6479](https://github.com/ClickHouse/ClickHouse/pull/6479) ([Vitaly Baranov](https://github.com/vitlibar)) -* Pre-fault pages when allocating memory with `mmap()`. [#6667](https://github.com/ClickHouse/ClickHouse/pull/6667) ([akuzm](https://github.com/akuzm)) -* Fix performance bug in `Decimal` comparison. [#6380](https://github.com/ClickHouse/ClickHouse/pull/6380) ([Artem Zuikov](https://github.com/4ertus2)) - -#### Build/Testing/Packaging Improvement -* Remove Compiler (runtime template instantiation) because we've won over its performance. [#6646](https://github.com/ClickHouse/ClickHouse/pull/6646) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Added performance test to show degradation of performance in gcc-9 in a more isolated way. [#6302](https://github.com/ClickHouse/ClickHouse/pull/6302) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Added table function `numbers_mt`, which is a multithreaded version of `numbers`. Updated performance tests with hash functions. [#6554](https://github.com/ClickHouse/ClickHouse/pull/6554) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) -* Comparison mode in `clickhouse-benchmark` [#6220](https://github.com/ClickHouse/ClickHouse/issues/6220) [#6343](https://github.com/ClickHouse/ClickHouse/pull/6343) ([dimarub2000](https://github.com/dimarub2000)) -* Best effort for printing stack traces.
Also added `SIGPROF` as a debugging signal to print stack trace of a running thread. [#6529](https://github.com/ClickHouse/ClickHouse/pull/6529) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Every function in its own file, part 10. [#6321](https://github.com/ClickHouse/ClickHouse/pull/6321) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Remove doubled const `TABLE_IS_READ_ONLY`. [#6566](https://github.com/ClickHouse/ClickHouse/pull/6566) ([filimonov](https://github.com/filimonov)) -* Formatting changes for `StringHashMap` PR [#5417](https://github.com/ClickHouse/ClickHouse/issues/5417). [#6700](https://github.com/ClickHouse/ClickHouse/pull/6700) ([akuzm](https://github.com/akuzm)) -* Better subquery for join creation in `ExpressionAnalyzer`. [#6824](https://github.com/ClickHouse/ClickHouse/pull/6824) ([Artem Zuikov](https://github.com/4ertus2)) -* Remove a redundant condition (found by PVS Studio). [#6775](https://github.com/ClickHouse/ClickHouse/pull/6775) ([akuzm](https://github.com/akuzm)) -* Separate the hash table interface for `ReverseIndex`. [#6672](https://github.com/ClickHouse/ClickHouse/pull/6672) ([akuzm](https://github.com/akuzm)) -* Refactoring of settings. [#6689](https://github.com/ClickHouse/ClickHouse/pull/6689) ([alesapin](https://github.com/alesapin)) -* Add comments for `set` index functions. [#6319](https://github.com/ClickHouse/ClickHouse/pull/6319) ([Nikita Vasilev](https://github.com/nikvas0)) -* Increase OOM score in debug version on Linux. [#6152](https://github.com/ClickHouse/ClickHouse/pull/6152) ([akuzm](https://github.com/akuzm)) -* HDFS HA now work in debug build. [#6650](https://github.com/ClickHouse/ClickHouse/pull/6650) ([Weiqing Xu](https://github.com/weiqxu)) -* Added a test to `transform_query_for_external_database`. [#6388](https://github.com/ClickHouse/ClickHouse/pull/6388) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Add test for multiple materialized views for Kafka table. [#6509](https://github.com/ClickHouse/ClickHouse/pull/6509) ([Ivan](https://github.com/abyss7)) -* Make a better build scheme. [#6500](https://github.com/ClickHouse/ClickHouse/pull/6500) ([Ivan](https://github.com/abyss7)) -* Fixed `test_external_dictionaries` integration in case it was executed under non root user. [#6507](https://github.com/ClickHouse/ClickHouse/pull/6507) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) -* The bug reproduces when total size of written packets exceeds `DBMS_DEFAULT_BUFFER_SIZE`. [#6204](https://github.com/ClickHouse/ClickHouse/pull/6204) ([Yuriy Baranov](https://github.com/yurriy)) -* Added a test for `RENAME` table race condition [#6752](https://github.com/ClickHouse/ClickHouse/pull/6752) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Avoid data race on Settings in `KILL QUERY`. [#6753](https://github.com/ClickHouse/ClickHouse/pull/6753) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Add integration test for handling errors by a cache dictionary. [#6755](https://github.com/ClickHouse/ClickHouse/pull/6755) ([Vitaly Baranov](https://github.com/vitlibar)) -* Disable parsing of ELF object files on Mac OS, because it makes no sense. [#6578](https://github.com/ClickHouse/ClickHouse/pull/6578) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Attempt to make changelog generator better. [#6327](https://github.com/ClickHouse/ClickHouse/pull/6327) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Adding `-Wshadow` switch to the GCC. 
[#6325](https://github.com/ClickHouse/ClickHouse/pull/6325) ([kreuzerkrieg](https://github.com/kreuzerkrieg)) -* Removed obsolete code for `mimalloc` support. [#6715](https://github.com/ClickHouse/ClickHouse/pull/6715) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* `zlib-ng` determines x86 capabilities and saves this info to global variables. This is done in the `deflateInit` call, which may be made by different threads simultaneously. To avoid multithreaded writes, do it on library startup. [#6141](https://github.com/ClickHouse/ClickHouse/pull/6141) ([akuzm](https://github.com/akuzm)) -* Regression test for a bug in JOIN which was fixed in [#5192](https://github.com/ClickHouse/ClickHouse/issues/5192). [#6147](https://github.com/ClickHouse/ClickHouse/pull/6147) ([Bakhtiyor Ruziev](https://github.com/theruziev)) -* Fixed MSan report. [#6144](https://github.com/ClickHouse/ClickHouse/pull/6144) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Fix flapping TTL test. [#6782](https://github.com/ClickHouse/ClickHouse/pull/6782) ([Anton Popov](https://github.com/CurtizJ)) -* Fixed false data race in `MergeTreeDataPart::is_frozen` field. [#6583](https://github.com/ClickHouse/ClickHouse/pull/6583) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Fixed timeouts in fuzz test. In the previous version, it managed to find a false hangup in query `SELECT * FROM numbers_mt(gccMurmurHash(''))`. [#6582](https://github.com/ClickHouse/ClickHouse/pull/6582) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Added debug checks to `static_cast` of columns. [#6581](https://github.com/ClickHouse/ClickHouse/pull/6581) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Support for Oracle Linux in official RPM packages. [#6356](https://github.com/ClickHouse/ClickHouse/issues/6356) [#6585](https://github.com/ClickHouse/ClickHouse/pull/6585) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Changed json perftests from `once` to `loop` type. [#6536](https://github.com/ClickHouse/ClickHouse/pull/6536) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) -* `odbc-bridge.cpp` defines `main()` so it should not be included in `clickhouse-lib`. [#6538](https://github.com/ClickHouse/ClickHouse/pull/6538) ([Orivej Desh](https://github.com/orivej)) -* Test for crash in `FULL|RIGHT JOIN` with nulls in right table's keys. [#6362](https://github.com/ClickHouse/ClickHouse/pull/6362) ([Artem Zuikov](https://github.com/4ertus2)) -* Added a test for the limit on expansion of aliases just in case. [#6442](https://github.com/ClickHouse/ClickHouse/pull/6442) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Switched from `boost::filesystem` to `std::filesystem` where appropriate. [#6253](https://github.com/ClickHouse/ClickHouse/pull/6253) [#6385](https://github.com/ClickHouse/ClickHouse/pull/6385) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Added RPM packages to website. [#6251](https://github.com/ClickHouse/ClickHouse/pull/6251) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Add a test for fixed `Unknown identifier` exception in `IN` section. [#6708](https://github.com/ClickHouse/ClickHouse/pull/6708) ([Artem Zuikov](https://github.com/4ertus2)) -* Simplify `shared_ptr_helper` because people face difficulties understanding it. [#6675](https://github.com/ClickHouse/ClickHouse/pull/6675) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Added performance tests for fixed Gorilla and DoubleDelta codec. A column-definition sketch follows below.
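The column-definition sketch mentioned above; the table and column names are hypothetical, only the codec names come from the entries in this changelog:

```sql
-- Typical time-series layout: DoubleDelta for timestamps, Gorilla for floats.
CREATE TABLE metrics
(
    ts    DateTime CODEC(DoubleDelta),
    value Float64  CODEC(Gorilla)
)
ENGINE = MergeTree()
ORDER BY ts;
```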
[#6179](https://github.com/ClickHouse/ClickHouse/pull/6179) ([Vasily Nemkov](https://github.com/Enmk)) -* Split the integration test `test_dictionaries` into 4 separate tests. [#6776](https://github.com/ClickHouse/ClickHouse/pull/6776) ([Vitaly Baranov](https://github.com/vitlibar)) -* Fix PVS-Studio warning in `PipelineExecutor`. [#6777](https://github.com/ClickHouse/ClickHouse/pull/6777) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) -* Allow to use `library` dictionary source with ASan. [#6482](https://github.com/ClickHouse/ClickHouse/pull/6482) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Added option to generate changelog from a list of PRs. [#6350](https://github.com/ClickHouse/ClickHouse/pull/6350) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Lock the `TinyLog` storage when reading. [#6226](https://github.com/ClickHouse/ClickHouse/pull/6226) ([akuzm](https://github.com/akuzm)) -* Check for broken symlinks in CI. [#6634](https://github.com/ClickHouse/ClickHouse/pull/6634) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Increase timeout for "stack overflow" test because it may take a long time in debug build. [#6637](https://github.com/ClickHouse/ClickHouse/pull/6637) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Added a check for double whitespaces. [#6643](https://github.com/ClickHouse/ClickHouse/pull/6643) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Fix `new/delete` memory tracking when build with sanitizers. Tracking is not clear. It only prevents memory limit exceptions in tests. [#6450](https://github.com/ClickHouse/ClickHouse/pull/6450) ([Artem Zuikov](https://github.com/4ertus2)) -* Enable back the check of undefined symbols while linking. [#6453](https://github.com/ClickHouse/ClickHouse/pull/6453) ([Ivan](https://github.com/abyss7)) -* Avoid rebuilding `hyperscan` every day. [#6307](https://github.com/ClickHouse/ClickHouse/pull/6307) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Fixed UBSan report in `ProtobufWriter`. [#6163](https://github.com/ClickHouse/ClickHouse/pull/6163) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Don't allow to use query profiler with sanitizers because it is not compatible. [#6769](https://github.com/ClickHouse/ClickHouse/pull/6769) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Add test for reloading a dictionary after fail by timer. [#6114](https://github.com/ClickHouse/ClickHouse/pull/6114) ([Vitaly Baranov](https://github.com/vitlibar)) -* Fix inconsistency in `PipelineExecutor::prepareProcessor` argument type. [#6494](https://github.com/ClickHouse/ClickHouse/pull/6494) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) -* Added a test for bad URIs. [#6493](https://github.com/ClickHouse/ClickHouse/pull/6493) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Added more checks to `CAST` function. This should get more information about segmentation fault in fuzzy test. [#6346](https://github.com/ClickHouse/ClickHouse/pull/6346) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) -* Added `gcc-9` support to `docker/builder` container that builds image locally. [#6333](https://github.com/ClickHouse/ClickHouse/pull/6333) ([Gleb Novikov](https://github.com/NanoBjorn)) -* Test for primary key with `LowCardinality(String)`. 
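For reference, a minimal table of the kind exercised by that test, with `LowCardinality(String)` in the primary key; the table itself is hypothetical:

```sql
CREATE TABLE tags
(
    name  LowCardinality(String),
    value String
)
ENGINE = MergeTree()
ORDER BY name;
```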
[#5044](https://github.com/ClickHouse/ClickHouse/issues/5044) [#6219](https://github.com/ClickHouse/ClickHouse/pull/6219) ([dimarub2000](https://github.com/dimarub2000)) -* Fixed tests affected by slow stack traces printing. [#6315](https://github.com/ClickHouse/ClickHouse/pull/6315) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Add a test case for crash in `groupUniqArray` fixed in [#6029](https://github.com/ClickHouse/ClickHouse/pull/6029). [#4402](https://github.com/ClickHouse/ClickHouse/issues/4402) [#6129](https://github.com/ClickHouse/ClickHouse/pull/6129) ([akuzm](https://github.com/akuzm)) -* Fixed indices mutations tests. [#6645](https://github.com/ClickHouse/ClickHouse/pull/6645) ([Nikita Vasilev](https://github.com/nikvas0)) -* In performance test, do not read query log for queries we didn't run. [#6427](https://github.com/ClickHouse/ClickHouse/pull/6427) ([akuzm](https://github.com/akuzm)) -* Materialized view now could be created with any low cardinality types regardless to the setting about suspicious low cardinality types. [#6428](https://github.com/ClickHouse/ClickHouse/pull/6428) ([Olga Khvostikova](https://github.com/stavrolia)) -* Updated tests for `send_logs_level` setting. [#6207](https://github.com/ClickHouse/ClickHouse/pull/6207) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) -* Fix build under gcc-8.2. [#6196](https://github.com/ClickHouse/ClickHouse/pull/6196) ([Max Akhmedov](https://github.com/zlobober)) -* Fix build with internal libc++. [#6724](https://github.com/ClickHouse/ClickHouse/pull/6724) ([Ivan](https://github.com/abyss7)) -* Fix shared build with `rdkafka` library [#6101](https://github.com/ClickHouse/ClickHouse/pull/6101) ([Ivan](https://github.com/abyss7)) -* Fixes for Mac OS build (incomplete). [#6390](https://github.com/ClickHouse/ClickHouse/pull/6390) ([alexey-milovidov](https://github.com/alexey-milovidov)) [#6429](https://github.com/ClickHouse/ClickHouse/pull/6429) ([alex-zaitsev](https://github.com/alex-zaitsev)) -* Fix "splitted" build. [#6618](https://github.com/ClickHouse/ClickHouse/pull/6618) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Other build fixes: [#6186](https://github.com/ClickHouse/ClickHouse/pull/6186) ([Amos Bird](https://github.com/amosbird)) [#6486](https://github.com/ClickHouse/ClickHouse/pull/6486) [#6348](https://github.com/ClickHouse/ClickHouse/pull/6348) ([vxider](https://github.com/Vxider)) [#6744](https://github.com/ClickHouse/ClickHouse/pull/6744) ([Ivan](https://github.com/abyss7)) [#6016](https://github.com/ClickHouse/ClickHouse/pull/6016) [#6421](https://github.com/ClickHouse/ClickHouse/pull/6421) [#6491](https://github.com/ClickHouse/ClickHouse/pull/6491) ([proller](https://github.com/proller)) - -#### Backward Incompatible Change -* Removed rarely used table function `catBoostPool` and storage `CatBoostPool`. If you have used this table function, please write email to `clickhouse-feedback@yandex-team.com`. Note that CatBoost integration remains and will be supported. [#6279](https://github.com/ClickHouse/ClickHouse/pull/6279) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Disable `ANY RIGHT JOIN` and `ANY FULL JOIN` by default. Set `any_join_distinct_right_table_keys` setting to enable them. 
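A sketch of opting back in to the old behaviour for a session; the tables are hypothetical, the setting name comes from the entry above:

```sql
-- Re-enable the legacy ANY RIGHT/FULL JOIN semantics.
SET any_join_distinct_right_table_keys = 1;

SELECT *
FROM t1
ANY RIGHT JOIN t2 USING (id);
```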
[#5126](https://github.com/ClickHouse/ClickHouse/issues/5126) [#6351](https://github.com/ClickHouse/ClickHouse/pull/6351) ([Artem Zuikov](https://github.com/4ertus2)) - -## ClickHouse release 19.13 -### ClickHouse release 19.13.6.51, 2019-10-02 - -#### Bug Fix -* This release also contains all bug fixes from 19.11.12.69. - -### ClickHouse release 19.13.5.44, 2019-09-20 - -#### Bug Fix -* This release also contains all bug fixes from 19.14.6.12. -* Fixed possible inconsistent state of table while executing `DROP` query for replicated table while zookeeper is not accessible. [#6045](https://github.com/ClickHouse/ClickHouse/issues/6045) [#6413](https://github.com/ClickHouse/ClickHouse/pull/6413) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)) -* Fix for data race in StorageMerge [#6717](https://github.com/ClickHouse/ClickHouse/pull/6717) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Fix bug introduced in query profiler which leads to endless recv from socket. [#6386](https://github.com/ClickHouse/ClickHouse/pull/6386) ([alesapin](https://github.com/alesapin)) -* Fix excessive CPU usage while executing `JSONExtractRaw` function over a boolean value. [#6208](https://github.com/ClickHouse/ClickHouse/pull/6208) ([Vitaly Baranov](https://github.com/vitlibar)) -* Fixes the regression while pushing to materialized view. [#6415](https://github.com/ClickHouse/ClickHouse/pull/6415) ([Ivan](https://github.com/abyss7)) -* Table function `url` had the vulnerability allowed the attacker to inject arbitrary HTTP headers in the request. This issue was found by [Nikita Tikhomirov](https://github.com/NSTikhomirov). [#6466](https://github.com/ClickHouse/ClickHouse/pull/6466) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Fix useless `AST` check in Set index. [#6510](https://github.com/ClickHouse/ClickHouse/issues/6510) [#6651](https://github.com/ClickHouse/ClickHouse/pull/6651) ([Nikita Vasilev](https://github.com/nikvas0)) -* Fixed parsing of `AggregateFunction` values embedded in query. [#6575](https://github.com/ClickHouse/ClickHouse/issues/6575) [#6773](https://github.com/ClickHouse/ClickHouse/pull/6773) ([Zhichang Yu](https://github.com/yuzhichang)) -* Fixed wrong behaviour of `trim` functions family. [#6647](https://github.com/ClickHouse/ClickHouse/pull/6647) ([alexey-milovidov](https://github.com/alexey-milovidov)) - -### ClickHouse release 19.13.4.32, 2019-09-10 - -#### Bug Fix -* This release also contains all bug security fixes from 19.11.9.52 and 19.11.10.54. -* Fixed data race in `system.parts` table and `ALTER` query. [#6245](https://github.com/ClickHouse/ClickHouse/issues/6245) [#6513](https://github.com/ClickHouse/ClickHouse/pull/6513) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Fixed mismatched header in streams happened in case of reading from empty distributed table with sample and prewhere. [#6167](https://github.com/ClickHouse/ClickHouse/issues/6167) ([Lixiang Qian](https://github.com/fancyqlx)) [#6823](https://github.com/ClickHouse/ClickHouse/pull/6823) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) -* Fixed crash when using `IN` clause with a subquery with a tuple. [#6125](https://github.com/ClickHouse/ClickHouse/issues/6125) [#6550](https://github.com/ClickHouse/ClickHouse/pull/6550) ([tavplubix](https://github.com/tavplubix)) -* Fix case with same column names in `GLOBAL JOIN ON` section. 
[#6181](https://github.com/ClickHouse/ClickHouse/pull/6181) ([Artem Zuikov](https://github.com/4ertus2)) -* Fix crash when casting types to `Decimal` that do not support it. Throw exception instead. [#6297](https://github.com/ClickHouse/ClickHouse/pull/6297) ([Artem Zuikov](https://github.com/4ertus2)) -* Fixed crash in `extractAll()` function. [#6644](https://github.com/ClickHouse/ClickHouse/pull/6644) ([Artem Zuikov](https://github.com/4ertus2)) -* Query transformation for `MySQL`, `ODBC`, `JDBC` table functions now works properly for `SELECT WHERE` queries with multiple `AND` expressions. [#6381](https://github.com/ClickHouse/ClickHouse/issues/6381) [#6676](https://github.com/ClickHouse/ClickHouse/pull/6676) ([dimarub2000](https://github.com/dimarub2000)) -* Added previous declaration checks for MySQL 8 integration. [#6569](https://github.com/ClickHouse/ClickHouse/pull/6569) ([Rafael David Tinoco](https://github.com/rafaeldtinoco)) - -#### Security Fix -* Fix two vulnerabilities in codecs in decompression phase (malicious user can fabricate compressed data that will lead to buffer overflow in decompression). [#6670](https://github.com/ClickHouse/ClickHouse/pull/6670) ([Artem Zuikov](https://github.com/4ertus2)) - - -### ClickHouse release 19.13.3.26, 2019-08-22 - -#### Bug Fix -* Fix `ALTER TABLE ... UPDATE` query for tables with `enable_mixed_granularity_parts=1`. [#6543](https://github.com/ClickHouse/ClickHouse/pull/6543) ([alesapin](https://github.com/alesapin)) -* Fix NPE when using IN clause with a subquery with a tuple. [#6125](https://github.com/ClickHouse/ClickHouse/issues/6125) [#6550](https://github.com/ClickHouse/ClickHouse/pull/6550) ([tavplubix](https://github.com/tavplubix)) -* Fixed an issue that if a stale replica becomes alive, it may still have data parts that were removed by DROP PARTITION. [#6522](https://github.com/ClickHouse/ClickHouse/issues/6522) [#6523](https://github.com/ClickHouse/ClickHouse/pull/6523) ([tavplubix](https://github.com/tavplubix)) -* Fixed issue with parsing CSV [#6426](https://github.com/ClickHouse/ClickHouse/issues/6426) [#6559](https://github.com/ClickHouse/ClickHouse/pull/6559) ([tavplubix](https://github.com/tavplubix)) -* Fixed data race in system.parts table and ALTER query. This fixes [#6245](https://github.com/ClickHouse/ClickHouse/issues/6245). [#6513](https://github.com/ClickHouse/ClickHouse/pull/6513) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Fixed wrong code in mutations that may lead to memory corruption. Fixed segfault with read of address `0x14c0` that may happed due to concurrent `DROP TABLE` and `SELECT` from `system.parts` or `system.parts_columns`. Fixed race condition in preparation of mutation queries. Fixed deadlock caused by `OPTIMIZE` of Replicated tables and concurrent modification operations like ALTERs. [#6514](https://github.com/ClickHouse/ClickHouse/pull/6514) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Fixed possible data loss after `ALTER DELETE` query on table with skipping index. [#6224](https://github.com/ClickHouse/ClickHouse/issues/6224) [#6282](https://github.com/ClickHouse/ClickHouse/pull/6282) ([Nikita Vasilev](https://github.com/nikvas0)) - -#### Security Fix -* If the attacker has write access to ZooKeeper and is able to run custom server available from the network where ClickHouse run, it can create custom-built malicious server that will act as ClickHouse replica and register it in ZooKeeper. 
When another replica fetches a data part from the malicious replica, it can force clickhouse-server to write to an arbitrary path on the filesystem. Found by Eldar Zaitov, information security team at Yandex. [#6247](https://github.com/ClickHouse/ClickHouse/pull/6247) ([alexey-milovidov](https://github.com/alexey-milovidov)) - -### ClickHouse release 19.13.2.19, 2019-08-14 - -#### New Feature -* Sampling profiler on query level. [Example](https://gist.github.com/alexey-milovidov/92758583dd41c24c360fdb8d6a4da194). [#4247](https://github.com/ClickHouse/ClickHouse/issues/4247) ([laplab](https://github.com/laplab)) [#6124](https://github.com/ClickHouse/ClickHouse/pull/6124) ([alexey-milovidov](https://github.com/alexey-milovidov)) [#6250](https://github.com/ClickHouse/ClickHouse/pull/6250) [#6283](https://github.com/ClickHouse/ClickHouse/pull/6283) [#6386](https://github.com/ClickHouse/ClickHouse/pull/6386) -* Allow specifying a list of columns with the `COLUMNS('regexp')` expression, which works like a more sophisticated variant of the `*` asterisk. [#5951](https://github.com/ClickHouse/ClickHouse/pull/5951) ([mfridental](https://github.com/mfridental)), ([alexey-milovidov](https://github.com/alexey-milovidov)) -* `CREATE TABLE AS table_function()` is now possible. [#6057](https://github.com/ClickHouse/ClickHouse/pull/6057) ([dimarub2000](https://github.com/dimarub2000)) -* Adam optimizer for stochastic gradient descent is used by default in `stochasticLinearRegression()` and `stochasticLogisticRegression()` aggregate functions, because it shows good quality with almost no tuning. [#6000](https://github.com/ClickHouse/ClickHouse/pull/6000) ([Quid37](https://github.com/Quid37)) -* Added functions for working with custom week numbers. [#5212](https://github.com/ClickHouse/ClickHouse/pull/5212) ([Andy Yang](https://github.com/andyyzh)) -* `RENAME` queries now work with all storages. [#5953](https://github.com/ClickHouse/ClickHouse/pull/5953) ([Ivan](https://github.com/abyss7)) -* Now the client receives logs from the server at any desired level by setting `send_logs_level`, regardless of the log level specified in server settings. [#5964](https://github.com/ClickHouse/ClickHouse/pull/5964) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)) - -#### Backward Incompatible Change -* The setting `input_format_defaults_for_omitted_fields` is enabled by default. Inserts into Distributed tables need this setting to be the same across the cluster (you need to set it before a rolling update). It enables calculation of complex default expressions for omitted fields in `JSONEachRow` and `CSV*` formats. It should be the expected behavior but may lead to a negligible performance difference. [#6043](https://github.com/ClickHouse/ClickHouse/pull/6043) ([Artem Zuikov](https://github.com/4ertus2)), [#5625](https://github.com/ClickHouse/ClickHouse/pull/5625) ([akuzm](https://github.com/akuzm)) - -#### Experimental features -* New query processing pipeline. Use the `experimental_use_processors=1` option to enable it. Use it at your own risk. [#4914](https://github.com/ClickHouse/ClickHouse/pull/4914) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) - -#### Bug Fix -* Kafka integration has been fixed in this version. -* Fixed `DoubleDelta` encoding of `Int64` for large `DoubleDelta` values, improved `DoubleDelta` encoding for random data for `Int32`.
[#5998](https://github.com/ClickHouse/ClickHouse/pull/5998) ([Vasily Nemkov](https://github.com/Enmk)) -* Fixed overestimation of `max_rows_to_read` if the setting `merge_tree_uniform_read_distribution` is set to 0. [#6019](https://github.com/ClickHouse/ClickHouse/pull/6019) ([alexey-milovidov](https://github.com/alexey-milovidov)) - -#### Improvement -* Throw an exception if a `config.d` file doesn't have the same root element as the config file. [#6123](https://github.com/ClickHouse/ClickHouse/pull/6123) ([dimarub2000](https://github.com/dimarub2000)) - -#### Performance Improvement -* Optimize `count()`. Now it uses the smallest column (if possible). [#6028](https://github.com/ClickHouse/ClickHouse/pull/6028) ([Amos Bird](https://github.com/amosbird)) - -#### Build/Testing/Packaging Improvement -* Report memory usage in performance tests. [#5899](https://github.com/ClickHouse/ClickHouse/pull/5899) ([akuzm](https://github.com/akuzm)) -* Fix build with external `libcxx`. [#6010](https://github.com/ClickHouse/ClickHouse/pull/6010) ([Ivan](https://github.com/abyss7)) -* Fix shared build with the `rdkafka` library. [#6101](https://github.com/ClickHouse/ClickHouse/pull/6101) ([Ivan](https://github.com/abyss7)) - -## ClickHouse release 19.11 - -### ClickHouse release 19.11.13.74, 2019-11-01 - -#### Bug Fix -* Fixed rare crash in `ALTER MODIFY COLUMN` and vertical merge when one of the merged/altered parts is empty (0 rows). [#6780](https://github.com/ClickHouse/ClickHouse/pull/6780) ([alesapin](https://github.com/alesapin)) -* Manual update of `SIMDJSON`. This fixes possible flooding of stderr files with bogus JSON diagnostic messages. [#7548](https://github.com/ClickHouse/ClickHouse/pull/7548) ([Alexander Kazakov](https://github.com/Akazz)) -* Fixed a bug with the `mrk` file extension for mutations ([alesapin](https://github.com/alesapin)) - -### ClickHouse release 19.11.12.69, 2019-10-02 - -#### Bug Fix -* Fixed performance degradation of index analysis on complex keys on large tables. This fixes [#6924](https://github.com/ClickHouse/ClickHouse/issues/6924). [#7075](https://github.com/ClickHouse/ClickHouse/pull/7075) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Avoid rare SIGSEGV while sending data in tables with Distributed engine (`Failed to send batch: file with index XXXXX is absent`). [#7032](https://github.com/ClickHouse/ClickHouse/pull/7032) ([Azat Khuzhin](https://github.com/azat)) -* Fix `Unknown identifier` with multiple joins. This fixes [#5254](https://github.com/ClickHouse/ClickHouse/issues/5254). [#7022](https://github.com/ClickHouse/ClickHouse/pull/7022) ([Artem Zuikov](https://github.com/4ertus2)) - -### ClickHouse release 19.11.11.57, 2019-09-13 -* Fix logical error causing segfaults when selecting from an empty Kafka topic. [#6902](https://github.com/ClickHouse/ClickHouse/issues/6902) [#6909](https://github.com/ClickHouse/ClickHouse/pull/6909) ([Ivan](https://github.com/abyss7)) -* Fix for function `arrayEnumerateUniqRanked` with empty arrays in params. [#6928](https://github.com/ClickHouse/ClickHouse/pull/6928) ([proller](https://github.com/proller)) - -### ClickHouse release 19.11.10.54, 2019-09-10 - -#### Bug Fix -* Store offsets for Kafka messages manually to be able to commit them all at once for all partitions. Fixes potential duplication in the "one consumer - many partitions" scenario.
[#6872](https://github.com/ClickHouse/ClickHouse/pull/6872) ([Ivan](https://github.com/abyss7)) - -### ClickHouse release 19.11.9.52, 2019-09-06 -* Improve error handling in cache dictionaries. [#6737](https://github.com/ClickHouse/ClickHouse/pull/6737) ([Vitaly Baranov](https://github.com/vitlibar)) -* Fixed bug in function `arrayEnumerateUniqRanked`. [#6779](https://github.com/ClickHouse/ClickHouse/pull/6779) ([proller](https://github.com/proller)) -* Fix `JSONExtract` function while extracting a `Tuple` from JSON. [#6718](https://github.com/ClickHouse/ClickHouse/pull/6718) ([Vitaly Baranov](https://github.com/vitlibar)) -* Fixed possible data loss after an `ALTER DELETE` query on a table with a skipping index. [#6224](https://github.com/ClickHouse/ClickHouse/issues/6224) [#6282](https://github.com/ClickHouse/ClickHouse/pull/6282) ([Nikita Vasilev](https://github.com/nikvas0)) -* Fixed performance test. [#6392](https://github.com/ClickHouse/ClickHouse/pull/6392) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Parquet: Fix reading boolean columns. [#6579](https://github.com/ClickHouse/ClickHouse/pull/6579) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Fixed wrong behaviour of the `nullIf` function for constant arguments. [#6518](https://github.com/ClickHouse/ClickHouse/pull/6518) ([Guillaume Tassery](https://github.com/YiuRULE)) [#6580](https://github.com/ClickHouse/ClickHouse/pull/6580) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Fix Kafka message duplication problem on normal server restart. [#6597](https://github.com/ClickHouse/ClickHouse/pull/6597) ([Ivan](https://github.com/abyss7)) -* Fixed an issue when a long `ALTER UPDATE` or `ALTER DELETE` may prevent regular merges from running. Prevent mutations from executing if there are not enough free threads available. [#6502](https://github.com/ClickHouse/ClickHouse/issues/6502) [#6617](https://github.com/ClickHouse/ClickHouse/pull/6617) ([tavplubix](https://github.com/tavplubix)) -* Fixed an error when processing "timezone" in the server configuration file. [#6709](https://github.com/ClickHouse/ClickHouse/pull/6709) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Fix Kafka tests. [#6805](https://github.com/ClickHouse/ClickHouse/pull/6805) ([Ivan](https://github.com/abyss7)) - -#### Security Fix -* If an attacker has write access to ZooKeeper and is able to run a custom server available from the network where ClickHouse runs, they can create a custom-built malicious server that will act as a ClickHouse replica and register it in ZooKeeper. When another replica fetches a data part from the malicious replica, it can force clickhouse-server to write to an arbitrary path on the filesystem. Found by Eldar Zaitov, information security team at Yandex. [#6247](https://github.com/ClickHouse/ClickHouse/pull/6247) ([alexey-milovidov](https://github.com/alexey-milovidov)) - -### ClickHouse release 19.11.8.46, 2019-08-22 - -#### Bug Fix -* Fix `ALTER TABLE ... UPDATE` query for tables with `enable_mixed_granularity_parts=1`. [#6543](https://github.com/ClickHouse/ClickHouse/pull/6543) ([alesapin](https://github.com/alesapin)) -* Fix NPE when using the IN clause with a subquery with a tuple. [#6125](https://github.com/ClickHouse/ClickHouse/issues/6125) [#6550](https://github.com/ClickHouse/ClickHouse/pull/6550) ([tavplubix](https://github.com/tavplubix)) -* Fixed an issue that if a stale replica becomes alive, it may still have data parts that were removed by DROP PARTITION.
[#6522](https://github.com/ClickHouse/ClickHouse/issues/6522) [#6523](https://github.com/ClickHouse/ClickHouse/pull/6523) ([tavplubix](https://github.com/tavplubix)) -* Fixed an issue with parsing CSV [#6426](https://github.com/ClickHouse/ClickHouse/issues/6426) [#6559](https://github.com/ClickHouse/ClickHouse/pull/6559) ([tavplubix](https://github.com/tavplubix)) -* Fixed data race in system.parts table and ALTER query. This fixes [#6245](https://github.com/ClickHouse/ClickHouse/issues/6245). [#6513](https://github.com/ClickHouse/ClickHouse/pull/6513) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Fixed wrong code in mutations that may lead to memory corruption. Fixed segfault with read of address `0x14c0` that may happen due to concurrent `DROP TABLE` and `SELECT` from `system.parts` or `system.parts_columns`. Fixed race condition in preparation of mutation queries. Fixed deadlock caused by `OPTIMIZE` of Replicated tables and concurrent modification operations like ALTERs. [#6514](https://github.com/ClickHouse/ClickHouse/pull/6514) ([alexey-milovidov](https://github.com/alexey-milovidov)) - -### ClickHouse release 19.11.7.40, 2019-08-14 - -#### Bug fix -* Kafka integration has been fixed in this version. -* Fix segfault when using `arrayReduce` for constant arguments. [#6326](https://github.com/ClickHouse/ClickHouse/pull/6326) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Fixed `toFloat()` monotonicity. [#6374](https://github.com/ClickHouse/ClickHouse/pull/6374) ([dimarub2000](https://github.com/dimarub2000)) -* Fix segfault with enabled `optimize_skip_unused_shards` and a missing sharding key. [#6384](https://github.com/ClickHouse/ClickHouse/pull/6384) ([CurtizJ](https://github.com/CurtizJ)) -* Fixed logic of `arrayEnumerateUniqRanked` function. [#6423](https://github.com/ClickHouse/ClickHouse/pull/6423) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Removed extra verbose logging from the MySQL handler. [#6389](https://github.com/ClickHouse/ClickHouse/pull/6389) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Fix wrong behavior and possible segfaults in `topK` and `topKWeighted` aggregate functions. [#6404](https://github.com/ClickHouse/ClickHouse/pull/6404) ([CurtizJ](https://github.com/CurtizJ)) -* Do not expose virtual columns in `system.columns` table. This is required for backward compatibility. [#6406](https://github.com/ClickHouse/ClickHouse/pull/6406) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Fix bug with memory allocation for string fields in the complex key cache dictionary. [#6447](https://github.com/ClickHouse/ClickHouse/pull/6447) ([alesapin](https://github.com/alesapin)) -* Fix bug with enabling adaptive granularity when creating a new replica for a `Replicated*MergeTree` table. [#6452](https://github.com/ClickHouse/ClickHouse/pull/6452) ([alesapin](https://github.com/alesapin)) -* Fix infinite loop when reading Kafka messages. [#6354](https://github.com/ClickHouse/ClickHouse/pull/6354) ([abyss7](https://github.com/abyss7)) -* Fixed the possibility of a fabricated query causing a server crash due to stack overflow in the SQL parser and the possibility of stack overflow in `Merge` and `Distributed` tables [#6433](https://github.com/ClickHouse/ClickHouse/pull/6433) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Fixed Gorilla encoding error on small sequences.
[#6444](https://github.com/ClickHouse/ClickHouse/pull/6444) ([Enmk](https://github.com/Enmk)) - -#### Improvement -* Allow user to override `poll_interval` and `idle_connection_timeout` settings on connection. [#6230](https://github.com/ClickHouse/ClickHouse/pull/6230) ([alexey-milovidov](https://github.com/alexey-milovidov)) - -### ClickHouse release 19.11.5.28, 2019-08-05 - -#### Bug fix -* Fixed the possibility of hanging queries when server is overloaded. [#6301](https://github.com/ClickHouse/ClickHouse/pull/6301) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Fix FPE in yandexConsistentHash function. This fixes [#6304](https://github.com/ClickHouse/ClickHouse/issues/6304). [#6126](https://github.com/ClickHouse/ClickHouse/pull/6126) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Fixed bug in conversion of `LowCardinality` types in `AggregateFunctionFactory`. This fixes [#6257](https://github.com/ClickHouse/ClickHouse/issues/6257). [#6281](https://github.com/ClickHouse/ClickHouse/pull/6281) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) -* Fix parsing of `bool` settings from `true` and `false` strings in configuration files. [#6278](https://github.com/ClickHouse/ClickHouse/pull/6278) ([alesapin](https://github.com/alesapin)) -* Fix rare bug with incompatible stream headers in queries to `Distributed` table over `MergeTree` table when part of `WHERE` moves to `PREWHERE`. [#6236](https://github.com/ClickHouse/ClickHouse/pull/6236) ([alesapin](https://github.com/alesapin)) -* Fixed overflow in integer division of signed type to unsigned type. This fixes [#6214](https://github.com/ClickHouse/ClickHouse/issues/6214). [#6233](https://github.com/ClickHouse/ClickHouse/pull/6233) ([alexey-milovidov](https://github.com/alexey-milovidov)) - -#### Backward Incompatible Change -* `Kafka` still broken. - -### ClickHouse release 19.11.4.24, 2019-08-01 - -#### Bug Fix -* Fix bug with writing secondary indices marks with adaptive granularity. [#6126](https://github.com/ClickHouse/ClickHouse/pull/6126) ([alesapin](https://github.com/alesapin)) -* Fix `WITH ROLLUP` and `WITH CUBE` modifiers of `GROUP BY` with two-level aggregation. [#6225](https://github.com/ClickHouse/ClickHouse/pull/6225) ([Anton Popov](https://github.com/CurtizJ)) -* Fixed hang in `JSONExtractRaw` function. Fixed [#6195](https://github.com/ClickHouse/ClickHouse/issues/6195) [#6198](https://github.com/ClickHouse/ClickHouse/pull/6198) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Fix segfault in ExternalLoader::reloadOutdated(). [#6082](https://github.com/ClickHouse/ClickHouse/pull/6082) ([Vitaly Baranov](https://github.com/vitlibar)) -* Fixed the case when server may close listening sockets but not shutdown and continue serving remaining queries. You may end up with two running clickhouse-server processes. Sometimes, the server may return an error `bad_function_call` for remaining queries. [#6231](https://github.com/ClickHouse/ClickHouse/pull/6231) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Fixed useless and incorrect condition on update field for initial loading of external dictionaries via ODBC, MySQL, ClickHouse and HTTP. This fixes [#6069](https://github.com/ClickHouse/ClickHouse/issues/6069) [#6083](https://github.com/ClickHouse/ClickHouse/pull/6083) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Fixed irrelevant exception in cast of `LowCardinality(Nullable)` to not-Nullable column in case if it doesn't contain Nulls (e.g. 
in query like `SELECT CAST(CAST('Hello' AS LowCardinality(Nullable(String))) AS String)`. [#6094](https://github.com/ClickHouse/ClickHouse/issues/6094) [#6119](https://github.com/ClickHouse/ClickHouse/pull/6119) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) -* Fix non-deterministic result of the "uniq" aggregate function in extremely rare cases. The bug was present in all ClickHouse versions. [#6058](https://github.com/ClickHouse/ClickHouse/pull/6058) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Fixed segfault when a slightly too high CIDR is set on the function `IPv6CIDRToRange`. [#6068](https://github.com/ClickHouse/ClickHouse/pull/6068) ([Guillaume Tassery](https://github.com/YiuRULE)) -* Fixed small memory leak when the server throws many exceptions from many different contexts. [#6144](https://github.com/ClickHouse/ClickHouse/pull/6144) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Fix the situation when the consumer got paused before subscription and was not resumed afterwards. [#6075](https://github.com/ClickHouse/ClickHouse/pull/6075) ([Ivan](https://github.com/abyss7)) Note that Kafka is broken in this version. -* Clear the Kafka data buffer from a previous read operation that was completed with an error [#6026](https://github.com/ClickHouse/ClickHouse/pull/6026) ([Nikolay](https://github.com/bopohaa)) Note that Kafka is broken in this version. -* Since `StorageMergeTree::background_task_handle` is initialized in `startup()`, the `MergeTreeBlockOutputStream::write()` may try to use it before initialization. Just check if it is initialized. [#6080](https://github.com/ClickHouse/ClickHouse/pull/6080) ([Ivan](https://github.com/abyss7)) - -#### Build/Testing/Packaging Improvement -* Added official `rpm` packages. [#5740](https://github.com/ClickHouse/ClickHouse/pull/5740) ([proller](https://github.com/proller)) ([alesapin](https://github.com/alesapin)) -* Add the ability to build `.rpm` and `.tgz` packages with the `packager` script. [#5769](https://github.com/ClickHouse/ClickHouse/pull/5769) ([alesapin](https://github.com/alesapin)) -* Fixes for the "Arcadia" build system. [#6223](https://github.com/ClickHouse/ClickHouse/pull/6223) ([proller](https://github.com/proller)) - -#### Backward Incompatible Change -* `Kafka` is broken in this version. - - -### ClickHouse release 19.11.3.11, 2019-07-18 - -#### New Feature -* Added support for prepared statements. [#5331](https://github.com/ClickHouse/ClickHouse/pull/5331/) ([Alexander](https://github.com/sanych73)) [#5630](https://github.com/ClickHouse/ClickHouse/pull/5630) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* `DoubleDelta` and `Gorilla` column codecs (see the sketch below). [#5600](https://github.com/ClickHouse/ClickHouse/pull/5600) ([Vasily Nemkov](https://github.com/Enmk)) -* Added `os_thread_priority` setting that allows controlling the "nice" value of query processing threads, which is used by the OS to adjust dynamic scheduling priority. It requires `CAP_SYS_NICE` capabilities to work. This implements [#5858](https://github.com/ClickHouse/ClickHouse/issues/5858) [#5909](https://github.com/ClickHouse/ClickHouse/pull/5909) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Implement `_topic`, `_offset`, `_key` columns for the Kafka engine [#5382](https://github.com/ClickHouse/ClickHouse/pull/5382) ([Ivan](https://github.com/abyss7)) Note that Kafka is broken in this version.
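A minimal sketch of how the `DoubleDelta` and `Gorilla` codecs mentioned above can be attached to columns; the table and column names here are hypothetical and only illustrate the syntax:

```sql
-- Hypothetical time-series table: DoubleDelta suits monotonically growing
-- integer/DateTime columns, Gorilla suits slowly changing floating point values.
-- Both are chained with LZ4 for general-purpose compression on top.
CREATE TABLE codec_demo
(
    ts    DateTime CODEC(DoubleDelta, LZ4),
    value Float64  CODEC(Gorilla, LZ4)
)
ENGINE = MergeTree
ORDER BY ts;
```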
-* Add aggregate function combinator `-Resample`. [#5590](https://github.com/ClickHouse/ClickHouse/pull/5590) ([hcz](https://github.com/hczhcz)) -* Aggregate functions `groupArrayMovingSum(win_size)(x)` and `groupArrayMovingAvg(win_size)(x)`, which calculate moving sum/avg with or without window-size limitation. [#5595](https://github.com/ClickHouse/ClickHouse/pull/5595) ([inv2004](https://github.com/inv2004)) -* Add synonym `arrayFlatten` <-> `flatten` [#5764](https://github.com/ClickHouse/ClickHouse/pull/5764) ([hcz](https://github.com/hczhcz)) -* Integrate H3 function `geoToH3` from Uber. [#4724](https://github.com/ClickHouse/ClickHouse/pull/4724) ([Remen Ivan](https://github.com/BHYCHIK)) [#5805](https://github.com/ClickHouse/ClickHouse/pull/5805) ([alexey-milovidov](https://github.com/alexey-milovidov)) - -#### Bug Fix -* Implement DNS cache with asynchronous update. A separate thread resolves all hosts and updates the DNS cache periodically (setting `dns_cache_update_period`). It should help when the IPs of hosts change frequently. [#5857](https://github.com/ClickHouse/ClickHouse/pull/5857) ([Anton Popov](https://github.com/CurtizJ)) -* Fix segfault in `Delta` codec which affects columns with values less than 32 bits in size. The bug led to random memory corruption. [#5786](https://github.com/ClickHouse/ClickHouse/pull/5786) ([alesapin](https://github.com/alesapin)) -* Fix segfault in TTL merge with non-physical columns in block. [#5819](https://github.com/ClickHouse/ClickHouse/pull/5819) ([Anton Popov](https://github.com/CurtizJ)) -* Fix rare bug in checking of a part with a `LowCardinality` column. Previously `checkDataPart` always failed for a part with a `LowCardinality` column. [#5832](https://github.com/ClickHouse/ClickHouse/pull/5832) ([alesapin](https://github.com/alesapin)) -* Avoid hanging connections when the server thread pool is full. It is important for connections from the `remote` table function or connections to a shard without replicas when there is a long connection timeout. This fixes [#5878](https://github.com/ClickHouse/ClickHouse/issues/5878) [#5881](https://github.com/ClickHouse/ClickHouse/pull/5881) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Support for constant arguments to the `evalMLModel` function. This fixes [#5817](https://github.com/ClickHouse/ClickHouse/issues/5817) [#5820](https://github.com/ClickHouse/ClickHouse/pull/5820) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Fixed the issue when ClickHouse determines the default time zone as `UCT` instead of `UTC`. This fixes [#5804](https://github.com/ClickHouse/ClickHouse/issues/5804). [#5828](https://github.com/ClickHouse/ClickHouse/pull/5828) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Fixed buffer underflow in `visitParamExtractRaw`. This fixes [#5901](https://github.com/ClickHouse/ClickHouse/issues/5901) [#5902](https://github.com/ClickHouse/ClickHouse/pull/5902) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Now distributed `DROP/ALTER/TRUNCATE/OPTIMIZE ON CLUSTER` queries will be executed directly on the leader replica. [#5757](https://github.com/ClickHouse/ClickHouse/pull/5757) ([alesapin](https://github.com/alesapin)) -* Fix `coalesce` for `ColumnConst` with `ColumnNullable` + related changes.
[#5755](https://github.com/ClickHouse/ClickHouse/pull/5755) ([Artem Zuikov](https://github.com/4ertus2)) -* Fix the `ReadBufferFromKafkaConsumer` so that it keeps reading new messages after `commit()` even if it was stalled before [#5852](https://github.com/ClickHouse/ClickHouse/pull/5852) ([Ivan](https://github.com/abyss7)) -* Fix `FULL` and `RIGHT` JOIN results when joining on `Nullable` keys in the right table. [#5859](https://github.com/ClickHouse/ClickHouse/pull/5859) ([Artem Zuikov](https://github.com/4ertus2)) -* Possible fix of infinite sleeping of low-priority queries. [#5842](https://github.com/ClickHouse/ClickHouse/pull/5842) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Fix race condition which caused some queries to not appear in query_log after a `SYSTEM FLUSH LOGS` query. [#5456](https://github.com/ClickHouse/ClickHouse/issues/5456) [#5685](https://github.com/ClickHouse/ClickHouse/pull/5685) ([Anton Popov](https://github.com/CurtizJ)) -* Fixed `heap-use-after-free` ASan warning in ClusterCopier caused by a watch that tried to use an already removed copier object. [#5871](https://github.com/ClickHouse/ClickHouse/pull/5871) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) -* Fixed wrong `StringRef` pointer returned by some implementations of `IColumn::deserializeAndInsertFromArena`. This bug affected only unit tests. [#5973](https://github.com/ClickHouse/ClickHouse/pull/5973) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) -* Prevent source and intermediate array join columns from masking same-name columns. [#5941](https://github.com/ClickHouse/ClickHouse/pull/5941) ([Artem Zuikov](https://github.com/4ertus2)) -* Fix insert and select queries to the MySQL engine with MySQL-style identifier quoting. [#5704](https://github.com/ClickHouse/ClickHouse/pull/5704) ([Winter Zhang](https://github.com/zhang2014)) -* Now the `CHECK TABLE` query can work with the MergeTree engine family. It returns check status and a message, if any, for each part (or file in the case of simpler engines). Also, fix a bug in the fetch of a broken part. [#5865](https://github.com/ClickHouse/ClickHouse/pull/5865) ([alesapin](https://github.com/alesapin)) -* Fix SPLIT_SHARED_LIBRARIES runtime [#5793](https://github.com/ClickHouse/ClickHouse/pull/5793) ([Danila Kutenin](https://github.com/danlark1)) -* Fixed time zone initialization when `/etc/localtime` is a relative symlink like `../usr/share/zoneinfo/Europe/Moscow` [#5922](https://github.com/ClickHouse/ClickHouse/pull/5922) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* clickhouse-copier: Fix use-after-free on shutdown [#5752](https://github.com/ClickHouse/ClickHouse/pull/5752) ([proller](https://github.com/proller)) -* Updated `simdjson`. Fixed the issue that some invalid JSONs with zero bytes were successfully parsed. [#5938](https://github.com/ClickHouse/ClickHouse/pull/5938) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Fix shutdown of SystemLogs [#5802](https://github.com/ClickHouse/ClickHouse/pull/5802) ([Anton Popov](https://github.com/CurtizJ)) -* Fix hanging when the condition in invalidate_query depends on a dictionary. [#6011](https://github.com/ClickHouse/ClickHouse/pull/6011) ([Vitaly Baranov](https://github.com/vitlibar)) - -#### Improvement -* Allow unresolvable addresses in cluster configuration. They will be considered unavailable, and resolution will be retried at every connection attempt. This is especially useful for Kubernetes.
This fixes [#5714](https://github.com/ClickHouse/ClickHouse/issues/5714) [#5924](https://github.com/ClickHouse/ClickHouse/pull/5924) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Close idle TCP connections (with one hour timeout by default). This is especially important for large clusters with multiple distributed tables on every server, because every server can possibly keep a connection pool to every other server, and after peak query concurrency, connections will stall. This fixes [#5879](https://github.com/ClickHouse/ClickHouse/issues/5879) [#5880](https://github.com/ClickHouse/ClickHouse/pull/5880) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Better quality of the `topK` function. Changed the SpaceSaving set behavior to remove the last element if the new element has a bigger weight. [#5833](https://github.com/ClickHouse/ClickHouse/issues/5833) [#5850](https://github.com/ClickHouse/ClickHouse/pull/5850) ([Guillaume Tassery](https://github.com/YiuRULE)) -* URL functions that work with domains can now handle incomplete URLs without a scheme [#5725](https://github.com/ClickHouse/ClickHouse/pull/5725) ([alesapin](https://github.com/alesapin)) -* Checksums added to the `system.parts_columns` table. [#5874](https://github.com/ClickHouse/ClickHouse/pull/5874) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)) -* Added `Enum` data type as a synonym for `Enum8` or `Enum16`. [#5886](https://github.com/ClickHouse/ClickHouse/pull/5886) ([dimarub2000](https://github.com/dimarub2000)) -* Full bit transpose variant for the `T64` codec. Could lead to better compression with `zstd`. [#5742](https://github.com/ClickHouse/ClickHouse/pull/5742) ([Artem Zuikov](https://github.com/4ertus2)) -* A condition on the `startsWith` function can now use the primary key. This fixes [#5310](https://github.com/ClickHouse/ClickHouse/issues/5310) and [#5882](https://github.com/ClickHouse/ClickHouse/issues/5882) [#5919](https://github.com/ClickHouse/ClickHouse/pull/5919) ([dimarub2000](https://github.com/dimarub2000)) -* Allow using `clickhouse-copier` with a cross-replication cluster topology by permitting an empty database name. [#5745](https://github.com/ClickHouse/ClickHouse/pull/5745) ([nvartolomei](https://github.com/nvartolomei)) -* Use `UTC` as the default timezone on a system without `tzdata` (e.g. a bare Docker container). Before this patch, the error message `Could not determine local time zone` was printed and the server or client refused to start. [#5827](https://github.com/ClickHouse/ClickHouse/pull/5827) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Restored support for a floating point argument in function `quantileTiming` for backward compatibility. [#5911](https://github.com/ClickHouse/ClickHouse/pull/5911) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Show which table is missing a column in error messages. [#5768](https://github.com/ClickHouse/ClickHouse/pull/5768) ([Ivan](https://github.com/abyss7)) -* Disallow running queries with the same query_id by different users [#5430](https://github.com/ClickHouse/ClickHouse/pull/5430) ([proller](https://github.com/proller)) -* More robust code for sending metrics to Graphite. It will work even during long multiple `RENAME TABLE` operations. [#5875](https://github.com/ClickHouse/ClickHouse/pull/5875) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* More informative error messages will be displayed when ThreadPool cannot schedule a task for execution.
This fixes [#5305](https://github.com/ClickHouse/ClickHouse/issues/5305) [#5801](https://github.com/ClickHouse/ClickHouse/pull/5801) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Inverting ngramSearch to be more intuitive [#5807](https://github.com/ClickHouse/ClickHouse/pull/5807) ([Danila Kutenin](https://github.com/danlark1)) -* Add user parsing in HDFS engine builder [#5946](https://github.com/ClickHouse/ClickHouse/pull/5946) ([akonyaev90](https://github.com/akonyaev90)) -* Update default value of `max_ast_elements parameter` [#5933](https://github.com/ClickHouse/ClickHouse/pull/5933) ([Artem Konovalov](https://github.com/izebit)) -* Added a notion of obsolete settings. The obsolete setting `allow_experimental_low_cardinality_type` can be used with no effect. [0f15c01c6802f7ce1a1494c12c846be8c98944cd](https://github.com/ClickHouse/ClickHouse/commit/0f15c01c6802f7ce1a1494c12c846be8c98944cd) [Alexey Milovidov](https://github.com/alexey-milovidov) - -#### Performance Improvement -* Increase number of streams to SELECT from Merge table for more uniform distribution of threads. Added setting `max_streams_multiplier_for_merge_tables`. This fixes [#5797](https://github.com/ClickHouse/ClickHouse/issues/5797) [#5915](https://github.com/ClickHouse/ClickHouse/pull/5915) ([alexey-milovidov](https://github.com/alexey-milovidov)) - -#### Build/Testing/Packaging Improvement -* Add a backward compatibility test for client-server interaction with different versions of clickhouse. [#5868](https://github.com/ClickHouse/ClickHouse/pull/5868) ([alesapin](https://github.com/alesapin)) -* Test coverage information in every commit and pull request. [#5896](https://github.com/ClickHouse/ClickHouse/pull/5896) ([alesapin](https://github.com/alesapin)) -* Cooperate with address sanitizer to support our custom allocators (`Arena` and `ArenaWithFreeLists`) for better debugging of "use-after-free" errors. [#5728](https://github.com/ClickHouse/ClickHouse/pull/5728) ([akuzm](https://github.com/akuzm)) -* Switch to [LLVM libunwind implementation](https://github.com/llvm-mirror/libunwind) for C++ exception handling and for stack traces printing [#4828](https://github.com/ClickHouse/ClickHouse/pull/4828) ([Nikita Lapkov](https://github.com/laplab)) -* Add two more warnings from -Weverything [#5923](https://github.com/ClickHouse/ClickHouse/pull/5923) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Allow to build ClickHouse with Memory Sanitizer. [#3949](https://github.com/ClickHouse/ClickHouse/pull/3949) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Fixed ubsan report about `bitTest` function in fuzz test. [#5943](https://github.com/ClickHouse/ClickHouse/pull/5943) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Docker: added possibility to init a ClickHouse instance which requires authentication. [#5727](https://github.com/ClickHouse/ClickHouse/pull/5727) ([Korviakov Andrey](https://github.com/shurshun)) -* Update librdkafka to version 1.1.0 [#5872](https://github.com/ClickHouse/ClickHouse/pull/5872) ([Ivan](https://github.com/abyss7)) -* Add global timeout for integration tests and disable some of them in tests code. [#5741](https://github.com/ClickHouse/ClickHouse/pull/5741) ([alesapin](https://github.com/alesapin)) -* Fix some ThreadSanitizer failures. [#5854](https://github.com/ClickHouse/ClickHouse/pull/5854) ([akuzm](https://github.com/akuzm)) -* The `--no-undefined` option forces the linker to check all external names for existence while linking. 
It's very useful to track real dependencies between libraries in the split build mode. [#5855](https://github.com/ClickHouse/ClickHouse/pull/5855) ([Ivan](https://github.com/abyss7)) -* Added performance test for [#5797](https://github.com/ClickHouse/ClickHouse/issues/5797) [#5914](https://github.com/ClickHouse/ClickHouse/pull/5914) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Fixed compatibility with gcc-7. [#5840](https://github.com/ClickHouse/ClickHouse/pull/5840) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Added support for gcc-9. This fixes [#5717](https://github.com/ClickHouse/ClickHouse/issues/5717) [#5774](https://github.com/ClickHouse/ClickHouse/pull/5774) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Fixed error when libunwind can be linked incorrectly. [#5948](https://github.com/ClickHouse/ClickHouse/pull/5948) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Fixed a few warnings found by PVS-Studio. [#5921](https://github.com/ClickHouse/ClickHouse/pull/5921) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Added initial support for `clang-tidy` static analyzer. [#5806](https://github.com/ClickHouse/ClickHouse/pull/5806) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Convert BSD/Linux endian macros( 'be64toh' and 'htobe64') to the Mac OS X equivalents [#5785](https://github.com/ClickHouse/ClickHouse/pull/5785) ([Fu Chen](https://github.com/fredchenbj)) -* Improved integration tests guide. [#5796](https://github.com/ClickHouse/ClickHouse/pull/5796) ([Vladimir Chebotarev](https://github.com/excitoon)) -* Fixing build at macosx + gcc9 [#5822](https://github.com/ClickHouse/ClickHouse/pull/5822) ([filimonov](https://github.com/filimonov)) -* Fix a hard-to-spot typo: aggreAGte -> aggregate. [#5753](https://github.com/ClickHouse/ClickHouse/pull/5753) ([akuzm](https://github.com/akuzm)) -* Fix freebsd build [#5760](https://github.com/ClickHouse/ClickHouse/pull/5760) ([proller](https://github.com/proller)) -* Add link to experimental YouTube channel to website [#5845](https://github.com/ClickHouse/ClickHouse/pull/5845) ([Ivan Blinkov](https://github.com/blinkov)) -* CMake: add option for coverage flags: WITH_COVERAGE [#5776](https://github.com/ClickHouse/ClickHouse/pull/5776) ([proller](https://github.com/proller)) -* Fix initial size of some inline PODArray's. [#5787](https://github.com/ClickHouse/ClickHouse/pull/5787) ([akuzm](https://github.com/akuzm)) -* clickhouse-server.postinst: fix os detection for centos 6 [#5788](https://github.com/ClickHouse/ClickHouse/pull/5788) ([proller](https://github.com/proller)) -* Added Arch linux package generation. 
[#5719](https://github.com/ClickHouse/ClickHouse/pull/5719) ([Vladimir Chebotarev](https://github.com/excitoon)) -* Split Common/config.h by libs (dbms) [#5715](https://github.com/ClickHouse/ClickHouse/pull/5715) ([proller](https://github.com/proller)) -* Fixes for "Arcadia" build platform [#5795](https://github.com/ClickHouse/ClickHouse/pull/5795) ([proller](https://github.com/proller)) -* Fixes for unconventional build (gcc9, no submodules) [#5792](https://github.com/ClickHouse/ClickHouse/pull/5792) ([proller](https://github.com/proller)) -* Require explicit type in unalignedStore because it was proven to be bug-prone [#5791](https://github.com/ClickHouse/ClickHouse/pull/5791) ([akuzm](https://github.com/akuzm)) -* Fixes MacOS build [#5830](https://github.com/ClickHouse/ClickHouse/pull/5830) ([filimonov](https://github.com/filimonov)) -* Performance test concerning the new JIT feature with bigger dataset, as requested here [#5263](https://github.com/ClickHouse/ClickHouse/issues/5263) [#5887](https://github.com/ClickHouse/ClickHouse/pull/5887) ([Guillaume Tassery](https://github.com/YiuRULE)) -* Run stateful tests in stress test [12693e568722f11e19859742f56428455501fd2a](https://github.com/ClickHouse/ClickHouse/commit/12693e568722f11e19859742f56428455501fd2a) ([alesapin](https://github.com/alesapin)) - -#### Backward Incompatible Change -* `Kafka` is broken in this version. -* Enable `adaptive_index_granularity` = 10MB by default for new `MergeTree` tables. If you created new MergeTree tables on version 19.11+, downgrade to versions prior to 19.6 will be impossible. [#5628](https://github.com/ClickHouse/ClickHouse/pull/5628) ([alesapin](https://github.com/alesapin)) -* Removed obsolete undocumented embedded dictionaries that were used by Yandex.Metrica. The functions `OSIn`, `SEIn`, `OSToRoot`, `SEToRoot`, `OSHierarchy`, `SEHierarchy` are no longer available. If you are using these functions, write email to clickhouse-feedback@yandex-team.com. Note: at the last moment we decided to keep these functions for a while. [#5780](https://github.com/ClickHouse/ClickHouse/pull/5780) ([alexey-milovidov](https://github.com/alexey-milovidov)) - - -## ClickHouse release 19.10 -### ClickHouse release 19.10.1.5, 2019-07-12 - -#### New Feature -* Add new column codec: `T64`. Made for (U)IntX/EnumX/Data(Time)/DecimalX columns. It should be good for columns with constant or small range values. Codec itself allows enlarge or shrink data type without re-compression. [#5557](https://github.com/ClickHouse/ClickHouse/pull/5557) ([Artem Zuikov](https://github.com/4ertus2)) -* Add database engine `MySQL` that allow to view all the tables in remote MySQL server [#5599](https://github.com/ClickHouse/ClickHouse/pull/5599) ([Winter Zhang](https://github.com/zhang2014)) -* `bitmapContains` implementation. It's 2x faster than `bitmapHasAny` if the second bitmap contains one element. [#5535](https://github.com/ClickHouse/ClickHouse/pull/5535) ([Zhichang Yu](https://github.com/yuzhichang)) -* Support for `crc32` function (with behaviour exactly as in MySQL or PHP). Do not use it if you need a hash function. [#5661](https://github.com/ClickHouse/ClickHouse/pull/5661) ([Remen Ivan](https://github.com/BHYCHIK)) -* Implemented `SYSTEM START/STOP DISTRIBUTED SENDS` queries to control asynchronous inserts into `Distributed` tables. 
[#4935](https://github.com/ClickHouse/ClickHouse/pull/4935) ([Winter Zhang](https://github.com/zhang2014)) - -#### Bug Fix -* Ignore query execution limits and max parts size for merge limits while executing mutations. [#5659](https://github.com/ClickHouse/ClickHouse/pull/5659) ([Anton Popov](https://github.com/CurtizJ)) -* Fix bug which may lead to deduplication of normal blocks (extremely rare) and insertion of duplicate blocks (more often). [#5549](https://github.com/ClickHouse/ClickHouse/pull/5549) ([alesapin](https://github.com/alesapin)) -* Fix of function `arrayEnumerateUniqRanked` for arguments with empty arrays [#5559](https://github.com/ClickHouse/ClickHouse/pull/5559) ([proller](https://github.com/proller)) -* Don't subscribe to Kafka topics without intent to poll any messages. [#5698](https://github.com/ClickHouse/ClickHouse/pull/5698) ([Ivan](https://github.com/abyss7)) -* Make setting `join_use_nulls` get no effect for types that cannot be inside Nullable [#5700](https://github.com/ClickHouse/ClickHouse/pull/5700) ([Olga Khvostikova](https://github.com/stavrolia)) -* Fixed `Incorrect size of index granularity` errors [#5720](https://github.com/ClickHouse/ClickHouse/pull/5720) ([coraxster](https://github.com/coraxster)) -* Fix Float to Decimal convert overflow [#5607](https://github.com/ClickHouse/ClickHouse/pull/5607) ([coraxster](https://github.com/coraxster)) -* Flush buffer when `WriteBufferFromHDFS`'s destructor is called. This fixes writing into `HDFS`. [#5684](https://github.com/ClickHouse/ClickHouse/pull/5684) ([Xindong Peng](https://github.com/eejoin)) - -#### Improvement -* Treat empty cells in `CSV` as default values when the setting `input_format_defaults_for_omitted_fields` is enabled. [#5625](https://github.com/ClickHouse/ClickHouse/pull/5625) ([akuzm](https://github.com/akuzm)) -* Non-blocking loading of external dictionaries. [#5567](https://github.com/ClickHouse/ClickHouse/pull/5567) ([Vitaly Baranov](https://github.com/vitlibar)) -* Network timeouts can be dynamically changed for already established connections according to the settings. [#4558](https://github.com/ClickHouse/ClickHouse/pull/4558) ([Konstantin Podshumok](https://github.com/podshumok)) -* Using "public_suffix_list" for functions `firstSignificantSubdomain`, `cutToFirstSignificantSubdomain`. It's using a perfect hash table generated by `gperf` with a list generated from the file: [https://publicsuffix.org/list/public_suffix_list.dat](https://publicsuffix.org/list/public_suffix_list.dat). (for example, now we recognize the domain `ac.uk` as non-significant). [#5030](https://github.com/ClickHouse/ClickHouse/pull/5030) ([Guillaume Tassery](https://github.com/YiuRULE)) -* Adopted `IPv6` data type in system tables; unified client info columns in `system.processes` and `system.query_log` [#5640](https://github.com/ClickHouse/ClickHouse/pull/5640) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Using sessions for connections with MySQL compatibility protocol. #5476 [#5646](https://github.com/ClickHouse/ClickHouse/pull/5646) ([Yuriy Baranov](https://github.com/yurriy)) -* Support more `ALTER` queries `ON CLUSTER`. [#5593](https://github.com/ClickHouse/ClickHouse/pull/5593) [#5613](https://github.com/ClickHouse/ClickHouse/pull/5613) ([sundyli](https://github.com/sundy-li)) -* Support `` section in `clickhouse-local` config file. 
[#5540](https://github.com/ClickHouse/ClickHouse/pull/5540) ([proller](https://github.com/proller)) -* Allow run query with `remote` table function in `clickhouse-local` [#5627](https://github.com/ClickHouse/ClickHouse/pull/5627) ([proller](https://github.com/proller)) - -#### Performance Improvement -* Add the possibility to write the final mark at the end of MergeTree columns. It allows to avoid useless reads for keys that are out of table data range. It is enabled only if adaptive index granularity is in use. [#5624](https://github.com/ClickHouse/ClickHouse/pull/5624) ([alesapin](https://github.com/alesapin)) -* Improved performance of MergeTree tables on very slow filesystems by reducing number of `stat` syscalls. [#5648](https://github.com/ClickHouse/ClickHouse/pull/5648) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Fixed performance degradation in reading from MergeTree tables that was introduced in version 19.6. Fixes #5631. [#5633](https://github.com/ClickHouse/ClickHouse/pull/5633) ([alexey-milovidov](https://github.com/alexey-milovidov)) - -#### Build/Testing/Packaging Improvement -* Implemented `TestKeeper` as an implementation of ZooKeeper interface used for testing [#5643](https://github.com/ClickHouse/ClickHouse/pull/5643) ([alexey-milovidov](https://github.com/alexey-milovidov)) ([levushkin aleksej](https://github.com/alexey-milovidov)) -* From now on `.sql` tests can be run isolated by server, in parallel, with random database. It allows to run them faster, add new tests with custom server configurations, and be sure that different tests doesn't affect each other. [#5554](https://github.com/ClickHouse/ClickHouse/pull/5554) ([Ivan](https://github.com/abyss7)) -* Remove `` and `` from performance tests [#5672](https://github.com/ClickHouse/ClickHouse/pull/5672) ([Olga Khvostikova](https://github.com/stavrolia)) -* Fixed "select_format" performance test for `Pretty` formats [#5642](https://github.com/ClickHouse/ClickHouse/pull/5642) ([alexey-milovidov](https://github.com/alexey-milovidov)) - - -## ClickHouse release 19.9 -### ClickHouse release 19.9.3.31, 2019-07-05 - -#### Bug Fix -* Fix segfault in Delta codec which affects columns with values less than 32 bits size. The bug led to random memory corruption. [#5786](https://github.com/ClickHouse/ClickHouse/pull/5786) ([alesapin](https://github.com/alesapin)) -* Fix rare bug in checking of part with LowCardinality column. [#5832](https://github.com/ClickHouse/ClickHouse/pull/5832) ([alesapin](https://github.com/alesapin)) -* Fix segfault in TTL merge with non-physical columns in block. [#5819](https://github.com/ClickHouse/ClickHouse/pull/5819) ([Anton Popov](https://github.com/CurtizJ)) -* Fix potential infinite sleeping of low-priority queries. [#5842](https://github.com/ClickHouse/ClickHouse/pull/5842) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Fix how ClickHouse determines default time zone as UCT instead of UTC. [#5828](https://github.com/ClickHouse/ClickHouse/pull/5828) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Fix bug about executing distributed DROP/ALTER/TRUNCATE/OPTIMIZE ON CLUSTER queries on follower replica before leader replica. Now they will be executed directly on leader replica. [#5757](https://github.com/ClickHouse/ClickHouse/pull/5757) ([alesapin](https://github.com/alesapin)) -* Fix race condition, which cause that some queries may not appear in query_log instantly after SYSTEM FLUSH LOGS query. 
[#5685](https://github.com/ClickHouse/ClickHouse/pull/5685) ([Anton Popov](https://github.com/CurtizJ)) -* Added missing support for constant arguments to `evalMLModel` function. [#5820](https://github.com/ClickHouse/ClickHouse/pull/5820) ([alexey-milovidov](https://github.com/alexey-milovidov)) - -### ClickHouse release 19.9.2.4, 2019-06-24 - -#### New Feature -* Print information about frozen parts in `system.parts` table. [#5471](https://github.com/ClickHouse/ClickHouse/pull/5471) ([proller](https://github.com/proller)) -* Ask client password on clickhouse-client start on tty if not set in arguments [#5092](https://github.com/ClickHouse/ClickHouse/pull/5092) ([proller](https://github.com/proller)) -* Implement `dictGet` and `dictGetOrDefault` functions for Decimal types. [#5394](https://github.com/ClickHouse/ClickHouse/pull/5394) ([Artem Zuikov](https://github.com/4ertus2)) - -#### Improvement -* Debian init: Add service stop timeout [#5522](https://github.com/ClickHouse/ClickHouse/pull/5522) ([proller](https://github.com/proller)) -* Add setting forbidden by default to create table with suspicious types for LowCardinality [#5448](https://github.com/ClickHouse/ClickHouse/pull/5448) ([Olga Khvostikova](https://github.com/stavrolia)) -* Regression functions return model weights when not used as State in function `evalMLMethod`. [#5411](https://github.com/ClickHouse/ClickHouse/pull/5411) ([Quid37](https://github.com/Quid37)) -* Rename and improve regression methods. [#5492](https://github.com/ClickHouse/ClickHouse/pull/5492) ([Quid37](https://github.com/Quid37)) -* Clearer interfaces of string searchers. [#5586](https://github.com/ClickHouse/ClickHouse/pull/5586) ([Danila Kutenin](https://github.com/danlark1)) - -#### Bug Fix -* Fix potential data loss in Kafka [#5445](https://github.com/ClickHouse/ClickHouse/pull/5445) ([Ivan](https://github.com/abyss7)) -* Fix potential infinite loop in `PrettySpace` format when called with zero columns [#5560](https://github.com/ClickHouse/ClickHouse/pull/5560) ([Olga Khvostikova](https://github.com/stavrolia)) -* Fixed UInt32 overflow bug in linear models. Allow eval ML model for non-const model argument. [#5516](https://github.com/ClickHouse/ClickHouse/pull/5516) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) -* `ALTER TABLE ... DROP INDEX IF EXISTS ...` should not raise an exception if provided index does not exist [#5524](https://github.com/ClickHouse/ClickHouse/pull/5524) ([Gleb Novikov](https://github.com/NanoBjorn)) -* Fix segfault with `bitmapHasAny` in scalar subquery [#5528](https://github.com/ClickHouse/ClickHouse/pull/5528) ([Zhichang Yu](https://github.com/yuzhichang)) -* Fixed error when replication connection pool doesn't retry to resolve host, even when DNS cache was dropped. [#5534](https://github.com/ClickHouse/ClickHouse/pull/5534) ([alesapin](https://github.com/alesapin)) -* Fixed `ALTER ... MODIFY TTL` on ReplicatedMergeTree. [#5539](https://github.com/ClickHouse/ClickHouse/pull/5539) ([Anton Popov](https://github.com/CurtizJ)) -* Fix INSERT into Distributed table with MATERIALIZED column [#5429](https://github.com/ClickHouse/ClickHouse/pull/5429) ([Azat Khuzhin](https://github.com/azat)) -* Fix bad alloc when truncate Join storage [#5437](https://github.com/ClickHouse/ClickHouse/pull/5437) ([TCeason](https://github.com/TCeason)) -* In recent versions of package tzdata some of files are symlinks now. The current mechanism for detecting default timezone gets broken and gives wrong names for some timezones. 
Now at least we force the timezone name to the contents of TZ if provided. [#5443](https://github.com/ClickHouse/ClickHouse/pull/5443) ([Ivan](https://github.com/abyss7)) -* Fix some extremely rare cases with MultiVolnitsky searcher when the constant needles in sum are at least 16KB long. The algorithm missed or overwrote the previous results which can lead to the incorrect result of `multiSearchAny`. [#5588](https://github.com/ClickHouse/ClickHouse/pull/5588) ([Danila Kutenin](https://github.com/danlark1)) -* Fix the issue when settings for ExternalData requests couldn't use ClickHouse settings. Also, for now, settings `date_time_input_format` and `low_cardinality_allow_in_native_format` cannot be used because of the ambiguity of names (in external data it can be interpreted as table format and in the query it can be a setting). [#5455](https://github.com/ClickHouse/ClickHouse/pull/5455) ([Danila Kutenin](https://github.com/danlark1)) -* Fix bug when parts were removed only from FS without dropping them from Zookeeper. [#5520](https://github.com/ClickHouse/ClickHouse/pull/5520) ([alesapin](https://github.com/alesapin)) -* Remove debug logging from MySQL protocol [#5478](https://github.com/ClickHouse/ClickHouse/pull/5478) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Skip ZNONODE during DDL query processing [#5489](https://github.com/ClickHouse/ClickHouse/pull/5489) ([Azat Khuzhin](https://github.com/azat)) -* Fix mix `UNION ALL` result column type. There were cases with inconsistent data and column types of resulting columns. [#5503](https://github.com/ClickHouse/ClickHouse/pull/5503) ([Artem Zuikov](https://github.com/4ertus2)) -* Throw an exception on wrong integers in `dictGetT` functions instead of crash. [#5446](https://github.com/ClickHouse/ClickHouse/pull/5446) ([Artem Zuikov](https://github.com/4ertus2)) -* Fix wrong element_count and load_factor for hashed dictionary in `system.dictionaries` table. [#5440](https://github.com/ClickHouse/ClickHouse/pull/5440) ([Azat Khuzhin](https://github.com/azat)) - -#### Build/Testing/Packaging Improvement -* Fixed build without `Brotli` HTTP compression support (`ENABLE_BROTLI=OFF` cmake variable). [#5521](https://github.com/ClickHouse/ClickHouse/pull/5521) ([Anton Yuzhaninov](https://github.com/citrin)) -* Include roaring.h as roaring/roaring.h [#5523](https://github.com/ClickHouse/ClickHouse/pull/5523) ([Orivej Desh](https://github.com/orivej)) -* Fix gcc9 warnings in hyperscan (#line directive is evil!) [#5546](https://github.com/ClickHouse/ClickHouse/pull/5546) ([Danila Kutenin](https://github.com/danlark1)) -* Fix all warnings when compiling with gcc-9. Fix some contrib issues. Fix gcc9 ICE and submit it to bugzilla. 
[#5498](https://github.com/ClickHouse/ClickHouse/pull/5498) ([Danila Kutenin](https://github.com/danlark1)) -* Fixed linking with lld [#5477](https://github.com/ClickHouse/ClickHouse/pull/5477) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Remove unused specializations in dictionaries [#5452](https://github.com/ClickHouse/ClickHouse/pull/5452) ([Artem Zuikov](https://github.com/4ertus2)) -* Improve performance tests for formatting and parsing tables for different types of files [#5497](https://github.com/ClickHouse/ClickHouse/pull/5497) ([Olga Khvostikova](https://github.com/stavrolia)) -* Fixes for parallel test run [#5506](https://github.com/ClickHouse/ClickHouse/pull/5506) ([proller](https://github.com/proller)) -* Docker: use configs from clickhouse-test [#5531](https://github.com/ClickHouse/ClickHouse/pull/5531) ([proller](https://github.com/proller)) -* Fix compile for FreeBSD [#5447](https://github.com/ClickHouse/ClickHouse/pull/5447) ([proller](https://github.com/proller)) -* Upgrade boost to 1.70 [#5570](https://github.com/ClickHouse/ClickHouse/pull/5570) ([proller](https://github.com/proller)) -* Fix building clickhouse as a submodule [#5574](https://github.com/ClickHouse/ClickHouse/pull/5574) ([proller](https://github.com/proller)) -* Improve JSONExtract performance tests [#5444](https://github.com/ClickHouse/ClickHouse/pull/5444) ([Vitaly Baranov](https://github.com/vitlibar)) - -## ClickHouse release 19.8 -### ClickHouse release 19.8.3.8, 2019-06-11 - -#### New Features -* Added functions to work with JSON [#4686](https://github.com/ClickHouse/ClickHouse/pull/4686) ([hcz](https://github.com/hczhcz)) [#5124](https://github.com/ClickHouse/ClickHouse/pull/5124) ([Vitaly Baranov](https://github.com/vitlibar)) -* Add a `basename` function, with behaviour similar to the basename function that exists in a lot of languages (`os.path.basename` in Python, `basename` in PHP, etc.). It works with both a UNIX-like path and a Windows path. [#5136](https://github.com/ClickHouse/ClickHouse/pull/5136) ([Guillaume Tassery](https://github.com/YiuRULE)) -* Added `LIMIT n, m BY` or `LIMIT m OFFSET n BY` syntax to set an offset of n for the LIMIT BY clause (see the example below). [#5138](https://github.com/ClickHouse/ClickHouse/pull/5138) ([Anton Popov](https://github.com/CurtizJ)) -* Added new data type `SimpleAggregateFunction`, which allows to have columns with light aggregation in an `AggregatingMergeTree`. This can only be used with simple functions like `any`, `anyLast`, `sum`, `min`, `max`. [#4629](https://github.com/ClickHouse/ClickHouse/pull/4629) ([Boris Granveaud](https://github.com/bgranvea)) -* Added support for non-constant arguments in function `ngramDistance` [#5198](https://github.com/ClickHouse/ClickHouse/pull/5198) ([Danila Kutenin](https://github.com/danlark1)) -* Added functions `skewPop`, `skewSamp`, `kurtPop` and `kurtSamp` to compute sequence skewness, sample skewness, kurtosis and sample kurtosis respectively. [#5200](https://github.com/ClickHouse/ClickHouse/pull/5200) ([hcz](https://github.com/hczhcz)) -* Support rename operation for `MaterializeView` storage. [#5209](https://github.com/ClickHouse/ClickHouse/pull/5209) ([Guillaume Tassery](https://github.com/YiuRULE)) -* Added server which allows connecting to ClickHouse using MySQL client. [#4715](https://github.com/ClickHouse/ClickHouse/pull/4715) ([Yuriy Baranov](https://github.com/yurriy)) -* Add `toDecimal*OrZero` and `toDecimal*OrNull` functions. [#5291](https://github.com/ClickHouse/ClickHouse/pull/5291) ([Artem Zuikov](https://github.com/4ertus2))
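A minimal sketch of the new `LIMIT n, m BY` syntax (the `pageviews` table and its columns are hypothetical, used only for illustration):

```sql
-- Skip the top row and return the next two rows per domain (offset 1, limit 2).
SELECT domain, hits
FROM pageviews
ORDER BY domain, hits DESC
LIMIT 1, 2 BY domain;

-- Equivalent form using the OFFSET keyword.
SELECT domain, hits
FROM pageviews
ORDER BY domain, hits DESC
LIMIT 2 OFFSET 1 BY domain;
```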
-* Support Decimal types in functions: `quantile`, `quantiles`, `median`, `quantileExactWeighted`, `quantilesExactWeighted`, `medianExactWeighted`. [#5304](https://github.com/ClickHouse/ClickHouse/pull/5304) ([Artem Zuikov](https://github.com/4ertus2)) -* Added `toValidUTF8` function, which replaces all invalid UTF-8 characters by replacement character � (U+FFFD). [#5322](https://github.com/ClickHouse/ClickHouse/pull/5322) ([Danila Kutenin](https://github.com/danlark1)) -* Added `format` function. It formats a constant pattern (a simplified Python format pattern) with the strings listed in the arguments (see the example below). [#5330](https://github.com/ClickHouse/ClickHouse/pull/5330) ([Danila Kutenin](https://github.com/danlark1)) -* Added `system.detached_parts` table containing information about detached parts of `MergeTree` tables. [#5353](https://github.com/ClickHouse/ClickHouse/pull/5353) ([akuzm](https://github.com/akuzm)) -* Added `ngramSearch` function to calculate the non-symmetric difference between needle and haystack. [#5418](https://github.com/ClickHouse/ClickHouse/pull/5418) [#5422](https://github.com/ClickHouse/ClickHouse/pull/5422) ([Danila Kutenin](https://github.com/danlark1)) -* Implementation of basic machine learning methods (stochastic linear regression and logistic regression) using the aggregate functions interface. Has different strategies for updating model weights (simple gradient descent, momentum method, Nesterov method). Also supports mini-batches of custom size. [#4943](https://github.com/ClickHouse/ClickHouse/pull/4943) ([Quid37](https://github.com/Quid37)) -* Implementation of `geohashEncode` and `geohashDecode` functions. [#5003](https://github.com/ClickHouse/ClickHouse/pull/5003) ([Vasily Nemkov](https://github.com/Enmk)) -* Added aggregate function `timeSeriesGroupSum`, which can aggregate different time series whose sample timestamps are not aligned. It uses linear interpolation between two sample timestamps and then sums the time series together. Added aggregate function `timeSeriesGroupRateSum`, which calculates the rate of time series and then sums the rates together. [#4542](https://github.com/ClickHouse/ClickHouse/pull/4542) ([Yangkuan Liu](https://github.com/LiuYangkuan)) -* Added functions `IPv4CIDRtoIPv4Range` and `IPv6CIDRtoIPv6Range` to calculate the lower and upper bounds for an IP in the subnet using a CIDR. [#5095](https://github.com/ClickHouse/ClickHouse/pull/5095) ([Guillaume Tassery](https://github.com/YiuRULE)) -* Add an `X-ClickHouse-Summary` header when we send a query using HTTP with the setting `send_progress_in_http_headers` enabled. Return the usual information of `X-ClickHouse-Progress`, with additional information like how many rows and bytes were inserted in the query. [#5116](https://github.com/ClickHouse/ClickHouse/pull/5116) ([Guillaume Tassery](https://github.com/YiuRULE)) - -#### Improvements -* Added `max_parts_in_total` setting for MergeTree family of tables (default: 100 000) that prevents unsafe specification of partition key #5166. [#5171](https://github.com/ClickHouse/ClickHouse/pull/5171) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* `clickhouse-obfuscator`: derive seed for individual columns by combining initial seed with column name, not column position. This is intended to transform datasets with multiple related tables, so that tables will remain JOINable after transformation. [#5178](https://github.com/ClickHouse/ClickHouse/pull/5178) ([alexey-milovidov](https://github.com/alexey-milovidov))
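For the `format` function above, a quick illustrative query (results shown in comments):

```sql
SELECT format('{1} {0} {1}', 'World', 'Hello') AS explicit_indexes;  -- 'Hello World Hello'
SELECT format('{} {}', 'Hello', 'World')       AS implicit_indexes;  -- 'Hello World'
```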
-* Added functions `JSONExtractRaw`, `JSONExtractKeyAndValues`. Renamed functions `jsonExtract` to `JSONExtract`. When something goes wrong these functions return the corresponding values, not `NULL`. Modified function `JSONExtract`, now it gets the return type from its last parameter and doesn't inject nullables (see the example below). Implemented fallback to RapidJSON in case AVX2 instructions are not available. Simdjson library updated to a new version. [#5235](https://github.com/ClickHouse/ClickHouse/pull/5235) ([Vitaly Baranov](https://github.com/vitlibar)) -* Now `if` and `multiIf` functions don't rely on the condition's `Nullable`, but rely on the branches for SQL compatibility. [#5238](https://github.com/ClickHouse/ClickHouse/pull/5238) ([Jian Wu](https://github.com/janplus)) -* `In` predicate now generates `Null` result from `Null` input like the `Equal` function. [#5152](https://github.com/ClickHouse/ClickHouse/pull/5152) ([Jian Wu](https://github.com/janplus)) -* Check the time limit every (flush_interval / poll_timeout) number of rows from Kafka. This allows to break the reading from Kafka consumer more frequently and to check the time limits for the top-level streams [#5249](https://github.com/ClickHouse/ClickHouse/pull/5249) ([Ivan](https://github.com/abyss7)) -* Link rdkafka with bundled SASL. It should allow to use SASL SCRAM authentication [#5253](https://github.com/ClickHouse/ClickHouse/pull/5253) ([Ivan](https://github.com/abyss7)) -* Batched version of RowRefList for ALL JOINS. [#5267](https://github.com/ClickHouse/ClickHouse/pull/5267) ([Artem Zuikov](https://github.com/4ertus2)) -* clickhouse-server: more informative listen error messages. [#5268](https://github.com/ClickHouse/ClickHouse/pull/5268) ([proller](https://github.com/proller)) -* Support dictionaries in clickhouse-copier for functions in `` [#5270](https://github.com/ClickHouse/ClickHouse/pull/5270) ([proller](https://github.com/proller)) -* Add new setting `kafka_commit_every_batch` to regulate Kafka committing policy. -It allows to set commit mode: after every batch of messages is handled, or after the whole block is written to the storage. It's a trade-off between losing some messages or reading them twice in some extreme situations. [#5308](https://github.com/ClickHouse/ClickHouse/pull/5308) ([Ivan](https://github.com/abyss7)) -* Make `windowFunnel` support other Unsigned Integer Types. [#5320](https://github.com/ClickHouse/ClickHouse/pull/5320) ([sundyli](https://github.com/sundy-li)) -* Allow to shadow virtual column `_table` in Merge engine. [#5325](https://github.com/ClickHouse/ClickHouse/pull/5325) ([Ivan](https://github.com/abyss7)) -* Make `sequenceMatch` aggregate functions support other unsigned Integer types [#5339](https://github.com/ClickHouse/ClickHouse/pull/5339) ([sundyli](https://github.com/sundy-li)) -* Better error messages if checksum mismatch is most likely caused by hardware failures. [#5355](https://github.com/ClickHouse/ClickHouse/pull/5355) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Check that underlying tables support sampling for `StorageMerge` [#5366](https://github.com/ClickHouse/ClickHouse/pull/5366) ([Ivan](https://github.com/abyss7)) -* Close MySQL connections after their usage in external dictionaries. It is related to issue #893. [#5395](https://github.com/ClickHouse/ClickHouse/pull/5395) ([Clément Rodriguez](https://github.com/clemrodriguez))
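A short sketch of the reworked `JSONExtract` behaviour described above, where the requested return type is passed as the last argument:

```sql
WITH '{"a": "hello", "b": [1, 2, 3]}' AS json
SELECT
    JSONExtract(json, 'a', 'String')       AS a,    -- 'hello'
    JSONExtract(json, 'b', 'Array(UInt8)') AS b,    -- [1, 2, 3]
    JSONExtractRaw(json, 'b')              AS raw;  -- '[1,2,3]'
```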
-* Improvements of MySQL Wire Protocol. Changed name of format to MySQLWire. Using RAII for calling RSA_free. Disabling SSL if context cannot be created. [#5419](https://github.com/ClickHouse/ClickHouse/pull/5419) ([Yuriy Baranov](https://github.com/yurriy)) -* clickhouse-client: allow to run with an inaccessible history file (read-only, no disk space, file is a directory, ...). [#5431](https://github.com/ClickHouse/ClickHouse/pull/5431) ([proller](https://github.com/proller)) -* Respect query settings in asynchronous INSERTs into Distributed tables. [#4936](https://github.com/ClickHouse/ClickHouse/pull/4936) ([TCeason](https://github.com/TCeason)) -* Renamed functions `leastSqr` to `simpleLinearRegression`, `LinearRegression` to `linearRegression`, `LogisticRegression` to `logisticRegression`. [#5391](https://github.com/ClickHouse/ClickHouse/pull/5391) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) - -#### Performance Improvements -* Parallelize processing of parts of non-replicated MergeTree tables in ALTER MODIFY query. [#4639](https://github.com/ClickHouse/ClickHouse/pull/4639) ([Ivan Kush](https://github.com/IvanKush)) -* Optimizations in regular expressions extraction. [#5193](https://github.com/ClickHouse/ClickHouse/pull/5193) [#5191](https://github.com/ClickHouse/ClickHouse/pull/5191) ([Danila Kutenin](https://github.com/danlark1)) -* Do not add right join key column to join result if it's used only in join on section. [#5260](https://github.com/ClickHouse/ClickHouse/pull/5260) ([Artem Zuikov](https://github.com/4ertus2)) -* Freeze the Kafka buffer after first empty response. It avoids multiple invocations of `ReadBuffer::next()` for empty result in some row-parsing streams. [#5283](https://github.com/ClickHouse/ClickHouse/pull/5283) ([Ivan](https://github.com/abyss7)) -* `concat` function optimization for multiple arguments. [#5357](https://github.com/ClickHouse/ClickHouse/pull/5357) ([Danila Kutenin](https://github.com/danlark1)) -* Query optimisation. Allow push down of IN statements while rewriting comma/cross join into an inner one. [#5396](https://github.com/ClickHouse/ClickHouse/pull/5396) ([Artem Zuikov](https://github.com/4ertus2)) -* Upgrade our LZ4 implementation with the reference one to have faster decompression. [#5070](https://github.com/ClickHouse/ClickHouse/pull/5070) ([Danila Kutenin](https://github.com/danlark1)) -* Implemented MSD radix sort (based on kxsort), and partial sorting. [#5129](https://github.com/ClickHouse/ClickHouse/pull/5129) ([Evgenii Pravda](https://github.com/kvinty)) - -#### Bug Fixes -* Fix push require columns with join [#5192](https://github.com/ClickHouse/ClickHouse/pull/5192) ([Winter Zhang](https://github.com/zhang2014)) -* Fixed a bug where, when ClickHouse is run by systemd, the command `sudo service clickhouse-server forcerestart` was not working as expected. [#5204](https://github.com/ClickHouse/ClickHouse/pull/5204) ([proller](https://github.com/proller)) -* Fix http error codes in DataPartsExchange (interserver http server on 9009 port always returned code 200, even on errors). [#5216](https://github.com/ClickHouse/ClickHouse/pull/5216) ([proller](https://github.com/proller)) -* Fix SimpleAggregateFunction for String longer than MAX_SMALL_STRING_SIZE [#5311](https://github.com/ClickHouse/ClickHouse/pull/5311) ([Azat Khuzhin](https://github.com/azat)) -* Fix error for `Decimal` to `Nullable(Decimal)` conversion in IN. Support other Decimal to Decimal conversions (including different scales); see the example below. [#5350](https://github.com/ClickHouse/ClickHouse/pull/5350) ([Artem Zuikov](https://github.com/4ertus2))
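A hedged sketch of the kind of comparison the Decimal-in-`IN` fix above enables (the values are arbitrary):

```sql
-- Compare Decimal values with different widths and scales inside IN;
-- previously this combination could fail instead of being converted.
SELECT toDecimal32(1.1, 1) IN (toDecimal64(1.10, 2), toDecimal64(2.50, 2)) AS found;
```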
-* Fixed FPU clobbering in simdjson library that led to wrong calculation of the `uniqHLL` and `uniqCombined` aggregate functions and math functions such as `log`. [#5354](https://github.com/ClickHouse/ClickHouse/pull/5354) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Fixed handling mixed const/nonconst cases in JSON functions. [#5435](https://github.com/ClickHouse/ClickHouse/pull/5435) ([Vitaly Baranov](https://github.com/vitlibar)) -* Fix `retention` function. Now all conditions that are satisfied in a row of data are added to the data state. [#5119](https://github.com/ClickHouse/ClickHouse/pull/5119) ([小路](https://github.com/nicelulu)) -* Fix result type for `quantileExact` with Decimals. [#5304](https://github.com/ClickHouse/ClickHouse/pull/5304) ([Artem Zuikov](https://github.com/4ertus2)) - -#### Documentation -* Translate documentation for `CollapsingMergeTree` to Chinese. [#5168](https://github.com/ClickHouse/ClickHouse/pull/5168) ([张风啸](https://github.com/AlexZFX)) -* Translate some documentation about table engines to Chinese. - [#5134](https://github.com/ClickHouse/ClickHouse/pull/5134) - [#5328](https://github.com/ClickHouse/ClickHouse/pull/5328) - ([never lee](https://github.com/neverlee)) - - -#### Build/Testing/Packaging Improvements -* Fix some sanitizer reports that show probable use-after-free. [#5139](https://github.com/ClickHouse/ClickHouse/pull/5139) [#5143](https://github.com/ClickHouse/ClickHouse/pull/5143) [#5393](https://github.com/ClickHouse/ClickHouse/pull/5393) ([Ivan](https://github.com/abyss7)) -* Move performance tests out of separate directories for convenience. [#5158](https://github.com/ClickHouse/ClickHouse/pull/5158) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Fix incorrect performance tests. [#5255](https://github.com/ClickHouse/ClickHouse/pull/5255) ([alesapin](https://github.com/alesapin)) -* Added a tool to calculate checksums caused by bit flips to debug hardware issues. [#5334](https://github.com/ClickHouse/ClickHouse/pull/5334) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Make runner script more usable. [#5340](https://github.com/ClickHouse/ClickHouse/pull/5340) [#5360](https://github.com/ClickHouse/ClickHouse/pull/5360) ([filimonov](https://github.com/filimonov)) -* Add a small instruction on how to write performance tests. [#5408](https://github.com/ClickHouse/ClickHouse/pull/5408) ([alesapin](https://github.com/alesapin)) -* Add the ability to make substitutions in create, fill and drop queries in performance tests [#5367](https://github.com/ClickHouse/ClickHouse/pull/5367) ([Olga Khvostikova](https://github.com/stavrolia)) - -## ClickHouse release 19.7 - -### ClickHouse release 19.7.5.29, 2019-07-05 - -#### Bug Fix -* Fix performance regression in some queries with JOIN. [#5192](https://github.com/ClickHouse/ClickHouse/pull/5192) ([Winter Zhang](https://github.com/zhang2014)) - -### ClickHouse release 19.7.5.27, 2019-06-09 - -#### New Features -* Added bitmap related functions `bitmapHasAny` and `bitmapHasAll`, analogous to the `hasAny` and `hasAll` functions for arrays (see the example below). [#5279](https://github.com/ClickHouse/ClickHouse/pull/5279) ([Sergi Vladykin](https://github.com/svladykin)) - -#### Bug Fixes -* Fix segfault on `minmax` INDEX with Null value. [#5246](https://github.com/ClickHouse/ClickHouse/pull/5246) ([Nikita Vasilev](https://github.com/nikvas0))
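An illustrative query for `bitmapHasAny` and `bitmapHasAll` (results shown in comments):

```sql
SELECT
    bitmapHasAny(bitmapBuild([1, 2, 3]), bitmapBuild([3, 4, 5])) AS has_any,  -- 1
    bitmapHasAll(bitmapBuild([1, 2, 3]), bitmapBuild([3, 4, 5])) AS has_all;  -- 0
```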
-* Mark all input columns in LIMIT BY as required output. It fixes 'Not found column' error in some distributed queries. [#5407](https://github.com/ClickHouse/ClickHouse/pull/5407) ([Constantin S. Pan](https://github.com/kvap)) -* Fix "Column '0' already exists" error in `SELECT .. PREWHERE` on column with DEFAULT [#5397](https://github.com/ClickHouse/ClickHouse/pull/5397) ([proller](https://github.com/proller)) -* Fix `ALTER MODIFY TTL` query on `ReplicatedMergeTree`. [#5539](https://github.com/ClickHouse/ClickHouse/pull/5539/commits) ([Anton Popov](https://github.com/CurtizJ)) -* Don't crash the server when Kafka consumers have failed to start. [#5285](https://github.com/ClickHouse/ClickHouse/pull/5285) ([Ivan](https://github.com/abyss7)) -* Fixed bitmap functions producing wrong results. [#5359](https://github.com/ClickHouse/ClickHouse/pull/5359) ([Andy Yang](https://github.com/andyyzh)) -* Fix element_count for hashed dictionary (do not include duplicates) [#5440](https://github.com/ClickHouse/ClickHouse/pull/5440) ([Azat Khuzhin](https://github.com/azat)) -* Use contents of environment variable TZ as the name for timezone. It helps to correctly detect default timezone in some cases. [#5443](https://github.com/ClickHouse/ClickHouse/pull/5443) ([Ivan](https://github.com/abyss7)) -* Do not try to convert integers in `dictGetT` functions, because it doesn't work correctly. Throw an exception instead. [#5446](https://github.com/ClickHouse/ClickHouse/pull/5446) ([Artem Zuikov](https://github.com/4ertus2)) -* Fix settings in ExternalData HTTP request. [#5455](https://github.com/ClickHouse/ClickHouse/pull/5455) ([Danila Kutenin](https://github.com/danlark1)) -* Fix bug when parts were removed only from FS without dropping them from Zookeeper. [#5520](https://github.com/ClickHouse/ClickHouse/pull/5520) ([alesapin](https://github.com/alesapin)) -* Fix segmentation fault in `bitmapHasAny` function. [#5528](https://github.com/ClickHouse/ClickHouse/pull/5528) ([Zhichang Yu](https://github.com/yuzhichang)) -* Fixed error when replication connection pool doesn't retry to resolve host, even when DNS cache was dropped. [#5534](https://github.com/ClickHouse/ClickHouse/pull/5534) ([alesapin](https://github.com/alesapin)) -* Fixed `DROP INDEX IF EXISTS` query. Now `ALTER TABLE ... DROP INDEX IF EXISTS ...` query doesn't raise an exception if the provided index does not exist. [#5524](https://github.com/ClickHouse/ClickHouse/pull/5524) ([Gleb Novikov](https://github.com/NanoBjorn)) -* Fix union all supertype column. There were cases with inconsistent data and column types of resulting columns. [#5503](https://github.com/ClickHouse/ClickHouse/pull/5503) ([Artem Zuikov](https://github.com/4ertus2)) -* Skip ZNONODE during DDL query processing. Before, if another node removed the znode in the task queue, the one that did not process it, but had already got the list of children, would terminate the DDLWorker thread. [#5489](https://github.com/ClickHouse/ClickHouse/pull/5489) ([Azat Khuzhin](https://github.com/azat)) -* Fix INSERT into Distributed() table with MATERIALIZED column. [#5429](https://github.com/ClickHouse/ClickHouse/pull/5429) ([Azat Khuzhin](https://github.com/azat)) - -### ClickHouse release 19.7.3.9, 2019-05-30 - -#### New Features -* Allow to limit the range of a setting that can be specified by user. - These constraints can be set up in user settings profile.
-[#4931](https://github.com/ClickHouse/ClickHouse/pull/4931) ([Vitaly -Baranov](https://github.com/vitlibar)) -* Add a second version of the function `groupUniqArray` with an optional - `max_size` parameter that limits the size of the resulting array. This -behavior is similar to `groupArray(max_size)(x)` function. -[#5026](https://github.com/ClickHouse/ClickHouse/pull/5026) ([Guillaume -Tassery](https://github.com/YiuRULE)) -* For TSVWithNames/CSVWithNames input file formats, column order can now be - determined from file header. This is controlled by -`input_format_with_names_use_header` parameter. -[#5081](https://github.com/ClickHouse/ClickHouse/pull/5081) -([Alexander](https://github.com/Akazz)) - -#### Bug Fixes -* Crash with uncompressed_cache + JOIN during merge (#5197) -[#5133](https://github.com/ClickHouse/ClickHouse/pull/5133) ([Danila -Kutenin](https://github.com/danlark1)) -* Segmentation fault on a clickhouse-client query to system tables. #5066 -[#5127](https://github.com/ClickHouse/ClickHouse/pull/5127) -([Ivan](https://github.com/abyss7)) -* Data loss on heavy load via KafkaEngine (#4736) -[#5080](https://github.com/ClickHouse/ClickHouse/pull/5080) -([Ivan](https://github.com/abyss7)) -* Fixed very rare data race condition that could happen when executing a query with UNION ALL involving at least two SELECTs from system.columns, system.tables, system.parts, system.parts_tables or tables of Merge family and performing ALTER of columns of the related tables concurrently. [#5189](https://github.com/ClickHouse/ClickHouse/pull/5189) ([alexey-milovidov](https://github.com/alexey-milovidov)) - -#### Performance Improvements -* Use radix sort for sorting by single numeric column in `ORDER BY` without - `LIMIT`. [#5106](https://github.com/ClickHouse/ClickHouse/pull/5106), -[#4439](https://github.com/ClickHouse/ClickHouse/pull/4439) -([Evgenii Pravda](https://github.com/kvinty), -[alexey-milovidov](https://github.com/alexey-milovidov)) - -#### Documentation -* Translate documentation for some table engines to Chinese. - [#5107](https://github.com/ClickHouse/ClickHouse/pull/5107), -[#5094](https://github.com/ClickHouse/ClickHouse/pull/5094), -[#5087](https://github.com/ClickHouse/ClickHouse/pull/5087) -([张风啸](https://github.com/AlexZFX)), -[#5068](https://github.com/ClickHouse/ClickHouse/pull/5068) ([never -lee](https://github.com/neverlee)) - -#### Build/Testing/Packaging Improvements -* Print UTF-8 characters properly in `clickhouse-test`. - [#5084](https://github.com/ClickHouse/ClickHouse/pull/5084) -([alexey-milovidov](https://github.com/alexey-milovidov)) -* Add command line parameter for clickhouse-client to always load suggestion - data. [#5102](https://github.com/ClickHouse/ClickHouse/pull/5102) -([alexey-milovidov](https://github.com/alexey-milovidov)) -* Resolve some of PVS-Studio warnings. - [#5082](https://github.com/ClickHouse/ClickHouse/pull/5082) -([alexey-milovidov](https://github.com/alexey-milovidov)) -* Update LZ4 [#5040](https://github.com/ClickHouse/ClickHouse/pull/5040) ([Danila - Kutenin](https://github.com/danlark1)) -* Add gperf to build requirements for upcoming pull request #5030. - [#5110](https://github.com/ClickHouse/ClickHouse/pull/5110) -([proller](https://github.com/proller)) - -## ClickHouse release 19.6 -### ClickHouse release 19.6.3.18, 2019-06-13 - -#### Bug Fixes -* Fixed IN condition pushdown for queries from table functions `mysql` and `odbc` and corresponding table engines. This fixes #3540 and #2384. 
[#5313](https://github.com/ClickHouse/ClickHouse/pull/5313) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Fix deadlock in Zookeeper. [#5297](https://github.com/ClickHouse/ClickHouse/pull/5297) ([github1youlc](https://github.com/github1youlc)) -* Allow quoted decimals in CSV. [#5284](https://github.com/ClickHouse/ClickHouse/pull/5284) ([Artem Zuikov](https://github.com/4ertus2) -* Disallow conversion from float Inf/NaN into Decimals (throw exception). [#5282](https://github.com/ClickHouse/ClickHouse/pull/5282) ([Artem Zuikov](https://github.com/4ertus2)) -* Fix data race in rename query. [#5247](https://github.com/ClickHouse/ClickHouse/pull/5247) ([Winter Zhang](https://github.com/zhang2014)) -* Temporarily disable LFAlloc. Usage of LFAlloc might lead to a lot of MAP_FAILED in allocating UncompressedCache and in a result to crashes of queries at high loaded servers. [cfdba93](https://github.com/ClickHouse/ClickHouse/commit/cfdba938ce22f16efeec504f7f90206a515b1280)([Danila Kutenin](https://github.com/danlark1)) - -### ClickHouse release 19.6.2.11, 2019-05-13 - -#### New Features -* TTL expressions for columns and tables. [#4212](https://github.com/ClickHouse/ClickHouse/pull/4212) ([Anton Popov](https://github.com/CurtizJ)) -* Added support for `brotli` compression for HTTP responses (Accept-Encoding: br) [#4388](https://github.com/ClickHouse/ClickHouse/pull/4388) ([Mikhail](https://github.com/fandyushin)) -* Added new function `isValidUTF8` for checking whether a set of bytes is correctly utf-8 encoded. [#4934](https://github.com/ClickHouse/ClickHouse/pull/4934) ([Danila Kutenin](https://github.com/danlark1)) -* Add new load balancing policy `first_or_random` which sends queries to the first specified host and if it's inaccessible send queries to random hosts of shard. Useful for cross-replication topology setups. [#5012](https://github.com/ClickHouse/ClickHouse/pull/5012) ([nvartolomei](https://github.com/nvartolomei)) - -#### Experimental Features -* Add setting `index_granularity_bytes` (adaptive index granularity) for MergeTree* tables family. [#4826](https://github.com/ClickHouse/ClickHouse/pull/4826) ([alesapin](https://github.com/alesapin)) - -#### Improvements -* Added support for non-constant and negative size and length arguments for function `substringUTF8`. [#4989](https://github.com/ClickHouse/ClickHouse/pull/4989) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Disable push-down to right table in left join, left table in right join, and both tables in full join. This fixes wrong JOIN results in some cases. [#4846](https://github.com/ClickHouse/ClickHouse/pull/4846) ([Ivan](https://github.com/abyss7)) -* `clickhouse-copier`: auto upload task configuration from `--task-file` option [#4876](https://github.com/ClickHouse/ClickHouse/pull/4876) ([proller](https://github.com/proller)) -* Added typos handler for storage factory and table functions factory. [#4891](https://github.com/ClickHouse/ClickHouse/pull/4891) ([Danila Kutenin](https://github.com/danlark1)) -* Support asterisks and qualified asterisks for multiple joins without subqueries [#4898](https://github.com/ClickHouse/ClickHouse/pull/4898) ([Artem Zuikov](https://github.com/4ertus2)) -* Make missing column error message more user friendly. 
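A sketch of the column-level and table-level `TTL` expressions added in 19.6.2.11 above (the `events` table is hypothetical):

```sql
CREATE TABLE events
(
    d Date,
    event String,
    payload String TTL d + INTERVAL 7 DAY   -- column TTL: payload is cleared after a week
)
ENGINE = MergeTree
ORDER BY d
TTL d + INTERVAL 30 DAY;                    -- table TTL: whole rows expire after a month
```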
[#4915](https://github.com/ClickHouse/ClickHouse/pull/4915) ([Artem Zuikov](https://github.com/4ertus2)) - -#### Performance Improvements -* Significant speedup of ASOF JOIN [#4924](https://github.com/ClickHouse/ClickHouse/pull/4924) ([Martijn Bakker](https://github.com/Gladdy)) - -#### Backward Incompatible Changes -* HTTP header `Query-Id` was renamed to `X-ClickHouse-Query-Id` for consistency. [#4972](https://github.com/ClickHouse/ClickHouse/pull/4972) ([Mikhail](https://github.com/fandyushin)) - -#### Bug Fixes -* Fixed potential null pointer dereference in `clickhouse-copier`. [#4900](https://github.com/ClickHouse/ClickHouse/pull/4900) ([proller](https://github.com/proller)) -* Fixed error on query with JOIN + ARRAY JOIN [#4938](https://github.com/ClickHouse/ClickHouse/pull/4938) ([Artem Zuikov](https://github.com/4ertus2)) -* Fixed hanging on start of the server when a dictionary depends on another dictionary via a database with engine=Dictionary. [#4962](https://github.com/ClickHouse/ClickHouse/pull/4962) ([Vitaly Baranov](https://github.com/vitlibar)) -* Partially fix distributed_product_mode = local. It's possible to allow columns of local tables in where/having/order by/... via table aliases. Throw exception if table does not have alias. There's not possible to access to the columns without table aliases yet. [#4986](https://github.com/ClickHouse/ClickHouse/pull/4986) ([Artem Zuikov](https://github.com/4ertus2)) -* Fix potentially wrong result for `SELECT DISTINCT` with `JOIN` [#5001](https://github.com/ClickHouse/ClickHouse/pull/5001) ([Artem Zuikov](https://github.com/4ertus2)) -* Fixed very rare data race condition that could happen when executing a query with UNION ALL involving at least two SELECTs from system.columns, system.tables, system.parts, system.parts_tables or tables of Merge family and performing ALTER of columns of the related tables concurrently. [#5189](https://github.com/ClickHouse/ClickHouse/pull/5189) ([alexey-milovidov](https://github.com/alexey-milovidov)) - -#### Build/Testing/Packaging Improvements -* Fixed test failures when running clickhouse-server on different host [#4713](https://github.com/ClickHouse/ClickHouse/pull/4713) ([Vasily Nemkov](https://github.com/Enmk)) -* clickhouse-test: Disable color control sequences in non tty environment. [#4937](https://github.com/ClickHouse/ClickHouse/pull/4937) ([alesapin](https://github.com/alesapin)) -* clickhouse-test: Allow use any test database (remove `test.` qualification where it possible) [#5008](https://github.com/ClickHouse/ClickHouse/pull/5008) ([proller](https://github.com/proller)) -* Fix ubsan errors [#5037](https://github.com/ClickHouse/ClickHouse/pull/5037) ([Vitaly Baranov](https://github.com/vitlibar)) -* Yandex LFAlloc was added to ClickHouse to allocate MarkCache and UncompressedCache data in different ways to catch segfaults more reliable [#4995](https://github.com/ClickHouse/ClickHouse/pull/4995) ([Danila Kutenin](https://github.com/danlark1)) -* Python util to help with backports and changelogs. 
[#4949](https://github.com/ClickHouse/ClickHouse/pull/4949) ([Ivan](https://github.com/abyss7)) - - -## ClickHouse release 19.5 -### ClickHouse release 19.5.4.22, 2019-05-13 - -#### Bug fixes -* Fixed possible crash in bitmap* functions [#5220](https://github.com/ClickHouse/ClickHouse/pull/5220) [#5228](https://github.com/ClickHouse/ClickHouse/pull/5228) ([Andy Yang](https://github.com/andyyzh)) -* Fixed very rare data race condition that could happen when executing a query with UNION ALL involving at least two SELECTs from system.columns, system.tables, system.parts, system.parts_tables or tables of Merge family and performing ALTER of columns of the related tables concurrently. [#5189](https://github.com/ClickHouse/ClickHouse/pull/5189) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Fixed error `Set for IN is not created yet in case of using single LowCardinality column in the left part of IN`. This error happened if LowCardinality column was the part of primary key. #5031 [#5154](https://github.com/ClickHouse/ClickHouse/pull/5154) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) -* Modification of retention function: If a row satisfies both the first and NTH condition, only the first satisfied condition is added to the data state. Now all conditions that satisfy in a row of data are added to the data state. [#5119](https://github.com/ClickHouse/ClickHouse/pull/5119) ([小路](https://github.com/nicelulu)) - - -### ClickHouse release 19.5.3.8, 2019-04-18 - -#### Bug fixes -* Fixed type of setting `max_partitions_per_insert_block` from boolean to UInt64. [#5028](https://github.com/ClickHouse/ClickHouse/pull/5028) ([Mohammad Hossein Sekhavat](https://github.com/mhsekhavat)) - - -### ClickHouse release 19.5.2.6, 2019-04-15 - -#### New Features - -* [Hyperscan](https://github.com/intel/hyperscan) multiple regular expression matching was added (functions `multiMatchAny`, `multiMatchAnyIndex`, `multiFuzzyMatchAny`, `multiFuzzyMatchAnyIndex`). [#4780](https://github.com/ClickHouse/ClickHouse/pull/4780), [#4841](https://github.com/ClickHouse/ClickHouse/pull/4841) ([Danila Kutenin](https://github.com/danlark1)) -* `multiSearchFirstPosition` function was added. [#4780](https://github.com/ClickHouse/ClickHouse/pull/4780) ([Danila Kutenin](https://github.com/danlark1)) -* Implement the predefined expression filter per row for tables. [#4792](https://github.com/ClickHouse/ClickHouse/pull/4792) ([Ivan](https://github.com/abyss7)) -* A new type of data skipping indices based on bloom filters (can be used for `equal`, `in` and `like` functions). [#4499](https://github.com/ClickHouse/ClickHouse/pull/4499) ([Nikita Vasilev](https://github.com/nikvas0)) -* Added `ASOF JOIN` which allows to run queries that join to the most recent value known. [#4774](https://github.com/ClickHouse/ClickHouse/pull/4774) [#4867](https://github.com/ClickHouse/ClickHouse/pull/4867) [#4863](https://github.com/ClickHouse/ClickHouse/pull/4863) [#4875](https://github.com/ClickHouse/ClickHouse/pull/4875) ([Martijn Bakker](https://github.com/Gladdy), [Artem Zuikov](https://github.com/4ertus2)) -* Rewrite multiple `COMMA JOIN` to `CROSS JOIN`. Then rewrite them to `INNER JOIN` if possible. [#4661](https://github.com/ClickHouse/ClickHouse/pull/4661) ([Artem Zuikov](https://github.com/4ertus2)) - -#### Improvement - -* `topK` and `topKWeighted` now supports custom `loadFactor` (fixes issue [#4252](https://github.com/ClickHouse/ClickHouse/issues/4252)). 
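An illustrative `ASOF JOIN` query for the 19.5.2.6 feature above (the `trades` and `quotes` tables are hypothetical; in the `USING` form the last column is the closest-match column):

```sql
-- For each trade, pick the most recent quote with quotes.ts <= trades.ts for the same symbol.
SELECT symbol, ts, volume, price
FROM trades AS t
ASOF JOIN quotes AS q USING (symbol, ts);
```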
[#4634](https://github.com/ClickHouse/ClickHouse/pull/4634) ([Kirill Danshin](https://github.com/kirillDanshin)) -* Allow to use `parallel_replicas_count > 1` even for tables without sampling (the setting is simply ignored for them). In previous versions it was lead to exception. [#4637](https://github.com/ClickHouse/ClickHouse/pull/4637) ([Alexey Elymanov](https://github.com/digitalist)) -* Support for `CREATE OR REPLACE VIEW`. Allow to create a view or set a new definition in a single statement. [#4654](https://github.com/ClickHouse/ClickHouse/pull/4654) ([Boris Granveaud](https://github.com/bgranvea)) -* `Buffer` table engine now supports `PREWHERE`. [#4671](https://github.com/ClickHouse/ClickHouse/pull/4671) ([Yangkuan Liu](https://github.com/LiuYangkuan)) -* Add ability to start replicated table without metadata in zookeeper in `readonly` mode. [#4691](https://github.com/ClickHouse/ClickHouse/pull/4691) ([alesapin](https://github.com/alesapin)) -* Fixed flicker of progress bar in clickhouse-client. The issue was most noticeable when using `FORMAT Null` with streaming queries. [#4811](https://github.com/ClickHouse/ClickHouse/pull/4811) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Allow to disable functions with `hyperscan` library on per user basis to limit potentially excessive and uncontrolled resource usage. [#4816](https://github.com/ClickHouse/ClickHouse/pull/4816) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Add version number logging in all errors. [#4824](https://github.com/ClickHouse/ClickHouse/pull/4824) ([proller](https://github.com/proller)) -* Added restriction to the `multiMatch` functions which requires string size to fit into `unsigned int`. Also added the number of arguments limit to the `multiSearch` functions. [#4834](https://github.com/ClickHouse/ClickHouse/pull/4834) ([Danila Kutenin](https://github.com/danlark1)) -* Improved usage of scratch space and error handling in Hyperscan. [#4866](https://github.com/ClickHouse/ClickHouse/pull/4866) ([Danila Kutenin](https://github.com/danlark1)) -* Fill `system.graphite_detentions` from a table config of `*GraphiteMergeTree` engine tables. [#4584](https://github.com/ClickHouse/ClickHouse/pull/4584) ([Mikhail f. Shiryaev](https://github.com/Felixoid)) -* Rename `trigramDistance` function to `ngramDistance` and add more functions with `CaseInsensitive` and `UTF`. [#4602](https://github.com/ClickHouse/ClickHouse/pull/4602) ([Danila Kutenin](https://github.com/danlark1)) -* Improved data skipping indices calculation. [#4640](https://github.com/ClickHouse/ClickHouse/pull/4640) ([Nikita Vasilev](https://github.com/nikvas0)) -* Keep ordinary, `DEFAULT`, `MATERIALIZED` and `ALIAS` columns in a single list (fixes issue [#2867](https://github.com/ClickHouse/ClickHouse/issues/2867)). [#4707](https://github.com/ClickHouse/ClickHouse/pull/4707) ([Alex Zatelepin](https://github.com/ztlpn)) - -#### Bug Fix - -* Avoid `std::terminate` in case of memory allocation failure. Now `std::bad_alloc` exception is thrown as expected. [#4665](https://github.com/ClickHouse/ClickHouse/pull/4665) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Fixes capnproto reading from buffer. Sometimes files wasn't loaded successfully by HTTP. [#4674](https://github.com/ClickHouse/ClickHouse/pull/4674) ([Vladislav](https://github.com/smirnov-vs)) -* Fix error `Unknown log entry type: 0` after `OPTIMIZE TABLE FINAL` query. 
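A minimal sketch of the `CREATE OR REPLACE VIEW` support listed above (the `events` table is hypothetical):

```sql
CREATE OR REPLACE VIEW top_events AS
    SELECT event, count() AS c
    FROM events
    GROUP BY event
    ORDER BY c DESC
    LIMIT 10;

-- Running the statement again with a new SELECT replaces the definition in a single step.
CREATE OR REPLACE VIEW top_events AS
    SELECT event, uniq(user_id) AS users
    FROM events
    GROUP BY event
    ORDER BY users DESC
    LIMIT 10;
```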
[#4683](https://github.com/ClickHouse/ClickHouse/pull/4683) ([Amos Bird](https://github.com/amosbird)) -* Wrong arguments to `hasAny` or `hasAll` functions may lead to segfault. [#4698](https://github.com/ClickHouse/ClickHouse/pull/4698) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Deadlock may happen while executing `DROP DATABASE dictionary` query. [#4701](https://github.com/ClickHouse/ClickHouse/pull/4701) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Fix undefined behavior in `median` and `quantile` functions. [#4702](https://github.com/ClickHouse/ClickHouse/pull/4702) ([hcz](https://github.com/hczhcz)) -* Fix compression level detection when `network_compression_method` in lowercase. Broken in v19.1. [#4706](https://github.com/ClickHouse/ClickHouse/pull/4706) ([proller](https://github.com/proller)) -* Fixed ignorance of `UTC` setting (fixes issue [#4658](https://github.com/ClickHouse/ClickHouse/issues/4658)). [#4718](https://github.com/ClickHouse/ClickHouse/pull/4718) ([proller](https://github.com/proller)) -* Fix `histogram` function behaviour with `Distributed` tables. [#4741](https://github.com/ClickHouse/ClickHouse/pull/4741) ([olegkv](https://github.com/olegkv)) -* Fixed tsan report `destroy of a locked mutex`. [#4742](https://github.com/ClickHouse/ClickHouse/pull/4742) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Fixed TSan report on shutdown due to race condition in system logs usage. Fixed potential use-after-free on shutdown when part_log is enabled. [#4758](https://github.com/ClickHouse/ClickHouse/pull/4758) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Fix recheck parts in `ReplicatedMergeTreeAlterThread` in case of error. [#4772](https://github.com/ClickHouse/ClickHouse/pull/4772) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) -* Arithmetic operations on intermediate aggregate function states were not working for constant arguments (such as subquery results). [#4776](https://github.com/ClickHouse/ClickHouse/pull/4776) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Always backquote column names in metadata. Otherwise it's impossible to create a table with column named `index` (server won't restart due to malformed `ATTACH` query in metadata). [#4782](https://github.com/ClickHouse/ClickHouse/pull/4782) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Fix crash in `ALTER ... MODIFY ORDER BY` on `Distributed` table. [#4790](https://github.com/ClickHouse/ClickHouse/pull/4790) ([TCeason](https://github.com/TCeason)) -* Fix segfault in `JOIN ON` with enabled `enable_optimize_predicate_expression`. [#4794](https://github.com/ClickHouse/ClickHouse/pull/4794) ([Winter Zhang](https://github.com/zhang2014)) -* Fix bug with adding an extraneous row after consuming a protobuf message from Kafka. [#4808](https://github.com/ClickHouse/ClickHouse/pull/4808) ([Vitaly Baranov](https://github.com/vitlibar)) -* Fix crash of `JOIN` on not-nullable vs nullable column. Fix `NULLs` in right keys in `ANY JOIN` + `join_use_nulls`. [#4815](https://github.com/ClickHouse/ClickHouse/pull/4815) ([Artem Zuikov](https://github.com/4ertus2)) -* Fix segmentation fault in `clickhouse-copier`. [#4835](https://github.com/ClickHouse/ClickHouse/pull/4835) ([proller](https://github.com/proller)) -* Fixed race condition in `SELECT` from `system.tables` if the table is renamed or altered concurrently. 
[#4836](https://github.com/ClickHouse/ClickHouse/pull/4836) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Fixed data race when fetching data part that is already obsolete. [#4839](https://github.com/ClickHouse/ClickHouse/pull/4839) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Fixed rare data race that can happen during `RENAME` table of MergeTree family. [#4844](https://github.com/ClickHouse/ClickHouse/pull/4844) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Fixed segmentation fault in function `arrayIntersect`. Segmentation fault could happen if function was called with mixed constant and ordinary arguments. [#4847](https://github.com/ClickHouse/ClickHouse/pull/4847) ([Lixiang Qian](https://github.com/fancyqlx)) -* Fixed reading from `Array(LowCardinality)` column in rare case when column contained a long sequence of empty arrays. [#4850](https://github.com/ClickHouse/ClickHouse/pull/4850) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) -* Fix crash in `FULL/RIGHT JOIN` when we joining on nullable vs not nullable. [#4855](https://github.com/ClickHouse/ClickHouse/pull/4855) ([Artem Zuikov](https://github.com/4ertus2)) -* Fix `No message received` exception while fetching parts between replicas. [#4856](https://github.com/ClickHouse/ClickHouse/pull/4856) ([alesapin](https://github.com/alesapin)) -* Fixed `arrayIntersect` function wrong result in case of several repeated values in single array. [#4871](https://github.com/ClickHouse/ClickHouse/pull/4871) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) -* Fix a race condition during concurrent `ALTER COLUMN` queries that could lead to a server crash (fixes issue [#3421](https://github.com/ClickHouse/ClickHouse/issues/3421)). [#4592](https://github.com/ClickHouse/ClickHouse/pull/4592) ([Alex Zatelepin](https://github.com/ztlpn)) -* Fix incorrect result in `FULL/RIGHT JOIN` with const column. [#4723](https://github.com/ClickHouse/ClickHouse/pull/4723) ([Artem Zuikov](https://github.com/4ertus2)) -* Fix duplicates in `GLOBAL JOIN` with asterisk. [#4705](https://github.com/ClickHouse/ClickHouse/pull/4705) ([Artem Zuikov](https://github.com/4ertus2)) -* Fix parameter deduction in `ALTER MODIFY` of column `CODEC` when column type is not specified. [#4883](https://github.com/ClickHouse/ClickHouse/pull/4883) ([alesapin](https://github.com/alesapin)) -* Functions `cutQueryStringAndFragment()` and `queryStringAndFragment()` now works correctly when `URL` contains a fragment and no query. [#4894](https://github.com/ClickHouse/ClickHouse/pull/4894) ([Vitaly Baranov](https://github.com/vitlibar)) -* Fix rare bug when setting `min_bytes_to_use_direct_io` is greater than zero, which occures when thread have to seek backward in column file. [#4897](https://github.com/ClickHouse/ClickHouse/pull/4897) ([alesapin](https://github.com/alesapin)) -* Fix wrong argument types for aggregate functions with `LowCardinality` arguments (fixes issue [#4919](https://github.com/ClickHouse/ClickHouse/issues/4919)). [#4922](https://github.com/ClickHouse/ClickHouse/pull/4922) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) -* Fix wrong name qualification in `GLOBAL JOIN`. [#4969](https://github.com/ClickHouse/ClickHouse/pull/4969) ([Artem Zuikov](https://github.com/4ertus2)) -* Fix function `toISOWeek` result for year 1970. 
[#4988](https://github.com/ClickHouse/ClickHouse/pull/4988) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Fix `DROP`, `TRUNCATE` and `OPTIMIZE` queries duplication, when executed on `ON CLUSTER` for `ReplicatedMergeTree*` tables family. [#4991](https://github.com/ClickHouse/ClickHouse/pull/4991) ([alesapin](https://github.com/alesapin)) - -#### Backward Incompatible Change - -* Rename setting `insert_sample_with_metadata` to setting `input_format_defaults_for_omitted_fields`. [#4771](https://github.com/ClickHouse/ClickHouse/pull/4771) ([Artem Zuikov](https://github.com/4ertus2)) -* Added setting `max_partitions_per_insert_block` (with value 100 by default). If inserted block contains larger number of partitions, an exception is thrown. Set it to 0 if you want to remove the limit (not recommended). [#4845](https://github.com/ClickHouse/ClickHouse/pull/4845) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Multi-search functions were renamed (`multiPosition` to `multiSearchAllPositions`, `multiSearch` to `multiSearchAny`, `firstMatch` to `multiSearchFirstIndex`). [#4780](https://github.com/ClickHouse/ClickHouse/pull/4780) ([Danila Kutenin](https://github.com/danlark1)) - -#### Performance Improvement - -* Optimize Volnitsky searcher by inlining, giving about 5-10% search improvement for queries with many needles or many similar bigrams. [#4862](https://github.com/ClickHouse/ClickHouse/pull/4862) ([Danila Kutenin](https://github.com/danlark1)) -* Fix performance issue when setting `use_uncompressed_cache` is greater than zero, which appeared when all read data contained in cache. [#4913](https://github.com/ClickHouse/ClickHouse/pull/4913) ([alesapin](https://github.com/alesapin)) - - -#### Build/Testing/Packaging Improvement - -* Hardening debug build: more granular memory mappings and ASLR; add memory protection for mark cache and index. This allows to find more memory stomping bugs in case when ASan and MSan cannot do it. [#4632](https://github.com/ClickHouse/ClickHouse/pull/4632) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Add support for cmake variables `ENABLE_PROTOBUF`, `ENABLE_PARQUET` and `ENABLE_BROTLI` which allows to enable/disable the above features (same as we can do for librdkafka, mysql, etc). [#4669](https://github.com/ClickHouse/ClickHouse/pull/4669) ([Silviu Caragea](https://github.com/silviucpp)) -* Add ability to print process list and stacktraces of all threads if some queries are hung after test run. [#4675](https://github.com/ClickHouse/ClickHouse/pull/4675) ([alesapin](https://github.com/alesapin)) -* Add retries on `Connection loss` error in `clickhouse-test`. [#4682](https://github.com/ClickHouse/ClickHouse/pull/4682) ([alesapin](https://github.com/alesapin)) -* Add freebsd build with vagrant and build with thread sanitizer to packager script. [#4712](https://github.com/ClickHouse/ClickHouse/pull/4712) [#4748](https://github.com/ClickHouse/ClickHouse/pull/4748) ([alesapin](https://github.com/alesapin)) -* Now user asked for password for user `'default'` during installation. [#4725](https://github.com/ClickHouse/ClickHouse/pull/4725) ([proller](https://github.com/proller)) -* Suppress warning in `rdkafka` library. [#4740](https://github.com/ClickHouse/ClickHouse/pull/4740) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Allow ability to build without ssl. 
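As a sketch of working with the new `max_partitions_per_insert_block` setting above (table names are hypothetical):

```sql
-- Default is 100; raise it for a one-off backfill that legitimately touches many partitions.
SET max_partitions_per_insert_block = 500;
INSERT INTO events SELECT * FROM events_staging;
```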
[#4750](https://github.com/ClickHouse/ClickHouse/pull/4750) ([proller](https://github.com/proller)) -* Add a way to launch clickhouse-server image from a custom user. [#4753](https://github.com/ClickHouse/ClickHouse/pull/4753) ([Mikhail f. Shiryaev](https://github.com/Felixoid)) -* Upgrade contrib boost to 1.69. [#4793](https://github.com/ClickHouse/ClickHouse/pull/4793) ([proller](https://github.com/proller)) -* Disable usage of `mremap` when compiled with Thread Sanitizer. Surprisingly enough, TSan does not intercept `mremap` (though it does intercept `mmap`, `munmap`) that leads to false positives. Fixed TSan report in stateful tests. [#4859](https://github.com/ClickHouse/ClickHouse/pull/4859) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Add test checking using format schema via HTTP interface. [#4864](https://github.com/ClickHouse/ClickHouse/pull/4864) ([Vitaly Baranov](https://github.com/vitlibar)) - -## ClickHouse release 19.4 -### ClickHouse release 19.4.4.33, 2019-04-17 - -#### Bug Fixes - -* Avoid `std::terminate` in case of memory allocation failure. Now `std::bad_alloc` exception is thrown as expected. [#4665](https://github.com/ClickHouse/ClickHouse/pull/4665) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Fixes capnproto reading from buffer. Sometimes files wasn't loaded successfully by HTTP. [#4674](https://github.com/ClickHouse/ClickHouse/pull/4674) ([Vladislav](https://github.com/smirnov-vs)) -* Fix error `Unknown log entry type: 0` after `OPTIMIZE TABLE FINAL` query. [#4683](https://github.com/ClickHouse/ClickHouse/pull/4683) ([Amos Bird](https://github.com/amosbird)) -* Wrong arguments to `hasAny` or `hasAll` functions may lead to segfault. [#4698](https://github.com/ClickHouse/ClickHouse/pull/4698) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Deadlock may happen while executing `DROP DATABASE dictionary` query. [#4701](https://github.com/ClickHouse/ClickHouse/pull/4701) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Fix undefined behavior in `median` and `quantile` functions. [#4702](https://github.com/ClickHouse/ClickHouse/pull/4702) ([hcz](https://github.com/hczhcz)) -* Fix compression level detection when `network_compression_method` in lowercase. Broken in v19.1. [#4706](https://github.com/ClickHouse/ClickHouse/pull/4706) ([proller](https://github.com/proller)) -* Fixed ignorance of `UTC` setting (fixes issue [#4658](https://github.com/ClickHouse/ClickHouse/issues/4658)). [#4718](https://github.com/ClickHouse/ClickHouse/pull/4718) ([proller](https://github.com/proller)) -* Fix `histogram` function behaviour with `Distributed` tables. [#4741](https://github.com/ClickHouse/ClickHouse/pull/4741) ([olegkv](https://github.com/olegkv)) -* Fixed tsan report `destroy of a locked mutex`. [#4742](https://github.com/ClickHouse/ClickHouse/pull/4742) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Fixed TSan report on shutdown due to race condition in system logs usage. Fixed potential use-after-free on shutdown when part_log is enabled. [#4758](https://github.com/ClickHouse/ClickHouse/pull/4758) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Fix recheck parts in `ReplicatedMergeTreeAlterThread` in case of error. [#4772](https://github.com/ClickHouse/ClickHouse/pull/4772) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) -* Arithmetic operations on intermediate aggregate function states were not working for constant arguments (such as subquery results). 
[#4776](https://github.com/ClickHouse/ClickHouse/pull/4776) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Always backquote column names in metadata. Otherwise it's impossible to create a table with column named `index` (server won't restart due to malformed `ATTACH` query in metadata). [#4782](https://github.com/ClickHouse/ClickHouse/pull/4782) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Fix crash in `ALTER ... MODIFY ORDER BY` on `Distributed` table. [#4790](https://github.com/ClickHouse/ClickHouse/pull/4790) ([TCeason](https://github.com/TCeason)) -* Fix segfault in `JOIN ON` with enabled `enable_optimize_predicate_expression`. [#4794](https://github.com/ClickHouse/ClickHouse/pull/4794) ([Winter Zhang](https://github.com/zhang2014)) -* Fix bug with adding an extraneous row after consuming a protobuf message from Kafka. [#4808](https://github.com/ClickHouse/ClickHouse/pull/4808) ([Vitaly Baranov](https://github.com/vitlibar)) -* Fix segmentation fault in `clickhouse-copier`. [#4835](https://github.com/ClickHouse/ClickHouse/pull/4835) ([proller](https://github.com/proller)) -* Fixed race condition in `SELECT` from `system.tables` if the table is renamed or altered concurrently. [#4836](https://github.com/ClickHouse/ClickHouse/pull/4836) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Fixed data race when fetching data part that is already obsolete. [#4839](https://github.com/ClickHouse/ClickHouse/pull/4839) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Fixed rare data race that can happen during `RENAME` table of MergeTree family. [#4844](https://github.com/ClickHouse/ClickHouse/pull/4844) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Fixed segmentation fault in function `arrayIntersect`. Segmentation fault could happen if function was called with mixed constant and ordinary arguments. [#4847](https://github.com/ClickHouse/ClickHouse/pull/4847) ([Lixiang Qian](https://github.com/fancyqlx)) -* Fixed reading from `Array(LowCardinality)` column in rare case when column contained a long sequence of empty arrays. [#4850](https://github.com/ClickHouse/ClickHouse/pull/4850) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) -* Fix `No message received` exception while fetching parts between replicas. [#4856](https://github.com/ClickHouse/ClickHouse/pull/4856) ([alesapin](https://github.com/alesapin)) -* Fixed `arrayIntersect` function wrong result in case of several repeated values in single array. [#4871](https://github.com/ClickHouse/ClickHouse/pull/4871) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) -* Fix a race condition during concurrent `ALTER COLUMN` queries that could lead to a server crash (fixes issue [#3421](https://github.com/ClickHouse/ClickHouse/issues/3421)). [#4592](https://github.com/ClickHouse/ClickHouse/pull/4592) ([Alex Zatelepin](https://github.com/ztlpn)) -* Fix parameter deduction in `ALTER MODIFY` of column `CODEC` when column type is not specified. [#4883](https://github.com/ClickHouse/ClickHouse/pull/4883) ([alesapin](https://github.com/alesapin)) -* Functions `cutQueryStringAndFragment()` and `queryStringAndFragment()` now works correctly when `URL` contains a fragment and no query. [#4894](https://github.com/ClickHouse/ClickHouse/pull/4894) ([Vitaly Baranov](https://github.com/vitlibar)) -* Fix rare bug when setting `min_bytes_to_use_direct_io` is greater than zero, which occures when thread have to seek backward in column file. 
[#4897](https://github.com/ClickHouse/ClickHouse/pull/4897) ([alesapin](https://github.com/alesapin)) -* Fix wrong argument types for aggregate functions with `LowCardinality` arguments (fixes issue [#4919](https://github.com/ClickHouse/ClickHouse/issues/4919)). [#4922](https://github.com/ClickHouse/ClickHouse/pull/4922) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) -* Fix function `toISOWeek` result for year 1970. [#4988](https://github.com/ClickHouse/ClickHouse/pull/4988) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Fix `DROP`, `TRUNCATE` and `OPTIMIZE` queries duplication, when executed on `ON CLUSTER` for `ReplicatedMergeTree*` tables family. [#4991](https://github.com/ClickHouse/ClickHouse/pull/4991) ([alesapin](https://github.com/alesapin)) - -#### Improvements - -* Keep ordinary, `DEFAULT`, `MATERIALIZED` and `ALIAS` columns in a single list (fixes issue [#2867](https://github.com/ClickHouse/ClickHouse/issues/2867)). [#4707](https://github.com/ClickHouse/ClickHouse/pull/4707) ([Alex Zatelepin](https://github.com/ztlpn)) - -### ClickHouse release 19.4.3.11, 2019-04-02 - -#### Bug Fixes - -* Fix crash in `FULL/RIGHT JOIN` when we joining on nullable vs not nullable. [#4855](https://github.com/ClickHouse/ClickHouse/pull/4855) ([Artem Zuikov](https://github.com/4ertus2)) -* Fix segmentation fault in `clickhouse-copier`. [#4835](https://github.com/ClickHouse/ClickHouse/pull/4835) ([proller](https://github.com/proller)) - -#### Build/Testing/Packaging Improvement - -* Add a way to launch clickhouse-server image from a custom user. [#4753](https://github.com/ClickHouse/ClickHouse/pull/4753) ([Mikhail f. Shiryaev](https://github.com/Felixoid)) - -### ClickHouse release 19.4.2.7, 2019-03-30 - -#### Bug Fixes -* Fixed reading from `Array(LowCardinality)` column in rare case when column contained a long sequence of empty arrays. [#4850](https://github.com/ClickHouse/ClickHouse/pull/4850) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) - -### ClickHouse release 19.4.1.3, 2019-03-19 - -#### Bug Fixes -* Fixed remote queries which contain both `LIMIT BY` and `LIMIT`. Previously, if `LIMIT BY` and `LIMIT` were used for remote query, `LIMIT` could happen before `LIMIT BY`, which led to too filtered result. [#4708](https://github.com/ClickHouse/ClickHouse/pull/4708) ([Constantin S. Pan](https://github.com/kvap)) - -### ClickHouse release 19.4.0.49, 2019-03-09 - -#### New Features -* Added full support for `Protobuf` format (input and output, nested data structures). [#4174](https://github.com/ClickHouse/ClickHouse/pull/4174) [#4493](https://github.com/ClickHouse/ClickHouse/pull/4493) ([Vitaly Baranov](https://github.com/vitlibar)) -* Added bitmap functions with Roaring Bitmaps. [#4207](https://github.com/ClickHouse/ClickHouse/pull/4207) ([Andy Yang](https://github.com/andyyzh)) [#4568](https://github.com/ClickHouse/ClickHouse/pull/4568) ([Vitaly Baranov](https://github.com/vitlibar)) -* Parquet format support. [#4448](https://github.com/ClickHouse/ClickHouse/pull/4448) ([proller](https://github.com/proller)) -* N-gram distance was added for fuzzy string comparison. It is similar to q-gram metrics in R language. [#4466](https://github.com/ClickHouse/ClickHouse/pull/4466) ([Danila Kutenin](https://github.com/danlark1)) -* Combine rules for graphite rollup from dedicated aggregation and retention patterns. [#4426](https://github.com/ClickHouse/ClickHouse/pull/4426) ([Mikhail f. 
-* Added `max_execution_speed` and `max_execution_speed_bytes` to limit resource usage. Added the `min_execution_speed_bytes` setting to complement `min_execution_speed`. [#4430](https://github.com/ClickHouse/ClickHouse/pull/4430) ([Winter Zhang](https://github.com/zhang2014))
-* Implemented function `flatten`. [#4555](https://github.com/ClickHouse/ClickHouse/pull/4555) [#4409](https://github.com/ClickHouse/ClickHouse/pull/4409) ([alexey-milovidov](https://github.com/alexey-milovidov), [kzon](https://github.com/kzon))
-* Added functions `arrayEnumerateDenseRanked` and `arrayEnumerateUniqRanked` (like `arrayEnumerateUniq`, but allowing to fine-tune the array depth to look inside multidimensional arrays). [#4475](https://github.com/ClickHouse/ClickHouse/pull/4475) ([proller](https://github.com/proller)) [#4601](https://github.com/ClickHouse/ClickHouse/pull/4601) ([alexey-milovidov](https://github.com/alexey-milovidov))
-* Multiple JOINs with some restrictions: no asterisks, no complex aliases in ON/WHERE/GROUP BY/... [#4462](https://github.com/ClickHouse/ClickHouse/pull/4462) ([Artem Zuikov](https://github.com/4ertus2))
-
-#### Bug Fixes
-* This release also contains all bug fixes from 19.3 and 19.1.
-* Fixed a bug in data skipping indices: the order of granules after INSERT was incorrect. [#4407](https://github.com/ClickHouse/ClickHouse/pull/4407) ([Nikita Vasilev](https://github.com/nikvas0))
-* Fixed the `set` index for `Nullable` and `LowCardinality` columns. Previously, a `set` index with a `Nullable` or `LowCardinality` column led to the error `Data type must be deserialized with multiple streams` while selecting. [#4594](https://github.com/ClickHouse/ClickHouse/pull/4594) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
-* Correctly set update_time on a full `executable` dictionary update. [#4551](https://github.com/ClickHouse/ClickHouse/pull/4551) ([Tema Novikov](https://github.com/temoon))
-* Fix the broken progress bar in 19.3. [#4627](https://github.com/ClickHouse/ClickHouse/pull/4627) ([filimonov](https://github.com/filimonov))
-* Fixed inconsistent values of MemoryTracker when a memory region was shrunk, in certain cases. [#4619](https://github.com/ClickHouse/ClickHouse/pull/4619) ([alexey-milovidov](https://github.com/alexey-milovidov))
-* Fixed undefined behaviour in ThreadPool. [#4612](https://github.com/ClickHouse/ClickHouse/pull/4612) ([alexey-milovidov](https://github.com/alexey-milovidov))
-* Fixed a very rare crash with the message `mutex lock failed: Invalid argument` that could happen when a MergeTree table was dropped concurrently with a SELECT. [#4608](https://github.com/ClickHouse/ClickHouse/pull/4608) ([Alex Zatelepin](https://github.com/ztlpn))
-* ODBC driver compatibility with the `LowCardinality` data type. [#4381](https://github.com/ClickHouse/ClickHouse/pull/4381) ([proller](https://github.com/proller))
-* FreeBSD: Fixup for the `AIOcontextPool: Found io_event with unknown id 0` error. [#4438](https://github.com/ClickHouse/ClickHouse/pull/4438) ([urgordeadbeef](https://github.com/urgordeadbeef))
-* The `system.part_log` table was created regardless of configuration. [#4483](https://github.com/ClickHouse/ClickHouse/pull/4483) ([alexey-milovidov](https://github.com/alexey-milovidov))
-* Fix undefined behaviour in the `dictIsIn` function for cache dictionaries. [#4515](https://github.com/ClickHouse/ClickHouse/pull/4515) ([alesapin](https://github.com/alesapin))
-* Fixed a deadlock when a SELECT query locks the same table multiple times (e.g. from different threads or when executing multiple subqueries) and there is a concurrent DDL query. [#4535](https://github.com/ClickHouse/ClickHouse/pull/4535) ([Alex Zatelepin](https://github.com/ztlpn))
-* Disable `compile_expressions` by default until we get our own `llvm` contrib and can test it with `clang` and `asan`. [#4579](https://github.com/ClickHouse/ClickHouse/pull/4579) ([alesapin](https://github.com/alesapin))
-* Prevent `std::terminate` when `invalidate_query` for a `clickhouse` external dictionary source has returned a wrong result set (empty, or more than one row, or more than one column). Fixed an issue where the `invalidate_query` was performed every five seconds regardless of the `lifetime`. [#4583](https://github.com/ClickHouse/ClickHouse/pull/4583) ([alexey-milovidov](https://github.com/alexey-milovidov))
-* Avoid a deadlock when the `invalidate_query` for a dictionary with `clickhouse` source involved the `system.dictionaries` table or the `Dictionaries` database (a rare case). [#4599](https://github.com/ClickHouse/ClickHouse/pull/4599) ([alexey-milovidov](https://github.com/alexey-milovidov))
-* Fixes for CROSS JOIN with empty WHERE. [#4598](https://github.com/ClickHouse/ClickHouse/pull/4598) ([Artem Zuikov](https://github.com/4ertus2))
-* Fixed segfault in function "replicate" when a constant argument is passed. [#4603](https://github.com/ClickHouse/ClickHouse/pull/4603) ([alexey-milovidov](https://github.com/alexey-milovidov))
-* Fix lambda function with predicate optimizer. [#4408](https://github.com/ClickHouse/ClickHouse/pull/4408) ([Winter Zhang](https://github.com/zhang2014))
-* Multiple fixes for multiple JOINs. [#4595](https://github.com/ClickHouse/ClickHouse/pull/4595) ([Artem Zuikov](https://github.com/4ertus2))
-
-#### Improvements
-* Support aliases in the JOIN ON section for right-table columns. [#4412](https://github.com/ClickHouse/ClickHouse/pull/4412) ([Artem Zuikov](https://github.com/4ertus2))
-* The result of multiple JOINs needs correct result names to be used in subselects. Flat aliases are replaced with source names in the result. [#4474](https://github.com/ClickHouse/ClickHouse/pull/4474) ([Artem Zuikov](https://github.com/4ertus2))
-* Improve push-down logic for joined statements. [#4387](https://github.com/ClickHouse/ClickHouse/pull/4387) ([Ivan](https://github.com/abyss7))
-
-#### Performance Improvements
-* Improved heuristics of the "move to PREWHERE" optimization. [#4405](https://github.com/ClickHouse/ClickHouse/pull/4405) ([alexey-milovidov](https://github.com/alexey-milovidov))
-* Use proper lookup tables that use the HashTable API for 8-bit and 16-bit keys. [#4536](https://github.com/ClickHouse/ClickHouse/pull/4536) ([Amos Bird](https://github.com/amosbird))
-* Improved performance of string comparison. [#4564](https://github.com/ClickHouse/ClickHouse/pull/4564) ([alexey-milovidov](https://github.com/alexey-milovidov))
-* Clean up the distributed DDL queue in a separate thread so that it doesn't slow down the main loop that processes distributed DDL tasks. [#4502](https://github.com/ClickHouse/ClickHouse/pull/4502) ([Alex Zatelepin](https://github.com/ztlpn))
-* When `min_bytes_to_use_direct_io` is set to 1, not every file was opened with O_DIRECT mode because the data size to read was sometimes underestimated by the size of one compressed block. [#4526](https://github.com/ClickHouse/ClickHouse/pull/4526) ([alexey-milovidov](https://github.com/alexey-milovidov))
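To illustrate the "move to PREWHERE" heuristic mentioned above: with a selective condition on a cheap column, the filter can be evaluated before the remaining columns are read. A minimal sketch (the `hits` table and its columns are hypothetical; the automatic move is governed by the `optimize_move_to_prewhere` setting):

```sql
-- The optimizer may move the selective CounterID condition to PREWHERE on its own;
-- it can also be written explicitly, as here.
SELECT URL, Title
FROM hits
PREWHERE CounterID = 62
WHERE notEmpty(Title);
```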
-
-#### Build/Testing/Packaging Improvement
-* Added support for clang-9. [#4604](https://github.com/ClickHouse/ClickHouse/pull/4604) ([alexey-milovidov](https://github.com/alexey-milovidov))
-* Fix wrong `__asm__` instructions (again). [#4621](https://github.com/ClickHouse/ClickHouse/pull/4621) ([Konstantin Podshumok](https://github.com/podshumok))
-* Add the ability to specify settings for `clickhouse-performance-test` from the command line. [#4437](https://github.com/ClickHouse/ClickHouse/pull/4437) ([alesapin](https://github.com/alesapin))
-* Add dictionary tests to integration tests. [#4477](https://github.com/ClickHouse/ClickHouse/pull/4477) ([alesapin](https://github.com/alesapin))
-* Added queries from the benchmark on the website to automated performance tests. [#4496](https://github.com/ClickHouse/ClickHouse/pull/4496) ([alexey-milovidov](https://github.com/alexey-milovidov))
-* `xxhash.h` does not exist in external lz4 because it is an implementation detail and its symbols are namespaced with the `XXH_NAMESPACE` macro. When lz4 is external, xxHash has to be external too, and the dependents have to link to it. [#4495](https://github.com/ClickHouse/ClickHouse/pull/4495) ([Orivej Desh](https://github.com/orivej))
-* Fixed a case when the `quantileTiming` aggregate function could be called with a negative or floating-point argument (this fixes a fuzz test with the undefined behaviour sanitizer). [#4506](https://github.com/ClickHouse/ClickHouse/pull/4506) ([alexey-milovidov](https://github.com/alexey-milovidov))
-* Spelling error correction. [#4531](https://github.com/ClickHouse/ClickHouse/pull/4531) ([sdk2](https://github.com/sdk2))
-* Fix compilation on Mac. [#4371](https://github.com/ClickHouse/ClickHouse/pull/4371) ([Vitaly Baranov](https://github.com/vitlibar))
-* Build fixes for FreeBSD and various unusual build configurations. [#4444](https://github.com/ClickHouse/ClickHouse/pull/4444) ([proller](https://github.com/proller))
-
-## ClickHouse release 19.3
-### ClickHouse release 19.3.9.1, 2019-04-02
-
-#### Bug Fixes
-
-* Fix crash in `FULL/RIGHT JOIN` when joining on nullable vs. non-nullable columns. [#4855](https://github.com/ClickHouse/ClickHouse/pull/4855) ([Artem Zuikov](https://github.com/4ertus2))
-* Fix segmentation fault in `clickhouse-copier`. [#4835](https://github.com/ClickHouse/ClickHouse/pull/4835) ([proller](https://github.com/proller))
-* Fixed reading from an `Array(LowCardinality)` column in a rare case when the column contained a long sequence of empty arrays. [#4850](https://github.com/ClickHouse/ClickHouse/pull/4850) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
-
-#### Build/Testing/Packaging Improvement
-
-* Add a way to launch the clickhouse-server image under a custom user. [#4753](https://github.com/ClickHouse/ClickHouse/pull/4753) ([Mikhail f. Shiryaev](https://github.com/Felixoid))
-
-
-### ClickHouse release 19.3.7, 2019-03-12
-
-#### Bug fixes
-
-* Fixed error in #3920. This error manifests itself as random cache corruption (messages `Unknown codec family code`, `Cannot seek through file`) and segfaults. This bug first appeared in version 19.1 and is present in versions up to 19.1.10 and 19.3.6. [#4623](https://github.com/ClickHouse/ClickHouse/pull/4623) ([alexey-milovidov](https://github.com/alexey-milovidov))
-
-
-### ClickHouse release 19.3.6, 2019-03-02
-
-#### Bug fixes
-
-* When there are more than 1000 threads in a thread pool, `std::terminate` may happen on thread exit. [Azat Khuzhin](https://github.com/azat) [#4485](https://github.com/ClickHouse/ClickHouse/pull/4485) [#4505](https://github.com/ClickHouse/ClickHouse/pull/4505) ([alexey-milovidov](https://github.com/alexey-milovidov))
-* Now it's possible to create `ReplicatedMergeTree*` tables with comments on columns without defaults, and tables with column codecs without comments and defaults. Also fixed comparison of codecs. [#4523](https://github.com/ClickHouse/ClickHouse/pull/4523) ([alesapin](https://github.com/alesapin))
-* Fixed crash on JOIN with an array or tuple. [#4552](https://github.com/ClickHouse/ClickHouse/pull/4552) ([Artem Zuikov](https://github.com/4ertus2))
-* Fixed crash in clickhouse-copier with the message `ThreadStatus not created`. [#4540](https://github.com/ClickHouse/ClickHouse/pull/4540) ([Artem Zuikov](https://github.com/4ertus2))
-* Fixed hangup on server shutdown if distributed DDLs were used. [#4472](https://github.com/ClickHouse/ClickHouse/pull/4472) ([Alex Zatelepin](https://github.com/ztlpn))
-* Incorrect column numbers were printed in the error message about text format parsing for columns with numbers greater than 10. [#4484](https://github.com/ClickHouse/ClickHouse/pull/4484) ([alexey-milovidov](https://github.com/alexey-milovidov))
-
-#### Build/Testing/Packaging Improvements
-
-* Fixed build with AVX enabled. [#4527](https://github.com/ClickHouse/ClickHouse/pull/4527) ([alexey-milovidov](https://github.com/alexey-milovidov))
-* Enable extended accounting and IO accounting based on a known-good version instead of the kernel under which it is compiled. [#4541](https://github.com/ClickHouse/ClickHouse/pull/4541) ([nvartolomei](https://github.com/nvartolomei))
-* Allow skipping the setting of `core_dump.size_limit`, and warn instead of throwing if setting the limit fails. [#4473](https://github.com/ClickHouse/ClickHouse/pull/4473) ([proller](https://github.com/proller))
-* Removed the `inline` tags of `void readBinary(...)` in `Field.cpp`. Also merged redundant `namespace DB` blocks. [#4530](https://github.com/ClickHouse/ClickHouse/pull/4530) ([hcz](https://github.com/hczhcz))
-
-
-### ClickHouse release 19.3.5, 2019-02-21
-
-#### Bug fixes
-* Fixed a bug with processing of large HTTP insert queries. [#4454](https://github.com/ClickHouse/ClickHouse/pull/4454) ([alesapin](https://github.com/alesapin))
-* Fixed backward incompatibility with old versions due to a wrong implementation of the `send_logs_level` setting. [#4445](https://github.com/ClickHouse/ClickHouse/pull/4445) ([alexey-milovidov](https://github.com/alexey-milovidov))
-* Fixed backward incompatibility of table function `remote` introduced with column comments. [#4446](https://github.com/ClickHouse/ClickHouse/pull/4446) ([alexey-milovidov](https://github.com/alexey-milovidov))
-
-### ClickHouse release 19.3.4, 2019-02-16
-
-#### Improvements
-* Table index size is not accounted against memory limits when doing an `ATTACH TABLE` query. This avoids the possibility that a table cannot be attached after being detached. [#4396](https://github.com/ClickHouse/ClickHouse/pull/4396) ([alexey-milovidov](https://github.com/alexey-milovidov))
-* Slightly raised the limit on the max string and array size received from ZooKeeper. This allows continuing to work with an increased `CLIENT_JVMFLAGS=-Djute.maxbuffer=...` on ZooKeeper. [#4398](https://github.com/ClickHouse/ClickHouse/pull/4398) ([alexey-milovidov](https://github.com/alexey-milovidov))
-* Allow repairing an abandoned replica even if it already has a huge number of nodes in its queue. [#4399](https://github.com/ClickHouse/ClickHouse/pull/4399) ([alexey-milovidov](https://github.com/alexey-milovidov))
-* Added one required argument to the `SET` index (the max number of stored rows). [#4386](https://github.com/ClickHouse/ClickHouse/pull/4386) ([Nikita Vasilev](https://github.com/nikvas0))
-
-#### Bug Fixes
-* Fixed the `WITH ROLLUP` result for GROUP BY with a single `LowCardinality` key. [#4384](https://github.com/ClickHouse/ClickHouse/pull/4384) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
-* Fixed a bug in the set index (dropping a granule if it contains more than `max_rows` rows). [#4386](https://github.com/ClickHouse/ClickHouse/pull/4386) ([Nikita Vasilev](https://github.com/nikvas0))
-* A lot of FreeBSD build fixes. [#4397](https://github.com/ClickHouse/ClickHouse/pull/4397) ([proller](https://github.com/proller))
-* Fixed alias substitution in queries with a subquery containing the same alias (issue [#4110](https://github.com/ClickHouse/ClickHouse/issues/4110)). [#4351](https://github.com/ClickHouse/ClickHouse/pull/4351) ([Artem Zuikov](https://github.com/4ertus2))
-
-#### Build/Testing/Packaging Improvements
-* Add the ability to run `clickhouse-server` for stateless tests in a docker image. [#4347](https://github.com/ClickHouse/ClickHouse/pull/4347) ([Vasily Nemkov](https://github.com/Enmk))
-
-### ClickHouse release 19.3.3, 2019-02-13
-
-#### New Features
-* Added the `KILL MUTATION` statement that allows removing mutations that are stuck for some reason. Added `latest_failed_part`, `latest_fail_time`, `latest_fail_reason` fields to the `system.mutations` table for easier troubleshooting. [#4287](https://github.com/ClickHouse/ClickHouse/pull/4287) ([Alex Zatelepin](https://github.com/ztlpn))
-* Added aggregate function `entropy` which computes Shannon entropy. [#4238](https://github.com/ClickHouse/ClickHouse/pull/4238) ([Quid37](https://github.com/Quid37))
-* Added the ability to send queries `INSERT INTO tbl VALUES (....` to the server without splitting them into `query` and `data` parts. [#4301](https://github.com/ClickHouse/ClickHouse/pull/4301) ([alesapin](https://github.com/alesapin))
-* A generic implementation of the `arrayWithConstant` function was added. [#4322](https://github.com/ClickHouse/ClickHouse/pull/4322) ([alexey-milovidov](https://github.com/alexey-milovidov))
-* Implemented the `NOT BETWEEN` comparison operator. [#4228](https://github.com/ClickHouse/ClickHouse/pull/4228) ([Dmitry Naumov](https://github.com/nezed))
-* Implement `sumMapFiltered` in order to be able to limit the number of keys for which values will be summed by `sumMap`. [#4129](https://github.com/ClickHouse/ClickHouse/pull/4129) ([Léo Ercolanelli](https://github.com/ercolanelli-leo))
-* Added support for `Nullable` types in the `mysql` table function. [#4198](https://github.com/ClickHouse/ClickHouse/pull/4198) ([Emmanuel Donin de Rosière](https://github.com/edonin))
-* Support for arbitrary constant expressions in the `LIMIT` clause. [#4246](https://github.com/ClickHouse/ClickHouse/pull/4246) ([k3box](https://github.com/k3box))
-* Added the `topKWeighted` aggregate function that takes an additional argument with the (unsigned integer) weight. [#4245](https://github.com/ClickHouse/ClickHouse/pull/4245) ([Andrew Golman](https://github.com/andrewgolman))
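A minimal sketch of `topKWeighted` as described in the entry above, assuming a hypothetical `page_stats` table with `page` and `views` columns:

```sql
-- Approximate top 3 pages, where each row contributes its view count
-- as weight instead of being counted once.
SELECT topKWeighted(3)(page, views) AS top_pages
FROM page_stats;
```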
-* `StorageJoin` now supports the `join_any_take_last_row` setting that allows overwriting existing values of the same key. [#3973](https://github.com/ClickHouse/ClickHouse/pull/3973) ([Amos Bird](https://github.com/amosbird))
-* Added function `toStartOfInterval`. [#4304](https://github.com/ClickHouse/ClickHouse/pull/4304) ([Vitaly Baranov](https://github.com/vitlibar))
-* Added the `RowBinaryWithNamesAndTypes` format. [#4200](https://github.com/ClickHouse/ClickHouse/pull/4200) ([Oleg V. Kozlyuk](https://github.com/DarkWanderer))
-* Added `IPv4` and `IPv6` data types. More efficient implementations of `IPv*` functions. [#3669](https://github.com/ClickHouse/ClickHouse/pull/3669) ([Vasily Nemkov](https://github.com/Enmk))
-* Added function `toStartOfTenMinutes()`. [#4298](https://github.com/ClickHouse/ClickHouse/pull/4298) ([Vitaly Baranov](https://github.com/vitlibar))
-* Added `Protobuf` output format. [#4005](https://github.com/ClickHouse/ClickHouse/pull/4005) [#4158](https://github.com/ClickHouse/ClickHouse/pull/4158) ([Vitaly Baranov](https://github.com/vitlibar))
-* Added brotli support in the HTTP interface for data import (INSERTs). [#4235](https://github.com/ClickHouse/ClickHouse/pull/4235) ([Mikhail ](https://github.com/fandyushin))
-* Added hints when the user makes a typo in a function name or type in the command-line client. [#4239](https://github.com/ClickHouse/ClickHouse/pull/4239) ([Danila Kutenin](https://github.com/danlark1))
-* Added `Query-Id` to the server's HTTP response header. [#4231](https://github.com/ClickHouse/ClickHouse/pull/4231) ([Mikhail ](https://github.com/fandyushin))
-
-#### Experimental features
-* Added `minmax` and `set` data skipping indices for the MergeTree table engine family. [#4143](https://github.com/ClickHouse/ClickHouse/pull/4143) ([Nikita Vasilev](https://github.com/nikvas0))
-* Added conversion of `CROSS JOIN` to `INNER JOIN` if possible. [#4221](https://github.com/ClickHouse/ClickHouse/pull/4221) [#4266](https://github.com/ClickHouse/ClickHouse/pull/4266) ([Artem Zuikov](https://github.com/4ertus2))
-
-#### Bug Fixes
-* Fixed `Not found column` for duplicate columns in the `JOIN ON` section. [#4279](https://github.com/ClickHouse/ClickHouse/pull/4279) ([Artem Zuikov](https://github.com/4ertus2))
-* Make the `START REPLICATED SENDS` command start replicated sends. [#4229](https://github.com/ClickHouse/ClickHouse/pull/4229) ([nvartolomei](https://github.com/nvartolomei))
-* Fixed aggregate function execution with `Array(LowCardinality)` arguments. [#4055](https://github.com/ClickHouse/ClickHouse/pull/4055) ([KochetovNicolai](https://github.com/KochetovNicolai))
-* Fixed wrong behaviour when doing an `INSERT ... SELECT ... FROM file(...)` query and the file has `CSVWithNames` or `TSVWithNames` format and the first data row is missing. [#4297](https://github.com/ClickHouse/ClickHouse/pull/4297) ([alexey-milovidov](https://github.com/alexey-milovidov))
-* Fixed crash on dictionary reload if the dictionary is not available. This bug appeared in 19.1.6. [#4188](https://github.com/ClickHouse/ClickHouse/pull/4188) ([proller](https://github.com/proller))
-* Fixed `ALL JOIN` with duplicates in the right table. [#4184](https://github.com/ClickHouse/ClickHouse/pull/4184) ([Artem Zuikov](https://github.com/4ertus2))
-* Fixed segmentation fault with `use_uncompressed_cache=1` and an exception with a wrong uncompressed size. This bug appeared in 19.1.6. [#4186](https://github.com/ClickHouse/ClickHouse/pull/4186) ([alesapin](https://github.com/alesapin))
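For reference, a sketch of the experimental `minmax` and `set` data skipping indices introduced in 19.3.3 above. The table layout is hypothetical, and as an experimental feature it may additionally require enabling `allow_experimental_data_skipping_indices`:

```sql
CREATE TABLE user_actions
(
    dt DateTime,
    user_id UInt64,
    status UInt8,
    -- skip granules whose dt range cannot match the filter
    INDEX dt_idx dt TYPE minmax GRANULARITY 4,
    -- skip granules that contain none of the requested status values
    INDEX status_idx status TYPE set(100) GRANULARITY 4
)
ENGINE = MergeTree
ORDER BY (user_id, dt);
```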
-* Fixed a `compile_expressions` bug with comparison of big (more than int16) dates. [#4341](https://github.com/ClickHouse/ClickHouse/pull/4341) ([alesapin](https://github.com/alesapin))
-* Fixed an infinite loop when selecting from the table function `numbers(0)`. [#4280](https://github.com/ClickHouse/ClickHouse/pull/4280) ([alexey-milovidov](https://github.com/alexey-milovidov))
-* Temporarily disable predicate optimization for `ORDER BY`. [#3890](https://github.com/ClickHouse/ClickHouse/pull/3890) ([Winter Zhang](https://github.com/zhang2014))
-* Fixed `Illegal instruction` error when using base64 functions on old CPUs. This error was reproduced only when ClickHouse was compiled with gcc-8. [#4275](https://github.com/ClickHouse/ClickHouse/pull/4275) ([alexey-milovidov](https://github.com/alexey-milovidov))
-* Fixed `No message received` error when interacting with the PostgreSQL ODBC Driver through a TLS connection. Also fixes a segfault when using the MySQL ODBC Driver. [#4170](https://github.com/ClickHouse/ClickHouse/pull/4170) ([alexey-milovidov](https://github.com/alexey-milovidov))
-* Fixed incorrect result when `Date` and `DateTime` arguments are used in branches of the conditional operator (function `if`). Added a generic case for function `if`. [#4243](https://github.com/ClickHouse/ClickHouse/pull/4243) ([alexey-milovidov](https://github.com/alexey-milovidov))
-* ClickHouse dictionaries now load within the `clickhouse` process. [#4166](https://github.com/ClickHouse/ClickHouse/pull/4166) ([alexey-milovidov](https://github.com/alexey-milovidov))
-* Fixed a deadlock when `SELECT` from a table with the `File` engine was retried after a `No such file or directory` error. [#4161](https://github.com/ClickHouse/ClickHouse/pull/4161) ([alexey-milovidov](https://github.com/alexey-milovidov))
-* Fixed a race condition where selecting from `system.tables` may give a `table doesn't exist` error. [#4313](https://github.com/ClickHouse/ClickHouse/pull/4313) ([alexey-milovidov](https://github.com/alexey-milovidov))
-* `clickhouse-client` could segfault on exit while loading data for command-line suggestions if it was run in interactive mode. [#4317](https://github.com/ClickHouse/ClickHouse/pull/4317) ([alexey-milovidov](https://github.com/alexey-milovidov))
-* Fixed a bug where the execution of mutations containing `IN` operators was producing incorrect results. [#4099](https://github.com/ClickHouse/ClickHouse/pull/4099) ([Alex Zatelepin](https://github.com/ztlpn))
-* Fixed an error: if there is a database with the `Dictionary` engine, all dictionaries are forced to load at server startup, and if there is a dictionary with a ClickHouse source from localhost, the dictionary cannot load. [#4255](https://github.com/ClickHouse/ClickHouse/pull/4255) ([alexey-milovidov](https://github.com/alexey-milovidov))
-* Fixed an error when system logs were attempted to be created again at server shutdown. [#4254](https://github.com/ClickHouse/ClickHouse/pull/4254) ([alexey-milovidov](https://github.com/alexey-milovidov))
-* Correctly return the right type and properly handle locks in the `joinGet` function. [#4153](https://github.com/ClickHouse/ClickHouse/pull/4153) ([Amos Bird](https://github.com/amosbird))
-* Added the `sumMapWithOverflow` function. [#4151](https://github.com/ClickHouse/ClickHouse/pull/4151) ([Léo Ercolanelli](https://github.com/ercolanelli-leo))
-* Fixed segfault with `allow_experimental_multiple_joins_emulation`. [52de2c](https://github.com/ClickHouse/ClickHouse/commit/52de2cd927f7b5257dd67e175f0a5560a48840d0) ([Artem Zuikov](https://github.com/4ertus2))
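A minimal sketch of the `joinGet` usage referenced in the fix above, with a hypothetical `Join`-engine table used as a key-value lookup (the related `join_any_take_last_row` setting from 19.3.3 controls whether repeated inserts for a key overwrite the stored value):

```sql
CREATE TABLE currency_rates
(
    code String,
    rate Float64
)
ENGINE = Join(ANY, LEFT, code);

INSERT INTO currency_rates VALUES ('USD', 1.0), ('EUR', 1.08);

-- Look up a single value by key, dictionary-style.
SELECT joinGet('currency_rates', 'rate', 'EUR') AS eur_rate;
```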
-* Fixed a bug with incorrect `Date` and `DateTime` comparison. [#4237](https://github.com/ClickHouse/ClickHouse/pull/4237) ([valexey](https://github.com/valexey))
-* Fixed a fuzz test under the undefined behavior sanitizer: added a parameter type check for the `quantile*Weighted` family of functions. [#4145](https://github.com/ClickHouse/ClickHouse/pull/4145) ([alexey-milovidov](https://github.com/alexey-milovidov))
-* Fixed a rare race condition where removal of old data parts could fail with a `File not found` error. [#4378](https://github.com/ClickHouse/ClickHouse/pull/4378) ([alexey-milovidov](https://github.com/alexey-milovidov))
-* Fix package installation with a missing /etc/clickhouse-server/config.xml. [#4343](https://github.com/ClickHouse/ClickHouse/pull/4343) ([proller](https://github.com/proller))
-
-
-#### Build/Testing/Packaging Improvements
-* Debian package: correct the /etc/clickhouse-server/preprocessed link according to config. [#4205](https://github.com/ClickHouse/ClickHouse/pull/4205) ([proller](https://github.com/proller))
-* Various build fixes for FreeBSD. [#4225](https://github.com/ClickHouse/ClickHouse/pull/4225) ([proller](https://github.com/proller))
-* Added the ability to create, fill and drop tables in perftest. [#4220](https://github.com/ClickHouse/ClickHouse/pull/4220) ([alesapin](https://github.com/alesapin))
-* Added a script to check for duplicate includes. [#4326](https://github.com/ClickHouse/ClickHouse/pull/4326) ([alexey-milovidov](https://github.com/alexey-milovidov))
-* Added the ability to run queries by index in performance test. [#4264](https://github.com/ClickHouse/ClickHouse/pull/4264) ([alesapin](https://github.com/alesapin))
-* The package with debug symbols is suggested to be installed. [#4274](https://github.com/ClickHouse/ClickHouse/pull/4274) ([alexey-milovidov](https://github.com/alexey-milovidov))
-* Refactoring of performance-test. Better logging and signal handling. [#4171](https://github.com/ClickHouse/ClickHouse/pull/4171) ([alesapin](https://github.com/alesapin))
-* Added docs for anonymized Yandex.Metrika datasets. [#4164](https://github.com/ClickHouse/ClickHouse/pull/4164) ([alesapin](https://github.com/alesapin))
-* Added a tool for converting an old month-partitioned part to the custom-partitioned format. [#4195](https://github.com/ClickHouse/ClickHouse/pull/4195) ([Alex Zatelepin](https://github.com/ztlpn))
-* Added docs about two datasets in s3. [#4144](https://github.com/ClickHouse/ClickHouse/pull/4144) ([alesapin](https://github.com/alesapin))
-* Added a script which creates a changelog from pull request descriptions. [#4169](https://github.com/ClickHouse/ClickHouse/pull/4169) [#4173](https://github.com/ClickHouse/ClickHouse/pull/4173) ([KochetovNicolai](https://github.com/KochetovNicolai))
-* Added a puppet module for ClickHouse. [#4182](https://github.com/ClickHouse/ClickHouse/pull/4182) ([Maxim Fedotov](https://github.com/MaxFedotov))
-* Added docs for a group of undocumented functions. [#4168](https://github.com/ClickHouse/ClickHouse/pull/4168) ([Winter Zhang](https://github.com/zhang2014))
-* ARM build fixes. [#4210](https://github.com/ClickHouse/ClickHouse/pull/4210) [#4306](https://github.com/ClickHouse/ClickHouse/pull/4306) [#4291](https://github.com/ClickHouse/ClickHouse/pull/4291) ([proller](https://github.com/proller))
-* Dictionary tests are now able to run from `ctest`. [#4189](https://github.com/ClickHouse/ClickHouse/pull/4189) ([proller](https://github.com/proller))
-* Now `/etc/ssl` is used as the default directory with SSL certificates. [#4167](https://github.com/ClickHouse/ClickHouse/pull/4167) ([alexey-milovidov](https://github.com/alexey-milovidov))
-* Added checking of SSE and AVX instructions at startup. [#4234](https://github.com/ClickHouse/ClickHouse/pull/4234) ([Igr](https://github.com/igron99))
-* The init script will wait for the server to start. [#4281](https://github.com/ClickHouse/ClickHouse/pull/4281) ([proller](https://github.com/proller))
-
-#### Backward Incompatible Changes
-* Removed the `allow_experimental_low_cardinality_type` setting. `LowCardinality` data types are production ready. [#4323](https://github.com/ClickHouse/ClickHouse/pull/4323) ([alexey-milovidov](https://github.com/alexey-milovidov))
-* Reduce mark cache size and uncompressed cache size according to the available memory amount. [#4240](https://github.com/ClickHouse/ClickHouse/pull/4240) ([Lopatin Konstantin](https://github.com/k-lopatin))
-* Added the keyword `INDEX` in the `CREATE TABLE` query. A column with the name `index` must be quoted with backticks or double quotes: `` `index` ``. [#4143](https://github.com/ClickHouse/ClickHouse/pull/4143) ([Nikita Vasilev](https://github.com/nikvas0))
-* `sumMap` now promotes the result type instead of overflowing. The old `sumMap` behavior can be obtained by using the `sumMapWithOverflow` function. [#4151](https://github.com/ClickHouse/ClickHouse/pull/4151) ([Léo Ercolanelli](https://github.com/ercolanelli-leo))
-
-#### Performance Improvements
-* `std::sort` replaced by `pdqsort` for queries without `LIMIT`. [#4236](https://github.com/ClickHouse/ClickHouse/pull/4236) ([Evgenii Pravda](https://github.com/kvinty))
-* Now the server reuses threads from the global thread pool. This affects performance in some corner cases. [#4150](https://github.com/ClickHouse/ClickHouse/pull/4150) ([alexey-milovidov](https://github.com/alexey-milovidov))
-
-#### Improvements
-* Implemented AIO support for FreeBSD. [#4305](https://github.com/ClickHouse/ClickHouse/pull/4305) ([urgordeadbeef](https://github.com/urgordeadbeef))
-* `SELECT * FROM a JOIN b USING a, b` now returns `a` and `b` columns only from the left table. [#4141](https://github.com/ClickHouse/ClickHouse/pull/4141) ([Artem Zuikov](https://github.com/4ertus2))
-* Allow the `-C` option of the client to work as the `-c` option. [#4232](https://github.com/ClickHouse/ClickHouse/pull/4232) ([syominsergey](https://github.com/syominsergey))
-* Now the `--password` option used without a value requires the password from stdin. [#4230](https://github.com/ClickHouse/ClickHouse/pull/4230) ([BSD_Conqueror](https://github.com/bsd-conqueror))
-* Added highlighting of unescaped metacharacters in string literals that contain `LIKE` expressions or regexps. [#4327](https://github.com/ClickHouse/ClickHouse/pull/4327) ([alexey-milovidov](https://github.com/alexey-milovidov))
-* Added cancelling of HTTP read-only queries if the client socket goes away. [#4213](https://github.com/ClickHouse/ClickHouse/pull/4213) ([nvartolomei](https://github.com/nvartolomei))
-* Now the server reports progress to keep client connections alive.
[#4215](https://github.com/ClickHouse/ClickHouse/pull/4215) ([Ivan](https://github.com/abyss7)) -* Slightly better message with reason for OPTIMIZE query with `optimize_throw_if_noop` setting enabled. [#4294](https://github.com/ClickHouse/ClickHouse/pull/4294) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Added support of `--version` option for clickhouse server. [#4251](https://github.com/ClickHouse/ClickHouse/pull/4251) ([Lopatin Konstantin](https://github.com/k-lopatin)) -* Added `--help/-h` option to `clickhouse-server`. [#4233](https://github.com/ClickHouse/ClickHouse/pull/4233) ([Yuriy Baranov](https://github.com/yurriy)) -* Added support for scalar subqueries with aggregate function state result. [#4348](https://github.com/ClickHouse/ClickHouse/pull/4348) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) -* Improved server shutdown time and ALTERs waiting time. [#4372](https://github.com/ClickHouse/ClickHouse/pull/4372) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Added info about the replicated_can_become_leader setting to system.replicas and add logging if the replica won't try to become leader. [#4379](https://github.com/ClickHouse/ClickHouse/pull/4379) ([Alex Zatelepin](https://github.com/ztlpn)) - - -## ClickHouse release 19.1 -### ClickHouse release 19.1.14, 2019-03-14 - -* Fixed error `Column ... queried more than once` that may happen if the setting `asterisk_left_columns_only` is set to 1 in case of using `GLOBAL JOIN` with `SELECT *` (rare case). The issue does not exist in 19.3 and newer. [6bac7d8d](https://github.com/ClickHouse/ClickHouse/pull/4692/commits/6bac7d8d11a9b0d6de0b32b53c47eb2f6f8e7062) ([Artem Zuikov](https://github.com/4ertus2)) - -### ClickHouse release 19.1.13, 2019-03-12 - -This release contains exactly the same set of patches as 19.3.7. - -### ClickHouse release 19.1.10, 2019-03-03 - -This release contains exactly the same set of patches as 19.3.6. - - -## ClickHouse release 19.1 -### ClickHouse release 19.1.9, 2019-02-21 - -#### Bug fixes -* Fixed backward incompatibility with old versions due to wrong implementation of `send_logs_level` setting. [#4445](https://github.com/ClickHouse/ClickHouse/pull/4445) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Fixed backward incompatibility of table function `remote` introduced with column comments. [#4446](https://github.com/ClickHouse/ClickHouse/pull/4446) ([alexey-milovidov](https://github.com/alexey-milovidov)) - -### ClickHouse release 19.1.8, 2019-02-16 - -#### Bug Fixes -* Fix install package with missing /etc/clickhouse-server/config.xml. [#4343](https://github.com/ClickHouse/ClickHouse/pull/4343) ([proller](https://github.com/proller)) - - -## ClickHouse release 19.1 -### ClickHouse release 19.1.7, 2019-02-15 - -#### Bug Fixes -* Correctly return the right type and properly handle locks in `joinGet` function. [#4153](https://github.com/ClickHouse/ClickHouse/pull/4153) ([Amos Bird](https://github.com/amosbird)) -* Fixed error when system logs are tried to create again at server shutdown. [#4254](https://github.com/ClickHouse/ClickHouse/pull/4254) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Fixed error: if there is a database with `Dictionary` engine, all dictionaries forced to load at server startup, and if there is a dictionary with ClickHouse source from localhost, the dictionary cannot load. 
[#4255](https://github.com/ClickHouse/ClickHouse/pull/4255) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Fixed a bug when the execution of mutations containing `IN` operators was producing incorrect results. [#4099](https://github.com/ClickHouse/ClickHouse/pull/4099) ([Alex Zatelepin](https://github.com/ztlpn)) -* `clickhouse-client` can segfault on exit while loading data for command line suggestions if it was run in interactive mode. [#4317](https://github.com/ClickHouse/ClickHouse/pull/4317) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Fixed race condition when selecting from `system.tables` may give `table doesn't exist` error. [#4313](https://github.com/ClickHouse/ClickHouse/pull/4313) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Fixed deadlock when `SELECT` from a table with `File` engine was retried after `No such file or directory` error. [#4161](https://github.com/ClickHouse/ClickHouse/pull/4161) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Fixed an issue: local ClickHouse dictionaries are loaded via TCP, but should load within process. [#4166](https://github.com/ClickHouse/ClickHouse/pull/4166) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Fixed `No message received` error when interacting with PostgreSQL ODBC Driver through TLS connection. Also fixes segfault when using MySQL ODBC Driver. [#4170](https://github.com/ClickHouse/ClickHouse/pull/4170) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Temporarily disable predicate optimization for `ORDER BY`. [#3890](https://github.com/ClickHouse/ClickHouse/pull/3890) ([Winter Zhang](https://github.com/zhang2014)) -* Fixed infinite loop when selecting from table function `numbers(0)`. [#4280](https://github.com/ClickHouse/ClickHouse/pull/4280) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Fixed `compile_expressions` bug with comparison of big (more than int16) dates. [#4341](https://github.com/ClickHouse/ClickHouse/pull/4341) ([alesapin](https://github.com/alesapin)) -* Fixed segmentation fault with `uncompressed_cache=1` and exception with wrong uncompressed size. [#4186](https://github.com/ClickHouse/ClickHouse/pull/4186) ([alesapin](https://github.com/alesapin)) -* Fixed `ALL JOIN` with duplicates in right table. [#4184](https://github.com/ClickHouse/ClickHouse/pull/4184) ([Artem Zuikov](https://github.com/4ertus2)) -* Fixed wrong behaviour when doing `INSERT ... SELECT ... FROM file(...)` query and file has `CSVWithNames` or `TSVWIthNames` format and the first data row is missing. [#4297](https://github.com/ClickHouse/ClickHouse/pull/4297) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Fixed aggregate functions execution with `Array(LowCardinality)` arguments. [#4055](https://github.com/ClickHouse/ClickHouse/pull/4055) ([KochetovNicolai](https://github.com/KochetovNicolai)) -* Debian package: correct /etc/clickhouse-server/preprocessed link according to config. [#4205](https://github.com/ClickHouse/ClickHouse/pull/4205) ([proller](https://github.com/proller)) -* Fixed fuzz test under undefined behavior sanitizer: added parameter type check for `quantile*Weighted` family of functions. [#4145](https://github.com/ClickHouse/ClickHouse/pull/4145) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Make `START REPLICATED SENDS` command start replicated sends. 
[#4229](https://github.com/ClickHouse/ClickHouse/pull/4229) ([nvartolomei](https://github.com/nvartolomei)) -* Fixed `Not found column` for duplicate columns in JOIN ON section. [#4279](https://github.com/ClickHouse/ClickHouse/pull/4279) ([Artem Zuikov](https://github.com/4ertus2)) -* Now `/etc/ssl` is used as default directory with SSL certificates. [#4167](https://github.com/ClickHouse/ClickHouse/pull/4167) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Fixed crash on dictionary reload if dictionary not available. [#4188](https://github.com/ClickHouse/ClickHouse/pull/4188) ([proller](https://github.com/proller)) -* Fixed bug with incorrect `Date` and `DateTime` comparison. [#4237](https://github.com/ClickHouse/ClickHouse/pull/4237) ([valexey](https://github.com/valexey)) -* Fixed incorrect result when `Date` and `DateTime` arguments are used in branches of conditional operator (function `if`). Added generic case for function `if`. [#4243](https://github.com/ClickHouse/ClickHouse/pull/4243) ([alexey-milovidov](https://github.com/alexey-milovidov)) - -### ClickHouse release 19.1.6, 2019-01-24 - -#### New Features - -* Custom per column compression codecs for tables. [#3899](https://github.com/ClickHouse/ClickHouse/pull/3899) [#4111](https://github.com/ClickHouse/ClickHouse/pull/4111) ([alesapin](https://github.com/alesapin), [Winter Zhang](https://github.com/zhang2014), [Anatoly](https://github.com/Sindbag)) -* Added compression codec `Delta`. [#4052](https://github.com/ClickHouse/ClickHouse/pull/4052) ([alesapin](https://github.com/alesapin)) -* Allow to `ALTER` compression codecs. [#4054](https://github.com/ClickHouse/ClickHouse/pull/4054) ([alesapin](https://github.com/alesapin)) -* Added functions `left`, `right`, `trim`, `ltrim`, `rtrim`, `timestampadd`, `timestampsub` for SQL standard compatibility. [#3826](https://github.com/ClickHouse/ClickHouse/pull/3826) ([Ivan Blinkov](https://github.com/blinkov)) -* Support for write in `HDFS` tables and `hdfs` table function. [#4084](https://github.com/ClickHouse/ClickHouse/pull/4084) ([alesapin](https://github.com/alesapin)) -* Added functions to search for multiple constant strings from big haystack: `multiPosition`, `multiSearch` ,`firstMatch` also with `-UTF8`, `-CaseInsensitive`, and `-CaseInsensitiveUTF8` variants. [#4053](https://github.com/ClickHouse/ClickHouse/pull/4053) ([Danila Kutenin](https://github.com/danlark1)) -* Pruning of unused shards if `SELECT` query filters by sharding key (setting `optimize_skip_unused_shards`). [#3851](https://github.com/ClickHouse/ClickHouse/pull/3851) ([Gleb Kanterov](https://github.com/kanterov), [Ivan](https://github.com/abyss7)) -* Allow `Kafka` engine to ignore some number of parsing errors per block. [#4094](https://github.com/ClickHouse/ClickHouse/pull/4094) ([Ivan](https://github.com/abyss7)) -* Added support for `CatBoost` multiclass models evaluation. Function `modelEvaluate` returns tuple with per-class raw predictions for multiclass models. `libcatboostmodel.so` should be built with [#607](https://github.com/catboost/catboost/pull/607). [#3959](https://github.com/ClickHouse/ClickHouse/pull/3959) ([KochetovNicolai](https://github.com/KochetovNicolai)) -* Added functions `filesystemAvailable`, `filesystemFree`, `filesystemCapacity`. [#4097](https://github.com/ClickHouse/ClickHouse/pull/4097) ([Boris Granveaud](https://github.com/bgranvea)) -* Added hashing functions `xxHash64` and `xxHash32`. 
[#3905](https://github.com/ClickHouse/ClickHouse/pull/3905) ([filimonov](https://github.com/filimonov)) -* Added `gccMurmurHash` hashing function (GCC flavoured Murmur hash) which uses the same hash seed as [gcc](https://github.com/gcc-mirror/gcc/blob/41d6b10e96a1de98e90a7c0378437c3255814b16/libstdc%2B%2B-v3/include/bits/functional_hash.h#L191) [#4000](https://github.com/ClickHouse/ClickHouse/pull/4000) ([sundyli](https://github.com/sundy-li)) -* Added hashing functions `javaHash`, `hiveHash`. [#3811](https://github.com/ClickHouse/ClickHouse/pull/3811) ([shangshujie365](https://github.com/shangshujie365)) -* Added table function `remoteSecure`. Function works as `remote`, but uses secure connection. [#4088](https://github.com/ClickHouse/ClickHouse/pull/4088) ([proller](https://github.com/proller)) - - -#### Experimental features - -* Added multiple JOINs emulation (`allow_experimental_multiple_joins_emulation` setting). [#3946](https://github.com/ClickHouse/ClickHouse/pull/3946) ([Artem Zuikov](https://github.com/4ertus2)) - - -#### Bug Fixes - -* Make `compiled_expression_cache_size` setting limited by default to lower memory consumption. [#4041](https://github.com/ClickHouse/ClickHouse/pull/4041) ([alesapin](https://github.com/alesapin)) -* Fix a bug that led to hangups in threads that perform ALTERs of Replicated tables and in the thread that updates configuration from ZooKeeper. [#2947](https://github.com/ClickHouse/ClickHouse/issues/2947) [#3891](https://github.com/ClickHouse/ClickHouse/issues/3891) [#3934](https://github.com/ClickHouse/ClickHouse/pull/3934) ([Alex Zatelepin](https://github.com/ztlpn)) -* Fixed a race condition when executing a distributed ALTER task. The race condition led to more than one replica trying to execute the task and all replicas except one failing with a ZooKeeper error. [#3904](https://github.com/ClickHouse/ClickHouse/pull/3904) ([Alex Zatelepin](https://github.com/ztlpn)) -* Fix a bug when `from_zk` config elements weren't refreshed after a request to ZooKeeper timed out. [#2947](https://github.com/ClickHouse/ClickHouse/issues/2947) [#3947](https://github.com/ClickHouse/ClickHouse/pull/3947) ([Alex Zatelepin](https://github.com/ztlpn)) -* Fix bug with wrong prefix for IPv4 subnet masks. [#3945](https://github.com/ClickHouse/ClickHouse/pull/3945) ([alesapin](https://github.com/alesapin)) -* Fixed crash (`std::terminate`) in rare cases when a new thread cannot be created due to exhausted resources. [#3956](https://github.com/ClickHouse/ClickHouse/pull/3956) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Fix bug when in `remote` table function execution when wrong restrictions were used for in `getStructureOfRemoteTable`. [#4009](https://github.com/ClickHouse/ClickHouse/pull/4009) ([alesapin](https://github.com/alesapin)) -* Fix a leak of netlink sockets. They were placed in a pool where they were never deleted and new sockets were created at the start of a new thread when all current sockets were in use. [#4017](https://github.com/ClickHouse/ClickHouse/pull/4017) ([Alex Zatelepin](https://github.com/ztlpn)) -* Fix bug with closing `/proc/self/fd` directory earlier than all fds were read from `/proc` after forking `odbc-bridge` subprocess. [#4120](https://github.com/ClickHouse/ClickHouse/pull/4120) ([alesapin](https://github.com/alesapin)) -* Fixed String to UInt monotonic conversion in case of usage String in primary key. 
[#3870](https://github.com/ClickHouse/ClickHouse/pull/3870) ([Winter Zhang](https://github.com/zhang2014)) -* Fixed error in calculation of integer conversion function monotonicity. [#3921](https://github.com/ClickHouse/ClickHouse/pull/3921) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Fixed segfault in `arrayEnumerateUniq`, `arrayEnumerateDense` functions in case of some invalid arguments. [#3909](https://github.com/ClickHouse/ClickHouse/pull/3909) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Fix UB in StorageMerge. [#3910](https://github.com/ClickHouse/ClickHouse/pull/3910) ([Amos Bird](https://github.com/amosbird)) -* Fixed segfault in functions `addDays`, `subtractDays`. [#3913](https://github.com/ClickHouse/ClickHouse/pull/3913) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Fixed error: functions `round`, `floor`, `trunc`, `ceil` may return bogus result when executed on integer argument and large negative scale. [#3914](https://github.com/ClickHouse/ClickHouse/pull/3914) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Fixed a bug induced by 'kill query sync' which leads to a core dump. [#3916](https://github.com/ClickHouse/ClickHouse/pull/3916) ([muVulDeePecker](https://github.com/fancyqlx)) -* Fix bug with long delay after empty replication queue. [#3928](https://github.com/ClickHouse/ClickHouse/pull/3928) [#3932](https://github.com/ClickHouse/ClickHouse/pull/3932) ([alesapin](https://github.com/alesapin)) -* Fixed excessive memory usage in case of inserting into table with `LowCardinality` primary key. [#3955](https://github.com/ClickHouse/ClickHouse/pull/3955) ([KochetovNicolai](https://github.com/KochetovNicolai)) -* Fixed `LowCardinality` serialization for `Native` format in case of empty arrays. [#3907](https://github.com/ClickHouse/ClickHouse/issues/3907) [#4011](https://github.com/ClickHouse/ClickHouse/pull/4011) ([KochetovNicolai](https://github.com/KochetovNicolai)) -* Fixed incorrect result while using distinct by single LowCardinality numeric column. [#3895](https://github.com/ClickHouse/ClickHouse/issues/3895) [#4012](https://github.com/ClickHouse/ClickHouse/pull/4012) ([KochetovNicolai](https://github.com/KochetovNicolai)) -* Fixed specialized aggregation with LowCardinality key (in case when `compile` setting is enabled). [#3886](https://github.com/ClickHouse/ClickHouse/pull/3886) ([KochetovNicolai](https://github.com/KochetovNicolai)) -* Fix user and password forwarding for replicated tables queries. [#3957](https://github.com/ClickHouse/ClickHouse/pull/3957) ([alesapin](https://github.com/alesapin)) ([小路](https://github.com/nicelulu)) -* Fixed very rare race condition that can happen when listing tables in Dictionary database while reloading dictionaries. [#3970](https://github.com/ClickHouse/ClickHouse/pull/3970) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Fixed incorrect result when HAVING was used with ROLLUP or CUBE. [#3756](https://github.com/ClickHouse/ClickHouse/issues/3756) [#3837](https://github.com/ClickHouse/ClickHouse/pull/3837) ([Sam Chou](https://github.com/reflection)) -* Fixed column aliases for query with `JOIN ON` syntax and distributed tables. [#3980](https://github.com/ClickHouse/ClickHouse/pull/3980) ([Winter Zhang](https://github.com/zhang2014)) -* Fixed error in internal implementation of `quantileTDigest` (found by Artem Vakhrushev). This error never happens in ClickHouse and was relevant only for those who use ClickHouse codebase as a library directly. 
[#3935](https://github.com/ClickHouse/ClickHouse/pull/3935) ([alexey-milovidov](https://github.com/alexey-milovidov))
-
-#### Improvements
-
-* Support for `IF NOT EXISTS` in `ALTER TABLE ADD COLUMN` statements along with `IF EXISTS` in `DROP/MODIFY/CLEAR/COMMENT COLUMN`. [#3900](https://github.com/ClickHouse/ClickHouse/pull/3900) ([Boris Granveaud](https://github.com/bgranvea))
-* Function `parseDateTimeBestEffort`: support for formats `DD.MM.YYYY`, `DD.MM.YY`, `DD-MM-YYYY`, `DD-Mon-YYYY`, `DD/Month/YYYY` and similar. [#3922](https://github.com/ClickHouse/ClickHouse/pull/3922) ([alexey-milovidov](https://github.com/alexey-milovidov))
-* `CapnProtoInputStream` now supports jagged structures. [#4063](https://github.com/ClickHouse/ClickHouse/pull/4063) ([Odin Hultgren Van Der Horst](https://github.com/Miniwoffer))
-* Usability improvement: added a check that the server process is started by the data directory's owner. Do not allow starting the server from root if the data belongs to a non-root user. [#3785](https://github.com/ClickHouse/ClickHouse/pull/3785) ([sergey-v-galtsev](https://github.com/sergey-v-galtsev))
-* Better logic of checking required columns during analysis of queries with JOINs. [#3930](https://github.com/ClickHouse/ClickHouse/pull/3930) ([Artem Zuikov](https://github.com/4ertus2))
-* Decreased the number of connections in case of a large number of Distributed tables on a single server. [#3726](https://github.com/ClickHouse/ClickHouse/pull/3726) ([Winter Zhang](https://github.com/zhang2014))
-* Supported the totals row for a `WITH TOTALS` query for the ODBC driver. [#3836](https://github.com/ClickHouse/ClickHouse/pull/3836) ([Maksim Koritckiy](https://github.com/nightweb))
-* Allowed using `Enum`s as integers inside the `if` function. [#3875](https://github.com/ClickHouse/ClickHouse/pull/3875) ([Ivan](https://github.com/abyss7))
-* Added the `low_cardinality_allow_in_native_format` setting. If disabled, the `LowCardinality` type is not used in the `Native` format. [#3879](https://github.com/ClickHouse/ClickHouse/pull/3879) ([KochetovNicolai](https://github.com/KochetovNicolai))
-* Removed some redundant objects from the compiled expressions cache to lower memory usage. [#4042](https://github.com/ClickHouse/ClickHouse/pull/4042) ([alesapin](https://github.com/alesapin))
-* Added a check that the `SET send_logs_level = 'value'` query accepts an appropriate value. [#3873](https://github.com/ClickHouse/ClickHouse/pull/3873) ([Sabyanin Maxim](https://github.com/s-mx))
-* Fixed the data type check in type conversion functions. [#3896](https://github.com/ClickHouse/ClickHouse/pull/3896) ([Winter Zhang](https://github.com/zhang2014))
-
-#### Performance Improvements
-
-* Add a MergeTree setting `use_minimalistic_part_header_in_zookeeper`. If enabled, Replicated tables will store compact part metadata in a single part znode. This can dramatically reduce ZooKeeper snapshot size (especially if the tables have a lot of columns). Note that after enabling this setting you will not be able to downgrade to a version that doesn't support it. [#3960](https://github.com/ClickHouse/ClickHouse/pull/3960) ([Alex Zatelepin](https://github.com/ztlpn))
-* Add a DFA-based implementation for functions `sequenceMatch` and `sequenceCount` in case the pattern doesn't contain time. [#4004](https://github.com/ClickHouse/ClickHouse/pull/4004) ([Léo Ercolanelli](https://github.com/ercolanelli-leo))
-* Performance improvement for integer number serialization. [#3968](https://github.com/ClickHouse/ClickHouse/pull/3968) ([Amos Bird](https://github.com/amosbird))
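For reference, a sketch of the kind of `sequenceMatch` call that benefits from the DFA-based implementation above, i.e. a pattern without time constraints (the `events` table, its columns and the event names are hypothetical):

```sql
-- Per user: was a 'view' event eventually followed by a 'purchase' event?
SELECT
    user_id,
    sequenceMatch('(?1)(?2)')(event_time, event = 'view', event = 'purchase') AS converted
FROM events
GROUP BY user_id;
```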
-* Zero left padding of PODArray so that the -1 element is always valid and zeroed. It's used for branchless calculation of offsets. [#3920](https://github.com/ClickHouse/ClickHouse/pull/3920) ([Amos Bird](https://github.com/amosbird))
-* Reverted the `jemalloc` version which led to performance degradation. [#4018](https://github.com/ClickHouse/ClickHouse/pull/4018) ([alexey-milovidov](https://github.com/alexey-milovidov))
-
-#### Backward Incompatible Changes
-
-* Removed the undocumented feature `ALTER MODIFY PRIMARY KEY` because it was superseded by the `ALTER MODIFY ORDER BY` command. [#3887](https://github.com/ClickHouse/ClickHouse/pull/3887) ([Alex Zatelepin](https://github.com/ztlpn))
-* Removed function `shardByHash`. [#3833](https://github.com/ClickHouse/ClickHouse/pull/3833) ([alexey-milovidov](https://github.com/alexey-milovidov))
-* Forbid using scalar subqueries with a result of type `AggregateFunction`. [#3865](https://github.com/ClickHouse/ClickHouse/pull/3865) ([Ivan](https://github.com/abyss7))
-
-#### Build/Testing/Packaging Improvements
-
-* Added support for the PowerPC (`ppc64le`) build. [#4132](https://github.com/ClickHouse/ClickHouse/pull/4132) ([Danila Kutenin](https://github.com/danlark1))
-* Stateful functional tests are run on a publicly available dataset. [#3969](https://github.com/ClickHouse/ClickHouse/pull/3969) ([alexey-milovidov](https://github.com/alexey-milovidov))
-* Fixed an error when the server cannot start with the `bash: /usr/bin/clickhouse-extract-from-config: Operation not permitted` message within Docker or systemd-nspawn. [#4136](https://github.com/ClickHouse/ClickHouse/pull/4136) ([alexey-milovidov](https://github.com/alexey-milovidov))
-* Updated the `rdkafka` library to v1.0.0-RC5. Used cppkafka instead of the raw C interface. [#4025](https://github.com/ClickHouse/ClickHouse/pull/4025) ([Ivan](https://github.com/abyss7))
-* Updated the `mariadb-client` library. Fixed one of the issues found by UBSan. [#3924](https://github.com/ClickHouse/ClickHouse/pull/3924) ([alexey-milovidov](https://github.com/alexey-milovidov))
-* Some fixes for UBSan builds. [#3926](https://github.com/ClickHouse/ClickHouse/pull/3926) [#3021](https://github.com/ClickHouse/ClickHouse/pull/3021) [#3948](https://github.com/ClickHouse/ClickHouse/pull/3948) ([alexey-milovidov](https://github.com/alexey-milovidov))
-* Added per-commit runs of tests with the UBSan build.
-* Added per-commit runs of the PVS-Studio static analyzer.
-* Fixed bugs found by PVS-Studio. [#4013](https://github.com/ClickHouse/ClickHouse/pull/4013) ([alexey-milovidov](https://github.com/alexey-milovidov))
-* Fixed glibc compatibility issues. [#4100](https://github.com/ClickHouse/ClickHouse/pull/4100) ([alexey-milovidov](https://github.com/alexey-milovidov))
-* Move Docker images to 18.10 and add a compatibility file for glibc >= 2.28. [#3965](https://github.com/ClickHouse/ClickHouse/pull/3965) ([alesapin](https://github.com/alesapin))
-* Add an env variable if the user doesn't want to chown directories in the server Docker image. [#3967](https://github.com/ClickHouse/ClickHouse/pull/3967) ([alesapin](https://github.com/alesapin))
-* Enabled most of the warnings from `-Weverything` in clang. Enabled `-Wpedantic`. [#3986](https://github.com/ClickHouse/ClickHouse/pull/3986) ([alexey-milovidov](https://github.com/alexey-milovidov))
-* Added a few more warnings that are available only in clang 8.
[#3993](https://github.com/ClickHouse/ClickHouse/pull/3993) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Link to `libLLVM` rather than to individual LLVM libs when using shared linking. [#3989](https://github.com/ClickHouse/ClickHouse/pull/3989) ([Orivej Desh](https://github.com/orivej)) -* Added sanitizer variables for test images. [#4072](https://github.com/ClickHouse/ClickHouse/pull/4072) ([alesapin](https://github.com/alesapin)) -* `clickhouse-server` debian package will recommend `libcap2-bin` package to use `setcap` tool for setting capabilities. This is optional. [#4093](https://github.com/ClickHouse/ClickHouse/pull/4093) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Improved compilation time, fixed includes. [#3898](https://github.com/ClickHouse/ClickHouse/pull/3898) ([proller](https://github.com/proller)) -* Added performance tests for hash functions. [#3918](https://github.com/ClickHouse/ClickHouse/pull/3918) ([filimonov](https://github.com/filimonov)) -* Fixed cyclic library dependences. [#3958](https://github.com/ClickHouse/ClickHouse/pull/3958) ([proller](https://github.com/proller)) -* Improved compilation with low available memory. [#4030](https://github.com/ClickHouse/ClickHouse/pull/4030) ([proller](https://github.com/proller)) -* Added test script to reproduce performance degradation in `jemalloc`. [#4036](https://github.com/ClickHouse/ClickHouse/pull/4036) ([alexey-milovidov](https://github.com/alexey-milovidov)) -* Fixed misspells in comments and string literals under `dbms`. [#4122](https://github.com/ClickHouse/ClickHouse/pull/4122) ([maiha](https://github.com/maiha)) -* Fixed typos in comments. [#4089](https://github.com/ClickHouse/ClickHouse/pull/4089) ([Evgenii Pravda](https://github.com/kvinty)) - - -## ClickHouse release 18.16 -### ClickHouse release 18.16.1, 2018-12-21 - -#### Bug fixes: - -* Fixed an error that led to problems with updating dictionaries with the ODBC source. [#3825](https://github.com/ClickHouse/ClickHouse/issues/3825), [#3829](https://github.com/ClickHouse/ClickHouse/issues/3829) -* JIT compilation of aggregate functions now works with LowCardinality columns. [#3838](https://github.com/ClickHouse/ClickHouse/issues/3838) - -#### Improvements: - -* Added the `low_cardinality_allow_in_native_format` setting (enabled by default). When disabled, LowCardinality columns will be converted to ordinary columns for SELECT queries and ordinary columns will be expected for INSERT queries. [#3879](https://github.com/ClickHouse/ClickHouse/pull/3879) - -#### Build improvements: - -* Fixes for builds on macOS and ARM. - -### ClickHouse release 18.16.0, 2018-12-14 - -#### New features: - -* `DEFAULT` expressions are evaluated for missing fields when loading data in semi-structured input formats (`JSONEachRow`, `TSKV`). The feature is enabled with the `insert_sample_with_metadata` setting. [#3555](https://github.com/ClickHouse/ClickHouse/pull/3555) -* The `ALTER TABLE` query now has the `MODIFY ORDER BY` action for changing the sorting key when adding or removing a table column. This is useful for tables in the `MergeTree` family that perform additional tasks when merging based on this sorting key, such as `SummingMergeTree`, `AggregatingMergeTree`, and so on. [#3581](https://github.com/ClickHouse/ClickHouse/pull/3581) [#3755](https://github.com/ClickHouse/ClickHouse/pull/3755) -* For tables in the `MergeTree` family, now you can specify a different sorting key (`ORDER BY`) and index (`PRIMARY KEY`). 
-* Added the `hdfs` table function and the `HDFS` table engine for importing and exporting data to HDFS. [chenxing-xc](https://github.com/ClickHouse/ClickHouse/pull/3617) -* Added functions for working with base64: `base64Encode`, `base64Decode`, `tryBase64Decode`. [Alexander Krasheninnikov](https://github.com/ClickHouse/ClickHouse/pull/3350) -* Now you can use a parameter to configure the precision of the `uniqCombined` aggregate function (select the number of HyperLogLog cells). [#3406](https://github.com/ClickHouse/ClickHouse/pull/3406) -* Added the `system.contributors` table that contains the names of everyone who made commits in ClickHouse. [#3452](https://github.com/ClickHouse/ClickHouse/pull/3452) -* Added the ability to omit the partition for the `ALTER TABLE ... FREEZE` query in order to back up all partitions at once. [#3514](https://github.com/ClickHouse/ClickHouse/pull/3514) -* Added `dictGet` and `dictGetOrDefault` functions that don't require specifying the type of return value. The type is determined automatically from the dictionary description. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/3564) -* Now you can specify a comment for a column in the table description and change it using `ALTER`. [#3377](https://github.com/ClickHouse/ClickHouse/pull/3377) -* Reading is supported for `Join` type tables with simple keys. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/3728) -* Now you can specify the options `join_use_nulls`, `max_rows_in_join`, `max_bytes_in_join`, and `join_overflow_mode` when creating a `Join` type table. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/3728) -* Added the `joinGet` function that allows you to use a `Join` type table like a dictionary. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/3728) -* Added the `partition_key`, `sorting_key`, `primary_key`, and `sampling_key` columns to the `system.tables` table in order to provide information about table keys. [#3609](https://github.com/ClickHouse/ClickHouse/pull/3609) -* Added the `is_in_partition_key`, `is_in_sorting_key`, `is_in_primary_key`, and `is_in_sampling_key` columns to the `system.columns` table. [#3609](https://github.com/ClickHouse/ClickHouse/pull/3609) -* Added the `min_time` and `max_time` columns to the `system.parts` table. These columns are populated when the partitioning key is an expression consisting of `DateTime` columns. [Emmanuel Donin de Rosière](https://github.com/ClickHouse/ClickHouse/pull/3800) - -#### Bug fixes: - -* Fixes and performance improvements for the `LowCardinality` data type. `GROUP BY` using `LowCardinality(Nullable(...))`. Getting the values of `extremes`. Processing higher-order functions. `LEFT ARRAY JOIN`. Distributed `GROUP BY`. Functions that return `Array`. Execution of `ORDER BY`. Writing to `Distributed` tables (nicelulu). Backward compatibility for `INSERT` queries from old clients that implement the `Native` protocol. Support for `LowCardinality` for `JOIN`. Improved performance when working in a single stream.
[#3823](https://github.com/ClickHouse/ClickHouse/pull/3823) [#3803](https://github.com/ClickHouse/ClickHouse/pull/3803) [#3799](https://github.com/ClickHouse/ClickHouse/pull/3799) [#3769](https://github.com/ClickHouse/ClickHouse/pull/3769) [#3744](https://github.com/ClickHouse/ClickHouse/pull/3744) [#3681](https://github.com/ClickHouse/ClickHouse/pull/3681) [#3651](https://github.com/ClickHouse/ClickHouse/pull/3651) [#3649](https://github.com/ClickHouse/ClickHouse/pull/3649) [#3641](https://github.com/ClickHouse/ClickHouse/pull/3641) [#3632](https://github.com/ClickHouse/ClickHouse/pull/3632) [#3568](https://github.com/ClickHouse/ClickHouse/pull/3568) [#3523](https://github.com/ClickHouse/ClickHouse/pull/3523) [#3518](https://github.com/ClickHouse/ClickHouse/pull/3518) -* Fixed how the `select_sequential_consistency` option works. Previously, when this setting was enabled, an incomplete result was sometimes returned after beginning to write to a new partition. [#2863](https://github.com/ClickHouse/ClickHouse/pull/2863) -* Databases are correctly specified when executing DDL `ON CLUSTER` queries and `ALTER UPDATE/DELETE`. [#3772](https://github.com/ClickHouse/ClickHouse/pull/3772) [#3460](https://github.com/ClickHouse/ClickHouse/pull/3460) -* Databases are correctly specified for subqueries inside a VIEW. [#3521](https://github.com/ClickHouse/ClickHouse/pull/3521) -* Fixed a bug in `PREWHERE` with `FINAL` for `VersionedCollapsingMergeTree`. [7167bfd7](https://github.com/ClickHouse/ClickHouse/commit/7167bfd7b365538f7a91c4307ad77e552ab4e8c1) -* Now you can use `KILL QUERY` to cancel queries that have not started yet because they are waiting for the table to be locked. [#3517](https://github.com/ClickHouse/ClickHouse/pull/3517) -* Corrected date and time calculations if the clocks were moved back at midnight (this happens in Iran, and happened in Moscow from 1981 to 1983). Previously, this led to the time being reset a day earlier than necessary, and also caused incorrect formatting of the date and time in text format. [#3819](https://github.com/ClickHouse/ClickHouse/pull/3819) -* Fixed bugs in some cases of `VIEW` and subqueries that omit the database. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/3521) -* Fixed a race condition when simultaneously reading from a `MATERIALIZED VIEW` and deleting a `MATERIALIZED VIEW` due to not locking the internal `MATERIALIZED VIEW`. [#3404](https://github.com/ClickHouse/ClickHouse/pull/3404) [#3694](https://github.com/ClickHouse/ClickHouse/pull/3694) -* Fixed the error `Lock handler cannot be nullptr.` [#3689](https://github.com/ClickHouse/ClickHouse/pull/3689) -* Fixed query processing when the `compile_expressions` option is enabled (it's enabled by default). Nondeterministic constant expressions like the `now` function are no longer unfolded. [#3457](https://github.com/ClickHouse/ClickHouse/pull/3457) -* Fixed a crash when specifying a non-constant scale argument in `toDecimal32/64/128` functions. -* Fixed an error when trying to insert an array with `NULL` elements in the `Values` format into a column of type `Array` without `Nullable` (if `input_format_values_interpret_expressions` = 1). [#3487](https://github.com/ClickHouse/ClickHouse/pull/3487) [#3503](https://github.com/ClickHouse/ClickHouse/pull/3503) -* Fixed continuous error logging in `DDLWorker` if ZooKeeper is not available. 
[8f50c620](https://github.com/ClickHouse/ClickHouse/commit/8f50c620334988b28018213ec0092fe6423847e2) -* Fixed the return type for `quantile*` functions from `Date` and `DateTime` types of arguments. [#3580](https://github.com/ClickHouse/ClickHouse/pull/3580) -* Fixed the `WITH` clause if it specifies a simple alias without expressions. [#3570](https://github.com/ClickHouse/ClickHouse/pull/3570) -* Fixed processing of queries with named sub-queries and qualified column names when `enable_optimize_predicate_expression` is enabled. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/3588) -* Fixed the error `Attempt to attach to nullptr thread group` when working with materialized views. [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3623) -* Fixed a crash when passing certain incorrect arguments to the `arrayReverse` function. [73e3a7b6](https://github.com/ClickHouse/ClickHouse/commit/73e3a7b662161d6005e7727d8a711b930386b871) -* Fixed the buffer overflow in the `extractURLParameter` function. Improved performance. Added correct processing of strings containing zero bytes. [141e9799](https://github.com/ClickHouse/ClickHouse/commit/141e9799e49201d84ea8e951d1bed4fb6d3dacb5) -* Fixed buffer overflow in the `lowerUTF8` and `upperUTF8` functions. Removed the ability to execute these functions over `FixedString` type arguments. [#3662](https://github.com/ClickHouse/ClickHouse/pull/3662) -* Fixed a rare race condition when deleting `MergeTree` tables. [#3680](https://github.com/ClickHouse/ClickHouse/pull/3680) -* Fixed a race condition when reading from `Buffer` tables and simultaneously performing `ALTER` or `DROP` on the target tables. [#3719](https://github.com/ClickHouse/ClickHouse/pull/3719) -* Fixed a segfault if the `max_temporary_non_const_columns` limit was exceeded. [#3788](https://github.com/ClickHouse/ClickHouse/pull/3788) - -#### Improvements: - -* The server does not write the processed configuration files to the `/etc/clickhouse-server/` directory. Instead, it saves them in the `preprocessed_configs` directory inside `path`. This means that the `/etc/clickhouse-server/` directory doesn't have write access for the `clickhouse` user, which improves security. [#2443](https://github.com/ClickHouse/ClickHouse/pull/2443) -* The `min_merge_bytes_to_use_direct_io` option is set to 10 GiB by default. A merge that forms large parts of tables from the MergeTree family will be performed in `O_DIRECT` mode, which prevents excessive page cache eviction. [#3504](https://github.com/ClickHouse/ClickHouse/pull/3504) -* Accelerated server start when there is a very large number of tables. [#3398](https://github.com/ClickHouse/ClickHouse/pull/3398) -* Added a connection pool and HTTP `Keep-Alive` for connections between replicas. [#3594](https://github.com/ClickHouse/ClickHouse/pull/3594) -* If the query syntax is invalid, the `400 Bad Request` code is returned in the `HTTP` interface (500 was returned previously). [31bc680a](https://github.com/ClickHouse/ClickHouse/commit/31bc680ac5f4bb1d0360a8ba4696fa84bb47d6ab) -* The `join_default_strictness` option is set to `ALL` by default for compatibility. [120e2cbe](https://github.com/ClickHouse/ClickHouse/commit/120e2cbe2ff4fbad626c28042d9b28781c805afe) -* Removed logging to `stderr` from the `re2` library for invalid or complex regular expressions. 
[#3723](https://github.com/ClickHouse/ClickHouse/pull/3723) -* Added for the `Kafka` table engine: checks for subscriptions before beginning to read from Kafka; the kafka_max_block_size setting for the table. [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3396) -* The `cityHash64`, `farmHash64`, `metroHash64`, `sipHash64`, `halfMD5`, `murmurHash2_32`, `murmurHash2_64`, `murmurHash3_32`, and `murmurHash3_64` functions now work for any number of arguments and for arguments in the form of tuples. [#3451](https://github.com/ClickHouse/ClickHouse/pull/3451) [#3519](https://github.com/ClickHouse/ClickHouse/pull/3519) -* The `arrayReverse` function now works with any types of arrays. [73e3a7b6](https://github.com/ClickHouse/ClickHouse/commit/73e3a7b662161d6005e7727d8a711b930386b871) -* Added an optional parameter: the slot size for the `timeSlots` function. [Kirill Shvakov](https://github.com/ClickHouse/ClickHouse/pull/3724) -* For `FULL` and `RIGHT JOIN`, the `max_block_size` setting is used for a stream of non-joined data from the right table. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/3699) -* Added the `--secure` command line parameter in `clickhouse-benchmark` and `clickhouse-performance-test` to enable TLS. [#3688](https://github.com/ClickHouse/ClickHouse/pull/3688) [#3690](https://github.com/ClickHouse/ClickHouse/pull/3690) -* Type conversion when the structure of a `Buffer` type table does not match the structure of the destination table. [Vitaly Baranov](https://github.com/ClickHouse/ClickHouse/pull/3603) -* Added the `tcp_keep_alive_timeout` option to enable keep-alive packets after inactivity for the specified time interval. [#3441](https://github.com/ClickHouse/ClickHouse/pull/3441) -* Removed unnecessary quoting of values for the partition key in the `system.parts` table if it consists of a single column. [#3652](https://github.com/ClickHouse/ClickHouse/pull/3652) -* The modulo function works for `Date` and `DateTime` data types. [#3385](https://github.com/ClickHouse/ClickHouse/pull/3385) -* Added synonyms for the `POWER`, `LN`, `LCASE`, `UCASE`, `REPLACE`, `LOCATE`, `SUBSTR`, and `MID` functions. [#3774](https://github.com/ClickHouse/ClickHouse/pull/3774) [#3763](https://github.com/ClickHouse/ClickHouse/pull/3763) Some function names are case-insensitive for compatibility with the SQL standard. Added syntactic sugar `SUBSTRING(expr FROM start FOR length)` for compatibility with SQL. [#3804](https://github.com/ClickHouse/ClickHouse/pull/3804) -* Added the ability to `mlock` memory pages corresponding to `clickhouse-server` executable code to prevent it from being forced out of memory. This feature is disabled by default. [#3553](https://github.com/ClickHouse/ClickHouse/pull/3553) -* Improved performance when reading from `O_DIRECT` (with the `min_bytes_to_use_direct_io` option enabled). [#3405](https://github.com/ClickHouse/ClickHouse/pull/3405) -* Improved performance of the `dictGet...OrDefault` function for a constant key argument and a non-constant default argument. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/3563) -* The `firstSignificantSubdomain` function now processes the domains `gov`, `mil`, and `edu`. [Igor Hatarist](https://github.com/ClickHouse/ClickHouse/pull/3601) Improved performance. [#3628](https://github.com/ClickHouse/ClickHouse/pull/3628) -* Ability to specify custom environment variables for starting `clickhouse-server` using the `SYS-V init.d` script by defining `CLICKHOUSE_PROGRAM_ENV` in `/etc/default/clickhouse`. 
-[Pavlo Bashynskyi](https://github.com/ClickHouse/ClickHouse/pull/3612) -* Correct return code for the `clickhouse-server` init script. [#3516](https://github.com/ClickHouse/ClickHouse/pull/3516) -* The `system.metrics` table now has the `VersionInteger` metric, and `system.build_options` has the added line `VERSION_INTEGER`, which contains the numeric form of the ClickHouse version, such as `18016000`. [#3644](https://github.com/ClickHouse/ClickHouse/pull/3644) -* Removed the ability to compare the `Date` type with a number to avoid potential errors like `date = 2018-12-17`, where quotes around the date are omitted by mistake. [#3687](https://github.com/ClickHouse/ClickHouse/pull/3687) -* Fixed the behavior of stateful functions like `rowNumberInAllBlocks`. They previously output a result that was one number larger due to starting during query analysis. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/3729) -* If the `force_restore_data` file can't be deleted, an error message is displayed. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/3794) - -#### Build improvements: - -* Updated the `jemalloc` library, which fixes a potential memory leak. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/3557) -* Profiling with `jemalloc` is enabled by default for debug builds. [2cc82f5c](https://github.com/ClickHouse/ClickHouse/commit/2cc82f5cbe266421cd4c1165286c2c47e5ffcb15) -* Added the ability to run integration tests when only `Docker` is installed on the system. [#3650](https://github.com/ClickHouse/ClickHouse/pull/3650) -* Added a fuzz test of expressions in `SELECT` queries. [#3442](https://github.com/ClickHouse/ClickHouse/pull/3442) -* Added a stress test for commits, which performs functional tests in parallel and in random order to detect more race conditions. [#3438](https://github.com/ClickHouse/ClickHouse/pull/3438) -* Improved the method for starting `clickhouse-server` in a Docker image. [Elghazal Ahmed](https://github.com/ClickHouse/ClickHouse/pull/3663) -* For a Docker image, added support for initializing databases using files in the `/docker-entrypoint-initdb.d` directory. [Konstantin Lebedev](https://github.com/ClickHouse/ClickHouse/pull/3695) -* Fixes for builds on ARM. [#3709](https://github.com/ClickHouse/ClickHouse/pull/3709) - -#### Backward incompatible changes: - -* Removed the ability to compare the `Date` type with a number. Instead of `toDate('2018-12-18') = 17883`, you must use the explicit type conversion `= toDate(17883)`. [#3687](https://github.com/ClickHouse/ClickHouse/pull/3687) - -## ClickHouse release 18.14 -### ClickHouse release 18.14.19, 2018-12-19 - -#### Bug fixes: - -* Fixed an error that led to problems with updating dictionaries with the ODBC source. [#3825](https://github.com/ClickHouse/ClickHouse/issues/3825), [#3829](https://github.com/ClickHouse/ClickHouse/issues/3829) -* Databases are correctly specified when executing DDL `ON CLUSTER` queries. [#3460](https://github.com/ClickHouse/ClickHouse/pull/3460) -* Fixed a segfault if the `max_temporary_non_const_columns` limit was exceeded. [#3788](https://github.com/ClickHouse/ClickHouse/pull/3788) - -#### Build improvements: - -* Fixes for builds on ARM. - -### ClickHouse release 18.14.18, 2018-12-04 - -#### Bug fixes: -* Fixed an error in the `dictGet...` function for dictionaries of type `range`, if one of the arguments is constant and the other is not. [#3751](https://github.com/ClickHouse/ClickHouse/pull/3751)
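For context on the fix above, a sketch of the affected call shape (the dictionary, attribute, table, and column names below are hypothetical; for `range`-type dictionaries the `dictGet...` functions take an additional range argument, typically a date):

```sql
-- Mixing constant and non-constant arguments in dictGet* for a range dictionary,
-- as in the case covered by the fix.
SELECT
    dictGetFloat64('discounts', 'rate', toUInt64(advertiser_id), toDate('2018-12-01')) AS rate_on_fixed_date,
    dictGetFloat64('discounts', 'rate', toUInt64(1), event_date) AS rate_for_fixed_key
FROM events;
```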
-* Fixed an error that caused `netlink: '...': attribute type 1 has an invalid length` messages to be printed in the Linux kernel log; this happened only on sufficiently recent Linux kernel versions. [#3749](https://github.com/ClickHouse/ClickHouse/pull/3749) -* Fixed a segfault in the `empty` function for an argument of `FixedString` type. [Daniel, Dao Quang Minh](https://github.com/ClickHouse/ClickHouse/pull/3703) -* Fixed excessive memory allocation when using a large value of the `max_query_size` setting (a memory chunk of `max_query_size` bytes was preallocated at once). [#3720](https://github.com/ClickHouse/ClickHouse/pull/3720) - -#### Build changes: -* Fixed build with LLVM/Clang libraries of version 7 from the OS packages (these libraries are used for runtime query compilation). [#3582](https://github.com/ClickHouse/ClickHouse/pull/3582) - ### ClickHouse release 18.14.17, 2018-11-30 - #### Bug fixes: -* Fixed cases when the ODBC bridge process did not terminate with the main server process. [#3642](https://github.com/ClickHouse/ClickHouse/pull/3642) -* Fixed synchronous insertion into the `Distributed` table with a columns list that differs from the column list of the remote table. [#3673](https://github.com/ClickHouse/ClickHouse/pull/3673) -* Fixed a rare race condition that could lead to a crash when dropping a `MergeTree` table. [#3643](https://github.com/ClickHouse/ClickHouse/pull/3643) -* Fixed a query deadlock in the case when query thread creation fails with the `Resource temporarily unavailable` error. [#3643](https://github.com/ClickHouse/ClickHouse/pull/3643) -* Fixed parsing of the `ENGINE` clause when the `CREATE AS table` syntax was used and the `ENGINE` clause was specified before the `AS table` (the error resulted in ignoring the specified engine). [#3692](https://github.com/ClickHouse/ClickHouse/pull/3692) - ### ClickHouse release 18.14.15, 2018-11-21 - #### Bug fixes: -* The size of a memory chunk was overestimated while deserializing a column of type `Array(String)`, which led to "Memory limit exceeded" errors. The issue appeared in version 18.12.13. [#3589](https://github.com/ClickHouse/ClickHouse/issues/3589) - ### ClickHouse release 18.14.14, 2018-11-20 - #### Bug fixes: -* Fixed `ON CLUSTER` queries when the cluster is configured as secure (the `<secure>` flag). [#3599](https://github.com/ClickHouse/ClickHouse/pull/3599) - #### Build changes: -* Fixed build problems (llvm-7 from the system, macOS). [#3582](https://github.com/ClickHouse/ClickHouse/pull/3582) - ### ClickHouse release 18.14.13, 2018-11-08 - #### Bug fixes: -* Fixed the `Block structure mismatch in MergingSorted stream` error. [#3162](https://github.com/ClickHouse/ClickHouse/issues/3162) -* Fixed `ON CLUSTER` queries in the case when secure connections were turned on in the cluster config (the `<secure>` flag). [#3465](https://github.com/ClickHouse/ClickHouse/pull/3465) -* Fixed an error in queries that used `SAMPLE`, `PREWHERE` and alias columns. [#3543](https://github.com/ClickHouse/ClickHouse/pull/3543) -* Fixed a rare `unknown compression method` error when the `min_bytes_to_use_direct_io` setting was enabled. [#3544](https://github.com/ClickHouse/ClickHouse/pull/3544) - #### Performance improvements: -* Fixed performance regression of queries with `GROUP BY` of columns of UInt16 or Date type when executing on AMD EPYC processors. [Igor Lapko](https://github.com/ClickHouse/ClickHouse/pull/3512) -* Fixed performance regression of queries that process long strings.
[#3530](https://github.com/ClickHouse/ClickHouse/pull/3530) - -#### Build improvements: -* Improvements for simplifying the Arcadia build. [#3475](https://github.com/ClickHouse/ClickHouse/pull/3475), [#3535](https://github.com/ClickHouse/ClickHouse/pull/3535) - -### ClickHouse release 18.14.12, 2018-11-02 - -#### Bug fixes: - -* Fixed a crash on joining two unnamed subqueries. [#3505](https://github.com/ClickHouse/ClickHouse/pull/3505) -* Fixed generating incorrect queries (with an empty `WHERE` clause) when querying external databases. [hotid](https://github.com/ClickHouse/ClickHouse/pull/3477) -* Fixed using an incorrect timeout value in ODBC dictionaries. [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3511) - -### ClickHouse release 18.14.11, 2018-10-29 - -#### Bug fixes: - -* Fixed the error `Block structure mismatch in UNION stream: different number of columns` in LIMIT queries. [#2156](https://github.com/ClickHouse/ClickHouse/issues/2156) -* Fixed errors when merging data in tables containing arrays inside Nested structures. [#3397](https://github.com/ClickHouse/ClickHouse/pull/3397) -* Fixed incorrect query results if the `merge_tree_uniform_read_distribution` setting is disabled (it is enabled by default). [#3429](https://github.com/ClickHouse/ClickHouse/pull/3429) -* Fixed an error on inserts to a Distributed table in Native format. [#3411](https://github.com/ClickHouse/ClickHouse/issues/3411) - -### ClickHouse release 18.14.10, 2018-10-23 - -* The `compile_expressions` setting (JIT compilation of expressions) is disabled by default. [#3410](https://github.com/ClickHouse/ClickHouse/pull/3410) -* The `enable_optimize_predicate_expression` setting is disabled by default. - -### ClickHouse release 18.14.9, 2018-10-16 - -#### New features: - -* The `WITH CUBE` modifier for `GROUP BY` (the alternative syntax `GROUP BY CUBE(...)` is also available). [#3172](https://github.com/ClickHouse/ClickHouse/pull/3172) -* Added the `formatDateTime` function. [Alexandr Krasheninnikov](https://github.com/ClickHouse/ClickHouse/pull/2770) -* Added the `JDBC` table engine and `jdbc` table function (requires installing clickhouse-jdbc-bridge). [Alexandr Krasheninnikov](https://github.com/ClickHouse/ClickHouse/pull/3210) -* Added functions for working with the ISO week number: `toISOWeek`, `toISOYear`, `toStartOfISOYear`, and `toDayOfYear`. [#3146](https://github.com/ClickHouse/ClickHouse/pull/3146) -* Now you can use `Nullable` columns for `MySQL` and `ODBC` tables. [#3362](https://github.com/ClickHouse/ClickHouse/pull/3362) -* Nested data structures can be read as nested objects in `JSONEachRow` format. Added the `input_format_import_nested_json` setting. [Veloman Yunkan](https://github.com/ClickHouse/ClickHouse/pull/3144) -* Parallel processing is available for many `MATERIALIZED VIEW`s when inserting data. See the `parallel_view_processing` setting. [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3208) -* Added the `SYSTEM FLUSH LOGS` query (forced log flushes to system tables such as `query_log`) [#3321](https://github.com/ClickHouse/ClickHouse/pull/3321) -* Now you can use pre-defined `database` and `table` macros when declaring `Replicated` tables. [#3251](https://github.com/ClickHouse/ClickHouse/pull/3251) -* Added the ability to read `Decimal` type values in engineering notation (indicating powers of ten). 
[#3153](https://github.com/ClickHouse/ClickHouse/pull/3153) - -#### Experimental features: - -* Optimization of the GROUP BY clause for `LowCardinality` data types. [#3138](https://github.com/ClickHouse/ClickHouse/pull/3138) -* Optimized calculation of expressions for `LowCardinality` data types. [#3200](https://github.com/ClickHouse/ClickHouse/pull/3200) - -#### Improvements: - -* Significantly reduced memory consumption for queries with `ORDER BY` and `LIMIT`. See the `max_bytes_before_remerge_sort` setting. [#3205](https://github.com/ClickHouse/ClickHouse/pull/3205) -* In the absence of `JOIN` (`LEFT`, `INNER`, ...), `INNER JOIN` is assumed. [#3147](https://github.com/ClickHouse/ClickHouse/pull/3147) -* Qualified asterisks work correctly in queries with `JOIN`. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/3202) -* The `ODBC` table engine correctly chooses the method for quoting identifiers in the SQL dialect of a remote database. [Alexandr Krasheninnikov](https://github.com/ClickHouse/ClickHouse/pull/3210) -* The `compile_expressions` setting (JIT compilation of expressions) is enabled by default. -* Fixed behavior for simultaneous DROP DATABASE/TABLE IF EXISTS and CREATE DATABASE/TABLE IF NOT EXISTS. Previously, a `CREATE DATABASE ... IF NOT EXISTS` query could return the error message "File ... already exists", and the `CREATE TABLE ... IF NOT EXISTS` and `DROP TABLE IF EXISTS` queries could return `Table ... is creating or attaching right now`. [#3101](https://github.com/ClickHouse/ClickHouse/pull/3101) -* LIKE and IN expressions with a constant right half are passed to the remote server when querying from MySQL or ODBC tables. [#3182](https://github.com/ClickHouse/ClickHouse/pull/3182) -* Comparisons with constant expressions in a WHERE clause are passed to the remote server when querying from MySQL and ODBC tables. Previously, only comparisons with constants were passed. [#3182](https://github.com/ClickHouse/ClickHouse/pull/3182) -* Correct calculation of row width in the terminal for `Pretty` formats, including strings with hieroglyphs. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/3257). -* `ON CLUSTER` can be specified for `ALTER UPDATE` queries. -* Improved performance for reading data in `JSONEachRow` format. [#3332](https://github.com/ClickHouse/ClickHouse/pull/3332) -* Added synonyms for the `LENGTH` and `CHARACTER_LENGTH` functions for compatibility. The `CONCAT` function is no longer case-sensitive. [#3306](https://github.com/ClickHouse/ClickHouse/pull/3306) -* Added the `TIMESTAMP` synonym for the `DateTime` type. [#3390](https://github.com/ClickHouse/ClickHouse/pull/3390) -* There is always space reserved for `query_id` in the server logs, even if the log line is not related to a query. This makes it easier to parse server text logs with third-party tools. -* Memory consumption by a query is logged each time it exceeds another whole number of gigabytes. [#3205](https://github.com/ClickHouse/ClickHouse/pull/3205) -* Added compatibility mode for the case when the client library that uses the Native protocol mistakenly sends fewer columns than the server expects for the INSERT query. This scenario was possible when using the clickhouse-cpp library. Previously, this scenario caused the server to crash. [#3171](https://github.com/ClickHouse/ClickHouse/pull/3171) -* In a user-defined WHERE expression in `clickhouse-copier`, you can now use a `partition_key` alias (for additional filtering by source table partition).
This is useful if the partitioning scheme changes during copying, but only changes slightly. [#3166](https://github.com/ClickHouse/ClickHouse/pull/3166) -* The workflow of the `Kafka` engine has been moved to a background thread pool in order to automatically reduce the speed of data reading at high loads. [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3215). -* Support for reading `Tuple` and `Nested` values of structures like `struct` in the `Cap'n'Proto format`. [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3216) -* The list of top-level domains for the `firstSignificantSubdomain` function now includes the domain `biz`. [decaseal](https://github.com/ClickHouse/ClickHouse/pull/3219) -* In the configuration of external dictionaries, `null_value` is interpreted as the value of the default data type. [#3330](https://github.com/ClickHouse/ClickHouse/pull/3330) -* Support for the `intDiv` and `intDivOrZero` functions for `Decimal`. [b48402e8](https://github.com/ClickHouse/ClickHouse/commit/b48402e8712e2b9b151e0eef8193811d433a1264) -* Support for the `Date`, `DateTime`, `UUID`, and `Decimal` types as a key for the `sumMap` aggregate function. [#3281](https://github.com/ClickHouse/ClickHouse/pull/3281) -* Support for the `Decimal` data type in external dictionaries. [#3324](https://github.com/ClickHouse/ClickHouse/pull/3324) -* Support for the `Decimal` data type in `SummingMergeTree` tables. [#3348](https://github.com/ClickHouse/ClickHouse/pull/3348) -* Added specializations for `UUID` in `if`. [#3366](https://github.com/ClickHouse/ClickHouse/pull/3366) -* Reduced the number of `open` and `close` system calls when reading from a `MergeTree table`. [#3283](https://github.com/ClickHouse/ClickHouse/pull/3283) -* A `TRUNCATE TABLE` query can be executed on any replica (the query is passed to the leader replica). [Kirill Shvakov](https://github.com/ClickHouse/ClickHouse/pull/3375) - -#### Bug fixes: - -* Fixed an issue with `Dictionary` tables for `range_hashed` dictionaries. This error occurred in version 18.12.17. [#1702](https://github.com/ClickHouse/ClickHouse/pull/1702) -* Fixed an error when loading `range_hashed` dictionaries (the message `Unsupported type Nullable (...)`). This error occurred in version 18.12.17. [#3362](https://github.com/ClickHouse/ClickHouse/pull/3362) -* Fixed errors in the `pointInPolygon` function due to the accumulation of inaccurate calculations for polygons with a large number of vertices located close to each other. [#3331](https://github.com/ClickHouse/ClickHouse/pull/3331) [#3341](https://github.com/ClickHouse/ClickHouse/pull/3341) -* If after merging data parts, the checksum for the resulting part differs from the result of the same merge in another replica, the result of the merge is deleted and the data part is downloaded from the other replica (this is the correct behavior). But after downloading the data part, it couldn't be added to the working set because of an error that the part already exists (because the data part was deleted with some delay after the merge). This led to cyclical attempts to download the same data. [#3194](https://github.com/ClickHouse/ClickHouse/pull/3194) -* Fixed incorrect calculation of total memory consumption by queries (because of incorrect calculation, the `max_memory_usage_for_all_queries` setting worked incorrectly and the `MemoryTracking` metric had an incorrect value). This error occurred in version 18.12.13. 
[Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3344) -* Fixed the functionality of `CREATE TABLE ... ON CLUSTER ... AS SELECT ...` This error occurred in version 18.12.13. [#3247](https://github.com/ClickHouse/ClickHouse/pull/3247) -* Fixed unnecessary preparation of data structures for `JOIN`s on the server that initiates the query if the `JOIN` is only performed on remote servers. [#3340](https://github.com/ClickHouse/ClickHouse/pull/3340) -* Fixed bugs in the `Kafka` engine: deadlocks after exceptions when starting to read data, and locks upon completion [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3215). -* For `Kafka` tables, the optional `schema` parameter was not passed (the schema of the `Cap'n'Proto` format). [Vojtech Splichal](https://github.com/ClickHouse/ClickHouse/pull/3150) -* If the ensemble of ZooKeeper servers has servers that accept the connection but then immediately close it instead of responding to the handshake, ClickHouse chooses to connect another server. Previously, this produced the error `Cannot read all data. Bytes read: 0. Bytes expected: 4.` and the server couldn't start. [8218cf3a](https://github.com/ClickHouse/ClickHouse/commit/8218cf3a5f39a43401953769d6d12a0bb8d29da9) -* If the ensemble of ZooKeeper servers contains servers for which the DNS query returns an error, these servers are ignored. [17b8e209](https://github.com/ClickHouse/ClickHouse/commit/17b8e209221061325ad7ba0539f03c6e65f87f29) -* Fixed type conversion between `Date` and `DateTime` when inserting data in the `VALUES` format (if `input_format_values_interpret_expressions = 1`). Previously, the conversion was performed between the numerical value of the number of days in Unix Epoch time and the Unix timestamp, which led to unexpected results. [#3229](https://github.com/ClickHouse/ClickHouse/pull/3229) -* Corrected type conversion between `Decimal` and integer numbers. [#3211](https://github.com/ClickHouse/ClickHouse/pull/3211) -* Fixed errors in the `enable_optimize_predicate_expression` setting. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/3231) -* Fixed a parsing error in CSV format with floating-point numbers if a non-default CSV separator is used, such as `;` [#3155](https://github.com/ClickHouse/ClickHouse/pull/3155) -* Fixed the `arrayCumSumNonNegative` function (it does not accumulate negative values if the accumulator is less than zero). [Aleksey Studnev](https://github.com/ClickHouse/ClickHouse/pull/3163) -* Fixed how `Merge` tables work on top of `Distributed` tables when using `PREWHERE`. [#3165](https://github.com/ClickHouse/ClickHouse/pull/3165) -* Bug fixes in the `ALTER UPDATE` query. -* Fixed bugs in the `odbc` table function that appeared in version 18.12. [#3197](https://github.com/ClickHouse/ClickHouse/pull/3197) -* Fixed the operation of aggregate functions with `StateArray` combinators. [#3188](https://github.com/ClickHouse/ClickHouse/pull/3188) -* Fixed a crash when dividing a `Decimal` value by zero. [69dd6609](https://github.com/ClickHouse/ClickHouse/commit/69dd6609193beb4e7acd3e6ad216eca0ccfb8179) -* Fixed output of types for operations using `Decimal` and integer arguments. [#3224](https://github.com/ClickHouse/ClickHouse/pull/3224) -* Fixed the segfault during `GROUP BY` on `Decimal128`. 
[3359ba06](https://github.com/ClickHouse/ClickHouse/commit/3359ba06c39fcd05bfdb87d6c64154819621e13a) -* The `log_query_threads` setting (logging information about each thread of query execution) now takes effect only if the `log_queries` option (logging information about queries) is set to 1. Since the `log_query_threads` option is enabled by default, information about threads was previously logged even if query logging was disabled. [#3241](https://github.com/ClickHouse/ClickHouse/pull/3241) -* Fixed an error in the distributed operation of the quantiles aggregate function (the error message `Not found column quantile...`). [292a8855](https://github.com/ClickHouse/ClickHouse/commit/292a885533b8e3b41ce8993867069d14cbd5a664) -* Fixed the compatibility problem when working on a cluster of version 18.12.17 servers and older servers at the same time. For distributed queries with GROUP BY keys of both fixed and non-fixed length, if there was a large amount of data to aggregate, the returned data was not always fully aggregated (two different rows contained the same aggregation keys). [#3254](https://github.com/ClickHouse/ClickHouse/pull/3254) -* Fixed handling of substitutions in `clickhouse-performance-test`, if the query contains only part of the substitutions declared in the test. [#3263](https://github.com/ClickHouse/ClickHouse/pull/3263) -* Fixed an error when using `FINAL` with `PREWHERE`. [#3298](https://github.com/ClickHouse/ClickHouse/pull/3298) -* Fixed an error when using `PREWHERE` over columns that were added during `ALTER`. [#3298](https://github.com/ClickHouse/ClickHouse/pull/3298) -* Added a check for the absence of `arrayJoin` for `DEFAULT` and `MATERIALIZED` expressions. Previously, `arrayJoin` led to an error when inserting data. [#3337](https://github.com/ClickHouse/ClickHouse/pull/3337) -* Added a check for the absence of `arrayJoin` in a `PREWHERE` clause. Previously, this led to messages like `Size ... doesn't match` or `Unknown compression method` when executing queries. [#3357](https://github.com/ClickHouse/ClickHouse/pull/3357) -* Fixed segfault that could occur in rare cases after optimization that replaced AND chains from equality evaluations with the corresponding IN expression. [liuyimin-bytedance](https://github.com/ClickHouse/ClickHouse/pull/3339) -* Minor corrections to `clickhouse-benchmark`: previously, client information was not sent to the server; now the number of queries executed is calculated more accurately when shutting down and for limiting the number of iterations. [#3351](https://github.com/ClickHouse/ClickHouse/pull/3351) [#3352](https://github.com/ClickHouse/ClickHouse/pull/3352) - -#### Backward incompatible changes: - -* Removed the `allow_experimental_decimal_type` option. The `Decimal` data type is available for default use. [#3329](https://github.com/ClickHouse/ClickHouse/pull/3329) - -## ClickHouse release 18.12 - -### ClickHouse release 18.12.17, 2018-09-16 - -#### New features: - -* `invalidate_query` (the ability to specify a query to check whether an external dictionary needs to be updated) is implemented for the `clickhouse` source. [#3126](https://github.com/ClickHouse/ClickHouse/pull/3126) -* Added the ability to use `UInt*`, `Int*`, and `DateTime` data types (along with the `Date` type) as a `range_hashed` external dictionary key that defines the boundaries of ranges. Now `NULL` can be used to designate an open range. 
[Vasily Nemkov](https://github.com/ClickHouse/ClickHouse/pull/3123) -* The `Decimal` type now supports `var*` and `stddev*` aggregate functions. [#3129](https://github.com/ClickHouse/ClickHouse/pull/3129) -* The `Decimal` type now supports mathematical functions (`exp`, `sin` and so on.) [#3129](https://github.com/ClickHouse/ClickHouse/pull/3129) -* The `system.part_log` table now has the `partition_id` column. [#3089](https://github.com/ClickHouse/ClickHouse/pull/3089) - -#### Bug fixes: - -* `Merge` now works correctly on `Distributed` tables. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/3159) -* Fixed incompatibility (unnecessary dependency on the `glibc` version) that made it impossible to run ClickHouse on `Ubuntu Precise` and older versions. The incompatibility arose in version 18.12.13. [#3130](https://github.com/ClickHouse/ClickHouse/pull/3130) -* Fixed errors in the `enable_optimize_predicate_expression` setting. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/3107) -* Fixed a minor issue with backwards compatibility that appeared when working with a cluster of replicas on versions earlier than 18.12.13 and simultaneously creating a new replica of a table on a server with a newer version (shown in the message `Can not clone replica, because the ... updated to new ClickHouse version`, which is logical, but shouldn't happen). [#3122](https://github.com/ClickHouse/ClickHouse/pull/3122) - -#### Backward incompatible changes: - -* The `enable_optimize_predicate_expression` option is enabled by default (which is rather optimistic). If query analysis errors occur that are related to searching for the column names, set `enable_optimize_predicate_expression` to 0. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/3107) - -### ClickHouse release 18.12.14, 2018-09-13 - -#### New features: - -* Added support for `ALTER UPDATE` queries. [#3035](https://github.com/ClickHouse/ClickHouse/pull/3035) -* Added the `allow_ddl` option, which restricts the user's access to DDL queries. [#3104](https://github.com/ClickHouse/ClickHouse/pull/3104) -* Added the `min_merge_bytes_to_use_direct_io` option for `MergeTree` engines, which allows you to set a threshold for the total size of the merge (when above the threshold, data part files will be handled using O_DIRECT). [#3117](https://github.com/ClickHouse/ClickHouse/pull/3117) -* The `system.merges` system table now contains the `partition_id` column. [#3099](https://github.com/ClickHouse/ClickHouse/pull/3099) - -#### Improvements - -* If a data part remains unchanged during mutation, it isn't downloaded by replicas. [#3103](https://github.com/ClickHouse/ClickHouse/pull/3103) -* Autocomplete is available for names of settings when working with `clickhouse-client`. [#3106](https://github.com/ClickHouse/ClickHouse/pull/3106) - -#### Bug fixes: - -* Added a check for the sizes of arrays that are elements of `Nested` type fields when inserting. [#3118](https://github.com/ClickHouse/ClickHouse/pull/3118) -* Fixed an error updating external dictionaries with the `ODBC` source and `hashed` storage. This error occurred in version 18.12.13. -* Fixed a crash when creating a temporary table from a query with an `IN` condition. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/3098) -* Fixed an error in aggregate functions for arrays that can have `NULL` elements. 
[Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/3097) - - -### ClickHouse release 18.12.13, 2018-09-10 - -#### New features: - -* Added the `DECIMAL(digits, scale)` data type (`Decimal32(scale)`, `Decimal64(scale)`, `Decimal128(scale)`). To enable it, use the setting `allow_experimental_decimal_type`. [#2846](https://github.com/ClickHouse/ClickHouse/pull/2846) [#2970](https://github.com/ClickHouse/ClickHouse/pull/2970) [#3008](https://github.com/ClickHouse/ClickHouse/pull/3008) [#3047](https://github.com/ClickHouse/ClickHouse/pull/3047) -* New `WITH ROLLUP` modifier for `GROUP BY` (alternative syntax: `GROUP BY ROLLUP(...)`). [#2948](https://github.com/ClickHouse/ClickHouse/pull/2948) -* In queries with JOIN, the star character expands to a list of columns in all tables, in compliance with the SQL standard. You can restore the old behavior by setting `asterisk_left_columns_only` to 1 on the user configuration level. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2787) -* Added support for JOIN with table functions. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2907) -* Autocomplete by pressing Tab in clickhouse-client. [Sergey Shcherbin](https://github.com/ClickHouse/ClickHouse/pull/2447) -* Ctrl+C in clickhouse-client clears a query that was entered. [#2877](https://github.com/ClickHouse/ClickHouse/pull/2877) -* Added the `join_default_strictness` setting (values: `"`, `'any'`, `'all'`). This allows you to not specify `ANY` or `ALL` for `JOIN`. [#2982](https://github.com/ClickHouse/ClickHouse/pull/2982) -* Each line of the server log related to query processing shows the query ID. [#2482](https://github.com/ClickHouse/ClickHouse/pull/2482) -* Now you can get query execution logs in clickhouse-client (use the `send_logs_level` setting). With distributed query processing, logs are cascaded from all the servers. [#2482](https://github.com/ClickHouse/ClickHouse/pull/2482) -* The `system.query_log` and `system.processes` (`SHOW PROCESSLIST`) tables now have information about all changed settings when you run a query (the nested structure of the `Settings` data). Added the `log_query_settings` setting. [#2482](https://github.com/ClickHouse/ClickHouse/pull/2482) -* The `system.query_log` and `system.processes` tables now show information about the number of threads that are participating in query execution (see the `thread_numbers` column). [#2482](https://github.com/ClickHouse/ClickHouse/pull/2482) -* Added `ProfileEvents` counters that measure the time spent on reading and writing over the network and reading and writing to disk, the number of network errors, and the time spent waiting when network bandwidth is limited. [#2482](https://github.com/ClickHouse/ClickHouse/pull/2482) -* Added `ProfileEvents`counters that contain the system metrics from rusage (you can use them to get information about CPU usage in userspace and the kernel, page faults, and context switches), as well as taskstats metrics (use these to obtain information about I/O wait time, CPU wait time, and the amount of data read and recorded, both with and without page cache). [#2482](https://github.com/ClickHouse/ClickHouse/pull/2482) -* The `ProfileEvents` counters are applied globally and for each query, as well as for each query execution thread, which allows you to profile resource consumption by query in detail. [#2482](https://github.com/ClickHouse/ClickHouse/pull/2482) -* Added the `system.query_thread_log` table, which contains information about each query execution thread. 
Added the `log_query_threads` setting. [#2482](https://github.com/ClickHouse/ClickHouse/pull/2482) -* The `system.metrics` and `system.events` tables now have built-in documentation. [#3016](https://github.com/ClickHouse/ClickHouse/pull/3016) -* Added the `arrayEnumerateDense` function. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2975) -* Added the `arrayCumSumNonNegative` and `arrayDifference` functions. [Aleksey Studnev](https://github.com/ClickHouse/ClickHouse/pull/2942) -* Added the `retention` aggregate function. [Sundy Li](https://github.com/ClickHouse/ClickHouse/pull/2887) -* Now you can add (merge) states of aggregate functions by using the plus operator, and multiply the states of aggregate functions by a nonnegative constant. [#3062](https://github.com/ClickHouse/ClickHouse/pull/3062) [#3034](https://github.com/ClickHouse/ClickHouse/pull/3034) -* Tables in the MergeTree family now have the virtual column `_partition_id`. [#3089](https://github.com/ClickHouse/ClickHouse/pull/3089) - -#### Experimental features: - -* Added the `LowCardinality(T)` data type. This data type automatically creates a local dictionary of values and allows data processing without unpacking the dictionary. [#2830](https://github.com/ClickHouse/ClickHouse/pull/2830) -* Added a cache of JIT-compiled functions and a counter for the number of uses before compiling. To JIT compile expressions, enable the `compile_expressions` setting. [#2990](https://github.com/ClickHouse/ClickHouse/pull/2990) [#3077](https://github.com/ClickHouse/ClickHouse/pull/3077) - -#### Improvements: - -* Fixed the problem with unlimited accumulation of the replication log when there are abandoned replicas. Added an effective recovery mode for replicas with a long lag. -* Improved performance of `GROUP BY` with multiple aggregation fields when one of them is string and the others are fixed length. -* Improved performance when using `PREWHERE` and with implicit transfer of expressions in `PREWHERE`. -* Improved parsing performance for text formats (`CSV`, `TSV`). [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2977) [#2980](https://github.com/ClickHouse/ClickHouse/pull/2980) -* Improved performance of reading strings and arrays in binary formats. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2955) -* Increased performance and reduced memory consumption for queries to `system.tables` and `system.columns` when there is a very large number of tables on a single server. [#2953](https://github.com/ClickHouse/ClickHouse/pull/2953) -* Fixed a performance problem in the case of a large stream of queries that result in an error (the ` _dl_addr` function is visible in `perf top`, but the server isn't using much CPU). [#2938](https://github.com/ClickHouse/ClickHouse/pull/2938) -* Conditions are cast into the View (when `enable_optimize_predicate_expression` is enabled). [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2907) -* Improvements to the functionality for the `UUID` data type. [#3074](https://github.com/ClickHouse/ClickHouse/pull/3074) [#2985](https://github.com/ClickHouse/ClickHouse/pull/2985) -* The `UUID` data type is supported in The-Alchemist dictionaries. [#2822](https://github.com/ClickHouse/ClickHouse/pull/2822) -* The `visitParamExtractRaw` function works correctly with nested structures. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2974) -* When the `input_format_skip_unknown_fields` setting is enabled, object fields in `JSONEachRow` format are skipped correctly. 
[BlahGeek](https://github.com/ClickHouse/ClickHouse/pull/2958) -* For a `CASE` expression with conditions, you can now omit `ELSE`, which is equivalent to `ELSE NULL`. [#2920](https://github.com/ClickHouse/ClickHouse/pull/2920) -* The operation timeout can now be configured when working with ZooKeeper. [urykhy](https://github.com/ClickHouse/ClickHouse/pull/2971) -* You can specify an offset for `LIMIT n, m` as `LIMIT n OFFSET m`. [#2840](https://github.com/ClickHouse/ClickHouse/pull/2840) -* You can use the `SELECT TOP n` syntax as an alternative for `LIMIT`. [#2840](https://github.com/ClickHouse/ClickHouse/pull/2840) -* Increased the size of the queue to write to system tables, so the `SystemLog parameter queue is full` error doesn't happen as often. -* The `windowFunnel` aggregate function now supports events that meet multiple conditions. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2801) -* Duplicate columns can be used in a `USING` clause for `JOIN`. [#3006](https://github.com/ClickHouse/ClickHouse/pull/3006) -* `Pretty` formats now have a limit on column alignment by width. Use the `output_format_pretty_max_column_pad_width` setting. If a value is wider, it will still be displayed in its entirety, but the other cells in the table will not be too wide. [#3003](https://github.com/ClickHouse/ClickHouse/pull/3003) -* The `odbc` table function now allows you to specify the database/schema name. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2885) -* Added the ability to use a username specified in the `clickhouse-client` config file. [Vladimir Kozbin](https://github.com/ClickHouse/ClickHouse/pull/2909) -* The `ZooKeeperExceptions` counter has been split into three counters: `ZooKeeperUserExceptions`, `ZooKeeperHardwareExceptions`, and `ZooKeeperOtherExceptions`. -* `ALTER DELETE` queries work for materialized views. -* Added randomization when running the cleanup thread periodically for `ReplicatedMergeTree` tables in order to avoid periodic load spikes when there are a very large number of `ReplicatedMergeTree` tables. -* Support for `ATTACH TABLE ... ON CLUSTER` queries. [#3025](https://github.com/ClickHouse/ClickHouse/pull/3025) - -#### Bug fixes: - -* Fixed an issue with `Dictionary` tables (throws the `Size of offsets doesn't match size of column` or `Unknown compression method` exception). This bug appeared in version 18.10.3. [#2913](https://github.com/ClickHouse/ClickHouse/issues/2913) -* Fixed a bug when merging `CollapsingMergeTree` tables if one of the data parts is empty (these parts are formed during merge or `ALTER DELETE` if all data was deleted), and the `vertical` algorithm was used for the merge. [#3049](https://github.com/ClickHouse/ClickHouse/pull/3049) -* Fixed a race condition during `DROP` or `TRUNCATE` for `Memory` tables with a simultaneous `SELECT`, which could lead to server crashes. This bug appeared in version 1.1.54388. [#3038](https://github.com/ClickHouse/ClickHouse/pull/3038) -* Fixed the possibility of data loss when inserting in `Replicated` tables if the `Session is expired` error is returned (data loss can be detected by the `ReplicatedDataLoss` metric). This error occurred in version 1.1.54378. [#2939](https://github.com/ClickHouse/ClickHouse/pull/2939) [#2949](https://github.com/ClickHouse/ClickHouse/pull/2949) [#2964](https://github.com/ClickHouse/ClickHouse/pull/2964) -* Fixed a segfault during `JOIN ... ON`. 
[#3000](https://github.com/ClickHouse/ClickHouse/pull/3000) -* Fixed the error searching column names when the `WHERE` expression consists entirely of a qualified column name, such as `WHERE table.column`. [#2994](https://github.com/ClickHouse/ClickHouse/pull/2994) -* Fixed the "Not found column" error that occurred when executing distributed queries if a single column consisting of an IN expression with a subquery is requested from a remote server. [#3087](https://github.com/ClickHouse/ClickHouse/pull/3087) -* Fixed the `Block structure mismatch in UNION stream: different number of columns` error that occurred for distributed queries if one of the shards is local and the other is not, and optimization of the move to `PREWHERE` is triggered. [#2226](https://github.com/ClickHouse/ClickHouse/pull/2226) [#3037](https://github.com/ClickHouse/ClickHouse/pull/3037) [#3055](https://github.com/ClickHouse/ClickHouse/pull/3055) [#3065](https://github.com/ClickHouse/ClickHouse/pull/3065) [#3073](https://github.com/ClickHouse/ClickHouse/pull/3073) [#3090](https://github.com/ClickHouse/ClickHouse/pull/3090) [#3093](https://github.com/ClickHouse/ClickHouse/pull/3093) -* Fixed the `pointInPolygon` function for certain cases of non-convex polygons. [#2910](https://github.com/ClickHouse/ClickHouse/pull/2910) -* Fixed the incorrect result when comparing `nan` with integers. [#3024](https://github.com/ClickHouse/ClickHouse/pull/3024) -* Fixed an error in the `zlib-ng` library that could lead to segfault in rare cases. [#2854](https://github.com/ClickHouse/ClickHouse/pull/2854) -* Fixed a memory leak when inserting into a table with `AggregateFunction` columns, if the state of the aggregate function is not simple (allocates memory separately), and if a single insertion request results in multiple small blocks. [#3084](https://github.com/ClickHouse/ClickHouse/pull/3084) -* Fixed a race condition when creating and deleting the same `Buffer` or `MergeTree` table simultaneously. -* Fixed the possibility of a segfault when comparing tuples made up of certain non-trivial types, such as tuples. [#2989](https://github.com/ClickHouse/ClickHouse/pull/2989) -* Fixed the possibility of a segfault when running certain `ON CLUSTER` queries. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2960) -* Fixed an error in the `arrayDistinct` function for `Nullable` array elements. [#2845](https://github.com/ClickHouse/ClickHouse/pull/2845) [#2937](https://github.com/ClickHouse/ClickHouse/pull/2937) -* The `enable_optimize_predicate_expression` option now correctly supports cases with `SELECT *`. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2929) -* Fixed the segfault when re-initializing the ZooKeeper session. [#2917](https://github.com/ClickHouse/ClickHouse/pull/2917) -* Fixed potential blocking when working with ZooKeeper. -* Fixed incorrect code for adding nested data structures in a `SummingMergeTree`. -* When allocating memory for states of aggregate functions, alignment is correctly taken into account, which makes it possible to use operations that require alignment when implementing states of aggregate functions. [chenxing-xc](https://github.com/ClickHouse/ClickHouse/pull/2808) - -#### Security fix: - -* Safe use of ODBC data sources. Interaction with ODBC drivers uses a separate `clickhouse-odbc-bridge` process. Errors in third-party ODBC drivers no longer cause problems with server stability or vulnerabilities. 
[#2828](https://github.com/ClickHouse/ClickHouse/pull/2828) [#2879](https://github.com/ClickHouse/ClickHouse/pull/2879) [#2886](https://github.com/ClickHouse/ClickHouse/pull/2886) [#2893](https://github.com/ClickHouse/ClickHouse/pull/2893) [#2921](https://github.com/ClickHouse/ClickHouse/pull/2921) -* Fixed incorrect validation of the file path in the `catBoostPool` table function. [#2894](https://github.com/ClickHouse/ClickHouse/pull/2894) -* The contents of system tables (`tables`, `databases`, `parts`, `columns`, `parts_columns`, `merges`, `mutations`, `replicas`, and `replication_queue`) are filtered according to the user's configured access to databases (`allow_databases`). [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2856) - -#### Backward incompatible changes: - -* In queries with JOIN, the star character expands to a list of columns in all tables, in compliance with the SQL standard. You can restore the old behavior by setting `asterisk_left_columns_only` to 1 on the user configuration level. - -#### Build changes: - -* Most integration tests can now be run per commit. -* Code style checks can also be run per commit. -* The `memcpy` implementation is chosen correctly when building on CentOS7/Fedora. [Etienne Champetier](https://github.com/ClickHouse/ClickHouse/pull/2912) -* When using clang to build, some warnings from `-Weverything` have been added, in addition to the regular `-Wall -Wextra -Werror`. [#2957](https://github.com/ClickHouse/ClickHouse/pull/2957) -* Debug builds use the `jemalloc` debug option. -* The interface of the library for interacting with ZooKeeper is declared abstract. [#2950](https://github.com/ClickHouse/ClickHouse/pull/2950) - -## ClickHouse release 18.10 - -### ClickHouse release 18.10.3, 2018-08-13 - -#### New features: - -* HTTPS can be used for replication. [#2760](https://github.com/ClickHouse/ClickHouse/pull/2760) -* Added the functions `murmurHash2_64`, `murmurHash3_32`, `murmurHash3_64`, and `murmurHash3_128` in addition to the existing `murmurHash2_32`. [#2791](https://github.com/ClickHouse/ClickHouse/pull/2791) -* Support for Nullable types in the ClickHouse ODBC driver (`ODBCDriver2` output format). [#2834](https://github.com/ClickHouse/ClickHouse/pull/2834) -* Support for `UUID` in the key columns. - -#### Improvements: - -* Clusters can be removed without restarting the server when they are deleted from the config files. [#2777](https://github.com/ClickHouse/ClickHouse/pull/2777) -* External dictionaries can be removed without restarting the server when they are removed from config files. [#2779](https://github.com/ClickHouse/ClickHouse/pull/2779) -* Added `SETTINGS` support for the `Kafka` table engine. [Alexander Marshalov](https://github.com/ClickHouse/ClickHouse/pull/2781) -* Improvements for the `UUID` data type (not yet complete). [#2618](https://github.com/ClickHouse/ClickHouse/pull/2618) -* Support for empty parts after merges in the `SummingMergeTree`, `CollapsingMergeTree` and `VersionedCollapsingMergeTree` engines. [#2815](https://github.com/ClickHouse/ClickHouse/pull/2815) -* Old records of completed mutations are deleted (`ALTER DELETE`). [#2784](https://github.com/ClickHouse/ClickHouse/pull/2784) -* Added the `system.merge_tree_settings` table. [Kirill Shvakov](https://github.com/ClickHouse/ClickHouse/pull/2841) -* The `system.tables` table now has dependency columns: `dependencies_database` and `dependencies_table`. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2851)
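A small illustrative query for the dependency columns just mentioned (a sketch; the database filter is a placeholder):

```sql
-- Show which tables in 'default' have dependent tables (e.g. materialized views).
SELECT
    database,
    name,
    dependencies_database,
    dependencies_table
FROM system.tables
WHERE database = 'default'
  AND notEmpty(dependencies_table);
```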
-
-#### Build changes:
-
-* Most integration tests can now be run by commit.
-* Code style checks can also be run by commit.
-* The `memcpy` implementation is chosen correctly when building on CentOS7/Fedora. [Etienne Champetier](https://github.com/ClickHouse/ClickHouse/pull/2912)
-* When using clang to build, some warnings from `-Weverything` have been added, in addition to the regular `-Wall -Wextra -Werror`. [#2957](https://github.com/ClickHouse/ClickHouse/pull/2957)
-* Debugging the build uses the `jemalloc` debug option.
-* The interface of the library for interacting with ZooKeeper is declared abstract. [#2950](https://github.com/ClickHouse/ClickHouse/pull/2950)
-
-## ClickHouse release 18.10
-
-### ClickHouse release 18.10.3, 2018-08-13
-
-#### New features:
-
-* HTTPS can be used for replication. [#2760](https://github.com/ClickHouse/ClickHouse/pull/2760)
-* Added the functions `murmurHash2_64`, `murmurHash3_32`, `murmurHash3_64`, and `murmurHash3_128` in addition to the existing `murmurHash2_32`. [#2791](https://github.com/ClickHouse/ClickHouse/pull/2791)
-* Support for Nullable types in the ClickHouse ODBC driver (`ODBCDriver2` output format). [#2834](https://github.com/ClickHouse/ClickHouse/pull/2834)
-* Support for `UUID` in the key columns.
-
-#### Improvements:
-
-* Clusters can be removed without restarting the server when they are deleted from the config files. [#2777](https://github.com/ClickHouse/ClickHouse/pull/2777)
-* External dictionaries can be removed without restarting the server when they are removed from config files. [#2779](https://github.com/ClickHouse/ClickHouse/pull/2779)
-* Added `SETTINGS` support for the `Kafka` table engine. [Alexander Marshalov](https://github.com/ClickHouse/ClickHouse/pull/2781)
-* Improvements for the `UUID` data type (not yet complete). [#2618](https://github.com/ClickHouse/ClickHouse/pull/2618)
-* Support for empty parts after merges in the `SummingMergeTree`, `CollapsingMergeTree` and `VersionedCollapsingMergeTree` engines. [#2815](https://github.com/ClickHouse/ClickHouse/pull/2815)
-* Old records of completed mutations are deleted (`ALTER DELETE`). [#2784](https://github.com/ClickHouse/ClickHouse/pull/2784)
-* Added the `system.merge_tree_settings` table. [Kirill Shvakov](https://github.com/ClickHouse/ClickHouse/pull/2841)
-* The `system.tables` table now has dependency columns: `dependencies_database` and `dependencies_table`. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2851)
-* Added the `max_partition_size_to_drop` config option. [#2782](https://github.com/ClickHouse/ClickHouse/pull/2782)
-* Added the `output_format_json_escape_forward_slashes` option. [Alexander Bocharov](https://github.com/ClickHouse/ClickHouse/pull/2812)
-* Added the `max_fetch_partition_retries_count` setting. [#2831](https://github.com/ClickHouse/ClickHouse/pull/2831)
-* Added the `prefer_localhost_replica` setting for disabling the preference for a local replica and going to a local replica without inter-process interaction. [#2832](https://github.com/ClickHouse/ClickHouse/pull/2832)
-* The `quantileExact` aggregate function returns `nan` in the case of aggregation on an empty `Float32` or `Float64` set. [Sundy Li](https://github.com/ClickHouse/ClickHouse/pull/2855)
-
-#### Bug fixes:
-
-* Removed unnecessary escaping of the connection string parameters for ODBC, which made it impossible to establish a connection. This error occurred in version 18.6.0.
-* Fixed the logic for processing `REPLACE PARTITION` commands in the replication queue. If there are two `REPLACE` commands for the same partition, the incorrect logic could cause one of them to remain in the replication queue and not be executed. [#2814](https://github.com/ClickHouse/ClickHouse/pull/2814)
-* Fixed a merge bug when all data parts were empty (parts that were formed from a merge or from `ALTER DELETE` if all data was deleted). This bug appeared in version 18.1.0. [#2930](https://github.com/ClickHouse/ClickHouse/pull/2930)
-* Fixed an error for concurrent `Set` or `Join`. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2823)
-* Fixed the `Block structure mismatch in UNION stream: different number of columns` error that occurred for `UNION ALL` queries inside a sub-query if one of the `SELECT` queries contains duplicate column names. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2094)
-* Fixed a memory leak if an exception occurred when connecting to a MySQL server.
-* Fixed incorrect clickhouse-client response code in case of a query error.
-* Fixed incorrect behavior of materialized views containing DISTINCT. [#2795](https://github.com/ClickHouse/ClickHouse/issues/2795)
-
-#### Backward incompatible changes:
-
-* Removed support for CHECK TABLE queries for Distributed tables.
-
-#### Build changes:
-
-* The allocator has been replaced: `jemalloc` is now used instead of `tcmalloc`. In some scenarios, this increases speed up to 20%. However, there are queries that have slowed by up to 20%. Memory consumption has been reduced by approximately 10% in some scenarios, with improved stability. With highly concurrent loads, CPU usage in userspace and in system shows just a slight increase. [#2773](https://github.com/ClickHouse/ClickHouse/pull/2773)
-* Use of libressl from a submodule. [#1983](https://github.com/ClickHouse/ClickHouse/pull/1983) [#2807](https://github.com/ClickHouse/ClickHouse/pull/2807)
-* Use of unixodbc from a submodule. [#2789](https://github.com/ClickHouse/ClickHouse/pull/2789)
-* Use of mariadb-connector-c from a submodule. [#2785](https://github.com/ClickHouse/ClickHouse/pull/2785)
-* Added functional test files to the repository that depend on the availability of test data (for the time being, without the test data itself).
-
-## ClickHouse release 18.6
-
-### ClickHouse release 18.6.0, 2018-08-02
-
-#### New features:
-
-* Added support for ON expressions for the JOIN ON syntax (see the sketch after this list):
-`JOIN ON Expr([table.]column ...) = Expr([table.]column, ...) [AND Expr([table.]column, ...) = Expr([table.]column, ...) ...]`
-The expression must be a chain of equalities joined by the AND operator. Each side of the equality can be an arbitrary expression over the columns of one of the tables. The use of fully qualified column names is supported (`table.name`, `database.table.name`, `table_alias.name`, `subquery_alias.name`) for the right table. [#2742](https://github.com/ClickHouse/ClickHouse/pull/2742)
-* HTTPS can be enabled for replication. [#2760](https://github.com/ClickHouse/ClickHouse/pull/2760)
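A minimal sketch of the `JOIN ON` form referenced in the list above; the tables and columns are invented, and each side of an equality is an expression over one table's columns, chained with `AND`:

```sql
SELECT t1.id, t2.value
FROM t1
ALL INNER JOIN t2
    ON t1.id = t2.ref_id
    AND lower(t1.code) = t2.code;
```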
-
-#### Improvements:
-
-* The server passes the patch component of its version to the client. Data about the patch version component is in `system.processes` and `query_log`. [#2646](https://github.com/ClickHouse/ClickHouse/pull/2646)
-
-## ClickHouse release 18.5
-
-### ClickHouse release 18.5.1, 2018-07-31
-
-#### New features:
-
-* Added the hash function `murmurHash2_32` [#2756](https://github.com/ClickHouse/ClickHouse/pull/2756).
-
-#### Improvements:
-
-* Now you can use the `from_env` [#2741](https://github.com/ClickHouse/ClickHouse/pull/2741) attribute to set values in config files from environment variables.
-* Added case-insensitive versions of the `coalesce`, `ifNull`, and `nullIf` functions [#2752](https://github.com/ClickHouse/ClickHouse/pull/2752).
-
-#### Bug fixes:
-
-* Fixed a possible bug when starting a replica [#2759](https://github.com/ClickHouse/ClickHouse/pull/2759).
-
-## ClickHouse release 18.4
-
-### ClickHouse release 18.4.0, 2018-07-28
-
-#### New features:
-
-* Added system tables: `formats`, `data_type_families`, `aggregate_function_combinators`, `table_functions`, `table_engines`, `collations` [#2721](https://github.com/ClickHouse/ClickHouse/pull/2721).
-* Added the ability to use a table function instead of a table as an argument of a `remote` or `cluster` table function [#2708](https://github.com/ClickHouse/ClickHouse/pull/2708).
-* Support for `HTTP Basic` authentication in the replication protocol [#2727](https://github.com/ClickHouse/ClickHouse/pull/2727).
-* The `has` function now allows searching for a numeric value in an array of `Enum` values [Maxim Khrisanfov](https://github.com/ClickHouse/ClickHouse/pull/2699).
-* Support for adding arbitrary message separators when reading from `Kafka` [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2701).
-
-#### Improvements:
-
-* The `ALTER TABLE t DELETE WHERE` query does not rewrite data parts that were not affected by the WHERE condition [#2694](https://github.com/ClickHouse/ClickHouse/pull/2694).
-* The `use_minimalistic_checksums_in_zookeeper` option for `ReplicatedMergeTree` tables is enabled by default. This setting was added in version 1.1.54378, 2018-04-16. Versions that are older than 1.1.54378 can no longer be installed.
-* Support for running `KILL` and `OPTIMIZE` queries that specify `ON CLUSTER` [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2689).
-
-#### Bug fixes:
-
-* Fixed the error `Column ... is not under an aggregate function and not in GROUP BY` for aggregation with an IN expression. This bug appeared in version 18.1.0. ([bbdd780b](https://github.com/ClickHouse/ClickHouse/commit/bbdd780be0be06a0f336775941cdd536878dd2c2))
-* Fixed a bug in the `windowFunnel` aggregate function [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2735).
-* Fixed a bug in the `anyHeavy` aggregate function ([a2101df2](https://github.com/ClickHouse/ClickHouse/commit/a2101df25a6a0fba99aa71f8793d762af2b801ee)).
-* Fixed server crash when using the `countArray()` aggregate function.
-
-#### Backward incompatible changes:
-
-* Parameters for the `Kafka` engine were changed from `Kafka(kafka_broker_list, kafka_topic_list, kafka_group_name, kafka_format[, kafka_schema, kafka_num_consumers])` to `Kafka(kafka_broker_list, kafka_topic_list, kafka_group_name, kafka_format[, kafka_row_delimiter, kafka_schema, kafka_num_consumers])`. If your tables use the `kafka_schema` or `kafka_num_consumers` parameters, you have to manually edit the metadata files `path/metadata/database/table.sql` and add the `kafka_row_delimiter` parameter with the value `''` (see the sketch below).
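A hedged sketch of the new argument order; the broker address, topic, group, and format are placeholders, and the only difference from the old form is the extra `kafka_row_delimiter` argument, passed here as `''`:

```sql
CREATE TABLE kafka_queue (message String)
ENGINE = Kafka('localhost:9092', 'topic1', 'group1', 'JSONEachRow', '', '', 1);
-- arguments: kafka_broker_list, kafka_topic_list, kafka_group_name, kafka_format,
--            kafka_row_delimiter (new), kafka_schema, kafka_num_consumers
```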
-
-## ClickHouse release 18.1
-
-### ClickHouse release 18.1.0, 2018-07-23
-
-#### New features:
-
-* Support for the `ALTER TABLE t DELETE WHERE` query for non-replicated MergeTree tables ([#2634](https://github.com/ClickHouse/ClickHouse/pull/2634)).
-* Support for arbitrary types for the `uniq*` family of aggregate functions ([#2010](https://github.com/ClickHouse/ClickHouse/issues/2010)).
-* Support for arbitrary types in comparison operators ([#2026](https://github.com/ClickHouse/ClickHouse/issues/2026)).
-* The `users.xml` file allows setting a subnet mask in the format `10.0.0.1/255.255.255.0`. This is necessary for using masks for IPv6 networks with zeros in the middle ([#2637](https://github.com/ClickHouse/ClickHouse/pull/2637)).
-* Added the `arrayDistinct` function ([#2670](https://github.com/ClickHouse/ClickHouse/pull/2670)).
-* The SummingMergeTree engine can now work with AggregateFunction type columns ([Constantin S. Pan](https://github.com/ClickHouse/ClickHouse/pull/2566)).
-
-#### Improvements:
-
-* Changed the numbering scheme for release versions. Now the first part contains the year of release (A.D., Moscow timezone, minus 2000), the second part contains the number for major changes (increases for most releases), and the third part is the patch version. Releases are still backward compatible, unless otherwise stated in the changelog.
-* Faster conversions of floating-point numbers to a string ([Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2664)).
-* If some rows were skipped during an insert due to parsing errors (this is possible with the `input_allow_errors_num` and `input_allow_errors_ratio` settings enabled), the number of skipped rows is now written to the server log ([Leonardo Cecchi](https://github.com/ClickHouse/ClickHouse/pull/2669)).
-
-#### Bug fixes:
-
-* Fixed the TRUNCATE command for temporary tables ([Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2624)).
-* Fixed a rare deadlock in the ZooKeeper client library that occurred when there was a network error while reading the response ([c315200](https://github.com/ClickHouse/ClickHouse/commit/c315200e64b87e44bdf740707fc857d1fdf7e947)).
-* Fixed an error during a CAST to Nullable types ([#1322](https://github.com/ClickHouse/ClickHouse/issues/1322)).
-* Fixed the incorrect result of the `maxIntersection()` function when the boundaries of intervals coincided ([Michael Furmur](https://github.com/ClickHouse/ClickHouse/pull/2657)).
-* Fixed incorrect transformation of the OR expression chain in a function argument ([chenxing-xc](https://github.com/ClickHouse/ClickHouse/pull/2663)).
-* Fixed performance degradation for queries containing `IN (subquery)` expressions inside another subquery ([#2571](https://github.com/ClickHouse/ClickHouse/issues/2571)).
-* Fixed incompatibility between servers with different versions in distributed queries that use a `CAST` function that isn't in uppercase letters ([fe8c4d6](https://github.com/ClickHouse/ClickHouse/commit/fe8c4d64e434cacd4ceef34faa9005129f2190a5)).
-* Added missing quoting of identifiers for queries to an external DBMS ([#2635](https://github.com/ClickHouse/ClickHouse/issues/2635)).
-
-#### Backward incompatible changes:
-
-* Converting a string containing the number zero to DateTime does not work. Example: `SELECT toDateTime('0')`. This is also the reason that `DateTime DEFAULT '0'` does not work in tables, as well as `0` in dictionaries. Solution: replace `0` with `0000-00-00 00:00:00`.
-
-## ClickHouse release 1.1
-
-### ClickHouse release 1.1.54394, 2018-07-12
-
-#### New features:
-
-* Added the `histogram` aggregate function ([Mikhail Surin](https://github.com/ClickHouse/ClickHouse/pull/2521)).
-* Now `OPTIMIZE TABLE ... FINAL` can be used without specifying partitions for `ReplicatedMergeTree` ([Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2600)).
-
-#### Bug fixes:
-
-* Fixed a problem with a very small timeout for sockets (one second) for reading and writing when sending and downloading replicated data, which made it impossible to download larger parts if there is a load on the network or disk (it resulted in cyclical attempts to download parts). This error occurred in version 1.1.54388.
-* Fixed issues when using chroot in ZooKeeper if you inserted duplicate data blocks in the table.
-* The `has` function now works correctly for an array with Nullable elements ([#2115](https://github.com/ClickHouse/ClickHouse/issues/2115)).
-* The `system.tables` table now works correctly when used in distributed queries. The `metadata_modification_time` and `engine_full` columns are now non-virtual. Fixed an error that occurred if only these columns were queried from the table.
-* Fixed how an empty `TinyLog` table works after inserting an empty data block ([#2563](https://github.com/ClickHouse/ClickHouse/issues/2563)).
-* The `system.zookeeper` table works if the value of the node in ZooKeeper is NULL.
-
-### ClickHouse release 1.1.54390, 2018-07-06
-
-#### New features:
-
-* Queries can be sent in `multipart/form-data` format (in the `query` field), which is useful if external data is also sent for query processing ([Olga Hvostikova](https://github.com/ClickHouse/ClickHouse/pull/2490)).
-* Added the ability to enable or disable processing single or double quotes when reading data in CSV format. You can configure this in the `format_csv_allow_single_quotes` and `format_csv_allow_double_quotes` settings ([Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2574)).
-* Now `OPTIMIZE TABLE ... FINAL` can be used without specifying the partition for non-replicated variants of `MergeTree` ([Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2599)).
-
-#### Improvements:
-
-* Improved performance, reduced memory consumption, and correct memory consumption tracking with use of the IN operator when a table index could be used ([#2584](https://github.com/ClickHouse/ClickHouse/pull/2584)).
-* Removed redundant checking of checksums when adding a data part. This is important when there are a large number of replicas, because in these cases the total number of checks was equal to N^2.
-* Added support for `Array(Tuple(...))` arguments for the `arrayEnumerateUniq` function ([#2573](https://github.com/ClickHouse/ClickHouse/pull/2573)).
-* Added `Nullable` support for the `runningDifference` function ([#2594](https://github.com/ClickHouse/ClickHouse/pull/2594)).
-* Improved query analysis performance when there is a very large number of expressions ([#2572](https://github.com/ClickHouse/ClickHouse/pull/2572)).
-* Faster selection of data parts for merging in `ReplicatedMergeTree` tables. Faster recovery of the ZooKeeper session ([#2597](https://github.com/ClickHouse/ClickHouse/pull/2597)).
-* The `format_version.txt` file for `MergeTree` tables is re-created if it is missing, which makes sense if ClickHouse is launched after copying the directory structure without files ([Ciprian Hacman](https://github.com/ClickHouse/ClickHouse/pull/2593)).
-
-#### Bug fixes:
-
-* Fixed a bug when working with ZooKeeper that could make it impossible to recover the session and readonly states of tables before restarting the server.
-* Fixed a bug when working with ZooKeeper that could result in old nodes not being deleted if the session is interrupted.
-* Fixed an error in the `quantileTDigest` function for Float arguments (this bug was introduced in version 1.1.54388) ([Mikhail Surin](https://github.com/ClickHouse/ClickHouse/pull/2553)).
-* Fixed a bug in the index for MergeTree tables if the primary key column is located inside the function for converting types between signed and unsigned integers of the same size ([#2603](https://github.com/ClickHouse/ClickHouse/pull/2603)).
-* Fixed segfault if `macros` are used but they aren't in the config file ([#2570](https://github.com/ClickHouse/ClickHouse/pull/2570)).
-* Fixed switching to the default database when reconnecting the client ([#2583](https://github.com/ClickHouse/ClickHouse/pull/2583)).
-* Fixed a bug that occurred when the `use_index_for_in_with_subqueries` setting was disabled.
-
-#### Security fix:
-
-* Sending files is no longer possible when connected to MySQL (`LOAD DATA LOCAL INFILE`).
-
-### ClickHouse release 1.1.54388, 2018-06-28
-
-#### New features:
-
-* Support for the `ALTER TABLE t DELETE WHERE` query for replicated tables. Added the `system.mutations` table to track progress of this type of queries.
-* Support for the `ALTER TABLE t [REPLACE|ATTACH] PARTITION` query for \*MergeTree tables.
-* Support for the `TRUNCATE TABLE` query ([Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2260)).
-* Several new `SYSTEM` queries for replicated tables (`RESTART REPLICAS`, `SYNC REPLICA`, `[STOP|START] [MERGES|FETCHES|SENDS REPLICATED|REPLICATION QUEUES]`).
-* Added the ability to write to a table with the MySQL engine and the corresponding table function ([sundy-li](https://github.com/ClickHouse/ClickHouse/pull/2294)).
-* Added the `url()` table function and the `URL` table engine ([Alexander Sapin](https://github.com/ClickHouse/ClickHouse/pull/2501)).
-* Added the `windowFunnel` aggregate function ([sundy-li](https://github.com/ClickHouse/ClickHouse/pull/2352)).
-* New `startsWith` and `endsWith` functions for strings ([Vadim Plakhtinsky](https://github.com/ClickHouse/ClickHouse/pull/2429)).
-* The `numbers()` table function now allows you to specify the offset ([Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2535)).
-* The password to `clickhouse-client` can be entered interactively.
-* Server logs can now be sent to syslog ([Alexander Krasheninnikov](https://github.com/ClickHouse/ClickHouse/pull/2459)).
-* Support for logging in dictionaries with a shared library source ([Alexander Sapin](https://github.com/ClickHouse/ClickHouse/pull/2472)).
-* Support for custom CSV delimiters ([Ivan Zhukov](https://github.com/ClickHouse/ClickHouse/pull/2263)).
-* Added the `date_time_input_format` setting. If you switch this setting to `'best_effort'`, DateTime values will be read in a wide range of formats (see the sketch after this list).
-* Added the `clickhouse-obfuscator` utility for data obfuscation. Usage example: publishing data used in performance tests.
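A small sketch of the `date_time_input_format` setting mentioned in the list above; the table `events` with a `DateTime` column `t` and the sample timestamps are assumptions for illustration:

```sql
SET date_time_input_format = 'best_effort';

-- With best-effort parsing enabled, DateTime values in a range of common
-- textual formats are intended to be accepted in input data:
INSERT INTO events (t) VALUES ('2018-06-28 11:22:33'), ('28 Jun 2018 11:22:33');
```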
-
-#### Experimental features:
-
-* Added the ability to calculate `and` arguments only where they are needed ([Anastasia Tsarkova](https://github.com/ClickHouse/ClickHouse/pull/2272)).
-* JIT compilation to native code is now available for some expressions ([pyos](https://github.com/ClickHouse/ClickHouse/pull/2277)).
-
-#### Bug fixes:
-
-* Duplicates no longer appear for a query with `DISTINCT` and `ORDER BY`.
-* Queries with `ARRAY JOIN` and `arrayFilter` no longer return an incorrect result.
-* Fixed an error when reading an array column from a Nested structure ([#2066](https://github.com/ClickHouse/ClickHouse/issues/2066)).
-* Fixed an error when analyzing queries with a HAVING clause like `HAVING tuple IN (...)`.
-* Fixed an error when analyzing queries with recursive aliases.
-* Fixed an error when reading from ReplacingMergeTree with a condition in PREWHERE that filters all rows ([#2525](https://github.com/ClickHouse/ClickHouse/issues/2525)).
-* User profile settings were not applied when using sessions in the HTTP interface.
-* Fixed how settings are applied from the command line parameters in clickhouse-local.
-* The ZooKeeper client library now uses the session timeout received from the server.
-* Fixed a bug in the ZooKeeper client library when the client waited for the server response longer than the timeout.
-* Fixed pruning of parts for queries with conditions on partition key columns ([#2342](https://github.com/ClickHouse/ClickHouse/issues/2342)).
-* Merges are now possible after `CLEAR COLUMN IN PARTITION` ([#2315](https://github.com/ClickHouse/ClickHouse/issues/2315)).
-* Type mapping in the ODBC table function has been fixed ([sundy-li](https://github.com/ClickHouse/ClickHouse/pull/2268)).
-* Type comparisons have been fixed for `DateTime` with and without the time zone ([Alexander Bocharov](https://github.com/ClickHouse/ClickHouse/pull/2400)).
-* Fixed syntactic parsing and formatting of the `CAST` operator.
-* Fixed insertion into a materialized view for the Distributed table engine ([Babacar Diassé](https://github.com/ClickHouse/ClickHouse/pull/2411)).
-* Fixed a race condition when writing data from the `Kafka` engine to materialized views ([Yangkuan Liu](https://github.com/ClickHouse/ClickHouse/pull/2448)).
-* Fixed SSRF in the remote() table function.
-* Fixed exit behavior of `clickhouse-client` in multiline mode ([#2510](https://github.com/ClickHouse/ClickHouse/issues/2510)).
-
-#### Improvements:
-
-* Background tasks in replicated tables are now performed in a thread pool instead of in separate threads ([Silviu Caragea](https://github.com/ClickHouse/ClickHouse/pull/1722)).
-* Improved LZ4 compression performance.
-* Faster analysis for queries with a large number of JOINs and sub-queries.
-* The DNS cache is now updated automatically when there are too many network errors.
-* Table inserts no longer occur if the insert into one of the materialized views is not possible because it has too many parts.
-* Corrected the discrepancy in the event counters `Query`, `SelectQuery`, and `InsertQuery`.
-* Expressions like `tuple IN (SELECT tuple)` are allowed if the tuple types match.
-* A server with replicated tables can start even if you haven't configured ZooKeeper.
-* When calculating the number of available CPU cores, limits on cgroups are now taken into account ([Atri Sharma](https://github.com/ClickHouse/ClickHouse/pull/2325)).
-* Added chown for config directories in the systemd config file ([Mikhail Shiryaev](https://github.com/ClickHouse/ClickHouse/pull/2421)).
-
-#### Build changes:
-
-* The gcc8 compiler can be used for builds.
-* Added the ability to build llvm from submodule.
-* The version of the librdkafka library has been updated to v0.11.4.
-* Added the ability to use the system libcpuid library. The library version has been updated to 0.4.0.
-* Fixed the build using the vectorclass library ([Babacar Diassé](https://github.com/ClickHouse/ClickHouse/pull/2274)).
-* Cmake now generates files for ninja by default (like when using `-G Ninja`).
-* Added the ability to use the libtinfo library instead of libtermcap ([Georgy Kondratiev](https://github.com/ClickHouse/ClickHouse/pull/2519)).
-* Fixed a header file conflict in Fedora Rawhide ([#2520](https://github.com/ClickHouse/ClickHouse/issues/2520)).
-
-#### Backward incompatible changes:
-
-* Removed escaping in `Vertical` and `Pretty*` formats and deleted the `VerticalRaw` format.
-* If servers with version 1.1.54388 (or newer) and servers with an older version are used simultaneously in a distributed query and the query has the `cast(x, 'Type')` expression without the `AS` keyword and doesn't have the word `cast` in uppercase, an exception will be thrown with a message like `Not found column cast(0, 'UInt8') in block`. Solution: Update the server on the entire cluster.
-
-### ClickHouse release 1.1.54385, 2018-06-01
-
-#### Bug fixes:
-
-* Fixed an error that in some cases caused ZooKeeper operations to block.
-
-### ClickHouse release 1.1.54383, 2018-05-22
-
-#### Bug fixes:
-
-* Fixed a slowdown of replication queue if a table has many replicas.
-
-### ClickHouse release 1.1.54381, 2018-05-14
-
-#### Bug fixes:
-
-* Fixed a nodes leak in ZooKeeper when ClickHouse loses connection to ZooKeeper server.
-
-### ClickHouse release 1.1.54380, 2018-04-21
-
-#### New features:
-
-* Added the table function `file(path, format, structure)`. An example reading bytes from `/dev/urandom`: `ln -s /dev/urandom /var/lib/clickhouse/user_files/random`, then `clickhouse-client -q "SELECT * FROM file('random', 'RowBinary', 'd UInt8') LIMIT 10"`.
-
-#### Improvements:
-
-* Subqueries can be wrapped in `()` brackets to enhance query readability. For example: `(SELECT 1) UNION ALL (SELECT 1)`.
-* Simple `SELECT` queries from the `system.processes` table are not included in the `max_concurrent_queries` limit.
-
-#### Bug fixes:
-
-* Fixed incorrect behavior of the `IN` operator when selecting from a `MATERIALIZED VIEW`.
-* Fixed incorrect filtering by partition index in expressions like `partition_key_column IN (...)`.
-* Fixed inability to execute an `OPTIMIZE` query on a non-leader replica if `RENAME` was performed on the table.
-* Fixed the authorization error when executing `OPTIMIZE` or `ALTER` queries on a non-leader replica.
-* Fixed freezing of `KILL QUERY`.
-* Fixed an error in ZooKeeper client library which led to loss of watches, freezing of distributed DDL queue, and slowdowns in the replication queue if a non-empty `chroot` prefix is used in the ZooKeeper configuration.
-
-#### Backward incompatible changes:
-
-* Removed support for expressions like `(a, b) IN (SELECT (a, b))` (you can use the equivalent expression `(a, b) IN (SELECT a, b)`). In previous releases, these expressions led to undetermined `WHERE` filtering or caused errors.
-
-### ClickHouse release 1.1.54378, 2018-04-16
-
-#### New features:
-
-* Logging level can be changed without restarting the server.
-* Added the `SHOW CREATE DATABASE` query.
-* The `query_id` can be passed to `clickhouse-client` (elBroom).
-* New setting: `max_network_bandwidth_for_all_users`.
-* Added support for `ALTER TABLE ... PARTITION ...` for `MATERIALIZED VIEW`.
-* Added information about the size of data parts in uncompressed form in the system table.
-* Server-to-server encryption support for distributed tables (`1` in the replica config in ``).
-* Configuration of the table level for the `ReplicatedMergeTree` family in order to minimize the amount of data stored in ZooKeeper: `use_minimalistic_checksums_in_zookeeper = 1`.
-* Configuration of the `clickhouse-client` prompt. By default, server names are now output to the prompt. The server's display name can be changed. It's also sent in the `X-ClickHouse-Display-Name` HTTP header (Kirill Shvakov).
-* Multiple comma-separated `topics` can be specified for the `Kafka` engine (Tobias Adamson).
-* When a query is stopped by `KILL QUERY` or `replace_running_query`, the client receives the `Query was canceled` exception instead of an incomplete result.
-
-#### Improvements:
-
-* `ALTER TABLE ... DROP/DETACH PARTITION` queries are run at the front of the replication queue.
-* `SELECT ... FINAL` and `OPTIMIZE ... FINAL` can be used even when the table has a single data part.
-* A `query_log` table is recreated on the fly if it was deleted manually (Kirill Shvakov).
-* The `lengthUTF8` function runs faster (zhang2014).
-* Improved performance of synchronous inserts in `Distributed` tables (`insert_distributed_sync = 1`) when there is a very large number of shards.
-* The server accepts the `send_timeout` and `receive_timeout` settings from the client and applies them when connecting to the client (they are applied in reverse order: the server socket's `send_timeout` is set to the `receive_timeout` value received from the client, and vice versa).
-* More robust crash recovery for asynchronous insertion into `Distributed` tables.
-* The return type of the `countEqual` function changed from `UInt32` to `UInt64` (谢磊).
-
-#### Bug fixes:
-
-* Fixed an error with `IN` when the left side of the expression is `Nullable`.
-* Correct results are now returned when using tuples with `IN` when some of the tuple components are in the table index.
-* The `max_execution_time` limit now works correctly with distributed queries.
-* Fixed errors when calculating the size of composite columns in the `system.columns` table.
-* Fixed an error when creating a temporary table with `CREATE TEMPORARY TABLE IF NOT EXISTS`.
-* Fixed errors in `StorageKafka` (#2075).
-* Fixed server crashes from invalid arguments of certain aggregate functions.
-* Fixed the error that prevented the `DETACH DATABASE` query from stopping background tasks for `ReplicatedMergeTree` tables.
-* The `Too many parts` state is less likely to happen when inserting into aggregated materialized views (#2084).
-* Corrected recursive handling of substitutions in the config if a substitution must be followed by another substitution on the same level.
-* Corrected the syntax in the metadata file when creating a `VIEW` that uses a query with `UNION ALL`.
-* `SummingMergeTree` now works correctly for summation of nested data structures with a composite key.
-* Fixed the possibility of a race condition when choosing the leader for `ReplicatedMergeTree` tables.
-
-#### Build changes:
-
-* The build supports `ninja` instead of `make` and uses `ninja` by default for building releases.
-* Renamed packages: `clickhouse-server-base` to `clickhouse-common-static`; `clickhouse-server-common` to `clickhouse-server`; `clickhouse-common-dbg` to `clickhouse-common-static-dbg`. To install, use `clickhouse-server clickhouse-client`. Packages with the old names will still load in the repositories for backward compatibility.
-
-#### Backward incompatible changes:
-
-* Removed the special interpretation of an IN expression if an array is specified on the left side. Previously, the expression `arr IN (set)` was interpreted as "at least one `arr` element belongs to the `set`". To get the same behavior in the new version, write `arrayExists(x -> x IN (set), arr)` (see the sketch after this list).
-* Disabled the incorrect use of the socket option `SO_REUSEPORT`, which was incorrectly enabled by default in the Poco library. Note that on Linux there is no longer any reason to simultaneously specify the addresses `::` and `0.0.0.0` for listen – use just `::`, which allows listening to the connection both over IPv4 and IPv6 (with the default kernel config settings). You can also revert to the behavior from previous versions by specifying `1` in the config.
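A short sketch of the `arrayExists` rewrite suggested in the first item above; the literal array and set are arbitrary:

```sql
-- Old form (no longer supported): [3, 42] IN (1, 2, 3)
-- Equivalent explicit form:
SELECT arrayExists(x -> x IN (1, 2, 3), [3, 42]) AS has_match;  -- returns 1
```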
-
-### ClickHouse release 1.1.54370, 2018-03-16
-
-#### New features:
-
-* Added the `system.macros` table and auto updating of macros when the config file is changed.
-* Added the `SYSTEM RELOAD CONFIG` query.
-* Added the `maxIntersections(left_col, right_col)` aggregate function, which returns the maximum number of simultaneously intersecting intervals `[left; right]`. The `maxIntersectionsPosition(left, right)` function returns the beginning of the "maximum" interval. ([Michael Furmur](https://github.com/ClickHouse/ClickHouse/pull/2012)).
-
-#### Improvements:
-
-* When inserting data in a `Replicated` table, fewer requests are made to `ZooKeeper` (and most of the user-level errors have disappeared from the `ZooKeeper` log).
-* Added the ability to create aliases for data sets. Example: `WITH (1, 2, 3) AS set SELECT number IN set FROM system.numbers LIMIT 10`.
-
-#### Bug fixes:
-
-* Fixed the `Illegal PREWHERE` error when reading from Merge tables for `Distributed` tables.
-* Added fixes that allow you to start clickhouse-server in IPv4-only Docker containers.
-* Fixed a race condition when reading from the `system.parts_columns` table.
-* Removed double buffering during a synchronous insert to a `Distributed` table, which could have caused the connection to timeout.
-* Fixed a bug that caused excessively long waits for an unavailable replica before beginning a `SELECT` query.
-* Fixed incorrect dates in the `system.parts` table.
-* Fixed a bug that made it impossible to insert data in a `Replicated` table if `chroot` was non-empty in the configuration of the `ZooKeeper` cluster.
-* Fixed the vertical merging algorithm for an empty `ORDER BY` table.
-* Restored the ability to use dictionaries in queries to remote tables, even if these dictionaries are not present on the requestor server. This functionality was lost in release 1.1.54362.
-* Restored the behavior for queries like `SELECT * FROM remote('server2', default.table) WHERE col IN (SELECT col2 FROM default.table)` when the right side of the `IN` should use a remote `default.table` instead of a local one. This behavior was broken in version 1.1.54358.
-* Removed extraneous error-level logging of `Not found column ... in block`.
-
-### Clickhouse Release 1.1.54362, 2018-03-11
-
-#### New features:
-
-* Aggregation without `GROUP BY` for an empty set (such as `SELECT count(*) FROM table WHERE 0`) now returns a result with one row with null values for aggregate functions, in compliance with the SQL standard. To restore the old behavior (return an empty result), set `empty_result_for_aggregation_by_empty_set` to 1.
-* Added type conversion for `UNION ALL`. Different alias names are allowed in `SELECT` positions in `UNION ALL`, in compliance with the SQL standard.
-* Arbitrary expressions are supported in `LIMIT BY` clauses. Previously, it was only possible to use columns resulting from `SELECT`.
-* An index of `MergeTree` tables is used when `IN` is applied to a tuple of expressions from the columns of the primary key. Example: `WHERE (UserID, EventDate) IN ((123, '2000-01-01'), ...)` (Anastasiya Tsarkova).
-* Added the `clickhouse-copier` tool for copying between clusters and resharding data (beta).
-* Added consistent hashing functions: `yandexConsistentHash`, `jumpConsistentHash`, `sumburConsistentHash`. They can be used as a sharding key in order to reduce the amount of network traffic during subsequent reshardings.
-* Added functions: `arrayAny`, `arrayAll`, `hasAny`, `hasAll`, `arrayIntersect`, `arrayResize`.
-* Added the `arrayCumSum` function (Javi Santana).
-* Added the `parseDateTimeBestEffort`, `parseDateTimeBestEffortOrZero`, and `parseDateTimeBestEffortOrNull` functions to read the DateTime from a string containing text in a wide variety of possible formats (see the sketch after this list).
-* Data can be partially reloaded from external dictionaries during updating (load just the records in which the value of the specified field is greater than in the previous download) (Arsen Hakobyan).
-* Added the `cluster` table function. Example: `cluster(cluster_name, db, table)`. The `remote` table function can accept the cluster name as the first argument, if it is specified as an identifier.
-* The `remote` and `cluster` table functions can be used in `INSERT` queries.
-* Added the `create_table_query` and `engine_full` virtual columns to the `system.tables` table. The `metadata_modification_time` column is virtual.
-* Added the `data_path` and `metadata_path` columns to the `system.tables` and `system.databases` tables, and added the `path` column to the `system.parts` and `system.parts_columns` tables.
-* Added additional information about merges in the `system.part_log` table.
-* An arbitrary partitioning key can be used for the `system.query_log` table (Kirill Shvakov).
-* The `SHOW TABLES` query now also shows temporary tables. Added temporary tables and the `is_temporary` column to `system.tables` (zhang2014).
-* Added `DROP TEMPORARY TABLE` and `EXISTS TEMPORARY TABLE` queries (zhang2014).
-* Support for `SHOW CREATE TABLE` for temporary tables (zhang2014).
-* Added the `system_profile` configuration parameter for the settings used by internal processes.
-* Support for loading `object_id` as an attribute in `MongoDB` dictionaries (Pavel Litvinenko).
-* Reading `null` as the default value when loading data for an external dictionary with the `MongoDB` source (Pavel Litvinenko).
-* Reading `DateTime` values in the `Values` format from a Unix timestamp without single quotes.
-* Failover is supported in `remote` table functions for cases when some of the replicas are missing the requested table.
-* Configuration settings can be overridden in the command line when you run `clickhouse-server`. Example: `clickhouse-server -- --logger.level=information`.
-* Implemented the `empty` function from a `FixedString` argument: the function returns 1 if the string consists entirely of null bytes (zhang2014).
-* Added the `listen_try` configuration parameter for listening to at least one of the listen addresses without quitting, if some of the addresses can't be listened to (useful for systems with disabled support for IPv4 or IPv6).
-* Added the `VersionedCollapsingMergeTree` table engine.
-* Support for rows and arbitrary numeric types for the `library` dictionary source.
-* `MergeTree` tables can be used without a primary key (you need to specify `ORDER BY tuple()`).
-* A `Nullable` type can be `CAST` to a non-`Nullable` type if the argument is not `NULL`.
-* `RENAME TABLE` can be performed for `VIEW`.
-* Added the `throwIf` function.
-* Added the `odbc_default_field_size` option, which allows you to extend the maximum size of the value loaded from an ODBC source (by default, it is 1024).
-* The `system.processes` table and `SHOW PROCESSLIST` now have the `is_cancelled` and `peak_memory_usage` columns.
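As referenced in the `parseDateTimeBestEffort` item above, a minimal sketch; the input strings are arbitrary examples of formats these functions are meant to handle:

```sql
SELECT
    parseDateTimeBestEffort('2018-03-11 12:00:00')    AS plain,
    parseDateTimeBestEffort('11 Mar 2018 12:00:00')   AS free_form,
    parseDateTimeBestEffortOrNull('not a date')       AS bad_input;  -- NULL
```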
-
-#### Improvements:
-
-* Limits and quotas on the result are no longer applied to intermediate data for `INSERT SELECT` queries or for `SELECT` subqueries.
-* Fewer false triggers of `force_restore_data` when checking the status of `Replicated` tables when the server starts.
-* Added the `allow_distributed_ddl` option.
-* Nondeterministic functions are not allowed in expressions for `MergeTree` table keys.
-* Files with substitutions from `config.d` directories are loaded in alphabetical order.
-* Improved performance of the `arrayElement` function in the case of a constant multidimensional array with an empty array as one of the elements. Example: `[[1], []][x]`.
-* The server starts faster now when using configuration files with very large substitutions (for instance, very large lists of IP networks).
-* When running a query, table valued functions run once. Previously, `remote` and `mysql` table valued functions performed the same query twice to retrieve the table structure from a remote server.
-* The `MkDocs` documentation generator is used.
-* When you try to delete a table column that `DEFAULT`/`MATERIALIZED` expressions of other columns depend on, an exception is thrown (zhang2014).
-* Added the ability to parse an empty line in text formats as the number 0 for `Float` data types. This feature was previously available but was lost in release 1.1.54342.
-* `Enum` values can be used in `min`, `max`, `sum` and some other functions. In these cases, it uses the corresponding numeric values. This feature was previously available but was lost in the release 1.1.54337.
-* Added `max_expanded_ast_elements` to restrict the size of the AST after recursively expanding aliases.
-
-#### Bug fixes:
-
-* Fixed cases when unnecessary columns were removed from subqueries in error, or not removed from subqueries containing `UNION ALL`.
-* Fixed a bug in merges for `ReplacingMergeTree` tables.
-* Fixed synchronous insertions in `Distributed` tables (`insert_distributed_sync = 1`).
-* Fixed segfault for certain uses of `FULL` and `RIGHT JOIN` with duplicate columns in subqueries.
-* Fixed segfault for certain uses of `replace_running_query` and `KILL QUERY`.
-* Fixed the order of the `source` and `last_exception` columns in the `system.dictionaries` table.
-* Fixed a bug when the `DROP DATABASE` query did not delete the file with metadata.
-* Fixed the `DROP DATABASE` query for `Dictionary` databases.
-* Fixed the low precision of `uniqHLL12` and `uniqCombined` functions for cardinalities greater than 100 million items (Alex Bocharov).
-* Fixed the calculation of implicit default values when necessary to simultaneously calculate default explicit expressions in `INSERT` queries (zhang2014).
-* Fixed a rare case when a query to a `MergeTree` table couldn't finish (chenxing-xc).
-* Fixed a crash that occurred when running a `CHECK` query for `Distributed` tables if all shards are local (chenxing.xc).
-* Fixed a slight performance regression with functions that use regular expressions.
-* Fixed a performance regression when creating multidimensional arrays from complex expressions.
-* Fixed a bug that could cause an extra `FORMAT` section to appear in an `.sql` file with metadata.
-* Fixed a bug that caused the `max_table_size_to_drop` limit to apply when trying to delete a `MATERIALIZED VIEW` looking at an explicitly specified table.
-* Fixed incompatibility with old clients (old clients were sometimes sent data with the `DateTime('timezone')` type, which they do not understand).
-* Fixed a bug when reading `Nested` column elements of structures that were added using `ALTER` but that are empty for the old partitions, when the conditions for these columns moved to `PREWHERE`.
-* Fixed a bug when filtering tables by virtual `_table` columns in queries to `Merge` tables.
-* Fixed a bug when using `ALIAS` columns in `Distributed` tables.
-* Fixed a bug that made dynamic compilation impossible for queries with aggregate functions from the `quantile` family.
-* Fixed a race condition in the query execution pipeline that occurred in very rare cases when using `Merge` tables with a large number of tables, and when using `GLOBAL` subqueries.
-* Fixed a crash when passing arrays of different sizes to an `arrayReduce` function when using aggregate functions from multiple arguments.
-* Prohibited the use of queries with `UNION ALL` in a `MATERIALIZED VIEW`.
-* Fixed an error during initialization of the `part_log` system table when the server starts (by default, `part_log` is disabled).
-
-#### Backward incompatible changes:
-
-* Removed the `distributed_ddl_allow_replicated_alter` option. This behavior is enabled by default.
-* Removed the `strict_insert_defaults` setting. If you were using this functionality, write to `clickhouse-feedback@yandex-team.com`.
-* Removed the `UnsortedMergeTree` engine.
-
-### Clickhouse Release 1.1.54343, 2018-02-05
-
-* Added macros support for defining cluster names in distributed DDL queries and constructors of Distributed tables: `CREATE TABLE distr ON CLUSTER '{cluster}' (...) ENGINE = Distributed('{cluster}', 'db', 'table')`.
-* Now queries like `SELECT ... FROM table WHERE expr IN (subquery)` are processed using the `table` index.
-* Improved processing of duplicates when inserting to Replicated tables, so they no longer slow down execution of the replication queue.
-
-### Clickhouse Release 1.1.54342, 2018-01-22
-
-This release contains bug fixes for the previous release 1.1.54337:
-
-* Fixed a regression in 1.1.54337: if the default user has readonly access, then the server refuses to start up with the message `Cannot create database in readonly mode`.
-* Fixed a regression in 1.1.54337: on systems with systemd, logs are always written to syslog regardless of the configuration; the watchdog script still uses init.d.
-* Fixed a regression in 1.1.54337: wrong default configuration in the Docker image.
-* Fixed nondeterministic behavior of GraphiteMergeTree (you can see it in log messages `Data after merge is not byte-identical to the data on another replicas`).
-* Fixed a bug that may lead to inconsistent merges after OPTIMIZE query to Replicated tables (you may see it in log messages `Part ... intersects the previous part`).
-* Buffer tables now work correctly when MATERIALIZED columns are present in the destination table (by zhang2014).
-* Fixed a bug in implementation of NULL.
-
-### Clickhouse Release 1.1.54337, 2018-01-18
-
-#### New features:
-
-* Added support for storage of multi-dimensional arrays and tuples (`Tuple` data type) in tables.
-* Support for table functions for `DESCRIBE` and `INSERT` queries. Added support for subqueries in `DESCRIBE`. Examples: `DESC TABLE remote('host', default.hits)`; `DESC TABLE (SELECT 1)`; `INSERT INTO TABLE FUNCTION remote('host', default.hits)`. Support for `INSERT INTO TABLE` in addition to `INSERT INTO`.
-* Improved support for time zones. The `DateTime` data type can be annotated with the timezone that is used for parsing and formatting in text formats. Example: `DateTime('Europe/Moscow')`. When timezones are specified in functions for `DateTime` arguments, the return type will track the timezone, and the value will be displayed as expected.
-* Added the functions `toTimeZone`, `timeDiff`, `toQuarter`, `toRelativeQuarterNum`. The `toRelativeHour`/`Minute`/`Second` functions can take a value of type `Date` as an argument. The `now` function name is case-sensitive.
-* Added the `toStartOfFifteenMinutes` function (Kirill Shvakov).
-* Added the `clickhouse format` tool for formatting queries.
-* Added the `format_schema_path` configuration parameter (Marek Vavruşa). It is used for specifying a schema in `Cap'n Proto` format. Schema files can be located only in the specified directory.
-* Added support for config substitutions (`incl` and `conf.d`) for configuration of external dictionaries and models (Pavel Yakunin).
-* Added a column with documentation for the `system.settings` table (Kirill Shvakov).
-* Added the `system.parts_columns` table with information about column sizes in each data part of `MergeTree` tables.
-* Added the `system.models` table with information about loaded `CatBoost` machine learning models.
-* Added the `mysql` and `odbc` table functions and corresponding `MySQL` and `ODBC` table engines for accessing remote databases. This functionality is in the beta stage.
-* Added the possibility to pass an argument of type `AggregateFunction` for the `groupArray` aggregate function (so you can create an array of states of some aggregate function).
-* Removed restrictions on various combinations of aggregate function combinators. For example, you can use `avgForEachIf` as well as `avgIfForEach` aggregate functions, which have different behaviors.
-* The `-ForEach` aggregate function combinator is extended for the case of aggregate functions of multiple arguments.
-* Added support for aggregate functions of `Nullable` arguments even for cases when the function returns a non-`Nullable` result (added with the contribution of Silviu Caragea). Example: `groupArray`, `groupUniqArray`, `topK`.
-* Added the `max_client_network_bandwidth` setting for `clickhouse-client` (Kirill Shvakov).
-* Users with the `readonly = 2` setting are allowed to work with TEMPORARY tables (CREATE, DROP, INSERT...) (Kirill Shvakov).
-* Added support for using multiple consumers with the `Kafka` engine. Extended configuration options for `Kafka` (Marek Vavruša).
-* Added the `intExp3` and `intExp4` functions.
-* Added the `sumKahan` aggregate function.
-* Added the `to*Number*OrNull` functions, where `*Number*` is a numeric type (see the sketch after this list).
-* Added support for `WITH` clauses for an `INSERT SELECT` query (author: zhang2014).
-* Added settings: `http_connection_timeout`, `http_send_timeout`, `http_receive_timeout`. In particular, these settings are used for downloading data parts for replication. Changing these settings allows for faster failover if the network is overloaded.
-* Added support for `ALTER` for tables of type `Null` (Anastasiya Tsarkova).
-* The `reinterpretAsString` function is extended for all data types that are stored contiguously in memory.
-* Added the `--silent` option for the `clickhouse-local` tool. It suppresses printing query execution info in stderr.
-* Added support for reading values of type `Date` from text in a format where the month and/or day of the month is specified using a single digit instead of two digits (Amos Bird).
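A small sketch of the `to*Number*OrNull` family referenced in the list above, using `toInt32OrNull` as one instance:

```sql
SELECT
    toInt32OrNull('123') AS parsed,   -- 123
    toInt32OrNull('12x') AS invalid;  -- NULL instead of a parsing exception
```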
-
-#### Performance optimizations:
-
-* Improved performance of aggregate functions `min`, `max`, `any`, `anyLast`, `anyHeavy`, `argMin`, `argMax` from string arguments.
-* Improved performance of the functions `isInfinite`, `isFinite`, `isNaN`, `roundToExp2`.
-* Improved performance of parsing and formatting `Date` and `DateTime` type values in text format.
-* Improved performance and precision of parsing floating point numbers.
-* Lowered memory usage for `JOIN` in the case when the left and right parts have columns with identical names that are not contained in `USING`.
-* Improved performance of aggregate functions `varSamp`, `varPop`, `stddevSamp`, `stddevPop`, `covarSamp`, `covarPop`, `corr` at the cost of reduced numerical stability. The old functions are available under the names `varSampStable`, `varPopStable`, `stddevSampStable`, `stddevPopStable`, `covarSampStable`, `covarPopStable`, `corrStable`.
-
-#### Bug fixes:
-
-* Fixed data deduplication after running a `DROP` or `DETACH PARTITION` query. In the previous version, dropping a partition and inserting the same data again was not working because inserted blocks were considered duplicates.
-* Fixed a bug that could lead to incorrect interpretation of the `WHERE` clause for `CREATE MATERIALIZED VIEW` queries with `POPULATE`.
-* Fixed a bug in using the `root_path` parameter in the `zookeeper_servers` configuration.
-* Fixed unexpected results of passing the `Date` argument to `toStartOfDay`.
-* Fixed the `addMonths` and `subtractMonths` functions and the arithmetic for `INTERVAL n MONTH` in cases when the result has the previous year.
-* Added missing support for the `UUID` data type for `DISTINCT`, `JOIN`, and `uniq` aggregate functions and external dictionaries (Evgeniy Ivanov). Support for `UUID` is still incomplete.
-* Fixed `SummingMergeTree` behavior in cases when the rows summed to zero.
-* Various fixes for the `Kafka` engine (Marek Vavruša).
-* Fixed incorrect behavior of the `Join` table engine (Amos Bird).
-* Fixed incorrect allocator behavior under FreeBSD and OS X.
-* The `extractAll` function now supports empty matches.
-* Fixed an error that blocked usage of `libressl` instead of `openssl`.
-* Fixed the `CREATE TABLE AS SELECT` query from temporary tables.
-* Fixed non-atomicity of updating the replication queue. This could lead to replicas being out of sync until the server restarts.
-* Fixed possible overflow in `gcd`, `lcm` and `modulo` (`%` operator) (Maks Skorokhod).
-* `-preprocessed` files are now created after changing `umask` (`umask` can be changed in the config).
-* Fixed a bug in the background check of parts (`MergeTreePartChecker`) when using a custom partition key.
-* Fixed parsing of tuples (values of the `Tuple` data type) in text formats.
-* Improved error messages about incompatible types passed to `multiIf`, `array` and some other functions.
-* Redesigned support for `Nullable` types. Fixed bugs that may lead to a server crash. Fixed almost all other bugs related to `NULL` support: incorrect type conversions in INSERT SELECT, insufficient support for Nullable in HAVING and PREWHERE, `join_use_nulls` mode, Nullable types as arguments of `OR` operator, etc.
-* Fixed various bugs related to internal semantics of data types. Examples: unnecessary summing of `Enum` type fields in `SummingMergeTree`; alignment of `Enum` types in `Pretty` formats, etc.
-* Stricter checks for allowed combinations of composite columns.
-* Fixed the overflow when specifying a very large parameter for the `FixedString` data type.
-* Fixed a bug in the `topK` aggregate function in a generic case.
-* Added the missing check for equality of array sizes in arguments of n-ary variants of aggregate functions with an `-Array` combinator.
-* Fixed a bug in `--pager` for `clickhouse-client` (author: ks1322).
-* Fixed the precision of the `exp10` function.
-* Fixed the behavior of the `visitParamExtract` function for better compliance with documentation.
-* Fixed the crash when incorrect data types are specified.
-* Fixed the behavior of `DISTINCT` in the case when all columns are constants.
-* Fixed query formatting in the case of using the `tupleElement` function with a complex constant expression as the tuple element index.
-* Fixed a bug in `Dictionary` tables for `range_hashed` dictionaries.
-* Fixed a bug that leads to excessive rows in the result of `FULL` and `RIGHT JOIN` (Amos Bird).
-* Fixed a server crash when creating and removing temporary files in `config.d` directories during config reload.
-* Fixed the `SYSTEM DROP DNS CACHE` query: the cache was flushed but addresses of cluster nodes were not updated.
-* Fixed the behavior of `MATERIALIZED VIEW` after executing `DETACH TABLE` for the table under the view (Marek Vavruša).
-
-#### Build improvements:
-
-* The `pbuilder` tool is used for builds. The build process is almost completely independent of the build host environment.
-* A single build is used for different OS versions. Packages and binaries have been made compatible with a wide range of Linux systems.
-* Added the `clickhouse-test` package. It can be used to run functional tests.
-* The source tarball can now be published to the repository. It can be used to reproduce the build without using GitHub.
-* Added limited integration with Travis CI. Due to limits on build time in Travis, only the debug build is tested and a limited subset of tests are run.
-* Added support for `Cap'n'Proto` in the default build.
-* Changed the format of documentation sources from `reStructuredText` to `Markdown`.
-* Added support for `systemd` (Vladimir Smirnov). It is disabled by default due to incompatibility with some OS images and can be enabled manually.
-* For dynamic code generation, `clang` and `lld` are embedded into the `clickhouse` binary. They can also be invoked as `clickhouse clang` and `clickhouse lld`.
-* Removed usage of GNU extensions from the code. Enabled the `-Wextra` option. When building with `clang` the default is `libc++` instead of `libstdc++`.
-* Extracted `clickhouse_parsers` and `clickhouse_common_io` libraries to speed up builds of various tools.
-
-#### Backward incompatible changes:
-
-* The format for marks in `Log` type tables that contain `Nullable` columns was changed in a backward incompatible way. If you have these tables, you should convert them to the `TinyLog` type before starting up the new server version. To do this, replace `ENGINE = Log` with `ENGINE = TinyLog` in the corresponding `.sql` file in the `metadata` directory. If your table doesn't have `Nullable` columns or if the type of your table is not `Log`, then you don't need to do anything.
-* Removed the `experimental_allow_extended_storage_definition_syntax` setting. Now this feature is enabled by default.
-* The `runningIncome` function was renamed to `runningDifferenceStartingWithFirstvalue` to avoid confusion.
-* Removed the `FROM ARRAY JOIN arr` syntax when ARRAY JOIN is specified directly after FROM with no table (Amos Bird).
-* Removed the `BlockTabSeparated` format that was used solely for demonstration purposes.
-* Changed the state format for aggregate functions `varSamp`, `varPop`, `stddevSamp`, `stddevPop`, `covarSamp`, `covarPop`, `corr`. If you have stored states of these aggregate functions in tables (using the `AggregateFunction` data type or materialized views with corresponding states), please write to clickhouse-feedback@yandex-team.com.
-* In previous server versions there was an undocumented feature: if an aggregate function depends on parameters, you can still specify it without parameters in the AggregateFunction data type. Example: `AggregateFunction(quantiles, UInt64)` instead of `AggregateFunction(quantiles(0.5, 0.9), UInt64)`. This feature was lost. Although it was undocumented, we plan to support it again in future releases.
-* Enum data types cannot be used in min/max aggregate functions. This ability will be returned in the next release.
-
-#### Please note when upgrading:
-
-* When doing a rolling update on a cluster, at the point when some of the replicas are running the old version of ClickHouse and some are running the new version, replication is temporarily stopped and the message `unknown parameter 'shard'` appears in the log. Replication will continue after all replicas of the cluster are updated.
-* If different versions of ClickHouse are running on the cluster servers, it is possible that distributed queries using the following functions will have incorrect results: `varSamp`, `varPop`, `stddevSamp`, `stddevPop`, `covarSamp`, `covarPop`, `corr`. You should update all cluster nodes.
-
-### ClickHouse release 1.1.54327, 2017-12-21
-
-This release contains bug fixes for the previous release 1.1.54318:
-
-* Fixed bug with possible race condition in replication that could lead to data loss. This issue affects versions 1.1.54310 and 1.1.54318. If you use one of these versions with Replicated tables, the update is strongly recommended. This issue shows in logs in Warning messages like `Part ... from own log doesn't exist.` The issue is relevant even if you don't see these messages in logs.
-
-### ClickHouse release 1.1.54318, 2017-11-30
-
-This release contains bug fixes for the previous release 1.1.54310:
-
-* Fixed incorrect row deletions during merges in the SummingMergeTree engine
-* Fixed a memory leak in unreplicated MergeTree engines
-* Fixed performance degradation with frequent inserts in MergeTree engines
-* Fixed an issue that was causing the replication queue to stop running
-* Fixed rotation and archiving of server logs
-
-### ClickHouse release 1.1.54310, 2017-11-01
-
-#### New features:
-
-* Custom partitioning key for the MergeTree family of table engines (see the sketch after this list).
-* [Kafka](https://clickhouse.yandex/docs/en/operations/table_engines/kafka/) table engine.
-* Added support for loading [CatBoost](https://catboost.yandex/) models and applying them to data stored in ClickHouse.
-* Added support for time zones with non-integer offsets from UTC.
-* Added support for arithmetic operations with time intervals.
-* The range of values for the Date and DateTime types is extended to the year 2105.
-* Added the `CREATE MATERIALIZED VIEW x TO y` query (specifies an existing table for storing the data of a materialized view).
-* Added the `ATTACH TABLE` query without arguments.
-* The processing logic for Nested columns with names ending in -Map in a SummingMergeTree table was extracted to the sumMap aggregate function. You can now specify such columns explicitly.
-* Max size of the IP trie dictionary is increased to 128M entries.
-* Added the getSizeOfEnumType function.
-* Added the sumWithOverflow aggregate function.
-* Added support for the Cap'n Proto input format.
-* You can now customize compression level when using the zstd algorithm.
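A hedged sketch of two features from the list above, custom partitioning keys and `CREATE MATERIALIZED VIEW ... TO`; the table names and schema are invented, and the `PARTITION BY`/`ORDER BY` clauses use the extended storage definition syntax that accompanies this feature:

```sql
-- MergeTree table with a custom (non-monthly) partitioning key:
CREATE TABLE hits
(
    event_date Date,
    user_id UInt64
) ENGINE = MergeTree
PARTITION BY toMonday(event_date)
ORDER BY (event_date, user_id);

-- Materialized view that stores its data in an existing table (hits_daily is assumed to exist):
CREATE MATERIALIZED VIEW hits_per_day TO hits_daily
AS SELECT event_date, count() AS hits FROM hits GROUP BY event_date;
```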
-* Fixed crashing when reading data with the setting `preferred_block_size_bytes=0.` -* Fixed crashes of `clickhouse-client` when pressing ` Page Down` -* Correct interpretation of certain complex queries with `GLOBAL IN` and `UNION ALL` -* `FREEZE PARTITION` always works atomically now. -* Empty POST requests now return a response with code 411. -* Fixed interpretation errors for expressions like `CAST(1 AS Nullable(UInt8)).` -* Fixed an error when reading `Array(Nullable(String))` columns from `MergeTree` tables. -* Fixed crashing when parsing queries like `SELECT dummy AS dummy, dummy AS b` -* Users are updated correctly with invalid `users.xml` -* Correct handling when an executable dictionary returns a non-zero response code. - -### ClickHouse release 1.1.54292, 2017-09-20 - -#### New features: - -* Added the `pointInPolygon` function for working with coordinates on a coordinate plane. -* Added the `sumMap` aggregate function for calculating the sum of arrays, similar to `SummingMergeTree`. -* Added the `trunc` function. Improved performance of the rounding functions (`round`, `floor`, `ceil`, `roundToExp2`) and corrected the logic of how they work. Changed the logic of the `roundToExp2` function for fractions and negative numbers. -* The ClickHouse executable file is now less dependent on the libc version. The same ClickHouse executable file can run on a wide variety of Linux systems. There is still a dependency when using compiled queries (with the setting ` compile = 1` , which is not used by default). -* Reduced the time needed for dynamic compilation of queries. - -#### Bug fixes: - -* Fixed an error that sometimes produced ` part ... intersects previous part` messages and weakened replica consistency. -* Fixed an error that caused the server to lock up if ZooKeeper was unavailable during shutdown. -* Removed excessive logging when restoring replicas. -* Fixed an error in the UNION ALL implementation. -* Fixed an error in the concat function that occurred if the first column in a block has the Array type. -* Progress is now displayed correctly in the system.merges table. - -### ClickHouse release 1.1.54289, 2017-09-13 - -#### New features: - -* `SYSTEM` queries for server administration: `SYSTEM RELOAD DICTIONARY`, `SYSTEM RELOAD DICTIONARIES`, `SYSTEM DROP DNS CACHE`, `SYSTEM SHUTDOWN`, `SYSTEM KILL`. -* Added functions for working with arrays: `concat`, `arraySlice`, `arrayPushBack`, `arrayPushFront`, `arrayPopBack`, `arrayPopFront`. -* Added `root` and `identity` parameters for the ZooKeeper configuration. This allows you to isolate individual users on the same ZooKeeper cluster. -* Added aggregate functions `groupBitAnd`, `groupBitOr`, and `groupBitXor` (for compatibility, they are also available under the names `BIT_AND`, `BIT_OR`, and `BIT_XOR`). -* External dictionaries can be loaded from MySQL by specifying a socket in the filesystem. -* External dictionaries can be loaded from MySQL over SSL (`ssl_cert`, `ssl_key`, `ssl_ca` parameters). -* Added the `max_network_bandwidth_for_user` setting to restrict the overall bandwidth use for queries per user. -* Support for `DROP TABLE` for temporary tables. -* Support for reading `DateTime` values in Unix timestamp format from the `CSV` and `JSONEachRow` formats. -* Lagging replicas in distributed queries are now excluded by default (the default threshold is 5 minutes). -* FIFO locking is used during ALTER: an ALTER query isn't blocked indefinitely for continuously running queries. -* Option to set `umask` in the config file. 
-* Improved performance for queries with `DISTINCT`. - -#### Bug fixes: - -* Improved the process for deleting old nodes in ZooKeeper. Previously, old nodes sometimes didn't get deleted if there were very frequent inserts, which caused the server to be slow to shut down, among other things. -* Fixed randomization when choosing hosts for the connection to ZooKeeper. -* Fixed the exclusion of lagging replicas in distributed queries if the replica is localhost. -* Fixed an error where a data part in a `ReplicatedMergeTree` table could be broken after running `ALTER MODIFY` on an element in a `Nested` structure. -* Fixed an error that could cause SELECT queries to "hang". -* Improvements to distributed DDL queries. -* Fixed the query `CREATE TABLE ... AS `. -* Resolved the deadlock in the `ALTER ... CLEAR COLUMN IN PARTITION` query for `Buffer` tables. -* Fixed the invalid default value for `Enum`s (0 instead of the minimum) when using the `JSONEachRow` and `TSKV` formats. -* Resolved the appearance of zombie processes when using a dictionary with an `executable` source. -* Fixed segfault for the HEAD query. - -#### Improved workflow for developing and assembling ClickHouse: - -* You can use `pbuilder` to build ClickHouse. -* You can use `libc++` instead of `libstdc++` for builds on Linux. -* Added instructions for using static code analysis tools: `Coverage`, `clang-tidy`, `cppcheck`. - -#### Please note when upgrading: - -* There is now a higher default value for the MergeTree setting `max_bytes_to_merge_at_max_space_in_pool` (the maximum total size of data parts to merge, in bytes): it has increased from 100 GiB to 150 GiB. This might result in large merges running after the server upgrade, which could cause an increased load on the disk subsystem. If the free space available on the server is less than twice the total amount of the merges that are running, this will cause all other merges to stop running, including merges of small data parts. As a result, INSERT queries will fail with the message "Merges are processing significantly slower than inserts." Use the `SELECT * FROM system.merges` query to monitor the situation. You can also check the `DiskSpaceReservedForMerge` metric in the `system.metrics` table, or in Graphite. You don't need to do anything to fix this, since the issue will resolve itself once the large merges finish. If you find this unacceptable, you can restore the previous value for the `max_bytes_to_merge_at_max_space_in_pool` setting. To do this, go to the `merge_tree` section in config.xml, set `max_bytes_to_merge_at_max_space_in_pool` to `107374182400`, and restart the server. - -### ClickHouse release 1.1.54284, 2017-08-29 - -* This is a bugfix release for the previous 1.1.54282 release. It fixes leaks in the parts directory in ZooKeeper. - -### ClickHouse release 1.1.54282, 2017-08-23 - -This release contains bug fixes for the previous release 1.1.54276: - -* Fixed `DB::Exception: Assertion violation: !_path.empty()` when inserting into a Distributed table. -* Fixed parsing when inserting in RowBinary format if input data starts with ';'. -* Errors during runtime compilation of certain aggregate functions (e.g. `groupArray()`). - -### ClickHouse release 1.1.54276, 2017-08-16 - -#### New features: - -* Added an optional WITH section for a SELECT query. Example query: `WITH 1+1 AS a SELECT a, a*a` -* INSERT can be performed synchronously in a Distributed table: OK is returned only after all the data is saved on all the shards. This is activated by the setting insert_distributed_sync=1. 
-* Added the UUID data type for working with 16-byte identifiers. -* Added aliases of CHAR, FLOAT and other types for compatibility with Tableau. -* Added the functions toYYYYMM, toYYYYMMDD, and toYYYYMMDDhhmmss for converting time into numbers. -* You can use IP addresses (together with the hostname) to identify servers for clustered DDL queries. -* Added support for non-constant arguments and negative offsets in the function `substring(str, pos, len)`. -* Added the max_size parameter for the `groupArray(max_size)(column)` aggregate function, and optimized its performance. - -#### Main changes: - -* Security improvements: all server files are created with 0640 permissions (can be changed via config parameter). -* Improved error messages for queries with invalid syntax. -* Significantly reduced memory consumption and improved performance when merging large sections of MergeTree data. -* Significantly increased the performance of data merges for the ReplacingMergeTree engine. -* Improved performance for asynchronous inserts from a Distributed table by combining multiple source inserts. To enable this functionality, use the setting distributed_directory_monitor_batch_inserts=1. - -#### Backward incompatible changes: - -* Changed the binary format of aggregate states of `groupArray(array_column)` functions for arrays. - -#### Complete list of changes: - -* Added the `output_format_json_quote_denormals` setting, which enables outputting nan and inf values in JSON format. -* Optimized stream allocation when reading from a Distributed table. -* Settings can be configured in readonly mode if the value doesn't change. -* Added the ability to retrieve non-integer granules of the MergeTree engine in order to meet restrictions on the block size specified in the preferred_block_size_bytes setting. The purpose is to reduce the consumption of RAM and increase cache locality when processing queries from tables with large columns. -* Efficient use of indexes that contain expressions like `toStartOfHour(x)` for conditions like `toStartOfHour(x) op constexpr`. -* Added new settings for MergeTree engines (the merge_tree section in config.xml): - - replicated_deduplication_window_seconds sets the number of seconds allowed for deduplicating inserts in Replicated tables. - - cleanup_delay_period sets how often to start cleanup to remove outdated data. - - replicated_can_become_leader can prevent a replica from becoming the leader (and assigning merges). -* Accelerated cleanup to remove outdated data from ZooKeeper. -* Multiple improvements and fixes for clustered DDL queries. Of particular interest is the new setting distributed_ddl_task_timeout, which limits the time to wait for a response from the servers in the cluster. If a DDL request has not been performed on all hosts, the response will contain a timeout error and the request will be executed in async mode. -* Improved display of stack traces in the server logs. -* Added the "none" value for the compression method. -* You can use multiple dictionaries_config sections in config.xml. -* It is possible to connect to MySQL through a socket in the file system. -* The system.parts table has a new column with information about the size of marks, in bytes. - -#### Bug fixes: - -* Distributed tables using a Merge table now work correctly for a SELECT query with a condition on the `_table` field. -* Fixed a rare race condition in ReplicatedMergeTree when checking data parts. -* Fixed possible freezing on "leader election" when starting a server. 
-* The max_replica_delay_for_distributed_queries setting was ignored when using a local replica of the data source. This has been fixed. -* Fixed incorrect behavior of `ALTER TABLE CLEAR COLUMN IN PARTITION` when attempting to clean a non-existing column. -* Fixed an exception in the multiIf function when using empty arrays or strings. -* Fixed excessive memory allocations when deserializing Native format. -* Fixed incorrect auto-update of Trie dictionaries. -* Fixed an exception when running queries with a GROUP BY clause from a Merge table when using SAMPLE. -* Fixed a crash of GROUP BY when using distributed_aggregation_memory_efficient=1. -* Now you can specify the database.table on the right side of IN and JOIN. -* Too many threads were used for parallel aggregation. This has been fixed. -* Fixed how the "if" function works with FixedString arguments. -* SELECT worked incorrectly from a Distributed table for shards with a weight of 0. This has been fixed. -* Running `CREATE VIEW IF EXISTS` no longer causes crashes. -* Fixed incorrect behavior when input_format_skip_unknown_fields=1 is set and there are negative numbers. -* Fixed an infinite loop in the `dictGetHierarchy()` function if there is some invalid data in the dictionary. -* Fixed `Syntax error: unexpected (...)` errors when running distributed queries with subqueries in an IN or JOIN clause and Merge tables. -* Fixed an incorrect interpretation of a SELECT query from Dictionary tables. -* Fixed the "Cannot mremap" error when using arrays in IN and JOIN clauses with more than 2 billion elements. -* Fixed the failover for dictionaries with MySQL as the source. - -#### Improved workflow for developing and assembling ClickHouse: - -* Builds can be assembled in Arcadia. -* You can use gcc 7 to compile ClickHouse. -* Parallel builds using ccache+distcc are faster now. - -### ClickHouse release 1.1.54245, 2017-07-04 - -#### New features: - -* Distributed DDL (for example, `CREATE TABLE ON CLUSTER`). -* The replicated query `ALTER TABLE CLEAR COLUMN IN PARTITION`. -* The engine for Dictionary tables (access to dictionary data in the form of a table). -* Dictionary database engine (this type of database automatically has Dictionary tables available for all the connected external dictionaries). -* You can check for updates to the dictionary by sending a request to the source. -* Qualified column names. -* Quoting identifiers using double quotation marks. -* Sessions in the HTTP interface. -* The OPTIMIZE query for a Replicated table can run not only on the leader. - -#### Backward incompatible changes: - -* Removed SET GLOBAL. - -#### Minor changes: - -* Now after an alert is triggered, the log prints the full stack trace. -* Relaxed the verification of the number of damaged/extra data parts at startup (there were too many false positives). - -#### Bug fixes: - -* Fixed a bad connection "sticking" when inserting into a Distributed table. -* GLOBAL IN now works for a query from a Merge table that looks at a Distributed table. -* The incorrect number of cores was detected on a Google Compute Engine virtual machine. This has been fixed. -* Changes in how an executable source of cached external dictionaries works. -* Fixed the comparison of strings containing null characters. -* Fixed the comparison of Float32 primary key fields with constants. -* Previously, an incorrect estimate of the size of a field could lead to overly large allocations. -* Fixed a crash when querying a Nullable column added to a table using ALTER. 
-* Fixed a crash when sorting by a Nullable column, if the number of rows is less than LIMIT. -* Fixed an ORDER BY subquery consisting of only constant values. -* Previously, a Replicated table could remain in the invalid state after a failed DROP TABLE. -* Aliases for scalar subqueries with empty results are no longer lost. -* Now a query that used compilation does not fail with an error if the .so file gets damaged. +## [Changelog for 2019](https://github.com/ClickHouse/ClickHouse/blob/master/docs/en/whats-new/changelog/2019.md) diff --git a/CMakeLists.txt b/CMakeLists.txt index c0d4cd5504d..fb36aff6603 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -17,6 +17,7 @@ project(ClickHouse) include (cmake/arch.cmake) include (cmake/target.cmake) include (cmake/tools.cmake) +include (cmake/analysis.cmake) # Ignore export() since we don't use it, # but it gets broken with a global targets via link_libraries() @@ -29,7 +30,11 @@ set(CMAKE_LINK_DEPENDS_NO_SHARED 1) # Do not relink all depended targets on .so set(CMAKE_CONFIGURATION_TYPES "RelWithDebInfo;Debug;Release;MinSizeRel" CACHE STRING "" FORCE) set(CMAKE_DEBUG_POSTFIX "d" CACHE STRING "Generate debug library name with a postfix.") # To be consistent with CMakeLists from contrib libs. -option(ENABLE_IPO "Enable inter-procedural optimization (aka LTO)" OFF) # need cmake 3.9+ +# Enable the ability to organize targets into hierarchies of "folders" for capable GUI-based IDEs. +# For more info see https://cmake.org/cmake/help/latest/prop_gbl/USE_FOLDERS.html +set_property(GLOBAL PROPERTY USE_FOLDERS ON) + +option(ENABLE_IPO "Enable full link time optimization (it's usually impractical; see also ENABLE_THINLTO)" OFF) # need cmake 3.9+ if(ENABLE_IPO) cmake_policy(SET CMP0069 NEW) include(CheckIPOSupported) @@ -52,12 +57,12 @@ endif () include (cmake/find/ccache.cmake) if (NOT CMAKE_BUILD_TYPE OR CMAKE_BUILD_TYPE STREQUAL "None") - message (STATUS "CMAKE_BUILD_TYPE is not set, set to default = RELWITHDEBINFO") - set (CMAKE_BUILD_TYPE "RELWITHDEBINFO") + set (CMAKE_BUILD_TYPE "RelWithDebInfo") + message (STATUS "CMAKE_BUILD_TYPE is not set, set to default = ${CMAKE_BUILD_TYPE}") endif () -string(TOUPPER ${CMAKE_BUILD_TYPE} CMAKE_BUILD_TYPE_UC) message (STATUS "CMAKE_BUILD_TYPE: ${CMAKE_BUILD_TYPE}") +string (TOUPPER ${CMAKE_BUILD_TYPE} CMAKE_BUILD_TYPE_UC) option (USE_STATIC_LIBRARIES "Set to FALSE to use shared libraries" ON) option (MAKE_STATIC_LIBRARIES "Set to FALSE to make shared libraries" ${USE_STATIC_LIBRARIES}) @@ -83,9 +88,10 @@ option (ENABLE_FUZZING "Enables fuzzing instrumentation" OFF) if (ENABLE_FUZZING) message (STATUS "Fuzzing instrumentation enabled") set (WITH_COVERAGE ON) - set (SANITIZE "libfuzzer") + set (FUZZER "libfuzzer") endif() +include (cmake/fuzzer.cmake) include (cmake/sanitize.cmake) if (CMAKE_GENERATOR STREQUAL "Ninja" AND NOT DISABLE_COLORED_BUILD) @@ -97,16 +103,7 @@ endif () include (cmake/add_warning.cmake) if (NOT MSVC) - set (COMMON_WARNING_FLAGS "${COMMON_WARNING_FLAGS} -Wall") # -Werror is also added inside directories with our own code. 
-endif () - -if (COMPILER_GCC OR COMPILER_CLANG) - set (CXX_WARNING_FLAGS "${CXX_WARNING_FLAGS} -Wnon-virtual-dtor") -endif () - -if (COMPILER_GCC AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER "8.3.0") - # Warnings in protobuf generating - set (CXX_WARNING_FLAGS "${CXX_WARNING_FLAGS} -Wno-array-bounds") + set (COMMON_WARNING_FLAGS "${COMMON_WARNING_FLAGS} -Wall") # -Werror and many more are also added inside cmake/warnings.cmake endif () if (COMPILER_CLANG) @@ -120,12 +117,8 @@ endif () option (ENABLE_TESTS "Enables tests" ON) -if (ARCH_AMD64) - option (USE_INTERNAL_MEMCPY "Use internal implementation of 'memcpy' function instead of provided by libc. Only for x86_64." ON) -endif () - if (OS_LINUX AND NOT UNBUNDLED AND MAKE_STATIC_LIBRARIES AND NOT SPLIT_SHARED_LIBRARIES AND CMAKE_VERSION VERSION_GREATER "3.9.0") - option (GLIBC_COMPATIBILITY "Set to TRUE to enable compatibility with older glibc libraries. Only for x86_64, Linux. Implies USE_INTERNAL_MEMCPY." ON) + option (GLIBC_COMPATIBILITY "Set to TRUE to enable compatibility with older glibc libraries. Only for x86_64, Linux. Implies ENABLE_FASTMEMCPY." ON) endif () if (NOT CMAKE_VERSION VERSION_GREATER "3.9.0") @@ -189,15 +182,17 @@ if (COMPILER_GCC OR COMPILER_CLANG) endif () option(WITH_COVERAGE "Build with coverage." 0) -if(WITH_COVERAGE AND COMPILER_CLANG) - set(COMPILER_FLAGS "${COMPILER_FLAGS} -fprofile-instr-generate -fcoverage-mapping") - # If we want to disable coverage for specific translation units - set(WITHOUT_COVERAGE "-fno-profile-instr-generate -fno-coverage-mapping") + +if (WITH_COVERAGE AND COMPILER_CLANG) + set(COMPILER_FLAGS "${COMPILER_FLAGS} -fprofile-instr-generate -fcoverage-mapping") + # If we want to disable coverage for specific translation units + set(WITHOUT_COVERAGE "-fno-profile-instr-generate -fno-coverage-mapping") endif() -if(WITH_COVERAGE AND COMPILER_GCC) - set(COMPILER_FLAGS "${COMPILER_FLAGS} -fprofile-arcs -ftest-coverage") - set(COVERAGE_OPTION "-lgcov") - set(WITHOUT_COVERAGE "-fno-profile-arcs -fno-test-coverage") + +if (WITH_COVERAGE AND COMPILER_GCC) + set(COMPILER_FLAGS "${COMPILER_FLAGS} -fprofile-arcs -ftest-coverage") + set(COVERAGE_OPTION "-lgcov") + set(WITHOUT_COVERAGE "-fno-profile-arcs -fno-test-coverage") endif() set (CMAKE_BUILD_COLOR_MAKEFILE ON) @@ -210,10 +205,44 @@ set (CMAKE_C_FLAGS_RELWITHDEBINFO "${CMAKE_C_FLAGS_RELWITHDEBINFO} -O3 ${ set (CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -O0 -g3 -ggdb3 -fno-inline ${CMAKE_C_FLAGS_ADD}") if (COMPILER_CLANG) - # Exception unwinding doesn't work in clang release build without this option - # TODO investigate that - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-omit-frame-pointer") - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fno-omit-frame-pointer") + if (OS_DARWIN) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libc++") + set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,-U,_inside_main") + endif() + + # Display absolute paths in error messages. Otherwise KDevelop fails to navigate to the correct file and opens a new file instead. + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fdiagnostics-absolute-paths") + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fdiagnostics-absolute-paths") + + option(ENABLE_THINLTO "Enable Thin LTO. Only applicable for clang. It's also suppressed when building with tests or sanitizers." ON) + + # We cannot afford to use LTO when compiling unit tests, and it's not enough + # to only supply -fno-lto at the final linking stage. So we disable it + # completely. 
+ if (ENABLE_THINLTO AND NOT ENABLE_TESTS AND NOT SANITIZE) + # Link time optimization + set (CMAKE_C_FLAGS_RELWITHDEBINFO "${CMAKE_C_FLAGS_RELWITHDEBINFO} -flto=thin") + set (CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} -flto=thin") + set (CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO} -flto=thin") + endif () + + # Always prefer llvm tools when using clang. For instance, we cannot use GNU ar when llvm LTO is enabled + + find_program (LLVM_AR_PATH NAMES "llvm-ar" "llvm-ar-10" "llvm-ar-9" "llvm-ar-8") + if (LLVM_AR_PATH) + message(STATUS "Using llvm-ar: ${LLVM_AR_PATH}.") + set (CMAKE_AR ${LLVM_AR_PATH}) + else () + message(WARNING "Cannot find llvm-ar. System ar will be used instead. It does not work with ThinLTO.") + endif () + + find_program (LLVM_RANLIB_PATH NAMES "llvm-ranlib" "llvm-ranlib-10" "llvm-ranlib-9" "llvm-ranlib-8") + if (LLVM_RANLIB_PATH) + message(STATUS "Using llvm-ranlib: ${LLVM_RANLIB_PATH}.") + set (CMAKE_RANLIB ${LLVM_RANLIB_PATH}) + else () + message(WARNING "Cannot find llvm-ranlib. System ranlib will be used instead. It does not work with ThinLTO.") + endif () endif () option (ENABLE_LIBRARIES "Enable all libraries (Global default switch)" ON) @@ -225,7 +254,7 @@ else () set(NOT_UNBUNDLED 1) endif () -if (UNBUNDLED OR NOT (OS_LINUX OR OS_DARWIN) OR ARCH_32) +if (UNBUNDLED OR NOT (OS_LINUX OR OS_DARWIN)) # Using system libs can cause a lot of warnings in includes (on macro expansion). option (WERROR "Enable -Werror compiler option" OFF) else () @@ -239,7 +268,7 @@ endif () # Make this extra-checks for correct library dependencies. if (OS_LINUX AND NOT SANITIZE) set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--no-undefined") - set (CMAKE_SHARED_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--no-undefined") + set (CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -Wl,--no-undefined") endif () include(cmake/dbms_glob_sources.cmake) @@ -248,13 +277,14 @@ if (OS_LINUX) include(cmake/linux/default_libs.cmake) elseif (OS_DARWIN) include(cmake/darwin/default_libs.cmake) +elseif (OS_FREEBSD) + include(cmake/freebsd/default_libs.cmake) endif () ###################################### ### Add targets below this comment ### ###################################### -string (TOUPPER ${CMAKE_BUILD_TYPE} CMAKE_BUILD_TYPE_UC) set (CMAKE_POSTFIX_VARIABLE "CMAKE_${CMAKE_BUILD_TYPE_UC}_POSTFIX") if (MAKE_STATIC_LIBRARIES) @@ -281,24 +311,6 @@ if (USE_INCLUDE_WHAT_YOU_USE) endif() endif () -# Using clang-tidy static analyzer http://mariobadr.com/using-clang-tidy-with-cmake-36.html https://cmake.org/cmake/help/v3.6/prop_tgt/LANG_CLANG_TIDY.html -option (ENABLE_CLANG_TIDY "Use 'clang-tidy' static analyzer" OFF) -if (ENABLE_CLANG_TIDY) - if (${CMAKE_VERSION} VERSION_LESS "3.6.0") - message(FATAL_ERROR "clang-tidy requires CMake version at least 3.6.") - endif() - find_program (CLANG_TIDY_EXE NAMES "clang-tidy" DOC "Path to clang-tidy executable") - if (NOT CLANG_TIDY_EXE) - set (USE_CLANG_TIDY 0) - message (STATUS "clang-tidy not found.") - else () - set (USE_CLANG_TIDY 1) - message (STATUS "clang-tidy found: ${CLANG_TIDY_EXE}") - set (DO_CLANG_TIDY "${CLANG_TIDY_EXE}" "-checks=*,-clang-analyzer-alpha.*") - # You can enable it within a directory by: set (CMAKE_CXX_CLANG_TIDY "${DO_CLANG_TIDY}") - endif () -endif () - if (ENABLE_TESTS) message (STATUS "Tests are enabled") endif () @@ -313,35 +325,30 @@ endif () message (STATUS "Building for: ${CMAKE_SYSTEM} ${CMAKE_SYSTEM_PROCESSOR} ${CMAKE_LIBRARY_ARCHITECTURE} ; 
USE_STATIC_LIBRARIES=${USE_STATIC_LIBRARIES} MAKE_STATIC_LIBRARIES=${MAKE_STATIC_LIBRARIES} SPLIT_SHARED=${SPLIT_SHARED_LIBRARIES} UNBUNDLED=${UNBUNDLED} CCACHE=${CCACHE_FOUND} ${CCACHE_VERSION}") -include(GNUInstallDirs) +include (GNUInstallDirs) include (cmake/contrib_finder.cmake) include (cmake/lib_name.cmake) find_contrib_lib(double-conversion) # Must be before parquet include (cmake/find/ssl.cmake) +include (cmake/find/ldap.cmake) # after ssl include (cmake/find/icu.cmake) include (cmake/find/boost.cmake) include (cmake/find/zlib.cmake) include (cmake/find/zstd.cmake) include (cmake/find/ltdl.cmake) # for odbc include (cmake/find/termcap.cmake) -include (cmake/find/odbc.cmake) -# openssl, zlib, odbc before poco -include (cmake/find/poco.cmake) +# openssl, zlib before poco include (cmake/find/lz4.cmake) include (cmake/find/xxhash.cmake) include (cmake/find/sparsehash.cmake) -include (cmake/find/execinfo.cmake) include (cmake/find/re2.cmake) include (cmake/find/libgsasl.cmake) include (cmake/find/rdkafka.cmake) include (cmake/find/capnp.cmake) include (cmake/find/llvm.cmake) +include (cmake/find/opencl.cmake) include (cmake/find/h3.cmake) -include (cmake/find/cpuid.cmake) # Freebsd, bundled -if (NOT USE_CPUID) - include (cmake/find/cpuinfo.cmake) # Debian -endif() include (cmake/find/libxml2.cmake) include (cmake/find/brotli.cmake) include (cmake/find/protobuf.cmake) @@ -349,7 +356,6 @@ include (cmake/find/grpc.cmake) include (cmake/find/pdqsort.cmake) include (cmake/find/hdfs3.cmake) # uses protobuf include (cmake/find/s3.cmake) -include (cmake/find/consistent-hashing.cmake) include (cmake/find/base64.cmake) include (cmake/find/parquet.cmake) include (cmake/find/hyperscan.cmake) @@ -358,6 +364,7 @@ include (cmake/find/rapidjson.cmake) include (cmake/find/fastops.cmake) include (cmake/find/orc.cmake) include (cmake/find/avro.cmake) +include (cmake/find/msgpack.cmake) find_contrib_lib(cityhash) find_contrib_lib(farmhash) @@ -369,21 +376,24 @@ if (ENABLE_TESTS) endif () # Need to process before "contrib" dir: -include (cmake/find/jemalloc.cmake) -include (cmake/find/cctz.cmake) include (cmake/find/mysqlclient.cmake) # When testing for memory leaks with Valgrind, don't link tcmalloc or jemalloc. -if (USE_JEMALLOC) - message (STATUS "Link jemalloc: ${JEMALLOC_LIBRARIES}") - set (MALLOC_LIBRARIES ${JEMALLOC_LIBRARIES}) -elseif (SANITIZE) - message (STATUS "Will use ${SANITIZE} sanitizer.") -elseif (OS_LINUX) +if (OS_LINUX AND NOT ENABLE_JEMALLOC) message (WARNING "Non default allocator is disabled. This is not recommended for production Linux builds.") endif () +if (USE_OPENCL) + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DUSE_OPENCL=1") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DUSE_OPENCL=1") + + if (OS_DARWIN) + set(OPENCL_LINKER_FLAGS "-framework OpenCL") + set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${OPENCL_LINKER_FLAGS}") + endif () +endif () + include (cmake/print_flags.cmake) if (TARGET global-group) @@ -403,8 +413,16 @@ macro (add_executable target) endif() endmacro() +set(ConfigIncludePath ${CMAKE_CURRENT_BINARY_DIR}/includes/configs CACHE INTERNAL "Path to generated configuration files.") +include_directories(${ConfigIncludePath}) + +# Add as many warnings as possible for our own code. 
+include (cmake/warnings.cmake) + add_subdirectory (base) +add_subdirectory (programs) +add_subdirectory (src) +add_subdirectory (tests) add_subdirectory (utils) -add_subdirectory (dbms) include (cmake/print_include_directories.cmake) diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md new file mode 100644 index 00000000000..4d547a081c6 --- /dev/null +++ b/CODE_OF_CONDUCT.md @@ -0,0 +1 @@ +We welcome everyone to contribute to our product; see CONTRIBUTING.md. diff --git a/README.md b/README.md index 1014e3f059f..955f9d1a5d1 100644 --- a/README.md +++ b/README.md @@ -8,10 +8,12 @@ ClickHouse is an open-source column-oriented database management system that all * [Tutorial](https://clickhouse.tech/docs/en/getting_started/tutorial/) shows how to set up and query a small ClickHouse cluster. * [Documentation](https://clickhouse.tech/docs/en/) provides more in-depth information. * [YouTube channel](https://www.youtube.com/c/ClickHouseDB) has a lot of content about ClickHouse in video format. +* [Slack](https://join.slack.com/t/clickhousedb/shared_invite/zt-d2zxkf9e-XyxDa_ucfPxzuH4SJIm~Ng) and [Telegram](https://telegram.me/clickhouse_en) allow you to chat with ClickHouse users in real time. * [Blog](https://clickhouse.yandex/blog/en/) contains various ClickHouse-related articles, as well as announcements and reports about events. * [Contacts](https://clickhouse.tech/#contacts) can help to get your questions answered if there are any. -* You can also [fill this form](https://forms.yandex.com/surveys/meet-yandex-clickhouse-team/) to meet Yandex ClickHouse team in person. +* You can also [fill this form](https://clickhouse.tech/#meet) to meet the Yandex ClickHouse team in person. ## Upcoming Events -* [ClickHouse Meetup in Athens](https://www.meetup.com/Athens-Big-Data/events/268379195/) on March 5. +* [ClickHouse Workshop in Novosibirsk](https://2020.codefest.ru/lecture/1628) on TBD date. +* [Yandex C++ Open-Source Sprints in Moscow](https://events.yandex.ru/events/otkrytyj-kod-v-yandek-28-03-2020) on TBD date. 
diff --git a/SECURITY.md b/SECURITY.md index 1a48ba6abd1..7210db23183 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -9,6 +9,7 @@ currently being supported with security updates: | ------- | ------------------ | | 1.x | :x: | | 18.x | :x: | +| 19.x | :x: | | 19.14 | :white_check_mark: | | 20.x | :white_check_mark: | diff --git a/base/CMakeLists.txt b/base/CMakeLists.txt index 65593f70a95..cfa54fe2ca4 100644 --- a/base/CMakeLists.txt +++ b/base/CMakeLists.txt @@ -1,17 +1,13 @@ -add_subdirectory (common) -add_subdirectory (loggers) -add_subdirectory (daemon) +if (USE_CLANG_TIDY) + set (CMAKE_CXX_CLANG_TIDY "${CLANG_TIDY_PATH}") +endif () -if (USE_INTERNAL_MEMCPY) - add_subdirectory (memcpy) -endif() +add_subdirectory (common) +add_subdirectory (daemon) +add_subdirectory (loggers) +add_subdirectory (pcg-random) +add_subdirectory (widechar_width) if (USE_MYSQL) add_subdirectory (mysqlxx) endif () - -if (USE_INTERNAL_CONSISTENT_HASHING_LIBRARY) - add_subdirectory (consistent-hashing) -endif () -add_subdirectory (consistent-hashing-sumbur) -add_subdirectory (widechar_width) diff --git a/base/common/CMakeLists.txt b/base/common/CMakeLists.txt index 682e622bb77..c3eddb4d26c 100644 --- a/base/common/CMakeLists.txt +++ b/base/common/CMakeLists.txt @@ -1,18 +1,17 @@ -configure_file (config_common.h.in config_common.h) - set (SRCS argsToConfig.cpp coverage.cpp DateLUT.cpp DateLUTImpl.cpp demangle.cpp + getFQDNOrHostName.cpp getMemoryAmount.cpp getThreadId.cpp JSON.cpp LineReader.cpp mremap.cpp phdr_cache.cpp - preciseExp10.c + preciseExp10.cpp setTerminalEcho.cpp shift10.cpp sleep.cpp @@ -20,15 +19,9 @@ set (SRCS ) if (ENABLE_REPLXX) - set (SRCS ${SRCS} - ReplxxLineReader.cpp - ReplxxLineReader.h - ) + list (APPEND SRCS ReplxxLineReader.cpp) elseif (ENABLE_READLINE) - set (SRCS ${SRCS} - ReadlineLineReader.cpp - ReadlineLineReader.h - ) + list (APPEND SRCS ReadlineLineReader.cpp) endif () if (USE_DEBUG_HELPERS) @@ -38,31 +31,23 @@ endif () add_library (common ${SRCS}) -target_include_directories(common PUBLIC .. ${CMAKE_CURRENT_BINARY_DIR}/..) - -if (USE_INTERNAL_MEMCPY) - target_link_libraries (common PRIVATE memcpy) +if (WITH_COVERAGE) + target_compile_definitions(common PUBLIC WITH_COVERAGE=1) +else () + target_compile_definitions(common PUBLIC WITH_COVERAGE=0) endif () -if(CCTZ_INCLUDE_DIR) - target_include_directories(common BEFORE PRIVATE ${CCTZ_INCLUDE_DIR}) +if (USE_INTERNAL_CCTZ) + set_source_files_properties(DateLUTImpl.cpp PROPERTIES COMPILE_DEFINITIONS USE_INTERNAL_CCTZ) endif() +target_include_directories(common PUBLIC .. ${CMAKE_CURRENT_BINARY_DIR}/..) 
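Since the CMake above exports WITH_COVERAGE as a PUBLIC compile definition with an explicit value of 1 or 0, any translation unit that links against `common` can branch on it with `#if` rather than `#ifdef` (the same pattern coverage.cpp relies on further down in this patch). A minimal, illustrative consumer of that definition, not an actual file in this change:

#include <cstdio>

/// Reports whether this binary was compiled with coverage instrumentation.
/// Relies on WITH_COVERAGE always being defined to either 1 or 0 by the build system.
void reportCoverageFlavour()
{
#if WITH_COVERAGE
    std::puts("instrumented build: coverage data will be dumped on request");
#else
    std::puts("regular build: no coverage instrumentation");
#endif
}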
+ if (NOT USE_INTERNAL_BOOST_LIBRARY) target_include_directories (common SYSTEM BEFORE PUBLIC ${Boost_INCLUDE_DIRS}) endif () -if(NOT USE_INTERNAL_POCO_LIBRARY) - target_include_directories (common SYSTEM BEFORE PUBLIC ${Poco_Foundation_INCLUDE_DIR}) -endif() - -if(CCTZ_LIBRARY) - target_link_libraries(common PRIVATE ${CCTZ_LIBRARY}) -endif() - -target_link_libraries(common PUBLIC replxx) - -# allow explicitly fallback to readline +# Allow explicit fallback to readline if (NOT ENABLE_REPLXX AND ENABLE_READLINE) message (STATUS "Attempt to fallback to readline explicitly") set (READLINE_PATHS "/usr/local/opt/readline/lib") @@ -86,10 +71,17 @@ endif () target_link_libraries (common PUBLIC - ${Poco_Util_LIBRARY} - ${Poco_Foundation_LIBRARY} ${CITYHASH_LIBRARIES} ${Boost_SYSTEM_LIBRARY} + FastMemcpy + Poco::Net + Poco::Net::SSL + Poco::Util + Poco::Foundation + replxx + + PRIVATE + cctz ) if (ENABLE_TESTS) diff --git a/base/common/DateLUT.cpp b/base/common/DateLUT.cpp index 8db1458d00f..750900493aa 100644 --- a/base/common/DateLUT.cpp +++ b/base/common/DateLUT.cpp @@ -1,9 +1,10 @@ -#include +#include "DateLUT.h" -#include +#include #include #include -#include + +#include #include diff --git a/base/common/DateLUT.h b/base/common/DateLUT.h index 3cb935bc553..93c6cb403e2 100644 --- a/base/common/DateLUT.h +++ b/base/common/DateLUT.h @@ -1,20 +1,15 @@ #pragma once #include "DateLUTImpl.h" -#include -#include -#include -#include + +#include "defines.h" + #include -// Also defined in Core/Defines.h -#if !defined(ALWAYS_INLINE) -#if defined(_MSC_VER) - #define ALWAYS_INLINE __forceinline -#else - #define ALWAYS_INLINE __attribute__((__always_inline__)) -#endif -#endif +#include +#include +#include +#include /// This class provides lazy initialization and lookup of singleton DateLUTImpl objects for a given timezone. 
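The doc comment above describes lazy initialization and lookup of one DateLUTImpl singleton per timezone. A minimal sketch of that lookup pattern follows; the class and member names are illustrative and do not reflect ClickHouse's actual interface:

#include <map>
#include <memory>
#include <mutex>
#include <string>

/// Stand-in for the per-timezone date/time lookup table.
struct TimeZoneImpl
{
    explicit TimeZoneImpl(std::string name_) : name(std::move(name_)) {}
    std::string name;
};

class TimeZoneCache
{
public:
    /// Returns the table for the given timezone, constructing it on first use only.
    static const TimeZoneImpl & instance(const std::string & time_zone)
    {
        static TimeZoneCache cache;
        std::lock_guard<std::mutex> lock(cache.mutex);
        auto it = cache.impls.find(time_zone);
        if (it == cache.impls.end())
            it = cache.impls.emplace(time_zone, std::make_unique<TimeZoneImpl>(time_zone)).first;
        return *it->second;
    }

private:
    std::mutex mutex;
    std::map<std::string, std::unique_ptr<TimeZoneImpl>> impls;
};

/// Usage: const TimeZoneImpl & lut = TimeZoneCache::instance("Europe/Moscow");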
diff --git a/base/common/DateLUTImpl.cpp b/base/common/DateLUTImpl.cpp index 51f5ceb759c..83a4d1e9149 100644 --- a/base/common/DateLUTImpl.cpp +++ b/base/common/DateLUTImpl.cpp @@ -1,25 +1,19 @@ -#if __has_include() -#include // bundled, debian -#else -#include // freebsd -#endif +#include "DateLUTImpl.h" -#if __has_include() +#include #include -#else -#include -#endif - -#include +#include +#include #include -#include +#include + +#include +#include #include #include -#include #include - -#define DATE_LUT_MIN 0 +#include namespace @@ -37,9 +31,8 @@ UInt8 getDayOfWeek(const cctz::civil_day & date) case cctz::weekday::friday: return 5; case cctz::weekday::saturday: return 6; case cctz::weekday::sunday: return 7; - default: - throw Poco::Exception("Logical error: incorrect week day."); } + __builtin_unreachable(); } } @@ -56,10 +49,10 @@ DateLUTImpl::DateLUTImpl(const std::string & time_zone_) assert(inside_main); size_t i = 0; - time_t start_of_day = DATE_LUT_MIN; + time_t start_of_day = 0; cctz::time_zone cctz_time_zone; - if (!cctz::load_time_zone(time_zone.data(), &cctz_time_zone)) + if (!cctz::load_time_zone(time_zone, &cctz_time_zone)) throw Poco::Exception("Cannot load time zone " + time_zone_); cctz::time_zone::absolute_lookup start_of_epoch_lookup = cctz_time_zone.lookup(std::chrono::system_clock::from_time_t(start_of_day)); @@ -81,6 +74,11 @@ DateLUTImpl::DateLUTImpl(const std::string & time_zone_) values.day_of_week = getDayOfWeek(date); values.date = start_of_day; + assert(values.year >= DATE_LUT_MIN_YEAR && values.year <= DATE_LUT_MAX_YEAR); + assert(values.month >= 1 && values.month <= 12); + assert(values.day_of_month >= 1 && values.day_of_month <= 31); + assert(values.day_of_week >= 1 && values.day_of_week <= 7); + if (values.day_of_month == 1) { cctz::civil_month month(date); @@ -137,12 +135,15 @@ DateLUTImpl::DateLUTImpl(const std::string & time_zone_) /// Fill excessive part of lookup table. This is needed only to simplify handling of overflow cases. while (i < DATE_LUT_SIZE) { - lut[i] = lut[DATE_LUT_MAX_DAY_NUM]; + lut[i] = lut[i - 1]; ++i; } /// Fill lookup table for years and months. - for (size_t day = 0; day < DATE_LUT_SIZE && lut[day].year <= DATE_LUT_MAX_YEAR; ++day) + size_t year_months_lut_index = 0; + size_t first_day_of_last_month = 0; + + for (size_t day = 0; day < DATE_LUT_SIZE; ++day) { const Values & values = lut[day]; @@ -150,7 +151,87 @@ DateLUTImpl::DateLUTImpl(const std::string & time_zone_) { if (values.month == 1) years_lut[values.year - DATE_LUT_MIN_YEAR] = day; - years_months_lut[(values.year - DATE_LUT_MIN_YEAR) * 12 + values.month - 1] = day; + + year_months_lut_index = (values.year - DATE_LUT_MIN_YEAR) * 12 + values.month - 1; + years_months_lut[year_months_lut_index] = day; + first_day_of_last_month = day; } } + + /// Fill the rest of lookup table with the same last month (2106-02-01). + for (; year_months_lut_index < DATE_LUT_YEARS * 12; ++year_months_lut_index) + { + years_months_lut[year_months_lut_index] = first_day_of_last_month; + } } + + +#if !defined(ARCADIA_BUILD) /// Arcadia's variant of CCTZ already has the same implementation. + +/// Prefer to load timezones from blobs linked to the binary. +/// The blobs are provided by "tzdata" library. +/// This allows to avoid dependency on system tzdata. 
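/// For reference: "ld -r -b binary" derives symbol names from the input file path, so a
/// blob built from the file "Europe/Moscow" would end up behind symbols such as
/// _binary_Europe_Moscow_start (the first byte of the embedded file) and
/// _binary_Europe_Moscow_size (an absolute symbol whose address encodes the byte count).
/// That naming matches the '/' and '-' to '_' replacement performed by the factory below,
/// which resolves exactly this pair of names with dlsym() at runtime, e.g. (illustrative,
/// assuming such a blob is actually linked into the binary):
///
///     const void * tz_data = dlsym(RTLD_DEFAULT, "_binary_Europe_Moscow_start");
///     const void * tz_size = dlsym(RTLD_DEFAULT, "_binary_Europe_Moscow_size");
///     /// The pointer value of tz_size, not the memory it points to, is the size;
///     /// hence the unalignedLoad over the address of the local variable in the code below.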
+namespace cctz_extension +{ + namespace + { + class Source : public cctz::ZoneInfoSource + { + public: + Source(const char * data_, size_t size_) : data(data_), size(size_) {} + + size_t Read(void * buf, size_t bytes) override + { + if (bytes > size) + bytes = size; + memcpy(buf, data, bytes); + data += bytes; + size -= bytes; + return bytes; + } + + int Skip(size_t offset) override + { + if (offset <= size) + { + data += offset; + size -= offset; + return 0; + } + else + { + errno = EINVAL; + return -1; + } + } + private: + const char * data; + size_t size; + }; + + std::unique_ptr custom_factory( + const std::string & name, + const std::function(const std::string & name)> & fallback) + { + std::string name_replaced = name; + std::replace(name_replaced.begin(), name_replaced.end(), '/', '_'); + std::replace(name_replaced.begin(), name_replaced.end(), '-', '_'); + + /// These are the names that are generated by "ld -r -b binary" + std::string symbol_name_data = "_binary_" + name_replaced + "_start"; + std::string symbol_name_size = "_binary_" + name_replaced + "_size"; + + const void * sym_data = dlsym(RTLD_DEFAULT, symbol_name_data.c_str()); + const void * sym_size = dlsym(RTLD_DEFAULT, symbol_name_size.c_str()); + + if (sym_data && sym_size) + return std::make_unique(static_cast(sym_data), unalignedLoad(&sym_size)); + + return fallback(name); + } + } + + ZoneInfoSourceFactory zone_info_source_factory = custom_factory; +} + +#endif diff --git a/base/common/DateLUTImpl.h b/base/common/DateLUTImpl.h index 2f2e431f950..18078299cd9 100644 --- a/base/common/DateLUTImpl.h +++ b/base/common/DateLUTImpl.h @@ -1,8 +1,9 @@ #pragma once -#include "Types.h" #include "DayNum.h" -#include "likely.h" +#include "defines.h" +#include "types.h" + #include #include @@ -11,7 +12,7 @@ /// Table size is bigger than DATE_LUT_MAX_DAY_NUM to fill all indices within UInt16 range: this allows to remove extra check. #define DATE_LUT_SIZE 0x10000 #define DATE_LUT_MIN_YEAR 1970 -#define DATE_LUT_MAX_YEAR 2105 /// Last supported year +#define DATE_LUT_MAX_YEAR 2106 /// Last supported year (incomplete) #define DATE_LUT_YEARS (1 + DATE_LUT_MAX_YEAR - DATE_LUT_MIN_YEAR) /// Number of years in lookup table #if defined(__PPC__) @@ -36,7 +37,12 @@ using YearWeek = std::pair; class DateLUTImpl { public: - DateLUTImpl(const std::string & time_zone); + explicit DateLUTImpl(const std::string & time_zone); + + DateLUTImpl(const DateLUTImpl &) = delete; + DateLUTImpl & operator=(const DateLUTImpl &) = delete; + DateLUTImpl(const DateLUTImpl &&) = delete; + DateLUTImpl & operator=(const DateLUTImpl &&) = delete; public: /// The order of fields matters for alignment and sizeof. @@ -98,7 +104,7 @@ private: return guess; /// Time zones that have offset 0 from UTC do daylight saving time change (if any) towards increasing UTC offset (example: British Standard Time). - if (offset_at_start_of_epoch >= 0) + if (t >= lut[DayNum(guess + 1)].date) return DayNum(guess + 1); return DayNum(guess - 1); @@ -286,8 +292,8 @@ public: if (offset_is_whole_number_of_hours_everytime) return (t / 60) % 60; - time_t date = find(t).date; - return (t - date) / 60 % 60; + UInt32 date = find(t).date; + return (UInt32(t) - date) / 60 % 60; } inline time_t toStartOfMinute(time_t t) const { return t / 60 * 60; } @@ -300,9 +306,8 @@ public: if (offset_is_whole_number_of_hours_everytime) return t / 3600 * 3600; - time_t date = find(t).date; - /// Still can return wrong values for time at 1970-01-01 if the UTC offset was non-whole number of hours. 
- return date + (t - date) / 3600 * 3600; + UInt32 date = find(t).date; + return date + (UInt32(t) - date) / 3600 * 3600; } /** Number of calendar day since the beginning of UNIX epoch (1970-01-01 is zero) @@ -578,7 +583,7 @@ public: return t / 3600; /// Assume that if offset was fractional, then the fraction is the same as at the beginning of epoch. - /// NOTE This assumption is false for "Pacific/Pitcairn" time zone. + /// NOTE This assumption is false for "Pacific/Pitcairn" and "Pacific/Kiritimati" time zones. return (t + 86400 - offset_at_start_of_epoch) / 3600; } diff --git a/base/common/DayNum.h b/base/common/DayNum.h index 904a9281d64..a4ef0c43b69 100644 --- a/base/common/DayNum.h +++ b/base/common/DayNum.h @@ -1,6 +1,6 @@ #pragma once -#include +#include #include /** Represents number of days since 1970-01-01. diff --git a/base/common/JSON.cpp b/base/common/JSON.cpp index c6c0c843a16..9823591a2b6 100644 --- a/base/common/JSON.cpp +++ b/base/common/JSON.cpp @@ -215,7 +215,7 @@ JSON::ElementType JSON::getType() const void JSON::checkPos(Pos pos) const { - if (pos >= ptr_end) + if (pos >= ptr_end || ptr_begin == nullptr) throw JSONException("JSON: unexpected end of data."); } @@ -341,7 +341,7 @@ JSON::Pos JSON::skipArray() const if (*pos == ']') return ++pos; - while (1) + while (true) { pos = JSON(pos, ptr_end, level + 1).skipElement(); @@ -373,7 +373,7 @@ JSON::Pos JSON::skipObject() const if (*pos == '}') return ++pos; - while (1) + while (true) { pos = JSON(pos, ptr_end, level + 1).skipNameValuePair(); @@ -451,7 +451,10 @@ JSON JSON::operator[] (size_t n) const size_t i = 0; const_iterator it = begin(); while (i < n && it != end()) - ++it, ++i; + { + ++it; + ++i; + } if (i != n) throw JSONException("JSON: array index " + std::to_string(n) + " out of bounds."); @@ -626,7 +629,7 @@ std::string JSON::getString() const { unicode = Poco::NumberParser::parseHex(hex); } - catch (const Poco::SyntaxException & e) + catch (const Poco::SyntaxException &) { throw JSONException("JSON: incorrect syntax: incorrect HEX code."); } @@ -776,7 +779,7 @@ JSON::iterator & JSON::iterator::operator++() return *this; } -JSON::iterator JSON::iterator::operator++(int) +JSON::iterator JSON::iterator::operator++(int) // NOLINT { iterator copy(*this); ++*this; diff --git a/base/common/JSON.h b/base/common/JSON.h index 5f3d9325626..7039036eeb3 100644 --- a/base/common/JSON.h +++ b/base/common/JSON.h @@ -3,7 +3,7 @@ #include #include #include -#include +#include /** Очень простой класс для чтения JSON (или его кусочков). diff --git a/base/common/LineReader.cpp b/base/common/LineReader.cpp index c69690e3420..0d06e5ef225 100644 --- a/base/common/LineReader.cpp +++ b/base/common/LineReader.cpp @@ -30,7 +30,7 @@ void trim(String & s) bool hasInputData() { timeval timeout = {0, 0}; - fd_set fds; + fd_set fds{}; FD_ZERO(&fds); FD_SET(STDIN_FILENO, &fds); return select(1, &fds, nullptr, nullptr, &timeout) == 1; @@ -53,18 +53,18 @@ LineReader::Suggest::WordsRange LineReader::Suggest::getCompletions(const String /// last_word can be empty. 
- if (case_insensitive) + /// Only perform case sensitive completion when the prefix string contains any uppercase characters + if (std::none_of(prefix.begin(), prefix.end(), [&](auto c) { return c >= 'A' && c <= 'Z'; })) return std::equal_range( - words.begin(), words.end(), last_word, [prefix_length](std::string_view s, std::string_view prefix_searched) - { - return strncasecmp(s.data(), prefix_searched.data(), prefix_length) < 0; - }); + words_no_case.begin(), words_no_case.end(), last_word, [prefix_length](std::string_view s, std::string_view prefix_searched) + { + return strncasecmp(s.data(), prefix_searched.data(), prefix_length) < 0; + }); else - return std::equal_range( - words.begin(), words.end(), last_word, [prefix_length](std::string_view s, std::string_view prefix_searched) - { - return strncmp(s.data(), prefix_searched.data(), prefix_length) < 0; - }); + return std::equal_range(words.begin(), words.end(), last_word, [prefix_length](std::string_view s, std::string_view prefix_searched) + { + return strncmp(s.data(), prefix_searched.data(), prefix_length) < 0; + }); } LineReader::LineReader(const String & history_file_path_, char extender_, char delimiter_) @@ -127,7 +127,7 @@ LineReader::InputStatus LineReader::readOneLine(const String & prompt) #ifdef OS_LINUX if (!readline_ptr) { - for (auto name : {"libreadline.so", "libreadline.so.0", "libeditline.so", "libeditline.so.0"}) + for (const auto * name : {"libreadline.so", "libreadline.so.0", "libeditline.so", "libeditline.so.0"}) { void * dl_handle = dlopen(name, RTLD_LAZY); if (dl_handle) diff --git a/base/common/LineReader.h b/base/common/LineReader.h index 66de46d5fcb..3e64bc858ad 100644 --- a/base/common/LineReader.h +++ b/base/common/LineReader.h @@ -1,6 +1,6 @@ #pragma once -#include +#include #include #include @@ -14,13 +14,11 @@ public: using WordsRange = std::pair; Words words; + Words words_no_case; std::atomic ready{false}; /// Get iterators for the matched range of words if any. 
WordsRange getCompletions(const String & prefix, size_t prefix_length) const; - - /// case sensitive suggestion - bool case_insensitive = false; }; LineReader(const String & history_file_path, char extender, char delimiter = 0); /// if delimiter != 0, then it's multiline mode diff --git a/base/common/ReadlineLineReader.cpp b/base/common/ReadlineLineReader.cpp index fdbb929be79..ee9a37d2168 100644 --- a/base/common/ReadlineLineReader.cpp +++ b/base/common/ReadlineLineReader.cpp @@ -104,6 +104,8 @@ ReadlineLineReader::ReadlineLineReader(const Suggest & suggest_, const String & if (signal(SIGINT, clear_prompt_or_exit) == SIG_ERR) throw std::runtime_error(std::string("Cannot set signal handler for readline: ") + strerror(errno)); + + rl_variable_bind("completion-ignore-case", "on"); } ReadlineLineReader::~ReadlineLineReader() diff --git a/base/common/ReplxxLineReader.cpp b/base/common/ReplxxLineReader.cpp index 135c338391d..52c42235f1b 100644 --- a/base/common/ReplxxLineReader.cpp +++ b/base/common/ReplxxLineReader.cpp @@ -3,6 +3,7 @@ #include #include #include +#include namespace { @@ -18,18 +19,31 @@ void trim(String & s) ReplxxLineReader::ReplxxLineReader(const Suggest & suggest, const String & history_file_path_, char extender_, char delimiter_) : LineReader(history_file_path_, extender_, delimiter_) { + using namespace std::placeholders; + using Replxx = replxx::Replxx; + if (!history_file_path.empty()) rx.history_load(history_file_path); auto callback = [&suggest] (const String & context, size_t context_size) { auto range = suggest.getCompletions(context, context_size); - return replxx::Replxx::completions_t(range.first, range.second); + return Replxx::completions_t(range.first, range.second); }; rx.set_completion_callback(callback); rx.set_complete_on_empty(false); rx.set_word_break_characters(word_break_characters); + + /// By default C-p/C-n binded to COMPLETE_NEXT/COMPLETE_PREV, + /// bind C-p/C-n to history-previous/history-next like readline. + rx.bind_key(Replxx::KEY::control('N'), [this](char32_t code) { return rx.invoke(Replxx::ACTION::HISTORY_NEXT, code); }); + rx.bind_key(Replxx::KEY::control('P'), [this](char32_t code) { return rx.invoke(Replxx::ACTION::HISTORY_PREVIOUS, code); }); + /// By default COMPLETE_NEXT/COMPLETE_PREV was binded to C-p/C-n, re-bind + /// to M-P/M-N (that was used for HISTORY_COMMON_PREFIX_SEARCH before, but + /// it also binded to M-p/M-n). 
+ rx.bind_key(Replxx::KEY::meta('N'), [this](char32_t code) { return rx.invoke(Replxx::ACTION::COMPLETE_NEXT, code); }); + rx.bind_key(Replxx::KEY::meta('P'), [this](char32_t code) { return rx.invoke(Replxx::ACTION::COMPLETE_PREVIOUS, code); }); } ReplxxLineReader::~ReplxxLineReader() diff --git a/base/common/StringRef.h b/base/common/StringRef.h index 54010f15085..076b8982b1d 100644 --- a/base/common/StringRef.h +++ b/base/common/StringRef.h @@ -5,7 +5,7 @@ #include #include -#include +#include #include #include @@ -27,17 +27,17 @@ struct StringRef size_t size = 0; template > - StringRef(const CharT * data_, size_t size_) : data(reinterpret_cast(data_)), size(size_) {} + constexpr StringRef(const CharT * data_, size_t size_) : data(reinterpret_cast(data_)), size(size_) {} StringRef(const std::string & s) : data(s.data()), size(s.size()) {} - StringRef(const std::string_view & s) : data(s.data()), size(s.size()) {} - explicit StringRef(const char * data_) : data(data_), size(strlen(data_)) {} - StringRef() = default; + constexpr StringRef(const std::string_view & s) : data(s.data()), size(s.size()) {} + constexpr StringRef(const char * data_) : StringRef(std::string_view{data_}) {} + constexpr StringRef() = default; std::string toString() const { return std::string(data, size); } explicit operator std::string() const { return toString(); } - explicit operator std::string_view() const { return {data, size}; } + constexpr explicit operator std::string_view() const { return {data, size}; } }; using StringRefs = std::vector; diff --git a/base/common/Types.h b/base/common/Types.h deleted file mode 100644 index 8f125146add..00000000000 --- a/base/common/Types.h +++ /dev/null @@ -1,58 +0,0 @@ -#pragma once - -#include -#include -#include -#include -#include - -using Int8 = int8_t; -using Int16 = int16_t; -using Int32 = int32_t; -using Int64 = int64_t; - -using UInt8 = char8_t; -using UInt16 = uint16_t; -using UInt32 = uint32_t; -using UInt64 = uint64_t; - -using String = std::string; - -/// The standard library type traits, such as std::is_arithmetic, with one exception -/// (std::common_type), are "set in stone". Attempting to specialize them causes undefined behavior. -/// So instead of using the std type_traits, we use our own version which allows extension. 
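The "allows extension" remark above is the key difference from the standard traits: a project-defined numeric type can opt in by specializing these templates, whereas specializing std::is_signed and friends is undefined behavior, as the comment notes. A self-contained, hypothetical illustration follows (the wrapper type and its specialization are examples only, not part of this file):

#include <type_traits>

template <typename T>
struct is_signed
{
    static constexpr bool value = std::is_signed_v<T>;
};

template <typename T>
inline constexpr bool is_signed_v = is_signed<T>::value;

/// A hypothetical wide-integer wrapper the compiler knows nothing about.
struct Int128Wrapper
{
    long long high = 0;
    unsigned long long low = 0;
};

/// The custom type opts into the extensible trait via an ordinary specialization.
template <>
struct is_signed<Int128Wrapper>
{
    static constexpr bool value = true;
};

static_assert(is_signed_v<Int128Wrapper>, "the extension point works");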
-template -struct is_signed -{ - static constexpr bool value = std::is_signed_v; -}; - -template -inline constexpr bool is_signed_v = is_signed::value; - -template -struct is_unsigned -{ - static constexpr bool value = std::is_unsigned_v; -}; - -template -inline constexpr bool is_unsigned_v = is_unsigned::value; - -template -struct is_integral -{ - static constexpr bool value = std::is_integral_v; -}; - -template -inline constexpr bool is_integral_v = is_integral::value; - -template -struct is_arithmetic -{ - static constexpr bool value = std::is_arithmetic_v; -}; - -template -inline constexpr bool is_arithmetic_v = is_arithmetic::value; diff --git a/base/common/argsToConfig.cpp b/base/common/argsToConfig.cpp index b0ec2900268..d7983779d2d 100644 --- a/base/common/argsToConfig.cpp +++ b/base/common/argsToConfig.cpp @@ -1,6 +1,5 @@ -#include +#include "argsToConfig.h" -#include #include #include @@ -11,7 +10,7 @@ void argsToConfig(const Poco::Util::Application::ArgVec & argv, Poco::Util::Laye /// Test: -- --1=1 --1=2 --3 5 7 8 -9 10 -11=12 14= 15== --16==17 --=18 --19= --20 21 22 --23 --24 25 --26 -27 28 ---29=30 -- ----31 32 --33 3-4 Poco::AutoPtr map_config = new Poco::Util::MapConfiguration; std::string key; - for (auto & arg : argv) + for (const auto & arg : argv) { auto key_start = arg.find_first_not_of('-'); auto pos_minus = arg.find('-'); diff --git a/base/common/argsToConfig.h b/base/common/argsToConfig.h index 1c1607bc4c5..134eed64fd2 100644 --- a/base/common/argsToConfig.h +++ b/base/common/argsToConfig.h @@ -1,4 +1,5 @@ #pragma once + #include namespace Poco::Util diff --git a/base/common/config_common.h.in b/base/common/config_common.h.in deleted file mode 100644 index 41999bb5cde..00000000000 --- a/base/common/config_common.h.in +++ /dev/null @@ -1,7 +0,0 @@ -#pragma once - -// .h autogenerated by cmake ! 
- -#cmakedefine01 USE_JEMALLOC -#cmakedefine01 UNBUNDLED -#cmakedefine01 WITH_COVERAGE diff --git a/base/common/coverage.cpp b/base/common/coverage.cpp index d8d3b71edd1..9f3c5ca653a 100644 --- a/base/common/coverage.cpp +++ b/base/common/coverage.cpp @@ -1,16 +1,17 @@ -#include -#include +#include "coverage.h" #if WITH_COVERAGE -#include -#include +# include -#if defined(__clang__) +# include + + +# if defined(__clang__) extern "C" void __llvm_profile_dump(); -#elif defined(__GNUC__) || defined(__GNUG__) +# elif defined(__GNUC__) || defined(__GNUG__) extern "C" void __gcov_exit(); -#endif +# endif #endif @@ -21,11 +22,11 @@ void dumpCoverageReportIfPossible() static std::mutex mutex; std::lock_guard lock(mutex); -#if defined(__clang__) +# if defined(__clang__) __llvm_profile_dump(); -#elif defined(__GNUC__) || defined(__GNUG__) +# elif defined(__GNUC__) || defined(__GNUG__) __gcov_exit(); -#endif +# endif #endif } diff --git a/base/common/defines.h b/base/common/defines.h new file mode 100644 index 00000000000..af5981023ff --- /dev/null +++ b/base/common/defines.h @@ -0,0 +1,87 @@ +#pragma once + +#if defined(_MSC_VER) +# if !defined(likely) +# define likely(x) (x) +# endif +# if !defined(unlikely) +# define unlikely(x) (x) +# endif +#else +# if !defined(likely) +# define likely(x) (__builtin_expect(!!(x), 1)) +# endif +# if !defined(unlikely) +# define unlikely(x) (__builtin_expect(!!(x), 0)) +# endif +#endif + +#if defined(_MSC_VER) +# define ALWAYS_INLINE __forceinline +# define NO_INLINE static __declspec(noinline) +# define MAY_ALIAS +#else +# define ALWAYS_INLINE __attribute__((__always_inline__)) +# define NO_INLINE __attribute__((__noinline__)) +# define MAY_ALIAS __attribute__((__may_alias__)) +#endif + +#if !defined(__x86_64__) && !defined(__aarch64__) && !defined(__PPC__) +# error "The only supported platforms are x86_64 and AArch64, PowerPC (work in progress)" +#endif + +/// Check for presence of address sanitizer +#if !defined(ADDRESS_SANITIZER) +# if defined(__has_feature) +# if __has_feature(address_sanitizer) +# define ADDRESS_SANITIZER 1 +# endif +# elif defined(__SANITIZE_ADDRESS__) +# define ADDRESS_SANITIZER 1 +# endif +#endif + +#if !defined(THREAD_SANITIZER) +# if defined(__has_feature) +# if __has_feature(thread_sanitizer) +# define THREAD_SANITIZER 1 +# endif +# elif defined(__SANITIZE_THREAD__) +# define THREAD_SANITIZER 1 +# endif +#endif + +#if !defined(MEMORY_SANITIZER) +# if defined(__has_feature) +# if __has_feature(memory_sanitizer) +# define MEMORY_SANITIZER 1 +# endif +# elif defined(__MEMORY_SANITIZER__) +# define MEMORY_SANITIZER 1 +# endif +#endif + +/// TODO: Strange enough, there is no way to detect UB sanitizer. + +/// Explicitly allow undefined behaviour for certain functions. Use it as a function attribute. +/// It is useful in case when compiler cannot see (and exploit) it, but UBSan can. +/// Example: multiplication of signed integers with possibility of overflow when both sides are from user input. +#if defined(__clang__) +# define NO_SANITIZE_UNDEFINED __attribute__((__no_sanitize__("undefined"))) +# define NO_SANITIZE_ADDRESS __attribute__((__no_sanitize__("address"))) +# define NO_SANITIZE_THREAD __attribute__((__no_sanitize__("thread"))) +#else /// It does not work in GCC. GCC 7 cannot recognize this attribute and GCC 8 simply ignores it. 
+# define NO_SANITIZE_UNDEFINED +# define NO_SANITIZE_ADDRESS +# define NO_SANITIZE_THREAD +#endif + +#if defined __GNUC__ && !defined __clang__ +# define OPTIMIZE(x) __attribute__((__optimize__(x))) +#else +# define OPTIMIZE(x) +#endif + +/// A macro for suppressing warnings about unused variables or function results. +/// Useful for structured bindings which have no standard way to declare this. +#define UNUSED(...) (void)(__VA_ARGS__) diff --git a/base/common/demangle.cpp b/base/common/demangle.cpp index a034f7a9c05..d7fac7c6990 100644 --- a/base/common/demangle.cpp +++ b/base/common/demangle.cpp @@ -1,16 +1,8 @@ #include -#if defined(__has_feature) - #if __has_feature(memory_sanitizer) - #define MEMORY_SANITIZER 1 - #endif -#elif defined(__MEMORY_SANITIZER__) - #define MEMORY_SANITIZER 1 -#endif +#if defined(_MSC_VER) -#if _MSC_VER || MEMORY_SANITIZER - -DemangleResult tryDemangle(const char * name) +DemangleResult tryDemangle(const char *) { return DemangleResult{}; } diff --git a/base/common/find_symbols.h b/base/common/find_symbols.h index 162c73251fa..65d4a53ceff 100644 --- a/base/common/find_symbols.h +++ b/base/common/find_symbols.h @@ -1,6 +1,7 @@ #pragma once #include +#include #if defined(__SSE2__) #include @@ -292,3 +293,26 @@ inline char * find_last_not_symbols_or_null(char * begin, char * end) { return const_cast(detail::find_last_symbols_sse2(begin, end)); } + + +/// Slightly resembles boost::split. The drawback of boost::split is that it fires a false positive in clang static analyzer. +/// See https://github.com/boostorg/algorithm/issues/63 +/// And https://bugs.llvm.org/show_bug.cgi?id=41141 +template +inline void splitInto(To & to, const std::string & what, bool token_compress = false) +{ + const char * pos = what.data(); + const char * end = pos + what.size(); + while (pos < end) + { + const char * delimiter_or_end = find_first_symbols(pos, end); + + if (!token_compress || pos < delimiter_or_end) + to.emplace_back(pos, delimiter_or_end); + + if (delimiter_or_end < end) + pos = delimiter_or_end + 1; + else + pos = delimiter_or_end; + } +} diff --git a/dbms/src/Common/getFQDNOrHostName.cpp b/base/common/getFQDNOrHostName.cpp similarity index 91% rename from dbms/src/Common/getFQDNOrHostName.cpp rename to base/common/getFQDNOrHostName.cpp index 08ec015919e..f67b37bd71c 100644 --- a/dbms/src/Common/getFQDNOrHostName.cpp +++ b/base/common/getFQDNOrHostName.cpp @@ -1,5 +1,5 @@ #include -#include +#include namespace diff --git a/dbms/src/Common/getFQDNOrHostName.h b/base/common/getFQDNOrHostName.h similarity index 100% rename from dbms/src/Common/getFQDNOrHostName.h rename to base/common/getFQDNOrHostName.h diff --git a/base/common/getMemoryAmount.cpp b/base/common/getMemoryAmount.cpp index a16a701bd8f..5e600a37351 100644 --- a/base/common/getMemoryAmount.cpp +++ b/base/common/getMemoryAmount.cpp @@ -58,7 +58,7 @@ uint64_t getMemoryAmountOrZero() #endif uint64_t size = 0; /* 64-bit */ size_t len = sizeof(size); - if (sysctl(mib, 2, &size, &len, NULL, 0) == 0) + if (sysctl(mib, 2, &size, &len, nullptr, 0) == 0) return size; return 0; /* Failed? */ @@ -69,13 +69,13 @@ uint64_t getMemoryAmountOrZero() #elif defined(_SC_PHYS_PAGES) && defined(_SC_PAGESIZE) /* FreeBSD, Linux, OpenBSD, and Solaris. -------------------- */ - return (uint64_t)sysconf(_SC_PHYS_PAGES) - * (uint64_t)sysconf(_SC_PAGESIZE); + return uint64_t(sysconf(_SC_PHYS_PAGES)) + *uint64_t(sysconf(_SC_PAGESIZE)); #elif defined(_SC_PHYS_PAGES) && defined(_SC_PAGE_SIZE) /* Legacy. 
-------------------------------------------------- */ - return (uint64_t)sysconf(_SC_PHYS_PAGES) - * (uint64_t)sysconf(_SC_PAGE_SIZE); + return uint64_t(sysconf(_SC_PHYS_PAGES)) + * uint64_t(sysconf(_SC_PAGE_SIZE)); #elif defined(CTL_HW) && (defined(HW_PHYSMEM) || defined(HW_REALMEM)) /* DragonFly BSD, FreeBSD, NetBSD, OpenBSD, and OSX. -------- */ @@ -88,7 +88,7 @@ uint64_t getMemoryAmountOrZero() #endif unsigned int size = 0; /* 32-bit */ size_t len = sizeof(size); - if (sysctl(mib, 2, &size, &len, NULL, 0) == 0) + if (sysctl(mib, 2, &size, &len, nullptr, 0) == 0) return size; return 0; /* Failed? */ diff --git a/base/common/getThreadId.cpp b/base/common/getThreadId.cpp index 2542b14acf1..2575aba3844 100644 --- a/base/common/getThreadId.cpp +++ b/base/common/getThreadId.cpp @@ -1,9 +1,9 @@ #include -#if OS_LINUX +#if defined(OS_LINUX) #include #include -#elif OS_FREEBSD +#elif defined(OS_FREEBSD) #include #else #include @@ -16,9 +16,9 @@ uint64_t getThreadId() { if (!current_tid) { -#if OS_LINUX +#if defined(OS_LINUX) current_tid = syscall(SYS_gettid); /// This call is always successful. - man gettid -#elif OS_FREEBSD +#elif defined(OS_FREEBSD) current_tid = pthread_getthreadid_np(); #else if (0 != pthread_threadid_np(nullptr, ¤t_tid)) diff --git a/base/common/iostream_debug_helpers.h b/base/common/iostream_debug_helpers.h index 51a4b969d8a..4362d832970 100644 --- a/base/common/iostream_debug_helpers.h +++ b/base/common/iostream_debug_helpers.h @@ -80,7 +80,6 @@ dumpImpl(Out & out, T && x) } - /// Tuple, pair template Out & dumpTupleImpl(Out & out, T && x) diff --git a/base/common/itoa.h b/base/common/itoa.h index a29befd9c6d..5d660ca4378 100644 --- a/base/common/itoa.h +++ b/base/common/itoa.h @@ -30,7 +30,6 @@ #include #include #include -#include "likely.h" using int128_t = __int128; using uint128_t = unsigned __int128; diff --git a/base/common/likely.h b/base/common/likely.h deleted file mode 100644 index 338498af35f..00000000000 --- a/base/common/likely.h +++ /dev/null @@ -1,15 +0,0 @@ -#if defined(_MSC_VER) -# if !defined(likely) -# define likely(x) (x) -# endif -# if !defined(unlikely) -# define unlikely(x) (x) -# endif -#else -# if !defined(likely) -# define likely(x) (__builtin_expect(!!(x), 1)) -# endif -# if !defined(unlikely) -# define unlikely(x) (__builtin_expect(!!(x), 0)) -# endif -#endif diff --git a/base/common/memory.h b/base/common/memory.h index ab96cb593b9..e82c019ceab 100644 --- a/base/common/memory.h +++ b/base/common/memory.h @@ -1,45 +1,21 @@ #pragma once #include -#include "likely.h" - -#if __has_include() -#include -#endif +#include "defines.h" #if USE_JEMALLOC -#include - -#if JEMALLOC_VERSION_MAJOR < 4 - #undef USE_JEMALLOC - #define USE_JEMALLOC 0 - #include -#endif -#else -#include +# include #endif -// Also defined in Core/Defines.h -#if !defined(ALWAYS_INLINE) -#if defined(_MSC_VER) - #define ALWAYS_INLINE inline __forceinline -#else - #define ALWAYS_INLINE inline __attribute__((__always_inline__)) -#endif +#if !USE_JEMALLOC || JEMALLOC_VERSION_MAJOR < 4 +# include #endif -#if !defined(NO_INLINE) -#if defined(_MSC_VER) - #define NO_INLINE static __declspec(noinline) -#else - #define NO_INLINE __attribute__((__noinline__)) -#endif -#endif namespace Memory { -ALWAYS_INLINE void * newImpl(std::size_t size) +inline ALWAYS_INLINE void * newImpl(std::size_t size) { auto * ptr = malloc(size); if (likely(ptr != nullptr)) @@ -49,19 +25,19 @@ ALWAYS_INLINE void * newImpl(std::size_t size) throw std::bad_alloc{}; } -ALWAYS_INLINE void * newNoExept(std::size_t size) 
noexcept +inline ALWAYS_INLINE void * newNoExept(std::size_t size) noexcept { return malloc(size); } -ALWAYS_INLINE void deleteImpl(void * ptr) noexcept +inline ALWAYS_INLINE void deleteImpl(void * ptr) noexcept { free(ptr); } -#if USE_JEMALLOC +#if USE_JEMALLOC && JEMALLOC_VERSION_MAJOR >= 4 -ALWAYS_INLINE void deleteSized(void * ptr, std::size_t size) noexcept +inline ALWAYS_INLINE void deleteSized(void * ptr, std::size_t size) noexcept { if (unlikely(ptr == nullptr)) return; @@ -71,7 +47,7 @@ ALWAYS_INLINE void deleteSized(void * ptr, std::size_t size) noexcept #else -ALWAYS_INLINE void deleteSized(void * ptr, std::size_t size [[maybe_unused]]) noexcept +inline ALWAYS_INLINE void deleteSized(void * ptr, std::size_t size [[maybe_unused]]) noexcept { free(ptr); } diff --git a/base/common/mremap.cpp b/base/common/mremap.cpp index 00c6f260734..d2d8d7fde4f 100644 --- a/base/common/mremap.cpp +++ b/base/common/mremap.cpp @@ -19,7 +19,7 @@ void * mremap_fallback( return MAP_FAILED; } -#if _MSC_VER +#if defined(_MSC_VER) void * new_address = ::operator new(new_size); #else void * new_address = mmap(nullptr, new_size, mmap_prot, mmap_flags, mmap_fd, mmap_offset); @@ -29,7 +29,7 @@ void * mremap_fallback( memcpy(new_address, old_address, old_size); -#if _MSC_VER +#if defined(_MSC_VER) delete old_address; #else if (munmap(old_address, old_size)) diff --git a/base/common/phdr_cache.cpp b/base/common/phdr_cache.cpp index fc81c20e8dd..a5cb466f425 100644 --- a/base/common/phdr_cache.cpp +++ b/base/common/phdr_cache.cpp @@ -20,6 +20,14 @@ #define USE_PHDR_CACHE 1 #endif +/// Thread Sanitizer uses dl_iterate_phdr function on initialization and fails if we provide our own. +#ifdef USE_PHDR_CACHE + +#if defined(__clang__) +# pragma clang diagnostic ignored "-Wreserved-id-macro" +# pragma clang diagnostic ignored "-Wunused-macros" +#endif + #define __msan_unpoison(X, Y) #if defined(__has_feature) # if __has_feature(memory_sanitizer) @@ -28,9 +36,6 @@ # endif #endif -/// Thread Sanitizer uses dl_iterate_phdr function on initialization and fails if we provide our own. -#ifdef USE_PHDR_CACHE - #include #include #include @@ -70,7 +75,7 @@ extern "C" #endif int dl_iterate_phdr(int (*callback) (dl_phdr_info * info, size_t size, void * data), void * data) { - auto current_phdr_cache = phdr_cache.load(); + auto * current_phdr_cache = phdr_cache.load(); if (!current_phdr_cache) { // Cache is not yet populated, pass through to the original function. diff --git a/base/common/preciseExp10.c b/base/common/preciseExp10.c deleted file mode 100644 index 49c87217e20..00000000000 --- a/base/common/preciseExp10.c +++ /dev/null @@ -1,227 +0,0 @@ -/* - -https://www.musl-libc.org/ -http://git.musl-libc.org/cgit/musl/tree/src/math/exp10.c - -musl as a whole is licensed under the following standard MIT license: - ----------------------------------------------------------------------- -Copyright © 2005-2014 Rich Felker, et al. - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ----------------------------------------------------------------------- - -Authors/contributors include: - -Alex Dowad -Alexander Monakov -Anthony G. Basile -Arvid Picciani -Bobby Bingham -Boris Brezillon -Brent Cook -Chris Spiegel -Clément Vasseur -Daniel Micay -Denys Vlasenko -Emil Renner Berthing -Felix Fietkau -Felix Janda -Gianluca Anzolin -Hauke Mehrtens -Hiltjo Posthuma -Isaac Dunham -Jaydeep Patil -Jens Gustedt -Jeremy Huntwork -Jo-Philipp Wich -Joakim Sindholt -John Spencer -Josiah Worcester -Justin Cormack -Khem Raj -Kylie McClain -Luca Barbato -Luka Perkov -M Farkas-Dyck (Strake) -Mahesh Bodapati -Michael Forney -Natanael Copa -Nicholas J. Kain -orc -Pascal Cuoq -Petr Hosek -Pierre Carrier -Rich Felker -Richard Pennington -Shiz -sin -Solar Designer -Stefan Kristiansson -Szabolcs Nagy -Timo Teräs -Trutz Behn -Valentin Ochs -William Haddon - -Portions of this software are derived from third-party works licensed -under terms compatible with the above MIT license: - -The TRE regular expression implementation (src/regex/reg* and -src/regex/tre*) is Copyright © 2001-2008 Ville Laurikari and licensed -under a 2-clause BSD license (license text in the source files). The -included version has been heavily modified by Rich Felker in 2012, in -the interests of size, simplicity, and namespace cleanliness. - -Much of the math library code (src/math/ * and src/complex/ *) is -Copyright © 1993,2004 Sun Microsystems or -Copyright © 2003-2011 David Schultz or -Copyright © 2003-2009 Steven G. Kargl or -Copyright © 2003-2009 Bruce D. Evans or -Copyright © 2008 Stephen L. Moshier -and labelled as such in comments in the individual source files. All -have been licensed under extremely permissive terms. - -The ARM memcpy code (src/string/arm/memcpy_el.S) is Copyright © 2008 -The Android Open Source Project and is licensed under a two-clause BSD -license. It was taken from Bionic libc, used on Android. - -The implementation of DES for crypt (src/crypt/crypt_des.c) is -Copyright © 1994 David Burren. It is licensed under a BSD license. - -The implementation of blowfish crypt (src/crypt/crypt_blowfish.c) was -originally written by Solar Designer and placed into the public -domain. The code also comes with a fallback permissive license for use -in jurisdictions that may not recognize the public domain. - -The smoothsort implementation (src/stdlib/qsort.c) is Copyright © 2011 -Valentin Ochs and is licensed under an MIT-style license. - -The BSD PRNG implementation (src/prng/random.c) and XSI search API -(src/search/ *.c) functions are Copyright © 2011 Szabolcs Nagy and -licensed under following terms: "Permission to use, copy, modify, -and/or distribute this code for any purpose with or without fee is -hereby granted. There is no warranty." - -The x86_64 port was written by Nicholas J. Kain and is licensed under -the standard MIT terms. - -The mips and microblaze ports were originally written by Richard -Pennington for use in the ellcc project. 
The original code was adapted -by Rich Felker for build system and code conventions during upstream -integration. It is licensed under the standard MIT terms. - -The mips64 port was contributed by Imagination Technologies and is -licensed under the standard MIT terms. - -The powerpc port was also originally written by Richard Pennington, -and later supplemented and integrated by John Spencer. It is licensed -under the standard MIT terms. - -All other files which have no copyright comments are original works -produced specifically for use as part of this library, written either -by Rich Felker, the main author of the library, or by one or more -contibutors listed above. Details on authorship of individual files -can be found in the git version control history of the project. The -omission of copyright and license comments in each file is in the -interest of source tree size. - -In addition, permission is hereby granted for all public header files -(include/ * and arch/ * /bits/ *) and crt files intended to be linked into -applications (crt/ *, ldso/dlstart.c, and arch/ * /crt_arch.h) to omit -the copyright notice and permission notice otherwise required by the -license, and to use these files without any requirement of -attribution. These files include substantial contributions from: - -Bobby Bingham -John Spencer -Nicholas J. Kain -Rich Felker -Richard Pennington -Stefan Kristiansson -Szabolcs Nagy - -all of whom have explicitly granted such permission. - -This file previously contained text expressing a belief that most of -the files covered by the above exception were sufficiently trivial not -to be subject to copyright, resulting in confusion over whether it -negated the permissions granted in the license. In the spirit of -permissive licensing, and of not having licensing issues being an -obstacle to adoption, that text has been removed. - -*/ - -#include -#include -#include - -double preciseExp10(double x) -{ - if (isnan(x)) return NAN; - - // ranging between DBL_TRUE_MIN and DBL_MAX. 
Outsiders are treated as zeros or infinities - static const double p10[] - = {1e-323, 1e-322, 1e-321, 1e-320, 1e-319, 1e-318, 1e-317, 1e-316, 1e-315, 1e-314, 1e-313, 1e-312, 1e-311, 1e-310, 1e-309, 1e-308, 1e-307, - 1e-306, 1e-305, 1e-304, 1e-303, 1e-302, 1e-301, 1e-300, 1e-299, 1e-298, 1e-297, 1e-296, 1e-295, 1e-294, 1e-293, 1e-292, 1e-291, 1e-290, - 1e-289, 1e-288, 1e-287, 1e-286, 1e-285, 1e-284, 1e-283, 1e-282, 1e-281, 1e-280, 1e-279, 1e-278, 1e-277, 1e-276, 1e-275, 1e-274, 1e-273, - 1e-272, 1e-271, 1e-270, 1e-269, 1e-268, 1e-267, 1e-266, 1e-265, 1e-264, 1e-263, 1e-262, 1e-261, 1e-260, 1e-259, 1e-258, 1e-257, 1e-256, - 1e-255, 1e-254, 1e-253, 1e-252, 1e-251, 1e-250, 1e-249, 1e-248, 1e-247, 1e-246, 1e-245, 1e-244, 1e-243, 1e-242, 1e-241, 1e-240, 1e-239, - 1e-238, 1e-237, 1e-236, 1e-235, 1e-234, 1e-233, 1e-232, 1e-231, 1e-230, 1e-229, 1e-228, 1e-227, 1e-226, 1e-225, 1e-224, 1e-223, 1e-222, - 1e-221, 1e-220, 1e-219, 1e-218, 1e-217, 1e-216, 1e-215, 1e-214, 1e-213, 1e-212, 1e-211, 1e-210, 1e-209, 1e-208, 1e-207, 1e-206, 1e-205, - 1e-204, 1e-203, 1e-202, 1e-201, 1e-200, 1e-199, 1e-198, 1e-197, 1e-196, 1e-195, 1e-194, 1e-193, 1e-192, 1e-191, 1e-190, 1e-189, 1e-188, - 1e-187, 1e-186, 1e-185, 1e-184, 1e-183, 1e-182, 1e-181, 1e-180, 1e-179, 1e-178, 1e-177, 1e-176, 1e-175, 1e-174, 1e-173, 1e-172, 1e-171, - 1e-170, 1e-169, 1e-168, 1e-167, 1e-166, 1e-165, 1e-164, 1e-163, 1e-162, 1e-161, 1e-160, 1e-159, 1e-158, 1e-157, 1e-156, 1e-155, 1e-154, - 1e-153, 1e-152, 1e-151, 1e-150, 1e-149, 1e-148, 1e-147, 1e-146, 1e-145, 1e-144, 1e-143, 1e-142, 1e-141, 1e-140, 1e-139, 1e-138, 1e-137, - 1e-136, 1e-135, 1e-134, 1e-133, 1e-132, 1e-131, 1e-130, 1e-129, 1e-128, 1e-127, 1e-126, 1e-125, 1e-124, 1e-123, 1e-122, 1e-121, 1e-120, - 1e-119, 1e-118, 1e-117, 1e-116, 1e-115, 1e-114, 1e-113, 1e-112, 1e-111, 1e-110, 1e-109, 1e-108, 1e-107, 1e-106, 1e-105, 1e-104, 1e-103, - 1e-102, 1e-101, 1e-100, 1e-99, 1e-98, 1e-97, 1e-96, 1e-95, 1e-94, 1e-93, 1e-92, 1e-91, 1e-90, 1e-89, 1e-88, 1e-87, 1e-86, - 1e-85, 1e-84, 1e-83, 1e-82, 1e-81, 1e-80, 1e-79, 1e-78, 1e-77, 1e-76, 1e-75, 1e-74, 1e-73, 1e-72, 1e-71, 1e-70, 1e-69, - 1e-68, 1e-67, 1e-66, 1e-65, 1e-64, 1e-63, 1e-62, 1e-61, 1e-60, 1e-59, 1e-58, 1e-57, 1e-56, 1e-55, 1e-54, 1e-53, 1e-52, - 1e-51, 1e-50, 1e-49, 1e-48, 1e-47, 1e-46, 1e-45, 1e-44, 1e-43, 1e-42, 1e-41, 1e-40, 1e-39, 1e-38, 1e-37, 1e-36, 1e-35, - 1e-34, 1e-33, 1e-32, 1e-31, 1e-30, 1e-29, 1e-28, 1e-27, 1e-26, 1e-25, 1e-24, 1e-23, 1e-22, 1e-21, 1e-20, 1e-19, 1e-18, - 1e-17, 1e-16, 1e-15, 1e-14, 1e-13, 1e-12, 1e-11, 1e-10, 1e-9, 1e-8, 1e-7, 1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1, - 1e0, 1e+1, 1e+2, 1e+3, 1e+4, 1e+5, 1e+6, 1e+7, 1e+8, 1e+9, 1e+10, 1e+11, 1e+12, 1e+13, 1e+14, 1e+15, 1e+16, - 1e+17, 1e+18, 1e+19, 1e+20, 1e+21, 1e+22, 1e+23, 1e+24, 1e+25, 1e+26, 1e+27, 1e+28, 1e+29, 1e+30, 1e+31, 1e+32, 1e+33, - 1e+34, 1e+35, 1e+36, 1e+37, 1e+38, 1e+39, 1e+40, 1e+41, 1e+42, 1e+43, 1e+44, 1e+45, 1e+46, 1e+47, 1e+48, 1e+49, 1e+50, - 1e+51, 1e+52, 1e+53, 1e+54, 1e+55, 1e+56, 1e+57, 1e+58, 1e+59, 1e+60, 1e+61, 1e+62, 1e+63, 1e+64, 1e+65, 1e+66, 1e+67, - 1e+68, 1e+69, 1e+70, 1e+71, 1e+72, 1e+73, 1e+74, 1e+75, 1e+76, 1e+77, 1e+78, 1e+79, 1e+80, 1e+81, 1e+82, 1e+83, 1e+84, - 1e+85, 1e+86, 1e+87, 1e+88, 1e+89, 1e+90, 1e+91, 1e+92, 1e+93, 1e+94, 1e+95, 1e+96, 1e+97, 1e+98, 1e+99, 1e+100, 1e+101, - 1e+102, 1e+103, 1e+104, 1e+105, 1e+106, 1e+107, 1e+108, 1e+109, 1e+110, 1e+111, 1e+112, 1e+113, 1e+114, 1e+115, 1e+116, 1e+117, 1e+118, - 1e+119, 1e+120, 1e+121, 1e+122, 1e+123, 1e+124, 1e+125, 1e+126, 1e+127, 1e+128, 1e+129, 1e+130, 1e+131, 
1e+132, 1e+133, 1e+134, 1e+135, - 1e+136, 1e+137, 1e+138, 1e+139, 1e+140, 1e+141, 1e+142, 1e+143, 1e+144, 1e+145, 1e+146, 1e+147, 1e+148, 1e+149, 1e+150, 1e+151, 1e+152, - 1e+153, 1e+154, 1e+155, 1e+156, 1e+157, 1e+158, 1e+159, 1e+160, 1e+161, 1e+162, 1e+163, 1e+164, 1e+165, 1e+166, 1e+167, 1e+168, 1e+169, - 1e+170, 1e+171, 1e+172, 1e+173, 1e+174, 1e+175, 1e+176, 1e+177, 1e+178, 1e+179, 1e+180, 1e+181, 1e+182, 1e+183, 1e+184, 1e+185, 1e+186, - 1e+187, 1e+188, 1e+189, 1e+190, 1e+191, 1e+192, 1e+193, 1e+194, 1e+195, 1e+196, 1e+197, 1e+198, 1e+199, 1e+200, 1e+201, 1e+202, 1e+203, - 1e+204, 1e+205, 1e+206, 1e+207, 1e+208, 1e+209, 1e+210, 1e+211, 1e+212, 1e+213, 1e+214, 1e+215, 1e+216, 1e+217, 1e+218, 1e+219, 1e+220, - 1e+221, 1e+222, 1e+223, 1e+224, 1e+225, 1e+226, 1e+227, 1e+228, 1e+229, 1e+230, 1e+231, 1e+232, 1e+233, 1e+234, 1e+235, 1e+236, 1e+237, - 1e+238, 1e+239, 1e+240, 1e+241, 1e+242, 1e+243, 1e+244, 1e+245, 1e+246, 1e+247, 1e+248, 1e+249, 1e+250, 1e+251, 1e+252, 1e+253, 1e+254, - 1e+255, 1e+256, 1e+257, 1e+258, 1e+259, 1e+260, 1e+261, 1e+262, 1e+263, 1e+264, 1e+265, 1e+266, 1e+267, 1e+268, 1e+269, 1e+270, 1e+271, - 1e+272, 1e+273, 1e+274, 1e+275, 1e+276, 1e+277, 1e+278, 1e+279, 1e+280, 1e+281, 1e+282, 1e+283, 1e+284, 1e+285, 1e+286, 1e+287, 1e+288, - 1e+289, 1e+290, 1e+291, 1e+292, 1e+293, 1e+294, 1e+295, 1e+296, 1e+297, 1e+298, 1e+299, 1e+300, 1e+301, 1e+302, 1e+303, 1e+304, 1e+305, - 1e+306, 1e+307, 1e+308}; - - double n, y = modf(x, &n); - if (n > 308) return INFINITY; - if (n < -323) return 0; - - // Using lookup table based formula to get accurate results for integer arguments. - return exp2(3.32192809488736234787031942948939 * y) * p10[(int)n + 323]; -} diff --git a/base/common/preciseExp10.cpp b/base/common/preciseExp10.cpp new file mode 100644 index 00000000000..a034ae2357b --- /dev/null +++ b/base/common/preciseExp10.cpp @@ -0,0 +1,227 @@ +/* + +https://www.musl-libc.org/ +http://git.musl-libc.org/cgit/musl/tree/src/math/exp10.c + +musl as a whole is licensed under the following standard MIT license: + +---------------------------------------------------------------------- +Copyright © 2005-2014 Rich Felker, et al. + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +---------------------------------------------------------------------- + +Authors/contributors include: + +Alex Dowad +Alexander Monakov +Anthony G. 
Basile +Arvid Picciani +Bobby Bingham +Boris Brezillon +Brent Cook +Chris Spiegel +Clément Vasseur +Daniel Micay +Denys Vlasenko +Emil Renner Berthing +Felix Fietkau +Felix Janda +Gianluca Anzolin +Hauke Mehrtens +Hiltjo Posthuma +Isaac Dunham +Jaydeep Patil +Jens Gustedt +Jeremy Huntwork +Jo-Philipp Wich +Joakim Sindholt +John Spencer +Josiah Worcester +Justin Cormack +Khem Raj +Kylie McClain +Luca Barbato +Luka Perkov +M Farkas-Dyck (Strake) +Mahesh Bodapati +Michael Forney +Natanael Copa +Nicholas J. Kain +orc +Pascal Cuoq +Petr Hosek +Pierre Carrier +Rich Felker +Richard Pennington +Shiz +sin +Solar Designer +Stefan Kristiansson +Szabolcs Nagy +Timo Teräs +Trutz Behn +Valentin Ochs +William Haddon + +Portions of this software are derived from third-party works licensed +under terms compatible with the above MIT license: + +The TRE regular expression implementation (src/regex/reg* and +src/regex/tre*) is Copyright © 2001-2008 Ville Laurikari and licensed +under a 2-clause BSD license (license text in the source files). The +included version has been heavily modified by Rich Felker in 2012, in +the interests of size, simplicity, and namespace cleanliness. + +Much of the math library code (src/math/ * and src/complex/ *) is +Copyright © 1993,2004 Sun Microsystems or +Copyright © 2003-2011 David Schultz or +Copyright © 2003-2009 Steven G. Kargl or +Copyright © 2003-2009 Bruce D. Evans or +Copyright © 2008 Stephen L. Moshier +and labelled as such in comments in the individual source files. All +have been licensed under extremely permissive terms. + +The ARM memcpy code (src/string/arm/memcpy_el.S) is Copyright © 2008 +The Android Open Source Project and is licensed under a two-clause BSD +license. It was taken from Bionic libc, used on Android. + +The implementation of DES for crypt (src/crypt/crypt_des.c) is +Copyright © 1994 David Burren. It is licensed under a BSD license. + +The implementation of blowfish crypt (src/crypt/crypt_blowfish.c) was +originally written by Solar Designer and placed into the public +domain. The code also comes with a fallback permissive license for use +in jurisdictions that may not recognize the public domain. + +The smoothsort implementation (src/stdlib/qsort.c) is Copyright © 2011 +Valentin Ochs and is licensed under an MIT-style license. + +The BSD PRNG implementation (src/prng/random.c) and XSI search API +(src/search/ *.c) functions are Copyright © 2011 Szabolcs Nagy and +licensed under following terms: "Permission to use, copy, modify, +and/or distribute this code for any purpose with or without fee is +hereby granted. There is no warranty." + +The x86_64 port was written by Nicholas J. Kain and is licensed under +the standard MIT terms. + +The mips and microblaze ports were originally written by Richard +Pennington for use in the ellcc project. The original code was adapted +by Rich Felker for build system and code conventions during upstream +integration. It is licensed under the standard MIT terms. + +The mips64 port was contributed by Imagination Technologies and is +licensed under the standard MIT terms. + +The powerpc port was also originally written by Richard Pennington, +and later supplemented and integrated by John Spencer. It is licensed +under the standard MIT terms. + +All other files which have no copyright comments are original works +produced specifically for use as part of this library, written either +by Rich Felker, the main author of the library, or by one or more +contibutors listed above. 
Details on authorship of individual files +can be found in the git version control history of the project. The +omission of copyright and license comments in each file is in the +interest of source tree size. + +In addition, permission is hereby granted for all public header files +(include/ * and arch/ * /bits/ *) and crt files intended to be linked into +applications (crt/ *, ldso/dlstart.c, and arch/ * /crt_arch.h) to omit +the copyright notice and permission notice otherwise required by the +license, and to use these files without any requirement of +attribution. These files include substantial contributions from: + +Bobby Bingham +John Spencer +Nicholas J. Kain +Rich Felker +Richard Pennington +Stefan Kristiansson +Szabolcs Nagy + +all of whom have explicitly granted such permission. + +This file previously contained text expressing a belief that most of +the files covered by the above exception were sufficiently trivial not +to be subject to copyright, resulting in confusion over whether it +negated the permissions granted in the license. In the spirit of +permissive licensing, and of not having licensing issues being an +obstacle to adoption, that text has been removed. + +*/ + +#include +#include +#include + +double preciseExp10(double x) +{ + if (isnan(x)) return NAN; + + // ranging between DBL_TRUE_MIN and DBL_MAX. Outsiders are treated as zeros or infinities + static const double p10[] + = {1e-323, 1e-322, 1e-321, 1e-320, 1e-319, 1e-318, 1e-317, 1e-316, 1e-315, 1e-314, 1e-313, 1e-312, 1e-311, 1e-310, 1e-309, 1e-308, 1e-307, + 1e-306, 1e-305, 1e-304, 1e-303, 1e-302, 1e-301, 1e-300, 1e-299, 1e-298, 1e-297, 1e-296, 1e-295, 1e-294, 1e-293, 1e-292, 1e-291, 1e-290, + 1e-289, 1e-288, 1e-287, 1e-286, 1e-285, 1e-284, 1e-283, 1e-282, 1e-281, 1e-280, 1e-279, 1e-278, 1e-277, 1e-276, 1e-275, 1e-274, 1e-273, + 1e-272, 1e-271, 1e-270, 1e-269, 1e-268, 1e-267, 1e-266, 1e-265, 1e-264, 1e-263, 1e-262, 1e-261, 1e-260, 1e-259, 1e-258, 1e-257, 1e-256, + 1e-255, 1e-254, 1e-253, 1e-252, 1e-251, 1e-250, 1e-249, 1e-248, 1e-247, 1e-246, 1e-245, 1e-244, 1e-243, 1e-242, 1e-241, 1e-240, 1e-239, + 1e-238, 1e-237, 1e-236, 1e-235, 1e-234, 1e-233, 1e-232, 1e-231, 1e-230, 1e-229, 1e-228, 1e-227, 1e-226, 1e-225, 1e-224, 1e-223, 1e-222, + 1e-221, 1e-220, 1e-219, 1e-218, 1e-217, 1e-216, 1e-215, 1e-214, 1e-213, 1e-212, 1e-211, 1e-210, 1e-209, 1e-208, 1e-207, 1e-206, 1e-205, + 1e-204, 1e-203, 1e-202, 1e-201, 1e-200, 1e-199, 1e-198, 1e-197, 1e-196, 1e-195, 1e-194, 1e-193, 1e-192, 1e-191, 1e-190, 1e-189, 1e-188, + 1e-187, 1e-186, 1e-185, 1e-184, 1e-183, 1e-182, 1e-181, 1e-180, 1e-179, 1e-178, 1e-177, 1e-176, 1e-175, 1e-174, 1e-173, 1e-172, 1e-171, + 1e-170, 1e-169, 1e-168, 1e-167, 1e-166, 1e-165, 1e-164, 1e-163, 1e-162, 1e-161, 1e-160, 1e-159, 1e-158, 1e-157, 1e-156, 1e-155, 1e-154, + 1e-153, 1e-152, 1e-151, 1e-150, 1e-149, 1e-148, 1e-147, 1e-146, 1e-145, 1e-144, 1e-143, 1e-142, 1e-141, 1e-140, 1e-139, 1e-138, 1e-137, + 1e-136, 1e-135, 1e-134, 1e-133, 1e-132, 1e-131, 1e-130, 1e-129, 1e-128, 1e-127, 1e-126, 1e-125, 1e-124, 1e-123, 1e-122, 1e-121, 1e-120, + 1e-119, 1e-118, 1e-117, 1e-116, 1e-115, 1e-114, 1e-113, 1e-112, 1e-111, 1e-110, 1e-109, 1e-108, 1e-107, 1e-106, 1e-105, 1e-104, 1e-103, + 1e-102, 1e-101, 1e-100, 1e-99, 1e-98, 1e-97, 1e-96, 1e-95, 1e-94, 1e-93, 1e-92, 1e-91, 1e-90, 1e-89, 1e-88, 1e-87, 1e-86, + 1e-85, 1e-84, 1e-83, 1e-82, 1e-81, 1e-80, 1e-79, 1e-78, 1e-77, 1e-76, 1e-75, 1e-74, 1e-73, 1e-72, 1e-71, 1e-70, 1e-69, + 1e-68, 1e-67, 1e-66, 1e-65, 1e-64, 1e-63, 1e-62, 1e-61, 1e-60, 1e-59, 1e-58, 1e-57, 1e-56, 
1e-55, 1e-54, 1e-53, 1e-52, + 1e-51, 1e-50, 1e-49, 1e-48, 1e-47, 1e-46, 1e-45, 1e-44, 1e-43, 1e-42, 1e-41, 1e-40, 1e-39, 1e-38, 1e-37, 1e-36, 1e-35, + 1e-34, 1e-33, 1e-32, 1e-31, 1e-30, 1e-29, 1e-28, 1e-27, 1e-26, 1e-25, 1e-24, 1e-23, 1e-22, 1e-21, 1e-20, 1e-19, 1e-18, + 1e-17, 1e-16, 1e-15, 1e-14, 1e-13, 1e-12, 1e-11, 1e-10, 1e-9, 1e-8, 1e-7, 1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1, + 1e0, 1e+1, 1e+2, 1e+3, 1e+4, 1e+5, 1e+6, 1e+7, 1e+8, 1e+9, 1e+10, 1e+11, 1e+12, 1e+13, 1e+14, 1e+15, 1e+16, + 1e+17, 1e+18, 1e+19, 1e+20, 1e+21, 1e+22, 1e+23, 1e+24, 1e+25, 1e+26, 1e+27, 1e+28, 1e+29, 1e+30, 1e+31, 1e+32, 1e+33, + 1e+34, 1e+35, 1e+36, 1e+37, 1e+38, 1e+39, 1e+40, 1e+41, 1e+42, 1e+43, 1e+44, 1e+45, 1e+46, 1e+47, 1e+48, 1e+49, 1e+50, + 1e+51, 1e+52, 1e+53, 1e+54, 1e+55, 1e+56, 1e+57, 1e+58, 1e+59, 1e+60, 1e+61, 1e+62, 1e+63, 1e+64, 1e+65, 1e+66, 1e+67, + 1e+68, 1e+69, 1e+70, 1e+71, 1e+72, 1e+73, 1e+74, 1e+75, 1e+76, 1e+77, 1e+78, 1e+79, 1e+80, 1e+81, 1e+82, 1e+83, 1e+84, + 1e+85, 1e+86, 1e+87, 1e+88, 1e+89, 1e+90, 1e+91, 1e+92, 1e+93, 1e+94, 1e+95, 1e+96, 1e+97, 1e+98, 1e+99, 1e+100, 1e+101, + 1e+102, 1e+103, 1e+104, 1e+105, 1e+106, 1e+107, 1e+108, 1e+109, 1e+110, 1e+111, 1e+112, 1e+113, 1e+114, 1e+115, 1e+116, 1e+117, 1e+118, + 1e+119, 1e+120, 1e+121, 1e+122, 1e+123, 1e+124, 1e+125, 1e+126, 1e+127, 1e+128, 1e+129, 1e+130, 1e+131, 1e+132, 1e+133, 1e+134, 1e+135, + 1e+136, 1e+137, 1e+138, 1e+139, 1e+140, 1e+141, 1e+142, 1e+143, 1e+144, 1e+145, 1e+146, 1e+147, 1e+148, 1e+149, 1e+150, 1e+151, 1e+152, + 1e+153, 1e+154, 1e+155, 1e+156, 1e+157, 1e+158, 1e+159, 1e+160, 1e+161, 1e+162, 1e+163, 1e+164, 1e+165, 1e+166, 1e+167, 1e+168, 1e+169, + 1e+170, 1e+171, 1e+172, 1e+173, 1e+174, 1e+175, 1e+176, 1e+177, 1e+178, 1e+179, 1e+180, 1e+181, 1e+182, 1e+183, 1e+184, 1e+185, 1e+186, + 1e+187, 1e+188, 1e+189, 1e+190, 1e+191, 1e+192, 1e+193, 1e+194, 1e+195, 1e+196, 1e+197, 1e+198, 1e+199, 1e+200, 1e+201, 1e+202, 1e+203, + 1e+204, 1e+205, 1e+206, 1e+207, 1e+208, 1e+209, 1e+210, 1e+211, 1e+212, 1e+213, 1e+214, 1e+215, 1e+216, 1e+217, 1e+218, 1e+219, 1e+220, + 1e+221, 1e+222, 1e+223, 1e+224, 1e+225, 1e+226, 1e+227, 1e+228, 1e+229, 1e+230, 1e+231, 1e+232, 1e+233, 1e+234, 1e+235, 1e+236, 1e+237, + 1e+238, 1e+239, 1e+240, 1e+241, 1e+242, 1e+243, 1e+244, 1e+245, 1e+246, 1e+247, 1e+248, 1e+249, 1e+250, 1e+251, 1e+252, 1e+253, 1e+254, + 1e+255, 1e+256, 1e+257, 1e+258, 1e+259, 1e+260, 1e+261, 1e+262, 1e+263, 1e+264, 1e+265, 1e+266, 1e+267, 1e+268, 1e+269, 1e+270, 1e+271, + 1e+272, 1e+273, 1e+274, 1e+275, 1e+276, 1e+277, 1e+278, 1e+279, 1e+280, 1e+281, 1e+282, 1e+283, 1e+284, 1e+285, 1e+286, 1e+287, 1e+288, + 1e+289, 1e+290, 1e+291, 1e+292, 1e+293, 1e+294, 1e+295, 1e+296, 1e+297, 1e+298, 1e+299, 1e+300, 1e+301, 1e+302, 1e+303, 1e+304, 1e+305, + 1e+306, 1e+307, 1e+308}; + + double n, y = modf(x, &n); + if (n > 308) return INFINITY; + if (n < -323) return 0; + + // Using lookup table based formula to get accurate results for integer arguments. + return exp2(3.32192809488736234787031942948939 * y) * p10[static_cast(n) + 323]; +} diff --git a/base/common/preciseExp10.h b/base/common/preciseExp10.h index 137205c484c..26a88318172 100644 --- a/base/common/preciseExp10.h +++ b/base/common/preciseExp10.h @@ -8,9 +8,4 @@ * Note: the function names are different to avoid confusion with symbols from the system libm. 
*/ -extern "C" -{ - double preciseExp10(double x); - -} diff --git a/base/common/shift10.cpp b/base/common/shift10.cpp index 45f5733bd76..b7b39182145 100644 --- a/base/common/shift10.cpp +++ b/base/common/shift10.cpp @@ -1,6 +1,6 @@ #include -#include +#include "defines.h" #include @@ -8,8 +8,8 @@ template static T shift10Impl(T x, int exponent) { - static constexpr ssize_t MIN_EXPONENT = -323; - static constexpr ssize_t MAX_EXPONENT = 308; + static constexpr ssize_t min_exponent = -323; + static constexpr ssize_t max_exponent = 308; static const long double powers10[] = { @@ -47,12 +47,12 @@ static T shift10Impl(T x, int exponent) 1e291L,1e292L,1e293L,1e294L,1e295L,1e296L,1e297L,1e298L,1e299L,1e300L,1e301L,1e302L,1e303L,1e304L,1e305L,1e306L,1e307L,1e308L }; - if (unlikely(exponent < MIN_EXPONENT)) /// Note: there are some values below MIN_EXPONENT that is greater than zero. + if (unlikely(exponent < min_exponent)) /// Note: there are some values below MIN_EXPONENT that is greater than zero. x *= 0; /// Multiplying to keep the sign of zero. - else if (unlikely(exponent > MAX_EXPONENT)) + else if (unlikely(exponent > max_exponent)) x *= std::numeric_limits::infinity(); /// Multiplying to keep the sign of infinity. else - x *= powers10[exponent - MIN_EXPONENT]; + x *= powers10[exponent - min_exponent]; return x; } diff --git a/base/common/shift10.h b/base/common/shift10.h index 68c18f34450..c50121cfb27 100644 --- a/base/common/shift10.h +++ b/base/common/shift10.h @@ -1,6 +1,6 @@ #pragma once -#include +#include /** Almost the same as x = x * exp10(exponent), but gives more accurate result. * Example: diff --git a/base/common/sleep.cpp b/base/common/sleep.cpp index f65864b369d..85bbc8edfcc 100644 --- a/base/common/sleep.cpp +++ b/base/common/sleep.cpp @@ -22,7 +22,7 @@ void sleepForNanoseconds(uint64_t nanoseconds) #if defined(OS_DARWIN) //https://developer.apple.com/library/archive/technotes/tn2169/_index.html //https://dshil.github.io/blog/missed-os-x-clock-guide/ - static mach_timebase_info_data_t timebase_info = {0}; + static mach_timebase_info_data_t timebase_info{}; if (timebase_info.denom == 0) mach_timebase_info(&timebase_info); diff --git a/base/common/strong_typedef.h b/base/common/strong_typedef.h index ae502b4ef97..a46eb415e15 100644 --- a/base/common/strong_typedef.h +++ b/base/common/strong_typedef.h @@ -1,15 +1,9 @@ #pragma once -#include #include -/** https://svn.boost.org/trac/boost/ticket/5182 - */ - template struct StrongTypedef - : boost::totally_ordered1< StrongTypedef - , boost::totally_ordered2< StrongTypedef, T> > { private: using Self = StrongTypedef; diff --git a/base/common/terminalColors.h b/base/common/terminalColors.h index 0c33b23752b..a1ba85dc8cd 100644 --- a/base/common/terminalColors.h +++ b/base/common/terminalColors.h @@ -1,5 +1,5 @@ #include -#include +#include /** Set color in terminal based on 64-bit hash value. 
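Both preciseExp10 (moved to a .cpp above and unwrapped from extern "C") and shift10 rely on the same trick: the power of ten for the integer part of the exponent comes from an exact lookup table, and only the fractional part goes through exp2 (the constant 3.32192809... in the code is log2(10), since 10^x = 10^trunc(x) * 2^(log2(10) * frac(x))), with out-of-range exponents clamped to zero or infinity. A small sketch of the resulting behaviour, assuming the declarations are reachable as <common/preciseExp10.h> and <common/shift10.h> and that shift10 has a double overload (the paths and that overload are assumptions, not shown in these hunks):

    #include <cassert>
    #include <cmath>
    #include <limits>
    #include <common/preciseExp10.h>    /// double preciseExp10(double x), defined in the new .cpp above
    #include <common/shift10.h>         /// double shift10(double x, int exponent), overload assumed

    int main()
    {
        assert(preciseExp10(3.0) == 1e3);   /// integer argument: pure table lookup, exact
        assert(std::abs(preciseExp10(2.5) - 316.2277660168379) < 1e-9);   /// 100 * 2^(0.5 * log2(10))

        assert(shift10(1.0, 3) == 1e3);     /// "x * exp10(exponent), but gives more accurate result"
        assert(shift10(1.0, 400) == std::numeric_limits<double>::infinity());   /// above max_exponent: saturates
        assert(shift10(1.0, -400) == 0.0);  /// below min_exponent: collapses to (signed) zero
        return 0;
    }
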
diff --git a/base/common/tests/date_lut2.cpp b/base/common/tests/date_lut2.cpp index f1a106a16ca..6dcf5e8adf2 100644 --- a/base/common/tests/date_lut2.cpp +++ b/base/common/tests/date_lut2.cpp @@ -43,7 +43,7 @@ void loop(time_t begin, time_t end, int step) } -int main(int argc, char ** argv) +int main(int, char **) { loop(orderedIdentifierToDate(20101031), orderedIdentifierToDate(20101101), 15 * 60); loop(orderedIdentifierToDate(20100328), orderedIdentifierToDate(20100330), 15 * 60); diff --git a/base/common/tests/date_lut3.cpp b/base/common/tests/date_lut3.cpp index c2e4c7ccf8d..411765d2b2a 100644 --- a/base/common/tests/date_lut3.cpp +++ b/base/common/tests/date_lut3.cpp @@ -53,7 +53,7 @@ void loop(time_t begin, time_t end, int step) } -int main(int argc, char ** argv) +int main(int, char **) { loop(orderedIdentifierToDate(20101031), orderedIdentifierToDate(20101101), 15 * 60); loop(orderedIdentifierToDate(20100328), orderedIdentifierToDate(20100330), 15 * 60); diff --git a/base/common/tests/date_lut4.cpp b/base/common/tests/date_lut4.cpp index 50c3ef4e3d3..86a4708dc79 100644 --- a/base/common/tests/date_lut4.cpp +++ b/base/common/tests/date_lut4.cpp @@ -2,15 +2,15 @@ #include -int main(int argc, char ** argv) +int main(int, char **) { /** В DateLUT был глюк - для времён из дня 1970-01-01, возвращался номер часа больше 23. */ - static const time_t TIME = 66130; + static const time_t time = 66130; const auto & date_lut = DateLUT::instance(); - std::cerr << date_lut.toHour(TIME) << std::endl; - std::cerr << date_lut.toDayNum(TIME) << std::endl; + std::cerr << date_lut.toHour(time) << std::endl; + std::cerr << date_lut.toDayNum(time) << std::endl; const auto * values = reinterpret_cast(&date_lut); diff --git a/base/common/tests/date_lut_default_timezone.cpp b/base/common/tests/date_lut_default_timezone.cpp index e0251707537..b8e5aa08931 100644 --- a/base/common/tests/date_lut_default_timezone.cpp +++ b/base/common/tests/date_lut_default_timezone.cpp @@ -2,13 +2,13 @@ #include #include -int main(int argc, char ** argv) +int main(int, char **) { try { const auto & date_lut = DateLUT::instance(); std::cout << "Detected default timezone: `" << date_lut.getTimeZone() << "'" << std::endl; - time_t now = time(NULL); + time_t now = time(nullptr); std::cout << "Current time: " << date_lut.timeToString(now) << ", UTC: " << DateLUT::instance("UTC").timeToString(now) << std::endl; } diff --git a/base/common/tests/date_lut_init.cpp b/base/common/tests/date_lut_init.cpp index 3b03e36b02d..48f0d6063c7 100644 --- a/base/common/tests/date_lut_init.cpp +++ b/base/common/tests/date_lut_init.cpp @@ -1,7 +1,7 @@ #include /// Позволяет проверить время инициализации DateLUT. -int main(int argc, char ** argv) +int main(int, char **) { DateLUT::instance(); return 0; diff --git a/base/common/tests/gtest_find_symbols.cpp b/base/common/tests/gtest_find_symbols.cpp index 118a1f5c178..79a7ed032df 100644 --- a/base/common/tests/gtest_find_symbols.cpp +++ b/base/common/tests/gtest_find_symbols.cpp @@ -1,9 +1,10 @@ #include +#include #include #include -TEST(find_symbols, SimpleTest) +TEST(FindSymbols, SimpleTest) { std::string s = "Hello, world! 
Goodbye..."; const char * begin = s.data(); @@ -22,4 +23,16 @@ TEST(find_symbols, SimpleTest) ASSERT_EQ(find_last_symbols_or_null<' '>(begin, end), end - 11); ASSERT_EQ(find_last_symbols_or_null<'H'>(begin, end), begin); ASSERT_EQ((find_last_symbols_or_null<'a', 'e'>(begin, end)), end - 4); + + { + std::vector vals; + splitInto<' ', ','>(vals, "hello, world", true); + ASSERT_EQ(vals, (std::vector{"hello", "world"})); + } + + { + std::vector vals; + splitInto<' ', ','>(vals, "s String", true); + ASSERT_EQ(vals, (std::vector{"s", "String"})); + } } diff --git a/base/common/tests/gtest_json_test.cpp b/base/common/tests/gtest_json_test.cpp index 79a55dcd1ab..5bdeb1bca9b 100644 --- a/base/common/tests/gtest_json_test.cpp +++ b/base/common/tests/gtest_json_test.cpp @@ -22,484 +22,484 @@ struct GetStringTestRecord std::string result; }; -TEST(JSON_Suite, SimpleTest) +TEST(JSONSuite, SimpleTest) { std::vector test_data = { - { "\"name\""s, ResultType::Return, "name"s }, - { "\"Вафельница Vitek WX-1102 FL\""s, ResultType::Return, "Вафельница Vitek WX-1102 FL"s }, - { "\"brand\""s, ResultType::Return, "brand"s }, - { "\"184509\""s, ResultType::Return, "184509"s }, - { "\"category\""s, ResultType::Return, "category"s }, - { "\"Все для детей/Детская техника/Vitek\""s, ResultType::Return, "Все для детей/Детская техника/Vitek"s }, - { "\"variant\""s, ResultType::Return, "variant"s }, - { "\"В наличии\""s, ResultType::Return, "В наличии"s }, - { "\"price\""s, ResultType::Return, "price"s }, - { "\"2390.00\""s, ResultType::Return, "2390.00"s }, - { "\"list\""s, ResultType::Return, "list"s }, - { "\"Карточка\""s, ResultType::Return, "Карточка"s }, - { "\"position\""s, ResultType::Return, "position"s }, - { "\"detail\""s, ResultType::Return, "detail"s }, - { "\"actionField\""s, ResultType::Return, "actionField"s }, - { "\"list\""s, ResultType::Return, "list"s }, - { "\"http://www.techport.ru/q/?t=вафельница&sort=price&sdim=asc\""s, ResultType::Return, "http://www.techport.ru/q/?t=вафельница&sort=price&sdim=asc"s }, - { "\"action\""s, ResultType::Return, "action"s }, - { "\"detail\""s, ResultType::Return, "detail"s }, - { "\"products\""s, ResultType::Return, "products"s }, - { "\"name\""s, ResultType::Return, "name"s }, - { "\"Вафельница Vitek WX-1102 FL\""s, ResultType::Return, "Вафельница Vitek WX-1102 FL"s }, - { "\"id\""s, ResultType::Return, "id"s }, - { "\"184509\""s, ResultType::Return, "184509"s }, - { "\"price\""s, ResultType::Return, "price"s }, - { "\"2390.00\""s, ResultType::Return, "2390.00"s }, - { "\"brand\""s, ResultType::Return, "brand"s }, - { "\"Vitek\""s, ResultType::Return, "Vitek"s }, - { "\"category\""s, ResultType::Return, "category"s }, - { "\"Все для детей/Детская техника/Vitek\""s, ResultType::Return, "Все для детей/Детская техника/Vitek"s }, - { "\"variant\""s, ResultType::Return, "variant"s }, - { "\"В наличии\""s, ResultType::Return, "В наличии"s }, - { "\"ru\""s, ResultType::Return, "ru"s }, - { "\"experiments\""s, ResultType::Return, "experiments"s }, - { "\"lang\""s, ResultType::Return, "lang"s }, - { "\"ru\""s, ResultType::Return, "ru"s }, - { "\"los_portal\""s, ResultType::Return, "los_portal"s }, - { "\"los_level\""s, ResultType::Return, "los_level"s }, - { "\"none\""s, ResultType::Return, "none"s }, - { "\"isAuthorized\""s, ResultType::Return, "isAuthorized"s }, - { "\"isSubscriber\""s, ResultType::Return, "isSubscriber"s }, - { "\"postType\""s, ResultType::Return, "postType"s }, - { "\"Новости\""s, ResultType::Return, "Новости"s }, - { "\"experiments\""s, 
ResultType::Return, "experiments"s }, - { "\"lang\""s, ResultType::Return, "lang"s }, - { "\"ru\""s, ResultType::Return, "ru"s }, - { "\"los_portal\""s, ResultType::Return, "los_portal"s }, - { "\"los_level\""s, ResultType::Return, "los_level"s }, - { "\"none\""s, ResultType::Return, "none"s }, - { "\"lang\""s, ResultType::Return, "lang"s }, - { "\"ru\""s, ResultType::Return, "ru"s }, - { "\"Электроплита GEFEST Брест ЭПНД 5140-01 0001\""s, ResultType::Return, "Электроплита GEFEST Брест ЭПНД 5140-01 0001"s }, - { "\"price\""s, ResultType::Return, "price"s }, - { "\"currencyCode\""s, ResultType::Return, "currencyCode"s }, - { "\"RUB\""s, ResultType::Return, "RUB"s }, - { "\"lang\""s, ResultType::Return, "lang"s }, - { "\"ru\""s, ResultType::Return, "ru"s }, - { "\"experiments\""s, ResultType::Return, "experiments"s }, - { "\"lang\""s, ResultType::Return, "lang"s }, - { "\"ru\""s, ResultType::Return, "ru"s }, - { "\"los_portal\""s, ResultType::Return, "los_portal"s }, - { "\"los_level\""s, ResultType::Return, "los_level"s }, - { "\"none\""s, ResultType::Return, "none"s }, - { "\"trash_login\""s, ResultType::Return, "trash_login"s }, - { "\"novikoff\""s, ResultType::Return, "novikoff"s }, - { "\"trash_cat_link\""s, ResultType::Return, "trash_cat_link"s }, - { "\"progs\""s, ResultType::Return, "progs"s }, - { "\"trash_parent_link\""s, ResultType::Return, "trash_parent_link"s }, - { "\"content\""s, ResultType::Return, "content"s }, - { "\"trash_posted_parent\""s, ResultType::Return, "trash_posted_parent"s }, - { "\"content.01.2016\""s, ResultType::Return, "content.01.2016"s }, - { "\"trash_posted_cat\""s, ResultType::Return, "trash_posted_cat"s }, - { "\"progs.01.2016\""s, ResultType::Return, "progs.01.2016"s }, - { "\"trash_virus_count\""s, ResultType::Return, "trash_virus_count"s }, - { "\"trash_is_android\""s, ResultType::Return, "trash_is_android"s }, - { "\"trash_is_wp8\""s, ResultType::Return, "trash_is_wp8"s }, - { "\"trash_is_ios\""s, ResultType::Return, "trash_is_ios"s }, - { "\"trash_posted\""s, ResultType::Return, "trash_posted"s }, - { "\"01.2016\""s, ResultType::Return, "01.2016"s }, - { "\"experiments\""s, ResultType::Return, "experiments"s }, - { "\"lang\""s, ResultType::Return, "lang"s }, - { "\"ru\""s, ResultType::Return, "ru"s }, - { "\"los_portal\""s, ResultType::Return, "los_portal"s }, - { "\"los_level\""s, ResultType::Return, "los_level"s }, - { "\"none\""s, ResultType::Return, "none"s }, - { "\"merchantId\""s, ResultType::Return, "merchantId"s }, - { "\"13694_49246\""s, ResultType::Return, "13694_49246"s }, - { "\"cps-source\""s, ResultType::Return, "cps-source"s }, - { "\"wargaming\""s, ResultType::Return, "wargaming"s }, - { "\"cps_provider\""s, ResultType::Return, "cps_provider"s }, - { "\"default\""s, ResultType::Return, "default"s }, - { "\"errorReason\""s, ResultType::Return, "errorReason"s }, - { "\"no errors\""s, ResultType::Return, "no errors"s }, - { "\"scid\""s, ResultType::Return, "scid"s }, - { "\"isAuthPayment\""s, ResultType::Return, "isAuthPayment"s }, - { "\"lang\""s, ResultType::Return, "lang"s }, - { "\"ru\""s, ResultType::Return, "ru"s }, - { "\"rubric\""s, ResultType::Return, "rubric"s }, - { "\"\""s, ResultType::Return, ""s }, - { "\"rubric\""s, ResultType::Return, "rubric"s }, - { "\"Мир\""s, ResultType::Return, "Мир"s }, - { "\"lang\""s, ResultType::Return, "lang"s }, - { "\"ru\""s, ResultType::Return, "ru"s }, - { "\"experiments\""s, ResultType::Return, "experiments"s }, - { "\"lang\""s, ResultType::Return, "lang"s }, - { "\"ru\""s, 
ResultType::Return, "ru"s }, - { "\"los_portal\""s, ResultType::Return, "los_portal"s }, - { "\"los_level\""s, ResultType::Return, "los_level"s }, - { "\"none\""s, ResultType::Return, "none"s }, - { "\"lang\""s, ResultType::Return, "lang"s }, - { "\"ru\""s, ResultType::Return, "ru"s }, - { "\"__ym\""s, ResultType::Return, "__ym"s }, - { "\"ecommerce\""s, ResultType::Return, "ecommerce"s }, - { "\"impressions\""s, ResultType::Return, "impressions"s }, - { "\"id\""s, ResultType::Return, "id"s }, - { "\"863813\""s, ResultType::Return, "863813"s }, - { "\"name\""s, ResultType::Return, "name"s }, - { "\"Футболка детская 3D Happy, возраст 1-2 года, трикотаж\""s, ResultType::Return, "Футболка детская 3D Happy, возраст 1-2 года, трикотаж"s }, - { "\"category\""s, ResultType::Return, "category"s }, - { "\"/Летние товары/Летний текстиль/\""s, ResultType::Return, "/Летние товары/Летний текстиль/"s }, - { "\"variant\""s, ResultType::Return, "variant"s }, - { "\"\""s, ResultType::Return, ""s }, - { "\"price\""s, ResultType::Return, "price"s }, - { "\"390.00\""s, ResultType::Return, "390.00"s }, - { "\"list\""s, ResultType::Return, "list"s }, - { "\"/retailrocket/\""s, ResultType::Return, "/retailrocket/"s }, - { "\"position\""s, ResultType::Return, "position"s }, - { "\"brand\""s, ResultType::Return, "brand"s }, - { "\"/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/\""s, ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/"s }, - { "\"id\""s, ResultType::Return, "id"s }, - { "\"863839\""s, ResultType::Return, "863839"s }, - { "\"name\""s, ResultType::Return, "name"s }, - { "\"Футболка детская 3D Pretty kitten, возраст 1-2 года, трикотаж\""s, ResultType::Return, "Футболка детская 3D Pretty kitten, возраст 1-2 года, трикотаж"s }, - { "\"category\""s, ResultType::Return, "category"s }, - { "\"/Летние товары/Летний текстиль/\""s, ResultType::Return, "/Летние товары/Летний текстиль/"s }, - { "\"variant\""s, ResultType::Return, "variant"s }, - { "\"\""s, ResultType::Return, ""s }, - { "\"price\""s, ResultType::Return, "price"s }, - { "\"390.00\""s, ResultType::Return, "390.00"s }, - { "\"list\""s, ResultType::Return, "list"s }, - { "\"/retailrocket/\""s, ResultType::Return, "/retailrocket/"s }, - { "\"position\""s, ResultType::Return, "position"s }, - { "\"brand\""s, ResultType::Return, "brand"s }, - { "\"/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/\""s, ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/"s }, - { "\"id\""s, ResultType::Return, "id"s }, - { "\"863847\""s, ResultType::Return, "863847"s }, - { "\"name\""s, ResultType::Return, "name"s }, - { "\"Футболка детская 3D Little tiger, возраст 1-2 года, трикотаж\""s, ResultType::Return, "Футболка детская 3D Little tiger, возраст 1-2 года, трикотаж"s }, - { "\"category\""s, ResultType::Return, "category"s }, - { "\"/Летние товары/Летний текстиль/\""s, ResultType::Return, "/Летние товары/Летний текстиль/"s }, - { "\"variant\""s, ResultType::Return, "variant"s }, - { "\"\""s, ResultType::Return, ""s }, - { "\"price\""s, ResultType::Return, "price"s }, - { "\"390.00\""s, ResultType::Return, "390.00"s }, - { "\"list\""s, ResultType::Return, "list"s }, - { "\"/retailrocket/\""s, ResultType::Return, "/retailrocket/"s }, - { "\"position\""s, ResultType::Return, "position"s }, - { "\"brand\""s, ResultType::Return, "brand"s }, - { "\"/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/\""s, ResultType::Return, 
"/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/"s }, - { "\"id\""s, ResultType::Return, "id"s }, - { "\"911480\""s, ResultType::Return, "911480"s }, - { "\"name\""s, ResultType::Return, "name"s }, - { "\"Футболка детская 3D Puppy, возраст 1-2 года, трикотаж\""s, ResultType::Return, "Футболка детская 3D Puppy, возраст 1-2 года, трикотаж"s }, - { "\"category\""s, ResultType::Return, "category"s }, - { "\"/Летние товары/Летний текстиль/\""s, ResultType::Return, "/Летние товары/Летний текстиль/"s }, - { "\"variant\""s, ResultType::Return, "variant"s }, - { "\"\""s, ResultType::Return, ""s }, - { "\"price\""s, ResultType::Return, "price"s }, - { "\"390.00\""s, ResultType::Return, "390.00"s }, - { "\"list\""s, ResultType::Return, "list"s }, - { "\"/retailrocket/\""s, ResultType::Return, "/retailrocket/"s }, - { "\"position\""s, ResultType::Return, "position"s }, - { "\"brand\""s, ResultType::Return, "brand"s }, - { "\"/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/\""s, ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/"s }, - { "\"id\""s, ResultType::Return, "id"s }, - { "\"911484\""s, ResultType::Return, "911484"s }, - { "\"name\""s, ResultType::Return, "name"s }, - { "\"Футболка детская 3D Little bears, возраст 1-2 года, трикотаж\""s, ResultType::Return, "Футболка детская 3D Little bears, возраст 1-2 года, трикотаж"s }, - { "\"category\""s, ResultType::Return, "category"s }, - { "\"/Летние товары/Летний текстиль/\""s, ResultType::Return, "/Летние товары/Летний текстиль/"s }, - { "\"variant\""s, ResultType::Return, "variant"s }, - { "\"\""s, ResultType::Return, ""s }, - { "\"price\""s, ResultType::Return, "price"s }, - { "\"390.00\""s, ResultType::Return, "390.00"s }, - { "\"list\""s, ResultType::Return, "list"s }, - { "\"/retailrocket/\""s, ResultType::Return, "/retailrocket/"s }, - { "\"position\""s, ResultType::Return, "position"s }, - { "\"brand\""s, ResultType::Return, "brand"s }, - { "\"/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/\""s, ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/"s }, - { "\"id\""s, ResultType::Return, "id"s }, - { "\"911489\""s, ResultType::Return, "911489"s }, - { "\"name\""s, ResultType::Return, "name"s }, - { "\"Футболка детская 3D Dolphin, возраст 2-4 года, трикотаж\""s, ResultType::Return, "Футболка детская 3D Dolphin, возраст 2-4 года, трикотаж"s }, - { "\"category\""s, ResultType::Return, "category"s }, - { "\"/Летние товары/Летний текстиль/\""s, ResultType::Return, "/Летние товары/Летний текстиль/"s }, - { "\"variant\""s, ResultType::Return, "variant"s }, - { "\"\""s, ResultType::Return, ""s }, - { "\"price\""s, ResultType::Return, "price"s }, - { "\"390.00\""s, ResultType::Return, "390.00"s }, - { "\"list\""s, ResultType::Return, "list"s }, - { "\"/retailrocket/\""s, ResultType::Return, "/retailrocket/"s }, - { "\"position\""s, ResultType::Return, "position"s }, - { "\"brand\""s, ResultType::Return, "brand"s }, - { "\"/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/\""s, ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/"s }, - { "\"id\""s, ResultType::Return, "id"s }, - { "\"911496\""s, ResultType::Return, "911496"s }, - { "\"name\""s, ResultType::Return, "name"s }, - { "\"Футболка детская 3D Pretty, возраст 1-2 года, трикотаж\""s, ResultType::Return, "Футболка детская 3D Pretty, возраст 1-2 года, трикотаж"s }, - { "\"category\""s, ResultType::Return, "category"s }, - { "\"/Летние 
товары/Летний текстиль/\""s, ResultType::Return, "/Летние товары/Летний текстиль/"s }, - { "\"variant\""s, ResultType::Return, "variant"s }, - { "\"\""s, ResultType::Return, ""s }, - { "\"price\""s, ResultType::Return, "price"s }, - { "\"390.00\""s, ResultType::Return, "390.00"s }, - { "\"list\""s, ResultType::Return, "list"s }, - { "\"/retailrocket/\""s, ResultType::Return, "/retailrocket/"s }, - { "\"position\""s, ResultType::Return, "position"s }, - { "\"brand\""s, ResultType::Return, "brand"s }, - { "\"/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/\""s, ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/"s }, - { "\"id\""s, ResultType::Return, "id"s }, - { "\"911504\""s, ResultType::Return, "911504"s }, - { "\"name\""s, ResultType::Return, "name"s }, - { "\"Футболка детская 3D Fairytale, возраст 1-2 года, трикотаж\""s, ResultType::Return, "Футболка детская 3D Fairytale, возраст 1-2 года, трикотаж"s }, - { "\"category\""s, ResultType::Return, "category"s }, - { "\"/Летние товары/Летний текстиль/\""s, ResultType::Return, "/Летние товары/Летний текстиль/"s }, - { "\"variant\""s, ResultType::Return, "variant"s }, - { "\"\""s, ResultType::Return, ""s }, - { "\"price\""s, ResultType::Return, "price"s }, - { "\"390.00\""s, ResultType::Return, "390.00"s }, - { "\"list\""s, ResultType::Return, "list"s }, - { "\"/retailrocket/\""s, ResultType::Return, "/retailrocket/"s }, - { "\"position\""s, ResultType::Return, "position"s }, - { "\"brand\""s, ResultType::Return, "brand"s }, - { "\"/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/\""s, ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/"s }, - { "\"id\""s, ResultType::Return, "id"s }, - { "\"911508\""s, ResultType::Return, "911508"s }, - { "\"name\""s, ResultType::Return, "name"s }, - { "\"Футболка детская 3D Kittens, возраст 1-2 года, трикотаж\""s, ResultType::Return, "Футболка детская 3D Kittens, возраст 1-2 года, трикотаж"s }, - { "\"category\""s, ResultType::Return, "category"s }, - { "\"/Летние товары/Летний текстиль/\""s, ResultType::Return, "/Летние товары/Летний текстиль/"s }, - { "\"variant\""s, ResultType::Return, "variant"s }, - { "\"\""s, ResultType::Return, ""s }, - { "\"price\""s, ResultType::Return, "price"s }, - { "\"390.00\""s, ResultType::Return, "390.00"s }, - { "\"list\""s, ResultType::Return, "list"s }, - { "\"/retailrocket/\""s, ResultType::Return, "/retailrocket/"s }, - { "\"position\""s, ResultType::Return, "position"s }, - { "\"brand\""s, ResultType::Return, "brand"s }, - { "\"/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/\""s, ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/"s }, - { "\"id\""s, ResultType::Return, "id"s }, - { "\"911512\""s, ResultType::Return, "911512"s }, - { "\"name\""s, ResultType::Return, "name"s }, - { "\"Футболка детская 3D Sunshine, возраст 1-2 года, трикотаж\""s, ResultType::Return, "Футболка детская 3D Sunshine, возраст 1-2 года, трикотаж"s }, - { "\"category\""s, ResultType::Return, "category"s }, - { "\"/Летние товары/Летний текстиль/\""s, ResultType::Return, "/Летние товары/Летний текстиль/"s }, - { "\"variant\""s, ResultType::Return, "variant"s }, - { "\"\""s, ResultType::Return, ""s }, - { "\"price\""s, ResultType::Return, "price"s }, - { "\"390.00\""s, ResultType::Return, "390.00"s }, - { "\"list\""s, ResultType::Return, "list"s }, - { "\"/retailrocket/\""s, ResultType::Return, "/retailrocket/"s }, - { "\"position\""s, 
ResultType::Return, "position"s }, - { "\"brand\""s, ResultType::Return, "brand"s }, - { "\"/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/\""s, ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/"s }, - { "\"id\""s, ResultType::Return, "id"s }, - { "\"911516\""s, ResultType::Return, "911516"s }, - { "\"name\""s, ResultType::Return, "name"s }, - { "\"Футболка детская 3D Dog in bag, возраст 1-2 года, трикотаж\""s, ResultType::Return, "Футболка детская 3D Dog in bag, возраст 1-2 года, трикотаж"s }, - { "\"category\""s, ResultType::Return, "category"s }, - { "\"/Летние товары/Летний текстиль/\""s, ResultType::Return, "/Летние товары/Летний текстиль/"s }, - { "\"variant\""s, ResultType::Return, "variant"s }, - { "\"\""s, ResultType::Return, ""s }, - { "\"price\""s, ResultType::Return, "price"s }, - { "\"390.00\""s, ResultType::Return, "390.00"s }, - { "\"list\""s, ResultType::Return, "list"s }, - { "\"/retailrocket/\""s, ResultType::Return, "/retailrocket/"s }, - { "\"position\""s, ResultType::Return, "position"s }, - { "\"brand\""s, ResultType::Return, "brand"s }, - { "\"/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/\""s, ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/"s }, - { "\"id\""s, ResultType::Return, "id"s }, - { "\"911520\""s, ResultType::Return, "911520"s }, - { "\"name\""s, ResultType::Return, "name"s }, - { "\"Футболка детская 3D Cute puppy, возраст 1-2 года, трикотаж\""s, ResultType::Return, "Футболка детская 3D Cute puppy, возраст 1-2 года, трикотаж"s }, - { "\"category\""s, ResultType::Return, "category"s }, - { "\"/Летние товары/Летний текстиль/\""s, ResultType::Return, "/Летние товары/Летний текстиль/"s }, - { "\"variant\""s, ResultType::Return, "variant"s }, - { "\"\""s, ResultType::Return, ""s }, - { "\"price\""s, ResultType::Return, "price"s }, - { "\"390.00\""s, ResultType::Return, "390.00"s }, - { "\"list\""s, ResultType::Return, "list"s }, - { "\"/retailrocket/\""s, ResultType::Return, "/retailrocket/"s }, - { "\"position\""s, ResultType::Return, "position"s }, - { "\"brand\""s, ResultType::Return, "brand"s }, - { "\"/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/\""s, ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/"s }, - { "\"id\""s, ResultType::Return, "id"s }, - { "\"911524\""s, ResultType::Return, "911524"s }, - { "\"name\""s, ResultType::Return, "name"s }, - { "\"Футболка детская 3D Rabbit, возраст 1-2 года, трикотаж\""s, ResultType::Return, "Футболка детская 3D Rabbit, возраст 1-2 года, трикотаж"s }, - { "\"category\""s, ResultType::Return, "category"s }, - { "\"/Летние товары/Летний текстиль/\""s, ResultType::Return, "/Летние товары/Летний текстиль/"s }, - { "\"variant\""s, ResultType::Return, "variant"s }, - { "\"\""s, ResultType::Return, ""s }, - { "\"price\""s, ResultType::Return, "price"s }, - { "\"390.00\""s, ResultType::Return, "390.00"s }, - { "\"list\""s, ResultType::Return, "list"s }, - { "\"/retailrocket/\""s, ResultType::Return, "/retailrocket/"s }, - { "\"position\""s, ResultType::Return, "position"s }, - { "\"brand\""s, ResultType::Return, "brand"s }, - { "\"/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/\""s, ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/"s }, - { "\"id\""s, ResultType::Return, "id"s }, - { "\"911528\""s, ResultType::Return, "911528"s }, - { "\"name\""s, ResultType::Return, "name"s }, - { "\"Футболка детская 3D 
Turtle, возраст 1-2 года, трикотаж\""s, ResultType::Return, "Футболка детская 3D Turtle, возраст 1-2 года, трикотаж"s }, - { "\"category\""s, ResultType::Return, "category"s }, - { "\"/Летние товары/Летний текстиль/\""s, ResultType::Return, "/Летние товары/Летний текстиль/"s }, - { "\"variant\""s, ResultType::Return, "variant"s }, - { "\"\""s, ResultType::Return, ""s }, - { "\"price\""s, ResultType::Return, "price"s }, - { "\"390.00\""s, ResultType::Return, "390.00"s }, - { "\"list\""s, ResultType::Return, "list"s }, - { "\"/retailrocket/\""s, ResultType::Return, "/retailrocket/"s }, - { "\"position\""s, ResultType::Return, "position"s }, - { "\"brand\""s, ResultType::Return, "brand"s }, - { "\"/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/\""s, ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/"s }, - { "\"id\""s, ResultType::Return, "id"s }, - { "\"888616\""s, ResultType::Return, "888616"s }, - { "\"name\""s, ResultType::Return, "name"s }, + { R"("name")"s, ResultType::Return, "name"s }, + { R"("Вафельница Vitek WX-1102 FL")"s, ResultType::Return, "Вафельница Vitek WX-1102 FL"s }, + { R"("brand")"s, ResultType::Return, "brand"s }, + { R"("184509")"s, ResultType::Return, "184509"s }, + { R"("category")"s, ResultType::Return, "category"s }, + { R"("Все для детей/Детская техника/Vitek")"s, ResultType::Return, "Все для детей/Детская техника/Vitek"s }, + { R"("variant")"s, ResultType::Return, "variant"s }, + { R"("В наличии")"s, ResultType::Return, "В наличии"s }, + { R"("price")"s, ResultType::Return, "price"s }, + { R"("2390.00")"s, ResultType::Return, "2390.00"s }, + { R"("list")"s, ResultType::Return, "list"s }, + { R"("Карточка")"s, ResultType::Return, "Карточка"s }, + { R"("position")"s, ResultType::Return, "position"s }, + { R"("detail")"s, ResultType::Return, "detail"s }, + { R"("actionField")"s, ResultType::Return, "actionField"s }, + { R"("list")"s, ResultType::Return, "list"s }, + { R"("http://www.techport.ru/q/?t=вафельница&sort=price&sdim=asc")"s, ResultType::Return, "http://www.techport.ru/q/?t=вафельница&sort=price&sdim=asc"s }, + { R"("action")"s, ResultType::Return, "action"s }, + { R"("detail")"s, ResultType::Return, "detail"s }, + { R"("products")"s, ResultType::Return, "products"s }, + { R"("name")"s, ResultType::Return, "name"s }, + { R"("Вафельница Vitek WX-1102 FL")"s, ResultType::Return, "Вафельница Vitek WX-1102 FL"s }, + { R"("id")"s, ResultType::Return, "id"s }, + { R"("184509")"s, ResultType::Return, "184509"s }, + { R"("price")"s, ResultType::Return, "price"s }, + { R"("2390.00")"s, ResultType::Return, "2390.00"s }, + { R"("brand")"s, ResultType::Return, "brand"s }, + { R"("Vitek")"s, ResultType::Return, "Vitek"s }, + { R"("category")"s, ResultType::Return, "category"s }, + { R"("Все для детей/Детская техника/Vitek")"s, ResultType::Return, "Все для детей/Детская техника/Vitek"s }, + { R"("variant")"s, ResultType::Return, "variant"s }, + { R"("В наличии")"s, ResultType::Return, "В наличии"s }, + { R"("ru")"s, ResultType::Return, "ru"s }, + { R"("experiments")"s, ResultType::Return, "experiments"s }, + { R"("lang")"s, ResultType::Return, "lang"s }, + { R"("ru")"s, ResultType::Return, "ru"s }, + { R"("los_portal")"s, ResultType::Return, "los_portal"s }, + { R"("los_level")"s, ResultType::Return, "los_level"s }, + { R"("none")"s, ResultType::Return, "none"s }, + { R"("isAuthorized")"s, ResultType::Return, "isAuthorized"s }, + { R"("isSubscriber")"s, ResultType::Return, "isSubscriber"s }, + { R"("postType")"s, 
ResultType::Return, "postType"s }, + { R"("Новости")"s, ResultType::Return, "Новости"s }, + { R"("experiments")"s, ResultType::Return, "experiments"s }, + { R"("lang")"s, ResultType::Return, "lang"s }, + { R"("ru")"s, ResultType::Return, "ru"s }, + { R"("los_portal")"s, ResultType::Return, "los_portal"s }, + { R"("los_level")"s, ResultType::Return, "los_level"s }, + { R"("none")"s, ResultType::Return, "none"s }, + { R"("lang")"s, ResultType::Return, "lang"s }, + { R"("ru")"s, ResultType::Return, "ru"s }, + { R"("Электроплита GEFEST Брест ЭПНД 5140-01 0001")"s, ResultType::Return, "Электроплита GEFEST Брест ЭПНД 5140-01 0001"s }, + { R"("price")"s, ResultType::Return, "price"s }, + { R"("currencyCode")"s, ResultType::Return, "currencyCode"s }, + { R"("RUB")"s, ResultType::Return, "RUB"s }, + { R"("lang")"s, ResultType::Return, "lang"s }, + { R"("ru")"s, ResultType::Return, "ru"s }, + { R"("experiments")"s, ResultType::Return, "experiments"s }, + { R"("lang")"s, ResultType::Return, "lang"s }, + { R"("ru")"s, ResultType::Return, "ru"s }, + { R"("los_portal")"s, ResultType::Return, "los_portal"s }, + { R"("los_level")"s, ResultType::Return, "los_level"s }, + { R"("none")"s, ResultType::Return, "none"s }, + { R"("trash_login")"s, ResultType::Return, "trash_login"s }, + { R"("novikoff")"s, ResultType::Return, "novikoff"s }, + { R"("trash_cat_link")"s, ResultType::Return, "trash_cat_link"s }, + { R"("progs")"s, ResultType::Return, "progs"s }, + { R"("trash_parent_link")"s, ResultType::Return, "trash_parent_link"s }, + { R"("content")"s, ResultType::Return, "content"s }, + { R"("trash_posted_parent")"s, ResultType::Return, "trash_posted_parent"s }, + { R"("content.01.2016")"s, ResultType::Return, "content.01.2016"s }, + { R"("trash_posted_cat")"s, ResultType::Return, "trash_posted_cat"s }, + { R"("progs.01.2016")"s, ResultType::Return, "progs.01.2016"s }, + { R"("trash_virus_count")"s, ResultType::Return, "trash_virus_count"s }, + { R"("trash_is_android")"s, ResultType::Return, "trash_is_android"s }, + { R"("trash_is_wp8")"s, ResultType::Return, "trash_is_wp8"s }, + { R"("trash_is_ios")"s, ResultType::Return, "trash_is_ios"s }, + { R"("trash_posted")"s, ResultType::Return, "trash_posted"s }, + { R"("01.2016")"s, ResultType::Return, "01.2016"s }, + { R"("experiments")"s, ResultType::Return, "experiments"s }, + { R"("lang")"s, ResultType::Return, "lang"s }, + { R"("ru")"s, ResultType::Return, "ru"s }, + { R"("los_portal")"s, ResultType::Return, "los_portal"s }, + { R"("los_level")"s, ResultType::Return, "los_level"s }, + { R"("none")"s, ResultType::Return, "none"s }, + { R"("merchantId")"s, ResultType::Return, "merchantId"s }, + { R"("13694_49246")"s, ResultType::Return, "13694_49246"s }, + { R"("cps-source")"s, ResultType::Return, "cps-source"s }, + { R"("wargaming")"s, ResultType::Return, "wargaming"s }, + { R"("cps_provider")"s, ResultType::Return, "cps_provider"s }, + { R"("default")"s, ResultType::Return, "default"s }, + { R"("errorReason")"s, ResultType::Return, "errorReason"s }, + { R"("no errors")"s, ResultType::Return, "no errors"s }, + { R"("scid")"s, ResultType::Return, "scid"s }, + { R"("isAuthPayment")"s, ResultType::Return, "isAuthPayment"s }, + { R"("lang")"s, ResultType::Return, "lang"s }, + { R"("ru")"s, ResultType::Return, "ru"s }, + { R"("rubric")"s, ResultType::Return, "rubric"s }, + { R"("")"s, ResultType::Return, ""s }, + { R"("rubric")"s, ResultType::Return, "rubric"s }, + { R"("Мир")"s, ResultType::Return, "Мир"s }, + { R"("lang")"s, ResultType::Return, "lang"s }, + { 
R"("ru")"s, ResultType::Return, "ru"s }, + { R"("experiments")"s, ResultType::Return, "experiments"s }, + { R"("lang")"s, ResultType::Return, "lang"s }, + { R"("ru")"s, ResultType::Return, "ru"s }, + { R"("los_portal")"s, ResultType::Return, "los_portal"s }, + { R"("los_level")"s, ResultType::Return, "los_level"s }, + { R"("none")"s, ResultType::Return, "none"s }, + { R"("lang")"s, ResultType::Return, "lang"s }, + { R"("ru")"s, ResultType::Return, "ru"s }, + { R"("__ym")"s, ResultType::Return, "__ym"s }, + { R"("ecommerce")"s, ResultType::Return, "ecommerce"s }, + { R"("impressions")"s, ResultType::Return, "impressions"s }, + { R"("id")"s, ResultType::Return, "id"s }, + { R"("863813")"s, ResultType::Return, "863813"s }, + { R"("name")"s, ResultType::Return, "name"s }, + { R"("Футболка детская 3D Happy, возраст 1-2 года, трикотаж")"s, ResultType::Return, "Футболка детская 3D Happy, возраст 1-2 года, трикотаж"s }, + { R"("category")"s, ResultType::Return, "category"s }, + { R"("/Летние товары/Летний текстиль/")"s, ResultType::Return, "/Летние товары/Летний текстиль/"s }, + { R"("variant")"s, ResultType::Return, "variant"s }, + { R"("")"s, ResultType::Return, ""s }, + { R"("price")"s, ResultType::Return, "price"s }, + { R"("390.00")"s, ResultType::Return, "390.00"s }, + { R"("list")"s, ResultType::Return, "list"s }, + { R"("/retailrocket/")"s, ResultType::Return, "/retailrocket/"s }, + { R"("position")"s, ResultType::Return, "position"s }, + { R"("brand")"s, ResultType::Return, "brand"s }, + { R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")"s, ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/"s }, + { R"("id")"s, ResultType::Return, "id"s }, + { R"("863839")"s, ResultType::Return, "863839"s }, + { R"("name")"s, ResultType::Return, "name"s }, + { R"("Футболка детская 3D Pretty kitten, возраст 1-2 года, трикотаж")"s, ResultType::Return, "Футболка детская 3D Pretty kitten, возраст 1-2 года, трикотаж"s }, + { R"("category")"s, ResultType::Return, "category"s }, + { R"("/Летние товары/Летний текстиль/")"s, ResultType::Return, "/Летние товары/Летний текстиль/"s }, + { R"("variant")"s, ResultType::Return, "variant"s }, + { R"("")"s, ResultType::Return, ""s }, + { R"("price")"s, ResultType::Return, "price"s }, + { R"("390.00")"s, ResultType::Return, "390.00"s }, + { R"("list")"s, ResultType::Return, "list"s }, + { R"("/retailrocket/")"s, ResultType::Return, "/retailrocket/"s }, + { R"("position")"s, ResultType::Return, "position"s }, + { R"("brand")"s, ResultType::Return, "brand"s }, + { R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")"s, ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/"s }, + { R"("id")"s, ResultType::Return, "id"s }, + { R"("863847")"s, ResultType::Return, "863847"s }, + { R"("name")"s, ResultType::Return, "name"s }, + { R"("Футболка детская 3D Little tiger, возраст 1-2 года, трикотаж")"s, ResultType::Return, "Футболка детская 3D Little tiger, возраст 1-2 года, трикотаж"s }, + { R"("category")"s, ResultType::Return, "category"s }, + { R"("/Летние товары/Летний текстиль/")"s, ResultType::Return, "/Летние товары/Летний текстиль/"s }, + { R"("variant")"s, ResultType::Return, "variant"s }, + { R"("")"s, ResultType::Return, ""s }, + { R"("price")"s, ResultType::Return, "price"s }, + { R"("390.00")"s, ResultType::Return, "390.00"s }, + { R"("list")"s, ResultType::Return, "list"s }, + { R"("/retailrocket/")"s, ResultType::Return, "/retailrocket/"s }, + { R"("position")"s, 
ResultType::Return, "position"s }, + { R"("brand")"s, ResultType::Return, "brand"s }, + { R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")"s, ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/"s }, + { R"("id")"s, ResultType::Return, "id"s }, + { R"("911480")"s, ResultType::Return, "911480"s }, + { R"("name")"s, ResultType::Return, "name"s }, + { R"("Футболка детская 3D Puppy, возраст 1-2 года, трикотаж")"s, ResultType::Return, "Футболка детская 3D Puppy, возраст 1-2 года, трикотаж"s }, + { R"("category")"s, ResultType::Return, "category"s }, + { R"("/Летние товары/Летний текстиль/")"s, ResultType::Return, "/Летние товары/Летний текстиль/"s }, + { R"("variant")"s, ResultType::Return, "variant"s }, + { R"("")"s, ResultType::Return, ""s }, + { R"("price")"s, ResultType::Return, "price"s }, + { R"("390.00")"s, ResultType::Return, "390.00"s }, + { R"("list")"s, ResultType::Return, "list"s }, + { R"("/retailrocket/")"s, ResultType::Return, "/retailrocket/"s }, + { R"("position")"s, ResultType::Return, "position"s }, + { R"("brand")"s, ResultType::Return, "brand"s }, + { R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")"s, ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/"s }, + { R"("id")"s, ResultType::Return, "id"s }, + { R"("911484")"s, ResultType::Return, "911484"s }, + { R"("name")"s, ResultType::Return, "name"s }, + { R"("Футболка детская 3D Little bears, возраст 1-2 года, трикотаж")"s, ResultType::Return, "Футболка детская 3D Little bears, возраст 1-2 года, трикотаж"s }, + { R"("category")"s, ResultType::Return, "category"s }, + { R"("/Летние товары/Летний текстиль/")"s, ResultType::Return, "/Летние товары/Летний текстиль/"s }, + { R"("variant")"s, ResultType::Return, "variant"s }, + { R"("")"s, ResultType::Return, ""s }, + { R"("price")"s, ResultType::Return, "price"s }, + { R"("390.00")"s, ResultType::Return, "390.00"s }, + { R"("list")"s, ResultType::Return, "list"s }, + { R"("/retailrocket/")"s, ResultType::Return, "/retailrocket/"s }, + { R"("position")"s, ResultType::Return, "position"s }, + { R"("brand")"s, ResultType::Return, "brand"s }, + { R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")"s, ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/"s }, + { R"("id")"s, ResultType::Return, "id"s }, + { R"("911489")"s, ResultType::Return, "911489"s }, + { R"("name")"s, ResultType::Return, "name"s }, + { R"("Футболка детская 3D Dolphin, возраст 2-4 года, трикотаж")"s, ResultType::Return, "Футболка детская 3D Dolphin, возраст 2-4 года, трикотаж"s }, + { R"("category")"s, ResultType::Return, "category"s }, + { R"("/Летние товары/Летний текстиль/")"s, ResultType::Return, "/Летние товары/Летний текстиль/"s }, + { R"("variant")"s, ResultType::Return, "variant"s }, + { R"("")"s, ResultType::Return, ""s }, + { R"("price")"s, ResultType::Return, "price"s }, + { R"("390.00")"s, ResultType::Return, "390.00"s }, + { R"("list")"s, ResultType::Return, "list"s }, + { R"("/retailrocket/")"s, ResultType::Return, "/retailrocket/"s }, + { R"("position")"s, ResultType::Return, "position"s }, + { R"("brand")"s, ResultType::Return, "brand"s }, + { R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")"s, ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/"s }, + { R"("id")"s, ResultType::Return, "id"s }, + { R"("911496")"s, ResultType::Return, "911496"s }, + { R"("name")"s, 
ResultType::Return, "name"s }, + { R"("Футболка детская 3D Pretty, возраст 1-2 года, трикотаж")"s, ResultType::Return, "Футболка детская 3D Pretty, возраст 1-2 года, трикотаж"s }, + { R"("category")"s, ResultType::Return, "category"s }, + { R"("/Летние товары/Летний текстиль/")"s, ResultType::Return, "/Летние товары/Летний текстиль/"s }, + { R"("variant")"s, ResultType::Return, "variant"s }, + { R"("")"s, ResultType::Return, ""s }, + { R"("price")"s, ResultType::Return, "price"s }, + { R"("390.00")"s, ResultType::Return, "390.00"s }, + { R"("list")"s, ResultType::Return, "list"s }, + { R"("/retailrocket/")"s, ResultType::Return, "/retailrocket/"s }, + { R"("position")"s, ResultType::Return, "position"s }, + { R"("brand")"s, ResultType::Return, "brand"s }, + { R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")"s, ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/"s }, + { R"("id")"s, ResultType::Return, "id"s }, + { R"("911504")"s, ResultType::Return, "911504"s }, + { R"("name")"s, ResultType::Return, "name"s }, + { R"("Футболка детская 3D Fairytale, возраст 1-2 года, трикотаж")"s, ResultType::Return, "Футболка детская 3D Fairytale, возраст 1-2 года, трикотаж"s }, + { R"("category")"s, ResultType::Return, "category"s }, + { R"("/Летние товары/Летний текстиль/")"s, ResultType::Return, "/Летние товары/Летний текстиль/"s }, + { R"("variant")"s, ResultType::Return, "variant"s }, + { R"("")"s, ResultType::Return, ""s }, + { R"("price")"s, ResultType::Return, "price"s }, + { R"("390.00")"s, ResultType::Return, "390.00"s }, + { R"("list")"s, ResultType::Return, "list"s }, + { R"("/retailrocket/")"s, ResultType::Return, "/retailrocket/"s }, + { R"("position")"s, ResultType::Return, "position"s }, + { R"("brand")"s, ResultType::Return, "brand"s }, + { R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")"s, ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/"s }, + { R"("id")"s, ResultType::Return, "id"s }, + { R"("911508")"s, ResultType::Return, "911508"s }, + { R"("name")"s, ResultType::Return, "name"s }, + { R"("Футболка детская 3D Kittens, возраст 1-2 года, трикотаж")"s, ResultType::Return, "Футболка детская 3D Kittens, возраст 1-2 года, трикотаж"s }, + { R"("category")"s, ResultType::Return, "category"s }, + { R"("/Летние товары/Летний текстиль/")"s, ResultType::Return, "/Летние товары/Летний текстиль/"s }, + { R"("variant")"s, ResultType::Return, "variant"s }, + { R"("")"s, ResultType::Return, ""s }, + { R"("price")"s, ResultType::Return, "price"s }, + { R"("390.00")"s, ResultType::Return, "390.00"s }, + { R"("list")"s, ResultType::Return, "list"s }, + { R"("/retailrocket/")"s, ResultType::Return, "/retailrocket/"s }, + { R"("position")"s, ResultType::Return, "position"s }, + { R"("brand")"s, ResultType::Return, "brand"s }, + { R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")"s, ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/"s }, + { R"("id")"s, ResultType::Return, "id"s }, + { R"("911512")"s, ResultType::Return, "911512"s }, + { R"("name")"s, ResultType::Return, "name"s }, + { R"("Футболка детская 3D Sunshine, возраст 1-2 года, трикотаж")"s, ResultType::Return, "Футболка детская 3D Sunshine, возраст 1-2 года, трикотаж"s }, + { R"("category")"s, ResultType::Return, "category"s }, + { R"("/Летние товары/Летний текстиль/")"s, ResultType::Return, "/Летние товары/Летний текстиль/"s }, + { R"("variant")"s, ResultType::Return, 
"variant"s }, + { R"("")"s, ResultType::Return, ""s }, + { R"("price")"s, ResultType::Return, "price"s }, + { R"("390.00")"s, ResultType::Return, "390.00"s }, + { R"("list")"s, ResultType::Return, "list"s }, + { R"("/retailrocket/")"s, ResultType::Return, "/retailrocket/"s }, + { R"("position")"s, ResultType::Return, "position"s }, + { R"("brand")"s, ResultType::Return, "brand"s }, + { R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")"s, ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/"s }, + { R"("id")"s, ResultType::Return, "id"s }, + { R"("911516")"s, ResultType::Return, "911516"s }, + { R"("name")"s, ResultType::Return, "name"s }, + { R"("Футболка детская 3D Dog in bag, возраст 1-2 года, трикотаж")"s, ResultType::Return, "Футболка детская 3D Dog in bag, возраст 1-2 года, трикотаж"s }, + { R"("category")"s, ResultType::Return, "category"s }, + { R"("/Летние товары/Летний текстиль/")"s, ResultType::Return, "/Летние товары/Летний текстиль/"s }, + { R"("variant")"s, ResultType::Return, "variant"s }, + { R"("")"s, ResultType::Return, ""s }, + { R"("price")"s, ResultType::Return, "price"s }, + { R"("390.00")"s, ResultType::Return, "390.00"s }, + { R"("list")"s, ResultType::Return, "list"s }, + { R"("/retailrocket/")"s, ResultType::Return, "/retailrocket/"s }, + { R"("position")"s, ResultType::Return, "position"s }, + { R"("brand")"s, ResultType::Return, "brand"s }, + { R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")"s, ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/"s }, + { R"("id")"s, ResultType::Return, "id"s }, + { R"("911520")"s, ResultType::Return, "911520"s }, + { R"("name")"s, ResultType::Return, "name"s }, + { R"("Футболка детская 3D Cute puppy, возраст 1-2 года, трикотаж")"s, ResultType::Return, "Футболка детская 3D Cute puppy, возраст 1-2 года, трикотаж"s }, + { R"("category")"s, ResultType::Return, "category"s }, + { R"("/Летние товары/Летний текстиль/")"s, ResultType::Return, "/Летние товары/Летний текстиль/"s }, + { R"("variant")"s, ResultType::Return, "variant"s }, + { R"("")"s, ResultType::Return, ""s }, + { R"("price")"s, ResultType::Return, "price"s }, + { R"("390.00")"s, ResultType::Return, "390.00"s }, + { R"("list")"s, ResultType::Return, "list"s }, + { R"("/retailrocket/")"s, ResultType::Return, "/retailrocket/"s }, + { R"("position")"s, ResultType::Return, "position"s }, + { R"("brand")"s, ResultType::Return, "brand"s }, + { R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")"s, ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/"s }, + { R"("id")"s, ResultType::Return, "id"s }, + { R"("911524")"s, ResultType::Return, "911524"s }, + { R"("name")"s, ResultType::Return, "name"s }, + { R"("Футболка детская 3D Rabbit, возраст 1-2 года, трикотаж")"s, ResultType::Return, "Футболка детская 3D Rabbit, возраст 1-2 года, трикотаж"s }, + { R"("category")"s, ResultType::Return, "category"s }, + { R"("/Летние товары/Летний текстиль/")"s, ResultType::Return, "/Летние товары/Летний текстиль/"s }, + { R"("variant")"s, ResultType::Return, "variant"s }, + { R"("")"s, ResultType::Return, ""s }, + { R"("price")"s, ResultType::Return, "price"s }, + { R"("390.00")"s, ResultType::Return, "390.00"s }, + { R"("list")"s, ResultType::Return, "list"s }, + { R"("/retailrocket/")"s, ResultType::Return, "/retailrocket/"s }, + { R"("position")"s, ResultType::Return, "position"s }, + { R"("brand")"s, ResultType::Return, "brand"s }, 
+ { R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")"s, ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/"s }, + { R"("id")"s, ResultType::Return, "id"s }, + { R"("911528")"s, ResultType::Return, "911528"s }, + { R"("name")"s, ResultType::Return, "name"s }, + { R"("Футболка детская 3D Turtle, возраст 1-2 года, трикотаж")"s, ResultType::Return, "Футболка детская 3D Turtle, возраст 1-2 года, трикотаж"s }, + { R"("category")"s, ResultType::Return, "category"s }, + { R"("/Летние товары/Летний текстиль/")"s, ResultType::Return, "/Летние товары/Летний текстиль/"s }, + { R"("variant")"s, ResultType::Return, "variant"s }, + { R"("")"s, ResultType::Return, ""s }, + { R"("price")"s, ResultType::Return, "price"s }, + { R"("390.00")"s, ResultType::Return, "390.00"s }, + { R"("list")"s, ResultType::Return, "list"s }, + { R"("/retailrocket/")"s, ResultType::Return, "/retailrocket/"s }, + { R"("position")"s, ResultType::Return, "position"s }, + { R"("brand")"s, ResultType::Return, "brand"s }, + { R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")"s, ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/"s }, + { R"("id")"s, ResultType::Return, "id"s }, + { R"("888616")"s, ResultType::Return, "888616"s }, + { R"("name")"s, ResultType::Return, "name"s }, { "\"3Д Футболка мужская \\\"Collorista\\\" Светлое завтра р-р XL(52-54), 100% хлопок, трикотаж\""s, ResultType::Return, "3Д Футболка мужская \"Collorista\" Светлое завтра р-р XL(52-54), 100% хлопок, трикотаж"s }, - { "\"category\""s, ResultType::Return, "category"s }, - { "\"/Одежда и обувь/Мужская одежда/Футболки/\""s, ResultType::Return, "/Одежда и обувь/Мужская одежда/Футболки/"s }, - { "\"variant\""s, ResultType::Return, "variant"s }, - { "\"\""s, ResultType::Return, ""s }, - { "\"price\""s, ResultType::Return, "price"s }, - { "\"406.60\""s, ResultType::Return, "406.60"s }, - { "\"list\""s, ResultType::Return, "list"s }, - { "\"/retailrocket/\""s, ResultType::Return, "/retailrocket/"s }, - { "\"position\""s, ResultType::Return, "position"s }, - { "\"brand\""s, ResultType::Return, "brand"s }, - { "\"/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/\""s, ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/"s }, - { "\"id\""s, ResultType::Return, "id"s }, - { "\"913361\""s, ResultType::Return, "913361"s }, - { "\"name\""s, ResultType::Return, "name"s }, - { "\"3Д Футболка детская World р-р 8-10, 100% хлопок, трикотаж\""s, ResultType::Return, "3Д Футболка детская World р-р 8-10, 100% хлопок, трикотаж"s }, - { "\"category\""s, ResultType::Return, "category"s }, - { "\"/Летние товары/Летний текстиль/\""s, ResultType::Return, "/Летние товары/Летний текстиль/"s }, - { "\"variant\""s, ResultType::Return, "variant"s }, - { "\"\""s, ResultType::Return, ""s }, - { "\"price\""s, ResultType::Return, "price"s }, - { "\"470.00\""s, ResultType::Return, "470.00"s }, - { "\"list\""s, ResultType::Return, "list"s }, - { "\"/retailrocket/\""s, ResultType::Return, "/retailrocket/"s }, - { "\"position\""s, ResultType::Return, "position"s }, - { "\"brand\""s, ResultType::Return, "brand"s }, - { "\"/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/\""s, ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/"s }, - { "\"id\""s, ResultType::Return, "id"s }, - { "\"913364\""s, ResultType::Return, "913364"s }, - { "\"name\""s, ResultType::Return, "name"s }, - { "\"3Д Футболка 
детская Force р-р 8-10, 100% хлопок, трикотаж\""s, ResultType::Return, "3Д Футболка детская Force р-р 8-10, 100% хлопок, трикотаж"s }, - { "\"category\""s, ResultType::Return, "category"s }, - { "\"/Летние товары/Летний текстиль/\""s, ResultType::Return, "/Летние товары/Летний текстиль/"s }, - { "\"variant\""s, ResultType::Return, "variant"s }, - { "\"\""s, ResultType::Return, ""s }, - { "\"price\""s, ResultType::Return, "price"s }, - { "\"470.00\""s, ResultType::Return, "470.00"s }, - { "\"list\""s, ResultType::Return, "list"s }, - { "\"/retailrocket/\""s, ResultType::Return, "/retailrocket/"s }, - { "\"position\""s, ResultType::Return, "position"s }, - { "\"brand\""s, ResultType::Return, "brand"s }, - { "\"/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/\""s, ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/"s }, - { "\"id\""s, ResultType::Return, "id"s }, - { "\"913367\""s, ResultType::Return, "913367"s }, - { "\"name\""s, ResultType::Return, "name"s }, - { "\"3Д Футболка детская Winter tale р-р 8-10, 100% хлопок, трикотаж\""s, ResultType::Return, "3Д Футболка детская Winter tale р-р 8-10, 100% хлопок, трикотаж"s }, - { "\"category\""s, ResultType::Return, "category"s }, - { "\"/Летние товары/Летний текстиль/\""s, ResultType::Return, "/Летние товары/Летний текстиль/"s }, - { "\"variant\""s, ResultType::Return, "variant"s }, - { "\"\""s, ResultType::Return, ""s }, - { "\"price\""s, ResultType::Return, "price"s }, - { "\"470.00\""s, ResultType::Return, "470.00"s }, - { "\"list\""s, ResultType::Return, "list"s }, - { "\"/retailrocket/\""s, ResultType::Return, "/retailrocket/"s }, - { "\"position\""s, ResultType::Return, "position"s }, - { "\"brand\""s, ResultType::Return, "brand"s }, - { "\"/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/\""s, ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/"s }, - { "\"id\""s, ResultType::Return, "id"s }, - { "\"913385\""s, ResultType::Return, "913385"s }, - { "\"name\""s, ResultType::Return, "name"s }, - { "\"3Д Футболка детская Moonshine р-р 8-10, 100% хлопок, трикотаж\""s, ResultType::Return, "3Д Футболка детская Moonshine р-р 8-10, 100% хлопок, трикотаж"s }, - { "\"category\""s, ResultType::Return, "category"s }, - { "\"/Летние товары/Летний текстиль/\""s, ResultType::Return, "/Летние товары/Летний текстиль/"s }, - { "\"variant\""s, ResultType::Return, "variant"s }, - { "\"\""s, ResultType::Return, ""s }, - { "\"price\""s, ResultType::Return, "price"s }, - { "\"470.00\""s, ResultType::Return, "470.00"s }, - { "\"list\""s, ResultType::Return, "list"s }, - { "\"/retailrocket/\""s, ResultType::Return, "/retailrocket/"s }, - { "\"position\""s, ResultType::Return, "position"s }, - { "\"brand\""s, ResultType::Return, "brand"s }, - { "\"/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/\""s, ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/"s }, - { "\"id\""s, ResultType::Return, "id"s }, - { "\"913391\""s, ResultType::Return, "913391"s }, - { "\"name\""s, ResultType::Return, "name"s }, - { "\"3Д Футболка детская Shaman р-р 8-10, 100% хлопок, трикотаж\""s, ResultType::Return, "3Д Футболка детская Shaman р-р 8-10, 100% хлопок, трикотаж"s }, - { "\"category\""s, ResultType::Return, "category"s }, - { "\"/Летние товары/Летний текстиль/\""s, ResultType::Return, "/Летние товары/Летний текстиль/"s }, - { "\"variant\""s, ResultType::Return, "variant"s }, - { "\"\""s, ResultType::Return, ""s }, - { "\"price\""s, 
ResultType::Return, "price"s }, - { "\"470.00\""s, ResultType::Return, "470.00"s }, - { "\"list\""s, ResultType::Return, "list"s }, - { "\"/retailrocket/\""s, ResultType::Return, "/retailrocket/"s }, - { "\"position\""s, ResultType::Return, "position"s }, - { "\"brand\""s, ResultType::Return, "brand"s }, - { "\"/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/\""s, ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/"s }, - { "\"usertype\""s, ResultType::Return, "usertype"s }, - { "\"visitor\""s, ResultType::Return, "visitor"s }, - { "\"lang\""s, ResultType::Return, "lang"s }, - { "\"ru\""s, ResultType::Return, "ru"s }, - { "\"__ym\""s, ResultType::Return, "__ym"s }, - { "\"ecommerce\""s, ResultType::Return, "ecommerce"s }, - { "\"impressions\""s, ResultType::Return, "impressions"s }, - { "\"experiments\""s, ResultType::Return, "experiments"s }, - { "\"lang\""s, ResultType::Return, "lang"s }, - { "\"ru\""s, ResultType::Return, "ru"s }, - { "\"los_portal\""s, ResultType::Return, "los_portal"s }, - { "\"los_level\""s, ResultType::Return, "los_level"s }, - { "\"none\""s, ResultType::Return, "none"s }, - { "\"experiments\""s, ResultType::Return, "experiments"s }, - { "\"lang\""s, ResultType::Return, "lang"s }, - { "\"ru\""s, ResultType::Return, "ru"s }, - { "\"los_portal\""s, ResultType::Return, "los_portal"s }, - { "\"los_level\""s, ResultType::Return, "los_level"s }, - { "\"none\""s, ResultType::Return, "none"s }, - { "\"experiments\""s, ResultType::Return, "experiments"s }, - { "\"lang\""s, ResultType::Return, "lang"s }, - { "\"ru\""s, ResultType::Return, "ru"s }, - { "\"los_portal\""s, ResultType::Return, "los_portal"s }, - { "\"los_level\""s, ResultType::Return, "los_level"s }, - { "\"none\""s, ResultType::Return, "none"s }, - { "\"experiments\""s, ResultType::Return, "experiments"s }, - { "\"lang\""s, ResultType::Return, "lang"s }, - { "\"ru\""s, ResultType::Return, "ru"s }, - { "\"los_portal\""s, ResultType::Return, "los_portal"s }, - { "\"los_level\""s, ResultType::Return, "los_level"s }, - { "\"none\""s, ResultType::Return, "none"s }, - { "\"experiments\""s, ResultType::Return, "experiments"s }, - { "\"lang\""s, ResultType::Return, "lang"s }, - { "\"ru\""s, ResultType::Return, "ru"s }, - { "\"los_portal\""s, ResultType::Return, "los_portal"s }, - { "\"los_level\""s, ResultType::Return, "los_level"s }, - { "\"none\""s, ResultType::Return, "none"s }, - { "\"__ym\""s, ResultType::Return, "__ym"s }, - { "\"ecommerce\""s, ResultType::Return, "ecommerce"s }, - { "\"currencyCode\""s, ResultType::Return, "currencyCode"s }, - { "\"RUR\""s, ResultType::Return, "RUR"s }, - { "\"impressions\""s, ResultType::Return, "impressions"s }, - { "\"name\""s, ResultType::Return, "name"s }, - { "\"Чайник электрический Mystery MEK-1627, белый\""s, ResultType::Return, "Чайник электрический Mystery MEK-1627, белый"s }, - { "\"brand\""s, ResultType::Return, "brand"s }, - { "\"Mystery\""s, ResultType::Return, "Mystery"s }, - { "\"id\""s, ResultType::Return, "id"s }, - { "\"187180\""s, ResultType::Return, "187180"s }, - { "\"category\""s, ResultType::Return, "category"s }, - { "\"Мелкая бытовая техника/Мелкие кухонные приборы/Чайники электрические/Mystery\""s, ResultType::Return, "Мелкая бытовая техника/Мелкие кухонные приборы/Чайники электрические/Mystery"s }, - { "\"variant\""s, ResultType::Return, "variant"s }, - { "\"В наличии\""s, ResultType::Return, "В наличии"s }, - { "\"price\""s, ResultType::Return, "price"s }, - { "\"1630.00\""s, ResultType::Return, 
"1630.00"s }, - { "\"list\""s, ResultType::Return, "list"s }, - { "\"Карточка\""s, ResultType::Return, "Карточка"s }, - { "\"position\""s, ResultType::Return, "position"s }, - { "\"detail\""s, ResultType::Return, "detail"s }, - { "\"actionField\""s, ResultType::Return, "actionField"s }, - { "\"list\""s, ResultType::Return, "list"s }, + { R"("category")"s, ResultType::Return, "category"s }, + { R"("/Одежда и обувь/Мужская одежда/Футболки/")"s, ResultType::Return, "/Одежда и обувь/Мужская одежда/Футболки/"s }, + { R"("variant")"s, ResultType::Return, "variant"s }, + { R"("")"s, ResultType::Return, ""s }, + { R"("price")"s, ResultType::Return, "price"s }, + { R"("406.60")"s, ResultType::Return, "406.60"s }, + { R"("list")"s, ResultType::Return, "list"s }, + { R"("/retailrocket/")"s, ResultType::Return, "/retailrocket/"s }, + { R"("position")"s, ResultType::Return, "position"s }, + { R"("brand")"s, ResultType::Return, "brand"s }, + { R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")"s, ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/"s }, + { R"("id")"s, ResultType::Return, "id"s }, + { R"("913361")"s, ResultType::Return, "913361"s }, + { R"("name")"s, ResultType::Return, "name"s }, + { R"("3Д Футболка детская World р-р 8-10, 100% хлопок, трикотаж")"s, ResultType::Return, "3Д Футболка детская World р-р 8-10, 100% хлопок, трикотаж"s }, + { R"("category")"s, ResultType::Return, "category"s }, + { R"("/Летние товары/Летний текстиль/")"s, ResultType::Return, "/Летние товары/Летний текстиль/"s }, + { R"("variant")"s, ResultType::Return, "variant"s }, + { R"("")"s, ResultType::Return, ""s }, + { R"("price")"s, ResultType::Return, "price"s }, + { R"("470.00")"s, ResultType::Return, "470.00"s }, + { R"("list")"s, ResultType::Return, "list"s }, + { R"("/retailrocket/")"s, ResultType::Return, "/retailrocket/"s }, + { R"("position")"s, ResultType::Return, "position"s }, + { R"("brand")"s, ResultType::Return, "brand"s }, + { R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")"s, ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/"s }, + { R"("id")"s, ResultType::Return, "id"s }, + { R"("913364")"s, ResultType::Return, "913364"s }, + { R"("name")"s, ResultType::Return, "name"s }, + { R"("3Д Футболка детская Force р-р 8-10, 100% хлопок, трикотаж")"s, ResultType::Return, "3Д Футболка детская Force р-р 8-10, 100% хлопок, трикотаж"s }, + { R"("category")"s, ResultType::Return, "category"s }, + { R"("/Летние товары/Летний текстиль/")"s, ResultType::Return, "/Летние товары/Летний текстиль/"s }, + { R"("variant")"s, ResultType::Return, "variant"s }, + { R"("")"s, ResultType::Return, ""s }, + { R"("price")"s, ResultType::Return, "price"s }, + { R"("470.00")"s, ResultType::Return, "470.00"s }, + { R"("list")"s, ResultType::Return, "list"s }, + { R"("/retailrocket/")"s, ResultType::Return, "/retailrocket/"s }, + { R"("position")"s, ResultType::Return, "position"s }, + { R"("brand")"s, ResultType::Return, "brand"s }, + { R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")"s, ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/"s }, + { R"("id")"s, ResultType::Return, "id"s }, + { R"("913367")"s, ResultType::Return, "913367"s }, + { R"("name")"s, ResultType::Return, "name"s }, + { R"("3Д Футболка детская Winter tale р-р 8-10, 100% хлопок, трикотаж")"s, ResultType::Return, "3Д Футболка детская Winter tale р-р 8-10, 100% хлопок, трикотаж"s }, + { 
R"("category")"s, ResultType::Return, "category"s }, + { R"("/Летние товары/Летний текстиль/")"s, ResultType::Return, "/Летние товары/Летний текстиль/"s }, + { R"("variant")"s, ResultType::Return, "variant"s }, + { R"("")"s, ResultType::Return, ""s }, + { R"("price")"s, ResultType::Return, "price"s }, + { R"("470.00")"s, ResultType::Return, "470.00"s }, + { R"("list")"s, ResultType::Return, "list"s }, + { R"("/retailrocket/")"s, ResultType::Return, "/retailrocket/"s }, + { R"("position")"s, ResultType::Return, "position"s }, + { R"("brand")"s, ResultType::Return, "brand"s }, + { R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")"s, ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/"s }, + { R"("id")"s, ResultType::Return, "id"s }, + { R"("913385")"s, ResultType::Return, "913385"s }, + { R"("name")"s, ResultType::Return, "name"s }, + { R"("3Д Футболка детская Moonshine р-р 8-10, 100% хлопок, трикотаж")"s, ResultType::Return, "3Д Футболка детская Moonshine р-р 8-10, 100% хлопок, трикотаж"s }, + { R"("category")"s, ResultType::Return, "category"s }, + { R"("/Летние товары/Летний текстиль/")"s, ResultType::Return, "/Летние товары/Летний текстиль/"s }, + { R"("variant")"s, ResultType::Return, "variant"s }, + { R"("")"s, ResultType::Return, ""s }, + { R"("price")"s, ResultType::Return, "price"s }, + { R"("470.00")"s, ResultType::Return, "470.00"s }, + { R"("list")"s, ResultType::Return, "list"s }, + { R"("/retailrocket/")"s, ResultType::Return, "/retailrocket/"s }, + { R"("position")"s, ResultType::Return, "position"s }, + { R"("brand")"s, ResultType::Return, "brand"s }, + { R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")"s, ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/"s }, + { R"("id")"s, ResultType::Return, "id"s }, + { R"("913391")"s, ResultType::Return, "913391"s }, + { R"("name")"s, ResultType::Return, "name"s }, + { R"("3Д Футболка детская Shaman р-р 8-10, 100% хлопок, трикотаж")"s, ResultType::Return, "3Д Футболка детская Shaman р-р 8-10, 100% хлопок, трикотаж"s }, + { R"("category")"s, ResultType::Return, "category"s }, + { R"("/Летние товары/Летний текстиль/")"s, ResultType::Return, "/Летние товары/Летний текстиль/"s }, + { R"("variant")"s, ResultType::Return, "variant"s }, + { R"("")"s, ResultType::Return, ""s }, + { R"("price")"s, ResultType::Return, "price"s }, + { R"("470.00")"s, ResultType::Return, "470.00"s }, + { R"("list")"s, ResultType::Return, "list"s }, + { R"("/retailrocket/")"s, ResultType::Return, "/retailrocket/"s }, + { R"("position")"s, ResultType::Return, "position"s }, + { R"("brand")"s, ResultType::Return, "brand"s }, + { R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")"s, ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/"s }, + { R"("usertype")"s, ResultType::Return, "usertype"s }, + { R"("visitor")"s, ResultType::Return, "visitor"s }, + { R"("lang")"s, ResultType::Return, "lang"s }, + { R"("ru")"s, ResultType::Return, "ru"s }, + { R"("__ym")"s, ResultType::Return, "__ym"s }, + { R"("ecommerce")"s, ResultType::Return, "ecommerce"s }, + { R"("impressions")"s, ResultType::Return, "impressions"s }, + { R"("experiments")"s, ResultType::Return, "experiments"s }, + { R"("lang")"s, ResultType::Return, "lang"s }, + { R"("ru")"s, ResultType::Return, "ru"s }, + { R"("los_portal")"s, ResultType::Return, "los_portal"s }, + { R"("los_level")"s, ResultType::Return, "los_level"s }, + { R"("none")"s, 
ResultType::Return, "none"s }, + { R"("experiments")"s, ResultType::Return, "experiments"s }, + { R"("lang")"s, ResultType::Return, "lang"s }, + { R"("ru")"s, ResultType::Return, "ru"s }, + { R"("los_portal")"s, ResultType::Return, "los_portal"s }, + { R"("los_level")"s, ResultType::Return, "los_level"s }, + { R"("none")"s, ResultType::Return, "none"s }, + { R"("experiments")"s, ResultType::Return, "experiments"s }, + { R"("lang")"s, ResultType::Return, "lang"s }, + { R"("ru")"s, ResultType::Return, "ru"s }, + { R"("los_portal")"s, ResultType::Return, "los_portal"s }, + { R"("los_level")"s, ResultType::Return, "los_level"s }, + { R"("none")"s, ResultType::Return, "none"s }, + { R"("experiments")"s, ResultType::Return, "experiments"s }, + { R"("lang")"s, ResultType::Return, "lang"s }, + { R"("ru")"s, ResultType::Return, "ru"s }, + { R"("los_portal")"s, ResultType::Return, "los_portal"s }, + { R"("los_level")"s, ResultType::Return, "los_level"s }, + { R"("none")"s, ResultType::Return, "none"s }, + { R"("experiments")"s, ResultType::Return, "experiments"s }, + { R"("lang")"s, ResultType::Return, "lang"s }, + { R"("ru")"s, ResultType::Return, "ru"s }, + { R"("los_portal")"s, ResultType::Return, "los_portal"s }, + { R"("los_level")"s, ResultType::Return, "los_level"s }, + { R"("none")"s, ResultType::Return, "none"s }, + { R"("__ym")"s, ResultType::Return, "__ym"s }, + { R"("ecommerce")"s, ResultType::Return, "ecommerce"s }, + { R"("currencyCode")"s, ResultType::Return, "currencyCode"s }, + { R"("RUR")"s, ResultType::Return, "RUR"s }, + { R"("impressions")"s, ResultType::Return, "impressions"s }, + { R"("name")"s, ResultType::Return, "name"s }, + { R"("Чайник электрический Mystery MEK-1627, белый")"s, ResultType::Return, "Чайник электрический Mystery MEK-1627, белый"s }, + { R"("brand")"s, ResultType::Return, "brand"s }, + { R"("Mystery")"s, ResultType::Return, "Mystery"s }, + { R"("id")"s, ResultType::Return, "id"s }, + { R"("187180")"s, ResultType::Return, "187180"s }, + { R"("category")"s, ResultType::Return, "category"s }, + { R"("Мелкая бытовая техника/Мелкие кухонные приборы/Чайники электрические/Mystery")"s, ResultType::Return, "Мелкая бытовая техника/Мелкие кухонные приборы/Чайники электрические/Mystery"s }, + { R"("variant")"s, ResultType::Return, "variant"s }, + { R"("В наличии")"s, ResultType::Return, "В наличии"s }, + { R"("price")"s, ResultType::Return, "price"s }, + { R"("1630.00")"s, ResultType::Return, "1630.00"s }, + { R"("list")"s, ResultType::Return, "list"s }, + { R"("Карточка")"s, ResultType::Return, "Карточка"s }, + { R"("position")"s, ResultType::Return, "position"s }, + { R"("detail")"s, ResultType::Return, "detail"s }, + { R"("actionField")"s, ResultType::Return, "actionField"s }, + { R"("list")"s, ResultType::Return, "list"s }, { "\0\""s, ResultType::Throw, "JSON: expected \", got \0"s }, { "\"/igrushki/konstruktory\0"s, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."s }, { "\"/1290414/komplekt-zhenskiy-dzhemper-plusbryuki-m-254-09-malina-plustemno-siniy-\0a"s, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."s }, diff --git a/base/common/tests/gtest_strong_typedef.cpp b/base/common/tests/gtest_strong_typedef.cpp index 6bf2f1eaad1..cb358459e68 100644 --- a/base/common/tests/gtest_strong_typedef.cpp +++ b/base/common/tests/gtest_strong_typedef.cpp @@ -1,5 +1,3 @@ -#define BOOST_TEST_MODULE StrongTypedef - #include #include #include @@ -12,15 +10,15 @@ TEST(StrongTypedefSuite, 
TypedefsOfTheSameType)
 {
     /// check that strong typedefs of same type differ
-    STRONG_TYPEDEF(int, Int);
-    STRONG_TYPEDEF(int, AnotherInt);
+    STRONG_TYPEDEF(int, Int)
+    STRONG_TYPEDEF(int, AnotherInt)
 
     EXPECT_TRUE(!(std::is_same<Int, AnotherInt>::value));
 }
 
 TEST(StrongTypedefSuite, Map)
 {
-    STRONG_TYPEDEF(int, Int);
+    STRONG_TYPEDEF(int, Int)
 
     /// check that this code compiles
     std::set<Int> int_set;
@@ -31,13 +29,13 @@ TEST(StrongTypedefSuite, Map)
 
 TEST(StrongTypedefSuite, CopyAndMoveCtor)
 {
-    STRONG_TYPEDEF(int, Int);
+    STRONG_TYPEDEF(int, Int)
 
     Int a(1);
     Int b(2);
     a = b;
     EXPECT_EQ(a.toUnderType(), 2);
 
-    STRONG_TYPEDEF(std::unique_ptr<int>, IntPtr);
+    STRONG_TYPEDEF(std::unique_ptr<int>, IntPtr)
     {
         IntPtr ptr;
         ptr = IntPtr(std::make_unique<int>(3));
@@ -54,9 +52,9 @@ TEST(StrongTypedefSuite, NoDefaultCtor)
 {
     struct NoDefaultCtor
     {
-        NoDefaultCtor(int i) {}
+        NoDefaultCtor(int) {} // NOLINT
     };
-    STRONG_TYPEDEF(NoDefaultCtor, MyStruct);
+    STRONG_TYPEDEF(NoDefaultCtor, MyStruct)
 
     MyStruct m(1);
 }
diff --git a/base/common/time.h b/base/common/time.h
index 9a52d8e40b8..1bf588b7cb3 100644
--- a/base/common/time.h
+++ b/base/common/time.h
@@ -4,4 +4,6 @@
 
 #if defined (OS_DARWIN)
 # define CLOCK_MONOTONIC_COARSE CLOCK_MONOTONIC
+#elif defined (OS_FREEBSD)
+# define CLOCK_MONOTONIC_COARSE CLOCK_MONOTONIC_FAST
 #endif
diff --git a/base/common/types.h b/base/common/types.h
new file mode 100644
index 00000000000..238e4e3956b
--- /dev/null
+++ b/base/common/types.h
@@ -0,0 +1,62 @@
+#pragma once
+
+#include
+#include
+#include
+#include
+#include
+
+using Int8 = int8_t;
+using Int16 = int16_t;
+using Int32 = int32_t;
+using Int64 = int64_t;
+
+#if __cplusplus <= 201703L
+using char8_t = unsigned char;
+#endif
+
+using UInt8 = char8_t;
+using UInt16 = uint16_t;
+using UInt32 = uint32_t;
+using UInt64 = uint64_t;
+
+using String = std::string;
+
+/// The standard library type traits, such as std::is_arithmetic, with one exception
+/// (std::common_type), are "set in stone". Attempting to specialize them causes undefined behavior.
+/// So instead of using the std type_traits, we use our own version which allows extension.
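
The comment above is the rationale for the trait wrappers that follow in this header: the std:: traits must not be specialized, so the project wraps them in its own templates that user-defined types can opt into. A hypothetical, standalone sketch of such an opt-in; Int128Stub and its specializations are illustrative and not part of the patch:

#include <type_traits>

/// Assumes the is_signed / is_integral wrappers and their *_v shortcuts
/// declared in base/common/types.h just below; Int128Stub is a made-up type.
struct Int128Stub
{
    Int64 high;
    UInt64 low;
};

/// Specializing the project-owned wrappers is well defined...
template <> struct is_integral<Int128Stub> { static constexpr bool value = true; };
template <> struct is_signed<Int128Stub> { static constexpr bool value = true; };

static_assert(is_integral_v<Int128Stub>);        /// the wrapper now reports true for the custom type
static_assert(!std::is_integral_v<Int128Stub>);  /// ...while specializing std::is_integral itself would be UB
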
+template +struct is_signed +{ + static constexpr bool value = std::is_signed_v; +}; + +template +inline constexpr bool is_signed_v = is_signed::value; + +template +struct is_unsigned +{ + static constexpr bool value = std::is_unsigned_v; +}; + +template +inline constexpr bool is_unsigned_v = is_unsigned::value; + +template +struct is_integral +{ + static constexpr bool value = std::is_integral_v; +}; + +template +inline constexpr bool is_integral_v = is_integral::value; + +template +struct is_arithmetic +{ + static constexpr bool value = std::is_arithmetic_v; +}; + +template +inline constexpr bool is_arithmetic_v = is_arithmetic::value; diff --git a/base/common/ya.make b/base/common/ya.make new file mode 100644 index 00000000000..fbe63baafdf --- /dev/null +++ b/base/common/ya.make @@ -0,0 +1,51 @@ +LIBRARY() + +ADDINCL( + GLOBAL clickhouse/base + contrib/libs/cctz/include +) + +CFLAGS (GLOBAL -DARCADIA_BUILD) + +CFLAGS (GLOBAL -DUSE_CPUID=1) +CFLAGS (GLOBAL -DUSE_JEMALLOC=0) +CFLAGS (GLOBAL -DUSE_RAPIDJSON=1) + +IF (OS_DARWIN) + CFLAGS (GLOBAL -DOS_DARWIN) +ELSEIF (OS_FREEBSD) + CFLAGS (GLOBAL -DOS_FREEBSD) +ELSEIF (OS_LINUX) + CFLAGS (GLOBAL -DOS_LINUX) +ENDIF () + +PEERDIR( + contrib/libs/cctz/src + contrib/libs/cxxsupp/libcxx-filesystem + contrib/libs/poco/Net + contrib/libs/poco/Util + contrib/restricted/boost + contrib/restricted/cityhash-1.0.2 +) + +SRCS( + argsToConfig.cpp + coverage.cpp + DateLUT.cpp + DateLUTImpl.cpp + demangle.cpp + getFQDNOrHostName.cpp + getMemoryAmount.cpp + getThreadId.cpp + JSON.cpp + LineReader.cpp + mremap.cpp + phdr_cache.cpp + preciseExp10.cpp + setTerminalEcho.cpp + shift10.cpp + sleep.cpp + terminalColors.cpp +) + +END() diff --git a/base/daemon/src/BaseDaemon.cpp b/base/daemon/BaseDaemon.cpp similarity index 85% rename from base/daemon/src/BaseDaemon.cpp rename to base/daemon/BaseDaemon.cpp index b9e94d288d6..6625ba80b7b 100644 --- a/base/daemon/src/BaseDaemon.cpp +++ b/base/daemon/BaseDaemon.cpp @@ -12,7 +12,6 @@ #include #include -#include #include #include #include @@ -51,11 +50,14 @@ #include #include #include -#include -#ifdef __APPLE__ -// ucontext is not available without _XOPEN_SOURCE -#define _XOPEN_SOURCE 700 +#if !defined(ARCADIA_BUILD) +# include +#endif + +#if defined(OS_DARWIN) +# pragma GCC diagnostic ignored "-Wunused-macros" +# define _XOPEN_SOURCE 700 // ucontext is not available without _XOPEN_SOURCE #endif #include @@ -75,7 +77,7 @@ static void call_default_signal_handler(int sig) static constexpr size_t max_query_id_size = 127; -static const size_t buf_size = +static const size_t signal_pipe_buf_size = sizeof(int) + sizeof(siginfo_t) + sizeof(ucontext_t) @@ -88,19 +90,23 @@ using signal_function = void(int, siginfo_t*, void*); static void writeSignalIDtoSignalPipe(int sig) { - char buf[buf_size]; - DB::WriteBufferFromFileDescriptor out(signal_pipe.fds_rw[1], buf_size, buf); + auto saved_errno = errno; /// We must restore previous value of errno in signal handler. 
+ + char buf[signal_pipe_buf_size]; + DB::WriteBufferFromFileDescriptor out(signal_pipe.fds_rw[1], signal_pipe_buf_size, buf); DB::writeBinary(sig, out); out.next(); + + errno = saved_errno; } /** Signal handler for HUP / USR1 */ -static void closeLogsSignalHandler(int sig, siginfo_t * info, void * context) +static void closeLogsSignalHandler(int sig, siginfo_t *, void *) { writeSignalIDtoSignalPipe(sig); } -static void terminateRequestedSignalHandler(int sig, siginfo_t * info, void * context) +static void terminateRequestedSignalHandler(int sig, siginfo_t *, void *) { writeSignalIDtoSignalPipe(sig); } @@ -110,8 +116,10 @@ static void terminateRequestedSignalHandler(int sig, siginfo_t * info, void * co */ static void signalHandler(int sig, siginfo_t * info, void * context) { - char buf[buf_size]; - DB::WriteBufferFromFileDescriptorDiscardOnFailure out(signal_pipe.fds_rw[1], buf_size, buf); + auto saved_errno = errno; /// We must restore previous value of errno in signal handler. + + char buf[signal_pipe_buf_size]; + DB::WriteBufferFromFileDescriptorDiscardOnFailure out(signal_pipe.fds_rw[1], signal_pipe_buf_size, buf); const ucontext_t signal_context = *reinterpret_cast(context); const StackTrace stack_trace(signal_context); @@ -134,6 +142,8 @@ static void signalHandler(int sig, siginfo_t * info, void * context) ::sleep(10); call_default_signal_handler(sig); } + + errno = saved_errno; } @@ -157,15 +167,20 @@ public: { } - void run() + void run() override { - char buf[buf_size]; - DB::ReadBufferFromFileDescriptor in(signal_pipe.fds_rw[0], buf_size, buf); + char buf[signal_pipe_buf_size]; + DB::ReadBufferFromFileDescriptor in(signal_pipe.fds_rw[0], signal_pipe_buf_size, buf); while (!in.eof()) { int sig = 0; DB::readBinary(sig, in); + // We may log some specific signals afterwards, with different log + // levels and more info, but for completeness we log all signals + // here at trace level. + // Don't use strsignal here, because it's not thread-safe. + LOG_TRACE(log, "Received signal " << sig); if (sig == Signals::StopThread) { @@ -219,7 +234,6 @@ private: Logger * log; BaseDaemon & daemon; -private: void onTerminate(const std::string & message, UInt32 thread_num) const { LOG_FATAL(log, "(version " << VERSION_STRING << VERSION_OFFICIAL << ") (from thread " << thread_num << ") " << message); @@ -269,19 +283,56 @@ private: }; +#if defined(SANITIZER) +extern "C" void __sanitizer_set_death_callback(void (*)()); + +static void sanitizerDeathCallback() +{ + Logger * log = &Logger::get("BaseDaemon"); + + StringRef query_id = CurrentThread::getQueryId(); /// This is signal safe. + + { + std::stringstream message; + message << "(version " << VERSION_STRING << VERSION_OFFICIAL << ")"; + message << " (from thread " << getThreadId() << ")"; + if (query_id.size == 0) + message << " (no query)"; + else + message << " (query_id: " << query_id << ")"; + message << " Sanitizer trap."; + + LOG_FATAL(log, message.rdbuf()); + } + + /// Just in case print our own stack trace. In case when llvm-symbolizer does not work. + StackTrace stack_trace; + if (stack_trace.getSize()) + { + std::stringstream bare_stacktrace; + bare_stacktrace << "Stack trace:"; + for (size_t i = stack_trace.getOffset(); i < stack_trace.getSize(); ++i) + bare_stacktrace << ' ' << stack_trace.getFrames()[i]; + + LOG_FATAL(log, bare_stacktrace.rdbuf()); + } + + /// Write symbolized stack trace line by line for better grep-ability. 
+ stack_trace.toStringEveryLine([&](const std::string & s) { LOG_FATAL(log, s); }); +} +#endif + + /** To use with std::set_terminate. * Collects slightly more info than __gnu_cxx::__verbose_terminate_handler, * and send it to pipe. Other thread will read this info from pipe and asynchronously write it to log. * Look at libstdc++-v3/libsupc++/vterminate.cc for example. */ -static void terminate_handler() +[[noreturn]] static void terminate_handler() { static thread_local bool terminating = false; if (terminating) - { abort(); - return; /// Just for convenience. - } terminating = true; @@ -354,10 +405,7 @@ void BaseDaemon::reloadConfiguration() } -BaseDaemon::BaseDaemon() -{ - checkRequiredInstructions(); -} +BaseDaemon::BaseDaemon() = default; BaseDaemon::~BaseDaemon() @@ -368,127 +416,6 @@ BaseDaemon::~BaseDaemon() } -enum class InstructionFail -{ - NONE = 0, - SSE3 = 1, - SSSE3 = 2, - SSE4_1 = 3, - SSE4_2 = 4, - AVX = 5, - AVX2 = 6, - AVX512 = 7 -}; - -static std::string instructionFailToString(InstructionFail fail) -{ - switch (fail) - { - case InstructionFail::NONE: - return "NONE"; - case InstructionFail::SSE3: - return "SSE3"; - case InstructionFail::SSSE3: - return "SSSE3"; - case InstructionFail::SSE4_1: - return "SSE4.1"; - case InstructionFail::SSE4_2: - return "SSE4.2"; - case InstructionFail::AVX: - return "AVX"; - case InstructionFail::AVX2: - return "AVX2"; - case InstructionFail::AVX512: - return "AVX512"; - } - __builtin_unreachable(); -} - - -static sigjmp_buf jmpbuf; - -static void sigIllCheckHandler(int sig, siginfo_t * info, void * context) -{ - siglongjmp(jmpbuf, 1); -} - -/// Check if necessary sse extensions are available by trying to execute some sse instructions. -/// If instruction is unavailable, SIGILL will be sent by kernel. -static void checkRequiredInstructions(volatile InstructionFail & fail) -{ -#if __SSE3__ - fail = InstructionFail::SSE3; - __asm__ volatile ("addsubpd %%xmm0, %%xmm0" : : : "xmm0"); -#endif - -#if __SSSE3__ - fail = InstructionFail::SSSE3; - __asm__ volatile ("pabsw %%xmm0, %%xmm0" : : : "xmm0"); - -#endif - -#if __SSE4_1__ - fail = InstructionFail::SSE4_1; - __asm__ volatile ("pmaxud %%xmm0, %%xmm0" : : : "xmm0"); -#endif - -#if __SSE4_2__ - fail = InstructionFail::SSE4_2; - __asm__ volatile ("pcmpgtq %%xmm0, %%xmm0" : : : "xmm0"); -#endif - -#if __AVX__ - fail = InstructionFail::AVX; - __asm__ volatile ("vaddpd %%ymm0, %%ymm0, %%ymm0" : : : "ymm0"); -#endif - -#if __AVX2__ - fail = InstructionFail::AVX2; - __asm__ volatile ("vpabsw %%ymm0, %%ymm0" : : : "ymm0"); -#endif - -#if __AVX512__ - fail = InstructionFail::AVX512; - __asm__ volatile ("vpabsw %%zmm0, %%zmm0" : : : "zmm0"); -#endif - - fail = InstructionFail::NONE; -} - - -void BaseDaemon::checkRequiredInstructions() -{ - struct sigaction sa{}; - struct sigaction sa_old{}; - sa.sa_sigaction = sigIllCheckHandler; - sa.sa_flags = SA_SIGINFO; - auto signal = SIGILL; - if (sigemptyset(&sa.sa_mask) != 0 - || sigaddset(&sa.sa_mask, signal) != 0 - || sigaction(signal, &sa, &sa_old) != 0) - { - std::cerr << "Can not set signal handler\n"; - exit(1); - } - - volatile InstructionFail fail = InstructionFail::NONE; - - if (sigsetjmp(jmpbuf, 1)) - { - std::cerr << "Instruction check fail. 
There is no " << instructionFailToString(fail) << " instruction set\n"; - exit(1); - } - - ::checkRequiredInstructions(fail); - - if (sigaction(signal, &sa_old, nullptr)) - { - std::cerr << "Can not set signal handler\n"; - exit(1); - } -} - - void BaseDaemon::terminate() { getTaskManager().cancelAll(); @@ -522,7 +449,7 @@ std::string BaseDaemon::getDefaultCorePath() const void BaseDaemon::closeFDs() { -#if defined(__FreeBSD__) || (defined(__APPLE__) && defined(__MACH__)) +#if defined(OS_FREEBSD) || defined(OS_DARWIN) Poco::File proc_path{"/dev/fd"}; #else Poco::File proc_path{"/proc/self/fd"}; @@ -542,7 +469,7 @@ void BaseDaemon::closeFDs() else { int max_fd = -1; -#ifdef _SC_OPEN_MAX +#if defined(_SC_OPEN_MAX) max_fd = sysconf(_SC_OPEN_MAX); if (max_fd == -1) #endif @@ -560,7 +487,7 @@ namespace /// the maximum is 1000, and chromium uses 300 for its tab processes. Ignore /// whatever errors that occur, because it's just a debugging aid and we don't /// care if it breaks. -#if defined(__linux__) && !defined(NDEBUG) +#if defined(OS_LINUX) && !defined(NDEBUG) void debugIncreaseOOMScore() { const std::string new_score = "555"; @@ -585,7 +512,7 @@ void debugIncreaseOOMScore() {} void BaseDaemon::initialize(Application & self) { closeFDs(); - task_manager.reset(new Poco::TaskManager); + task_manager = std::make_unique(); ServerApplication::initialize(self); /// now highest priority (lowest value) is PRIO_APPLICATION = -100, we want higher! @@ -635,12 +562,12 @@ void BaseDaemon::initialize(Application & self) /// This must be done before any usage of DateLUT. In particular, before any logging. if (config().has("timezone")) { - const std::string timezone = config().getString("timezone"); - if (0 != setenv("TZ", timezone.data(), 1)) + const std::string config_timezone = config().getString("timezone"); + if (0 != setenv("TZ", config_timezone.data(), 1)) throw Poco::Exception("Cannot setenv TZ variable"); tzset(); - DateLUT::setDefaultTimezone(timezone); + DateLUT::setDefaultTimezone(config_timezone); } std::string log_path = config().getString("logger.log", ""); @@ -658,6 +585,9 @@ void BaseDaemon::initialize(Application & self) std::string stderr_path = config().getString("logger.stderr", log_path + "/stderr.log"); if (!freopen(stderr_path.c_str(), "a+", stderr)) throw Poco::OpenFileException("Cannot attach stderr to " + stderr_path); + + /// Disable buffering for stderr + setbuf(stderr, nullptr); } if ((!log_path.empty() && is_daemon) || config().has("logger.stdout")) @@ -744,12 +674,18 @@ void BaseDaemon::initializeTerminationAndSignalProcessing() sa.sa_flags = SA_SIGINFO; { +#if defined(OS_DARWIN) + sigemptyset(&sa.sa_mask); + for (auto signal : signals) + sigaddset(&sa.sa_mask, signal); +#else if (sigemptyset(&sa.sa_mask)) throw Poco::Exception("Cannot set signal handler."); for (auto signal : signals) if (sigaddset(&sa.sa_mask, signal)) throw Poco::Exception("Cannot set signal handler."); +#endif for (auto signal : signals) if (sigaction(signal, &sa, nullptr)) @@ -763,6 +699,10 @@ void BaseDaemon::initializeTerminationAndSignalProcessing() add_signal_handler({SIGHUP, SIGUSR1}, closeLogsSignalHandler); add_signal_handler({SIGINT, SIGQUIT, SIGTERM}, terminateRequestedSignalHandler); +#if defined(SANITIZER) + __sanitizer_set_death_callback(sanitizerDeathCallback); +#endif + /// Set up Poco ErrorHandler for Poco Threads. 
static KillingErrorHandler killing_error_handler; Poco::ErrorHandler::set(&killing_error_handler); @@ -770,13 +710,15 @@ void BaseDaemon::initializeTerminationAndSignalProcessing() signal_pipe.setNonBlocking(); signal_pipe.tryIncreaseSize(1 << 20); - signal_listener.reset(new SignalListener(*this)); + signal_listener = std::make_unique(*this); signal_listener_thread.start(*signal_listener); } void BaseDaemon::logRevision() const { - Logger::root().information("Starting " + std::string{VERSION_FULL} + " with revision " + std::to_string(ClickHouseRevision::get())); + Logger::root().information("Starting " + std::string{VERSION_FULL} + + " with revision " + std::to_string(ClickHouseRevision::get()) + + ", PID " + std::to_string(getpid())); } /// Makes server shutdown if at least one Poco::Task have failed. @@ -796,44 +738,42 @@ void BaseDaemon::handleNotification(Poco::TaskFailedNotification *_tfn) ServerApplication::terminate(); } -void BaseDaemon::defineOptions(Poco::Util::OptionSet& _options) +void BaseDaemon::defineOptions(Poco::Util::OptionSet & new_options) { - Poco::Util::ServerApplication::defineOptions (_options); - - _options.addOption( + new_options.addOption( Poco::Util::Option("config-file", "C", "load configuration from a given file") .required(false) .repeatable(false) .argument("") .binding("config-file")); - _options.addOption( + new_options.addOption( Poco::Util::Option("log-file", "L", "use given log file") .required(false) .repeatable(false) .argument("") .binding("logger.log")); - _options.addOption( + new_options.addOption( Poco::Util::Option("errorlog-file", "E", "use given log file for errors only") .required(false) .repeatable(false) .argument("") .binding("logger.errorlog")); - _options.addOption( + new_options.addOption( Poco::Util::Option("pid-file", "P", "use given pidfile") .required(false) .repeatable(false) .argument("") .binding("pid")); + + Poco::Util::ServerApplication::defineOptions(new_options); } bool isPidRunning(pid_t pid) { - if (getpgid(pid) >= 0) - return 1; - return 0; + return getpgid(pid) >= 0; } BaseDaemon::PID::PID(const std::string & file_) diff --git a/base/daemon/include/daemon/BaseDaemon.h b/base/daemon/BaseDaemon.h similarity index 97% rename from base/daemon/include/daemon/BaseDaemon.h rename to base/daemon/BaseDaemon.h index b7070c76e9b..39332cfe963 100644 --- a/base/daemon/include/daemon/BaseDaemon.h +++ b/base/daemon/BaseDaemon.h @@ -17,7 +17,7 @@ #include #include #include -#include +#include #include #include #include @@ -58,7 +58,7 @@ public: void reloadConfiguration(); /// Определяет параметр командной строки - void defineOptions(Poco::Util::OptionSet & _options) override; + void defineOptions(Poco::Util::OptionSet & new_options) override; /// Заставляет демон завершаться, если хотя бы одна задача завершилась неудачно void exitOnTaskError(); @@ -128,7 +128,7 @@ public: /// close all process FDs except /// 0-2 -- stdin, stdout, stderr /// also doesn't close global internal pipes for signal handling - void closeFDs(); + static void closeFDs(); protected: /// Возвращает TaskManager приложения @@ -198,12 +198,6 @@ protected: std::string config_path; DB::ConfigProcessor::LoadedConfig loaded_config; Poco::Util::AbstractConfiguration * last_configuration = nullptr; - -private: - - /// Check SSE and others instructions availability - /// Calls exit on fail - void checkRequiredInstructions(); }; diff --git a/base/daemon/CMakeLists.txt b/base/daemon/CMakeLists.txt index e7207cb7e85..5d9a37dc75e 100644 --- a/base/daemon/CMakeLists.txt +++ 
b/base/daemon/CMakeLists.txt @@ -1,11 +1,7 @@ add_library (daemon - src/BaseDaemon.cpp - src/GraphiteWriter.cpp - - include/daemon/BaseDaemon.h - include/daemon/GraphiteWriter.h + BaseDaemon.cpp + GraphiteWriter.cpp ) -target_include_directories (daemon PUBLIC include) - -target_link_libraries (daemon PUBLIC loggers PRIVATE clickhouse_common_io clickhouse_common_config common ${Poco_Net_LIBRARY} ${Poco_Util_LIBRARY} ${EXECINFO_LIBRARIES}) +target_include_directories (daemon PUBLIC ..) +target_link_libraries (daemon PUBLIC loggers PRIVATE clickhouse_common_io clickhouse_common_config common ${EXECINFO_LIBRARIES}) diff --git a/base/daemon/src/GraphiteWriter.cpp b/base/daemon/GraphiteWriter.cpp similarity index 95% rename from base/daemon/src/GraphiteWriter.cpp rename to base/daemon/GraphiteWriter.cpp index 41817ca86a2..f28019dec01 100644 --- a/base/daemon/src/GraphiteWriter.cpp +++ b/base/daemon/GraphiteWriter.cpp @@ -2,7 +2,7 @@ #include #include #include -#include +#include #include #include @@ -30,7 +30,7 @@ GraphiteWriter::GraphiteWriter(const std::string & config_name, const std::strin root_path += hostname_in_path; } - if (sub_path.size()) + if (!sub_path.empty()) { if (!root_path.empty()) root_path += "."; diff --git a/base/daemon/include/daemon/GraphiteWriter.h b/base/daemon/GraphiteWriter.h similarity index 100% rename from base/daemon/include/daemon/GraphiteWriter.h rename to base/daemon/GraphiteWriter.h diff --git a/base/daemon/ya.make b/base/daemon/ya.make new file mode 100644 index 00000000000..1c72af3ed53 --- /dev/null +++ b/base/daemon/ya.make @@ -0,0 +1,14 @@ +LIBRARY() + +NO_COMPILER_WARNINGS() + +PEERDIR( + clickhouse/src/Common +) + +SRCS( + BaseDaemon.cpp + GraphiteWriter.cpp +) + +END() diff --git a/base/ext/chrono_io.h b/base/ext/chrono_io.h index 8fa448b9e6a..40b76a96478 100644 --- a/base/ext/chrono_io.h +++ b/base/ext/chrono_io.h @@ -1,16 +1,30 @@ #pragma once #include +#include #include -#include +#include namespace ext { + inline std::string to_string(const std::time_t & time) + { + std::stringstream ss; + ss << std::put_time(std::localtime(&time), "%Y-%m-%d %X"); + return ss.str(); + } + template std::string to_string(const std::chrono::time_point & tp) { - return DateLUT::instance().timeToString(std::chrono::system_clock::to_time_t(tp)); + // Don't use DateLUT because it shows weird characters for + // TimePoint::max(). I wish we could use C++20 format, but it's not + // there yet. + // return DateLUT::instance().timeToString(std::chrono::system_clock::to_time_t(tp)); + + auto in_time_t = std::chrono::system_clock::to_time_t(tp); + return to_string(in_time_t); } template > diff --git a/base/ext/range.h b/base/ext/range.h index c379d453f7b..266016f5779 100644 --- a/base/ext/range.h +++ b/base/ext/range.h @@ -1,42 +1,62 @@ #pragma once -#include #include #include +#include namespace ext { - /// For loop adaptor which is used to iterate through a half-closed interval [begin, end). - template - inline auto range(BeginType begin, EndType end) +namespace internal +{ + template + auto rangeImpl(BeginType begin, EndType end) { - using CommonType = typename std::common_type::type; - return boost::counting_range(begin, end); - } - - template - inline auto range(Type end) - { - return range(static_cast(0), end); - } - - /// The same as range(), but every value is casted statically to a specified `ValueType`. - /// This is useful to iterate through all constants of a enum. 
- template - inline auto range_with_static_cast(BeginType begin, EndType end) - { - using CommonType = typename std::common_type::type; - if constexpr (std::is_same_v) - return boost::counting_range(begin, end); + if constexpr (std::is_same_v) + return boost::counting_range(static_cast(begin), static_cast(end)); else - return boost::counting_range(begin, end) - | boost::adaptors::transformed([](CommonType x) -> ValueType { return static_cast(x); }); - } - - template - inline auto range_with_static_cast(EndType end) - { - return range_with_static_cast(static_cast(0), end); + return boost::counting_range(static_cast(begin), static_cast(end)) + | boost::adaptors::transformed([](CountingType x) { return static_cast(x); }); } } + + +/// For loop adaptor which is used to iterate through a half-closed interval [begin, end). +/// The parameters `begin` and `end` can have any integral or enum types. +template || std::is_enum_v) && + (std::is_integral_v || std::is_enum_v) && + (!std::is_enum_v || !std::is_enum_v || std::is_same_v), void>> +inline auto range(BeginType begin, EndType end) +{ + if constexpr (std::is_integral_v && std::is_integral_v) + { + using CommonType = std::common_type_t; + return internal::rangeImpl(begin, end); + } + else if constexpr (std::is_enum_v) + { + return internal::rangeImpl>(begin, end); + } + else + { + return internal::rangeImpl>(begin, end); + } +} + + +/// For loop adaptor which is used to iterate through a half-closed interval [0, end). +/// The parameter `end` can have any integral or enum type. +/// The same as range(0, end). +template || std::is_enum_v, void>> +inline auto range(Type end) +{ + if constexpr (std::is_integral_v) + return internal::rangeImpl(0, end); + else + return internal::rangeImpl>(0, end); +} +} diff --git a/base/ext/scope_guard.h b/base/ext/scope_guard.h index f5b986e7ab6..79bad56f360 100644 --- a/base/ext/scope_guard.h +++ b/base/ext/scope_guard.h @@ -12,20 +12,20 @@ class [[nodiscard]] basic_scope_guard { public: constexpr basic_scope_guard() = default; - constexpr basic_scope_guard(basic_scope_guard && src) : function{std::exchange(src.function, {})} {} + constexpr basic_scope_guard(basic_scope_guard && src) : function{src.release()} {} constexpr basic_scope_guard & operator=(basic_scope_guard && src) { if (this != &src) { invoke(); - function = std::exchange(src.function, {}); + function = src.release(); } return *this; } template , void>> - constexpr basic_scope_guard(basic_scope_guard && src) : function{std::exchange(src.function, {})} {} + constexpr basic_scope_guard(basic_scope_guard && src) : function{src.release()} {} template , void>> constexpr basic_scope_guard & operator=(basic_scope_guard && src) @@ -33,7 +33,7 @@ public: if (this != &src) { invoke(); - function = std::exchange(src.function, {}); + function = src.release(); } return *this; } @@ -46,14 +46,26 @@ public: ~basic_scope_guard() { invoke(); } + static constexpr bool is_nullable = std::is_constructible_v; + explicit operator bool() const { - if constexpr (std::is_constructible_v) + if constexpr (is_nullable) return static_cast(function); return true; } - void reset() { function = {}; } + void reset() + { + invoke(); + release(); + } + + F release() + { + static_assert(is_nullable); + return std::exchange(function, {}); + } template , void>> basic_scope_guard & join(basic_scope_guard && other) @@ -62,14 +74,14 @@ public: { if (function) { - function = [x = std::make_shared>(std::move(function), std::exchange(other.function, {}))]() + function = [x = 
std::make_shared>(std::move(function), other.release())]() { std::move(x->first)(); std::move(x->second)(); }; } else - function = std::exchange(other.function, {}); + function = other.release(); } return *this; } @@ -77,7 +89,7 @@ public: private: void invoke() { - if constexpr (std::is_constructible_v) + if constexpr (is_nullable) { if (!function) return; diff --git a/base/ext/singleton.h b/base/ext/singleton.h deleted file mode 100644 index 8392ae7fc01..00000000000 --- a/base/ext/singleton.h +++ /dev/null @@ -1,44 +0,0 @@ -#pragma once - -#include - -namespace ext -{ - -/** Thread-unsafe singleton. It works simply like a global variable. - * Supports deinitialization. - * - * In most of the cases, you don't need this class. - * Use "Meyers Singleton" instead: static T & instance() { static T x; return x; } - */ - -template -class Singleton -{ -public: - Singleton() - { - if (!instance) - instance = std::make_unique(); - } - - T * operator->() - { - return instance.get(); - } - - static bool isInitialized() - { - return !!instance; - } - - static void reset() - { - instance.reset(); - } - -private: - inline static std::unique_ptr instance{}; -}; - -} diff --git a/base/glibc-compatibility/CMakeLists.txt b/base/glibc-compatibility/CMakeLists.txt index 42fc8693dd0..2bd4e20d3bc 100644 --- a/base/glibc-compatibility/CMakeLists.txt +++ b/base/glibc-compatibility/CMakeLists.txt @@ -1,5 +1,5 @@ if (GLIBC_COMPATIBILITY) - set (USE_INTERNAL_MEMCPY ON) + set (ENABLE_FASTMEMCPY ON) enable_language(ASM) include(CheckIncludeFile) diff --git a/base/glibc-compatibility/musl/clock_getres.c b/base/glibc-compatibility/musl/clock_getres.c new file mode 100644 index 00000000000..95702709224 --- /dev/null +++ b/base/glibc-compatibility/musl/clock_getres.c @@ -0,0 +1,22 @@ +#include +#include +#include "syscall.h" + +int clock_getres(clockid_t clk, struct timespec *ts) +{ +#ifdef SYS_clock_getres_time64 + /* On a 32-bit arch, use the old syscall if it exists. */ + if (SYS_clock_getres != SYS_clock_getres_time64) { + long ts32[2]; + int r = __syscall(SYS_clock_getres, clk, ts32); + if (!r && ts) { + ts->tv_sec = ts32[0]; + ts->tv_nsec = ts32[1]; + } + return __syscall_ret(r); + } +#endif + /* If reaching this point, it's a 64-bit arch or time64-only + * 32-bit arch and we can get result directly into timespec. 
*/ + return syscall(SYS_clock_getres, clk, ts); +} diff --git a/base/glibc-compatibility/musl/clock_gettime.c b/base/glibc-compatibility/musl/clock_gettime.c index 574f9b83d15..b0807d5fd15 100644 --- a/base/glibc-compatibility/musl/clock_gettime.c +++ b/base/glibc-compatibility/musl/clock_gettime.c @@ -2,7 +2,6 @@ #include #include #include "atomic.h" -#include "musl_features.h" #include "syscall.h" #ifdef VDSO_CGT_SYM @@ -54,7 +53,7 @@ static void *volatile vdso_func = (void *)cgt_init; #endif -int __clock_gettime(clockid_t clk, struct timespec *ts) +int clock_gettime(clockid_t clk, struct timespec *ts) { int r; @@ -104,5 +103,3 @@ int __clock_gettime(clockid_t clk, struct timespec *ts) return __syscall_ret(r); #endif } - -weak_alias(__clock_gettime, clock_gettime); diff --git a/base/glibc-compatibility/musl/clock_nanosleep.c b/base/glibc-compatibility/musl/clock_nanosleep.c index bf71a5e84ac..7c7a9f78288 100644 --- a/base/glibc-compatibility/musl/clock_nanosleep.c +++ b/base/glibc-compatibility/musl/clock_nanosleep.c @@ -1,10 +1,9 @@ #include #include #include -#include "musl_features.h" #include "syscall.h" -int __clock_nanosleep(clockid_t clk, int flags, const struct timespec * req, struct timespec * rem) +int clock_nanosleep(clockid_t clk, int flags, const struct timespec * req, struct timespec * rem) { if (clk == CLOCK_THREAD_CPUTIME_ID) return EINVAL; @@ -23,5 +22,3 @@ int __clock_nanosleep(clockid_t clk, int flags, const struct timespec * req, str pthread_setcanceltype(old_cancel_type, NULL); return status; } - -weak_alias(__clock_nanosleep, clock_nanosleep); diff --git a/base/glibc-compatibility/musl/musl_features.h b/base/glibc-compatibility/musl/musl_features.h index b656efcf4d6..11be55d68fa 100644 --- a/base/glibc-compatibility/musl/musl_features.h +++ b/base/glibc-compatibility/musl/musl_features.h @@ -2,7 +2,4 @@ #define weak __attribute__((__weak__)) #define hidden __attribute__((__visibility__("hidden"))) -#define weak_alias(old, new) \ - extern __typeof(old) new __attribute__((__weak__, __alias__(#old))) - #define predict_false(x) __builtin_expect(x, 0) diff --git a/base/glibc-compatibility/musl/x86_64/syscall.s b/base/glibc-compatibility/musl/x86_64/syscall.s index c4bee804ffa..8edce455728 100644 --- a/base/glibc-compatibility/musl/x86_64/syscall.s +++ b/base/glibc-compatibility/musl/x86_64/syscall.s @@ -2,6 +2,7 @@ .hidden __syscall .type __syscall,@function __syscall: +.cfi_startproc movq %rdi,%rax movq %rsi,%rdi movq %rdx,%rsi @@ -11,3 +12,4 @@ __syscall: movq 8(%rsp),%r9 syscall ret +.cfi_endproc diff --git a/base/loggers/CMakeLists.txt b/base/loggers/CMakeLists.txt index 3cc73da5bce..48868cf1e0d 100644 --- a/base/loggers/CMakeLists.txt +++ b/base/loggers/CMakeLists.txt @@ -1 +1,5 @@ -add_subdirectory(loggers) +include(${ClickHouse_SOURCE_DIR}/cmake/dbms_glob_sources.cmake) +add_headers_and_sources(loggers .) +add_library(loggers ${loggers_sources} ${loggers_headers}) +target_link_libraries(loggers PRIVATE dbms clickhouse_common_io) +target_include_directories(loggers PUBLIC ..) 
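A note on the musl wrappers above: the weak_alias macro removed from musl_features.h was the mechanism that published each internal symbol under its public name, so dropping it goes hand in hand with renaming __clock_gettime and __clock_nanosleep to clock_gettime and clock_nanosleep. A minimal sketch of what the removed alias expanded to, based on the macro definition shown in the musl_features.h hunk (GNU attribute syntax, for illustration only):

    /* weak_alias(__clock_gettime, clock_gettime) expanded to: */
    extern __typeof(__clock_gettime) clock_gettime
        __attribute__((__weak__, __alias__("__clock_gettime")));

    /* After this change clock_gettime.c defines
       int clock_gettime(clockid_t clk, struct timespec * ts)
       directly, so neither the alias nor the weak_alias macro is needed. */
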
diff --git a/base/loggers/loggers/ExtendedLogChannel.cpp b/base/loggers/ExtendedLogChannel.cpp similarity index 100% rename from base/loggers/loggers/ExtendedLogChannel.cpp rename to base/loggers/ExtendedLogChannel.cpp diff --git a/base/loggers/loggers/ExtendedLogChannel.h b/base/loggers/ExtendedLogChannel.h similarity index 100% rename from base/loggers/loggers/ExtendedLogChannel.h rename to base/loggers/ExtendedLogChannel.h diff --git a/base/loggers/loggers/Loggers.cpp b/base/loggers/Loggers.cpp similarity index 89% rename from base/loggers/loggers/Loggers.cpp rename to base/loggers/Loggers.cpp index d5fd4e4e142..ed806741895 100644 --- a/base/loggers/loggers/Loggers.cpp +++ b/base/loggers/Loggers.cpp @@ -166,12 +166,29 @@ void Loggers::buildLoggers(Poco::Util::AbstractConfiguration & config, Poco::Log logger.root().setChannel(logger.getChannel()); // Explicitly specified log levels for specific loggers. - Poco::Util::AbstractConfiguration::Keys levels; - config.keys("logger.levels", levels); + { + Poco::Util::AbstractConfiguration::Keys loggers_level; + config.keys("logger.levels", loggers_level); - if (!levels.empty()) - for (const auto & level : levels) - logger.root().get(level).setLevel(config.getString("logger.levels." + level, "trace")); + if (!loggers_level.empty()) + { + for (const auto & key : loggers_level) + { + if (key == "logger" || key.starts_with("logger[")) + { + const std::string name(config.getString("logger.levels." + key + ".name")); + const std::string level(config.getString("logger.levels." + key + ".level")); + logger.root().get(name).setLevel(level); + } + else + { + // Legacy syntax + const std::string level(config.getString("logger.levels." + key, "trace")); + logger.root().get(key).setLevel(level); + } + } + } + } } void Loggers::closeLogs(Poco::Logger & logger) diff --git a/base/loggers/loggers/Loggers.h b/base/loggers/Loggers.h similarity index 100% rename from base/loggers/loggers/Loggers.h rename to base/loggers/Loggers.h diff --git a/base/loggers/loggers/OwnFormattingChannel.cpp b/base/loggers/OwnFormattingChannel.cpp similarity index 100% rename from base/loggers/loggers/OwnFormattingChannel.cpp rename to base/loggers/OwnFormattingChannel.cpp diff --git a/base/loggers/loggers/OwnFormattingChannel.h b/base/loggers/OwnFormattingChannel.h similarity index 100% rename from base/loggers/loggers/OwnFormattingChannel.h rename to base/loggers/OwnFormattingChannel.h diff --git a/base/loggers/loggers/OwnPatternFormatter.cpp b/base/loggers/OwnPatternFormatter.cpp similarity index 95% rename from base/loggers/loggers/OwnPatternFormatter.cpp rename to base/loggers/OwnPatternFormatter.cpp index 1f918f01697..029d06ff949 100644 --- a/base/loggers/loggers/OwnPatternFormatter.cpp +++ b/base/loggers/OwnPatternFormatter.cpp @@ -75,7 +75,11 @@ void OwnPatternFormatter::formatExtended(const DB::ExtendedLogMessage & msg_ext, if (color) writeCString(resetColor(), wb); writeCString("> ", wb); + if (color) + writeString(setColor(std::hash()(msg.getSource())), wb); DB::writeString(msg.getSource(), wb); + if (color) + writeCString(resetColor(), wb); writeCString(": ", wb); DB::writeString(msg.getText(), wb); } diff --git a/base/loggers/loggers/OwnPatternFormatter.h b/base/loggers/OwnPatternFormatter.h similarity index 100% rename from base/loggers/loggers/OwnPatternFormatter.h rename to base/loggers/OwnPatternFormatter.h diff --git a/base/loggers/loggers/OwnSplitChannel.cpp b/base/loggers/OwnSplitChannel.cpp similarity index 90% rename from base/loggers/loggers/OwnSplitChannel.cpp 
rename to base/loggers/OwnSplitChannel.cpp index 3b809c022b5..3f77b594fbb 100644 --- a/base/loggers/loggers/OwnSplitChannel.cpp +++ b/base/loggers/OwnSplitChannel.cpp @@ -20,7 +20,7 @@ void OwnSplitChannel::log(const Poco::Message & msg) if (channels.empty() && (logs_queue == nullptr || msg.getPriority() > logs_queue->max_priority)) return; - if (auto masker = SensitiveDataMasker::getInstance()) + if (auto * masker = SensitiveDataMasker::getInstance()) { auto message_text = msg.getText(); auto matches = masker->wipeSensitiveData(message_text); @@ -71,7 +71,8 @@ void OwnSplitChannel::logSplit(const Poco::Message & msg) /// Also log to system.text_log table, if message is not too noisy - if (text_log_max_priority && msg.getPriority() <= text_log_max_priority) + auto text_log_max_priority_loaded = text_log_max_priority.load(std::memory_order_relaxed); + if (text_log_max_priority_loaded && msg.getPriority() <= text_log_max_priority_loaded) { TextLogElement elem; @@ -108,7 +109,7 @@ void OwnSplitChannel::addTextLog(std::shared_ptr log, int max_prior { std::lock_guard lock(text_log_mutex); text_log = log; - text_log_max_priority = max_priority; + text_log_max_priority.store(max_priority, std::memory_order_relaxed); } } diff --git a/base/loggers/loggers/OwnSplitChannel.h b/base/loggers/OwnSplitChannel.h similarity index 95% rename from base/loggers/loggers/OwnSplitChannel.h rename to base/loggers/OwnSplitChannel.h index 78308e97ab7..ac313b383bb 100644 --- a/base/loggers/loggers/OwnSplitChannel.h +++ b/base/loggers/OwnSplitChannel.h @@ -33,7 +33,7 @@ private: std::mutex text_log_mutex; std::weak_ptr text_log; - int text_log_max_priority = -1; + std::atomic text_log_max_priority = -1; }; } diff --git a/base/loggers/loggers/CMakeLists.txt b/base/loggers/loggers/CMakeLists.txt deleted file mode 100644 index bada5a2fe1d..00000000000 --- a/base/loggers/loggers/CMakeLists.txt +++ /dev/null @@ -1,5 +0,0 @@ -include(${ClickHouse_SOURCE_DIR}/cmake/dbms_glob_sources.cmake) -add_headers_and_sources(loggers .) -add_library(loggers ${loggers_sources} ${loggers_headers}) -target_link_libraries(loggers PRIVATE dbms clickhouse_common_io ${Poco_Foundation_LIBRARY}) -target_include_directories(loggers PUBLIC ..) diff --git a/base/loggers/ya.make b/base/loggers/ya.make new file mode 100644 index 00000000000..b1c84042eee --- /dev/null +++ b/base/loggers/ya.make @@ -0,0 +1,15 @@ +LIBRARY() + +PEERDIR( + clickhouse/src/Common +) + +SRCS( + ExtendedLogChannel.cpp + Loggers.cpp + OwnFormattingChannel.cpp + OwnPatternFormatter.cpp + OwnSplitChannel.cpp +) + +END() diff --git a/base/memcpy/CMakeLists.txt b/base/memcpy/CMakeLists.txt deleted file mode 100644 index 2f000d85e3c..00000000000 --- a/base/memcpy/CMakeLists.txt +++ /dev/null @@ -1 +0,0 @@ -add_library(memcpy memcpy.c) diff --git a/base/memcpy/impl/FastMemcpy.h b/base/memcpy/impl/FastMemcpy.h deleted file mode 100644 index 09dd4256f18..00000000000 --- a/base/memcpy/impl/FastMemcpy.h +++ /dev/null @@ -1,697 +0,0 @@ -//===================================================================== -// -// FastMemcpy.c - skywind3000@163.com, 2015 -// -// feature: -// 50% speed up in avg. 
vs standard memcpy (tested in vc2012/gcc5.1) -// -//===================================================================== -#ifndef __FAST_MEMCPY_H__ -#define __FAST_MEMCPY_H__ - -#include -#include -#include - - -//--------------------------------------------------------------------- -// force inline for compilers -//--------------------------------------------------------------------- -#ifndef INLINE -#ifdef __GNUC__ -#if (__GNUC__ > 3) || ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 1)) - #define INLINE __inline__ __attribute__((always_inline)) -#else - #define INLINE __inline__ -#endif -#elif defined(_MSC_VER) - #define INLINE __forceinline -#elif (defined(__BORLANDC__) || defined(__WATCOMC__)) - #define INLINE __inline -#else - #define INLINE -#endif -#endif - - -typedef __attribute__((__aligned__(1))) uint16_t uint16_unaligned_t; -typedef __attribute__((__aligned__(1))) uint32_t uint32_unaligned_t; -typedef __attribute__((__aligned__(1))) uint64_t uint64_unaligned_t; - - - -//--------------------------------------------------------------------- -// fast copy for different sizes -//--------------------------------------------------------------------- -static INLINE void memcpy_sse2_16(void *dst, const void *src) { - __m128i m0 = _mm_loadu_si128(((const __m128i*)src) + 0); - _mm_storeu_si128(((__m128i*)dst) + 0, m0); -} - -static INLINE void memcpy_sse2_32(void *dst, const void *src) { - __m128i m0 = _mm_loadu_si128(((const __m128i*)src) + 0); - __m128i m1 = _mm_loadu_si128(((const __m128i*)src) + 1); - _mm_storeu_si128(((__m128i*)dst) + 0, m0); - _mm_storeu_si128(((__m128i*)dst) + 1, m1); -} - -static INLINE void memcpy_sse2_64(void *dst, const void *src) { - __m128i m0 = _mm_loadu_si128(((const __m128i*)src) + 0); - __m128i m1 = _mm_loadu_si128(((const __m128i*)src) + 1); - __m128i m2 = _mm_loadu_si128(((const __m128i*)src) + 2); - __m128i m3 = _mm_loadu_si128(((const __m128i*)src) + 3); - _mm_storeu_si128(((__m128i*)dst) + 0, m0); - _mm_storeu_si128(((__m128i*)dst) + 1, m1); - _mm_storeu_si128(((__m128i*)dst) + 2, m2); - _mm_storeu_si128(((__m128i*)dst) + 3, m3); -} - -static INLINE void memcpy_sse2_128(void *dst, const void *src) { - __m128i m0 = _mm_loadu_si128(((const __m128i*)src) + 0); - __m128i m1 = _mm_loadu_si128(((const __m128i*)src) + 1); - __m128i m2 = _mm_loadu_si128(((const __m128i*)src) + 2); - __m128i m3 = _mm_loadu_si128(((const __m128i*)src) + 3); - __m128i m4 = _mm_loadu_si128(((const __m128i*)src) + 4); - __m128i m5 = _mm_loadu_si128(((const __m128i*)src) + 5); - __m128i m6 = _mm_loadu_si128(((const __m128i*)src) + 6); - __m128i m7 = _mm_loadu_si128(((const __m128i*)src) + 7); - _mm_storeu_si128(((__m128i*)dst) + 0, m0); - _mm_storeu_si128(((__m128i*)dst) + 1, m1); - _mm_storeu_si128(((__m128i*)dst) + 2, m2); - _mm_storeu_si128(((__m128i*)dst) + 3, m3); - _mm_storeu_si128(((__m128i*)dst) + 4, m4); - _mm_storeu_si128(((__m128i*)dst) + 5, m5); - _mm_storeu_si128(((__m128i*)dst) + 6, m6); - _mm_storeu_si128(((__m128i*)dst) + 7, m7); -} - - -//--------------------------------------------------------------------- -// tiny memory copy with jump table optimized -//--------------------------------------------------------------------- -static INLINE void *memcpy_tiny(void *dst, const void *src, size_t size) { - unsigned char *dd = ((unsigned char*)dst) + size; - const unsigned char *ss = ((const unsigned char*)src) + size; - - switch (size) { - case 64: - memcpy_sse2_64(dd - 64, ss - 64); - case 0: - break; - - case 65: - memcpy_sse2_64(dd - 65, ss - 65); - case 1: - dd[-1] = 
ss[-1]; - break; - - case 66: - memcpy_sse2_64(dd - 66, ss - 66); - case 2: - *((uint16_unaligned_t*)(dd - 2)) = *((uint16_unaligned_t*)(ss - 2)); - break; - - case 67: - memcpy_sse2_64(dd - 67, ss - 67); - case 3: - *((uint16_unaligned_t*)(dd - 3)) = *((uint16_unaligned_t*)(ss - 3)); - dd[-1] = ss[-1]; - break; - - case 68: - memcpy_sse2_64(dd - 68, ss - 68); - case 4: - *((uint32_unaligned_t*)(dd - 4)) = *((uint32_unaligned_t*)(ss - 4)); - break; - - case 69: - memcpy_sse2_64(dd - 69, ss - 69); - case 5: - *((uint32_unaligned_t*)(dd - 5)) = *((uint32_unaligned_t*)(ss - 5)); - dd[-1] = ss[-1]; - break; - - case 70: - memcpy_sse2_64(dd - 70, ss - 70); - case 6: - *((uint32_unaligned_t*)(dd - 6)) = *((uint32_unaligned_t*)(ss - 6)); - *((uint16_unaligned_t*)(dd - 2)) = *((uint16_unaligned_t*)(ss - 2)); - break; - - case 71: - memcpy_sse2_64(dd - 71, ss - 71); - case 7: - *((uint32_unaligned_t*)(dd - 7)) = *((uint32_unaligned_t*)(ss - 7)); - *((uint32_unaligned_t*)(dd - 4)) = *((uint32_unaligned_t*)(ss - 4)); - break; - - case 72: - memcpy_sse2_64(dd - 72, ss - 72); - case 8: - *((uint64_unaligned_t*)(dd - 8)) = *((uint64_unaligned_t*)(ss - 8)); - break; - - case 73: - memcpy_sse2_64(dd - 73, ss - 73); - case 9: - *((uint64_unaligned_t*)(dd - 9)) = *((uint64_unaligned_t*)(ss - 9)); - dd[-1] = ss[-1]; - break; - - case 74: - memcpy_sse2_64(dd - 74, ss - 74); - case 10: - *((uint64_unaligned_t*)(dd - 10)) = *((uint64_unaligned_t*)(ss - 10)); - *((uint16_unaligned_t*)(dd - 2)) = *((uint16_unaligned_t*)(ss - 2)); - break; - - case 75: - memcpy_sse2_64(dd - 75, ss - 75); - case 11: - *((uint64_unaligned_t*)(dd - 11)) = *((uint64_unaligned_t*)(ss - 11)); - *((uint32_unaligned_t*)(dd - 4)) = *((uint32_unaligned_t*)(ss - 4)); - break; - - case 76: - memcpy_sse2_64(dd - 76, ss - 76); - case 12: - *((uint64_unaligned_t*)(dd - 12)) = *((uint64_unaligned_t*)(ss - 12)); - *((uint32_unaligned_t*)(dd - 4)) = *((uint32_unaligned_t*)(ss - 4)); - break; - - case 77: - memcpy_sse2_64(dd - 77, ss - 77); - case 13: - *((uint64_unaligned_t*)(dd - 13)) = *((uint64_unaligned_t*)(ss - 13)); - *((uint32_unaligned_t*)(dd - 5)) = *((uint32_unaligned_t*)(ss - 5)); - dd[-1] = ss[-1]; - break; - - case 78: - memcpy_sse2_64(dd - 78, ss - 78); - case 14: - *((uint64_unaligned_t*)(dd - 14)) = *((uint64_unaligned_t*)(ss - 14)); - *((uint64_unaligned_t*)(dd - 8)) = *((uint64_unaligned_t*)(ss - 8)); - break; - - case 79: - memcpy_sse2_64(dd - 79, ss - 79); - case 15: - *((uint64_unaligned_t*)(dd - 15)) = *((uint64_unaligned_t*)(ss - 15)); - *((uint64_unaligned_t*)(dd - 8)) = *((uint64_unaligned_t*)(ss - 8)); - break; - - case 80: - memcpy_sse2_64(dd - 80, ss - 80); - case 16: - memcpy_sse2_16(dd - 16, ss - 16); - break; - - case 81: - memcpy_sse2_64(dd - 81, ss - 81); - case 17: - memcpy_sse2_16(dd - 17, ss - 17); - dd[-1] = ss[-1]; - break; - - case 82: - memcpy_sse2_64(dd - 82, ss - 82); - case 18: - memcpy_sse2_16(dd - 18, ss - 18); - *((uint16_unaligned_t*)(dd - 2)) = *((uint16_unaligned_t*)(ss - 2)); - break; - - case 83: - memcpy_sse2_64(dd - 83, ss - 83); - case 19: - memcpy_sse2_16(dd - 19, ss - 19); - *((uint16_unaligned_t*)(dd - 3)) = *((uint16_unaligned_t*)(ss - 3)); - dd[-1] = ss[-1]; - break; - - case 84: - memcpy_sse2_64(dd - 84, ss - 84); - case 20: - memcpy_sse2_16(dd - 20, ss - 20); - *((uint32_unaligned_t*)(dd - 4)) = *((uint32_unaligned_t*)(ss - 4)); - break; - - case 85: - memcpy_sse2_64(dd - 85, ss - 85); - case 21: - memcpy_sse2_16(dd - 21, ss - 21); - *((uint32_unaligned_t*)(dd - 5)) = 
*((uint32_unaligned_t*)(ss - 5)); - dd[-1] = ss[-1]; - break; - - case 86: - memcpy_sse2_64(dd - 86, ss - 86); - case 22: - memcpy_sse2_16(dd - 22, ss - 22); - *((uint32_unaligned_t*)(dd - 6)) = *((uint32_unaligned_t*)(ss - 6)); - *((uint16_unaligned_t*)(dd - 2)) = *((uint16_unaligned_t*)(ss - 2)); - break; - - case 87: - memcpy_sse2_64(dd - 87, ss - 87); - case 23: - memcpy_sse2_16(dd - 23, ss - 23); - *((uint32_unaligned_t*)(dd - 7)) = *((uint32_unaligned_t*)(ss - 7)); - *((uint32_unaligned_t*)(dd - 4)) = *((uint32_unaligned_t*)(ss - 4)); - break; - - case 88: - memcpy_sse2_64(dd - 88, ss - 88); - case 24: - memcpy_sse2_16(dd - 24, ss - 24); - memcpy_sse2_16(dd - 16, ss - 16); - break; - - case 89: - memcpy_sse2_64(dd - 89, ss - 89); - case 25: - memcpy_sse2_16(dd - 25, ss - 25); - memcpy_sse2_16(dd - 16, ss - 16); - break; - - case 90: - memcpy_sse2_64(dd - 90, ss - 90); - case 26: - memcpy_sse2_16(dd - 26, ss - 26); - memcpy_sse2_16(dd - 16, ss - 16); - break; - - case 91: - memcpy_sse2_64(dd - 91, ss - 91); - case 27: - memcpy_sse2_16(dd - 27, ss - 27); - memcpy_sse2_16(dd - 16, ss - 16); - break; - - case 92: - memcpy_sse2_64(dd - 92, ss - 92); - case 28: - memcpy_sse2_16(dd - 28, ss - 28); - memcpy_sse2_16(dd - 16, ss - 16); - break; - - case 93: - memcpy_sse2_64(dd - 93, ss - 93); - case 29: - memcpy_sse2_16(dd - 29, ss - 29); - memcpy_sse2_16(dd - 16, ss - 16); - break; - - case 94: - memcpy_sse2_64(dd - 94, ss - 94); - case 30: - memcpy_sse2_16(dd - 30, ss - 30); - memcpy_sse2_16(dd - 16, ss - 16); - break; - - case 95: - memcpy_sse2_64(dd - 95, ss - 95); - case 31: - memcpy_sse2_16(dd - 31, ss - 31); - memcpy_sse2_16(dd - 16, ss - 16); - break; - - case 96: - memcpy_sse2_64(dd - 96, ss - 96); - case 32: - memcpy_sse2_32(dd - 32, ss - 32); - break; - - case 97: - memcpy_sse2_64(dd - 97, ss - 97); - case 33: - memcpy_sse2_32(dd - 33, ss - 33); - dd[-1] = ss[-1]; - break; - - case 98: - memcpy_sse2_64(dd - 98, ss - 98); - case 34: - memcpy_sse2_32(dd - 34, ss - 34); - *((uint16_unaligned_t*)(dd - 2)) = *((uint16_unaligned_t*)(ss - 2)); - break; - - case 99: - memcpy_sse2_64(dd - 99, ss - 99); - case 35: - memcpy_sse2_32(dd - 35, ss - 35); - *((uint16_unaligned_t*)(dd - 3)) = *((uint16_unaligned_t*)(ss - 3)); - dd[-1] = ss[-1]; - break; - - case 100: - memcpy_sse2_64(dd - 100, ss - 100); - case 36: - memcpy_sse2_32(dd - 36, ss - 36); - *((uint32_unaligned_t*)(dd - 4)) = *((uint32_unaligned_t*)(ss - 4)); - break; - - case 101: - memcpy_sse2_64(dd - 101, ss - 101); - case 37: - memcpy_sse2_32(dd - 37, ss - 37); - *((uint32_unaligned_t*)(dd - 5)) = *((uint32_unaligned_t*)(ss - 5)); - dd[-1] = ss[-1]; - break; - - case 102: - memcpy_sse2_64(dd - 102, ss - 102); - case 38: - memcpy_sse2_32(dd - 38, ss - 38); - *((uint32_unaligned_t*)(dd - 6)) = *((uint32_unaligned_t*)(ss - 6)); - *((uint16_unaligned_t*)(dd - 2)) = *((uint16_unaligned_t*)(ss - 2)); - break; - - case 103: - memcpy_sse2_64(dd - 103, ss - 103); - case 39: - memcpy_sse2_32(dd - 39, ss - 39); - *((uint32_unaligned_t*)(dd - 7)) = *((uint32_unaligned_t*)(ss - 7)); - *((uint32_unaligned_t*)(dd - 4)) = *((uint32_unaligned_t*)(ss - 4)); - break; - - case 104: - memcpy_sse2_64(dd - 104, ss - 104); - case 40: - memcpy_sse2_32(dd - 40, ss - 40); - *((uint64_unaligned_t*)(dd - 8)) = *((uint64_unaligned_t*)(ss - 8)); - break; - - case 105: - memcpy_sse2_64(dd - 105, ss - 105); - case 41: - memcpy_sse2_32(dd - 41, ss - 41); - *((uint64_unaligned_t*)(dd - 9)) = *((uint64_unaligned_t*)(ss - 9)); - dd[-1] = ss[-1]; - break; - - case 106: - 
memcpy_sse2_64(dd - 106, ss - 106); - case 42: - memcpy_sse2_32(dd - 42, ss - 42); - *((uint64_unaligned_t*)(dd - 10)) = *((uint64_unaligned_t*)(ss - 10)); - *((uint16_unaligned_t*)(dd - 2)) = *((uint16_unaligned_t*)(ss - 2)); - break; - - case 107: - memcpy_sse2_64(dd - 107, ss - 107); - case 43: - memcpy_sse2_32(dd - 43, ss - 43); - *((uint64_unaligned_t*)(dd - 11)) = *((uint64_unaligned_t*)(ss - 11)); - *((uint32_unaligned_t*)(dd - 4)) = *((uint32_unaligned_t*)(ss - 4)); - break; - - case 108: - memcpy_sse2_64(dd - 108, ss - 108); - case 44: - memcpy_sse2_32(dd - 44, ss - 44); - *((uint64_unaligned_t*)(dd - 12)) = *((uint64_unaligned_t*)(ss - 12)); - *((uint32_unaligned_t*)(dd - 4)) = *((uint32_unaligned_t*)(ss - 4)); - break; - - case 109: - memcpy_sse2_64(dd - 109, ss - 109); - case 45: - memcpy_sse2_32(dd - 45, ss - 45); - *((uint64_unaligned_t*)(dd - 13)) = *((uint64_unaligned_t*)(ss - 13)); - *((uint32_unaligned_t*)(dd - 5)) = *((uint32_unaligned_t*)(ss - 5)); - dd[-1] = ss[-1]; - break; - - case 110: - memcpy_sse2_64(dd - 110, ss - 110); - case 46: - memcpy_sse2_32(dd - 46, ss - 46); - *((uint64_unaligned_t*)(dd - 14)) = *((uint64_unaligned_t*)(ss - 14)); - *((uint64_unaligned_t*)(dd - 8)) = *((uint64_unaligned_t*)(ss - 8)); - break; - - case 111: - memcpy_sse2_64(dd - 111, ss - 111); - case 47: - memcpy_sse2_32(dd - 47, ss - 47); - *((uint64_unaligned_t*)(dd - 15)) = *((uint64_unaligned_t*)(ss - 15)); - *((uint64_unaligned_t*)(dd - 8)) = *((uint64_unaligned_t*)(ss - 8)); - break; - - case 112: - memcpy_sse2_64(dd - 112, ss - 112); - case 48: - memcpy_sse2_32(dd - 48, ss - 48); - memcpy_sse2_16(dd - 16, ss - 16); - break; - - case 113: - memcpy_sse2_64(dd - 113, ss - 113); - case 49: - memcpy_sse2_32(dd - 49, ss - 49); - memcpy_sse2_16(dd - 17, ss - 17); - dd[-1] = ss[-1]; - break; - - case 114: - memcpy_sse2_64(dd - 114, ss - 114); - case 50: - memcpy_sse2_32(dd - 50, ss - 50); - memcpy_sse2_16(dd - 18, ss - 18); - *((uint16_unaligned_t*)(dd - 2)) = *((uint16_unaligned_t*)(ss - 2)); - break; - - case 115: - memcpy_sse2_64(dd - 115, ss - 115); - case 51: - memcpy_sse2_32(dd - 51, ss - 51); - memcpy_sse2_16(dd - 19, ss - 19); - *((uint16_unaligned_t*)(dd - 3)) = *((uint16_unaligned_t*)(ss - 3)); - dd[-1] = ss[-1]; - break; - - case 116: - memcpy_sse2_64(dd - 116, ss - 116); - case 52: - memcpy_sse2_32(dd - 52, ss - 52); - memcpy_sse2_16(dd - 20, ss - 20); - *((uint32_unaligned_t*)(dd - 4)) = *((uint32_unaligned_t*)(ss - 4)); - break; - - case 117: - memcpy_sse2_64(dd - 117, ss - 117); - case 53: - memcpy_sse2_32(dd - 53, ss - 53); - memcpy_sse2_16(dd - 21, ss - 21); - *((uint32_unaligned_t*)(dd - 5)) = *((uint32_unaligned_t*)(ss - 5)); - dd[-1] = ss[-1]; - break; - - case 118: - memcpy_sse2_64(dd - 118, ss - 118); - case 54: - memcpy_sse2_32(dd - 54, ss - 54); - memcpy_sse2_16(dd - 22, ss - 22); - *((uint32_unaligned_t*)(dd - 6)) = *((uint32_unaligned_t*)(ss - 6)); - *((uint16_unaligned_t*)(dd - 2)) = *((uint16_unaligned_t*)(ss - 2)); - break; - - case 119: - memcpy_sse2_64(dd - 119, ss - 119); - case 55: - memcpy_sse2_32(dd - 55, ss - 55); - memcpy_sse2_16(dd - 23, ss - 23); - *((uint32_unaligned_t*)(dd - 7)) = *((uint32_unaligned_t*)(ss - 7)); - *((uint32_unaligned_t*)(dd - 4)) = *((uint32_unaligned_t*)(ss - 4)); - break; - - case 120: - memcpy_sse2_64(dd - 120, ss - 120); - case 56: - memcpy_sse2_32(dd - 56, ss - 56); - memcpy_sse2_16(dd - 24, ss - 24); - memcpy_sse2_16(dd - 16, ss - 16); - break; - - case 121: - memcpy_sse2_64(dd - 121, ss - 121); - case 57: - memcpy_sse2_32(dd 
- 57, ss - 57); - memcpy_sse2_16(dd - 25, ss - 25); - memcpy_sse2_16(dd - 16, ss - 16); - break; - - case 122: - memcpy_sse2_64(dd - 122, ss - 122); - case 58: - memcpy_sse2_32(dd - 58, ss - 58); - memcpy_sse2_16(dd - 26, ss - 26); - memcpy_sse2_16(dd - 16, ss - 16); - break; - - case 123: - memcpy_sse2_64(dd - 123, ss - 123); - case 59: - memcpy_sse2_32(dd - 59, ss - 59); - memcpy_sse2_16(dd - 27, ss - 27); - memcpy_sse2_16(dd - 16, ss - 16); - break; - - case 124: - memcpy_sse2_64(dd - 124, ss - 124); - case 60: - memcpy_sse2_32(dd - 60, ss - 60); - memcpy_sse2_16(dd - 28, ss - 28); - memcpy_sse2_16(dd - 16, ss - 16); - break; - - case 125: - memcpy_sse2_64(dd - 125, ss - 125); - case 61: - memcpy_sse2_32(dd - 61, ss - 61); - memcpy_sse2_16(dd - 29, ss - 29); - memcpy_sse2_16(dd - 16, ss - 16); - break; - - case 126: - memcpy_sse2_64(dd - 126, ss - 126); - case 62: - memcpy_sse2_32(dd - 62, ss - 62); - memcpy_sse2_16(dd - 30, ss - 30); - memcpy_sse2_16(dd - 16, ss - 16); - break; - - case 127: - memcpy_sse2_64(dd - 127, ss - 127); - case 63: - memcpy_sse2_32(dd - 63, ss - 63); - memcpy_sse2_16(dd - 31, ss - 31); - memcpy_sse2_16(dd - 16, ss - 16); - break; - - case 128: - memcpy_sse2_128(dd - 128, ss - 128); - break; - } - - return dst; -} - - -//--------------------------------------------------------------------- -// main routine -//--------------------------------------------------------------------- -static INLINE void* memcpy_fast(void *destination, const void *source, size_t size) -{ - unsigned char *dst = (unsigned char*)destination; - const unsigned char *src = (const unsigned char*)source; - size_t padding; - - // small memory copy - if (size <= 128) { - return memcpy_tiny(dst, src, size); - } - - // align destination to 16 bytes boundary - padding = (16 - (((size_t)dst) & 15)) & 15; - - if (padding > 0) { - __m128i head = _mm_loadu_si128((const __m128i*)src); - _mm_storeu_si128((__m128i*)dst, head); - dst += padding; - src += padding; - size -= padding; - } - - // medium size copy - if (size <= 0x200000) // something around half of LL-cache size - { - __m128i c0, c1, c2, c3, c4, c5, c6, c7; - - for (; size >= 128; size -= 128) { - c0 = _mm_loadu_si128(((const __m128i*)src) + 0); - c1 = _mm_loadu_si128(((const __m128i*)src) + 1); - c2 = _mm_loadu_si128(((const __m128i*)src) + 2); - c3 = _mm_loadu_si128(((const __m128i*)src) + 3); - c4 = _mm_loadu_si128(((const __m128i*)src) + 4); - c5 = _mm_loadu_si128(((const __m128i*)src) + 5); - c6 = _mm_loadu_si128(((const __m128i*)src) + 6); - c7 = _mm_loadu_si128(((const __m128i*)src) + 7); - _mm_prefetch((const char*)(src + 256), _MM_HINT_NTA); - src += 128; - _mm_store_si128((((__m128i*)dst) + 0), c0); - _mm_store_si128((((__m128i*)dst) + 1), c1); - _mm_store_si128((((__m128i*)dst) + 2), c2); - _mm_store_si128((((__m128i*)dst) + 3), c3); - _mm_store_si128((((__m128i*)dst) + 4), c4); - _mm_store_si128((((__m128i*)dst) + 5), c5); - _mm_store_si128((((__m128i*)dst) + 6), c6); - _mm_store_si128((((__m128i*)dst) + 7), c7); - dst += 128; - } - } - else { // big memory copy - __m128i c0, c1, c2, c3, c4, c5, c6, c7; - - _mm_prefetch((const char*)(src), _MM_HINT_NTA); - - if ((((size_t)src) & 15) == 0) { // source aligned - for (; size >= 128; size -= 128) { - c0 = _mm_load_si128(((const __m128i*)src) + 0); - c1 = _mm_load_si128(((const __m128i*)src) + 1); - c2 = _mm_load_si128(((const __m128i*)src) + 2); - c3 = _mm_load_si128(((const __m128i*)src) + 3); - c4 = _mm_load_si128(((const __m128i*)src) + 4); - c5 = _mm_load_si128(((const __m128i*)src) 
+ 5); - c6 = _mm_load_si128(((const __m128i*)src) + 6); - c7 = _mm_load_si128(((const __m128i*)src) + 7); - _mm_prefetch((const char*)(src + 256), _MM_HINT_NTA); - src += 128; - _mm_stream_si128((((__m128i*)dst) + 0), c0); - _mm_stream_si128((((__m128i*)dst) + 1), c1); - _mm_stream_si128((((__m128i*)dst) + 2), c2); - _mm_stream_si128((((__m128i*)dst) + 3), c3); - _mm_stream_si128((((__m128i*)dst) + 4), c4); - _mm_stream_si128((((__m128i*)dst) + 5), c5); - _mm_stream_si128((((__m128i*)dst) + 6), c6); - _mm_stream_si128((((__m128i*)dst) + 7), c7); - dst += 128; - } - } - else { // source unaligned - for (; size >= 128; size -= 128) { - c0 = _mm_loadu_si128(((const __m128i*)src) + 0); - c1 = _mm_loadu_si128(((const __m128i*)src) + 1); - c2 = _mm_loadu_si128(((const __m128i*)src) + 2); - c3 = _mm_loadu_si128(((const __m128i*)src) + 3); - c4 = _mm_loadu_si128(((const __m128i*)src) + 4); - c5 = _mm_loadu_si128(((const __m128i*)src) + 5); - c6 = _mm_loadu_si128(((const __m128i*)src) + 6); - c7 = _mm_loadu_si128(((const __m128i*)src) + 7); - _mm_prefetch((const char*)(src + 256), _MM_HINT_NTA); - src += 128; - _mm_stream_si128((((__m128i*)dst) + 0), c0); - _mm_stream_si128((((__m128i*)dst) + 1), c1); - _mm_stream_si128((((__m128i*)dst) + 2), c2); - _mm_stream_si128((((__m128i*)dst) + 3), c3); - _mm_stream_si128((((__m128i*)dst) + 4), c4); - _mm_stream_si128((((__m128i*)dst) + 5), c5); - _mm_stream_si128((((__m128i*)dst) + 6), c6); - _mm_stream_si128((((__m128i*)dst) + 7), c7); - dst += 128; - } - } - _mm_sfence(); - } - - memcpy_tiny(dst, src, size); - - return destination; -} - - -#endif - - - diff --git a/base/memcpy/impl/README.md b/base/memcpy/impl/README.md deleted file mode 100644 index ac807ecd925..00000000000 --- a/base/memcpy/impl/README.md +++ /dev/null @@ -1,97 +0,0 @@ -Build -===== - -with gcc: -> gcc -O3 -msse2 FastMemcpy.c -o FastMemcpy - -with msvc: -> cl -nologo -O2 FastMemcpy.c - -Features -======== - -* 50% speedup in avg. vs traditional memcpy in msvc 2012 or gcc 4.9 -* small size copy optimized with jump table -* medium size copy optimized with sse2 vector copy -* huge size copy optimized with cache prefetch & movntdq - -Reference -========= - -[Using Block Prefetch for Optimized Memory Performance](http://files.rsdn.ru/23380/AMD_block_prefetch_paper.pdf) - -The article only focused on aligned huge memory copy. You need handle other conditions by your self. 
- - -Results -======= - -``` -result: gcc4.9 (msvc 2012 got a similar result): - -benchmark(size=32 bytes, times=16777216): -result(dst aligned, src aligned): memcpy_fast=81ms memcpy=281 ms -result(dst aligned, src unalign): memcpy_fast=88ms memcpy=254 ms -result(dst unalign, src aligned): memcpy_fast=87ms memcpy=245 ms -result(dst unalign, src unalign): memcpy_fast=81ms memcpy=258 ms - -benchmark(size=64 bytes, times=16777216): -result(dst aligned, src aligned): memcpy_fast=91ms memcpy=364 ms -result(dst aligned, src unalign): memcpy_fast=95ms memcpy=336 ms -result(dst unalign, src aligned): memcpy_fast=96ms memcpy=353 ms -result(dst unalign, src unalign): memcpy_fast=99ms memcpy=346 ms - -benchmark(size=512 bytes, times=8388608): -result(dst aligned, src aligned): memcpy_fast=124ms memcpy=242 ms -result(dst aligned, src unalign): memcpy_fast=166ms memcpy=555 ms -result(dst unalign, src aligned): memcpy_fast=168ms memcpy=602 ms -result(dst unalign, src unalign): memcpy_fast=174ms memcpy=614 ms - -benchmark(size=1024 bytes, times=4194304): -result(dst aligned, src aligned): memcpy_fast=119ms memcpy=171 ms -result(dst aligned, src unalign): memcpy_fast=182ms memcpy=442 ms -result(dst unalign, src aligned): memcpy_fast=163ms memcpy=466 ms -result(dst unalign, src unalign): memcpy_fast=168ms memcpy=472 ms - -benchmark(size=4096 bytes, times=524288): -result(dst aligned, src aligned): memcpy_fast=68ms memcpy=82 ms -result(dst aligned, src unalign): memcpy_fast=94ms memcpy=226 ms -result(dst unalign, src aligned): memcpy_fast=134ms memcpy=216 ms -result(dst unalign, src unalign): memcpy_fast=84ms memcpy=188 ms - -benchmark(size=8192 bytes, times=262144): -result(dst aligned, src aligned): memcpy_fast=55ms memcpy=70 ms -result(dst aligned, src unalign): memcpy_fast=75ms memcpy=192 ms -result(dst unalign, src aligned): memcpy_fast=79ms memcpy=223 ms -result(dst unalign, src unalign): memcpy_fast=91ms memcpy=219 ms - -benchmark(size=1048576 bytes, times=2048): -result(dst aligned, src aligned): memcpy_fast=181ms memcpy=165 ms -result(dst aligned, src unalign): memcpy_fast=192ms memcpy=303 ms -result(dst unalign, src aligned): memcpy_fast=218ms memcpy=310 ms -result(dst unalign, src unalign): memcpy_fast=183ms memcpy=307 ms - -benchmark(size=4194304 bytes, times=512): -result(dst aligned, src aligned): memcpy_fast=263ms memcpy=398 ms -result(dst aligned, src unalign): memcpy_fast=269ms memcpy=433 ms -result(dst unalign, src aligned): memcpy_fast=306ms memcpy=497 ms -result(dst unalign, src unalign): memcpy_fast=285ms memcpy=417 ms - -benchmark(size=8388608 bytes, times=256): -result(dst aligned, src aligned): memcpy_fast=287ms memcpy=421 ms -result(dst aligned, src unalign): memcpy_fast=288ms memcpy=430 ms -result(dst unalign, src aligned): memcpy_fast=285ms memcpy=510 ms -result(dst unalign, src unalign): memcpy_fast=291ms memcpy=440 ms - -benchmark random access: -memcpy_fast=487ms memcpy=1000ms - -``` - - -About -===== - -skywind - -http://www.skywind.me diff --git a/base/memcpy/memcpy.c b/base/memcpy/memcpy.c deleted file mode 100644 index 9e1b175bc57..00000000000 --- a/base/memcpy/memcpy.c +++ /dev/null @@ -1,3 +0,0 @@ -#include "memcpy.h" - -/// This is needed to generate an object file for linking. 
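The deleted FastMemcpy README above summarizes the technique as a jump table for small copies, plain SSE2 vector copies for medium sizes, and cache prefetch plus non-temporal (movntdq) stores for huge copies. A distilled sketch of that last path, condensed from the removed implementation; it assumes the destination is already 16-byte aligned and handles only whole 128-byte blocks, which the original code arranged before entering this loop (the remainder went through memcpy_tiny):

    #include <xmmintrin.h>   // _mm_prefetch, _MM_HINT_NTA
    #include <emmintrin.h>   // SSE2: __m128i, _mm_loadu_si128, _mm_stream_si128, _mm_sfence
    #include <stddef.h>

    static void copy_huge_sse2(unsigned char * dst, const unsigned char * src, size_t size)
    {
        for (; size >= 128; size -= 128)
        {
            /* Non-temporal prefetch of the source ahead of the current block. */
            _mm_prefetch((const char *)(src + 256), _MM_HINT_NTA);
            for (int i = 0; i < 8; ++i)
            {
                __m128i chunk = _mm_loadu_si128(((const __m128i *)src) + i);
                /* Streaming store: bypasses the cache, requires 16-byte aligned dst. */
                _mm_stream_si128(((__m128i *)dst) + i, chunk);
            }
            src += 128;
            dst += 128;
        }
        _mm_sfence();   /* make the non-temporal stores globally visible */
    }
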
diff --git a/base/memcpy/memcpy.h b/base/memcpy/memcpy.h deleted file mode 100644 index 1f898d4aff6..00000000000 --- a/base/memcpy/memcpy.h +++ /dev/null @@ -1,17 +0,0 @@ -#pragma once - -#ifdef __cplusplus -extern "C" -{ -#endif - -#include "impl/FastMemcpy.h" - -void * __attribute__((__weak__)) memcpy(void * __restrict destination, const void * __restrict source, size_t size) -{ - return memcpy_fast(destination, source, size); -} - -#ifdef __cplusplus -} -#endif diff --git a/base/mysqlxx/CMakeLists.txt b/base/mysqlxx/CMakeLists.txt index 2d2ad75628d..702e0197ffb 100644 --- a/base/mysqlxx/CMakeLists.txt +++ b/base/mysqlxx/CMakeLists.txt @@ -1,32 +1,18 @@ add_library (mysqlxx - src/Connection.cpp - src/Exception.cpp - src/Query.cpp - src/ResultBase.cpp - src/StoreQueryResult.cpp - src/UseQueryResult.cpp - src/Row.cpp - src/Value.cpp - src/Pool.cpp - src/PoolWithFailover.cpp - - include/mysqlxx/Connection.h - include/mysqlxx/Exception.h - include/mysqlxx/mysqlxx.h - include/mysqlxx/Null.h - include/mysqlxx/Pool.h - include/mysqlxx/PoolWithFailover.h - include/mysqlxx/Query.h - include/mysqlxx/ResultBase.h - include/mysqlxx/Row.h - include/mysqlxx/StoreQueryResult.h - include/mysqlxx/Transaction.h - include/mysqlxx/Types.h - include/mysqlxx/UseQueryResult.h - include/mysqlxx/Value.h + Connection.cpp + Exception.cpp + Query.cpp + ResultBase.cpp + StoreQueryResult.cpp + UseQueryResult.cpp + Row.cpp + Value.cpp + Pool.cpp + PoolFactory.cpp + PoolWithFailover.cpp ) -target_include_directories (mysqlxx PUBLIC include) +target_include_directories (mysqlxx PUBLIC ..) if (USE_INTERNAL_MYSQL_LIBRARY) target_include_directories (mysqlxx PUBLIC ${ClickHouse_SOURCE_DIR}/contrib/mariadb-connector-c/include) @@ -46,7 +32,7 @@ else () endif () endif () -target_link_libraries(mysqlxx PUBLIC common ${Poco_Util_LIBRARY} ${Poco_Foundation_LIBRARY} PRIVATE ${MYSQLCLIENT_LIBRARIES} PUBLIC ${Boost_SYSTEM_LIBRARY} PRIVATE ${ZLIB_LIBRARIES}) +target_link_libraries(mysqlxx PUBLIC common PRIVATE ${MYSQLCLIENT_LIBRARIES} PUBLIC ${Boost_SYSTEM_LIBRARY} PRIVATE ${ZLIB_LIBRARIES}) if(OPENSSL_LIBRARIES) target_link_libraries(mysqlxx PRIVATE ${OPENSSL_LIBRARIES}) endif() @@ -57,5 +43,5 @@ if (NOT USE_INTERNAL_MYSQL_LIBRARY AND OPENSSL_INCLUDE_DIR) endif () if (ENABLE_TESTS) - add_subdirectory (src/tests) + add_subdirectory (tests) endif () diff --git a/base/mysqlxx/src/Connection.cpp b/base/mysqlxx/Connection.cpp similarity index 100% rename from base/mysqlxx/src/Connection.cpp rename to base/mysqlxx/Connection.cpp diff --git a/base/mysqlxx/include/mysqlxx/Connection.h b/base/mysqlxx/Connection.h similarity index 100% rename from base/mysqlxx/include/mysqlxx/Connection.h rename to base/mysqlxx/Connection.h diff --git a/base/mysqlxx/Exception.cpp b/base/mysqlxx/Exception.cpp new file mode 100644 index 00000000000..188e7bd740d --- /dev/null +++ b/base/mysqlxx/Exception.cpp @@ -0,0 +1,34 @@ +#if __has_include() +#include +#else +#include +#endif +#include + + +namespace mysqlxx +{ + +std::string errorMessage(MYSQL * driver) +{ + std::stringstream res; + res << mysql_error(driver) + << " (" << (driver->host ? 
driver->host : "(nullptr)") + << ":" << driver->port << ")"; + return res.str(); +} + +void checkError(MYSQL * driver) +{ + unsigned num = mysql_errno(driver); + + if (num) + throw Exception(errorMessage(driver), num); +} + +void onError(MYSQL * driver) +{ + throw Exception(errorMessage(driver), mysql_errno(driver)); +} + +} diff --git a/base/mysqlxx/Exception.h b/base/mysqlxx/Exception.h new file mode 100644 index 00000000000..eaeb3565af1 --- /dev/null +++ b/base/mysqlxx/Exception.h @@ -0,0 +1,53 @@ +#pragma once + +#include +#include +#include + + +namespace mysqlxx +{ +/// Common exception class for MySQL library. Functions code() and errnum() return error numbers from MySQL, for details see mysqld_error.h +struct Exception : public Poco::Exception +{ + Exception(const std::string & msg, int code = 0) : Poco::Exception(msg, code) {} + int errnum() const { return code(); } + const char * name() const throw() override { return "mysqlxx::Exception"; } + const char * className() const throw() override { return "mysqlxx::Exception"; } +}; + + +/// Cannot connect to MySQL server +struct ConnectionFailed : public Exception +{ + ConnectionFailed(const std::string & msg, int code = 0) : Exception(msg, code) {} + const char * name() const throw() override { return "mysqlxx::ConnectionFailed"; } + const char * className() const throw() override { return "mysqlxx::ConnectionFailed"; } +}; + + +/// Erroneous query. +struct BadQuery : public Exception +{ + BadQuery(const std::string & msg, int code = 0) : Exception(msg, code) {} + const char * name() const throw() override { return "mysqlxx::BadQuery"; } + const char * className() const throw() override { return "mysqlxx::BadQuery"; } +}; + + +/// Value parsing failure +struct CannotParseValue : public Exception +{ + CannotParseValue(const std::string & msg, int code = 0) : Exception(msg, code) {} + const char * name() const throw() override { return "mysqlxx::CannotParseValue"; } + const char * className() const throw() override { return "mysqlxx::CannotParseValue"; } +}; + + +std::string errorMessage(MYSQL * driver); + +/// For internal need of library. 
+void checkError(MYSQL * driver); +[[noreturn]] void onError(MYSQL * driver); + +} diff --git a/base/mysqlxx/include/mysqlxx/Null.h b/base/mysqlxx/Null.h similarity index 84% rename from base/mysqlxx/include/mysqlxx/Null.h rename to base/mysqlxx/Null.h index a3ba3a48cb1..b12e1e7f0bf 100644 --- a/base/mysqlxx/include/mysqlxx/Null.h +++ b/base/mysqlxx/Null.h @@ -27,7 +27,7 @@ public: Null() : is_null(true) {} Null(const Null &) = default; Null(Null &&) noexcept = default; - Null(NullType data) : is_null(true) {} + Null(NullType) : is_null(true) {} explicit Null(const T & data_) : data(data_), is_null(false) {} operator T & () @@ -47,7 +47,7 @@ public: Null & operator= (Null &&) noexcept = default; Null & operator= (const Null &) = default; Null & operator= (const T & data_) { is_null = false; data = data_; return *this; } - Null & operator= (const NullType other) { is_null = true; data = T(); return *this; } + Null & operator= (const NullType) { is_null = true; data = T(); return *this; } bool isNull() const { return is_null; } @@ -57,7 +57,7 @@ public: || (is_null == other.is_null && data < other.data); } - bool operator< (const NullType other) const { return false; } + bool operator< (const NullType) const { return false; } bool operator== (const Null & other) const { @@ -69,14 +69,14 @@ public: return !is_null && data == other; } - bool operator== (const NullType other) const { return is_null; } + bool operator== (const NullType) const { return is_null; } bool operator!= (const Null & other) const { return !(*this == other); } - bool operator!= (const NullType other) const { return !is_null; } + bool operator!= (const NullType) const { return !is_null; } bool operator!= (const T & other) const { diff --git a/base/mysqlxx/src/Pool.cpp b/base/mysqlxx/Pool.cpp similarity index 93% rename from base/mysqlxx/src/Pool.cpp rename to base/mysqlxx/Pool.cpp index 410ac062039..99815363a56 100644 --- a/base/mysqlxx/src/Pool.cpp +++ b/base/mysqlxx/Pool.cpp @@ -22,15 +22,20 @@ void Pool::Entry::incrementRefCount() if (!data) return; ++data->ref_count; - mysql_thread_init(); + if (data->ref_count == 1) + mysql_thread_init(); } void Pool::Entry::decrementRefCount() { if (!data) return; - --data->ref_count; - mysql_thread_end(); + if (data->ref_count > 0) + { + --data->ref_count; + if (data->ref_count == 0) + mysql_thread_end(); + } } @@ -114,7 +119,7 @@ Pool::~Pool() } -Pool::Entry Pool::Get() +Pool::Entry Pool::get() { std::unique_lock lock(mutex); @@ -169,14 +174,24 @@ Pool::Entry Pool::tryGet() return Entry(); } +void Pool::removeConnection(Connection* connection) +{ + std::lock_guard lock(mutex); + if (connection) + { + if (connection->ref_count > 0) + { + connection->conn.disconnect(); + connection->ref_count = 0; + } + connections.remove(connection); + } +} + void Pool::Entry::disconnect() { - if (data) - { - decrementRefCount(); - data->conn.disconnect(); - } + pool->removeConnection(data); } diff --git a/base/mysqlxx/include/mysqlxx/Pool.h b/base/mysqlxx/Pool.h similarity index 99% rename from base/mysqlxx/include/mysqlxx/Pool.h rename to base/mysqlxx/Pool.h index 5261ffab017..bf9365a064a 100644 --- a/base/mysqlxx/include/mysqlxx/Pool.h +++ b/base/mysqlxx/Pool.h @@ -185,7 +185,7 @@ public: ~Pool(); /// Allocates connection. - Entry Get(); + Entry get(); /// Allocates connection. /// If database is not accessible, returns empty Entry object. 
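Since Pool::Get() is renamed to Pool::get(), call sites have to follow. A minimal hypothetical usage sketch (the function name is made up for illustration; only the mysqlxx calls are taken from the code in this patch):

    #include <mysqlxx/Pool.h>

    void useConnection(mysqlxx::Pool & pool)
    {
        mysqlxx::Pool::Entry entry = pool.get();   /// was pool.Get() before the rename
        /// tryGet() is the non-blocking alternative: if the database is not accessible
        /// it returns an empty Entry, detectable with entry.isNull(), as
        /// PoolWithFailover::get() does further below.
        (void)entry;   /// the Entry returns its connection to the pool on destruction
    }
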
@@ -198,6 +198,8 @@ public: return description; } + void removeConnection(Connection * connection); + protected: /// Number of MySQL connections which are created at launch. unsigned default_connections; diff --git a/base/mysqlxx/PoolFactory.cpp b/base/mysqlxx/PoolFactory.cpp new file mode 100644 index 00000000000..f0a5543d723 --- /dev/null +++ b/base/mysqlxx/PoolFactory.cpp @@ -0,0 +1,122 @@ +#include +#include +#include + +namespace mysqlxx +{ + +struct PoolFactory::Impl +{ + // Cache of already created pools, identified by their config name + std::map<std::string, std::shared_ptr<PoolWithFailover>> pools; + + // Cache mapping a pool ID (host + port + user + ...) to the config name of an already established shareable pool + std::map<std::string, std::string> pools_by_ids; + + /// Protect pools and pools_by_ids caches + std::mutex mutex; +}; + +PoolWithFailover PoolFactory::get(const std::string & config_name, unsigned default_connections, + unsigned max_connections, size_t max_tries) +{ + return get(Poco::Util::Application::instance().config(), config_name, default_connections, max_connections, max_tries); +} + +/// Duplicate of code from StringUtils.h. Copied here for less dependencies. +static bool startsWith(const std::string & s, const char * prefix) +{ + return s.size() >= strlen(prefix) && 0 == memcmp(s.data(), prefix, strlen(prefix)); +} + +static std::string getPoolEntryName(const Poco::Util::AbstractConfiguration & config, + const std::string & config_name) +{ + bool shared = config.getBool(config_name + ".share_connection", false); + + // Not shared: no need to generate a name, the pool won't be stored + if (!shared) + return ""; + + std::string entry_name; + std::string host = config.getString(config_name + ".host", ""); + std::string port = config.getString(config_name + ".port", ""); + std::string user = config.getString(config_name + ".user", ""); + std::string db = config.getString(config_name + ".db", ""); + std::string table = config.getString(config_name + ".table", ""); + + Poco::Util::AbstractConfiguration::Keys keys; + config.keys(config_name, keys); + + if (config.has(config_name + ".replica")) + { + Poco::Util::AbstractConfiguration::Keys replica_keys; + config.keys(config_name, replica_keys); + for (const auto & replica_config_key : replica_keys) + { + /// There could be other elements at the same level in the configuration file, like "user", "port"... + if (startsWith(replica_config_key, "replica")) + { + std::string replica_name = config_name + "." + replica_config_key; + std::string tmp_host = config.getString(replica_name + ".host", host); + std::string tmp_port = config.getString(replica_name + ".port", port); + std::string tmp_user = config.getString(replica_name + ".user", user); + entry_name += (entry_name.empty() ?
"" : "|") + tmp_user + "@" + tmp_host + ":" + tmp_port + "/" + db; + } + } + } + else + { + entry_name = user + "@" + host + ":" + port + "/" + db; + } + return entry_name; +} + +PoolWithFailover PoolFactory::get(const Poco::Util::AbstractConfiguration & config, + const std::string & config_name, unsigned default_connections, unsigned max_connections, size_t max_tries) +{ + + std::lock_guard lock(impl->mutex); + if (auto entry = impl->pools.find(config_name); entry != impl->pools.end()) + { + return *(entry->second.get()); + } + else + { + std::string entry_name = getPoolEntryName(config, config_name); + if (auto id = impl->pools_by_ids.find(entry_name); id != impl->pools_by_ids.end()) + { + entry = impl->pools.find(id->second); + std::shared_ptr<PoolWithFailover> pool = entry->second; + impl->pools.insert_or_assign(config_name, pool); + return *pool; + } + + auto pool = std::make_shared<PoolWithFailover>(config, config_name, default_connections, max_connections, max_tries); + // Check whether the pool will be shared + if (!entry_name.empty()) + { + // Store the shared pool + impl->pools.insert_or_assign(config_name, pool); + impl->pools_by_ids.insert_or_assign(entry_name, config_name); + } + return *(pool.get()); + } +} + +void PoolFactory::reset() +{ + std::lock_guard lock(impl->mutex); + impl->pools.clear(); + impl->pools_by_ids.clear(); +} + +PoolFactory::PoolFactory() : impl(std::make_unique<Impl>()) {} + +PoolFactory & PoolFactory::instance() +{ + static PoolFactory ret; + return ret; +} + +} diff --git a/base/mysqlxx/PoolFactory.h b/base/mysqlxx/PoolFactory.h new file mode 100644 index 00000000000..a7bb97cd0c6 --- /dev/null +++ b/base/mysqlxx/PoolFactory.h @@ -0,0 +1,55 @@ +#pragma once + +#include +#include +#include + +#include + + +#define MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_START_CONNECTIONS 1 +#define MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_MAX_CONNECTIONS 16 +#define MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_MAX_TRIES 3 + + +namespace mysqlxx +{ + +/* + * PoolFactory.h + * This class is a helper singleton to share connections to MySQL. + */ +class PoolFactory final : private boost::noncopyable +{ +public: + static PoolFactory & instance(); + + PoolFactory(const PoolFactory &) = delete; + + /** Allocates a PoolWithFailover to connect to MySQL. */ + PoolWithFailover get(const std::string & config_name, + unsigned default_connections = MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_START_CONNECTIONS, + unsigned max_connections = MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_MAX_CONNECTIONS, + size_t max_tries = MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_MAX_TRIES); + + /** Allocates a PoolWithFailover to connect to MySQL. */ + PoolWithFailover get(const Poco::Util::AbstractConfiguration & config, + const std::string & config_name, + unsigned default_connections = MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_START_CONNECTIONS, + unsigned max_connections = MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_MAX_CONNECTIONS, + size_t max_tries = MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_MAX_TRIES); + + void reset(); + + + ~PoolFactory() = default; + PoolFactory& operator=(const PoolFactory &) = delete; + +private: + PoolFactory(); + + struct Impl; + std::unique_ptr<Impl> impl; +}; + +} diff --git a/base/mysqlxx/PoolWithFailover.cpp b/base/mysqlxx/PoolWithFailover.cpp new file mode 100644 index 00000000000..5bee75aab1b --- /dev/null +++ b/base/mysqlxx/PoolWithFailover.cpp @@ -0,0 +1,135 @@ +#include + + +/// Duplicate of code from StringUtils.h. Copied here for less dependencies.
+static bool startsWith(const std::string & s, const char * prefix) +{ + return s.size() >= strlen(prefix) && 0 == memcmp(s.data(), prefix, strlen(prefix)); +} + + +using namespace mysqlxx; + +PoolWithFailover::PoolWithFailover(const Poco::Util::AbstractConfiguration & config_, + const std::string & config_name_, const unsigned default_connections_, + const unsigned max_connections_, const size_t max_tries_) + : max_tries(max_tries_) +{ + shareable = config_.getBool(config_name_ + ".share_connection", false); + if (config_.has(config_name_ + ".replica")) + { + Poco::Util::AbstractConfiguration::Keys replica_keys; + config_.keys(config_name_, replica_keys); + for (const auto & replica_config_key : replica_keys) + { + /// There could be other elements at the same level in the configuration file, like "password", "port"... + if (startsWith(replica_config_key, "replica")) + { + std::string replica_name = config_name_ + "." + replica_config_key; + + int priority = config_.getInt(replica_name + ".priority", 0); + + replicas_by_priority[priority].emplace_back( + std::make_shared<Pool>(config_, replica_name, default_connections_, max_connections_, config_name_.c_str())); + } + } + } + else + { + replicas_by_priority[0].emplace_back( + std::make_shared<Pool>(config_, config_name_, default_connections_, max_connections_)); + } +} + +PoolWithFailover::PoolWithFailover(const std::string & config_name_, const unsigned default_connections_, + const unsigned max_connections_, const size_t max_tries_) + : PoolWithFailover{ + Poco::Util::Application::instance().config(), config_name_, + default_connections_, max_connections_, max_tries_} +{ +} + +PoolWithFailover::PoolWithFailover(const PoolWithFailover & other) + : max_tries{other.max_tries}, shareable{other.shareable} +{ + if (shareable) + { + replicas_by_priority = other.replicas_by_priority; + } + else + { + for (const auto & priority_replicas : other.replicas_by_priority) + { + Replicas replicas; + replicas.reserve(priority_replicas.second.size()); + for (const auto & pool : priority_replicas.second) + replicas.emplace_back(std::make_shared<Pool>(*pool)); + replicas_by_priority.emplace(priority_replicas.first, std::move(replicas)); + } + } +} + +PoolWithFailover::Entry PoolWithFailover::get() +{ + Poco::Util::Application & app = Poco::Util::Application::instance(); + std::lock_guard locker(mutex); + + /// If we cannot connect to some replica due to pool overflow, then we will wait and connect. + PoolPtr * full_pool = nullptr; + + for (size_t try_no = 0; try_no < max_tries; ++try_no) + { + full_pool = nullptr; + + for (auto & priority_replicas : replicas_by_priority) + { + Replicas & replicas = priority_replicas.second; + for (size_t i = 0, size = replicas.size(); i < size; ++i) + { + PoolPtr & pool = replicas[i]; + + try + { + Entry entry = shareable ? pool->get() : pool->tryGet(); + + if (!entry.isNull()) + { + /// Move all traversed replicas to the end of the queue. + /// (No need to move replicas with a different priority) + std::rotate(replicas.begin(), replicas.begin() + i + 1, replicas.end()); + + return entry; + } + } + catch (const Poco::Exception & e) + { + if (e.displayText().find("mysqlxx::Pool is full") != std::string::npos) /// NOTE: String comparison is trashy code.
+ { + full_pool = &pool; + } + + app.logger().warning("Connection to " + pool->getDescription() + " failed: " + e.displayText()); + continue; + } + + app.logger().warning("Connection to " + pool->getDescription() + " failed."); + } + } + + app.logger().error("Connection to all replicas failed " + std::to_string(try_no + 1) + " times"); + } + + if (full_pool) + { + app.logger().error("All connections failed, trying to wait on a full pool " + (*full_pool)->getDescription()); + return (*full_pool)->get(); + } + + std::stringstream message; + message << "Connections to all replicas failed: "; + for (auto it = replicas_by_priority.begin(); it != replicas_by_priority.end(); ++it) + for (auto jt = it->second.begin(); jt != it->second.end(); ++jt) + message << (it == replicas_by_priority.begin() && jt == it->second.begin() ? "" : ", ") << (*jt)->getDescription(); + + throw Poco::Exception(message.str()); +} diff --git a/base/mysqlxx/include/mysqlxx/PoolWithFailover.h b/base/mysqlxx/PoolWithFailover.h similarity index 79% rename from base/mysqlxx/include/mysqlxx/PoolWithFailover.h rename to base/mysqlxx/PoolWithFailover.h index 21b27ebd4fe..029fc3ebad3 100644 --- a/base/mysqlxx/include/mysqlxx/PoolWithFailover.h +++ b/base/mysqlxx/PoolWithFailover.h @@ -78,6 +78,9 @@ namespace mysqlxx /// Mutex for set of replicas. std::mutex mutex; + /// Can the Pool be shared + bool shareable; + public: using Entry = Pool::Entry; @@ -87,22 +90,20 @@ namespace mysqlxx * max_connections Maximum number of connections in pool to each replica. * max_tries_ Max number of connection tries. */ - PoolWithFailover(const std::string & config_name, - unsigned default_connections = MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_START_CONNECTIONS, - unsigned max_connections = MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_MAX_CONNECTIONS, - size_t max_tries = MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_MAX_TRIES); + PoolWithFailover(const std::string & config_name_, + unsigned default_connections_ = MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_START_CONNECTIONS, + unsigned max_connections_ = MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_MAX_CONNECTIONS, + size_t max_tries_ = MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_MAX_TRIES); - PoolWithFailover(const Poco::Util::AbstractConfiguration & config, - const std::string & config_name, - unsigned default_connections = MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_START_CONNECTIONS, - unsigned max_connections = MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_MAX_CONNECTIONS, - size_t max_tries = MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_MAX_TRIES); + PoolWithFailover(const Poco::Util::AbstractConfiguration & config_, + const std::string & config_name_, + unsigned default_connections_ = MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_START_CONNECTIONS, + unsigned max_connections_ = MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_MAX_CONNECTIONS, + size_t max_tries_ = MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_MAX_TRIES); PoolWithFailover(const PoolWithFailover & other); - PoolWithFailover & operator=(const PoolWithFailover &) = delete; - /** Allocates a connection to use. 
*/ - Entry Get(); + Entry get(); }; } diff --git a/base/mysqlxx/src/Query.cpp b/base/mysqlxx/Query.cpp similarity index 80% rename from base/mysqlxx/src/Query.cpp rename to base/mysqlxx/Query.cpp index dc5c3274641..ab9bb174d4a 100644 --- a/base/mysqlxx/src/Query.cpp +++ b/base/mysqlxx/Query.cpp @@ -11,29 +11,23 @@ namespace mysqlxx { -Query::Query(Connection * conn_, const std::string & query_string) : std::ostream(0), conn(conn_) +Query::Query(Connection * conn_, const std::string & query_string) : conn(conn_) { /// Важно в случае, если Query используется не из того же потока, что Connection. mysql_thread_init(); - init(&query_buf); - if (!query_string.empty()) - { - query_buf.str(query_string); - seekp(0, std::ios::end); - } + query_buf << query_string; - imbue(std::locale::classic()); + query_buf.imbue(std::locale::classic()); } -Query::Query(const Query & other) : std::ostream(0), conn(other.conn) +Query::Query(const Query & other) : conn(other.conn) { /// Важно в случае, если Query используется не из того же потока, что Connection. mysql_thread_init(); - init(&query_buf); - imbue(std::locale::classic()); + query_buf.imbue(std::locale::classic()); *this << other.str(); } @@ -45,9 +39,7 @@ Query & Query::operator= (const Query & other) conn = other.conn; - seekp(0); - clear(); - *this << other.str(); + query_buf.str(other.str()); return *this; } @@ -59,9 +51,7 @@ Query::~Query() void Query::reset() { - seekp(0); - clear(); - query_buf.str(""); + query_buf.str({}); } void Query::executeImpl() diff --git a/base/mysqlxx/include/mysqlxx/Query.h b/base/mysqlxx/Query.h similarity index 91% rename from base/mysqlxx/include/mysqlxx/Query.h rename to base/mysqlxx/Query.h index 66371bc7af0..1d3ab9678d5 100644 --- a/base/mysqlxx/include/mysqlxx/Query.h +++ b/base/mysqlxx/Query.h @@ -1,7 +1,6 @@ #pragma once #include -#include #include #include @@ -28,7 +27,7 @@ namespace mysqlxx * * Внимание! Один объект запроса можно использовать только из одного потока. 
*/ -class Query : public std::ostream +class Query { public: Query(Connection * conn_, const std::string & query_string = ""); @@ -64,9 +63,21 @@ public: return query_buf.str(); } + auto rdbuf() const + { + return query_buf.rdbuf(); + } + + template + inline Query & operator<< (T && x) + { + query_buf << std::forward(x); + return *this; + } + private: Connection * conn; - std::stringbuf query_buf; + std::ostringstream query_buf; void executeImpl(); }; diff --git a/base/mysqlxx/src/ResultBase.cpp b/base/mysqlxx/ResultBase.cpp similarity index 100% rename from base/mysqlxx/src/ResultBase.cpp rename to base/mysqlxx/ResultBase.cpp diff --git a/base/mysqlxx/include/mysqlxx/ResultBase.h b/base/mysqlxx/ResultBase.h similarity index 77% rename from base/mysqlxx/include/mysqlxx/ResultBase.h rename to base/mysqlxx/ResultBase.h index 00247b6c90b..126a5c1ecca 100644 --- a/base/mysqlxx/include/mysqlxx/ResultBase.h +++ b/base/mysqlxx/ResultBase.h @@ -22,11 +22,11 @@ class ResultBase public: ResultBase(MYSQL_RES * res_, Connection * conn_, const Query * query_); - Connection * getConnection() { return conn; } - MYSQL_FIELDS getFields() { return fields; } - unsigned getNumFields() { return num_fields; } - MYSQL_RES * getRes() { return res; } - const Query * getQuery() const { return query; } + Connection * getConnection() { return conn; } + MYSQL_FIELDS getFields() { return fields; } + unsigned getNumFields() { return num_fields; } + MYSQL_RES * getRes() { return res; } + const Query * getQuery() const { return query; } virtual ~ResultBase(); diff --git a/base/mysqlxx/src/Row.cpp b/base/mysqlxx/Row.cpp similarity index 100% rename from base/mysqlxx/src/Row.cpp rename to base/mysqlxx/Row.cpp diff --git a/base/mysqlxx/include/mysqlxx/Row.h b/base/mysqlxx/Row.h similarity index 100% rename from base/mysqlxx/include/mysqlxx/Row.h rename to base/mysqlxx/Row.h diff --git a/base/mysqlxx/StoreQueryResult.cpp b/base/mysqlxx/StoreQueryResult.cpp new file mode 100644 index 00000000000..620ed8def56 --- /dev/null +++ b/base/mysqlxx/StoreQueryResult.cpp @@ -0,0 +1,30 @@ +#if __has_include() +#include +#else +#include +#endif + +#include +#include + + +namespace mysqlxx +{ + +StoreQueryResult::StoreQueryResult(MYSQL_RES * res_, Connection * conn_, const Query * query_) : ResultBase(res_, conn_, query_) +{ + UInt64 rows = mysql_num_rows(res); + reserve(rows); + lengths.resize(rows * num_fields); + + for (UInt64 i = 0; MYSQL_ROW row = mysql_fetch_row(res); ++i) + { + MYSQL_LENGTHS lengths_for_row = mysql_fetch_lengths(res); + memcpy(&lengths[i * num_fields], lengths_for_row, sizeof(lengths[0]) * num_fields); + + push_back(Row(row, this, &lengths[i * num_fields])); + } + checkError(conn->getDriver()); +} + +} diff --git a/base/mysqlxx/include/mysqlxx/StoreQueryResult.h b/base/mysqlxx/StoreQueryResult.h similarity index 100% rename from base/mysqlxx/include/mysqlxx/StoreQueryResult.h rename to base/mysqlxx/StoreQueryResult.h diff --git a/base/mysqlxx/include/mysqlxx/Transaction.h b/base/mysqlxx/Transaction.h similarity index 100% rename from base/mysqlxx/include/mysqlxx/Transaction.h rename to base/mysqlxx/Transaction.h diff --git a/base/mysqlxx/include/mysqlxx/Types.h b/base/mysqlxx/Types.h similarity index 100% rename from base/mysqlxx/include/mysqlxx/Types.h rename to base/mysqlxx/Types.h diff --git a/base/mysqlxx/src/UseQueryResult.cpp b/base/mysqlxx/UseQueryResult.cpp similarity index 100% rename from base/mysqlxx/src/UseQueryResult.cpp rename to base/mysqlxx/UseQueryResult.cpp diff --git 
a/base/mysqlxx/include/mysqlxx/UseQueryResult.h b/base/mysqlxx/UseQueryResult.h similarity index 100% rename from base/mysqlxx/include/mysqlxx/UseQueryResult.h rename to base/mysqlxx/UseQueryResult.h diff --git a/base/mysqlxx/src/Value.cpp b/base/mysqlxx/Value.cpp similarity index 97% rename from base/mysqlxx/src/Value.cpp rename to base/mysqlxx/Value.cpp index 4fed33c5da9..ed66167e8ea 100644 --- a/base/mysqlxx/src/Value.cpp +++ b/base/mysqlxx/Value.cpp @@ -154,7 +154,7 @@ double Value::readFloatText(const char * buf, size_t length) const void Value::throwException(const char * text) const { - static constexpr size_t MYSQLXX_QUERY_PREVIEW_LENGTH = 1000; + static constexpr size_t preview_length = 1000; std::stringstream info; info << text; @@ -166,7 +166,7 @@ void Value::throwException(const char * text) const } if (res && res->getQuery()) - info << ", query: " << res->getQuery()->str().substr(0, MYSQLXX_QUERY_PREVIEW_LENGTH); + info << ", query: " << res->getQuery()->str().substr(0, preview_length); throw CannotParseValue(info.str()); } diff --git a/base/mysqlxx/include/mysqlxx/Value.h b/base/mysqlxx/Value.h similarity index 77% rename from base/mysqlxx/include/mysqlxx/Value.h rename to base/mysqlxx/Value.h index 4b0c6c7cbfa..9fdb33a442d 100644 --- a/base/mysqlxx/include/mysqlxx/Value.h +++ b/base/mysqlxx/Value.h @@ -9,7 +9,7 @@ #include #include -#include +#include #include #include @@ -23,26 +23,26 @@ namespace mysqlxx class ResultBase; -/** Представляет одно значение, считанное из MySQL. - * Объект сам не хранит данные, а является всего лишь обёрткой над парой (const char *, size_t). - * Если уничтожить UseQueryResult/StoreQueryResult или Connection, - * или считать следующий Row при использовании UseQueryResult, то объект станет некорректным. - * Позволяет преобразовать значение (распарсить) в различные типы данных: - * - с помощью функций вида getUInt(), getString(), ... (рекомендуется); - * - с помощью шаблонной функции get(), которая специализирована для многих типов (для шаблонного кода); - * - шаблонная функция get работает также для всех типов, у которых есть конструктор из Value - * (это сделано для возможности расширения); - * - с помощью operator Type() - но этот метод реализован лишь для совместимости и не рекомендуется - * к использованию, так как неудобен (часто возникают неоднозначности). +/** Represents a single value read from MySQL. + * It doesn't owns the value. It's just a wrapper of a pair (const char *, size_t). + * If the UseQueryResult/StoreQueryResult or Connection is destroyed, + * or you have read the next Row while using UseQueryResult, then the object is invalidated. + * Allows to transform (parse) the value to various data types: + * - with getUInt(), getString(), ... (recommended); + * - with template function get() that is specialized for multiple data types; + * - the template function get also works for all types that can be constructed from Value + * (it is an extension point); + * - with operator Type() - this is done for compatibility and not recommended because ambiguities possible. * - * При ошибке парсинга, выкидывается исключение. - * При попытке достать значение, которое равно nullptr, выкидывается исключение - * - используйте метод isNull() для проверки. + * On parsing error, exception is thrown. + * When trying to extract a value that is nullptr, exception is thrown + * - use isNull() method to check. * - * Во всех распространённых системах, time_t - это всего лишь typedef от Int64 или Int32. 
- * Для того, чтобы можно было писать row[0].get(), ожидая, что значение вида '2011-01-01 00:00:00' - * корректно распарсится согласно текущей тайм-зоне, сделано так, что метод getUInt и соответствующие методы get<>() - * также умеют парсить дату и дату-время. + * As time_t is just an alias for integer data type + * to allow to write row[0].get(), and expect that the values like '2011-01-01 00:00:00' + * will be successfully parsed according to the current time zone, + * the getUInt method and the corresponding get<>() methods + * are capable of parsing Date and DateTime. */ class Value { @@ -166,7 +166,7 @@ private: else throwException("Cannot parse DateTime"); - return 0; /// чтобы не было warning-а. + return 0; /// avoid warning. } @@ -184,7 +184,7 @@ private: else throwException("Cannot parse Date"); - return 0; /// чтобы не было warning-а. + return 0; /// avoid warning. } @@ -231,7 +231,7 @@ private: double readFloatText(const char * buf, size_t length) const; /// Выкинуть исключение с подробной информацией - void throwException(const char * text) const; + [[noreturn]] void throwException(const char * text) const; }; diff --git a/base/mysqlxx/include/mysqlxx/Exception.h b/base/mysqlxx/include/mysqlxx/Exception.h deleted file mode 100644 index 92f3f34806f..00000000000 --- a/base/mysqlxx/include/mysqlxx/Exception.h +++ /dev/null @@ -1,53 +0,0 @@ -#pragma once - -#include -#include -#include - - -namespace mysqlxx -{ -/// Common exception class for MySQL library. Functions code() and errnum() return error numbers from MySQL, for details see mysqld_error.h -struct Exception : public Poco::Exception -{ - Exception(const std::string & msg, int code = 0) : Poco::Exception(msg, code) {} - int errnum() const { return code(); } - const char * name() const throw() override { return "mysqlxx::Exception"; } - const char * className() const throw() override { return "mysqlxx::Exception"; } -}; - - -/// Cannot connect to MySQL server -struct ConnectionFailed : public Exception -{ - ConnectionFailed(const std::string & msg, int code = 0) : Exception(msg, code) {} - const char * name() const throw() override { return "mysqlxx::ConnectionFailed"; } - const char * className() const throw() override { return "mysqlxx::ConnectionFailed"; } -}; - - -/// Erroneous query. -struct BadQuery : public Exception -{ - BadQuery(const std::string & msg, int code = 0) : Exception(msg, code) {} - const char * name() const throw() override { return "mysqlxx::BadQuery"; } - const char * className() const throw() override { return "mysqlxx::BadQuery"; } -}; - - -/// Value parsing failure -struct CannotParseValue : public Exception -{ - CannotParseValue(const std::string & msg, int code = 0) : Exception(msg, code) {} - const char * name() const throw() override { return "mysqlxx::CannotParseValue"; } - const char * className() const throw() override { return "mysqlxx::CannotParseValue"; } -}; - - -std::string errorMessage(MYSQL * driver); - -/// For internal need of library. 
-void checkError(MYSQL * driver); -void onError(MYSQL * driver); - -} diff --git a/base/mysqlxx/include/mysqlxx/mysqlxx.h b/base/mysqlxx/mysqlxx.h similarity index 100% rename from base/mysqlxx/include/mysqlxx/mysqlxx.h rename to base/mysqlxx/mysqlxx.h diff --git a/base/mysqlxx/src/Exception.cpp b/base/mysqlxx/src/Exception.cpp deleted file mode 100644 index b065d17ed51..00000000000 --- a/base/mysqlxx/src/Exception.cpp +++ /dev/null @@ -1,38 +0,0 @@ -#if __has_include() -#include -#else -#include -#endif -#include - - -namespace mysqlxx -{ - -std::string errorMessage(MYSQL * driver) -{ - std::stringstream res; - res << mysql_error(driver) - << " (" << (driver->host ? driver->host : "(nullptr)") - << ":" << driver->port << ")"; - return res.str(); -} - - -/// Для внутренних нужд библиотеки. -void checkError(MYSQL * driver) -{ - unsigned num = mysql_errno(driver); - - if (num) - throw Exception(errorMessage(driver), num); -} - - -/// Для внутренних нужд библиотеки. -void onError(MYSQL * driver) -{ - throw Exception(errorMessage(driver), mysql_errno(driver)); -} - -} diff --git a/base/mysqlxx/src/PoolWithFailover.cpp b/base/mysqlxx/src/PoolWithFailover.cpp deleted file mode 100644 index dd89f1596d3..00000000000 --- a/base/mysqlxx/src/PoolWithFailover.cpp +++ /dev/null @@ -1,126 +0,0 @@ -#include - - -/// Duplicate of code from StringUtils.h. Copied here for less dependencies. -static bool startsWith(const std::string & s, const char * prefix) -{ - return s.size() >= strlen(prefix) && 0 == memcmp(s.data(), prefix, strlen(prefix)); -} - - -using namespace mysqlxx; - -PoolWithFailover::PoolWithFailover(const Poco::Util::AbstractConfiguration & cfg, - const std::string & config_name, const unsigned default_connections, - const unsigned max_connections, const size_t max_tries) - : max_tries(max_tries) -{ - if (cfg.has(config_name + ".replica")) - { - Poco::Util::AbstractConfiguration::Keys replica_keys; - cfg.keys(config_name, replica_keys); - for (const auto & replica_config_key : replica_keys) - { - /// There could be another elements in the same level in configuration file, like "password", "port"... - if (startsWith(replica_config_key, "replica")) - { - std::string replica_name = config_name + "." 
+ replica_config_key; - - int priority = cfg.getInt(replica_name + ".priority", 0); - - replicas_by_priority[priority].emplace_back( - std::make_shared(cfg, replica_name, default_connections, max_connections, config_name.c_str())); - } - } - } - else - { - replicas_by_priority[0].emplace_back( - std::make_shared(cfg, config_name, default_connections, max_connections)); - } -} - -PoolWithFailover::PoolWithFailover(const std::string & config_name, const unsigned default_connections, - const unsigned max_connections, const size_t max_tries) - : PoolWithFailover{ - Poco::Util::Application::instance().config(), config_name, - default_connections, max_connections, max_tries} -{} - -PoolWithFailover::PoolWithFailover(const PoolWithFailover & other) - : max_tries{other.max_tries} -{ - for (const auto & priority_replicas : other.replicas_by_priority) - { - Replicas replicas; - replicas.reserve(priority_replicas.second.size()); - for (const auto & pool : priority_replicas.second) - replicas.emplace_back(std::make_shared(*pool)); - replicas_by_priority.emplace(priority_replicas.first, std::move(replicas)); - } -} - -PoolWithFailover::Entry PoolWithFailover::Get() -{ - Poco::Util::Application & app = Poco::Util::Application::instance(); - std::lock_guard locker(mutex); - - /// If we cannot connect to some replica due to pool overflow, than we will wait and connect. - PoolPtr * full_pool = nullptr; - - for (size_t try_no = 0; try_no < max_tries; ++try_no) - { - full_pool = nullptr; - - for (auto & priority_replicas : replicas_by_priority) - { - Replicas & replicas = priority_replicas.second; - for (size_t i = 0, size = replicas.size(); i < size; ++i) - { - PoolPtr & pool = replicas[i]; - - try - { - Entry entry = pool->tryGet(); - - if (!entry.isNull()) - { - /// Move all traversed replicas to the end of queue. - /// (No need to move replicas with another priority) - std::rotate(replicas.begin(), replicas.begin() + i + 1, replicas.end()); - - return entry; - } - } - catch (const Poco::Exception & e) - { - if (e.displayText().find("mysqlxx::Pool is full") != std::string::npos) /// NOTE: String comparison is trashy code. - { - full_pool = &pool; - } - - app.logger().warning("Connection to " + pool->getDescription() + " failed: " + e.displayText()); - continue; - } - - app.logger().warning("Connection to " + pool->getDescription() + " failed."); - } - } - - app.logger().error("Connection to all replicas failed " + std::to_string(try_no + 1) + " times"); - } - - if (full_pool) - { - app.logger().error("All connections failed, trying to wait on a full pool " + (*full_pool)->getDescription()); - return (*full_pool)->Get(); - } - - std::stringstream message; - message << "Connections to all replicas failed: "; - for (auto it = replicas_by_priority.begin(); it != replicas_by_priority.end(); ++it) - for (auto jt = it->second.begin(); jt != it->second.end(); ++jt) - message << (it == replicas_by_priority.begin() && jt == it->second.begin() ? 
"" : ", ") << (*jt)->getDescription(); - - throw Poco::Exception(message.str()); -} diff --git a/base/mysqlxx/src/StoreQueryResult.cpp b/base/mysqlxx/src/StoreQueryResult.cpp deleted file mode 100644 index a09986a3014..00000000000 --- a/base/mysqlxx/src/StoreQueryResult.cpp +++ /dev/null @@ -1,31 +0,0 @@ -#if __has_include() -#include -#else -#include -#endif - -#include -#include - - -namespace mysqlxx -{ - -StoreQueryResult::StoreQueryResult(MYSQL_RES * res_, Connection * conn_, const Query * query_) : ResultBase(res_, conn_, query_) -{ - UInt64 rows = mysql_num_rows(res); - UInt32 fields = getNumFields(); - reserve(rows); - lengths.resize(rows * fields); - - for (UInt64 i = 0; MYSQL_ROW row = mysql_fetch_row(res); ++i) - { - MYSQL_LENGTHS lengths_for_row = mysql_fetch_lengths(res); - memcpy(&lengths[i * fields], lengths_for_row, sizeof(lengths[0]) * fields); - - push_back(Row(row, this, &lengths[i * fields])); - } - checkError(conn->getDriver()); -} - -} diff --git a/base/mysqlxx/src/tests/CMakeLists.txt b/base/mysqlxx/tests/CMakeLists.txt similarity index 100% rename from base/mysqlxx/src/tests/CMakeLists.txt rename to base/mysqlxx/tests/CMakeLists.txt diff --git a/base/mysqlxx/src/tests/failover.xml b/base/mysqlxx/tests/failover.xml similarity index 100% rename from base/mysqlxx/src/tests/failover.xml rename to base/mysqlxx/tests/failover.xml diff --git a/base/mysqlxx/src/tests/mysqlxx_test.cpp b/base/mysqlxx/tests/mysqlxx_test.cpp similarity index 95% rename from base/mysqlxx/src/tests/mysqlxx_test.cpp rename to base/mysqlxx/tests/mysqlxx_test.cpp index d99900b1a39..cf304a5cb5f 100644 --- a/base/mysqlxx/src/tests/mysqlxx_test.cpp +++ b/base/mysqlxx/tests/mysqlxx_test.cpp @@ -68,10 +68,10 @@ int main(int, char **) Queries queries; queries.push_back(query); - for (Queries::iterator it = queries.begin(); it != queries.end(); ++it) + for (auto & q : queries) { - std::cerr << it->str() << std::endl; - std::cerr << it->store().at(0) << std::endl; + std::cerr << q.str() << std::endl; + std::cerr << q.store().at(0) << std::endl; } } @@ -92,10 +92,10 @@ int main(int, char **) mysqlxx::Query & qref = queries.back(); qref << " 1"; - for (Queries::iterator it = queries.begin(); it != queries.end(); ++it) + for (auto & query : queries) { - std::cerr << it->str() << std::endl; - std::cerr << it->store().at(0) << std::endl; + std::cerr << query.str() << std::endl; + std::cerr << query.store().at(0) << std::endl; } } diff --git a/base/pcg-random/CMakeLists.txt b/base/pcg-random/CMakeLists.txt new file mode 100644 index 00000000000..88acabba6a7 --- /dev/null +++ b/base/pcg-random/CMakeLists.txt @@ -0,0 +1,2 @@ +add_library(pcg_random INTERFACE) +target_include_directories(pcg_random INTERFACE .) 
diff --git a/contrib/libpcg-random/LICENSE-APACHE.txt b/base/pcg-random/LICENSE similarity index 100% rename from contrib/libpcg-random/LICENSE-APACHE.txt rename to base/pcg-random/LICENSE diff --git a/contrib/libpcg-random/README b/base/pcg-random/README similarity index 100% rename from contrib/libpcg-random/README rename to base/pcg-random/README diff --git a/contrib/libpcg-random/include/pcg_extras.hpp b/base/pcg-random/pcg_extras.hpp similarity index 98% rename from contrib/libpcg-random/include/pcg_extras.hpp rename to base/pcg-random/pcg_extras.hpp index 929c756b151..118b726dd57 100644 --- a/contrib/libpcg-random/include/pcg_extras.hpp +++ b/base/pcg-random/pcg_extras.hpp @@ -292,7 +292,7 @@ inline itype rotl(itype value, bitcount_t rot) { constexpr bitcount_t bits = sizeof(itype) * 8; constexpr bitcount_t mask = bits - 1; -#if PCG_USE_ZEROCHECK_ROTATE_IDIOM +#if defined(PCG_USE_ZEROCHECK_ROTATE_IDIOM) return rot ? (value << rot) | (value >> (bits - rot)) : value; #else return (value << rot) | (value >> ((- rot) & mask)); @@ -304,7 +304,7 @@ inline itype rotr(itype value, bitcount_t rot) { constexpr bitcount_t bits = sizeof(itype) * 8; constexpr bitcount_t mask = bits - 1; -#if PCG_USE_ZEROCHECK_ROTATE_IDIOM +#if defined(PCG_USE_ZEROCHECK_ROTATE_IDIOM) return rot ? (value >> rot) | (value << (bits - rot)) : value; #else return (value >> rot) | (value << ((- rot) & mask)); @@ -318,7 +318,7 @@ inline itype rotr(itype value, bitcount_t rot) * * These overloads will be preferred over the general template code above. */ -#if PCG_USE_INLINE_ASM && __GNUC__ && (__x86_64__ || __i386__) +#if defined(PCG_USE_INLINE_ASM) && __GNUC__ && (__x86_64__ || __i386__) inline uint8_t rotr(uint8_t value, bitcount_t rot) { @@ -600,7 +600,7 @@ std::ostream& operator<<(std::ostream& out, printable_typename) { #ifdef __GNUC__ int status; char* pretty_name = - abi::__cxa_demangle(implementation_typename, NULL, NULL, &status); + abi::__cxa_demangle(implementation_typename, nullptr, nullptr, &status); if (status == 0) out << pretty_name; free(static_cast(pretty_name)); diff --git a/contrib/libpcg-random/include/pcg_random.hpp b/base/pcg-random/pcg_random.hpp similarity index 100% rename from contrib/libpcg-random/include/pcg_random.hpp rename to base/pcg-random/pcg_random.hpp diff --git a/contrib/libpcg-random/include/pcg_uint128.hpp b/base/pcg-random/pcg_uint128.hpp similarity index 100% rename from contrib/libpcg-random/include/pcg_uint128.hpp rename to base/pcg-random/pcg_uint128.hpp diff --git a/base/pcg-random/ya.make b/base/pcg-random/ya.make new file mode 100644 index 00000000000..c6a50887178 --- /dev/null +++ b/base/pcg-random/ya.make @@ -0,0 +1,5 @@ +LIBRARY() + +ADDINCL (GLOBAL clickhouse/base/pcg-random) + +END() diff --git a/base/widechar_width/ya.make b/base/widechar_width/ya.make new file mode 100644 index 00000000000..fa0b4f705db --- /dev/null +++ b/base/widechar_width/ya.make @@ -0,0 +1,9 @@ +LIBRARY() + +ADDINCL(GLOBAL clickhouse/base/widechar_width) + +SRCS( + widechar_width.cpp +) + +END() diff --git a/base/ya.make b/base/ya.make new file mode 100644 index 00000000000..004da9af2ae --- /dev/null +++ b/base/ya.make @@ -0,0 +1,7 @@ +RECURSE( + common + daemon + loggers + pcg-random + widechar_width +) diff --git a/dbms/benchmark/benchmark.sh b/benchmark/benchmark.sh similarity index 100% rename from dbms/benchmark/benchmark.sh rename to benchmark/benchmark.sh diff --git a/dbms/benchmark/clickhouse/benchmark-chyt.sh b/benchmark/clickhouse/benchmark-chyt.sh similarity index 100% rename from 
dbms/benchmark/clickhouse/benchmark-chyt.sh rename to benchmark/clickhouse/benchmark-chyt.sh diff --git a/dbms/benchmark/clickhouse/benchmark-new.sh b/benchmark/clickhouse/benchmark-new.sh similarity index 100% rename from dbms/benchmark/clickhouse/benchmark-new.sh rename to benchmark/clickhouse/benchmark-new.sh diff --git a/dbms/benchmark/clickhouse/benchmark-yql.sh b/benchmark/clickhouse/benchmark-yql.sh similarity index 100% rename from dbms/benchmark/clickhouse/benchmark-yql.sh rename to benchmark/clickhouse/benchmark-yql.sh diff --git a/dbms/benchmark/clickhouse/queries.sql b/benchmark/clickhouse/queries.sql similarity index 100% rename from dbms/benchmark/clickhouse/queries.sql rename to benchmark/clickhouse/queries.sql diff --git a/dbms/benchmark/create_dump.sh b/benchmark/create_dump.sh similarity index 100% rename from dbms/benchmark/create_dump.sh rename to benchmark/create_dump.sh diff --git a/dbms/benchmark/greenplum/README b/benchmark/greenplum/README similarity index 100% rename from dbms/benchmark/greenplum/README rename to benchmark/greenplum/README diff --git a/dbms/benchmark/greenplum/benchmark.sh b/benchmark/greenplum/benchmark.sh similarity index 100% rename from dbms/benchmark/greenplum/benchmark.sh rename to benchmark/greenplum/benchmark.sh diff --git a/dbms/benchmark/greenplum/dump_dataset_from_ch.sh b/benchmark/greenplum/dump_dataset_from_ch.sh similarity index 100% rename from dbms/benchmark/greenplum/dump_dataset_from_ch.sh rename to benchmark/greenplum/dump_dataset_from_ch.sh diff --git a/dbms/benchmark/greenplum/load_data_set.sql b/benchmark/greenplum/load_data_set.sql similarity index 100% rename from dbms/benchmark/greenplum/load_data_set.sql rename to benchmark/greenplum/load_data_set.sql diff --git a/dbms/benchmark/greenplum/queries.sql b/benchmark/greenplum/queries.sql similarity index 100% rename from dbms/benchmark/greenplum/queries.sql rename to benchmark/greenplum/queries.sql diff --git a/dbms/benchmark/greenplum/result_parser.py b/benchmark/greenplum/result_parser.py similarity index 100% rename from dbms/benchmark/greenplum/result_parser.py rename to benchmark/greenplum/result_parser.py diff --git a/dbms/benchmark/greenplum/schema.sql b/benchmark/greenplum/schema.sql similarity index 100% rename from dbms/benchmark/greenplum/schema.sql rename to benchmark/greenplum/schema.sql diff --git a/dbms/benchmark/hive/conf.sh b/benchmark/hive/conf.sh similarity index 100% rename from dbms/benchmark/hive/conf.sh rename to benchmark/hive/conf.sh diff --git a/dbms/benchmark/hive/define_schema.sql b/benchmark/hive/define_schema.sql similarity index 100% rename from dbms/benchmark/hive/define_schema.sql rename to benchmark/hive/define_schema.sql diff --git a/dbms/benchmark/hive/expect.tcl b/benchmark/hive/expect.tcl similarity index 100% rename from dbms/benchmark/hive/expect.tcl rename to benchmark/hive/expect.tcl diff --git a/dbms/benchmark/hive/log/log_100m_tuned b/benchmark/hive/log/log_100m_tuned similarity index 100% rename from dbms/benchmark/hive/log/log_100m_tuned rename to benchmark/hive/log/log_100m_tuned diff --git a/dbms/benchmark/hive/log/log_10m/log_10m_ b/benchmark/hive/log/log_10m/log_10m_ similarity index 100% rename from dbms/benchmark/hive/log/log_10m/log_10m_ rename to benchmark/hive/log/log_10m/log_10m_ diff --git a/dbms/benchmark/hive/log/log_10m/log_10m_1 b/benchmark/hive/log/log_10m/log_10m_1 similarity index 100% rename from dbms/benchmark/hive/log/log_10m/log_10m_1 rename to benchmark/hive/log/log_10m/log_10m_1 diff --git 
a/dbms/benchmark/hive/log/log_10m/log_10m_2 b/benchmark/hive/log/log_10m/log_10m_2 similarity index 100% rename from dbms/benchmark/hive/log/log_10m/log_10m_2 rename to benchmark/hive/log/log_10m/log_10m_2 diff --git a/dbms/benchmark/hive/log/log_10m/log_10m_3 b/benchmark/hive/log/log_10m/log_10m_3 similarity index 100% rename from dbms/benchmark/hive/log/log_10m/log_10m_3 rename to benchmark/hive/log/log_10m/log_10m_3 diff --git a/dbms/benchmark/hive/log/log_10m/log_10m_tuned b/benchmark/hive/log/log_10m/log_10m_tuned similarity index 100% rename from dbms/benchmark/hive/log/log_10m/log_10m_tuned rename to benchmark/hive/log/log_10m/log_10m_tuned diff --git a/dbms/benchmark/hive/log/log_10m/log_hits_10m b/benchmark/hive/log/log_10m/log_hits_10m similarity index 100% rename from dbms/benchmark/hive/log/log_10m/log_hits_10m rename to benchmark/hive/log/log_10m/log_hits_10m diff --git a/dbms/benchmark/hive/queries.sql b/benchmark/hive/queries.sql similarity index 100% rename from dbms/benchmark/hive/queries.sql rename to benchmark/hive/queries.sql diff --git a/dbms/benchmark/hive/run_hive.sh b/benchmark/hive/run_hive.sh similarity index 100% rename from dbms/benchmark/hive/run_hive.sh rename to benchmark/hive/run_hive.sh diff --git a/dbms/benchmark/infinidb/conf.sh b/benchmark/infinidb/conf.sh similarity index 100% rename from dbms/benchmark/infinidb/conf.sh rename to benchmark/infinidb/conf.sh diff --git a/dbms/benchmark/infinidb/define_schema.sql b/benchmark/infinidb/define_schema.sql similarity index 100% rename from dbms/benchmark/infinidb/define_schema.sql rename to benchmark/infinidb/define_schema.sql diff --git a/dbms/benchmark/infinidb/expect.tcl b/benchmark/infinidb/expect.tcl similarity index 100% rename from dbms/benchmark/infinidb/expect.tcl rename to benchmark/infinidb/expect.tcl diff --git a/dbms/benchmark/infinidb/log/log_100m b/benchmark/infinidb/log/log_100m similarity index 100% rename from dbms/benchmark/infinidb/log/log_100m rename to benchmark/infinidb/log/log_100m diff --git a/dbms/benchmark/infinidb/log/log_100m_tuned b/benchmark/infinidb/log/log_100m_tuned similarity index 100% rename from dbms/benchmark/infinidb/log/log_100m_tuned rename to benchmark/infinidb/log/log_100m_tuned diff --git a/dbms/benchmark/infinidb/log/log_10m b/benchmark/infinidb/log/log_10m similarity index 100% rename from dbms/benchmark/infinidb/log/log_10m rename to benchmark/infinidb/log/log_10m diff --git a/dbms/benchmark/infinidb/log/log_10m_tuned b/benchmark/infinidb/log/log_10m_tuned similarity index 100% rename from dbms/benchmark/infinidb/log/log_10m_tuned rename to benchmark/infinidb/log/log_10m_tuned diff --git a/dbms/benchmark/infinidb/queries.sql b/benchmark/infinidb/queries.sql similarity index 100% rename from dbms/benchmark/infinidb/queries.sql rename to benchmark/infinidb/queries.sql diff --git a/dbms/benchmark/infobright/conf.sh b/benchmark/infobright/conf.sh similarity index 100% rename from dbms/benchmark/infobright/conf.sh rename to benchmark/infobright/conf.sh diff --git a/dbms/benchmark/infobright/define_schema.sql b/benchmark/infobright/define_schema.sql similarity index 100% rename from dbms/benchmark/infobright/define_schema.sql rename to benchmark/infobright/define_schema.sql diff --git a/dbms/benchmark/infobright/expect.tcl b/benchmark/infobright/expect.tcl similarity index 100% rename from dbms/benchmark/infobright/expect.tcl rename to benchmark/infobright/expect.tcl diff --git a/dbms/benchmark/infobright/log-community/log_10m 
b/benchmark/infobright/log-community/log_10m similarity index 100% rename from dbms/benchmark/infobright/log-community/log_10m rename to benchmark/infobright/log-community/log_10m diff --git a/dbms/benchmark/infobright/queries.sql b/benchmark/infobright/queries.sql similarity index 100% rename from dbms/benchmark/infobright/queries.sql rename to benchmark/infobright/queries.sql diff --git a/dbms/benchmark/memsql/benchmark.sh b/benchmark/memsql/benchmark.sh similarity index 100% rename from dbms/benchmark/memsql/benchmark.sh rename to benchmark/memsql/benchmark.sh diff --git a/dbms/benchmark/memsql/instructions.txt b/benchmark/memsql/instructions.txt similarity index 100% rename from dbms/benchmark/memsql/instructions.txt rename to benchmark/memsql/instructions.txt diff --git a/dbms/benchmark/memsql/queries.sql b/benchmark/memsql/queries.sql similarity index 100% rename from dbms/benchmark/memsql/queries.sql rename to benchmark/memsql/queries.sql diff --git a/dbms/benchmark/monetdb/conf.sh b/benchmark/monetdb/conf.sh similarity index 100% rename from dbms/benchmark/monetdb/conf.sh rename to benchmark/monetdb/conf.sh diff --git a/dbms/benchmark/monetdb/define_schema.sql b/benchmark/monetdb/define_schema.sql similarity index 100% rename from dbms/benchmark/monetdb/define_schema.sql rename to benchmark/monetdb/define_schema.sql diff --git a/dbms/benchmark/monetdb/expect.tcl b/benchmark/monetdb/expect.tcl similarity index 100% rename from dbms/benchmark/monetdb/expect.tcl rename to benchmark/monetdb/expect.tcl diff --git a/dbms/benchmark/monetdb/log/log_100m b/benchmark/monetdb/log/log_100m similarity index 100% rename from dbms/benchmark/monetdb/log/log_100m rename to benchmark/monetdb/log/log_100m diff --git a/dbms/benchmark/monetdb/log/log_100m_1 b/benchmark/monetdb/log/log_100m_1 similarity index 100% rename from dbms/benchmark/monetdb/log/log_100m_1 rename to benchmark/monetdb/log/log_100m_1 diff --git a/dbms/benchmark/monetdb/log/log_100m_corrected b/benchmark/monetdb/log/log_100m_corrected similarity index 100% rename from dbms/benchmark/monetdb/log/log_100m_corrected rename to benchmark/monetdb/log/log_100m_corrected diff --git a/dbms/benchmark/monetdb/log/log_100m_corrected_1 b/benchmark/monetdb/log/log_100m_corrected_1 similarity index 100% rename from dbms/benchmark/monetdb/log/log_100m_corrected_1 rename to benchmark/monetdb/log/log_100m_corrected_1 diff --git a/dbms/benchmark/monetdb/log/log_100m_corrected_2 b/benchmark/monetdb/log/log_100m_corrected_2 similarity index 100% rename from dbms/benchmark/monetdb/log/log_100m_corrected_2 rename to benchmark/monetdb/log/log_100m_corrected_2 diff --git a/dbms/benchmark/monetdb/log/log_10m b/benchmark/monetdb/log/log_10m similarity index 100% rename from dbms/benchmark/monetdb/log/log_10m rename to benchmark/monetdb/log/log_10m diff --git a/dbms/benchmark/monetdb/log/log_10m_corrected b/benchmark/monetdb/log/log_10m_corrected similarity index 100% rename from dbms/benchmark/monetdb/log/log_10m_corrected rename to benchmark/monetdb/log/log_10m_corrected diff --git a/dbms/benchmark/monetdb/log/log_10m_corrected_1 b/benchmark/monetdb/log/log_10m_corrected_1 similarity index 100% rename from dbms/benchmark/monetdb/log/log_10m_corrected_1 rename to benchmark/monetdb/log/log_10m_corrected_1 diff --git a/dbms/benchmark/monetdb/log/log_upload_100m b/benchmark/monetdb/log/log_upload_100m similarity index 100% rename from dbms/benchmark/monetdb/log/log_upload_100m rename to benchmark/monetdb/log/log_upload_100m diff --git 
a/dbms/benchmark/monetdb/log/log_upload_1b b/benchmark/monetdb/log/log_upload_1b similarity index 100% rename from dbms/benchmark/monetdb/log/log_upload_1b rename to benchmark/monetdb/log/log_upload_1b diff --git a/dbms/benchmark/monetdb/queries.sql b/benchmark/monetdb/queries.sql similarity index 100% rename from dbms/benchmark/monetdb/queries.sql rename to benchmark/monetdb/queries.sql diff --git a/dbms/benchmark/vertica/README b/benchmark/vertica/README similarity index 100% rename from dbms/benchmark/vertica/README rename to benchmark/vertica/README diff --git a/dbms/benchmark/vertica/benchmark.sh b/benchmark/vertica/benchmark.sh similarity index 100% rename from dbms/benchmark/vertica/benchmark.sh rename to benchmark/vertica/benchmark.sh diff --git a/dbms/benchmark/vertica/hits_define_schema.sql b/benchmark/vertica/hits_define_schema.sql similarity index 100% rename from dbms/benchmark/vertica/hits_define_schema.sql rename to benchmark/vertica/hits_define_schema.sql diff --git a/dbms/benchmark/vertica/queries.sql b/benchmark/vertica/queries.sql similarity index 100% rename from dbms/benchmark/vertica/queries.sql rename to benchmark/vertica/queries.sql diff --git a/cmake/Modules/FindJeMalloc.cmake b/cmake/Modules/FindJeMalloc.cmake deleted file mode 100644 index 264415dc9b2..00000000000 --- a/cmake/Modules/FindJeMalloc.cmake +++ /dev/null @@ -1,46 +0,0 @@ -# https://github.com/bro/cmake/blob/master/FindJeMalloc.cmake -# -# - Try to find jemalloc headers and libraries. -# -# Usage of this module as follows: -# -# find_package(JeMalloc) -# -# Variables used by this module, they can change the default behaviour and need -# to be set before calling find_package: -# -# JEMALLOC_ROOT_DIR Set this variable to the root installation of -# jemalloc if the module has problems finding -# the proper installation path. -# -# Variables defined by this module: -# -# JEMALLOC_FOUND System has jemalloc libs/headers -# JEMALLOC_LIBRARIES The jemalloc library/libraries -# JEMALLOC_INCLUDE_DIR The location of jemalloc headers - -find_path(JEMALLOC_ROOT_DIR - NAMES include/jemalloc/jemalloc.h -) - -find_library(JEMALLOC_LIBRARIES - NAMES jemalloc - HINTS ${JEMALLOC_ROOT_DIR}/lib -) - -find_path(JEMALLOC_INCLUDE_DIR - NAMES jemalloc/jemalloc.h - HINTS ${JEMALLOC_ROOT_DIR}/include -) - -include(FindPackageHandleStandardArgs) -find_package_handle_standard_args(JeMalloc DEFAULT_MSG - JEMALLOC_LIBRARIES - JEMALLOC_INCLUDE_DIR -) - -mark_as_advanced( - JEMALLOC_ROOT_DIR - JEMALLOC_LIBRARIES - JEMALLOC_INCLUDE_DIR -) diff --git a/cmake/Modules/FindODBC.cmake b/cmake/Modules/FindODBC.cmake deleted file mode 100644 index 9e209c15777..00000000000 --- a/cmake/Modules/FindODBC.cmake +++ /dev/null @@ -1,147 +0,0 @@ -# Distributed under the OSI-approved BSD 3-Clause License. See accompanying -# file Copyright.txt or https://cmake.org/licensing for details. - -#.rst: -# FindMySQL -# ------- -# -# Find ODBC Runtime -# -# This will define the following variables:: -# -# ODBC_FOUND - True if the system has the libraries -# ODBC_INCLUDE_DIRS - where to find the headers -# ODBC_LIBRARIES - where to find the libraries -# ODBC_DEFINITIONS - compile definitons -# -# Hints: -# Set ``ODBC_ROOT_DIR`` to the root directory of an installation. 
-# -include(FindPackageHandleStandardArgs) - -find_package(PkgConfig QUIET) -pkg_check_modules(PC_ODBC QUIET odbc) - -if(WIN32) - get_filename_component(kit_dir "[HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows Kits\\Installed Roots;KitsRoot]" REALPATH) - get_filename_component(kit81_dir "[HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows Kits\\Installed Roots;KitsRoot81]" REALPATH) -endif() - -find_path(ODBC_INCLUDE_DIR - NAMES sql.h - HINTS - ${ODBC_ROOT_DIR}/include - ${ODBC_ROOT_INCLUDE_DIRS} - PATHS - ${PC_ODBC_INCLUDE_DIRS} - /usr/include - /usr/local/include - /usr/local/odbc/include - /usr/local/iodbc/include - "C:/Program Files/ODBC/include" - "C:/Program Files/Microsoft SDKs/Windows/v7.0/include" - "C:/Program Files/Microsoft SDKs/Windows/v6.0a/include" - "C:/ODBC/include" - "${kit_dir}/Include/um" - "${kit81_dir}/Include/um" - PATH_SUFFIXES - odbc - iodbc - DOC "Specify the directory containing sql.h." -) - -if(NOT ODBC_INCLUDE_DIR AND WIN32) - set(ODBC_INCLUDE_DIR "") -else() - set(REQUIRED_INCLUDE_DIR ODBC_INCLUDE_DIR) -endif() - -if(WIN32 AND CMAKE_SIZEOF_VOID_P EQUAL 8) - set(WIN_ARCH x64) -elseif(WIN32 AND CMAKE_SIZEOF_VOID_P EQUAL 4) - set(WIN_ARCH x86) -endif() - -find_library(ODBC_LIBRARY - NAMES unixodbc iodbc odbc odbc32 - HINTS - ${ODBC_ROOT_DIR}/lib - ${ODBC_ROOT_LIBRARY_DIRS} - PATHS - ${PC_ODBC_LIBRARY_DIRS} - /usr/lib - /usr/local/lib - /usr/local/odbc/lib - /usr/local/iodbc/lib - "C:/Program Files/ODBC/lib" - "C:/ODBC/lib/debug" - "C:/Program Files (x86)/Microsoft SDKs/Windows/v7.0A/Lib" - "${kit81_dir}/Lib/winv6.3/um" - "${kit_dir}/Lib/win8/um" - PATH_SUFIXES - odbc - ${WIN_ARCH} - DOC "Specify the ODBC driver manager library here." -) - -if(NOT ODBC_LIBRARY AND WIN32) - # List names of ODBC libraries on Windows - set(ODBC_LIBRARY odbc32.lib) -endif() - -# List additional libraries required to use ODBC library -if(WIN32 AND MSVC OR CMAKE_CXX_COMPILER_ID MATCHES "Intel") - set(_odbc_required_libs_names odbccp32;ws2_32) -endif() -foreach(_lib_name IN LISTS _odbc_required_libs_names) - find_library(_lib_path - NAMES ${_lib_name} - HINTS - ${ODBC_ROOT_DIR}/lib - ${ODBC_ROOT_LIBRARY_DIRS} - PATHS - ${PC_ODBC_LIBRARY_DIRS} - /usr/lib - /usr/local/lib - /usr/local/odbc/lib - /usr/local/iodbc/lib - "C:/Program Files/ODBC/lib" - "C:/ODBC/lib/debug" - "C:/Program Files (x86)/Microsoft SDKs/Windows/v7.0A/Lib" - PATH_SUFFIXES - odbc - ) - if (_lib_path) - list(APPEND _odbc_required_libs_paths ${_lib_path}) - endif() - unset(_lib_path CACHE) -endforeach() -unset(_odbc_lib_paths) -unset(_odbc_required_libs_names) - - -find_package_handle_standard_args(ODBC - FOUND_VAR ODBC_FOUND - REQUIRED_VARS - ODBC_LIBRARY - ${REQUIRED_INCLUDE_DIR} - VERSION_VAR ODBC_VERSION -) - -if(ODBC_FOUND) - set(ODBC_LIBRARIES ${ODBC_LIBRARY} ${_odbc_required_libs_paths} ${LTDL_LIBRARY}) - set(ODBC_INCLUDE_DIRS ${ODBC_INCLUDE_DIR}) - set(ODBC_DEFINITIONS ${PC_ODBC_CFLAGS_OTHER}) -endif() - -if(ODBC_FOUND AND NOT TARGET ODBC::ODBC) - add_library(ODBC::ODBC UNKNOWN IMPORTED) - set_target_properties(ODBC::ODBC PROPERTIES - IMPORTED_LOCATION "${ODBC_LIBRARY}" - INTERFACE_LINK_LIBRARIES "${_odbc_required_libs_paths}" - INTERFACE_COMPILE_OPTIONS "${PC_ODBC_CFLAGS_OTHER}" - INTERFACE_INCLUDE_DIRECTORIES "${ODBC_INCLUDE_DIR}" - ) -endif() - -mark_as_advanced(ODBC_LIBRARY ODBC_INCLUDE_DIR) diff --git a/cmake/Modules/FindOpenLDAP.cmake b/cmake/Modules/FindOpenLDAP.cmake new file mode 100644 index 00000000000..c33eafdcb2e --- /dev/null +++ b/cmake/Modules/FindOpenLDAP.cmake @@ -0,0 +1,55 @@ +# Find OpenLDAP 
libraries. +# +# Can be configured with: +# OPENLDAP_ROOT_DIR - path to the OpenLDAP installation prefix +# OPENLDAP_USE_STATIC_LIBS - look for static version of the libraries +# OPENLDAP_USE_REENTRANT_LIBS - look for thread-safe version of the libraries +# +# Sets values of: +# OPENLDAP_FOUND - TRUE if found +# OPENLDAP_INCLUDE_DIR - path to the include directory +# OPENLDAP_LIBRARIES - paths to the libldap and liblber libraries +# OPENLDAP_LDAP_LIBRARY - paths to the libldap library +# OPENLDAP_LBER_LIBRARY - paths to the liblber library +# + +if(OPENLDAP_USE_STATIC_LIBS) + set(_orig_CMAKE_FIND_LIBRARY_SUFFIXES ${CMAKE_FIND_LIBRARY_SUFFIXES}) + if(WIN32) + set(CMAKE_FIND_LIBRARY_SUFFIXES ".lib" ".a" ${CMAKE_FIND_LIBRARY_SUFFIXES}) + else() + set(CMAKE_FIND_LIBRARY_SUFFIXES ".a") + endif() +endif() + +set(_r_suffix) +if(OPENLDAP_USE_REENTRANT_LIBS) + set(_r_suffix "_r") +endif() + +if(OPENLDAP_ROOT_DIR) + find_path(OPENLDAP_INCLUDE_DIR NAMES "ldap.h" "lber.h" PATHS "${OPENLDAP_ROOT_DIR}" PATH_SUFFIXES "include" NO_DEFAULT_PATH) + find_library(OPENLDAP_LDAP_LIBRARY NAMES "ldap${_r_suffix}" PATHS "${OPENLDAP_ROOT_DIR}" PATH_SUFFIXES "lib" NO_DEFAULT_PATH) + find_library(OPENLDAP_LBER_LIBRARY NAMES "lber" PATHS "${OPENLDAP_ROOT_DIR}" PATH_SUFFIXES "lib" NO_DEFAULT_PATH) +else() + find_path(OPENLDAP_INCLUDE_DIR NAMES "ldap.h" "lber.h") + find_library(OPENLDAP_LDAP_LIBRARY NAMES "ldap${_r_suffix}") + find_library(OPENLDAP_LBER_LIBRARY NAMES "lber") +endif() + +unset(_r_suffix) + +set(OPENLDAP_LIBRARIES ${OPENLDAP_LDAP_LIBRARY} ${OPENLDAP_LBER_LIBRARY}) + +include(FindPackageHandleStandardArgs) +find_package_handle_standard_args( + OpenLDAP DEFAULT_MSG + OPENLDAP_INCLUDE_DIR OPENLDAP_LDAP_LIBRARY OPENLDAP_LBER_LIBRARY +) + +mark_as_advanced(OPENLDAP_INCLUDE_DIR OPENLDAP_LIBRARIES OPENLDAP_LDAP_LIBRARY OPENLDAP_LBER_LIBRARY) + +if(OPENLDAP_USE_STATIC_LIBS) + set(CMAKE_FIND_LIBRARY_SUFFIXES ${_orig_CMAKE_FIND_LIBRARY_SUFFIXES}) + unset(_orig_CMAKE_FIND_LIBRARY_SUFFIXES) +endif() diff --git a/cmake/Modules/FindPoco.cmake b/cmake/Modules/FindPoco.cmake deleted file mode 100644 index cad7e9afc8f..00000000000 --- a/cmake/Modules/FindPoco.cmake +++ /dev/null @@ -1,247 +0,0 @@ -# https://github.com/astahl/poco-cmake/blob/master/cmake/FindPoco.cmake - -# - finds the Poco C++ libraries -# This module finds the Applied Informatics Poco libraries. -# It supports the following components: -# -# Util (loaded by default) -# Foundation (loaded by default) -# XML -# Zip -# Crypto -# Data -# Net -# NetSSL -# OSP -# -# Usage: -# set(ENV{Poco_DIR} path/to/poco/sdk) -# find_package(Poco REQUIRED OSP Data Crypto) -# -# On completion, the script defines the following variables: -# -# - Compound variables: -# Poco_FOUND -# - true if all requested components were found. -# Poco_LIBRARIES -# - contains release (and debug if available) libraries for all requested components. -# It has the form "optimized LIB1 debug LIBd1 optimized LIB2 ...", ready for use with the target_link_libraries command. -# Poco_INCLUDE_DIRS -# - Contains include directories for all requested components. -# -# - Component variables: -# Poco_Xxx_FOUND -# - Where Xxx is the properly cased component name (eg. 'Util', 'OSP'). -# True if a component's library or debug library was found successfully. -# Poco_Xxx_LIBRARY -# - Library for component Xxx. -# Poco_Xxx_LIBRARY_DEBUG -# - debug library for component Xxx -# Poco_Xxx_INCLUDE_DIR -# - include directory for component Xxx -# -# - OSP BundleCreator variables: (i.e. 
bundle.exe on windows, bundle on unix-likes) -# (is only discovered if OSP is a requested component) -# Poco_OSP_Bundle_EXECUTABLE_FOUND -# - true if the bundle-creator executable was found. -# Poco_OSP_Bundle_EXECUTABLE -# - the path to the bundle-creator executable. -# -# Author: Andreas Stahl andreas.stahl@tu-dresden.de - -set(Poco_HINTS - /usr/local - /usr/local/include/Poco - C:/AppliedInformatics - ${Poco_DIR} - $ENV{Poco_DIR} -) - -if(NOT Poco_ROOT_DIR) - # look for the root directory, first for the source-tree variant - find_path(Poco_ROOT_DIR - NAMES Foundation/include/Poco/Poco.h - HINTS ${Poco_HINTS} - ) - if(NOT Poco_ROOT_DIR) - # this means poco may have a different directory structure, maybe it was installed, let's check for that - message(STATUS "Looking for Poco install directory structure.") - find_path(Poco_ROOT_DIR - NAMES include/Poco/Poco.h - HINTS ${Poco_HINTS} - ) - if(NOT Poco_ROOT_DIR) - # poco was still not found -> Fail - if(Poco_FIND_REQUIRED) - message(FATAL_ERROR "Poco: Could not find Poco install directory") - endif() - if(NOT Poco_FIND_QUIETLY) - message(STATUS "Poco: Could not find Poco install directory") - endif() - return() - else() - # poco was found with the make install directory structure - message(STATUS "Assuming Poco install directory structure at ${Poco_ROOT_DIR}.") - set(Poco_INSTALLED true) - endif() - endif() -endif() - -# add dynamic library directory -if(WIN32) - find_path(Poco_RUNTIME_LIBRARY_DIRS - NAMES PocoFoundation.dll - HINTS ${Poco_ROOT_DIR} - PATH_SUFFIXES - bin - lib - ) -endif() - -# if installed directory structure, set full include dir -if(Poco_INSTALLED) - set(Poco_INCLUDE_DIRS ${Poco_ROOT_DIR}/include/ CACHE PATH "The global include path for Poco") -endif() - -# append the default minimum components to the list to find -list(APPEND components - ${Poco_FIND_COMPONENTS} - # default components: - "Util" - "Foundation" -) -list(REMOVE_DUPLICATES components) # remove duplicate defaults - -foreach( component ${components} ) - #if(NOT Poco_${component}_FOUND) - - # include directory for the component - if(NOT Poco_${component}_INCLUDE_DIR) - set (component_alt "${component}") - set (component_root "${component}") - if (${component} STREQUAL "DataODBC") - set (component_top "Data") - set (component_in "ODBC") - set (component_root "Data/ODBC") - endif () - if (${component} STREQUAL "SQLODBC") - set (component_top "SQL") - set (component_in "ODBC") - set (component_root "SQL/ODBC") - endif () - if (${component} STREQUAL "NetSSL") - set (component_alt "Net") - set (component_root "NetSSL_OpenSSL") - endif () - find_path(Poco_${component}_INCLUDE_DIR - NAMES - Poco/${component}.h # e.g. Foundation.h - Poco/${component}/${component}.h # e.g. OSP/OSP.h Util/Util.h - Poco/${component_alt}/${component}.h # e.g. Net/NetSSL.h - Poco/${component_top}/${component_in}/${component_in}.h # e.g. 
Data/ODBC/ODBC.h - HINTS - ${Poco_ROOT_DIR} - PATH_SUFFIXES - include - ${component_root}/include - ) - # message(STATUS "poco include debug: {component}: ${Poco_${component}_INCLUDE_DIR}") - endif() - if(NOT Poco_${component}_INCLUDE_DIR) - message(WARNING "Poco_${component}_INCLUDE_DIR NOT FOUND") - else() - list(APPEND Poco_INCLUDE_DIRS ${Poco_${component}_INCLUDE_DIR}) - endif() - - # release library - if(NOT Poco_${component}_LIBRARY) - find_library( - Poco_${component}_LIBRARY - NAMES Poco${component} - HINTS ${Poco_ROOT_DIR} - PATH_SUFFIXES - lib - bin - ) - if(Poco_${component}_LIBRARY) - message(STATUS "Found Poco ${component}: ${Poco_${component}_LIBRARY}") - endif() - endif() - if(Poco_${component}_LIBRARY) - list(APPEND Poco_LIBRARIES "optimized" ${Poco_${component}_LIBRARY} ) - mark_as_advanced(Poco_${component}_LIBRARY) - endif() - - # debug library - if(NOT Poco_${component}_LIBRARY_DEBUG) - find_library( - Poco_${component}_LIBRARY_DEBUG - Names Poco${component}d - HINTS ${Poco_ROOT_DIR} - PATH_SUFFIXES - lib - bin - ) - if(Poco_${component}_LIBRARY_DEBUG) - message(STATUS "Found Poco ${component} (debug): ${Poco_${component}_LIBRARY_DEBUG}") - endif() - endif(NOT Poco_${component}_LIBRARY_DEBUG) - if(Poco_${component}_LIBRARY_DEBUG) - list(APPEND Poco_LIBRARIES "debug" ${Poco_${component}_LIBRARY_DEBUG}) - mark_as_advanced(Poco_${component}_LIBRARY_DEBUG) - endif() - - # mark component as found or handle not finding it - if(Poco_${component}_LIBRARY_DEBUG OR Poco_${component}_LIBRARY) - set(Poco_${component}_FOUND TRUE) - elseif(NOT Poco_FIND_QUIETLY) - message(WARNING "Could not find Poco component ${component}!") - endif() - - # message(STATUS "Poco component ${component}: Poco_${component}_LIBRARY : Poco_${component}_INCLUDE_DIR") -endforeach() - -if(Poco_DataODBC_LIBRARY) - list(APPEND Poco_DataODBC_LIBRARY ${ODBC_LIBRARIES} ${LTDL_LIBRARY}) - list(APPEND Poco_INCLUDE_DIRS ${ODBC_INCLUDE_DIRS}) -endif() - -if(Poco_SQLODBC_LIBRARY) - list(APPEND Poco_SQLODBC_LIBRARY ${ODBC_LIBRARIES} ${LTDL_LIBRARY}) - list(APPEND Poco_INCLUDE_DIRS ${ODBC_INCLUDE_DIRS}) -endif() - -if(Poco_NetSSL_LIBRARY) - list(APPEND Poco_NetSSL_LIBRARY ${OPENSSL_LIBRARIES}) - list(APPEND Poco_INCLUDE_DIRS ${OPENSSL_INCLUDE_DIR}) -endif() - -if(DEFINED Poco_LIBRARIES) - set(Poco_FOUND true) -endif() - -if(${Poco_OSP_FOUND}) - # find the osp bundle program - find_program( - Poco_OSP_Bundle_EXECUTABLE - NAMES bundle - HINTS - ${Poco_RUNTIME_LIBRARY_DIRS} - ${Poco_ROOT_DIR} - PATH_SUFFIXES - bin - OSP/BundleCreator/bin/Darwin/x86_64 - OSP/BundleCreator/bin/Darwin/i386 - DOC "The executable that bundles OSP packages according to a .bndlspec specification." 
- ) - if(Poco_OSP_Bundle_EXECUTABLE) - set(Poco_OSP_Bundle_EXECUTABLE_FOUND true) - endif() - # include bundle script file - find_file(Poco_OSP_Bundles_file NAMES PocoBundles.cmake HINTS ${CMAKE_MODULE_PATH}) - if(${Poco_OSP_Bundles_file}) - include(${Poco_OSP_Bundles_file}) - endif() -endif() - -message(STATUS "Found Poco: ${Poco_LIBRARIES}") diff --git a/cmake/analysis.cmake b/cmake/analysis.cmake new file mode 100644 index 00000000000..287c36a8de7 --- /dev/null +++ b/cmake/analysis.cmake @@ -0,0 +1,18 @@ +# This file configures static analysis tools that can be integrated into the build process + +option (ENABLE_CLANG_TIDY "Use 'clang-tidy' static analyzer if present" OFF) +if (ENABLE_CLANG_TIDY) + if (${CMAKE_VERSION} VERSION_LESS "3.6.0") + message(FATAL_ERROR "clang-tidy requires CMake version at least 3.6.") + endif() + + find_program (CLANG_TIDY_PATH NAMES "clang-tidy" "clang-tidy-10" "clang-tidy-9" "clang-tidy-8") + if (CLANG_TIDY_PATH) + message(STATUS "Using clang-tidy: ${CLANG_TIDY_PATH}. The checks will be run during the build process. See the .clang-tidy file at the root directory to configure the checks.") + set (USE_CLANG_TIDY 1) + # The variable CMAKE_CXX_CLANG_TIDY will be set inside src and base directories with non third-party code. + # set (CMAKE_CXX_CLANG_TIDY "${CLANG_TIDY_PATH}") + else () + message(STATUS "clang-tidy is not found. This is normal - the tool is used only for static code analysis and not essential for the build.") + endif () +endif () diff --git a/cmake/arch.cmake b/cmake/arch.cmake index ec644b6fe77..57ed42295bb 100644 --- a/cmake/arch.cmake +++ b/cmake/arch.cmake @@ -11,7 +11,6 @@ if (CMAKE_LIBRARY_ARCHITECTURE MATCHES "i386") set (ARCH_I386 1) endif () if ((ARCH_ARM AND NOT ARCH_AARCH64) OR ARCH_I386) - set (ARCH_32 1) message (FATAL_ERROR "32bit platforms are not supported") endif () diff --git a/cmake/find/cctz.cmake b/cmake/find/cctz.cmake deleted file mode 100644 index aae8078512d..00000000000 --- a/cmake/find/cctz.cmake +++ /dev/null @@ -1,23 +0,0 @@ -option (USE_INTERNAL_CCTZ_LIBRARY "Set to FALSE to use system cctz library instead of bundled" ${NOT_UNBUNDLED}) - -if(NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/cctz/include/cctz/time_zone.h") - if(USE_INTERNAL_CCTZ_LIBRARY) - message(WARNING "submodule contrib/cctz is missing.
to fix try run: \n git submodule update --init --recursive") - endif() - set(USE_INTERNAL_CCTZ_LIBRARY 0) - set(MISSING_INTERNAL_CCTZ_LIBRARY 1) -endif() - -if (NOT USE_INTERNAL_CCTZ_LIBRARY) - find_library (CCTZ_LIBRARY cctz) - find_path (CCTZ_INCLUDE_DIR NAMES cctz/civil_time.h civil_time.h PATHS ${CCTZ_INCLUDE_PATHS}) -endif () - -if (CCTZ_LIBRARY AND CCTZ_INCLUDE_DIR) -elseif (NOT MISSING_INTERNAL_CCTZ_LIBRARY) - set (USE_INTERNAL_CCTZ_LIBRARY 1) - set (CCTZ_INCLUDE_DIR "${ClickHouse_SOURCE_DIR}/contrib/cctz/include") - set (CCTZ_LIBRARY cctz) -endif () - -message (STATUS "Using cctz: ${CCTZ_INCLUDE_DIR} : ${CCTZ_LIBRARY}") diff --git a/cmake/find/consistent-hashing.cmake b/cmake/find/consistent-hashing.cmake deleted file mode 100644 index 23e709d2228..00000000000 --- a/cmake/find/consistent-hashing.cmake +++ /dev/null @@ -1,14 +0,0 @@ -option (USE_INTERNAL_CONSISTENT_HASHING_LIBRARY "Set to FALSE to use consistent-hashing library from Arcadia (Yandex internal repository) instead of bundled" ${NOT_UNBUNDLED}) - -if (NOT USE_INTERNAL_CONSISTENT_HASHING_LIBRARY) - find_library (CONSISTENT_HASHING_LIBRARY consistent-hashing) - find_path (CONSISTENT_HASHING_INCLUDE_DIR NAMES consistent_hashing.h PATHS ${CONSISTENT_HASHING_INCLUDE_PATHS}) -endif () - -if (CONSISTENT_HASHING_LIBRARY AND CONSISTENT_HASHING_INCLUDE_DIR) -else () - set (USE_INTERNAL_CONSISTENT_HASHING_LIBRARY 1) - set (CONSISTENT_HASHING_LIBRARY consistent-hashing) -endif () - -message (STATUS "Using consistent-hashing: ${CONSISTENT_HASHING_INCLUDE_DIR} : ${CONSISTENT_HASHING_LIBRARY}") diff --git a/cmake/find/cpuid.cmake b/cmake/find/cpuid.cmake deleted file mode 100644 index 552cf3fb3d9..00000000000 --- a/cmake/find/cpuid.cmake +++ /dev/null @@ -1,32 +0,0 @@ -# ARM: Cannot cpuid_get_raw_data: CPUID instruction is not supported -if (NOT ARCH_ARM) - option (USE_INTERNAL_CPUID_LIBRARY "Set to FALSE to use system cpuid library instead of bundled" ${NOT_UNBUNDLED}) -endif () - -if (USE_INTERNAL_CPUID_LIBRARY AND NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/libcpuid/CMakeLists.txt") - message (WARNING "submodule contrib/libcpuid is missing. to fix try run: \n git submodule update --init --recursive") - set (USE_INTERNAL_CPUID_LIBRARY 0) - set (MISSING_INTERNAL_CPUID_LIBRARY 1) -endif () - -if (NOT USE_INTERNAL_CPUID_LIBRARY) - find_library (CPUID_LIBRARY cpuid) - find_path (CPUID_INCLUDE_DIR NAMES libcpuid/libcpuid.h PATHS ${CPUID_INCLUDE_PATHS}) -endif () - -if (CPUID_LIBRARY AND CPUID_INCLUDE_DIR) - if (OS_FREEBSD) - # need in /usr/local/include/libcpuid/libcpuid_types.h - # Freebsd: /usr/local/include/libcpuid/libcpuid_types.h:61:29: error: conflicting declaration 'typedef long long int int64_t' - add_definitions(-DHAVE_STDINT_H) - # TODO: make virtual target cpuid:cpuid with COMPILE_DEFINITIONS property - endif () - set (USE_CPUID 1) -elseif (NOT ARCH_ARM AND NOT MISSING_INTERNAL_CPUID_LIBRARY) - set (CPUID_INCLUDE_DIR ${ClickHouse_SOURCE_DIR}/contrib/libcpuid/include) - set (USE_INTERNAL_CPUID_LIBRARY 1) - set (CPUID_LIBRARY cpuid) - set (USE_CPUID 1) -endif () - -message (STATUS "Using cpuid=${USE_CPUID}: ${CPUID_INCLUDE_DIR} : ${CPUID_LIBRARY}") diff --git a/cmake/find/cpuinfo.cmake b/cmake/find/cpuinfo.cmake deleted file mode 100644 index 9553372109b..00000000000 --- a/cmake/find/cpuinfo.cmake +++ /dev/null @@ -1,24 +0,0 @@ -option(USE_INTERNAL_CPUINFO_LIBRARY "Set to FALSE to use system cpuinfo library instead of bundled" ${NOT_UNBUNDLED}) - -# Now we have no contrib/libcpuinfo, use from system. 
-if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/libcpuinfo/include") - #message (WARNING "submodule contrib/libcpuinfo is missing. to fix try run: \n git submodule update --init --recursive") - set (USE_INTERNAL_CPUINFO_LIBRARY 0) - set (MISSING_INTERNAL_CPUINFO_LIBRARY 1) -endif () - -if(NOT USE_INTERNAL_CPUINFO_LIBRARY) - find_library(CPUINFO_LIBRARY cpuinfo) - find_path(CPUINFO_INCLUDE_DIR NAMES cpuinfo.h PATHS ${CPUINFO_INCLUDE_PATHS}) -endif() - -if(CPUINFO_LIBRARY AND CPUINFO_INCLUDE_DIR) - set(USE_CPUINFO 1) -elseif(NOT MISSING_INTERNAL_CPUINFO_LIBRARY) - set(CPUINFO_INCLUDE_DIR ${ClickHouse_SOURCE_DIR}/contrib/libcpuinfo/include) - set(USE_INTERNAL_CPUINFO_LIBRARY 1) - set(CPUINFO_LIBRARY cpuinfo) - set(USE_CPUINFO 1) -endif() - -message(STATUS "Using cpuinfo=${USE_CPUINFO}: ${CPUINFO_INCLUDE_DIR} : ${CPUINFO_LIBRARY}") diff --git a/cmake/find/execinfo.cmake b/cmake/find/execinfo.cmake deleted file mode 100644 index 85cc5cf951a..00000000000 --- a/cmake/find/execinfo.cmake +++ /dev/null @@ -1,8 +0,0 @@ -if (OS_FREEBSD) - find_library (EXECINFO_LIBRARY execinfo) - find_library (ELF_LIBRARY elf) - set (EXECINFO_LIBRARIES ${EXECINFO_LIBRARY} ${ELF_LIBRARY}) - message (STATUS "Using execinfo: ${EXECINFO_LIBRARIES}") -else () - set (EXECINFO_LIBRARIES "") -endif () diff --git a/cmake/find/jemalloc.cmake b/cmake/find/jemalloc.cmake deleted file mode 100644 index 6508f1b675f..00000000000 --- a/cmake/find/jemalloc.cmake +++ /dev/null @@ -1,42 +0,0 @@ -if (OS_LINUX AND NOT SANITIZE AND NOT ARCH_32 AND NOT ARCH_PPC64LE) - set(ENABLE_JEMALLOC_DEFAULT ${ENABLE_LIBRARIES}) -else () - set(ENABLE_JEMALLOC_DEFAULT 0) -endif () - -option (ENABLE_JEMALLOC "Set to TRUE to use jemalloc" ${ENABLE_JEMALLOC_DEFAULT}) -if (OS_LINUX) - option (USE_INTERNAL_JEMALLOC_LIBRARY "Set to FALSE to use system jemalloc library instead of bundled" ${NOT_UNBUNDLED}) -else() - option (USE_INTERNAL_JEMALLOC_LIBRARY "Set to FALSE to use system jemalloc library instead of bundled" OFF) -endif() - -if (ENABLE_JEMALLOC) - if (USE_INTERNAL_JEMALLOC_LIBRARY AND NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/jemalloc/src/jemalloc.c") - message (WARNING "submodule contrib/jemalloc is missing. 
to fix try run: \n git submodule update --init --recursive") - set (USE_INTERNAL_JEMALLOC_LIBRARY 0) - set (MISSING_INTERNAL_JEMALLOC_LIBRARY 1) - endif () - - if (NOT USE_INTERNAL_JEMALLOC_LIBRARY) - find_package (JeMalloc) - endif () - - if ((NOT JEMALLOC_LIBRARIES OR NOT JEMALLOC_INCLUDE_DIR) AND NOT MISSING_INTERNAL_JEMALLOC_LIBRARY ) - set (JEMALLOC_LIBRARIES "jemalloc") - set (JEMALLOC_INCLUDE_DIR "${ClickHouse_SOURCE_DIR}/contrib/jemalloc-cmake/include" "${ClickHouse_SOURCE_DIR}/contrib/jemalloc-cmake/include_linux_x86_64") - set (USE_INTERNAL_JEMALLOC_LIBRARY 1) - endif () - - if (JEMALLOC_LIBRARIES) - set (USE_JEMALLOC 1) - elseif (NOT MISSING_INTERNAL_JEMALLOC_LIBRARY) - message (FATAL_ERROR "ENABLE_JEMALLOC is set to true, but library was not found") - endif () - - if (SANITIZE) - message (FATAL_ERROR "ENABLE_JEMALLOC is set to true, but it cannot be used with sanitizers") - endif () - - message (STATUS "Using jemalloc=${USE_JEMALLOC}: ${JEMALLOC_INCLUDE_DIR} : ${JEMALLOC_LIBRARIES}") -endif () diff --git a/cmake/find/ldap.cmake b/cmake/find/ldap.cmake new file mode 100644 index 00000000000..230727819e4 --- /dev/null +++ b/cmake/find/ldap.cmake @@ -0,0 +1,76 @@ +option (ENABLE_LDAP "Enable LDAP" ${ENABLE_LIBRARIES}) + +if (ENABLE_LDAP) + option (USE_INTERNAL_LDAP_LIBRARY "Set to FALSE to use system *LDAP library instead of bundled" ${NOT_UNBUNDLED}) + + if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/openldap/README") + if (USE_INTERNAL_LDAP_LIBRARY) + message (WARNING "Submodule contrib/openldap is missing. To fix try running:\n git submodule update --init --recursive") + endif () + + set (USE_INTERNAL_LDAP_LIBRARY 0) + set (MISSING_INTERNAL_LDAP_LIBRARY 1) + endif () + + set (OPENLDAP_USE_STATIC_LIBS ${USE_STATIC_LIBRARIES}) + set (OPENLDAP_USE_REENTRANT_LIBS 1) + + if (NOT USE_INTERNAL_LDAP_LIBRARY) + if (APPLE AND NOT OPENLDAP_ROOT_DIR) + set (OPENLDAP_ROOT_DIR "/usr/local/opt/openldap") + endif () + + find_package (OpenLDAP) + endif () + + if (NOT OPENLDAP_FOUND AND NOT MISSING_INTERNAL_LDAP_LIBRARY) + string (TOLOWER "${CMAKE_SYSTEM_NAME}" _system_name) + string (TOLOWER "${CMAKE_SYSTEM_PROCESSOR}" _system_processor) + + if ( + "${_system_processor}" STREQUAL "amd64" OR + "${_system_processor}" STREQUAL "x64" + ) + set (_system_processor "x86_64") + elseif ( + "${_system_processor}" STREQUAL "arm64" + ) + set (_system_processor "aarch64") + endif () + + if ( + ( "${_system_name}" STREQUAL "linux" AND "${_system_processor}" STREQUAL "x86_64" ) OR + ( "${_system_name}" STREQUAL "linux" AND "${_system_processor}" STREQUAL "aarch64" ) OR + ( "${_system_name}" STREQUAL "freebsd" AND "${_system_processor}" STREQUAL "x86_64" ) OR + ( "${_system_name}" STREQUAL "darwin" AND "${_system_processor}" STREQUAL "x86_64" ) + ) + set (_ldap_supported_platform TRUE) + endif () + + if (NOT _ldap_supported_platform) + message (WARNING "LDAP support using the bundled library is not implemented for ${CMAKE_SYSTEM_NAME} ${CMAKE_SYSTEM_PROCESSOR} platform.") + elseif (NOT USE_SSL) + message (WARNING "LDAP support using the bundled library is not possible if SSL is not used.") + else () + set (USE_INTERNAL_LDAP_LIBRARY 1) + set (OPENLDAP_ROOT_DIR "${ClickHouse_SOURCE_DIR}/contrib/openldap") + set (OPENLDAP_INCLUDE_DIR "${ClickHouse_SOURCE_DIR}/contrib/openldap/include") + # Below, 'ldap'/'ldap_r' and 'lber' will be resolved to + # the targets defined in contrib/openldap-cmake/CMakeLists.txt + if (OPENLDAP_USE_REENTRANT_LIBS) + set (OPENLDAP_LDAP_LIBRARY "ldap_r") + else () + set 
(OPENLDAP_LDAP_LIBRARY "ldap") + endif() + set (OPENLDAP_LBER_LIBRARY "lber") + set (OPENLDAP_LIBRARIES ${OPENLDAP_LDAP_LIBRARY} ${OPENLDAP_LBER_LIBRARY}) + set (OPENLDAP_FOUND 1) + endif () + endif () + + if (OPENLDAP_FOUND) + set (USE_LDAP 1) + endif () +endif () + +message (STATUS "Using ldap=${USE_LDAP}: ${OPENLDAP_INCLUDE_DIR} : ${OPENLDAP_LIBRARIES}") diff --git a/cmake/find/libgsasl.cmake b/cmake/find/libgsasl.cmake index 589e965e19b..801b63899da 100644 --- a/cmake/find/libgsasl.cmake +++ b/cmake/find/libgsasl.cmake @@ -1,6 +1,4 @@ -if (NOT ARCH_32) - option (USE_INTERNAL_LIBGSASL_LIBRARY "Set to FALSE to use system libgsasl library instead of bundled" ${NOT_UNBUNDLED}) -endif () +option (USE_INTERNAL_LIBGSASL_LIBRARY "Set to FALSE to use system libgsasl library instead of bundled" ${NOT_UNBUNDLED}) if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/libgsasl/src/gsasl.h") if (USE_INTERNAL_LIBGSASL_LIBRARY) @@ -16,7 +14,7 @@ if (NOT USE_INTERNAL_LIBGSASL_LIBRARY) endif () if (LIBGSASL_LIBRARY AND LIBGSASL_INCLUDE_DIR) -elseif (NOT MISSING_INTERNAL_LIBGSASL_LIBRARY AND NOT ARCH_32) +elseif (NOT MISSING_INTERNAL_LIBGSASL_LIBRARY) set (LIBGSASL_INCLUDE_DIR ${ClickHouse_SOURCE_DIR}/contrib/libgsasl/src ${ClickHouse_SOURCE_DIR}/contrib/libgsasl/linux_x86_64/include) set (USE_INTERNAL_LIBGSASL_LIBRARY 1) set (LIBGSASL_LIBRARY libgsasl) diff --git a/cmake/find/msgpack.cmake b/cmake/find/msgpack.cmake new file mode 100644 index 00000000000..46344fc162f --- /dev/null +++ b/cmake/find/msgpack.cmake @@ -0,0 +1,17 @@ +option (USE_INTERNAL_MSGPACK_LIBRARY "Set to FALSE to use system msgpack library instead of bundled" ${NOT_UNBUNDLED}) + +if (USE_INTERNAL_MSGPACK_LIBRARY) + if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/msgpack-c/include/msgpack.hpp") + message(WARNING "Submodule contrib/msgpack-c is missing. To fix try run: \n git submodule update --init --recursive") + set(USE_INTERNAL_MSGPACK_LIBRARY 0) + set(MISSING_INTERNAL_MSGPACK_LIBRARY 1) + endif() +endif() + +if (USE_INTERNAL_MSGPACK_LIBRARY) + set(MSGPACK_INCLUDE_DIR ${ClickHouse_SOURCE_DIR}/contrib/msgpack-c/include) +else() + find_path(MSGPACK_INCLUDE_DIR NAMES msgpack.hpp PATHS ${MSGPACK_INCLUDE_PATHS}) +endif() + +message(STATUS "Using msgpack: ${MSGPACK_INCLUDE_DIR}") diff --git a/cmake/find/odbc.cmake b/cmake/find/odbc.cmake deleted file mode 100644 index 65e09d5c96d..00000000000 --- a/cmake/find/odbc.cmake +++ /dev/null @@ -1,37 +0,0 @@ -# This file copied from contrib/poco/cmake/FindODBC.cmake to allow build without submodules - -if(OS_LINUX) - option (ENABLE_ODBC "Enable ODBC" ${ENABLE_LIBRARIES}) -endif() - -if(ENABLE_ODBC) - if (OS_LINUX) - option(USE_INTERNAL_ODBC_LIBRARY "Set to FALSE to use system odbc library instead of bundled" ${NOT_UNBUNDLED}) - else () - option(USE_INTERNAL_ODBC_LIBRARY "Set to FALSE to use system odbc library instead of bundled" OFF) - endif() - - if(USE_INTERNAL_ODBC_LIBRARY AND NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/unixodbc/README") - message(WARNING "submodule contrib/unixodbc is missing. to fix try run: \n git submodule update --init --recursive") - set(USE_INTERNAL_ODBC_LIBRARY 0) - set(MISSING_INTERNAL_ODBC_LIBRARY 1) - endif() - - set(ODBC_INCLUDE_DIRS ) # Include directories will be either used automatically by target_include_directories or set later. 
- if(USE_INTERNAL_ODBC_LIBRARY AND NOT MISSING_INTERNAL_ODBC_LIBRARY) - set(ODBC_LIBRARY unixodbc) - set(ODBC_LIBRARIES ${ODBC_LIBRARY}) - set(ODBC_INCLUDE_DIRS "${ClickHouse_SOURCE_DIR}/contrib/unixodbc/include") - set(ODBC_FOUND 1) - else() - find_package(ODBC) - endif () - - if(ODBC_FOUND) - set(USE_ODBC 1) - set(ODBC_INCLUDE_DIRECTORIES ${ODBC_INCLUDE_DIRS}) # for old poco - set(ODBC_INCLUDE_DIR ${ODBC_INCLUDE_DIRS}) # for old poco - endif() - - message(STATUS "Using odbc=${USE_ODBC}: ${ODBC_INCLUDE_DIRS} : ${ODBC_LIBRARIES}") -endif() diff --git a/cmake/find/opencl.cmake b/cmake/find/opencl.cmake new file mode 100644 index 00000000000..b1bf4630990 --- /dev/null +++ b/cmake/find/opencl.cmake @@ -0,0 +1,17 @@ +if(ENABLE_OPENCL) + +# Intel OpenCL driver: sudo apt install intel-opencl-icd +# TODO It's possible to add it as a submodule: https://github.com/intel/compute-runtime/releases + +# OpenCL applications should link with the ICD loader +# sudo apt install opencl-headers ocl-icd-libopencl1 +# sudo ln -s /usr/lib/x86_64-linux-gnu/libOpenCL.so.1.0.0 /usr/lib/libOpenCL.so + +find_package(OpenCL REQUIRED) +if(OpenCL_FOUND) + set(USE_OPENCL 1) +endif() + +endif() + +message(STATUS "Using opencl=${USE_OPENCL}: ${OpenCL_INCLUDE_DIRS} : ${OpenCL_LIBRARIES}") diff --git a/cmake/find/parquet.cmake b/cmake/find/parquet.cmake index c57aa9c9212..4c91286dae0 100644 --- a/cmake/find/parquet.cmake +++ b/cmake/find/parquet.cmake @@ -70,6 +70,7 @@ elseif(NOT MISSING_INTERNAL_PARQUET_LIBRARY AND NOT OS_FREEBSD) set(USE_PARQUET 1) set(USE_ORC 1) + set(USE_ARROW 1) endif() endif() diff --git a/cmake/find/poco.cmake b/cmake/find/poco.cmake deleted file mode 100644 index 0c676d374f1..00000000000 --- a/cmake/find/poco.cmake +++ /dev/null @@ -1,178 +0,0 @@ -option (USE_INTERNAL_POCO_LIBRARY "Set to FALSE to use system poco library instead of bundled" ${NOT_UNBUNDLED}) - -if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/poco/CMakeLists.txt") - if (USE_INTERNAL_POCO_LIBRARY) - message (WARNING "submodule contrib/poco is missing.
to fix try run: \n git submodule update --init --recursive") - endif () - set (USE_INTERNAL_POCO_LIBRARY 0) - set (MISSING_INTERNAL_POCO_LIBRARY 1) -endif () - -if (NOT ENABLE_LIBRARIES) - set (ENABLE_POCO_NETSSL ${ENABLE_LIBRARIES} CACHE BOOL "") - set (ENABLE_POCO_MONGODB ${ENABLE_LIBRARIES} CACHE BOOL "") - set (ENABLE_POCO_REDIS ${ENABLE_LIBRARIES} CACHE BOOL "") - set (ENABLE_POCO_ODBC ${ENABLE_LIBRARIES} CACHE BOOL "") - set (ENABLE_POCO_SQL ${ENABLE_LIBRARIES} CACHE BOOL "") - set (ENABLE_POCO_JSON ${ENABLE_LIBRARIES} CACHE BOOL "") -endif () - -set (POCO_COMPONENTS Net XML SQL Data) -if (NOT DEFINED ENABLE_POCO_NETSSL OR ENABLE_POCO_NETSSL) - list (APPEND POCO_COMPONENTS Crypto NetSSL) -endif () -if (NOT DEFINED ENABLE_POCO_MONGODB OR ENABLE_POCO_MONGODB) - set(ENABLE_POCO_MONGODB 1 CACHE BOOL "") - list (APPEND POCO_COMPONENTS MongoDB) -else () - set(ENABLE_POCO_MONGODB 0 CACHE BOOL "") -endif () -if (NOT DEFINED ENABLE_POCO_REDIS OR ENABLE_POCO_REDIS) - list (APPEND POCO_COMPONENTS Redis) -endif () -# TODO: after new poco release with SQL library rename ENABLE_POCO_ODBC -> ENABLE_POCO_SQLODBC -if (NOT DEFINED ENABLE_POCO_ODBC OR ENABLE_POCO_ODBC) - list (APPEND POCO_COMPONENTS DataODBC) - list (APPEND POCO_COMPONENTS SQLODBC) -endif () -if (NOT DEFINED ENABLE_POCO_JSON OR ENABLE_POCO_JSON) - list (APPEND POCO_COMPONENTS JSON) -endif () - -if (NOT USE_INTERNAL_POCO_LIBRARY) - find_package (Poco COMPONENTS ${POCO_COMPONENTS}) -endif () - -if (Poco_INCLUDE_DIRS AND Poco_Foundation_LIBRARY) -elseif (NOT MISSING_INTERNAL_POCO_LIBRARY) - set (USE_INTERNAL_POCO_LIBRARY 1) - - set (ENABLE_ZIP 0 CACHE BOOL "") - set (ENABLE_PAGECOMPILER 0 CACHE BOOL "") - set (ENABLE_PAGECOMPILER_FILE2PAGE 0 CACHE BOOL "") - set (ENABLE_DATA_SQLITE 0 CACHE BOOL "") - set (ENABLE_DATA_MYSQL 0 CACHE BOOL "") - set (ENABLE_DATA_POSTGRESQL 0 CACHE BOOL "") - set (ENABLE_ENCODINGS 0 CACHE BOOL "") - set (ENABLE_MONGODB ${ENABLE_POCO_MONGODB} CACHE BOOL "" FORCE) - - # new after 2.0.0: - set (POCO_ENABLE_ZIP 0 CACHE BOOL "") - set (POCO_ENABLE_PAGECOMPILER 0 CACHE BOOL "") - set (POCO_ENABLE_PAGECOMPILER_FILE2PAGE 0 CACHE BOOL "") - set (POCO_ENABLE_SQL_SQLITE 0 CACHE BOOL "") - set (POCO_ENABLE_SQL_MYSQL 0 CACHE BOOL "") - set (POCO_ENABLE_SQL_POSTGRESQL 0 CACHE BOOL "") - - set (POCO_UNBUNDLED 1 CACHE BOOL "") - set (POCO_UNBUNDLED_PCRE 0 CACHE BOOL "") - set (POCO_UNBUNDLED_EXPAT 0 CACHE BOOL "") - set (POCO_STATIC ${MAKE_STATIC_LIBRARIES} CACHE BOOL "") - set (POCO_VERBOSE_MESSAGES 1 CACHE BOOL "") - - - # used in internal compiler - list (APPEND Poco_INCLUDE_DIRS - "${ClickHouse_SOURCE_DIR}/contrib/poco/Foundation/include/" - "${ClickHouse_SOURCE_DIR}/contrib/poco/Util/include/" - ) - - if (ENABLE_POCO_MONGODB) - set (Poco_MongoDB_LIBRARY PocoMongoDB) - set (Poco_MongoDB_INCLUDE_DIR "${ClickHouse_SOURCE_DIR}/contrib/poco/MongoDB/include/") - endif () - - if (NOT DEFINED ENABLE_POCO_REDIS OR ENABLE_POCO_REDIS) - set (Poco_Redis_LIBRARY PocoRedis) - set (Poco_Redis_INCLUDE_DIR "${ClickHouse_SOURCE_DIR}/contrib/poco/Redis/include/") - endif () - - if (EXISTS "${ClickHouse_SOURCE_DIR}/contrib/poco/SQL/ODBC/include/") - set (Poco_SQL_FOUND 1) - set (Poco_SQL_LIBRARY PocoSQL) - set (Poco_SQL_INCLUDE_DIR - "${ClickHouse_SOURCE_DIR}/contrib/poco/SQL/include" - "${ClickHouse_SOURCE_DIR}/contrib/poco/Data/include" - ) - if ((NOT DEFINED POCO_ENABLE_SQL_ODBC OR POCO_ENABLE_SQL_ODBC) AND ODBC_FOUND) - set (Poco_SQLODBC_INCLUDE_DIR - "${ClickHouse_SOURCE_DIR}/contrib/poco/SQL/ODBC/include/" - 
"${ClickHouse_SOURCE_DIR}/contrib/poco/Data/ODBC/include/" - ${ODBC_INCLUDE_DIRS} - ) - set (Poco_SQLODBC_LIBRARY PocoSQLODBC ${ODBC_LIBRARIES} ${LTDL_LIBRARY}) - endif () - else () - set (Poco_Data_FOUND 1) - set (Poco_Data_INCLUDE_DIR "${ClickHouse_SOURCE_DIR}/contrib/poco/Data/include") - set (Poco_Data_LIBRARY PocoData) - if ((NOT DEFINED ENABLE_DATA_ODBC OR ENABLE_DATA_ODBC) AND ODBC_FOUND) - set (USE_POCO_DATAODBC 1) - set (Poco_DataODBC_INCLUDE_DIR - "${ClickHouse_SOURCE_DIR}/contrib/poco/Data/ODBC/include/" - ${ODBC_INCLUDE_DIRS} - ) - set (Poco_DataODBC_LIBRARY PocoDataODBC ${ODBC_LIBRARIES} ${LTDL_LIBRARY}) - endif () - endif () - - if (NOT DEFINED ENABLE_POCO_JSON OR ENABLE_POCO_JSON) - set (Poco_JSON_LIBRARY PocoJSON) - set (Poco_JSON_INCLUDE_DIR "${ClickHouse_SOURCE_DIR}/contrib/poco/JSON/include/") - endif () - - if (OPENSSL_FOUND AND (NOT DEFINED ENABLE_POCO_NETSSL OR ENABLE_POCO_NETSSL)) - set (Poco_NetSSL_LIBRARY PocoNetSSL ${OPENSSL_LIBRARIES}) - set (Poco_Crypto_LIBRARY PocoCrypto ${OPENSSL_LIBRARIES}) - endif () - - if (USE_STATIC_LIBRARIES AND USE_INTERNAL_ZLIB_LIBRARY) - list (APPEND Poco_INCLUDE_DIRS - "${ClickHouse_SOURCE_DIR}/contrib/${INTERNAL_ZLIB_NAME}/" - "${ClickHouse_BINARY_DIR}/contrib/${INTERNAL_ZLIB_NAME}/" - ) - endif () - - set (Poco_Foundation_LIBRARY PocoFoundation) - set (Poco_Util_LIBRARY PocoUtil) - set (Poco_Net_LIBRARY PocoNet) - set (Poco_XML_LIBRARY PocoXML) -endif () - -if (Poco_NetSSL_LIBRARY AND Poco_Crypto_LIBRARY) - set (USE_POCO_NETSSL 1) -endif () -if (Poco_MongoDB_LIBRARY) - set (USE_POCO_MONGODB 1) -endif () -if (Poco_Redis_LIBRARY) - set (USE_POCO_REDIS 1) -endif () -if (Poco_DataODBC_LIBRARY AND ODBC_FOUND) - set (USE_POCO_DATAODBC 1) -endif () -if (Poco_SQLODBC_LIBRARY AND ODBC_FOUND) - set (USE_POCO_SQLODBC 1) -endif () -if (Poco_JSON_LIBRARY) - set (USE_POCO_JSON 1) -endif () - -message(STATUS "Using Poco: ${Poco_INCLUDE_DIRS} : ${Poco_Foundation_LIBRARY},${Poco_Util_LIBRARY},${Poco_Net_LIBRARY},${Poco_NetSSL_LIBRARY},${Poco_Crypto_LIBRARY},${Poco_XML_LIBRARY},${Poco_Data_LIBRARY},${Poco_DataODBC_LIBRARY},${Poco_SQL_LIBRARY},${Poco_SQLODBC_LIBRARY},${Poco_MongoDB_LIBRARY},${Poco_Redis_LIBRARY},${Poco_JSON_LIBRARY}; MongoDB=${USE_POCO_MONGODB}, Redis=${USE_POCO_REDIS}, DataODBC=${USE_POCO_DATAODBC}, NetSSL=${USE_POCO_NETSSL}, JSON=${USE_POCO_JSON}") - -# How to make sutable poco: -# use branch: -# develop OR poco-1.7.9-release + 6a49c94d18c654d7a20b8c8ea47071b1fdd4813b -# and merge: -# ClickHouse-Extras/clickhouse_unbundled -# ClickHouse-Extras/clickhouse_unbundled_zlib -# ClickHouse-Extras/clickhouse_task -# ClickHouse-Extras/clickhouse_misc -# ClickHouse-Extras/clickhouse_anl -# ClickHouse-Extras/clickhouse_http_header https://github.com/pocoproject/poco/pull/1574 -# ClickHouse-Extras/clickhouse_socket -# ClickHouse-Extras/clickhouse_warning -# ClickHouse-Extras/clickhouse-purge-logs-on-no-space -# ClickHouse-Extras/clickhouse_freebsd -# ClickHouse-Extras/clikhouse_no_zlib -# ClickHouse-Extras/clickhouse-fix-atomic diff --git a/cmake/find/protobuf.cmake b/cmake/find/protobuf.cmake index e3330a32e7c..5f686cfd96e 100644 --- a/cmake/find/protobuf.cmake +++ b/cmake/find/protobuf.cmake @@ -27,7 +27,7 @@ elseif(NOT MISSING_INTERNAL_PROTOBUF_LIBRARY) set(Protobuf_PROTOC_LIBRARY libprotoc) set(Protobuf_LITE_LIBRARY libprotobuf-lite) - set(Protobuf_PROTOC_EXECUTABLE ${ClickHouse_BINARY_DIR}/contrib/protobuf/cmake/protoc) + set(Protobuf_PROTOC_EXECUTABLE "$") if(NOT DEFINED PROTOBUF_GENERATE_CPP_APPEND_PATH) 
set(PROTOBUF_GENERATE_CPP_APPEND_PATH TRUE) diff --git a/cmake/find/rdkafka.cmake b/cmake/find/rdkafka.cmake index dfab142a3cd..f18674dd440 100644 --- a/cmake/find/rdkafka.cmake +++ b/cmake/find/rdkafka.cmake @@ -1,5 +1,5 @@ # Freebsd: contrib/cppkafka/include/cppkafka/detail/endianness.h:53:23: error: 'betoh16' was not declared in this scope -if (NOT ARCH_ARM AND NOT ARCH_32 AND NOT OS_FREEBSD AND OPENSSL_FOUND) +if (NOT ARCH_ARM AND NOT OS_FREEBSD AND OPENSSL_FOUND) option (ENABLE_RDKAFKA "Enable kafka" ${ENABLE_LIBRARIES}) endif () diff --git a/cmake/find/ssl.cmake b/cmake/find/ssl.cmake index 36f9d1e67ec..efc9127309c 100644 --- a/cmake/find/ssl.cmake +++ b/cmake/find/ssl.cmake @@ -2,9 +2,7 @@ option(ENABLE_SSL "Enable ssl" ${ENABLE_LIBRARIES}) if(ENABLE_SSL) -if(NOT ARCH_32) - option(USE_INTERNAL_SSL_LIBRARY "Set to FALSE to use system *ssl library instead of bundled" ${NOT_UNBUNDLED}) -endif() +option(USE_INTERNAL_SSL_LIBRARY "Set to FALSE to use system *ssl library instead of bundled" ${NOT_UNBUNDLED}) if(NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/openssl/README") if(USE_INTERNAL_SSL_LIBRARY) diff --git a/cmake/find/unwind.cmake b/cmake/find/unwind.cmake index d3653973082..c9f5f30a5d6 100644 --- a/cmake/find/unwind.cmake +++ b/cmake/find/unwind.cmake @@ -1,14 +1,5 @@ option (USE_UNWIND "Enable libunwind (better stacktraces)" ${ENABLE_LIBRARIES}) -if (NOT CMAKE_SYSTEM MATCHES "Linux" OR ARCH_ARM OR ARCH_32) - set (USE_UNWIND OFF) -endif () - -if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/libunwind/CMakeLists.txt") - message(WARNING "submodule contrib/libunwind is missing. to fix try run: \n git submodule update --init --recursive") - set (USE_UNWIND OFF) -endif () - if (USE_UNWIND) add_subdirectory(contrib/libunwind-cmake) set (UNWIND_LIBRARIES unwind) diff --git a/cmake/find/zlib.cmake b/cmake/find/zlib.cmake index fb91622e298..f65d379f577 100644 --- a/cmake/find/zlib.cmake +++ b/cmake/find/zlib.cmake @@ -1,6 +1,4 @@ -if (NOT OS_FREEBSD AND NOT ARCH_32) - option (USE_INTERNAL_ZLIB_LIBRARY "Set to FALSE to use system zlib library instead of bundled" ${NOT_UNBUNDLED}) -endif () +option (USE_INTERNAL_ZLIB_LIBRARY "Set to FALSE to use system zlib library instead of bundled" ${NOT_UNBUNDLED}) if (NOT MSVC) set (INTERNAL_ZLIB_NAME "zlib-ng" CACHE INTERNAL "") diff --git a/cmake/freebsd/default_libs.cmake b/cmake/freebsd/default_libs.cmake new file mode 100644 index 00000000000..d60df52bc6d --- /dev/null +++ b/cmake/freebsd/default_libs.cmake @@ -0,0 +1,44 @@ +set (DEFAULT_LIBS "-nodefaultlibs") + +if (NOT COMPILER_CLANG) + message (FATAL_ERROR "FreeBSD build is supported only for Clang") +endif () + +if (${CMAKE_SYSTEM_PROCESSOR} STREQUAL "amd64") + execute_process (COMMAND ${CMAKE_CXX_COMPILER} --print-file-name=libclang_rt.builtins-x86_64.a OUTPUT_VARIABLE BUILTINS_LIBRARY OUTPUT_STRIP_TRAILING_WHITESPACE) +else () + execute_process (COMMAND ${CMAKE_CXX_COMPILER} --print-file-name=libclang_rt.builtins-${CMAKE_SYSTEM_PROCESSOR}.a OUTPUT_VARIABLE BUILTINS_LIBRARY OUTPUT_STRIP_TRAILING_WHITESPACE) +endif () + +set (DEFAULT_LIBS "${DEFAULT_LIBS} ${BUILTINS_LIBRARY} ${COVERAGE_OPTION} -lc -lm -lrt -lpthread") + +message(STATUS "Default libraries: ${DEFAULT_LIBS}") + +set(CMAKE_CXX_STANDARD_LIBRARIES ${DEFAULT_LIBS}) +set(CMAKE_C_STANDARD_LIBRARIES ${DEFAULT_LIBS}) + +# Global libraries + +add_library(global-libs INTERFACE) + +# Unfortunately '-pthread' doesn't work with '-nodefaultlibs'. +# Just make sure we have pthreads at all. 
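(Illustrative sketch, not part of the patch; the consumer target name is hypothetical.) With a regular toolchain, pthread support would normally be consumed through FindThreads and its imported target; here the explicit -lpthread in DEFAULT_LIBS above does the actual linking, and the find_package(Threads REQUIRED) call below mainly verifies that a threading library is present at all:

    find_package (Threads REQUIRED)
    target_link_libraries (example_target PRIVATE Threads::Threads)  # hypothetical consumer target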
+set(THREADS_PREFER_PTHREAD_FLAG ON) +find_package(Threads REQUIRED) + +include (cmake/find/unwind.cmake) +include (cmake/find/cxx.cmake) + +add_library(global-group INTERFACE) + +target_link_libraries(global-group INTERFACE + $ +) + +link_libraries(global-group) + +# FIXME: remove when all contribs will get custom cmake lists +install( + TARGETS global-group global-libs + EXPORT global +) diff --git a/cmake/freebsd/toolchain-x86_64.cmake b/cmake/freebsd/toolchain-x86_64.cmake new file mode 100644 index 00000000000..d9839ec74ee --- /dev/null +++ b/cmake/freebsd/toolchain-x86_64.cmake @@ -0,0 +1,22 @@ +set (CMAKE_SYSTEM_NAME "FreeBSD") +set (CMAKE_SYSTEM_PROCESSOR "x86_64") +set (CMAKE_C_COMPILER_TARGET "x86_64-pc-freebsd11") +set (CMAKE_CXX_COMPILER_TARGET "x86_64-pc-freebsd11") +set (CMAKE_ASM_COMPILER_TARGET "x86_64-pc-freebsd11") +set (CMAKE_SYSROOT "${CMAKE_CURRENT_LIST_DIR}/../toolchain/freebsd-x86_64") + +set (CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY) # disable linkage check - it doesn't work in CMake + +set (CMAKE_AR "/usr/bin/ar" CACHE FILEPATH "" FORCE) +set (CMAKE_RANLIB "/usr/bin/ranlib" CACHE FILEPATH "" FORCE) + +set (LINKER_NAME "lld" CACHE STRING "" FORCE) + +set (CMAKE_EXE_LINKER_FLAGS_INIT "-fuse-ld=lld") +set (CMAKE_SHARED_LINKER_FLAGS_INIT "-fuse-ld=lld") + +set (HAS_PRE_1970_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE) +set (HAS_PRE_1970_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE) + +set (HAS_POST_2038_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE) +set (HAS_POST_2038_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE) diff --git a/cmake/fuzzer.cmake b/cmake/fuzzer.cmake new file mode 100644 index 00000000000..7ce4559ffae --- /dev/null +++ b/cmake/fuzzer.cmake @@ -0,0 +1,21 @@ +option (FUZZER "Enable fuzzer: libfuzzer") + +if (FUZZER) + if (FUZZER STREQUAL "libfuzzer") + # NOTE: Eldar Zaitov decided to name it "libfuzzer" instead of "fuzzer" to keep in mind other possible fuzzer backends. + # NOTE: no-link means that all the targets are built with instrumentation for the fuzzer, but only some of them (tests) have an entry point for the fuzzer, and it's not checked.
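(Illustrative sketch, not part of the patch; the target and source file names are hypothetical.) Under the no-link scheme described in the NOTE above, ordinary targets only receive instrumentation, while a dedicated fuzz target provides its own LLVMFuzzerTestOneInput entry point and links the engine through the LIB_FUZZING_ENGINE variable defined further down in this file:

    add_executable (example_fuzzer example_fuzzer.cpp)                    # example_fuzzer.cpp defines LLVMFuzzerTestOneInput
    target_link_libraries (example_fuzzer PRIVATE ${LIB_FUZZING_ENGINE})  # expands to -fsanitize=fuzzer unless overridden (e.g. by oss-fuzz)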
+ set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SAN_FLAGS} -fsanitize=fuzzer-no-link") + set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${SAN_FLAGS} -fsanitize=fuzzer-no-link") + if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU") + set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fsanitize=fuzzer-no-link") + endif() + + # NOTE: oss-fuzz can change LIB_FUZZING_ENGINE variable + if (NOT LIB_FUZZING_ENGINE) + set (LIB_FUZZING_ENGINE "-fsanitize=fuzzer") + endif () + + else () + message (FATAL_ERROR "Unknown fuzzer type: ${FUZZER}") + endif () +endif() diff --git a/cmake/lib_name.cmake b/cmake/lib_name.cmake index 51a424cb4e2..f18b2e52576 100644 --- a/cmake/lib_name.cmake +++ b/cmake/lib_name.cmake @@ -1,5 +1,4 @@ set(DIVIDE_INCLUDE_DIR ${ClickHouse_SOURCE_DIR}/contrib/libdivide) -set(DBMS_INCLUDE_DIR ${ClickHouse_SOURCE_DIR}/dbms/src ${ClickHouse_BINARY_DIR}/dbms/src) +set(DBMS_INCLUDE_DIR ${ClickHouse_SOURCE_DIR}/src ${ClickHouse_BINARY_DIR}/src) set(DOUBLE_CONVERSION_CONTRIB_INCLUDE_DIR ${ClickHouse_SOURCE_DIR}/contrib/double-conversion) set(METROHASH_CONTRIB_INCLUDE_DIR ${ClickHouse_SOURCE_DIR}/contrib/libmetrohash/src) -set(PCG_RANDOM_INCLUDE_DIR ${ClickHouse_SOURCE_DIR}/contrib/libpcg-random/include) diff --git a/cmake/linux/default_libs.cmake b/cmake/linux/default_libs.cmake index d18a996e2c9..0ecdfd2a3ad 100644 --- a/cmake/linux/default_libs.cmake +++ b/cmake/linux/default_libs.cmake @@ -21,7 +21,7 @@ set(CMAKE_C_STANDARD_LIBRARIES ${DEFAULT_LIBS}) # glibc-compatibility library relies to fixed version of libc headers # (because minor changes in function attributes between different glibc versions will introduce incompatibilities) # This is for x86_64. For other architectures we have separate toolchains. -if (ARCH_AMD64) +if (ARCH_AMD64 AND NOT_UNBUNDLED) set(CMAKE_C_STANDARD_INCLUDE_DIRECTORIES ${ClickHouse_SOURCE_DIR}/contrib/libc-headers/x86_64-linux-gnu ${ClickHouse_SOURCE_DIR}/contrib/libc-headers) set(CMAKE_CXX_STANDARD_INCLUDE_DIRECTORIES ${ClickHouse_SOURCE_DIR}/contrib/libc-headers/x86_64-linux-gnu ${ClickHouse_SOURCE_DIR}/contrib/libc-headers) endif () diff --git a/cmake/print_include_directories.cmake b/cmake/print_include_directories.cmake index fe5e9e8e6e9..62ebd434320 100644 --- a/cmake/print_include_directories.cmake +++ b/cmake/print_include_directories.cmake @@ -26,10 +26,6 @@ if (TARGET ${Boost_PROGRAM_OPTIONS_LIBRARY}) list(APPEND dirs ${dirs1}) endif () -if (USE_INTERNAL_POCO_LIBRARY) - list(APPEND dirs "./contrib/poco/Foundation/include") -endif () - list(REMOVE_DUPLICATES dirs) file (WRITE ${CMAKE_CURRENT_BINARY_DIR}/include_directories.txt "") foreach (dir ${dirs}) diff --git a/cmake/sanitize.cmake b/cmake/sanitize.cmake index 13947425f7b..32443ed78c3 100644 --- a/cmake/sanitize.cmake +++ b/cmake/sanitize.cmake @@ -23,7 +23,7 @@ if (SANITIZE) # RelWithDebInfo, and downgrade optimizations to -O1 but not to -Og, to # keep the binary size down. # TODO: try compiling with -Og and with ld.gold. 
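(Usage note, inferred from how these options are wired rather than stated in the patch.) The fuzzer and sanitizer modes above are selected through cache variables on the CMake command line, for example:

    # cmake -DSANITIZE=memory -DCMAKE_BUILD_TYPE=RelWithDebInfo ..
    # cmake -DSANITIZE=thread ..
    # cmake -DFUZZER=libfuzzer ..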
- set (MSAN_FLAGS "-fsanitize=memory -fsanitize-memory-track-origins -fno-optimize-sibling-calls -fsanitize-blacklist=${CMAKE_SOURCE_DIR}/dbms/tests/msan_suppressions.txt") + set (MSAN_FLAGS "-fsanitize=memory -fsanitize-memory-track-origins -fno-optimize-sibling-calls -fsanitize-blacklist=${CMAKE_SOURCE_DIR}/tests/msan_suppressions.txt") set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SAN_FLAGS} ${MSAN_FLAGS}") set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${SAN_FLAGS} ${MSAN_FLAGS}") @@ -35,30 +35,11 @@ if (SANITIZE) set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -static-libmsan") endif () - # Temporarily disable many external libraries that don't work under - # MemorySanitizer yet. - set (ENABLE_HDFS 0 CACHE BOOL "") - set (ENABLE_CAPNP 0 CACHE BOOL "") - set (ENABLE_RDKAFKA 0 CACHE BOOL "") - set (ENABLE_POCO_MONGODB 0 CACHE BOOL "") - set (ENABLE_POCO_NETSSL 0 CACHE BOOL "") - set (ENABLE_POCO_ODBC 0 CACHE BOOL "") - set (ENABLE_ODBC 0 CACHE BOOL "") - set (ENABLE_MYSQL 0 CACHE BOOL "") - set (ENABLE_EMBEDDED_COMPILER 0 CACHE BOOL "") - set (USE_INTERNAL_CAPNP_LIBRARY 0 CACHE BOOL "") - set (USE_SIMDJSON 0 CACHE BOOL "") - set (ENABLE_ORC 0 CACHE BOOL "") - set (ENABLE_PARQUET 0 CACHE BOOL "") - set (USE_CAPNP 0 CACHE BOOL "") - set (USE_INTERNAL_ORC_LIBRARY 0 CACHE BOOL "") - set (USE_ORC 0 CACHE BOOL "") - set (USE_AVRO 0 CACHE BOOL "") - set (ENABLE_SSL 0 CACHE BOOL "") - elseif (SANITIZE STREQUAL "thread") - set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SAN_FLAGS} -fsanitize=thread") - set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${SAN_FLAGS} -fsanitize=thread") + set (TSAN_FLAGS "-fsanitize=thread -fsanitize-blacklist=${CMAKE_SOURCE_DIR}/tests/tsan_suppressions.txt") + + set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SAN_FLAGS} ${TSAN_FLAGS}") + set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${SAN_FLAGS} ${TSAN_FLAGS}") if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU") set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fsanitize=thread") endif() @@ -79,18 +60,6 @@ if (SANITIZE) # llvm-tblgen, that is used during LLVM build, doesn't work with UBSan. set (ENABLE_EMBEDDED_COMPILER 0 CACHE BOOL "") - elseif (SANITIZE STREQUAL "libfuzzer") - # NOTE: Eldar Zaitov decided to name it "libfuzzer" instead of "fuzzer" to keep in mind another possible fuzzer backends. - # NOTE: no-link means that all the targets are built with instrumentation for fuzzer, but only some of them (tests) have entry point for fuzzer and it's not checked. 
- set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SAN_FLAGS} -fsanitize=fuzzer-no-link,address,undefined -fsanitize-address-use-after-scope") - set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${SAN_FLAGS} -fsanitize=fuzzer-no-link,address,undefined -fsanitize-address-use-after-scope") - if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU") - set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fsanitize=fuzzer-no-link,address,undefined -fsanitize-address-use-after-scope") - endif() - if (MAKE_STATIC_LIBRARIES AND CMAKE_CXX_COMPILER_ID STREQUAL "GNU") - set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -static-libasan -static-libubsan") - endif () - set (LIBFUZZER_CMAKE_CXX_FLAGS "-fsanitize=fuzzer,address,undefined -fsanitize-address-use-after-scope") else () message (FATAL_ERROR "Unknown sanitizer type: ${SANITIZE}") endif () diff --git a/cmake/target.cmake b/cmake/target.cmake index 1f40e28e76b..1325758811f 100644 --- a/cmake/target.cmake +++ b/cmake/target.cmake @@ -24,6 +24,10 @@ if (CMAKE_CROSSCOMPILING) set (ENABLE_PARQUET OFF CACHE INTERNAL "") set (ENABLE_MYSQL OFF CACHE INTERNAL "") endif () + elseif (OS_FREEBSD) + # FIXME: broken dependencies + set (ENABLE_PROTOBUF OFF CACHE INTERNAL "") + set (ENABLE_EMBEDDED_COMPILER OFF CACHE INTERNAL "") else () message (FATAL_ERROR "Trying to cross-compile to unsupported system: ${CMAKE_SYSTEM_NAME}!") endif () diff --git a/cmake/toolchain/freebsd-x86_64/README.txt b/cmake/toolchain/freebsd-x86_64/README.txt new file mode 100644 index 00000000000..f9fb74f417b --- /dev/null +++ b/cmake/toolchain/freebsd-x86_64/README.txt @@ -0,0 +1,2 @@ +wget https://clickhouse-datasets.s3.yandex.net/toolchains/toolchains/freebsd-11.3-toolchain.tar.xz +tar xJf freebsd-11.3-toolchain.tar.xz --strip-components=1 diff --git a/cmake/tools.cmake b/cmake/tools.cmake index 6aaeb5b8a16..d261b62eca3 100644 --- a/cmake/tools.cmake +++ b/cmake/tools.cmake @@ -1,23 +1,40 @@ if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU") set (COMPILER_GCC 1) -elseif (CMAKE_CXX_COMPILER_ID MATCHES "Clang|AppleClang") +elseif (CMAKE_CXX_COMPILER_ID MATCHES "AppleClang") + set (COMPILER_CLANG 1) # Safe to treat AppleClang as a regular Clang, in general. +elseif (CMAKE_CXX_COMPILER_ID MATCHES "Clang") set (COMPILER_CLANG 1) endif () if (COMPILER_GCC) # Require minimum version of gcc - set (GCC_MINIMUM_VERSION 8) + set (GCC_MINIMUM_VERSION 9) if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS ${GCC_MINIMUM_VERSION} AND NOT CMAKE_VERSION VERSION_LESS 2.8.9) message (FATAL_ERROR "GCC version must be at least ${GCC_MINIMUM_VERSION}. 
For example, if GCC ${GCC_MINIMUM_VERSION} is available under gcc-${GCC_MINIMUM_VERSION}, g++-${GCC_MINIMUM_VERSION} names, do the following: export CC=gcc-${GCC_MINIMUM_VERSION} CXX=g++-${GCC_MINIMUM_VERSION}; rm -rf CMakeCache.txt CMakeFiles; and re run cmake or ./release.") endif () elseif (COMPILER_CLANG) - # Require minimum version of clang - set (CLANG_MINIMUM_VERSION 7) - if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS ${CLANG_MINIMUM_VERSION}) - message (FATAL_ERROR "Clang version must be at least ${CLANG_MINIMUM_VERSION}.") + # Require minimum version of clang/apple-clang + if (CMAKE_CXX_COMPILER_ID MATCHES "AppleClang") + # AppleClang 10.0.1 (Xcode 10.2) corresponds to LLVM/Clang upstream version 7.0.0 + # AppleClang 11.0.0 (Xcode 11.0) corresponds to LLVM/Clang upstream version 8.0.0 + set (XCODE_MINIMUM_VERSION 10.2) + set (APPLE_CLANG_MINIMUM_VERSION 10.0.1) + if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS ${APPLE_CLANG_MINIMUM_VERSION}) + message (FATAL_ERROR "AppleClang compiler version must be at least ${APPLE_CLANG_MINIMUM_VERSION} (Xcode ${XCODE_MINIMUM_VERSION}).") + elseif (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 11.0.0) + # char8_t is available starting (upstream vanilla) Clang 7, but prior to Clang 8, + # it is not enabled by -std=c++20 and can be enabled with an explicit -fchar8_t. + set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fchar8_t") + set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fchar8_t") + endif () + else () + set (CLANG_MINIMUM_VERSION 8) + if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS ${CLANG_MINIMUM_VERSION}) + message (FATAL_ERROR "Clang version must be at least ${CLANG_MINIMUM_VERSION}.") + endif () endif () else () - message (WARNING "You are using an unsupported compiler. Compilation has only been tested with Clang 6+ and GCC 7+.") + message (WARNING "You are using an unsupported compiler. Compilation has only been tested with Clang and GCC.") endif () STRING(REGEX MATCHALL "[0-9]+" COMPILER_VERSION_LIST ${CMAKE_CXX_COMPILER_VERSION}) @@ -32,29 +49,29 @@ else () find_program (GOLD_PATH NAMES "ld.gold" "gold") endif () -if (NOT OS_FREEBSD) -# We prefer LLD linker over Gold or BFD. - if (NOT LINKER_NAME) - if (LLD_PATH) - if (COMPILER_GCC) - # GCC driver requires one of supported linker names like "lld". - set (LINKER_NAME "lld") - else () - # Clang driver simply allows full linker path. - set (LINKER_NAME ${LLD_PATH}) +if (OS_LINUX) + # We prefer LLD linker over Gold or BFD on Linux. + if (NOT LINKER_NAME) + if (LLD_PATH) + if (COMPILER_GCC) + # GCC driver requires one of supported linker names like "lld". + set (LINKER_NAME "lld") + else () + # Clang driver simply allows full linker path.
+ set (LINKER_NAME ${LLD_PATH}) + endif () endif () endif () - endif () - if (NOT LINKER_NAME) - if (GOLD_PATH) - if (COMPILER_GCC) - set (LINKER_NAME "gold") - else () - set (LINKER_NAME ${GOLD_PATH}) + if (NOT LINKER_NAME) + if (GOLD_PATH) + if (COMPILER_GCC) + set (LINKER_NAME "gold") + else () + set (LINKER_NAME ${GOLD_PATH}) + endif () endif () endif () - endif () endif () if (LINKER_NAME) diff --git a/dbms/cmake/version.cmake b/cmake/version.cmake similarity index 79% rename from dbms/cmake/version.cmake rename to cmake/version.cmake index 16e3829c83b..3f51f59cf0f 100644 --- a/dbms/cmake/version.cmake +++ b/cmake/version.cmake @@ -1,11 +1,11 @@ # This strings autochanged from release_lib.sh: -set(VERSION_REVISION 54433) +set(VERSION_REVISION 54435) set(VERSION_MAJOR 20) -set(VERSION_MINOR 3) +set(VERSION_MINOR 5) set(VERSION_PATCH 1) -set(VERSION_GITHASH d93e7e5ccf8fcca724e917581b00bf569947fff9) -set(VERSION_DESCRIBE v20.3.1.1-prestable) -set(VERSION_STRING 20.3.1.1) +set(VERSION_GITHASH 91df18a906dcffdbee6816e5389df6c65f86e35f) +set(VERSION_DESCRIBE v20.5.1.1-prestable) +set(VERSION_STRING 20.5.1.1) # end of autochange set(VERSION_EXTRA "" CACHE STRING "") diff --git a/cmake/warnings.cmake b/cmake/warnings.cmake new file mode 100644 index 00000000000..cc97e727101 --- /dev/null +++ b/cmake/warnings.cmake @@ -0,0 +1,165 @@ +# Our principle is to enable as many warnings as possible and always do it with "warnings as errors" flag. +# +# But it comes with some cost: +# - we have to disable some warnings in 3rd party libraries (they are located in "contrib" directory) +# - we have to include headers of these libraries as -isystem to avoid warnings from headers +# (this is the same behaviour as if these libraries were located in /usr/include) +# - sometimes warnings from 3rd party libraries may come from macro substitutions in our code +# and we have to wrap them with #pragma GCC/clang diagnostic ignored + +if (NOT MSVC) + set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wextra") +endif () + +if (USE_DEBUG_HELPERS) + set (INCLUDE_DEBUG_HELPERS "-I${ClickHouse_SOURCE_DIR}/base -include ${ClickHouse_SOURCE_DIR}/src/Core/iostream_debug_helpers.h") + set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${INCLUDE_DEBUG_HELPERS}") +endif () + +# Add some warnings that are not available even with -Wall -Wextra -Wpedantic. + +option (WEVERYTHING "Enables -Weverything option with some exceptions. This is intended for exploration of new compiler warnings that may be found to be useful. Only makes sense for clang." 
ON) + +if (COMPILER_CLANG) + add_warning(pedantic) + no_warning(vla-extension) + no_warning(zero-length-array) + + add_warning(comma) + add_warning(conditional-uninitialized) + add_warning(covered-switch-default) + add_warning(deprecated) + add_warning(embedded-directive) + add_warning(empty-init-stmt) # linux-only + add_warning(extra-semi-stmt) # linux-only + add_warning(extra-semi) + add_warning(gnu-case-range) + add_warning(inconsistent-missing-destructor-override) + add_warning(newline-eof) + add_warning(old-style-cast) + add_warning(range-loop-analysis) + add_warning(redundant-parens) + add_warning(reserved-id-macro) + add_warning(shadow-field) # clang 8+ + add_warning(shadow-uncaptured-local) + add_warning(shadow) + add_warning(string-plus-int) # clang 8+ + add_warning(undef) + add_warning(unreachable-code-return) + add_warning(unreachable-code) + add_warning(unused-exception-parameter) + add_warning(unused-macros) + add_warning(unused-member-function) + add_warning(zero-as-null-pointer-constant) + + if (WEVERYTHING) + add_warning(everything) + no_warning(c++98-compat-pedantic) + no_warning(c++98-compat) + no_warning(c99-extensions) + no_warning(conversion) + no_warning(ctad-maybe-unsupported) # clang 9+, linux-only + no_warning(deprecated-dynamic-exception-spec) + no_warning(disabled-macro-expansion) + no_warning(documentation-unknown-command) + no_warning(double-promotion) + no_warning(exit-time-destructors) + no_warning(float-equal) + no_warning(global-constructors) + no_warning(missing-prototypes) + no_warning(missing-variable-declarations) + no_warning(nested-anon-types) + no_warning(packed) + no_warning(padded) + no_warning(return-std-move-in-c++11) # clang 7+ + no_warning(shift-sign-overflow) + no_warning(sign-conversion) + no_warning(switch-enum) + no_warning(undefined-func-template) + no_warning(unused-template) + no_warning(vla) + no_warning(weak-template-vtables) + no_warning(weak-vtables) + + # TODO Enable conversion, sign-conversion, double-promotion warnings. + endif () +elseif (COMPILER_GCC) + # Add compiler options only to c++ compiler + function(add_cxx_compile_options option) + add_compile_options("$<$,CXX>:${option}>") + endfunction() + # Warn about boolean expression compared with an integer value different from true/false + add_cxx_compile_options(-Wbool-compare) + # Warn whenever a pointer is cast such that the required alignment of the target is increased. + add_cxx_compile_options(-Wcast-align) + # Warn whenever a pointer is cast so as to remove a type qualifier from the target type. + add_cxx_compile_options(-Wcast-qual) + # Warn when deleting a pointer to incomplete type, which may cause undefined behavior at runtime + add_cxx_compile_options(-Wdelete-incomplete) + # Warn if a requested optimization pass is disabled. 
Code is too big or too complex + add_cxx_compile_options(-Wdisabled-optimization) + # Warn about duplicated conditions in an if-else-if chain + add_cxx_compile_options(-Wduplicated-cond) + # Warn about a comparison between values of different enumerated types + add_cxx_compile_options(-Wenum-compare) + # Warn about uninitialized variables that are initialized with themselves + add_cxx_compile_options(-Winit-self) + # Warn about logical not used on the left hand side operand of a comparison + add_cxx_compile_options(-Wlogical-not-parentheses) + # Warn about suspicious uses of logical operators in expressions + add_cxx_compile_options(-Wlogical-op) + # Warn if there exists a path from the function entry to a use of the variable that is uninitialized. + add_cxx_compile_options(-Wmaybe-uninitialized) + # Warn when the indentation of the code does not reflect the block structure + add_cxx_compile_options(-Wmisleading-indentation) + # Warn if a global function is defined without a previous declaration - disabled because of build times + # add_cxx_compile_options(-Wmissing-declarations) + # Warn if a user-supplied include directory does not exist + add_cxx_compile_options(-Wmissing-include-dirs) + # Obvious + add_cxx_compile_options(-Wnon-virtual-dtor) + # Obvious + add_cxx_compile_options(-Wno-return-local-addr) + # This warning is disabled due to false positives if compiled with libc++: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=90037 + #add_cxx_compile_options(-Wnull-dereference) + # Obvious + add_cxx_compile_options(-Wodr) + # Obvious + add_cxx_compile_options(-Wold-style-cast) + # Warn when a function declaration hides virtual functions from a base class + # add_cxx_compile_options(-Woverloaded-virtual) + # Warn about placement new expressions with undefined behavior + add_cxx_compile_options(-Wplacement-new=2) + # Warn about anything that depends on the “size of” a function type or of void + add_cxx_compile_options(-Wpointer-arith) + # Warn if anything is declared more than once in the same scope + add_cxx_compile_options(-Wredundant-decls) + # Member initialization reordering + add_cxx_compile_options(-Wreorder) + # Obvious + add_cxx_compile_options(-Wshadow) + # Warn if left shifting a negative value + add_cxx_compile_options(-Wshift-negative-value) + # Warn about a definition of an unsized deallocation function + add_cxx_compile_options(-Wsized-deallocation) + # Warn when the sizeof operator is applied to a parameter that is declared as an array in a function definition + add_cxx_compile_options(-Wsizeof-array-argument) + # Warn for suspicious length parameters to certain string and memory built-in functions if the argument uses sizeof + add_cxx_compile_options(-Wsizeof-pointer-memaccess) + + if (CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 9) + # Warn about overriding virtual functions that are not marked with the override keyword + add_cxx_compile_options(-Wsuggest-override) + endif () + + # Warn whenever a switch statement has an index of boolean type and the case values are outside the range of a boolean type + add_cxx_compile_options(-Wswitch-bool) + # Warn if a self-comparison always evaluates to true or false + add_cxx_compile_options(-Wtautological-compare) + # Warn about trampolines generated for pointers to nested functions + add_cxx_compile_options(-Wtrampolines) + # Obvious + add_cxx_compile_options(-Wunused) + # Warn if vector operation is not implemented via SIMD capabilities of the architecture + add_cxx_compile_options(-Wvector-operation-performance) +endif () diff 
--git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index 0208b4e859a..1031285eac7 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -1,10 +1,39 @@ # Third-party libraries may have substandard code. +# Put all targets defined here and in added subfolders under "contrib/" folder in GUI-based IDEs by default. +# Some of third-party projects may override CMAKE_FOLDER or FOLDER property of their targets, so they will +# appear not in "contrib/" as originally planned here. +get_filename_component (_current_dir_name "${CMAKE_CURRENT_LIST_DIR}" NAME) +if (CMAKE_FOLDER) + set (CMAKE_FOLDER "${CMAKE_FOLDER}/${_current_dir_name}") +else () + set (CMAKE_FOLDER "${_current_dir_name}") +endif () +unset (_current_dir_name) + set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -w") set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -w") set_property(DIRECTORY PROPERTY EXCLUDE_FROM_ALL 1) +add_subdirectory (cctz-cmake) +add_subdirectory (consistent-hashing-sumbur) +add_subdirectory (consistent-hashing) +add_subdirectory (croaring) +add_subdirectory (FastMemcpy) +add_subdirectory (grpc-cmake) +add_subdirectory (jemalloc-cmake) +add_subdirectory (libcpuid-cmake) +add_subdirectory (murmurhash) +add_subdirectory (replxx-cmake) +add_subdirectory (ryu-cmake) +add_subdirectory (unixodbc-cmake) + +add_subdirectory (poco-cmake) + + +# TODO: refactor the contrib libraries below this comment. + if (USE_INTERNAL_BOOST_LIBRARY) add_subdirectory (boost-cmake) endif () @@ -27,8 +56,6 @@ if (USE_INTERNAL_DOUBLE_CONVERSION_LIBRARY) add_subdirectory (double-conversion-cmake) endif () -add_subdirectory (ryu-cmake) - if (USE_INTERNAL_CITYHASH_LIBRARY) add_subdirectory (cityhash102) endif () @@ -41,9 +68,6 @@ if (USE_INTERNAL_METROHASH_LIBRARY) add_subdirectory (libmetrohash) endif () -add_subdirectory (murmurhash) -add_subdirectory (croaring) - if (USE_INTERNAL_BTRIE_LIBRARY) add_subdirectory (libbtrie) endif () @@ -75,18 +99,6 @@ if (USE_INTERNAL_ZLIB_LIBRARY) endif () endif () -if (USE_INTERNAL_CCTZ_LIBRARY) - add_subdirectory (cctz-cmake) -endif () - -if (ENABLE_JEMALLOC AND USE_INTERNAL_JEMALLOC_LIBRARY) - add_subdirectory (jemalloc-cmake) -endif () - -if (USE_INTERNAL_CPUID_LIBRARY) - add_subdirectory (libcpuid) -endif () - if (USE_INTERNAL_H3_LIBRARY) add_subdirectory(h3-cmake) endif () @@ -94,12 +106,14 @@ endif () if (USE_INTERNAL_SSL_LIBRARY) add_subdirectory (openssl-cmake) - # This is for Poco library - set (POCO_SKIP_OPENSSL_FIND 1) add_library(OpenSSL::Crypto ALIAS ${OPENSSL_CRYPTO_LIBRARY}) add_library(OpenSSL::SSL ALIAS ${OPENSSL_SSL_LIBRARY}) endif () +if (ENABLE_LDAP AND USE_INTERNAL_LDAP_LIBRARY) + add_subdirectory (openldap-cmake) +endif () + function(mysql_support) set(CLIENT_PLUGIN_CACHING_SHA2_PASSWORD STATIC) set(CLIENT_PLUGIN_SHA256_PASSWORD STATIC) @@ -135,11 +149,6 @@ if (USE_RDKAFKA) add_subdirectory (cppkafka-cmake) endif() -if (ENABLE_ODBC AND USE_INTERNAL_ODBC_LIBRARY) - add_subdirectory (unixodbc-cmake) - add_library(ODBC::ODBC ALIAS ${ODBC_LIBRARIES}) -endif () - if (ENABLE_ICU AND USE_INTERNAL_ICU_LIBRARY) add_subdirectory (icu-cmake) endif () @@ -178,24 +187,27 @@ if (USE_INTERNAL_PARQUET_LIBRARY_NATIVE_CMAKE) set (PARQUET_ARROW_LINKAGE "static" CACHE INTERNAL "") set (ARROW_TEST_LINKAGE "static" CACHE INTERNAL "") set (ARROW_BUILD_STATIC ${MAKE_STATIC_LIBRARIES} CACHE INTERNAL "") - else() + else () set (PARQUET_ARROW_LINKAGE "shared" CACHE INTERNAL "") set (ARROW_TEST_LINKAGE "shared" CACHE INTERNAL "") - endif() + endif () - if(CMAKE_BUILD_TYPE STREQUAL "RELWITHDEBINFO") - 
set(_save_build_type ${CMAKE_BUILD_TYPE}) - set(CMAKE_BUILD_TYPE RELEASE) - endif() + if (CMAKE_BUILD_TYPE_UC STREQUAL "RELWITHDEBINFO") + set (_save_build_type ${CMAKE_BUILD_TYPE}) + set (CMAKE_BUILD_TYPE Release) + string (TOUPPER ${CMAKE_BUILD_TYPE} CMAKE_BUILD_TYPE_UC) + endif () # Because Arrow uses CMAKE_SOURCE_DIR as a project path # Hopefully will be fixed in https://github.com/apache/arrow/pull/2676 set (CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${ClickHouse_SOURCE_DIR}/contrib/arrow/cpp/cmake_modules") add_subdirectory (arrow/cpp) - if(_save_build_type) - set(CMAKE_BUILD_TYPE ${_save_build_type}) - endif() + if (_save_build_type) + set (CMAKE_BUILD_TYPE ${_save_build_type}) + unset (_save_build_type) + string (TOUPPER ${CMAKE_BUILD_TYPE} CMAKE_BUILD_TYPE_UC) + endif () else() add_subdirectory(arrow-cmake) @@ -211,29 +223,6 @@ if (USE_INTERNAL_AVRO_LIBRARY) add_subdirectory(avro-cmake) endif() -if (USE_INTERNAL_POCO_LIBRARY) - set (POCO_VERBOSE_MESSAGES 0 CACHE INTERNAL "") - set (save_CMAKE_CXX_FLAGS ${CMAKE_CXX_FLAGS}) - set (save_CMAKE_C_FLAGS ${CMAKE_C_FLAGS}) - set (_save ${ENABLE_TESTS}) - set (ENABLE_TESTS 0) - set (POCO_ENABLE_TESTS 0) - set (CMAKE_DISABLE_FIND_PACKAGE_ZLIB 1) - if (MSVC OR NOT USE_POCO_DATAODBC) - set (ENABLE_DATA_ODBC 0 CACHE INTERNAL "") # TODO (build fail) - endif () - add_subdirectory (poco) - unset (CMAKE_DISABLE_FIND_PACKAGE_ZLIB) - set (ENABLE_TESTS ${_save}) - set (CMAKE_CXX_FLAGS ${save_CMAKE_CXX_FLAGS}) - set (CMAKE_C_FLAGS ${save_CMAKE_C_FLAGS}) - - if (OPENSSL_FOUND AND TARGET Crypto AND (NOT DEFINED ENABLE_POCO_NETSSL OR ENABLE_POCO_NETSSL)) - # Bug in poco https://github.com/pocoproject/poco/pull/2100 found on macos - target_include_directories(Crypto SYSTEM PUBLIC ${OPENSSL_INCLUDE_DIR}) - endif () -endif () - if(USE_INTERNAL_GTEST_LIBRARY) # Google Test from sources add_subdirectory(${ClickHouse_SOURCE_DIR}/contrib/googletest/googletest ${CMAKE_CURRENT_BINARY_DIR}/googletest) @@ -310,12 +299,13 @@ if (USE_BASE64) endif() if (USE_INTERNAL_HYPERSCAN_LIBRARY) - add_subdirectory (hyperscan) - # The library is large - avoid bloat. 
     if (USE_STATIC_LIBRARIES)
+        add_subdirectory (hyperscan)
         target_compile_options (hs PRIVATE -g0)
     else ()
+        set(BUILD_SHARED_LIBS 1 CACHE INTERNAL "")
+        add_subdirectory (hyperscan)
         target_compile_options (hs_shared PRIVATE -g0)
     endif ()
 endif()
@@ -324,14 +314,6 @@ if (USE_SIMDJSON)
     add_subdirectory (simdjson-cmake)
 endif()
 
-if (USE_MIMALLOC)
-    add_subdirectory (mimalloc)
-endif()
-
 if (USE_FASTOPS)
     add_subdirectory (fastops-cmake)
 endif()
-
-add_subdirectory(grpc-cmake)
-
-add_subdirectory(replxx-cmake)
diff --git a/contrib/FastMemcpy/CMakeLists.txt b/contrib/FastMemcpy/CMakeLists.txt
new file mode 100644
index 00000000000..8efe6d45dff
--- /dev/null
+++ b/contrib/FastMemcpy/CMakeLists.txt
@@ -0,0 +1,28 @@
+option (ENABLE_FASTMEMCPY "Enable FastMemcpy library (only internal)" ${ENABLE_LIBRARIES})
+
+if (NOT OS_LINUX OR ARCH_AARCH64)
+    set (ENABLE_FASTMEMCPY OFF)
+endif ()
+
+if (ENABLE_FASTMEMCPY)
+    set (LIBRARY_DIR ${ClickHouse_SOURCE_DIR}/contrib/FastMemcpy)
+
+    set (SRCS
+        ${LIBRARY_DIR}/FastMemcpy.c
+
+        memcpy_wrapper.c
+    )
+
+    add_library (FastMemcpy ${SRCS})
+    target_include_directories (FastMemcpy PUBLIC ${LIBRARY_DIR})
+
+    target_compile_definitions(FastMemcpy PUBLIC USE_FASTMEMCPY=1)
+
+    message (STATUS "Using FastMemcpy")
+else ()
+    add_library (FastMemcpy INTERFACE)
+
+    target_compile_definitions(FastMemcpy INTERFACE USE_FASTMEMCPY=0)
+
+    message (STATUS "Not using FastMemcpy")
+endif ()
diff --git a/contrib/FastMemcpy/FastMemcpy.c b/contrib/FastMemcpy/FastMemcpy.c
new file mode 100644
index 00000000000..5021bcc7d16
--- /dev/null
+++ b/contrib/FastMemcpy/FastMemcpy.c
@@ -0,0 +1,220 @@
+//=====================================================================
+//
+// FastMemcpy.c - skywind3000@163.com, 2015
+//
+// feature:
+// 50% speed up in avg. vs standard memcpy (tested in vc2012/gcc4.9)
+//
+//=====================================================================
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+
+#if (defined(_WIN32) || defined(WIN32))
+#include <windows.h>
+#include <mmsystem.h>
+#ifdef _MSC_VER
+#pragma comment(lib, "winmm.lib")
+#endif
+#elif defined(__unix)
+#include <sys/time.h>
+#include <unistd.h>
+#else
+#error it can only be compiled under windows or unix
+#endif
+
+#include "FastMemcpy.h"
+
+unsigned int gettime()
+{
+    #if (defined(_WIN32) || defined(WIN32))
+    return timeGetTime();
+    #else
+    static struct timezone tz={ 0,0 };
+    struct timeval time;
+    gettimeofday(&time,&tz);
+    return (time.tv_sec * 1000 + time.tv_usec / 1000);
+    #endif
+}
+
+void sleepms(unsigned int millisec)
+{
+#if defined(_WIN32) || defined(WIN32)
+    Sleep(millisec);
+#else
+    usleep(millisec * 1000);
+#endif
+}
+
+
+void benchmark(int dstalign, int srcalign, size_t size, int times)
+{
+    char *DATA1 = (char*)malloc(size + 64);
+    char *DATA2 = (char*)malloc(size + 64);
+    size_t LINEAR1 = ((size_t)DATA1);
+    size_t LINEAR2 = ((size_t)DATA2);
+    char *ALIGN1 = (char*)(((64 - (LINEAR1 & 63)) & 63) + LINEAR1);
+    char *ALIGN2 = (char*)(((64 - (LINEAR2 & 63)) & 63) + LINEAR2);
+    char *dst = (dstalign)? ALIGN1 : (ALIGN1 + 1);
+    char *src = (srcalign)? ALIGN2 : (ALIGN2 + 3);
+    unsigned int t1, t2;
+    int k;
+
+    sleepms(100);
+    t1 = gettime();
+    for (k = times; k > 0; k--) {
+        memcpy(dst, src, size);
+    }
+    t1 = gettime() - t1;
+    sleepms(100);
+    t2 = gettime();
+    for (k = times; k > 0; k--) {
+        memcpy_fast(dst, src, size);
+    }
+    t2 = gettime() - t2;
+
+    free(DATA1);
+    free(DATA2);
+
+    printf("result(dst %s, src %s): memcpy_fast=%dms memcpy=%d ms\n",
+        dstalign? "aligned" : "unalign",
+        srcalign?
"aligned" : "unalign", (int)t2, (int)t1); +} + + +void bench(int copysize, int times) +{ + printf("benchmark(size=%d bytes, times=%d):\n", copysize, times); + benchmark(1, 1, copysize, times); + benchmark(1, 0, copysize, times); + benchmark(0, 1, copysize, times); + benchmark(0, 0, copysize, times); + printf("\n"); +} + + +void random_bench(int maxsize, int times) +{ + static char A[11 * 1024 * 1024 + 2]; + static char B[11 * 1024 * 1024 + 2]; + static int random_offsets[0x10000]; + static int random_sizes[0x8000]; + unsigned int i, p1, p2; + unsigned int t1, t2; + for (i = 0; i < 0x10000; i++) { // generate random offsets + random_offsets[i] = rand() % (10 * 1024 * 1024 + 1); + } + for (i = 0; i < 0x8000; i++) { // generate random sizes + random_sizes[i] = 1 + rand() % maxsize; + } + sleepms(100); + t1 = gettime(); + for (p1 = 0, p2 = 0, i = 0; i < times; i++) { + int offset1 = random_offsets[(p1++) & 0xffff]; + int offset2 = random_offsets[(p1++) & 0xffff]; + int size = random_sizes[(p2++) & 0x7fff]; + memcpy(A + offset1, B + offset2, size); + } + t1 = gettime() - t1; + sleepms(100); + t2 = gettime(); + for (p1 = 0, p2 = 0, i = 0; i < times; i++) { + int offset1 = random_offsets[(p1++) & 0xffff]; + int offset2 = random_offsets[(p1++) & 0xffff]; + int size = random_sizes[(p2++) & 0x7fff]; + memcpy_fast(A + offset1, B + offset2, size); + } + t2 = gettime() - t2; + printf("benchmark random access:\n"); + printf("memcpy_fast=%dms memcpy=%dms\n\n", (int)t2, (int)t1); +} + + +#ifdef _MSC_VER +#pragma comment(lib, "winmm.lib") +#endif + +int main(void) +{ + bench(32, 0x1000000); + bench(64, 0x1000000); + bench(512, 0x800000); + bench(1024, 0x400000); + bench(4096, 0x80000); + bench(8192, 0x40000); + bench(1024 * 1024 * 1, 0x800); + bench(1024 * 1024 * 4, 0x200); + bench(1024 * 1024 * 8, 0x100); + + random_bench(2048, 8000000); + + return 0; +} + + + + +/* +benchmark(size=32 bytes, times=16777216): +result(dst aligned, src aligned): memcpy_fast=78ms memcpy=260 ms +result(dst aligned, src unalign): memcpy_fast=78ms memcpy=250 ms +result(dst unalign, src aligned): memcpy_fast=78ms memcpy=266 ms +result(dst unalign, src unalign): memcpy_fast=78ms memcpy=234 ms + +benchmark(size=64 bytes, times=16777216): +result(dst aligned, src aligned): memcpy_fast=109ms memcpy=281 ms +result(dst aligned, src unalign): memcpy_fast=109ms memcpy=328 ms +result(dst unalign, src aligned): memcpy_fast=109ms memcpy=343 ms +result(dst unalign, src unalign): memcpy_fast=93ms memcpy=344 ms + +benchmark(size=512 bytes, times=8388608): +result(dst aligned, src aligned): memcpy_fast=125ms memcpy=218 ms +result(dst aligned, src unalign): memcpy_fast=156ms memcpy=484 ms +result(dst unalign, src aligned): memcpy_fast=172ms memcpy=546 ms +result(dst unalign, src unalign): memcpy_fast=172ms memcpy=515 ms + +benchmark(size=1024 bytes, times=4194304): +result(dst aligned, src aligned): memcpy_fast=109ms memcpy=172 ms +result(dst aligned, src unalign): memcpy_fast=187ms memcpy=453 ms +result(dst unalign, src aligned): memcpy_fast=172ms memcpy=437 ms +result(dst unalign, src unalign): memcpy_fast=156ms memcpy=452 ms + +benchmark(size=4096 bytes, times=524288): +result(dst aligned, src aligned): memcpy_fast=62ms memcpy=78 ms +result(dst aligned, src unalign): memcpy_fast=109ms memcpy=202 ms +result(dst unalign, src aligned): memcpy_fast=94ms memcpy=203 ms +result(dst unalign, src unalign): memcpy_fast=110ms memcpy=218 ms + +benchmark(size=8192 bytes, times=262144): +result(dst aligned, src aligned): memcpy_fast=62ms memcpy=78 ms 
+result(dst aligned, src unalign): memcpy_fast=78ms memcpy=202 ms
+result(dst unalign, src aligned): memcpy_fast=78ms memcpy=203 ms
+result(dst unalign, src unalign): memcpy_fast=94ms memcpy=203 ms
+
+benchmark(size=1048576 bytes, times=2048):
+result(dst aligned, src aligned): memcpy_fast=203ms memcpy=191 ms
+result(dst aligned, src unalign): memcpy_fast=219ms memcpy=281 ms
+result(dst unalign, src aligned): memcpy_fast=218ms memcpy=328 ms
+result(dst unalign, src unalign): memcpy_fast=218ms memcpy=312 ms
+
+benchmark(size=4194304 bytes, times=512):
+result(dst aligned, src aligned): memcpy_fast=312ms memcpy=406 ms
+result(dst aligned, src unalign): memcpy_fast=296ms memcpy=421 ms
+result(dst unalign, src aligned): memcpy_fast=312ms memcpy=468 ms
+result(dst unalign, src unalign): memcpy_fast=297ms memcpy=452 ms
+
+benchmark(size=8388608 bytes, times=256):
+result(dst aligned, src aligned): memcpy_fast=281ms memcpy=452 ms
+result(dst aligned, src unalign): memcpy_fast=280ms memcpy=468 ms
+result(dst unalign, src aligned): memcpy_fast=298ms memcpy=514 ms
+result(dst unalign, src unalign): memcpy_fast=344ms memcpy=472 ms
+
+benchmark random access:
+memcpy_fast=515ms memcpy=1014ms
+
+*/
+
+
+
+
diff --git a/contrib/FastMemcpy/FastMemcpy.h b/contrib/FastMemcpy/FastMemcpy.h
new file mode 100644
index 00000000000..dd89a55dbe9
--- /dev/null
+++ b/contrib/FastMemcpy/FastMemcpy.h
@@ -0,0 +1,691 @@
+//=====================================================================
+//
+// FastMemcpy.c - skywind3000@163.com, 2015
+//
+// feature:
+// 50% speed up in avg. vs standard memcpy (tested in vc2012/gcc5.1)
+//
+//=====================================================================
+#ifndef __FAST_MEMCPY_H__
+#define __FAST_MEMCPY_H__
+
+#include <stddef.h>
+#include <stdint.h>
+#include <emmintrin.h>
+
+
+//---------------------------------------------------------------------
+// force inline for compilers
+//---------------------------------------------------------------------
+#ifndef INLINE
+#ifdef __GNUC__
+#if (__GNUC__ > 3) || ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 1))
+    #define INLINE __inline__ __attribute__((always_inline))
+#else
+    #define INLINE __inline__
+#endif
+#elif defined(_MSC_VER)
+    #define INLINE __forceinline
+#elif (defined(__BORLANDC__) || defined(__WATCOMC__))
+    #define INLINE __inline
+#else
+    #define INLINE
+#endif
+#endif
+
+typedef __attribute__((__aligned__(1))) uint16_t uint16_unaligned_t;
+typedef __attribute__((__aligned__(1))) uint32_t uint32_unaligned_t;
+typedef __attribute__((__aligned__(1))) uint64_t uint64_unaligned_t;
+
+//---------------------------------------------------------------------
+// fast copy for different sizes
+//---------------------------------------------------------------------
+static INLINE void memcpy_sse2_16(void *dst, const void *src) {
+    __m128i m0 = _mm_loadu_si128(((const __m128i*)src) + 0);
+    _mm_storeu_si128(((__m128i*)dst) + 0, m0);
+}
+
+static INLINE void memcpy_sse2_32(void *dst, const void *src) {
+    __m128i m0 = _mm_loadu_si128(((const __m128i*)src) + 0);
+    __m128i m1 = _mm_loadu_si128(((const __m128i*)src) + 1);
+    _mm_storeu_si128(((__m128i*)dst) + 0, m0);
+    _mm_storeu_si128(((__m128i*)dst) + 1, m1);
+}
+
+static INLINE void memcpy_sse2_64(void *dst, const void *src) {
+    __m128i m0 = _mm_loadu_si128(((const __m128i*)src) + 0);
+    __m128i m1 = _mm_loadu_si128(((const __m128i*)src) + 1);
+    __m128i m2 = _mm_loadu_si128(((const __m128i*)src) + 2);
+    __m128i m3 = _mm_loadu_si128(((const __m128i*)src) + 3);
+    _mm_storeu_si128(((__m128i*)dst) + 0, m0);
+
_mm_storeu_si128(((__m128i*)dst) + 1, m1); + _mm_storeu_si128(((__m128i*)dst) + 2, m2); + _mm_storeu_si128(((__m128i*)dst) + 3, m3); +} + +static INLINE void memcpy_sse2_128(void *dst, const void *src) { + __m128i m0 = _mm_loadu_si128(((const __m128i*)src) + 0); + __m128i m1 = _mm_loadu_si128(((const __m128i*)src) + 1); + __m128i m2 = _mm_loadu_si128(((const __m128i*)src) + 2); + __m128i m3 = _mm_loadu_si128(((const __m128i*)src) + 3); + __m128i m4 = _mm_loadu_si128(((const __m128i*)src) + 4); + __m128i m5 = _mm_loadu_si128(((const __m128i*)src) + 5); + __m128i m6 = _mm_loadu_si128(((const __m128i*)src) + 6); + __m128i m7 = _mm_loadu_si128(((const __m128i*)src) + 7); + _mm_storeu_si128(((__m128i*)dst) + 0, m0); + _mm_storeu_si128(((__m128i*)dst) + 1, m1); + _mm_storeu_si128(((__m128i*)dst) + 2, m2); + _mm_storeu_si128(((__m128i*)dst) + 3, m3); + _mm_storeu_si128(((__m128i*)dst) + 4, m4); + _mm_storeu_si128(((__m128i*)dst) + 5, m5); + _mm_storeu_si128(((__m128i*)dst) + 6, m6); + _mm_storeu_si128(((__m128i*)dst) + 7, m7); +} + + +//--------------------------------------------------------------------- +// tiny memory copy with jump table optimized +//--------------------------------------------------------------------- +static INLINE void *memcpy_tiny(void *dst, const void *src, size_t size) { + unsigned char *dd = ((unsigned char*)dst) + size; + const unsigned char *ss = ((const unsigned char*)src) + size; + + switch (size) { + case 64: + memcpy_sse2_64(dd - 64, ss - 64); + case 0: + break; + + case 65: + memcpy_sse2_64(dd - 65, ss - 65); + case 1: + dd[-1] = ss[-1]; + break; + + case 66: + memcpy_sse2_64(dd - 66, ss - 66); + case 2: + *((uint16_unaligned_t*)(dd - 2)) = *((uint16_unaligned_t*)(ss - 2)); + break; + + case 67: + memcpy_sse2_64(dd - 67, ss - 67); + case 3: + *((uint16_unaligned_t*)(dd - 3)) = *((uint16_unaligned_t*)(ss - 3)); + dd[-1] = ss[-1]; + break; + + case 68: + memcpy_sse2_64(dd - 68, ss - 68); + case 4: + *((uint32_unaligned_t*)(dd - 4)) = *((uint32_unaligned_t*)(ss - 4)); + break; + + case 69: + memcpy_sse2_64(dd - 69, ss - 69); + case 5: + *((uint32_unaligned_t*)(dd - 5)) = *((uint32_unaligned_t*)(ss - 5)); + dd[-1] = ss[-1]; + break; + + case 70: + memcpy_sse2_64(dd - 70, ss - 70); + case 6: + *((uint32_unaligned_t*)(dd - 6)) = *((uint32_unaligned_t*)(ss - 6)); + *((uint16_unaligned_t*)(dd - 2)) = *((uint16_unaligned_t*)(ss - 2)); + break; + + case 71: + memcpy_sse2_64(dd - 71, ss - 71); + case 7: + *((uint32_unaligned_t*)(dd - 7)) = *((uint32_unaligned_t*)(ss - 7)); + *((uint32_unaligned_t*)(dd - 4)) = *((uint32_unaligned_t*)(ss - 4)); + break; + + case 72: + memcpy_sse2_64(dd - 72, ss - 72); + case 8: + *((uint64_unaligned_t*)(dd - 8)) = *((uint64_unaligned_t*)(ss - 8)); + break; + + case 73: + memcpy_sse2_64(dd - 73, ss - 73); + case 9: + *((uint64_unaligned_t*)(dd - 9)) = *((uint64_unaligned_t*)(ss - 9)); + dd[-1] = ss[-1]; + break; + + case 74: + memcpy_sse2_64(dd - 74, ss - 74); + case 10: + *((uint64_unaligned_t*)(dd - 10)) = *((uint64_unaligned_t*)(ss - 10)); + *((uint16_unaligned_t*)(dd - 2)) = *((uint16_unaligned_t*)(ss - 2)); + break; + + case 75: + memcpy_sse2_64(dd - 75, ss - 75); + case 11: + *((uint64_unaligned_t*)(dd - 11)) = *((uint64_unaligned_t*)(ss - 11)); + *((uint32_unaligned_t*)(dd - 4)) = *((uint32_unaligned_t*)(ss - 4)); + break; + + case 76: + memcpy_sse2_64(dd - 76, ss - 76); + case 12: + *((uint64_unaligned_t*)(dd - 12)) = *((uint64_unaligned_t*)(ss - 12)); + *((uint32_unaligned_t*)(dd - 4)) = *((uint32_unaligned_t*)(ss - 4)); + break; + + 
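    /* Structural note on this switch: the case labels intentionally fall through.
       For any size in 65..128, "case size" first copies the leading 64 bytes via
       memcpy_sse2_64(dd - size, ss - size) and then falls into "case size - 64",
       which copies the remaining tail relative to the end pointers dd and ss
       (overlapping stores within dst are harmless), so every length from 0 to 128
       is handled through a single switch dispatch, without a loop. */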
case 77: + memcpy_sse2_64(dd - 77, ss - 77); + case 13: + *((uint64_unaligned_t*)(dd - 13)) = *((uint64_unaligned_t*)(ss - 13)); + *((uint32_unaligned_t*)(dd - 5)) = *((uint32_unaligned_t*)(ss - 5)); + dd[-1] = ss[-1]; + break; + + case 78: + memcpy_sse2_64(dd - 78, ss - 78); + case 14: + *((uint64_unaligned_t*)(dd - 14)) = *((uint64_unaligned_t*)(ss - 14)); + *((uint64_unaligned_t*)(dd - 8)) = *((uint64_unaligned_t*)(ss - 8)); + break; + + case 79: + memcpy_sse2_64(dd - 79, ss - 79); + case 15: + *((uint64_unaligned_t*)(dd - 15)) = *((uint64_unaligned_t*)(ss - 15)); + *((uint64_unaligned_t*)(dd - 8)) = *((uint64_unaligned_t*)(ss - 8)); + break; + + case 80: + memcpy_sse2_64(dd - 80, ss - 80); + case 16: + memcpy_sse2_16(dd - 16, ss - 16); + break; + + case 81: + memcpy_sse2_64(dd - 81, ss - 81); + case 17: + memcpy_sse2_16(dd - 17, ss - 17); + dd[-1] = ss[-1]; + break; + + case 82: + memcpy_sse2_64(dd - 82, ss - 82); + case 18: + memcpy_sse2_16(dd - 18, ss - 18); + *((uint16_unaligned_t*)(dd - 2)) = *((uint16_unaligned_t*)(ss - 2)); + break; + + case 83: + memcpy_sse2_64(dd - 83, ss - 83); + case 19: + memcpy_sse2_16(dd - 19, ss - 19); + *((uint16_unaligned_t*)(dd - 3)) = *((uint16_unaligned_t*)(ss - 3)); + dd[-1] = ss[-1]; + break; + + case 84: + memcpy_sse2_64(dd - 84, ss - 84); + case 20: + memcpy_sse2_16(dd - 20, ss - 20); + *((uint32_unaligned_t*)(dd - 4)) = *((uint32_unaligned_t*)(ss - 4)); + break; + + case 85: + memcpy_sse2_64(dd - 85, ss - 85); + case 21: + memcpy_sse2_16(dd - 21, ss - 21); + *((uint32_unaligned_t*)(dd - 5)) = *((uint32_unaligned_t*)(ss - 5)); + dd[-1] = ss[-1]; + break; + + case 86: + memcpy_sse2_64(dd - 86, ss - 86); + case 22: + memcpy_sse2_16(dd - 22, ss - 22); + *((uint32_unaligned_t*)(dd - 6)) = *((uint32_unaligned_t*)(ss - 6)); + *((uint16_unaligned_t*)(dd - 2)) = *((uint16_unaligned_t*)(ss - 2)); + break; + + case 87: + memcpy_sse2_64(dd - 87, ss - 87); + case 23: + memcpy_sse2_16(dd - 23, ss - 23); + *((uint32_unaligned_t*)(dd - 7)) = *((uint32_unaligned_t*)(ss - 7)); + *((uint32_unaligned_t*)(dd - 4)) = *((uint32_unaligned_t*)(ss - 4)); + break; + + case 88: + memcpy_sse2_64(dd - 88, ss - 88); + case 24: + memcpy_sse2_16(dd - 24, ss - 24); + memcpy_sse2_16(dd - 16, ss - 16); + break; + + case 89: + memcpy_sse2_64(dd - 89, ss - 89); + case 25: + memcpy_sse2_16(dd - 25, ss - 25); + memcpy_sse2_16(dd - 16, ss - 16); + break; + + case 90: + memcpy_sse2_64(dd - 90, ss - 90); + case 26: + memcpy_sse2_16(dd - 26, ss - 26); + memcpy_sse2_16(dd - 16, ss - 16); + break; + + case 91: + memcpy_sse2_64(dd - 91, ss - 91); + case 27: + memcpy_sse2_16(dd - 27, ss - 27); + memcpy_sse2_16(dd - 16, ss - 16); + break; + + case 92: + memcpy_sse2_64(dd - 92, ss - 92); + case 28: + memcpy_sse2_16(dd - 28, ss - 28); + memcpy_sse2_16(dd - 16, ss - 16); + break; + + case 93: + memcpy_sse2_64(dd - 93, ss - 93); + case 29: + memcpy_sse2_16(dd - 29, ss - 29); + memcpy_sse2_16(dd - 16, ss - 16); + break; + + case 94: + memcpy_sse2_64(dd - 94, ss - 94); + case 30: + memcpy_sse2_16(dd - 30, ss - 30); + memcpy_sse2_16(dd - 16, ss - 16); + break; + + case 95: + memcpy_sse2_64(dd - 95, ss - 95); + case 31: + memcpy_sse2_16(dd - 31, ss - 31); + memcpy_sse2_16(dd - 16, ss - 16); + break; + + case 96: + memcpy_sse2_64(dd - 96, ss - 96); + case 32: + memcpy_sse2_32(dd - 32, ss - 32); + break; + + case 97: + memcpy_sse2_64(dd - 97, ss - 97); + case 33: + memcpy_sse2_32(dd - 33, ss - 33); + dd[-1] = ss[-1]; + break; + + case 98: + memcpy_sse2_64(dd - 98, ss - 98); + case 34: + 
memcpy_sse2_32(dd - 34, ss - 34); + *((uint16_unaligned_t*)(dd - 2)) = *((uint16_unaligned_t*)(ss - 2)); + break; + + case 99: + memcpy_sse2_64(dd - 99, ss - 99); + case 35: + memcpy_sse2_32(dd - 35, ss - 35); + *((uint16_unaligned_t*)(dd - 3)) = *((uint16_unaligned_t*)(ss - 3)); + dd[-1] = ss[-1]; + break; + + case 100: + memcpy_sse2_64(dd - 100, ss - 100); + case 36: + memcpy_sse2_32(dd - 36, ss - 36); + *((uint32_unaligned_t*)(dd - 4)) = *((uint32_unaligned_t*)(ss - 4)); + break; + + case 101: + memcpy_sse2_64(dd - 101, ss - 101); + case 37: + memcpy_sse2_32(dd - 37, ss - 37); + *((uint32_unaligned_t*)(dd - 5)) = *((uint32_unaligned_t*)(ss - 5)); + dd[-1] = ss[-1]; + break; + + case 102: + memcpy_sse2_64(dd - 102, ss - 102); + case 38: + memcpy_sse2_32(dd - 38, ss - 38); + *((uint32_unaligned_t*)(dd - 6)) = *((uint32_unaligned_t*)(ss - 6)); + *((uint16_unaligned_t*)(dd - 2)) = *((uint16_unaligned_t*)(ss - 2)); + break; + + case 103: + memcpy_sse2_64(dd - 103, ss - 103); + case 39: + memcpy_sse2_32(dd - 39, ss - 39); + *((uint32_unaligned_t*)(dd - 7)) = *((uint32_unaligned_t*)(ss - 7)); + *((uint32_unaligned_t*)(dd - 4)) = *((uint32_unaligned_t*)(ss - 4)); + break; + + case 104: + memcpy_sse2_64(dd - 104, ss - 104); + case 40: + memcpy_sse2_32(dd - 40, ss - 40); + *((uint64_unaligned_t*)(dd - 8)) = *((uint64_unaligned_t*)(ss - 8)); + break; + + case 105: + memcpy_sse2_64(dd - 105, ss - 105); + case 41: + memcpy_sse2_32(dd - 41, ss - 41); + *((uint64_unaligned_t*)(dd - 9)) = *((uint64_unaligned_t*)(ss - 9)); + dd[-1] = ss[-1]; + break; + + case 106: + memcpy_sse2_64(dd - 106, ss - 106); + case 42: + memcpy_sse2_32(dd - 42, ss - 42); + *((uint64_unaligned_t*)(dd - 10)) = *((uint64_unaligned_t*)(ss - 10)); + *((uint16_unaligned_t*)(dd - 2)) = *((uint16_unaligned_t*)(ss - 2)); + break; + + case 107: + memcpy_sse2_64(dd - 107, ss - 107); + case 43: + memcpy_sse2_32(dd - 43, ss - 43); + *((uint64_unaligned_t*)(dd - 11)) = *((uint64_unaligned_t*)(ss - 11)); + *((uint32_unaligned_t*)(dd - 4)) = *((uint32_unaligned_t*)(ss - 4)); + break; + + case 108: + memcpy_sse2_64(dd - 108, ss - 108); + case 44: + memcpy_sse2_32(dd - 44, ss - 44); + *((uint64_unaligned_t*)(dd - 12)) = *((uint64_unaligned_t*)(ss - 12)); + *((uint32_unaligned_t*)(dd - 4)) = *((uint32_unaligned_t*)(ss - 4)); + break; + + case 109: + memcpy_sse2_64(dd - 109, ss - 109); + case 45: + memcpy_sse2_32(dd - 45, ss - 45); + *((uint64_unaligned_t*)(dd - 13)) = *((uint64_unaligned_t*)(ss - 13)); + *((uint32_unaligned_t*)(dd - 5)) = *((uint32_unaligned_t*)(ss - 5)); + dd[-1] = ss[-1]; + break; + + case 110: + memcpy_sse2_64(dd - 110, ss - 110); + case 46: + memcpy_sse2_32(dd - 46, ss - 46); + *((uint64_unaligned_t*)(dd - 14)) = *((uint64_unaligned_t*)(ss - 14)); + *((uint64_unaligned_t*)(dd - 8)) = *((uint64_unaligned_t*)(ss - 8)); + break; + + case 111: + memcpy_sse2_64(dd - 111, ss - 111); + case 47: + memcpy_sse2_32(dd - 47, ss - 47); + *((uint64_unaligned_t*)(dd - 15)) = *((uint64_unaligned_t*)(ss - 15)); + *((uint64_unaligned_t*)(dd - 8)) = *((uint64_unaligned_t*)(ss - 8)); + break; + + case 112: + memcpy_sse2_64(dd - 112, ss - 112); + case 48: + memcpy_sse2_32(dd - 48, ss - 48); + memcpy_sse2_16(dd - 16, ss - 16); + break; + + case 113: + memcpy_sse2_64(dd - 113, ss - 113); + case 49: + memcpy_sse2_32(dd - 49, ss - 49); + memcpy_sse2_16(dd - 17, ss - 17); + dd[-1] = ss[-1]; + break; + + case 114: + memcpy_sse2_64(dd - 114, ss - 114); + case 50: + memcpy_sse2_32(dd - 50, ss - 50); + memcpy_sse2_16(dd - 18, ss - 18); + 
*((uint16_unaligned_t*)(dd - 2)) = *((uint16_unaligned_t*)(ss - 2)); + break; + + case 115: + memcpy_sse2_64(dd - 115, ss - 115); + case 51: + memcpy_sse2_32(dd - 51, ss - 51); + memcpy_sse2_16(dd - 19, ss - 19); + *((uint16_unaligned_t*)(dd - 3)) = *((uint16_unaligned_t*)(ss - 3)); + dd[-1] = ss[-1]; + break; + + case 116: + memcpy_sse2_64(dd - 116, ss - 116); + case 52: + memcpy_sse2_32(dd - 52, ss - 52); + memcpy_sse2_16(dd - 20, ss - 20); + *((uint32_unaligned_t*)(dd - 4)) = *((uint32_unaligned_t*)(ss - 4)); + break; + + case 117: + memcpy_sse2_64(dd - 117, ss - 117); + case 53: + memcpy_sse2_32(dd - 53, ss - 53); + memcpy_sse2_16(dd - 21, ss - 21); + *((uint32_unaligned_t*)(dd - 5)) = *((uint32_unaligned_t*)(ss - 5)); + dd[-1] = ss[-1]; + break; + + case 118: + memcpy_sse2_64(dd - 118, ss - 118); + case 54: + memcpy_sse2_32(dd - 54, ss - 54); + memcpy_sse2_16(dd - 22, ss - 22); + *((uint32_unaligned_t*)(dd - 6)) = *((uint32_unaligned_t*)(ss - 6)); + *((uint16_unaligned_t*)(dd - 2)) = *((uint16_unaligned_t*)(ss - 2)); + break; + + case 119: + memcpy_sse2_64(dd - 119, ss - 119); + case 55: + memcpy_sse2_32(dd - 55, ss - 55); + memcpy_sse2_16(dd - 23, ss - 23); + *((uint32_unaligned_t*)(dd - 7)) = *((uint32_unaligned_t*)(ss - 7)); + *((uint32_unaligned_t*)(dd - 4)) = *((uint32_unaligned_t*)(ss - 4)); + break; + + case 120: + memcpy_sse2_64(dd - 120, ss - 120); + case 56: + memcpy_sse2_32(dd - 56, ss - 56); + memcpy_sse2_16(dd - 24, ss - 24); + memcpy_sse2_16(dd - 16, ss - 16); + break; + + case 121: + memcpy_sse2_64(dd - 121, ss - 121); + case 57: + memcpy_sse2_32(dd - 57, ss - 57); + memcpy_sse2_16(dd - 25, ss - 25); + memcpy_sse2_16(dd - 16, ss - 16); + break; + + case 122: + memcpy_sse2_64(dd - 122, ss - 122); + case 58: + memcpy_sse2_32(dd - 58, ss - 58); + memcpy_sse2_16(dd - 26, ss - 26); + memcpy_sse2_16(dd - 16, ss - 16); + break; + + case 123: + memcpy_sse2_64(dd - 123, ss - 123); + case 59: + memcpy_sse2_32(dd - 59, ss - 59); + memcpy_sse2_16(dd - 27, ss - 27); + memcpy_sse2_16(dd - 16, ss - 16); + break; + + case 124: + memcpy_sse2_64(dd - 124, ss - 124); + case 60: + memcpy_sse2_32(dd - 60, ss - 60); + memcpy_sse2_16(dd - 28, ss - 28); + memcpy_sse2_16(dd - 16, ss - 16); + break; + + case 125: + memcpy_sse2_64(dd - 125, ss - 125); + case 61: + memcpy_sse2_32(dd - 61, ss - 61); + memcpy_sse2_16(dd - 29, ss - 29); + memcpy_sse2_16(dd - 16, ss - 16); + break; + + case 126: + memcpy_sse2_64(dd - 126, ss - 126); + case 62: + memcpy_sse2_32(dd - 62, ss - 62); + memcpy_sse2_16(dd - 30, ss - 30); + memcpy_sse2_16(dd - 16, ss - 16); + break; + + case 127: + memcpy_sse2_64(dd - 127, ss - 127); + case 63: + memcpy_sse2_32(dd - 63, ss - 63); + memcpy_sse2_16(dd - 31, ss - 31); + memcpy_sse2_16(dd - 16, ss - 16); + break; + + case 128: + memcpy_sse2_128(dd - 128, ss - 128); + break; + } + + return dst; +} + + +//--------------------------------------------------------------------- +// main routine +//--------------------------------------------------------------------- +static void* memcpy_fast(void *destination, const void *source, size_t size) +{ + unsigned char *dst = (unsigned char*)destination; + const unsigned char *src = (const unsigned char*)source; + static size_t cachesize = 0x200000; // L2-cache size + size_t padding; + + // small memory copy + if (size <= 128) { + return memcpy_tiny(dst, src, size); + } + + // align destination to 16 bytes boundary + padding = (16 - (((size_t)dst) & 15)) & 15; + + if (padding > 0) { + __m128i head = _mm_loadu_si128((const __m128i*)src); + 
_mm_storeu_si128((__m128i*)dst, head); + dst += padding; + src += padding; + size -= padding; + } + + // medium size copy + if (size <= cachesize) { + __m128i c0, c1, c2, c3, c4, c5, c6, c7; + + for (; size >= 128; size -= 128) { + c0 = _mm_loadu_si128(((const __m128i*)src) + 0); + c1 = _mm_loadu_si128(((const __m128i*)src) + 1); + c2 = _mm_loadu_si128(((const __m128i*)src) + 2); + c3 = _mm_loadu_si128(((const __m128i*)src) + 3); + c4 = _mm_loadu_si128(((const __m128i*)src) + 4); + c5 = _mm_loadu_si128(((const __m128i*)src) + 5); + c6 = _mm_loadu_si128(((const __m128i*)src) + 6); + c7 = _mm_loadu_si128(((const __m128i*)src) + 7); + _mm_prefetch((const char*)(src + 256), _MM_HINT_NTA); + src += 128; + _mm_store_si128((((__m128i*)dst) + 0), c0); + _mm_store_si128((((__m128i*)dst) + 1), c1); + _mm_store_si128((((__m128i*)dst) + 2), c2); + _mm_store_si128((((__m128i*)dst) + 3), c3); + _mm_store_si128((((__m128i*)dst) + 4), c4); + _mm_store_si128((((__m128i*)dst) + 5), c5); + _mm_store_si128((((__m128i*)dst) + 6), c6); + _mm_store_si128((((__m128i*)dst) + 7), c7); + dst += 128; + } + } + else { // big memory copy + __m128i c0, c1, c2, c3, c4, c5, c6, c7; + + _mm_prefetch((const char*)(src), _MM_HINT_NTA); + + if ((((size_t)src) & 15) == 0) { // source aligned + for (; size >= 128; size -= 128) { + c0 = _mm_load_si128(((const __m128i*)src) + 0); + c1 = _mm_load_si128(((const __m128i*)src) + 1); + c2 = _mm_load_si128(((const __m128i*)src) + 2); + c3 = _mm_load_si128(((const __m128i*)src) + 3); + c4 = _mm_load_si128(((const __m128i*)src) + 4); + c5 = _mm_load_si128(((const __m128i*)src) + 5); + c6 = _mm_load_si128(((const __m128i*)src) + 6); + c7 = _mm_load_si128(((const __m128i*)src) + 7); + _mm_prefetch((const char*)(src + 256), _MM_HINT_NTA); + src += 128; + _mm_stream_si128((((__m128i*)dst) + 0), c0); + _mm_stream_si128((((__m128i*)dst) + 1), c1); + _mm_stream_si128((((__m128i*)dst) + 2), c2); + _mm_stream_si128((((__m128i*)dst) + 3), c3); + _mm_stream_si128((((__m128i*)dst) + 4), c4); + _mm_stream_si128((((__m128i*)dst) + 5), c5); + _mm_stream_si128((((__m128i*)dst) + 6), c6); + _mm_stream_si128((((__m128i*)dst) + 7), c7); + dst += 128; + } + } + else { // source unaligned + for (; size >= 128; size -= 128) { + c0 = _mm_loadu_si128(((const __m128i*)src) + 0); + c1 = _mm_loadu_si128(((const __m128i*)src) + 1); + c2 = _mm_loadu_si128(((const __m128i*)src) + 2); + c3 = _mm_loadu_si128(((const __m128i*)src) + 3); + c4 = _mm_loadu_si128(((const __m128i*)src) + 4); + c5 = _mm_loadu_si128(((const __m128i*)src) + 5); + c6 = _mm_loadu_si128(((const __m128i*)src) + 6); + c7 = _mm_loadu_si128(((const __m128i*)src) + 7); + _mm_prefetch((const char*)(src + 256), _MM_HINT_NTA); + src += 128; + _mm_stream_si128((((__m128i*)dst) + 0), c0); + _mm_stream_si128((((__m128i*)dst) + 1), c1); + _mm_stream_si128((((__m128i*)dst) + 2), c2); + _mm_stream_si128((((__m128i*)dst) + 3), c3); + _mm_stream_si128((((__m128i*)dst) + 4), c4); + _mm_stream_si128((((__m128i*)dst) + 5), c5); + _mm_stream_si128((((__m128i*)dst) + 6), c6); + _mm_stream_si128((((__m128i*)dst) + 7), c7); + dst += 128; + } + } + _mm_sfence(); + } + + memcpy_tiny(dst, src, size); + + return destination; +} + + +#endif diff --git a/contrib/FastMemcpy/FastMemcpy_Avx.c b/contrib/FastMemcpy/FastMemcpy_Avx.c new file mode 100644 index 00000000000..6538c6b2126 --- /dev/null +++ b/contrib/FastMemcpy/FastMemcpy_Avx.c @@ -0,0 +1,171 @@ +//===================================================================== +// +// FastMemcpy.c - skywind3000@163.com, 2015 +// 
+// feature: +// 50% speed up in avg. vs standard memcpy (tested in vc2012/gcc4.9) +// +//===================================================================== +#include +#include +#include +#include +#include + +#if (defined(_WIN32) || defined(WIN32)) +#include +#include +#ifdef _MSC_VER +#pragma comment(lib, "winmm.lib") +#endif +#elif defined(__unix) +#include +#include +#else +#error it can only be compiled under windows or unix +#endif + +#include "FastMemcpy_Avx.h" + + +unsigned int gettime() +{ + #if (defined(_WIN32) || defined(WIN32)) + return timeGetTime(); + #else + static struct timezone tz={ 0,0 }; + struct timeval time; + gettimeofday(&time,&tz); + return (time.tv_sec * 1000 + time.tv_usec / 1000); + #endif +} + +void sleepms(unsigned int millisec) +{ +#if defined(_WIN32) || defined(WIN32) + Sleep(millisec); +#else + usleep(millisec * 1000); +#endif +} + + + +void benchmark(int dstalign, int srcalign, size_t size, int times) +{ + char *DATA1 = (char*)malloc(size + 64); + char *DATA2 = (char*)malloc(size + 64); + size_t LINEAR1 = ((size_t)DATA1); + size_t LINEAR2 = ((size_t)DATA2); + char *ALIGN1 = (char*)(((64 - (LINEAR1 & 63)) & 63) + LINEAR1); + char *ALIGN2 = (char*)(((64 - (LINEAR2 & 63)) & 63) + LINEAR2); + char *dst = (dstalign)? ALIGN1 : (ALIGN1 + 1); + char *src = (srcalign)? ALIGN2 : (ALIGN2 + 3); + unsigned int t1, t2; + int k; + + sleepms(100); + t1 = gettime(); + for (k = times; k > 0; k--) { + memcpy(dst, src, size); + } + t1 = gettime() - t1; + sleepms(100); + t2 = gettime(); + for (k = times; k > 0; k--) { + memcpy_fast(dst, src, size); + } + t2 = gettime() - t2; + + free(DATA1); + free(DATA2); + + printf("result(dst %s, src %s): memcpy_fast=%dms memcpy=%d ms\n", + dstalign? "aligned" : "unalign", + srcalign? "aligned" : "unalign", (int)t2, (int)t1); +} + + +void bench(int copysize, int times) +{ + printf("benchmark(size=%d bytes, times=%d):\n", copysize, times); + benchmark(1, 1, copysize, times); + benchmark(1, 0, copysize, times); + benchmark(0, 1, copysize, times); + benchmark(0, 0, copysize, times); + printf("\n"); +} + + +void random_bench(int maxsize, int times) +{ + static char A[11 * 1024 * 1024 + 2]; + static char B[11 * 1024 * 1024 + 2]; + static int random_offsets[0x10000]; + static int random_sizes[0x8000]; + unsigned int i, p1, p2; + unsigned int t1, t2; + for (i = 0; i < 0x10000; i++) { // generate random offsets + random_offsets[i] = rand() % (10 * 1024 * 1024 + 1); + } + for (i = 0; i < 0x8000; i++) { // generate random sizes + random_sizes[i] = 1 + rand() % maxsize; + } + sleepms(100); + t1 = gettime(); + for (p1 = 0, p2 = 0, i = 0; i < times; i++) { + int offset1 = random_offsets[(p1++) & 0xffff]; + int offset2 = random_offsets[(p1++) & 0xffff]; + int size = random_sizes[(p2++) & 0x7fff]; + memcpy(A + offset1, B + offset2, size); + } + t1 = gettime() - t1; + sleepms(100); + t2 = gettime(); + for (p1 = 0, p2 = 0, i = 0; i < times; i++) { + int offset1 = random_offsets[(p1++) & 0xffff]; + int offset2 = random_offsets[(p1++) & 0xffff]; + int size = random_sizes[(p2++) & 0x7fff]; + memcpy_fast(A + offset1, B + offset2, size); + } + t2 = gettime() - t2; + printf("benchmark random access:\n"); + printf("memcpy_fast=%dms memcpy=%dms\n\n", (int)t2, (int)t1); +} + + +#ifdef _MSC_VER +#pragma comment(lib, "winmm.lib") +#endif + +int main(void) +{ +#if 1 + bench(32, 0x1000000); + bench(64, 0x1000000); + bench(512, 0x800000); + bench(1024, 0x400000); +#endif + bench(4096, 0x80000); + bench(8192, 0x40000); +#if 1 + bench(1024 * 1024 * 1, 0x800); + 
bench(1024 * 1024 * 4, 0x200); +#endif + bench(1024 * 1024 * 8, 0x100); + + random_bench(2048, 8000000); + + return 0; +} + + + + +/* + +*/ + + + + diff --git a/contrib/FastMemcpy/FastMemcpy_Avx.h b/contrib/FastMemcpy/FastMemcpy_Avx.h new file mode 100644 index 00000000000..8ba064b0350 --- /dev/null +++ b/contrib/FastMemcpy/FastMemcpy_Avx.h @@ -0,0 +1,492 @@ +//===================================================================== +// +// FastMemcpy.c - skywind3000@163.com, 2015 +// +// feature: +// 50% speed up in avg. vs standard memcpy (tested in vc2012/gcc5.1) +// +//===================================================================== +#ifndef __FAST_MEMCPY_H__ +#define __FAST_MEMCPY_H__ + +#include +#include +#include + + +//--------------------------------------------------------------------- +// force inline for compilers +//--------------------------------------------------------------------- +#ifndef INLINE +#ifdef __GNUC__ +#if (__GNUC__ > 3) || ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 1)) + #define INLINE __inline__ __attribute__((always_inline)) +#else + #define INLINE __inline__ +#endif +#elif defined(_MSC_VER) + #define INLINE __forceinline +#elif (defined(__BORLANDC__) || defined(__WATCOMC__)) + #define INLINE __inline +#else + #define INLINE +#endif +#endif + + + +//--------------------------------------------------------------------- +// fast copy for different sizes +//--------------------------------------------------------------------- +static INLINE void memcpy_avx_16(void *dst, const void *src) { +#if 1 + __m128i m0 = _mm_loadu_si128(((const __m128i*)src) + 0); + _mm_storeu_si128(((__m128i*)dst) + 0, m0); +#else + *((uint64_t*)((char*)dst + 0)) = *((uint64_t*)((const char*)src + 0)); + *((uint64_t*)((char*)dst + 8)) = *((uint64_t*)((const char*)src + 8)); +#endif +} + +static INLINE void memcpy_avx_32(void *dst, const void *src) { + __m256i m0 = _mm256_loadu_si256(((const __m256i*)src) + 0); + _mm256_storeu_si256(((__m256i*)dst) + 0, m0); +} + +static INLINE void memcpy_avx_64(void *dst, const void *src) { + __m256i m0 = _mm256_loadu_si256(((const __m256i*)src) + 0); + __m256i m1 = _mm256_loadu_si256(((const __m256i*)src) + 1); + _mm256_storeu_si256(((__m256i*)dst) + 0, m0); + _mm256_storeu_si256(((__m256i*)dst) + 1, m1); +} + +static INLINE void memcpy_avx_128(void *dst, const void *src) { + __m256i m0 = _mm256_loadu_si256(((const __m256i*)src) + 0); + __m256i m1 = _mm256_loadu_si256(((const __m256i*)src) + 1); + __m256i m2 = _mm256_loadu_si256(((const __m256i*)src) + 2); + __m256i m3 = _mm256_loadu_si256(((const __m256i*)src) + 3); + _mm256_storeu_si256(((__m256i*)dst) + 0, m0); + _mm256_storeu_si256(((__m256i*)dst) + 1, m1); + _mm256_storeu_si256(((__m256i*)dst) + 2, m2); + _mm256_storeu_si256(((__m256i*)dst) + 3, m3); +} + +static INLINE void memcpy_avx_256(void *dst, const void *src) { + __m256i m0 = _mm256_loadu_si256(((const __m256i*)src) + 0); + __m256i m1 = _mm256_loadu_si256(((const __m256i*)src) + 1); + __m256i m2 = _mm256_loadu_si256(((const __m256i*)src) + 2); + __m256i m3 = _mm256_loadu_si256(((const __m256i*)src) + 3); + __m256i m4 = _mm256_loadu_si256(((const __m256i*)src) + 4); + __m256i m5 = _mm256_loadu_si256(((const __m256i*)src) + 5); + __m256i m6 = _mm256_loadu_si256(((const __m256i*)src) + 6); + __m256i m7 = _mm256_loadu_si256(((const __m256i*)src) + 7); + _mm256_storeu_si256(((__m256i*)dst) + 0, m0); + _mm256_storeu_si256(((__m256i*)dst) + 1, m1); + _mm256_storeu_si256(((__m256i*)dst) + 2, m2); + _mm256_storeu_si256(((__m256i*)dst) + 3, m3); + 
_mm256_storeu_si256(((__m256i*)dst) + 4, m4); + _mm256_storeu_si256(((__m256i*)dst) + 5, m5); + _mm256_storeu_si256(((__m256i*)dst) + 6, m6); + _mm256_storeu_si256(((__m256i*)dst) + 7, m7); +} + + +//--------------------------------------------------------------------- +// tiny memory copy with jump table optimized +//--------------------------------------------------------------------- +static INLINE void *memcpy_tiny(void *dst, const void *src, size_t size) { + unsigned char *dd = ((unsigned char*)dst) + size; + const unsigned char *ss = ((const unsigned char*)src) + size; + + switch (size) { + case 128: memcpy_avx_128(dd - 128, ss - 128); + case 0: break; + case 129: memcpy_avx_128(dd - 129, ss - 129); + case 1: dd[-1] = ss[-1]; break; + case 130: memcpy_avx_128(dd - 130, ss - 130); + case 2: *((uint16_t*)(dd - 2)) = *((uint16_t*)(ss - 2)); break; + case 131: memcpy_avx_128(dd - 131, ss - 131); + case 3: *((uint16_t*)(dd - 3)) = *((uint16_t*)(ss - 3)); dd[-1] = ss[-1]; break; + case 132: memcpy_avx_128(dd - 132, ss - 132); + case 4: *((uint32_t*)(dd - 4)) = *((uint32_t*)(ss - 4)); break; + case 133: memcpy_avx_128(dd - 133, ss - 133); + case 5: *((uint32_t*)(dd - 5)) = *((uint32_t*)(ss - 5)); dd[-1] = ss[-1]; break; + case 134: memcpy_avx_128(dd - 134, ss - 134); + case 6: *((uint32_t*)(dd - 6)) = *((uint32_t*)(ss - 6)); *((uint16_t*)(dd - 2)) = *((uint16_t*)(ss - 2)); break; + case 135: memcpy_avx_128(dd - 135, ss - 135); + case 7: *((uint32_t*)(dd - 7)) = *((uint32_t*)(ss - 7)); *((uint32_t*)(dd - 4)) = *((uint32_t*)(ss - 4)); break; + case 136: memcpy_avx_128(dd - 136, ss - 136); + case 8: *((uint64_t*)(dd - 8)) = *((uint64_t*)(ss - 8)); break; + case 137: memcpy_avx_128(dd - 137, ss - 137); + case 9: *((uint64_t*)(dd - 9)) = *((uint64_t*)(ss - 9)); dd[-1] = ss[-1]; break; + case 138: memcpy_avx_128(dd - 138, ss - 138); + case 10: *((uint64_t*)(dd - 10)) = *((uint64_t*)(ss - 10)); *((uint16_t*)(dd - 2)) = *((uint16_t*)(ss - 2)); break; + case 139: memcpy_avx_128(dd - 139, ss - 139); + case 11: *((uint64_t*)(dd - 11)) = *((uint64_t*)(ss - 11)); *((uint32_t*)(dd - 4)) = *((uint32_t*)(ss - 4)); break; + case 140: memcpy_avx_128(dd - 140, ss - 140); + case 12: *((uint64_t*)(dd - 12)) = *((uint64_t*)(ss - 12)); *((uint32_t*)(dd - 4)) = *((uint32_t*)(ss - 4)); break; + case 141: memcpy_avx_128(dd - 141, ss - 141); + case 13: *((uint64_t*)(dd - 13)) = *((uint64_t*)(ss - 13)); *((uint64_t*)(dd - 8)) = *((uint64_t*)(ss - 8)); break; + case 142: memcpy_avx_128(dd - 142, ss - 142); + case 14: *((uint64_t*)(dd - 14)) = *((uint64_t*)(ss - 14)); *((uint64_t*)(dd - 8)) = *((uint64_t*)(ss - 8)); break; + case 143: memcpy_avx_128(dd - 143, ss - 143); + case 15: *((uint64_t*)(dd - 15)) = *((uint64_t*)(ss - 15)); *((uint64_t*)(dd - 8)) = *((uint64_t*)(ss - 8)); break; + case 144: memcpy_avx_128(dd - 144, ss - 144); + case 16: memcpy_avx_16(dd - 16, ss - 16); break; + case 145: memcpy_avx_128(dd - 145, ss - 145); + case 17: memcpy_avx_16(dd - 17, ss - 17); dd[-1] = ss[-1]; break; + case 146: memcpy_avx_128(dd - 146, ss - 146); + case 18: memcpy_avx_16(dd - 18, ss - 18); *((uint16_t*)(dd - 2)) = *((uint16_t*)(ss - 2)); break; + case 147: memcpy_avx_128(dd - 147, ss - 147); + case 19: memcpy_avx_16(dd - 19, ss - 19); *((uint32_t*)(dd - 4)) = *((uint32_t*)(ss - 4)); break; + case 148: memcpy_avx_128(dd - 148, ss - 148); + case 20: memcpy_avx_16(dd - 20, ss - 20); *((uint32_t*)(dd - 4)) = *((uint32_t*)(ss - 4)); break; + case 149: memcpy_avx_128(dd - 149, ss - 149); + case 21: memcpy_avx_16(dd - 21, ss - 
21); *((uint64_t*)(dd - 8)) = *((uint64_t*)(ss - 8)); break; + case 150: memcpy_avx_128(dd - 150, ss - 150); + case 22: memcpy_avx_16(dd - 22, ss - 22); *((uint64_t*)(dd - 8)) = *((uint64_t*)(ss - 8)); break; + case 151: memcpy_avx_128(dd - 151, ss - 151); + case 23: memcpy_avx_16(dd - 23, ss - 23); *((uint64_t*)(dd - 8)) = *((uint64_t*)(ss - 8)); break; + case 152: memcpy_avx_128(dd - 152, ss - 152); + case 24: memcpy_avx_16(dd - 24, ss - 24); *((uint64_t*)(dd - 8)) = *((uint64_t*)(ss - 8)); break; + case 153: memcpy_avx_128(dd - 153, ss - 153); + case 25: memcpy_avx_16(dd - 25, ss - 25); memcpy_avx_16(dd - 16, ss - 16); break; + case 154: memcpy_avx_128(dd - 154, ss - 154); + case 26: memcpy_avx_16(dd - 26, ss - 26); memcpy_avx_16(dd - 16, ss - 16); break; + case 155: memcpy_avx_128(dd - 155, ss - 155); + case 27: memcpy_avx_16(dd - 27, ss - 27); memcpy_avx_16(dd - 16, ss - 16); break; + case 156: memcpy_avx_128(dd - 156, ss - 156); + case 28: memcpy_avx_16(dd - 28, ss - 28); memcpy_avx_16(dd - 16, ss - 16); break; + case 157: memcpy_avx_128(dd - 157, ss - 157); + case 29: memcpy_avx_16(dd - 29, ss - 29); memcpy_avx_16(dd - 16, ss - 16); break; + case 158: memcpy_avx_128(dd - 158, ss - 158); + case 30: memcpy_avx_16(dd - 30, ss - 30); memcpy_avx_16(dd - 16, ss - 16); break; + case 159: memcpy_avx_128(dd - 159, ss - 159); + case 31: memcpy_avx_16(dd - 31, ss - 31); memcpy_avx_16(dd - 16, ss - 16); break; + case 160: memcpy_avx_128(dd - 160, ss - 160); + case 32: memcpy_avx_32(dd - 32, ss - 32); break; + case 161: memcpy_avx_128(dd - 161, ss - 161); + case 33: memcpy_avx_32(dd - 33, ss - 33); dd[-1] = ss[-1]; break; + case 162: memcpy_avx_128(dd - 162, ss - 162); + case 34: memcpy_avx_32(dd - 34, ss - 34); *((uint16_t*)(dd - 2)) = *((uint16_t*)(ss - 2)); break; + case 163: memcpy_avx_128(dd - 163, ss - 163); + case 35: memcpy_avx_32(dd - 35, ss - 35); *((uint32_t*)(dd - 4)) = *((uint32_t*)(ss - 4)); break; + case 164: memcpy_avx_128(dd - 164, ss - 164); + case 36: memcpy_avx_32(dd - 36, ss - 36); *((uint32_t*)(dd - 4)) = *((uint32_t*)(ss - 4)); break; + case 165: memcpy_avx_128(dd - 165, ss - 165); + case 37: memcpy_avx_32(dd - 37, ss - 37); *((uint64_t*)(dd - 8)) = *((uint64_t*)(ss - 8)); break; + case 166: memcpy_avx_128(dd - 166, ss - 166); + case 38: memcpy_avx_32(dd - 38, ss - 38); *((uint64_t*)(dd - 8)) = *((uint64_t*)(ss - 8)); break; + case 167: memcpy_avx_128(dd - 167, ss - 167); + case 39: memcpy_avx_32(dd - 39, ss - 39); *((uint64_t*)(dd - 8)) = *((uint64_t*)(ss - 8)); break; + case 168: memcpy_avx_128(dd - 168, ss - 168); + case 40: memcpy_avx_32(dd - 40, ss - 40); *((uint64_t*)(dd - 8)) = *((uint64_t*)(ss - 8)); break; + case 169: memcpy_avx_128(dd - 169, ss - 169); + case 41: memcpy_avx_32(dd - 41, ss - 41); memcpy_avx_16(dd - 16, ss - 16); break; + case 170: memcpy_avx_128(dd - 170, ss - 170); + case 42: memcpy_avx_32(dd - 42, ss - 42); memcpy_avx_16(dd - 16, ss - 16); break; + case 171: memcpy_avx_128(dd - 171, ss - 171); + case 43: memcpy_avx_32(dd - 43, ss - 43); memcpy_avx_16(dd - 16, ss - 16); break; + case 172: memcpy_avx_128(dd - 172, ss - 172); + case 44: memcpy_avx_32(dd - 44, ss - 44); memcpy_avx_16(dd - 16, ss - 16); break; + case 173: memcpy_avx_128(dd - 173, ss - 173); + case 45: memcpy_avx_32(dd - 45, ss - 45); memcpy_avx_16(dd - 16, ss - 16); break; + case 174: memcpy_avx_128(dd - 174, ss - 174); + case 46: memcpy_avx_32(dd - 46, ss - 46); memcpy_avx_16(dd - 16, ss - 16); break; + case 175: memcpy_avx_128(dd - 175, ss - 175); + case 47: memcpy_avx_32(dd - 47, 
ss - 47); memcpy_avx_16(dd - 16, ss - 16); break; + case 176: memcpy_avx_128(dd - 176, ss - 176); + case 48: memcpy_avx_32(dd - 48, ss - 48); memcpy_avx_16(dd - 16, ss - 16); break; + case 177: memcpy_avx_128(dd - 177, ss - 177); + case 49: memcpy_avx_32(dd - 49, ss - 49); memcpy_avx_32(dd - 32, ss - 32); break; + case 178: memcpy_avx_128(dd - 178, ss - 178); + case 50: memcpy_avx_32(dd - 50, ss - 50); memcpy_avx_32(dd - 32, ss - 32); break; + case 179: memcpy_avx_128(dd - 179, ss - 179); + case 51: memcpy_avx_32(dd - 51, ss - 51); memcpy_avx_32(dd - 32, ss - 32); break; + case 180: memcpy_avx_128(dd - 180, ss - 180); + case 52: memcpy_avx_32(dd - 52, ss - 52); memcpy_avx_32(dd - 32, ss - 32); break; + case 181: memcpy_avx_128(dd - 181, ss - 181); + case 53: memcpy_avx_32(dd - 53, ss - 53); memcpy_avx_32(dd - 32, ss - 32); break; + case 182: memcpy_avx_128(dd - 182, ss - 182); + case 54: memcpy_avx_32(dd - 54, ss - 54); memcpy_avx_32(dd - 32, ss - 32); break; + case 183: memcpy_avx_128(dd - 183, ss - 183); + case 55: memcpy_avx_32(dd - 55, ss - 55); memcpy_avx_32(dd - 32, ss - 32); break; + case 184: memcpy_avx_128(dd - 184, ss - 184); + case 56: memcpy_avx_32(dd - 56, ss - 56); memcpy_avx_32(dd - 32, ss - 32); break; + case 185: memcpy_avx_128(dd - 185, ss - 185); + case 57: memcpy_avx_32(dd - 57, ss - 57); memcpy_avx_32(dd - 32, ss - 32); break; + case 186: memcpy_avx_128(dd - 186, ss - 186); + case 58: memcpy_avx_32(dd - 58, ss - 58); memcpy_avx_32(dd - 32, ss - 32); break; + case 187: memcpy_avx_128(dd - 187, ss - 187); + case 59: memcpy_avx_32(dd - 59, ss - 59); memcpy_avx_32(dd - 32, ss - 32); break; + case 188: memcpy_avx_128(dd - 188, ss - 188); + case 60: memcpy_avx_32(dd - 60, ss - 60); memcpy_avx_32(dd - 32, ss - 32); break; + case 189: memcpy_avx_128(dd - 189, ss - 189); + case 61: memcpy_avx_32(dd - 61, ss - 61); memcpy_avx_32(dd - 32, ss - 32); break; + case 190: memcpy_avx_128(dd - 190, ss - 190); + case 62: memcpy_avx_32(dd - 62, ss - 62); memcpy_avx_32(dd - 32, ss - 32); break; + case 191: memcpy_avx_128(dd - 191, ss - 191); + case 63: memcpy_avx_32(dd - 63, ss - 63); memcpy_avx_32(dd - 32, ss - 32); break; + case 192: memcpy_avx_128(dd - 192, ss - 192); + case 64: memcpy_avx_64(dd - 64, ss - 64); break; + case 193: memcpy_avx_128(dd - 193, ss - 193); + case 65: memcpy_avx_64(dd - 65, ss - 65); dd[-1] = ss[-1]; break; + case 194: memcpy_avx_128(dd - 194, ss - 194); + case 66: memcpy_avx_64(dd - 66, ss - 66); *((uint16_t*)(dd - 2)) = *((uint16_t*)(ss - 2)); break; + case 195: memcpy_avx_128(dd - 195, ss - 195); + case 67: memcpy_avx_64(dd - 67, ss - 67); *((uint32_t*)(dd - 4)) = *((uint32_t*)(ss - 4)); break; + case 196: memcpy_avx_128(dd - 196, ss - 196); + case 68: memcpy_avx_64(dd - 68, ss - 68); *((uint32_t*)(dd - 4)) = *((uint32_t*)(ss - 4)); break; + case 197: memcpy_avx_128(dd - 197, ss - 197); + case 69: memcpy_avx_64(dd - 69, ss - 69); *((uint64_t*)(dd - 8)) = *((uint64_t*)(ss - 8)); break; + case 198: memcpy_avx_128(dd - 198, ss - 198); + case 70: memcpy_avx_64(dd - 70, ss - 70); *((uint64_t*)(dd - 8)) = *((uint64_t*)(ss - 8)); break; + case 199: memcpy_avx_128(dd - 199, ss - 199); + case 71: memcpy_avx_64(dd - 71, ss - 71); *((uint64_t*)(dd - 8)) = *((uint64_t*)(ss - 8)); break; + case 200: memcpy_avx_128(dd - 200, ss - 200); + case 72: memcpy_avx_64(dd - 72, ss - 72); *((uint64_t*)(dd - 8)) = *((uint64_t*)(ss - 8)); break; + case 201: memcpy_avx_128(dd - 201, ss - 201); + case 73: memcpy_avx_64(dd - 73, ss - 73); memcpy_avx_16(dd - 16, ss - 16); break; + case 
202: memcpy_avx_128(dd - 202, ss - 202); + case 74: memcpy_avx_64(dd - 74, ss - 74); memcpy_avx_16(dd - 16, ss - 16); break; + case 203: memcpy_avx_128(dd - 203, ss - 203); + case 75: memcpy_avx_64(dd - 75, ss - 75); memcpy_avx_16(dd - 16, ss - 16); break; + case 204: memcpy_avx_128(dd - 204, ss - 204); + case 76: memcpy_avx_64(dd - 76, ss - 76); memcpy_avx_16(dd - 16, ss - 16); break; + case 205: memcpy_avx_128(dd - 205, ss - 205); + case 77: memcpy_avx_64(dd - 77, ss - 77); memcpy_avx_16(dd - 16, ss - 16); break; + case 206: memcpy_avx_128(dd - 206, ss - 206); + case 78: memcpy_avx_64(dd - 78, ss - 78); memcpy_avx_16(dd - 16, ss - 16); break; + case 207: memcpy_avx_128(dd - 207, ss - 207); + case 79: memcpy_avx_64(dd - 79, ss - 79); memcpy_avx_16(dd - 16, ss - 16); break; + case 208: memcpy_avx_128(dd - 208, ss - 208); + case 80: memcpy_avx_64(dd - 80, ss - 80); memcpy_avx_16(dd - 16, ss - 16); break; + case 209: memcpy_avx_128(dd - 209, ss - 209); + case 81: memcpy_avx_64(dd - 81, ss - 81); memcpy_avx_32(dd - 32, ss - 32); break; + case 210: memcpy_avx_128(dd - 210, ss - 210); + case 82: memcpy_avx_64(dd - 82, ss - 82); memcpy_avx_32(dd - 32, ss - 32); break; + case 211: memcpy_avx_128(dd - 211, ss - 211); + case 83: memcpy_avx_64(dd - 83, ss - 83); memcpy_avx_32(dd - 32, ss - 32); break; + case 212: memcpy_avx_128(dd - 212, ss - 212); + case 84: memcpy_avx_64(dd - 84, ss - 84); memcpy_avx_32(dd - 32, ss - 32); break; + case 213: memcpy_avx_128(dd - 213, ss - 213); + case 85: memcpy_avx_64(dd - 85, ss - 85); memcpy_avx_32(dd - 32, ss - 32); break; + case 214: memcpy_avx_128(dd - 214, ss - 214); + case 86: memcpy_avx_64(dd - 86, ss - 86); memcpy_avx_32(dd - 32, ss - 32); break; + case 215: memcpy_avx_128(dd - 215, ss - 215); + case 87: memcpy_avx_64(dd - 87, ss - 87); memcpy_avx_32(dd - 32, ss - 32); break; + case 216: memcpy_avx_128(dd - 216, ss - 216); + case 88: memcpy_avx_64(dd - 88, ss - 88); memcpy_avx_32(dd - 32, ss - 32); break; + case 217: memcpy_avx_128(dd - 217, ss - 217); + case 89: memcpy_avx_64(dd - 89, ss - 89); memcpy_avx_32(dd - 32, ss - 32); break; + case 218: memcpy_avx_128(dd - 218, ss - 218); + case 90: memcpy_avx_64(dd - 90, ss - 90); memcpy_avx_32(dd - 32, ss - 32); break; + case 219: memcpy_avx_128(dd - 219, ss - 219); + case 91: memcpy_avx_64(dd - 91, ss - 91); memcpy_avx_32(dd - 32, ss - 32); break; + case 220: memcpy_avx_128(dd - 220, ss - 220); + case 92: memcpy_avx_64(dd - 92, ss - 92); memcpy_avx_32(dd - 32, ss - 32); break; + case 221: memcpy_avx_128(dd - 221, ss - 221); + case 93: memcpy_avx_64(dd - 93, ss - 93); memcpy_avx_32(dd - 32, ss - 32); break; + case 222: memcpy_avx_128(dd - 222, ss - 222); + case 94: memcpy_avx_64(dd - 94, ss - 94); memcpy_avx_32(dd - 32, ss - 32); break; + case 223: memcpy_avx_128(dd - 223, ss - 223); + case 95: memcpy_avx_64(dd - 95, ss - 95); memcpy_avx_32(dd - 32, ss - 32); break; + case 224: memcpy_avx_128(dd - 224, ss - 224); + case 96: memcpy_avx_64(dd - 96, ss - 96); memcpy_avx_32(dd - 32, ss - 32); break; + case 225: memcpy_avx_128(dd - 225, ss - 225); + case 97: memcpy_avx_64(dd - 97, ss - 97); memcpy_avx_64(dd - 64, ss - 64); break; + case 226: memcpy_avx_128(dd - 226, ss - 226); + case 98: memcpy_avx_64(dd - 98, ss - 98); memcpy_avx_64(dd - 64, ss - 64); break; + case 227: memcpy_avx_128(dd - 227, ss - 227); + case 99: memcpy_avx_64(dd - 99, ss - 99); memcpy_avx_64(dd - 64, ss - 64); break; + case 228: memcpy_avx_128(dd - 228, ss - 228); + case 100: memcpy_avx_64(dd - 100, ss - 100); memcpy_avx_64(dd - 64, ss - 64); 
break; + case 229: memcpy_avx_128(dd - 229, ss - 229); + case 101: memcpy_avx_64(dd - 101, ss - 101); memcpy_avx_64(dd - 64, ss - 64); break; + case 230: memcpy_avx_128(dd - 230, ss - 230); + case 102: memcpy_avx_64(dd - 102, ss - 102); memcpy_avx_64(dd - 64, ss - 64); break; + case 231: memcpy_avx_128(dd - 231, ss - 231); + case 103: memcpy_avx_64(dd - 103, ss - 103); memcpy_avx_64(dd - 64, ss - 64); break; + case 232: memcpy_avx_128(dd - 232, ss - 232); + case 104: memcpy_avx_64(dd - 104, ss - 104); memcpy_avx_64(dd - 64, ss - 64); break; + case 233: memcpy_avx_128(dd - 233, ss - 233); + case 105: memcpy_avx_64(dd - 105, ss - 105); memcpy_avx_64(dd - 64, ss - 64); break; + case 234: memcpy_avx_128(dd - 234, ss - 234); + case 106: memcpy_avx_64(dd - 106, ss - 106); memcpy_avx_64(dd - 64, ss - 64); break; + case 235: memcpy_avx_128(dd - 235, ss - 235); + case 107: memcpy_avx_64(dd - 107, ss - 107); memcpy_avx_64(dd - 64, ss - 64); break; + case 236: memcpy_avx_128(dd - 236, ss - 236); + case 108: memcpy_avx_64(dd - 108, ss - 108); memcpy_avx_64(dd - 64, ss - 64); break; + case 237: memcpy_avx_128(dd - 237, ss - 237); + case 109: memcpy_avx_64(dd - 109, ss - 109); memcpy_avx_64(dd - 64, ss - 64); break; + case 238: memcpy_avx_128(dd - 238, ss - 238); + case 110: memcpy_avx_64(dd - 110, ss - 110); memcpy_avx_64(dd - 64, ss - 64); break; + case 239: memcpy_avx_128(dd - 239, ss - 239); + case 111: memcpy_avx_64(dd - 111, ss - 111); memcpy_avx_64(dd - 64, ss - 64); break; + case 240: memcpy_avx_128(dd - 240, ss - 240); + case 112: memcpy_avx_64(dd - 112, ss - 112); memcpy_avx_64(dd - 64, ss - 64); break; + case 241: memcpy_avx_128(dd - 241, ss - 241); + case 113: memcpy_avx_64(dd - 113, ss - 113); memcpy_avx_64(dd - 64, ss - 64); break; + case 242: memcpy_avx_128(dd - 242, ss - 242); + case 114: memcpy_avx_64(dd - 114, ss - 114); memcpy_avx_64(dd - 64, ss - 64); break; + case 243: memcpy_avx_128(dd - 243, ss - 243); + case 115: memcpy_avx_64(dd - 115, ss - 115); memcpy_avx_64(dd - 64, ss - 64); break; + case 244: memcpy_avx_128(dd - 244, ss - 244); + case 116: memcpy_avx_64(dd - 116, ss - 116); memcpy_avx_64(dd - 64, ss - 64); break; + case 245: memcpy_avx_128(dd - 245, ss - 245); + case 117: memcpy_avx_64(dd - 117, ss - 117); memcpy_avx_64(dd - 64, ss - 64); break; + case 246: memcpy_avx_128(dd - 246, ss - 246); + case 118: memcpy_avx_64(dd - 118, ss - 118); memcpy_avx_64(dd - 64, ss - 64); break; + case 247: memcpy_avx_128(dd - 247, ss - 247); + case 119: memcpy_avx_64(dd - 119, ss - 119); memcpy_avx_64(dd - 64, ss - 64); break; + case 248: memcpy_avx_128(dd - 248, ss - 248); + case 120: memcpy_avx_64(dd - 120, ss - 120); memcpy_avx_64(dd - 64, ss - 64); break; + case 249: memcpy_avx_128(dd - 249, ss - 249); + case 121: memcpy_avx_64(dd - 121, ss - 121); memcpy_avx_64(dd - 64, ss - 64); break; + case 250: memcpy_avx_128(dd - 250, ss - 250); + case 122: memcpy_avx_64(dd - 122, ss - 122); memcpy_avx_64(dd - 64, ss - 64); break; + case 251: memcpy_avx_128(dd - 251, ss - 251); + case 123: memcpy_avx_64(dd - 123, ss - 123); memcpy_avx_64(dd - 64, ss - 64); break; + case 252: memcpy_avx_128(dd - 252, ss - 252); + case 124: memcpy_avx_64(dd - 124, ss - 124); memcpy_avx_64(dd - 64, ss - 64); break; + case 253: memcpy_avx_128(dd - 253, ss - 253); + case 125: memcpy_avx_64(dd - 125, ss - 125); memcpy_avx_64(dd - 64, ss - 64); break; + case 254: memcpy_avx_128(dd - 254, ss - 254); + case 126: memcpy_avx_64(dd - 126, ss - 126); memcpy_avx_64(dd - 64, ss - 64); break; + case 255: memcpy_avx_128(dd - 255, 
ss - 255); + case 127: memcpy_avx_64(dd - 127, ss - 127); memcpy_avx_64(dd - 64, ss - 64); break; + case 256: memcpy_avx_256(dd - 256, ss - 256); break; + } + + return dst; +} + + +//--------------------------------------------------------------------- +// main routine +//--------------------------------------------------------------------- +static void* memcpy_fast(void *destination, const void *source, size_t size) +{ + unsigned char *dst = (unsigned char*)destination; + const unsigned char *src = (const unsigned char*)source; + static size_t cachesize = 0x200000; // L3-cache size + size_t padding; + + // small memory copy + if (size <= 256) { + memcpy_tiny(dst, src, size); + _mm256_zeroupper(); + return destination; + } + + // align destination to 16 bytes boundary + padding = (32 - (((size_t)dst) & 31)) & 31; + +#if 0 + if (padding > 0) { + __m256i head = _mm256_loadu_si256((const __m256i*)src); + _mm256_storeu_si256((__m256i*)dst, head); + dst += padding; + src += padding; + size -= padding; + } +#else + __m256i head = _mm256_loadu_si256((const __m256i*)src); + _mm256_storeu_si256((__m256i*)dst, head); + dst += padding; + src += padding; + size -= padding; +#endif + + // medium size copy + if (size <= cachesize) { + __m256i c0, c1, c2, c3, c4, c5, c6, c7; + + for (; size >= 256; size -= 256) { + c0 = _mm256_loadu_si256(((const __m256i*)src) + 0); + c1 = _mm256_loadu_si256(((const __m256i*)src) + 1); + c2 = _mm256_loadu_si256(((const __m256i*)src) + 2); + c3 = _mm256_loadu_si256(((const __m256i*)src) + 3); + c4 = _mm256_loadu_si256(((const __m256i*)src) + 4); + c5 = _mm256_loadu_si256(((const __m256i*)src) + 5); + c6 = _mm256_loadu_si256(((const __m256i*)src) + 6); + c7 = _mm256_loadu_si256(((const __m256i*)src) + 7); + _mm_prefetch((const char*)(src + 512), _MM_HINT_NTA); + src += 256; + _mm256_storeu_si256((((__m256i*)dst) + 0), c0); + _mm256_storeu_si256((((__m256i*)dst) + 1), c1); + _mm256_storeu_si256((((__m256i*)dst) + 2), c2); + _mm256_storeu_si256((((__m256i*)dst) + 3), c3); + _mm256_storeu_si256((((__m256i*)dst) + 4), c4); + _mm256_storeu_si256((((__m256i*)dst) + 5), c5); + _mm256_storeu_si256((((__m256i*)dst) + 6), c6); + _mm256_storeu_si256((((__m256i*)dst) + 7), c7); + dst += 256; + } + } + else { // big memory copy + __m256i c0, c1, c2, c3, c4, c5, c6, c7; + /* __m256i c0, c1, c2, c3, c4, c5, c6, c7; */ + + _mm_prefetch((const char*)(src), _MM_HINT_NTA); + + if ((((size_t)src) & 31) == 0) { // source aligned + for (; size >= 256; size -= 256) { + c0 = _mm256_load_si256(((const __m256i*)src) + 0); + c1 = _mm256_load_si256(((const __m256i*)src) + 1); + c2 = _mm256_load_si256(((const __m256i*)src) + 2); + c3 = _mm256_load_si256(((const __m256i*)src) + 3); + c4 = _mm256_load_si256(((const __m256i*)src) + 4); + c5 = _mm256_load_si256(((const __m256i*)src) + 5); + c6 = _mm256_load_si256(((const __m256i*)src) + 6); + c7 = _mm256_load_si256(((const __m256i*)src) + 7); + _mm_prefetch((const char*)(src + 512), _MM_HINT_NTA); + src += 256; + _mm256_stream_si256((((__m256i*)dst) + 0), c0); + _mm256_stream_si256((((__m256i*)dst) + 1), c1); + _mm256_stream_si256((((__m256i*)dst) + 2), c2); + _mm256_stream_si256((((__m256i*)dst) + 3), c3); + _mm256_stream_si256((((__m256i*)dst) + 4), c4); + _mm256_stream_si256((((__m256i*)dst) + 5), c5); + _mm256_stream_si256((((__m256i*)dst) + 6), c6); + _mm256_stream_si256((((__m256i*)dst) + 7), c7); + dst += 256; + } + } + else { // source unaligned + for (; size >= 256; size -= 256) { + c0 = _mm256_loadu_si256(((const __m256i*)src) + 0); + c1 = 
_mm256_loadu_si256(((const __m256i*)src) + 1);
+                c2 = _mm256_loadu_si256(((const __m256i*)src) + 2);
+                c3 = _mm256_loadu_si256(((const __m256i*)src) + 3);
+                c4 = _mm256_loadu_si256(((const __m256i*)src) + 4);
+                c5 = _mm256_loadu_si256(((const __m256i*)src) + 5);
+                c6 = _mm256_loadu_si256(((const __m256i*)src) + 6);
+                c7 = _mm256_loadu_si256(((const __m256i*)src) + 7);
+                _mm_prefetch((const char*)(src + 512), _MM_HINT_NTA);
+                src += 256;
+                _mm256_stream_si256((((__m256i*)dst) + 0), c0);
+                _mm256_stream_si256((((__m256i*)dst) + 1), c1);
+                _mm256_stream_si256((((__m256i*)dst) + 2), c2);
+                _mm256_stream_si256((((__m256i*)dst) + 3), c3);
+                _mm256_stream_si256((((__m256i*)dst) + 4), c4);
+                _mm256_stream_si256((((__m256i*)dst) + 5), c5);
+                _mm256_stream_si256((((__m256i*)dst) + 6), c6);
+                _mm256_stream_si256((((__m256i*)dst) + 7), c7);
+                dst += 256;
+            }
+        }
+        _mm_sfence();
+    }
+
+    memcpy_tiny(dst, src, size);
+    _mm256_zeroupper();
+
+    return destination;
+}
+
+
+#endif
+
+
+
diff --git a/base/memcpy/impl/LICENSE b/contrib/FastMemcpy/LICENSE
similarity index 100%
rename from base/memcpy/impl/LICENSE
rename to contrib/FastMemcpy/LICENSE
diff --git a/base/memcpy/README.md b/contrib/FastMemcpy/README.md
similarity index 100%
rename from base/memcpy/README.md
rename to contrib/FastMemcpy/README.md
diff --git a/contrib/FastMemcpy/memcpy_wrapper.c b/contrib/FastMemcpy/memcpy_wrapper.c
new file mode 100644
index 00000000000..3caf8577d21
--- /dev/null
+++ b/contrib/FastMemcpy/memcpy_wrapper.c
@@ -0,0 +1,6 @@
+#include <FastMemcpy.h>
+
+void * memcpy(void * __restrict destination, const void * __restrict source, size_t size)
+{
+    return memcpy_fast(destination, source, size);
+}
diff --git a/contrib/arrow-cmake/CMakeLists.txt b/contrib/arrow-cmake/CMakeLists.txt
index 20ff5c49eea..46c6b0e3918 100644
--- a/contrib/arrow-cmake/CMakeLists.txt
+++ b/contrib/arrow-cmake/CMakeLists.txt
@@ -83,7 +83,7 @@ add_custom_command(OUTPUT orc_proto.pb.h orc_proto.pb.cc
 set(FLATBUFFERS_SRC_DIR ${ClickHouse_SOURCE_DIR}/contrib/flatbuffers)
 set(FLATBUFFERS_BINARY_DIR ${ClickHouse_BINARY_DIR}/contrib/flatbuffers)
 set(FLATBUFFERS_INCLUDE_DIR ${FLATBUFFERS_SRC_DIR}/include)
-set(FLATBUFFERS_COMPILER "${FLATBUFFERS_BINARY_DIR}/flatc")
+set(FLATBUFFERS_COMPILER "$<TARGET_FILE:flatc>")
 
 # set flatbuffers CMake options
 if (${USE_STATIC_LIBRARIES})
diff --git a/contrib/aws b/contrib/aws
index 45dd8552d3c..fb5c604525f 160000
--- a/contrib/aws
+++ b/contrib/aws
@@ -1 +1 @@
-Subproject commit 45dd8552d3c492defca79d2720bcc809e35654da
+Subproject commit fb5c604525f5151d75a856462653e7e38b559b79
diff --git a/contrib/aws-s3-cmake/CMakeLists.txt b/contrib/aws-s3-cmake/CMakeLists.txt
index 6ed6434dcfd..d889fca197f 100644
--- a/contrib/aws-s3-cmake/CMakeLists.txt
+++ b/contrib/aws-s3-cmake/CMakeLists.txt
@@ -83,7 +83,7 @@ set(S3_INCLUDES
 add_library(aws_s3_checksums ${AWS_CHECKSUMS_SOURCES})
 target_include_directories(aws_s3_checksums PUBLIC "${AWS_CHECKSUMS_LIBRARY_DIR}/include/")
 
-if(CMAKE_BUILD_TYPE STREQUAL "" OR CMAKE_BUILD_TYPE STREQUAL "Debug")
+if(CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG")
     target_compile_definitions(aws_s3_checksums PRIVATE "-DDEBUG_BUILD")
 endif()
 set_target_properties(aws_s3_checksums PROPERTIES LINKER_LANGUAGE C)
diff --git a/contrib/base64 b/contrib/base64
index 5257626d2be..95ba56a9b04 160000
--- a/contrib/base64
+++ b/contrib/base64
@@ -1 +1 @@
-Subproject commit 5257626d2be17a3eb23f79be17fe55ebba394ad2
+Subproject commit 95ba56a9b041f9933f5cd2bbb2ee4e083468c20a
diff --git a/contrib/base64-cmake/CMakeLists.txt b/contrib/base64-cmake/CMakeLists.txt
index b8e6fa536a6..63b4e324d29 100644 --- a/contrib/base64-cmake/CMakeLists.txt +++ b/contrib/base64-cmake/CMakeLists.txt @@ -31,3 +31,13 @@ else () endif () target_include_directories(base64 SYSTEM PUBLIC ${LIBRARY_DIR}) + +if (XCODE OR XCODE_VERSION) + # https://gitlab.kitware.com/cmake/cmake/issues/17457 + # Some native build systems may not like targets that have only object files, so consider adding at least one real source file + # This applies to Xcode. + if (NOT EXISTS "${CMAKE_CURRENT_BINARY_DIR}/dummy.c") + file(WRITE "${CMAKE_CURRENT_BINARY_DIR}/dummy.c" "") + endif () + target_sources(base64 PRIVATE "${CMAKE_CURRENT_BINARY_DIR}/dummy.c") +endif () diff --git a/contrib/boost b/contrib/boost index 86be2aef20b..a04e72c0464 160000 --- a/contrib/boost +++ b/contrib/boost @@ -1 +1 @@ -Subproject commit 86be2aef20bee2356b744e5569eed6eaded85dbe +Subproject commit a04e72c0464f0c31d3384f18f0c0db36a05538e0 diff --git a/contrib/cctz b/contrib/cctz index 4f9776a310f..7a2db4ece6e 160000 --- a/contrib/cctz +++ b/contrib/cctz @@ -1 +1 @@ -Subproject commit 4f9776a310f4952454636363def82c2bf6641d5f +Subproject commit 7a2db4ece6e0f1b246173cbdb62711ae258ee841 diff --git a/contrib/cctz-cmake/CMakeLists.txt b/contrib/cctz-cmake/CMakeLists.txt index 9c2f6d9a658..df9fd6aa61c 100644 --- a/contrib/cctz-cmake/CMakeLists.txt +++ b/contrib/cctz-cmake/CMakeLists.txt @@ -1,31 +1,648 @@ -SET(LIBRARY_DIR ${ClickHouse_SOURCE_DIR}/contrib/cctz) +option (USE_INTERNAL_CCTZ "Use internal cctz library" ${NOT_UNBUNDLED}) -add_library(cctz - ${LIBRARY_DIR}/src/civil_time_detail.cc - ${LIBRARY_DIR}/src/time_zone_fixed.cc - ${LIBRARY_DIR}/src/time_zone_format.cc - ${LIBRARY_DIR}/src/time_zone_if.cc - ${LIBRARY_DIR}/src/time_zone_impl.cc - ${LIBRARY_DIR}/src/time_zone_info.cc - ${LIBRARY_DIR}/src/time_zone_libc.cc - ${LIBRARY_DIR}/src/time_zone_lookup.cc - ${LIBRARY_DIR}/src/time_zone_posix.cc - ${LIBRARY_DIR}/src/zone_info_source.cc +if (USE_INTERNAL_CCTZ) + SET(LIBRARY_DIR ${ClickHouse_SOURCE_DIR}/contrib/cctz) - ${LIBRARY_DIR}/src/time_zone_libc.h - ${LIBRARY_DIR}/src/time_zone_if.h - ${LIBRARY_DIR}/src/tzfile.h - ${LIBRARY_DIR}/src/time_zone_impl.h - ${LIBRARY_DIR}/src/time_zone_posix.h - ${LIBRARY_DIR}/src/time_zone_info.h + SET (SRCS + ${LIBRARY_DIR}/src/civil_time_detail.cc + ${LIBRARY_DIR}/src/time_zone_fixed.cc + ${LIBRARY_DIR}/src/time_zone_format.cc + ${LIBRARY_DIR}/src/time_zone_if.cc + ${LIBRARY_DIR}/src/time_zone_impl.cc + ${LIBRARY_DIR}/src/time_zone_info.cc + ${LIBRARY_DIR}/src/time_zone_libc.cc + ${LIBRARY_DIR}/src/time_zone_lookup.cc + ${LIBRARY_DIR}/src/time_zone_posix.cc + ${LIBRARY_DIR}/src/zone_info_source.cc + ) - ${LIBRARY_DIR}/include/cctz/time_zone.h - ${LIBRARY_DIR}/include/cctz/civil_time_detail.h - ${LIBRARY_DIR}/include/cctz/civil_time.h) + add_library (cctz ${SRCS}) + target_include_directories (cctz PUBLIC ${LIBRARY_DIR}/include) -if (CMAKE_SYSTEM MATCHES "FreeBSD") - # yes, need linux, because bsd check inside linux in time_zone_libc.cc:24 - target_compile_definitions (cctz PRIVATE __USE_BSD linux _XOPEN_SOURCE=600) + if (OS_FREEBSD) + # yes, need linux, because bsd check inside linux in time_zone_libc.cc:24 + target_compile_definitions (cctz PRIVATE __USE_BSD linux _XOPEN_SOURCE=600) + endif () + + # Build a libray with embedded tzdata + + if (OS_LINUX AND ARCH_AMD64) + + find_program (OBJCOPY_PATH NAMES "llvm-objcopy" "llvm-objcopy-10" "llvm-objcopy-9" "llvm-objcopy-8" "objcopy") + if (OBJCOPY_PATH) + message(STATUS "Using objcopy: ${OBJCOPY_PATH}.") + else () + message(FATAL_ERROR 
"Cannot find objcopy.") + endif () + + set (TIMEZONES + Africa/Abidjan + Africa/Accra + Africa/Addis_Ababa + Africa/Algiers + Africa/Asmara + Africa/Asmera + Africa/Bamako + Africa/Bangui + Africa/Banjul + Africa/Bissau + Africa/Blantyre + Africa/Brazzaville + Africa/Bujumbura + Africa/Cairo + Africa/Casablanca + Africa/Ceuta + Africa/Conakry + Africa/Dakar + Africa/Dar_es_Salaam + Africa/Djibouti + Africa/Douala + Africa/El_Aaiun + Africa/Freetown + Africa/Gaborone + Africa/Harare + Africa/Johannesburg + Africa/Juba + Africa/Kampala + Africa/Khartoum + Africa/Kigali + Africa/Kinshasa + Africa/Lagos + Africa/Libreville + Africa/Lome + Africa/Luanda + Africa/Lubumbashi + Africa/Lusaka + Africa/Malabo + Africa/Maputo + Africa/Maseru + Africa/Mbabane + Africa/Mogadishu + Africa/Monrovia + Africa/Nairobi + Africa/Ndjamena + Africa/Niamey + Africa/Nouakchott + Africa/Ouagadougou + Africa/Porto-Novo + Africa/Sao_Tome + Africa/Timbuktu + Africa/Tripoli + Africa/Tunis + Africa/Windhoek + America/Adak + America/Anchorage + America/Anguilla + America/Antigua + America/Araguaina + America/Argentina/Buenos_Aires + America/Argentina/Catamarca + America/Argentina/ComodRivadavia + America/Argentina/Cordoba + America/Argentina/Jujuy + America/Argentina/La_Rioja + America/Argentina/Mendoza + America/Argentina/Rio_Gallegos + America/Argentina/Salta + America/Argentina/San_Juan + America/Argentina/San_Luis + America/Argentina/Tucuman + America/Argentina/Ushuaia + America/Aruba + America/Asuncion + America/Atikokan + America/Atka + America/Bahia + America/Bahia_Banderas + America/Barbados + America/Belem + America/Belize + America/Blanc-Sablon + America/Boa_Vista + America/Bogota + America/Boise + America/Buenos_Aires + America/Cambridge_Bay + America/Campo_Grande + America/Cancun + America/Caracas + America/Catamarca + America/Cayenne + America/Cayman + America/Chicago + America/Chihuahua + America/Coral_Harbour + America/Cordoba + America/Costa_Rica + America/Creston + America/Cuiaba + America/Curacao + America/Danmarkshavn + America/Dawson + America/Dawson_Creek + America/Denver + America/Detroit + America/Dominica + America/Edmonton + America/Eirunepe + America/El_Salvador + America/Ensenada + America/Fortaleza + America/Fort_Nelson + America/Fort_Wayne + America/Glace_Bay + America/Godthab + America/Goose_Bay + America/Grand_Turk + America/Grenada + America/Guadeloupe + America/Guatemala + America/Guayaquil + America/Guyana + America/Halifax + America/Havana + America/Hermosillo + America/Indiana/Indianapolis + America/Indiana/Knox + America/Indiana/Marengo + America/Indiana/Petersburg + America/Indianapolis + America/Indiana/Tell_City + America/Indiana/Vevay + America/Indiana/Vincennes + America/Indiana/Winamac + America/Inuvik + America/Iqaluit + America/Jamaica + America/Jujuy + America/Juneau + America/Kentucky/Louisville + America/Kentucky/Monticello + America/Knox_IN + America/Kralendijk + America/La_Paz + America/Lima + America/Los_Angeles + America/Louisville + America/Lower_Princes + America/Maceio + America/Managua + America/Manaus + America/Marigot + America/Martinique + America/Matamoros + America/Mazatlan + America/Mendoza + America/Menominee + America/Merida + America/Metlakatla + America/Mexico_City + America/Miquelon + America/Moncton + America/Monterrey + America/Montevideo + America/Montreal + America/Montserrat + America/Nassau + America/New_York + America/Nipigon + America/Nome + America/Noronha + America/North_Dakota/Beulah + America/North_Dakota/Center + 
America/North_Dakota/New_Salem + America/Ojinaga + America/Panama + America/Pangnirtung + America/Paramaribo + America/Phoenix + America/Port-au-Prince + America/Porto_Acre + America/Port_of_Spain + America/Porto_Velho + America/Puerto_Rico + America/Punta_Arenas + America/Rainy_River + America/Rankin_Inlet + America/Recife + America/Regina + America/Resolute + America/Rio_Branco + America/Rosario + America/Santa_Isabel + America/Santarem + America/Santiago + America/Santo_Domingo + America/Sao_Paulo + America/Scoresbysund + America/Shiprock + America/Sitka + America/St_Barthelemy + America/St_Johns + America/St_Kitts + America/St_Lucia + America/St_Thomas + America/St_Vincent + America/Swift_Current + America/Tegucigalpa + America/Thule + America/Thunder_Bay + America/Tijuana + America/Toronto + America/Tortola + America/Vancouver + America/Virgin + America/Whitehorse + America/Winnipeg + America/Yakutat + America/Yellowknife + Antarctica/Casey + Antarctica/Davis + Antarctica/DumontDUrville + Antarctica/Macquarie + Antarctica/Mawson + Antarctica/McMurdo + Antarctica/Palmer + Antarctica/Rothera + Antarctica/South_Pole + Antarctica/Syowa + Antarctica/Troll + Antarctica/Vostok + Arctic/Longyearbyen + Asia/Aden + Asia/Almaty + Asia/Amman + Asia/Anadyr + Asia/Aqtau + Asia/Aqtobe + Asia/Ashgabat + Asia/Ashkhabad + Asia/Atyrau + Asia/Baghdad + Asia/Bahrain + Asia/Baku + Asia/Bangkok + Asia/Barnaul + Asia/Beirut + Asia/Bishkek + Asia/Brunei + Asia/Calcutta + Asia/Chita + Asia/Choibalsan + Asia/Chongqing + Asia/Chungking + Asia/Colombo + Asia/Dacca + Asia/Damascus + Asia/Dhaka + Asia/Dili + Asia/Dubai + Asia/Dushanbe + Asia/Famagusta + Asia/Gaza + Asia/Harbin + Asia/Hebron + Asia/Ho_Chi_Minh + Asia/Hong_Kong + Asia/Hovd + Asia/Irkutsk + Asia/Istanbul + Asia/Jakarta + Asia/Jayapura + Asia/Jerusalem + Asia/Kabul + Asia/Kamchatka + Asia/Karachi + Asia/Kashgar + Asia/Kathmandu + Asia/Katmandu + Asia/Khandyga + Asia/Kolkata + Asia/Krasnoyarsk + Asia/Kuala_Lumpur + Asia/Kuching + Asia/Kuwait + Asia/Macao + Asia/Macau + Asia/Magadan + Asia/Makassar + Asia/Manila + Asia/Muscat + Asia/Nicosia + Asia/Novokuznetsk + Asia/Novosibirsk + Asia/Omsk + Asia/Oral + Asia/Phnom_Penh + Asia/Pontianak + Asia/Pyongyang + Asia/Qatar + Asia/Qostanay + Asia/Qyzylorda + Asia/Rangoon + Asia/Riyadh + Asia/Saigon + Asia/Sakhalin + Asia/Samarkand + Asia/Seoul + Asia/Shanghai + Asia/Singapore + Asia/Srednekolymsk + Asia/Taipei + Asia/Tashkent + Asia/Tbilisi + Asia/Tehran + Asia/Tel_Aviv + Asia/Thimbu + Asia/Thimphu + Asia/Tokyo + Asia/Tomsk + Asia/Ujung_Pandang + Asia/Ulaanbaatar + Asia/Ulan_Bator + Asia/Urumqi + Asia/Ust-Nera + Asia/Vientiane + Asia/Vladivostok + Asia/Yakutsk + Asia/Yangon + Asia/Yekaterinburg + Asia/Yerevan + Atlantic/Azores + Atlantic/Bermuda + Atlantic/Canary + Atlantic/Cape_Verde + Atlantic/Faeroe + Atlantic/Faroe + Atlantic/Jan_Mayen + Atlantic/Madeira + Atlantic/Reykjavik + Atlantic/South_Georgia + Atlantic/Stanley + Atlantic/St_Helena + Australia/ACT + Australia/Adelaide + Australia/Brisbane + Australia/Broken_Hill + Australia/Canberra + Australia/Currie + Australia/Darwin + Australia/Eucla + Australia/Hobart + Australia/LHI + Australia/Lindeman + Australia/Lord_Howe + Australia/Melbourne + Australia/North + Australia/NSW + Australia/Perth + Australia/Queensland + Australia/South + Australia/Sydney + Australia/Tasmania + Australia/Victoria + Australia/West + Australia/Yancowinna + Brazil/Acre + Brazil/DeNoronha + Brazil/East + Brazil/West + Canada/Atlantic + Canada/Central + Canada/Eastern + 
Canada/Mountain + Canada/Newfoundland + Canada/Pacific + Canada/Saskatchewan + Canada/Yukon + CET + Chile/Continental + Chile/EasterIsland + CST6CDT + Cuba + EET + Egypt + Eire + EST + EST5EDT + Etc/GMT + Etc/Greenwich + Etc/UCT + Etc/Universal + Etc/UTC + Etc/Zulu + Europe/Amsterdam + Europe/Andorra + Europe/Astrakhan + Europe/Athens + Europe/Belfast + Europe/Belgrade + Europe/Berlin + Europe/Bratislava + Europe/Brussels + Europe/Bucharest + Europe/Budapest + Europe/Busingen + Europe/Chisinau + Europe/Copenhagen + Europe/Dublin + Europe/Gibraltar + Europe/Guernsey + Europe/Helsinki + Europe/Isle_of_Man + Europe/Istanbul + Europe/Jersey + Europe/Kaliningrad + Europe/Kiev + Europe/Kirov + Europe/Lisbon + Europe/Ljubljana + Europe/London + Europe/Luxembourg + Europe/Madrid + Europe/Malta + Europe/Mariehamn + Europe/Minsk + Europe/Monaco + Europe/Moscow + Europe/Nicosia + Europe/Oslo + Europe/Paris + Europe/Podgorica + Europe/Prague + Europe/Riga + Europe/Rome + Europe/Samara + Europe/San_Marino + Europe/Sarajevo + Europe/Saratov + Europe/Simferopol + Europe/Skopje + Europe/Sofia + Europe/Stockholm + Europe/Tallinn + Europe/Tirane + Europe/Tiraspol + Europe/Ulyanovsk + Europe/Uzhgorod + Europe/Vaduz + Europe/Vatican + Europe/Vienna + Europe/Vilnius + Europe/Volgograd + Europe/Warsaw + Europe/Zagreb + Europe/Zaporozhye + Europe/Zurich + Factory + GB + GB-Eire + GMT + GMT0 + Greenwich + Hongkong + HST + Iceland + Indian/Antananarivo + Indian/Chagos + Indian/Christmas + Indian/Cocos + Indian/Comoro + Indian/Kerguelen + Indian/Mahe + Indian/Maldives + Indian/Mauritius + Indian/Mayotte + Indian/Reunion + Iran + Israel + Jamaica + Japan + Kwajalein + Libya + MET + Mexico/BajaNorte + Mexico/BajaSur + Mexico/General + MST + MST7MDT + Navajo + NZ + NZ-CHAT + Pacific/Apia + Pacific/Auckland + Pacific/Bougainville + Pacific/Chatham + Pacific/Chuuk + Pacific/Easter + Pacific/Efate + Pacific/Enderbury + Pacific/Fakaofo + Pacific/Fiji + Pacific/Funafuti + Pacific/Galapagos + Pacific/Gambier + Pacific/Guadalcanal + Pacific/Guam + Pacific/Honolulu + Pacific/Johnston + Pacific/Kiritimati + Pacific/Kosrae + Pacific/Kwajalein + Pacific/Majuro + Pacific/Marquesas + Pacific/Midway + Pacific/Nauru + Pacific/Niue + Pacific/Norfolk + Pacific/Noumea + Pacific/Pago_Pago + Pacific/Palau + Pacific/Pitcairn + Pacific/Pohnpei + Pacific/Ponape + Pacific/Port_Moresby + Pacific/Rarotonga + Pacific/Saipan + Pacific/Samoa + Pacific/Tahiti + Pacific/Tarawa + Pacific/Tongatapu + Pacific/Truk + Pacific/Wake + Pacific/Wallis + Pacific/Yap + Poland + Portugal + PRC + PST8PDT + ROC + ROK + Singapore + Turkey + UCT + Universal + US/Alaska + US/Aleutian + US/Arizona + US/Central + US/Eastern + US/East-Indiana + US/Hawaii + US/Indiana-Starke + US/Michigan + US/Mountain + US/Pacific + US/Samoa + UTC + WET + W-SU + Zulu) + + set(TZDIR ${LIBRARY_DIR}/testdata/zoneinfo) + set(TZ_OBJS) + + foreach(TIMEZONE ${TIMEZONES}) + string(REPLACE "/" "_" TIMEZONE_ID ${TIMEZONE}) + set(TZ_OBJ ${TIMEZONE_ID}.o) + set(TZ_OBJS ${TZ_OBJS} ${TZ_OBJ}) + + # https://stackoverflow.com/questions/14776463/compile-and-add-an-object-file-from-a-binary-with-cmake + add_custom_command(OUTPUT ${TZ_OBJ} + COMMAND cd ${TZDIR} && ${OBJCOPY_PATH} -I binary -O elf64-x86-64 -B i386 ${TIMEZONE} ${CMAKE_CURRENT_BINARY_DIR}/${TZ_OBJ} + COMMAND ${OBJCOPY_PATH} --rename-section .data=.rodata,alloc,load,readonly,data,contents + ${CMAKE_CURRENT_BINARY_DIR}/${TZ_OBJ} ${CMAKE_CURRENT_BINARY_DIR}/${TZ_OBJ}) + + set_source_files_properties(${TZ_OBJ} PROPERTIES EXTERNAL_OBJECT true 
GENERATED true) + endforeach(TIMEZONE) + + add_library(tzdata STATIC ${TZ_OBJS}) + set_target_properties(tzdata PROPERTIES LINKER_LANGUAGE C) + target_link_libraries(cctz -Wl,--whole-archive tzdata -Wl,--no-whole-archive) # whole-archive prevents symbols from being discarded + endif () + +else () + find_library (LIBRARY_CCTZ cctz) + find_path (INCLUDE_CCTZ NAMES cctz/civil_time.h) + + add_library (cctz UNKNOWN IMPORTED) + set_property (TARGET cctz PROPERTY IMPORTED_LOCATION ${LIBRARY_CCTZ}) + set_property (TARGET cctz PROPERTY INTERFACE_INCLUDE_DIRECTORIES ${INCLUDE_CCTZ}) + + set(CMAKE_REQUIRED_LIBRARIES cctz) + check_cxx_source_compiles( + " + #include + int main() { + cctz::civil_day date; + } + " + EXTERNAL_CCTZ_WORKS + ) + + if (NOT EXTERNAL_CCTZ_WORKS) + message (FATAL_ERROR "cctz is unusable: ${LIBRARY_CCTZ} ${INCLUDE_CCTZ}") + endif () endif () -target_include_directories (cctz PUBLIC ${LIBRARY_DIR}/include) +message (STATUS "Using cctz") diff --git a/base/consistent-hashing-sumbur/CMakeLists.txt b/contrib/consistent-hashing-sumbur/CMakeLists.txt similarity index 100% rename from base/consistent-hashing-sumbur/CMakeLists.txt rename to contrib/consistent-hashing-sumbur/CMakeLists.txt diff --git a/base/consistent-hashing-sumbur/sumbur.cpp b/contrib/consistent-hashing-sumbur/sumbur.cpp similarity index 99% rename from base/consistent-hashing-sumbur/sumbur.cpp rename to contrib/consistent-hashing-sumbur/sumbur.cpp index 3b905f0adc7..78f59ca875f 100644 --- a/base/consistent-hashing-sumbur/sumbur.cpp +++ b/contrib/consistent-hashing-sumbur/sumbur.cpp @@ -108,6 +108,6 @@ unsigned int sumburConsistentHash(unsigned int hashed_int, unsigned int capacity if (L / i - h < part) return n - 1; } } - } while(0); + } while(false); return n - 1; } diff --git a/base/consistent-hashing-sumbur/sumbur.h b/contrib/consistent-hashing-sumbur/sumbur.h similarity index 100% rename from base/consistent-hashing-sumbur/sumbur.h rename to contrib/consistent-hashing-sumbur/sumbur.h diff --git a/base/consistent-hashing/CMakeLists.txt b/contrib/consistent-hashing/CMakeLists.txt similarity index 100% rename from base/consistent-hashing/CMakeLists.txt rename to contrib/consistent-hashing/CMakeLists.txt diff --git a/base/consistent-hashing/bitops.h b/contrib/consistent-hashing/bitops.h similarity index 100% rename from base/consistent-hashing/bitops.h rename to contrib/consistent-hashing/bitops.h diff --git a/base/consistent-hashing/consistent_hashing.cpp b/contrib/consistent-hashing/consistent_hashing.cpp similarity index 100% rename from base/consistent-hashing/consistent_hashing.cpp rename to contrib/consistent-hashing/consistent_hashing.cpp diff --git a/base/consistent-hashing/consistent_hashing.h b/contrib/consistent-hashing/consistent_hashing.h similarity index 100% rename from base/consistent-hashing/consistent_hashing.h rename to contrib/consistent-hashing/consistent_hashing.h diff --git a/base/consistent-hashing/popcount.cpp b/contrib/consistent-hashing/popcount.cpp similarity index 100% rename from base/consistent-hashing/popcount.cpp rename to contrib/consistent-hashing/popcount.cpp diff --git a/base/consistent-hashing/popcount.h b/contrib/consistent-hashing/popcount.h similarity index 100% rename from base/consistent-hashing/popcount.h rename to contrib/consistent-hashing/popcount.h diff --git a/contrib/grpc-cmake/CMakeLists.txt b/contrib/grpc-cmake/CMakeLists.txt index 950bcb2b7f6..0180c0c1d31 100644 --- a/contrib/grpc-cmake/CMakeLists.txt +++ b/contrib/grpc-cmake/CMakeLists.txt @@ -1,8 +1,9 @@ 
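[Editor's note on the cctz hunk above] Each zoneinfo file is converted into an object file with objcopy -I binary, its section is renamed to .rodata, and the resulting tzdata archive is linked into cctz with --whole-archive so that the otherwise unreferenced symbols are not discarded. A blob embedded this way is reachable through linker-generated _binary_<name>_start/_end symbols, with non-alphanumeric characters in the file name replaced by underscores — the same substitution the string(REPLACE "/" "_" ...) above performs for TIMEZONE_ID. The sketch below is a hedged illustration of reading such a blob back; Europe/Moscow is an arbitrary example, and it only links when the corresponding objcopy-produced object file is part of the link.

/* Sketch: reading a blob that objcopy -I binary turned into an object file.
 * The symbol names are derived from the input file name ("Europe/Moscow"
 * is an assumed example, mangled to Europe_Moscow). */
#include <stddef.h>
#include <stdio.h>

extern const unsigned char _binary_Europe_Moscow_start[];
extern const unsigned char _binary_Europe_Moscow_end[];

int main(void)
{
    size_t size = (size_t)(_binary_Europe_Moscow_end - _binary_Europe_Moscow_start);
    /* Compiled zoneinfo files begin with the "TZif" magic bytes. */
    printf("embedded zoneinfo: %zu bytes, magic %.4s\n",
           size, (const char *)_binary_Europe_Moscow_start);
    return 0;
}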
cmake_minimum_required(VERSION 3.5.1) -set(GRPC_SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/../grpc) -set(GRPC_INCLUDE_DIR ${GRPC_SOURCE_DIR}/include/) +set(GRPC_SOURCE_DIR ${ClickHouse_SOURCE_DIR}/contrib/grpc) +set(GRPC_INCLUDE_DIR ${GRPC_SOURCE_DIR}/include/) +set(GRPC_BINARY_DIR ${ClickHouse_BINARY_DIR}/contrib/grpc) if(UNIX) if(${CMAKE_SYSTEM_NAME} MATCHES "Linux") set(_gRPC_PLATFORM_LINUX ON) @@ -41,42 +42,33 @@ set(_gRPC_ADDRESS_SORTING_LIBRARIES address_sorting) # cares.cmake set(CARES_ROOT_DIR ${GRPC_SOURCE_DIR}/third_party/cares/cares) +set(CARES_BINARY_DIR ${GRPC_BINARY_DIR}/third_party/cares/cares) set(CARES_SHARED OFF CACHE BOOL "disable shared library") set(CARES_STATIC ON CACHE BOOL "link cares statically") if(gRPC_BACKWARDS_COMPATIBILITY_MODE) # See https://github.com/grpc/grpc/issues/17255 set(HAVE_LIBNSL OFF CACHE BOOL "avoid cares dependency on libnsl") endif() -add_subdirectory(${CARES_ROOT_DIR} ${CARES_ROOT_DIR}) +add_subdirectory(${CARES_ROOT_DIR} ${CARES_BINARY_DIR}) if(TARGET c-ares) set(_gRPC_CARES_LIBRARIES c-ares) endif() # protobuf.cmake set(PROTOBUF_ROOT_DIR ${CMAKE_CURRENT_SOURCE_DIR}/../protobuf) -# if(NOT protobuf_BUILD_TESTS) - # set(protobuf_BUILD_TESTS OFF CACHE BOOL "Build protobuf tests") -# endif() + set(protobuf_BUILD_TESTS OFF CACHE BOOL "Build protobuf tests") if(NOT protobuf_WITH_ZLIB) set(protobuf_WITH_ZLIB OFF CACHE BOOL "Build protobuf with zlib.") endif() set(protobuf_MSVC_STATIC_RUNTIME OFF CACHE BOOL "Link static runtime libraries") -if(NOT USE_INTERNAL_PROTOBUF_LIBRARY) - add_subdirectory(${PROTOBUF_ROOT_DIR}/cmake ${GRPC_SOURCE_DIR}/third_party/protobuf) -endif() -if(TARGET ${_gRPC_PROTOBUF_LIBRARY_NAME}) - set(_gRPC_PROTOBUF_LIBRARIES ${_gRPC_PROTOBUF_LIBRARY_NAME}) -endif() -if(TARGET libprotoc) - set(_gRPC_PROTOBUF_PROTOC_LIBRARIES libprotoc) -endif() -if(TARGET protoc) - set(_gRPC_PROTOBUF_PROTOC protoc) - set(_gRPC_PROTOBUF_PROTOC_EXECUTABLE $) -endif() -# For well-known .proto files distributed with protobuf -set(_gRPC_PROTOBUF_WELLKNOWN_INCLUDE_DIR "${PROTOBUF_ROOT_DIR}/src") + +set(_gRPC_PROTOBUF_LIBRARIES libprotobuf) +set(_gRPC_PROTOBUF_PROTOC_LIBRARIES libprotoc) +set(_gRPC_PROTOBUF_PROTOC protoc) +set(_gRPC_PROTOBUF_PROTOC_EXECUTABLE $) +set(_gRPC_PROTOBUF_INCLUDE_DIR "${PROTOBUF_ROOT_DIR}/src") + if(gRPC_INSTALL) message(WARNING "gRPC_INSTALL will be forced to FALSE because gRPC_PROTOBUF_PROVIDER is \"module\"") set(gRPC_INSTALL FALSE) @@ -96,12 +88,13 @@ set(_gRPC_UPB_GRPC_GENERATED_DIR "${GRPC_SOURCE_DIR}/src/core/ext/upb-generated" set(_gRPC_UPB_LIBRARIES upb) # zlib.cmake -set(ZLIB_ROOT_DIR ${GRPC_SOURCE_DIR}/third_party/zlib-ng) +set(ZLIB_ROOT_DIR ${GRPC_SOURCE_DIR}/../zlib-ng) include_directories("${ZLIB_ROOT_DIR}") -# add_subdirectory(${ZLIB_ROOT_DIR} ${ZLIB_ROOT_DIR}) -set(_gRPC_ZLIB_LIBRARIES zlibstatic) -set(_gRPC_ZLIB_INCLUDE_DIR "${ZLIB_ROOT_DIR}") - +## add_subdirectory(${ZLIB_ROOT_DIR} ${ZLIB_ROOT_DIR}) +if(TARGET zlibstatic) + set(_gRPC_ZLIB_LIBRARIES zlibstatic) + set(_gRPC_ZLIB_INCLUDE_DIR "${ZLIB_ROOT_DIR}" "${GRPC_SOURCE_DIR}/third_party/zlib") +endif() set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -std=c99") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11") @@ -132,6 +125,7 @@ target_include_directories(address_sorting ) target_link_libraries(address_sorting ${_gRPC_BASELIB_LIBRARIES} + ${_gRPC_PROTOBUF_LIBRARIES} ${_gRPC_ALLTARGETS_LIBRARIES} ) @@ -192,6 +186,7 @@ target_include_directories(gpr ) target_link_libraries(gpr ${_gRPC_ALLTARGETS_LIBRARIES} + ${_gRPC_PROTOBUF_LIBRARIES} ) add_library(grpc @@ -595,6 +590,7 @@ 
target_link_libraries(grpc ${_gRPC_CARES_LIBRARIES} ${_gRPC_ADDRESS_SORTING_LIBRARIES} ${_gRPC_ALLTARGETS_LIBRARIES} + ${_gRPC_PROTOBUF_LIBRARIES} gpr ) if (_gRPC_PLATFORM_MAC) @@ -941,6 +937,7 @@ target_link_libraries(grpc_cronet ${_gRPC_CARES_LIBRARIES} ${_gRPC_ADDRESS_SORTING_LIBRARIES} ${_gRPC_ALLTARGETS_LIBRARIES} + ${_gRPC_PROTOBUF_LIBRARIES} gpr ) if (_gRPC_PLATFORM_MAC) @@ -1271,6 +1268,7 @@ target_link_libraries(grpc_unsecure ${_gRPC_CARES_LIBRARIES} ${_gRPC_ADDRESS_SORTING_LIBRARIES} ${_gRPC_ALLTARGETS_LIBRARIES} + ${_gRPC_PROTOBUF_LIBRARIES} gpr ) if (_gRPC_PLATFORM_MAC) diff --git a/contrib/jemalloc-cmake/CMakeLists.txt b/contrib/jemalloc-cmake/CMakeLists.txt index 5b420246168..5ae09bec8aa 100644 --- a/contrib/jemalloc-cmake/CMakeLists.txt +++ b/contrib/jemalloc-cmake/CMakeLists.txt @@ -1,70 +1,110 @@ -set(JEMALLOC_SOURCE_DIR ${ClickHouse_SOURCE_DIR}/contrib/jemalloc) +option (ENABLE_JEMALLOC "Enable jemalloc allocator" ${ENABLE_LIBRARIES}) -set(SRCS -${JEMALLOC_SOURCE_DIR}/src/arena.c -${JEMALLOC_SOURCE_DIR}/src/background_thread.c -${JEMALLOC_SOURCE_DIR}/src/base.c -${JEMALLOC_SOURCE_DIR}/src/bin.c -${JEMALLOC_SOURCE_DIR}/src/bitmap.c -${JEMALLOC_SOURCE_DIR}/src/ckh.c -${JEMALLOC_SOURCE_DIR}/src/ctl.c -${JEMALLOC_SOURCE_DIR}/src/div.c -${JEMALLOC_SOURCE_DIR}/src/extent.c -${JEMALLOC_SOURCE_DIR}/src/extent_dss.c -${JEMALLOC_SOURCE_DIR}/src/extent_mmap.c -${JEMALLOC_SOURCE_DIR}/src/hash.c -${JEMALLOC_SOURCE_DIR}/src/hook.c -${JEMALLOC_SOURCE_DIR}/src/jemalloc.c -${JEMALLOC_SOURCE_DIR}/src/large.c -${JEMALLOC_SOURCE_DIR}/src/log.c -${JEMALLOC_SOURCE_DIR}/src/malloc_io.c -${JEMALLOC_SOURCE_DIR}/src/mutex.c -${JEMALLOC_SOURCE_DIR}/src/mutex_pool.c -${JEMALLOC_SOURCE_DIR}/src/nstime.c -${JEMALLOC_SOURCE_DIR}/src/pages.c -${JEMALLOC_SOURCE_DIR}/src/prng.c -${JEMALLOC_SOURCE_DIR}/src/prof.c -${JEMALLOC_SOURCE_DIR}/src/rtree.c -${JEMALLOC_SOURCE_DIR}/src/sc.c -${JEMALLOC_SOURCE_DIR}/src/stats.c -${JEMALLOC_SOURCE_DIR}/src/sz.c -${JEMALLOC_SOURCE_DIR}/src/tcache.c -${JEMALLOC_SOURCE_DIR}/src/test_hooks.c -${JEMALLOC_SOURCE_DIR}/src/ticker.c -${JEMALLOC_SOURCE_DIR}/src/tsd.c -${JEMALLOC_SOURCE_DIR}/src/witness.c -) - -if(OS_DARWIN) - list(APPEND SRCS ${JEMALLOC_SOURCE_DIR}/src/zone.c) -endif() - -if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU") - set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -w") +if (SANITIZE OR NOT OS_LINUX OR NOT (ARCH_AMD64 OR ARCH_ARM)) + set (ENABLE_JEMALLOC OFF) + message (STATUS "jemalloc is disabled implicitly: it doesn't work with sanitizers and can only be used on Linux with x86_64 or aarch64.") endif () -add_library(jemalloc STATIC ${SRCS}) +if (ENABLE_JEMALLOC) + option (USE_INTERNAL_JEMALLOC "Use internal jemalloc library" ${NOT_UNBUNDLED}) -target_include_directories(jemalloc PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/include) + if (USE_INTERNAL_JEMALLOC) + set (LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/jemalloc") -if (ARCH_AMD64) - target_include_directories(jemalloc PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/include_linux_x86_64) -elseif (ARCH_ARM) - target_include_directories(jemalloc PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/include_linux_aarch64) -else () - message (FATAL_ERROR "jemalloc can only be used on x86_64 or aarch64.") -endif () + set (SRCS + ${LIBRARY_DIR}/src/arena.c + ${LIBRARY_DIR}/src/background_thread.c + ${LIBRARY_DIR}/src/base.c + ${LIBRARY_DIR}/src/bin.c + ${LIBRARY_DIR}/src/bitmap.c + ${LIBRARY_DIR}/src/ckh.c + ${LIBRARY_DIR}/src/ctl.c + ${LIBRARY_DIR}/src/div.c + ${LIBRARY_DIR}/src/extent.c + ${LIBRARY_DIR}/src/extent_dss.c + ${LIBRARY_DIR}/src/extent_mmap.c + 
${LIBRARY_DIR}/src/hash.c + ${LIBRARY_DIR}/src/hook.c + ${LIBRARY_DIR}/src/jemalloc.c + ${LIBRARY_DIR}/src/large.c + ${LIBRARY_DIR}/src/log.c + ${LIBRARY_DIR}/src/malloc_io.c + ${LIBRARY_DIR}/src/mutex.c + ${LIBRARY_DIR}/src/mutex_pool.c + ${LIBRARY_DIR}/src/nstime.c + ${LIBRARY_DIR}/src/pages.c + ${LIBRARY_DIR}/src/prng.c + ${LIBRARY_DIR}/src/prof.c + ${LIBRARY_DIR}/src/rtree.c + ${LIBRARY_DIR}/src/sc.c + ${LIBRARY_DIR}/src/stats.c + ${LIBRARY_DIR}/src/sz.c + ${LIBRARY_DIR}/src/tcache.c + ${LIBRARY_DIR}/src/test_hooks.c + ${LIBRARY_DIR}/src/ticker.c + ${LIBRARY_DIR}/src/tsd.c + ${LIBRARY_DIR}/src/witness.c + ) + if (OS_DARWIN) + list(APPEND SRCS ${LIBRARY_DIR}/src/zone.c) + endif () -target_include_directories(jemalloc PRIVATE - ${JEMALLOC_SOURCE_DIR}/include) + add_library(jemalloc ${SRCS}) + target_include_directories(jemalloc PRIVATE ${LIBRARY_DIR}/include) + target_include_directories(jemalloc SYSTEM PUBLIC include) + if (ARCH_AMD64) + target_include_directories(jemalloc SYSTEM PUBLIC include_linux_x86_64) + elseif (ARCH_ARM) + target_include_directories(jemalloc SYSTEM PUBLIC include_linux_aarch64) + endif () -target_compile_definitions(jemalloc PRIVATE -DJEMALLOC_NO_PRIVATE_NAMESPACE) + target_compile_definitions(jemalloc PRIVATE -DJEMALLOC_NO_PRIVATE_NAMESPACE) -if (CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG") - target_compile_definitions(jemalloc PRIVATE -DJEMALLOC_DEBUG=1 -DJEMALLOC_PROF=1) + if (CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG") + target_compile_definitions(jemalloc PRIVATE -DJEMALLOC_DEBUG=1 -DJEMALLOC_PROF=1) - if (USE_UNWIND) - target_compile_definitions (jemalloc PRIVATE -DJEMALLOC_PROF_LIBUNWIND=1) - target_link_libraries (jemalloc PRIVATE ${UNWIND_LIBRARIES}) + if (USE_UNWIND) + target_compile_definitions (jemalloc PRIVATE -DJEMALLOC_PROF_LIBUNWIND=1) + target_link_libraries (jemalloc PRIVATE unwind) + endif () + endif () + + target_compile_options(jemalloc PRIVATE -Wno-redundant-decls) + else () + find_library(LIBRARY_JEMALLOC jemalloc) + find_path(INCLUDE_JEMALLOC jemalloc/jemalloc.h) + + set(THREADS_PREFER_PTHREAD_FLAG ON) + find_package(Threads REQUIRED) + + add_library (jemalloc STATIC IMPORTED) + set_property (TARGET jemalloc PROPERTY IMPORTED_LOCATION ${LIBRARY_JEMALLOC}) + set_property (TARGET jemalloc PROPERTY INTERFACE_INCLUDE_DIRECTORIES ${INCLUDE_JEMALLOC}) + set_property (TARGET jemalloc PROPERTY INTERFACE_LINK_LIBRARIES Threads::Threads dl) + + set (CMAKE_REQUIRED_LIBRARIES jemalloc) + check_cxx_source_compiles ( + " + #include + + int main() { + free(mallocx(1, 0)); + } + " + EXTERNAL_JEMALLOC_WORKS + ) + + if (NOT EXTERNAL_JEMALLOC_WORKS) + message (FATAL_ERROR "jemalloc is unusable: ${LIBRARY_JEMALLOC} ${INCLUDE_JEMALLOC}") + endif () endif () + + set_property(TARGET jemalloc APPEND PROPERTY INTERFACE_COMPILE_DEFINITIONS USE_JEMALLOC=1) + + message (STATUS "Using jemalloc") +else () + add_library(jemalloc INTERFACE) + target_compile_definitions(jemalloc INTERFACE USE_JEMALLOC=0) + + message (STATUS "Not using jemalloc") endif () diff --git a/contrib/libc-headers b/contrib/libc-headers index 9676d2645a7..92c74f938cf 160000 --- a/contrib/libc-headers +++ b/contrib/libc-headers @@ -1 +1 @@ -Subproject commit 9676d2645a713e679dc981ffd84dee99fcd68b8e +Subproject commit 92c74f938cf2c4dd529cae4f3d2923d153b029a7 diff --git a/contrib/libcpuid b/contrib/libcpuid new file mode 160000 index 00000000000..8db3b8d2d32 --- /dev/null +++ b/contrib/libcpuid @@ -0,0 +1 @@ +Subproject commit 8db3b8d2d32d22437f063ce692a1b9bb15e42d18 diff --git 
a/contrib/libcpuid-cmake/CMakeLists.txt b/contrib/libcpuid-cmake/CMakeLists.txt new file mode 100644 index 00000000000..cb28cbd21da --- /dev/null +++ b/contrib/libcpuid-cmake/CMakeLists.txt @@ -0,0 +1,35 @@ +option (ENABLE_CPUID "Enable libcpuid library (only internal)" ${ENABLE_LIBRARIES}) + +if (ARCH_ARM) + set (ENABLE_CPUID 0) +endif () + +if (ENABLE_CPUID) + set (LIBRARY_DIR ${ClickHouse_SOURCE_DIR}/contrib/libcpuid) + + set (SRCS + ${LIBRARY_DIR}/libcpuid/asm-bits.c + ${LIBRARY_DIR}/libcpuid/cpuid_main.c + ${LIBRARY_DIR}/libcpuid/libcpuid_util.c + ${LIBRARY_DIR}/libcpuid/msrdriver.c + ${LIBRARY_DIR}/libcpuid/rdmsr.c + ${LIBRARY_DIR}/libcpuid/rdtsc.c + ${LIBRARY_DIR}/libcpuid/recog_amd.c + ${LIBRARY_DIR}/libcpuid/recog_intel.c + ) + + add_library (cpuid ${SRCS}) + + target_include_directories (cpuid SYSTEM PUBLIC ${LIBRARY_DIR}) + target_compile_definitions (cpuid PUBLIC USE_CPUID=1) + target_compile_definitions (cpuid PRIVATE VERSION="v0.4.1") + if (COMPILER_CLANG) + target_compile_options (cpuid PRIVATE -Wno-reserved-id-macro) + endif () + + message (STATUS "Using cpuid") +else () + add_library (cpuid INTERFACE) + + target_compile_definitions (cpuid INTERFACE USE_CPUID=0) +endif () diff --git a/contrib/libcpuid/CMakeLists.txt b/contrib/libcpuid/CMakeLists.txt deleted file mode 100644 index cd3e7fa06fe..00000000000 --- a/contrib/libcpuid/CMakeLists.txt +++ /dev/null @@ -1,20 +0,0 @@ -add_library(cpuid -include/libcpuid/asm-bits.c -include/libcpuid/cpuid_main.c -include/libcpuid/libcpuid_util.c -include/libcpuid/rdtsc.c -include/libcpuid/recog_amd.c -include/libcpuid/recog_intel.c - -include/libcpuid/asm-bits.h -include/libcpuid/config.h -include/libcpuid/libcpuid_constants.h -include/libcpuid/libcpuid.h -include/libcpuid/libcpuid_types.h -include/libcpuid/libcpuid_util.h -include/libcpuid/rdtsc.h -include/libcpuid/recog_amd.h -include/libcpuid/recog_intel.h -) - -target_include_directories (cpuid SYSTEM PUBLIC include) diff --git a/contrib/libcpuid/COPYING b/contrib/libcpuid/COPYING deleted file mode 100644 index bf4755c203f..00000000000 --- a/contrib/libcpuid/COPYING +++ /dev/null @@ -1,23 +0,0 @@ -Copyright 2008 Veselin Georgiev, -anrieffNOSPAM @ mgail_DOT.com (convert to gmail) - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions -are met: - -1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. -2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR -IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES -OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. -IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, -INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT -NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF -THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
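[Editor's note on the libcpuid hunk above] The new contrib/libcpuid-cmake/CMakeLists.txt always exposes a cpuid target and advertises availability through the USE_CPUID compile definition (1 for the internal library, 0 for the empty INTERFACE stub) — the same pattern the jemalloc hunk uses with USE_JEMALLOC. Below is a hedged sketch of a consumer keyed off that definition; it calls the public libcpuid entry points (cpuid_present, cpuid_get_raw_data, cpu_identify, cpuid_error), the include path assumes the PUBLIC include directory set in the hunk, and the fallback branch is purely illustrative.

/* Sketch: consuming the cpuid target guarded by the USE_CPUID definition
 * exported by the CMake hunk above (1 = internal libcpuid, 0 = stub). */
#include <stdio.h>

#if USE_CPUID
#    include <libcpuid/libcpuid.h>
#endif

static void print_cpu_brand(void)
{
#if USE_CPUID
    struct cpu_raw_data_t raw;
    struct cpu_id_t data;
    if (cpuid_present() && cpuid_get_raw_data(&raw) == 0 && cpu_identify(&raw, &data) == 0)
        printf("CPU: %s, %d core(s)\n", data.brand_str, data.num_cores);
    else
        printf("CPU identification failed: %s\n", cpuid_error());
#else
    printf("CPU: built without libcpuid\n");
#endif
}

int main(void)
{
    print_cpu_brand();
    return 0;
}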
diff --git a/contrib/libcpuid/README.txt b/contrib/libcpuid/README.txt deleted file mode 100644 index a6817683f8b..00000000000 --- a/contrib/libcpuid/README.txt +++ /dev/null @@ -1 +0,0 @@ -https://github.com/anrieff/libcpuid.git diff --git a/contrib/libcpuid/include/libcpuid/amd_code_t.h b/contrib/libcpuid/include/libcpuid/amd_code_t.h deleted file mode 100644 index 2472a3d61d5..00000000000 --- a/contrib/libcpuid/include/libcpuid/amd_code_t.h +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Copyright 2016 Veselin Georgiev, - * anrieffNOSPAM @ mgail_DOT.com (convert to gmail) - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -/* - * This file contains a list of internal codes we use in detection. It is - * of no external use and isn't a complete list of AMD products. - */ - CODE2(OPTERON_GENERIC, 1000), - CODE(OPTERON_800), - CODE(ATHLON_XP), - CODE(ATHLON_XP_M), - CODE(ATHLON_XP_M_LV), - CODE(ATHLON), - CODE(ATHLON_MP), - CODE(MOBILE_ATHLON64), - CODE(ATHLON_FX), - CODE(DURON), - CODE(DURON_MP), - CODE(MOBILE_DURON), - CODE(MOBILE_SEMPRON), - CODE(OPTERON_SINGLE), - CODE(OPTERON_DUALCORE), - CODE(OPTERON_800_DUALCORE), - CODE(MOBILE_TURION), - CODE(ATHLON_64), - CODE(ATHLON_64_FX), - CODE(TURION_64), - CODE(TURION_X2), - CODE(SEMPRON), - CODE(M_SEMPRON), - CODE(SEMPRON_DUALCORE), - CODE(PHENOM), - CODE(PHENOM2), - CODE(ATHLON_64_X2), - CODE(ATHLON_64_X3), - CODE(ATHLON_64_X4), - CODE(FUSION_C), - CODE(FUSION_E), - CODE(FUSION_EA), - CODE(FUSION_Z), - CODE(FUSION_A), - diff --git a/contrib/libcpuid/include/libcpuid/asm-bits.c b/contrib/libcpuid/include/libcpuid/asm-bits.c deleted file mode 100644 index b8e32284f57..00000000000 --- a/contrib/libcpuid/include/libcpuid/asm-bits.c +++ /dev/null @@ -1,825 +0,0 @@ -/* - * Copyright 2008 Veselin Georgiev, - * anrieffNOSPAM @ mgail_DOT.com (convert to gmail) - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. 
Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include "libcpuid.h" -#include "asm-bits.h" - -int cpuid_exists_by_eflags(void) -{ -#if defined(PLATFORM_X64) - return 1; /* CPUID is always present on the x86_64 */ -#elif defined(PLATFORM_X86) -# if defined(COMPILER_GCC) - int result; - __asm __volatile( - " pushfl\n" - " pop %%eax\n" - " mov %%eax, %%ecx\n" - " xor $0x200000, %%eax\n" - " push %%eax\n" - " popfl\n" - " pushfl\n" - " pop %%eax\n" - " xor %%ecx, %%eax\n" - " mov %%eax, %0\n" - " push %%ecx\n" - " popfl\n" - : "=m"(result) - : :"eax", "ecx", "memory"); - return (result != 0); -# elif defined(COMPILER_MICROSOFT) - int result; - __asm { - pushfd - pop eax - mov ecx, eax - xor eax, 0x200000 - push eax - popfd - pushfd - pop eax - xor eax, ecx - mov result, eax - push ecx - popfd - }; - return (result != 0); -# else - return 0; -# endif /* COMPILER_MICROSOFT */ -#else - return 0; -#endif /* PLATFORM_X86 */ -} - -#ifdef INLINE_ASM_SUPPORTED -/* - * with MSVC/AMD64, the exec_cpuid() and cpu_rdtsc() functions - * are implemented in separate .asm files. 
Otherwise, use inline assembly - */ -void exec_cpuid(uint32_t *regs) -{ -#ifdef COMPILER_GCC -# ifdef PLATFORM_X64 - __asm __volatile( - " mov %0, %%rdi\n" - - " push %%rbx\n" - " push %%rcx\n" - " push %%rdx\n" - - " mov (%%rdi), %%eax\n" - " mov 4(%%rdi), %%ebx\n" - " mov 8(%%rdi), %%ecx\n" - " mov 12(%%rdi), %%edx\n" - - " cpuid\n" - - " movl %%eax, (%%rdi)\n" - " movl %%ebx, 4(%%rdi)\n" - " movl %%ecx, 8(%%rdi)\n" - " movl %%edx, 12(%%rdi)\n" - " pop %%rdx\n" - " pop %%rcx\n" - " pop %%rbx\n" - : - :"m"(regs) - :"memory", "eax", "rdi" - ); -# else - __asm __volatile( - " mov %0, %%edi\n" - - " push %%ebx\n" - " push %%ecx\n" - " push %%edx\n" - - " mov (%%edi), %%eax\n" - " mov 4(%%edi), %%ebx\n" - " mov 8(%%edi), %%ecx\n" - " mov 12(%%edi), %%edx\n" - - " cpuid\n" - - " mov %%eax, (%%edi)\n" - " mov %%ebx, 4(%%edi)\n" - " mov %%ecx, 8(%%edi)\n" - " mov %%edx, 12(%%edi)\n" - " pop %%edx\n" - " pop %%ecx\n" - " pop %%ebx\n" - : - :"m"(regs) - :"memory", "eax", "edi" - ); -# endif /* COMPILER_GCC */ -#else -# ifdef COMPILER_MICROSOFT - __asm { - push ebx - push ecx - push edx - push edi - mov edi, regs - - mov eax, [edi] - mov ebx, [edi+4] - mov ecx, [edi+8] - mov edx, [edi+12] - - cpuid - - mov [edi], eax - mov [edi+4], ebx - mov [edi+8], ecx - mov [edi+12], edx - - pop edi - pop edx - pop ecx - pop ebx - } -# else -# error "Unsupported compiler" -# endif /* COMPILER_MICROSOFT */ -#endif -} -#endif /* INLINE_ASSEMBLY_SUPPORTED */ - -#ifdef INLINE_ASM_SUPPORTED -void cpu_rdtsc(uint64_t* result) -{ - uint32_t low_part, hi_part; -#ifdef COMPILER_GCC - __asm __volatile ( - " rdtsc\n" - " mov %%eax, %0\n" - " mov %%edx, %1\n" - :"=m"(low_part), "=m"(hi_part)::"memory", "eax", "edx" - ); -#else -# ifdef COMPILER_MICROSOFT - __asm { - rdtsc - mov low_part, eax - mov hi_part, edx - }; -# else -# error "Unsupported compiler" -# endif /* COMPILER_MICROSOFT */ -#endif /* COMPILER_GCC */ - *result = (uint64_t)low_part + (((uint64_t) hi_part) << 32); -} -#endif /* INLINE_ASM_SUPPORTED */ - -#ifdef INLINE_ASM_SUPPORTED -void busy_sse_loop(int cycles) -{ -#ifdef COMPILER_GCC -#ifndef __APPLE__ -# define XALIGN ".balign 16\n" -#else -# define XALIGN ".align 4\n" -#endif - __asm __volatile ( - " xorps %%xmm0, %%xmm0\n" - " xorps %%xmm1, %%xmm1\n" - " xorps %%xmm2, %%xmm2\n" - " xorps %%xmm3, %%xmm3\n" - " xorps %%xmm4, %%xmm4\n" - " xorps %%xmm5, %%xmm5\n" - " xorps %%xmm6, %%xmm6\n" - " xorps %%xmm7, %%xmm7\n" - XALIGN - /* ".bsLoop:\n" */ - "1:\n" - // 0: - " addps %%xmm1, %%xmm0\n" - " addps %%xmm2, %%xmm1\n" - " addps %%xmm3, %%xmm2\n" - " addps %%xmm4, %%xmm3\n" - " addps %%xmm5, %%xmm4\n" - " addps %%xmm6, %%xmm5\n" - " addps %%xmm7, %%xmm6\n" - " addps %%xmm0, %%xmm7\n" - // 1: - " addps %%xmm1, %%xmm0\n" - " addps %%xmm2, %%xmm1\n" - " addps %%xmm3, %%xmm2\n" - " addps %%xmm4, %%xmm3\n" - " addps %%xmm5, %%xmm4\n" - " addps %%xmm6, %%xmm5\n" - " addps %%xmm7, %%xmm6\n" - " addps %%xmm0, %%xmm7\n" - // 2: - " addps %%xmm1, %%xmm0\n" - " addps %%xmm2, %%xmm1\n" - " addps %%xmm3, %%xmm2\n" - " addps %%xmm4, %%xmm3\n" - " addps %%xmm5, %%xmm4\n" - " addps %%xmm6, %%xmm5\n" - " addps %%xmm7, %%xmm6\n" - " addps %%xmm0, %%xmm7\n" - // 3: - " addps %%xmm1, %%xmm0\n" - " addps %%xmm2, %%xmm1\n" - " addps %%xmm3, %%xmm2\n" - " addps %%xmm4, %%xmm3\n" - " addps %%xmm5, %%xmm4\n" - " addps %%xmm6, %%xmm5\n" - " addps %%xmm7, %%xmm6\n" - " addps %%xmm0, %%xmm7\n" - // 4: - " addps %%xmm1, %%xmm0\n" - " addps %%xmm2, %%xmm1\n" - " addps %%xmm3, %%xmm2\n" - " addps %%xmm4, %%xmm3\n" - " addps %%xmm5, 
%%xmm4\n" - " addps %%xmm6, %%xmm5\n" - " addps %%xmm7, %%xmm6\n" - " addps %%xmm0, %%xmm7\n" - // 5: - " addps %%xmm1, %%xmm0\n" - " addps %%xmm2, %%xmm1\n" - " addps %%xmm3, %%xmm2\n" - " addps %%xmm4, %%xmm3\n" - " addps %%xmm5, %%xmm4\n" - " addps %%xmm6, %%xmm5\n" - " addps %%xmm7, %%xmm6\n" - " addps %%xmm0, %%xmm7\n" - // 6: - " addps %%xmm1, %%xmm0\n" - " addps %%xmm2, %%xmm1\n" - " addps %%xmm3, %%xmm2\n" - " addps %%xmm4, %%xmm3\n" - " addps %%xmm5, %%xmm4\n" - " addps %%xmm6, %%xmm5\n" - " addps %%xmm7, %%xmm6\n" - " addps %%xmm0, %%xmm7\n" - // 7: - " addps %%xmm1, %%xmm0\n" - " addps %%xmm2, %%xmm1\n" - " addps %%xmm3, %%xmm2\n" - " addps %%xmm4, %%xmm3\n" - " addps %%xmm5, %%xmm4\n" - " addps %%xmm6, %%xmm5\n" - " addps %%xmm7, %%xmm6\n" - " addps %%xmm0, %%xmm7\n" - // 8: - " addps %%xmm1, %%xmm0\n" - " addps %%xmm2, %%xmm1\n" - " addps %%xmm3, %%xmm2\n" - " addps %%xmm4, %%xmm3\n" - " addps %%xmm5, %%xmm4\n" - " addps %%xmm6, %%xmm5\n" - " addps %%xmm7, %%xmm6\n" - " addps %%xmm0, %%xmm7\n" - // 9: - " addps %%xmm1, %%xmm0\n" - " addps %%xmm2, %%xmm1\n" - " addps %%xmm3, %%xmm2\n" - " addps %%xmm4, %%xmm3\n" - " addps %%xmm5, %%xmm4\n" - " addps %%xmm6, %%xmm5\n" - " addps %%xmm7, %%xmm6\n" - " addps %%xmm0, %%xmm7\n" - //10: - " addps %%xmm1, %%xmm0\n" - " addps %%xmm2, %%xmm1\n" - " addps %%xmm3, %%xmm2\n" - " addps %%xmm4, %%xmm3\n" - " addps %%xmm5, %%xmm4\n" - " addps %%xmm6, %%xmm5\n" - " addps %%xmm7, %%xmm6\n" - " addps %%xmm0, %%xmm7\n" - //11: - " addps %%xmm1, %%xmm0\n" - " addps %%xmm2, %%xmm1\n" - " addps %%xmm3, %%xmm2\n" - " addps %%xmm4, %%xmm3\n" - " addps %%xmm5, %%xmm4\n" - " addps %%xmm6, %%xmm5\n" - " addps %%xmm7, %%xmm6\n" - " addps %%xmm0, %%xmm7\n" - //12: - " addps %%xmm1, %%xmm0\n" - " addps %%xmm2, %%xmm1\n" - " addps %%xmm3, %%xmm2\n" - " addps %%xmm4, %%xmm3\n" - " addps %%xmm5, %%xmm4\n" - " addps %%xmm6, %%xmm5\n" - " addps %%xmm7, %%xmm6\n" - " addps %%xmm0, %%xmm7\n" - //13: - " addps %%xmm1, %%xmm0\n" - " addps %%xmm2, %%xmm1\n" - " addps %%xmm3, %%xmm2\n" - " addps %%xmm4, %%xmm3\n" - " addps %%xmm5, %%xmm4\n" - " addps %%xmm6, %%xmm5\n" - " addps %%xmm7, %%xmm6\n" - " addps %%xmm0, %%xmm7\n" - //14: - " addps %%xmm1, %%xmm0\n" - " addps %%xmm2, %%xmm1\n" - " addps %%xmm3, %%xmm2\n" - " addps %%xmm4, %%xmm3\n" - " addps %%xmm5, %%xmm4\n" - " addps %%xmm6, %%xmm5\n" - " addps %%xmm7, %%xmm6\n" - " addps %%xmm0, %%xmm7\n" - //15: - " addps %%xmm1, %%xmm0\n" - " addps %%xmm2, %%xmm1\n" - " addps %%xmm3, %%xmm2\n" - " addps %%xmm4, %%xmm3\n" - " addps %%xmm5, %%xmm4\n" - " addps %%xmm6, %%xmm5\n" - " addps %%xmm7, %%xmm6\n" - " addps %%xmm0, %%xmm7\n" - //16: - " addps %%xmm1, %%xmm0\n" - " addps %%xmm2, %%xmm1\n" - " addps %%xmm3, %%xmm2\n" - " addps %%xmm4, %%xmm3\n" - " addps %%xmm5, %%xmm4\n" - " addps %%xmm6, %%xmm5\n" - " addps %%xmm7, %%xmm6\n" - " addps %%xmm0, %%xmm7\n" - //17: - " addps %%xmm1, %%xmm0\n" - " addps %%xmm2, %%xmm1\n" - " addps %%xmm3, %%xmm2\n" - " addps %%xmm4, %%xmm3\n" - " addps %%xmm5, %%xmm4\n" - " addps %%xmm6, %%xmm5\n" - " addps %%xmm7, %%xmm6\n" - " addps %%xmm0, %%xmm7\n" - //18: - " addps %%xmm1, %%xmm0\n" - " addps %%xmm2, %%xmm1\n" - " addps %%xmm3, %%xmm2\n" - " addps %%xmm4, %%xmm3\n" - " addps %%xmm5, %%xmm4\n" - " addps %%xmm6, %%xmm5\n" - " addps %%xmm7, %%xmm6\n" - " addps %%xmm0, %%xmm7\n" - //19: - " addps %%xmm1, %%xmm0\n" - " addps %%xmm2, %%xmm1\n" - " addps %%xmm3, %%xmm2\n" - " addps %%xmm4, %%xmm3\n" - " addps %%xmm5, %%xmm4\n" - " addps %%xmm6, %%xmm5\n" - " addps %%xmm7, %%xmm6\n" - " 
addps %%xmm0, %%xmm7\n" - //20: - " addps %%xmm1, %%xmm0\n" - " addps %%xmm2, %%xmm1\n" - " addps %%xmm3, %%xmm2\n" - " addps %%xmm4, %%xmm3\n" - " addps %%xmm5, %%xmm4\n" - " addps %%xmm6, %%xmm5\n" - " addps %%xmm7, %%xmm6\n" - " addps %%xmm0, %%xmm7\n" - //21: - " addps %%xmm1, %%xmm0\n" - " addps %%xmm2, %%xmm1\n" - " addps %%xmm3, %%xmm2\n" - " addps %%xmm4, %%xmm3\n" - " addps %%xmm5, %%xmm4\n" - " addps %%xmm6, %%xmm5\n" - " addps %%xmm7, %%xmm6\n" - " addps %%xmm0, %%xmm7\n" - //22: - " addps %%xmm1, %%xmm0\n" - " addps %%xmm2, %%xmm1\n" - " addps %%xmm3, %%xmm2\n" - " addps %%xmm4, %%xmm3\n" - " addps %%xmm5, %%xmm4\n" - " addps %%xmm6, %%xmm5\n" - " addps %%xmm7, %%xmm6\n" - " addps %%xmm0, %%xmm7\n" - //23: - " addps %%xmm1, %%xmm0\n" - " addps %%xmm2, %%xmm1\n" - " addps %%xmm3, %%xmm2\n" - " addps %%xmm4, %%xmm3\n" - " addps %%xmm5, %%xmm4\n" - " addps %%xmm6, %%xmm5\n" - " addps %%xmm7, %%xmm6\n" - " addps %%xmm0, %%xmm7\n" - //24: - " addps %%xmm1, %%xmm0\n" - " addps %%xmm2, %%xmm1\n" - " addps %%xmm3, %%xmm2\n" - " addps %%xmm4, %%xmm3\n" - " addps %%xmm5, %%xmm4\n" - " addps %%xmm6, %%xmm5\n" - " addps %%xmm7, %%xmm6\n" - " addps %%xmm0, %%xmm7\n" - //25: - " addps %%xmm1, %%xmm0\n" - " addps %%xmm2, %%xmm1\n" - " addps %%xmm3, %%xmm2\n" - " addps %%xmm4, %%xmm3\n" - " addps %%xmm5, %%xmm4\n" - " addps %%xmm6, %%xmm5\n" - " addps %%xmm7, %%xmm6\n" - " addps %%xmm0, %%xmm7\n" - //26: - " addps %%xmm1, %%xmm0\n" - " addps %%xmm2, %%xmm1\n" - " addps %%xmm3, %%xmm2\n" - " addps %%xmm4, %%xmm3\n" - " addps %%xmm5, %%xmm4\n" - " addps %%xmm6, %%xmm5\n" - " addps %%xmm7, %%xmm6\n" - " addps %%xmm0, %%xmm7\n" - //27: - " addps %%xmm1, %%xmm0\n" - " addps %%xmm2, %%xmm1\n" - " addps %%xmm3, %%xmm2\n" - " addps %%xmm4, %%xmm3\n" - " addps %%xmm5, %%xmm4\n" - " addps %%xmm6, %%xmm5\n" - " addps %%xmm7, %%xmm6\n" - " addps %%xmm0, %%xmm7\n" - //28: - " addps %%xmm1, %%xmm0\n" - " addps %%xmm2, %%xmm1\n" - " addps %%xmm3, %%xmm2\n" - " addps %%xmm4, %%xmm3\n" - " addps %%xmm5, %%xmm4\n" - " addps %%xmm6, %%xmm5\n" - " addps %%xmm7, %%xmm6\n" - " addps %%xmm0, %%xmm7\n" - //29: - " addps %%xmm1, %%xmm0\n" - " addps %%xmm2, %%xmm1\n" - " addps %%xmm3, %%xmm2\n" - " addps %%xmm4, %%xmm3\n" - " addps %%xmm5, %%xmm4\n" - " addps %%xmm6, %%xmm5\n" - " addps %%xmm7, %%xmm6\n" - " addps %%xmm0, %%xmm7\n" - //30: - " addps %%xmm1, %%xmm0\n" - " addps %%xmm2, %%xmm1\n" - " addps %%xmm3, %%xmm2\n" - " addps %%xmm4, %%xmm3\n" - " addps %%xmm5, %%xmm4\n" - " addps %%xmm6, %%xmm5\n" - " addps %%xmm7, %%xmm6\n" - " addps %%xmm0, %%xmm7\n" - //31: - " addps %%xmm1, %%xmm0\n" - " addps %%xmm2, %%xmm1\n" - " addps %%xmm3, %%xmm2\n" - " addps %%xmm4, %%xmm3\n" - " addps %%xmm5, %%xmm4\n" - " addps %%xmm6, %%xmm5\n" - " addps %%xmm7, %%xmm6\n" - " addps %%xmm0, %%xmm7\n" - - " dec %%eax\n" - /* "jnz .bsLoop\n" */ - " jnz 1b\n" - ::"a"(cycles) - ); -#else -# ifdef COMPILER_MICROSOFT - __asm { - mov eax, cycles - xorps xmm0, xmm0 - xorps xmm1, xmm1 - xorps xmm2, xmm2 - xorps xmm3, xmm3 - xorps xmm4, xmm4 - xorps xmm5, xmm5 - xorps xmm6, xmm6 - xorps xmm7, xmm7 - //-- - align 16 -bsLoop: - // 0: - addps xmm0, xmm1 - addps xmm1, xmm2 - addps xmm2, xmm3 - addps xmm3, xmm4 - addps xmm4, xmm5 - addps xmm5, xmm6 - addps xmm6, xmm7 - addps xmm7, xmm0 - // 1: - addps xmm0, xmm1 - addps xmm1, xmm2 - addps xmm2, xmm3 - addps xmm3, xmm4 - addps xmm4, xmm5 - addps xmm5, xmm6 - addps xmm6, xmm7 - addps xmm7, xmm0 - // 2: - addps xmm0, xmm1 - addps xmm1, xmm2 - addps xmm2, xmm3 - addps xmm3, xmm4 - addps xmm4, xmm5 - 
addps xmm5, xmm6 - addps xmm6, xmm7 - addps xmm7, xmm0 - // 3: - addps xmm0, xmm1 - addps xmm1, xmm2 - addps xmm2, xmm3 - addps xmm3, xmm4 - addps xmm4, xmm5 - addps xmm5, xmm6 - addps xmm6, xmm7 - addps xmm7, xmm0 - // 4: - addps xmm0, xmm1 - addps xmm1, xmm2 - addps xmm2, xmm3 - addps xmm3, xmm4 - addps xmm4, xmm5 - addps xmm5, xmm6 - addps xmm6, xmm7 - addps xmm7, xmm0 - // 5: - addps xmm0, xmm1 - addps xmm1, xmm2 - addps xmm2, xmm3 - addps xmm3, xmm4 - addps xmm4, xmm5 - addps xmm5, xmm6 - addps xmm6, xmm7 - addps xmm7, xmm0 - // 6: - addps xmm0, xmm1 - addps xmm1, xmm2 - addps xmm2, xmm3 - addps xmm3, xmm4 - addps xmm4, xmm5 - addps xmm5, xmm6 - addps xmm6, xmm7 - addps xmm7, xmm0 - // 7: - addps xmm0, xmm1 - addps xmm1, xmm2 - addps xmm2, xmm3 - addps xmm3, xmm4 - addps xmm4, xmm5 - addps xmm5, xmm6 - addps xmm6, xmm7 - addps xmm7, xmm0 - // 8: - addps xmm0, xmm1 - addps xmm1, xmm2 - addps xmm2, xmm3 - addps xmm3, xmm4 - addps xmm4, xmm5 - addps xmm5, xmm6 - addps xmm6, xmm7 - addps xmm7, xmm0 - // 9: - addps xmm0, xmm1 - addps xmm1, xmm2 - addps xmm2, xmm3 - addps xmm3, xmm4 - addps xmm4, xmm5 - addps xmm5, xmm6 - addps xmm6, xmm7 - addps xmm7, xmm0 - // 10: - addps xmm0, xmm1 - addps xmm1, xmm2 - addps xmm2, xmm3 - addps xmm3, xmm4 - addps xmm4, xmm5 - addps xmm5, xmm6 - addps xmm6, xmm7 - addps xmm7, xmm0 - // 11: - addps xmm0, xmm1 - addps xmm1, xmm2 - addps xmm2, xmm3 - addps xmm3, xmm4 - addps xmm4, xmm5 - addps xmm5, xmm6 - addps xmm6, xmm7 - addps xmm7, xmm0 - // 12: - addps xmm0, xmm1 - addps xmm1, xmm2 - addps xmm2, xmm3 - addps xmm3, xmm4 - addps xmm4, xmm5 - addps xmm5, xmm6 - addps xmm6, xmm7 - addps xmm7, xmm0 - // 13: - addps xmm0, xmm1 - addps xmm1, xmm2 - addps xmm2, xmm3 - addps xmm3, xmm4 - addps xmm4, xmm5 - addps xmm5, xmm6 - addps xmm6, xmm7 - addps xmm7, xmm0 - // 14: - addps xmm0, xmm1 - addps xmm1, xmm2 - addps xmm2, xmm3 - addps xmm3, xmm4 - addps xmm4, xmm5 - addps xmm5, xmm6 - addps xmm6, xmm7 - addps xmm7, xmm0 - // 15: - addps xmm0, xmm1 - addps xmm1, xmm2 - addps xmm2, xmm3 - addps xmm3, xmm4 - addps xmm4, xmm5 - addps xmm5, xmm6 - addps xmm6, xmm7 - addps xmm7, xmm0 - // 16: - addps xmm0, xmm1 - addps xmm1, xmm2 - addps xmm2, xmm3 - addps xmm3, xmm4 - addps xmm4, xmm5 - addps xmm5, xmm6 - addps xmm6, xmm7 - addps xmm7, xmm0 - // 17: - addps xmm0, xmm1 - addps xmm1, xmm2 - addps xmm2, xmm3 - addps xmm3, xmm4 - addps xmm4, xmm5 - addps xmm5, xmm6 - addps xmm6, xmm7 - addps xmm7, xmm0 - // 18: - addps xmm0, xmm1 - addps xmm1, xmm2 - addps xmm2, xmm3 - addps xmm3, xmm4 - addps xmm4, xmm5 - addps xmm5, xmm6 - addps xmm6, xmm7 - addps xmm7, xmm0 - // 19: - addps xmm0, xmm1 - addps xmm1, xmm2 - addps xmm2, xmm3 - addps xmm3, xmm4 - addps xmm4, xmm5 - addps xmm5, xmm6 - addps xmm6, xmm7 - addps xmm7, xmm0 - // 20: - addps xmm0, xmm1 - addps xmm1, xmm2 - addps xmm2, xmm3 - addps xmm3, xmm4 - addps xmm4, xmm5 - addps xmm5, xmm6 - addps xmm6, xmm7 - addps xmm7, xmm0 - // 21: - addps xmm0, xmm1 - addps xmm1, xmm2 - addps xmm2, xmm3 - addps xmm3, xmm4 - addps xmm4, xmm5 - addps xmm5, xmm6 - addps xmm6, xmm7 - addps xmm7, xmm0 - // 22: - addps xmm0, xmm1 - addps xmm1, xmm2 - addps xmm2, xmm3 - addps xmm3, xmm4 - addps xmm4, xmm5 - addps xmm5, xmm6 - addps xmm6, xmm7 - addps xmm7, xmm0 - // 23: - addps xmm0, xmm1 - addps xmm1, xmm2 - addps xmm2, xmm3 - addps xmm3, xmm4 - addps xmm4, xmm5 - addps xmm5, xmm6 - addps xmm6, xmm7 - addps xmm7, xmm0 - // 24: - addps xmm0, xmm1 - addps xmm1, xmm2 - addps xmm2, xmm3 - addps xmm3, xmm4 - addps xmm4, xmm5 - addps xmm5, xmm6 - 
addps xmm6, xmm7 - addps xmm7, xmm0 - // 25: - addps xmm0, xmm1 - addps xmm1, xmm2 - addps xmm2, xmm3 - addps xmm3, xmm4 - addps xmm4, xmm5 - addps xmm5, xmm6 - addps xmm6, xmm7 - addps xmm7, xmm0 - // 26: - addps xmm0, xmm1 - addps xmm1, xmm2 - addps xmm2, xmm3 - addps xmm3, xmm4 - addps xmm4, xmm5 - addps xmm5, xmm6 - addps xmm6, xmm7 - addps xmm7, xmm0 - // 27: - addps xmm0, xmm1 - addps xmm1, xmm2 - addps xmm2, xmm3 - addps xmm3, xmm4 - addps xmm4, xmm5 - addps xmm5, xmm6 - addps xmm6, xmm7 - addps xmm7, xmm0 - // 28: - addps xmm0, xmm1 - addps xmm1, xmm2 - addps xmm2, xmm3 - addps xmm3, xmm4 - addps xmm4, xmm5 - addps xmm5, xmm6 - addps xmm6, xmm7 - addps xmm7, xmm0 - // 29: - addps xmm0, xmm1 - addps xmm1, xmm2 - addps xmm2, xmm3 - addps xmm3, xmm4 - addps xmm4, xmm5 - addps xmm5, xmm6 - addps xmm6, xmm7 - addps xmm7, xmm0 - // 30: - addps xmm0, xmm1 - addps xmm1, xmm2 - addps xmm2, xmm3 - addps xmm3, xmm4 - addps xmm4, xmm5 - addps xmm5, xmm6 - addps xmm6, xmm7 - addps xmm7, xmm0 - // 31: - addps xmm0, xmm1 - addps xmm1, xmm2 - addps xmm2, xmm3 - addps xmm3, xmm4 - addps xmm4, xmm5 - addps xmm5, xmm6 - addps xmm6, xmm7 - addps xmm7, xmm0 - //---------------------- - dec eax - jnz bsLoop - } -# else -# error "Unsupported compiler" -# endif /* COMPILER_MICROSOFT */ -#endif /* COMPILER_GCC */ -} -#endif /* INLINE_ASSEMBLY_SUPPORTED */ diff --git a/contrib/libcpuid/include/libcpuid/asm-bits.h b/contrib/libcpuid/include/libcpuid/asm-bits.h deleted file mode 100644 index 3a03e11ce8c..00000000000 --- a/contrib/libcpuid/include/libcpuid/asm-bits.h +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Copyright 2008 Veselin Georgiev, - * anrieffNOSPAM @ mgail_DOT.com (convert to gmail) - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
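[Editor's note on the deleted asm-bits.c above] It implements cpu_rdtsc() and busy_sse_loop() as hand-written GCC and MSVC inline assembly. For comparison only, current compilers expose the same timestamp read as an intrinsic; the sketch below is a hedged, x86-only illustration (the intrinsic header differs between MSVC and GCC/Clang) and is not something this PR introduces.

/* Sketch: reading the TSC through a compiler intrinsic instead of the
 * hand-written inline assembly removed above. x86/x86-64 only. */
#include <stdint.h>
#include <stdio.h>

#if defined(_MSC_VER)
#    include <intrin.h>
#else
#    include <x86intrin.h>
#endif

static uint64_t read_tsc(void)
{
    return __rdtsc();  /* equivalent of the deleted cpu_rdtsc(): EDX:EAX as one 64-bit value */
}

int main(void)
{
    uint64_t start = read_tsc();
    volatile unsigned sink = 0;
    for (unsigned i = 0; i < 1000000; ++i)
        sink += i;  /* trivial busy work standing in for the deleted busy_sse_loop() */
    uint64_t end = read_tsc();
    printf("elapsed: %llu cycles (sink=%u)\n", (unsigned long long)(end - start), sink);
    return 0;
}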
- */ -#ifndef __ASM_BITS_H__ -#define __ASM_BITS_H__ -#include "libcpuid.h" - -/* Determine Compiler: */ -#if defined(_MSC_VER) -# define COMPILER_MICROSOFT -#elif defined(__GNUC__) -# define COMPILER_GCC -#endif - -/* Determine Platform */ -#if defined(__x86_64__) || defined(_M_AMD64) -# define PLATFORM_X64 -#elif defined(__i386__) || defined(_M_IX86) -# define PLATFORM_X86 -#endif - -/* Under Windows/AMD64 with MSVC, inline assembly isn't supported */ -#if (defined(COMPILER_GCC) && defined(PLATFORM_X64)) || defined(PLATFORM_X86) -# define INLINE_ASM_SUPPORTED -#endif - -int cpuid_exists_by_eflags(void); -void exec_cpuid(uint32_t *regs); -void busy_sse_loop(int cycles); - -#endif /* __ASM_BITS_H__ */ diff --git a/contrib/libcpuid/include/libcpuid/config.h b/contrib/libcpuid/include/libcpuid/config.h deleted file mode 100644 index 2326cfeede5..00000000000 --- a/contrib/libcpuid/include/libcpuid/config.h +++ /dev/null @@ -1,2 +0,0 @@ -/* Version number of package */ -#define VERSION "0.4.0" diff --git a/contrib/libcpuid/include/libcpuid/cpuid_main.c b/contrib/libcpuid/include/libcpuid/cpuid_main.c deleted file mode 100644 index 02a7cb7ad50..00000000000 --- a/contrib/libcpuid/include/libcpuid/cpuid_main.c +++ /dev/null @@ -1,771 +0,0 @@ -/* - * Copyright 2008 Veselin Georgiev, - * anrieffNOSPAM @ mgail_DOT.com (convert to gmail) - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
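[Editor's note on the deleted asm-bits.h / cpuid_main.c above] They detect CPUID support by toggling EFLAGS bit 21 and execute the instruction through exec_cpuid(). As a hedged aside, GCC and Clang ship a <cpuid.h> helper that performs the same leaf query; the sketch below shows the equivalent vendor-string read and is illustrative only, not part of this diff.

/* Sketch: the leaf-0 query that exec_cpuid()/cpuid_exists_by_eflags()
 * implemented by hand, using GCC/Clang's <cpuid.h>. x86/x86-64 only. */
#include <cpuid.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    unsigned eax = 0, ebx = 0, ecx = 0, edx = 0;
    if (!__get_cpuid(0, &eax, &ebx, &ecx, &edx))
    {
        puts("CPUID not available");
        return 1;
    }
    /* Leaf 0: EBX, EDX, ECX spell the 12-byte vendor string, e.g. "GenuineIntel". */
    char vendor[13];
    memcpy(vendor + 0, &ebx, 4);
    memcpy(vendor + 4, &edx, 4);
    memcpy(vendor + 8, &ecx, 4);
    vendor[12] = '\0';
    printf("max basic leaf: %u, vendor: %s\n", eax, vendor);
    return 0;
}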
- */ -#include "libcpuid.h" -#include "libcpuid_internal.h" -#include "recog_intel.h" -#include "recog_amd.h" -#include "asm-bits.h" -#include "libcpuid_util.h" -//#ifdef HAVE_CONFIG_H // CLICKHOUSE PATCH -#include "config.h" -//#endif // CLICKHOUSE PATCH -#include -#include -#include - -/* Implementation: */ - -static int _libcpiud_errno = ERR_OK; - -int set_error(cpu_error_t err) -{ - _libcpiud_errno = (int) err; - return (int) err; -} - -static void raw_data_t_constructor(struct cpu_raw_data_t* raw) -{ - memset(raw, 0, sizeof(struct cpu_raw_data_t)); -} - -static void cpu_id_t_constructor(struct cpu_id_t* id) -{ - memset(id, 0, sizeof(struct cpu_id_t)); - id->l1_data_cache = id->l1_instruction_cache = id->l2_cache = id->l3_cache = id->l4_cache = -1; - id->l1_assoc = id->l2_assoc = id->l3_assoc = id->l4_assoc = -1; - id->l1_cacheline = id->l2_cacheline = id->l3_cacheline = id->l4_cacheline = -1; - id->sse_size = -1; -} - -static int parse_token(const char* expected_token, const char *token, - const char *value, uint32_t array[][4], int limit, int *recognized) -{ - char format[32]; - int veax, vebx, vecx, vedx; - int index; - - if (*recognized) return 1; /* already recognized */ - if (strncmp(token, expected_token, strlen(expected_token))) return 1; /* not what we search for */ - sprintf(format, "%s[%%d]", expected_token); - *recognized = 1; - if (1 == sscanf(token, format, &index) && index >=0 && index < limit) { - if (4 == sscanf(value, "%x%x%x%x", &veax, &vebx, &vecx, &vedx)) { - array[index][0] = veax; - array[index][1] = vebx; - array[index][2] = vecx; - array[index][3] = vedx; - return 1; - } - } - return 0; -} - -/* get_total_cpus() system specific code: uses OS routines to determine total number of CPUs */ -#ifdef __APPLE__ -#include -#include -#include -#include -static int get_total_cpus(void) -{ - kern_return_t kr; - host_basic_info_data_t basic_info; - host_info_t info = (host_info_t)&basic_info; - host_flavor_t flavor = HOST_BASIC_INFO; - mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT; - kr = host_info(mach_host_self(), flavor, info, &count); - if (kr != KERN_SUCCESS) return 1; - return basic_info.avail_cpus; -} -#define GET_TOTAL_CPUS_DEFINED -#endif - -#ifdef _WIN32 -#include -static int get_total_cpus(void) -{ - SYSTEM_INFO system_info; - GetSystemInfo(&system_info); - return system_info.dwNumberOfProcessors; -} -#define GET_TOTAL_CPUS_DEFINED -#endif - -#if defined linux || defined __linux__ || defined __sun -#include -#include - -static int get_total_cpus(void) -{ - return sysconf(_SC_NPROCESSORS_ONLN); -} -#define GET_TOTAL_CPUS_DEFINED -#endif - -#if defined __FreeBSD__ || defined __OpenBSD__ || defined __NetBSD__ || defined __bsdi__ || defined __QNX__ -#include -#include - -static int get_total_cpus(void) -{ - int mib[2] = { CTL_HW, HW_NCPU }; - int ncpus; - size_t len = sizeof(ncpus); - if (sysctl(mib, 2, &ncpus, &len, (void *) 0, 0) != 0) return 1; - return ncpus; -} -#define GET_TOTAL_CPUS_DEFINED -#endif - -#ifndef GET_TOTAL_CPUS_DEFINED -static int get_total_cpus(void) -{ - static int warning_printed = 0; - if (!warning_printed) { - warning_printed = 1; - warnf("Your system is not supported by libcpuid -- don't know how to detect the\n"); - warnf("total number of CPUs on your system. 
It will be reported as 1.\n"); - printf("Please use cpu_id_t.logical_cpus field instead.\n"); - } - return 1; -} -#endif /* GET_TOTAL_CPUS_DEFINED */ - - -static void load_features_common(struct cpu_raw_data_t* raw, struct cpu_id_t* data) -{ - const struct feature_map_t matchtable_edx1[] = { - { 0, CPU_FEATURE_FPU }, - { 1, CPU_FEATURE_VME }, - { 2, CPU_FEATURE_DE }, - { 3, CPU_FEATURE_PSE }, - { 4, CPU_FEATURE_TSC }, - { 5, CPU_FEATURE_MSR }, - { 6, CPU_FEATURE_PAE }, - { 7, CPU_FEATURE_MCE }, - { 8, CPU_FEATURE_CX8 }, - { 9, CPU_FEATURE_APIC }, - { 11, CPU_FEATURE_SEP }, - { 12, CPU_FEATURE_MTRR }, - { 13, CPU_FEATURE_PGE }, - { 14, CPU_FEATURE_MCA }, - { 15, CPU_FEATURE_CMOV }, - { 16, CPU_FEATURE_PAT }, - { 17, CPU_FEATURE_PSE36 }, - { 19, CPU_FEATURE_CLFLUSH }, - { 23, CPU_FEATURE_MMX }, - { 24, CPU_FEATURE_FXSR }, - { 25, CPU_FEATURE_SSE }, - { 26, CPU_FEATURE_SSE2 }, - { 28, CPU_FEATURE_HT }, - }; - const struct feature_map_t matchtable_ecx1[] = { - { 0, CPU_FEATURE_PNI }, - { 1, CPU_FEATURE_PCLMUL }, - { 3, CPU_FEATURE_MONITOR }, - { 9, CPU_FEATURE_SSSE3 }, - { 12, CPU_FEATURE_FMA3 }, - { 13, CPU_FEATURE_CX16 }, - { 19, CPU_FEATURE_SSE4_1 }, - { 20, CPU_FEATURE_SSE4_2 }, - { 22, CPU_FEATURE_MOVBE }, - { 23, CPU_FEATURE_POPCNT }, - { 25, CPU_FEATURE_AES }, - { 26, CPU_FEATURE_XSAVE }, - { 27, CPU_FEATURE_OSXSAVE }, - { 28, CPU_FEATURE_AVX }, - { 29, CPU_FEATURE_F16C }, - { 30, CPU_FEATURE_RDRAND }, - }; - const struct feature_map_t matchtable_ebx7[] = { - { 3, CPU_FEATURE_BMI1 }, - { 5, CPU_FEATURE_AVX2 }, - { 8, CPU_FEATURE_BMI2 }, - }; - const struct feature_map_t matchtable_edx81[] = { - { 11, CPU_FEATURE_SYSCALL }, - { 27, CPU_FEATURE_RDTSCP }, - { 29, CPU_FEATURE_LM }, - }; - const struct feature_map_t matchtable_ecx81[] = { - { 0, CPU_FEATURE_LAHF_LM }, - }; - const struct feature_map_t matchtable_edx87[] = { - { 8, CPU_FEATURE_CONSTANT_TSC }, - }; - if (raw->basic_cpuid[0][0] >= 1) { - match_features(matchtable_edx1, COUNT_OF(matchtable_edx1), raw->basic_cpuid[1][3], data); - match_features(matchtable_ecx1, COUNT_OF(matchtable_ecx1), raw->basic_cpuid[1][2], data); - } - if (raw->basic_cpuid[0][0] >= 7) { - match_features(matchtable_ebx7, COUNT_OF(matchtable_ebx7), raw->basic_cpuid[7][1], data); - } - if (raw->ext_cpuid[0][0] >= 0x80000001) { - match_features(matchtable_edx81, COUNT_OF(matchtable_edx81), raw->ext_cpuid[1][3], data); - match_features(matchtable_ecx81, COUNT_OF(matchtable_ecx81), raw->ext_cpuid[1][2], data); - } - if (raw->ext_cpuid[0][0] >= 0x80000007) { - match_features(matchtable_edx87, COUNT_OF(matchtable_edx87), raw->ext_cpuid[7][3], data); - } - if (data->flags[CPU_FEATURE_SSE]) { - /* apply guesswork to check if the SSE unit width is 128 bit */ - switch (data->vendor) { - case VENDOR_AMD: - data->sse_size = (data->ext_family >= 16 && data->ext_family != 17) ? 128 : 64; - break; - case VENDOR_INTEL: - data->sse_size = (data->family == 6 && data->ext_model >= 15) ? 
128 : 64; - break; - default: - break; - } - /* leave the CPU_FEATURE_128BIT_SSE_AUTH 0; the advanced per-vendor detection routines - * will set it accordingly if they detect the needed bit */ - } -} - -static cpu_vendor_t cpuid_vendor_identify(const uint32_t *raw_vendor, char *vendor_str) -{ - int i; - cpu_vendor_t vendor = VENDOR_UNKNOWN; - const struct { cpu_vendor_t vendor; char match[16]; } - matchtable[NUM_CPU_VENDORS] = { - /* source: http://www.sandpile.org/ia32/cpuid.htm */ - { VENDOR_INTEL , "GenuineIntel" }, - { VENDOR_AMD , "AuthenticAMD" }, - { VENDOR_CYRIX , "CyrixInstead" }, - { VENDOR_NEXGEN , "NexGenDriven" }, - { VENDOR_TRANSMETA , "GenuineTMx86" }, - { VENDOR_UMC , "UMC UMC UMC " }, - { VENDOR_CENTAUR , "CentaurHauls" }, - { VENDOR_RISE , "RiseRiseRise" }, - { VENDOR_SIS , "SiS SiS SiS " }, - { VENDOR_NSC , "Geode by NSC" }, - }; - - memcpy(vendor_str + 0, &raw_vendor[1], 4); - memcpy(vendor_str + 4, &raw_vendor[3], 4); - memcpy(vendor_str + 8, &raw_vendor[2], 4); - vendor_str[12] = 0; - - /* Determine vendor: */ - for (i = 0; i < NUM_CPU_VENDORS; i++) - if (!strcmp(vendor_str, matchtable[i].match)) { - vendor = matchtable[i].vendor; - break; - } - return vendor; -} - -static int cpuid_basic_identify(struct cpu_raw_data_t* raw, struct cpu_id_t* data) -{ - int i, j, basic, xmodel, xfamily, ext; - char brandstr[64] = {0}; - data->vendor = cpuid_vendor_identify(raw->basic_cpuid[0], data->vendor_str); - - if (data->vendor == VENDOR_UNKNOWN) - return set_error(ERR_CPU_UNKN); - basic = raw->basic_cpuid[0][0]; - if (basic >= 1) { - data->family = (raw->basic_cpuid[1][0] >> 8) & 0xf; - data->model = (raw->basic_cpuid[1][0] >> 4) & 0xf; - data->stepping = raw->basic_cpuid[1][0] & 0xf; - xmodel = (raw->basic_cpuid[1][0] >> 16) & 0xf; - xfamily = (raw->basic_cpuid[1][0] >> 20) & 0xff; - if (data->vendor == VENDOR_AMD && data->family < 0xf) - data->ext_family = data->family; - else - data->ext_family = data->family + xfamily; - data->ext_model = data->model + (xmodel << 4); - } - ext = raw->ext_cpuid[0][0] - 0x8000000; - - /* obtain the brand string, if present: */ - if (ext >= 4) { - for (i = 0; i < 3; i++) - for (j = 0; j < 4; j++) - memcpy(brandstr + i * 16 + j * 4, - &raw->ext_cpuid[2 + i][j], 4); - brandstr[48] = 0; - i = 0; - while (brandstr[i] == ' ') i++; - strncpy(data->brand_str, brandstr + i, sizeof(data->brand_str)); - data->brand_str[48] = 0; - } - load_features_common(raw, data); - data->total_logical_cpus = get_total_cpus(); - return set_error(ERR_OK); -} - -static void make_list_from_string(const char* csv, struct cpu_list_t* list) -{ - int i, n, l, last; - l = (int) strlen(csv); - n = 0; - for (i = 0; i < l; i++) if (csv[i] == ',') n++; - n++; - list->num_entries = n; - list->names = (char**) malloc(sizeof(char*) * n); - last = -1; - n = 0; - for (i = 0; i <= l; i++) if (i == l || csv[i] == ',') { - list->names[n] = (char*) malloc(i - last); - memcpy(list->names[n], &csv[last + 1], i - last - 1); - list->names[n][i - last - 1] = '\0'; - n++; - last = i; - } -} - - -/* Interface: */ - -int cpuid_get_total_cpus(void) -{ - return get_total_cpus(); -} - -int cpuid_present(void) -{ - return cpuid_exists_by_eflags(); -} - -void cpu_exec_cpuid(uint32_t eax, uint32_t* regs) -{ - regs[0] = eax; - regs[1] = regs[2] = regs[3] = 0; - exec_cpuid(regs); -} - -void cpu_exec_cpuid_ext(uint32_t* regs) -{ - exec_cpuid(regs); -} - -int cpuid_get_raw_data(struct cpu_raw_data_t* data) -{ - unsigned i; - if (!cpuid_present()) - return set_error(ERR_NO_CPUID); - for (i = 0; i < 32; i++) - 
cpu_exec_cpuid(i, data->basic_cpuid[i]); - for (i = 0; i < 32; i++) - cpu_exec_cpuid(0x80000000 + i, data->ext_cpuid[i]); - for (i = 0; i < MAX_INTELFN4_LEVEL; i++) { - memset(data->intel_fn4[i], 0, sizeof(data->intel_fn4[i])); - data->intel_fn4[i][0] = 4; - data->intel_fn4[i][2] = i; - cpu_exec_cpuid_ext(data->intel_fn4[i]); - } - for (i = 0; i < MAX_INTELFN11_LEVEL; i++) { - memset(data->intel_fn11[i], 0, sizeof(data->intel_fn11[i])); - data->intel_fn11[i][0] = 11; - data->intel_fn11[i][2] = i; - cpu_exec_cpuid_ext(data->intel_fn11[i]); - } - for (i = 0; i < MAX_INTELFN12H_LEVEL; i++) { - memset(data->intel_fn12h[i], 0, sizeof(data->intel_fn12h[i])); - data->intel_fn12h[i][0] = 0x12; - data->intel_fn12h[i][2] = i; - cpu_exec_cpuid_ext(data->intel_fn12h[i]); - } - for (i = 0; i < MAX_INTELFN14H_LEVEL; i++) { - memset(data->intel_fn14h[i], 0, sizeof(data->intel_fn14h[i])); - data->intel_fn14h[i][0] = 0x14; - data->intel_fn14h[i][2] = i; - cpu_exec_cpuid_ext(data->intel_fn14h[i]); - } - return set_error(ERR_OK); -} - -int cpuid_serialize_raw_data(struct cpu_raw_data_t* data, const char* filename) -{ - int i; - FILE *f; - - if (!strcmp(filename, "")) - f = stdout; - else - f = fopen(filename, "wt"); - if (!f) return set_error(ERR_OPEN); - - fprintf(f, "version=%s\n", VERSION); - for (i = 0; i < MAX_CPUID_LEVEL; i++) - fprintf(f, "basic_cpuid[%d]=%08x %08x %08x %08x\n", i, - data->basic_cpuid[i][0], data->basic_cpuid[i][1], - data->basic_cpuid[i][2], data->basic_cpuid[i][3]); - for (i = 0; i < MAX_EXT_CPUID_LEVEL; i++) - fprintf(f, "ext_cpuid[%d]=%08x %08x %08x %08x\n", i, - data->ext_cpuid[i][0], data->ext_cpuid[i][1], - data->ext_cpuid[i][2], data->ext_cpuid[i][3]); - for (i = 0; i < MAX_INTELFN4_LEVEL; i++) - fprintf(f, "intel_fn4[%d]=%08x %08x %08x %08x\n", i, - data->intel_fn4[i][0], data->intel_fn4[i][1], - data->intel_fn4[i][2], data->intel_fn4[i][3]); - for (i = 0; i < MAX_INTELFN11_LEVEL; i++) - fprintf(f, "intel_fn11[%d]=%08x %08x %08x %08x\n", i, - data->intel_fn11[i][0], data->intel_fn11[i][1], - data->intel_fn11[i][2], data->intel_fn11[i][3]); - for (i = 0; i < MAX_INTELFN12H_LEVEL; i++) - fprintf(f, "intel_fn12h[%d]=%08x %08x %08x %08x\n", i, - data->intel_fn12h[i][0], data->intel_fn12h[i][1], - data->intel_fn12h[i][2], data->intel_fn12h[i][3]); - for (i = 0; i < MAX_INTELFN14H_LEVEL; i++) - fprintf(f, "intel_fn14h[%d]=%08x %08x %08x %08x\n", i, - data->intel_fn14h[i][0], data->intel_fn14h[i][1], - data->intel_fn14h[i][2], data->intel_fn14h[i][3]); - - if (strcmp(filename, "")) - fclose(f); - return set_error(ERR_OK); -} - -int cpuid_deserialize_raw_data(struct cpu_raw_data_t* data, const char* filename) -{ - int i, len; - char line[100]; - char token[100]; - char *value; - int syntax; - int cur_line = 0; - int recognized; - FILE *f; - - raw_data_t_constructor(data); - - if (!strcmp(filename, "")) - f = stdin; - else - f = fopen(filename, "rt"); - if (!f) return set_error(ERR_OPEN); - while (fgets(line, sizeof(line), f)) { - ++cur_line; - len = (int) strlen(line); - if (len < 2) continue; - if (line[len - 1] == '\n') - line[--len] = '\0'; - for (i = 0; i < len && line[i] != '='; i++) - if (i >= len && i < 1 && len - i - 1 <= 0) { - fclose(f); - return set_error(ERR_BADFMT); - } - strncpy(token, line, i); - token[i] = '\0'; - value = &line[i + 1]; - /* try to recognize the line */ - recognized = 0; - if (!strcmp(token, "version") || !strcmp(token, "build_date")) { - recognized = 1; - } - syntax = 1; - syntax = syntax && parse_token("basic_cpuid", token, value, data->basic_cpuid, 
MAX_CPUID_LEVEL, &recognized); - syntax = syntax && parse_token("ext_cpuid", token, value, data->ext_cpuid, MAX_EXT_CPUID_LEVEL, &recognized); - syntax = syntax && parse_token("intel_fn4", token, value, data->intel_fn4, MAX_INTELFN4_LEVEL, &recognized); - syntax = syntax && parse_token("intel_fn11", token, value, data->intel_fn11, MAX_INTELFN11_LEVEL, &recognized); - syntax = syntax && parse_token("intel_fn12h", token, value, data->intel_fn12h, MAX_INTELFN12H_LEVEL, &recognized); - syntax = syntax && parse_token("intel_fn14h", token, value, data->intel_fn14h, MAX_INTELFN14H_LEVEL, &recognized); - if (!syntax) { - warnf("Error: %s:%d: Syntax error\n", filename, cur_line); - fclose(f); - return set_error(ERR_BADFMT); - } - if (!recognized) { - warnf("Warning: %s:%d not understood!\n", filename, cur_line); - } - } - - if (strcmp(filename, "")) - fclose(f); - return set_error(ERR_OK); -} - -int cpu_ident_internal(struct cpu_raw_data_t* raw, struct cpu_id_t* data, struct internal_id_info_t* internal) -{ - int r; - struct cpu_raw_data_t myraw; - if (!raw) { - if ((r = cpuid_get_raw_data(&myraw)) < 0) - return set_error(r); - raw = &myraw; - } - cpu_id_t_constructor(data); - if ((r = cpuid_basic_identify(raw, data)) < 0) - return set_error(r); - switch (data->vendor) { - case VENDOR_INTEL: - r = cpuid_identify_intel(raw, data, internal); - break; - case VENDOR_AMD: - r = cpuid_identify_amd(raw, data, internal); - break; - default: - break; - } - return set_error(r); -} - -int cpu_identify(struct cpu_raw_data_t* raw, struct cpu_id_t* data) -{ - struct internal_id_info_t throwaway; - return cpu_ident_internal(raw, data, &throwaway); -} - -const char* cpu_feature_str(cpu_feature_t feature) -{ - const struct { cpu_feature_t feature; const char* name; } - matchtable[] = { - { CPU_FEATURE_FPU, "fpu" }, - { CPU_FEATURE_VME, "vme" }, - { CPU_FEATURE_DE, "de" }, - { CPU_FEATURE_PSE, "pse" }, - { CPU_FEATURE_TSC, "tsc" }, - { CPU_FEATURE_MSR, "msr" }, - { CPU_FEATURE_PAE, "pae" }, - { CPU_FEATURE_MCE, "mce" }, - { CPU_FEATURE_CX8, "cx8" }, - { CPU_FEATURE_APIC, "apic" }, - { CPU_FEATURE_MTRR, "mtrr" }, - { CPU_FEATURE_SEP, "sep" }, - { CPU_FEATURE_PGE, "pge" }, - { CPU_FEATURE_MCA, "mca" }, - { CPU_FEATURE_CMOV, "cmov" }, - { CPU_FEATURE_PAT, "pat" }, - { CPU_FEATURE_PSE36, "pse36" }, - { CPU_FEATURE_PN, "pn" }, - { CPU_FEATURE_CLFLUSH, "clflush" }, - { CPU_FEATURE_DTS, "dts" }, - { CPU_FEATURE_ACPI, "acpi" }, - { CPU_FEATURE_MMX, "mmx" }, - { CPU_FEATURE_FXSR, "fxsr" }, - { CPU_FEATURE_SSE, "sse" }, - { CPU_FEATURE_SSE2, "sse2" }, - { CPU_FEATURE_SS, "ss" }, - { CPU_FEATURE_HT, "ht" }, - { CPU_FEATURE_TM, "tm" }, - { CPU_FEATURE_IA64, "ia64" }, - { CPU_FEATURE_PBE, "pbe" }, - { CPU_FEATURE_PNI, "pni" }, - { CPU_FEATURE_PCLMUL, "pclmul" }, - { CPU_FEATURE_DTS64, "dts64" }, - { CPU_FEATURE_MONITOR, "monitor" }, - { CPU_FEATURE_DS_CPL, "ds_cpl" }, - { CPU_FEATURE_VMX, "vmx" }, - { CPU_FEATURE_SMX, "smx" }, - { CPU_FEATURE_EST, "est" }, - { CPU_FEATURE_TM2, "tm2" }, - { CPU_FEATURE_SSSE3, "ssse3" }, - { CPU_FEATURE_CID, "cid" }, - { CPU_FEATURE_CX16, "cx16" }, - { CPU_FEATURE_XTPR, "xtpr" }, - { CPU_FEATURE_PDCM, "pdcm" }, - { CPU_FEATURE_DCA, "dca" }, - { CPU_FEATURE_SSE4_1, "sse4_1" }, - { CPU_FEATURE_SSE4_2, "sse4_2" }, - { CPU_FEATURE_SYSCALL, "syscall" }, - { CPU_FEATURE_XD, "xd" }, - { CPU_FEATURE_X2APIC, "x2apic"}, - { CPU_FEATURE_MOVBE, "movbe" }, - { CPU_FEATURE_POPCNT, "popcnt" }, - { CPU_FEATURE_AES, "aes" }, - { CPU_FEATURE_XSAVE, "xsave" }, - { CPU_FEATURE_OSXSAVE, "osxsave" }, - { 
CPU_FEATURE_AVX, "avx" }, - { CPU_FEATURE_MMXEXT, "mmxext" }, - { CPU_FEATURE_3DNOW, "3dnow" }, - { CPU_FEATURE_3DNOWEXT, "3dnowext" }, - { CPU_FEATURE_NX, "nx" }, - { CPU_FEATURE_FXSR_OPT, "fxsr_opt" }, - { CPU_FEATURE_RDTSCP, "rdtscp" }, - { CPU_FEATURE_LM, "lm" }, - { CPU_FEATURE_LAHF_LM, "lahf_lm" }, - { CPU_FEATURE_CMP_LEGACY, "cmp_legacy" }, - { CPU_FEATURE_SVM, "svm" }, - { CPU_FEATURE_SSE4A, "sse4a" }, - { CPU_FEATURE_MISALIGNSSE, "misalignsse" }, - { CPU_FEATURE_ABM, "abm" }, - { CPU_FEATURE_3DNOWPREFETCH, "3dnowprefetch" }, - { CPU_FEATURE_OSVW, "osvw" }, - { CPU_FEATURE_IBS, "ibs" }, - { CPU_FEATURE_SSE5, "sse5" }, - { CPU_FEATURE_SKINIT, "skinit" }, - { CPU_FEATURE_WDT, "wdt" }, - { CPU_FEATURE_TS, "ts" }, - { CPU_FEATURE_FID, "fid" }, - { CPU_FEATURE_VID, "vid" }, - { CPU_FEATURE_TTP, "ttp" }, - { CPU_FEATURE_TM_AMD, "tm_amd" }, - { CPU_FEATURE_STC, "stc" }, - { CPU_FEATURE_100MHZSTEPS, "100mhzsteps" }, - { CPU_FEATURE_HWPSTATE, "hwpstate" }, - { CPU_FEATURE_CONSTANT_TSC, "constant_tsc" }, - { CPU_FEATURE_XOP, "xop" }, - { CPU_FEATURE_FMA3, "fma3" }, - { CPU_FEATURE_FMA4, "fma4" }, - { CPU_FEATURE_TBM, "tbm" }, - { CPU_FEATURE_F16C, "f16c" }, - { CPU_FEATURE_RDRAND, "rdrand" }, - { CPU_FEATURE_CPB, "cpb" }, - { CPU_FEATURE_APERFMPERF, "aperfmperf" }, - { CPU_FEATURE_PFI, "pfi" }, - { CPU_FEATURE_PA, "pa" }, - { CPU_FEATURE_AVX2, "avx2" }, - { CPU_FEATURE_BMI1, "bmi1" }, - { CPU_FEATURE_BMI2, "bmi2" }, - { CPU_FEATURE_HLE, "hle" }, - { CPU_FEATURE_RTM, "rtm" }, - { CPU_FEATURE_AVX512F, "avx512f" }, - { CPU_FEATURE_AVX512DQ, "avx512dq" }, - { CPU_FEATURE_AVX512PF, "avx512pf" }, - { CPU_FEATURE_AVX512ER, "avx512er" }, - { CPU_FEATURE_AVX512CD, "avx512cd" }, - { CPU_FEATURE_SHA_NI, "sha_ni" }, - { CPU_FEATURE_AVX512BW, "avx512bw" }, - { CPU_FEATURE_AVX512VL, "avx512vl" }, - { CPU_FEATURE_SGX, "sgx" }, - { CPU_FEATURE_RDSEED, "rdseed" }, - { CPU_FEATURE_ADX, "adx" }, - }; - unsigned i, n = COUNT_OF(matchtable); - if (n != NUM_CPU_FEATURES) { - warnf("Warning: incomplete library, feature matchtable size differs from the actual number of features.\n"); - } - for (i = 0; i < n; i++) - if (matchtable[i].feature == feature) - return matchtable[i].name; - return ""; -} - -const char* cpuid_error(void) -{ - const struct { cpu_error_t error; const char *description; } - matchtable[] = { - { ERR_OK , "No error"}, - { ERR_NO_CPUID , "CPUID instruction is not supported"}, - { ERR_NO_RDTSC , "RDTSC instruction is not supported"}, - { ERR_NO_MEM , "Memory allocation failed"}, - { ERR_OPEN , "File open operation failed"}, - { ERR_BADFMT , "Bad file format"}, - { ERR_NOT_IMP , "Not implemented"}, - { ERR_CPU_UNKN , "Unsupported processor"}, - { ERR_NO_RDMSR , "RDMSR instruction is not supported"}, - { ERR_NO_DRIVER, "RDMSR driver error (generic)"}, - { ERR_NO_PERMS , "No permissions to install RDMSR driver"}, - { ERR_EXTRACT , "Cannot extract RDMSR driver (read only media?)"}, - { ERR_HANDLE , "Bad handle"}, - { ERR_INVMSR , "Invalid MSR"}, - { ERR_INVCNB , "Invalid core number"}, - { ERR_HANDLE_R , "Error on handle read"}, - { ERR_INVRANGE , "Invalid given range"}, - }; - unsigned i; - for (i = 0; i < COUNT_OF(matchtable); i++) - if (_libcpiud_errno == matchtable[i].error) - return matchtable[i].description; - return "Unknown error"; -} - - -const char* cpuid_lib_version(void) -{ - return VERSION; -} - -libcpuid_warn_fn_t cpuid_set_warn_function(libcpuid_warn_fn_t new_fn) -{ - libcpuid_warn_fn_t ret = _warn_fun; - _warn_fun = new_fn; - return ret; -} - -void cpuid_set_verbosiness_level(int 
level) -{ - _current_verboselevel = level; -} - -cpu_vendor_t cpuid_get_vendor(void) -{ - static cpu_vendor_t vendor = VENDOR_UNKNOWN; - uint32_t raw_vendor[4]; - char vendor_str[VENDOR_STR_MAX]; - - if(vendor == VENDOR_UNKNOWN) { - if (!cpuid_present()) - set_error(ERR_NO_CPUID); - else { - cpu_exec_cpuid(0, raw_vendor); - vendor = cpuid_vendor_identify(raw_vendor, vendor_str); - } - } - return vendor; -} - -void cpuid_get_cpu_list(cpu_vendor_t vendor, struct cpu_list_t* list) -{ - switch (vendor) { - case VENDOR_INTEL: - cpuid_get_list_intel(list); - break; - case VENDOR_AMD: - cpuid_get_list_amd(list); - break; - case VENDOR_CYRIX: - make_list_from_string("Cx486,Cx5x86,6x86,6x86MX,M II,MediaGX,MediaGXi,MediaGXm", list); - break; - case VENDOR_NEXGEN: - make_list_from_string("Nx586", list); - break; - case VENDOR_TRANSMETA: - make_list_from_string("Crusoe,Efficeon", list); - break; - case VENDOR_UMC: - make_list_from_string("UMC x86 CPU", list); - break; - case VENDOR_CENTAUR: - make_list_from_string("VIA C3,VIA C7,VIA Nano", list); - break; - case VENDOR_RISE: - make_list_from_string("Rise mP6", list); - break; - case VENDOR_SIS: - make_list_from_string("SiS mP6", list); - break; - case VENDOR_NSC: - make_list_from_string("Geode GXm,Geode GXLV,Geode GX1,Geode GX2", list); - break; - default: - warnf("Unknown vendor passed to cpuid_get_cpu_list()\n"); - break; - } -} - -void cpuid_free_cpu_list(struct cpu_list_t* list) -{ - int i; - if (list->num_entries <= 0) return; - for (i = 0; i < list->num_entries; i++) - free(list->names[i]); - free(list->names); -} diff --git a/contrib/libcpuid/include/libcpuid/intel_code_t.h b/contrib/libcpuid/include/libcpuid/intel_code_t.h deleted file mode 100644 index c50ec9c5a83..00000000000 --- a/contrib/libcpuid/include/libcpuid/intel_code_t.h +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Copyright 2016 Veselin Georgiev, - * anrieffNOSPAM @ mgail_DOT.com (convert to gmail) - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -/* - * This file contains a list of internal codes we use in detection. It is - * of no external use and isn't a complete list of intel products. 
- */ - CODE2(PENTIUM, 2000), - CODE(MOBILE_PENTIUM), - - CODE(XEON), - CODE(XEON_IRWIN), - CODE(XEONMP), - CODE(XEON_POTOMAC), - CODE(XEON_I7), - CODE(XEON_GAINESTOWN), - CODE(XEON_WESTMERE), - - CODE(MOBILE_PENTIUM_M), - CODE(CELERON), - CODE(MOBILE_CELERON), - CODE(NOT_CELERON), - - - CODE(CORE_SOLO), - CODE(MOBILE_CORE_SOLO), - CODE(CORE_DUO), - CODE(MOBILE_CORE_DUO), - - CODE(WOLFDALE), - CODE(MEROM), - CODE(PENRYN), - CODE(QUAD_CORE), - CODE(DUAL_CORE_HT), - CODE(QUAD_CORE_HT), - CODE(MORE_THAN_QUADCORE), - CODE(PENTIUM_D), - - CODE(ATOM_UNKNOWN), - CODE(ATOM_SILVERTHORNE), - CODE(ATOM_DIAMONDVILLE), - CODE(ATOM_PINEVIEW), - CODE(ATOM_CEDARVIEW), - - CODE(CORE_I3), - CODE(CORE_I5), - CODE(CORE_I7), - CODE(CORE_IVY3), /* 22nm Core-iX */ - CODE(CORE_IVY5), - CODE(CORE_IVY7), - CODE(CORE_HASWELL3), /* 22nm Core-iX, Haswell */ - CODE(CORE_HASWELL5), - CODE(CORE_HASWELL7), - CODE(CORE_BROADWELL3), /* 14nm Core-iX, Broadwell */ - CODE(CORE_BROADWELL5), - CODE(CORE_BROADWELL7), - CODE(CORE_SKYLAKE3), /* 14nm Core-iX, Skylake */ - CODE(CORE_SKYLAKE5), - CODE(CORE_SKYLAKE7), - diff --git a/contrib/libcpuid/include/libcpuid/libcpuid.h b/contrib/libcpuid/include/libcpuid/libcpuid.h deleted file mode 100644 index 866c0e8441d..00000000000 --- a/contrib/libcpuid/include/libcpuid/libcpuid.h +++ /dev/null @@ -1,1129 +0,0 @@ -/* - * Copyright 2008 Veselin Georgiev, - * anrieffNOSPAM @ mgail_DOT.com (convert to gmail) - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ -#ifndef __LIBCPUID_H__ -#define __LIBCPUID_H__ -/** - * \file libcpuid.h - * \author Veselin Georgiev - * \date Oct 2008 - * \version 0.4.0 - * - * Version history: - * - * * 0.1.0 (2008-10-15): initial adaptation from wxfractgui sources - * * 0.1.1 (2009-07-06): Added intel_fn11 fields to cpu_raw_data_t to handle - * new processor topology enumeration required on Core i7 - * * 0.1.2 (2009-09-26): Added support for MSR reading through self-extracting - * kernel driver on Win32. - * * 0.1.3 (2010-04-20): Added support for greater more accurate CPU clock - * measurements with cpu_clock_by_ic() - * * 0.2.0 (2011-10-11): Support for AMD Bulldozer CPUs, 128-bit SSE unit size - * checking. A backwards-incompatible change, since the - * sizeof cpu_id_t is now different. 
- * * 0.2.1 (2012-05-26): Support for Ivy Bridge, and detecting the presence of - * the RdRand instruction. - * * 0.2.2 (2015-11-04): Support for newer processors up to Haswell and Vishera. - * Fix clock detection in cpu_clock_by_ic() for Bulldozer. - * More entries supported in cpu_msrinfo(). - * *BSD and Solaris support (unofficial). - * * 0.3.0 (2016-07-09): Support for Skylake; MSR ops in FreeBSD; INFO_VOLTAGE - * for AMD CPUs. Level 4 cache support for Crystalwell - * (a backwards-incompatible change since the sizeof - * cpu_raw_data_t is now different). - * * 0.4.0 (2016-09-30): Better detection of AMD clock multiplier with msrinfo. - * Support for Intel SGX detection - * (a backwards-incompatible change since the sizeof - * cpu_raw_data_t and cpu_id_t is now different). - */ - -/** @mainpage A simple libcpuid introduction - * - * LibCPUID provides CPU identification and access to the CPUID and RDTSC - * instructions on the x86. - *

- * To execute CPUID, use \ref cpu_exec_cpuid
- * To execute RDTSC, use \ref cpu_rdtsc
- * To fetch the CPUID info needed for CPU identification, use - * \ref cpuid_get_raw_data
- * To make sense of that data (decode, extract features), use \ref cpu_identify
- * To detect the CPU speed, use either \ref cpu_clock, \ref cpu_clock_by_os, - * \ref cpu_tsc_mark + \ref cpu_tsc_unmark + \ref cpu_clock_by_mark, - * \ref cpu_clock_measure or \ref cpu_clock_by_ic. - * Read carefully for pros/cons of each method.
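The identification steps listed in the deleted mainpage text above can be put together into a short, self-contained program. This is only an illustrative sketch, not part of the diff: it relies solely on entry points and fields declared in this (removed) header — cpuid_present, cpuid_get_raw_data, cpu_identify, cpuid_error, cpu_clock_by_ic and the cpu_id_t members — while the error handling and output formatting are arbitrary.

#include <stdio.h>
#include "libcpuid.h"

int main(void)
{
    struct cpu_raw_data_t raw;
    struct cpu_id_t id;

    if (!cpuid_present())                /* is the CPUID instruction available at all? */
        return 1;
    if (cpuid_get_raw_data(&raw) < 0) {  /* fetch the raw CPUID leaves */
        printf("error: %s\n", cpuid_error());
        return 2;
    }
    if (cpu_identify(&raw, &id) < 0) {   /* decode vendor, features, caches, codename */
        printf("error: %s\n", cpuid_error());
        return 3;
    }
    printf("brand    : %s\n", id.brand_str);
    printf("codename : %s\n", id.cpu_codename);
    printf("cores    : %d (logical: %d)\n", (int) id.num_cores, (int) id.num_logical_cpus);
    printf("SSE2     : %s\n", id.flags[CPU_FEATURE_SSE2] ? "present" : "absent");
    /* instruction-counting clock estimate; 50 ms x 4 runs are the values recommended below */
    printf("clock    : ~%d MHz\n", cpu_clock_by_ic(50, 4));
    return 0;
}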
- * - * To read MSRs, use \ref cpu_msr_driver_open to get a handle, and then - * \ref cpu_rdmsr for querying abilities. Some MSR decoding is available on recent - * CPUs, and can be queried through \ref cpu_msrinfo; the various types of queries - * are described in \ref cpu_msrinfo_request_t. - *
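The MSR path mentioned above follows the same open/query/close pattern. The prototypes for cpu_msr_driver_open, cpu_rdmsr, cpu_msrinfo and cpu_msr_driver_close live further down in this header, outside the hunk shown here, so the exact signatures and the INFO_TEMPERATURE request used in this sketch are assumptions based on libcpuid 0.4.0; reading MSRs also requires elevated privileges.

#include <stdio.h>
#include "libcpuid.h"

int main(void)
{
    /* assumed libcpuid 0.4.0 API: cpu_msr_driver_open() returns NULL on failure */
    struct msr_driver_t* drv = cpu_msr_driver_open();
    uint64_t tsc = 0;

    if (!drv) {
        printf("error: %s\n", cpuid_error());
        return 1;
    }
    /* 0x10 is IA32_TIME_STAMP_COUNTER; any readable MSR index would do here */
    if (cpu_rdmsr(drv, 0x10, &tsc) == 0)
        printf("TSC via RDMSR: %llu\n", (unsigned long long) tsc);
    /* decoded, driver-assisted query (cpu_msrinfo_request_t is assumed to include INFO_TEMPERATURE) */
    printf("temperature: %d\n", cpu_msrinfo(drv, INFO_TEMPERATURE));
    cpu_msr_driver_close(drv);
    return 0;
}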

- */ - -/** @defgroup libcpuid LibCPUID - @{ */ - -/* Include some integer type specifications: */ -#include "libcpuid_types.h" - -/* Some limits and other constants */ -#include "libcpuid_constants.h" - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * @brief CPU vendor, as guessed from the Vendor String. - */ -typedef enum { - VENDOR_INTEL = 0, /*!< Intel CPU */ - VENDOR_AMD, /*!< AMD CPU */ - VENDOR_CYRIX, /*!< Cyrix CPU */ - VENDOR_NEXGEN, /*!< NexGen CPU */ - VENDOR_TRANSMETA, /*!< Transmeta CPU */ - VENDOR_UMC, /*!< x86 CPU by UMC */ - VENDOR_CENTAUR, /*!< x86 CPU by IDT */ - VENDOR_RISE, /*!< x86 CPU by Rise Technology */ - VENDOR_SIS, /*!< x86 CPU by SiS */ - VENDOR_NSC, /*!< x86 CPU by National Semiconductor */ - - NUM_CPU_VENDORS, /*!< Valid CPU vendor ids: 0..NUM_CPU_VENDORS - 1 */ - VENDOR_UNKNOWN = -1, -} cpu_vendor_t; -#define NUM_CPU_VENDORS NUM_CPU_VENDORS - -/** - * @brief Contains just the raw CPUID data. - * - * This contains only the most basic CPU data, required to do identification - * and feature recognition. Every processor should be identifiable using this - * data only. - */ -struct cpu_raw_data_t { - /** contains results of CPUID for eax = 0, 1, ...*/ - uint32_t basic_cpuid[MAX_CPUID_LEVEL][4]; - - /** contains results of CPUID for eax = 0x80000000, 0x80000001, ...*/ - uint32_t ext_cpuid[MAX_EXT_CPUID_LEVEL][4]; - - /** when the CPU is intel and it supports deterministic cache - information: this contains the results of CPUID for eax = 4 - and ecx = 0, 1, ... */ - uint32_t intel_fn4[MAX_INTELFN4_LEVEL][4]; - - /** when the CPU is intel and it supports leaf 0Bh (Extended Topology - enumeration leaf), this stores the result of CPUID with - eax = 11 and ecx = 0, 1, 2... */ - uint32_t intel_fn11[MAX_INTELFN11_LEVEL][4]; - - /** when the CPU is intel and supports leaf 12h (SGX enumeration leaf), - * this stores the result of CPUID with eax = 0x12 and - * ecx = 0, 1, 2... */ - uint32_t intel_fn12h[MAX_INTELFN12H_LEVEL][4]; - - /** when the CPU is intel and supports leaf 14h (Intel Processor Trace - * capabilities leaf). - * this stores the result of CPUID with eax = 0x12 and - * ecx = 0, 1, 2... */ - uint32_t intel_fn14h[MAX_INTELFN14H_LEVEL][4]; -}; - -/** - * @brief This contains information about SGX features of the processor - * Example usage: - * @code - * ... - * struct cpu_raw_data_t raw; - * struct cpu_id_t id; - * - * if (cpuid_get_raw_data(&raw) == 0 && cpu_identify(&raw, &id) == 0 && id.sgx.present) { - * printf("SGX is present.\n"); - * printf("SGX1 instructions: %s.\n", id.sgx.flags[INTEL_SGX1] ? "present" : "absent"); - * printf("SGX2 instructions: %s.\n", id.sgx.flags[INTEL_SGX2] ? "present" : "absent"); - * printf("Max 32-bit enclave size: 2^%d bytes.\n", id.sgx.max_enclave_32bit); - * printf("Max 64-bit enclave size: 2^%d bytes.\n", id.sgx.max_enclave_64bit); - * for (int i = 0; i < id.sgx.num_epc_sections; i++) { - * struct cpu_epc_t epc = cpuid_get_epc(i, NULL); - * printf("EPC section #%d: address = %x, size = %d bytes.\n", epc.address, epc.size); - * } - * } else { - * printf("SGX is not present.\n"); - * } - * @endcode - */ -struct cpu_sgx_t { - /** Whether SGX is present (boolean) */ - uint32_t present; - - /** Max enclave size in 32-bit mode. This is a power-of-two value: - * if it is "31", then the max enclave size is 2^31 bytes (2 GiB). - */ - uint8_t max_enclave_32bit; - - /** Max enclave size in 64-bit mode. This is a power-of-two value: - * if it is "36", then the max enclave size is 2^36 bytes (64 GiB). 
- */ - uint8_t max_enclave_64bit; - - /** - * contains SGX feature flags. See the \ref cpu_sgx_feature_t - * "INTEL_SGX*" macros below. - */ - uint8_t flags[SGX_FLAGS_MAX]; - - /** number of Enclave Page Cache (EPC) sections. Info for each - * section is available through the \ref cpuid_get_epc() function - */ - int num_epc_sections; - - /** bit vector of the supported extended features that can be written - * to the MISC region of the SSA (Save State Area) - */ - uint32_t misc_select; - - /** a bit vector of the attributes that can be set to SECS.ATTRIBUTES - * via ECREATE. Corresponds to bits 0-63 (incl.) of SECS.ATTRIBUTES. - */ - uint64_t secs_attributes; - - /** a bit vector of the bits that can be set in the XSAVE feature - * request mask; Corresponds to bits 64-127 of SECS.ATTRIBUTES. - */ - uint64_t secs_xfrm; -}; - -/** - * @brief This contains the recognized CPU features/info - */ -struct cpu_id_t { - /** contains the CPU vendor string, e.g. "GenuineIntel" */ - char vendor_str[VENDOR_STR_MAX]; - - /** contains the brand string, e.g. "Intel(R) Xeon(TM) CPU 2.40GHz" */ - char brand_str[BRAND_STR_MAX]; - - /** contains the recognized CPU vendor */ - cpu_vendor_t vendor; - - /** - * contain CPU flags. Used to test for features. See - * the \ref cpu_feature_t "CPU_FEATURE_*" macros below. - * @see Features - */ - uint8_t flags[CPU_FLAGS_MAX]; - - /** CPU family */ - int32_t family; - - /** CPU model */ - int32_t model; - - /** CPU stepping */ - int32_t stepping; - - /** CPU extended family */ - int32_t ext_family; - - /** CPU extended model */ - int32_t ext_model; - - /** Number of CPU cores on the current processor */ - int32_t num_cores; - - /** - * Number of logical processors on the current processor. - * Could be more than the number of physical cores, - * e.g. when the processor has HyperThreading. - */ - int32_t num_logical_cpus; - - /** - * The total number of logical processors. - * The same value is availabe through \ref cpuid_get_total_cpus. - * - * This is num_logical_cpus * {total physical processors in the system} - * (but only on a real system, under a VM this number may be lower). - * - * If you're writing a multithreaded program and you want to run it on - * all CPUs, this is the number of threads you need. - * - * @note in a VM, this will exactly match the number of CPUs set in - * the VM's configuration. - * - */ - int32_t total_logical_cpus; - - /** - * L1 data cache size in KB. Could be zero, if the CPU lacks cache. - * If the size cannot be determined, it will be -1. - */ - int32_t l1_data_cache; - - /** - * L1 instruction cache size in KB. Could be zero, if the CPU lacks - * cache. If the size cannot be determined, it will be -1. - * @note On some Intel CPUs, whose instruction cache is in fact - * a trace cache, the size will be expressed in K uOps. - */ - int32_t l1_instruction_cache; - - /** - * L2 cache size in KB. Could be zero, if the CPU lacks L2 cache. - * If the size of the cache could not be determined, it will be -1 - */ - int32_t l2_cache; - - /** L3 cache size in KB. Zero on most systems */ - int32_t l3_cache; - - /** L4 cache size in KB. Zero on most systems */ - int32_t l4_cache; - - /** Cache associativity for the L1 data cache. -1 if undetermined */ - int32_t l1_assoc; - - /** Cache associativity for the L2 cache. -1 if undetermined */ - int32_t l2_assoc; - - /** Cache associativity for the L3 cache. -1 if undetermined */ - int32_t l3_assoc; - - /** Cache associativity for the L4 cache. 
-1 if undetermined */ - int32_t l4_assoc; - - /** Cache-line size for L1 data cache. -1 if undetermined */ - int32_t l1_cacheline; - - /** Cache-line size for L2 cache. -1 if undetermined */ - int32_t l2_cacheline; - - /** Cache-line size for L3 cache. -1 if undetermined */ - int32_t l3_cacheline; - - /** Cache-line size for L4 cache. -1 if undetermined */ - int32_t l4_cacheline; - - /** - * The brief and human-friendly CPU codename, which was recognized.
- * Examples: - * @code - * +--------+--------+-------+-------+-------+---------------------------------------+-----------------------+ - * | Vendor | Family | Model | Step. | Cache | Brand String | cpu_id_t.cpu_codename | - * +--------+--------+-------+-------+-------+---------------------------------------+-----------------------+ - * | AMD | 6 | 8 | 0 | 256 | (not available - will be ignored) | "K6-2" | - * | Intel | 15 | 2 | 5 | 512 | "Intel(R) Xeon(TM) CPU 2.40GHz" | "Xeon (Prestonia)" | - * | Intel | 6 | 15 | 11 | 4096 | "Intel(R) Core(TM)2 Duo CPU E6550..." | "Conroe (Core 2 Duo)" | - * | AMD | 15 | 35 | 2 | 1024 | "Dual Core AMD Opteron(tm) Proces..." | "Opteron (Dual Core)" | - * +--------+--------+-------+-------+-------+---------------------------------------+-----------------------+ - * @endcode - */ - char cpu_codename[64]; - - /** SSE execution unit size (64 or 128; -1 if N/A) */ - int32_t sse_size; - - /** - * contain miscellaneous detection information. Used to test about specifics of - * certain detected features. See \ref cpu_hint_t "CPU_HINT_*" macros below. - * @see Hints - */ - uint8_t detection_hints[CPU_HINTS_MAX]; - - /** contains information about SGX features if the processor, if present */ - struct cpu_sgx_t sgx; -}; - -/** - * @brief CPU feature identifiers - * - * Usage: - * @code - * ... - * struct cpu_raw_data_t raw; - * struct cpu_id_t id; - * if (cpuid_get_raw_data(&raw) == 0 && cpu_identify(&raw, &id) == 0) { - * if (id.flags[CPU_FEATURE_SSE2]) { - * // The CPU has SSE2... - * ... - * } else { - * // no SSE2 - * } - * } else { - * // processor cannot be determined. - * } - * @endcode - */ -typedef enum { - CPU_FEATURE_FPU = 0, /*!< Floating point unit */ - CPU_FEATURE_VME, /*!< Virtual mode extension */ - CPU_FEATURE_DE, /*!< Debugging extension */ - CPU_FEATURE_PSE, /*!< Page size extension */ - CPU_FEATURE_TSC, /*!< Time-stamp counter */ - CPU_FEATURE_MSR, /*!< Model-specific regsisters, RDMSR/WRMSR supported */ - CPU_FEATURE_PAE, /*!< Physical address extension */ - CPU_FEATURE_MCE, /*!< Machine check exception */ - CPU_FEATURE_CX8, /*!< CMPXCHG8B instruction supported */ - CPU_FEATURE_APIC, /*!< APIC support */ - CPU_FEATURE_MTRR, /*!< Memory type range registers */ - CPU_FEATURE_SEP, /*!< SYSENTER / SYSEXIT instructions supported */ - CPU_FEATURE_PGE, /*!< Page global enable */ - CPU_FEATURE_MCA, /*!< Machine check architecture */ - CPU_FEATURE_CMOV, /*!< CMOVxx instructions supported */ - CPU_FEATURE_PAT, /*!< Page attribute table */ - CPU_FEATURE_PSE36, /*!< 36-bit page address extension */ - CPU_FEATURE_PN, /*!< Processor serial # implemented (Intel P3 only) */ - CPU_FEATURE_CLFLUSH, /*!< CLFLUSH instruction supported */ - CPU_FEATURE_DTS, /*!< Debug store supported */ - CPU_FEATURE_ACPI, /*!< ACPI support (power states) */ - CPU_FEATURE_MMX, /*!< MMX instruction set supported */ - CPU_FEATURE_FXSR, /*!< FXSAVE / FXRSTOR supported */ - CPU_FEATURE_SSE, /*!< Streaming-SIMD Extensions (SSE) supported */ - CPU_FEATURE_SSE2, /*!< SSE2 instructions supported */ - CPU_FEATURE_SS, /*!< Self-snoop */ - CPU_FEATURE_HT, /*!< Hyper-threading supported (but might be disabled) */ - CPU_FEATURE_TM, /*!< Thermal monitor */ - CPU_FEATURE_IA64, /*!< IA64 supported (Itanium only) */ - CPU_FEATURE_PBE, /*!< Pending-break enable */ - CPU_FEATURE_PNI, /*!< PNI (SSE3) instructions supported */ - CPU_FEATURE_PCLMUL, /*!< PCLMULQDQ instruction supported */ - CPU_FEATURE_DTS64, /*!< 64-bit Debug store supported */ - CPU_FEATURE_MONITOR, /*!< MONITOR / MWAIT supported */ - 
CPU_FEATURE_DS_CPL, /*!< CPL Qualified Debug Store */ - CPU_FEATURE_VMX, /*!< Virtualization technology supported */ - CPU_FEATURE_SMX, /*!< Safer mode exceptions */ - CPU_FEATURE_EST, /*!< Enhanced SpeedStep */ - CPU_FEATURE_TM2, /*!< Thermal monitor 2 */ - CPU_FEATURE_SSSE3, /*!< SSSE3 instructionss supported (this is different from SSE3!) */ - CPU_FEATURE_CID, /*!< Context ID supported */ - CPU_FEATURE_CX16, /*!< CMPXCHG16B instruction supported */ - CPU_FEATURE_XTPR, /*!< Send Task Priority Messages disable */ - CPU_FEATURE_PDCM, /*!< Performance capabilities MSR supported */ - CPU_FEATURE_DCA, /*!< Direct cache access supported */ - CPU_FEATURE_SSE4_1, /*!< SSE 4.1 instructions supported */ - CPU_FEATURE_SSE4_2, /*!< SSE 4.2 instructions supported */ - CPU_FEATURE_SYSCALL, /*!< SYSCALL / SYSRET instructions supported */ - CPU_FEATURE_XD, /*!< Execute disable bit supported */ - CPU_FEATURE_MOVBE, /*!< MOVBE instruction supported */ - CPU_FEATURE_POPCNT, /*!< POPCNT instruction supported */ - CPU_FEATURE_AES, /*!< AES* instructions supported */ - CPU_FEATURE_XSAVE, /*!< XSAVE/XRSTOR/etc instructions supported */ - CPU_FEATURE_OSXSAVE, /*!< non-privileged copy of OSXSAVE supported */ - CPU_FEATURE_AVX, /*!< Advanced vector extensions supported */ - CPU_FEATURE_MMXEXT, /*!< AMD MMX-extended instructions supported */ - CPU_FEATURE_3DNOW, /*!< AMD 3DNow! instructions supported */ - CPU_FEATURE_3DNOWEXT, /*!< AMD 3DNow! extended instructions supported */ - CPU_FEATURE_NX, /*!< No-execute bit supported */ - CPU_FEATURE_FXSR_OPT, /*!< FFXSR: FXSAVE and FXRSTOR optimizations */ - CPU_FEATURE_RDTSCP, /*!< RDTSCP instruction supported (AMD-only) */ - CPU_FEATURE_LM, /*!< Long mode (x86_64/EM64T) supported */ - CPU_FEATURE_LAHF_LM, /*!< LAHF/SAHF supported in 64-bit mode */ - CPU_FEATURE_CMP_LEGACY, /*!< core multi-processing legacy mode */ - CPU_FEATURE_SVM, /*!< AMD Secure virtual machine */ - CPU_FEATURE_ABM, /*!< LZCNT instruction support */ - CPU_FEATURE_MISALIGNSSE,/*!< Misaligned SSE supported */ - CPU_FEATURE_SSE4A, /*!< SSE 4a from AMD */ - CPU_FEATURE_3DNOWPREFETCH, /*!< PREFETCH/PREFETCHW support */ - CPU_FEATURE_OSVW, /*!< OS Visible Workaround (AMD) */ - CPU_FEATURE_IBS, /*!< Instruction-based sampling */ - CPU_FEATURE_SSE5, /*!< SSE 5 instructions supported (deprecated, will never be 1) */ - CPU_FEATURE_SKINIT, /*!< SKINIT / STGI supported */ - CPU_FEATURE_WDT, /*!< Watchdog timer support */ - CPU_FEATURE_TS, /*!< Temperature sensor */ - CPU_FEATURE_FID, /*!< Frequency ID control */ - CPU_FEATURE_VID, /*!< Voltage ID control */ - CPU_FEATURE_TTP, /*!< THERMTRIP */ - CPU_FEATURE_TM_AMD, /*!< AMD-specified hardware thermal control */ - CPU_FEATURE_STC, /*!< Software thermal control */ - CPU_FEATURE_100MHZSTEPS,/*!< 100 MHz multiplier control */ - CPU_FEATURE_HWPSTATE, /*!< Hardware P-state control */ - CPU_FEATURE_CONSTANT_TSC, /*!< TSC ticks at constant rate */ - CPU_FEATURE_XOP, /*!< The XOP instruction set (same as the old CPU_FEATURE_SSE5) */ - CPU_FEATURE_FMA3, /*!< The FMA3 instruction set */ - CPU_FEATURE_FMA4, /*!< The FMA4 instruction set */ - CPU_FEATURE_TBM, /*!< Trailing bit manipulation instruction support */ - CPU_FEATURE_F16C, /*!< 16-bit FP convert instruction support */ - CPU_FEATURE_RDRAND, /*!< RdRand instruction */ - CPU_FEATURE_X2APIC, /*!< x2APIC, APIC_BASE.EXTD, MSRs 0000_0800h...0000_0BFFh 64-bit ICR (+030h but not +031h), no DFR (+00Eh), SELF_IPI (+040h) also see standard level 0000_000Bh */ - CPU_FEATURE_CPB, /*!< Core performance boost */ - 
CPU_FEATURE_APERFMPERF, /*!< MPERF/APERF MSRs support */ - CPU_FEATURE_PFI, /*!< Processor Feedback Interface support */ - CPU_FEATURE_PA, /*!< Processor accumulator */ - CPU_FEATURE_AVX2, /*!< AVX2 instructions */ - CPU_FEATURE_BMI1, /*!< BMI1 instructions */ - CPU_FEATURE_BMI2, /*!< BMI2 instructions */ - CPU_FEATURE_HLE, /*!< Hardware Lock Elision prefixes */ - CPU_FEATURE_RTM, /*!< Restricted Transactional Memory instructions */ - CPU_FEATURE_AVX512F, /*!< AVX-512 Foundation */ - CPU_FEATURE_AVX512DQ, /*!< AVX-512 Double/Quad granular insns */ - CPU_FEATURE_AVX512PF, /*!< AVX-512 Prefetch */ - CPU_FEATURE_AVX512ER, /*!< AVX-512 Exponential/Reciprocal */ - CPU_FEATURE_AVX512CD, /*!< AVX-512 Conflict detection */ - CPU_FEATURE_SHA_NI, /*!< SHA-1/SHA-256 instructions */ - CPU_FEATURE_AVX512BW, /*!< AVX-512 Byte/Word granular insns */ - CPU_FEATURE_AVX512VL, /*!< AVX-512 128/256 vector length extensions */ - CPU_FEATURE_SGX, /*!< SGX extensions. Non-autoritative, check cpu_id_t::sgx::present to verify presence */ - CPU_FEATURE_RDSEED, /*!< RDSEED instruction */ - CPU_FEATURE_ADX, /*!< ADX extensions (arbitrary precision) */ - /* termination: */ - NUM_CPU_FEATURES, -} cpu_feature_t; - -/** - * @brief CPU detection hints identifiers - * - * Usage: similar to the flags usage - */ -typedef enum { - CPU_HINT_SSE_SIZE_AUTH = 0, /*!< SSE unit size is authoritative (not only a Family/Model guesswork, but based on an actual CPUID bit) */ - /* termination */ - NUM_CPU_HINTS, -} cpu_hint_t; - -/** - * @brief SGX features flags - * \see cpu_sgx_t - * - * Usage: - * @code - * ... - * struct cpu_raw_data_t raw; - * struct cpu_id_t id; - * if (cpuid_get_raw_data(&raw) == 0 && cpu_identify(&raw, &id) == 0 && id.sgx.present) { - * if (id.sgx.flags[INTEL_SGX1]) - * // The CPU has SGX1 instructions support... - * ... - * } else { - * // no SGX - * } - * } else { - * // processor cannot be determined. - * } - * @endcode - */ - -typedef enum { - INTEL_SGX1, /*!< SGX1 instructions support */ - INTEL_SGX2, /*!< SGX2 instructions support */ - - /* termination: */ - NUM_SGX_FEATURES, -} cpu_sgx_feature_t; - -/** - * @brief Describes common library error codes - */ -typedef enum { - ERR_OK = 0, /*!< "No error" */ - ERR_NO_CPUID = -1, /*!< "CPUID instruction is not supported" */ - ERR_NO_RDTSC = -2, /*!< "RDTSC instruction is not supported" */ - ERR_NO_MEM = -3, /*!< "Memory allocation failed" */ - ERR_OPEN = -4, /*!< "File open operation failed" */ - ERR_BADFMT = -5, /*!< "Bad file format" */ - ERR_NOT_IMP = -6, /*!< "Not implemented" */ - ERR_CPU_UNKN = -7, /*!< "Unsupported processor" */ - ERR_NO_RDMSR = -8, /*!< "RDMSR instruction is not supported" */ - ERR_NO_DRIVER= -9, /*!< "RDMSR driver error (generic)" */ - ERR_NO_PERMS = -10, /*!< "No permissions to install RDMSR driver" */ - ERR_EXTRACT = -11, /*!< "Cannot extract RDMSR driver (read only media?)" */ - ERR_HANDLE = -12, /*!< "Bad handle" */ - ERR_INVMSR = -13, /*!< "Invalid MSR" */ - ERR_INVCNB = -14, /*!< "Invalid core number" */ - ERR_HANDLE_R = -15, /*!< "Error on handle read" */ - ERR_INVRANGE = -16, /*!< "Invalid given range" */ -} cpu_error_t; - -/** - * @brief Internal structure, used in cpu_tsc_mark, cpu_tsc_unmark and - * cpu_clock_by_mark - */ -struct cpu_mark_t { - uint64_t tsc; /*!< Time-stamp from RDTSC */ - uint64_t sys_clock; /*!< In microsecond resolution */ -}; - -/** - * @brief Returns the total number of logical CPU threads (even if CPUID is not present). 
- * - * Under VM, this number (and total_logical_cpus, since they are fetched with the same code) - * may be nonsensical, i.e. might not equal NumPhysicalCPUs*NumCoresPerCPU*HyperThreading. - * This is because no matter how many logical threads the host machine has, you may limit them - * in the VM to any number you like. **This** is the number returned by cpuid_get_total_cpus(). - * - * @returns Number of logical CPU threads available. Equals the \ref cpu_id_t::total_logical_cpus. - */ -int cpuid_get_total_cpus(void); - -/** - * @brief Checks if the CPUID instruction is supported - * @retval 1 if CPUID is present - * @retval 0 the CPU doesn't have CPUID. - */ -int cpuid_present(void); - -/** - * @brief Executes the CPUID instruction - * @param eax - the value of the EAX register when executing CPUID - * @param regs - the results will be stored here. regs[0] = EAX, regs[1] = EBX, ... - * @note CPUID will be executed with EAX set to the given value and EBX, ECX, - * EDX set to zero. - */ -void cpu_exec_cpuid(uint32_t eax, uint32_t* regs); - -/** - * @brief Executes the CPUID instruction with the given input registers - * @note This is just a bit more generic version of cpu_exec_cpuid - it allows - * you to control all the registers. - * @param regs - Input/output. Prior to executing CPUID, EAX, EBX, ECX and - * EDX will be set to regs[0], regs[1], regs[2] and regs[3]. - * After CPUID, this array will contain the results. - */ -void cpu_exec_cpuid_ext(uint32_t* regs); - -/** - * @brief Obtains the raw CPUID data from the current CPU - * @param data - a pointer to cpu_raw_data_t structure - * @returns zero if successful, and some negative number on error. - * The error message can be obtained by calling \ref cpuid_error. - * @see cpu_error_t - */ -int cpuid_get_raw_data(struct cpu_raw_data_t* data); - -/** - * @brief Writes the raw CPUID data to a text file - * @param data - a pointer to cpu_raw_data_t structure - * @param filename - the path of the file, where the serialized data should be - * written. If empty, stdout will be used. - * @note This is intended primarily for debugging. On some processor, which is - * not currently supported or not completely recognized by cpu_identify, - * one can still successfully get the raw data and write it to a file. - * libcpuid developers can later import this file and debug the detection - * code as if running on the actual hardware. - * The file is simple text format of "something=value" pairs. Version info - * is also written, but the format is not intended to be neither backward- - * nor forward compatible. - * @returns zero if successful, and some negative number on error. - * The error message can be obtained by calling \ref cpuid_error. - * @see cpu_error_t - */ -int cpuid_serialize_raw_data(struct cpu_raw_data_t* data, const char* filename); - -/** - * @brief Reads raw CPUID data from file - * @param data - a pointer to cpu_raw_data_t structure. The deserialized data will - * be written here. - * @param filename - the path of the file, containing the serialized raw data. - * If empty, stdin will be used. - * @note This function may fail, if the file is created by different version of - * the library. Also, see the notes on cpuid_serialize_raw_data. - * @returns zero if successful, and some negative number on error. - * The error message can be obtained by calling \ref cpuid_error. 
- * @see cpu_error_t -*/ -int cpuid_deserialize_raw_data(struct cpu_raw_data_t* data, const char* filename); - -/** - * @brief Identifies the CPU - * @param raw - Input - a pointer to the raw CPUID data, which is obtained - * either by cpuid_get_raw_data or cpuid_deserialize_raw_data. - * Can also be NULL, in which case the functions calls - * cpuid_get_raw_data itself. - * @param data - Output - the decoded CPU features/info is written here. - * @note The function will not fail, even if some of the information - * cannot be obtained. Even when the CPU is new and thus unknown to - * libcpuid, some generic info, such as "AMD K9 family CPU" will be - * written to data.cpu_codename, and most other things, such as the - * CPU flags, cache sizes, etc. should be detected correctly anyway. - * However, the function CAN fail, if the CPU is completely alien to - * libcpuid. - * @note While cpu_identify() and cpuid_get_raw_data() are fast for most - * purposes, running them several thousand times per second can hamper - * performance significantly. Specifically, avoid writing "cpu feature - * checker" wrapping function, which calls cpu_identify and returns the - * value of some flag, if that function is going to be called frequently. - * @returns zero if successful, and some negative number on error. - * The error message can be obtained by calling \ref cpuid_error. - * @see cpu_error_t - */ -int cpu_identify(struct cpu_raw_data_t* raw, struct cpu_id_t* data); - -/** - * @brief Returns the short textual representation of a CPU flag - * @param feature - the feature, whose textual representation is wanted. - * @returns a constant string like "fpu", "tsc", "sse2", etc. - * @note the names of the returned flags are compatible with those from - * /proc/cpuinfo in Linux, with the exception of `tm_amd' - */ -const char* cpu_feature_str(cpu_feature_t feature); - -/** - * @brief Returns textual description of the last error - * - * libcpuid stores an `errno'-style error status, whose description - * can be obtained with this function. - * @note This function is not thread-safe - * @see cpu_error_t - */ -const char* cpuid_error(void); - -/** - * @brief Executes RDTSC - * - * The RDTSC (ReaD Time Stamp Counter) instruction gives access to an - * internal 64-bit counter, which usually increments at each clock cycle. - * This can be used for various timing routines, and as a very precise - * clock source. It is set to zero on system startup. Beware that may not - * increment at the same frequency as the CPU. Consecutive calls of RDTSC - * are, however, guaranteed to return monotonically-increasing values. - * - * @param result - a pointer to a 64-bit unsigned integer, where the TSC value - * will be stored - * - * @note If 100% compatibility is a concern, you must first check if the - * RDTSC instruction is present (if it is not, your program will crash - * with "invalid opcode" exception). Only some very old processors (i486, - * early AMD K5 and some Cyrix CPUs) lack that instruction - they should - * have become exceedingly rare these days. To verify RDTSC presence, - * run cpu_identify() and check flags[CPU_FEATURE_TSC]. - * - * @note The monotonically increasing nature of the TSC may be violated - * on SMP systems, if their TSC clocks run at different rate. If the OS - * doesn't account for that, the TSC drift may become arbitrary large. 
- */ -void cpu_rdtsc(uint64_t* result); - -/** - * @brief Store TSC and timing info - * - * This function stores the current TSC value and current - * time info from a precise OS-specific clock source in the cpu_mark_t - * structure. The sys_clock field contains time with microsecond resolution. - * The values can later be used to measure time intervals, number of clocks, - * FPU frequency, etc. - * @see cpu_rdtsc - * - * @param mark [out] - a pointer to a cpu_mark_t structure - */ -void cpu_tsc_mark(struct cpu_mark_t* mark); - -/** - * @brief Calculate TSC and timing difference - * - * @param mark - input/output: a pointer to a cpu_mark_t sturcture, which has - * already been initialized by cpu_tsc_mark. The difference in - * TSC and time will be written here. - * - * This function calculates the TSC and time difference, by obtaining the - * current TSC and timing values and subtracting the contents of the `mark' - * structure from them. Results are written in the same structure. - * - * Example: - * @code - * ... - * struct cpu_mark_t mark; - * cpu_tsc_mark(&mark); - * foo(); - * cpu_tsc_unmark(&mark); - * printf("Foo finished. Executed in %llu cycles and %llu usecs\n", - * mark.tsc, mark.sys_clock); - * ... - * @endcode - */ -void cpu_tsc_unmark(struct cpu_mark_t* mark); - -/** - * @brief Calculates the CPU clock - * - * @param mark - pointer to a cpu_mark_t structure, which has been initialized - * with cpu_tsc_mark and later `stopped' with cpu_tsc_unmark. - * - * @note For reliable results, the marked time interval should be at least about - * 10 ms. - * - * @returns the CPU clock frequency, in MHz. Due to measurement error, it will - * differ from the true value in a few least-significant bits. Accuracy depends - * on the timing interval - the more, the better. If the timing interval is - * insufficient, the result is -1. Also, see the comment on cpu_clock_measure - * for additional issues and pitfalls in using RDTSC for CPU frequency - * measurements. - */ -int cpu_clock_by_mark(struct cpu_mark_t* mark); - -/** - * @brief Returns the CPU clock, as reported by the OS - * - * This function uses OS-specific functions to obtain the CPU clock. It may - * differ from the true clock for several reasons:

- * - * i) The CPU might be in some power saving state, while the OS reports its - * full-power frequency, or vice-versa.
- * ii) In some cases you can raise or lower the CPU frequency with overclocking - * utilities and the OS will not notice. - * - * @returns the CPU clock frequency in MHz. If the OS is not (yet) supported - * or lacks the necessary reporting machinery, the return value is -1 - */ -int cpu_clock_by_os(void); - -/** - * @brief Measure the CPU clock frequency - * - * @param millis - How much time to waste in the busy-wait cycle. In millisecs. - * Useful values 10 - 1000 - * @param quad_check - Do a more thorough measurement if nonzero - * (see the explanation). - * - * The function performs a busy-wait cycle for the given time and calculates - * the CPU frequency by the difference of the TSC values. The accuracy of the - * calculation depends on the length of the busy-wait cycle: more is better, - * but 100ms should be enough for most purposes. - * - * While this will calculate the CPU frequency correctly in most cases, there are - * several reasons why it might be incorrect:
- * - * i) RDTSC doesn't guarantee it will run at the same clock as the CPU. - * Apparently there are no such CPUs at the moment, but still, there's no - * guarantee.
- * ii) The CPU might be in a low-frequency power saving mode, and the CPU - * might be switched to a higher frequency at any time. If this happens - * during the measurement, the result can be anywhere between the - * low and high frequencies. Also, if you're interested in the - * high frequency value only, this function might return the low one - * instead.
- * iii) On SMP systems exhibiting TSC drift (see \ref cpu_rdtsc) - * - * the quad_check option will run four consecutive measurements and - * then return the average of the two most-consistent results. The total - * runtime of the function will still be `millis' - consider using - * a bit more time for the timing interval. - * - * Finally, for benchmarking / CPU intensive applications, the best strategy is - * to use the cpu_tsc_mark() / cpu_tsc_unmark() / cpu_clock_by_mark() method. - * Begin by mark()-ing about one second after application startup (allowing the - * power-saving manager to kick in and rise the frequency during that time), - * then unmark() just before application finishing. The result will most - * acurately represent at what frequency your app was running. - * - * @returns the CPU clock frequency in MHz (within some measurement error - * margin). If RDTSC is not supported, the result is -1. - */ -int cpu_clock_measure(int millis, int quad_check); - -/** - * @brief Measure the CPU clock frequency using instruction-counting - * - * @param millis - how much time to allocate for each run, in milliseconds - * @param runs - how many runs to perform - * - * The function performs a busy-wait cycle using a known number of "heavy" (SSE) - * instructions. These instructions run at (more or less guaranteed) 1 IPC rate, - * so by running a busy loop for a fixed amount of time, and measuring the - * amount of instructions done, the CPU clock is accurately measured. - * - * Of course, this function is still affected by the power-saving schemes, so - * the warnings as of cpu_clock_measure() still apply. However, this function is - * immune to problems with detection, related to the Intel Nehalem's "Turbo" - * mode, where the internal clock is raised, but the RDTSC rate is unaffected. - * - * The function will run for about (millis * runs) milliseconds. - * You can make only a single busy-wait run (runs == 1); however, this can - * be affected by task scheduling (which will break the counting), so allowing - * more than one run is recommended. As run length is not imperative for - * accurate readings (e.g., 50ms is sufficient), you can afford a lot of short - * runs, e.g. 10 runs of 50ms or 20 runs of 25ms. - * - * Recommended values - millis = 50, runs = 4. For more robustness, - * increase the number of runs. - * - * NOTE: on Bulldozer and later CPUs, the busy-wait cycle runs at 1.4 IPC, thus - * the results are skewed. This is corrected internally by dividing the resulting - * value by 1.4. - * However, this only occurs if the thread is executed on a single CMT - * module - if there are other threads competing for resources, the results are - * unpredictable. Make sure you run cpu_clock_by_ic() on a CPU that is free from - * competing threads, or if there are such threads, they shouldn't exceed the - * number of modules. On a Bulldozer X8, that means 4 threads. - * - * @returns the CPU clock frequency in MHz (within some measurement error - * margin). If SSE is not supported, the result is -1. If the input parameters - * are incorrect, or some other internal fault is detected, the result is -2. - */ -int cpu_clock_by_ic(int millis, int runs); - -/** - * @brief Get the CPU clock frequency (all-in-one method) - * - * This is an all-in-one method for getting the CPU clock frequency. - * It tries to use the OS for that. If the OS doesn't have this info, it - * uses cpu_clock_measure with 200ms time interval and quadruple checking. - * - * @returns the CPU clock frequency in MHz. 
If every possible method fails, - * the result is -1. - */ -int cpu_clock(void); - - -/** - * @brief The return value of cpuid_get_epc(). - * @details - * Describes an EPC (Enclave Page Cache) layout (physical address and size). - * A CPU may have one or more EPC areas, and information about each is - * fetched via \ref cpuid_get_epc. - */ -struct cpu_epc_t { - uint64_t start_addr; - uint64_t length; -}; - -/** - * @brief Fetches information about an EPC (Enclave Page Cache) area. - * @param index - zero-based index, valid range [0..cpu_id_t.egx.num_epc_sections) - * @param raw - a pointer to fetched raw CPUID data. Needed only for testing, - * you can safely pass NULL here (if you pass a real structure, - * it will be used for fetching the leaf 12h data if index < 2; - * otherwise the real CPUID instruction will be used). - * @returns the requested data. If the CPU doesn't support SGX, or if - * index >= cpu_id_t.egx.num_epc_sections, both fields of the returned - * structure will be zeros. - */ -struct cpu_epc_t cpuid_get_epc(int index, const struct cpu_raw_data_t* raw); - -/** - * @brief Returns the libcpuid version - * - * @returns the string representation of the libcpuid version, like "0.1.1" - */ -const char* cpuid_lib_version(void); - -typedef void (*libcpuid_warn_fn_t) (const char *msg); -/** - * @brief Sets the warning print function - * - * In some cases, the internal libcpuid machinery would like to emit useful - * debug warnings. By default, these warnings are written to stderr. However, - * you can set a custom function that will receive those warnings. - * - * @param warn_fun - the warning function you want to set. If NULL, warnings - * are disabled. The function takes const char* argument. - * - * @returns the current warning function. You can use the return value to - * keep the previous warning function and restore it at your discretion. - */ -libcpuid_warn_fn_t cpuid_set_warn_function(libcpuid_warn_fn_t warn_fun); - -/** - * @brief Sets the verbosiness level - * - * When the verbosiness level is above zero, some functions might print - * diagnostic information about what are they doing. The higher the level is, - * the more detail is printed. Level zero is guaranteed to omit all such - * output. The output is written using the same machinery as the warnings, - * @see cpuid_set_warn_function() - * - * @param level the desired verbosiness level. Useful values 0..2 inclusive - */ -void cpuid_set_verbosiness_level(int level); - - -/** - * @brief Obtains the CPU vendor from CPUID from the current CPU - * @note The result is cached. - * @returns VENDOR_UNKNOWN if failed, otherwise the CPU vendor type. - * @see cpu_vendor_t - */ -cpu_vendor_t cpuid_get_vendor(void); - -/** - * @brief a structure that holds a list of processor names - */ -struct cpu_list_t { - /** Number of entries in the list */ - int num_entries; - /** Pointers to names. There will be num_entries of them */ - char **names; -}; - -/** - * @brief Gets a list of all known CPU names from a specific vendor. - * - * This function compiles a list of all known CPU (code)names - * (i.e. the possible values of cpu_id_t::cpu_codename) for the given vendor. - * - * There are about 100 entries for Intel and AMD, and a few for the other - * vendors. The list is written out in approximate chronological introduction - * order of the parts. - * - * @param vendor the vendor to be queried - * @param list [out] the resulting list will be written here. 
- * NOTE: As the memory is dynamically allocated, be sure to call - * cpuid_free_cpu_list() after you're done with the data - * @see cpu_list_t - */ -void cpuid_get_cpu_list(cpu_vendor_t vendor, struct cpu_list_t* list); - -/** - * @brief Frees a CPU list - * - * This function deletes all the memory associated with a CPU list, as obtained - * by cpuid_get_cpu_list() - * - * @param list - the list to be free()'d. - */ -void cpuid_free_cpu_list(struct cpu_list_t* list); - -struct msr_driver_t; -/** - * @brief Starts/opens a driver, needed to read MSRs (Model Specific Registers) - * - * On systems that support it, this function will create a temporary - * system driver, that has privileges to execute the RDMSR instruction. - * After the driver is created, you can read MSRs by calling \ref cpu_rdmsr - * - * @returns a handle to the driver on success, and NULL on error. - * The error message can be obtained by calling \ref cpuid_error. - * @see cpu_error_t - */ -struct msr_driver_t* cpu_msr_driver_open(void); - -/** - * @brief Similar to \ref cpu_msr_driver_open, but accept one parameter - * - * This function works on certain operating systems (GNU/Linux, FreeBSD) - * - * @param core_num specify the core number for MSR. - * The first core number is 0. - * The last core number is \ref cpuid_get_total_cpus - 1. - * - * @returns a handle to the driver on success, and NULL on error. - * The error message can be obtained by calling \ref cpuid_error. - * @see cpu_error_t - */ -struct msr_driver_t* cpu_msr_driver_open_core(unsigned core_num); - -/** - * @brief Reads a Model-Specific Register (MSR) - * - * If the CPU has MSRs (as indicated by the CPU_FEATURE_MSR flag), you can - * read a MSR with the given index by calling this function. - * - * There are several prerequisites you must do before reading MSRs: - * 1) You must ensure the CPU has RDMSR. Check the CPU_FEATURE_MSR flag - * in cpu_id_t::flags - * 2) You must ensure that the CPU implements the specific MSR you intend to - * read. - * 3) You must open a MSR-reader driver. RDMSR is a privileged instruction and - * needs ring-0 access in order to work. This temporary driver is created - * by calling \ref cpu_msr_driver_open - * - * @param handle - a handle to the MSR reader driver, as created by - * cpu_msr_driver_open - * @param msr_index - the numeric ID of the MSR you want to read - * @param result - a pointer to a 64-bit integer, where the MSR value is stored - * - * @returns zero if successful, and some negative number on error. - * The error message can be obtained by calling \ref cpuid_error. - * @see cpu_error_t - */ -int cpu_rdmsr(struct msr_driver_t* handle, uint32_t msr_index, uint64_t* result); - - -typedef enum { - INFO_MPERF, /*!< Maximum performance frequency clock. This - is a counter, which increments as a - proportion of the actual processor speed. */ - INFO_APERF, /*!< Actual performance frequency clock. This - accumulates the core clock counts when the - core is active. */ - INFO_MIN_MULTIPLIER, /*!< Minimum CPU:FSB ratio for this CPU, - multiplied by 100. */ - INFO_CUR_MULTIPLIER, /*!< Current CPU:FSB ratio, multiplied by 100. - e.g., a CPU:FSB value of 18.5 reads as - "1850". */ - INFO_MAX_MULTIPLIER, /*!< Maximum CPU:FSB ratio for this CPU, - multiplied by 100. */ - INFO_TEMPERATURE, /*!< The current core temperature in Celsius. */ - INFO_THROTTLING, /*!< 1 if the current logical processor is - throttling. 0 if it is running normally. */ - INFO_VOLTAGE, /*!< The current core voltage in Volt, - multiplied by 100. 
*/ - INFO_BCLK, /*!< See \ref INFO_BUS_CLOCK. */ - INFO_BUS_CLOCK, /*!< The main bus clock in MHz, - e.g., FSB/QPI/DMI/HT base clock, - multiplied by 100. */ -} cpu_msrinfo_request_t; - -/** - * @brief Similar to \ref cpu_rdmsr, but extract a range of bits - * - * @param handle - a handle to the MSR reader driver, as created by - * cpu_msr_driver_open - * @param msr_index - the numeric ID of the MSR you want to read - * @param highbit - the high bit in range, must be inferior to 64 - * @param lowbit - the low bit in range, must be equal or superior to 0 - * @param result - a pointer to a 64-bit integer, where the MSR value is stored - * - * @returns zero if successful, and some negative number on error. - * The error message can be obtained by calling \ref cpuid_error. - * @see cpu_error_t - */ -int cpu_rdmsr_range(struct msr_driver_t* handle, uint32_t msr_index, uint8_t highbit, - uint8_t lowbit, uint64_t* result); - -/** - * @brief Reads extended CPU information from Model-Specific Registers. - * @param handle - a handle to an open MSR driver, @see cpu_msr_driver_open - * @param which - which info field should be returned. A list of - * available information entities is listed in the - * cpu_msrinfo_request_t enum. - * @retval - if the requested information is available for the current - * processor model, the respective value is returned. - * if no information is available, or the CPU doesn't support - * the query, the special value CPU_INVALID_VALUE is returned - */ -int cpu_msrinfo(struct msr_driver_t* handle, cpu_msrinfo_request_t which); -#define CPU_INVALID_VALUE 0x3fffffff - -/** - * @brief Closes an open MSR driver - * - * This function unloads the MSR driver opened by cpu_msr_driver_open and - * frees any resources associated with it. - * - * @param handle - a handle to the MSR reader driver, as created by - * cpu_msr_driver_open - * - * @returns zero if successful, and some negative number on error. - * The error message can be obtained by calling \ref cpuid_error. - * @see cpu_error_t - */ -int cpu_msr_driver_close(struct msr_driver_t* handle); - -#ifdef __cplusplus -}; /* extern "C" */ -#endif - - -/** @} */ - -#endif /* __LIBCPUID_H__ */ diff --git a/contrib/libcpuid/include/libcpuid/libcpuid_constants.h b/contrib/libcpuid/include/libcpuid/libcpuid_constants.h deleted file mode 100644 index 3ddb6d5e14e..00000000000 --- a/contrib/libcpuid/include/libcpuid/libcpuid_constants.h +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Copyright 2008 Veselin Georgiev, - * anrieffNOSPAM @ mgail_DOT.com (convert to gmail) - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ -/** - * @File libcpuid_constants.h - * @Author Veselin Georgiev - * @Brief Some limits and constants for libcpuid - */ - -#ifndef __LIBCPUID_CONSTANTS_H__ -#define __LIBCPUID_CONSTANTS_H__ - -#define VENDOR_STR_MAX 16 -#define BRAND_STR_MAX 64 -#define CPU_FLAGS_MAX 128 -#define MAX_CPUID_LEVEL 32 -#define MAX_EXT_CPUID_LEVEL 32 -#define MAX_INTELFN4_LEVEL 8 -#define MAX_INTELFN11_LEVEL 4 -#define MAX_INTELFN12H_LEVEL 4 -#define MAX_INTELFN14H_LEVEL 4 -#define CPU_HINTS_MAX 16 -#define SGX_FLAGS_MAX 14 - -#endif /* __LIBCPUID_CONSTANTS_H__ */ diff --git a/contrib/libcpuid/include/libcpuid/libcpuid_internal.h b/contrib/libcpuid/include/libcpuid/libcpuid_internal.h deleted file mode 100644 index 95528896219..00000000000 --- a/contrib/libcpuid/include/libcpuid/libcpuid_internal.h +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Copyright 2016 Veselin Georgiev, - * anrieffNOSPAM @ mgail_DOT.com (convert to gmail) - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ -#ifndef __LIBCPUID_INTERNAL_H__ -#define __LIBCPUID_INTERNAL_H__ -/* - * This file contains internal undocumented declarations and function prototypes - * for the workings of the internal library infrastructure. 
- */ - -enum _common_codes_t { - NA = 0, - NO_CODE, -}; - -#define CODE(x) x -#define CODE2(x, y) x = y -enum _amd_code_t { - #include "amd_code_t.h" -}; -typedef enum _amd_code_t amd_code_t; - -enum _intel_code_t { - #include "intel_code_t.h" -}; -typedef enum _intel_code_t intel_code_t; -#undef CODE -#undef CODE2 - -struct internal_id_info_t { - union { - amd_code_t amd; - intel_code_t intel; - } code; - int score; // detection (matchtable) score -}; - -int cpu_ident_internal(struct cpu_raw_data_t* raw, struct cpu_id_t* data, - struct internal_id_info_t* internal); - -#endif /* __LIBCPUID_INTERNAL_H__ */ diff --git a/contrib/libcpuid/include/libcpuid/libcpuid_types.h b/contrib/libcpuid/include/libcpuid/libcpuid_types.h deleted file mode 100644 index f1181edf2ee..00000000000 --- a/contrib/libcpuid/include/libcpuid/libcpuid_types.h +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Copyright 2008 Veselin Georgiev, - * anrieffNOSPAM @ mgail_DOT.com (convert to gmail) - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ -/** - * @File libcpuid_types.h - * @Author Veselin Georgiev - * @Brief Type specifications for libcpuid. - */ - -#ifndef __LIBCPUID_TYPES_H__ -#define __LIBCPUID_TYPES_H__ - -//#ifdef HAVE_CONFIG_H // CLICKHOUSE PATCH -# include "config.h" -//#endif // CLICKHOUSE PATCH - -#if 1 // CLICKHOUSE PATCH -//#if defined(HAVE_STDINT_H) // CLICKHOUSE PATCH -# include -#else -/* we have to provide our own: */ -# if !defined(HAVE_INT32_T) && !defined(__int32_t_defined) -typedef int int32_t; -# endif - -# if !defined(HAVE_UINT32_T) && !defined(__uint32_t_defined) -typedef unsigned uint32_t; -# endif - -typedef signed char int8_t; -typedef unsigned char uint8_t; -typedef signed short int16_t; -typedef unsigned short uint16_t; -#if (defined _MSC_VER) && (_MSC_VER <= 1300) - /* MSVC 6.0: no long longs ... 
*/ - typedef signed __int64 int64_t; - typedef unsigned __int64 uint64_t; -#else - /* all other sane compilers: */ - typedef signed long long int64_t; - typedef unsigned long long uint64_t; -#endif - -#endif - -#endif /* __LIBCPUID_TYPES_H__ */ diff --git a/contrib/libcpuid/include/libcpuid/libcpuid_util.c b/contrib/libcpuid/include/libcpuid/libcpuid_util.c deleted file mode 100644 index ea6b1b8dfb4..00000000000 --- a/contrib/libcpuid/include/libcpuid/libcpuid_util.c +++ /dev/null @@ -1,187 +0,0 @@ -/* - * Copyright 2008 Veselin Georgiev, - * anrieffNOSPAM @ mgail_DOT.com (convert to gmail) - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include -#include -#include -#include -#include -#include "libcpuid.h" -#include "libcpuid_util.h" - -int _current_verboselevel; - -void match_features(const struct feature_map_t* matchtable, int count, uint32_t reg, struct cpu_id_t* data) -{ - int i; - for (i = 0; i < count; i++) - if (reg & (1u << matchtable[i].bit)) - data->flags[matchtable[i].feature] = 1; -} - -static void default_warn(const char *msg) -{ - fprintf(stderr, "%s", msg); -} - -libcpuid_warn_fn_t _warn_fun = default_warn; - -#if defined(_MSC_VER) -# define vsnprintf _vsnprintf -#endif -void warnf(const char* format, ...) -{ - char buff[1024]; - va_list va; - if (!_warn_fun) return; - va_start(va, format); - vsnprintf(buff, sizeof(buff), format, va); - va_end(va); - _warn_fun(buff); -} - -void debugf(int verboselevel, const char* format, ...) 
-{ - char buff[1024]; - va_list va; - if (verboselevel > _current_verboselevel) return; - va_start(va, format); - vsnprintf(buff, sizeof(buff), format, va); - va_end(va); - _warn_fun(buff); -} - -static int score(const struct match_entry_t* entry, const struct cpu_id_t* data, - int brand_code, int model_code) -{ - int res = 0; - if (entry->family == data->family ) res += 2; - if (entry->model == data->model ) res += 2; - if (entry->stepping == data->stepping ) res += 2; - if (entry->ext_family == data->ext_family) res += 2; - if (entry->ext_model == data->ext_model ) res += 2; - if (entry->ncores == data->num_cores ) res += 2; - if (entry->l2cache == data->l2_cache ) res += 1; - if (entry->l3cache == data->l3_cache ) res += 1; - if (entry->brand_code == brand_code ) res += 2; - if (entry->model_code == model_code ) res += 2; - return res; -} - -int match_cpu_codename(const struct match_entry_t* matchtable, int count, - struct cpu_id_t* data, int brand_code, int model_code) -{ - int bestscore = -1; - int bestindex = 0; - int i, t; - - debugf(3, "Matching cpu f:%d, m:%d, s:%d, xf:%d, xm:%d, ncore:%d, l2:%d, bcode:%d, code:%d\n", - data->family, data->model, data->stepping, data->ext_family, - data->ext_model, data->num_cores, data->l2_cache, brand_code, model_code); - - for (i = 0; i < count; i++) { - t = score(&matchtable[i], data, brand_code, model_code); - debugf(3, "Entry %d, `%s', score %d\n", i, matchtable[i].name, t); - if (t > bestscore) { - debugf(2, "Entry `%s' selected - best score so far (%d)\n", matchtable[i].name, t); - bestscore = t; - bestindex = i; - } - } - strcpy(data->cpu_codename, matchtable[bestindex].name); - return bestscore; -} - -void generic_get_cpu_list(const struct match_entry_t* matchtable, int count, - struct cpu_list_t* list) -{ - int i, j, n, good; - n = 0; - list->names = (char**) malloc(sizeof(char*) * count); - for (i = 0; i < count; i++) { - if (strstr(matchtable[i].name, "Unknown")) continue; - good = 1; - for (j = n - 1; j >= 0; j--) - if (!strcmp(list->names[j], matchtable[i].name)) { - good = 0; - break; - } - if (!good) continue; -#if defined(_MSC_VER) - list->names[n++] = _strdup(matchtable[i].name); -#else - list->names[n++] = strdup(matchtable[i].name); -#endif - } - list->num_entries = n; -} - -static int xmatch_entry(char c, const char* p) -{ - int i, j; - if (c == 0) return -1; - if (c == p[0]) return 1; - if (p[0] == '.') return 1; - if (p[0] == '#' && isdigit(c)) return 1; - if (p[0] == '[') { - j = 1; - while (p[j] && p[j] != ']') j++; - if (!p[j]) return -1; - for (i = 1; i < j; i++) - if (p[i] == c) return j + 1; - } - return -1; -} - -int match_pattern(const char* s, const char* p) -{ - int i, j, dj, k, n, m; - n = (int) strlen(s); - m = (int) strlen(p); - for (i = 0; i < n; i++) { - if (xmatch_entry(s[i], p) != -1) { - j = 0; - k = 0; - while (j < m && ((dj = xmatch_entry(s[i + k], p + j)) != -1)) { - k++; - j += dj; - } - if (j == m) return i + 1; - } - } - return 0; -} - -struct cpu_id_t* get_cached_cpuid(void) -{ - static int initialized = 0; - static struct cpu_id_t id; - if (initialized) return &id; - if (cpu_identify(NULL, &id)) - memset(&id, 0, sizeof(id)); - initialized = 1; - return &id; -} diff --git a/contrib/libcpuid/include/libcpuid/libcpuid_util.h b/contrib/libcpuid/include/libcpuid/libcpuid_util.h deleted file mode 100644 index 22becea26f6..00000000000 --- a/contrib/libcpuid/include/libcpuid/libcpuid_util.h +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Copyright 2008 Veselin Georgiev, - * anrieffNOSPAM @ mgail_DOT.com (convert to 
gmail) - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ -#ifndef __LIBCPUID_UTIL_H__ -#define __LIBCPUID_UTIL_H__ - -#define COUNT_OF(array) (sizeof(array) / sizeof(array[0])) - -struct feature_map_t { - unsigned bit; - cpu_feature_t feature; -}; - -void match_features(const struct feature_map_t* matchtable, int count, - uint32_t reg, struct cpu_id_t* data); - -struct match_entry_t { - int family, model, stepping, ext_family, ext_model; - int ncores, l2cache, l3cache, brand_code, model_code; - char name[32]; -}; - -// returns the match score: -int match_cpu_codename(const struct match_entry_t* matchtable, int count, - struct cpu_id_t* data, int brand_code, int model_code); - -void warnf(const char* format, ...) -#ifdef __GNUC__ -__attribute__((format(printf, 1, 2))) -#endif -; -void debugf(int verboselevel, const char* format, ...) -#ifdef __GNUC__ -__attribute__((format(printf, 2, 3))) -#endif -; -void generic_get_cpu_list(const struct match_entry_t* matchtable, int count, - struct cpu_list_t* list); - -/* - * Seek for a pattern in `haystack'. - * Pattern may be an fixed string, or contain the special metacharacters - * '.' - match any single character - * '#' - match any digit - * '[] - match any of the given chars (regex-like ranges are not - * supported) - * Return val: 0 if the pattern is not found. Nonzero if it is found (actually, - * x + 1 where x is the index where the match is found). - */ -int match_pattern(const char* haystack, const char* pattern); - -/* - * Gets an initialized cpu_id_t. It is cached, so that internal libcpuid - * machinery doesn't need to issue cpu_identify more than once. 
- */ -struct cpu_id_t* get_cached_cpuid(void); - -/* - * Sets the current errno - */ -int set_error(cpu_error_t err); - -extern libcpuid_warn_fn_t _warn_fun; -extern int _current_verboselevel; - -#endif /* __LIBCPUID_UTIL_H__ */ diff --git a/contrib/libcpuid/include/libcpuid/msrdriver.c b/contrib/libcpuid/include/libcpuid/msrdriver.c deleted file mode 100644 index 8f9d7ed0ca8..00000000000 --- a/contrib/libcpuid/include/libcpuid/msrdriver.c +++ /dev/null @@ -1,593 +0,0 @@ -/* - * Copyright 2009 Veselin Georgiev, - * anrieffNOSPAM @ mgail_DOT.com (convert to gmail) - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -/** - * @File msrdriver.c - * @Brief Contains the binary images of the x86 and x64 MSR drivers for Windows - * @Date 2009-09-29 - * - * The driver is courtesy of Nick 'Bombera' Gabareff, and its source is actually - * available, see the contrib/ dir. - * - * However, for simplicity, here we just include the images of the compiled .SYS - * files. 
- * They are extracted to the filesystem on demand and loaded in the kernel - * by the cpu_msr_driver_open() function - */ -#ifdef _WIN32 -#include "asm-bits.h" -//begin { -int cc_x86driver_code_size = 4608; -uint8_t cc_x86driver_code[4608] = { - 0x4d,0x5a,0x90,0x00,0x03,0x00,0x00,0x00,0x04,0x00,0x00,0x00,0xff,0xff,0x00,0x00,0xb8,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x40,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0xc8,0x00,0x00,0x00,0x0e,0x1f,0xba,0x0e,0x00,0xb4,0x09,0xcd, - 0x21,0xb8,0x01,0x4c,0xcd,0x21,0x54,0x68,0x69,0x73,0x20,0x70,0x72,0x6f,0x67,0x72,0x61,0x6d, - 0x20,0x63,0x61,0x6e,0x6e,0x6f,0x74,0x20,0x62,0x65,0x20,0x72,0x75,0x6e,0x20,0x69,0x6e,0x20, - 0x44,0x4f,0x53,0x20,0x6d,0x6f,0x64,0x65,0x2e,0x0d,0x0d,0x0a,0x24,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x9f,0x99,0x48,0xdf,0xdb,0xf8,0x26,0x8c,0xdb,0xf8,0x26,0x8c,0xdb,0xf8,0x26,0x8c, - 0xdb,0xf8,0x27,0x8c,0xdd,0xf8,0x26,0x8c,0x21,0xdb,0x3f,0x8c,0xd8,0xf8,0x26,0x8c,0xfc,0x3e, - 0x57,0x8c,0xda,0xf8,0x26,0x8c,0xfc,0x3e,0x5a,0x8c,0xda,0xf8,0x26,0x8c,0xfc,0x3e,0x5e,0x8c, - 0xda,0xf8,0x26,0x8c,0x52,0x69,0x63,0x68,0xdb,0xf8,0x26,0x8c,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x50,0x45,0x00,0x00,0x4c,0x01,0x07,0x00,0x12,0x9b,0x9b,0x4a,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0xe0,0x00,0x02,0x21,0x0b,0x01,0x08,0x00,0x00,0x06,0x00,0x00,0x00,0x0a, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x40,0x00,0x00,0x00,0x10,0x00,0x00,0x00,0x20,0x00,0x00, - 0x00,0x00,0x01,0x00,0x00,0x10,0x00,0x00,0x00,0x02,0x00,0x00,0x04,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x04,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x80,0x00,0x00,0x00,0x04,0x00,0x00, - 0xa9,0xd1,0x00,0x00,0x01,0x00,0x00,0x04,0x00,0x00,0x10,0x00,0x00,0x10,0x00,0x00,0x00,0x00, - 0x10,0x00,0x00,0x10,0x00,0x00,0x00,0x00,0x00,0x00,0x10,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x50,0x00,0x00,0x28,0x00,0x00,0x00,0x00,0x60,0x00,0x00,0xc0,0x03, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x70,0x00,0x00,0x30,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x20,0x00,0x00,0x1c,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x2e,0x74, - 0x65,0x78,0x74,0x00,0x00,0x00,0xa3,0x00,0x00,0x00,0x00,0x10,0x00,0x00,0x00,0x02,0x00,0x00, - 0x00,0x04,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x20,0x00, - 0x00,0x68,0x2e,0x72,0x64,0x61,0x74,0x61,0x00,0x00,0x62,0x00,0x00,0x00,0x00,0x20,0x00,0x00, - 0x00,0x02,0x00,0x00,0x00,0x06,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x40,0x00,0x00,0x48,0x2e,0x64,0x61,0x74,0x61,0x00,0x00,0x00,0x04,0x00,0x00,0x00, - 0x00,0x30,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x40,0x00,0x00,0xc8,0x50,0x41,0x47,0x45,0x30,0x44,0x45,0x46, - 0x8c,0x00,0x00,0x00,0x00,0x40,0x00,0x00,0x00,0x02,0x00,0x00,0x00,0x08,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x20,0x00,0x00,0x60,0x49,0x4e,0x49,0x54, - 0x00,0x00,0x00,0x00,0xd4,0x00,0x00,0x00,0x00,0x50,0x00,0x00,0x00,0x02,0x00,0x00,0x00,0x0a, - 
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x20,0x00,0x00,0xe2, - 0x2e,0x72,0x73,0x72,0x63,0x00,0x00,0x00,0xc0,0x03,0x00,0x00,0x00,0x60,0x00,0x00,0x00,0x04, - 0x00,0x00,0x00,0x0c,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x40,0x00,0x00,0x42,0x2e,0x72,0x65,0x6c,0x6f,0x63,0x00,0x00,0x68,0x00,0x00,0x00,0x00,0x70, - 0x00,0x00,0x00,0x02,0x00,0x00,0x00,0x10,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x40,0x00,0x00,0x42,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x8b,0x4c, - 0x24,0x08,0x83,0x61,0x18,0x00,0x83,0x61,0x1c,0x00,0x32,0xd2,0xff,0x15,0x08,0x20,0x01,0x00, - 0x33,0xc0,0xc2,0x08,0x00,0x56,0x8b,0x74,0x24,0x0c,0x8b,0x46,0x60,0x81,0x78,0x0c,0x0c,0xe0, - 0x22,0x00,0x57,0x75,0x3c,0x83,0x78,0x04,0x08,0x72,0x36,0x83,0x78,0x08,0x04,0x75,0x07,0x8b, - 0x46,0x0c,0x8b,0x08,0xeb,0x05,0xb9,0x9c,0x01,0x00,0x00,0x8b,0x7e,0x0c,0x0f,0x32,0x89,0x07, - 0x89,0x57,0x04,0xc7,0x46,0x1c,0x08,0x00,0x00,0x00,0x33,0xff,0x32,0xd2,0x8b,0xce,0xff,0x15, - 0x08,0x20,0x01,0x00,0x8b,0xc7,0x5f,0x5e,0xc2,0x08,0x00,0x83,0x66,0x1c,0x00,0xbf,0x01,0x00, - 0x00,0xc0,0x89,0x7e,0x18,0xeb,0xe1,0x55,0x8b,0xec,0x51,0x51,0x8b,0x45,0x08,0xff,0x70,0x04, - 0xff,0x15,0x04,0x20,0x01,0x00,0x68,0x3c,0x20,0x01,0x00,0x8d,0x45,0xf8,0x50,0xff,0x15,0x00, - 0x20,0x01,0x00,0x8d,0x45,0xf8,0x50,0xff,0x15,0x14,0x20,0x01,0x00,0xc9,0xc2,0x04,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x5c,0x50,0x00,0x00,0x74,0x50,0x00,0x00,0x86,0x50,0x00,0x00, - 0x9c,0x50,0x00,0x00,0xb4,0x50,0x00,0x00,0x44,0x50,0x00,0x00,0x00,0x00,0x00,0x00,0x5c,0x00, - 0x44,0x00,0x65,0x00,0x76,0x00,0x69,0x00,0x63,0x00,0x65,0x00,0x5c,0x00,0x54,0x00,0x6d,0x00, - 0x70,0x00,0x52,0x00,0x64,0x00,0x72,0x00,0x00,0x00,0x00,0x00,0x5c,0x00,0x44,0x00,0x6f,0x00, - 0x73,0x00,0x44,0x00,0x65,0x00,0x76,0x00,0x69,0x00,0x63,0x00,0x65,0x00,0x73,0x00,0x5c,0x00, - 0x54,0x00,0x6d,0x00,0x70,0x00,0x52,0x00,0x64,0x00,0x72,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x55,0x8b,0xec,0x83, - 0xec,0x14,0x56,0x8b,0x35,0x00,0x20,0x01,0x00,0x57,0x68,0x1c,0x20,0x01,0x00,0x8d,0x45,0xf4, - 0x50,0xff,0xd6,0x8b,0x7d,0x08,0x8d,0x45,0xfc,0x50,0x6a,0x00,0x6a,0x00,0x6a,0x22,0x8d,0x45, - 0xf4,0x50,0x6a,0x04,0x57,0xff,0x15,0x10,0x20,0x01,0x00,0x85,0xc0,0x75,0x4f,0x68,0x3c,0x20, - 0x01,0x00,0x8d,0x45,0xec,0x50,0xff,0xd6,0x8d,0x45,0xf4,0x50,0x8d,0x45,0xec,0x50,0xff,0x15, - 0x0c,0x20,0x01,0x00,0x8b,0xf0,0x85,0xf6,0x74,0x0d,0xff,0x75,0xfc,0xff,0x15,0x04,0x20,0x01, - 0x00,0x8b,0xc6,0xeb,0x23,0x8b,0x45,0xfc,0xa3,0x00,0x30,0x01,0x00,0xb8,0x00,0x10,0x01,0x00, - 0x89,0x47,0x38,0x89,0x47,0x40,0xc7,0x47,0x34,0x75,0x10,0x01,0x00,0xc7,0x47,0x70,0x19,0x10, - 0x01,0x00,0x33,0xc0,0x5f,0x5e,0xc9,0xc2,0x08,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x28,0x50,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xc6,0x50, - 0x00,0x00,0x00,0x20,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x5c,0x50,0x00,0x00,0x74,0x50,0x00,0x00,0x86,0x50, - 0x00,0x00,0x9c,0x50,0x00,0x00,0xb4,0x50,0x00,0x00,0x44,0x50,0x00,0x00,0x00,0x00,0x00,0x00, - 0x4b,0x01,0x49,0x6f,0x44,0x65,0x6c,0x65,0x74,0x65,0x53,0x79,0x6d,0x62,0x6f,0x6c,0x69,0x63, - 0x4c,0x69,0x6e,0x6b,0x00,0x00,0x0b,0x04,0x52,0x74,0x6c,0x49,0x6e,0x69,0x74,0x55,0x6e,0x69, - 0x63,0x6f,0x64,0x65,0x53,0x74,0x72,0x69,0x6e,0x67,0x00,0x00,0x49,0x01,0x49,0x6f,0x44,0x65, - 
0x6c,0x65,0x74,0x65,0x44,0x65,0x76,0x69,0x63,0x65,0x00,0x00,0xda,0x01,0x49,0x6f,0x66,0x43, - 0x6f,0x6d,0x70,0x6c,0x65,0x74,0x65,0x52,0x65,0x71,0x75,0x65,0x73,0x74,0x00,0x00,0x41,0x01, - 0x49,0x6f,0x43,0x72,0x65,0x61,0x74,0x65,0x53,0x79,0x6d,0x62,0x6f,0x6c,0x69,0x63,0x4c,0x69, - 0x6e,0x6b,0x00,0x00,0x38,0x01,0x49,0x6f,0x43,0x72,0x65,0x61,0x74,0x65,0x44,0x65,0x76,0x69, - 0x63,0x65,0x00,0x00,0x6e,0x74,0x6f,0x73,0x6b,0x72,0x6e,0x6c,0x2e,0x65,0x78,0x65,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x00,0x10,0x00,0x00,0x00,0x18,0x00,0x00,0x80, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x00,0x01,0x00, - 0x00,0x00,0x30,0x00,0x00,0x80,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x01,0x00,0x09,0x04,0x00,0x00,0x48,0x00,0x00,0x00,0x60,0x60,0x00,0x00,0x5c,0x03, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x5c,0x03,0x34,0x00,0x00,0x00,0x56,0x00,0x53,0x00,0x5f,0x00,0x56,0x00,0x45,0x00,0x52,0x00, - 0x53,0x00,0x49,0x00,0x4f,0x00,0x4e,0x00,0x5f,0x00,0x49,0x00,0x4e,0x00,0x46,0x00,0x4f,0x00, - 0x00,0x00,0x00,0x00,0xbd,0x04,0xef,0xfe,0x00,0x00,0x01,0x00,0x00,0x00,0x01,0x00,0x01,0x00, - 0x00,0x00,0x00,0x00,0x01,0x00,0x01,0x00,0x00,0x00,0x17,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x04,0x00,0x04,0x00,0x03,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0xba,0x02,0x00,0x00,0x01,0x00,0x53,0x00,0x74,0x00,0x72,0x00,0x69,0x00,0x6e,0x00, - 0x67,0x00,0x46,0x00,0x69,0x00,0x6c,0x00,0x65,0x00,0x49,0x00,0x6e,0x00,0x66,0x00,0x6f,0x00, - 0x00,0x00,0x96,0x02,0x00,0x00,0x01,0x00,0x30,0x00,0x34,0x00,0x30,0x00,0x39,0x00,0x30,0x00, - 0x34,0x00,0x62,0x00,0x30,0x00,0x00,0x00,0x58,0x00,0x20,0x00,0x01,0x00,0x43,0x00,0x6f,0x00, - 0x6d,0x00,0x6d,0x00,0x65,0x00,0x6e,0x00,0x74,0x00,0x73,0x00,0x00,0x00,0x4d,0x00,0x53,0x00, - 0x52,0x00,0x20,0x00,0x72,0x00,0x65,0x00,0x61,0x00,0x64,0x00,0x65,0x00,0x72,0x00,0x20,0x00, - 
0x33,0x00,0x32,0x00,0x2d,0x00,0x62,0x00,0x69,0x00,0x74,0x00,0x20,0x00,0x6b,0x00,0x65,0x00, - 0x72,0x00,0x6e,0x00,0x65,0x00,0x6c,0x00,0x20,0x00,0x64,0x00,0x72,0x00,0x69,0x00,0x76,0x00, - 0x65,0x00,0x72,0x00,0x00,0x00,0x42,0x00,0x11,0x00,0x01,0x00,0x43,0x00,0x6f,0x00,0x6d,0x00, - 0x70,0x00,0x61,0x00,0x6e,0x00,0x79,0x00,0x4e,0x00,0x61,0x00,0x6d,0x00,0x65,0x00,0x00,0x00, - 0x00,0x00,0x49,0x00,0x72,0x00,0x6f,0x00,0x6e,0x00,0x20,0x00,0x53,0x00,0x74,0x00,0x65,0x00, - 0x65,0x00,0x64,0x00,0x73,0x00,0x20,0x00,0x49,0x00,0x6e,0x00,0x63,0x00,0x2e,0x00,0x00,0x00, - 0x00,0x00,0x60,0x00,0x1c,0x00,0x01,0x00,0x46,0x00,0x69,0x00,0x6c,0x00,0x65,0x00,0x44,0x00, - 0x65,0x00,0x73,0x00,0x63,0x00,0x72,0x00,0x69,0x00,0x70,0x00,0x74,0x00,0x69,0x00,0x6f,0x00, - 0x6e,0x00,0x00,0x00,0x00,0x00,0x54,0x00,0x6d,0x00,0x70,0x00,0x52,0x00,0x64,0x00,0x72,0x00, - 0x20,0x00,0x33,0x00,0x32,0x00,0x2d,0x00,0x62,0x00,0x69,0x00,0x74,0x00,0x20,0x00,0x4b,0x00, - 0x65,0x00,0x72,0x00,0x6e,0x00,0x65,0x00,0x6c,0x00,0x20,0x00,0x4d,0x00,0x6f,0x00,0x64,0x00, - 0x75,0x00,0x6c,0x00,0x65,0x00,0x00,0x00,0x36,0x00,0x0b,0x00,0x01,0x00,0x46,0x00,0x69,0x00, - 0x6c,0x00,0x65,0x00,0x56,0x00,0x65,0x00,0x72,0x00,0x73,0x00,0x69,0x00,0x6f,0x00,0x6e,0x00, - 0x00,0x00,0x00,0x00,0x31,0x00,0x2c,0x00,0x20,0x00,0x30,0x00,0x2c,0x00,0x20,0x00,0x30,0x00, - 0x2c,0x00,0x20,0x00,0x31,0x00,0x00,0x00,0x00,0x00,0x2e,0x00,0x07,0x00,0x01,0x00,0x49,0x00, - 0x6e,0x00,0x74,0x00,0x65,0x00,0x72,0x00,0x6e,0x00,0x61,0x00,0x6c,0x00,0x4e,0x00,0x61,0x00, - 0x6d,0x00,0x65,0x00,0x00,0x00,0x54,0x00,0x6d,0x00,0x70,0x00,0x52,0x00,0x64,0x00,0x72,0x00, - 0x00,0x00,0x00,0x00,0x4a,0x00,0x13,0x00,0x01,0x00,0x4c,0x00,0x65,0x00,0x67,0x00,0x61,0x00, - 0x6c,0x00,0x43,0x00,0x6f,0x00,0x70,0x00,0x79,0x00,0x72,0x00,0x69,0x00,0x67,0x00,0x68,0x00, - 0x74,0x00,0x00,0x00,0x4e,0x00,0x69,0x00,0x63,0x00,0x6b,0x00,0x20,0x00,0x47,0x00,0x61,0x00, - 0x62,0x00,0x61,0x00,0x72,0x00,0x65,0x00,0x76,0x00,0x20,0x00,0x27,0x00,0x32,0x00,0x30,0x00, - 0x30,0x00,0x39,0x00,0x00,0x00,0x00,0x00,0x3e,0x00,0x0b,0x00,0x01,0x00,0x4f,0x00,0x72,0x00, - 0x69,0x00,0x67,0x00,0x69,0x00,0x6e,0x00,0x61,0x00,0x6c,0x00,0x46,0x00,0x69,0x00,0x6c,0x00, - 0x65,0x00,0x6e,0x00,0x61,0x00,0x6d,0x00,0x65,0x00,0x00,0x00,0x54,0x00,0x6d,0x00,0x70,0x00, - 0x52,0x00,0x64,0x00,0x72,0x00,0x2e,0x00,0x73,0x00,0x79,0x00,0x73,0x00,0x00,0x00,0x00,0x00, - 0x54,0x00,0x1a,0x00,0x01,0x00,0x50,0x00,0x72,0x00,0x6f,0x00,0x64,0x00,0x75,0x00,0x63,0x00, - 0x74,0x00,0x4e,0x00,0x61,0x00,0x6d,0x00,0x65,0x00,0x00,0x00,0x00,0x00,0x43,0x00,0x6f,0x00, - 0x72,0x00,0x65,0x00,0x20,0x00,0x32,0x00,0x20,0x00,0x54,0x00,0x65,0x00,0x6d,0x00,0x70,0x00, - 0x65,0x00,0x72,0x00,0x61,0x00,0x74,0x00,0x75,0x00,0x72,0x00,0x65,0x00,0x20,0x00,0x52,0x00, - 0x65,0x00,0x61,0x00,0x64,0x00,0x65,0x00,0x72,0x00,0x00,0x00,0x3a,0x00,0x0b,0x00,0x01,0x00, - 0x50,0x00,0x72,0x00,0x6f,0x00,0x64,0x00,0x75,0x00,0x63,0x00,0x74,0x00,0x56,0x00,0x65,0x00, - 0x72,0x00,0x73,0x00,0x69,0x00,0x6f,0x00,0x6e,0x00,0x00,0x00,0x31,0x00,0x2c,0x00,0x20,0x00, - 0x30,0x00,0x2c,0x00,0x20,0x00,0x30,0x00,0x2c,0x00,0x20,0x00,0x31,0x00,0x00,0x00,0x00,0x00, - 0x44,0x00,0x00,0x00,0x01,0x00,0x56,0x00,0x61,0x00,0x72,0x00,0x46,0x00,0x69,0x00,0x6c,0x00, - 0x65,0x00,0x49,0x00,0x6e,0x00,0x66,0x00,0x6f,0x00,0x00,0x00,0x00,0x00,0x24,0x00,0x04,0x00, - 0x00,0x00,0x54,0x00,0x72,0x00,0x61,0x00,0x6e,0x00,0x73,0x00,0x6c,0x00,0x61,0x00,0x74,0x00, - 0x69,0x00,0x6f,0x00,0x6e,0x00,0x00,0x00,0x00,0x00,0x09,0x04,0xb0,0x04,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x10,0x00,0x00,0x14,0x00,0x00,0x00, - 0x10,0x30,0x5c,0x30,0x82,0x30,0x87,0x30,0x91,0x30,0x9b,0x30,0x00,0x40,0x00,0x00,0x1c,0x00, - 0x00,0x00,0x09,0x30,0x0f,0x30,0x2f,0x30,0x38,0x30,0x4c,0x30,0x5b,0x30,0x67,0x30,0x6c,0x30, - 0x79,0x30,0x80,0x30,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -}; -int cc_x64driver_code_size = 5120; -uint8_t cc_x64driver_code[5120] = { - 0x4d,0x5a,0x90,0x00,0x03,0x00,0x00,0x00,0x04,0x00,0x00,0x00,0xff,0xff,0x00,0x00,0xb8,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x40,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0xd0,0x00,0x00,0x00,0x0e,0x1f,0xba,0x0e,0x00,0xb4,0x09,0xcd, - 0x21,0xb8,0x01,0x4c,0xcd,0x21,0x54,0x68,0x69,0x73,0x20,0x70,0x72,0x6f,0x67,0x72,0x61,0x6d, - 0x20,0x63,0x61,0x6e,0x6e,0x6f,0x74,0x20,0x62,0x65,0x20,0x72,0x75,0x6e,0x20,0x69,0x6e,0x20, - 
0x44,0x4f,0x53,0x20,0x6d,0x6f,0x64,0x65,0x2e,0x0d,0x0d,0x0a,0x24,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0xb7,0x04,0xa8,0xc2,0xf3,0x65,0xc6,0x91,0xf3,0x65,0xc6,0x91,0xf3,0x65,0xc6,0x91, - 0xf3,0x65,0xc7,0x91,0xf4,0x65,0xc6,0x91,0x85,0xf8,0xbd,0x91,0xf0,0x65,0xc6,0x91,0x85,0xf8, - 0xab,0x91,0xf0,0x65,0xc6,0x91,0x30,0x6a,0x98,0x91,0xf2,0x65,0xc6,0x91,0x85,0xf8,0xbe,0x91, - 0xf2,0x65,0xc6,0x91,0x52,0x69,0x63,0x68,0xf3,0x65,0xc6,0x91,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x50,0x45,0x00,0x00,0x64,0x86,0x07,0x00, - 0x41,0xc8,0x6d,0x49,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xf0,0x00,0x22,0x00,0x0b,0x02, - 0x08,0x00,0x00,0x06,0x00,0x00,0x00,0x0a,0x00,0x00,0x00,0x00,0x00,0x00,0x10,0x60,0x00,0x00, - 0x00,0x10,0x00,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x10,0x00,0x00,0x00,0x02, - 0x00,0x00,0x05,0x00,0x02,0x00,0x05,0x00,0x02,0x00,0x05,0x00,0x02,0x00,0x00,0x00,0x00,0x00, - 0x00,0x80,0x00,0x00,0x00,0x04,0x00,0x00,0x79,0x44,0x00,0x00,0x01,0x00,0x00,0x00,0x00,0x00, - 0x04,0x00,0x00,0x00,0x00,0x00,0x00,0x10,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x10,0x00, - 0x00,0x00,0x00,0x00,0x00,0x10,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x10,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x6c,0x60,0x00,0x00,0x28,0x00,0x00,0x00, - 0x00,0x70,0x00,0x00,0xc0,0x03,0x00,0x00,0x00,0x40,0x00,0x00,0x48,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x40,0x20,0x00,0x00, - 0x1c,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x20,0x00,0x00,0x38,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x2e,0x74,0x65,0x78,0x74,0x00,0x00,0x00,0x26,0x01,0x00,0x00,0x00,0x10, - 0x00,0x00,0x00,0x02,0x00,0x00,0x00,0x04,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x20,0x00,0x00,0x68,0x2e,0x72,0x64,0x61,0x74,0x61,0x00,0x00,0xf0,0x00, - 0x00,0x00,0x00,0x20,0x00,0x00,0x00,0x02,0x00,0x00,0x00,0x06,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x40,0x00,0x00,0x48,0x2e,0x64,0x61,0x74,0x61,0x00, - 0x00,0x00,0x18,0x01,0x00,0x00,0x00,0x30,0x00,0x00,0x00,0x02,0x00,0x00,0x00,0x08,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x40,0x00,0x00,0xc8,0x2e,0x70, - 0x64,0x61,0x74,0x61,0x00,0x00,0x48,0x00,0x00,0x00,0x00,0x40,0x00,0x00,0x00,0x02,0x00,0x00, - 0x00,0x0a,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x40,0x00, - 0x00,0x48,0x50,0x41,0x47,0x45,0x30,0x44,0x45,0x46,0x4e,0x01,0x00,0x00,0x00,0x50,0x00,0x00, - 0x00,0x02,0x00,0x00,0x00,0x0c,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x20,0x00,0x00,0x60,0x49,0x4e,0x49,0x54,0x00,0x00,0x00,0x00,0x60,0x01,0x00,0x00, - 0x00,0x60,0x00,0x00,0x00,0x02,0x00,0x00,0x00,0x0e,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x20,0x00,0x00,0xe2,0x2e,0x72,0x73,0x72,0x63,0x00,0x00,0x00, - 0xc0,0x03,0x00,0x00,0x00,0x70,0x00,0x00,0x00,0x04,0x00,0x00,0x00,0x10,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x40,0x00,0x00,0x42,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x48,0x83,0xec,0x28, - 0x33,0xc9,0x48,0x8b,0xc2,0x89,0x4a,0x30,0x48,0x89,0x4a,0x38,0x33,0xd2,0x48,0x8b,0xc8,0xff, - 0x15,0xfd,0x0f,0x00,0x00,0x33,0xc0,0x48,0x83,0xc4,0x28,0xc3,0xcc,0xcc,0xcc,0xcc,0xcc,0xcc, - 0xcc,0xcc,0xcc,0xcc,0xcc,0xcc,0xcc,0xcc,0x48,0x83,0xec,0x28,0x48,0x8b,0x82,0xb8,0x00,0x00, - 0x00,0x4c,0x8b,0xca,0x81,0x78,0x18,0x0c,0xe0,0x22,0x00,0x75,0x43,0x83,0x78,0x08,0x08,0x72, - 0x3d,0x83,0x78,0x10,0x04,0x75,0x08,0x48,0x8b,0x42,0x18,0x8b,0x08,0xeb,0x05,0xb9,0x9c,0x01, - 0x00,0x00,0x4c,0x8b,0x42,0x18,0x0f,0x32,0x48,0xc1,0xe2,0x20,0x49,0x8b,0xc9,0x48,0x0b,0xc2, - 0x33,0xd2,0x49,0x89,0x00,0x49,0xc7,0x41,0x38,0x08,0x00,0x00,0x00,0xff,0x15,0x95,0x0f,0x00, - 0x00,0x33,0xc0,0x48,0x83,0xc4,0x28,0xc3,0xc7,0x42,0x30,0x01,0x00,0x00,0xc0,0x48,0xc7,0x42, - 0x38,0x00,0x00,0x00,0x00,0x49,0x8b,0xc9,0x33,0xd2,0xff,0x15,0x74,0x0f,0x00,0x00,0xb8,0x01, - 0x00,0x00,0xc0,0x48,0x83,0xc4,0x28,0xc3,0xcc,0xcc,0xcc,0xcc,0xcc,0xcc,0xcc,0xcc,0xcc,0xcc, - 0xcc,0xcc,0xcc,0xcc,0xcc,0xcc,0xcc,0xcc,0x48,0x83,0xec,0x38,0x48,0x8b,0x49,0x08,0xff,0x15, - 0x32,0x0f,0x00,0x00,0x48,0x8d,0x15,0x1b,0x00,0x00,0x00,0x48,0x8d,0x4c,0x24,0x20,0xff,0x15, - 0x18,0x0f,0x00,0x00,0x48,0x8d,0x4c,0x24,0x20,0xff,0x15,0x05,0x0f,0x00,0x00,0x48,0x83,0xc4, - 0x38,0xc3,0x5c,0x00,0x44,0x00,0x6f,0x00,0x73,0x00,0x44,0x00,0x65,0x00,0x76,0x00,0x69,0x00, - 0x63,0x00,0x65,0x00,0x73,0x00,0x5c,0x00,0x54,0x00,0x6d,0x00,0x70,0x00,0x52,0x00,0x64,0x00, - 0x72,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0xe6,0x60,0x00,0x00,0x00,0x00,0x00,0x00,0xfe,0x60,0x00,0x00, - 0x00,0x00,0x00,0x00,0x16,0x61,0x00,0x00,0x00,0x00,0x00,0x00,0x28,0x61,0x00,0x00,0x00,0x00, - 0x00,0x00,0x40,0x61,0x00,0x00,0x00,0x00,0x00,0x00,0xd0,0x60,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x41,0xc8,0x6d,0x49,0x00,0x00,0x00,0x00,0x02,0x00,0x00,0x00,0x49,0x00,0x00,0x00, - 0x5c,0x20,0x00,0x00,0x5c,0x06,0x00,0x00,0x52,0x53,0x44,0x53,0xd9,0x5e,0xab,0x47,0xc4,0xf2, - 0x4f,0x40,0xaa,0xe9,0x90,0x47,0x67,0x30,0xa5,0xfa,0x03,0x00,0x00,0x00,0x44,0x3a,0x5c,0x74, - 0x6d,0x70,0x5c,0x4b,0x65,0x72,0x6e,0x65,0x6c,0x5c,0x6f,0x62,0x6a,0x66,0x72,0x65,0x5f,0x77, - 0x6e,0x65,0x74,0x5f,0x41,0x4d,0x44,0x36,0x34,0x5c,0x61,0x6d,0x64,0x36,0x34,0x5c,0x54,0x6d, - 0x70,0x52,0x64,0x72,0x2e,0x70,0x64,0x62,0x00,0x00,0x00,0x00,0x01,0x04,0x01,0x00,0x04,0x42, - 0x00,0x00,0x01,0x04,0x01,0x00,0x04,0x42,0x00,0x00,0x01,0x04,0x01,0x00,0x04,0x62,0x00,0x00, - 0x21,0x00,0x00,0x00,0x10,0x50,0x00,0x00,0x74,0x50,0x00,0x00,0xe4,0x20,0x00,0x00,0x21,0x08, - 0x02,0x00,0x08,0x74,0x13,0x00,0x10,0x50,0x00,0x00,0x74,0x50,0x00,0x00,0xe4,0x20,0x00,0x00, - 0x01,0x0c,0x03,0x00,0x0c,0x34,0x12,0x00,0x04,0xe2,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0xcd,0x5d,0x20,0xd2,0x66,0xd4,0xff,0xff,0x32,0xa2,0xdf,0x2d,0x99,0x2b,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x10,0x10,0x00,0x00,0x32,0x10,0x00,0x00,0xa8,0x20,0x00,0x00,0x40,0x10, - 0x00,0x00,0xbe,0x10,0x00,0x00,0xb0,0x20,0x00,0x00,0xd0,0x10,0x00,0x00,0x00,0x11,0x00,0x00, - 0xb8,0x20,0x00,0x00,0x10,0x50,0x00,0x00,0x74,0x50,0x00,0x00,0xe4,0x20,0x00,0x00,0x74,0x50, - 0x00,0x00,0xe8,0x50,0x00,0x00,0xd0,0x20,0x00,0x00,0xe8,0x50,0x00,0x00,0xf5,0x50,0x00,0x00, - 0xc0,0x20,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x48,0x83,0xec,0x78,0x48,0x89,0x9c,0x24, - 0x90,0x00,0x00,0x00,0x48,0x8b,0xd9,0x48,0x8d,0x15,0x0a,0x01,0x00,0x00,0x48,0x8d,0x4c,0x24, - 0x48,0xff,0x15,0xd7,0xcf,0xff,0xff,0x41,0xb9,0x22,0x00,0x00,0x00,0x4c,0x8d,0x5c,0x24,0x40, - 0x4c,0x89,0x5c,0x24,0x30,0x4c,0x8d,0x44,0x24,0x48,0x41,0x8d,0x51,0xe6,0x48,0x8b,0xcb,0xc6, - 0x44,0x24,0x28,0x00,0xc7,0x44,0x24,0x20,0x00,0x00,0x00,0x00,0xff,0x15,0xc0,0xcf,0xff,0xff, - 0x85,0xc0,0x0f,0x85,0x80,0x00,0x00,0x00,0x48,0x8d,0x15,0x91,0x00,0x00,0x00,0x48,0x8d,0x4c, - 0x24,0x58,0x48,0x89,0xbc,0x24,0x98,0x00,0x00,0x00,0xff,0x15,0x86,0xcf,0xff,0xff,0x48,0x8d, - 0x54,0x24,0x48,0x48,0x8d,0x4c,0x24,0x58,0xff,0x15,0x86,0xcf,0xff,0xff,0x85,0xc0,0x8b,0xf8, - 0x74,0x0f,0x48,0x8b,0x4c,0x24,0x40,0xff,0x15,0x6d,0xcf,0xff,0xff,0x8b,0xc7,0xeb,0x39,0x48, - 0x8b,0x44,0x24,0x40,0x48,0x89,0x05,0x5d,0xe0,0xff,0xff,0x48,0x8d,0x05,0x16,0xc0,0xff,0xff, - 0x48,0x89,0x43,0x68,0x48,0x8d,0x05,0x4b,0xbf,0xff,0xff,0x48,0x89,0x43,0x70,0x48,0x89,0x83, - 0x80,0x00,0x00,0x00,0x48,0x8d,0x05,0x69,0xbf,0xff,0xff,0x48,0x89,0x83,0xe0,0x00,0x00,0x00, - 0x33,0xc0,0x48,0x8b,0xbc,0x24,0x98,0x00,0x00,0x00,0x48,0x8b,0x9c,0x24,0x90,0x00,0x00,0x00, - 0x48,0x83,0xc4,0x78,0xc3,0xcc,0xcc,0xcc,0xcc,0xcc,0xcc,0xcc,0xcc,0xcc,0xcc,0xcc,0x5c,0x00, - 0x44,0x00,0x6f,0x00,0x73,0x00,0x44,0x00,0x65,0x00,0x76,0x00,0x69,0x00,0x63,0x00,0x65,0x00, - 0x73,0x00,0x5c,0x00,0x54,0x00,0x6d,0x00,0x70,0x00,0x52,0x00,0x64,0x00,0x72,0x00,0x00,0x00, - 0xcc,0xcc,0xcc,0xcc,0xcc,0xcc,0xcc,0xcc,0xcc,0xcc,0x5c,0x00,0x44,0x00,0x65,0x00,0x76,0x00, - 0x69,0x00,0x63,0x00,0x65,0x00,0x5c,0x00,0x54,0x00,0x6d,0x00,0x70,0x00,0x52,0x00,0x64,0x00, - 0x72,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x48,0x8b,0x05,0xf1,0xd0,0xff,0xff,0x49,0xb9,0x32,0xa2,0xdf,0x2d,0x99,0x2b,0x00,0x00,0x48, - 0x85,0xc0,0x74,0x05,0x49,0x3b,0xc1,0x75,0x2f,0x4c,0x8d,0x05,0xd6,0xd0,0xff,0xff,0x48,0xb8, - 0x20,0x03,0x00,0x00,0x80,0xf7,0xff,0xff,0x48,0x8b,0x00,0x49,0x33,0xc0,0x49,0xb8,0xff,0xff, - 0xff,0xff,0xff,0xff,0x00,0x00,0x49,0x23,0xc0,0x49,0x0f,0x44,0xc1,0x48,0x89,0x05,0xae,0xd0, - 0xff,0xff,0x48,0xf7,0xd0,0x48,0x89,0x05,0x9c,0xd0,0xff,0xff,0xe9,0xa7,0xef,0xff,0xff,0xcc, - 0xcc,0xcc,0x98,0x60,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x52,0x61,0x00,0x00, - 0x00,0x20,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xe6,0x60,0x00,0x00,0x00,0x00,0x00,0x00, - 0xfe,0x60,0x00,0x00,0x00,0x00,0x00,0x00,0x16,0x61,0x00,0x00,0x00,0x00,0x00,0x00,0x28,0x61, - 0x00,0x00,0x00,0x00,0x00,0x00,0x40,0x61,0x00,0x00,0x00,0x00,0x00,0x00,0xd0,0x60,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xf6,0x01,0x49,0x6f,0x66,0x43, - 0x6f,0x6d,0x70,0x6c,0x65,0x74,0x65,0x52,0x65,0x71,0x75,0x65,0x73,0x74,0x00,0x00,0x61,0x01, - 0x49,0x6f,0x44,0x65,0x6c,0x65,0x74,0x65,0x53,0x79,0x6d,0x62,0x6f,0x6c,0x69,0x63,0x4c,0x69, - 0x6e,0x6b,0x00,0x00,0x3e,0x04,0x52,0x74,0x6c,0x49,0x6e,0x69,0x74,0x55,0x6e,0x69,0x63,0x6f, - 0x64,0x65,0x53,0x74,0x72,0x69,0x6e,0x67,0x00,0x00,0x5f,0x01,0x49,0x6f,0x44,0x65,0x6c,0x65, - 0x74,0x65,0x44,0x65,0x76,0x69,0x63,0x65,0x00,0x00,0x55,0x01,0x49,0x6f,0x43,0x72,0x65,0x61, - 0x74,0x65,0x53,0x79,0x6d,0x62,0x6f,0x6c,0x69,0x63,0x4c,0x69,0x6e,0x6b,0x00,0x00,0x4c,0x01, - 0x49,0x6f,0x43,0x72,0x65,0x61,0x74,0x65,0x44,0x65,0x76,0x69,0x63,0x65,0x00,0x00,0x6e,0x74, - 0x6f,0x73,0x6b,0x72,0x6e,0x6c,0x2e,0x65,0x78,0x65,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x00,0x10,0x00,0x00,0x00,0x18,0x00,0x00,0x80,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x00,0x01,0x00,0x00,0x00, - 0x30,0x00,0x00,0x80,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x01,0x00,0x09,0x04,0x00,0x00,0x48,0x00,0x00,0x00,0x60,0x70,0x00,0x00,0x60,0x03,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x60,0x03, - 0x34,0x00,0x00,0x00,0x56,0x00,0x53,0x00,0x5f,0x00,0x56,0x00,0x45,0x00,0x52,0x00,0x53,0x00, - 
0x49,0x00,0x4f,0x00,0x4e,0x00,0x5f,0x00,0x49,0x00,0x4e,0x00,0x46,0x00,0x4f,0x00,0x00,0x00, - 0x00,0x00,0xbd,0x04,0xef,0xfe,0x00,0x00,0x01,0x00,0x00,0x00,0x01,0x00,0x01,0x00,0x00,0x00, - 0x00,0x00,0x01,0x00,0x01,0x00,0x00,0x00,0x17,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x04,0x00, - 0x04,0x00,0x03,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0xbe,0x02,0x00,0x00,0x01,0x00,0x53,0x00,0x74,0x00,0x72,0x00,0x69,0x00,0x6e,0x00,0x67,0x00, - 0x46,0x00,0x69,0x00,0x6c,0x00,0x65,0x00,0x49,0x00,0x6e,0x00,0x66,0x00,0x6f,0x00,0x00,0x00, - 0x9a,0x02,0x00,0x00,0x01,0x00,0x30,0x00,0x34,0x00,0x30,0x00,0x39,0x00,0x30,0x00,0x34,0x00, - 0x62,0x00,0x30,0x00,0x00,0x00,0x58,0x00,0x20,0x00,0x01,0x00,0x43,0x00,0x6f,0x00,0x6d,0x00, - 0x6d,0x00,0x65,0x00,0x6e,0x00,0x74,0x00,0x73,0x00,0x00,0x00,0x4d,0x00,0x53,0x00,0x52,0x00, - 0x20,0x00,0x72,0x00,0x65,0x00,0x61,0x00,0x64,0x00,0x65,0x00,0x72,0x00,0x20,0x00,0x36,0x00, - 0x34,0x00,0x2d,0x00,0x62,0x00,0x69,0x00,0x74,0x00,0x20,0x00,0x6b,0x00,0x65,0x00,0x72,0x00, - 0x6e,0x00,0x65,0x00,0x6c,0x00,0x20,0x00,0x64,0x00,0x72,0x00,0x69,0x00,0x76,0x00,0x65,0x00, - 0x72,0x00,0x00,0x00,0x42,0x00,0x11,0x00,0x01,0x00,0x43,0x00,0x6f,0x00,0x6d,0x00,0x70,0x00, - 0x61,0x00,0x6e,0x00,0x79,0x00,0x4e,0x00,0x61,0x00,0x6d,0x00,0x65,0x00,0x00,0x00,0x00,0x00, - 0x49,0x00,0x72,0x00,0x6f,0x00,0x6e,0x00,0x20,0x00,0x53,0x00,0x74,0x00,0x65,0x00,0x65,0x00, - 0x64,0x00,0x73,0x00,0x20,0x00,0x49,0x00,0x6e,0x00,0x63,0x00,0x2e,0x00,0x00,0x00,0x00,0x00, - 0x60,0x00,0x1c,0x00,0x01,0x00,0x46,0x00,0x69,0x00,0x6c,0x00,0x65,0x00,0x44,0x00,0x65,0x00, - 0x73,0x00,0x63,0x00,0x72,0x00,0x69,0x00,0x70,0x00,0x74,0x00,0x69,0x00,0x6f,0x00,0x6e,0x00, - 0x00,0x00,0x00,0x00,0x54,0x00,0x6d,0x00,0x70,0x00,0x52,0x00,0x64,0x00,0x72,0x00,0x20,0x00, - 0x36,0x00,0x34,0x00,0x2d,0x00,0x62,0x00,0x69,0x00,0x74,0x00,0x20,0x00,0x4b,0x00,0x65,0x00, - 0x72,0x00,0x6e,0x00,0x65,0x00,0x6c,0x00,0x20,0x00,0x4d,0x00,0x6f,0x00,0x64,0x00,0x75,0x00, - 0x6c,0x00,0x65,0x00,0x00,0x00,0x36,0x00,0x0b,0x00,0x01,0x00,0x46,0x00,0x69,0x00,0x6c,0x00, - 0x65,0x00,0x56,0x00,0x65,0x00,0x72,0x00,0x73,0x00,0x69,0x00,0x6f,0x00,0x6e,0x00,0x00,0x00, - 0x00,0x00,0x31,0x00,0x2c,0x00,0x20,0x00,0x30,0x00,0x2c,0x00,0x20,0x00,0x30,0x00,0x2c,0x00, - 0x20,0x00,0x31,0x00,0x00,0x00,0x00,0x00,0x2e,0x00,0x07,0x00,0x01,0x00,0x49,0x00,0x6e,0x00, - 0x74,0x00,0x65,0x00,0x72,0x00,0x6e,0x00,0x61,0x00,0x6c,0x00,0x4e,0x00,0x61,0x00,0x6d,0x00, - 0x65,0x00,0x00,0x00,0x54,0x00,0x6d,0x00,0x70,0x00,0x52,0x00,0x64,0x00,0x72,0x00,0x00,0x00, - 0x00,0x00,0x4a,0x00,0x13,0x00,0x01,0x00,0x4c,0x00,0x65,0x00,0x67,0x00,0x61,0x00,0x6c,0x00, - 0x43,0x00,0x6f,0x00,0x70,0x00,0x79,0x00,0x72,0x00,0x69,0x00,0x67,0x00,0x68,0x00,0x74,0x00, - 0x00,0x00,0x4e,0x00,0x69,0x00,0x63,0x00,0x6b,0x00,0x20,0x00,0x47,0x00,0x61,0x00,0x62,0x00, - 0x61,0x00,0x72,0x00,0x65,0x00,0x76,0x00,0x20,0x00,0x27,0x00,0x32,0x00,0x30,0x00,0x30,0x00, - 0x39,0x00,0x00,0x00,0x00,0x00,0x42,0x00,0x0d,0x00,0x01,0x00,0x4f,0x00,0x72,0x00,0x69,0x00, - 0x67,0x00,0x69,0x00,0x6e,0x00,0x61,0x00,0x6c,0x00,0x46,0x00,0x69,0x00,0x6c,0x00,0x65,0x00, - 0x6e,0x00,0x61,0x00,0x6d,0x00,0x65,0x00,0x00,0x00,0x54,0x00,0x6d,0x00,0x70,0x00,0x52,0x00, - 0x64,0x00,0x72,0x00,0x36,0x00,0x34,0x00,0x2e,0x00,0x73,0x00,0x79,0x00,0x73,0x00,0x00,0x00, - 0x00,0x00,0x54,0x00,0x1a,0x00,0x01,0x00,0x50,0x00,0x72,0x00,0x6f,0x00,0x64,0x00,0x75,0x00, - 0x63,0x00,0x74,0x00,0x4e,0x00,0x61,0x00,0x6d,0x00,0x65,0x00,0x00,0x00,0x00,0x00,0x43,0x00, - 0x6f,0x00,0x72,0x00,0x65,0x00,0x20,0x00,0x32,0x00,0x20,0x00,0x54,0x00,0x65,0x00,0x6d,0x00, - 
0x70,0x00,0x65,0x00,0x72,0x00,0x61,0x00,0x74,0x00,0x75,0x00,0x72,0x00,0x65,0x00,0x20,0x00, - 0x52,0x00,0x65,0x00,0x61,0x00,0x64,0x00,0x65,0x00,0x72,0x00,0x00,0x00,0x3a,0x00,0x0b,0x00, - 0x01,0x00,0x50,0x00,0x72,0x00,0x6f,0x00,0x64,0x00,0x75,0x00,0x63,0x00,0x74,0x00,0x56,0x00, - 0x65,0x00,0x72,0x00,0x73,0x00,0x69,0x00,0x6f,0x00,0x6e,0x00,0x00,0x00,0x31,0x00,0x2c,0x00, - 0x20,0x00,0x30,0x00,0x2c,0x00,0x20,0x00,0x30,0x00,0x2c,0x00,0x20,0x00,0x31,0x00,0x00,0x00, - 0x00,0x00,0x44,0x00,0x00,0x00,0x01,0x00,0x56,0x00,0x61,0x00,0x72,0x00,0x46,0x00,0x69,0x00, - 0x6c,0x00,0x65,0x00,0x49,0x00,0x6e,0x00,0x66,0x00,0x6f,0x00,0x00,0x00,0x00,0x00,0x24,0x00, - 0x04,0x00,0x00,0x00,0x54,0x00,0x72,0x00,0x61,0x00,0x6e,0x00,0x73,0x00,0x6c,0x00,0x61,0x00, - 0x74,0x00,0x69,0x00,0x6f,0x00,0x6e,0x00,0x00,0x00,0x00,0x00,0x09,0x04,0xb0,0x04,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -}; -//} end -#endif // _WIN32 - -int msrdriver_dummy; // a dummy to avoid a linker warning on OS X. diff --git a/contrib/libcpuid/include/libcpuid/rdmsr.c b/contrib/libcpuid/include/libcpuid/rdmsr.c deleted file mode 100644 index a27e939bba0..00000000000 --- a/contrib/libcpuid/include/libcpuid/rdmsr.c +++ /dev/null @@ -1,922 +0,0 @@ -/* - * Copyright 2009 Veselin Georgiev, - * anrieffNOSPAM @ mgail_DOT.com (convert to gmail) - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ -#define _XOPEN_SOURCE 500 -#include -#include -#include "libcpuid.h" -#include "asm-bits.h" -#include "libcpuid_util.h" -#include "libcpuid_internal.h" -#include "rdtsc.h" - -#if defined (__linux__) || defined (__gnu_linux__) -/* Assuming linux with /dev/cpu/x/msr: */ -#include -#include -#include -#include -#include -#include -struct msr_driver_t { int fd; }; -static int rdmsr_supported(void); -static int load_driver(char *msr_path) -{ - const int file_exists = !access(msr_path, F_OK); - const int file_readable = !access(msr_path, R_OK); - - if (file_exists && file_readable) - return 1; - else if (file_exists && !file_readable) - return 0; - else if (getuid() != 0) - return 0; - else - return !system("modprobe msr 2> /dev/null"); -} - -struct msr_driver_t* cpu_msr_driver_open(void) -{ - return cpu_msr_driver_open_core(0); -} - -struct msr_driver_t* cpu_msr_driver_open_core(unsigned core_num) -{ - char msr[32]; - struct msr_driver_t* handle; - if (core_num >= cpuid_get_total_cpus()) { - set_error(ERR_INVCNB); - return NULL; - } - if (!rdmsr_supported()) { - set_error(ERR_NO_RDMSR); - return NULL; - } - sprintf(msr, "/dev/cpu/%u/msr", core_num); - if(!load_driver(msr)) { - set_error(ERR_NO_DRIVER); - return NULL; - } - int fd = open(msr, O_RDONLY); - if (fd < 0) { - if (errno == EIO) { - set_error(ERR_NO_RDMSR); - return NULL; - } - set_error(ERR_NO_DRIVER); - return NULL; - } - handle = (struct msr_driver_t*) malloc(sizeof(struct msr_driver_t)); - handle->fd = fd; - return handle; -} - -int cpu_rdmsr(struct msr_driver_t* driver, uint32_t msr_index, uint64_t* result) -{ - ssize_t ret; - - if (!driver || driver->fd < 0) - return set_error(ERR_HANDLE); - ret = pread(driver->fd, result, 8, msr_index); - if (ret != 8) - return set_error(ERR_INVMSR); - return 0; -} - -int cpu_msr_driver_close(struct msr_driver_t* drv) -{ - if (drv && drv->fd >= 0) { - close(drv->fd); - free(drv); - } - return 0; -} - -/* #endif defined (__linux__) || defined (__gnu_linux__) */ - -#elif defined (__FreeBSD__) || defined (__DragonFly__) -/* Assuming FreeBSD with /dev/cpuctlX */ -#include -#include -#include -#include -#include - -struct msr_driver_t { int fd; }; -static int rdmsr_supported(void); -static int load_driver(char *msr_path) -{ - const int file_exists = !access(msr_path, F_OK); - const int file_readable = !access(msr_path, R_OK); - - if (file_exists && file_readable) - return 1; - else if (file_exists && !file_readable) - return 0; - else if (getuid() != 0) - return 0; - else - return !system("kldload -n cpuctl 2> /dev/null"); -} - -struct msr_driver_t* cpu_msr_driver_open(void) -{ - return cpu_msr_driver_open_core(0); -} - -struct msr_driver_t* cpu_msr_driver_open_core(unsigned core_num) -{ - char msr[32]; - struct msr_driver_t* handle; - if (core_num >= cpuid_get_total_cpus()) { - set_error(ERR_INVCNB); - return NULL; - } - if (!rdmsr_supported()) { - set_error(ERR_NO_RDMSR); - return NULL; - } - sprintf(msr, "/dev/cpuctl%u", core_num); - if(!load_driver(msr)) { - set_error(ERR_NO_DRIVER); - return NULL; - } - int fd = open(msr, O_RDONLY); - if (fd < 0) { - if (errno == EIO) { - set_error(ERR_NO_RDMSR); - return NULL; - } - set_error(ERR_NO_DRIVER); - return NULL; - } - handle = (struct msr_driver_t*) malloc(sizeof(struct msr_driver_t)); - handle->fd = fd; - return handle; -} - -int cpu_rdmsr(struct msr_driver_t* driver, uint32_t msr_index, uint64_t* result) -{ - cpuctl_msr_args_t args; - args.msr = msr_index; - - if (!driver || driver->fd < 0) - return set_error(ERR_HANDLE); - - 
if(ioctl(driver->fd, CPUCTL_RDMSR, &args)) - return set_error(ERR_INVMSR); - - *result = args.data; - return 0; -} - -int cpu_msr_driver_close(struct msr_driver_t* drv) -{ - if (drv && drv->fd >= 0) { - close(drv->fd); - free(drv); - } - return 0; -} - -/* #endif defined (__FreeBSD__) || defined (__DragonFly__) */ - -#elif defined (_WIN32) -#include -#include -#include - -extern uint8_t cc_x86driver_code[]; -extern int cc_x86driver_code_size; -extern uint8_t cc_x64driver_code[]; -extern int cc_x64driver_code_size; - -struct msr_driver_t { - char driver_path[MAX_PATH + 1]; - SC_HANDLE scManager; - volatile SC_HANDLE scDriver; - HANDLE hhDriver; - OVERLAPPED ovl; - int errorcode; -}; - -static int rdmsr_supported(void); -static int extract_driver(struct msr_driver_t* driver); -static int load_driver(struct msr_driver_t* driver); - -struct msr_driver_t* cpu_msr_driver_open(void) -{ - struct msr_driver_t* drv; - int status; - if (!rdmsr_supported()) { - set_error(ERR_NO_RDMSR); - return NULL; - } - - drv = (struct msr_driver_t*) malloc(sizeof(struct msr_driver_t)); - if (!drv) { - set_error(ERR_NO_MEM); - return NULL; - } - memset(drv, 0, sizeof(struct msr_driver_t)); - - if (!extract_driver(drv)) { - free(drv); - set_error(ERR_EXTRACT); - return NULL; - } - - status = load_driver(drv); - if (!DeleteFile(drv->driver_path)) - debugf(1, "Deleting temporary driver file failed.\n"); - if (!status) { - set_error(drv->errorcode ? drv->errorcode : ERR_NO_DRIVER); - free(drv); - return NULL; - } - return drv; -} - -struct msr_driver_t* cpu_msr_driver_open_core(unsigned core_num) -{ - warnf("cpu_msr_driver_open_core(): parameter ignored (function is the same as cpu_msr_driver_open)\n"); - return cpu_msr_driver_open(); -} - -typedef BOOL (WINAPI *LPFN_ISWOW64PROCESS) (HANDLE, PBOOL); -static BOOL is_running_x64(void) -{ - BOOL bIsWow64 = FALSE; - - LPFN_ISWOW64PROCESS fnIsWow64Process = (LPFN_ISWOW64PROCESS)GetProcAddress(GetModuleHandle(__TEXT("kernel32")), "IsWow64Process"); - if(NULL != fnIsWow64Process) - fnIsWow64Process(GetCurrentProcess(), &bIsWow64); - return bIsWow64; -} - - -static int extract_driver(struct msr_driver_t* driver) -{ - FILE *f; - if (!GetTempPath(sizeof(driver->driver_path), driver->driver_path)) return 0; - strcat(driver->driver_path, "TmpRdr.sys"); - - f = fopen(driver->driver_path, "wb"); - if (!f) return 0; - if (is_running_x64()) - fwrite(cc_x64driver_code, 1, cc_x64driver_code_size, f); - else - fwrite(cc_x86driver_code, 1, cc_x86driver_code_size, f); - fclose(f); - return 1; -} - -static BOOL wait_for_service_state(SC_HANDLE hService, DWORD dwDesiredState, SERVICE_STATUS *lpsrvStatus){ - BOOL fOK = FALSE; - DWORD dwWaitHint; - - if(hService != NULL){ - while(TRUE){ - fOK = QueryServiceStatus(hService, lpsrvStatus); - if(!fOK) - break; - if(lpsrvStatus->dwCurrentState == dwDesiredState) - break; - - dwWaitHint = lpsrvStatus->dwWaitHint / 10; // Poll 1/10 of the wait hint - if (dwWaitHint < 1000) - dwWaitHint = 1000; // At most once per second - if (dwWaitHint > 10000) - dwWaitHint = 10000; // At least every 10 seconds - Sleep(dwWaitHint); - } - } - - return fOK; -} - -static int load_driver(struct msr_driver_t* drv) -{ - LPTSTR lpszInfo = __TEXT("RDMSR Executor Driver"); - USHORT uLen = 0; - SERVICE_STATUS srvStatus = {0}; - BOOL fRunning = FALSE; - DWORD dwLastError; - LPTSTR lpszDriverServiceName = __TEXT("TmpRdr"); - TCHAR lpszDriverName[] = __TEXT("\\\\.\\Global\\TmpRdr"); - - if((LPVOID)(drv->scManager = OpenSCManager(NULL, NULL, SC_MANAGER_ALL_ACCESS)) != NULL) { - 
drv->scDriver = CreateService(drv->scManager, lpszDriverServiceName, lpszInfo, SERVICE_ALL_ACCESS, - SERVICE_KERNEL_DRIVER, SERVICE_DEMAND_START, SERVICE_ERROR_NORMAL, - drv->driver_path, NULL, NULL, NULL, NULL, NULL); - if(drv->scDriver == NULL){ - switch(dwLastError = GetLastError()){ - case ERROR_SERVICE_EXISTS: - case ERROR_SERVICE_MARKED_FOR_DELETE:{ - LPQUERY_SERVICE_CONFIG lpqsc; - DWORD dwBytesNeeded; - - drv->scDriver = OpenService(drv->scManager, lpszDriverServiceName, SERVICE_ALL_ACCESS); - if(drv->scDriver == NULL){ - debugf(1, "Error opening service: %d\n", GetLastError()); - break; - } - - QueryServiceConfig(drv->scDriver, NULL, 0, &dwBytesNeeded); - if((dwLastError = GetLastError()) == ERROR_INSUFFICIENT_BUFFER){ - lpqsc = calloc(1, dwBytesNeeded); - if(!QueryServiceConfig(drv->scDriver, lpqsc, dwBytesNeeded, &dwBytesNeeded)){ - free(lpqsc); - debugf(1, "Error query service config(adjusted buffer): %d\n", GetLastError()); - goto clean_up; - } - else{ - free(lpqsc); - } - } - else{ - debugf(1, "Error query service config: %d\n", dwLastError); - goto clean_up; - } - - break; - } - case ERROR_ACCESS_DENIED: - drv->errorcode = ERR_NO_PERMS; - break; - default: - debugf(1, "Create driver service failed: %d\n", dwLastError); - break; - } - } - if(drv->scDriver != NULL){ - if(StartService(drv->scDriver, 0, NULL)){ - if(!wait_for_service_state(drv->scDriver, SERVICE_RUNNING, &srvStatus)){ - debugf(1, "Driver load failed.\n"); - DeleteService(drv->scDriver); - CloseServiceHandle(drv->scManager); - drv->scDriver = NULL; - goto clean_up; - } else { - fRunning = TRUE; - } - } else{ - if((dwLastError = GetLastError()) == ERROR_SERVICE_ALREADY_RUNNING) - fRunning = TRUE; - else{ - debugf(1, "Driver start failed.\n"); - DeleteService(drv->scDriver); - CloseServiceHandle(drv->scManager); - drv->scDriver = NULL; - goto clean_up; - } - - } - if(fRunning) - debugf(1, "Driver already running.\n"); - else - debugf(1, "Driver loaded.\n"); - CloseServiceHandle(drv->scManager); - drv->hhDriver = CreateFile(lpszDriverName, GENERIC_READ | GENERIC_WRITE, FILE_SHARE_READ | FILE_SHARE_WRITE, 0, OPEN_EXISTING, FILE_FLAG_OVERLAPPED, 0); - drv->ovl.hEvent = CreateEvent(NULL, TRUE, FALSE, NULL); - return 1; - } - } else { - debugf(1, "Open SCM failed: %d\n", GetLastError()); - } - -clean_up: - if(drv->scManager != NULL){ - CloseServiceHandle(drv->scManager); - drv->scManager = 0; // pointless - } - if(drv->scDriver != NULL){ - if(!DeleteService(drv->scDriver)) - debugf(1, "Delete driver service failed: %d\n", GetLastError()); - CloseServiceHandle(drv->scDriver); - drv->scDriver = 0; - } - - return 0; -} - -#define FILE_DEVICE_UNKNOWN 0x00000022 -#define IOCTL_UNKNOWN_BASE FILE_DEVICE_UNKNOWN -#define IOCTL_PROCVIEW_RDMSR CTL_CODE(IOCTL_UNKNOWN_BASE, 0x0803, METHOD_BUFFERED, FILE_READ_ACCESS | FILE_WRITE_ACCESS) - -int cpu_rdmsr(struct msr_driver_t* driver, uint32_t msr_index, uint64_t* result) -{ - DWORD dwBytesReturned; - __int64 msrdata; - SERVICE_STATUS srvStatus = {0}; - - if (!driver) - return set_error(ERR_HANDLE); - DeviceIoControl(driver->hhDriver, IOCTL_PROCVIEW_RDMSR, &msr_index, sizeof(int), &msrdata, sizeof(__int64), &dwBytesReturned, &driver->ovl); - GetOverlappedResult(driver->hhDriver, &driver->ovl, &dwBytesReturned, TRUE); - *result = msrdata; - return 0; -} - -int cpu_msr_driver_close(struct msr_driver_t* drv) -{ - SERVICE_STATUS srvStatus = {0}; - if (drv == NULL) return 0; - if(drv->scDriver != NULL){ - if (drv->hhDriver) CancelIo(drv->hhDriver); - if(drv->ovl.hEvent != NULL) - 
CloseHandle(drv->ovl.hEvent); - if (drv->hhDriver) CloseHandle(drv->hhDriver); - drv->hhDriver = NULL; - drv->ovl.hEvent = NULL; - if (ControlService(drv->scDriver, SERVICE_CONTROL_STOP, &srvStatus)){ - if (wait_for_service_state(drv->scDriver, SERVICE_STOPPED, &srvStatus)){ - DeleteService(drv->scDriver); - } - } - } - return 0; -} - -/* endif defined (_WIN32) */ - -#else /* Unsupported OS */ -/* On others OS (i.e., Darwin), we still do not support RDMSR, so supply dummy struct - and functions */ - -#define RDMSR_UNSUPPORTED_OS - -struct msr_driver_t { int dummy; }; -struct msr_driver_t* cpu_msr_driver_open(void) -{ - set_error(ERR_NOT_IMP); - return NULL; -} - -struct msr_driver_t* cpu_msr_driver_open_core(unsigned core_num) -{ - set_error(ERR_NOT_IMP); - return NULL; -} - -int cpu_rdmsr(struct msr_driver_t* driver, uint32_t msr_index, uint64_t* result) -{ - return set_error(ERR_NOT_IMP); -} - -int cpu_msr_driver_close(struct msr_driver_t* driver) -{ - return set_error(ERR_NOT_IMP); -} - -int cpu_rdmsr_range(struct msr_driver_t* handle, uint32_t msr_index, uint8_t highbit, - uint8_t lowbit, uint64_t* result) -{ - return set_error(ERR_NOT_IMP); -} - -int cpu_msrinfo(struct msr_driver_t* driver, cpu_msrinfo_request_t which) -{ - return set_error(ERR_NOT_IMP); -} - -#endif /* Unsupported OS */ - -#ifndef RDMSR_UNSUPPORTED_OS - -/* Useful links for hackers: -- AMD MSRs: - AMD BIOS and Kernel Developer’s Guide (BKDG) - * AMD Family 10h Processors - http://support.amd.com/TechDocs/31116.pdf - * AMD Family 11h Processors - http://support.amd.com/TechDocs/41256.pdf - * AMD Family 12h Processors - http://support.amd.com/TechDocs/41131.pdf - * AMD Family 14h Processors - http://support.amd.com/TechDocs/43170_14h_Mod_00h-0Fh_BKDG.pdf - * AMD Family 15h Processors - http://support.amd.com/TechDocs/42301_15h_Mod_00h-0Fh_BKDG.pdf - http://support.amd.com/TechDocs/42300_15h_Mod_10h-1Fh_BKDG.pdf - http://support.amd.com/TechDocs/49125_15h_Models_30h-3Fh_BKDG.pdf - http://support.amd.com/TechDocs/50742_15h_Models_60h-6Fh_BKDG.pdf - http://support.amd.com/TechDocs/49125_15h_Models_30h-3Fh_BKDG.pdf - * AMD Family 16h Processors - http://support.amd.com/TechDocs/48751_16h_bkdg.pdf - http://support.amd.com/TechDocs/52740_16h_Models_30h-3Fh_BKDG.pdf - -- Intel MSRs: - Intel® 64 and IA-32 Architectures Software Developer’s Manual - * Volume 3 (3A, 3B, 3C & 3D): System Programming Guide - http://www.intel.com/content/dam/www/public/us/en/documents/manuals/64-ia-32-architectures-software-developer-system-programming-manual-325384.pdf -*/ - -/* AMD MSRs addresses */ -#define MSR_PSTATE_L 0xC0010061 -#define MSR_PSTATE_S 0xC0010063 -#define MSR_PSTATE_0 0xC0010064 -#define MSR_PSTATE_7 0xC001006B - -/* Intel MSRs addresses */ -#define IA32_MPERF 0xE7 -#define IA32_APERF 0xE8 -#define IA32_PERF_STATUS 0x198 -#define IA32_THERM_STATUS 0x19C -#define MSR_EBL_CR_POWERON 0x2A -#define MSR_TURBO_RATIO_LIMIT 0x1AD -#define MSR_TEMPERATURE_TARGET 0x1A2 -#define MSR_PERF_STATUS 0x198 -#define MSR_PLATFORM_INFO 0xCE - - -static int rdmsr_supported(void) -{ - struct cpu_id_t* id = get_cached_cpuid(); - return id->flags[CPU_FEATURE_MSR]; -} - -static int perfmsr_measure(struct msr_driver_t* handle, int msr) -{ - int err; - uint64_t a, b; - uint64_t x, y; - err = cpu_rdmsr(handle, msr, &x); - if (err) return CPU_INVALID_VALUE; - sys_precise_clock(&a); - busy_loop_delay(10); - cpu_rdmsr(handle, msr, &y); - sys_precise_clock(&b); - if (a >= b || x > y) return CPU_INVALID_VALUE; - return (int) ((y - x) / (b - a)); -} - -static 
int get_amd_multipliers(struct msr_driver_t* handle, struct cpu_id_t *id, - struct internal_id_info_t *internal, - uint32_t pstate, uint64_t *multiplier) -{ - int err; - static int clock = 0; - uint64_t CpuFid, CpuDid, CpuDidLSD; - double divisor; - - if (pstate < MSR_PSTATE_0 || MSR_PSTATE_7 < pstate) - return 1; - - switch (id->ext_family) { - case 0x11: - /* BKDG 11h, page 236 - MSRC001_00[6B:64][8:6] is CpuDid - MSRC001_00[6B:64][5:0] is CpuFid - CPU COF is ((100 MHz * (CpuFid + 08h)) / (2^CpuDid)) */ - err = cpu_rdmsr_range(handle, pstate, 8, 6, &CpuDid); - err += cpu_rdmsr_range(handle, pstate, 5, 0, &CpuFid); - *multiplier = (uint64_t) ((CpuFid + 0x8) / (1ull << CpuDid)); - break; - case 0x12: - /* BKDG 12h, page 469 - MSRC001_00[6B:64][8:4] is CpuFid - MSRC001_00[6B:64][3:0] is CpuDid - CPU COF is (100MHz * (CpuFid + 10h) / (divisor specified by CpuDid)) */ - err = cpu_rdmsr_range(handle, pstate, 8, 4, &CpuFid); - err += cpu_rdmsr_range(handle, pstate, 3, 0, &CpuDid); - if (CpuDid == 0x0) - divisor = 1; - else if (CpuDid == 0x1) - divisor = 1.5; - else if (CpuDid == 0x2) - divisor = 2; - else if (CpuDid == 0x3) - divisor = 3; - else if (CpuDid == 0x4) - divisor = 4; - else if (CpuDid == 0x5) - divisor = 6; - else if (CpuDid == 0x6) - divisor = 8; - else if (CpuDid == 0x7) - divisor = 12; - else if (CpuDid == 0x8) - divisor = 16; - else - divisor = 0; - - if (divisor > 0) - *multiplier = (uint64_t) ((CpuFid + 0x10) / divisor); - else - err++; - break; - case 0x14: - /* BKDG 14h, page 430 - MSRC001_00[6B:64][8:4] is CpuDidMSD - MSRC001_00[6B:64][3:0] is CpuDidLSD - PLL COF is (100 MHz * (D18F3xD4[MainPllOpFreqId] + 10h)) - Divisor is (CpuDidMSD + (CpuDidLSD * 0.25) + 1) - CPU COF is (main PLL frequency specified by D18F3xD4[MainPllOpFreqId]) / (core clock divisor specified by CpuDidMSD and CpuDidLSD) */ - err = cpu_rdmsr_range(handle, pstate, 8, 4, &CpuDid); - err += cpu_rdmsr_range(handle, pstate, 3, 0, &CpuDidLSD); - if (clock == 0) - clock = cpu_clock_measure(100, 1) + 5; // Fake round - *multiplier = (uint64_t) ((clock / 100 + 0x10) / (CpuDid + CpuDidLSD * 0.25 + 1)); - break; - case 0x10: - /* BKDG 10h, page 429 - MSRC001_00[6B:64][8:6] is CpuDid - MSRC001_00[6B:64][5:0] is CpuFid - CPU COF is (100 MHz * (CpuFid + 10h) / (2^CpuDid)) */ - case 0x15: - /* BKDG 15h, page 570/580/635/692 (00h-0Fh/10h-1Fh/30h-3Fh/60h-6Fh) - MSRC001_00[6B:64][8:6] is CpuDid - MSRC001_00[6B:64][5:0] is CpuFid - CoreCOF is (100 * (MSRC001_00[6B:64][CpuFid] + 10h) / (2^MSRC001_00[6B:64][CpuDid])) */ - case 0x16: - /* BKDG 16h, page 549/611 (00h-0Fh/30h-3Fh) - MSRC001_00[6B:64][8:6] is CpuDid - MSRC001_00[6B:64][5:0] is CpuFid - CoreCOF is (100 * (MSRC001_00[6B:64][CpuFid] + 10h) / (2^MSRC001_00[6B:64][CpuDid])) */ - err = cpu_rdmsr_range(handle, pstate, 8, 6, &CpuDid); - err += cpu_rdmsr_range(handle, pstate, 5, 0, &CpuFid); - *multiplier = (uint64_t) ((CpuFid + 0x10) / (1ull << CpuDid)); - break; - default: - err = 1; - break; - } - - return err; -} - -static double get_info_min_multiplier(struct msr_driver_t* handle, struct cpu_id_t *id, - struct internal_id_info_t *internal) -{ - int err; - uint64_t reg; - - if(id->vendor == VENDOR_INTEL) { - /* Refer links above - Table 35-12. MSRs in Next Generation Intel Atom Processors Based on the Goldmont Microarchitecture - Table 35-13. MSRs in Processors Based on Intel® Microarchitecture Code Name Nehalem - Table 35-18. MSRs Supported by Intel® Processors based on Intel® microarchitecture code name Sandy Bridge (Contd.) - Table 35-23. 
Additional MSRs Supported by 3rd Generation Intel® Core™ Processors (based on Intel® microarchitecture code name Ivy Bridge) - Table 35-24. MSRs Supported by Intel® Xeon® Processors E5 v2 Product Family (based on Ivy Bridge-E microarchitecture) - Table 35-27. Additional MSRs Supported by Processors based on the Haswell or Haswell-E microarchitectures - Table 35-34. Additional MSRs Common to Intel® Xeon® Processor D and Intel Xeon Processors E5 v4 Family Based on the Broadwell Microarchitecture - Table 35-40. Selected MSRs Supported by Next Generation Intel® Xeon Phi™ Processors with DisplayFamily_DisplayModel Signature 06_57H - MSR_PLATFORM_INFO[47:40] is Maximum Efficiency Ratio - Maximum Efficiency Ratio is the minimum ratio that the processor can operates */ - err = cpu_rdmsr_range(handle, MSR_PLATFORM_INFO, 47, 40, &reg); - if (!err) return (double) reg; - } - else if(id->vendor == VENDOR_AMD) { - /* Refer links above - MSRC001_0061[6:4] is PstateMaxVal - PstateMaxVal is the lowest-performance non-boosted P-state */ - err = cpu_rdmsr_range(handle, MSR_PSTATE_L, 6, 4, &reg); - err += get_amd_multipliers(handle, id, internal, MSR_PSTATE_0 + (uint32_t) reg, &reg); - if (!err) return (double) reg; - } - - return (double) CPU_INVALID_VALUE / 100; -} - -static double get_info_cur_multiplier(struct msr_driver_t* handle, struct cpu_id_t *id, - struct internal_id_info_t *internal) -{ - int err; - uint64_t reg; - - if(id->vendor == VENDOR_INTEL && internal->code.intel == PENTIUM) { - err = cpu_rdmsr(handle, MSR_EBL_CR_POWERON, &reg); - if (!err) return (double) ((reg>>22) & 0x1f); - } - else if(id->vendor == VENDOR_INTEL && internal->code.intel != PENTIUM) { - /* Refer links above - Table 35-2. IA-32 Architectural MSRs (Contd.) - IA32_PERF_STATUS[15:0] is Current performance State Value - [7:0] is 0x0, [15:8] looks like current ratio */ - err = cpu_rdmsr_range(handle, IA32_PERF_STATUS, 15, 8, &reg); - if (!err) return (double) reg; - } - else if(id->vendor == VENDOR_AMD) { - /* Refer links above - MSRC001_0063[2:0] is CurPstate */ - err = cpu_rdmsr_range(handle, MSR_PSTATE_S, 2, 0, &reg); - err += get_amd_multipliers(handle, id, internal, MSR_PSTATE_0 + (uint32_t) reg, &reg); - if (!err) return (double) reg; - } - - return (double) CPU_INVALID_VALUE / 100; -} - -static double get_info_max_multiplier(struct msr_driver_t* handle, struct cpu_id_t *id, - struct internal_id_info_t *internal) -{ - int err; - uint64_t reg; - - if(id->vendor == VENDOR_INTEL && internal->code.intel == PENTIUM) { - err = cpu_rdmsr(handle, IA32_PERF_STATUS, &reg); - if (!err) return (double) ((reg >> 40) & 0x1f); - } - else if(id->vendor == VENDOR_INTEL && internal->code.intel != PENTIUM) { - /* Refer links above - Table 35-10. Specific MSRs Supported by Intel® Atom™ Processor C2000 Series with CPUID Signature 06_4DH - Table 35-12. MSRs in Next Generation Intel Atom Processors Based on the Goldmont Microarchitecture (Contd.) - Table 35-13. MSRs in Processors Based on Intel® Microarchitecture Code Name Nehalem (Contd.) - Table 35-14. Additional MSRs in Intel® Xeon® Processor 5500 and 3400 Series - Table 35-16. Additional MSRs Supported by Intel Processors (Based on Intel® Microarchitecture Code Name Westmere) - Table 35-19. MSRs Supported by 2nd Generation Intel® Core™ Processors (Intel® microarchitecture code name Sandy Bridge) - Table 35-21. Selected MSRs Supported by Intel® Xeon® Processors E5 Family (based on Sandy Bridge microarchitecture) - Table 35-28. 
MSRs Supported by 4th Generation Intel® Core™ Processors (Haswell microarchitecture) (Contd.) - Table 35-30. Additional MSRs Supported by Intel® Xeon® Processor E5 v3 Family - Table 35-33. Additional MSRs Supported by Intel® Core™ M Processors and 5th Generation Intel® Core™ Processors - Table 35-34. Additional MSRs Common to Intel® Xeon® Processor D and Intel Xeon Processors E5 v4 Family Based on the Broadwell Microarchitecture - Table 35-37. Additional MSRs Supported by 6th Generation Intel® Core™ Processors Based on Skylake Microarchitecture - Table 35-40. Selected MSRs Supported by Next Generation Intel® Xeon Phi™ Processors with DisplayFamily_DisplayModel Signature 06_57H - MSR_TURBO_RATIO_LIMIT[7:0] is Maximum Ratio Limit for 1C */ - err = cpu_rdmsr_range(handle, MSR_TURBO_RATIO_LIMIT, 7, 0, &reg); - if (!err) return (double) reg; - } - else if(id->vendor == VENDOR_AMD) { - /* Refer links above - MSRC001_0064 is Pb0 - Pb0 is the highest-performance boosted P-state */ - err = get_amd_multipliers(handle, id, internal, MSR_PSTATE_0, &reg); - if (!err) return (double) reg; - } - - return (double) CPU_INVALID_VALUE / 100; -} - -static int get_info_temperature(struct msr_driver_t* handle, struct cpu_id_t *id, - struct internal_id_info_t *internal) -{ - int err; - uint64_t DigitalReadout, ReadingValid, TemperatureTarget; - - if(id->vendor == VENDOR_INTEL) { - /* Refer links above - Table 35-2. IA-32 Architectural MSRs - IA32_THERM_STATUS[22:16] is Digital Readout - IA32_THERM_STATUS[31] is Reading Valid - - Table 35-6. MSRs Common to the Silvermont Microarchitecture and Newer Microarchitectures for Intel® Atom - Table 35-13. MSRs in Processors Based on Intel® Microarchitecture Code Name Nehalem (Contd.) - Table 35-18. MSRs Supported by Intel® Processors based on Intel® microarchitecture code name Sandy Bridge (Contd.) - Table 35-24. MSRs Supported by Intel® Xeon® Processors E5 v2 Product Family (based on Ivy Bridge-E microarchitecture) (Contd.) - Table 35-34. Additional MSRs Common to Intel® Xeon® Processor D and Intel Xeon Processors E5 v4 Family Based on the Broadwell Microarchitecture - Table 35-40. Selected MSRs Supported by Next Generation Intel® Xeon Phi™ Processors with DisplayFamily_DisplayModel Signature 06_57H - MSR_TEMPERATURE_TARGET[23:16] is Temperature Target */ - err = cpu_rdmsr_range(handle, IA32_THERM_STATUS, 22, 16, &DigitalReadout); - err += cpu_rdmsr_range(handle, IA32_THERM_STATUS, 31, 31, &ReadingValid); - err += cpu_rdmsr_range(handle, MSR_TEMPERATURE_TARGET, 23, 16, &TemperatureTarget); - if(!err && ReadingValid) return (int) (TemperatureTarget - DigitalReadout); - } - - return CPU_INVALID_VALUE; -} - -static double get_info_voltage(struct msr_driver_t* handle, struct cpu_id_t *id, - struct internal_id_info_t *internal) -{ - int err; - uint64_t reg, CpuVid; - - if(id->vendor == VENDOR_INTEL) { - /* Refer links above - Table 35-18. MSRs Supported by Intel® Processors based on Intel® microarchitecture code name Sandy Bridge (Contd.) - MSR_PERF_STATUS[47:32] is Core Voltage - P-state core voltage can be computed by MSR_PERF_STATUS[37:32] * (float) 1/(2^13). 
*/ - err = cpu_rdmsr_range(handle, MSR_PERF_STATUS, 47, 32, &reg); - if (!err) return (double) reg / (1 << 13); - } - else if(id->vendor == VENDOR_AMD) { - /* Refer links above - MSRC001_00[6B:64][15:9] is CpuVid - MSRC001_0063[2:0] is P-state Status - 2.4.1.6.3 Serial VID (SVI) Encodings: voltage = 1.550V - 0.0125V * SviVid[6:0] */ - err = cpu_rdmsr_range(handle, MSR_PSTATE_S, 2, 0, &reg); - err += cpu_rdmsr_range(handle, MSR_PSTATE_0 + (uint32_t) reg, 15, 9, &CpuVid); - if (!err && MSR_PSTATE_0 + (uint32_t) reg <= MSR_PSTATE_7) return 1.550 - 0.0125 * CpuVid; - } - - return (double) CPU_INVALID_VALUE / 100; -} - -static double get_info_bus_clock(struct msr_driver_t* handle, struct cpu_id_t *id, - struct internal_id_info_t *internal) -{ - int err; - static int clock = 0; - uint64_t reg; - - if(clock == 0) - clock = cpu_clock_measure(100, 1); - - if(id->vendor == VENDOR_INTEL) { - /* Refer links above - Table 35-12. MSRs in Next Generation Intel Atom Processors Based on the Goldmont Microarchitecture - Table 35-13. MSRs in Processors Based on Intel® Microarchitecture Code Name Nehalem - Table 35-18. MSRs Supported by Intel® Processors based on Intel® microarchitecture code name Sandy Bridge (Contd.) - Table 35-23. Additional MSRs Supported by 3rd Generation Intel® Core™ Processors (based on Intel® microarchitecture code name Ivy Bridge) - Table 35-24. MSRs Supported by Intel® Xeon® Processors E5 v2 Product Family (based on Ivy Bridge-E microarchitecture) - Table 35-27. Additional MSRs Supported by Processors based on the Haswell or Haswell-E microarchitectures - Table 35-40. Selected MSRs Supported by Next Generation Intel® Xeon Phi™ Processors with DisplayFamily_DisplayModel Signature 06_57H - MSR_PLATFORM_INFO[15:8] is Maximum Non-Turbo Ratio */ - err = cpu_rdmsr_range(handle, MSR_PLATFORM_INFO, 15, 8, &reg); - if (!err) return (double) clock / reg; - } - else if(id->vendor == VENDOR_AMD) { - /* Refer links above - MSRC001_0061[2:0] is CurPstateLimit - CurPstateLimit is the highest-performance non-boosted P-state */ - err = cpu_rdmsr_range(handle, MSR_PSTATE_L, 2, 0, &reg); - err += get_amd_multipliers(handle, id, internal, MSR_PSTATE_0 + (uint32_t) reg, &reg); - if (!err) return (double) clock / reg; - } - - return (double) CPU_INVALID_VALUE / 100; -} - -int cpu_rdmsr_range(struct msr_driver_t* handle, uint32_t msr_index, uint8_t highbit, - uint8_t lowbit, uint64_t* result) -{ - int err; - const uint8_t bits = highbit - lowbit + 1; - - if(highbit > 63 || lowbit > highbit) - return set_error(ERR_INVRANGE); - - err = cpu_rdmsr(handle, msr_index, result); - - if(!err && bits < 64) { - /* Show only part of register */ - *result >>= lowbit; - *result &= (1ULL << bits) - 1; - } - - return err; -} - -int cpu_msrinfo(struct msr_driver_t* handle, cpu_msrinfo_request_t which) -{ - struct cpu_raw_data_t raw; - static struct cpu_id_t id; - static struct internal_id_info_t internal; - internal.score = -1; - - if (handle == NULL) - return set_error(ERR_HANDLE); - - if (internal.score == -1) { - cpuid_get_raw_data(&raw); - cpu_ident_internal(&raw, &id, &internal); - } - - switch (which) { - case INFO_MPERF: - return perfmsr_measure(handle, IA32_MPERF); - case INFO_APERF: - return perfmsr_measure(handle, IA32_APERF); - case INFO_MIN_MULTIPLIER: - return (int) (get_info_min_multiplier(handle, &id, &internal) * 100); - case INFO_CUR_MULTIPLIER: - return (int) (get_info_cur_multiplier(handle, &id, &internal) * 100); - case INFO_MAX_MULTIPLIER: - return (int) (get_info_max_multiplier(handle, &id, &internal) * 100); - 
case INFO_TEMPERATURE: - return get_info_temperature(handle, &id, &internal); - case INFO_THROTTLING: - return CPU_INVALID_VALUE; - case INFO_VOLTAGE: - return (int) (get_info_voltage(handle, &id, &internal) * 100); - case INFO_BCLK: - case INFO_BUS_CLOCK: - return (int) (get_info_bus_clock(handle, &id, &internal) * 100); - default: - return CPU_INVALID_VALUE; - } -} - -#endif // RDMSR_UNSUPPORTED_OS diff --git a/contrib/libcpuid/include/libcpuid/rdtsc.c b/contrib/libcpuid/include/libcpuid/rdtsc.c deleted file mode 100644 index df4543946f5..00000000000 --- a/contrib/libcpuid/include/libcpuid/rdtsc.c +++ /dev/null @@ -1,320 +0,0 @@ -/* - * Copyright 2008 Veselin Georgiev, - * anrieffNOSPAM @ mgail_DOT.com (convert to gmail) - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ -#include -#include -#include "libcpuid.h" -#include "libcpuid_util.h" -#include "asm-bits.h" -#include "rdtsc.h" - -#ifdef _WIN32 -#include -void sys_precise_clock(uint64_t *result) -{ - double c, f; - LARGE_INTEGER freq, counter; - QueryPerformanceCounter(&counter); - QueryPerformanceFrequency(&freq); - c = (double) counter.QuadPart; - f = (double) freq.QuadPart; - *result = (uint64_t) ( c * 1000000.0 / f ); -} -#else -/* assuming Linux, Mac OS or other POSIX */ -#include -void sys_precise_clock(uint64_t *result) -{ - struct timeval tv; - gettimeofday(&tv, NULL); - *result = (uint64_t) tv.tv_sec * (uint64_t) 1000000 + - (uint64_t) tv.tv_usec; -} -#endif /* _WIN32 */ - -/* out = a - b */ -static void mark_t_subtract(struct cpu_mark_t* a, struct cpu_mark_t* b, struct cpu_mark_t *out) -{ - out->tsc = a->tsc - b->tsc; - out->sys_clock = a->sys_clock - b->sys_clock; -} - -void cpu_tsc_mark(struct cpu_mark_t* mark) -{ - cpu_rdtsc(&mark->tsc); - sys_precise_clock(&mark->sys_clock); -} - -void cpu_tsc_unmark(struct cpu_mark_t* mark) -{ - struct cpu_mark_t temp; - cpu_tsc_mark(&temp); - mark_t_subtract(&temp, mark, mark); -} - - -int cpu_clock_by_mark(struct cpu_mark_t* mark) -{ - uint64_t result; - - /* Check if some subtraction resulted in a negative number: */ - if ((mark->tsc >> 63) != 0 || (mark->sys_clock >> 63) != 0) return -1; - - /* Divide-by-zero check: */ - if (mark->sys_clock == 0) return -1; - - /* Check if the result fits in 32bits */ - result = mark->tsc / mark->sys_clock; - if (result > (uint64_t) 0x7fffffff) return -1; - return (int) result; -} - -#ifdef _WIN32 -int cpu_clock_by_os(void) -{ - HKEY key; - DWORD result; - DWORD size = 4; - - if (RegOpenKeyEx(HKEY_LOCAL_MACHINE, TEXT("HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0"), 0, KEY_READ, &key) != ERROR_SUCCESS) - return -1; - - if (RegQueryValueEx(key, TEXT("~MHz"), NULL, NULL, (LPBYTE) &result, (LPDWORD) &size) != ERROR_SUCCESS) { - RegCloseKey(key); - return -1; - } - RegCloseKey(key); - - return (int)result; -} -#else -#ifdef __APPLE__ -#include -#include -/* Assuming Mac OS X with hw.cpufrequency sysctl */ -int cpu_clock_by_os(void) -{ - long long result = -1; - size_t size = sizeof(result); - if (sysctlbyname("hw.cpufrequency", &result, &size, NULL, 0)) - return -1; - return (int) (result / (long long) 1000000); -} -#else -/* Assuming Linux with /proc/cpuinfo */ -int cpu_clock_by_os(void) -{ - FILE *f; - char line[1024], *s; - int result; - - f = fopen("/proc/cpuinfo", "rt"); - if (!f) return -1; - - while (fgets(line, sizeof(line), f)) { - if (!strncmp(line, "cpu MHz", 7)) { - s = strchr(line, ':'); - if (s && 1 == sscanf(s, ":%d.", &result)) { - fclose(f); - return result; - } - } - } - fclose(f); - return -1; -} -#endif /* __APPLE__ */ -#endif /* _WIN32 */ - -/* Emulate doing useful CPU intensive work */ -static int busy_loop(int amount) -{ - int i, j, k, s = 0; - static volatile int data[42] = {32, 12, -1, 5, 23, 0 }; - for (i = 0; i < amount; i++) - for (j = 0; j < 65536; j++) - for (k = 0; k < 42; k++) - s += data[k]; - return s; -} - -int busy_loop_delay(int milliseconds) -{ - int cycles = 0, r = 0, first = 1; - uint64_t a, b, c; - sys_precise_clock(&a); - while (1) { - sys_precise_clock(&c); - if ((c - a) / 1000 > milliseconds) return r; - r += busy_loop(cycles); - if (first) { - first = 0; - } else { - if (c - b < 1000) cycles *= 2; - if (c - b > 10000) cycles /= 2; - } - b = c; - } -} - -int cpu_clock_measure(int millis, int quad_check) -{ - struct cpu_mark_t begin[4], end[4], temp, temp2; - int 
results[4], cycles, n, k, i, j, bi, bj, mdiff, diff, _zero = 0; - uint64_t tl; - - if (millis < 1) return -1; - tl = millis * (uint64_t) 1000; - if (quad_check) - tl /= 4; - n = quad_check ? 4 : 1; - cycles = 1; - for (k = 0; k < n; k++) { - cpu_tsc_mark(&begin[k]); - end[k] = begin[k]; - do { - /* Run busy loop, and fool the compiler that we USE the garbishy - value it calculates */ - _zero |= (1 & busy_loop(cycles)); - cpu_tsc_mark(&temp); - mark_t_subtract(&temp, &end[k], &temp2); - /* If busy loop is too short, increase it */ - if (temp2.sys_clock < tl / 8) - cycles *= 2; - end[k] = temp; - } while (end[k].sys_clock - begin[k].sys_clock < tl); - mark_t_subtract(&end[k], &begin[k], &temp); - results[k] = cpu_clock_by_mark(&temp); - } - if (n == 1) return results[0]; - mdiff = 0x7fffffff; - bi = bj = -1; - for (i = 0; i < 4; i++) { - for (j = i + 1; j < 4; j++) { - diff = results[i] - results[j]; - if (diff < 0) diff = -diff; - if (diff < mdiff) { - mdiff = diff; - bi = i; - bj = j; - } - } - } - if (results[bi] == -1) return -1; - return (results[bi] + results[bj] + _zero) / 2; -} - - -static void adjust_march_ic_multiplier(const struct cpu_id_t* id, int* numerator, int* denom) -{ - /* - * for cpu_clock_by_ic: we need to know how many clocks does a typical ADDPS instruction - * take, when issued in rapid succesion without dependencies. The whole idea of - * cpu_clock_by_ic was that this is easy to determine, at least it was back in 2010. Now - * it's getting progressively more hairy, but here are the current measurements: - * - * 1. For CPUs with 64-bit SSE units, ADDPS issue rate is 0.5 IPC (one insn in 2 clocks) - * 2. For CPUs with 128-bit SSE units, issue rate is exactly 1.0 IPC - * 3. For Bulldozer and later, it is 1.4 IPC (we multiply by 5/7) - * 4. 
For Skylake and later, it is 1.6 IPC (we multiply by 5/8) - */ - // - if (id->sse_size < 128) { - debugf(1, "SSE execution path is 64-bit\n"); - // on a CPU with half SSE unit length, SSE instructions execute at 0.5 IPC; - // the resulting value must be multiplied by 2: - *numerator = 2; - } else { - debugf(1, "SSE execution path is 128-bit\n"); - } - // - // Bulldozer or later: assume 1.4 IPC - if (id->vendor == VENDOR_AMD && id->ext_family >= 21) { - debugf(1, "cpu_clock_by_ic: Bulldozer (or later) detected, dividing result by 1.4\n"); - *numerator = 5; - *denom = 7; // multiply by 5/7, to divide by 1.4 - } - // - // Skylake or later: assume 1.6 IPC - if (id->vendor == VENDOR_INTEL && id->ext_model >= 94) { - debugf(1, "cpu_clock_by_ic: Skylake (or later) detected, dividing result by 1.6\n"); - *numerator = 5; - *denom = 8; // to divide by 1.6, multiply by 5/8 - } -} - -int cpu_clock_by_ic(int millis, int runs) -{ - int max_value = 0, cur_value, i, ri, cycles_inner, cycles_outer, c; - struct cpu_id_t* id; - uint64_t t0, t1, tl, hz; - int multiplier_numerator = 1, multiplier_denom = 1; - if (millis <= 0 || runs <= 0) return -2; - id = get_cached_cpuid(); - // if there aren't SSE instructions - we can't run the test at all - if (!id || !id->flags[CPU_FEATURE_SSE]) return -1; - // - adjust_march_ic_multiplier(id, &multiplier_numerator, &multiplier_denom); - // - tl = millis * 125; // (*1000 / 8) - cycles_inner = 128; - cycles_outer = 1; - do { - if (cycles_inner < 1000000000) cycles_inner *= 2; - else cycles_outer *= 2; - sys_precise_clock(&t0); - for (i = 0; i < cycles_outer; i++) - busy_sse_loop(cycles_inner); - sys_precise_clock(&t1); - } while (t1 - t0 < tl); - debugf(2, "inner: %d, outer: %d\n", cycles_inner, cycles_outer); - for (ri = 0; ri < runs; ri++) { - sys_precise_clock(&t0); - c = 0; - do { - c++; - for (i = 0; i < cycles_outer; i++) - busy_sse_loop(cycles_inner); - sys_precise_clock(&t1); - } while (t1 - t0 < tl * (uint64_t) 8); - // cpu_Hz = cycles_inner * cycles_outer * 256 / (t1 - t0) * 1000000 - debugf(2, "c = %d, td = %d\n", c, (int) (t1 - t0)); - hz = ((uint64_t) cycles_inner * (uint64_t) 256 + 12) * - (uint64_t) cycles_outer * (uint64_t) multiplier_numerator * (uint64_t) c * (uint64_t) 1000000 - / ((t1 - t0) * (uint64_t) multiplier_denom); - cur_value = (int) (hz / 1000000); - if (cur_value > max_value) max_value = cur_value; - } - return max_value; -} - -int cpu_clock(void) -{ - int result; - result = cpu_clock_by_os(); - if (result <= 0) - result = cpu_clock_measure(200, 1); - return result; -} diff --git a/contrib/libcpuid/include/libcpuid/rdtsc.h b/contrib/libcpuid/include/libcpuid/rdtsc.h deleted file mode 100644 index b4aaf99a570..00000000000 --- a/contrib/libcpuid/include/libcpuid/rdtsc.h +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright 2010 Veselin Georgiev, - * anrieffNOSPAM @ mgail_DOT.com (convert to gmail) - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. 
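For reference, the clock-measurement code in the deleted rdtsc.c above boils down to one ratio: elapsed TSC ticks divided by elapsed wall-clock microseconds gives MHz, which is what cpu_clock_by_mark() computes. A minimal standalone sketch of that idea, assuming an x86 toolchain with __rdtsc() from <x86intrin.h> and POSIX gettimeofday(); the helper names here are illustrative, not part of libcpuid:

    #include <stdint.h>
    #include <stdio.h>
    #include <sys/time.h>
    #include <unistd.h>
    #include <x86intrin.h>   /* __rdtsc() */

    static uint64_t usec_now(void)
    {
        struct timeval tv;
        gettimeofday(&tv, NULL);
        return (uint64_t) tv.tv_sec * 1000000u + (uint64_t) tv.tv_usec;
    }

    int main(void)
    {
        /* Take a TSC/wall-clock mark, wait a bit, take another one. */
        uint64_t t0 = usec_now(), c0 = __rdtsc();
        usleep(200 * 1000);   /* ~200 ms, like cpu_clock_measure(200, ...) */
        uint64_t t1 = usec_now(), c1 = __rdtsc();

        /* Ticks per microsecond equals MHz (assumes an invariant TSC). */
        if (t1 > t0)
            printf("~%llu MHz\n", (unsigned long long) ((c1 - c0) / (t1 - t0)));
        return 0;
    }

The deleted code burns cycles in busy_loop() instead of sleeping, so the core stays out of idle states during the measured interval, and cpu_clock_measure() cross-checks four such intervals when quad_check is set.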
- * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ -#ifndef __RDTSC_H__ -#define __RDTSC_H__ - -void sys_precise_clock(uint64_t *result); -int busy_loop_delay(int milliseconds); - - -#endif /* __RDTSC_H__ */ diff --git a/contrib/libcpuid/include/libcpuid/recog_amd.c b/contrib/libcpuid/include/libcpuid/recog_amd.c deleted file mode 100644 index 2e6c8a9ead8..00000000000 --- a/contrib/libcpuid/include/libcpuid/recog_amd.c +++ /dev/null @@ -1,486 +0,0 @@ -/* - * Copyright 2008 Veselin Georgiev, - * anrieffNOSPAM @ mgail_DOT.com (convert to gmail) - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
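The same file also wires these pieces into the library's clock API: cpu_clock() asks the OS first (Windows registry, sysctl, or /proc/cpuinfo) and falls back to direct measurement only when that fails. A short usage sketch, assuming these entry points are exported through libcpuid.h as in upstream libcpuid:

    #include <stdio.h>
    #include "libcpuid.h"

    int main(void)
    {
        /* Each call returns a negative value when its method is unavailable. */
        printf("reported by OS:        %d MHz\n", cpu_clock_by_os());
        printf("measured (200 ms, x4): %d MHz\n", cpu_clock_measure(200, 1));
        printf("by instruction count:  %d MHz\n", cpu_clock_by_ic(50, 4));
        printf("best effort:           %d MHz\n", cpu_clock());
        return 0;
    }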
- */ - -#include -#include -#include -#include "libcpuid.h" -#include "libcpuid_util.h" -#include "libcpuid_internal.h" -#include "recog_amd.h" - -const struct amd_code_str { amd_code_t code; char *str; } amd_code_str[] = { - #define CODE(x) { x, #x } - #define CODE2(x, y) CODE(x) - #include "amd_code_t.h" - #undef CODE -}; - -const struct match_entry_t cpudb_amd[] = { - { -1, -1, -1, -1, -1, 1, -1, -1, NO_CODE , 0, "Unknown AMD CPU" }, - - /* 486 and the likes */ - { 4, -1, -1, -1, -1, 1, -1, -1, NO_CODE , 0, "Unknown AMD 486" }, - { 4, 3, -1, -1, -1, 1, -1, -1, NO_CODE , 0, "AMD 486DX2" }, - { 4, 7, -1, -1, -1, 1, -1, -1, NO_CODE , 0, "AMD 486DX2WB" }, - { 4, 8, -1, -1, -1, 1, -1, -1, NO_CODE , 0, "AMD 486DX4" }, - { 4, 9, -1, -1, -1, 1, -1, -1, NO_CODE , 0, "AMD 486DX4WB" }, - - /* Pentia clones */ - { 5, -1, -1, -1, -1, 1, -1, -1, NO_CODE , 0, "Unknown AMD 586" }, - { 5, 0, -1, -1, -1, 1, -1, -1, NO_CODE , 0, "K5" }, - { 5, 1, -1, -1, -1, 1, -1, -1, NO_CODE , 0, "K5" }, - { 5, 2, -1, -1, -1, 1, -1, -1, NO_CODE , 0, "K5" }, - { 5, 3, -1, -1, -1, 1, -1, -1, NO_CODE , 0, "K5" }, - - /* The K6 */ - { 5, 6, -1, -1, -1, 1, -1, -1, NO_CODE , 0, "K6" }, - { 5, 7, -1, -1, -1, 1, -1, -1, NO_CODE , 0, "K6" }, - - { 5, 8, -1, -1, -1, 1, -1, -1, NO_CODE , 0, "K6-2" }, - { 5, 9, -1, -1, -1, 1, -1, -1, NO_CODE , 0, "K6-III" }, - { 5, 10, -1, -1, -1, 1, -1, -1, NO_CODE , 0, "Unknown K6" }, - { 5, 11, -1, -1, -1, 1, -1, -1, NO_CODE , 0, "Unknown K6" }, - { 5, 12, -1, -1, -1, 1, -1, -1, NO_CODE , 0, "Unknown K6" }, - { 5, 13, -1, -1, -1, 1, -1, -1, NO_CODE , 0, "K6-2+" }, - - /* Athlon et al. */ - { 6, 1, -1, -1, -1, 1, -1, -1, NO_CODE , 0, "Athlon (Slot-A)" }, - { 6, 2, -1, -1, -1, 1, -1, -1, NO_CODE , 0, "Athlon (Slot-A)" }, - { 6, 3, -1, -1, -1, 1, -1, -1, NO_CODE , 0, "Duron (Spitfire)" }, - { 6, 4, -1, -1, -1, 1, -1, -1, NO_CODE , 0, "Athlon (ThunderBird)" }, - - { 6, 6, -1, -1, -1, 1, -1, -1, NO_CODE , 0, "Unknown Athlon" }, - { 6, 6, -1, -1, -1, 1, -1, -1, ATHLON , 0, "Athlon (Palomino)" }, - { 6, 6, -1, -1, -1, 1, -1, -1, ATHLON_MP , 0, "Athlon MP (Palomino)" }, - { 6, 6, -1, -1, -1, 1, -1, -1, DURON , 0, "Duron (Palomino)" }, - { 6, 6, -1, -1, -1, 1, -1, -1, ATHLON_XP , 0, "Athlon XP" }, - - { 6, 7, -1, -1, -1, 1, -1, -1, NO_CODE , 0, "Unknown Athlon XP" }, - { 6, 7, -1, -1, -1, 1, -1, -1, DURON , 0, "Duron (Morgan)" }, - - { 6, 8, -1, -1, -1, 1, -1, -1, NO_CODE , 0, "Athlon XP" }, - { 6, 8, -1, -1, -1, 1, -1, -1, ATHLON , 0, "Athlon XP (Thoroughbred)" }, - { 6, 8, -1, -1, -1, 1, -1, -1, ATHLON_XP , 0, "Athlon XP (Thoroughbred)" }, - { 6, 8, -1, -1, -1, 1, -1, -1, DURON , 0, "Duron (Applebred)" }, - { 6, 8, -1, -1, -1, 1, -1, -1, SEMPRON , 0, "Sempron (Thoroughbred)" }, - { 6, 8, -1, -1, -1, 1, 128, -1, SEMPRON , 0, "Sempron (Thoroughbred)" }, - { 6, 8, -1, -1, -1, 1, 256, -1, SEMPRON , 0, "Sempron (Thoroughbred)" }, - { 6, 8, -1, -1, -1, 1, -1, -1, ATHLON_MP , 0, "Athlon MP (Thoroughbred)" }, - { 6, 8, -1, -1, -1, 1, -1, -1, ATHLON_XP_M , 0, "Mobile Athlon (T-Bred)" }, - { 6, 8, -1, -1, -1, 1, -1, -1, ATHLON_XP_M_LV , 0, "Mobile Athlon (T-Bred)" }, - - { 6, 10, -1, -1, -1, 1, -1, -1, NO_CODE , 0, "Athlon XP (Barton)" }, - { 6, 10, -1, -1, -1, 1, 512, -1, ATHLON_XP , 0, "Athlon XP (Barton)" }, - { 6, 10, -1, -1, -1, 1, 512, -1, SEMPRON , 0, "Sempron (Barton)" }, - { 6, 10, -1, -1, -1, 1, 256, -1, SEMPRON , 0, "Sempron (Thorton)" }, - { 6, 10, -1, -1, -1, 1, 256, -1, ATHLON_XP , 0, "Athlon XP (Thorton)" }, - { 6, 10, -1, -1, -1, 1, -1, -1, ATHLON_MP , 0, "Athlon MP (Barton)" }, - { 6, 10, -1, 
-1, -1, 1, -1, -1, ATHLON_XP_M , 0, "Mobile Athlon (Barton)" }, - { 6, 10, -1, -1, -1, 1, -1, -1, ATHLON_XP_M_LV , 0, "Mobile Athlon (Barton)" }, - - /* K8 Architecture */ - { 15, -1, -1, 15, -1, 1, -1, -1, NO_CODE , 0, "Unknown K8" }, - { 15, -1, -1, 16, -1, 1, -1, -1, NO_CODE , 0, "Unknown K9" }, - - { 15, -1, -1, 15, -1, 1, -1, -1, NO_CODE , 0, "Unknown A64" }, - { 15, -1, -1, 15, -1, 1, -1, -1, OPTERON_SINGLE , 0, "Opteron" }, - { 15, -1, -1, 15, -1, 2, -1, -1, OPTERON_DUALCORE , 0, "Opteron (Dual Core)" }, - { 15, 3, -1, 15, -1, 1, -1, -1, OPTERON_SINGLE , 0, "Opteron" }, - { 15, 3, -1, 15, -1, 2, -1, -1, OPTERON_DUALCORE , 0, "Opteron (Dual Core)" }, - { 15, -1, -1, 15, -1, 1, 512, -1, ATHLON_64 , 0, "Athlon 64 (512K)" }, - { 15, -1, -1, 15, -1, 1, 1024, -1, ATHLON_64 , 0, "Athlon 64 (1024K)" }, - { 15, -1, -1, 15, -1, 1, -1, -1, ATHLON_FX , 0, "Athlon FX" }, - { 15, -1, -1, 15, -1, 1, -1, -1, ATHLON_64_FX , 0, "Athlon 64 FX" }, - { 15, 3, -1, 15, 35, 2, -1, -1, ATHLON_64_FX , 0, "Athlon 64 FX X2 (Toledo)" }, - { 15, -1, -1, 15, -1, 2, 512, -1, ATHLON_64_X2 , 0, "Athlon 64 X2 (512K)" }, - { 15, -1, -1, 15, -1, 2, 1024, -1, ATHLON_64_X2 , 0, "Athlon 64 X2 (1024K)" }, - { 15, -1, -1, 15, -1, 1, 512, -1, TURION_64 , 0, "Turion 64 (512K)" }, - { 15, -1, -1, 15, -1, 1, 1024, -1, TURION_64 , 0, "Turion 64 (1024K)" }, - { 15, -1, -1, 15, -1, 2, 512, -1, TURION_X2 , 0, "Turion 64 X2 (512K)" }, - { 15, -1, -1, 15, -1, 2, 1024, -1, TURION_X2 , 0, "Turion 64 X2 (1024K)" }, - { 15, -1, -1, 15, -1, 1, 128, -1, SEMPRON , 0, "A64 Sempron (128K)" }, - { 15, -1, -1, 15, -1, 1, 256, -1, SEMPRON , 0, "A64 Sempron (256K)" }, - { 15, -1, -1, 15, -1, 1, 512, -1, SEMPRON , 0, "A64 Sempron (512K)" }, - { 15, -1, -1, 15, 0x4f, 1, 512, -1, ATHLON_64 , 0, "Athlon 64 (Orleans/512K)" }, - { 15, -1, -1, 15, 0x5f, 1, 512, -1, ATHLON_64 , 0, "Athlon 64 (Orleans/512K)" }, - { 15, -1, -1, 15, 0x2f, 1, 512, -1, ATHLON_64 , 0, "Athlon 64 (Venice/512K)" }, - { 15, -1, -1, 15, 0x2c, 1, 512, -1, ATHLON_64 , 0, "Athlon 64 (Venice/512K)" }, - { 15, -1, -1, 15, 0x1f, 1, 512, -1, ATHLON_64 , 0, "Athlon 64 (Winchester/512K)" }, - { 15, -1, -1, 15, 0x0c, 1, 512, -1, ATHLON_64 , 0, "Athlon 64 (Newcastle/512K)" }, - { 15, -1, -1, 15, 0x27, 1, 512, -1, ATHLON_64 , 0, "Athlon 64 (San Diego/512K)" }, - { 15, -1, -1, 15, 0x37, 1, 512, -1, ATHLON_64 , 0, "Athlon 64 (San Diego/512K)" }, - { 15, -1, -1, 15, 0x04, 1, 512, -1, ATHLON_64 , 0, "Athlon 64 (ClawHammer/512K)" }, - - { 15, -1, -1, 15, 0x5f, 1, 1024, -1, ATHLON_64 , 0, "Athlon 64 (Orleans/1024K)" }, - { 15, -1, -1, 15, 0x27, 1, 1024, -1, ATHLON_64 , 0, "Athlon 64 (San Diego/1024K)" }, - { 15, -1, -1, 15, 0x04, 1, 1024, -1, ATHLON_64 , 0, "Athlon 64 (ClawHammer/1024K)" }, - - { 15, -1, -1, 15, 0x4b, 2, 256, -1, SEMPRON_DUALCORE , 0, "Athlon 64 X2 (Windsor/256K)" }, - - { 15, -1, -1, 15, 0x23, 2, 512, -1, ATHLON_64_X2 , 0, "Athlon 64 X2 (Toledo/512K)" }, - { 15, -1, -1, 15, 0x4b, 2, 512, -1, ATHLON_64_X2 , 0, "Athlon 64 X2 (Windsor/512K)" }, - { 15, -1, -1, 15, 0x43, 2, 512, -1, ATHLON_64_X2 , 0, "Athlon 64 X2 (Windsor/512K)" }, - { 15, -1, -1, 15, 0x6b, 2, 512, -1, ATHLON_64_X2 , 0, "Athlon 64 X2 (Brisbane/512K)" }, - { 15, -1, -1, 15, 0x2b, 2, 512, -1, ATHLON_64_X2 , 0, "Athlon 64 X2 (Manchester/512K)"}, - - { 15, -1, -1, 15, 0x23, 2, 1024, -1, ATHLON_64_X2 , 0, "Athlon 64 X2 (Toledo/1024K)" }, - { 15, -1, -1, 15, 0x43, 2, 1024, -1, ATHLON_64_X2 , 0, "Athlon 64 X2 (Windsor/1024K)" }, - - { 15, -1, -1, 15, 0x08, 1, 128, -1, M_SEMPRON , 0, "Mobile Sempron 64 (Dublin/128K)"}, - { 
15, -1, -1, 15, 0x08, 1, 256, -1, M_SEMPRON , 0, "Mobile Sempron 64 (Dublin/256K)"}, - { 15, -1, -1, 15, 0x0c, 1, 256, -1, SEMPRON , 0, "Sempron 64 (Paris)" }, - { 15, -1, -1, 15, 0x1c, 1, 128, -1, SEMPRON , 0, "Sempron 64 (Palermo/128K)" }, - { 15, -1, -1, 15, 0x1c, 1, 256, -1, SEMPRON , 0, "Sempron 64 (Palermo/256K)" }, - { 15, -1, -1, 15, 0x1c, 1, 128, -1, M_SEMPRON , 0, "Mobile Sempron 64 (Sonora/128K)"}, - { 15, -1, -1, 15, 0x1c, 1, 256, -1, M_SEMPRON , 0, "Mobile Sempron 64 (Sonora/256K)"}, - { 15, -1, -1, 15, 0x2c, 1, 128, -1, SEMPRON , 0, "Sempron 64 (Palermo/128K)" }, - { 15, -1, -1, 15, 0x2c, 1, 256, -1, SEMPRON , 0, "Sempron 64 (Palermo/256K)" }, - { 15, -1, -1, 15, 0x2c, 1, 128, -1, M_SEMPRON , 0, "Mobile Sempron 64 (Albany/128K)"}, - { 15, -1, -1, 15, 0x2c, 1, 256, -1, M_SEMPRON , 0, "Mobile Sempron 64 (Albany/256K)"}, - { 15, -1, -1, 15, 0x2f, 1, 128, -1, SEMPRON , 0, "Sempron 64 (Palermo/128K)" }, - { 15, -1, -1, 15, 0x2f, 1, 256, -1, SEMPRON , 0, "Sempron 64 (Palermo/256K)" }, - { 15, -1, -1, 15, 0x4f, 1, 128, -1, SEMPRON , 0, "Sempron 64 (Manila/128K)" }, - { 15, -1, -1, 15, 0x4f, 1, 256, -1, SEMPRON , 0, "Sempron 64 (Manila/256K)" }, - { 15, -1, -1, 15, 0x5f, 1, 128, -1, SEMPRON , 0, "Sempron 64 (Manila/128K)" }, - { 15, -1, -1, 15, 0x5f, 1, 256, -1, SEMPRON , 0, "Sempron 64 (Manila/256K)" }, - { 15, -1, -1, 15, 0x6b, 2, 256, -1, SEMPRON , 0, "Sempron 64 Dual (Sherman/256K)"}, - { 15, -1, -1, 15, 0x6b, 2, 512, -1, SEMPRON , 0, "Sempron 64 Dual (Sherman/512K)"}, - { 15, -1, -1, 15, 0x7f, 1, 256, -1, SEMPRON , 0, "Sempron 64 (Sparta/256K)" }, - { 15, -1, -1, 15, 0x7f, 1, 512, -1, SEMPRON , 0, "Sempron 64 (Sparta/512K)" }, - { 15, -1, -1, 15, 0x4c, 1, 256, -1, M_SEMPRON , 0, "Mobile Sempron 64 (Keene/256K)"}, - { 15, -1, -1, 15, 0x4c, 1, 512, -1, M_SEMPRON , 0, "Mobile Sempron 64 (Keene/512K)"}, - { 15, -1, -1, 15, -1, 2, -1, -1, SEMPRON_DUALCORE , 0, "Sempron Dual Core" }, - - { 15, -1, -1, 15, 0x24, 1, 512, -1, TURION_64 , 0, "Turion 64 (Lancaster/512K)" }, - { 15, -1, -1, 15, 0x24, 1, 1024, -1, TURION_64 , 0, "Turion 64 (Lancaster/1024K)" }, - { 15, -1, -1, 15, 0x48, 2, 256, -1, TURION_X2 , 0, "Turion X2 (Taylor)" }, - { 15, -1, -1, 15, 0x48, 2, 512, -1, TURION_X2 , 0, "Turion X2 (Trinidad)" }, - { 15, -1, -1, 15, 0x4c, 1, 512, -1, TURION_64 , 0, "Turion 64 (Richmond)" }, - { 15, -1, -1, 15, 0x68, 2, 256, -1, TURION_X2 , 0, "Turion X2 (Tyler/256K)" }, - { 15, -1, -1, 15, 0x68, 2, 512, -1, TURION_X2 , 0, "Turion X2 (Tyler/512K)" }, - { 15, -1, -1, 17, 3, 2, 512, -1, TURION_X2 , 0, "Turion X2 (Griffin/512K)" }, - { 15, -1, -1, 17, 3, 2, 1024, -1, TURION_X2 , 0, "Turion X2 (Griffin/1024K)" }, - - /* K9 Architecture */ - { 15, -1, -1, 16, -1, 1, -1, -1, PHENOM , 0, "Unknown AMD Phenom" }, - { 15, 2, -1, 16, -1, 1, -1, -1, PHENOM , 0, "Phenom" }, - { 15, 2, -1, 16, -1, 3, -1, -1, PHENOM , 0, "Phenom X3 (Toliman)" }, - { 15, 2, -1, 16, -1, 4, -1, -1, PHENOM , 0, "Phenom X4 (Agena)" }, - { 15, 2, -1, 16, -1, 3, 512, -1, PHENOM , 0, "Phenom X3 (Toliman/256K)" }, - { 15, 2, -1, 16, -1, 3, 512, -1, PHENOM , 0, "Phenom X3 (Toliman/512K)" }, - { 15, 2, -1, 16, -1, 4, 128, -1, PHENOM , 0, "Phenom X4 (Agena/128K)" }, - { 15, 2, -1, 16, -1, 4, 256, -1, PHENOM , 0, "Phenom X4 (Agena/256K)" }, - { 15, 2, -1, 16, -1, 4, 512, -1, PHENOM , 0, "Phenom X4 (Agena/512K)" }, - { 15, 2, -1, 16, -1, 2, 512, -1, ATHLON_64_X2 , 0, "Athlon X2 (Kuma)" }, - /* Phenom II derivates: */ - { 15, 4, -1, 16, -1, 4, -1, -1, NO_CODE , 0, "Phenom (Deneb-based)" }, - { 15, 4, -1, 16, -1, 1, 1024, -1, SEMPRON , 0, 
"Sempron (Sargas)" }, - { 15, 4, -1, 16, -1, 2, 512, -1, PHENOM2 , 0, "Phenom II X2 (Callisto)" }, - { 15, 4, -1, 16, -1, 3, 512, -1, PHENOM2 , 0, "Phenom II X3 (Heka)" }, - { 15, 4, -1, 16, -1, 4, 512, -1, PHENOM2 , 0, "Phenom II X4" }, - { 15, 4, -1, 16, 4, 4, 512, -1, PHENOM2 , 0, "Phenom II X4 (Deneb)" }, - { 15, 5, -1, 16, 5, 4, 512, -1, PHENOM2 , 0, "Phenom II X4 (Deneb)" }, - { 15, 4, -1, 16, 10, 4, 512, -1, PHENOM2 , 0, "Phenom II X4 (Zosma)" }, - { 15, 4, -1, 16, 10, 6, 512, -1, PHENOM2 , 0, "Phenom II X6 (Thuban)" }, - - { 15, 6, -1, 16, 6, 2, 512, -1, ATHLON , 0, "Athlon II (Champlain)" }, - { 15, 6, -1, 16, 6, 2, 512, -1, ATHLON_64_X2 , 0, "Athlon II X2 (Regor)" }, - { 15, 6, -1, 16, 6, 2, 1024, -1, ATHLON_64_X2 , 0, "Athlon II X2 (Regor)" }, - { 15, 5, -1, 16, 5, 3, 512, -1, ATHLON_64_X3 , 0, "Athlon II X3 (Rana)" }, - { 15, 5, -1, 16, 5, 4, 512, -1, ATHLON_64_X4 , 0, "Athlon II X4 (Propus)" }, - - /* 2011 CPUs: K10 architecture: Llano */ - { 15, 1, -1, 18, 1, 2, 512, -1, FUSION_EA , 0, "Llano X2" }, - { 15, 1, -1, 18, 1, 2, 1024, -1, FUSION_EA , 0, "Llano X2" }, - { 15, 1, -1, 18, 1, 3, 1024, -1, FUSION_EA , 0, "Llano X3" }, - { 15, 1, -1, 18, 1, 4, 1024, -1, FUSION_EA , 0, "Llano X4" }, - /* 2011 CPUs: Bobcat architecture: Ontario, Zacate, Desna, Hondo */ - { 15, 2, -1, 20, -1, 1, 512, -1, FUSION_C , 0, "Brazos Ontario" }, - { 15, 2, -1, 20, -1, 2, 512, -1, FUSION_C , 0, "Brazos Ontario (Dual-core)" }, - { 15, 1, -1, 20, -1, 1, 512, -1, FUSION_E , 0, "Brazos Zacate" }, - { 15, 1, -1, 20, -1, 2, 512, -1, FUSION_E , 0, "Brazos Zacate (Dual-core)" }, - { 15, 2, -1, 20, -1, 2, 512, -1, FUSION_Z , 0, "Brazos Desna (Dual-core)" }, - /* 2012 CPUs: Piledriver architecture: Trinity and Richland */ - { 15, 0, -1, 21, 10, 2, 1024, -1, FUSION_A , 0, "Trinity X2" }, - { 15, 0, -1, 21, 16, 2, 1024, -1, FUSION_A , 0, "Trinity X2" }, - { 15, 0, -1, 21, 10, 4, 1024, -1, FUSION_A , 0, "Trinity X4" }, - { 15, 0, -1, 21, 16, 4, 1024, -1, FUSION_A , 0, "Trinity X4" }, - { 15, 3, -1, 21, 13, 2, 1024, -1, FUSION_A , 0, "Richland X2" }, - { 15, 3, -1, 21, 13, 4, 1024, -1, FUSION_A , 0, "Richland X4" }, - /* 2013 CPUs: Jaguar architecture: Kabini and Temash */ - { 15, 0, -1, 22, 0, 2, 1024, -1, FUSION_A , 0, "Kabini X2" }, - { 15, 0, -1, 22, 0, 4, 1024, -1, FUSION_A , 0, "Kabini X4" }, - /* 2014 CPUs: Steamroller architecture: Kaveri */ - { 15, 0, -1, 21, 30, 2, 1024, -1, FUSION_A , 0, "Kaveri X2" }, - { 15, 0, -1, 21, 30, 4, 1024, -1, FUSION_A , 0, "Kaveri X4" }, - /* 2014 CPUs: Puma architecture: Beema and Mullins */ - { 15, 0, -1, 22, 30, 2, 1024, -1, FUSION_E , 0, "Mullins X2" }, - { 15, 0, -1, 22, 30, 4, 1024, -1, FUSION_A , 0, "Mullins X4" }, - /* 2015 CPUs: Excavator architecture: Carrizo */ - { 15, 1, -1, 21, 60, 2, 1024, -1, FUSION_A , 0, "Carrizo X2" }, - { 15, 1, -1, 21, 60, 4, 1024, -1, FUSION_A , 0, "Carrizo X4" }, - /* 2015 CPUs: Steamroller architecture: Godavari */ - //TODO - /* 2016 CPUs: Excavator architecture: Bristol Ridge */ - //TODO - - /* Newer Opterons: */ - { 15, 9, -1, 22, 9, 8, -1, -1, OPTERON_GENERIC , 0, "Magny-Cours Opteron" }, - - /* Bulldozer CPUs: */ - { 15, -1, -1, 21, 0, 4, 2048, -1, NO_CODE , 0, "Bulldozer X2" }, - { 15, -1, -1, 21, 1, 4, 2048, -1, NO_CODE , 0, "Bulldozer X2" }, - { 15, -1, -1, 21, 1, 6, 2048, -1, NO_CODE , 0, "Bulldozer X3" }, - { 15, -1, -1, 21, 1, 8, 2048, -1, NO_CODE , 0, "Bulldozer X4" }, - /* Piledriver CPUs: */ - { 15, -1, -1, 21, 2, 4, 2048, -1, NO_CODE , 0, "Vishera X2" }, - { 15, -1, -1, 21, 2, 6, 2048, -1, NO_CODE , 0, "Vishera X3" }, - 
{ 15, -1, -1, 21, 2, 8, 2048, -1, NO_CODE , 0, "Vishera X4" }, - /* Steamroller CPUs: */ - //TODO - /* Excavator CPUs: */ - //TODO - /* Zen CPUs: */ - //TODO -}; - - -static void load_amd_features(struct cpu_raw_data_t* raw, struct cpu_id_t* data) -{ - const struct feature_map_t matchtable_edx81[] = { - { 20, CPU_FEATURE_NX }, - { 22, CPU_FEATURE_MMXEXT }, - { 25, CPU_FEATURE_FXSR_OPT }, - { 30, CPU_FEATURE_3DNOWEXT }, - { 31, CPU_FEATURE_3DNOW }, - }; - const struct feature_map_t matchtable_ecx81[] = { - { 1, CPU_FEATURE_CMP_LEGACY }, - { 2, CPU_FEATURE_SVM }, - { 5, CPU_FEATURE_ABM }, - { 6, CPU_FEATURE_SSE4A }, - { 7, CPU_FEATURE_MISALIGNSSE }, - { 8, CPU_FEATURE_3DNOWPREFETCH }, - { 9, CPU_FEATURE_OSVW }, - { 10, CPU_FEATURE_IBS }, - { 11, CPU_FEATURE_XOP }, - { 12, CPU_FEATURE_SKINIT }, - { 13, CPU_FEATURE_WDT }, - { 16, CPU_FEATURE_FMA4 }, - { 21, CPU_FEATURE_TBM }, - }; - const struct feature_map_t matchtable_edx87[] = { - { 0, CPU_FEATURE_TS }, - { 1, CPU_FEATURE_FID }, - { 2, CPU_FEATURE_VID }, - { 3, CPU_FEATURE_TTP }, - { 4, CPU_FEATURE_TM_AMD }, - { 5, CPU_FEATURE_STC }, - { 6, CPU_FEATURE_100MHZSTEPS }, - { 7, CPU_FEATURE_HWPSTATE }, - /* id 8 is handled in common */ - { 9, CPU_FEATURE_CPB }, - { 10, CPU_FEATURE_APERFMPERF }, - { 11, CPU_FEATURE_PFI }, - { 12, CPU_FEATURE_PA }, - }; - if (raw->ext_cpuid[0][0] >= 0x80000001) { - match_features(matchtable_edx81, COUNT_OF(matchtable_edx81), raw->ext_cpuid[1][3], data); - match_features(matchtable_ecx81, COUNT_OF(matchtable_ecx81), raw->ext_cpuid[1][2], data); - } - if (raw->ext_cpuid[0][0] >= 0x80000007) - match_features(matchtable_edx87, COUNT_OF(matchtable_edx87), raw->ext_cpuid[7][3], data); - if (raw->ext_cpuid[0][0] >= 0x8000001a) { - /* We have the extended info about SSE unit size */ - data->detection_hints[CPU_HINT_SSE_SIZE_AUTH] = 1; - data->sse_size = (raw->ext_cpuid[0x1a][0] & 1) ? 128 : 64; - } -} - -static void decode_amd_cache_info(struct cpu_raw_data_t* raw, struct cpu_id_t* data) -{ - int l3_result; - const int assoc_table[16] = { - 0, 1, 2, 0, 4, 0, 8, 0, 16, 0, 32, 48, 64, 96, 128, 255 - }; - unsigned n = raw->ext_cpuid[0][0]; - - if (n >= 0x80000005) { - data->l1_data_cache = (raw->ext_cpuid[5][2] >> 24) & 0xff; - data->l1_assoc = (raw->ext_cpuid[5][2] >> 16) & 0xff; - data->l1_cacheline = (raw->ext_cpuid[5][2]) & 0xff; - data->l1_instruction_cache = (raw->ext_cpuid[5][3] >> 24) & 0xff; - } - if (n >= 0x80000006) { - data->l2_cache = (raw->ext_cpuid[6][2] >> 16) & 0xffff; - data->l2_assoc = assoc_table[(raw->ext_cpuid[6][2] >> 12) & 0xf]; - data->l2_cacheline = (raw->ext_cpuid[6][2]) & 0xff; - - l3_result = (raw->ext_cpuid[6][3] >> 18); - if (l3_result > 0) { - l3_result = 512 * l3_result; /* AMD spec says it's a range, - but we take the lower bound */ - data->l3_cache = l3_result; - data->l3_assoc = assoc_table[(raw->ext_cpuid[6][3] >> 12) & 0xf]; - data->l3_cacheline = (raw->ext_cpuid[6][3]) & 0xff; - } else { - data->l3_cache = 0; - } - } -} - -static void decode_amd_number_of_cores(struct cpu_raw_data_t* raw, struct cpu_id_t* data) -{ - int logical_cpus = -1, num_cores = -1; - - if (raw->basic_cpuid[0][0] >= 1) { - logical_cpus = (raw->basic_cpuid[1][1] >> 16) & 0xff; - if (raw->ext_cpuid[0][0] >= 8) { - num_cores = 1 + (raw->ext_cpuid[8][2] & 0xff); - } - } - if (data->flags[CPU_FEATURE_HT]) { - if (num_cores > 1) { - data->num_cores = num_cores; - data->num_logical_cpus = logical_cpus; - } else { - data->num_cores = 1; - data->num_logical_cpus = (logical_cpus >= 2 ? 
logical_cpus : 2); - } - } else { - data->num_cores = data->num_logical_cpus = 1; - } -} - -static int amd_has_turion_modelname(const char *bs) -{ - /* We search for something like TL-60. Ahh, I miss regexes...*/ - int i, l, k; - char code[3] = {0}; - const char* codes[] = { "ML", "MT", "MK", "TK", "TL", "RM", "ZM", "" }; - l = (int) strlen(bs); - for (i = 3; i < l - 2; i++) { - if (bs[i] == '-' && - isupper(bs[i-1]) && isupper(bs[i-2]) && !isupper(bs[i-3]) && - isdigit(bs[i+1]) && isdigit(bs[i+2]) && !isdigit(bs[i+3])) - { - code[0] = bs[i-2]; - code[1] = bs[i-1]; - for (k = 0; codes[k][0]; k++) - if (!strcmp(codes[k], code)) return 1; - } - } - return 0; -} - -static amd_code_t decode_amd_codename_part1(const char *bs) -{ - int is_dual = 0, is_quad = 0, is_tri = 0; - if (strstr(bs, "Dual Core") || - strstr(bs, "Dual-Core") || - strstr(bs, " X2 ")) - is_dual = 1; - if (strstr(bs, " X4 ")) is_quad = 1; - if (strstr(bs, " X3 ")) is_tri = 1; - if (strstr(bs, "Opteron")) { - return is_dual ? OPTERON_DUALCORE : OPTERON_SINGLE; - } - if (strstr(bs, "Phenom")) { - if (strstr(bs, "II")) return PHENOM2; - else return PHENOM; - } - if (amd_has_turion_modelname(bs)) { - return is_dual ? TURION_X2 : TURION_64; - } - if (strstr(bs, "Athlon(tm) 64 FX")) return ATHLON_64_FX; - if (strstr(bs, "Athlon(tm) FX")) return ATHLON_FX; - if (strstr(bs, "Athlon(tm) 64") || strstr(bs, "Athlon(tm) II X") || match_pattern(bs, "Athlon(tm) X#")) { - if (is_quad) return ATHLON_64_X4; - if (is_dual) return ATHLON_64_X2; - if (is_tri) return ATHLON_64_X3; - return ATHLON_64; - } - if (strstr(bs, "Turion")) { - return is_dual ? TURION_X2 : TURION_64; - } - - if (strstr(bs, "mobile") || strstr(bs, "Mobile")) { - if (strstr(bs, "Athlon(tm) XP-M (LV)")) return ATHLON_XP_M_LV; - if (strstr(bs, "Athlon(tm) XP")) return ATHLON_XP_M; - if (strstr(bs, "Sempron(tm)")) return M_SEMPRON; - if (strstr(bs, "Athlon")) return MOBILE_ATHLON64; - if (strstr(bs, "Duron")) return MOBILE_DURON; - - } else { - if (strstr(bs, "Athlon(tm) XP")) return ATHLON_XP; - if (strstr(bs, "Athlon(tm) MP")) return ATHLON_MP; - if (strstr(bs, "Sempron(tm)")) return SEMPRON; - if (strstr(bs, "Duron")) return DURON; - if (strstr(bs, "Athlon")) return ATHLON; - } - if (match_pattern(bs, "C-##")) return FUSION_C; - if (match_pattern(bs, "E-###")) return FUSION_E; - if (match_pattern(bs, "Z-##")) return FUSION_Z; - if (match_pattern(bs, "E#-####") || match_pattern(bs, "A#-####")) return FUSION_EA; - - return (amd_code_t) NO_CODE; -} - -static void decode_amd_codename(struct cpu_raw_data_t* raw, struct cpu_id_t* data, struct internal_id_info_t* internal) -{ - amd_code_t code = decode_amd_codename_part1(data->brand_str); - int i = 0; - char* code_str = NULL; - for (i = 0; i < COUNT_OF(amd_code_str); i++) { - if (code == amd_code_str[i].code) { - code_str = amd_code_str[i].str; - break; - } - } - if (code == ATHLON_64_X2 && data->l2_cache < 512) - code = SEMPRON_DUALCORE; - if (code_str) - debugf(2, "Detected AMD brand code: %d (%s)\n", code, code_str); - else - debugf(2, "Detected AMD brand code: %d\n", code); - internal->code.amd = code; - internal->score = match_cpu_codename(cpudb_amd, COUNT_OF(cpudb_amd), data, code, 0); -} - -int cpuid_identify_amd(struct cpu_raw_data_t* raw, struct cpu_id_t* data, struct internal_id_info_t* internal) -{ - load_amd_features(raw, data); - decode_amd_cache_info(raw, data); - decode_amd_number_of_cores(raw, data); - decode_amd_codename(raw, data, internal); - return 0; -} - -void cpuid_get_list_amd(struct cpu_list_t* list) -{ - 
generic_get_cpu_list(cpudb_amd, COUNT_OF(cpudb_amd), list); -} diff --git a/contrib/libcpuid/include/libcpuid/recog_amd.h b/contrib/libcpuid/include/libcpuid/recog_amd.h deleted file mode 100644 index 34e89598397..00000000000 --- a/contrib/libcpuid/include/libcpuid/recog_amd.h +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright 2008 Veselin Georgiev, - * anrieffNOSPAM @ mgail_DOT.com (convert to gmail) - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ -#ifndef __RECOG_AMD_H__ -#define __RECOG_AMD_H__ - -int cpuid_identify_amd(struct cpu_raw_data_t* raw, struct cpu_id_t* data, struct internal_id_info_t* internal); -void cpuid_get_list_amd(struct cpu_list_t* list); - -#endif /* __RECOG_AMD_H__ */ diff --git a/contrib/libcpuid/include/libcpuid/recog_intel.c b/contrib/libcpuid/include/libcpuid/recog_intel.c deleted file mode 100644 index 2ffc41c8a15..00000000000 --- a/contrib/libcpuid/include/libcpuid/recog_intel.c +++ /dev/null @@ -1,897 +0,0 @@ -/* - * Copyright 2008 Veselin Georgiev, - * anrieffNOSPAM @ mgail_DOT.com (convert to gmail) - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
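decode_amd_cache_info() in the deleted recog_amd.c above is a direct decode of CPUID leaf 0x80000006: ECX[31:16] is the L2 size in KB, ECX[15:12] an associativity code, ECX[7:0] the line size, and EDX[31:18] the L3 size in 512 KB units. A standalone sketch of the same decode, assuming GCC/Clang's <cpuid.h> for the __get_cpuid() helper; the associativity table is copied from the code above:

    #include <stdio.h>
    #include <cpuid.h>   /* __get_cpuid(), GCC/Clang */

    int main(void)
    {
        unsigned eax, ebx, ecx, edx;
        /* Same lower-bound associativity table as decode_amd_cache_info(). */
        static const int assoc_table[16] = {
            0, 1, 2, 0, 4, 0, 8, 0, 16, 0, 32, 48, 64, 96, 128, 255
        };

        if (!__get_cpuid(0x80000006, &eax, &ebx, &ecx, &edx))
            return 1;   /* extended leaf not supported */

        printf("L2: %u KB, %d-way, %u-byte lines\n",
               (ecx >> 16) & 0xffff,            /* size in KB    */
               assoc_table[(ecx >> 12) & 0xf],  /* associativity */
               ecx & 0xff);                     /* line size     */

        /* L3 size is reported as a range in 512 KB units; like the code
         * above, keep the lower bound. */
        if ((edx >> 18) > 0)
            printf("L3: %u KB\n", (edx >> 18) * 512);
        return 0;
    }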
- */ -#include -#include -#include "libcpuid.h" -#include "libcpuid_util.h" -#include "libcpuid_internal.h" -#include "recog_intel.h" - -const struct intel_bcode_str { intel_code_t code; char *str; } intel_bcode_str[] = { - #define CODE(x) { x, #x } - #define CODE2(x, y) CODE(x) - #include "intel_code_t.h" - #undef CODE -}; - -enum _intel_model_t { - UNKNOWN = -1, - _3000 = 100, - _3100, - _3200, - X3200, - _3300, - X3300, - _5100, - _5200, - _5300, - _5400, - _2xxx, /* Core i[357] 2xxx */ - _3xxx, /* Core i[357] 3xxx */ -}; -typedef enum _intel_model_t intel_model_t; - -const struct match_entry_t cpudb_intel[] = { - { -1, -1, -1, -1, -1, 1, -1, -1, NO_CODE , 0, "Unknown Intel CPU" }, - - /* i486 */ - { 4, -1, -1, -1, -1, 1, -1, -1, NO_CODE , 0, "Unknown i486" }, - { 4, 0, -1, -1, -1, 1, -1, -1, NO_CODE , 0, "i486 DX-25/33" }, - { 4, 1, -1, -1, -1, 1, -1, -1, NO_CODE , 0, "i486 DX-50" }, - { 4, 2, -1, -1, -1, 1, -1, -1, NO_CODE , 0, "i486 SX" }, - { 4, 3, -1, -1, -1, 1, -1, -1, NO_CODE , 0, "i486 DX2" }, - { 4, 4, -1, -1, -1, 1, -1, -1, NO_CODE , 0, "i486 SL" }, - { 4, 5, -1, -1, -1, 1, -1, -1, NO_CODE , 0, "i486 SX2" }, - { 4, 7, -1, -1, -1, 1, -1, -1, NO_CODE , 0, "i486 DX2 WriteBack" }, - { 4, 8, -1, -1, -1, 1, -1, -1, NO_CODE , 0, "i486 DX4" }, - { 4, 9, -1, -1, -1, 1, -1, -1, NO_CODE , 0, "i486 DX4 WriteBack" }, - - /* All Pentia: - Pentium 1 */ - { 5, -1, -1, -1, -1, 1, -1, -1, NO_CODE , 0, "Unknown Pentium" }, - { 5, 0, -1, -1, -1, 1, -1, -1, NO_CODE , 0, "Pentium A-Step" }, - { 5, 1, -1, -1, -1, 1, -1, -1, NO_CODE , 0, "Pentium 1 (0.8u)" }, - { 5, 2, -1, -1, -1, 1, -1, -1, NO_CODE , 0, "Pentium 1 (0.35u)" }, - { 5, 3, -1, -1, -1, 1, -1, -1, NO_CODE , 0, "Pentium OverDrive" }, - { 5, 4, -1, -1, -1, 1, -1, -1, NO_CODE , 0, "Pentium 1 (0.35u)" }, - { 5, 7, -1, -1, -1, 1, -1, -1, NO_CODE , 0, "Pentium 1 (0.35u)" }, - { 5, 8, -1, -1, -1, 1, -1, -1, NO_CODE , 0, "Pentium MMX (0.25u)" }, - - /* Pentium 2 / 3 / M / Conroe / whatsnext - all P6 based. 
*/ - { 6, -1, -1, -1, -1, 1, -1, -1, NO_CODE , 0, "Unknown P6" }, - { 6, 0, -1, -1, -1, 1, -1, -1, NO_CODE , 0, "Pentium Pro" }, - { 6, 1, -1, -1, -1, 1, -1, -1, NO_CODE , 0, "Pentium Pro" }, - { 6, 3, -1, -1, -1, 1, -1, -1, NO_CODE , 0, "Pentium II (Klamath)" }, - { 6, 5, -1, -1, -1, 1, -1, -1, NO_CODE , 0, "Pentium II (Deschutes)" }, - { 6, 5, -1, -1, -1, 1, -1, -1, MOBILE_PENTIUM , 0, "Mobile Pentium II (Tonga)"}, - { 6, 6, -1, -1, -1, 1, -1, -1, NO_CODE , 0, "Pentium II (Dixon)" }, - - { 6, 3, -1, -1, -1, 1, -1, -1, XEON , 0, "P-II Xeon (Klamath)" }, - { 6, 5, -1, -1, -1, 1, -1, -1, XEON , 0, "P-II Xeon (Drake)" }, - { 6, 6, -1, -1, -1, 1, -1, -1, XEON , 0, "P-II Xeon (Dixon)" }, - - { 6, 5, -1, -1, -1, 1, -1, -1, CELERON , 0, "P-II Celeron (Covingtons" }, - { 6, 6, -1, -1, -1, 1, -1, -1, CELERON , 0, "P-II Celeron (Mendocino)" }, - - /* -------------------------------------------------- */ - - { 6, 7, -1, -1, -1, 1, -1, -1, NO_CODE , 0, "Pentium III (Katmai)" }, - { 6, 8, -1, -1, -1, 1, -1, -1, NO_CODE , 0, "Pentium III (Coppermine)"}, - { 6, 10, -1, -1, -1, 1, -1, -1, NO_CODE , 0, "Pentium III (Coppermine)"}, - { 6, 11, -1, -1, -1, 1, -1, -1, NO_CODE , 0, "Pentium III (Tualatin)" }, - - { 6, 7, -1, -1, -1, 1, -1, -1, XEON , 0, "P-III Xeon (Tanner)" }, - { 6, 8, -1, -1, -1, 1, -1, -1, XEON , 0, "P-III Xeon (Cascades)" }, - { 6, 10, -1, -1, -1, 1, -1, -1, XEON , 0, "P-III Xeon (Cascades)" }, - { 6, 11, -1, -1, -1, 1, -1, -1, XEON , 0, "P-III Xeon (Tualatin)" }, - - { 6, 7, -1, -1, -1, 1, -1, -1, CELERON , 0, "P-III Celeron (Katmai)" }, - { 6, 8, -1, -1, -1, 1, -1, -1, CELERON , 0, "P-III Celeron (Coppermine)" }, - { 6, 10, -1, -1, -1, 1, -1, -1, CELERON , 0, "P-III Celeron (Coppermine)" }, - { 6, 11, -1, -1, -1, 1, -1, -1, CELERON , 0, "P-III Celeron (Tualatin)" }, - - /* Netburst based (Pentium 4 and later) - classic P4s */ - { 15, -1, -1, -1, -1, 1, -1, -1, NO_CODE , 0, "Unknown Pentium 4" }, - { 15, -1, -1, 15, -1, 1, -1, -1, CELERON , 0, "Unknown P-4 Celeron" }, - { 15, -1, -1, 15, -1, 1, -1, -1, XEON , 0, "Unknown Xeon" }, - - { 15, 0, -1, 15, -1, 1, -1, -1, NO_CODE , 0, "Pentium 4 (Willamette)" }, - { 15, 1, -1, 15, -1, 1, -1, -1, NO_CODE , 0, "Pentium 4 (Willamette)" }, - { 15, 2, -1, 15, -1, 1, -1, -1, NO_CODE , 0, "Pentium 4 (Northwood)" }, - { 15, 3, -1, 15, -1, 1, -1, -1, NO_CODE , 0, "Pentium 4 (Prescott)" }, - { 15, 4, -1, 15, -1, 1, -1, -1, NO_CODE , 0, "Pentium 4 (Prescott)" }, - { 15, 6, -1, 15, -1, 1, -1, -1, NO_CODE , 0, "Pentium 4 (Cedar Mill)" }, - { 15, 0, -1, 15, -1, 1, -1, -1, MOBILE_PENTIUM , 0, "Mobile P-4 (Willamette)" }, - { 15, 1, -1, 15, -1, 1, -1, -1, MOBILE_PENTIUM , 0, "Mobile P-4 (Willamette)" }, - { 15, 2, -1, 15, -1, 1, -1, -1, MOBILE_PENTIUM , 0, "Mobile P-4 (Northwood)" }, - { 15, 3, -1, 15, -1, 1, -1, -1, MOBILE_PENTIUM , 0, "Mobile P-4 (Prescott)" }, - { 15, 4, -1, 15, -1, 1, -1, -1, MOBILE_PENTIUM , 0, "Mobile P-4 (Prescott)" }, - { 15, 6, -1, 15, -1, 1, -1, -1, MOBILE_PENTIUM , 0, "Mobile P-4 (Cedar Mill)" }, - - /* server CPUs */ - { 15, 0, -1, 15, -1, 1, -1, -1, XEON , 0, "Xeon (Foster)" }, - { 15, 1, -1, 15, -1, 1, -1, -1, XEON , 0, "Xeon (Foster)" }, - { 15, 2, -1, 15, -1, 1, -1, -1, XEON , 0, "Xeon (Prestonia)" }, - { 15, 2, -1, 15, -1, 1, -1, -1, XEONMP , 0, "Xeon (Gallatin)" }, - { 15, 3, -1, 15, -1, 1, -1, -1, XEON , 0, "Xeon (Nocona)" }, - { 15, 4, -1, 15, -1, 1, -1, -1, XEON , 0, "Xeon (Nocona)" }, - { 15, 4, -1, 15, -1, 1, -1, -1, XEON_IRWIN , 0, "Xeon (Irwindale)" }, - { 15, 4, -1, 15, -1, 1, -1, -1, XEONMP , 0, "Xeon (Cranford)" }, 
- { 15, 4, -1, 15, -1, 1, -1, -1, XEON_POTOMAC , 0, "Xeon (Potomac)" }, - { 15, 6, -1, 15, -1, 1, -1, -1, XEON , 0, "Xeon (Dempsey)" }, - - /* Pentium Ds */ - { 15, 4, 4, 15, -1, 1, -1, -1, NO_CODE , 0, "Pentium D (SmithField)" }, - { 15, 4, -1, 15, -1, 1, -1, -1, PENTIUM_D , 0, "Pentium D (SmithField)" }, - { 15, 4, 7, 15, -1, 1, -1, -1, NO_CODE , 0, "Pentium D (SmithField)" }, - { 15, 6, -1, 15, -1, 1, -1, -1, PENTIUM_D , 0, "Pentium D (Presler)" }, - - /* Celeron and Celeron Ds */ - { 15, 1, -1, 15, -1, 1, -1, -1, CELERON , 0, "P-4 Celeron (Willamette)" }, - { 15, 2, -1, 15, -1, 1, -1, -1, CELERON , 0, "P-4 Celeron (Northwood)" }, - { 15, 3, -1, 15, -1, 1, -1, -1, CELERON , 0, "P-4 Celeron D (Prescott)" }, - { 15, 4, -1, 15, -1, 1, -1, -1, CELERON , 0, "P-4 Celeron D (Prescott)" }, - { 15, 6, -1, 15, -1, 1, -1, -1, CELERON , 0, "P-4 Celeron D (Cedar Mill)" }, - - /* -------------------------------------------------- */ - /* Intel Core microarchitecture - P6-based */ - - { 6, 9, -1, -1, -1, 1, -1, -1, NO_CODE , 0, "Unknown Pentium M" }, - { 6, 9, -1, -1, -1, 1, -1, -1, MOBILE_PENTIUM_M , 0, "Unknown Pentium M" }, - { 6, 9, -1, -1, -1, 1, -1, -1, PENTIUM , 0, "Pentium M (Banias)" }, - { 6, 9, -1, -1, -1, 1, -1, -1, MOBILE_PENTIUM_M , 0, "Pentium M (Banias)" }, - { 6, 9, -1, -1, -1, 1, -1, -1, CELERON , 0, "Celeron M" }, - { 6, 13, -1, -1, -1, 1, -1, -1, PENTIUM , 0, "Pentium M (Dothan)" }, - { 6, 13, -1, -1, -1, 1, -1, -1, MOBILE_PENTIUM_M , 0, "Pentium M (Dothan)" }, - { 6, 13, -1, -1, -1, 1, -1, -1, CELERON , 0, "Celeron M" }, - - { 6, 12, -1, -1, -1, -1, -1, -1, ATOM_UNKNOWN , 0, "Unknown Atom" }, - { 6, 12, -1, -1, -1, -1, -1, -1, ATOM_DIAMONDVILLE , 0, "Atom (Diamondville)" }, - { 6, 12, -1, -1, -1, -1, -1, -1, ATOM_SILVERTHORNE , 0, "Atom (Silverthorne)" }, - { 6, 12, -1, -1, -1, -1, -1, -1, ATOM_CEDARVIEW , 0, "Atom (Cedarview)" }, - { 6, 6, -1, -1, -1, -1, -1, -1, ATOM_CEDARVIEW , 0, "Atom (Cedarview)" }, - { 6, 12, -1, -1, -1, -1, -1, -1, ATOM_PINEVIEW , 0, "Atom (Pineview)" }, - - /* -------------------------------------------------- */ - - { 6, 14, -1, -1, -1, 1, -1, -1, NO_CODE , 0, "Unknown Yonah" }, - { 6, 14, -1, -1, -1, 1, -1, -1, CORE_SOLO , 0, "Yonah (Core Solo)" }, - { 6, 14, -1, -1, -1, 2, -1, -1, CORE_DUO , 0, "Yonah (Core Duo)" }, - { 6, 14, -1, -1, -1, 1, -1, -1, MOBILE_CORE_SOLO , 0, "Yonah (Core Solo)" }, - { 6, 14, -1, -1, -1, 2, -1, -1, MOBILE_CORE_DUO , 0, "Yonah (Core Duo)" }, - { 6, 14, -1, -1, -1, 1, -1, -1, CORE_SOLO , 0, "Yonah (Core Solo)" }, - - { 6, 15, -1, -1, -1, 1, -1, -1, NO_CODE , 0, "Unknown Core 2" }, - { 6, 15, -1, -1, -1, 2, 4096, -1, CORE_DUO , 0, "Conroe (Core 2 Duo)" }, - { 6, 15, -1, -1, -1, 2, 1024, -1, CORE_DUO , 0, "Conroe (Core 2 Duo) 1024K" }, - { 6, 15, -1, -1, -1, 2, 512, -1, CORE_DUO , 0, "Conroe (Core 2 Duo) 512K" }, - { 6, 15, -1, -1, -1, 4, -1, -1, QUAD_CORE , 0, "Kentsfield (Core 2 Quad)" }, - { 6, 15, -1, -1, -1, 4, 4096, -1, QUAD_CORE , 0, "Kentsfield (Core 2 Quad)" }, - { 6, 15, -1, -1, -1, 400, -1, -1, MORE_THAN_QUADCORE, 0, "More than quad-core" }, - { 6, 15, -1, -1, -1, 2, 2048, -1, CORE_DUO , 0, "Allendale (Core 2 Duo)" }, - { 6, 15, -1, -1, -1, 2, -1, -1, MOBILE_CORE_DUO , 0, "Merom (Core 2 Duo)" }, - { 6, 15, -1, -1, -1, 2, 2048, -1, MEROM , 0, "Merom (Core 2 Duo) 2048K" }, - { 6, 15, -1, -1, -1, 2, 4096, -1, MEROM , 0, "Merom (Core 2 Duo) 4096K" }, - - { 6, 15, -1, -1, 15, 1, -1, -1, CELERON , 0, "Conroe-L (Celeron)" }, - { 6, 6, -1, -1, 22, 1, -1, -1, CELERON , 0, "Conroe-L (Celeron)" }, - { 6, 15, -1, -1, 15, 2, -1, 
-1, CELERON , 0, "Conroe-L (Allendale)" }, - { 6, 6, -1, -1, 22, 2, -1, -1, CELERON , 0, "Conroe-L (Allendale)" }, - - - { 6, 6, -1, -1, 22, 1, -1, -1, NO_CODE , 0, "Unknown Core ?" }, - { 6, 7, -1, -1, 23, 1, -1, -1, NO_CODE , 0, "Unknown Core ?" }, - { 6, 6, -1, -1, 22, 400, -1, -1, MORE_THAN_QUADCORE, 0, "More than quad-core" }, - { 6, 7, -1, -1, 23, 400, -1, -1, MORE_THAN_QUADCORE, 0, "More than quad-core" }, - - { 6, 7, -1, -1, 23, 1, -1, -1, CORE_SOLO , 0, "Unknown Core 45nm" }, - { 6, 7, -1, -1, 23, 1, -1, -1, CORE_DUO , 0, "Unknown Core 45nm" }, - { 6, 7, -1, -1, 23, 2, 1024, -1, WOLFDALE , 0, "Celeron Wolfdale 1M" }, - { 6, 7, -1, -1, 23, 2, 2048, -1, WOLFDALE , 0, "Wolfdale (Core 2 Duo) 2M" }, - { 6, 7, -1, -1, 23, 2, 3072, -1, WOLFDALE , 0, "Wolfdale (Core 2 Duo) 3M" }, - { 6, 7, -1, -1, 23, 2, 6144, -1, WOLFDALE , 0, "Wolfdale (Core 2 Duo) 6M" }, - { 6, 7, -1, -1, 23, 1, -1, -1, MOBILE_CORE_DUO , 0, "Penryn (Core 2 Duo)" }, - { 6, 7, -1, -1, 23, 2, 1024, -1, PENRYN , 0, "Penryn (Core 2 Duo)" }, - { 6, 7, -1, -1, 23, 2, 3072, -1, PENRYN , 0, "Penryn (Core 2 Duo) 3M" }, - { 6, 7, -1, -1, 23, 2, 6144, -1, PENRYN , 0, "Penryn (Core 2 Duo) 6M" }, - { 6, 7, -1, -1, 23, 4, 2048, -1, QUAD_CORE , 0, "Yorkfield (Core 2 Quad) 2M"}, - { 6, 7, -1, -1, 23, 4, 3072, -1, QUAD_CORE , 0, "Yorkfield (Core 2 Quad) 3M"}, - { 6, 7, -1, -1, 23, 4, 6144, -1, QUAD_CORE , 0, "Yorkfield (Core 2 Quad) 6M"}, - - /* Core microarchitecture-based Xeons: */ - { 6, 14, -1, -1, 14, 1, -1, -1, XEON , 0, "Xeon LV" }, - { 6, 15, -1, -1, 15, 2, 4096, -1, XEON , _5100, "Xeon (Woodcrest)" }, - { 6, 15, -1, -1, 15, 2, 2048, -1, XEON , _3000, "Xeon (Conroe/2M)" }, - { 6, 15, -1, -1, 15, 2, 4096, -1, XEON , _3000, "Xeon (Conroe/4M)" }, - { 6, 15, -1, -1, 15, 4, 4096, -1, XEON , X3200, "Xeon (Kentsfield)" }, - { 6, 15, -1, -1, 15, 4, 4096, -1, XEON , _5300, "Xeon (Clovertown)" }, - { 6, 7, -1, -1, 23, 2, 6144, -1, XEON , _3100, "Xeon (Wolfdale)" }, - { 6, 7, -1, -1, 23, 2, 6144, -1, XEON , _5200, "Xeon (Wolfdale DP)" }, - { 6, 7, -1, -1, 23, 4, 6144, -1, XEON , _5400, "Xeon (Harpertown)" }, - { 6, 7, -1, -1, 23, 4, 3072, -1, XEON , X3300, "Xeon (Yorkfield/3M)" }, - { 6, 7, -1, -1, 23, 4, 6144, -1, XEON , X3300, "Xeon (Yorkfield/6M)" }, - - /* Nehalem CPUs (45nm): */ - { 6, 10, -1, -1, 26, 4, -1, -1, XEON_GAINESTOWN , 0, "Gainestown (Xeon)" }, - { 6, 10, -1, -1, 26, 4, -1, 4096, XEON_GAINESTOWN , 0, "Gainestown 4M (Xeon)" }, - { 6, 10, -1, -1, 26, 4, -1, 8192, XEON_GAINESTOWN , 0, "Gainestown 8M (Xeon)" }, - { 6, 10, -1, -1, 26, 4, -1, -1, XEON_I7 , 0, "Bloomfield (Xeon)" }, - { 6, 10, -1, -1, 26, 4, -1, -1, CORE_I7 , 0, "Bloomfield (Core i7)" }, - { 6, 10, -1, -1, 30, 4, -1, -1, CORE_I7 , 0, "Lynnfield (Core i7)" }, - { 6, 5, -1, -1, 37, 4, -1, 8192, CORE_I5 , 0, "Lynnfield (Core i5)" }, - - /* Westmere CPUs (32nm): */ - { 6, 5, -1, -1, 37, 2, -1, -1, NO_CODE , 0, "Unknown Core i3/i5" }, - { 6, 12, -1, -1, 44, -1, -1, -1, XEON_WESTMERE , 0, "Westmere (Xeon)" }, - { 6, 12, -1, -1, 44, -1, -1, 12288, XEON_WESTMERE , 0, "Gulftown (Xeon)" }, - { 6, 12, -1, -1, 44, 4, -1, 12288, CORE_I7 , 0, "Gulftown (Core i7)" }, - { 6, 5, -1, -1, 37, 2, -1, 4096, CORE_I5 , 0, "Clarkdale (Core i5)" }, - { 6, 5, -1, -1, 37, 2, -1, 4096, CORE_I3 , 0, "Clarkdale (Core i3)" }, - { 6, 5, -1, -1, 37, 2, -1, -1, PENTIUM , 0, "Arrandale" }, - { 6, 5, -1, -1, 37, 2, -1, 4096, CORE_I7 , 0, "Arrandale (Core i7)" }, - { 6, 5, -1, -1, 37, 2, -1, 3072, CORE_I5 , 0, "Arrandale (Core i5)" }, - { 6, 5, -1, -1, 37, 2, -1, 3072, CORE_I3 , 0, "Arrandale (Core 
i3)" }, - - /* Sandy Bridge CPUs (32nm): */ - { 6, 10, -1, -1, 42, -1, -1, -1, NO_CODE , 0, "Unknown Sandy Bridge" }, - { 6, 10, -1, -1, 42, -1, -1, -1, XEON , 0, "Sandy Bridge (Xeon)" }, - { 6, 10, -1, -1, 42, -1, -1, -1, CORE_I7 , 0, "Sandy Bridge (Core i7)" }, - { 6, 10, -1, -1, 42, 4, -1, -1, CORE_I7 , 0, "Sandy Bridge (Core i7)" }, - { 6, 10, -1, -1, 42, 4, -1, -1, CORE_I5 , 0, "Sandy Bridge (Core i5)" }, - { 6, 10, -1, -1, 42, 2, -1, -1, CORE_I3 , 0, "Sandy Bridge (Core i3)" }, - { 6, 10, -1, -1, 42, 2, -1, -1, PENTIUM , 0, "Sandy Bridge (Pentium)" }, - { 6, 10, -1, -1, 42, 1, -1, -1, CELERON , 0, "Sandy Bridge (Celeron)" }, - { 6, 10, -1, -1, 42, 2, -1, -1, CELERON , 0, "Sandy Bridge (Celeron)" }, - { 6, 13, -1, -1, 45, -1, -1, -1, NO_CODE , 0, "Sandy Bridge-E" }, - { 6, 13, -1, -1, 45, -1, -1, -1, XEON , 0, "Sandy Bridge-E (Xeon)" }, - - /* Ivy Bridge CPUs (22nm): */ - { 6, 10, -1, -1, 58, -1, -1, -1, XEON , 0, "Ivy Bridge (Xeon)" }, - { 6, 10, -1, -1, 58, 4, -1, -1, CORE_IVY7 , 0, "Ivy Bridge (Core i7)" }, - { 6, 10, -1, -1, 58, 4, -1, -1, CORE_IVY5 , 0, "Ivy Bridge (Core i5)" }, - { 6, 10, -1, -1, 58, 2, -1, -1, CORE_IVY3 , 0, "Ivy Bridge (Core i3)" }, - { 6, 10, -1, -1, 58, 2, -1, -1, PENTIUM , 0, "Ivy Bridge (Pentium)" }, - { 6, 10, -1, -1, 58, 1, -1, -1, CELERON , 0, "Ivy Bridge (Celeron)" }, - { 6, 10, -1, -1, 58, 2, -1, -1, CELERON , 0, "Ivy Bridge (Celeron)" }, - { 6, 14, -1, -1, 62, -1, -1, -1, NO_CODE , 0, "Ivy Bridge-E" }, - - /* Haswell CPUs (22nm): */ - { 6, 12, -1, -1, 60, -1, -1, -1, XEON , 0, "Haswell (Xeon)" }, - { 6, 12, -1, -1, 60, 4, -1, -1, CORE_HASWELL7 , 0, "Haswell (Core i7)" }, - { 6, 5, -1, -1, 69, 4, -1, -1, CORE_HASWELL7 , 0, "Haswell (Core i7)" }, - { 6, 12, -1, -1, 60, 4, -1, -1, CORE_HASWELL5 , 0, "Haswell (Core i5)" }, - { 6, 5, -1, -1, 69, 4, -1, -1, CORE_HASWELL5 , 0, "Haswell (Core i5)" }, - { 6, 12, -1, -1, 60, 2, -1, -1, CORE_HASWELL3 , 0, "Haswell (Core i3)" }, - { 6, 5, -1, -1, 69, 2, -1, -1, CORE_HASWELL3 , 0, "Haswell (Core i3)" }, - { 6, 12, -1, -1, 60, 2, -1, -1, PENTIUM , 0, "Haswell (Pentium)" }, - { 6, 12, -1, -1, 60, 2, -1, -1, CELERON , 0, "Haswell (Celeron)" }, - { 6, 12, -1, -1, 60, 1, -1, -1, CELERON , 0, "Haswell (Celeron)" }, - { 6, 15, -1, -1, 63, -1, -1, -1, NO_CODE , 0, "Haswell-E" }, - - /* Broadwell CPUs (14nm): */ - { 6, 7, -1, -1, 71, 4, -1, -1, CORE_BROADWELL7 , 0, "Broadwell (Core i7)" }, - { 6, 7, -1, -1, 71, 4, -1, -1, CORE_BROADWELL5 , 0, "Broadwell (Core i5)" }, - { 6, 13, -1, -1, 61, 4, -1, -1, CORE_BROADWELL7 , 0, "Broadwell-U (Core i7)" }, - { 6, 13, -1, -1, 61, 2, -1, -1, CORE_BROADWELL7 , 0, "Broadwell-U (Core i7)" }, - { 6, 13, -1, -1, 61, 2, -1, -1, CORE_BROADWELL5 , 0, "Broadwell-U (Core i5)" }, - { 6, 13, -1, -1, 61, 2, -1, -1, CORE_BROADWELL3 , 0, "Broadwell-U (Core i3)" }, - { 6, 13, -1, -1, 61, 2, -1, -1, PENTIUM , 0, "Broadwell-U (Pentium)" }, - { 6, 13, -1, -1, 61, 2, -1, -1, CELERON , 0, "Broadwell-U (Celeron)" }, - { 6, 13, -1, -1, 61, 2, -1, -1, NA , 0, "Broadwell-U (Core M)" }, - { 6, 15, -1, -1, 79, 2, -1, -1, CORE_BROADWELL3 , 0, "Broadwell-E (Core i3)" }, - { 6, 15, -1, -1, 79, 2, -1, -1, CORE_BROADWELL5 , 0, "Broadwell-E (Core i5)" }, - { 6, 15, -1, -1, 79, 4, -1, -1, CORE_BROADWELL5 , 0, "Broadwell-E (Core i5)" }, - { 6, 15, -1, -1, 79, 2, -1, -1, CORE_BROADWELL7 , 0, "Broadwell-E (Core i7)" }, - { 6, 15, -1, -1, 79, 4, -1, -1, CORE_BROADWELL7 , 0, "Broadwell-E (Core i7)" }, - - /* Skylake CPUs (14nm): */ - { 6, 14, -1, -1, 94, 4, -1, -1, CORE_BROADWELL7 , 0, "Skylake (Core i7)" }, - { 6, 14, 
-1, -1, 94, 4, -1, -1, CORE_BROADWELL5 , 0, "Skylake (Core i5)" }, - { 6, 14, -1, -1, 94, 4, -1, -1, CORE_BROADWELL3 , 0, "Skylake (Core i3)" }, - { 6, 14, -1, -1, 94, 4, -1, -1, PENTIUM , 0, "Skylake (Pentium)" }, - - /* Itaniums */ - { 7, -1, -1, -1, -1, 1, -1, -1, NO_CODE , 0, "Itanium" }, - { 15, -1, -1, 16, -1, 1, -1, -1, NO_CODE , 0, "Itanium 2" }, - -}; - - -static void load_intel_features(struct cpu_raw_data_t* raw, struct cpu_id_t* data) -{ - const struct feature_map_t matchtable_edx1[] = { - { 18, CPU_FEATURE_PN }, - { 21, CPU_FEATURE_DTS }, - { 22, CPU_FEATURE_ACPI }, - { 27, CPU_FEATURE_SS }, - { 29, CPU_FEATURE_TM }, - { 30, CPU_FEATURE_IA64 }, - { 31, CPU_FEATURE_PBE }, - }; - const struct feature_map_t matchtable_ecx1[] = { - { 2, CPU_FEATURE_DTS64 }, - { 4, CPU_FEATURE_DS_CPL }, - { 5, CPU_FEATURE_VMX }, - { 6, CPU_FEATURE_SMX }, - { 7, CPU_FEATURE_EST }, - { 8, CPU_FEATURE_TM2 }, - { 10, CPU_FEATURE_CID }, - { 14, CPU_FEATURE_XTPR }, - { 15, CPU_FEATURE_PDCM }, - { 18, CPU_FEATURE_DCA }, - { 21, CPU_FEATURE_X2APIC }, - }; - const struct feature_map_t matchtable_edx81[] = { - { 20, CPU_FEATURE_XD }, - }; - const struct feature_map_t matchtable_ebx7[] = { - { 2, CPU_FEATURE_SGX }, - { 4, CPU_FEATURE_HLE }, - { 11, CPU_FEATURE_RTM }, - { 16, CPU_FEATURE_AVX512F }, - { 17, CPU_FEATURE_AVX512DQ }, - { 18, CPU_FEATURE_RDSEED }, - { 19, CPU_FEATURE_ADX }, - { 26, CPU_FEATURE_AVX512PF }, - { 27, CPU_FEATURE_AVX512ER }, - { 28, CPU_FEATURE_AVX512CD }, - { 29, CPU_FEATURE_SHA_NI }, - { 30, CPU_FEATURE_AVX512BW }, - { 31, CPU_FEATURE_AVX512VL }, - }; - if (raw->basic_cpuid[0][0] >= 1) { - match_features(matchtable_edx1, COUNT_OF(matchtable_edx1), raw->basic_cpuid[1][3], data); - match_features(matchtable_ecx1, COUNT_OF(matchtable_ecx1), raw->basic_cpuid[1][2], data); - } - if (raw->ext_cpuid[0][0] >= 1) { - match_features(matchtable_edx81, COUNT_OF(matchtable_edx81), raw->ext_cpuid[1][3], data); - } - // detect TSX/AVX512: - if (raw->basic_cpuid[0][0] >= 7) { - match_features(matchtable_ebx7, COUNT_OF(matchtable_ebx7), raw->basic_cpuid[7][1], data); - } -} - -enum _cache_type_t { - L1I, - L1D, - L2, - L3, - L4 -}; -typedef enum _cache_type_t cache_type_t; - -static void check_case(uint8_t on, cache_type_t cache, int size, int assoc, int linesize, struct cpu_id_t* data) -{ - if (!on) return; - switch (cache) { - case L1I: - data->l1_instruction_cache = size; - break; - case L1D: - data->l1_data_cache = size; - data->l1_assoc = assoc; - data->l1_cacheline = linesize; - break; - case L2: - data->l2_cache = size; - data->l2_assoc = assoc; - data->l2_cacheline = linesize; - break; - case L3: - data->l3_cache = size; - data->l3_assoc = assoc; - data->l3_cacheline = linesize; - break; - case L4: - data->l4_cache = size; - data->l4_assoc = assoc; - data->l4_cacheline = linesize; - break; - default: - break; - } -} - -static void decode_intel_oldstyle_cache_info(struct cpu_raw_data_t* raw, struct cpu_id_t* data) -{ - uint8_t f[256] = {0}; - int reg, off; - uint32_t x; - for (reg = 0; reg < 4; reg++) { - x = raw->basic_cpuid[2][reg]; - if (x & 0x80000000) continue; - for (off = 0; off < 4; off++) { - f[x & 0xff] = 1; - x >>= 8; - } - } - - check_case(f[0x06], L1I, 8, 4, 32, data); - check_case(f[0x08], L1I, 16, 4, 32, data); - check_case(f[0x0A], L1D, 8, 2, 32, data); - check_case(f[0x0C], L1D, 16, 4, 32, data); - check_case(f[0x22], L3, 512, 4, 64, data); - check_case(f[0x23], L3, 1024, 8, 64, data); - check_case(f[0x25], L3, 2048, 8, 64, data); - check_case(f[0x29], L3, 4096, 8, 64, data); - 
check_case(f[0x2C], L1D, 32, 8, 64, data); - check_case(f[0x30], L1I, 32, 8, 64, data); - check_case(f[0x39], L2, 128, 4, 64, data); - check_case(f[0x3A], L2, 192, 6, 64, data); - check_case(f[0x3B], L2, 128, 2, 64, data); - check_case(f[0x3C], L2, 256, 4, 64, data); - check_case(f[0x3D], L2, 384, 6, 64, data); - check_case(f[0x3E], L2, 512, 4, 64, data); - check_case(f[0x41], L2, 128, 4, 32, data); - check_case(f[0x42], L2, 256, 4, 32, data); - check_case(f[0x43], L2, 512, 4, 32, data); - check_case(f[0x44], L2, 1024, 4, 32, data); - check_case(f[0x45], L2, 2048, 4, 32, data); - check_case(f[0x46], L3, 4096, 4, 64, data); - check_case(f[0x47], L3, 8192, 8, 64, data); - check_case(f[0x4A], L3, 6144, 12, 64, data); - check_case(f[0x4B], L3, 8192, 16, 64, data); - check_case(f[0x4C], L3, 12288, 12, 64, data); - check_case(f[0x4D], L3, 16384, 16, 64, data); - check_case(f[0x4E], L2, 6144, 24, 64, data); - check_case(f[0x60], L1D, 16, 8, 64, data); - check_case(f[0x66], L1D, 8, 4, 64, data); - check_case(f[0x67], L1D, 16, 4, 64, data); - check_case(f[0x68], L1D, 32, 4, 64, data); - /* The following four entries are trace cache. Intel does not - * specify a cache-line size, so we use -1 instead - */ - check_case(f[0x70], L1I, 12, 8, -1, data); - check_case(f[0x71], L1I, 16, 8, -1, data); - check_case(f[0x72], L1I, 32, 8, -1, data); - check_case(f[0x73], L1I, 64, 8, -1, data); - - check_case(f[0x78], L2, 1024, 4, 64, data); - check_case(f[0x79], L2, 128, 8, 64, data); - check_case(f[0x7A], L2, 256, 8, 64, data); - check_case(f[0x7B], L2, 512, 8, 64, data); - check_case(f[0x7C], L2, 1024, 8, 64, data); - check_case(f[0x7D], L2, 2048, 8, 64, data); - check_case(f[0x7F], L2, 512, 2, 64, data); - check_case(f[0x82], L2, 256, 8, 32, data); - check_case(f[0x83], L2, 512, 8, 32, data); - check_case(f[0x84], L2, 1024, 8, 32, data); - check_case(f[0x85], L2, 2048, 8, 32, data); - check_case(f[0x86], L2, 512, 4, 64, data); - check_case(f[0x87], L2, 1024, 8, 64, data); - - if (f[0x49]) { - /* This flag is overloaded with two meanings. On Xeon MP - * (family 0xf, model 0x6) this means L3 cache. On all other - * CPUs (notably Conroe et al), this is L2 cache. In both cases - * it means 4MB, 16-way associative, 64-byte line size. - */ - if (data->family == 0xf && data->model == 0x6) { - data->l3_cache = 4096; - data->l3_assoc = 16; - data->l3_cacheline = 64; - } else { - data->l2_cache = 4096; - data->l2_assoc = 16; - data->l2_cacheline = 64; - } - } - if (f[0x40]) { - /* Again, a special flag. It means: - * 1) If no L2 is specified, then CPU is w/o L2 (0 KB) - * 2) If L2 is specified by other flags, then, CPU is w/o L3. 
- */ - if (data->l2_cache == -1) { - data->l2_cache = 0; - } else { - data->l3_cache = 0; - } - } -} - -static void decode_intel_deterministic_cache_info(struct cpu_raw_data_t* raw, - struct cpu_id_t* data) -{ - int ecx; - int ways, partitions, linesize, sets, size, level, typenumber; - cache_type_t type; - for (ecx = 0; ecx < MAX_INTELFN4_LEVEL; ecx++) { - typenumber = raw->intel_fn4[ecx][0] & 0x1f; - if (typenumber == 0) break; - level = (raw->intel_fn4[ecx][0] >> 5) & 0x7; - if (level == 1 && typenumber == 1) - type = L1D; - else if (level == 1 && typenumber == 2) - type = L1I; - else if (level == 2 && typenumber == 3) - type = L2; - else if (level == 3 && typenumber == 3) - type = L3; - else if (level == 4 && typenumber == 3) - type = L4; - else { - warnf("deterministic_cache: unknown level/typenumber combo (%d/%d), cannot\n", level, typenumber); - warnf("deterministic_cache: recognize cache type\n"); - continue; - } - ways = ((raw->intel_fn4[ecx][1] >> 22) & 0x3ff) + 1; - partitions = ((raw->intel_fn4[ecx][1] >> 12) & 0x3ff) + 1; - linesize = (raw->intel_fn4[ecx][1] & 0xfff) + 1; - sets = raw->intel_fn4[ecx][2] + 1; - size = ways * partitions * linesize * sets / 1024; - check_case(1, type, size, ways, linesize, data); - } -} - -static int decode_intel_extended_topology(struct cpu_raw_data_t* raw, - struct cpu_id_t* data) -{ - int i, level_type, num_smt = -1, num_core = -1; - for (i = 0; i < MAX_INTELFN11_LEVEL; i++) { - level_type = (raw->intel_fn11[i][2] & 0xff00) >> 8; - switch (level_type) { - case 0x01: - num_smt = raw->intel_fn11[i][1] & 0xffff; - break; - case 0x02: - num_core = raw->intel_fn11[i][1] & 0xffff; - break; - default: - break; - } - } - if (num_smt == -1 || num_core == -1) return 0; - data->num_logical_cpus = num_core; - data->num_cores = num_core / num_smt; - // make sure num_cores is at least 1. In VMs, the CPUID instruction - // is rigged and may give nonsensical results, but we should at least - // avoid outputs like data->num_cores == 0. - if (data->num_cores <= 0) data->num_cores = 1; - return 1; -} - -static void decode_intel_number_of_cores(struct cpu_raw_data_t* raw, - struct cpu_id_t* data) -{ - int logical_cpus = -1, num_cores = -1; - - if (raw->basic_cpuid[0][0] >= 11) { - if (decode_intel_extended_topology(raw, data)) return; - } - - if (raw->basic_cpuid[0][0] >= 1) { - logical_cpus = (raw->basic_cpuid[1][1] >> 16) & 0xff; - if (raw->basic_cpuid[0][0] >= 4) { - num_cores = 1 + ((raw->basic_cpuid[4][0] >> 26) & 0x3f); - } - } - if (data->flags[CPU_FEATURE_HT]) { - if (num_cores > 1) { - data->num_cores = num_cores; - data->num_logical_cpus = logical_cpus; - } else { - data->num_cores = 1; - data->num_logical_cpus = (logical_cpus >= 1 ? 
logical_cpus : 1); - if (data->num_logical_cpus == 1) - data->flags[CPU_FEATURE_HT] = 0; - } - } else { - data->num_cores = data->num_logical_cpus = 1; - } -} - -static intel_code_t get_brand_code(struct cpu_id_t* data) -{ - intel_code_t code = (intel_code_t) NO_CODE; - int i, need_matchtable = 1, core_ix_base = 0; - const char* bs = data->brand_str; - const char* s; - const struct { intel_code_t c; const char *search; } matchtable[] = { - { XEONMP, "Xeon MP" }, - { XEONMP, "Xeon(TM) MP" }, - { XEON, "Xeon" }, - { CELERON, "Celeron" }, - { MOBILE_PENTIUM_M, "Pentium(R) M" }, - { CORE_SOLO, "Pentium(R) Dual CPU" }, - { CORE_SOLO, "Pentium(R) Dual-Core" }, - { PENTIUM_D, "Pentium(R) D" }, - { PENTIUM, "Pentium" }, - { CORE_SOLO, "Genuine Intel(R) CPU" }, - { CORE_SOLO, "Intel(R) Core(TM)" }, - { ATOM_DIAMONDVILLE, "Atom(TM) CPU [N ][23]## " }, - { ATOM_SILVERTHORNE, "Atom(TM) CPU Z" }, - { ATOM_PINEVIEW, "Atom(TM) CPU [ND][45]## " }, - { ATOM_CEDARVIEW, "Atom(TM) CPU [ND]#### " }, - { ATOM_UNKNOWN, "Atom(TM) CPU" }, - }; - - if (strstr(bs, "Mobile")) { - need_matchtable = 0; - if (strstr(bs, "Celeron")) - code = MOBILE_CELERON; - else if (strstr(bs, "Pentium")) - code = MOBILE_PENTIUM; - } - if ((i = match_pattern(bs, "Core(TM) i[357]")) != 0) { - /* Core i3, Core i5 or Core i7 */ - need_matchtable = 0; - - core_ix_base = CORE_I3; - - /* if it has RdRand, then it is at least Ivy Bridge */ - if (data->flags[CPU_FEATURE_RDRAND]) - core_ix_base = CORE_IVY3; - /* if it has FMA, then it is at least Haswell */ - if (data->flags[CPU_FEATURE_FMA3]) - core_ix_base = CORE_HASWELL3; - /* if it has RTM, then it is at least a Broadwell-E or Skylake */ - if (data->flags[CPU_FEATURE_RDSEED]) - core_ix_base = CORE_BROADWELL3; - - switch (bs[i + 9]) { - case '3': code = core_ix_base + 0; break; - case '5': code = core_ix_base + 1; break; - case '7': code = core_ix_base + 2; break; - } - } - if (need_matchtable) { - for (i = 0; i < COUNT_OF(matchtable); i++) - if (match_pattern(bs, matchtable[i].search)) { - code = matchtable[i].c; - break; - } - debugf(2, "intel matchtable result is %d\n", code); - } - if (code == XEON) { - if (match_pattern(bs, "W35##") || match_pattern(bs, "[ELXW]75##")) - code = XEON_I7; - else if (match_pattern(bs, "[ELXW]55##")) - code = XEON_GAINESTOWN; - else if (match_pattern(bs, "[ELXW]56##")) - code = XEON_WESTMERE; - else if (data->l3_cache > 0 && data->family == 16) - /* restrict by family, since later Xeons also have L3 ... */ - code = XEON_IRWIN; - } - if (code == XEONMP && data->l3_cache > 0) - code = XEON_POTOMAC; - if (code == CORE_SOLO) { - s = strstr(bs, "CPU"); - if (s) { - s += 3; - while (*s == ' ') s++; - if (*s == 'T') - code = (data->num_cores == 1) ? 
MOBILE_CORE_SOLO : MOBILE_CORE_DUO; - } - } - if (code == CORE_SOLO) { - switch (data->num_cores) { - case 1: break; - case 2: - { - code = CORE_DUO; - if (data->num_logical_cpus > 2) - code = DUAL_CORE_HT; - break; - } - case 4: - { - code = QUAD_CORE; - if (data->num_logical_cpus > 4) - code = QUAD_CORE_HT; - break; - } - default: - code = MORE_THAN_QUADCORE; break; - } - } - - if (code == CORE_DUO && data->ext_model >= 23) { - code = WOLFDALE; - } - if (code == PENTIUM_D && data->ext_model >= 23) { - code = WOLFDALE; - } - if (code == MOBILE_CORE_DUO && data->model != 14) { - if (data->ext_model < 23) { - code = MEROM; - } else { - code = PENRYN; - } - } - return code; -} - -static intel_model_t get_model_code(struct cpu_id_t* data) -{ - int i = 0; - int l = (int) strlen(data->brand_str); - const char *bs = data->brand_str; - int mod_flags = 0, model_no = 0, ndigs = 0; - /* If the CPU is a Core ix, then just return the model number generation: */ - if ((i = match_pattern(bs, "Core(TM) i[357]")) != 0) { - i += 11; - if (i + 4 >= l) return UNKNOWN; - if (bs[i] == '2') return _2xxx; - if (bs[i] == '3') return _3xxx; - return UNKNOWN; - } - - /* For Core2-based Xeons: */ - while (i < l - 3) { - if (bs[i] == 'C' && bs[i+1] == 'P' && bs[i+2] == 'U') - break; - i++; - } - if (i >= l - 3) return UNKNOWN; - i += 3; - while (i < l - 4 && bs[i] == ' ') i++; - if (i >= l - 4) return UNKNOWN; - while (i < l - 4 && !isdigit(bs[i])) { - if (bs[i] >= 'A' && bs[i] <= 'Z') - mod_flags |= (1 << (bs[i] - 'A')); - i++; - } - if (i >= l - 4) return UNKNOWN; - while (isdigit(bs[i])) { - ndigs++; - model_no = model_no * 10 + (int) (bs[i] - '0'); - i++; - } - if (ndigs != 4) return UNKNOWN; -#define HAVE(ch, flags) ((flags & (1 << ((int)(ch-'A')))) != 0) - switch (model_no / 100) { - case 30: return _3000; - case 31: return _3100; - case 32: - { - return (HAVE('X', mod_flags)) ? X3200 : _3200; - } - case 33: - { - return (HAVE('X', mod_flags)) ? 
X3300 : _3300; - } - case 51: return _5100; - case 52: return _5200; - case 53: return _5300; - case 54: return _5400; - default: - return UNKNOWN; - } -#undef HAVE -} - -static void decode_intel_sgx_features(const struct cpu_raw_data_t* raw, struct cpu_id_t* data) -{ - struct cpu_epc_t epc; - int i; - - if (raw->basic_cpuid[0][0] < 0x12) return; // no 12h leaf - if (raw->basic_cpuid[0x12][0] == 0) return; // no sub-leafs available, probably it's disabled by BIOS - - // decode sub-leaf 0: - if (raw->basic_cpuid[0x12][0] & 1) data->sgx.flags[INTEL_SGX1] = 1; - if (raw->basic_cpuid[0x12][0] & 2) data->sgx.flags[INTEL_SGX2] = 1; - if (data->sgx.flags[INTEL_SGX1] || data->sgx.flags[INTEL_SGX2]) - data->sgx.present = 1; - data->sgx.misc_select = raw->basic_cpuid[0x12][1]; - data->sgx.max_enclave_32bit = (raw->basic_cpuid[0x12][3] ) & 0xff; - data->sgx.max_enclave_64bit = (raw->basic_cpuid[0x12][3] >> 8) & 0xff; - - // decode sub-leaf 1: - data->sgx.secs_attributes = raw->intel_fn12h[1][0] | (((uint64_t) raw->intel_fn12h[1][1]) << 32); - data->sgx.secs_xfrm = raw->intel_fn12h[1][2] | (((uint64_t) raw->intel_fn12h[1][3]) << 32); - - // decode higher-order subleafs, whenever present: - data->sgx.num_epc_sections = -1; - for (i = 0; i < 1000000; i++) { - epc = cpuid_get_epc(i, raw); - if (epc.length == 0) { - debugf(2, "SGX: epc section request for %d returned null, no more EPC sections.\n", i); - data->sgx.num_epc_sections = i; - break; - } - } - if (data->sgx.num_epc_sections == -1) { - debugf(1, "SGX: warning: seems to be infinitude of EPC sections.\n"); - data->sgx.num_epc_sections = 1000000; - } -} - -struct cpu_epc_t cpuid_get_epc(int index, const struct cpu_raw_data_t* raw) -{ - uint32_t regs[4]; - struct cpu_epc_t retval = {0, 0}; - if (raw && index < MAX_INTELFN12H_LEVEL - 2) { - // this was queried already, use the data: - memcpy(regs, raw->intel_fn12h[2 + index], sizeof(regs)); - } else { - // query this ourselves: - regs[0] = 0x12; - regs[2] = 2 + index; - regs[1] = regs[3] = 0; - cpu_exec_cpuid_ext(regs); - } - - // decode values: - if ((regs[0] & 0xf) == 0x1) { - retval.start_addr |= (regs[0] & 0xfffff000); // bits [12, 32) -> bits [12, 32) - retval.start_addr |= ((uint64_t) (regs[1] & 0x000fffff)) << 32; // bits [0, 20) -> bits [32, 52) - retval.length |= (regs[2] & 0xfffff000); // bits [12, 32) -> bits [12, 32) - retval.length |= ((uint64_t) (regs[3] & 0x000fffff)) << 32; // bits [0, 20) -> bits [32, 52) - } - return retval; -} - -int cpuid_identify_intel(struct cpu_raw_data_t* raw, struct cpu_id_t* data, struct internal_id_info_t* internal) -{ - intel_code_t brand_code; - intel_model_t model_code; - int i; - char* brand_code_str = NULL; - - load_intel_features(raw, data); - if (raw->basic_cpuid[0][0] >= 4) { - /* Deterministic way is preferred, being more generic */ - decode_intel_deterministic_cache_info(raw, data); - } else if (raw->basic_cpuid[0][0] >= 2) { - decode_intel_oldstyle_cache_info(raw, data); - } - decode_intel_number_of_cores(raw, data); - - brand_code = get_brand_code(data); - model_code = get_model_code(data); - for (i = 0; i < COUNT_OF(intel_bcode_str); i++) { - if (brand_code == intel_bcode_str[i].code) { - brand_code_str = intel_bcode_str[i].str; - break; - } - } - if (brand_code_str) - debugf(2, "Detected Intel brand code: %d (%s)\n", brand_code, brand_code_str); - else - debugf(2, "Detected Intel brand code: %d\n", brand_code); - debugf(2, "Detected Intel model code: %d\n", model_code); - - internal->code.intel = brand_code; - - if 
(data->flags[CPU_FEATURE_SGX]) { - debugf(2, "SGX seems to be present, decoding...\n"); - // if SGX is indicated by the CPU, verify its presence: - decode_intel_sgx_features(raw, data); - } - - internal->score = match_cpu_codename(cpudb_intel, COUNT_OF(cpudb_intel), data, - brand_code, model_code); - return 0; -} - -void cpuid_get_list_intel(struct cpu_list_t* list) -{ - generic_get_cpu_list(cpudb_intel, COUNT_OF(cpudb_intel), list); -} diff --git a/contrib/libcpuid/include/libcpuid/recog_intel.h b/contrib/libcpuid/include/libcpuid/recog_intel.h deleted file mode 100644 index b99c783bf43..00000000000 --- a/contrib/libcpuid/include/libcpuid/recog_intel.h +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright 2008 Veselin Georgiev, - * anrieffNOSPAM @ mgail_DOT.com (convert to gmail) - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ -#ifndef __RECOG_INTEL_H__ -#define __RECOG_INTEL_H__ - -int cpuid_identify_intel(struct cpu_raw_data_t* raw, struct cpu_id_t* data, struct internal_id_info_t* internal); -void cpuid_get_list_intel(struct cpu_list_t* list); - -#endif /*__RECOG_INTEL_H__*/ diff --git a/contrib/libdivide/libdivide.h b/contrib/libdivide/libdivide.h index eaeaec7db6b..a153e7f9c5e 100644 --- a/contrib/libdivide/libdivide.h +++ b/contrib/libdivide/libdivide.h @@ -1,117 +1,106 @@ -/* libdivide.h - Copyright 2010 ridiculous_fish -*/ -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wold-style-cast" +// libdivide.h - Optimized integer division +// https://libdivide.com +// +// Copyright (C) 2010 - 2019 ridiculous_fish, +// Copyright (C) 2016 - 2019 Kim Walisch, +// +// libdivide is dual-licensed under the Boost or zlib licenses. +// You may use libdivide under the terms of either of these. +// See LICENSE.txt for more details. -#if defined(_WIN32) || defined(WIN32) -#define LIBDIVIDE_WINDOWS 1 -#endif +#ifndef LIBDIVIDE_H +#define LIBDIVIDE_H -#if defined(_MSC_VER) -#define LIBDIVIDE_VC 1 -#endif +#define LIBDIVIDE_VERSION "3.0" +#define LIBDIVIDE_VERSION_MAJOR 3 +#define LIBDIVIDE_VERSION_MINOR 0 -#ifdef __cplusplus -#include -#include -#include -#else -#include -#include -#include -#endif - -#if ! LIBDIVIDE_HAS_STDINT_TYPES && (! 
LIBDIVIDE_VC || _MSC_VER >= 1600) -/* Only Visual C++ 2010 and later include stdint.h */ #include <stdint.h> -#define LIBDIVIDE_HAS_STDINT_TYPES 1 + +#if defined(__cplusplus) + #include <cstdlib> + #include <cstdio> + #include <type_traits> +#else + #include <stdlib.h> + #include <stdio.h> #endif -#if ! LIBDIVIDE_HAS_STDINT_TYPES -typedef __int32 int32_t; -typedef unsigned __int32 uint32_t; -typedef __int64 int64_t; -typedef unsigned __int64 uint64_t; -typedef __int8 int8_t; -typedef unsigned __int8 uint8_t; -#endif - -#if LIBDIVIDE_USE_SSE2 +#if defined(LIBDIVIDE_AVX512) + #include <immintrin.h> +#elif defined(LIBDIVIDE_AVX2) + #include <immintrin.h> +#elif defined(LIBDIVIDE_SSE2) #include <emmintrin.h> #endif -#if LIBDIVIDE_VC #include <intrin.h> + // disable warning C4146: unary minus operator applied + // to unsigned type, result still unsigned + #pragma warning(disable: 4146) + #define LIBDIVIDE_VC #endif -#ifndef __has_builtin -#define __has_builtin(x) 0 // Compatibility with non-clang compilers. +#if !defined(__has_builtin) + #define __has_builtin(x) 0 #endif -#ifdef __ICC -#define HAS_INT128_T 0 -#else -#define HAS_INT128_T __LP64__ +#if defined(__SIZEOF_INT128__) + #define HAS_INT128_T + // clang-cl on Windows does not yet support 128-bit division + #if !(defined(__clang__) && defined(LIBDIVIDE_VC)) + #define HAS_INT128_DIV + #endif #endif -#if defined(__x86_64__) || defined(_WIN64) || defined(_M_64) -#define LIBDIVIDE_IS_X86_64 1 +#if defined(__x86_64__) || defined(_M_X64) + #define LIBDIVIDE_X86_64 #endif #if defined(__i386__) -#define LIBDIVIDE_IS_i386 1 + #define LIBDIVIDE_i386 #endif -#if __GNUC__ || __clang__ -#define LIBDIVIDE_GCC_STYLE_ASM 1 +#if defined(__GNUC__) || defined(__clang__) + #define LIBDIVIDE_GCC_STYLE_ASM #endif +#if defined(__cplusplus) || defined(LIBDIVIDE_VC) + #define LIBDIVIDE_FUNCTION __FUNCTION__ +#else + #define LIBDIVIDE_FUNCTION __func__ +#endif -/* libdivide may use the pmuldq (vector signed 32x32->64 mult instruction) which is in SSE 4.1. However, signed multiplication can be emulated efficiently with unsigned multiplication, and SSE 4.1 is currently rare, so it is OK to not turn this on */ -#ifdef LIBDIVIDE_USE_SSE4_1 -#include <smmintrin.h> +#define LIBDIVIDE_ERROR(msg) \ + do { \ + fprintf(stderr, "libdivide.h:%d: %s(): Error: %s\n", \ + __LINE__, LIBDIVIDE_FUNCTION, msg); \ + exit(-1); \ + } while (0) + +#if defined(LIBDIVIDE_ASSERTIONS_ON) + #define LIBDIVIDE_ASSERT(x) \ + do { \ + if (!(x)) { \ + fprintf(stderr, "libdivide.h:%d: %s(): Assertion failed: %s\n", \ + __LINE__, LIBDIVIDE_FUNCTION, #x); \ + exit(-1); \ + } \ + } while (0) +#else + #define LIBDIVIDE_ASSERT(x) #endif #ifdef __cplusplus -/* We place libdivide within the libdivide namespace, and that goes in an anonymous namespace so that the functions are only visible to files that #include this header and don't get external linkage. At least that's the theory. */ -namespace { namespace libdivide { #endif -/* Explanation of "more" field: bit 6 is whether to use shift path. If we are using the shift path, bit 7 is whether the divisor is negative in the signed case; in the unsigned case it is 0. Bits 0-4 is shift value (for shift path or mult path). In 32 bit case, bit 5 is always 0. We use bit 7 as the "negative divisor indicator" so that we can use sign extension to efficiently go to a full-width -1.
- - -u32: [0-4] shift value - [5] ignored - [6] add indicator - [7] shift path - -s32: [0-4] shift value - [5] shift path - [6] add indicator - [7] indicates negative divisor - -u64: [0-5] shift value - [6] add indicator - [7] shift path - -s64: [0-5] shift value - [6] add indicator - [7] indicates negative divisor - magic number of 0 indicates shift path (we ran out of bits!) -*/ - -enum { - LIBDIVIDE_32_SHIFT_MASK = 0x1F, - LIBDIVIDE_64_SHIFT_MASK = 0x3F, - LIBDIVIDE_ADD_MARKER = 0x40, - LIBDIVIDE_U32_SHIFT_PATH = 0x80, - LIBDIVIDE_U64_SHIFT_PATH = 0x80, - LIBDIVIDE_S32_SHIFT_PATH = 0x20, - LIBDIVIDE_NEGATIVE_DIVISOR = 0x80 -}; - +// pack divider structs to prevent compilers from padding. +// This reduces memory usage by up to 43% when using a large +// array of libdivide dividers and improves performance +// by up to 10% because of reduced memory bandwidth. +#pragma pack(push, 1) struct libdivide_u32_t { uint32_t magic; @@ -133,497 +122,446 @@ struct libdivide_s64_t { uint8_t more; }; +struct libdivide_u32_branchfree_t { + uint32_t magic; + uint8_t more; +}; +struct libdivide_s32_branchfree_t { + int32_t magic; + uint8_t more; +}; -#ifndef LIBDIVIDE_API - #ifdef __cplusplus - /* In C++, we don't want our public functions to be static, because they are arguments to templates and static functions can't do that. They get internal linkage through virtue of the anonymous namespace. In C, they should be static. */ - #define LIBDIVIDE_API - #else - #define LIBDIVIDE_API static - #endif -#endif +struct libdivide_u64_branchfree_t { + uint64_t magic; + uint8_t more; +}; -#ifdef __APPLE__ -typedef signed long Int64; -typedef unsigned long UInt64; -#endif +struct libdivide_s64_branchfree_t { + int64_t magic; + uint8_t more; +}; -LIBDIVIDE_API struct libdivide_s32_t libdivide_s32_gen(int32_t y); -LIBDIVIDE_API struct libdivide_u32_t libdivide_u32_gen(uint32_t y); -LIBDIVIDE_API struct libdivide_s64_t libdivide_s64_gen(int64_t y); -LIBDIVIDE_API struct libdivide_u64_t libdivide_u64_gen(uint64_t y); -#if defined(__APPLE__) && defined(__cplusplus) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wunused-function" -LIBDIVIDE_API struct libdivide_s64_t libdivide_s64_gen(Int64 y) { return libdivide_s64_gen(int64_t(y)); }; -LIBDIVIDE_API struct libdivide_u64_t libdivide_u64_gen(UInt64 y) { return libdivide_u64_gen(uint64_t(y)); }; -#pragma GCC diagnostic pop -#endif +#pragma pack(pop) -LIBDIVIDE_API int32_t libdivide_s32_do(int32_t numer, const struct libdivide_s32_t *denom); -LIBDIVIDE_API uint32_t libdivide_u32_do(uint32_t numer, const struct libdivide_u32_t *denom); -LIBDIVIDE_API int64_t libdivide_s64_do(int64_t numer, const struct libdivide_s64_t *denom); -LIBDIVIDE_API uint64_t libdivide_u64_do(uint64_t y, const struct libdivide_u64_t *denom); -#if defined(__APPLE__) && defined(__cplusplus) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wunused-function" -LIBDIVIDE_API Int64 libdivide_s64_do(Int64 numer, const struct libdivide_s64_t *denom) { return Int64(libdivide_s64_do(int64_t(numer), denom)); }; -LIBDIVIDE_API UInt64 libdivide_u64_do(UInt64 y, const struct libdivide_u64_t *denom) { return UInt64(libdivide_u64_do(uint64_t(y), denom)); }; -#pragma GCC diagnostic pop -#endif +// Explanation of the "more" field: +// +// * Bits 0-5 is the shift value (for shift path or mult path). +// * Bit 6 is the add indicator for mult path. +// * Bit 7 is set if the divisor is negative. 
We use bit 7 as the negative +// divisor indicator so that we can efficiently use sign extension to +// create a bitmask with all bits set to 1 (if the divisor is negative) +// or 0 (if the divisor is positive). +// +// u32: [0-4] shift value +// [5] ignored +// [6] add indicator +// magic number of 0 indicates shift path +// +// s32: [0-4] shift value +// [5] ignored +// [6] add indicator +// [7] indicates negative divisor +// magic number of 0 indicates shift path +// +// u64: [0-5] shift value +// [6] add indicator +// magic number of 0 indicates shift path +// +// s64: [0-5] shift value +// [6] add indicator +// [7] indicates negative divisor +// magic number of 0 indicates shift path +// +// In s32 and s64 branchfree modes, the magic number is negated according to +// whether the divisor is negated. In branchfree strategy, it is not negated. -LIBDIVIDE_API int libdivide_u32_get_algorithm(const struct libdivide_u32_t *denom); -LIBDIVIDE_API uint32_t libdivide_u32_do_alg0(uint32_t numer, const struct libdivide_u32_t *denom); -LIBDIVIDE_API uint32_t libdivide_u32_do_alg1(uint32_t numer, const struct libdivide_u32_t *denom); -LIBDIVIDE_API uint32_t libdivide_u32_do_alg2(uint32_t numer, const struct libdivide_u32_t *denom); +enum { + LIBDIVIDE_32_SHIFT_MASK = 0x1F, + LIBDIVIDE_64_SHIFT_MASK = 0x3F, + LIBDIVIDE_ADD_MARKER = 0x40, + LIBDIVIDE_NEGATIVE_DIVISOR = 0x80 +}; -LIBDIVIDE_API int libdivide_u64_get_algorithm(const struct libdivide_u64_t *denom); -LIBDIVIDE_API uint64_t libdivide_u64_do_alg0(uint64_t numer, const struct libdivide_u64_t *denom); -LIBDIVIDE_API uint64_t libdivide_u64_do_alg1(uint64_t numer, const struct libdivide_u64_t *denom); -LIBDIVIDE_API uint64_t libdivide_u64_do_alg2(uint64_t numer, const struct libdivide_u64_t *denom); -#if defined(__APPLE__) && defined(__cplusplus) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wunused-function" -LIBDIVIDE_API UInt64 libdivide_u64_do_alg0(UInt64 numer, const struct libdivide_u64_t *denom) { return UInt64(libdivide_u64_do_alg0(uint64_t(numer), denom)); } -LIBDIVIDE_API UInt64 libdivide_u64_do_alg1(UInt64 numer, const struct libdivide_u64_t *denom) { return UInt64(libdivide_u64_do_alg1(uint64_t(numer), denom)); } -LIBDIVIDE_API UInt64 libdivide_u64_do_alg2(UInt64 numer, const struct libdivide_u64_t *denom) { return UInt64(libdivide_u64_do_alg2(uint64_t(numer), denom)); } -#pragma GCC diagnostic pop -#endif +static inline struct libdivide_s32_t libdivide_s32_gen(int32_t d); +static inline struct libdivide_u32_t libdivide_u32_gen(uint32_t d); +static inline struct libdivide_s64_t libdivide_s64_gen(int64_t d); +static inline struct libdivide_u64_t libdivide_u64_gen(uint64_t d); -LIBDIVIDE_API int libdivide_s32_get_algorithm(const struct libdivide_s32_t *denom); -LIBDIVIDE_API int32_t libdivide_s32_do_alg0(int32_t numer, const struct libdivide_s32_t *denom); -LIBDIVIDE_API int32_t libdivide_s32_do_alg1(int32_t numer, const struct libdivide_s32_t *denom); -LIBDIVIDE_API int32_t libdivide_s32_do_alg2(int32_t numer, const struct libdivide_s32_t *denom); -LIBDIVIDE_API int32_t libdivide_s32_do_alg3(int32_t numer, const struct libdivide_s32_t *denom); -LIBDIVIDE_API int32_t libdivide_s32_do_alg4(int32_t numer, const struct libdivide_s32_t *denom); +static inline struct libdivide_s32_branchfree_t libdivide_s32_branchfree_gen(int32_t d); +static inline struct libdivide_u32_branchfree_t libdivide_u32_branchfree_gen(uint32_t d); +static inline struct libdivide_s64_branchfree_t libdivide_s64_branchfree_gen(int64_t d); +static 
inline struct libdivide_u64_branchfree_t libdivide_u64_branchfree_gen(uint64_t d); -LIBDIVIDE_API int libdivide_s64_get_algorithm(const struct libdivide_s64_t *denom); -LIBDIVIDE_API int64_t libdivide_s64_do_alg0(int64_t numer, const struct libdivide_s64_t *denom); -LIBDIVIDE_API int64_t libdivide_s64_do_alg1(int64_t numer, const struct libdivide_s64_t *denom); -LIBDIVIDE_API int64_t libdivide_s64_do_alg2(int64_t numer, const struct libdivide_s64_t *denom); -LIBDIVIDE_API int64_t libdivide_s64_do_alg3(int64_t numer, const struct libdivide_s64_t *denom); -LIBDIVIDE_API int64_t libdivide_s64_do_alg4(int64_t numer, const struct libdivide_s64_t *denom); -#if defined(__APPLE__) && defined(__cplusplus) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wunused-function" -LIBDIVIDE_API Int64 libdivide_s64_do_alg0(Int64 numer, const struct libdivide_s64_t *denom) { return Int64(libdivide_s64_do_alg0(int64_t(numer), denom)); } -LIBDIVIDE_API Int64 libdivide_s64_do_alg1(Int64 numer, const struct libdivide_s64_t *denom) { return Int64(libdivide_s64_do_alg1(int64_t(numer), denom)); } -LIBDIVIDE_API Int64 libdivide_s64_do_alg2(Int64 numer, const struct libdivide_s64_t *denom) { return Int64(libdivide_s64_do_alg2(int64_t(numer), denom)); } -LIBDIVIDE_API Int64 libdivide_s64_do_alg3(Int64 numer, const struct libdivide_s64_t *denom) { return Int64(libdivide_s64_do_alg3(int64_t(numer), denom)); } -LIBDIVIDE_API Int64 libdivide_s64_do_alg4(Int64 numer, const struct libdivide_s64_t *denom) { return Int64(libdivide_s64_do_alg4(int64_t(numer), denom)); } -#pragma GCC diagnostic pop -#endif +static inline int32_t libdivide_s32_do(int32_t numer, const struct libdivide_s32_t *denom); +static inline uint32_t libdivide_u32_do(uint32_t numer, const struct libdivide_u32_t *denom); +static inline int64_t libdivide_s64_do(int64_t numer, const struct libdivide_s64_t *denom); +static inline uint64_t libdivide_u64_do(uint64_t numer, const struct libdivide_u64_t *denom); +static inline int32_t libdivide_s32_branchfree_do(int32_t numer, const struct libdivide_s32_branchfree_t *denom); +static inline uint32_t libdivide_u32_branchfree_do(uint32_t numer, const struct libdivide_u32_branchfree_t *denom); +static inline int64_t libdivide_s64_branchfree_do(int64_t numer, const struct libdivide_s64_branchfree_t *denom); +static inline uint64_t libdivide_u64_branchfree_do(uint64_t numer, const struct libdivide_u64_branchfree_t *denom); -#if LIBDIVIDE_USE_SSE2 -LIBDIVIDE_API __m128i libdivide_u32_do_vector(__m128i numers, const struct libdivide_u32_t * denom); -LIBDIVIDE_API __m128i libdivide_s32_do_vector(__m128i numers, const struct libdivide_s32_t * denom); -LIBDIVIDE_API __m128i libdivide_u64_do_vector(__m128i numers, const struct libdivide_u64_t * denom); -LIBDIVIDE_API __m128i libdivide_s64_do_vector(__m128i numers, const struct libdivide_s64_t * denom); - -LIBDIVIDE_API __m128i libdivide_u32_do_vector_alg0(__m128i numers, const struct libdivide_u32_t * denom); -LIBDIVIDE_API __m128i libdivide_u32_do_vector_alg1(__m128i numers, const struct libdivide_u32_t * denom); -LIBDIVIDE_API __m128i libdivide_u32_do_vector_alg2(__m128i numers, const struct libdivide_u32_t * denom); - -LIBDIVIDE_API __m128i libdivide_s32_do_vector_alg0(__m128i numers, const struct libdivide_s32_t * denom); -LIBDIVIDE_API __m128i libdivide_s32_do_vector_alg1(__m128i numers, const struct libdivide_s32_t * denom); -LIBDIVIDE_API __m128i libdivide_s32_do_vector_alg2(__m128i numers, const struct libdivide_s32_t * denom); -LIBDIVIDE_API __m128i 
libdivide_s32_do_vector_alg3(__m128i numers, const struct libdivide_s32_t * denom); -LIBDIVIDE_API __m128i libdivide_s32_do_vector_alg4(__m128i numers, const struct libdivide_s32_t * denom); - -LIBDIVIDE_API __m128i libdivide_u64_do_vector_alg0(__m128i numers, const struct libdivide_u64_t * denom); -LIBDIVIDE_API __m128i libdivide_u64_do_vector_alg1(__m128i numers, const struct libdivide_u64_t * denom); -LIBDIVIDE_API __m128i libdivide_u64_do_vector_alg2(__m128i numers, const struct libdivide_u64_t * denom); - -LIBDIVIDE_API __m128i libdivide_s64_do_vector_alg0(__m128i numers, const struct libdivide_s64_t * denom); -LIBDIVIDE_API __m128i libdivide_s64_do_vector_alg1(__m128i numers, const struct libdivide_s64_t * denom); -LIBDIVIDE_API __m128i libdivide_s64_do_vector_alg2(__m128i numers, const struct libdivide_s64_t * denom); -LIBDIVIDE_API __m128i libdivide_s64_do_vector_alg3(__m128i numers, const struct libdivide_s64_t * denom); -LIBDIVIDE_API __m128i libdivide_s64_do_vector_alg4(__m128i numers, const struct libdivide_s64_t * denom); -#endif - +static inline int32_t libdivide_s32_recover(const struct libdivide_s32_t *denom); +static inline uint32_t libdivide_u32_recover(const struct libdivide_u32_t *denom); +static inline int64_t libdivide_s64_recover(const struct libdivide_s64_t *denom); +static inline uint64_t libdivide_u64_recover(const struct libdivide_u64_t *denom); +static inline int32_t libdivide_s32_branchfree_recover(const struct libdivide_s32_branchfree_t *denom); +static inline uint32_t libdivide_u32_branchfree_recover(const struct libdivide_u32_branchfree_t *denom); +static inline int64_t libdivide_s64_branchfree_recover(const struct libdivide_s64_branchfree_t *denom); +static inline uint64_t libdivide_u64_branchfree_recover(const struct libdivide_u64_branchfree_t *denom); //////// Internal Utility Functions -static inline uint32_t libdivide__mullhi_u32(uint32_t x, uint32_t y) { +static inline uint32_t libdivide_mullhi_u32(uint32_t x, uint32_t y) { uint64_t xl = x, yl = y; uint64_t rl = xl * yl; return (uint32_t)(rl >> 32); } -static uint64_t libdivide__mullhi_u64(uint64_t x, uint64_t y) { -#if HAS_INT128_T +static inline int32_t libdivide_mullhi_s32(int32_t x, int32_t y) { + int64_t xl = x, yl = y; + int64_t rl = xl * yl; + // needs to be arithmetic shift + return (int32_t)(rl >> 32); +} + +static inline uint64_t libdivide_mullhi_u64(uint64_t x, uint64_t y) { +#if defined(LIBDIVIDE_VC) && \ + defined(LIBDIVIDE_X86_64) + return __umulh(x, y); +#elif defined(HAS_INT128_T) __uint128_t xl = x, yl = y; __uint128_t rl = xl * yl; return (uint64_t)(rl >> 64); #else - //full 128 bits are x0 * y0 + (x0 * y1 << 32) + (x1 * y0 << 32) + (x1 * y1 << 64) - const uint32_t mask = 0xFFFFFFFF; - const uint32_t x0 = (uint32_t)(x & mask), x1 = (uint32_t)(x >> 32); - const uint32_t y0 = (uint32_t)(y & mask), y1 = (uint32_t)(y >> 32); - const uint32_t x0y0_hi = libdivide__mullhi_u32(x0, y0); - const uint64_t x0y1 = x0 * (uint64_t)y1; - const uint64_t x1y0 = x1 * (uint64_t)y0; - const uint64_t x1y1 = x1 * (uint64_t)y1; - + // full 128 bits are x0 * y0 + (x0 * y1 << 32) + (x1 * y0 << 32) + (x1 * y1 << 64) + uint32_t mask = 0xFFFFFFFF; + uint32_t x0 = (uint32_t)(x & mask); + uint32_t x1 = (uint32_t)(x >> 32); + uint32_t y0 = (uint32_t)(y & mask); + uint32_t y1 = (uint32_t)(y >> 32); + uint32_t x0y0_hi = libdivide_mullhi_u32(x0, y0); + uint64_t x0y1 = x0 * (uint64_t)y1; + uint64_t x1y0 = x1 * (uint64_t)y0; + uint64_t x1y1 = x1 * (uint64_t)y1; uint64_t temp = x1y0 + x0y0_hi; - uint64_t temp_lo = temp & 
mask, temp_hi = temp >> 32; + uint64_t temp_lo = temp & mask; + uint64_t temp_hi = temp >> 32; + return x1y1 + temp_hi + ((temp_lo + x0y1) >> 32); #endif } -static inline int64_t libdivide__mullhi_s64(int64_t x, int64_t y) { -#if HAS_INT128_T +static inline int64_t libdivide_mullhi_s64(int64_t x, int64_t y) { +#if defined(LIBDIVIDE_VC) && \ + defined(LIBDIVIDE_X86_64) + return __mulh(x, y); +#elif defined(HAS_INT128_T) __int128_t xl = x, yl = y; __int128_t rl = xl * yl; return (int64_t)(rl >> 64); #else - //full 128 bits are x0 * y0 + (x0 * y1 << 32) + (x1 * y0 << 32) + (x1 * y1 << 64) - const uint32_t mask = 0xFFFFFFFF; - const uint32_t x0 = (uint32_t)(x & mask), y0 = (uint32_t)(y & mask); - const int32_t x1 = (int32_t)(x >> 32), y1 = (int32_t)(y >> 32); - const uint32_t x0y0_hi = libdivide__mullhi_u32(x0, y0); - const int64_t t = x1*(int64_t)y0 + x0y0_hi; - const int64_t w1 = x0*(int64_t)y1 + (t & mask); - return x1*(int64_t)y1 + (t >> 32) + (w1 >> 32); + // full 128 bits are x0 * y0 + (x0 * y1 << 32) + (x1 * y0 << 32) + (x1 * y1 << 64) + uint32_t mask = 0xFFFFFFFF; + uint32_t x0 = (uint32_t)(x & mask); + uint32_t y0 = (uint32_t)(y & mask); + int32_t x1 = (int32_t)(x >> 32); + int32_t y1 = (int32_t)(y >> 32); + uint32_t x0y0_hi = libdivide_mullhi_u32(x0, y0); + int64_t t = x1 * (int64_t)y0 + x0y0_hi; + int64_t w1 = x0 * (int64_t)y1 + (t & mask); + + return x1 * (int64_t)y1 + (t >> 32) + (w1 >> 32); #endif } -#if LIBDIVIDE_USE_SSE2 - -static inline __m128i libdivide__u64_to_m128(uint64_t x) { -#if LIBDIVIDE_VC && ! _WIN64 - //64 bit windows doesn't seem to have an implementation of any of these load intrinsics, and 32 bit Visual C++ crashes - _declspec(align(16)) uint64_t temp[2] = {x, x}; - return _mm_load_si128((const __m128i*)temp); -#elif defined(__ICC) - uint64_t __attribute__((aligned(16))) temp[2] = {x,x}; - return _mm_load_si128((const __m128i*)temp); -#elif __clang__ -#pragma clang diagnostic push -#pragma clang diagnostic ignored "-Wc++11-narrowing" // narrowing from uint64_t (aka 'unsigned long') to 'long long' - // clang does not provide this intrinsic either - return (__m128i){x, x}; -#pragma clang diagnostic pop -#else - // everyone else gets it right - return _mm_set1_epi64x(x); -#endif -} - -static inline __m128i libdivide_get_FFFFFFFF00000000(void) { - //returns the same as _mm_set1_epi64(0xFFFFFFFF00000000ULL) without touching memory - __m128i result = _mm_set1_epi8(-1); //optimizes to pcmpeqd on OS X - return _mm_slli_epi64(result, 32); -} - -static inline __m128i libdivide_get_00000000FFFFFFFF(void) { - //returns the same as _mm_set1_epi64(0x00000000FFFFFFFFULL) without touching memory - __m128i result = _mm_set1_epi8(-1); //optimizes to pcmpeqd on OS X - result = _mm_srli_epi64(result, 32); - return result; -} - -#if __clang__ -#pragma clang diagnostic push -#pragma clang diagnostic ignored "-Wuninitialized" -#endif -static inline __m128i libdivide_get_0000FFFF(void) { - //returns the same as _mm_set1_epi32(0x0000FFFFULL) without touching memory - __m128i result; //we don't care what its contents are - result = _mm_cmpeq_epi8(result, result); //all 1s - result = _mm_srli_epi32(result, 16); - return result; -} -#if __clang__ -#pragma clang diagnostic pop -#endif - -/// This is a bug in gcc-8, _MM_SHUFFLE was forgotten, though in trunk it is ok https://github.com/gcc-mirror/gcc/blob/master/gcc/config/rs6000/xmmintrin.h#L61 -#if defined(__PPC__) -#ifndef _MM_SHUFFLE -#define _MM_SHUFFLE(w,x,y,z) (((w) << 6) | ((x) << 4) | ((y) << 2) | (z)) -#endif -#endif - -static 
inline __m128i libdivide_s64_signbits(__m128i v) { - //we want to compute v >> 63, that is, _mm_srai_epi64(v, 63). But there is no 64 bit shift right arithmetic instruction in SSE2. So we have to fake it by first duplicating the high 32 bit values, and then using a 32 bit shift. Another option would be to use _mm_srli_epi64(v, 63) and then subtract that from 0, but that approach appears to be substantially slower for unknown reasons - __m128i hiBitsDuped = _mm_shuffle_epi32(v, _MM_SHUFFLE(3, 3, 1, 1)); - __m128i signBits = _mm_srai_epi32(hiBitsDuped, 31); - return signBits; -} - -/* Returns an __m128i whose low 32 bits are equal to amt and has zero elsewhere. */ -static inline __m128i libdivide_u32_to_m128i(uint32_t amt) { - return _mm_set_epi32(0, 0, 0, amt); -} - -static inline __m128i libdivide_s64_shift_right_vector(__m128i v, int amt) { - //implementation of _mm_sra_epi64. Here we have two 64 bit values which are shifted right to logically become (64 - amt) values, and are then sign extended from a (64 - amt) bit number. - const int b = 64 - amt; - __m128i m = libdivide__u64_to_m128(1ULL << (b - 1)); - __m128i x = _mm_srl_epi64(v, libdivide_u32_to_m128i(amt)); - __m128i result = _mm_sub_epi64(_mm_xor_si128(x, m), m); //result = x^m - m - return result; -} - -/* Here, b is assumed to contain one 32 bit value repeated four times. If it did not, the function would not work. */ -static inline __m128i libdivide__mullhi_u32_flat_vector(__m128i a, __m128i b) { - __m128i hi_product_0Z2Z = _mm_srli_epi64(_mm_mul_epu32(a, b), 32); - __m128i a1X3X = _mm_srli_epi64(a, 32); - __m128i hi_product_Z1Z3 = _mm_and_si128(_mm_mul_epu32(a1X3X, b), libdivide_get_FFFFFFFF00000000()); - return _mm_or_si128(hi_product_0Z2Z, hi_product_Z1Z3); // = hi_product_0123 -} - - -/* Here, y is assumed to contain one 64 bit value repeated twice. */ -static inline __m128i libdivide_mullhi_u64_flat_vector(__m128i x, __m128i y) { - //full 128 bits are x0 * y0 + (x0 * y1 << 32) + (x1 * y0 << 32) + (x1 * y1 << 64) - const __m128i mask = libdivide_get_00000000FFFFFFFF(); - const __m128i x0 = _mm_and_si128(x, mask), x1 = _mm_srli_epi64(x, 32); //x0 is low half of 2 64 bit values, x1 is high half in low slots - const __m128i y0 = _mm_and_si128(y, mask), y1 = _mm_srli_epi64(y, 32); - const __m128i x0y0_hi = _mm_srli_epi64(_mm_mul_epu32(x0, y0), 32); //x0 happens to have the low half of the two 64 bit values in 32 bit slots 0 and 2, so _mm_mul_epu32 computes their full product, and then we shift right by 32 to get just the high values - const __m128i x0y1 = _mm_mul_epu32(x0, y1); - const __m128i x1y0 = _mm_mul_epu32(x1, y0); - const __m128i x1y1 = _mm_mul_epu32(x1, y1); - - const __m128i temp = _mm_add_epi64(x1y0, x0y0_hi); - __m128i temp_lo = _mm_and_si128(temp, mask), temp_hi = _mm_srli_epi64(temp, 32); - temp_lo = _mm_srli_epi64(_mm_add_epi64(temp_lo, x0y1), 32); - temp_hi = _mm_add_epi64(x1y1, temp_hi); - - return _mm_add_epi64(temp_lo, temp_hi); -} - -/* y is one 64 bit value repeated twice */ -static inline __m128i libdivide_mullhi_s64_flat_vector(__m128i x, __m128i y) { - __m128i p = libdivide_mullhi_u64_flat_vector(x, y); - __m128i t1 = _mm_and_si128(libdivide_s64_signbits(x), y); - p = _mm_sub_epi64(p, t1); - __m128i t2 = _mm_and_si128(libdivide_s64_signbits(y), x); - p = _mm_sub_epi64(p, t2); - return p; -} - -#ifdef LIBDIVIDE_USE_SSE4_1 - -/* b is one 32 bit value repeated four times. 
*/ -static inline __m128i libdivide_mullhi_s32_flat_vector(__m128i a, __m128i b) { - __m128i hi_product_0Z2Z = _mm_srli_epi64(_mm_mul_epi32(a, b), 32); - __m128i a1X3X = _mm_srli_epi64(a, 32); - __m128i hi_product_Z1Z3 = _mm_and_si128(_mm_mul_epi32(a1X3X, b), libdivide_get_FFFFFFFF00000000()); - return _mm_or_si128(hi_product_0Z2Z, hi_product_Z1Z3); // = hi_product_0123 -} - -#else - -/* SSE2 does not have a signed multiplication instruction, but we can convert unsigned to signed pretty efficiently. Again, b is just a 32 bit value repeated four times. */ -static inline __m128i libdivide_mullhi_s32_flat_vector(__m128i a, __m128i b) { - __m128i p = libdivide__mullhi_u32_flat_vector(a, b); - __m128i t1 = _mm_and_si128(_mm_srai_epi32(a, 31), b); //t1 = (a >> 31) & y, arithmetic shift - __m128i t2 = _mm_and_si128(_mm_srai_epi32(b, 31), a); - p = _mm_sub_epi32(p, t1); - p = _mm_sub_epi32(p, t2); - return p; -} -#endif -#endif - -static inline int32_t libdivide__count_trailing_zeros32(uint32_t val) { -#if __GNUC__ || __has_builtin(__builtin_ctz) - /* Fast way to count trailing zeros */ - return __builtin_ctz(val); -#elif LIBDIVIDE_VC - unsigned long result; - if (_BitScanForward(&result, val)) { - return result; - } - return 0; -#else - /* Dorky way to count trailing zeros. Note that this hangs for val = 0! */ - int32_t result = 0; - val = (val ^ (val - 1)) >> 1; // Set v's trailing 0s to 1s and zero rest - while (val) { - val >>= 1; - result++; - } - return result; -#endif -} - -static inline int32_t libdivide__count_trailing_zeros64(uint64_t val) { -#if __LP64__ && (__GNUC__ || __has_builtin(__builtin_ctzll)) - /* Fast way to count trailing zeros. Note that we disable this in 32 bit because gcc does something horrible - it calls through to a dynamically bound function. */ - return __builtin_ctzll(val); -#elif LIBDIVIDE_VC && _WIN64 - unsigned long result; - if (_BitScanForward64(&result, val)) { - return result; - } - return 0; -#else - /* Pretty good way to count trailing zeros. Note that this hangs for val = 0! */ - uint32_t lo = val & 0xFFFFFFFF; - if (lo != 0) return libdivide__count_trailing_zeros32(lo); - return 32 + libdivide__count_trailing_zeros32(val >> 32); -#endif -} - -static inline int32_t libdivide__count_leading_zeros32(uint32_t val) { -#if __GNUC__ || __has_builtin(__builtin_clzll) - /* Fast way to count leading zeros */ +static inline int32_t libdivide_count_leading_zeros32(uint32_t val) { +#if defined(__GNUC__) || \ + __has_builtin(__builtin_clz) + // Fast way to count leading zeros return __builtin_clz(val); -#elif LIBDIVIDE_VC +#elif defined(LIBDIVIDE_VC) unsigned long result; if (_BitScanReverse(&result, val)) { return 31 - result; } return 0; #else - /* Dorky way to count leading zeros. Note that this hangs for val = 0! */ int32_t result = 0; - while (! (val & (1U << 31))) { - val <<= 1; + uint32_t hi = 1U << 31; + for (; ~val & hi; hi >>= 1) { result++; } return result; #endif } -static inline int32_t libdivide__count_leading_zeros64(uint64_t val) { -#if __GNUC__ || __has_builtin(__builtin_clzll) - /* Fast way to count leading zeros */ +static inline int32_t libdivide_count_leading_zeros64(uint64_t val) { +#if defined(__GNUC__) || \ + __has_builtin(__builtin_clzll) + // Fast way to count leading zeros return __builtin_clzll(val); -#elif LIBDIVIDE_VC && _WIN64 +#elif defined(LIBDIVIDE_VC) && defined(_WIN64) unsigned long result; if (_BitScanReverse64(&result, val)) { return 63 - result; } return 0; #else - /* Dorky way to count leading zeros. 
Note that this hangs for val = 0! */ - int32_t result = 0; - while (! (val & (1ULL << 63))) { - val <<= 1; - result++; - } - return result; + uint32_t hi = val >> 32; + uint32_t lo = val & 0xFFFFFFFF; + if (hi != 0) return libdivide_count_leading_zeros32(hi); + return 32 + libdivide_count_leading_zeros32(lo); #endif } -//libdivide_64_div_32_to_32: divides a 64 bit uint {u1, u0} by a 32 bit uint {v}. The result must fit in 32 bits. Returns the quotient directly and the remainder in *r -#if (LIBDIVIDE_IS_i386 || LIBDIVIDE_IS_X86_64) && LIBDIVIDE_GCC_STYLE_ASM -static uint32_t libdivide_64_div_32_to_32(uint32_t u1, uint32_t u0, uint32_t v, uint32_t *r) { +// libdivide_64_div_32_to_32: divides a 64-bit uint {u1, u0} by a 32-bit +// uint {v}. The result must fit in 32 bits. +// Returns the quotient directly and the remainder in *r +static inline uint32_t libdivide_64_div_32_to_32(uint32_t u1, uint32_t u0, uint32_t v, uint32_t *r) { +#if (defined(LIBDIVIDE_i386) || defined(LIBDIVIDE_X86_64)) && \ + defined(LIBDIVIDE_GCC_STYLE_ASM) uint32_t result; __asm__("divl %[v]" : "=a"(result), "=d"(*r) : [v] "r"(v), "a"(u0), "d"(u1) ); return result; -} #else -static uint32_t libdivide_64_div_32_to_32(uint32_t u1, uint32_t u0, uint32_t v, uint32_t *r) { - uint64_t n = (((uint64_t)u1) << 32) | u0; + uint64_t n = ((uint64_t)u1 << 32) | u0; uint32_t result = (uint32_t)(n / v); *r = (uint32_t)(n - result * (uint64_t)v); return result; -} #endif +} -#if LIBDIVIDE_IS_X86_64 && LIBDIVIDE_GCC_STYLE_ASM +// libdivide_128_div_64_to_64: divides a 128-bit uint {u1, u0} by a 64-bit +// uint {v}. The result must fit in 64 bits. +// Returns the quotient directly and the remainder in *r static uint64_t libdivide_128_div_64_to_64(uint64_t u1, uint64_t u0, uint64_t v, uint64_t *r) { - //u0 -> rax - //u1 -> rdx - //divq +#if defined(LIBDIVIDE_X86_64) && \ + defined(LIBDIVIDE_GCC_STYLE_ASM) uint64_t result; __asm__("divq %[v]" : "=a"(result), "=d"(*r) : [v] "r"(v), "a"(u0), "d"(u1) ); return result; - -} +#elif defined(HAS_INT128_T) && \ + defined(HAS_INT128_DIV) + __uint128_t n = ((__uint128_t)u1 << 64) | u0; + uint64_t result = (uint64_t)(n / v); + *r = (uint64_t)(n - result * (__uint128_t)v); + return result; #else + // Code taken from Hacker's Delight: + // http://www.hackersdelight.org/HDcode/divlu.c. + // License permits inclusion here per: + // http://www.hackersdelight.org/permissions.htm -/* Code taken from Hacker's Delight, http://www.hackersdelight.org/HDcode/divlu.c . License permits inclusion here per http://www.hackersdelight.org/permissions.htm - */ -static uint64_t libdivide_128_div_64_to_64(uint64_t u1, uint64_t u0, uint64_t v, uint64_t *r) { - const uint64_t b = (1ULL << 32); // Number base (16 bits). - uint64_t un1, un0, // Norm. dividend LSD's. - vn1, vn0, // Norm. divisor digits. - q1, q0, // Quotient digits. - un64, un21, un10,// Dividend digit pairs. - rhat; // A remainder. - int s; // Shift amount for norm. + const uint64_t b = (1ULL << 32); // Number base (32 bits) + uint64_t un1, un0; // Norm. dividend LSD's + uint64_t vn1, vn0; // Norm. divisor digits + uint64_t q1, q0; // Quotient digits + uint64_t un64, un21, un10; // Dividend digit pairs + uint64_t rhat; // A remainder + int32_t s; // Shift amount for norm - if (u1 >= v) { // If overflow, set rem. - if (r != NULL) // to an impossible value, - *r = (uint64_t)(-1); // and return the largest - return (uint64_t)(-1);} // possible quotient. + // If overflow, set rem. 
to an impossible value, + // and return the largest possible quotient + if (u1 >= v) { + *r = (uint64_t) -1; + return (uint64_t) -1; + } - /* count leading zeros */ - s = libdivide__count_leading_zeros64(v); // 0 <= s <= 63. + // count leading zeros + s = libdivide_count_leading_zeros64(v); if (s > 0) { - v = v << s; // Normalize divisor. - un64 = (u1 << s) | ((u0 >> (64 - s)) & (-s >> 31)); - un10 = u0 << s; // Shift dividend left. + // Normalize divisor + v = v << s; + un64 = (u1 << s) | (u0 >> (64 - s)); + un10 = u0 << s; // Shift dividend left } else { - // Avoid undefined behavior. - un64 = u1 | u0; + // Avoid undefined behavior of (u0 >> 64). + // The behavior is undefined if the right operand is + // negative, or greater than or equal to the length + // in bits of the promoted left operand. + un64 = u1; un10 = u0; } - vn1 = v >> 32; // Break divisor up into - vn0 = v & 0xFFFFFFFF; // two 32-bit digits. + // Break divisor up into two 32-bit digits + vn1 = v >> 32; + vn0 = v & 0xFFFFFFFF; - un1 = un10 >> 32; // Break right half of - un0 = un10 & 0xFFFFFFFF; // dividend into two digits. + // Break right half of dividend into two digits + un1 = un10 >> 32; + un0 = un10 & 0xFFFFFFFF; - q1 = un64/vn1; // Compute the first - rhat = un64 - q1*vn1; // quotient digit, q1. -again1: - if (q1 >= b || q1*vn0 > b*rhat + un1) { + // Compute the first quotient digit, q1 + q1 = un64 / vn1; + rhat = un64 - q1 * vn1; + + while (q1 >= b || q1 * vn0 > b * rhat + un1) { q1 = q1 - 1; rhat = rhat + vn1; - if (rhat < b) goto again1;} + if (rhat >= b) + break; + } - un21 = un64*b + un1 - q1*v; // Multiply and subtract. + // Multiply and subtract + un21 = un64 * b + un1 - q1 * v; - q0 = un21/vn1; // Compute the second - rhat = un21 - q0*vn1; // quotient digit, q0. -again2: - if (q0 >= b || q0*vn0 > b*rhat + un0) { + // Compute the second quotient digit + q0 = un21 / vn1; + rhat = un21 - q0 * vn1; + + while (q0 >= b || q0 * vn0 > b * rhat + un0) { q0 = q0 - 1; rhat = rhat + vn1; - if (rhat < b) goto again2;} + if (rhat >= b) + break; + } - if (r != NULL) // If remainder is wanted, - *r = (un21*b + un0 - q0*v) >> s; // return it. - return q1*b + q0; + *r = (un21 * b + un0 - q0 * v) >> s; + return q1 * b + q0; +#endif } -#endif -#if LIBDIVIDE_ASSERTIONS_ON -#define LIBDIVIDE_ASSERT(x) do { if (! (x)) { fprintf(stderr, "Assertion failure on line %ld: %s\n", (long)__LINE__, #x); exit(-1); } } while (0) +// Bitshift a u128 in place, left (signed_shift > 0) or right (signed_shift < 0) +static inline void libdivide_u128_shift(uint64_t *u1, uint64_t *u0, int32_t signed_shift) { + if (signed_shift > 0) { + uint32_t shift = signed_shift; + *u1 <<= shift; + *u1 |= *u0 >> (64 - shift); + *u0 <<= shift; + } + else if (signed_shift < 0) { + uint32_t shift = -signed_shift; + *u0 >>= shift; + *u0 |= *u1 << (64 - shift); + *u1 >>= shift; + } +} + +// Computes a 128 / 128 -> 64 bit division, with a 128 bit remainder. 
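For orientation while reading the new 128-by-128 helper below: when the compiler provides a 128-bit integer type, the routine reduces to a single __uint128_t division (its first branch does exactly that), and the Hacker's Delight style long division that follows is only the fallback for toolchains without 128-bit divide support. A minimal reference sketch of that semantics, standalone and not part of the patch (the function name here is made up):

#include <stdint.h>

#if defined(__SIZEOF_INT128__)
/* u = (u_hi:u_lo), v = (v_hi:v_lo); the caller guarantees the quotient fits in 64 bits. */
static uint64_t div_128_by_128_reference(uint64_t u_hi, uint64_t u_lo,
                                         uint64_t v_hi, uint64_t v_lo,
                                         uint64_t *r_hi, uint64_t *r_lo)
{
    __uint128_t u = ((__uint128_t)u_hi << 64) | u_lo;
    __uint128_t v = ((__uint128_t)v_hi << 64) | v_lo;
    uint64_t q = (uint64_t)(u / v);            /* truncating 128-bit division */
    __uint128_t rem = u - (__uint128_t)q * v;  /* 128-bit remainder */
    *r_lo = (uint64_t)rem;
    *r_hi = (uint64_t)(rem >> 64);
    return q;
}
#endif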
+static uint64_t libdivide_128_div_128_to_64(uint64_t u_hi, uint64_t u_lo, uint64_t v_hi, uint64_t v_lo, uint64_t *r_hi, uint64_t *r_lo) { +#if defined(HAS_INT128_T) && \ + defined(HAS_INT128_DIV) + __uint128_t ufull = u_hi; + __uint128_t vfull = v_hi; + ufull = (ufull << 64) | u_lo; + vfull = (vfull << 64) | v_lo; + uint64_t res = (uint64_t)(ufull / vfull); + __uint128_t remainder = ufull - (vfull * res); + *r_lo = (uint64_t)remainder; + *r_hi = (uint64_t)(remainder >> 64); + return res; #else -#define LIBDIVIDE_ASSERT(x) -#endif + // Adapted from "Unsigned Doubleword Division" in Hacker's Delight + // We want to compute u / v + typedef struct { uint64_t hi; uint64_t lo; } u128_t; + u128_t u = {u_hi, u_lo}; + u128_t v = {v_hi, v_lo}; -#ifndef LIBDIVIDE_HEADER_ONLY + if (v.hi == 0) { + // divisor v is a 64 bit value, so we just need one 128/64 division + // Note that we are simpler than Hacker's Delight here, because we know + // the quotient fits in 64 bits whereas Hacker's Delight demands a full + // 128 bit quotient + *r_hi = 0; + return libdivide_128_div_64_to_64(u.hi, u.lo, v.lo, r_lo); + } + // Here v >= 2**64 + // We know that v.hi != 0, so count leading zeros is OK + // We have 0 <= n <= 63 + uint32_t n = libdivide_count_leading_zeros64(v.hi); + + // Normalize the divisor so its MSB is 1 + u128_t v1t = v; + libdivide_u128_shift(&v1t.hi, &v1t.lo, n); + uint64_t v1 = v1t.hi; // i.e. v1 = v1t >> 64 + + // To ensure no overflow + u128_t u1 = u; + libdivide_u128_shift(&u1.hi, &u1.lo, -1); + + // Get quotient from divide unsigned insn. + uint64_t rem_ignored; + uint64_t q1 = libdivide_128_div_64_to_64(u1.hi, u1.lo, v1, &rem_ignored); + + // Undo normalization and division of u by 2. + u128_t q0 = {0, q1}; + libdivide_u128_shift(&q0.hi, &q0.lo, n); + libdivide_u128_shift(&q0.hi, &q0.lo, -63); + + // Make q0 correct or too small by 1 + // Equivalent to `if (q0 != 0) q0 = q0 - 1;` + if (q0.hi != 0 || q0.lo != 0) { + q0.hi -= (q0.lo == 0); // borrow + q0.lo -= 1; + } + + // Now q0 is correct. + // Compute q0 * v as q0v + // = (q0.hi << 64 + q0.lo) * (v.hi << 64 + v.lo) + // = (q0.hi * v.hi << 128) + (q0.hi * v.lo << 64) + + // (q0.lo * v.hi << 64) + q0.lo * v.lo) + // Each term is 128 bit + // High half of full product (upper 128 bits!) 
are dropped + u128_t q0v = {0, 0}; + q0v.hi = q0.hi*v.lo + q0.lo*v.hi + libdivide_mullhi_u64(q0.lo, v.lo); + q0v.lo = q0.lo*v.lo; + + // Compute u - q0v as u_q0v + // This is the remainder + u128_t u_q0v = u; + u_q0v.hi -= q0v.hi + (u.lo < q0v.lo); // second term is borrow + u_q0v.lo -= q0v.lo; + + // Check if u_q0v >= v + // This checks if our remainder is larger than the divisor + if ((u_q0v.hi > v.hi) || + (u_q0v.hi == v.hi && u_q0v.lo >= v.lo)) { + // Increment q0 + q0.lo += 1; + q0.hi += (q0.lo == 0); // carry + + // Subtract v from remainder + u_q0v.hi -= v.hi + (u_q0v.lo < v.lo); + u_q0v.lo -= v.lo; + } + + *r_hi = u_q0v.hi; + *r_lo = u_q0v.lo; + + LIBDIVIDE_ASSERT(q0.hi == 0); + return q0.lo; +#endif +} ////////// UINT32 -struct libdivide_u32_t libdivide_u32_gen(uint32_t d) { - struct libdivide_u32_t result; - if ((d & (d - 1)) == 0) { - result.magic = 0; - result.more = libdivide__count_trailing_zeros32(d) | LIBDIVIDE_U32_SHIFT_PATH; +static inline struct libdivide_u32_t libdivide_internal_u32_gen(uint32_t d, int branchfree) { + if (d == 0) { + LIBDIVIDE_ERROR("divider must be != 0"); } - else { - const uint32_t floor_log_2_d = 31 - libdivide__count_leading_zeros32(d); + struct libdivide_u32_t result; + uint32_t floor_log_2_d = 31 - libdivide_count_leading_zeros32(d); + + // Power of 2 + if ((d & (d - 1)) == 0) { + // We need to subtract 1 from the shift value in case of an unsigned + // branchfree divider because there is a hardcoded right shift by 1 + // in its division algorithm. Because of this we also need to add back + // 1 in its recovery algorithm. + result.magic = 0; + result.more = (uint8_t)(floor_log_2_d - (branchfree != 0)); + } else { uint8_t more; uint32_t rem, proposed_m; proposed_m = libdivide_64_div_32_to_32(1U << floor_log_2_d, 0, d, &rem); @@ -631,570 +569,1358 @@ struct libdivide_u32_t libdivide_u32_gen(uint32_t d) { LIBDIVIDE_ASSERT(rem > 0 && rem < d); const uint32_t e = d - rem; - /* This power works if e < 2**floor_log_2_d. */ - if (e < (1U << floor_log_2_d)) { - /* This power works */ + // This power works if e < 2**floor_log_2_d. + if (!branchfree && (e < (1U << floor_log_2_d))) { + // This power works more = floor_log_2_d; - } - else { - /* We have to use the general 33-bit algorithm. We need to compute (2**power) / d. However, we already have (2**(power-1))/d and its remainder. By doubling both, and then correcting the remainder, we can compute the larger division. */ - proposed_m += proposed_m; //don't care about overflow here - in fact, we expect it + } else { + // We have to use the general 33-bit algorithm. We need to compute + // (2**power) / d. However, we already have (2**(power-1))/d and + // its remainder. By doubling both, and then correcting the + // remainder, we can compute the larger division. + // don't care about overflow here - in fact, we expect it + proposed_m += proposed_m; const uint32_t twice_rem = rem + rem; if (twice_rem >= d || twice_rem < rem) proposed_m += 1; more = floor_log_2_d | LIBDIVIDE_ADD_MARKER; } result.magic = 1 + proposed_m; result.more = more; - //result.more's shift should in general be ceil_log_2_d. But if we used the smaller power, we subtract one from the shift because we're using the smaller power. If we're using the larger power, we subtract one from the shift because it's taken care of by the add indicator. So floor_log_2_d happens to be correct in both cases. - + // result.more's shift should in general be ceil_log_2_d. 
But if we + // used the smaller power, we subtract one from the shift because we're + // using the smaller power. If we're using the larger power, we + // subtract one from the shift because it's taken care of by the add + // indicator. So floor_log_2_d happens to be correct in both cases. } return result; } +struct libdivide_u32_t libdivide_u32_gen(uint32_t d) { + return libdivide_internal_u32_gen(d, 0); +} + +struct libdivide_u32_branchfree_t libdivide_u32_branchfree_gen(uint32_t d) { + if (d == 1) { + LIBDIVIDE_ERROR("branchfree divider must be != 1"); + } + struct libdivide_u32_t tmp = libdivide_internal_u32_gen(d, 1); + struct libdivide_u32_branchfree_t ret = {tmp.magic, (uint8_t)(tmp.more & LIBDIVIDE_32_SHIFT_MASK)}; + return ret; +} + uint32_t libdivide_u32_do(uint32_t numer, const struct libdivide_u32_t *denom) { uint8_t more = denom->more; - if (more & LIBDIVIDE_U32_SHIFT_PATH) { - return numer >> (more & LIBDIVIDE_32_SHIFT_MASK); + if (!denom->magic) { + return numer >> more; } else { - uint32_t q = libdivide__mullhi_u32(denom->magic, numer); + uint32_t q = libdivide_mullhi_u32(denom->magic, numer); if (more & LIBDIVIDE_ADD_MARKER) { uint32_t t = ((numer - q) >> 1) + q; return t >> (more & LIBDIVIDE_32_SHIFT_MASK); } else { - return q >> more; //all upper bits are 0 - don't need to mask them off + // All upper bits are 0, + // don't need to mask them off. + return q >> more; } } } - -int libdivide_u32_get_algorithm(const struct libdivide_u32_t *denom) { - uint8_t more = denom->more; - if (more & LIBDIVIDE_U32_SHIFT_PATH) return 0; - else if (! (more & LIBDIVIDE_ADD_MARKER)) return 1; - else return 2; -} - -uint32_t libdivide_u32_do_alg0(uint32_t numer, const struct libdivide_u32_t *denom) { - return numer >> (denom->more & LIBDIVIDE_32_SHIFT_MASK); -} - -uint32_t libdivide_u32_do_alg1(uint32_t numer, const struct libdivide_u32_t *denom) { - uint32_t q = libdivide__mullhi_u32(denom->magic, numer); - return q >> denom->more; -} - -uint32_t libdivide_u32_do_alg2(uint32_t numer, const struct libdivide_u32_t *denom) { - // denom->add != 0 - uint32_t q = libdivide__mullhi_u32(denom->magic, numer); +uint32_t libdivide_u32_branchfree_do(uint32_t numer, const struct libdivide_u32_branchfree_t *denom) { + uint32_t q = libdivide_mullhi_u32(denom->magic, numer); uint32_t t = ((numer - q) >> 1) + q; - return t >> (denom->more & LIBDIVIDE_32_SHIFT_MASK); + return t >> denom->more; } - - - -#if LIBDIVIDE_USE_SSE2 -__m128i libdivide_u32_do_vector(__m128i numers, const struct libdivide_u32_t *denom) { +uint32_t libdivide_u32_recover(const struct libdivide_u32_t *denom) { uint8_t more = denom->more; - if (more & LIBDIVIDE_U32_SHIFT_PATH) { - return _mm_srl_epi32(numers, libdivide_u32_to_m128i(more & LIBDIVIDE_32_SHIFT_MASK)); - } - else { - __m128i q = libdivide__mullhi_u32_flat_vector(numers, _mm_set1_epi32(denom->magic)); - if (more & LIBDIVIDE_ADD_MARKER) { - //uint32_t t = ((numer - q) >> 1) + q; - //return t >> denom->shift; - __m128i t = _mm_add_epi32(_mm_srli_epi32(_mm_sub_epi32(numers, q), 1), q); - return _mm_srl_epi32(t, libdivide_u32_to_m128i(more & LIBDIVIDE_32_SHIFT_MASK)); + uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK; - } - else { - //q >> denom->shift - return _mm_srl_epi32(q, libdivide_u32_to_m128i(more)); - } + if (!denom->magic) { + return 1U << shift; + } else if (!(more & LIBDIVIDE_ADD_MARKER)) { + // We compute q = n/d = n*m / 2^(32 + shift) + // Therefore we have d = 2^(32 + shift) / m + // We need to ceil it. 
+ // We know d is not a power of 2, so m is not a power of 2, + // so we can just add 1 to the floor + uint32_t hi_dividend = 1U << shift; + uint32_t rem_ignored; + return 1 + libdivide_64_div_32_to_32(hi_dividend, 0, denom->magic, &rem_ignored); + } else { + // Here we wish to compute d = 2^(32+shift+1)/(m+2^32). + // Notice (m + 2^32) is a 33 bit number. Use 64 bit division for now + // Also note that shift may be as high as 31, so shift + 1 will + // overflow. So we have to compute it as 2^(32+shift)/(m+2^32), and + // then double the quotient and remainder. + uint64_t half_n = 1ULL << (32 + shift); + uint64_t d = (1ULL << 32) | denom->magic; + // Note that the quotient is guaranteed <= 32 bits, but the remainder + // may need 33! + uint32_t half_q = (uint32_t)(half_n / d); + uint64_t rem = half_n % d; + // We computed 2^(32+shift)/(m+2^32) + // Need to double it, and then add 1 to the quotient if doubling th + // remainder would increase the quotient. + // Note that rem<<1 cannot overflow, since rem < d and d is 33 bits + uint32_t full_q = half_q + half_q + ((rem<<1) >= d); + + // We rounded down in gen (hence +1) + return full_q + 1; } } -__m128i libdivide_u32_do_vector_alg0(__m128i numers, const struct libdivide_u32_t *denom) { - return _mm_srl_epi32(numers, libdivide_u32_to_m128i(denom->more & LIBDIVIDE_32_SHIFT_MASK)); -} +uint32_t libdivide_u32_branchfree_recover(const struct libdivide_u32_branchfree_t *denom) { + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK; -__m128i libdivide_u32_do_vector_alg1(__m128i numers, const struct libdivide_u32_t *denom) { - __m128i q = libdivide__mullhi_u32_flat_vector(numers, _mm_set1_epi32(denom->magic)); - return _mm_srl_epi32(q, libdivide_u32_to_m128i(denom->more)); -} + if (!denom->magic) { + return 1U << (shift + 1); + } else { + // Here we wish to compute d = 2^(32+shift+1)/(m+2^32). + // Notice (m + 2^32) is a 33 bit number. Use 64 bit division for now + // Also note that shift may be as high as 31, so shift + 1 will + // overflow. So we have to compute it as 2^(32+shift)/(m+2^32), and + // then double the quotient and remainder. + uint64_t half_n = 1ULL << (32 + shift); + uint64_t d = (1ULL << 32) | denom->magic; + // Note that the quotient is guaranteed <= 32 bits, but the remainder + // may need 33! + uint32_t half_q = (uint32_t)(half_n / d); + uint64_t rem = half_n % d; + // We computed 2^(32+shift)/(m+2^32) + // Need to double it, and then add 1 to the quotient if doubling th + // remainder would increase the quotient. 
+ // Note that rem<<1 cannot overflow, since rem < d and d is 33 bits + uint32_t full_q = half_q + half_q + ((rem<<1) >= d); -__m128i libdivide_u32_do_vector_alg2(__m128i numers, const struct libdivide_u32_t *denom) { - __m128i q = libdivide__mullhi_u32_flat_vector(numers, _mm_set1_epi32(denom->magic)); - __m128i t = _mm_add_epi32(_mm_srli_epi32(_mm_sub_epi32(numers, q), 1), q); - return _mm_srl_epi32(t, libdivide_u32_to_m128i(denom->more & LIBDIVIDE_32_SHIFT_MASK)); + // We rounded down in gen (hence +1) + return full_q + 1; + } } -#endif - /////////// UINT64 -struct libdivide_u64_t libdivide_u64_gen(uint64_t d) { - struct libdivide_u64_t result; - if ((d & (d - 1)) == 0) { - result.more = libdivide__count_trailing_zeros64(d) | LIBDIVIDE_U64_SHIFT_PATH; - result.magic = 0; +static inline struct libdivide_u64_t libdivide_internal_u64_gen(uint64_t d, int branchfree) { + if (d == 0) { + LIBDIVIDE_ERROR("divider must be != 0"); } - else { - const uint32_t floor_log_2_d = 63 - libdivide__count_leading_zeros64(d); + struct libdivide_u64_t result; + uint32_t floor_log_2_d = 63 - libdivide_count_leading_zeros64(d); + + // Power of 2 + if ((d & (d - 1)) == 0) { + // We need to subtract 1 from the shift value in case of an unsigned + // branchfree divider because there is a hardcoded right shift by 1 + // in its division algorithm. Because of this we also need to add back + // 1 in its recovery algorithm. + result.magic = 0; + result.more = (uint8_t)(floor_log_2_d - (branchfree != 0)); + } else { uint64_t proposed_m, rem; uint8_t more; - proposed_m = libdivide_128_div_64_to_64(1ULL << floor_log_2_d, 0, d, &rem); //== (1 << (64 + floor_log_2_d)) / d + // (1 << (64 + floor_log_2_d)) / d + proposed_m = libdivide_128_div_64_to_64(1ULL << floor_log_2_d, 0, d, &rem); LIBDIVIDE_ASSERT(rem > 0 && rem < d); const uint64_t e = d - rem; - /* This power works if e < 2**floor_log_2_d. */ - if (e < (1ULL << floor_log_2_d)) { - /* This power works */ + // This power works if e < 2**floor_log_2_d. + if (!branchfree && e < (1ULL << floor_log_2_d)) { + // This power works more = floor_log_2_d; - } - else { - /* We have to use the general 65-bit algorithm. We need to compute (2**power) / d. However, we already have (2**(power-1))/d and its remainder. By doubling both, and then correcting the remainder, we can compute the larger division. */ - proposed_m += proposed_m; //don't care about overflow here - in fact, we expect it + } else { + // We have to use the general 65-bit algorithm. We need to compute + // (2**power) / d. However, we already have (2**(power-1))/d and + // its remainder. By doubling both, and then correcting the + // remainder, we can compute the larger division. + // don't care about overflow here - in fact, we expect it + proposed_m += proposed_m; const uint64_t twice_rem = rem + rem; if (twice_rem >= d || twice_rem < rem) proposed_m += 1; - more = floor_log_2_d | LIBDIVIDE_ADD_MARKER; + more = floor_log_2_d | LIBDIVIDE_ADD_MARKER; } result.magic = 1 + proposed_m; result.more = more; - //result.more's shift should in general be ceil_log_2_d. But if we used the smaller power, we subtract one from the shift because we're using the smaller power. If we're using the larger power, we subtract one from the shift because it's taken care of by the add indicator. So floor_log_2_d happens to be correct in both cases, which is why we do it outside of the if statement. + // result.more's shift should in general be ceil_log_2_d. 
But if we + // used the smaller power, we subtract one from the shift because we're + // using the smaller power. If we're using the larger power, we + // subtract one from the shift because it's taken care of by the add + // indicator. So floor_log_2_d happens to be correct in both cases, + // which is why we do it outside of the if statement. } return result; } +struct libdivide_u64_t libdivide_u64_gen(uint64_t d) { + return libdivide_internal_u64_gen(d, 0); +} + +struct libdivide_u64_branchfree_t libdivide_u64_branchfree_gen(uint64_t d) { + if (d == 1) { + LIBDIVIDE_ERROR("branchfree divider must be != 1"); + } + struct libdivide_u64_t tmp = libdivide_internal_u64_gen(d, 1); + struct libdivide_u64_branchfree_t ret = {tmp.magic, (uint8_t)(tmp.more & LIBDIVIDE_64_SHIFT_MASK)}; + return ret; +} + uint64_t libdivide_u64_do(uint64_t numer, const struct libdivide_u64_t *denom) { uint8_t more = denom->more; - if (more & LIBDIVIDE_U64_SHIFT_PATH) { - return numer >> (more & LIBDIVIDE_64_SHIFT_MASK); + if (!denom->magic) { + return numer >> more; } else { - uint64_t q = libdivide__mullhi_u64(denom->magic, numer); + uint64_t q = libdivide_mullhi_u64(denom->magic, numer); if (more & LIBDIVIDE_ADD_MARKER) { uint64_t t = ((numer - q) >> 1) + q; return t >> (more & LIBDIVIDE_64_SHIFT_MASK); } else { - return q >> more; //all upper bits are 0 - don't need to mask them off + // All upper bits are 0, + // don't need to mask them off. + return q >> more; } } } - -int libdivide_u64_get_algorithm(const struct libdivide_u64_t *denom) { - uint8_t more = denom->more; - if (more & LIBDIVIDE_U64_SHIFT_PATH) return 0; - else if (! (more & LIBDIVIDE_ADD_MARKER)) return 1; - else return 2; -} - -uint64_t libdivide_u64_do_alg0(uint64_t numer, const struct libdivide_u64_t *denom) { - return numer >> (denom->more & LIBDIVIDE_64_SHIFT_MASK); -} - -uint64_t libdivide_u64_do_alg1(uint64_t numer, const struct libdivide_u64_t *denom) { - uint64_t q = libdivide__mullhi_u64(denom->magic, numer); - return q >> denom->more; -} - -uint64_t libdivide_u64_do_alg2(uint64_t numer, const struct libdivide_u64_t *denom) { - uint64_t q = libdivide__mullhi_u64(denom->magic, numer); +uint64_t libdivide_u64_branchfree_do(uint64_t numer, const struct libdivide_u64_branchfree_t *denom) { + uint64_t q = libdivide_mullhi_u64(denom->magic, numer); uint64_t t = ((numer - q) >> 1) + q; - return t >> (denom->more & LIBDIVIDE_64_SHIFT_MASK); + return t >> denom->more; } -#if LIBDIVIDE_USE_SSE2 -__m128i libdivide_u64_do_vector(__m128i numers, const struct libdivide_u64_t * denom) { +uint64_t libdivide_u64_recover(const struct libdivide_u64_t *denom) { uint8_t more = denom->more; - if (more & LIBDIVIDE_U64_SHIFT_PATH) { - return _mm_srl_epi64(numers, libdivide_u32_to_m128i(more & LIBDIVIDE_64_SHIFT_MASK)); - } - else { - __m128i q = libdivide_mullhi_u64_flat_vector(numers, libdivide__u64_to_m128(denom->magic)); - if (more & LIBDIVIDE_ADD_MARKER) { - //uint32_t t = ((numer - q) >> 1) + q; - //return t >> denom->shift; - __m128i t = _mm_add_epi64(_mm_srli_epi64(_mm_sub_epi64(numers, q), 1), q); - return _mm_srl_epi64(t, libdivide_u32_to_m128i(more & LIBDIVIDE_64_SHIFT_MASK)); - } - else { - //q >> denom->shift - return _mm_srl_epi64(q, libdivide_u32_to_m128i(more)); - } + uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + + if (!denom->magic) { + return 1ULL << shift; + } else if (!(more & LIBDIVIDE_ADD_MARKER)) { + // We compute q = n/d = n*m / 2^(64 + shift) + // Therefore we have d = 2^(64 + shift) / m + // We need to ceil it. 
+ // We know d is not a power of 2, so m is not a power of 2, + // so we can just add 1 to the floor + uint64_t hi_dividend = 1ULL << shift; + uint64_t rem_ignored; + return 1 + libdivide_128_div_64_to_64(hi_dividend, 0, denom->magic, &rem_ignored); + } else { + // Here we wish to compute d = 2^(64+shift+1)/(m+2^64). + // Notice (m + 2^64) is a 65 bit number. This gets hairy. See + // libdivide_u32_recover for more on what we do here. + // TODO: do something better than 128 bit math + + // Full n is a (potentially) 129 bit value + // half_n is a 128 bit value + // Compute the hi half of half_n. Low half is 0. + uint64_t half_n_hi = 1ULL << shift, half_n_lo = 0; + // d is a 65 bit value. The high bit is always set to 1. + const uint64_t d_hi = 1, d_lo = denom->magic; + // Note that the quotient is guaranteed <= 64 bits, + // but the remainder may need 65! + uint64_t r_hi, r_lo; + uint64_t half_q = libdivide_128_div_128_to_64(half_n_hi, half_n_lo, d_hi, d_lo, &r_hi, &r_lo); + // We computed 2^(64+shift)/(m+2^64) + // Double the remainder ('dr') and check if that is larger than d + // Note that d is a 65 bit value, so r1 is small and so r1 + r1 + // cannot overflow + uint64_t dr_lo = r_lo + r_lo; + uint64_t dr_hi = r_hi + r_hi + (dr_lo < r_lo); // last term is carry + int dr_exceeds_d = (dr_hi > d_hi) || (dr_hi == d_hi && dr_lo >= d_lo); + uint64_t full_q = half_q + half_q + (dr_exceeds_d ? 1 : 0); + return full_q + 1; } } -__m128i libdivide_u64_do_vector_alg0(__m128i numers, const struct libdivide_u64_t *denom) { - return _mm_srl_epi64(numers, libdivide_u32_to_m128i(denom->more & LIBDIVIDE_64_SHIFT_MASK)); +uint64_t libdivide_u64_branchfree_recover(const struct libdivide_u64_branchfree_t *denom) { + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + + if (!denom->magic) { + return 1ULL << (shift + 1); + } else { + // Here we wish to compute d = 2^(64+shift+1)/(m+2^64). + // Notice (m + 2^64) is a 65 bit number. This gets hairy. See + // libdivide_u32_recover for more on what we do here. + // TODO: do something better than 128 bit math + + // Full n is a (potentially) 129 bit value + // half_n is a 128 bit value + // Compute the hi half of half_n. Low half is 0. + uint64_t half_n_hi = 1ULL << shift, half_n_lo = 0; + // d is a 65 bit value. The high bit is always set to 1. + const uint64_t d_hi = 1, d_lo = denom->magic; + // Note that the quotient is guaranteed <= 64 bits, + // but the remainder may need 65! + uint64_t r_hi, r_lo; + uint64_t half_q = libdivide_128_div_128_to_64(half_n_hi, half_n_lo, d_hi, d_lo, &r_hi, &r_lo); + // We computed 2^(64+shift)/(m+2^64) + // Double the remainder ('dr') and check if that is larger than d + // Note that d is a 65 bit value, so r1 is small and so r1 + r1 + // cannot overflow + uint64_t dr_lo = r_lo + r_lo; + uint64_t dr_hi = r_hi + r_hi + (dr_lo < r_lo); // last term is carry + int dr_exceeds_d = (dr_hi > d_hi) || (dr_hi == d_hi && dr_lo >= d_lo); + uint64_t full_q = half_q + half_q + (dr_exceeds_d ? 
1 : 0); + return full_q + 1; + } } -__m128i libdivide_u64_do_vector_alg1(__m128i numers, const struct libdivide_u64_t *denom) { - __m128i q = libdivide_mullhi_u64_flat_vector(numers, libdivide__u64_to_m128(denom->magic)); - return _mm_srl_epi64(q, libdivide_u32_to_m128i(denom->more)); -} - -__m128i libdivide_u64_do_vector_alg2(__m128i numers, const struct libdivide_u64_t *denom) { - __m128i q = libdivide_mullhi_u64_flat_vector(numers, libdivide__u64_to_m128(denom->magic)); - __m128i t = _mm_add_epi64(_mm_srli_epi64(_mm_sub_epi64(numers, q), 1), q); - return _mm_srl_epi64(t, libdivide_u32_to_m128i(denom->more & LIBDIVIDE_64_SHIFT_MASK)); -} - - -#endif - /////////// SINT32 +static inline struct libdivide_s32_t libdivide_internal_s32_gen(int32_t d, int branchfree) { + if (d == 0) { + LIBDIVIDE_ERROR("divider must be != 0"); + } -static inline int32_t libdivide__mullhi_s32(int32_t x, int32_t y) { - int64_t xl = x, yl = y; - int64_t rl = xl * yl; - return (int32_t)(rl >> 32); //needs to be arithmetic shift -} - -struct libdivide_s32_t libdivide_s32_gen(int32_t d) { struct libdivide_s32_t result; - /* If d is a power of 2, or negative a power of 2, we have to use a shift. This is especially important because the magic algorithm fails for -1. To check if d is a power of 2 or its inverse, it suffices to check whether its absolute value has exactly one bit set. This works even for INT_MIN, because abs(INT_MIN) == INT_MIN, and INT_MIN has one bit set and is a power of 2. */ - uint32_t absD = (uint32_t)(d < 0 ? -d : d); //gcc optimizes this to the fast abs trick - if ((absD & (absD - 1)) == 0) { //check if exactly one bit is set, don't care if absD is 0 since that's divide by zero + // If d is a power of 2, or negative a power of 2, we have to use a shift. + // This is especially important because the magic algorithm fails for -1. + // To check if d is a power of 2 or its inverse, it suffices to check + // whether its absolute value has exactly one bit set. This works even for + // INT_MIN, because abs(INT_MIN) == INT_MIN, and INT_MIN has one bit set + // and is a power of 2. + uint32_t ud = (uint32_t)d; + uint32_t absD = (d < 0) ? -ud : ud; + uint32_t floor_log_2_d = 31 - libdivide_count_leading_zeros32(absD); + // check if exactly one bit is set, + // don't care if absD is 0 since that's divide by zero + if ((absD & (absD - 1)) == 0) { + // Branchfree and normal paths are exactly the same result.magic = 0; - result.more = libdivide__count_trailing_zeros32(absD) | (d < 0 ? LIBDIVIDE_NEGATIVE_DIVISOR : 0) | LIBDIVIDE_S32_SHIFT_PATH; - } - else { - const uint32_t floor_log_2_d = 31 - libdivide__count_leading_zeros32(absD); + result.more = floor_log_2_d | (d < 0 ? LIBDIVIDE_NEGATIVE_DIVISOR : 0); + } else { LIBDIVIDE_ASSERT(floor_log_2_d >= 1); uint8_t more; - //the dividend here is 2**(floor_log_2_d + 31), so the low 32 bit word is 0 and the high word is floor_log_2_d - 1 + // the dividend here is 2**(floor_log_2_d + 31), so the low 32 bit word + // is 0 and the high word is floor_log_2_d - 1 uint32_t rem, proposed_m; proposed_m = libdivide_64_div_32_to_32(1U << (floor_log_2_d - 1), 0, absD, &rem); const uint32_t e = absD - rem; - /* We are going to start with a power of floor_log_2_d - 1. This works if works if e < 2**floor_log_2_d. */ - if (e < (1U << floor_log_2_d)) { - /* This power works */ + // We are going to start with a power of floor_log_2_d - 1. + // This works if works if e < 2**floor_log_2_d. 
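+        // Illustrative numbers for the non-branchfree case: with d = 5 we get
+        // floor_log_2_d = 2, proposed_m = floor(2^33 / 5) = 1717986918 and rem = 2,
+        // so e = 3 < 2^2 = 4 and the smaller power is used; the stored shift
+        // becomes floor_log_2_d - 1 = 1 and magic becomes 1717986919 (0x66666667).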
+ if (!branchfree && e < (1U << floor_log_2_d)) { + // This power works more = floor_log_2_d - 1; - } - else { - /* We need to go one higher. This should not make proposed_m overflow, but it will make it negative when interpreted as an int32_t. */ + } else { + // We need to go one higher. This should not make proposed_m + // overflow, but it will make it negative when interpreted as an + // int32_t. proposed_m += proposed_m; const uint32_t twice_rem = rem + rem; if (twice_rem >= absD || twice_rem < rem) proposed_m += 1; - more = floor_log_2_d | LIBDIVIDE_ADD_MARKER | (d < 0 ? LIBDIVIDE_NEGATIVE_DIVISOR : 0); //use the general algorithm + more = floor_log_2_d | LIBDIVIDE_ADD_MARKER; } - proposed_m += 1; - result.magic = (d < 0 ? -(int32_t)proposed_m : (int32_t)proposed_m); - result.more = more; + proposed_m += 1; + int32_t magic = (int32_t)proposed_m; + + // Mark if we are negative. Note we only negate the magic number in the + // branchfull case. + if (d < 0) { + more |= LIBDIVIDE_NEGATIVE_DIVISOR; + if (!branchfree) { + magic = -magic; + } + } + + result.more = more; + result.magic = magic; } return result; } +struct libdivide_s32_t libdivide_s32_gen(int32_t d) { + return libdivide_internal_s32_gen(d, 0); +} + +struct libdivide_s32_branchfree_t libdivide_s32_branchfree_gen(int32_t d) { + struct libdivide_s32_t tmp = libdivide_internal_s32_gen(d, 1); + struct libdivide_s32_branchfree_t result = {tmp.magic, tmp.more}; + return result; +} + int32_t libdivide_s32_do(int32_t numer, const struct libdivide_s32_t *denom) { uint8_t more = denom->more; - if (more & LIBDIVIDE_S32_SHIFT_PATH) { - uint8_t shifter = more & LIBDIVIDE_32_SHIFT_MASK; - int32_t q = numer + ((numer >> 31) & ((1 << shifter) - 1)); - q = q >> shifter; - int32_t shiftMask = (int8_t)more >> 7; //must be arithmetic shift and then sign-extend - q = (q ^ shiftMask) - shiftMask; + uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + + if (!denom->magic) { + uint32_t sign = (int8_t)more >> 7; + uint32_t mask = (1U << shift) - 1; + uint32_t uq = numer + ((numer >> 31) & mask); + int32_t q = (int32_t)uq; + q >>= shift; + q = (q ^ sign) - sign; return q; - } - else { - int32_t q = libdivide__mullhi_s32(denom->magic, numer); + } else { + uint32_t uq = (uint32_t)libdivide_mullhi_s32(denom->magic, numer); if (more & LIBDIVIDE_ADD_MARKER) { - int32_t sign = (int8_t)more >> 7; //must be arithmetic shift and then sign extend - q += ((numer ^ sign) - sign); + // must be arithmetic shift and then sign extend + int32_t sign = (int8_t)more >> 7; + // q += (more < 0 ? -numer : numer) + // cast required to avoid UB + uq += ((uint32_t)numer ^ sign) - sign; } - q >>= more & LIBDIVIDE_32_SHIFT_MASK; + int32_t q = (int32_t)uq; + q >>= shift; q += (q < 0); return q; } } -int libdivide_s32_get_algorithm(const struct libdivide_s32_t *denom) { +int32_t libdivide_s32_branchfree_do(int32_t numer, const struct libdivide_s32_branchfree_t *denom) { uint8_t more = denom->more; - int positiveDivisor = ! (more & LIBDIVIDE_NEGATIVE_DIVISOR); - if (more & LIBDIVIDE_S32_SHIFT_PATH) return (positiveDivisor ? 0 : 1); - else if (more & LIBDIVIDE_ADD_MARKER) return (positiveDivisor ? 
2 : 3); - else return 4; -} - -int32_t libdivide_s32_do_alg0(int32_t numer, const struct libdivide_s32_t *denom) { - uint8_t shifter = denom->more & LIBDIVIDE_32_SHIFT_MASK; - int32_t q = numer + ((numer >> 31) & ((1 << shifter) - 1)); - return q >> shifter; -} - -int32_t libdivide_s32_do_alg1(int32_t numer, const struct libdivide_s32_t *denom) { - uint8_t shifter = denom->more & LIBDIVIDE_32_SHIFT_MASK; - int32_t q = numer + ((numer >> 31) & ((1 << shifter) - 1)); - return - (q >> shifter); -} - -int32_t libdivide_s32_do_alg2(int32_t numer, const struct libdivide_s32_t *denom) { - int32_t q = libdivide__mullhi_s32(denom->magic, numer); + uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + // must be arithmetic shift and then sign extend + int32_t sign = (int8_t)more >> 7; + int32_t magic = denom->magic; + int32_t q = libdivide_mullhi_s32(magic, numer); q += numer; - q >>= denom->more & LIBDIVIDE_32_SHIFT_MASK; - q += (q < 0); + + // If q is non-negative, we have nothing to do + // If q is negative, we want to add either (2**shift)-1 if d is a power of + // 2, or (2**shift) if it is not a power of 2 + uint32_t is_power_of_2 = (magic == 0); + uint32_t q_sign = (uint32_t)(q >> 31); + q += q_sign & ((1U << shift) - is_power_of_2); + + // Now arithmetic right shift + q >>= shift; + // Negate if needed + q = (q ^ sign) - sign; + return q; } -int32_t libdivide_s32_do_alg3(int32_t numer, const struct libdivide_s32_t *denom) { - int32_t q = libdivide__mullhi_s32(denom->magic, numer); - q -= numer; - q >>= denom->more & LIBDIVIDE_32_SHIFT_MASK; - q += (q < 0); - return q; -} - -int32_t libdivide_s32_do_alg4(int32_t numer, const struct libdivide_s32_t *denom) { - int32_t q = libdivide__mullhi_s32(denom->magic, numer); - q >>= denom->more & LIBDIVIDE_32_SHIFT_MASK; - q += (q < 0); - return q; -} - -#if LIBDIVIDE_USE_SSE2 -__m128i libdivide_s32_do_vector(__m128i numers, const struct libdivide_s32_t * denom) { +int32_t libdivide_s32_recover(const struct libdivide_s32_t *denom) { uint8_t more = denom->more; - if (more & LIBDIVIDE_S32_SHIFT_PATH) { - uint32_t shifter = more & LIBDIVIDE_32_SHIFT_MASK; - __m128i roundToZeroTweak = _mm_set1_epi32((1 << shifter) - 1); //could use _mm_srli_epi32 with an all -1 register - __m128i q = _mm_add_epi32(numers, _mm_and_si128(_mm_srai_epi32(numers, 31), roundToZeroTweak)); //q = numer + ((numer >> 31) & roundToZeroTweak); - q = _mm_sra_epi32(q, libdivide_u32_to_m128i(shifter)); // q = q >> shifter - __m128i shiftMask = _mm_set1_epi32((int32_t)((int8_t)more >> 7)); //set all bits of shift mask = to the sign bit of more - q = _mm_sub_epi32(_mm_xor_si128(q, shiftMask), shiftMask); //q = (q ^ shiftMask) - shiftMask; - return q; - } - else { - __m128i q = libdivide_mullhi_s32_flat_vector(numers, _mm_set1_epi32(denom->magic)); - if (more & LIBDIVIDE_ADD_MARKER) { - __m128i sign = _mm_set1_epi32((int32_t)(int8_t)more >> 7); //must be arithmetic shift - q = _mm_add_epi32(q, _mm_sub_epi32(_mm_xor_si128(numers, sign), sign)); // q += ((numer ^ sign) - sign); + uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + if (!denom->magic) { + uint32_t absD = 1U << shift; + if (more & LIBDIVIDE_NEGATIVE_DIVISOR) { + absD = -absD; } - q = _mm_sra_epi32(q, libdivide_u32_to_m128i(more & LIBDIVIDE_32_SHIFT_MASK)); //q >>= shift - q = _mm_add_epi32(q, _mm_srli_epi32(q, 31)); // q += (q < 0) - return q; + return (int32_t)absD; + } else { + // Unsigned math is much easier + // We negate the magic number only in the branchfull case, and we don't + // know which case we're in. 
However we have enough information to + // determine the correct sign of the magic number. The divisor was + // negative if LIBDIVIDE_NEGATIVE_DIVISOR is set. If ADD_MARKER is set, + // the magic number's sign is opposite that of the divisor. + // We want to compute the positive magic number. + int negative_divisor = (more & LIBDIVIDE_NEGATIVE_DIVISOR); + int magic_was_negated = (more & LIBDIVIDE_ADD_MARKER) + ? denom->magic > 0 : denom->magic < 0; + + // Handle the power of 2 case (including branchfree) + if (denom->magic == 0) { + int32_t result = 1U << shift; + return negative_divisor ? -result : result; + } + + uint32_t d = (uint32_t)(magic_was_negated ? -denom->magic : denom->magic); + uint64_t n = 1ULL << (32 + shift); // this shift cannot exceed 30 + uint32_t q = (uint32_t)(n / d); + int32_t result = (int32_t)q; + result += 1; + return negative_divisor ? -result : result; } } -__m128i libdivide_s32_do_vector_alg0(__m128i numers, const struct libdivide_s32_t *denom) { - uint8_t shifter = denom->more & LIBDIVIDE_32_SHIFT_MASK; - __m128i roundToZeroTweak = _mm_set1_epi32((1 << shifter) - 1); - __m128i q = _mm_add_epi32(numers, _mm_and_si128(_mm_srai_epi32(numers, 31), roundToZeroTweak)); - return _mm_sra_epi32(q, libdivide_u32_to_m128i(shifter)); +int32_t libdivide_s32_branchfree_recover(const struct libdivide_s32_branchfree_t *denom) { + return libdivide_s32_recover((const struct libdivide_s32_t *)denom); } -__m128i libdivide_s32_do_vector_alg1(__m128i numers, const struct libdivide_s32_t *denom) { - uint8_t shifter = denom->more & LIBDIVIDE_32_SHIFT_MASK; - __m128i roundToZeroTweak = _mm_set1_epi32((1 << shifter) - 1); - __m128i q = _mm_add_epi32(numers, _mm_and_si128(_mm_srai_epi32(numers, 31), roundToZeroTweak)); - return _mm_sub_epi32(_mm_setzero_si128(), _mm_sra_epi32(q, libdivide_u32_to_m128i(shifter))); -} - -__m128i libdivide_s32_do_vector_alg2(__m128i numers, const struct libdivide_s32_t *denom) { - __m128i q = libdivide_mullhi_s32_flat_vector(numers, _mm_set1_epi32(denom->magic)); - q = _mm_add_epi32(q, numers); - q = _mm_sra_epi32(q, libdivide_u32_to_m128i(denom->more & LIBDIVIDE_32_SHIFT_MASK)); - q = _mm_add_epi32(q, _mm_srli_epi32(q, 31)); - return q; -} - -__m128i libdivide_s32_do_vector_alg3(__m128i numers, const struct libdivide_s32_t *denom) { - __m128i q = libdivide_mullhi_s32_flat_vector(numers, _mm_set1_epi32(denom->magic)); - q = _mm_sub_epi32(q, numers); - q = _mm_sra_epi32(q, libdivide_u32_to_m128i(denom->more & LIBDIVIDE_32_SHIFT_MASK)); - q = _mm_add_epi32(q, _mm_srli_epi32(q, 31)); - return q; -} - -__m128i libdivide_s32_do_vector_alg4(__m128i numers, const struct libdivide_s32_t *denom) { - __m128i q = libdivide_mullhi_s32_flat_vector(numers, _mm_set1_epi32(denom->magic)); - q = _mm_sra_epi32(q, libdivide_u32_to_m128i(denom->more)); //q >>= shift - q = _mm_add_epi32(q, _mm_srli_epi32(q, 31)); // q += (q < 0) - return q; -} -#endif - ///////////// SINT64 +static inline struct libdivide_s64_t libdivide_internal_s64_gen(int64_t d, int branchfree) { + if (d == 0) { + LIBDIVIDE_ERROR("divider must be != 0"); + } -struct libdivide_s64_t libdivide_s64_gen(int64_t d) { struct libdivide_s64_t result; - /* If d is a power of 2, or negative a power of 2, we have to use a shift. This is especially important because the magic algorithm fails for -1. To check if d is a power of 2 or its inverse, it suffices to check whether its absolute value has exactly one bit set. 
This works even for INT_MIN, because abs(INT_MIN) == INT_MIN, and INT_MIN has one bit set and is a power of 2. */ - const uint64_t absD = (uint64_t)(d < 0 ? -d : d); //gcc optimizes this to the fast abs trick - if ((absD & (absD - 1)) == 0) { //check if exactly one bit is set, don't care if absD is 0 since that's divide by zero - result.more = libdivide__count_trailing_zeros64(absD) | (d < 0 ? LIBDIVIDE_NEGATIVE_DIVISOR : 0); + // If d is a power of 2, or negative a power of 2, we have to use a shift. + // This is especially important because the magic algorithm fails for -1. + // To check if d is a power of 2 or its inverse, it suffices to check + // whether its absolute value has exactly one bit set. This works even for + // INT_MIN, because abs(INT_MIN) == INT_MIN, and INT_MIN has one bit set + // and is a power of 2. + uint64_t ud = (uint64_t)d; + uint64_t absD = (d < 0) ? -ud : ud; + uint32_t floor_log_2_d = 63 - libdivide_count_leading_zeros64(absD); + // check if exactly one bit is set, + // don't care if absD is 0 since that's divide by zero + if ((absD & (absD - 1)) == 0) { + // Branchfree and non-branchfree cases are the same result.magic = 0; - } - else { - const uint32_t floor_log_2_d = 63 - libdivide__count_leading_zeros64(absD); - - //the dividend here is 2**(floor_log_2_d + 63), so the low 64 bit word is 0 and the high word is floor_log_2_d - 1 + result.more = floor_log_2_d | (d < 0 ? LIBDIVIDE_NEGATIVE_DIVISOR : 0); + } else { + // the dividend here is 2**(floor_log_2_d + 63), so the low 64 bit word + // is 0 and the high word is floor_log_2_d - 1 uint8_t more; uint64_t rem, proposed_m; proposed_m = libdivide_128_div_64_to_64(1ULL << (floor_log_2_d - 1), 0, absD, &rem); const uint64_t e = absD - rem; - /* We are going to start with a power of floor_log_2_d - 1. This works if works if e < 2**floor_log_2_d. */ - if (e < (1ULL << floor_log_2_d)) { - /* This power works */ + // We are going to start with a power of floor_log_2_d - 1. + // This works if works if e < 2**floor_log_2_d. + if (!branchfree && e < (1ULL << floor_log_2_d)) { + // This power works more = floor_log_2_d - 1; - } - else { - /* We need to go one higher. This should not make proposed_m overflow, but it will make it negative when interpreted as an int32_t. */ + } else { + // We need to go one higher. This should not make proposed_m + // overflow, but it will make it negative when interpreted as an + // int32_t. proposed_m += proposed_m; const uint64_t twice_rem = rem + rem; if (twice_rem >= absD || twice_rem < rem) proposed_m += 1; - more = floor_log_2_d | LIBDIVIDE_ADD_MARKER | (d < 0 ? LIBDIVIDE_NEGATIVE_DIVISOR : 0); + // note that we only set the LIBDIVIDE_NEGATIVE_DIVISOR bit if we + // also set ADD_MARKER this is an annoying optimization that + // enables algorithm #4 to avoid the mask. However we always set it + // in the branchfree case + more = floor_log_2_d | LIBDIVIDE_ADD_MARKER; } proposed_m += 1; + int64_t magic = (int64_t)proposed_m; + + // Mark if we are negative + if (d < 0) { + more |= LIBDIVIDE_NEGATIVE_DIVISOR; + if (!branchfree) { + magic = -magic; + } + } + result.more = more; - result.magic = (d < 0 ? 
-(int64_t)proposed_m : (int64_t)proposed_m); + result.magic = magic; } return result; } +struct libdivide_s64_t libdivide_s64_gen(int64_t d) { + return libdivide_internal_s64_gen(d, 0); +} + +struct libdivide_s64_branchfree_t libdivide_s64_branchfree_gen(int64_t d) { + struct libdivide_s64_t tmp = libdivide_internal_s64_gen(d, 1); + struct libdivide_s64_branchfree_t ret = {tmp.magic, tmp.more}; + return ret; +} + int64_t libdivide_s64_do(int64_t numer, const struct libdivide_s64_t *denom) { uint8_t more = denom->more; - int64_t magic = denom->magic; - if (magic == 0) { //shift path - uint32_t shifter = more & LIBDIVIDE_64_SHIFT_MASK; - int64_t q = numer + ((numer >> 63) & ((1LL << shifter) - 1)); - q = q >> shifter; - int64_t shiftMask = (int8_t)more >> 7; //must be arithmetic shift and then sign-extend - q = (q ^ shiftMask) - shiftMask; + uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + + if (!denom->magic) { // shift path + uint64_t mask = (1ULL << shift) - 1; + uint64_t uq = numer + ((numer >> 63) & mask); + int64_t q = (int64_t)uq; + q >>= shift; + // must be arithmetic shift and then sign-extend + int64_t sign = (int8_t)more >> 7; + q = (q ^ sign) - sign; return q; - } - else { - int64_t q = libdivide__mullhi_s64(magic, numer); + } else { + uint64_t uq = (uint64_t)libdivide_mullhi_s64(denom->magic, numer); if (more & LIBDIVIDE_ADD_MARKER) { - int64_t sign = (int8_t)more >> 7; //must be arithmetic shift and then sign extend - q += ((numer ^ sign) - sign); + // must be arithmetic shift and then sign extend + int64_t sign = (int8_t)more >> 7; + // q += (more < 0 ? -numer : numer) + // cast required to avoid UB + uq += ((uint64_t)numer ^ sign) - sign; } - q >>= more & LIBDIVIDE_64_SHIFT_MASK; + int64_t q = (int64_t)uq; + q >>= shift; q += (q < 0); return q; } } - -int libdivide_s64_get_algorithm(const struct libdivide_s64_t *denom) { - uint8_t more = denom->more; - int positiveDivisor = ! (more & LIBDIVIDE_NEGATIVE_DIVISOR); - if (denom->magic == 0) return (positiveDivisor ? 0 : 1); //shift path - else if (more & LIBDIVIDE_ADD_MARKER) return (positiveDivisor ? 
2 : 3); - else return 4; -} - -int64_t libdivide_s64_do_alg0(int64_t numer, const struct libdivide_s64_t *denom) { - uint32_t shifter = denom->more & LIBDIVIDE_64_SHIFT_MASK; - int64_t q = numer + ((numer >> 63) & ((1LL << shifter) - 1)); - return q >> shifter; -} - -int64_t libdivide_s64_do_alg1(int64_t numer, const struct libdivide_s64_t *denom) { - //denom->shifter != -1 && demo->shiftMask != 0 - uint32_t shifter = denom->more & LIBDIVIDE_64_SHIFT_MASK; - int64_t q = numer + ((numer >> 63) & ((1LL << shifter) - 1)); - return - (q >> shifter); -} - -int64_t libdivide_s64_do_alg2(int64_t numer, const struct libdivide_s64_t *denom) { - int64_t q = libdivide__mullhi_s64(denom->magic, numer); - q += numer; - q >>= denom->more & LIBDIVIDE_64_SHIFT_MASK; - q += (q < 0); - return q; -} - -int64_t libdivide_s64_do_alg3(int64_t numer, const struct libdivide_s64_t *denom) { - int64_t q = libdivide__mullhi_s64(denom->magic, numer); - q -= numer; - q >>= denom->more & LIBDIVIDE_64_SHIFT_MASK; - q += (q < 0); - return q; -} - -int64_t libdivide_s64_do_alg4(int64_t numer, const struct libdivide_s64_t *denom) { - int64_t q = libdivide__mullhi_s64(denom->magic, numer); - q >>= denom->more; - q += (q < 0); - return q; -} - - -#if LIBDIVIDE_USE_SSE2 -__m128i libdivide_s64_do_vector(__m128i numers, const struct libdivide_s64_t * denom) { +int64_t libdivide_s64_branchfree_do(int64_t numer, const struct libdivide_s64_branchfree_t *denom) { uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + // must be arithmetic shift and then sign extend + int64_t sign = (int8_t)more >> 7; int64_t magic = denom->magic; - if (magic == 0) { //shift path - uint32_t shifter = more & LIBDIVIDE_64_SHIFT_MASK; - __m128i roundToZeroTweak = libdivide__u64_to_m128((1LL << shifter) - 1); - __m128i q = _mm_add_epi64(numers, _mm_and_si128(libdivide_s64_signbits(numers), roundToZeroTweak)); //q = numer + ((numer >> 63) & roundToZeroTweak); - q = libdivide_s64_shift_right_vector(q, shifter); // q = q >> shifter - __m128i shiftMask = _mm_set1_epi32((int32_t)((int8_t)more >> 7)); - q = _mm_sub_epi64(_mm_xor_si128(q, shiftMask), shiftMask); //q = (q ^ shiftMask) - shiftMask; + int64_t q = libdivide_mullhi_s64(magic, numer); + q += numer; + + // If q is non-negative, we have nothing to do. + // If q is negative, we want to add either (2**shift)-1 if d is a power of + // 2, or (2**shift) if it is not a power of 2. + uint64_t is_power_of_2 = (magic == 0); + uint64_t q_sign = (uint64_t)(q >> 63); + q += q_sign & ((1ULL << shift) - is_power_of_2); + + // Arithmetic right shift + q >>= shift; + // Negate if needed + q = (q ^ sign) - sign; + + return q; +} + +int64_t libdivide_s64_recover(const struct libdivide_s64_t *denom) { + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + if (denom->magic == 0) { // shift path + uint64_t absD = 1ULL << shift; + if (more & LIBDIVIDE_NEGATIVE_DIVISOR) { + absD = -absD; + } + return (int64_t)absD; + } else { + // Unsigned math is much easier + int negative_divisor = (more & LIBDIVIDE_NEGATIVE_DIVISOR); + int magic_was_negated = (more & LIBDIVIDE_ADD_MARKER) + ? denom->magic > 0 : denom->magic < 0; + + uint64_t d = (uint64_t)(magic_was_negated ? 
-denom->magic : denom->magic); + uint64_t n_hi = 1ULL << shift, n_lo = 0; + uint64_t rem_ignored; + uint64_t q = libdivide_128_div_64_to_64(n_hi, n_lo, d, &rem_ignored); + int64_t result = (int64_t)(q + 1); + if (negative_divisor) { + result = -result; + } + return result; + } +} + +int64_t libdivide_s64_branchfree_recover(const struct libdivide_s64_branchfree_t *denom) { + return libdivide_s64_recover((const struct libdivide_s64_t *)denom); +} + +#if defined(LIBDIVIDE_AVX512) + +static inline __m512i libdivide_u32_do_vector(__m512i numers, const struct libdivide_u32_t *denom); +static inline __m512i libdivide_s32_do_vector(__m512i numers, const struct libdivide_s32_t *denom); +static inline __m512i libdivide_u64_do_vector(__m512i numers, const struct libdivide_u64_t *denom); +static inline __m512i libdivide_s64_do_vector(__m512i numers, const struct libdivide_s64_t *denom); + +static inline __m512i libdivide_u32_branchfree_do_vector(__m512i numers, const struct libdivide_u32_branchfree_t *denom); +static inline __m512i libdivide_s32_branchfree_do_vector(__m512i numers, const struct libdivide_s32_branchfree_t *denom); +static inline __m512i libdivide_u64_branchfree_do_vector(__m512i numers, const struct libdivide_u64_branchfree_t *denom); +static inline __m512i libdivide_s64_branchfree_do_vector(__m512i numers, const struct libdivide_s64_branchfree_t *denom); + +//////// Internal Utility Functions + +static inline __m512i libdivide_s64_signbits(__m512i v) {; + return _mm512_srai_epi64(v, 63); +} + +static inline __m512i libdivide_s64_shift_right_vector(__m512i v, int amt) { + return _mm512_srai_epi64(v, amt); +} + +// Here, b is assumed to contain one 32-bit value repeated. +static inline __m512i libdivide_mullhi_u32_vector(__m512i a, __m512i b) { + __m512i hi_product_0Z2Z = _mm512_srli_epi64(_mm512_mul_epu32(a, b), 32); + __m512i a1X3X = _mm512_srli_epi64(a, 32); + __m512i mask = _mm512_set_epi32(-1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0); + __m512i hi_product_Z1Z3 = _mm512_and_si512(_mm512_mul_epu32(a1X3X, b), mask); + return _mm512_or_si512(hi_product_0Z2Z, hi_product_Z1Z3); +} + +// b is one 32-bit value repeated. +static inline __m512i libdivide_mullhi_s32_vector(__m512i a, __m512i b) { + __m512i hi_product_0Z2Z = _mm512_srli_epi64(_mm512_mul_epi32(a, b), 32); + __m512i a1X3X = _mm512_srli_epi64(a, 32); + __m512i mask = _mm512_set_epi32(-1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0); + __m512i hi_product_Z1Z3 = _mm512_and_si512(_mm512_mul_epi32(a1X3X, b), mask); + return _mm512_or_si512(hi_product_0Z2Z, hi_product_Z1Z3); +} + +// Here, y is assumed to contain one 64-bit value repeated. +// https://stackoverflow.com/a/28827013 +static inline __m512i libdivide_mullhi_u64_vector(__m512i x, __m512i y) { + __m512i lomask = _mm512_set1_epi64(0xffffffff); + __m512i xh = _mm512_shuffle_epi32(x, (_MM_PERM_ENUM) 0xB1); + __m512i yh = _mm512_shuffle_epi32(y, (_MM_PERM_ENUM) 0xB1); + __m512i w0 = _mm512_mul_epu32(x, y); + __m512i w1 = _mm512_mul_epu32(x, yh); + __m512i w2 = _mm512_mul_epu32(xh, y); + __m512i w3 = _mm512_mul_epu32(xh, yh); + __m512i w0h = _mm512_srli_epi64(w0, 32); + __m512i s1 = _mm512_add_epi64(w1, w0h); + __m512i s1l = _mm512_and_si512(s1, lomask); + __m512i s1h = _mm512_srli_epi64(s1, 32); + __m512i s2 = _mm512_add_epi64(w2, s1l); + __m512i s2h = _mm512_srli_epi64(s2, 32); + __m512i hi = _mm512_add_epi64(w3, s1h); + hi = _mm512_add_epi64(hi, s2h); + + return hi; +} + +// y is one 64-bit value repeated. 
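+// The signed high multiply below is derived from the unsigned one through the
+// usual two's-complement identity: mullhi_s(x, y) = mullhi_u(x, y)
+// - (x < 0 ? y : 0) - (y < 0 ? x : 0). A scalar sketch of the same idea
+// (illustrative only; given int64_t x, y and the scalar libdivide_mullhi_u64
+// defined earlier in this file):
+//     uint64_t hi = libdivide_mullhi_u64((uint64_t)x, (uint64_t)y);
+//     if (x < 0) hi -= (uint64_t)y;
+//     if (y < 0) hi -= (uint64_t)x;
+//     return (int64_t)hi;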
+static inline __m512i libdivide_mullhi_s64_vector(__m512i x, __m512i y) { + __m512i p = libdivide_mullhi_u64_vector(x, y); + __m512i t1 = _mm512_and_si512(libdivide_s64_signbits(x), y); + __m512i t2 = _mm512_and_si512(libdivide_s64_signbits(y), x); + p = _mm512_sub_epi64(p, t1); + p = _mm512_sub_epi64(p, t2); + return p; +} + +////////// UINT32 + +__m512i libdivide_u32_do_vector(__m512i numers, const struct libdivide_u32_t *denom) { + uint8_t more = denom->more; + if (!denom->magic) { + return _mm512_srli_epi32(numers, more); + } + else { + __m512i q = libdivide_mullhi_u32_vector(numers, _mm512_set1_epi32(denom->magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // uint32_t t = ((numer - q) >> 1) + q; + // return t >> denom->shift; + uint32_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + __m512i t = _mm512_add_epi32(_mm512_srli_epi32(_mm512_sub_epi32(numers, q), 1), q); + return _mm512_srli_epi32(t, shift); + } + else { + return _mm512_srli_epi32(q, more); + } + } +} + +__m512i libdivide_u32_branchfree_do_vector(__m512i numers, const struct libdivide_u32_branchfree_t *denom) { + __m512i q = libdivide_mullhi_u32_vector(numers, _mm512_set1_epi32(denom->magic)); + __m512i t = _mm512_add_epi32(_mm512_srli_epi32(_mm512_sub_epi32(numers, q), 1), q); + return _mm512_srli_epi32(t, denom->more); +} + +////////// UINT64 + +__m512i libdivide_u64_do_vector(__m512i numers, const struct libdivide_u64_t *denom) { + uint8_t more = denom->more; + if (!denom->magic) { + return _mm512_srli_epi64(numers, more); + } + else { + __m512i q = libdivide_mullhi_u64_vector(numers, _mm512_set1_epi64(denom->magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // uint32_t t = ((numer - q) >> 1) + q; + // return t >> denom->shift; + uint32_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + __m512i t = _mm512_add_epi64(_mm512_srli_epi64(_mm512_sub_epi64(numers, q), 1), q); + return _mm512_srli_epi64(t, shift); + } + else { + return _mm512_srli_epi64(q, more); + } + } +} + +__m512i libdivide_u64_branchfree_do_vector(__m512i numers, const struct libdivide_u64_branchfree_t *denom) { + __m512i q = libdivide_mullhi_u64_vector(numers, _mm512_set1_epi64(denom->magic)); + __m512i t = _mm512_add_epi64(_mm512_srli_epi64(_mm512_sub_epi64(numers, q), 1), q); + return _mm512_srli_epi64(t, denom->more); +} + +////////// SINT32 + +__m512i libdivide_s32_do_vector(__m512i numers, const struct libdivide_s32_t *denom) { + uint8_t more = denom->more; + if (!denom->magic) { + uint32_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + uint32_t mask = (1U << shift) - 1; + __m512i roundToZeroTweak = _mm512_set1_epi32(mask); + // q = numer + ((numer >> 31) & roundToZeroTweak); + __m512i q = _mm512_add_epi32(numers, _mm512_and_si512(_mm512_srai_epi32(numers, 31), roundToZeroTweak)); + q = _mm512_srai_epi32(q, shift); + __m512i sign = _mm512_set1_epi32((int8_t)more >> 7); + // q = (q ^ sign) - sign; + q = _mm512_sub_epi32(_mm512_xor_si512(q, sign), sign); return q; } else { - __m128i q = libdivide_mullhi_s64_flat_vector(numers, libdivide__u64_to_m128(magic)); + __m512i q = libdivide_mullhi_s32_vector(numers, _mm512_set1_epi32(denom->magic)); if (more & LIBDIVIDE_ADD_MARKER) { - __m128i sign = _mm_set1_epi32((int32_t)((int8_t)more >> 7)); //must be arithmetic shift - q = _mm_add_epi64(q, _mm_sub_epi64(_mm_xor_si128(numers, sign), sign)); // q += ((numer ^ sign) - sign); + // must be arithmetic shift + __m512i sign = _mm512_set1_epi32((int8_t)more >> 7); + // q += ((numer ^ sign) - sign); + q = _mm512_add_epi32(q, _mm512_sub_epi32(_mm512_xor_si512(numers, sign), sign)); } - q 
= libdivide_s64_shift_right_vector(q, more & LIBDIVIDE_64_SHIFT_MASK); //q >>= denom->mult_path.shift + // q >>= shift + q = _mm512_srai_epi32(q, more & LIBDIVIDE_32_SHIFT_MASK); + q = _mm512_add_epi32(q, _mm512_srli_epi32(q, 31)); // q += (q < 0) + return q; + } +} + +__m512i libdivide_s32_branchfree_do_vector(__m512i numers, const struct libdivide_s32_branchfree_t *denom) { + int32_t magic = denom->magic; + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + // must be arithmetic shift + __m512i sign = _mm512_set1_epi32((int8_t)more >> 7); + __m512i q = libdivide_mullhi_s32_vector(numers, _mm512_set1_epi32(magic)); + q = _mm512_add_epi32(q, numers); // q += numers + + // If q is non-negative, we have nothing to do + // If q is negative, we want to add either (2**shift)-1 if d is + // a power of 2, or (2**shift) if it is not a power of 2 + uint32_t is_power_of_2 = (magic == 0); + __m512i q_sign = _mm512_srai_epi32(q, 31); // q_sign = q >> 31 + __m512i mask = _mm512_set1_epi32((1U << shift) - is_power_of_2); + q = _mm512_add_epi32(q, _mm512_and_si512(q_sign, mask)); // q = q + (q_sign & mask) + q = _mm512_srai_epi32(q, shift); // q >>= shift + q = _mm512_sub_epi32(_mm512_xor_si512(q, sign), sign); // q = (q ^ sign) - sign + return q; +} + +////////// SINT64 + +__m512i libdivide_s64_do_vector(__m512i numers, const struct libdivide_s64_t *denom) { + uint8_t more = denom->more; + int64_t magic = denom->magic; + if (magic == 0) { // shift path + uint32_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + uint64_t mask = (1ULL << shift) - 1; + __m512i roundToZeroTweak = _mm512_set1_epi64(mask); + // q = numer + ((numer >> 63) & roundToZeroTweak); + __m512i q = _mm512_add_epi64(numers, _mm512_and_si512(libdivide_s64_signbits(numers), roundToZeroTweak)); + q = libdivide_s64_shift_right_vector(q, shift); + __m512i sign = _mm512_set1_epi32((int8_t)more >> 7); + // q = (q ^ sign) - sign; + q = _mm512_sub_epi64(_mm512_xor_si512(q, sign), sign); + return q; + } + else { + __m512i q = libdivide_mullhi_s64_vector(numers, _mm512_set1_epi64(magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // must be arithmetic shift + __m512i sign = _mm512_set1_epi32((int8_t)more >> 7); + // q += ((numer ^ sign) - sign); + q = _mm512_add_epi64(q, _mm512_sub_epi64(_mm512_xor_si512(numers, sign), sign)); + } + // q >>= denom->mult_path.shift + q = libdivide_s64_shift_right_vector(q, more & LIBDIVIDE_64_SHIFT_MASK); + q = _mm512_add_epi64(q, _mm512_srli_epi64(q, 63)); // q += (q < 0) + return q; + } +} + +__m512i libdivide_s64_branchfree_do_vector(__m512i numers, const struct libdivide_s64_branchfree_t *denom) { + int64_t magic = denom->magic; + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + // must be arithmetic shift + __m512i sign = _mm512_set1_epi32((int8_t)more >> 7); + + // libdivide_mullhi_s64(numers, magic); + __m512i q = libdivide_mullhi_s64_vector(numers, _mm512_set1_epi64(magic)); + q = _mm512_add_epi64(q, numers); // q += numers + + // If q is non-negative, we have nothing to do. + // If q is negative, we want to add either (2**shift)-1 if d is + // a power of 2, or (2**shift) if it is not a power of 2. 
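+    // Why the adjustment: the arithmetic shift below rounds toward negative
+    // infinity, while C division truncates toward zero. Illustrative power-of-2
+    // case: -7 >> 2 == -2, but (-7 + (4 - 1)) >> 2 == -1 == -7 / 4. In the
+    // magic-number case the estimate of a negative quotient lands one step
+    // below the truncated result, which is why the full 2**shift is added.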
+ uint32_t is_power_of_2 = (magic == 0); + __m512i q_sign = libdivide_s64_signbits(q); // q_sign = q >> 63 + __m512i mask = _mm512_set1_epi64((1ULL << shift) - is_power_of_2); + q = _mm512_add_epi64(q, _mm512_and_si512(q_sign, mask)); // q = q + (q_sign & mask) + q = libdivide_s64_shift_right_vector(q, shift); // q >>= shift + q = _mm512_sub_epi64(_mm512_xor_si512(q, sign), sign); // q = (q ^ sign) - sign + return q; +} + +#elif defined(LIBDIVIDE_AVX2) + +static inline __m256i libdivide_u32_do_vector(__m256i numers, const struct libdivide_u32_t *denom); +static inline __m256i libdivide_s32_do_vector(__m256i numers, const struct libdivide_s32_t *denom); +static inline __m256i libdivide_u64_do_vector(__m256i numers, const struct libdivide_u64_t *denom); +static inline __m256i libdivide_s64_do_vector(__m256i numers, const struct libdivide_s64_t *denom); + +static inline __m256i libdivide_u32_branchfree_do_vector(__m256i numers, const struct libdivide_u32_branchfree_t *denom); +static inline __m256i libdivide_s32_branchfree_do_vector(__m256i numers, const struct libdivide_s32_branchfree_t *denom); +static inline __m256i libdivide_u64_branchfree_do_vector(__m256i numers, const struct libdivide_u64_branchfree_t *denom); +static inline __m256i libdivide_s64_branchfree_do_vector(__m256i numers, const struct libdivide_s64_branchfree_t *denom); + +//////// Internal Utility Functions + +// Implementation of _mm256_srai_epi64(v, 63) (from AVX512). +static inline __m256i libdivide_s64_signbits(__m256i v) { + __m256i hiBitsDuped = _mm256_shuffle_epi32(v, _MM_SHUFFLE(3, 3, 1, 1)); + __m256i signBits = _mm256_srai_epi32(hiBitsDuped, 31); + return signBits; +} + +// Implementation of _mm256_srai_epi64 (from AVX512). +static inline __m256i libdivide_s64_shift_right_vector(__m256i v, int amt) { + const int b = 64 - amt; + __m256i m = _mm256_set1_epi64x(1ULL << (b - 1)); + __m256i x = _mm256_srli_epi64(v, amt); + __m256i result = _mm256_sub_epi64(_mm256_xor_si256(x, m), m); + return result; +} + +// Here, b is assumed to contain one 32-bit value repeated. +static inline __m256i libdivide_mullhi_u32_vector(__m256i a, __m256i b) { + __m256i hi_product_0Z2Z = _mm256_srli_epi64(_mm256_mul_epu32(a, b), 32); + __m256i a1X3X = _mm256_srli_epi64(a, 32); + __m256i mask = _mm256_set_epi32(-1, 0, -1, 0, -1, 0, -1, 0); + __m256i hi_product_Z1Z3 = _mm256_and_si256(_mm256_mul_epu32(a1X3X, b), mask); + return _mm256_or_si256(hi_product_0Z2Z, hi_product_Z1Z3); +} + +// b is one 32-bit value repeated. +static inline __m256i libdivide_mullhi_s32_vector(__m256i a, __m256i b) { + __m256i hi_product_0Z2Z = _mm256_srli_epi64(_mm256_mul_epi32(a, b), 32); + __m256i a1X3X = _mm256_srli_epi64(a, 32); + __m256i mask = _mm256_set_epi32(-1, 0, -1, 0, -1, 0, -1, 0); + __m256i hi_product_Z1Z3 = _mm256_and_si256(_mm256_mul_epi32(a1X3X, b), mask); + return _mm256_or_si256(hi_product_0Z2Z, hi_product_Z1Z3); +} + +// Here, y is assumed to contain one 64-bit value repeated. 
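+// The 64-bit high multiply below is built from 32x32 partial products: writing
+// x = 2^32*x1 + x0 and y = 2^32*y1 + y0 gives
+//     x*y = 2^64*(x1*y1) + 2^32*(x1*y0 + x0*y1) + x0*y0,
+// so the high 64 bits are x1*y1 plus the carries out of the two middle terms;
+// w0..w3 are exactly these four partial products.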
+// https://stackoverflow.com/a/28827013 +static inline __m256i libdivide_mullhi_u64_vector(__m256i x, __m256i y) { + __m256i lomask = _mm256_set1_epi64x(0xffffffff); + __m256i xh = _mm256_shuffle_epi32(x, 0xB1); // x0l, x0h, x1l, x1h + __m256i yh = _mm256_shuffle_epi32(y, 0xB1); // y0l, y0h, y1l, y1h + __m256i w0 = _mm256_mul_epu32(x, y); // x0l*y0l, x1l*y1l + __m256i w1 = _mm256_mul_epu32(x, yh); // x0l*y0h, x1l*y1h + __m256i w2 = _mm256_mul_epu32(xh, y); // x0h*y0l, x1h*y0l + __m256i w3 = _mm256_mul_epu32(xh, yh); // x0h*y0h, x1h*y1h + __m256i w0h = _mm256_srli_epi64(w0, 32); + __m256i s1 = _mm256_add_epi64(w1, w0h); + __m256i s1l = _mm256_and_si256(s1, lomask); + __m256i s1h = _mm256_srli_epi64(s1, 32); + __m256i s2 = _mm256_add_epi64(w2, s1l); + __m256i s2h = _mm256_srli_epi64(s2, 32); + __m256i hi = _mm256_add_epi64(w3, s1h); + hi = _mm256_add_epi64(hi, s2h); + + return hi; +} + +// y is one 64-bit value repeated. +static inline __m256i libdivide_mullhi_s64_vector(__m256i x, __m256i y) { + __m256i p = libdivide_mullhi_u64_vector(x, y); + __m256i t1 = _mm256_and_si256(libdivide_s64_signbits(x), y); + __m256i t2 = _mm256_and_si256(libdivide_s64_signbits(y), x); + p = _mm256_sub_epi64(p, t1); + p = _mm256_sub_epi64(p, t2); + return p; +} + +////////// UINT32 + +__m256i libdivide_u32_do_vector(__m256i numers, const struct libdivide_u32_t *denom) { + uint8_t more = denom->more; + if (!denom->magic) { + return _mm256_srli_epi32(numers, more); + } + else { + __m256i q = libdivide_mullhi_u32_vector(numers, _mm256_set1_epi32(denom->magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // uint32_t t = ((numer - q) >> 1) + q; + // return t >> denom->shift; + uint32_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + __m256i t = _mm256_add_epi32(_mm256_srli_epi32(_mm256_sub_epi32(numers, q), 1), q); + return _mm256_srli_epi32(t, shift); + } + else { + return _mm256_srli_epi32(q, more); + } + } +} + +__m256i libdivide_u32_branchfree_do_vector(__m256i numers, const struct libdivide_u32_branchfree_t *denom) { + __m256i q = libdivide_mullhi_u32_vector(numers, _mm256_set1_epi32(denom->magic)); + __m256i t = _mm256_add_epi32(_mm256_srli_epi32(_mm256_sub_epi32(numers, q), 1), q); + return _mm256_srli_epi32(t, denom->more); +} + +////////// UINT64 + +__m256i libdivide_u64_do_vector(__m256i numers, const struct libdivide_u64_t *denom) { + uint8_t more = denom->more; + if (!denom->magic) { + return _mm256_srli_epi64(numers, more); + } + else { + __m256i q = libdivide_mullhi_u64_vector(numers, _mm256_set1_epi64x(denom->magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // uint32_t t = ((numer - q) >> 1) + q; + // return t >> denom->shift; + uint32_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + __m256i t = _mm256_add_epi64(_mm256_srli_epi64(_mm256_sub_epi64(numers, q), 1), q); + return _mm256_srli_epi64(t, shift); + } + else { + return _mm256_srli_epi64(q, more); + } + } +} + +__m256i libdivide_u64_branchfree_do_vector(__m256i numers, const struct libdivide_u64_branchfree_t *denom) { + __m256i q = libdivide_mullhi_u64_vector(numers, _mm256_set1_epi64x(denom->magic)); + __m256i t = _mm256_add_epi64(_mm256_srli_epi64(_mm256_sub_epi64(numers, q), 1), q); + return _mm256_srli_epi64(t, denom->more); +} + +////////// SINT32 + +__m256i libdivide_s32_do_vector(__m256i numers, const struct libdivide_s32_t *denom) { + uint8_t more = denom->more; + if (!denom->magic) { + uint32_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + uint32_t mask = (1U << shift) - 1; + __m256i roundToZeroTweak = _mm256_set1_epi32(mask); + // q = numer + ((numer >> 
31) & roundToZeroTweak); + __m256i q = _mm256_add_epi32(numers, _mm256_and_si256(_mm256_srai_epi32(numers, 31), roundToZeroTweak)); + q = _mm256_srai_epi32(q, shift); + __m256i sign = _mm256_set1_epi32((int8_t)more >> 7); + // q = (q ^ sign) - sign; + q = _mm256_sub_epi32(_mm256_xor_si256(q, sign), sign); + return q; + } + else { + __m256i q = libdivide_mullhi_s32_vector(numers, _mm256_set1_epi32(denom->magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // must be arithmetic shift + __m256i sign = _mm256_set1_epi32((int8_t)more >> 7); + // q += ((numer ^ sign) - sign); + q = _mm256_add_epi32(q, _mm256_sub_epi32(_mm256_xor_si256(numers, sign), sign)); + } + // q >>= shift + q = _mm256_srai_epi32(q, more & LIBDIVIDE_32_SHIFT_MASK); + q = _mm256_add_epi32(q, _mm256_srli_epi32(q, 31)); // q += (q < 0) + return q; + } +} + +__m256i libdivide_s32_branchfree_do_vector(__m256i numers, const struct libdivide_s32_branchfree_t *denom) { + int32_t magic = denom->magic; + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + // must be arithmetic shift + __m256i sign = _mm256_set1_epi32((int8_t)more >> 7); + __m256i q = libdivide_mullhi_s32_vector(numers, _mm256_set1_epi32(magic)); + q = _mm256_add_epi32(q, numers); // q += numers + + // If q is non-negative, we have nothing to do + // If q is negative, we want to add either (2**shift)-1 if d is + // a power of 2, or (2**shift) if it is not a power of 2 + uint32_t is_power_of_2 = (magic == 0); + __m256i q_sign = _mm256_srai_epi32(q, 31); // q_sign = q >> 31 + __m256i mask = _mm256_set1_epi32((1U << shift) - is_power_of_2); + q = _mm256_add_epi32(q, _mm256_and_si256(q_sign, mask)); // q = q + (q_sign & mask) + q = _mm256_srai_epi32(q, shift); // q >>= shift + q = _mm256_sub_epi32(_mm256_xor_si256(q, sign), sign); // q = (q ^ sign) - sign + return q; +} + +////////// SINT64 + +__m256i libdivide_s64_do_vector(__m256i numers, const struct libdivide_s64_t *denom) { + uint8_t more = denom->more; + int64_t magic = denom->magic; + if (magic == 0) { // shift path + uint32_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + uint64_t mask = (1ULL << shift) - 1; + __m256i roundToZeroTweak = _mm256_set1_epi64x(mask); + // q = numer + ((numer >> 63) & roundToZeroTweak); + __m256i q = _mm256_add_epi64(numers, _mm256_and_si256(libdivide_s64_signbits(numers), roundToZeroTweak)); + q = libdivide_s64_shift_right_vector(q, shift); + __m256i sign = _mm256_set1_epi32((int8_t)more >> 7); + // q = (q ^ sign) - sign; + q = _mm256_sub_epi64(_mm256_xor_si256(q, sign), sign); + return q; + } + else { + __m256i q = libdivide_mullhi_s64_vector(numers, _mm256_set1_epi64x(magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // must be arithmetic shift + __m256i sign = _mm256_set1_epi32((int8_t)more >> 7); + // q += ((numer ^ sign) - sign); + q = _mm256_add_epi64(q, _mm256_sub_epi64(_mm256_xor_si256(numers, sign), sign)); + } + // q >>= denom->mult_path.shift + q = libdivide_s64_shift_right_vector(q, more & LIBDIVIDE_64_SHIFT_MASK); + q = _mm256_add_epi64(q, _mm256_srli_epi64(q, 63)); // q += (q < 0) + return q; + } +} + +__m256i libdivide_s64_branchfree_do_vector(__m256i numers, const struct libdivide_s64_branchfree_t *denom) { + int64_t magic = denom->magic; + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + // must be arithmetic shift + __m256i sign = _mm256_set1_epi32((int8_t)more >> 7); + + // libdivide_mullhi_s64(numers, magic); + __m256i q = libdivide_mullhi_s64_vector(numers, _mm256_set1_epi64x(magic)); + q = _mm256_add_epi64(q, 
numers); // q += numers + + // If q is non-negative, we have nothing to do. + // If q is negative, we want to add either (2**shift)-1 if d is + // a power of 2, or (2**shift) if it is not a power of 2. + uint32_t is_power_of_2 = (magic == 0); + __m256i q_sign = libdivide_s64_signbits(q); // q_sign = q >> 63 + __m256i mask = _mm256_set1_epi64x((1ULL << shift) - is_power_of_2); + q = _mm256_add_epi64(q, _mm256_and_si256(q_sign, mask)); // q = q + (q_sign & mask) + q = libdivide_s64_shift_right_vector(q, shift); // q >>= shift + q = _mm256_sub_epi64(_mm256_xor_si256(q, sign), sign); // q = (q ^ sign) - sign + return q; +} + +#elif defined(LIBDIVIDE_SSE2) + +static inline __m128i libdivide_u32_do_vector(__m128i numers, const struct libdivide_u32_t *denom); +static inline __m128i libdivide_s32_do_vector(__m128i numers, const struct libdivide_s32_t *denom); +static inline __m128i libdivide_u64_do_vector(__m128i numers, const struct libdivide_u64_t *denom); +static inline __m128i libdivide_s64_do_vector(__m128i numers, const struct libdivide_s64_t *denom); + +static inline __m128i libdivide_u32_branchfree_do_vector(__m128i numers, const struct libdivide_u32_branchfree_t *denom); +static inline __m128i libdivide_s32_branchfree_do_vector(__m128i numers, const struct libdivide_s32_branchfree_t *denom); +static inline __m128i libdivide_u64_branchfree_do_vector(__m128i numers, const struct libdivide_u64_branchfree_t *denom); +static inline __m128i libdivide_s64_branchfree_do_vector(__m128i numers, const struct libdivide_s64_branchfree_t *denom); + +//////// Internal Utility Functions + +// Implementation of _mm_srai_epi64(v, 63) (from AVX512). +static inline __m128i libdivide_s64_signbits(__m128i v) { + __m128i hiBitsDuped = _mm_shuffle_epi32(v, _MM_SHUFFLE(3, 3, 1, 1)); + __m128i signBits = _mm_srai_epi32(hiBitsDuped, 31); + return signBits; +} + +// Implementation of _mm_srai_epi64 (from AVX512). +static inline __m128i libdivide_s64_shift_right_vector(__m128i v, int amt) { + const int b = 64 - amt; + __m128i m = _mm_set1_epi64x(1ULL << (b - 1)); + __m128i x = _mm_srli_epi64(v, amt); + __m128i result = _mm_sub_epi64(_mm_xor_si128(x, m), m); + return result; +} + +// Here, b is assumed to contain one 32-bit value repeated. +static inline __m128i libdivide_mullhi_u32_vector(__m128i a, __m128i b) { + __m128i hi_product_0Z2Z = _mm_srli_epi64(_mm_mul_epu32(a, b), 32); + __m128i a1X3X = _mm_srli_epi64(a, 32); + __m128i mask = _mm_set_epi32(-1, 0, -1, 0); + __m128i hi_product_Z1Z3 = _mm_and_si128(_mm_mul_epu32(a1X3X, b), mask); + return _mm_or_si128(hi_product_0Z2Z, hi_product_Z1Z3); +} + +// SSE2 does not have a signed multiplication instruction, but we can convert +// unsigned to signed pretty efficiently. Again, b is just a 32 bit value +// repeated four times. +static inline __m128i libdivide_mullhi_s32_vector(__m128i a, __m128i b) { + __m128i p = libdivide_mullhi_u32_vector(a, b); + // t1 = (a >> 31) & y, arithmetic shift + __m128i t1 = _mm_and_si128(_mm_srai_epi32(a, 31), b); + __m128i t2 = _mm_and_si128(_mm_srai_epi32(b, 31), a); + p = _mm_sub_epi32(p, t1); + p = _mm_sub_epi32(p, t2); + return p; +} + +// Here, y is assumed to contain one 64-bit value repeated. 
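+// A note on the intrinsics used below: _mm_mul_epu32 multiplies only the low
+// 32-bit element of each 64-bit lane (yielding a full 64-bit product), and
+// _mm_shuffle_epi32(v, 0xB1) swaps the two 32-bit halves of every 64-bit lane,
+// which is how all four 32x32 partial products of the schoolbook multiply are
+// formed; SSE2 has no native 64x64 high-multiply instruction.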
+// https://stackoverflow.com/a/28827013 +static inline __m128i libdivide_mullhi_u64_vector(__m128i x, __m128i y) { + __m128i lomask = _mm_set1_epi64x(0xffffffff); + __m128i xh = _mm_shuffle_epi32(x, 0xB1); // x0l, x0h, x1l, x1h + __m128i yh = _mm_shuffle_epi32(y, 0xB1); // y0l, y0h, y1l, y1h + __m128i w0 = _mm_mul_epu32(x, y); // x0l*y0l, x1l*y1l + __m128i w1 = _mm_mul_epu32(x, yh); // x0l*y0h, x1l*y1h + __m128i w2 = _mm_mul_epu32(xh, y); // x0h*y0l, x1h*y0l + __m128i w3 = _mm_mul_epu32(xh, yh); // x0h*y0h, x1h*y1h + __m128i w0h = _mm_srli_epi64(w0, 32); + __m128i s1 = _mm_add_epi64(w1, w0h); + __m128i s1l = _mm_and_si128(s1, lomask); + __m128i s1h = _mm_srli_epi64(s1, 32); + __m128i s2 = _mm_add_epi64(w2, s1l); + __m128i s2h = _mm_srli_epi64(s2, 32); + __m128i hi = _mm_add_epi64(w3, s1h); + hi = _mm_add_epi64(hi, s2h); + + return hi; +} + +// y is one 64-bit value repeated. +static inline __m128i libdivide_mullhi_s64_vector(__m128i x, __m128i y) { + __m128i p = libdivide_mullhi_u64_vector(x, y); + __m128i t1 = _mm_and_si128(libdivide_s64_signbits(x), y); + __m128i t2 = _mm_and_si128(libdivide_s64_signbits(y), x); + p = _mm_sub_epi64(p, t1); + p = _mm_sub_epi64(p, t2); + return p; +} + +////////// UINT32 + +__m128i libdivide_u32_do_vector(__m128i numers, const struct libdivide_u32_t *denom) { + uint8_t more = denom->more; + if (!denom->magic) { + return _mm_srli_epi32(numers, more); + } + else { + __m128i q = libdivide_mullhi_u32_vector(numers, _mm_set1_epi32(denom->magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // uint32_t t = ((numer - q) >> 1) + q; + // return t >> denom->shift; + uint32_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + __m128i t = _mm_add_epi32(_mm_srli_epi32(_mm_sub_epi32(numers, q), 1), q); + return _mm_srli_epi32(t, shift); + } + else { + return _mm_srli_epi32(q, more); + } + } +} + +__m128i libdivide_u32_branchfree_do_vector(__m128i numers, const struct libdivide_u32_branchfree_t *denom) { + __m128i q = libdivide_mullhi_u32_vector(numers, _mm_set1_epi32(denom->magic)); + __m128i t = _mm_add_epi32(_mm_srli_epi32(_mm_sub_epi32(numers, q), 1), q); + return _mm_srli_epi32(t, denom->more); +} + +////////// UINT64 + +__m128i libdivide_u64_do_vector(__m128i numers, const struct libdivide_u64_t *denom) { + uint8_t more = denom->more; + if (!denom->magic) { + return _mm_srli_epi64(numers, more); + } + else { + __m128i q = libdivide_mullhi_u64_vector(numers, _mm_set1_epi64x(denom->magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // uint32_t t = ((numer - q) >> 1) + q; + // return t >> denom->shift; + uint32_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + __m128i t = _mm_add_epi64(_mm_srli_epi64(_mm_sub_epi64(numers, q), 1), q); + return _mm_srli_epi64(t, shift); + } + else { + return _mm_srli_epi64(q, more); + } + } +} + +__m128i libdivide_u64_branchfree_do_vector(__m128i numers, const struct libdivide_u64_branchfree_t *denom) { + __m128i q = libdivide_mullhi_u64_vector(numers, _mm_set1_epi64x(denom->magic)); + __m128i t = _mm_add_epi64(_mm_srli_epi64(_mm_sub_epi64(numers, q), 1), q); + return _mm_srli_epi64(t, denom->more); +} + +////////// SINT32 + +__m128i libdivide_s32_do_vector(__m128i numers, const struct libdivide_s32_t *denom) { + uint8_t more = denom->more; + if (!denom->magic) { + uint32_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + uint32_t mask = (1U << shift) - 1; + __m128i roundToZeroTweak = _mm_set1_epi32(mask); + // q = numer + ((numer >> 31) & roundToZeroTweak); + __m128i q = _mm_add_epi32(numers, _mm_and_si128(_mm_srai_epi32(numers, 31), roundToZeroTweak)); + q = 
_mm_srai_epi32(q, shift); + __m128i sign = _mm_set1_epi32((int8_t)more >> 7); + // q = (q ^ sign) - sign; + q = _mm_sub_epi32(_mm_xor_si128(q, sign), sign); + return q; + } + else { + __m128i q = libdivide_mullhi_s32_vector(numers, _mm_set1_epi32(denom->magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // must be arithmetic shift + __m128i sign = _mm_set1_epi32((int8_t)more >> 7); + // q += ((numer ^ sign) - sign); + q = _mm_add_epi32(q, _mm_sub_epi32(_mm_xor_si128(numers, sign), sign)); + } + // q >>= shift + q = _mm_srai_epi32(q, more & LIBDIVIDE_32_SHIFT_MASK); + q = _mm_add_epi32(q, _mm_srli_epi32(q, 31)); // q += (q < 0) + return q; + } +} + +__m128i libdivide_s32_branchfree_do_vector(__m128i numers, const struct libdivide_s32_branchfree_t *denom) { + int32_t magic = denom->magic; + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + // must be arithmetic shift + __m128i sign = _mm_set1_epi32((int8_t)more >> 7); + __m128i q = libdivide_mullhi_s32_vector(numers, _mm_set1_epi32(magic)); + q = _mm_add_epi32(q, numers); // q += numers + + // If q is non-negative, we have nothing to do + // If q is negative, we want to add either (2**shift)-1 if d is + // a power of 2, or (2**shift) if it is not a power of 2 + uint32_t is_power_of_2 = (magic == 0); + __m128i q_sign = _mm_srai_epi32(q, 31); // q_sign = q >> 31 + __m128i mask = _mm_set1_epi32((1U << shift) - is_power_of_2); + q = _mm_add_epi32(q, _mm_and_si128(q_sign, mask)); // q = q + (q_sign & mask) + q = _mm_srai_epi32(q, shift); // q >>= shift + q = _mm_sub_epi32(_mm_xor_si128(q, sign), sign); // q = (q ^ sign) - sign + return q; +} + +////////// SINT64 + +__m128i libdivide_s64_do_vector(__m128i numers, const struct libdivide_s64_t *denom) { + uint8_t more = denom->more; + int64_t magic = denom->magic; + if (magic == 0) { // shift path + uint32_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + uint64_t mask = (1ULL << shift) - 1; + __m128i roundToZeroTweak = _mm_set1_epi64x(mask); + // q = numer + ((numer >> 63) & roundToZeroTweak); + __m128i q = _mm_add_epi64(numers, _mm_and_si128(libdivide_s64_signbits(numers), roundToZeroTweak)); + q = libdivide_s64_shift_right_vector(q, shift); + __m128i sign = _mm_set1_epi32((int8_t)more >> 7); + // q = (q ^ sign) - sign; + q = _mm_sub_epi64(_mm_xor_si128(q, sign), sign); + return q; + } + else { + __m128i q = libdivide_mullhi_s64_vector(numers, _mm_set1_epi64x(magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // must be arithmetic shift + __m128i sign = _mm_set1_epi32((int8_t)more >> 7); + // q += ((numer ^ sign) - sign); + q = _mm_add_epi64(q, _mm_sub_epi64(_mm_xor_si128(numers, sign), sign)); + } + // q >>= denom->mult_path.shift + q = libdivide_s64_shift_right_vector(q, more & LIBDIVIDE_64_SHIFT_MASK); q = _mm_add_epi64(q, _mm_srli_epi64(q, 63)); // q += (q < 0) return q; } } -__m128i libdivide_s64_do_vector_alg0(__m128i numers, const struct libdivide_s64_t *denom) { - uint32_t shifter = denom->more & LIBDIVIDE_64_SHIFT_MASK; - __m128i roundToZeroTweak = libdivide__u64_to_m128((1LL << shifter) - 1); - __m128i q = _mm_add_epi64(numers, _mm_and_si128(libdivide_s64_signbits(numers), roundToZeroTweak)); - q = libdivide_s64_shift_right_vector(q, shifter); - return q; -} +__m128i libdivide_s64_branchfree_do_vector(__m128i numers, const struct libdivide_s64_branchfree_t *denom) { + int64_t magic = denom->magic; + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + // must be arithmetic shift + __m128i sign = _mm_set1_epi32((int8_t)more >> 7); -__m128i 
libdivide_s64_do_vector_alg1(__m128i numers, const struct libdivide_s64_t *denom) { - uint32_t shifter = denom->more & LIBDIVIDE_64_SHIFT_MASK; - __m128i roundToZeroTweak = libdivide__u64_to_m128((1LL << shifter) - 1); - __m128i q = _mm_add_epi64(numers, _mm_and_si128(libdivide_s64_signbits(numers), roundToZeroTweak)); - q = libdivide_s64_shift_right_vector(q, shifter); - return _mm_sub_epi64(_mm_setzero_si128(), q); -} + // libdivide_mullhi_s64(numers, magic); + __m128i q = libdivide_mullhi_s64_vector(numers, _mm_set1_epi64x(magic)); + q = _mm_add_epi64(q, numers); // q += numers -__m128i libdivide_s64_do_vector_alg2(__m128i numers, const struct libdivide_s64_t *denom) { - __m128i q = libdivide_mullhi_s64_flat_vector(numers, libdivide__u64_to_m128(denom->magic)); - q = _mm_add_epi64(q, numers); - q = libdivide_s64_shift_right_vector(q, denom->more & LIBDIVIDE_64_SHIFT_MASK); - q = _mm_add_epi64(q, _mm_srli_epi64(q, 63)); // q += (q < 0) - return q; -} - -__m128i libdivide_s64_do_vector_alg3(__m128i numers, const struct libdivide_s64_t *denom) { - __m128i q = libdivide_mullhi_s64_flat_vector(numers, libdivide__u64_to_m128(denom->magic)); - q = _mm_sub_epi64(q, numers); - q = libdivide_s64_shift_right_vector(q, denom->more & LIBDIVIDE_64_SHIFT_MASK); - q = _mm_add_epi64(q, _mm_srli_epi64(q, 63)); // q += (q < 0) - return q; -} - -__m128i libdivide_s64_do_vector_alg4(__m128i numers, const struct libdivide_s64_t *denom) { - __m128i q = libdivide_mullhi_s64_flat_vector(numers, libdivide__u64_to_m128(denom->magic)); - q = libdivide_s64_shift_right_vector(q, denom->more); - q = _mm_add_epi64(q, _mm_srli_epi64(q, 63)); + // If q is non-negative, we have nothing to do. + // If q is negative, we want to add either (2**shift)-1 if d is + // a power of 2, or (2**shift) if it is not a power of 2. + uint32_t is_power_of_2 = (magic == 0); + __m128i q_sign = libdivide_s64_signbits(q); // q_sign = q >> 63 + __m128i mask = _mm_set1_epi64x((1ULL << shift) - is_power_of_2); + q = _mm_add_epi64(q, _mm_and_si128(q_sign, mask)); // q = q + (q_sign & mask) + q = libdivide_s64_shift_right_vector(q, shift); // q >>= shift + q = _mm_sub_epi64(_mm_xor_si128(q, sign), sign); // q = (q ^ sign) - sign return q; } @@ -1204,228 +1930,143 @@ __m128i libdivide_s64_do_vector_alg4(__m128i numers, const struct libdivide_s64_ #ifdef __cplusplus -/* The C++ template design here is a total mess. This needs to be fixed by someone better at templates than I. The current design is: - -- The base is a template divider_base that takes the integer type, the libdivide struct, a generating function, a get algorithm function, a do function, and either a do vector function or a dummy int. -- The base has storage for the libdivide struct. This is the only storage (so the C++ class should be no larger than the libdivide struct). - -- Above that, there's divider_mid. This is an empty struct by default, but it is specialized against our four int types. divider_mid contains a template struct algo, that contains a typedef for a specialization of divider_base. struct algo is specialized to take an "algorithm number," where -1 means to use the general algorithm. - -- Publicly we have class divider, which inherits from divider_mid::algo. This also take an algorithm number, which defaults to -1 (the general algorithm). -- divider has a operator / which allows you to use a divider as the divisor in a quotient expression. 
- -*/ - -namespace libdivide_internal { - -#if LIBDIVIDE_USE_SSE2 -#define MAYBE_VECTOR(x) x -#define MAYBE_VECTOR_PARAM __m128i vector_func(__m128i, const DenomType *) -#else -#define MAYBE_VECTOR(x) 0 -#define MAYBE_VECTOR_PARAM int vector_func -#endif - - /* Some bogus unswitch functions for unsigned types so the same (presumably templated) code can work for both signed and unsigned. */ - uint32_t crash_u32(uint32_t, const libdivide_u32_t *) { abort(); } - uint64_t crash_u64(uint64_t, const libdivide_u64_t *) { abort(); } -#ifdef __APPLE__ - UInt64 crash_u64(UInt64, const libdivide_u64_t *) { abort(); } -#endif -#if LIBDIVIDE_USE_SSE2 - __m128i crash_u32_vector(__m128i, const libdivide_u32_t *) { abort(); } - __m128i crash_u64_vector(__m128i, const libdivide_u64_t *) { abort(); } -#endif - - template - class divider_base { - public: - DenomType denom; - divider_base(IntType d) : denom(gen_func(d)) { } - divider_base(const DenomType & d) : denom(d) { } - - IntType perform_divide(IntType val) const { return do_func(val, &denom); } -#if LIBDIVIDE_USE_SSE2 - __m128i perform_divide_vector(__m128i val) const { return vector_func(val, &denom); } -#endif - - int get_algorithm() const { return get_algo(&denom); } - }; - - - template struct divider_mid { }; - - template<> struct divider_mid { - typedef uint32_t IntType; - typedef struct libdivide_u32_t DenomType; - template struct denom { - typedef divider_base divider; - }; - - template struct algo { }; - template struct algo<-1, J> { typedef denom::divider divider; }; - template struct algo<0, J> { typedef denom::divider divider; }; - template struct algo<1, J> { typedef denom::divider divider; }; - template struct algo<2, J> { typedef denom::divider divider; }; - - /* Define two more bogus ones so that the same (templated, presumably) code can handle both signed and unsigned */ - template struct algo<3, J> { typedef denom::divider divider; }; - template struct algo<4, J> { typedef denom::divider divider; }; - - }; - - template<> struct divider_mid { - typedef int32_t IntType; - typedef struct libdivide_s32_t DenomType; - template struct denom { - typedef divider_base divider; - }; - - - template struct algo { }; - template struct algo<-1, J> { typedef denom::divider divider; }; - template struct algo<0, J> { typedef denom::divider divider; }; - template struct algo<1, J> { typedef denom::divider divider; }; - template struct algo<2, J> { typedef denom::divider divider; }; - template struct algo<3, J> { typedef denom::divider divider; }; - template struct algo<4, J> { typedef denom::divider divider; }; - - }; - -#ifdef __APPLE__ - template<> struct divider_mid { - typedef Int64 IntType; - typedef struct libdivide_s64_t DenomType; - template struct denom { - typedef divider_base divider; - }; - - template struct algo { }; - template struct algo<-1, J> { typedef denom::divider divider; }; - template struct algo<0, J> { typedef denom::divider divider; }; - template struct algo<1, J> { typedef denom::divider divider; }; - template struct algo<2, J> { typedef denom::divider divider; }; - template struct algo<3, J> { typedef denom::divider divider; }; - template struct algo<4, J> { typedef denom::divider divider; }; - }; - - template<> struct divider_mid { - typedef UInt64 IntType; - typedef struct libdivide_u64_t DenomType; - template struct denom { - typedef divider_base divider; - }; - - template struct algo { }; - template struct algo<-1, J> { typedef denom::divider divider; }; - template struct algo<0, J> { typedef denom::divider divider; }; - 
template struct algo<1, J> { typedef denom::divider divider; }; - template struct algo<2, J> { typedef denom::divider divider; }; - - /* Define two more bogus ones so that the same (templated, presumably) code can handle both signed and unsigned */ - template struct algo<3, J> { typedef denom::divider divider; }; - template struct algo<4, J> { typedef denom::divider divider; }; - - - }; -#endif - - template<> struct divider_mid { - typedef uint64_t IntType; - typedef struct libdivide_u64_t DenomType; - template struct denom { - typedef divider_base divider; - }; - - template struct algo { }; - template struct algo<-1, J> { typedef denom::divider divider; }; - template struct algo<0, J> { typedef denom::divider divider; }; - template struct algo<1, J> { typedef denom::divider divider; }; - template struct algo<2, J> { typedef denom::divider divider; }; - - /* Define two more bogus ones so that the same (templated, presumably) code can handle both signed and unsigned */ - template struct algo<3, J> { typedef denom::divider divider; }; - template struct algo<4, J> { typedef denom::divider divider; }; - - - }; - - template<> struct divider_mid { - typedef int64_t IntType; - typedef struct libdivide_s64_t DenomType; - template struct denom { - typedef divider_base divider; - }; - - template struct algo { }; - template struct algo<-1, J> { typedef denom::divider divider; }; - template struct algo<0, J> { typedef denom::divider divider; }; - template struct algo<1, J> { typedef denom::divider divider; }; - template struct algo<2, J> { typedef denom::divider divider; }; - template struct algo<3, J> { typedef denom::divider divider; }; - template struct algo<4, J> { typedef denom::divider divider; }; - }; - -} - -template -class divider -{ - private: - typename libdivide_internal::divider_mid::template algo::divider sub; - template friend divider unswitch(const divider & d); - divider(const typename libdivide_internal::divider_mid::DenomType & denom) : sub(denom) { } - - public: - - /* Ordinary constructor, that takes the divisor as a parameter. */ - divider(T n) : sub(n) { } - - /* Default constructor, that divides by 1 */ - divider() : sub(1) { } - - /* Divides the parameter by the divisor, returning the quotient */ - T perform_divide(T val) const { return sub.perform_divide(val); } - -#if LIBDIVIDE_USE_SSE2 - /* Treats the vector as either two or four packed values (depending on the size), and divides each of them by the divisor, returning the packed quotients. */ - __m128i perform_divide_vector(__m128i val) const { return sub.perform_divide_vector(val); } -#endif - - /* Returns the index of algorithm, for use in the unswitch function */ - int get_algorithm() const { return sub.get_algorithm(); } // returns the algorithm for unswitching - - /* operator== */ - bool operator==(const divider & him) const { return sub.denom.magic == him.sub.denom.magic && sub.denom.more == him.sub.denom.more; } - - bool operator!=(const divider & him) const { return ! (*this == him); } +// The C++ divider class is templated on both an integer type +// (like uint64_t) and an algorithm type. +// * BRANCHFULL is the default algorithm type. +// * BRANCHFREE is the branchfree algorithm type. +enum { + BRANCHFULL, + BRANCHFREE }; -/* Returns a divider specialized for the given algorithm. */ -template -divider unswitch(const divider & d) { return divider(d.sub.denom); } - -/* Overload of the / operator for scalar division. 
*/ -template -int_type operator/(int_type numer, const divider & denom) { - return denom.perform_divide(numer); -} - -#if LIBDIVIDE_USE_SSE2 -/* Overload of the / operator for vector division. */ -template -__m128i operator/(__m128i numer, const divider & denom) { - return denom.perform_divide_vector(numer); -} +#if defined(LIBDIVIDE_AVX512) + #define LIBDIVIDE_VECTOR_TYPE __m512i +#elif defined(LIBDIVIDE_AVX2) + #define LIBDIVIDE_VECTOR_TYPE __m256i +#elif defined(LIBDIVIDE_SSE2) + #define LIBDIVIDE_VECTOR_TYPE __m128i #endif - -#endif //__cplusplus - -#endif //LIBDIVIDE_HEADER_ONLY -#ifdef __cplusplus -} //close namespace libdivide -} //close anonymous namespace +#if !defined(LIBDIVIDE_VECTOR_TYPE) + #define LIBDIVIDE_DIVIDE_VECTOR(ALGO) +#else + #define LIBDIVIDE_DIVIDE_VECTOR(ALGO) \ + LIBDIVIDE_VECTOR_TYPE divide(LIBDIVIDE_VECTOR_TYPE n) const { \ + return libdivide_##ALGO##_do_vector(n, &denom); \ + } #endif -#pragma GCC diagnostic pop +// The DISPATCHER_GEN() macro generates C++ methods (for the given integer +// and algorithm types) that redirect to libdivide's C API. +#define DISPATCHER_GEN(T, ALGO) \ + libdivide_##ALGO##_t denom; \ + dispatcher() { } \ + dispatcher(T d) \ + : denom(libdivide_##ALGO##_gen(d)) \ + { } \ + T divide(T n) const { \ + return libdivide_##ALGO##_do(n, &denom); \ + } \ + LIBDIVIDE_DIVIDE_VECTOR(ALGO) \ + T recover() const { \ + return libdivide_##ALGO##_recover(&denom); \ + } + +// The dispatcher selects a specific division algorithm for a given +// type and ALGO using partial template specialization. +template struct dispatcher { }; + +template<> struct dispatcher { DISPATCHER_GEN(int32_t, s32) }; +template<> struct dispatcher { DISPATCHER_GEN(int32_t, s32_branchfree) }; +template<> struct dispatcher { DISPATCHER_GEN(uint32_t, u32) }; +template<> struct dispatcher { DISPATCHER_GEN(uint32_t, u32_branchfree) }; +template<> struct dispatcher { DISPATCHER_GEN(int64_t, s64) }; +template<> struct dispatcher { DISPATCHER_GEN(int64_t, s64_branchfree) }; +template<> struct dispatcher { DISPATCHER_GEN(uint64_t, u64) }; +template<> struct dispatcher { DISPATCHER_GEN(uint64_t, u64_branchfree) }; + +// This is the main divider class for use by the user (C++ API). +// The actual division algorithm is selected using the dispatcher struct +// based on the integer and algorithm template parameters. +template +class divider { +public: + // We leave the default constructor empty so that creating + // an array of dividers and then initializing them + // later doesn't slow us down. + divider() { } + + // Constructor that takes the divisor as a parameter + divider(T d) : div(d) { } + + // Divides n by the divisor + T divide(T n) const { + return div.divide(n); + } + + // Recovers the divisor, returns the value that was + // used to initialize this divider object. + T recover() const { + return div.recover(); + } + + bool operator==(const divider& other) const { + return div.denom.magic == other.denom.magic && + div.denom.more == other.denom.more; + } + + bool operator!=(const divider& other) const { + return !(*this == other); + } + +#if defined(LIBDIVIDE_VECTOR_TYPE) + // Treats the vector as packed integer values with the same type as + // the divider (e.g. s32, u32, s64, u64) and divides each of + // them by the divider, returning the packed quotients. 
+ LIBDIVIDE_VECTOR_TYPE divide(LIBDIVIDE_VECTOR_TYPE n) const { + return div.divide(n); + } +#endif + +private: + // Storage for the actual divisor + dispatcher::value, + std::is_signed::value, sizeof(T), ALGO> div; +}; + +// Overload of operator / for scalar division +template +T operator/(T n, const divider& div) { + return div.divide(n); +} + +// Overload of operator /= for scalar division +template +T& operator/=(T& n, const divider& div) { + n = div.divide(n); + return n; +} + +#if defined(LIBDIVIDE_VECTOR_TYPE) + // Overload of operator / for vector division + template + LIBDIVIDE_VECTOR_TYPE operator/(LIBDIVIDE_VECTOR_TYPE n, const divider& div) { + return div.divide(n); + } + // Overload of operator /= for vector division + template + LIBDIVIDE_VECTOR_TYPE& operator/=(LIBDIVIDE_VECTOR_TYPE& n, const divider& div) { + n = div.divide(n); + return n; + } +#endif + +// libdivdie::branchfree_divider +template +using branchfree_divider = divider; + +} // namespace libdivide + +#endif // __cplusplus + +#endif // LIBDIVIDE_H diff --git a/contrib/libgsasl b/contrib/libgsasl index 42ef2068704..140fb582505 160000 --- a/contrib/libgsasl +++ b/contrib/libgsasl @@ -1 +1 @@ -Subproject commit 42ef20687042637252e64df1934b6d47771486d1 +Subproject commit 140fb58250588c8323285b75fcf127c4adc33dfa diff --git a/contrib/libpcg-random/README.md b/contrib/libpcg-random/README.md deleted file mode 100644 index c6e579cc2cc..00000000000 --- a/contrib/libpcg-random/README.md +++ /dev/null @@ -1,52 +0,0 @@ -# PCG Random Number Generation, C++ Edition - -[PCG-Random website]: http://www.pcg-random.org - -This code provides an implementation of the PCG family of random number -generators, which are fast, statistically excellent, and offer a number of -useful features. - -Full details can be found at the [PCG-Random website]. This version -of the code provides many family members -- if you just want one -simple generator, you may prefer the minimal C version of the library. - -There are two kinds of generator, normal generators and extended generators. -Extended generators provide *k* dimensional equidistribution and can perform -party tricks, but generally speaking most people only need the normal -generators. - -There are two ways to access the generators, using a convenience typedef -or by using the underlying templates directly (similar to C++11's `std::mt19937` typedef vs its `std::mersenne_twister_engine` template). For most users, the convenience typedef is what you want, and probably you're fine with `pcg32` for 32-bit numbers. If you want 64-bit numbers, either use `pcg64` (or, if you're on a 32-bit system, making 64 bits from two calls to `pcg32_k2` may be faster). - -## Documentation and Examples - -Visit [PCG-Random website] for information on how to use this library, or look -at the sample code in the `sample` directory -- hopefully it should be fairly -self explanatory. - -## Building - -The code is written in C++11, as an include-only library (i.e., there is -nothing you need to build). There are some provided demo programs and tests -however. On a Unix-style system (e.g., Linux, Mac OS X) you should be able -to just type - - make - -To build the demo programs. - -## Testing - -Run - - make test - -## Directory Structure - -The directories are arranged as follows: - -* `include` -- contains `pcg_random.hpp` and supporting include files -* `test-high` -- test code for the high-level API where the functions have - shorter, less scary-looking names. 
-* `sample` -- sample code, some similar to the code in `test-high` but more - human readable, some other examples too diff --git a/contrib/libunwind b/contrib/libunwind index 68cffcbbd18..27026ef4a9c 160000 --- a/contrib/libunwind +++ b/contrib/libunwind @@ -1 +1 @@ -Subproject commit 68cffcbbd1840e14664a5f7f19c5e43f65c525b5 +Subproject commit 27026ef4a9c6c8cc956d1d131c4d794e24096981 diff --git a/contrib/llvm b/contrib/llvm index 5dab18f4861..3d6c7e91676 160000 --- a/contrib/llvm +++ b/contrib/llvm @@ -1 +1 @@ -Subproject commit 5dab18f4861677548b8f7f6815f49384480ecead +Subproject commit 3d6c7e916760b395908f28a1c885c8334d4fa98b diff --git a/contrib/lz4 b/contrib/lz4 index 3d67671559b..f39b79fb029 160000 --- a/contrib/lz4 +++ b/contrib/lz4 @@ -1 +1 @@ -Subproject commit 3d67671559be723b0912bbee2fcd2eb14783a721 +Subproject commit f39b79fb02962a1cd880bbdecb6dffba4f754a11 diff --git a/contrib/lz4-cmake/CMakeLists.txt b/contrib/lz4-cmake/CMakeLists.txt index 25cceaa4574..856389395ca 100644 --- a/contrib/lz4-cmake/CMakeLists.txt +++ b/contrib/lz4-cmake/CMakeLists.txt @@ -12,5 +12,6 @@ add_library (lz4 ${LIBRARY_DIR}/lz4hc.h) target_compile_definitions(lz4 PUBLIC LZ4_DISABLE_DEPRECATE_WARNINGS=1) +target_compile_options(lz4 PRIVATE -fno-sanitize=undefined) target_include_directories(lz4 PUBLIC ${LIBRARY_DIR}) diff --git a/contrib/msgpack-c b/contrib/msgpack-c new file mode 160000 index 00000000000..46684265d50 --- /dev/null +++ b/contrib/msgpack-c @@ -0,0 +1 @@ +Subproject commit 46684265d50b5d1b062d4c5c428ba08462844b1d diff --git a/contrib/openldap b/contrib/openldap new file mode 160000 index 00000000000..34b9ba94b30 --- /dev/null +++ b/contrib/openldap @@ -0,0 +1 @@ +Subproject commit 34b9ba94b30319ed6389a4e001d057f7983fe363 diff --git a/contrib/openldap-cmake/CMakeLists.txt b/contrib/openldap-cmake/CMakeLists.txt new file mode 100644 index 00000000000..b0a5f4048ff --- /dev/null +++ b/contrib/openldap-cmake/CMakeLists.txt @@ -0,0 +1,202 @@ +set(OPENLDAP_SOURCE_DIR ${ClickHouse_SOURCE_DIR}/contrib/openldap) + +# How were these lists generated? +# I compiled the original OpenLDAP with its original build system and copied the list of source files from the build commands.
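+#
+# A rough sketch of that process (hypothetical commands, shown only for
+# illustration; the exact invocations used are not recorded here): build
+# upstream OpenLDAP with its own build system, capture the output, and collect
+# the *.c files that appear in the compile commands, e.g.
+#     ./configure && make 2>&1 | tee build.log
+#     grep -oE '[a-zA-Z0-9_./-]+\.c' build.log | sort -u
+# then prefix each path with ${OPENLDAP_SOURCE_DIR} as in the lists below.
+# A file(GLOB ...) over libraries/liblber and libraries/libldap would be
+# shorter, but the explicit lists keep the set of compiled sources identical
+# to what the upstream build actually selects.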
+ +set(_libs_type SHARED) +if(OPENLDAP_USE_STATIC_LIBS) + set(_libs_type STATIC) +endif() + +set(OPENLDAP_VERSION_STRING "2.5.X") + +macro(mkversion _lib_name) + add_custom_command( + OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/${_lib_name}-version.c + COMMAND ${CMAKE_COMMAND} -E env bash -c "${OPENLDAP_SOURCE_DIR}/build/mkversion -v '${OPENLDAP_VERSION_STRING}' liblber.la > ${CMAKE_CURRENT_BINARY_DIR}/${_lib_name}-version.c" + MAIN_DEPENDENCY ${OPENLDAP_SOURCE_DIR}/build/mkversion + WORKING_DIRECTORY ${OPENLDAP_SOURCE_DIR} + VERBATIM + ) +endmacro() + +string(TOLOWER "${CMAKE_SYSTEM_NAME}" _system_name) +string(TOLOWER "${CMAKE_SYSTEM_PROCESSOR}" _system_processor) + +if( + "${_system_processor}" STREQUAL "amd64" OR + "${_system_processor}" STREQUAL "x64" +) + set(_system_processor "x86_64") +elseif( + "${_system_processor}" STREQUAL "arm64" +) + set (_system_processor "aarch64") +endif() + +set(_extra_build_dir "${CMAKE_CURRENT_SOURCE_DIR}/${_system_name}_${_system_processor}") + +set(_lber_srcs + ${OPENLDAP_SOURCE_DIR}/libraries/liblber/assert.c + ${OPENLDAP_SOURCE_DIR}/libraries/liblber/decode.c + ${OPENLDAP_SOURCE_DIR}/libraries/liblber/encode.c + ${OPENLDAP_SOURCE_DIR}/libraries/liblber/io.c + ${OPENLDAP_SOURCE_DIR}/libraries/liblber/bprint.c + ${OPENLDAP_SOURCE_DIR}/libraries/liblber/debug.c + ${OPENLDAP_SOURCE_DIR}/libraries/liblber/memory.c + ${OPENLDAP_SOURCE_DIR}/libraries/liblber/options.c + ${OPENLDAP_SOURCE_DIR}/libraries/liblber/sockbuf.c + ${OPENLDAP_SOURCE_DIR}/libraries/liblber/stdio.c +) + +mkversion(lber) + +add_library(lber ${_libs_type} + ${_lber_srcs} + ${CMAKE_CURRENT_BINARY_DIR}/lber-version.c +) + +target_link_libraries(lber + PRIVATE ${OPENSSL_LIBRARIES} +) + +target_include_directories(lber + PRIVATE ${_extra_build_dir}/include + PRIVATE ${OPENLDAP_SOURCE_DIR}/include + PRIVATE ${OPENLDAP_SOURCE_DIR}/libraries/liblber + PRIVATE ${OPENSSL_INCLUDE_DIR} +) + +target_compile_definitions(lber + PRIVATE LBER_LIBRARY +) + +set(_ldap_srcs + ${OPENLDAP_SOURCE_DIR}/libraries/libldap/bind.c + ${OPENLDAP_SOURCE_DIR}/libraries/libldap/open.c + ${OPENLDAP_SOURCE_DIR}/libraries/libldap/result.c + ${OPENLDAP_SOURCE_DIR}/libraries/libldap/error.c + ${OPENLDAP_SOURCE_DIR}/libraries/libldap/compare.c + ${OPENLDAP_SOURCE_DIR}/libraries/libldap/search.c + ${OPENLDAP_SOURCE_DIR}/libraries/libldap/controls.c + ${OPENLDAP_SOURCE_DIR}/libraries/libldap/messages.c + ${OPENLDAP_SOURCE_DIR}/libraries/libldap/references.c + ${OPENLDAP_SOURCE_DIR}/libraries/libldap/extended.c + ${OPENLDAP_SOURCE_DIR}/libraries/libldap/cyrus.c + ${OPENLDAP_SOURCE_DIR}/libraries/libldap/modify.c + ${OPENLDAP_SOURCE_DIR}/libraries/libldap/add.c + ${OPENLDAP_SOURCE_DIR}/libraries/libldap/modrdn.c + ${OPENLDAP_SOURCE_DIR}/libraries/libldap/delete.c + ${OPENLDAP_SOURCE_DIR}/libraries/libldap/abandon.c + ${OPENLDAP_SOURCE_DIR}/libraries/libldap/sasl.c + ${OPENLDAP_SOURCE_DIR}/libraries/libldap/sbind.c + ${OPENLDAP_SOURCE_DIR}/libraries/libldap/unbind.c + ${OPENLDAP_SOURCE_DIR}/libraries/libldap/cancel.c + ${OPENLDAP_SOURCE_DIR}/libraries/libldap/filter.c + ${OPENLDAP_SOURCE_DIR}/libraries/libldap/free.c + ${OPENLDAP_SOURCE_DIR}/libraries/libldap/sort.c + ${OPENLDAP_SOURCE_DIR}/libraries/libldap/passwd.c + ${OPENLDAP_SOURCE_DIR}/libraries/libldap/whoami.c + ${OPENLDAP_SOURCE_DIR}/libraries/libldap/vc.c + ${OPENLDAP_SOURCE_DIR}/libraries/libldap/getdn.c + ${OPENLDAP_SOURCE_DIR}/libraries/libldap/getentry.c + ${OPENLDAP_SOURCE_DIR}/libraries/libldap/getattr.c + ${OPENLDAP_SOURCE_DIR}/libraries/libldap/getvalues.c + 
${OPENLDAP_SOURCE_DIR}/libraries/libldap/addentry.c + ${OPENLDAP_SOURCE_DIR}/libraries/libldap/request.c + ${OPENLDAP_SOURCE_DIR}/libraries/libldap/os-ip.c + ${OPENLDAP_SOURCE_DIR}/libraries/libldap/url.c + ${OPENLDAP_SOURCE_DIR}/libraries/libldap/pagectrl.c + ${OPENLDAP_SOURCE_DIR}/libraries/libldap/sortctrl.c + ${OPENLDAP_SOURCE_DIR}/libraries/libldap/vlvctrl.c + ${OPENLDAP_SOURCE_DIR}/libraries/libldap/init.c + ${OPENLDAP_SOURCE_DIR}/libraries/libldap/options.c + ${OPENLDAP_SOURCE_DIR}/libraries/libldap/print.c + ${OPENLDAP_SOURCE_DIR}/libraries/libldap/string.c + ${OPENLDAP_SOURCE_DIR}/libraries/libldap/util-int.c + ${OPENLDAP_SOURCE_DIR}/libraries/libldap/schema.c + ${OPENLDAP_SOURCE_DIR}/libraries/libldap/charray.c + ${OPENLDAP_SOURCE_DIR}/libraries/libldap/os-local.c + ${OPENLDAP_SOURCE_DIR}/libraries/libldap/dnssrv.c + ${OPENLDAP_SOURCE_DIR}/libraries/libldap/utf-8.c + ${OPENLDAP_SOURCE_DIR}/libraries/libldap/utf-8-conv.c + ${OPENLDAP_SOURCE_DIR}/libraries/libldap/tls2.c + ${OPENLDAP_SOURCE_DIR}/libraries/libldap/tls_o.c + ${OPENLDAP_SOURCE_DIR}/libraries/libldap/tls_g.c + ${OPENLDAP_SOURCE_DIR}/libraries/libldap/turn.c + ${OPENLDAP_SOURCE_DIR}/libraries/libldap/ppolicy.c + ${OPENLDAP_SOURCE_DIR}/libraries/libldap/dds.c + ${OPENLDAP_SOURCE_DIR}/libraries/libldap/txn.c + ${OPENLDAP_SOURCE_DIR}/libraries/libldap/ldap_sync.c + ${OPENLDAP_SOURCE_DIR}/libraries/libldap/stctrl.c + ${OPENLDAP_SOURCE_DIR}/libraries/libldap/assertion.c + ${OPENLDAP_SOURCE_DIR}/libraries/libldap/deref.c + ${OPENLDAP_SOURCE_DIR}/libraries/libldap/ldifutil.c + ${OPENLDAP_SOURCE_DIR}/libraries/libldap/ldif.c + ${OPENLDAP_SOURCE_DIR}/libraries/libldap/fetch.c + ${OPENLDAP_SOURCE_DIR}/libraries/libldap/lbase64.c + ${OPENLDAP_SOURCE_DIR}/libraries/libldap/msctrl.c + ${OPENLDAP_SOURCE_DIR}/libraries/libldap/psearchctrl.c +) + +mkversion(ldap) + +add_library(ldap ${_libs_type} + ${_ldap_srcs} + ${CMAKE_CURRENT_BINARY_DIR}/ldap-version.c +) + +target_link_libraries(ldap + PRIVATE lber + PRIVATE ${OPENSSL_LIBRARIES} +) + +target_include_directories(ldap + PRIVATE ${_extra_build_dir}/include + PRIVATE ${OPENLDAP_SOURCE_DIR}/include + PRIVATE ${OPENLDAP_SOURCE_DIR}/libraries/libldap + PRIVATE ${OPENSSL_INCLUDE_DIR} +) + +target_compile_definitions(ldap + PRIVATE LDAP_LIBRARY +) + +set(_ldap_r_specific_srcs + ${OPENLDAP_SOURCE_DIR}/libraries/libldap_r/threads.c + ${OPENLDAP_SOURCE_DIR}/libraries/libldap_r/rdwr.c + ${OPENLDAP_SOURCE_DIR}/libraries/libldap_r/tpool.c + ${OPENLDAP_SOURCE_DIR}/libraries/libldap_r/rq.c + ${OPENLDAP_SOURCE_DIR}/libraries/libldap_r/thr_posix.c + ${OPENLDAP_SOURCE_DIR}/libraries/libldap_r/thr_thr.c + ${OPENLDAP_SOURCE_DIR}/libraries/libldap_r/thr_nt.c + ${OPENLDAP_SOURCE_DIR}/libraries/libldap_r/thr_pth.c + ${OPENLDAP_SOURCE_DIR}/libraries/libldap_r/thr_stub.c + ${OPENLDAP_SOURCE_DIR}/libraries/libldap_r/thr_debug.c +) + +mkversion(ldap_r) + +add_library(ldap_r ${_libs_type} + ${_ldap_r_specific_srcs} + ${_ldap_srcs} + ${CMAKE_CURRENT_BINARY_DIR}/ldap_r-version.c +) + +target_link_libraries(ldap_r + PRIVATE lber + PRIVATE ${OPENSSL_LIBRARIES} +) + +target_include_directories(ldap_r + PRIVATE ${_extra_build_dir}/include + PRIVATE ${OPENLDAP_SOURCE_DIR}/include + PRIVATE ${OPENLDAP_SOURCE_DIR}/libraries/libldap_r + PRIVATE ${OPENLDAP_SOURCE_DIR}/libraries/libldap + PRIVATE ${OPENSSL_INCLUDE_DIR} +) + +target_compile_definitions(ldap_r + PRIVATE LDAP_R_COMPILE + PRIVATE LDAP_LIBRARY +) diff --git a/contrib/openldap-cmake/darwin_x86_64/include/lber_types.h 
b/contrib/openldap-cmake/darwin_x86_64/include/lber_types.h new file mode 100644 index 00000000000..dbd59430527 --- /dev/null +++ b/contrib/openldap-cmake/darwin_x86_64/include/lber_types.h @@ -0,0 +1,63 @@ +/* include/lber_types.h. Generated from lber_types.hin by configure. */ +/* $OpenLDAP$ */ +/* This work is part of OpenLDAP Software . + * + * Copyright 1998-2020 The OpenLDAP Foundation. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted only as authorized by the OpenLDAP + * Public License. + * + * A copy of this license is available in file LICENSE in the + * top-level directory of the distribution or, alternatively, at + * . + */ + +/* + * LBER types + */ + +#ifndef _LBER_TYPES_H +#define _LBER_TYPES_H + +#include + +LDAP_BEGIN_DECL + +/* LBER boolean, enum, integers (32 bits or larger) */ +#define LBER_INT_T int + +/* LBER tags (32 bits or larger) */ +#define LBER_TAG_T long + +/* LBER socket descriptor */ +#define LBER_SOCKET_T int + +/* LBER lengths (32 bits or larger) */ +#define LBER_LEN_T long + +/* ------------------------------------------------------------ */ + +/* booleans, enumerations, and integers */ +typedef LBER_INT_T ber_int_t; + +/* signed and unsigned versions */ +typedef signed LBER_INT_T ber_sint_t; +typedef unsigned LBER_INT_T ber_uint_t; + +/* tags */ +typedef unsigned LBER_TAG_T ber_tag_t; + +/* "socket" descriptors */ +typedef LBER_SOCKET_T ber_socket_t; + +/* lengths */ +typedef unsigned LBER_LEN_T ber_len_t; + +/* signed lengths */ +typedef signed LBER_LEN_T ber_slen_t; + +LDAP_END_DECL + +#endif /* _LBER_TYPES_H */ diff --git a/contrib/openldap-cmake/darwin_x86_64/include/ldap_config.h b/contrib/openldap-cmake/darwin_x86_64/include/ldap_config.h new file mode 100644 index 00000000000..89f7b40b884 --- /dev/null +++ b/contrib/openldap-cmake/darwin_x86_64/include/ldap_config.h @@ -0,0 +1,74 @@ +/* include/ldap_config.h. Generated from ldap_config.hin by configure. */ +/* $OpenLDAP$ */ +/* This work is part of OpenLDAP Software . + * + * Copyright 1998-2020 The OpenLDAP Foundation. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted only as authorized by the OpenLDAP + * Public License. + * + * A copy of this license is available in file LICENSE in the + * top-level directory of the distribution or, alternatively, at + * . + */ + +/* + * This file works in conjunction with OpenLDAP configure system. + * If you do no like the values below, adjust your configure options. + */ + +#ifndef _LDAP_CONFIG_H +#define _LDAP_CONFIG_H + +/* directory separator */ +#ifndef LDAP_DIRSEP +#ifndef _WIN32 +#define LDAP_DIRSEP "/" +#else +#define LDAP_DIRSEP "\\" +#endif +#endif + +/* directory for temporary files */ +#if defined(_WIN32) +# define LDAP_TMPDIR "C:\\." 
/* we don't have much of a choice */ +#elif defined( _P_tmpdir ) +# define LDAP_TMPDIR _P_tmpdir +#elif defined( P_tmpdir ) +# define LDAP_TMPDIR P_tmpdir +#elif defined( _PATH_TMPDIR ) +# define LDAP_TMPDIR _PATH_TMPDIR +#else +# define LDAP_TMPDIR LDAP_DIRSEP "tmp" +#endif + +/* directories */ +#ifndef LDAP_BINDIR +#define LDAP_BINDIR "/tmp/ldap-prefix/bin" +#endif +#ifndef LDAP_SBINDIR +#define LDAP_SBINDIR "/tmp/ldap-prefix/sbin" +#endif +#ifndef LDAP_DATADIR +#define LDAP_DATADIR "/tmp/ldap-prefix/share/openldap" +#endif +#ifndef LDAP_SYSCONFDIR +#define LDAP_SYSCONFDIR "/tmp/ldap-prefix/etc/openldap" +#endif +#ifndef LDAP_LIBEXECDIR +#define LDAP_LIBEXECDIR "/tmp/ldap-prefix/libexec" +#endif +#ifndef LDAP_MODULEDIR +#define LDAP_MODULEDIR "/tmp/ldap-prefix/libexec/openldap" +#endif +#ifndef LDAP_RUNDIR +#define LDAP_RUNDIR "/tmp/ldap-prefix/var" +#endif +#ifndef LDAP_LOCALEDIR +#define LDAP_LOCALEDIR "" +#endif + + +#endif /* _LDAP_CONFIG_H */ diff --git a/contrib/openldap-cmake/darwin_x86_64/include/ldap_features.h b/contrib/openldap-cmake/darwin_x86_64/include/ldap_features.h new file mode 100644 index 00000000000..f0cc7c3626f --- /dev/null +++ b/contrib/openldap-cmake/darwin_x86_64/include/ldap_features.h @@ -0,0 +1,61 @@ +/* include/ldap_features.h. Generated from ldap_features.hin by configure. */ +/* $OpenLDAP$ */ +/* This work is part of OpenLDAP Software . + * + * Copyright 1998-2020 The OpenLDAP Foundation. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted only as authorized by the OpenLDAP + * Public License. + * + * A copy of this license is available in file LICENSE in the + * top-level directory of the distribution or, alternatively, at + * . + */ + +/* + * LDAP Features + */ + +#ifndef _LDAP_FEATURES_H +#define _LDAP_FEATURES_H 1 + +/* OpenLDAP API version macros */ +#define LDAP_VENDOR_VERSION 20501 +#define LDAP_VENDOR_VERSION_MAJOR 2 +#define LDAP_VENDOR_VERSION_MINOR 5 +#define LDAP_VENDOR_VERSION_PATCH X + +/* +** WORK IN PROGRESS! +** +** OpenLDAP reentrancy/thread-safeness should be dynamically +** checked using ldap_get_option(). +** +** The -lldap implementation is not thread-safe. +** +** The -lldap_r implementation is: +** LDAP_API_FEATURE_THREAD_SAFE (basic thread safety) +** but also be: +** LDAP_API_FEATURE_SESSION_THREAD_SAFE +** LDAP_API_FEATURE_OPERATION_THREAD_SAFE +** +** The preprocessor flag LDAP_API_FEATURE_X_OPENLDAP_THREAD_SAFE +** can be used to determine if -lldap_r is available at compile +** time. You must define LDAP_THREAD_SAFE if and only if you +** link with -lldap_r. +** +** If you fail to define LDAP_THREAD_SAFE when linking with +** -lldap_r or define LDAP_THREAD_SAFE when linking with -lldap, +** provided header definitions and declarations may be incorrect. +** +*/ + +/* is -lldap_r available or not */ +#define LDAP_API_FEATURE_X_OPENLDAP_THREAD_SAFE 1 + +/* LDAP v2 Referrals */ +/* #undef LDAP_API_FEATURE_X_OPENLDAP_V2_REFERRALS */ + +#endif /* LDAP_FEATURES */ diff --git a/contrib/openldap-cmake/darwin_x86_64/include/portable.h b/contrib/openldap-cmake/darwin_x86_64/include/portable.h new file mode 100644 index 00000000000..fdf4e89017e --- /dev/null +++ b/contrib/openldap-cmake/darwin_x86_64/include/portable.h @@ -0,0 +1,1169 @@ +/* include/portable.h. Generated from portable.hin by configure. */ +/* include/portable.hin. Generated from configure.in by autoheader. */ + + +/* begin of portable.h.pre */ +/* This work is part of OpenLDAP Software . 
+ * + * Copyright 1998-2020 The OpenLDAP Foundation + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted only as authorized by the OpenLDAP + * Public License. + * + * A copy of this license is available in the file LICENSE in the + * top-level directory of the distribution or, alternatively, at + * . + */ + +#ifndef _LDAP_PORTABLE_H +#define _LDAP_PORTABLE_H + +/* define this if needed to get reentrant functions */ +#ifndef REENTRANT +#define REENTRANT 1 +#endif +#ifndef _REENTRANT +#define _REENTRANT 1 +#endif + +/* define this if needed to get threadsafe functions */ +#ifndef THREADSAFE +#define THREADSAFE 1 +#endif +#ifndef _THREADSAFE +#define _THREADSAFE 1 +#endif +#ifndef THREAD_SAFE +#define THREAD_SAFE 1 +#endif +#ifndef _THREAD_SAFE +#define _THREAD_SAFE 1 +#endif + +#ifndef _SGI_MP_SOURCE +#define _SGI_MP_SOURCE 1 +#endif + +/* end of portable.h.pre */ + + +/* Define if building universal (internal helper macro) */ +/* #undef AC_APPLE_UNIVERSAL_BUILD */ + +/* define to use both and */ +/* #undef BOTH_STRINGS_H */ + +/* define if cross compiling */ +/* #undef CROSS_COMPILING */ + +/* set to the number of arguments ctime_r() expects */ +#define CTIME_R_NARGS 2 + +/* define if toupper() requires islower() */ +/* #undef C_UPPER_LOWER */ + +/* define if sys_errlist is not declared in stdio.h or errno.h */ +/* #undef DECL_SYS_ERRLIST */ + +/* define to enable slapi library */ +/* #undef ENABLE_SLAPI */ + +/* defined to be the EXE extension */ +#define EXEEXT "" + +/* set to the number of arguments gethostbyaddr_r() expects */ +/* #undef GETHOSTBYADDR_R_NARGS */ + +/* set to the number of arguments gethostbyname_r() expects */ +/* #undef GETHOSTBYNAME_R_NARGS */ + +/* Define to 1 if `TIOCGWINSZ' requires . */ +/* #undef GWINSZ_IN_SYS_IOCTL */ + +/* define if you have AIX security lib */ +/* #undef HAVE_AIX_SECURITY */ + +/* Define to 1 if you have the header file. */ +#define HAVE_ARPA_INET_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_ARPA_NAMESER_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_ASSERT_H 1 + +/* Define to 1 if you have the `bcopy' function. */ +#define HAVE_BCOPY 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_BITS_TYPES_H */ + +/* Define to 1 if you have the `chroot' function. */ +#define HAVE_CHROOT 1 + +/* Define to 1 if you have the `closesocket' function. */ +/* #undef HAVE_CLOSESOCKET */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_CONIO_H */ + +/* define if crypt(3) is available */ +/* #undef HAVE_CRYPT */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_CRYPT_H */ + +/* define if crypt_r() is also available */ +/* #undef HAVE_CRYPT_R */ + +/* Define to 1 if you have the `ctime_r' function. */ +#define HAVE_CTIME_R 1 + +/* define if you have Cyrus SASL */ +/* #undef HAVE_CYRUS_SASL */ + +/* define if your system supports /dev/poll */ +/* #undef HAVE_DEVPOLL */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_DIRECT_H */ + +/* Define to 1 if you have the header file, and it defines `DIR'. + */ +#define HAVE_DIRENT_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_DLFCN_H 1 + +/* Define to 1 if you don't have `vprintf' but do have `_doprnt.' */ +/* #undef HAVE_DOPRNT */ + +/* define if system uses EBCDIC instead of ASCII */ +/* #undef HAVE_EBCDIC */ + +/* Define to 1 if you have the `endgrent' function. 
*/ +#define HAVE_ENDGRENT 1 + +/* Define to 1 if you have the `endpwent' function. */ +#define HAVE_ENDPWENT 1 + +/* define if your system supports epoll */ +/* #undef HAVE_EPOLL */ + +/* Define to 1 if you have the header file. */ +#define HAVE_ERRNO_H 1 + +/* Define to 1 if you have the `fcntl' function. */ +#define HAVE_FCNTL 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_FCNTL_H 1 + +/* define if you actually have FreeBSD fetch(3) */ +/* #undef HAVE_FETCH */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_FILIO_H */ + +/* Define to 1 if you have the `flock' function. */ +#define HAVE_FLOCK 1 + +/* Define to 1 if you have the `fstat' function. */ +#define HAVE_FSTAT 1 + +/* Define to 1 if you have the `gai_strerror' function. */ +#define HAVE_GAI_STRERROR 1 + +/* Define to 1 if you have the `getaddrinfo' function. */ +#define HAVE_GETADDRINFO 1 + +/* Define to 1 if you have the `getdtablesize' function. */ +#define HAVE_GETDTABLESIZE 1 + +/* Define to 1 if you have the `geteuid' function. */ +#define HAVE_GETEUID 1 + +/* Define to 1 if you have the `getgrgid' function. */ +#define HAVE_GETGRGID 1 + +/* Define to 1 if you have the `gethostbyaddr_r' function. */ +/* #undef HAVE_GETHOSTBYADDR_R */ + +/* Define to 1 if you have the `gethostbyname_r' function. */ +/* #undef HAVE_GETHOSTBYNAME_R */ + +/* Define to 1 if you have the `gethostname' function. */ +#define HAVE_GETHOSTNAME 1 + +/* Define to 1 if you have the `getnameinfo' function. */ +#define HAVE_GETNAMEINFO 1 + +/* Define to 1 if you have the `getopt' function. */ +#define HAVE_GETOPT 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_GETOPT_H 1 + +/* Define to 1 if you have the `getpassphrase' function. */ +/* #undef HAVE_GETPASSPHRASE */ + +/* Define to 1 if you have the `getpeereid' function. */ +#define HAVE_GETPEEREID 1 + +/* Define to 1 if you have the `getpeerucred' function. */ +/* #undef HAVE_GETPEERUCRED */ + +/* Define to 1 if you have the `getpwnam' function. */ +#define HAVE_GETPWNAM 1 + +/* Define to 1 if you have the `getpwuid' function. */ +#define HAVE_GETPWUID 1 + +/* Define to 1 if you have the `getspnam' function. */ +/* #undef HAVE_GETSPNAM */ + +/* Define to 1 if you have the `gettimeofday' function. */ +#define HAVE_GETTIMEOFDAY 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_GMP_H */ + +/* Define to 1 if you have the `gmtime_r' function. */ +#define HAVE_GMTIME_R 1 + +/* define if you have GNUtls */ +/* #undef HAVE_GNUTLS */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_GNUTLS_GNUTLS_H */ + +/* if you have GNU Pth */ +/* #undef HAVE_GNU_PTH */ + +/* Define to 1 if you have the header file. */ +#define HAVE_GRP_H 1 + +/* Define to 1 if you have the `hstrerror' function. */ +#define HAVE_HSTRERROR 1 + +/* define to you inet_aton(3) is available */ +#define HAVE_INET_ATON 1 + +/* Define to 1 if you have the `inet_ntoa_b' function. */ +/* #undef HAVE_INET_NTOA_B */ + +/* Define to 1 if you have the `inet_ntop' function. */ +#define HAVE_INET_NTOP 1 + +/* Define to 1 if you have the `initgroups' function. */ +#define HAVE_INITGROUPS 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_INTTYPES_H 1 + +/* Define to 1 if you have the `ioctl' function. */ +#define HAVE_IOCTL 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_IO_H */ + +/* define if your system supports kqueue */ +#define HAVE_KQUEUE 1 + +/* Define to 1 if you have the `gen' library (-lgen). 
*/ +/* #undef HAVE_LIBGEN */ + +/* Define to 1 if you have the `gmp' library (-lgmp). */ +/* #undef HAVE_LIBGMP */ + +/* Define to 1 if you have the `inet' library (-linet). */ +/* #undef HAVE_LIBINET */ + +/* define if you have libtool -ltdl */ +/* #undef HAVE_LIBLTDL */ + +/* Define to 1 if you have the `net' library (-lnet). */ +/* #undef HAVE_LIBNET */ + +/* Define to 1 if you have the `nsl' library (-lnsl). */ +/* #undef HAVE_LIBNSL */ + +/* Define to 1 if you have the `nsl_s' library (-lnsl_s). */ +/* #undef HAVE_LIBNSL_S */ + +/* Define to 1 if you have the `socket' library (-lsocket). */ +/* #undef HAVE_LIBSOCKET */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_LIBUTIL_H */ + +/* Define to 1 if you have the `V3' library (-lV3). */ +/* #undef HAVE_LIBV3 */ + +/* Define to 1 if you have the header file. */ +#define HAVE_LIMITS_H 1 + +/* if you have LinuxThreads */ +/* #undef HAVE_LINUX_THREADS */ + +/* Define to 1 if you have the header file. */ +#define HAVE_LOCALE_H 1 + +/* Define to 1 if you have the `localtime_r' function. */ +#define HAVE_LOCALTIME_R 1 + +/* Define to 1 if you have the `lockf' function. */ +#define HAVE_LOCKF 1 + +/* Define to 1 if the system has the type `long long'. */ +#define HAVE_LONG_LONG 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_LTDL_H */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_MALLOC_H */ + +/* Define to 1 if you have the `memcpy' function. */ +#define HAVE_MEMCPY 1 + +/* Define to 1 if you have the `memmove' function. */ +#define HAVE_MEMMOVE 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_MEMORY_H 1 + +/* Define to 1 if you have the `memrchr' function. */ +/* #undef HAVE_MEMRCHR */ + +/* Define to 1 if you have the `mkstemp' function. */ +#define HAVE_MKSTEMP 1 + +/* Define to 1 if you have the `mktemp' function. */ +#define HAVE_MKTEMP 1 + +/* define this if you have mkversion */ +#define HAVE_MKVERSION 1 + +/* Define to 1 if you have the header file, and it defines `DIR'. */ +/* #undef HAVE_NDIR_H */ + +/* Define to 1 if you have the header file. */ +#define HAVE_NETINET_TCP_H 1 + +/* define if strerror_r returns char* instead of int */ +/* #undef HAVE_NONPOSIX_STRERROR_R */ + +/* if you have NT Event Log */ +/* #undef HAVE_NT_EVENT_LOG */ + +/* if you have NT Service Manager */ +/* #undef HAVE_NT_SERVICE_MANAGER */ + +/* if you have NT Threads */ +/* #undef HAVE_NT_THREADS */ + +/* define if you have OpenSSL */ +#define HAVE_OPENSSL 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_OPENSSL_BN_H 1 + +/* define if you have OpenSSL with CRL checking capability */ +#define HAVE_OPENSSL_CRL 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_OPENSSL_CRYPTO_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_OPENSSL_SSL_H 1 + +/* Define to 1 if you have the `pipe' function. */ +#define HAVE_PIPE 1 + +/* Define to 1 if you have the `poll' function. */ +#define HAVE_POLL 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_POLL_H 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_PROCESS_H */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_PSAP_H */ + +/* define to pthreads API spec revision */ +#define HAVE_PTHREADS 10 + +/* define if you have pthread_detach function */ +#define HAVE_PTHREAD_DETACH 1 + +/* Define to 1 if you have the `pthread_getconcurrency' function. */ +#define HAVE_PTHREAD_GETCONCURRENCY 1 + +/* Define to 1 if you have the header file. 
*/ +#define HAVE_PTHREAD_H 1 + +/* Define to 1 if you have the `pthread_kill' function. */ +#define HAVE_PTHREAD_KILL 1 + +/* Define to 1 if you have the `pthread_kill_other_threads_np' function. */ +/* #undef HAVE_PTHREAD_KILL_OTHER_THREADS_NP */ + +/* define if you have pthread_rwlock_destroy function */ +#define HAVE_PTHREAD_RWLOCK_DESTROY 1 + +/* Define to 1 if you have the `pthread_setconcurrency' function. */ +#define HAVE_PTHREAD_SETCONCURRENCY 1 + +/* Define to 1 if you have the `pthread_yield' function. */ +/* #undef HAVE_PTHREAD_YIELD */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_PTH_H */ + +/* Define to 1 if the system has the type `ptrdiff_t'. */ +#define HAVE_PTRDIFF_T 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_PWD_H 1 + +/* Define to 1 if you have the `read' function. */ +#define HAVE_READ 1 + +/* Define to 1 if you have the `recv' function. */ +#define HAVE_RECV 1 + +/* Define to 1 if you have the `recvfrom' function. */ +#define HAVE_RECVFROM 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_REGEX_H 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_RESOLV_H */ + +/* define if you have res_query() */ +/* #undef HAVE_RES_QUERY */ + +/* define if OpenSSL needs RSAref */ +/* #undef HAVE_RSAREF */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SASL_H */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SASL_SASL_H */ + +/* define if your SASL library has sasl_version() */ +/* #undef HAVE_SASL_VERSION */ + +/* Define to 1 if you have the header file. */ +#define HAVE_SCHED_H 1 + +/* Define to 1 if you have the `sched_yield' function. */ +#define HAVE_SCHED_YIELD 1 + +/* Define to 1 if you have the `send' function. */ +#define HAVE_SEND 1 + +/* Define to 1 if you have the `sendmsg' function. */ +#define HAVE_SENDMSG 1 + +/* Define to 1 if you have the `sendto' function. */ +#define HAVE_SENDTO 1 + +/* Define to 1 if you have the `setegid' function. */ +#define HAVE_SETEGID 1 + +/* Define to 1 if you have the `seteuid' function. */ +#define HAVE_SETEUID 1 + +/* Define to 1 if you have the `setgid' function. */ +#define HAVE_SETGID 1 + +/* Define to 1 if you have the `setpwfile' function. */ +/* #undef HAVE_SETPWFILE */ + +/* Define to 1 if you have the `setsid' function. */ +#define HAVE_SETSID 1 + +/* Define to 1 if you have the `setuid' function. */ +#define HAVE_SETUID 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SGTTY_H 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SHADOW_H */ + +/* Define to 1 if you have the `sigaction' function. */ +#define HAVE_SIGACTION 1 + +/* Define to 1 if you have the `signal' function. */ +#define HAVE_SIGNAL 1 + +/* Define to 1 if you have the `sigset' function. */ +#define HAVE_SIGSET 1 + +/* define if you have -lslp */ +/* #undef HAVE_SLP */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SLP_H */ + +/* Define to 1 if you have the `snprintf' function. */ +#define HAVE_SNPRINTF 1 + +/* if you have spawnlp() */ +/* #undef HAVE_SPAWNLP */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SQLEXT_H */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SQL_H */ + +/* Define to 1 if you have the header file. */ +#define HAVE_STDDEF_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STDINT_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STDLIB_H 1 + +/* Define to 1 if you have the `strdup' function. 
*/ +#define HAVE_STRDUP 1 + +/* Define to 1 if you have the `strerror' function. */ +#define HAVE_STRERROR 1 + +/* Define to 1 if you have the `strerror_r' function. */ +#define HAVE_STRERROR_R 1 + +/* Define to 1 if you have the `strftime' function. */ +#define HAVE_STRFTIME 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STRINGS_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STRING_H 1 + +/* Define to 1 if you have the `strpbrk' function. */ +#define HAVE_STRPBRK 1 + +/* Define to 1 if you have the `strrchr' function. */ +#define HAVE_STRRCHR 1 + +/* Define to 1 if you have the `strsep' function. */ +#define HAVE_STRSEP 1 + +/* Define to 1 if you have the `strspn' function. */ +#define HAVE_STRSPN 1 + +/* Define to 1 if you have the `strstr' function. */ +#define HAVE_STRSTR 1 + +/* Define to 1 if you have the `strtol' function. */ +#define HAVE_STRTOL 1 + +/* Define to 1 if you have the `strtoll' function. */ +#define HAVE_STRTOLL 1 + +/* Define to 1 if you have the `strtoq' function. */ +#define HAVE_STRTOQ 1 + +/* Define to 1 if you have the `strtoul' function. */ +#define HAVE_STRTOUL 1 + +/* Define to 1 if you have the `strtoull' function. */ +#define HAVE_STRTOULL 1 + +/* Define to 1 if you have the `strtouq' function. */ +#define HAVE_STRTOUQ 1 + +/* Define to 1 if `msg_accrightslen' is a member of `struct msghdr'. */ +/* #undef HAVE_STRUCT_MSGHDR_MSG_ACCRIGHTSLEN */ + +/* Define to 1 if `msg_control' is a member of `struct msghdr'. */ +/* #undef HAVE_STRUCT_MSGHDR_MSG_CONTROL */ + +/* Define to 1 if `pw_gecos' is a member of `struct passwd'. */ +#define HAVE_STRUCT_PASSWD_PW_GECOS 1 + +/* Define to 1 if `pw_passwd' is a member of `struct passwd'. */ +#define HAVE_STRUCT_PASSWD_PW_PASSWD 1 + +/* Define to 1 if `st_blksize' is a member of `struct stat'. */ +#define HAVE_STRUCT_STAT_ST_BLKSIZE 1 + +/* Define to 1 if `st_fstype' is a member of `struct stat'. */ +/* #undef HAVE_STRUCT_STAT_ST_FSTYPE */ + +/* define to 1 if st_fstype is char * */ +/* #undef HAVE_STRUCT_STAT_ST_FSTYPE_CHAR */ + +/* define to 1 if st_fstype is int */ +/* #undef HAVE_STRUCT_STAT_ST_FSTYPE_INT */ + +/* Define to 1 if `st_vfstype' is a member of `struct stat'. */ +/* #undef HAVE_STRUCT_STAT_ST_VFSTYPE */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SYNCH_H */ + +/* Define to 1 if you have the `sysconf' function. */ +#define HAVE_SYSCONF 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYSEXITS_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYSLOG_H 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SYS_DEVPOLL_H */ + +/* Define to 1 if you have the header file, and it defines `DIR'. + */ +/* #undef HAVE_SYS_DIR_H */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SYS_EPOLL_H */ + +/* define if you actually have sys_errlist in your libs */ +#define HAVE_SYS_ERRLIST 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_ERRNO_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_EVENT_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_FILE_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_FILIO_H 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SYS_FSTYP_H */ + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_IOCTL_H 1 + +/* Define to 1 if you have the header file, and it defines `DIR'. + */ +/* #undef HAVE_SYS_NDIR_H */ + +/* Define to 1 if you have the header file. 
*/ +#define HAVE_SYS_PARAM_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_POLL_H 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SYS_PRIVGRP_H */ + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_RESOURCE_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_SELECT_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_SOCKET_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_STAT_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_SYSLOG_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_TIME_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_TYPES_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_UCRED_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_UIO_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_UN_H 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SYS_UUID_H */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SYS_VMOUNT_H */ + +/* Define to 1 if you have that is POSIX.1 compatible. */ +#define HAVE_SYS_WAIT_H 1 + +/* define if you have -lwrap */ +/* #undef HAVE_TCPD */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_TCPD_H */ + +/* Define to 1 if you have the header file. */ +#define HAVE_TERMIOS_H 1 + +/* if you have Solaris LWP (thr) package */ +/* #undef HAVE_THR */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_THREAD_H */ + +/* Define to 1 if you have the `thr_getconcurrency' function. */ +/* #undef HAVE_THR_GETCONCURRENCY */ + +/* Define to 1 if you have the `thr_setconcurrency' function. */ +/* #undef HAVE_THR_SETCONCURRENCY */ + +/* Define to 1 if you have the `thr_yield' function. */ +/* #undef HAVE_THR_YIELD */ + +/* define if you have TLS */ +#define HAVE_TLS 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_UNISTD_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_UTIME_H 1 + +/* define if you have uuid_generate() */ +/* #undef HAVE_UUID_GENERATE */ + +/* define if you have uuid_to_str() */ +/* #undef HAVE_UUID_TO_STR */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_UUID_UUID_H */ + +/* Define to 1 if you have the `vprintf' function. */ +#define HAVE_VPRINTF 1 + +/* Define to 1 if you have the `vsnprintf' function. */ +#define HAVE_VSNPRINTF 1 + +/* Define to 1 if you have the `wait4' function. */ +#define HAVE_WAIT4 1 + +/* Define to 1 if you have the `waitpid' function. */ +#define HAVE_WAITPID 1 + +/* define if you have winsock */ +/* #undef HAVE_WINSOCK */ + +/* define if you have winsock2 */ +/* #undef HAVE_WINSOCK2 */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_WINSOCK2_H */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_WINSOCK_H */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_WIREDTIGER_H */ + +/* Define to 1 if you have the `write' function. */ +#define HAVE_WRITE 1 + +/* define if select implicitly yields */ +#define HAVE_YIELDING_SELECT 1 + +/* Define to 1 if you have the `_vsnprintf' function. 
*/ +/* #undef HAVE__VSNPRINTF */ + +/* define to 32-bit or greater integer type */ +#define LBER_INT_T int + +/* define to large integer type */ +#define LBER_LEN_T long + +/* define to socket descriptor type */ +#define LBER_SOCKET_T int + +/* define to large integer type */ +#define LBER_TAG_T long + +/* define to 1 if library is thread safe */ +#define LDAP_API_FEATURE_X_OPENLDAP_THREAD_SAFE 1 + +/* define to LDAP VENDOR VERSION */ +/* #undef LDAP_API_FEATURE_X_OPENLDAP_V2_REFERRALS */ + +/* define this to add debugging code */ +/* #undef LDAP_DEBUG */ + +/* define if LDAP libs are dynamic */ +/* #undef LDAP_LIBS_DYNAMIC */ + +/* define to support PF_INET6 */ +#define LDAP_PF_INET6 1 + +/* define to support PF_LOCAL */ +#define LDAP_PF_LOCAL 1 + +/* define this to add SLAPI code */ +/* #undef LDAP_SLAPI */ + +/* define this to add syslog code */ +/* #undef LDAP_SYSLOG */ + +/* Version */ +#define LDAP_VENDOR_VERSION 20501 + +/* Major */ +#define LDAP_VENDOR_VERSION_MAJOR 2 + +/* Minor */ +#define LDAP_VENDOR_VERSION_MINOR 5 + +/* Patch */ +#define LDAP_VENDOR_VERSION_PATCH X + +/* Define to the sub-directory where libtool stores uninstalled libraries. */ +#define LT_OBJDIR ".libs/" + +/* define if memcmp is not 8-bit clean or is otherwise broken */ +/* #undef NEED_MEMCMP_REPLACEMENT */ + +/* define if you have (or want) no threads */ +/* #undef NO_THREADS */ + +/* define to use the original debug style */ +/* #undef OLD_DEBUG */ + +/* Package */ +#define OPENLDAP_PACKAGE "OpenLDAP" + +/* Version */ +#define OPENLDAP_VERSION "2.5.X" + +/* Define to the address where bug reports for this package should be sent. */ +#define PACKAGE_BUGREPORT "" + +/* Define to the full name of this package. */ +#define PACKAGE_NAME "" + +/* Define to the full name and version of this package. */ +#define PACKAGE_STRING "" + +/* Define to the one symbol short name of this package. */ +#define PACKAGE_TARNAME "" + +/* Define to the home page for this package. */ +#define PACKAGE_URL "" + +/* Define to the version of this package. */ +#define PACKAGE_VERSION "" + +/* define if sched_yield yields the entire process */ +/* #undef REPLACE_BROKEN_YIELD */ + +/* Define as the return type of signal handlers (`int' or `void'). */ +#define RETSIGTYPE void + +/* Define to the type of arg 1 for `select'. */ +#define SELECT_TYPE_ARG1 int + +/* Define to the type of args 2, 3 and 4 for `select'. */ +#define SELECT_TYPE_ARG234 (fd_set *) + +/* Define to the type of arg 5 for `select'. */ +#define SELECT_TYPE_ARG5 (struct timeval *) + +/* The size of `int', as computed by sizeof. */ +#define SIZEOF_INT 4 + +/* The size of `long', as computed by sizeof. */ +#define SIZEOF_LONG 8 + +/* The size of `long long', as computed by sizeof. */ +#define SIZEOF_LONG_LONG 8 + +/* The size of `short', as computed by sizeof. */ +#define SIZEOF_SHORT 2 + +/* The size of `wchar_t', as computed by sizeof. 
*/ +#define SIZEOF_WCHAR_T 4 + +/* define to support per-object ACIs */ +/* #undef SLAPD_ACI_ENABLED */ + +/* define to support LDAP Async Metadirectory backend */ +/* #undef SLAPD_ASYNCMETA */ + +/* define to support cleartext passwords */ +/* #undef SLAPD_CLEARTEXT */ + +/* define to support crypt(3) passwords */ +/* #undef SLAPD_CRYPT */ + +/* define to support DNS SRV backend */ +/* #undef SLAPD_DNSSRV */ + +/* define to support LDAP backend */ +/* #undef SLAPD_LDAP */ + +/* define to support MDB backend */ +/* #undef SLAPD_MDB */ + +/* define to support LDAP Metadirectory backend */ +/* #undef SLAPD_META */ + +/* define to support modules */ +/* #undef SLAPD_MODULES */ + +/* dynamically linked module */ +#define SLAPD_MOD_DYNAMIC 2 + +/* statically linked module */ +#define SLAPD_MOD_STATIC 1 + +/* define to support cn=Monitor backend */ +/* #undef SLAPD_MONITOR */ + +/* define to support NDB backend */ +/* #undef SLAPD_NDB */ + +/* define to support NULL backend */ +/* #undef SLAPD_NULL */ + +/* define for In-Directory Access Logging overlay */ +/* #undef SLAPD_OVER_ACCESSLOG */ + +/* define for Audit Logging overlay */ +/* #undef SLAPD_OVER_AUDITLOG */ + +/* define for Automatic Certificate Authority overlay */ +/* #undef SLAPD_OVER_AUTOCA */ + +/* define for Collect overlay */ +/* #undef SLAPD_OVER_COLLECT */ + +/* define for Attribute Constraint overlay */ +/* #undef SLAPD_OVER_CONSTRAINT */ + +/* define for Dynamic Directory Services overlay */ +/* #undef SLAPD_OVER_DDS */ + +/* define for Dynamic Directory Services overlay */ +/* #undef SLAPD_OVER_DEREF */ + +/* define for Dynamic Group overlay */ +/* #undef SLAPD_OVER_DYNGROUP */ + +/* define for Dynamic List overlay */ +/* #undef SLAPD_OVER_DYNLIST */ + +/* define for Reverse Group Membership overlay */ +/* #undef SLAPD_OVER_MEMBEROF */ + +/* define for Password Policy overlay */ +/* #undef SLAPD_OVER_PPOLICY */ + +/* define for Proxy Cache overlay */ +/* #undef SLAPD_OVER_PROXYCACHE */ + +/* define for Referential Integrity overlay */ +/* #undef SLAPD_OVER_REFINT */ + +/* define for Return Code overlay */ +/* #undef SLAPD_OVER_RETCODE */ + +/* define for Rewrite/Remap overlay */ +/* #undef SLAPD_OVER_RWM */ + +/* define for Sequential Modify overlay */ +/* #undef SLAPD_OVER_SEQMOD */ + +/* define for ServerSideSort/VLV overlay */ +/* #undef SLAPD_OVER_SSSVLV */ + +/* define for Syncrepl Provider overlay */ +/* #undef SLAPD_OVER_SYNCPROV */ + +/* define for Translucent Proxy overlay */ +/* #undef SLAPD_OVER_TRANSLUCENT */ + +/* define for Attribute Uniqueness overlay */ +/* #undef SLAPD_OVER_UNIQUE */ + +/* define for Value Sorting overlay */ +/* #undef SLAPD_OVER_VALSORT */ + +/* define to support PASSWD backend */ +/* #undef SLAPD_PASSWD */ + +/* define to support PERL backend */ +/* #undef SLAPD_PERL */ + +/* define to support relay backend */ +/* #undef SLAPD_RELAY */ + +/* define to support reverse lookups */ +/* #undef SLAPD_RLOOKUPS */ + +/* define to support SHELL backend */ +/* #undef SLAPD_SHELL */ + +/* define to support SOCK backend */ +/* #undef SLAPD_SOCK */ + +/* define to support SASL passwords */ +/* #undef SLAPD_SPASSWD */ + +/* define to support SQL backend */ +/* #undef SLAPD_SQL */ + +/* define to support WiredTiger backend */ +/* #undef SLAPD_WT */ + +/* define to support run-time loadable ACL */ +/* #undef SLAP_DYNACL */ + +/* Define to 1 if you have the ANSI C header files. */ +#define STDC_HEADERS 1 + +/* Define to 1 if you can safely include both and . 
*/ +#define TIME_WITH_SYS_TIME 1 + +/* Define to 1 if your declares `struct tm'. */ +/* #undef TM_IN_SYS_TIME */ + +/* set to urandom device */ +#define URANDOM_DEVICE "/dev/urandom" + +/* define to use OpenSSL BIGNUM for MP */ +/* #undef USE_MP_BIGNUM */ + +/* define to use GMP for MP */ +/* #undef USE_MP_GMP */ + +/* define to use 'long' for MP */ +/* #undef USE_MP_LONG */ + +/* define to use 'long long' for MP */ +/* #undef USE_MP_LONG_LONG */ + +/* Define WORDS_BIGENDIAN to 1 if your processor stores words with the most + significant byte first (like Motorola and SPARC, unlike Intel). */ +#if defined AC_APPLE_UNIVERSAL_BUILD +# if defined __BIG_ENDIAN__ +# define WORDS_BIGENDIAN 1 +# endif +#else +# ifndef WORDS_BIGENDIAN +/* # undef WORDS_BIGENDIAN */ +# endif +#endif + +/* Define to the type of arg 3 for `accept'. */ +#define ber_socklen_t socklen_t + +/* Define to `char *' if does not define. */ +/* #undef caddr_t */ + +/* Define to empty if `const' does not conform to ANSI C. */ +/* #undef const */ + +/* Define to `int' if doesn't define. */ +/* #undef gid_t */ + +/* Define to `int' if does not define. */ +/* #undef mode_t */ + +/* Define to `long' if does not define. */ +/* #undef off_t */ + +/* Define to `int' if does not define. */ +/* #undef pid_t */ + +/* Define to `int' if does not define. */ +/* #undef sig_atomic_t */ + +/* Define to `unsigned' if does not define. */ +/* #undef size_t */ + +/* define to snprintf routine */ +/* #undef snprintf */ + +/* Define like ber_socklen_t if does not define. */ +/* #undef socklen_t */ + +/* Define to `signed int' if does not define. */ +/* #undef ssize_t */ + +/* Define to `int' if doesn't define. */ +/* #undef uid_t */ + +/* define as empty if volatile is not supported */ +/* #undef volatile */ + +/* define to snprintf routine */ +/* #undef vsnprintf */ + + +/* begin of portable.h.post */ + +#ifdef _WIN32 +/* don't suck in all of the win32 api */ +# define WIN32_LEAN_AND_MEAN 1 +#endif + +#ifndef LDAP_NEEDS_PROTOTYPES +/* force LDAP_P to always include prototypes */ +#define LDAP_NEEDS_PROTOTYPES 1 +#endif + +#ifndef LDAP_REL_ENG +#if (LDAP_VENDOR_VERSION == 000000) && !defined(LDAP_DEVEL) +#define LDAP_DEVEL +#endif +#if defined(LDAP_DEVEL) && !defined(LDAP_TEST) +#define LDAP_TEST +#endif +#endif + +#ifdef HAVE_STDDEF_H +# include +#endif + +#ifdef HAVE_EBCDIC +/* ASCII/EBCDIC converting replacements for stdio funcs + * vsnprintf and snprintf are used too, but they are already + * checked by the configure script + */ +#define fputs ber_pvt_fputs +#define fgets ber_pvt_fgets +#define printf ber_pvt_printf +#define fprintf ber_pvt_fprintf +#define vfprintf ber_pvt_vfprintf +#define vsprintf ber_pvt_vsprintf +#endif + +#include "ac/fdset.h" + +#include "ldap_cdefs.h" +#include "ldap_features.h" + +#include "ac/assert.h" +#include "ac/localize.h" + +#endif /* _LDAP_PORTABLE_H */ +/* end of portable.h.post */ + diff --git a/contrib/openldap-cmake/freebsd_x86_64/include/lber_types.h b/contrib/openldap-cmake/freebsd_x86_64/include/lber_types.h new file mode 100644 index 00000000000..dbd59430527 --- /dev/null +++ b/contrib/openldap-cmake/freebsd_x86_64/include/lber_types.h @@ -0,0 +1,63 @@ +/* include/lber_types.h. Generated from lber_types.hin by configure. */ +/* $OpenLDAP$ */ +/* This work is part of OpenLDAP Software . + * + * Copyright 1998-2020 The OpenLDAP Foundation. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted only as authorized by the OpenLDAP + * Public License. + * + * A copy of this license is available in file LICENSE in the + * top-level directory of the distribution or, alternatively, at + * . + */ + +/* + * LBER types + */ + +#ifndef _LBER_TYPES_H +#define _LBER_TYPES_H + +#include + +LDAP_BEGIN_DECL + +/* LBER boolean, enum, integers (32 bits or larger) */ +#define LBER_INT_T int + +/* LBER tags (32 bits or larger) */ +#define LBER_TAG_T long + +/* LBER socket descriptor */ +#define LBER_SOCKET_T int + +/* LBER lengths (32 bits or larger) */ +#define LBER_LEN_T long + +/* ------------------------------------------------------------ */ + +/* booleans, enumerations, and integers */ +typedef LBER_INT_T ber_int_t; + +/* signed and unsigned versions */ +typedef signed LBER_INT_T ber_sint_t; +typedef unsigned LBER_INT_T ber_uint_t; + +/* tags */ +typedef unsigned LBER_TAG_T ber_tag_t; + +/* "socket" descriptors */ +typedef LBER_SOCKET_T ber_socket_t; + +/* lengths */ +typedef unsigned LBER_LEN_T ber_len_t; + +/* signed lengths */ +typedef signed LBER_LEN_T ber_slen_t; + +LDAP_END_DECL + +#endif /* _LBER_TYPES_H */ diff --git a/contrib/openldap-cmake/freebsd_x86_64/include/ldap_config.h b/contrib/openldap-cmake/freebsd_x86_64/include/ldap_config.h new file mode 100644 index 00000000000..89f7b40b884 --- /dev/null +++ b/contrib/openldap-cmake/freebsd_x86_64/include/ldap_config.h @@ -0,0 +1,74 @@ +/* include/ldap_config.h. Generated from ldap_config.hin by configure. */ +/* $OpenLDAP$ */ +/* This work is part of OpenLDAP Software . + * + * Copyright 1998-2020 The OpenLDAP Foundation. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted only as authorized by the OpenLDAP + * Public License. + * + * A copy of this license is available in file LICENSE in the + * top-level directory of the distribution or, alternatively, at + * . + */ + +/* + * This file works in conjunction with OpenLDAP configure system. + * If you do no like the values below, adjust your configure options. + */ + +#ifndef _LDAP_CONFIG_H +#define _LDAP_CONFIG_H + +/* directory separator */ +#ifndef LDAP_DIRSEP +#ifndef _WIN32 +#define LDAP_DIRSEP "/" +#else +#define LDAP_DIRSEP "\\" +#endif +#endif + +/* directory for temporary files */ +#if defined(_WIN32) +# define LDAP_TMPDIR "C:\\." 
/* we don't have much of a choice */ +#elif defined( _P_tmpdir ) +# define LDAP_TMPDIR _P_tmpdir +#elif defined( P_tmpdir ) +# define LDAP_TMPDIR P_tmpdir +#elif defined( _PATH_TMPDIR ) +# define LDAP_TMPDIR _PATH_TMPDIR +#else +# define LDAP_TMPDIR LDAP_DIRSEP "tmp" +#endif + +/* directories */ +#ifndef LDAP_BINDIR +#define LDAP_BINDIR "/tmp/ldap-prefix/bin" +#endif +#ifndef LDAP_SBINDIR +#define LDAP_SBINDIR "/tmp/ldap-prefix/sbin" +#endif +#ifndef LDAP_DATADIR +#define LDAP_DATADIR "/tmp/ldap-prefix/share/openldap" +#endif +#ifndef LDAP_SYSCONFDIR +#define LDAP_SYSCONFDIR "/tmp/ldap-prefix/etc/openldap" +#endif +#ifndef LDAP_LIBEXECDIR +#define LDAP_LIBEXECDIR "/tmp/ldap-prefix/libexec" +#endif +#ifndef LDAP_MODULEDIR +#define LDAP_MODULEDIR "/tmp/ldap-prefix/libexec/openldap" +#endif +#ifndef LDAP_RUNDIR +#define LDAP_RUNDIR "/tmp/ldap-prefix/var" +#endif +#ifndef LDAP_LOCALEDIR +#define LDAP_LOCALEDIR "" +#endif + + +#endif /* _LDAP_CONFIG_H */ diff --git a/contrib/openldap-cmake/freebsd_x86_64/include/ldap_features.h b/contrib/openldap-cmake/freebsd_x86_64/include/ldap_features.h new file mode 100644 index 00000000000..f0cc7c3626f --- /dev/null +++ b/contrib/openldap-cmake/freebsd_x86_64/include/ldap_features.h @@ -0,0 +1,61 @@ +/* include/ldap_features.h. Generated from ldap_features.hin by configure. */ +/* $OpenLDAP$ */ +/* This work is part of OpenLDAP Software . + * + * Copyright 1998-2020 The OpenLDAP Foundation. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted only as authorized by the OpenLDAP + * Public License. + * + * A copy of this license is available in file LICENSE in the + * top-level directory of the distribution or, alternatively, at + * . + */ + +/* + * LDAP Features + */ + +#ifndef _LDAP_FEATURES_H +#define _LDAP_FEATURES_H 1 + +/* OpenLDAP API version macros */ +#define LDAP_VENDOR_VERSION 20501 +#define LDAP_VENDOR_VERSION_MAJOR 2 +#define LDAP_VENDOR_VERSION_MINOR 5 +#define LDAP_VENDOR_VERSION_PATCH X + +/* +** WORK IN PROGRESS! +** +** OpenLDAP reentrancy/thread-safeness should be dynamically +** checked using ldap_get_option(). +** +** The -lldap implementation is not thread-safe. +** +** The -lldap_r implementation is: +** LDAP_API_FEATURE_THREAD_SAFE (basic thread safety) +** but also be: +** LDAP_API_FEATURE_SESSION_THREAD_SAFE +** LDAP_API_FEATURE_OPERATION_THREAD_SAFE +** +** The preprocessor flag LDAP_API_FEATURE_X_OPENLDAP_THREAD_SAFE +** can be used to determine if -lldap_r is available at compile +** time. You must define LDAP_THREAD_SAFE if and only if you +** link with -lldap_r. +** +** If you fail to define LDAP_THREAD_SAFE when linking with +** -lldap_r or define LDAP_THREAD_SAFE when linking with -lldap, +** provided header definitions and declarations may be incorrect. +** +*/ + +/* is -lldap_r available or not */ +#define LDAP_API_FEATURE_X_OPENLDAP_THREAD_SAFE 1 + +/* LDAP v2 Referrals */ +/* #undef LDAP_API_FEATURE_X_OPENLDAP_V2_REFERRALS */ + +#endif /* LDAP_FEATURES */ diff --git a/contrib/openldap-cmake/freebsd_x86_64/include/portable.h b/contrib/openldap-cmake/freebsd_x86_64/include/portable.h new file mode 100644 index 00000000000..10a15fe3ca1 --- /dev/null +++ b/contrib/openldap-cmake/freebsd_x86_64/include/portable.h @@ -0,0 +1,1169 @@ +/* include/portable.h. Generated from portable.hin by configure. */ +/* include/portable.hin. Generated from configure.in by autoheader. */ + + +/* begin of portable.h.pre */ +/* This work is part of OpenLDAP Software . 
+ * + * Copyright 1998-2020 The OpenLDAP Foundation + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted only as authorized by the OpenLDAP + * Public License. + * + * A copy of this license is available in the file LICENSE in the + * top-level directory of the distribution or, alternatively, at + * . + */ + +#ifndef _LDAP_PORTABLE_H +#define _LDAP_PORTABLE_H + +/* define this if needed to get reentrant functions */ +#ifndef REENTRANT +#define REENTRANT 1 +#endif +#ifndef _REENTRANT +#define _REENTRANT 1 +#endif + +/* define this if needed to get threadsafe functions */ +#ifndef THREADSAFE +#define THREADSAFE 1 +#endif +#ifndef _THREADSAFE +#define _THREADSAFE 1 +#endif +#ifndef THREAD_SAFE +#define THREAD_SAFE 1 +#endif +#ifndef _THREAD_SAFE +#define _THREAD_SAFE 1 +#endif + +#ifndef _SGI_MP_SOURCE +#define _SGI_MP_SOURCE 1 +#endif + +/* end of portable.h.pre */ + + +/* Define if building universal (internal helper macro) */ +/* #undef AC_APPLE_UNIVERSAL_BUILD */ + +/* define to use both and */ +/* #undef BOTH_STRINGS_H */ + +/* define if cross compiling */ +/* #undef CROSS_COMPILING */ + +/* set to the number of arguments ctime_r() expects */ +#define CTIME_R_NARGS 2 + +/* define if toupper() requires islower() */ +/* #undef C_UPPER_LOWER */ + +/* define if sys_errlist is not declared in stdio.h or errno.h */ +/* #undef DECL_SYS_ERRLIST */ + +/* define to enable slapi library */ +/* #undef ENABLE_SLAPI */ + +/* defined to be the EXE extension */ +#define EXEEXT "" + +/* set to the number of arguments gethostbyaddr_r() expects */ +#define GETHOSTBYADDR_R_NARGS 8 + +/* set to the number of arguments gethostbyname_r() expects */ +#define GETHOSTBYNAME_R_NARGS 6 + +/* Define to 1 if `TIOCGWINSZ' requires . */ +/* #undef GWINSZ_IN_SYS_IOCTL */ + +/* define if you have AIX security lib */ +/* #undef HAVE_AIX_SECURITY */ + +/* Define to 1 if you have the header file. */ +#define HAVE_ARPA_INET_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_ARPA_NAMESER_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_ASSERT_H 1 + +/* Define to 1 if you have the `bcopy' function. */ +#define HAVE_BCOPY 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_BITS_TYPES_H */ + +/* Define to 1 if you have the `chroot' function. */ +#define HAVE_CHROOT 1 + +/* Define to 1 if you have the `closesocket' function. */ +/* #undef HAVE_CLOSESOCKET */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_CONIO_H */ + +/* define if crypt(3) is available */ +/* #undef HAVE_CRYPT */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_CRYPT_H */ + +/* define if crypt_r() is also available */ +/* #undef HAVE_CRYPT_R */ + +/* Define to 1 if you have the `ctime_r' function. */ +#define HAVE_CTIME_R 1 + +/* define if you have Cyrus SASL */ +/* #undef HAVE_CYRUS_SASL */ + +/* define if your system supports /dev/poll */ +/* #undef HAVE_DEVPOLL */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_DIRECT_H */ + +/* Define to 1 if you have the header file, and it defines `DIR'. + */ +#define HAVE_DIRENT_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_DLFCN_H 1 + +/* Define to 1 if you don't have `vprintf' but do have `_doprnt.' */ +/* #undef HAVE_DOPRNT */ + +/* define if system uses EBCDIC instead of ASCII */ +/* #undef HAVE_EBCDIC */ + +/* Define to 1 if you have the `endgrent' function. 
*/ +#define HAVE_ENDGRENT 1 + +/* Define to 1 if you have the `endpwent' function. */ +#define HAVE_ENDPWENT 1 + +/* define if your system supports epoll */ +/* #undef HAVE_EPOLL */ + +/* Define to 1 if you have the header file. */ +#define HAVE_ERRNO_H 1 + +/* Define to 1 if you have the `fcntl' function. */ +#define HAVE_FCNTL 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_FCNTL_H 1 + +/* define if you actually have FreeBSD fetch(3) */ +/* #undef HAVE_FETCH */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_FILIO_H */ + +/* Define to 1 if you have the `flock' function. */ +#define HAVE_FLOCK 1 + +/* Define to 1 if you have the `fstat' function. */ +#define HAVE_FSTAT 1 + +/* Define to 1 if you have the `gai_strerror' function. */ +#define HAVE_GAI_STRERROR 1 + +/* Define to 1 if you have the `getaddrinfo' function. */ +#define HAVE_GETADDRINFO 1 + +/* Define to 1 if you have the `getdtablesize' function. */ +#define HAVE_GETDTABLESIZE 1 + +/* Define to 1 if you have the `geteuid' function. */ +#define HAVE_GETEUID 1 + +/* Define to 1 if you have the `getgrgid' function. */ +#define HAVE_GETGRGID 1 + +/* Define to 1 if you have the `gethostbyaddr_r' function. */ +#define HAVE_GETHOSTBYADDR_R 1 + +/* Define to 1 if you have the `gethostbyname_r' function. */ +#define HAVE_GETHOSTBYNAME_R 1 + +/* Define to 1 if you have the `gethostname' function. */ +#define HAVE_GETHOSTNAME 1 + +/* Define to 1 if you have the `getnameinfo' function. */ +#define HAVE_GETNAMEINFO 1 + +/* Define to 1 if you have the `getopt' function. */ +#define HAVE_GETOPT 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_GETOPT_H 1 + +/* Define to 1 if you have the `getpassphrase' function. */ +/* #undef HAVE_GETPASSPHRASE */ + +/* Define to 1 if you have the `getpeereid' function. */ +#define HAVE_GETPEEREID 1 + +/* Define to 1 if you have the `getpeerucred' function. */ +/* #undef HAVE_GETPEERUCRED */ + +/* Define to 1 if you have the `getpwnam' function. */ +#define HAVE_GETPWNAM 1 + +/* Define to 1 if you have the `getpwuid' function. */ +#define HAVE_GETPWUID 1 + +/* Define to 1 if you have the `getspnam' function. */ +/* #undef HAVE_GETSPNAM */ + +/* Define to 1 if you have the `gettimeofday' function. */ +#define HAVE_GETTIMEOFDAY 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_GMP_H */ + +/* Define to 1 if you have the `gmtime_r' function. */ +#define HAVE_GMTIME_R 1 + +/* define if you have GNUtls */ +/* #undef HAVE_GNUTLS */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_GNUTLS_GNUTLS_H */ + +/* if you have GNU Pth */ +/* #undef HAVE_GNU_PTH */ + +/* Define to 1 if you have the header file. */ +#define HAVE_GRP_H 1 + +/* Define to 1 if you have the `hstrerror' function. */ +#define HAVE_HSTRERROR 1 + +/* define to you inet_aton(3) is available */ +#define HAVE_INET_ATON 1 + +/* Define to 1 if you have the `inet_ntoa_b' function. */ +/* #undef HAVE_INET_NTOA_B */ + +/* Define to 1 if you have the `inet_ntop' function. */ +#define HAVE_INET_NTOP 1 + +/* Define to 1 if you have the `initgroups' function. */ +#define HAVE_INITGROUPS 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_INTTYPES_H 1 + +/* Define to 1 if you have the `ioctl' function. */ +#define HAVE_IOCTL 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_IO_H */ + +/* define if your system supports kqueue */ +#define HAVE_KQUEUE 1 + +/* Define to 1 if you have the `gen' library (-lgen). 
*/ +/* #undef HAVE_LIBGEN */ + +/* Define to 1 if you have the `gmp' library (-lgmp). */ +/* #undef HAVE_LIBGMP */ + +/* Define to 1 if you have the `inet' library (-linet). */ +/* #undef HAVE_LIBINET */ + +/* define if you have libtool -ltdl */ +/* #undef HAVE_LIBLTDL */ + +/* Define to 1 if you have the `net' library (-lnet). */ +/* #undef HAVE_LIBNET */ + +/* Define to 1 if you have the `nsl' library (-lnsl). */ +/* #undef HAVE_LIBNSL */ + +/* Define to 1 if you have the `nsl_s' library (-lnsl_s). */ +/* #undef HAVE_LIBNSL_S */ + +/* Define to 1 if you have the `socket' library (-lsocket). */ +/* #undef HAVE_LIBSOCKET */ + +/* Define to 1 if you have the header file. */ +#define HAVE_LIBUTIL_H 1 + +/* Define to 1 if you have the `V3' library (-lV3). */ +/* #undef HAVE_LIBV3 */ + +/* Define to 1 if you have the header file. */ +#define HAVE_LIMITS_H 1 + +/* if you have LinuxThreads */ +/* #undef HAVE_LINUX_THREADS */ + +/* Define to 1 if you have the header file. */ +#define HAVE_LOCALE_H 1 + +/* Define to 1 if you have the `localtime_r' function. */ +#define HAVE_LOCALTIME_R 1 + +/* Define to 1 if you have the `lockf' function. */ +#define HAVE_LOCKF 1 + +/* Define to 1 if the system has the type `long long'. */ +#define HAVE_LONG_LONG 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_LTDL_H */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_MALLOC_H */ + +/* Define to 1 if you have the `memcpy' function. */ +#define HAVE_MEMCPY 1 + +/* Define to 1 if you have the `memmove' function. */ +#define HAVE_MEMMOVE 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_MEMORY_H 1 + +/* Define to 1 if you have the `memrchr' function. */ +#define HAVE_MEMRCHR 1 + +/* Define to 1 if you have the `mkstemp' function. */ +#define HAVE_MKSTEMP 1 + +/* Define to 1 if you have the `mktemp' function. */ +#define HAVE_MKTEMP 1 + +/* define this if you have mkversion */ +#define HAVE_MKVERSION 1 + +/* Define to 1 if you have the header file, and it defines `DIR'. */ +/* #undef HAVE_NDIR_H */ + +/* Define to 1 if you have the header file. */ +#define HAVE_NETINET_TCP_H 1 + +/* define if strerror_r returns char* instead of int */ +/* #undef HAVE_NONPOSIX_STRERROR_R */ + +/* if you have NT Event Log */ +/* #undef HAVE_NT_EVENT_LOG */ + +/* if you have NT Service Manager */ +/* #undef HAVE_NT_SERVICE_MANAGER */ + +/* if you have NT Threads */ +/* #undef HAVE_NT_THREADS */ + +/* define if you have OpenSSL */ +#define HAVE_OPENSSL 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_OPENSSL_BN_H 1 + +/* define if you have OpenSSL with CRL checking capability */ +#define HAVE_OPENSSL_CRL 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_OPENSSL_CRYPTO_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_OPENSSL_SSL_H 1 + +/* Define to 1 if you have the `pipe' function. */ +#define HAVE_PIPE 1 + +/* Define to 1 if you have the `poll' function. */ +#define HAVE_POLL 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_POLL_H 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_PROCESS_H */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_PSAP_H */ + +/* define to pthreads API spec revision */ +#define HAVE_PTHREADS 10 + +/* define if you have pthread_detach function */ +#define HAVE_PTHREAD_DETACH 1 + +/* Define to 1 if you have the `pthread_getconcurrency' function. */ +#define HAVE_PTHREAD_GETCONCURRENCY 1 + +/* Define to 1 if you have the header file. 
*/ +#define HAVE_PTHREAD_H 1 + +/* Define to 1 if you have the `pthread_kill' function. */ +#define HAVE_PTHREAD_KILL 1 + +/* Define to 1 if you have the `pthread_kill_other_threads_np' function. */ +/* #undef HAVE_PTHREAD_KILL_OTHER_THREADS_NP */ + +/* define if you have pthread_rwlock_destroy function */ +#define HAVE_PTHREAD_RWLOCK_DESTROY 1 + +/* Define to 1 if you have the `pthread_setconcurrency' function. */ +#define HAVE_PTHREAD_SETCONCURRENCY 1 + +/* Define to 1 if you have the `pthread_yield' function. */ +#define HAVE_PTHREAD_YIELD 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_PTH_H */ + +/* Define to 1 if the system has the type `ptrdiff_t'. */ +#define HAVE_PTRDIFF_T 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_PWD_H 1 + +/* Define to 1 if you have the `read' function. */ +#define HAVE_READ 1 + +/* Define to 1 if you have the `recv' function. */ +#define HAVE_RECV 1 + +/* Define to 1 if you have the `recvfrom' function. */ +#define HAVE_RECVFROM 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_REGEX_H 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_RESOLV_H */ + +/* define if you have res_query() */ +/* #undef HAVE_RES_QUERY */ + +/* define if OpenSSL needs RSAref */ +/* #undef HAVE_RSAREF */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SASL_H */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SASL_SASL_H */ + +/* define if your SASL library has sasl_version() */ +/* #undef HAVE_SASL_VERSION */ + +/* Define to 1 if you have the header file. */ +#define HAVE_SCHED_H 1 + +/* Define to 1 if you have the `sched_yield' function. */ +#define HAVE_SCHED_YIELD 1 + +/* Define to 1 if you have the `send' function. */ +#define HAVE_SEND 1 + +/* Define to 1 if you have the `sendmsg' function. */ +#define HAVE_SENDMSG 1 + +/* Define to 1 if you have the `sendto' function. */ +#define HAVE_SENDTO 1 + +/* Define to 1 if you have the `setegid' function. */ +#define HAVE_SETEGID 1 + +/* Define to 1 if you have the `seteuid' function. */ +#define HAVE_SETEUID 1 + +/* Define to 1 if you have the `setgid' function. */ +#define HAVE_SETGID 1 + +/* Define to 1 if you have the `setpwfile' function. */ +/* #undef HAVE_SETPWFILE */ + +/* Define to 1 if you have the `setsid' function. */ +#define HAVE_SETSID 1 + +/* Define to 1 if you have the `setuid' function. */ +#define HAVE_SETUID 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SGTTY_H */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SHADOW_H */ + +/* Define to 1 if you have the `sigaction' function. */ +#define HAVE_SIGACTION 1 + +/* Define to 1 if you have the `signal' function. */ +#define HAVE_SIGNAL 1 + +/* Define to 1 if you have the `sigset' function. */ +#define HAVE_SIGSET 1 + +/* define if you have -lslp */ +/* #undef HAVE_SLP */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SLP_H */ + +/* Define to 1 if you have the `snprintf' function. */ +#define HAVE_SNPRINTF 1 + +/* if you have spawnlp() */ +/* #undef HAVE_SPAWNLP */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SQLEXT_H */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SQL_H */ + +/* Define to 1 if you have the header file. */ +#define HAVE_STDDEF_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STDINT_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STDLIB_H 1 + +/* Define to 1 if you have the `strdup' function. 
*/ +#define HAVE_STRDUP 1 + +/* Define to 1 if you have the `strerror' function. */ +#define HAVE_STRERROR 1 + +/* Define to 1 if you have the `strerror_r' function. */ +#define HAVE_STRERROR_R 1 + +/* Define to 1 if you have the `strftime' function. */ +#define HAVE_STRFTIME 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STRINGS_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STRING_H 1 + +/* Define to 1 if you have the `strpbrk' function. */ +#define HAVE_STRPBRK 1 + +/* Define to 1 if you have the `strrchr' function. */ +#define HAVE_STRRCHR 1 + +/* Define to 1 if you have the `strsep' function. */ +#define HAVE_STRSEP 1 + +/* Define to 1 if you have the `strspn' function. */ +#define HAVE_STRSPN 1 + +/* Define to 1 if you have the `strstr' function. */ +#define HAVE_STRSTR 1 + +/* Define to 1 if you have the `strtol' function. */ +#define HAVE_STRTOL 1 + +/* Define to 1 if you have the `strtoll' function. */ +#define HAVE_STRTOLL 1 + +/* Define to 1 if you have the `strtoq' function. */ +#define HAVE_STRTOQ 1 + +/* Define to 1 if you have the `strtoul' function. */ +#define HAVE_STRTOUL 1 + +/* Define to 1 if you have the `strtoull' function. */ +#define HAVE_STRTOULL 1 + +/* Define to 1 if you have the `strtouq' function. */ +#define HAVE_STRTOUQ 1 + +/* Define to 1 if `msg_accrightslen' is a member of `struct msghdr'. */ +/* #undef HAVE_STRUCT_MSGHDR_MSG_ACCRIGHTSLEN */ + +/* Define to 1 if `msg_control' is a member of `struct msghdr'. */ +/* #undef HAVE_STRUCT_MSGHDR_MSG_CONTROL */ + +/* Define to 1 if `pw_gecos' is a member of `struct passwd'. */ +#define HAVE_STRUCT_PASSWD_PW_GECOS 1 + +/* Define to 1 if `pw_passwd' is a member of `struct passwd'. */ +#define HAVE_STRUCT_PASSWD_PW_PASSWD 1 + +/* Define to 1 if `st_blksize' is a member of `struct stat'. */ +#define HAVE_STRUCT_STAT_ST_BLKSIZE 1 + +/* Define to 1 if `st_fstype' is a member of `struct stat'. */ +/* #undef HAVE_STRUCT_STAT_ST_FSTYPE */ + +/* define to 1 if st_fstype is char * */ +/* #undef HAVE_STRUCT_STAT_ST_FSTYPE_CHAR */ + +/* define to 1 if st_fstype is int */ +/* #undef HAVE_STRUCT_STAT_ST_FSTYPE_INT */ + +/* Define to 1 if `st_vfstype' is a member of `struct stat'. */ +/* #undef HAVE_STRUCT_STAT_ST_VFSTYPE */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SYNCH_H */ + +/* Define to 1 if you have the `sysconf' function. */ +#define HAVE_SYSCONF 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYSEXITS_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYSLOG_H 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SYS_DEVPOLL_H */ + +/* Define to 1 if you have the header file, and it defines `DIR'. + */ +/* #undef HAVE_SYS_DIR_H */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SYS_EPOLL_H */ + +/* define if you actually have sys_errlist in your libs */ +#define HAVE_SYS_ERRLIST 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_ERRNO_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_EVENT_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_FILE_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_FILIO_H 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SYS_FSTYP_H */ + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_IOCTL_H 1 + +/* Define to 1 if you have the header file, and it defines `DIR'. + */ +/* #undef HAVE_SYS_NDIR_H */ + +/* Define to 1 if you have the header file. 
*/ +#define HAVE_SYS_PARAM_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_POLL_H 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SYS_PRIVGRP_H */ + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_RESOURCE_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_SELECT_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_SOCKET_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_STAT_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_SYSLOG_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_TIME_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_TYPES_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_UCRED_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_UIO_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_UN_H 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SYS_UUID_H */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SYS_VMOUNT_H */ + +/* Define to 1 if you have that is POSIX.1 compatible. */ +#define HAVE_SYS_WAIT_H 1 + +/* define if you have -lwrap */ +/* #undef HAVE_TCPD */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_TCPD_H */ + +/* Define to 1 if you have the header file. */ +#define HAVE_TERMIOS_H 1 + +/* if you have Solaris LWP (thr) package */ +/* #undef HAVE_THR */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_THREAD_H */ + +/* Define to 1 if you have the `thr_getconcurrency' function. */ +/* #undef HAVE_THR_GETCONCURRENCY */ + +/* Define to 1 if you have the `thr_setconcurrency' function. */ +/* #undef HAVE_THR_SETCONCURRENCY */ + +/* Define to 1 if you have the `thr_yield' function. */ +/* #undef HAVE_THR_YIELD */ + +/* define if you have TLS */ +#define HAVE_TLS 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_UNISTD_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_UTIME_H 1 + +/* define if you have uuid_generate() */ +/* #undef HAVE_UUID_GENERATE */ + +/* define if you have uuid_to_str() */ +/* #undef HAVE_UUID_TO_STR */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_UUID_UUID_H */ + +/* Define to 1 if you have the `vprintf' function. */ +#define HAVE_VPRINTF 1 + +/* Define to 1 if you have the `vsnprintf' function. */ +#define HAVE_VSNPRINTF 1 + +/* Define to 1 if you have the `wait4' function. */ +#define HAVE_WAIT4 1 + +/* Define to 1 if you have the `waitpid' function. */ +#define HAVE_WAITPID 1 + +/* define if you have winsock */ +/* #undef HAVE_WINSOCK */ + +/* define if you have winsock2 */ +/* #undef HAVE_WINSOCK2 */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_WINSOCK2_H */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_WINSOCK_H */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_WIREDTIGER_H */ + +/* Define to 1 if you have the `write' function. */ +#define HAVE_WRITE 1 + +/* define if select implicitly yields */ +#define HAVE_YIELDING_SELECT 1 + +/* Define to 1 if you have the `_vsnprintf' function. 
*/ +/* #undef HAVE__VSNPRINTF */ + +/* define to 32-bit or greater integer type */ +#define LBER_INT_T int + +/* define to large integer type */ +#define LBER_LEN_T long + +/* define to socket descriptor type */ +#define LBER_SOCKET_T int + +/* define to large integer type */ +#define LBER_TAG_T long + +/* define to 1 if library is thread safe */ +#define LDAP_API_FEATURE_X_OPENLDAP_THREAD_SAFE 1 + +/* define to LDAP VENDOR VERSION */ +/* #undef LDAP_API_FEATURE_X_OPENLDAP_V2_REFERRALS */ + +/* define this to add debugging code */ +/* #undef LDAP_DEBUG */ + +/* define if LDAP libs are dynamic */ +/* #undef LDAP_LIBS_DYNAMIC */ + +/* define to support PF_INET6 */ +#define LDAP_PF_INET6 1 + +/* define to support PF_LOCAL */ +#define LDAP_PF_LOCAL 1 + +/* define this to add SLAPI code */ +/* #undef LDAP_SLAPI */ + +/* define this to add syslog code */ +/* #undef LDAP_SYSLOG */ + +/* Version */ +#define LDAP_VENDOR_VERSION 20501 + +/* Major */ +#define LDAP_VENDOR_VERSION_MAJOR 2 + +/* Minor */ +#define LDAP_VENDOR_VERSION_MINOR 5 + +/* Patch */ +#define LDAP_VENDOR_VERSION_PATCH X + +/* Define to the sub-directory where libtool stores uninstalled libraries. */ +#define LT_OBJDIR ".libs/" + +/* define if memcmp is not 8-bit clean or is otherwise broken */ +/* #undef NEED_MEMCMP_REPLACEMENT */ + +/* define if you have (or want) no threads */ +/* #undef NO_THREADS */ + +/* define to use the original debug style */ +/* #undef OLD_DEBUG */ + +/* Package */ +#define OPENLDAP_PACKAGE "OpenLDAP" + +/* Version */ +#define OPENLDAP_VERSION "2.5.X" + +/* Define to the address where bug reports for this package should be sent. */ +#define PACKAGE_BUGREPORT "" + +/* Define to the full name of this package. */ +#define PACKAGE_NAME "" + +/* Define to the full name and version of this package. */ +#define PACKAGE_STRING "" + +/* Define to the one symbol short name of this package. */ +#define PACKAGE_TARNAME "" + +/* Define to the home page for this package. */ +#define PACKAGE_URL "" + +/* Define to the version of this package. */ +#define PACKAGE_VERSION "" + +/* define if sched_yield yields the entire process */ +/* #undef REPLACE_BROKEN_YIELD */ + +/* Define as the return type of signal handlers (`int' or `void'). */ +#define RETSIGTYPE void + +/* Define to the type of arg 1 for `select'. */ +#define SELECT_TYPE_ARG1 int + +/* Define to the type of args 2, 3 and 4 for `select'. */ +#define SELECT_TYPE_ARG234 (fd_set *) + +/* Define to the type of arg 5 for `select'. */ +#define SELECT_TYPE_ARG5 (struct timeval *) + +/* The size of `int', as computed by sizeof. */ +#define SIZEOF_INT 4 + +/* The size of `long', as computed by sizeof. */ +#define SIZEOF_LONG 8 + +/* The size of `long long', as computed by sizeof. */ +#define SIZEOF_LONG_LONG 8 + +/* The size of `short', as computed by sizeof. */ +#define SIZEOF_SHORT 2 + +/* The size of `wchar_t', as computed by sizeof. 
*/ +#define SIZEOF_WCHAR_T 4 + +/* define to support per-object ACIs */ +/* #undef SLAPD_ACI_ENABLED */ + +/* define to support LDAP Async Metadirectory backend */ +/* #undef SLAPD_ASYNCMETA */ + +/* define to support cleartext passwords */ +/* #undef SLAPD_CLEARTEXT */ + +/* define to support crypt(3) passwords */ +/* #undef SLAPD_CRYPT */ + +/* define to support DNS SRV backend */ +/* #undef SLAPD_DNSSRV */ + +/* define to support LDAP backend */ +/* #undef SLAPD_LDAP */ + +/* define to support MDB backend */ +/* #undef SLAPD_MDB */ + +/* define to support LDAP Metadirectory backend */ +/* #undef SLAPD_META */ + +/* define to support modules */ +/* #undef SLAPD_MODULES */ + +/* dynamically linked module */ +#define SLAPD_MOD_DYNAMIC 2 + +/* statically linked module */ +#define SLAPD_MOD_STATIC 1 + +/* define to support cn=Monitor backend */ +/* #undef SLAPD_MONITOR */ + +/* define to support NDB backend */ +/* #undef SLAPD_NDB */ + +/* define to support NULL backend */ +/* #undef SLAPD_NULL */ + +/* define for In-Directory Access Logging overlay */ +/* #undef SLAPD_OVER_ACCESSLOG */ + +/* define for Audit Logging overlay */ +/* #undef SLAPD_OVER_AUDITLOG */ + +/* define for Automatic Certificate Authority overlay */ +/* #undef SLAPD_OVER_AUTOCA */ + +/* define for Collect overlay */ +/* #undef SLAPD_OVER_COLLECT */ + +/* define for Attribute Constraint overlay */ +/* #undef SLAPD_OVER_CONSTRAINT */ + +/* define for Dynamic Directory Services overlay */ +/* #undef SLAPD_OVER_DDS */ + +/* define for Dynamic Directory Services overlay */ +/* #undef SLAPD_OVER_DEREF */ + +/* define for Dynamic Group overlay */ +/* #undef SLAPD_OVER_DYNGROUP */ + +/* define for Dynamic List overlay */ +/* #undef SLAPD_OVER_DYNLIST */ + +/* define for Reverse Group Membership overlay */ +/* #undef SLAPD_OVER_MEMBEROF */ + +/* define for Password Policy overlay */ +/* #undef SLAPD_OVER_PPOLICY */ + +/* define for Proxy Cache overlay */ +/* #undef SLAPD_OVER_PROXYCACHE */ + +/* define for Referential Integrity overlay */ +/* #undef SLAPD_OVER_REFINT */ + +/* define for Return Code overlay */ +/* #undef SLAPD_OVER_RETCODE */ + +/* define for Rewrite/Remap overlay */ +/* #undef SLAPD_OVER_RWM */ + +/* define for Sequential Modify overlay */ +/* #undef SLAPD_OVER_SEQMOD */ + +/* define for ServerSideSort/VLV overlay */ +/* #undef SLAPD_OVER_SSSVLV */ + +/* define for Syncrepl Provider overlay */ +/* #undef SLAPD_OVER_SYNCPROV */ + +/* define for Translucent Proxy overlay */ +/* #undef SLAPD_OVER_TRANSLUCENT */ + +/* define for Attribute Uniqueness overlay */ +/* #undef SLAPD_OVER_UNIQUE */ + +/* define for Value Sorting overlay */ +/* #undef SLAPD_OVER_VALSORT */ + +/* define to support PASSWD backend */ +/* #undef SLAPD_PASSWD */ + +/* define to support PERL backend */ +/* #undef SLAPD_PERL */ + +/* define to support relay backend */ +/* #undef SLAPD_RELAY */ + +/* define to support reverse lookups */ +/* #undef SLAPD_RLOOKUPS */ + +/* define to support SHELL backend */ +/* #undef SLAPD_SHELL */ + +/* define to support SOCK backend */ +/* #undef SLAPD_SOCK */ + +/* define to support SASL passwords */ +/* #undef SLAPD_SPASSWD */ + +/* define to support SQL backend */ +/* #undef SLAPD_SQL */ + +/* define to support WiredTiger backend */ +/* #undef SLAPD_WT */ + +/* define to support run-time loadable ACL */ +/* #undef SLAP_DYNACL */ + +/* Define to 1 if you have the ANSI C header files. */ +#define STDC_HEADERS 1 + +/* Define to 1 if you can safely include both and . 
*/ +#define TIME_WITH_SYS_TIME 1 + +/* Define to 1 if your declares `struct tm'. */ +/* #undef TM_IN_SYS_TIME */ + +/* set to urandom device */ +#define URANDOM_DEVICE "/dev/urandom" + +/* define to use OpenSSL BIGNUM for MP */ +/* #undef USE_MP_BIGNUM */ + +/* define to use GMP for MP */ +/* #undef USE_MP_GMP */ + +/* define to use 'long' for MP */ +/* #undef USE_MP_LONG */ + +/* define to use 'long long' for MP */ +/* #undef USE_MP_LONG_LONG */ + +/* Define WORDS_BIGENDIAN to 1 if your processor stores words with the most + significant byte first (like Motorola and SPARC, unlike Intel). */ +#if defined AC_APPLE_UNIVERSAL_BUILD +# if defined __BIG_ENDIAN__ +# define WORDS_BIGENDIAN 1 +# endif +#else +# ifndef WORDS_BIGENDIAN +/* # undef WORDS_BIGENDIAN */ +# endif +#endif + +/* Define to the type of arg 3 for `accept'. */ +#define ber_socklen_t socklen_t + +/* Define to `char *' if does not define. */ +/* #undef caddr_t */ + +/* Define to empty if `const' does not conform to ANSI C. */ +/* #undef const */ + +/* Define to `int' if doesn't define. */ +/* #undef gid_t */ + +/* Define to `int' if does not define. */ +/* #undef mode_t */ + +/* Define to `long' if does not define. */ +/* #undef off_t */ + +/* Define to `int' if does not define. */ +/* #undef pid_t */ + +/* Define to `int' if does not define. */ +/* #undef sig_atomic_t */ + +/* Define to `unsigned' if does not define. */ +/* #undef size_t */ + +/* define to snprintf routine */ +/* #undef snprintf */ + +/* Define like ber_socklen_t if does not define. */ +/* #undef socklen_t */ + +/* Define to `signed int' if does not define. */ +/* #undef ssize_t */ + +/* Define to `int' if doesn't define. */ +/* #undef uid_t */ + +/* define as empty if volatile is not supported */ +/* #undef volatile */ + +/* define to snprintf routine */ +/* #undef vsnprintf */ + + +/* begin of portable.h.post */ + +#ifdef _WIN32 +/* don't suck in all of the win32 api */ +# define WIN32_LEAN_AND_MEAN 1 +#endif + +#ifndef LDAP_NEEDS_PROTOTYPES +/* force LDAP_P to always include prototypes */ +#define LDAP_NEEDS_PROTOTYPES 1 +#endif + +#ifndef LDAP_REL_ENG +#if (LDAP_VENDOR_VERSION == 000000) && !defined(LDAP_DEVEL) +#define LDAP_DEVEL +#endif +#if defined(LDAP_DEVEL) && !defined(LDAP_TEST) +#define LDAP_TEST +#endif +#endif + +#ifdef HAVE_STDDEF_H +# include +#endif + +#ifdef HAVE_EBCDIC +/* ASCII/EBCDIC converting replacements for stdio funcs + * vsnprintf and snprintf are used too, but they are already + * checked by the configure script + */ +#define fputs ber_pvt_fputs +#define fgets ber_pvt_fgets +#define printf ber_pvt_printf +#define fprintf ber_pvt_fprintf +#define vfprintf ber_pvt_vfprintf +#define vsprintf ber_pvt_vsprintf +#endif + +#include "ac/fdset.h" + +#include "ldap_cdefs.h" +#include "ldap_features.h" + +#include "ac/assert.h" +#include "ac/localize.h" + +#endif /* _LDAP_PORTABLE_H */ +/* end of portable.h.post */ + diff --git a/contrib/openldap-cmake/linux_aarch64/include/lber_types.h b/contrib/openldap-cmake/linux_aarch64/include/lber_types.h new file mode 100644 index 00000000000..dbd59430527 --- /dev/null +++ b/contrib/openldap-cmake/linux_aarch64/include/lber_types.h @@ -0,0 +1,63 @@ +/* include/lber_types.h. Generated from lber_types.hin by configure. */ +/* $OpenLDAP$ */ +/* This work is part of OpenLDAP Software . + * + * Copyright 1998-2020 The OpenLDAP Foundation. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted only as authorized by the OpenLDAP + * Public License. + * + * A copy of this license is available in file LICENSE in the + * top-level directory of the distribution or, alternatively, at + * . + */ + +/* + * LBER types + */ + +#ifndef _LBER_TYPES_H +#define _LBER_TYPES_H + +#include + +LDAP_BEGIN_DECL + +/* LBER boolean, enum, integers (32 bits or larger) */ +#define LBER_INT_T int + +/* LBER tags (32 bits or larger) */ +#define LBER_TAG_T long + +/* LBER socket descriptor */ +#define LBER_SOCKET_T int + +/* LBER lengths (32 bits or larger) */ +#define LBER_LEN_T long + +/* ------------------------------------------------------------ */ + +/* booleans, enumerations, and integers */ +typedef LBER_INT_T ber_int_t; + +/* signed and unsigned versions */ +typedef signed LBER_INT_T ber_sint_t; +typedef unsigned LBER_INT_T ber_uint_t; + +/* tags */ +typedef unsigned LBER_TAG_T ber_tag_t; + +/* "socket" descriptors */ +typedef LBER_SOCKET_T ber_socket_t; + +/* lengths */ +typedef unsigned LBER_LEN_T ber_len_t; + +/* signed lengths */ +typedef signed LBER_LEN_T ber_slen_t; + +LDAP_END_DECL + +#endif /* _LBER_TYPES_H */ diff --git a/contrib/openldap-cmake/linux_aarch64/include/ldap_config.h b/contrib/openldap-cmake/linux_aarch64/include/ldap_config.h new file mode 100644 index 00000000000..89f7b40b884 --- /dev/null +++ b/contrib/openldap-cmake/linux_aarch64/include/ldap_config.h @@ -0,0 +1,74 @@ +/* include/ldap_config.h. Generated from ldap_config.hin by configure. */ +/* $OpenLDAP$ */ +/* This work is part of OpenLDAP Software . + * + * Copyright 1998-2020 The OpenLDAP Foundation. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted only as authorized by the OpenLDAP + * Public License. + * + * A copy of this license is available in file LICENSE in the + * top-level directory of the distribution or, alternatively, at + * . + */ + +/* + * This file works in conjunction with OpenLDAP configure system. + * If you do no like the values below, adjust your configure options. + */ + +#ifndef _LDAP_CONFIG_H +#define _LDAP_CONFIG_H + +/* directory separator */ +#ifndef LDAP_DIRSEP +#ifndef _WIN32 +#define LDAP_DIRSEP "/" +#else +#define LDAP_DIRSEP "\\" +#endif +#endif + +/* directory for temporary files */ +#if defined(_WIN32) +# define LDAP_TMPDIR "C:\\." 
/* we don't have much of a choice */ +#elif defined( _P_tmpdir ) +# define LDAP_TMPDIR _P_tmpdir +#elif defined( P_tmpdir ) +# define LDAP_TMPDIR P_tmpdir +#elif defined( _PATH_TMPDIR ) +# define LDAP_TMPDIR _PATH_TMPDIR +#else +# define LDAP_TMPDIR LDAP_DIRSEP "tmp" +#endif + +/* directories */ +#ifndef LDAP_BINDIR +#define LDAP_BINDIR "/tmp/ldap-prefix/bin" +#endif +#ifndef LDAP_SBINDIR +#define LDAP_SBINDIR "/tmp/ldap-prefix/sbin" +#endif +#ifndef LDAP_DATADIR +#define LDAP_DATADIR "/tmp/ldap-prefix/share/openldap" +#endif +#ifndef LDAP_SYSCONFDIR +#define LDAP_SYSCONFDIR "/tmp/ldap-prefix/etc/openldap" +#endif +#ifndef LDAP_LIBEXECDIR +#define LDAP_LIBEXECDIR "/tmp/ldap-prefix/libexec" +#endif +#ifndef LDAP_MODULEDIR +#define LDAP_MODULEDIR "/tmp/ldap-prefix/libexec/openldap" +#endif +#ifndef LDAP_RUNDIR +#define LDAP_RUNDIR "/tmp/ldap-prefix/var" +#endif +#ifndef LDAP_LOCALEDIR +#define LDAP_LOCALEDIR "" +#endif + + +#endif /* _LDAP_CONFIG_H */ diff --git a/contrib/openldap-cmake/linux_aarch64/include/ldap_features.h b/contrib/openldap-cmake/linux_aarch64/include/ldap_features.h new file mode 100644 index 00000000000..f0cc7c3626f --- /dev/null +++ b/contrib/openldap-cmake/linux_aarch64/include/ldap_features.h @@ -0,0 +1,61 @@ +/* include/ldap_features.h. Generated from ldap_features.hin by configure. */ +/* $OpenLDAP$ */ +/* This work is part of OpenLDAP Software . + * + * Copyright 1998-2020 The OpenLDAP Foundation. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted only as authorized by the OpenLDAP + * Public License. + * + * A copy of this license is available in file LICENSE in the + * top-level directory of the distribution or, alternatively, at + * . + */ + +/* + * LDAP Features + */ + +#ifndef _LDAP_FEATURES_H +#define _LDAP_FEATURES_H 1 + +/* OpenLDAP API version macros */ +#define LDAP_VENDOR_VERSION 20501 +#define LDAP_VENDOR_VERSION_MAJOR 2 +#define LDAP_VENDOR_VERSION_MINOR 5 +#define LDAP_VENDOR_VERSION_PATCH X + +/* +** WORK IN PROGRESS! +** +** OpenLDAP reentrancy/thread-safeness should be dynamically +** checked using ldap_get_option(). +** +** The -lldap implementation is not thread-safe. +** +** The -lldap_r implementation is: +** LDAP_API_FEATURE_THREAD_SAFE (basic thread safety) +** but also be: +** LDAP_API_FEATURE_SESSION_THREAD_SAFE +** LDAP_API_FEATURE_OPERATION_THREAD_SAFE +** +** The preprocessor flag LDAP_API_FEATURE_X_OPENLDAP_THREAD_SAFE +** can be used to determine if -lldap_r is available at compile +** time. You must define LDAP_THREAD_SAFE if and only if you +** link with -lldap_r. +** +** If you fail to define LDAP_THREAD_SAFE when linking with +** -lldap_r or define LDAP_THREAD_SAFE when linking with -lldap, +** provided header definitions and declarations may be incorrect. +** +*/ + +/* is -lldap_r available or not */ +#define LDAP_API_FEATURE_X_OPENLDAP_THREAD_SAFE 1 + +/* LDAP v2 Referrals */ +/* #undef LDAP_API_FEATURE_X_OPENLDAP_V2_REFERRALS */ + +#endif /* LDAP_FEATURES */ diff --git a/contrib/openldap-cmake/linux_aarch64/include/portable.h b/contrib/openldap-cmake/linux_aarch64/include/portable.h new file mode 100644 index 00000000000..2924b6713a4 --- /dev/null +++ b/contrib/openldap-cmake/linux_aarch64/include/portable.h @@ -0,0 +1,1169 @@ +/* include/portable.h. Generated from portable.hin by configure. */ +/* include/portable.hin. Generated from configure.in by autoheader. */ + + +/* begin of portable.h.pre */ +/* This work is part of OpenLDAP Software . 
+ * + * Copyright 1998-2020 The OpenLDAP Foundation + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted only as authorized by the OpenLDAP + * Public License. + * + * A copy of this license is available in the file LICENSE in the + * top-level directory of the distribution or, alternatively, at + * . + */ + +#ifndef _LDAP_PORTABLE_H +#define _LDAP_PORTABLE_H + +/* define this if needed to get reentrant functions */ +#ifndef REENTRANT +#define REENTRANT 1 +#endif +#ifndef _REENTRANT +#define _REENTRANT 1 +#endif + +/* define this if needed to get threadsafe functions */ +#ifndef THREADSAFE +#define THREADSAFE 1 +#endif +#ifndef _THREADSAFE +#define _THREADSAFE 1 +#endif +#ifndef THREAD_SAFE +#define THREAD_SAFE 1 +#endif +#ifndef _THREAD_SAFE +#define _THREAD_SAFE 1 +#endif + +#ifndef _SGI_MP_SOURCE +#define _SGI_MP_SOURCE 1 +#endif + +/* end of portable.h.pre */ + + +/* Define if building universal (internal helper macro) */ +/* #undef AC_APPLE_UNIVERSAL_BUILD */ + +/* define to use both and */ +/* #undef BOTH_STRINGS_H */ + +/* define if cross compiling */ +/* #undef CROSS_COMPILING */ + +/* set to the number of arguments ctime_r() expects */ +#define CTIME_R_NARGS 2 + +/* define if toupper() requires islower() */ +/* #undef C_UPPER_LOWER */ + +/* define if sys_errlist is not declared in stdio.h or errno.h */ +/* #undef DECL_SYS_ERRLIST */ + +/* define to enable slapi library */ +/* #undef ENABLE_SLAPI */ + +/* defined to be the EXE extension */ +#define EXEEXT "" + +/* set to the number of arguments gethostbyaddr_r() expects */ +#define GETHOSTBYADDR_R_NARGS 8 + +/* set to the number of arguments gethostbyname_r() expects */ +#define GETHOSTBYNAME_R_NARGS 6 + +/* Define to 1 if `TIOCGWINSZ' requires . */ +#define GWINSZ_IN_SYS_IOCTL 1 + +/* define if you have AIX security lib */ +/* #undef HAVE_AIX_SECURITY */ + +/* Define to 1 if you have the header file. */ +#define HAVE_ARPA_INET_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_ARPA_NAMESER_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_ASSERT_H 1 + +/* Define to 1 if you have the `bcopy' function. */ +#define HAVE_BCOPY 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_BITS_TYPES_H 1 + +/* Define to 1 if you have the `chroot' function. */ +#define HAVE_CHROOT 1 + +/* Define to 1 if you have the `closesocket' function. */ +/* #undef HAVE_CLOSESOCKET */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_CONIO_H */ + +/* define if crypt(3) is available */ +/* #undef HAVE_CRYPT */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_CRYPT_H */ + +/* define if crypt_r() is also available */ +/* #undef HAVE_CRYPT_R */ + +/* Define to 1 if you have the `ctime_r' function. */ +#define HAVE_CTIME_R 1 + +/* define if you have Cyrus SASL */ +/* #undef HAVE_CYRUS_SASL */ + +/* define if your system supports /dev/poll */ +/* #undef HAVE_DEVPOLL */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_DIRECT_H */ + +/* Define to 1 if you have the header file, and it defines `DIR'. + */ +#define HAVE_DIRENT_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_DLFCN_H 1 + +/* Define to 1 if you don't have `vprintf' but do have `_doprnt.' */ +/* #undef HAVE_DOPRNT */ + +/* define if system uses EBCDIC instead of ASCII */ +/* #undef HAVE_EBCDIC */ + +/* Define to 1 if you have the `endgrent' function. 
*/ +#define HAVE_ENDGRENT 1 + +/* Define to 1 if you have the `endpwent' function. */ +#define HAVE_ENDPWENT 1 + +/* define if your system supports epoll */ +#define HAVE_EPOLL 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_ERRNO_H 1 + +/* Define to 1 if you have the `fcntl' function. */ +#define HAVE_FCNTL 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_FCNTL_H 1 + +/* define if you actually have FreeBSD fetch(3) */ +/* #undef HAVE_FETCH */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_FILIO_H */ + +/* Define to 1 if you have the `flock' function. */ +#define HAVE_FLOCK 1 + +/* Define to 1 if you have the `fstat' function. */ +#define HAVE_FSTAT 1 + +/* Define to 1 if you have the `gai_strerror' function. */ +#define HAVE_GAI_STRERROR 1 + +/* Define to 1 if you have the `getaddrinfo' function. */ +#define HAVE_GETADDRINFO 1 + +/* Define to 1 if you have the `getdtablesize' function. */ +#define HAVE_GETDTABLESIZE 1 + +/* Define to 1 if you have the `geteuid' function. */ +#define HAVE_GETEUID 1 + +/* Define to 1 if you have the `getgrgid' function. */ +#define HAVE_GETGRGID 1 + +/* Define to 1 if you have the `gethostbyaddr_r' function. */ +#define HAVE_GETHOSTBYADDR_R 1 + +/* Define to 1 if you have the `gethostbyname_r' function. */ +#define HAVE_GETHOSTBYNAME_R 1 + +/* Define to 1 if you have the `gethostname' function. */ +#define HAVE_GETHOSTNAME 1 + +/* Define to 1 if you have the `getnameinfo' function. */ +#define HAVE_GETNAMEINFO 1 + +/* Define to 1 if you have the `getopt' function. */ +#define HAVE_GETOPT 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_GETOPT_H 1 + +/* Define to 1 if you have the `getpassphrase' function. */ +/* #undef HAVE_GETPASSPHRASE */ + +/* Define to 1 if you have the `getpeereid' function. */ +/* #undef HAVE_GETPEEREID */ + +/* Define to 1 if you have the `getpeerucred' function. */ +/* #undef HAVE_GETPEERUCRED */ + +/* Define to 1 if you have the `getpwnam' function. */ +#define HAVE_GETPWNAM 1 + +/* Define to 1 if you have the `getpwuid' function. */ +#define HAVE_GETPWUID 1 + +/* Define to 1 if you have the `getspnam' function. */ +#define HAVE_GETSPNAM 1 + +/* Define to 1 if you have the `gettimeofday' function. */ +#define HAVE_GETTIMEOFDAY 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_GMP_H */ + +/* Define to 1 if you have the `gmtime_r' function. */ +#define HAVE_GMTIME_R 1 + +/* define if you have GNUtls */ +/* #undef HAVE_GNUTLS */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_GNUTLS_GNUTLS_H */ + +/* if you have GNU Pth */ +/* #undef HAVE_GNU_PTH */ + +/* Define to 1 if you have the header file. */ +#define HAVE_GRP_H 1 + +/* Define to 1 if you have the `hstrerror' function. */ +#define HAVE_HSTRERROR 1 + +/* define to you inet_aton(3) is available */ +#define HAVE_INET_ATON 1 + +/* Define to 1 if you have the `inet_ntoa_b' function. */ +/* #undef HAVE_INET_NTOA_B */ + +/* Define to 1 if you have the `inet_ntop' function. */ +#define HAVE_INET_NTOP 1 + +/* Define to 1 if you have the `initgroups' function. */ +#define HAVE_INITGROUPS 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_INTTYPES_H 1 + +/* Define to 1 if you have the `ioctl' function. */ +#define HAVE_IOCTL 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_IO_H */ + +/* define if your system supports kqueue */ +/* #undef HAVE_KQUEUE */ + +/* Define to 1 if you have the `gen' library (-lgen). 
*/ +/* #undef HAVE_LIBGEN */ + +/* Define to 1 if you have the `gmp' library (-lgmp). */ +/* #undef HAVE_LIBGMP */ + +/* Define to 1 if you have the `inet' library (-linet). */ +/* #undef HAVE_LIBINET */ + +/* define if you have libtool -ltdl */ +/* #undef HAVE_LIBLTDL */ + +/* Define to 1 if you have the `net' library (-lnet). */ +/* #undef HAVE_LIBNET */ + +/* Define to 1 if you have the `nsl' library (-lnsl). */ +/* #undef HAVE_LIBNSL */ + +/* Define to 1 if you have the `nsl_s' library (-lnsl_s). */ +/* #undef HAVE_LIBNSL_S */ + +/* Define to 1 if you have the `socket' library (-lsocket). */ +/* #undef HAVE_LIBSOCKET */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_LIBUTIL_H */ + +/* Define to 1 if you have the `V3' library (-lV3). */ +/* #undef HAVE_LIBV3 */ + +/* Define to 1 if you have the header file. */ +#define HAVE_LIMITS_H 1 + +/* if you have LinuxThreads */ +/* #undef HAVE_LINUX_THREADS */ + +/* Define to 1 if you have the header file. */ +#define HAVE_LOCALE_H 1 + +/* Define to 1 if you have the `localtime_r' function. */ +#define HAVE_LOCALTIME_R 1 + +/* Define to 1 if you have the `lockf' function. */ +#define HAVE_LOCKF 1 + +/* Define to 1 if the system has the type `long long'. */ +#define HAVE_LONG_LONG 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_LTDL_H */ + +/* Define to 1 if you have the header file. */ +#define HAVE_MALLOC_H 1 + +/* Define to 1 if you have the `memcpy' function. */ +#define HAVE_MEMCPY 1 + +/* Define to 1 if you have the `memmove' function. */ +#define HAVE_MEMMOVE 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_MEMORY_H 1 + +/* Define to 1 if you have the `memrchr' function. */ +#define HAVE_MEMRCHR 1 + +/* Define to 1 if you have the `mkstemp' function. */ +#define HAVE_MKSTEMP 1 + +/* Define to 1 if you have the `mktemp' function. */ +#define HAVE_MKTEMP 1 + +/* define this if you have mkversion */ +#define HAVE_MKVERSION 1 + +/* Define to 1 if you have the header file, and it defines `DIR'. */ +/* #undef HAVE_NDIR_H */ + +/* Define to 1 if you have the header file. */ +#define HAVE_NETINET_TCP_H 1 + +/* define if strerror_r returns char* instead of int */ +/* #undef HAVE_NONPOSIX_STRERROR_R */ + +/* if you have NT Event Log */ +/* #undef HAVE_NT_EVENT_LOG */ + +/* if you have NT Service Manager */ +/* #undef HAVE_NT_SERVICE_MANAGER */ + +/* if you have NT Threads */ +/* #undef HAVE_NT_THREADS */ + +/* define if you have OpenSSL */ +#define HAVE_OPENSSL 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_OPENSSL_BN_H 1 + +/* define if you have OpenSSL with CRL checking capability */ +#define HAVE_OPENSSL_CRL 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_OPENSSL_CRYPTO_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_OPENSSL_SSL_H 1 + +/* Define to 1 if you have the `pipe' function. */ +#define HAVE_PIPE 1 + +/* Define to 1 if you have the `poll' function. */ +#define HAVE_POLL 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_POLL_H 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_PROCESS_H */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_PSAP_H */ + +/* define to pthreads API spec revision */ +#define HAVE_PTHREADS 10 + +/* define if you have pthread_detach function */ +#define HAVE_PTHREAD_DETACH 1 + +/* Define to 1 if you have the `pthread_getconcurrency' function. */ +#define HAVE_PTHREAD_GETCONCURRENCY 1 + +/* Define to 1 if you have the header file. 
*/ +#define HAVE_PTHREAD_H 1 + +/* Define to 1 if you have the `pthread_kill' function. */ +#define HAVE_PTHREAD_KILL 1 + +/* Define to 1 if you have the `pthread_kill_other_threads_np' function. */ +/* #undef HAVE_PTHREAD_KILL_OTHER_THREADS_NP */ + +/* define if you have pthread_rwlock_destroy function */ +#define HAVE_PTHREAD_RWLOCK_DESTROY 1 + +/* Define to 1 if you have the `pthread_setconcurrency' function. */ +#define HAVE_PTHREAD_SETCONCURRENCY 1 + +/* Define to 1 if you have the `pthread_yield' function. */ +#define HAVE_PTHREAD_YIELD 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_PTH_H */ + +/* Define to 1 if the system has the type `ptrdiff_t'. */ +#define HAVE_PTRDIFF_T 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_PWD_H 1 + +/* Define to 1 if you have the `read' function. */ +#define HAVE_READ 1 + +/* Define to 1 if you have the `recv' function. */ +#define HAVE_RECV 1 + +/* Define to 1 if you have the `recvfrom' function. */ +#define HAVE_RECVFROM 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_REGEX_H 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_RESOLV_H */ + +/* define if you have res_query() */ +/* #undef HAVE_RES_QUERY */ + +/* define if OpenSSL needs RSAref */ +/* #undef HAVE_RSAREF */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SASL_H */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SASL_SASL_H */ + +/* define if your SASL library has sasl_version() */ +/* #undef HAVE_SASL_VERSION */ + +/* Define to 1 if you have the header file. */ +#define HAVE_SCHED_H 1 + +/* Define to 1 if you have the `sched_yield' function. */ +#define HAVE_SCHED_YIELD 1 + +/* Define to 1 if you have the `send' function. */ +#define HAVE_SEND 1 + +/* Define to 1 if you have the `sendmsg' function. */ +#define HAVE_SENDMSG 1 + +/* Define to 1 if you have the `sendto' function. */ +#define HAVE_SENDTO 1 + +/* Define to 1 if you have the `setegid' function. */ +#define HAVE_SETEGID 1 + +/* Define to 1 if you have the `seteuid' function. */ +#define HAVE_SETEUID 1 + +/* Define to 1 if you have the `setgid' function. */ +#define HAVE_SETGID 1 + +/* Define to 1 if you have the `setpwfile' function. */ +/* #undef HAVE_SETPWFILE */ + +/* Define to 1 if you have the `setsid' function. */ +#define HAVE_SETSID 1 + +/* Define to 1 if you have the `setuid' function. */ +#define HAVE_SETUID 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SGTTY_H 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SHADOW_H */ + +/* Define to 1 if you have the `sigaction' function. */ +#define HAVE_SIGACTION 1 + +/* Define to 1 if you have the `signal' function. */ +#define HAVE_SIGNAL 1 + +/* Define to 1 if you have the `sigset' function. */ +#define HAVE_SIGSET 1 + +/* define if you have -lslp */ +/* #undef HAVE_SLP */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SLP_H */ + +/* Define to 1 if you have the `snprintf' function. */ +#define HAVE_SNPRINTF 1 + +/* if you have spawnlp() */ +/* #undef HAVE_SPAWNLP */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SQLEXT_H */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SQL_H */ + +/* Define to 1 if you have the header file. */ +#define HAVE_STDDEF_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STDINT_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STDLIB_H 1 + +/* Define to 1 if you have the `strdup' function. 
*/ +#define HAVE_STRDUP 1 + +/* Define to 1 if you have the `strerror' function. */ +#define HAVE_STRERROR 1 + +/* Define to 1 if you have the `strerror_r' function. */ +#define HAVE_STRERROR_R 1 + +/* Define to 1 if you have the `strftime' function. */ +#define HAVE_STRFTIME 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STRINGS_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STRING_H 1 + +/* Define to 1 if you have the `strpbrk' function. */ +#define HAVE_STRPBRK 1 + +/* Define to 1 if you have the `strrchr' function. */ +#define HAVE_STRRCHR 1 + +/* Define to 1 if you have the `strsep' function. */ +#define HAVE_STRSEP 1 + +/* Define to 1 if you have the `strspn' function. */ +#define HAVE_STRSPN 1 + +/* Define to 1 if you have the `strstr' function. */ +#define HAVE_STRSTR 1 + +/* Define to 1 if you have the `strtol' function. */ +#define HAVE_STRTOL 1 + +/* Define to 1 if you have the `strtoll' function. */ +#define HAVE_STRTOLL 1 + +/* Define to 1 if you have the `strtoq' function. */ +#define HAVE_STRTOQ 1 + +/* Define to 1 if you have the `strtoul' function. */ +#define HAVE_STRTOUL 1 + +/* Define to 1 if you have the `strtoull' function. */ +#define HAVE_STRTOULL 1 + +/* Define to 1 if you have the `strtouq' function. */ +#define HAVE_STRTOUQ 1 + +/* Define to 1 if `msg_accrightslen' is a member of `struct msghdr'. */ +/* #undef HAVE_STRUCT_MSGHDR_MSG_ACCRIGHTSLEN */ + +/* Define to 1 if `msg_control' is a member of `struct msghdr'. */ +#define HAVE_STRUCT_MSGHDR_MSG_CONTROL 1 + +/* Define to 1 if `pw_gecos' is a member of `struct passwd'. */ +#define HAVE_STRUCT_PASSWD_PW_GECOS 1 + +/* Define to 1 if `pw_passwd' is a member of `struct passwd'. */ +#define HAVE_STRUCT_PASSWD_PW_PASSWD 1 + +/* Define to 1 if `st_blksize' is a member of `struct stat'. */ +#define HAVE_STRUCT_STAT_ST_BLKSIZE 1 + +/* Define to 1 if `st_fstype' is a member of `struct stat'. */ +/* #undef HAVE_STRUCT_STAT_ST_FSTYPE */ + +/* define to 1 if st_fstype is char * */ +/* #undef HAVE_STRUCT_STAT_ST_FSTYPE_CHAR */ + +/* define to 1 if st_fstype is int */ +/* #undef HAVE_STRUCT_STAT_ST_FSTYPE_INT */ + +/* Define to 1 if `st_vfstype' is a member of `struct stat'. */ +/* #undef HAVE_STRUCT_STAT_ST_VFSTYPE */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SYNCH_H */ + +/* Define to 1 if you have the `sysconf' function. */ +#define HAVE_SYSCONF 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYSEXITS_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYSLOG_H 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SYS_DEVPOLL_H */ + +/* Define to 1 if you have the header file, and it defines `DIR'. + */ +/* #undef HAVE_SYS_DIR_H */ + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_EPOLL_H 1 + +/* define if you actually have sys_errlist in your libs */ +#define HAVE_SYS_ERRLIST 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_ERRNO_H 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SYS_EVENT_H */ + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_FILE_H 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SYS_FILIO_H */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SYS_FSTYP_H */ + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_IOCTL_H 1 + +/* Define to 1 if you have the header file, and it defines `DIR'. + */ +/* #undef HAVE_SYS_NDIR_H */ + +/* Define to 1 if you have the header file. 
*/ +#define HAVE_SYS_PARAM_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_POLL_H 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SYS_PRIVGRP_H */ + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_RESOURCE_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_SELECT_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_SOCKET_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_STAT_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_SYSLOG_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_TIME_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_TYPES_H 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SYS_UCRED_H */ + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_UIO_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_UN_H 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SYS_UUID_H */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SYS_VMOUNT_H */ + +/* Define to 1 if you have that is POSIX.1 compatible. */ +#define HAVE_SYS_WAIT_H 1 + +/* define if you have -lwrap */ +/* #undef HAVE_TCPD */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_TCPD_H */ + +/* Define to 1 if you have the header file. */ +#define HAVE_TERMIOS_H 1 + +/* if you have Solaris LWP (thr) package */ +/* #undef HAVE_THR */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_THREAD_H */ + +/* Define to 1 if you have the `thr_getconcurrency' function. */ +/* #undef HAVE_THR_GETCONCURRENCY */ + +/* Define to 1 if you have the `thr_setconcurrency' function. */ +/* #undef HAVE_THR_SETCONCURRENCY */ + +/* Define to 1 if you have the `thr_yield' function. */ +/* #undef HAVE_THR_YIELD */ + +/* define if you have TLS */ +#define HAVE_TLS 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_UNISTD_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_UTIME_H 1 + +/* define if you have uuid_generate() */ +/* #undef HAVE_UUID_GENERATE */ + +/* define if you have uuid_to_str() */ +/* #undef HAVE_UUID_TO_STR */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_UUID_UUID_H */ + +/* Define to 1 if you have the `vprintf' function. */ +#define HAVE_VPRINTF 1 + +/* Define to 1 if you have the `vsnprintf' function. */ +#define HAVE_VSNPRINTF 1 + +/* Define to 1 if you have the `wait4' function. */ +#define HAVE_WAIT4 1 + +/* Define to 1 if you have the `waitpid' function. */ +#define HAVE_WAITPID 1 + +/* define if you have winsock */ +/* #undef HAVE_WINSOCK */ + +/* define if you have winsock2 */ +/* #undef HAVE_WINSOCK2 */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_WINSOCK2_H */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_WINSOCK_H */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_WIREDTIGER_H */ + +/* Define to 1 if you have the `write' function. */ +#define HAVE_WRITE 1 + +/* define if select implicitly yields */ +#define HAVE_YIELDING_SELECT 1 + +/* Define to 1 if you have the `_vsnprintf' function. 
*/ +/* #undef HAVE__VSNPRINTF */ + +/* define to 32-bit or greater integer type */ +#define LBER_INT_T int + +/* define to large integer type */ +#define LBER_LEN_T long + +/* define to socket descriptor type */ +#define LBER_SOCKET_T int + +/* define to large integer type */ +#define LBER_TAG_T long + +/* define to 1 if library is thread safe */ +#define LDAP_API_FEATURE_X_OPENLDAP_THREAD_SAFE 1 + +/* define to LDAP VENDOR VERSION */ +/* #undef LDAP_API_FEATURE_X_OPENLDAP_V2_REFERRALS */ + +/* define this to add debugging code */ +/* #undef LDAP_DEBUG */ + +/* define if LDAP libs are dynamic */ +/* #undef LDAP_LIBS_DYNAMIC */ + +/* define to support PF_INET6 */ +#define LDAP_PF_INET6 1 + +/* define to support PF_LOCAL */ +#define LDAP_PF_LOCAL 1 + +/* define this to add SLAPI code */ +/* #undef LDAP_SLAPI */ + +/* define this to add syslog code */ +/* #undef LDAP_SYSLOG */ + +/* Version */ +#define LDAP_VENDOR_VERSION 20501 + +/* Major */ +#define LDAP_VENDOR_VERSION_MAJOR 2 + +/* Minor */ +#define LDAP_VENDOR_VERSION_MINOR 5 + +/* Patch */ +#define LDAP_VENDOR_VERSION_PATCH X + +/* Define to the sub-directory where libtool stores uninstalled libraries. */ +#define LT_OBJDIR ".libs/" + +/* define if memcmp is not 8-bit clean or is otherwise broken */ +/* #undef NEED_MEMCMP_REPLACEMENT */ + +/* define if you have (or want) no threads */ +/* #undef NO_THREADS */ + +/* define to use the original debug style */ +/* #undef OLD_DEBUG */ + +/* Package */ +#define OPENLDAP_PACKAGE "OpenLDAP" + +/* Version */ +#define OPENLDAP_VERSION "2.5.X" + +/* Define to the address where bug reports for this package should be sent. */ +#define PACKAGE_BUGREPORT "" + +/* Define to the full name of this package. */ +#define PACKAGE_NAME "" + +/* Define to the full name and version of this package. */ +#define PACKAGE_STRING "" + +/* Define to the one symbol short name of this package. */ +#define PACKAGE_TARNAME "" + +/* Define to the home page for this package. */ +#define PACKAGE_URL "" + +/* Define to the version of this package. */ +#define PACKAGE_VERSION "" + +/* define if sched_yield yields the entire process */ +/* #undef REPLACE_BROKEN_YIELD */ + +/* Define as the return type of signal handlers (`int' or `void'). */ +#define RETSIGTYPE void + +/* Define to the type of arg 1 for `select'. */ +#define SELECT_TYPE_ARG1 int + +/* Define to the type of args 2, 3 and 4 for `select'. */ +#define SELECT_TYPE_ARG234 (fd_set *) + +/* Define to the type of arg 5 for `select'. */ +#define SELECT_TYPE_ARG5 (struct timeval *) + +/* The size of `int', as computed by sizeof. */ +#define SIZEOF_INT 4 + +/* The size of `long', as computed by sizeof. */ +#define SIZEOF_LONG 8 + +/* The size of `long long', as computed by sizeof. */ +#define SIZEOF_LONG_LONG 8 + +/* The size of `short', as computed by sizeof. */ +#define SIZEOF_SHORT 2 + +/* The size of `wchar_t', as computed by sizeof. 
*/ +#define SIZEOF_WCHAR_T 4 + +/* define to support per-object ACIs */ +/* #undef SLAPD_ACI_ENABLED */ + +/* define to support LDAP Async Metadirectory backend */ +/* #undef SLAPD_ASYNCMETA */ + +/* define to support cleartext passwords */ +/* #undef SLAPD_CLEARTEXT */ + +/* define to support crypt(3) passwords */ +/* #undef SLAPD_CRYPT */ + +/* define to support DNS SRV backend */ +/* #undef SLAPD_DNSSRV */ + +/* define to support LDAP backend */ +/* #undef SLAPD_LDAP */ + +/* define to support MDB backend */ +/* #undef SLAPD_MDB */ + +/* define to support LDAP Metadirectory backend */ +/* #undef SLAPD_META */ + +/* define to support modules */ +/* #undef SLAPD_MODULES */ + +/* dynamically linked module */ +#define SLAPD_MOD_DYNAMIC 2 + +/* statically linked module */ +#define SLAPD_MOD_STATIC 1 + +/* define to support cn=Monitor backend */ +/* #undef SLAPD_MONITOR */ + +/* define to support NDB backend */ +/* #undef SLAPD_NDB */ + +/* define to support NULL backend */ +/* #undef SLAPD_NULL */ + +/* define for In-Directory Access Logging overlay */ +/* #undef SLAPD_OVER_ACCESSLOG */ + +/* define for Audit Logging overlay */ +/* #undef SLAPD_OVER_AUDITLOG */ + +/* define for Automatic Certificate Authority overlay */ +/* #undef SLAPD_OVER_AUTOCA */ + +/* define for Collect overlay */ +/* #undef SLAPD_OVER_COLLECT */ + +/* define for Attribute Constraint overlay */ +/* #undef SLAPD_OVER_CONSTRAINT */ + +/* define for Dynamic Directory Services overlay */ +/* #undef SLAPD_OVER_DDS */ + +/* define for Dynamic Directory Services overlay */ +/* #undef SLAPD_OVER_DEREF */ + +/* define for Dynamic Group overlay */ +/* #undef SLAPD_OVER_DYNGROUP */ + +/* define for Dynamic List overlay */ +/* #undef SLAPD_OVER_DYNLIST */ + +/* define for Reverse Group Membership overlay */ +/* #undef SLAPD_OVER_MEMBEROF */ + +/* define for Password Policy overlay */ +/* #undef SLAPD_OVER_PPOLICY */ + +/* define for Proxy Cache overlay */ +/* #undef SLAPD_OVER_PROXYCACHE */ + +/* define for Referential Integrity overlay */ +/* #undef SLAPD_OVER_REFINT */ + +/* define for Return Code overlay */ +/* #undef SLAPD_OVER_RETCODE */ + +/* define for Rewrite/Remap overlay */ +/* #undef SLAPD_OVER_RWM */ + +/* define for Sequential Modify overlay */ +/* #undef SLAPD_OVER_SEQMOD */ + +/* define for ServerSideSort/VLV overlay */ +/* #undef SLAPD_OVER_SSSVLV */ + +/* define for Syncrepl Provider overlay */ +/* #undef SLAPD_OVER_SYNCPROV */ + +/* define for Translucent Proxy overlay */ +/* #undef SLAPD_OVER_TRANSLUCENT */ + +/* define for Attribute Uniqueness overlay */ +/* #undef SLAPD_OVER_UNIQUE */ + +/* define for Value Sorting overlay */ +/* #undef SLAPD_OVER_VALSORT */ + +/* define to support PASSWD backend */ +/* #undef SLAPD_PASSWD */ + +/* define to support PERL backend */ +/* #undef SLAPD_PERL */ + +/* define to support relay backend */ +/* #undef SLAPD_RELAY */ + +/* define to support reverse lookups */ +/* #undef SLAPD_RLOOKUPS */ + +/* define to support SHELL backend */ +/* #undef SLAPD_SHELL */ + +/* define to support SOCK backend */ +/* #undef SLAPD_SOCK */ + +/* define to support SASL passwords */ +/* #undef SLAPD_SPASSWD */ + +/* define to support SQL backend */ +/* #undef SLAPD_SQL */ + +/* define to support WiredTiger backend */ +/* #undef SLAPD_WT */ + +/* define to support run-time loadable ACL */ +/* #undef SLAP_DYNACL */ + +/* Define to 1 if you have the ANSI C header files. */ +#define STDC_HEADERS 1 + +/* Define to 1 if you can safely include both and . 
*/ +#define TIME_WITH_SYS_TIME 1 + +/* Define to 1 if your declares `struct tm'. */ +/* #undef TM_IN_SYS_TIME */ + +/* set to urandom device */ +#define URANDOM_DEVICE "/dev/urandom" + +/* define to use OpenSSL BIGNUM for MP */ +/* #undef USE_MP_BIGNUM */ + +/* define to use GMP for MP */ +/* #undef USE_MP_GMP */ + +/* define to use 'long' for MP */ +/* #undef USE_MP_LONG */ + +/* define to use 'long long' for MP */ +/* #undef USE_MP_LONG_LONG */ + +/* Define WORDS_BIGENDIAN to 1 if your processor stores words with the most + significant byte first (like Motorola and SPARC, unlike Intel). */ +#if defined AC_APPLE_UNIVERSAL_BUILD +# if defined __BIG_ENDIAN__ +# define WORDS_BIGENDIAN 1 +# endif +#else +# ifndef WORDS_BIGENDIAN +/* # undef WORDS_BIGENDIAN */ +# endif +#endif + +/* Define to the type of arg 3 for `accept'. */ +#define ber_socklen_t socklen_t + +/* Define to `char *' if does not define. */ +/* #undef caddr_t */ + +/* Define to empty if `const' does not conform to ANSI C. */ +/* #undef const */ + +/* Define to `int' if doesn't define. */ +/* #undef gid_t */ + +/* Define to `int' if does not define. */ +/* #undef mode_t */ + +/* Define to `long' if does not define. */ +/* #undef off_t */ + +/* Define to `int' if does not define. */ +/* #undef pid_t */ + +/* Define to `int' if does not define. */ +/* #undef sig_atomic_t */ + +/* Define to `unsigned' if does not define. */ +/* #undef size_t */ + +/* define to snprintf routine */ +/* #undef snprintf */ + +/* Define like ber_socklen_t if does not define. */ +/* #undef socklen_t */ + +/* Define to `signed int' if does not define. */ +/* #undef ssize_t */ + +/* Define to `int' if doesn't define. */ +/* #undef uid_t */ + +/* define as empty if volatile is not supported */ +/* #undef volatile */ + +/* define to snprintf routine */ +/* #undef vsnprintf */ + + +/* begin of portable.h.post */ + +#ifdef _WIN32 +/* don't suck in all of the win32 api */ +# define WIN32_LEAN_AND_MEAN 1 +#endif + +#ifndef LDAP_NEEDS_PROTOTYPES +/* force LDAP_P to always include prototypes */ +#define LDAP_NEEDS_PROTOTYPES 1 +#endif + +#ifndef LDAP_REL_ENG +#if (LDAP_VENDOR_VERSION == 000000) && !defined(LDAP_DEVEL) +#define LDAP_DEVEL +#endif +#if defined(LDAP_DEVEL) && !defined(LDAP_TEST) +#define LDAP_TEST +#endif +#endif + +#ifdef HAVE_STDDEF_H +# include +#endif + +#ifdef HAVE_EBCDIC +/* ASCII/EBCDIC converting replacements for stdio funcs + * vsnprintf and snprintf are used too, but they are already + * checked by the configure script + */ +#define fputs ber_pvt_fputs +#define fgets ber_pvt_fgets +#define printf ber_pvt_printf +#define fprintf ber_pvt_fprintf +#define vfprintf ber_pvt_vfprintf +#define vsprintf ber_pvt_vsprintf +#endif + +#include "ac/fdset.h" + +#include "ldap_cdefs.h" +#include "ldap_features.h" + +#include "ac/assert.h" +#include "ac/localize.h" + +#endif /* _LDAP_PORTABLE_H */ +/* end of portable.h.post */ + diff --git a/contrib/openldap-cmake/linux_x86_64/include/lber_types.h b/contrib/openldap-cmake/linux_x86_64/include/lber_types.h new file mode 100644 index 00000000000..dbd59430527 --- /dev/null +++ b/contrib/openldap-cmake/linux_x86_64/include/lber_types.h @@ -0,0 +1,63 @@ +/* include/lber_types.h. Generated from lber_types.hin by configure. */ +/* $OpenLDAP$ */ +/* This work is part of OpenLDAP Software . + * + * Copyright 1998-2020 The OpenLDAP Foundation. + * All rights reserved. 
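For orientation, a generated header like the portable.h above is consumed purely through the preprocessor. The sketch below is illustrative and not part of the patch: the function name wait_readable is hypothetical, and it assumes compilation inside the OpenLDAP tree so that "portable.h" and the helper headers it pulls in resolve.

    /* Hypothetical consumer of the generated feature macros (illustration only). */
    #include "portable.h"

    #ifdef HAVE_POLL
    #include <poll.h>
    static int wait_readable(int fd, int timeout_ms)
    {
        struct pollfd pfd = { fd, POLLIN, 0 };
        return poll(&pfd, 1, timeout_ms);   /* HAVE_POLL is 1 in this configuration */
    }
    #else
    #include <sys/select.h>
    static int wait_readable(int fd, int timeout_ms)
    {
        fd_set readfds;
        struct timeval tv = { timeout_ms / 1000, (timeout_ms % 1000) * 1000 };
        FD_ZERO(&readfds);
        FD_SET(fd, &readfds);
        /* SELECT_TYPE_ARG1/ARG234/ARG5 above record the expected select() argument types. */
        return select(fd + 1, &readfds, NULL, NULL, &tv);
    }
    #endif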
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted only as authorized by the OpenLDAP
+ * Public License.
+ *
+ * A copy of this license is available in file LICENSE in the
+ * top-level directory of the distribution or, alternatively, at
+ * <http://www.OpenLDAP.org/license.html>.
+ */
+
+/*
+ * LBER types
+ */
+
+#ifndef _LBER_TYPES_H
+#define _LBER_TYPES_H
+
+#include <ldap_cdefs.h>
+
+LDAP_BEGIN_DECL
+
+/* LBER boolean, enum, integers (32 bits or larger) */
+#define LBER_INT_T int
+
+/* LBER tags (32 bits or larger) */
+#define LBER_TAG_T long
+
+/* LBER socket descriptor */
+#define LBER_SOCKET_T int
+
+/* LBER lengths (32 bits or larger) */
+#define LBER_LEN_T long
+
+/* ------------------------------------------------------------ */
+
+/* booleans, enumerations, and integers */
+typedef LBER_INT_T ber_int_t;
+
+/* signed and unsigned versions */
+typedef signed LBER_INT_T ber_sint_t;
+typedef unsigned LBER_INT_T ber_uint_t;
+
+/* tags */
+typedef unsigned LBER_TAG_T ber_tag_t;
+
+/* "socket" descriptors */
+typedef LBER_SOCKET_T ber_socket_t;
+
+/* lengths */
+typedef unsigned LBER_LEN_T ber_len_t;
+
+/* signed lengths */
+typedef signed LBER_LEN_T ber_slen_t;
+
+LDAP_END_DECL
+
+#endif /* _LBER_TYPES_H */
diff --git a/contrib/openldap-cmake/linux_x86_64/include/ldap_config.h b/contrib/openldap-cmake/linux_x86_64/include/ldap_config.h
new file mode 100644
index 00000000000..89f7b40b884
--- /dev/null
+++ b/contrib/openldap-cmake/linux_x86_64/include/ldap_config.h
@@ -0,0 +1,74 @@
+/* include/ldap_config.h. Generated from ldap_config.hin by configure. */
+/* $OpenLDAP$ */
+/* This work is part of OpenLDAP Software <http://www.openldap.org/>.
+ *
+ * Copyright 1998-2020 The OpenLDAP Foundation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted only as authorized by the OpenLDAP
+ * Public License.
+ *
+ * A copy of this license is available in file LICENSE in the
+ * top-level directory of the distribution or, alternatively, at
+ * <http://www.OpenLDAP.org/license.html>.
+ */
+
+/*
+ * This file works in conjunction with OpenLDAP configure system.
+ * If you do no like the values below, adjust your configure options.
+ */
+
+#ifndef _LDAP_CONFIG_H
+#define _LDAP_CONFIG_H
+
+/* directory separator */
+#ifndef LDAP_DIRSEP
+#ifndef _WIN32
+#define LDAP_DIRSEP "/"
+#else
+#define LDAP_DIRSEP "\\"
+#endif
+#endif
+
+/* directory for temporary files */
+#if defined(_WIN32)
+# define LDAP_TMPDIR "C:\\." /* we don't have much of a choice */
+#elif defined( _P_tmpdir )
+# define LDAP_TMPDIR _P_tmpdir
+#elif defined( P_tmpdir )
+# define LDAP_TMPDIR P_tmpdir
+#elif defined( _PATH_TMPDIR )
+# define LDAP_TMPDIR _PATH_TMPDIR
+#else
+# define LDAP_TMPDIR LDAP_DIRSEP "tmp"
+#endif
+
+/* directories */
+#ifndef LDAP_BINDIR
+#define LDAP_BINDIR "/tmp/ldap-prefix/bin"
+#endif
+#ifndef LDAP_SBINDIR
+#define LDAP_SBINDIR "/tmp/ldap-prefix/sbin"
+#endif
+#ifndef LDAP_DATADIR
+#define LDAP_DATADIR "/tmp/ldap-prefix/share/openldap"
+#endif
+#ifndef LDAP_SYSCONFDIR
+#define LDAP_SYSCONFDIR "/tmp/ldap-prefix/etc/openldap"
+#endif
+#ifndef LDAP_LIBEXECDIR
+#define LDAP_LIBEXECDIR "/tmp/ldap-prefix/libexec"
+#endif
+#ifndef LDAP_MODULEDIR
+#define LDAP_MODULEDIR "/tmp/ldap-prefix/libexec/openldap"
+#endif
+#ifndef LDAP_RUNDIR
+#define LDAP_RUNDIR "/tmp/ldap-prefix/var"
+#endif
+#ifndef LDAP_LOCALEDIR
+#define LDAP_LOCALEDIR ""
+#endif
+
+
+#endif /* _LDAP_CONFIG_H */
diff --git a/contrib/openldap-cmake/linux_x86_64/include/ldap_features.h b/contrib/openldap-cmake/linux_x86_64/include/ldap_features.h
new file mode 100644
index 00000000000..f0cc7c3626f
--- /dev/null
+++ b/contrib/openldap-cmake/linux_x86_64/include/ldap_features.h
@@ -0,0 +1,61 @@
+/* include/ldap_features.h. Generated from ldap_features.hin by configure. */
+/* $OpenLDAP$ */
+/* This work is part of OpenLDAP Software <http://www.openldap.org/>.
+ *
+ * Copyright 1998-2020 The OpenLDAP Foundation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted only as authorized by the OpenLDAP
+ * Public License.
+ *
+ * A copy of this license is available in file LICENSE in the
+ * top-level directory of the distribution or, alternatively, at
+ * <http://www.OpenLDAP.org/license.html>.
+ */
+
+/*
+ * LDAP Features
+ */
+
+#ifndef _LDAP_FEATURES_H
+#define _LDAP_FEATURES_H 1
+
+/* OpenLDAP API version macros */
+#define LDAP_VENDOR_VERSION 20501
+#define LDAP_VENDOR_VERSION_MAJOR 2
+#define LDAP_VENDOR_VERSION_MINOR 5
+#define LDAP_VENDOR_VERSION_PATCH X
+
+/*
+** WORK IN PROGRESS!
+**
+** OpenLDAP reentrancy/thread-safeness should be dynamically
+** checked using ldap_get_option().
+**
+** The -lldap implementation is not thread-safe.
+**
+** The -lldap_r implementation is:
+** LDAP_API_FEATURE_THREAD_SAFE (basic thread safety)
+** but also be:
+** LDAP_API_FEATURE_SESSION_THREAD_SAFE
+** LDAP_API_FEATURE_OPERATION_THREAD_SAFE
+**
+** The preprocessor flag LDAP_API_FEATURE_X_OPENLDAP_THREAD_SAFE
+** can be used to determine if -lldap_r is available at compile
+** time. You must define LDAP_THREAD_SAFE if and only if you
+** link with -lldap_r.
+**
+** If you fail to define LDAP_THREAD_SAFE when linking with
+** -lldap_r or define LDAP_THREAD_SAFE when linking with -lldap,
+** provided header definitions and declarations may be incorrect.
+**
+*/
+
+/* is -lldap_r available or not */
+#define LDAP_API_FEATURE_X_OPENLDAP_THREAD_SAFE 1
+
+/* LDAP v2 Referrals */
+/* #undef LDAP_API_FEATURE_X_OPENLDAP_V2_REFERRALS */
+
+#endif /* LDAP_FEATURES */
diff --git a/contrib/openldap-cmake/linux_x86_64/include/portable.h b/contrib/openldap-cmake/linux_x86_64/include/portable.h
new file mode 100644
index 00000000000..2924b6713a4
--- /dev/null
+++ b/contrib/openldap-cmake/linux_x86_64/include/portable.h
@@ -0,0 +1,1169 @@
+/* include/portable.h. Generated from portable.hin by configure. */
+/* include/portable.hin. Generated from configure.in by autoheader. */
+
+
+/* begin of portable.h.pre */
+/* This work is part of OpenLDAP Software <http://www.openldap.org/>.
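The WORK IN PROGRESS comment in ldap_features.h above states a linking contract rather than an API: a consumer must define LDAP_THREAD_SAFE exactly when it links -lldap_r, and can use LDAP_API_FEATURE_X_OPENLDAP_THREAD_SAFE to see whether a thread-safe build exists at all. A minimal guard under that assumption (illustrative, not part of the patch; the error text is invented) could be:

    /* Hypothetical compile-time guard in a consumer translation unit.
     * Assumption: the build system passes -DLDAP_THREAD_SAFE only when it links -lldap_r. */
    #include "ldap_features.h"

    #if defined(LDAP_THREAD_SAFE) && !defined(LDAP_API_FEATURE_X_OPENLDAP_THREAD_SAFE)
    #error "LDAP_THREAD_SAFE requested, but these headers advertise no thread-safe (-lldap_r) build"
    #endif

As the comment notes, the authoritative check is dynamic, via ldap_get_option() at run time; the macro only records what was available when the headers were generated.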
+ * + * Copyright 1998-2020 The OpenLDAP Foundation + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted only as authorized by the OpenLDAP + * Public License. + * + * A copy of this license is available in the file LICENSE in the + * top-level directory of the distribution or, alternatively, at + * . + */ + +#ifndef _LDAP_PORTABLE_H +#define _LDAP_PORTABLE_H + +/* define this if needed to get reentrant functions */ +#ifndef REENTRANT +#define REENTRANT 1 +#endif +#ifndef _REENTRANT +#define _REENTRANT 1 +#endif + +/* define this if needed to get threadsafe functions */ +#ifndef THREADSAFE +#define THREADSAFE 1 +#endif +#ifndef _THREADSAFE +#define _THREADSAFE 1 +#endif +#ifndef THREAD_SAFE +#define THREAD_SAFE 1 +#endif +#ifndef _THREAD_SAFE +#define _THREAD_SAFE 1 +#endif + +#ifndef _SGI_MP_SOURCE +#define _SGI_MP_SOURCE 1 +#endif + +/* end of portable.h.pre */ + + +/* Define if building universal (internal helper macro) */ +/* #undef AC_APPLE_UNIVERSAL_BUILD */ + +/* define to use both and */ +/* #undef BOTH_STRINGS_H */ + +/* define if cross compiling */ +/* #undef CROSS_COMPILING */ + +/* set to the number of arguments ctime_r() expects */ +#define CTIME_R_NARGS 2 + +/* define if toupper() requires islower() */ +/* #undef C_UPPER_LOWER */ + +/* define if sys_errlist is not declared in stdio.h or errno.h */ +/* #undef DECL_SYS_ERRLIST */ + +/* define to enable slapi library */ +/* #undef ENABLE_SLAPI */ + +/* defined to be the EXE extension */ +#define EXEEXT "" + +/* set to the number of arguments gethostbyaddr_r() expects */ +#define GETHOSTBYADDR_R_NARGS 8 + +/* set to the number of arguments gethostbyname_r() expects */ +#define GETHOSTBYNAME_R_NARGS 6 + +/* Define to 1 if `TIOCGWINSZ' requires . */ +#define GWINSZ_IN_SYS_IOCTL 1 + +/* define if you have AIX security lib */ +/* #undef HAVE_AIX_SECURITY */ + +/* Define to 1 if you have the header file. */ +#define HAVE_ARPA_INET_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_ARPA_NAMESER_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_ASSERT_H 1 + +/* Define to 1 if you have the `bcopy' function. */ +#define HAVE_BCOPY 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_BITS_TYPES_H 1 + +/* Define to 1 if you have the `chroot' function. */ +#define HAVE_CHROOT 1 + +/* Define to 1 if you have the `closesocket' function. */ +/* #undef HAVE_CLOSESOCKET */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_CONIO_H */ + +/* define if crypt(3) is available */ +/* #undef HAVE_CRYPT */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_CRYPT_H */ + +/* define if crypt_r() is also available */ +/* #undef HAVE_CRYPT_R */ + +/* Define to 1 if you have the `ctime_r' function. */ +#define HAVE_CTIME_R 1 + +/* define if you have Cyrus SASL */ +/* #undef HAVE_CYRUS_SASL */ + +/* define if your system supports /dev/poll */ +/* #undef HAVE_DEVPOLL */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_DIRECT_H */ + +/* Define to 1 if you have the header file, and it defines `DIR'. + */ +#define HAVE_DIRENT_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_DLFCN_H 1 + +/* Define to 1 if you don't have `vprintf' but do have `_doprnt.' */ +/* #undef HAVE_DOPRNT */ + +/* define if system uses EBCDIC instead of ASCII */ +/* #undef HAVE_EBCDIC */ + +/* Define to 1 if you have the `endgrent' function. 
*/ +#define HAVE_ENDGRENT 1 + +/* Define to 1 if you have the `endpwent' function. */ +#define HAVE_ENDPWENT 1 + +/* define if your system supports epoll */ +#define HAVE_EPOLL 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_ERRNO_H 1 + +/* Define to 1 if you have the `fcntl' function. */ +#define HAVE_FCNTL 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_FCNTL_H 1 + +/* define if you actually have FreeBSD fetch(3) */ +/* #undef HAVE_FETCH */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_FILIO_H */ + +/* Define to 1 if you have the `flock' function. */ +#define HAVE_FLOCK 1 + +/* Define to 1 if you have the `fstat' function. */ +#define HAVE_FSTAT 1 + +/* Define to 1 if you have the `gai_strerror' function. */ +#define HAVE_GAI_STRERROR 1 + +/* Define to 1 if you have the `getaddrinfo' function. */ +#define HAVE_GETADDRINFO 1 + +/* Define to 1 if you have the `getdtablesize' function. */ +#define HAVE_GETDTABLESIZE 1 + +/* Define to 1 if you have the `geteuid' function. */ +#define HAVE_GETEUID 1 + +/* Define to 1 if you have the `getgrgid' function. */ +#define HAVE_GETGRGID 1 + +/* Define to 1 if you have the `gethostbyaddr_r' function. */ +#define HAVE_GETHOSTBYADDR_R 1 + +/* Define to 1 if you have the `gethostbyname_r' function. */ +#define HAVE_GETHOSTBYNAME_R 1 + +/* Define to 1 if you have the `gethostname' function. */ +#define HAVE_GETHOSTNAME 1 + +/* Define to 1 if you have the `getnameinfo' function. */ +#define HAVE_GETNAMEINFO 1 + +/* Define to 1 if you have the `getopt' function. */ +#define HAVE_GETOPT 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_GETOPT_H 1 + +/* Define to 1 if you have the `getpassphrase' function. */ +/* #undef HAVE_GETPASSPHRASE */ + +/* Define to 1 if you have the `getpeereid' function. */ +/* #undef HAVE_GETPEEREID */ + +/* Define to 1 if you have the `getpeerucred' function. */ +/* #undef HAVE_GETPEERUCRED */ + +/* Define to 1 if you have the `getpwnam' function. */ +#define HAVE_GETPWNAM 1 + +/* Define to 1 if you have the `getpwuid' function. */ +#define HAVE_GETPWUID 1 + +/* Define to 1 if you have the `getspnam' function. */ +#define HAVE_GETSPNAM 1 + +/* Define to 1 if you have the `gettimeofday' function. */ +#define HAVE_GETTIMEOFDAY 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_GMP_H */ + +/* Define to 1 if you have the `gmtime_r' function. */ +#define HAVE_GMTIME_R 1 + +/* define if you have GNUtls */ +/* #undef HAVE_GNUTLS */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_GNUTLS_GNUTLS_H */ + +/* if you have GNU Pth */ +/* #undef HAVE_GNU_PTH */ + +/* Define to 1 if you have the header file. */ +#define HAVE_GRP_H 1 + +/* Define to 1 if you have the `hstrerror' function. */ +#define HAVE_HSTRERROR 1 + +/* define to you inet_aton(3) is available */ +#define HAVE_INET_ATON 1 + +/* Define to 1 if you have the `inet_ntoa_b' function. */ +/* #undef HAVE_INET_NTOA_B */ + +/* Define to 1 if you have the `inet_ntop' function. */ +#define HAVE_INET_NTOP 1 + +/* Define to 1 if you have the `initgroups' function. */ +#define HAVE_INITGROUPS 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_INTTYPES_H 1 + +/* Define to 1 if you have the `ioctl' function. */ +#define HAVE_IOCTL 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_IO_H */ + +/* define if your system supports kqueue */ +/* #undef HAVE_KQUEUE */ + +/* Define to 1 if you have the `gen' library (-lgen). 
*/ +/* #undef HAVE_LIBGEN */ + +/* Define to 1 if you have the `gmp' library (-lgmp). */ +/* #undef HAVE_LIBGMP */ + +/* Define to 1 if you have the `inet' library (-linet). */ +/* #undef HAVE_LIBINET */ + +/* define if you have libtool -ltdl */ +/* #undef HAVE_LIBLTDL */ + +/* Define to 1 if you have the `net' library (-lnet). */ +/* #undef HAVE_LIBNET */ + +/* Define to 1 if you have the `nsl' library (-lnsl). */ +/* #undef HAVE_LIBNSL */ + +/* Define to 1 if you have the `nsl_s' library (-lnsl_s). */ +/* #undef HAVE_LIBNSL_S */ + +/* Define to 1 if you have the `socket' library (-lsocket). */ +/* #undef HAVE_LIBSOCKET */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_LIBUTIL_H */ + +/* Define to 1 if you have the `V3' library (-lV3). */ +/* #undef HAVE_LIBV3 */ + +/* Define to 1 if you have the header file. */ +#define HAVE_LIMITS_H 1 + +/* if you have LinuxThreads */ +/* #undef HAVE_LINUX_THREADS */ + +/* Define to 1 if you have the header file. */ +#define HAVE_LOCALE_H 1 + +/* Define to 1 if you have the `localtime_r' function. */ +#define HAVE_LOCALTIME_R 1 + +/* Define to 1 if you have the `lockf' function. */ +#define HAVE_LOCKF 1 + +/* Define to 1 if the system has the type `long long'. */ +#define HAVE_LONG_LONG 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_LTDL_H */ + +/* Define to 1 if you have the header file. */ +#define HAVE_MALLOC_H 1 + +/* Define to 1 if you have the `memcpy' function. */ +#define HAVE_MEMCPY 1 + +/* Define to 1 if you have the `memmove' function. */ +#define HAVE_MEMMOVE 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_MEMORY_H 1 + +/* Define to 1 if you have the `memrchr' function. */ +#define HAVE_MEMRCHR 1 + +/* Define to 1 if you have the `mkstemp' function. */ +#define HAVE_MKSTEMP 1 + +/* Define to 1 if you have the `mktemp' function. */ +#define HAVE_MKTEMP 1 + +/* define this if you have mkversion */ +#define HAVE_MKVERSION 1 + +/* Define to 1 if you have the header file, and it defines `DIR'. */ +/* #undef HAVE_NDIR_H */ + +/* Define to 1 if you have the header file. */ +#define HAVE_NETINET_TCP_H 1 + +/* define if strerror_r returns char* instead of int */ +/* #undef HAVE_NONPOSIX_STRERROR_R */ + +/* if you have NT Event Log */ +/* #undef HAVE_NT_EVENT_LOG */ + +/* if you have NT Service Manager */ +/* #undef HAVE_NT_SERVICE_MANAGER */ + +/* if you have NT Threads */ +/* #undef HAVE_NT_THREADS */ + +/* define if you have OpenSSL */ +#define HAVE_OPENSSL 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_OPENSSL_BN_H 1 + +/* define if you have OpenSSL with CRL checking capability */ +#define HAVE_OPENSSL_CRL 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_OPENSSL_CRYPTO_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_OPENSSL_SSL_H 1 + +/* Define to 1 if you have the `pipe' function. */ +#define HAVE_PIPE 1 + +/* Define to 1 if you have the `poll' function. */ +#define HAVE_POLL 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_POLL_H 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_PROCESS_H */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_PSAP_H */ + +/* define to pthreads API spec revision */ +#define HAVE_PTHREADS 10 + +/* define if you have pthread_detach function */ +#define HAVE_PTHREAD_DETACH 1 + +/* Define to 1 if you have the `pthread_getconcurrency' function. */ +#define HAVE_PTHREAD_GETCONCURRENCY 1 + +/* Define to 1 if you have the header file. 
*/ +#define HAVE_PTHREAD_H 1 + +/* Define to 1 if you have the `pthread_kill' function. */ +#define HAVE_PTHREAD_KILL 1 + +/* Define to 1 if you have the `pthread_kill_other_threads_np' function. */ +/* #undef HAVE_PTHREAD_KILL_OTHER_THREADS_NP */ + +/* define if you have pthread_rwlock_destroy function */ +#define HAVE_PTHREAD_RWLOCK_DESTROY 1 + +/* Define to 1 if you have the `pthread_setconcurrency' function. */ +#define HAVE_PTHREAD_SETCONCURRENCY 1 + +/* Define to 1 if you have the `pthread_yield' function. */ +#define HAVE_PTHREAD_YIELD 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_PTH_H */ + +/* Define to 1 if the system has the type `ptrdiff_t'. */ +#define HAVE_PTRDIFF_T 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_PWD_H 1 + +/* Define to 1 if you have the `read' function. */ +#define HAVE_READ 1 + +/* Define to 1 if you have the `recv' function. */ +#define HAVE_RECV 1 + +/* Define to 1 if you have the `recvfrom' function. */ +#define HAVE_RECVFROM 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_REGEX_H 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_RESOLV_H */ + +/* define if you have res_query() */ +/* #undef HAVE_RES_QUERY */ + +/* define if OpenSSL needs RSAref */ +/* #undef HAVE_RSAREF */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SASL_H */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SASL_SASL_H */ + +/* define if your SASL library has sasl_version() */ +/* #undef HAVE_SASL_VERSION */ + +/* Define to 1 if you have the header file. */ +#define HAVE_SCHED_H 1 + +/* Define to 1 if you have the `sched_yield' function. */ +#define HAVE_SCHED_YIELD 1 + +/* Define to 1 if you have the `send' function. */ +#define HAVE_SEND 1 + +/* Define to 1 if you have the `sendmsg' function. */ +#define HAVE_SENDMSG 1 + +/* Define to 1 if you have the `sendto' function. */ +#define HAVE_SENDTO 1 + +/* Define to 1 if you have the `setegid' function. */ +#define HAVE_SETEGID 1 + +/* Define to 1 if you have the `seteuid' function. */ +#define HAVE_SETEUID 1 + +/* Define to 1 if you have the `setgid' function. */ +#define HAVE_SETGID 1 + +/* Define to 1 if you have the `setpwfile' function. */ +/* #undef HAVE_SETPWFILE */ + +/* Define to 1 if you have the `setsid' function. */ +#define HAVE_SETSID 1 + +/* Define to 1 if you have the `setuid' function. */ +#define HAVE_SETUID 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SGTTY_H 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SHADOW_H */ + +/* Define to 1 if you have the `sigaction' function. */ +#define HAVE_SIGACTION 1 + +/* Define to 1 if you have the `signal' function. */ +#define HAVE_SIGNAL 1 + +/* Define to 1 if you have the `sigset' function. */ +#define HAVE_SIGSET 1 + +/* define if you have -lslp */ +/* #undef HAVE_SLP */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SLP_H */ + +/* Define to 1 if you have the `snprintf' function. */ +#define HAVE_SNPRINTF 1 + +/* if you have spawnlp() */ +/* #undef HAVE_SPAWNLP */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SQLEXT_H */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SQL_H */ + +/* Define to 1 if you have the header file. */ +#define HAVE_STDDEF_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STDINT_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STDLIB_H 1 + +/* Define to 1 if you have the `strdup' function. 
*/ +#define HAVE_STRDUP 1 + +/* Define to 1 if you have the `strerror' function. */ +#define HAVE_STRERROR 1 + +/* Define to 1 if you have the `strerror_r' function. */ +#define HAVE_STRERROR_R 1 + +/* Define to 1 if you have the `strftime' function. */ +#define HAVE_STRFTIME 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STRINGS_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STRING_H 1 + +/* Define to 1 if you have the `strpbrk' function. */ +#define HAVE_STRPBRK 1 + +/* Define to 1 if you have the `strrchr' function. */ +#define HAVE_STRRCHR 1 + +/* Define to 1 if you have the `strsep' function. */ +#define HAVE_STRSEP 1 + +/* Define to 1 if you have the `strspn' function. */ +#define HAVE_STRSPN 1 + +/* Define to 1 if you have the `strstr' function. */ +#define HAVE_STRSTR 1 + +/* Define to 1 if you have the `strtol' function. */ +#define HAVE_STRTOL 1 + +/* Define to 1 if you have the `strtoll' function. */ +#define HAVE_STRTOLL 1 + +/* Define to 1 if you have the `strtoq' function. */ +#define HAVE_STRTOQ 1 + +/* Define to 1 if you have the `strtoul' function. */ +#define HAVE_STRTOUL 1 + +/* Define to 1 if you have the `strtoull' function. */ +#define HAVE_STRTOULL 1 + +/* Define to 1 if you have the `strtouq' function. */ +#define HAVE_STRTOUQ 1 + +/* Define to 1 if `msg_accrightslen' is a member of `struct msghdr'. */ +/* #undef HAVE_STRUCT_MSGHDR_MSG_ACCRIGHTSLEN */ + +/* Define to 1 if `msg_control' is a member of `struct msghdr'. */ +#define HAVE_STRUCT_MSGHDR_MSG_CONTROL 1 + +/* Define to 1 if `pw_gecos' is a member of `struct passwd'. */ +#define HAVE_STRUCT_PASSWD_PW_GECOS 1 + +/* Define to 1 if `pw_passwd' is a member of `struct passwd'. */ +#define HAVE_STRUCT_PASSWD_PW_PASSWD 1 + +/* Define to 1 if `st_blksize' is a member of `struct stat'. */ +#define HAVE_STRUCT_STAT_ST_BLKSIZE 1 + +/* Define to 1 if `st_fstype' is a member of `struct stat'. */ +/* #undef HAVE_STRUCT_STAT_ST_FSTYPE */ + +/* define to 1 if st_fstype is char * */ +/* #undef HAVE_STRUCT_STAT_ST_FSTYPE_CHAR */ + +/* define to 1 if st_fstype is int */ +/* #undef HAVE_STRUCT_STAT_ST_FSTYPE_INT */ + +/* Define to 1 if `st_vfstype' is a member of `struct stat'. */ +/* #undef HAVE_STRUCT_STAT_ST_VFSTYPE */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SYNCH_H */ + +/* Define to 1 if you have the `sysconf' function. */ +#define HAVE_SYSCONF 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYSEXITS_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYSLOG_H 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SYS_DEVPOLL_H */ + +/* Define to 1 if you have the header file, and it defines `DIR'. + */ +/* #undef HAVE_SYS_DIR_H */ + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_EPOLL_H 1 + +/* define if you actually have sys_errlist in your libs */ +#define HAVE_SYS_ERRLIST 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_ERRNO_H 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SYS_EVENT_H */ + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_FILE_H 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SYS_FILIO_H */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SYS_FSTYP_H */ + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_IOCTL_H 1 + +/* Define to 1 if you have the header file, and it defines `DIR'. + */ +/* #undef HAVE_SYS_NDIR_H */ + +/* Define to 1 if you have the header file. 
*/ +#define HAVE_SYS_PARAM_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_POLL_H 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SYS_PRIVGRP_H */ + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_RESOURCE_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_SELECT_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_SOCKET_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_STAT_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_SYSLOG_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_TIME_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_TYPES_H 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SYS_UCRED_H */ + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_UIO_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_UN_H 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SYS_UUID_H */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SYS_VMOUNT_H */ + +/* Define to 1 if you have that is POSIX.1 compatible. */ +#define HAVE_SYS_WAIT_H 1 + +/* define if you have -lwrap */ +/* #undef HAVE_TCPD */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_TCPD_H */ + +/* Define to 1 if you have the header file. */ +#define HAVE_TERMIOS_H 1 + +/* if you have Solaris LWP (thr) package */ +/* #undef HAVE_THR */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_THREAD_H */ + +/* Define to 1 if you have the `thr_getconcurrency' function. */ +/* #undef HAVE_THR_GETCONCURRENCY */ + +/* Define to 1 if you have the `thr_setconcurrency' function. */ +/* #undef HAVE_THR_SETCONCURRENCY */ + +/* Define to 1 if you have the `thr_yield' function. */ +/* #undef HAVE_THR_YIELD */ + +/* define if you have TLS */ +#define HAVE_TLS 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_UNISTD_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_UTIME_H 1 + +/* define if you have uuid_generate() */ +/* #undef HAVE_UUID_GENERATE */ + +/* define if you have uuid_to_str() */ +/* #undef HAVE_UUID_TO_STR */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_UUID_UUID_H */ + +/* Define to 1 if you have the `vprintf' function. */ +#define HAVE_VPRINTF 1 + +/* Define to 1 if you have the `vsnprintf' function. */ +#define HAVE_VSNPRINTF 1 + +/* Define to 1 if you have the `wait4' function. */ +#define HAVE_WAIT4 1 + +/* Define to 1 if you have the `waitpid' function. */ +#define HAVE_WAITPID 1 + +/* define if you have winsock */ +/* #undef HAVE_WINSOCK */ + +/* define if you have winsock2 */ +/* #undef HAVE_WINSOCK2 */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_WINSOCK2_H */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_WINSOCK_H */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_WIREDTIGER_H */ + +/* Define to 1 if you have the `write' function. */ +#define HAVE_WRITE 1 + +/* define if select implicitly yields */ +#define HAVE_YIELDING_SELECT 1 + +/* Define to 1 if you have the `_vsnprintf' function. 
*/ +/* #undef HAVE__VSNPRINTF */ + +/* define to 32-bit or greater integer type */ +#define LBER_INT_T int + +/* define to large integer type */ +#define LBER_LEN_T long + +/* define to socket descriptor type */ +#define LBER_SOCKET_T int + +/* define to large integer type */ +#define LBER_TAG_T long + +/* define to 1 if library is thread safe */ +#define LDAP_API_FEATURE_X_OPENLDAP_THREAD_SAFE 1 + +/* define to LDAP VENDOR VERSION */ +/* #undef LDAP_API_FEATURE_X_OPENLDAP_V2_REFERRALS */ + +/* define this to add debugging code */ +/* #undef LDAP_DEBUG */ + +/* define if LDAP libs are dynamic */ +/* #undef LDAP_LIBS_DYNAMIC */ + +/* define to support PF_INET6 */ +#define LDAP_PF_INET6 1 + +/* define to support PF_LOCAL */ +#define LDAP_PF_LOCAL 1 + +/* define this to add SLAPI code */ +/* #undef LDAP_SLAPI */ + +/* define this to add syslog code */ +/* #undef LDAP_SYSLOG */ + +/* Version */ +#define LDAP_VENDOR_VERSION 20501 + +/* Major */ +#define LDAP_VENDOR_VERSION_MAJOR 2 + +/* Minor */ +#define LDAP_VENDOR_VERSION_MINOR 5 + +/* Patch */ +#define LDAP_VENDOR_VERSION_PATCH X + +/* Define to the sub-directory where libtool stores uninstalled libraries. */ +#define LT_OBJDIR ".libs/" + +/* define if memcmp is not 8-bit clean or is otherwise broken */ +/* #undef NEED_MEMCMP_REPLACEMENT */ + +/* define if you have (or want) no threads */ +/* #undef NO_THREADS */ + +/* define to use the original debug style */ +/* #undef OLD_DEBUG */ + +/* Package */ +#define OPENLDAP_PACKAGE "OpenLDAP" + +/* Version */ +#define OPENLDAP_VERSION "2.5.X" + +/* Define to the address where bug reports for this package should be sent. */ +#define PACKAGE_BUGREPORT "" + +/* Define to the full name of this package. */ +#define PACKAGE_NAME "" + +/* Define to the full name and version of this package. */ +#define PACKAGE_STRING "" + +/* Define to the one symbol short name of this package. */ +#define PACKAGE_TARNAME "" + +/* Define to the home page for this package. */ +#define PACKAGE_URL "" + +/* Define to the version of this package. */ +#define PACKAGE_VERSION "" + +/* define if sched_yield yields the entire process */ +/* #undef REPLACE_BROKEN_YIELD */ + +/* Define as the return type of signal handlers (`int' or `void'). */ +#define RETSIGTYPE void + +/* Define to the type of arg 1 for `select'. */ +#define SELECT_TYPE_ARG1 int + +/* Define to the type of args 2, 3 and 4 for `select'. */ +#define SELECT_TYPE_ARG234 (fd_set *) + +/* Define to the type of arg 5 for `select'. */ +#define SELECT_TYPE_ARG5 (struct timeval *) + +/* The size of `int', as computed by sizeof. */ +#define SIZEOF_INT 4 + +/* The size of `long', as computed by sizeof. */ +#define SIZEOF_LONG 8 + +/* The size of `long long', as computed by sizeof. */ +#define SIZEOF_LONG_LONG 8 + +/* The size of `short', as computed by sizeof. */ +#define SIZEOF_SHORT 2 + +/* The size of `wchar_t', as computed by sizeof. 
*/ +#define SIZEOF_WCHAR_T 4 + +/* define to support per-object ACIs */ +/* #undef SLAPD_ACI_ENABLED */ + +/* define to support LDAP Async Metadirectory backend */ +/* #undef SLAPD_ASYNCMETA */ + +/* define to support cleartext passwords */ +/* #undef SLAPD_CLEARTEXT */ + +/* define to support crypt(3) passwords */ +/* #undef SLAPD_CRYPT */ + +/* define to support DNS SRV backend */ +/* #undef SLAPD_DNSSRV */ + +/* define to support LDAP backend */ +/* #undef SLAPD_LDAP */ + +/* define to support MDB backend */ +/* #undef SLAPD_MDB */ + +/* define to support LDAP Metadirectory backend */ +/* #undef SLAPD_META */ + +/* define to support modules */ +/* #undef SLAPD_MODULES */ + +/* dynamically linked module */ +#define SLAPD_MOD_DYNAMIC 2 + +/* statically linked module */ +#define SLAPD_MOD_STATIC 1 + +/* define to support cn=Monitor backend */ +/* #undef SLAPD_MONITOR */ + +/* define to support NDB backend */ +/* #undef SLAPD_NDB */ + +/* define to support NULL backend */ +/* #undef SLAPD_NULL */ + +/* define for In-Directory Access Logging overlay */ +/* #undef SLAPD_OVER_ACCESSLOG */ + +/* define for Audit Logging overlay */ +/* #undef SLAPD_OVER_AUDITLOG */ + +/* define for Automatic Certificate Authority overlay */ +/* #undef SLAPD_OVER_AUTOCA */ + +/* define for Collect overlay */ +/* #undef SLAPD_OVER_COLLECT */ + +/* define for Attribute Constraint overlay */ +/* #undef SLAPD_OVER_CONSTRAINT */ + +/* define for Dynamic Directory Services overlay */ +/* #undef SLAPD_OVER_DDS */ + +/* define for Dynamic Directory Services overlay */ +/* #undef SLAPD_OVER_DEREF */ + +/* define for Dynamic Group overlay */ +/* #undef SLAPD_OVER_DYNGROUP */ + +/* define for Dynamic List overlay */ +/* #undef SLAPD_OVER_DYNLIST */ + +/* define for Reverse Group Membership overlay */ +/* #undef SLAPD_OVER_MEMBEROF */ + +/* define for Password Policy overlay */ +/* #undef SLAPD_OVER_PPOLICY */ + +/* define for Proxy Cache overlay */ +/* #undef SLAPD_OVER_PROXYCACHE */ + +/* define for Referential Integrity overlay */ +/* #undef SLAPD_OVER_REFINT */ + +/* define for Return Code overlay */ +/* #undef SLAPD_OVER_RETCODE */ + +/* define for Rewrite/Remap overlay */ +/* #undef SLAPD_OVER_RWM */ + +/* define for Sequential Modify overlay */ +/* #undef SLAPD_OVER_SEQMOD */ + +/* define for ServerSideSort/VLV overlay */ +/* #undef SLAPD_OVER_SSSVLV */ + +/* define for Syncrepl Provider overlay */ +/* #undef SLAPD_OVER_SYNCPROV */ + +/* define for Translucent Proxy overlay */ +/* #undef SLAPD_OVER_TRANSLUCENT */ + +/* define for Attribute Uniqueness overlay */ +/* #undef SLAPD_OVER_UNIQUE */ + +/* define for Value Sorting overlay */ +/* #undef SLAPD_OVER_VALSORT */ + +/* define to support PASSWD backend */ +/* #undef SLAPD_PASSWD */ + +/* define to support PERL backend */ +/* #undef SLAPD_PERL */ + +/* define to support relay backend */ +/* #undef SLAPD_RELAY */ + +/* define to support reverse lookups */ +/* #undef SLAPD_RLOOKUPS */ + +/* define to support SHELL backend */ +/* #undef SLAPD_SHELL */ + +/* define to support SOCK backend */ +/* #undef SLAPD_SOCK */ + +/* define to support SASL passwords */ +/* #undef SLAPD_SPASSWD */ + +/* define to support SQL backend */ +/* #undef SLAPD_SQL */ + +/* define to support WiredTiger backend */ +/* #undef SLAPD_WT */ + +/* define to support run-time loadable ACL */ +/* #undef SLAP_DYNACL */ + +/* Define to 1 if you have the ANSI C header files. */ +#define STDC_HEADERS 1 + +/* Define to 1 if you can safely include both and . 
*/ +#define TIME_WITH_SYS_TIME 1 + +/* Define to 1 if your <sys/time.h> declares `struct tm'. */ +/* #undef TM_IN_SYS_TIME */ + +/* set to urandom device */ +#define URANDOM_DEVICE "/dev/urandom" + +/* define to use OpenSSL BIGNUM for MP */ +/* #undef USE_MP_BIGNUM */ + +/* define to use GMP for MP */ +/* #undef USE_MP_GMP */ + +/* define to use 'long' for MP */ +/* #undef USE_MP_LONG */ + +/* define to use 'long long' for MP */ +/* #undef USE_MP_LONG_LONG */ + +/* Define WORDS_BIGENDIAN to 1 if your processor stores words with the most + significant byte first (like Motorola and SPARC, unlike Intel). */ +#if defined AC_APPLE_UNIVERSAL_BUILD +# if defined __BIG_ENDIAN__ +# define WORDS_BIGENDIAN 1 +# endif +#else +# ifndef WORDS_BIGENDIAN +/* # undef WORDS_BIGENDIAN */ +# endif +#endif + +/* Define to the type of arg 3 for `accept'. */ +#define ber_socklen_t socklen_t + +/* Define to `char *' if <sys/types.h> does not define. */ +/* #undef caddr_t */ + +/* Define to empty if `const' does not conform to ANSI C. */ +/* #undef const */ + +/* Define to `int' if <sys/types.h> doesn't define. */ +/* #undef gid_t */ + +/* Define to `int' if <sys/types.h> does not define. */ +/* #undef mode_t */ + +/* Define to `long' if <sys/types.h> does not define. */ +/* #undef off_t */ + +/* Define to `int' if <sys/types.h> does not define. */ +/* #undef pid_t */ + +/* Define to `int' if <signal.h> does not define. */ +/* #undef sig_atomic_t */ + +/* Define to `unsigned' if <sys/types.h> does not define. */ +/* #undef size_t */ + +/* define to snprintf routine */ +/* #undef snprintf */ + +/* Define like ber_socklen_t if <sys/socket.h> does not define. */ +/* #undef socklen_t */ + +/* Define to `signed int' if <sys/types.h> does not define. */ +/* #undef ssize_t */ + +/* Define to `int' if <sys/types.h> doesn't define. */ +/* #undef uid_t */ + +/* define as empty if volatile is not supported */ +/* #undef volatile */ + +/* define to snprintf routine */ +/* #undef vsnprintf */ + + +/* begin of portable.h.post */ + +#ifdef _WIN32 +/* don't suck in all of the win32 api */ +# define WIN32_LEAN_AND_MEAN 1 +#endif + +#ifndef LDAP_NEEDS_PROTOTYPES +/* force LDAP_P to always include prototypes */ +#define LDAP_NEEDS_PROTOTYPES 1 +#endif + +#ifndef LDAP_REL_ENG +#if (LDAP_VENDOR_VERSION == 000000) && !defined(LDAP_DEVEL) +#define LDAP_DEVEL +#endif +#if defined(LDAP_DEVEL) && !defined(LDAP_TEST) +#define LDAP_TEST +#endif +#endif + +#ifdef HAVE_STDDEF_H +# include <stddef.h> +#endif + +#ifdef HAVE_EBCDIC +/* ASCII/EBCDIC converting replacements for stdio funcs + * vsnprintf and snprintf are used too, but they are already + * checked by the configure script + */ +#define fputs ber_pvt_fputs +#define fgets ber_pvt_fgets +#define printf ber_pvt_printf +#define fprintf ber_pvt_fprintf +#define vfprintf ber_pvt_vfprintf +#define vsprintf ber_pvt_vsprintf +#endif + +#include "ac/fdset.h" + +#include "ldap_cdefs.h" +#include "ldap_features.h" + +#include "ac/assert.h" +#include "ac/localize.h" + +#endif /* _LDAP_PORTABLE_H */ +/* end of portable.h.post */ + diff --git a/contrib/openssl b/contrib/openssl index debbae80cb4..07e96230645 160000 --- a/contrib/openssl +++ b/contrib/openssl @@ -1 +1 @@ -Subproject commit debbae80cb44de55fd8040fdfbe4b506601ff2a6 +Subproject commit 07e9623064508d15dd61367f960ebe7fc9aecd77 diff --git a/contrib/pdqsort/pdqsort.h b/contrib/pdqsort/pdqsort.h index 31eb06fece4..01e82b710ee 100644 --- a/contrib/pdqsort/pdqsort.h +++ b/contrib/pdqsort/pdqsort.h @@ -124,11 +124,9 @@ namespace pdqsort_detail { inline bool partial_insertion_sort(Iter begin, Iter end, Compare comp) { typedef typename std::iterator_traits<Iter>::value_type T; if (begin == end) return true; - - 
int limit = 0; - for (Iter cur = begin + 1; cur != end; ++cur) { - if (limit > partial_insertion_sort_limit) return false; + std::size_t limit = 0; + for (Iter cur = begin + 1; cur != end; ++cur) { Iter sift = cur; Iter sift_1 = cur - 1; @@ -142,6 +140,8 @@ namespace pdqsort_detail { *sift = PDQSORT_PREFER_MOVE(tmp); limit += cur - sift; } + + if (limit > partial_insertion_sort_limit) return false; } return true; @@ -232,7 +232,7 @@ namespace pdqsort_detail { unsigned char* offsets_r = align_cacheline(offsets_r_storage); int num_l, num_r, start_l, start_r; num_l = num_r = start_l = start_r = 0; - + while (last - first > 2 * block_size) { // Fill up offset blocks with elements that are on the wrong side. if (num_l == 0) { @@ -275,7 +275,7 @@ namespace pdqsort_detail { } int l_size = 0, r_size = 0; - int unknown_left = (last - first) - ((num_r || num_l) ? block_size : 0); + int unknown_left = (int)(last - first) - ((num_r || num_l) ? block_size : 0); if (num_r) { // Handle leftover block by assigning the unknown elements to the other block. l_size = unknown_left; @@ -311,7 +311,7 @@ namespace pdqsort_detail { start_l += num; start_r += num; if (num_l == 0) first += l_size; if (num_r == 0) last -= r_size; - + // We have now fully identified [first, last)'s proper position. Swap the last elements. if (num_l) { offsets_l += start_l; @@ -340,7 +340,7 @@ namespace pdqsort_detail { template <class Iter, class Compare> inline std::pair<Iter, bool> partition_right(Iter begin, Iter end, Compare comp) { typedef typename std::iterator_traits<Iter>::value_type T; - + // Move pivot into local for speed. T pivot(PDQSORT_PREFER_MOVE(*begin)); @@ -359,7 +359,7 @@ namespace pdqsort_detail { // If the first pair of elements that should be swapped to partition are the same element, // the passed in sequence already was correctly partitioned. bool already_partitioned = first >= last; - + // Keep swapping pairs of elements that are on the wrong side of the pivot. Previously // swapped pairs guard the searches, which is why the first iteration is special-cased // above. @@ -388,7 +388,7 @@ namespace pdqsort_detail { T pivot(PDQSORT_PREFER_MOVE(*begin)); Iter first = begin; Iter last = end; - + while (comp(pivot, *--last)); if (last + 1 == end) while (first < last && !comp(pivot, *++first)); @@ -475,11 +475,11 @@ namespace pdqsort_detail { std::iter_swap(pivot_pos - 3, pivot_pos - (l_size / 4 + 2)); } } - + if (r_size >= insertion_sort_threshold) { std::iter_swap(pivot_pos + 1, pivot_pos + (1 + r_size / 4)); std::iter_swap(end - 1, end - r_size / 4); - + if (r_size > ninther_threshold) { std::iter_swap(pivot_pos + 2, pivot_pos + (2 + r_size / 4)); std::iter_swap(pivot_pos + 3, pivot_pos + (3 + r_size / 4)); @@ -493,7 +493,7 @@ namespace pdqsort_detail { if (already_partitioned && partial_insertion_sort(begin, pivot_pos, comp) && partial_insertion_sort(pivot_pos + 1, end, comp)) return; } - + // Sort the left partition first using recursion and do tail recursion elimination for // the right-hand partition. 
pdqsort_loop<Iter, Compare, Branchless>(begin, pivot_pos, comp, bad_allowed, leftmost); diff --git a/contrib/poco b/contrib/poco index 1f3e4638f25..be2ab90ba5d 160000 --- a/contrib/poco +++ b/contrib/poco @@ -1 +1 @@ -Subproject commit 1f3e4638f250ad4d028a2499af20d4185463e07d +Subproject commit be2ab90ba5dccd46919a116e3fe4fa77bb85063b diff --git a/contrib/poco-cmake/CMakeLists.txt b/contrib/poco-cmake/CMakeLists.txt new file mode 100644 index 00000000000..59b6c84a1d1 --- /dev/null +++ b/contrib/poco-cmake/CMakeLists.txt @@ -0,0 +1,19 @@ +option (USE_INTERNAL_POCO_LIBRARY "Use internal Poco library" ${NOT_UNBUNDLED}) + +if (USE_INTERNAL_POCO_LIBRARY) + set (LIBRARY_DIR ${ClickHouse_SOURCE_DIR}/contrib/poco) +else () + find_path (ROOT_DIR NAMES Foundation/include/Poco/Poco.h include/Poco/Poco.h) +endif () + +add_subdirectory (Crypto) +add_subdirectory (Data) +add_subdirectory (Data/ODBC) +add_subdirectory (Foundation) +add_subdirectory (JSON) +add_subdirectory (MongoDB) +add_subdirectory (Net) +add_subdirectory (Net/SSL) +add_subdirectory (Redis) +add_subdirectory (Util) +add_subdirectory (XML) diff --git a/contrib/poco-cmake/Crypto/CMakeLists.txt b/contrib/poco-cmake/Crypto/CMakeLists.txt new file mode 100644 index 00000000000..468f1641510 --- /dev/null +++ b/contrib/poco-cmake/Crypto/CMakeLists.txt @@ -0,0 +1,51 @@ +if (ENABLE_SSL) + if (USE_INTERNAL_POCO_LIBRARY) + set (SRCS + ${LIBRARY_DIR}/Crypto/src/Cipher.cpp + ${LIBRARY_DIR}/Crypto/src/CipherFactory.cpp + ${LIBRARY_DIR}/Crypto/src/CipherImpl.cpp + ${LIBRARY_DIR}/Crypto/src/CipherKey.cpp + ${LIBRARY_DIR}/Crypto/src/CipherKeyImpl.cpp + ${LIBRARY_DIR}/Crypto/src/CryptoException.cpp + ${LIBRARY_DIR}/Crypto/src/CryptoStream.cpp + ${LIBRARY_DIR}/Crypto/src/CryptoTransform.cpp + ${LIBRARY_DIR}/Crypto/src/DigestEngine.cpp + ${LIBRARY_DIR}/Crypto/src/ECDSADigestEngine.cpp + ${LIBRARY_DIR}/Crypto/src/ECKey.cpp + ${LIBRARY_DIR}/Crypto/src/ECKeyImpl.cpp + ${LIBRARY_DIR}/Crypto/src/EVPPKey.cpp + ${LIBRARY_DIR}/Crypto/src/KeyPair.cpp + ${LIBRARY_DIR}/Crypto/src/KeyPairImpl.cpp + ${LIBRARY_DIR}/Crypto/src/OpenSSLInitializer.cpp + ${LIBRARY_DIR}/Crypto/src/PKCS12Container.cpp + ${LIBRARY_DIR}/Crypto/src/RSACipherImpl.cpp + ${LIBRARY_DIR}/Crypto/src/RSADigestEngine.cpp + ${LIBRARY_DIR}/Crypto/src/RSAKey.cpp + ${LIBRARY_DIR}/Crypto/src/RSAKeyImpl.cpp + ${LIBRARY_DIR}/Crypto/src/X509Certificate.cpp + ) + + add_library (_poco_crypto ${SRCS}) + add_library (Poco::Crypto ALIAS _poco_crypto) + + target_compile_options (_poco_crypto PRIVATE -Wno-newline-eof) + target_include_directories (_poco_crypto SYSTEM PUBLIC ${LIBRARY_DIR}/Crypto/include) + target_link_libraries (_poco_crypto PUBLIC Poco::Foundation ssl) + else () + add_library (Poco::Crypto UNKNOWN IMPORTED GLOBAL) + + find_library(LIBRARY_POCO_CRYPTO PocoCrypto) + find_path(INCLUDE_POCO_CRYPTO Poco/Crypto/Crypto.h) + set_target_properties (Poco::Crypto PROPERTIES IMPORTED_LOCATION ${LIBRARY_POCO_CRYPTO}) + set_target_properties (Poco::Crypto PROPERTIES INTERFACE_INCLUDE_DIRECTORIES ${INCLUDE_POCO_CRYPTO}) + + target_link_libraries (Poco::Crypto INTERFACE Poco::Foundation) + endif () + + message (STATUS "Using Poco::Crypto") +else () + add_library (_poco_crypto INTERFACE) + add_library (Poco::Crypto ALIAS _poco_crypto) + + message (STATUS "Not using Poco::Crypto") +endif () diff --git a/contrib/poco-cmake/Data/CMakeLists.txt b/contrib/poco-cmake/Data/CMakeLists.txt new file mode 100644 index 00000000000..1c185df8961 --- /dev/null +++ b/contrib/poco-cmake/Data/CMakeLists.txt @@ -0,0 +1,60 @@ +if (USE_INTERNAL_POCO_LIBRARY) + 
set (SRCS + ${LIBRARY_DIR}/Data/src/AbstractBinder.cpp + ${LIBRARY_DIR}/Data/src/AbstractBinding.cpp + ${LIBRARY_DIR}/Data/src/AbstractExtraction.cpp + ${LIBRARY_DIR}/Data/src/AbstractExtractor.cpp + ${LIBRARY_DIR}/Data/src/AbstractPreparation.cpp + ${LIBRARY_DIR}/Data/src/AbstractPreparator.cpp + ${LIBRARY_DIR}/Data/src/ArchiveStrategy.cpp + ${LIBRARY_DIR}/Data/src/Bulk.cpp + ${LIBRARY_DIR}/Data/src/Connector.cpp + ${LIBRARY_DIR}/Data/src/DataException.cpp + ${LIBRARY_DIR}/Data/src/Date.cpp + ${LIBRARY_DIR}/Data/src/DynamicLOB.cpp + ${LIBRARY_DIR}/Data/src/Limit.cpp + ${LIBRARY_DIR}/Data/src/MetaColumn.cpp + ${LIBRARY_DIR}/Data/src/PooledSessionHolder.cpp + ${LIBRARY_DIR}/Data/src/PooledSessionImpl.cpp + ${LIBRARY_DIR}/Data/src/Position.cpp + ${LIBRARY_DIR}/Data/src/Range.cpp + ${LIBRARY_DIR}/Data/src/RecordSet.cpp + ${LIBRARY_DIR}/Data/src/Row.cpp + ${LIBRARY_DIR}/Data/src/RowFilter.cpp + ${LIBRARY_DIR}/Data/src/RowFormatter.cpp + ${LIBRARY_DIR}/Data/src/RowIterator.cpp + ${LIBRARY_DIR}/Data/src/Session.cpp + ${LIBRARY_DIR}/Data/src/SessionFactory.cpp + ${LIBRARY_DIR}/Data/src/SessionImpl.cpp + ${LIBRARY_DIR}/Data/src/SessionPool.cpp + ${LIBRARY_DIR}/Data/src/SessionPoolContainer.cpp + ${LIBRARY_DIR}/Data/src/SimpleRowFormatter.cpp + ${LIBRARY_DIR}/Data/src/SQLChannel.cpp + ${LIBRARY_DIR}/Data/src/Statement.cpp + ${LIBRARY_DIR}/Data/src/StatementCreator.cpp + ${LIBRARY_DIR}/Data/src/StatementImpl.cpp + ${LIBRARY_DIR}/Data/src/Time.cpp + ${LIBRARY_DIR}/Data/src/Transaction.cpp + ) + + add_library (_poco_data ${SRCS}) + add_library (Poco::Data ALIAS _poco_data) + + if (COMPILER_GCC) + target_compile_options (_poco_data PRIVATE -Wno-deprecated-copy) + endif () + target_include_directories (_poco_data SYSTEM PUBLIC ${LIBRARY_DIR}/Data/include) + target_link_libraries (_poco_data PUBLIC Poco::Foundation) +else () + # NOTE: don't know why, but the GLOBAL is required here. 
+ add_library (Poco::Data UNKNOWN IMPORTED GLOBAL) + + find_library(LIBRARY_POCO_DATA PocoData) + find_path(INCLUDE_POCO_DATA Poco/Data/Data.h) + set_target_properties (Poco::Data PROPERTIES IMPORTED_LOCATION ${LIBRARY_POCO_DATA}) + set_target_properties (Poco::Data PROPERTIES INTERFACE_INCLUDE_DIRECTORIES ${INCLUDE_POCO_DATA}) + + target_link_libraries (Poco::Data INTERFACE Poco::Foundation) + + message (STATUS "Using Poco::Data: ${LIBRARY_POCO_DATA} ${INCLUDE_POCO_DATA}") +endif () diff --git a/contrib/poco-cmake/Data/ODBC/CMakeLists.txt b/contrib/poco-cmake/Data/ODBC/CMakeLists.txt new file mode 100644 index 00000000000..a0e4f83a7cc --- /dev/null +++ b/contrib/poco-cmake/Data/ODBC/CMakeLists.txt @@ -0,0 +1,44 @@ +if (ENABLE_ODBC) + if (USE_INTERNAL_POCO_LIBRARY) + set (SRCS + ${LIBRARY_DIR}/Data/ODBC/src/Binder.cpp + ${LIBRARY_DIR}/Data/ODBC/src/ConnectionHandle.cpp + ${LIBRARY_DIR}/Data/ODBC/src/Connector.cpp + ${LIBRARY_DIR}/Data/ODBC/src/EnvironmentHandle.cpp + ${LIBRARY_DIR}/Data/ODBC/src/Extractor.cpp + ${LIBRARY_DIR}/Data/ODBC/src/ODBCException.cpp + ${LIBRARY_DIR}/Data/ODBC/src/ODBCMetaColumn.cpp + ${LIBRARY_DIR}/Data/ODBC/src/ODBCStatementImpl.cpp + ${LIBRARY_DIR}/Data/ODBC/src/Parameter.cpp + ${LIBRARY_DIR}/Data/ODBC/src/Preparator.cpp + ${LIBRARY_DIR}/Data/ODBC/src/SessionImpl.cpp + ${LIBRARY_DIR}/Data/ODBC/src/TypeInfo.cpp + ${LIBRARY_DIR}/Data/ODBC/src/Unicode.cpp + ${LIBRARY_DIR}/Data/ODBC/src/Utility.cpp + ) + + add_library (_poco_data_odbc ${SRCS}) + add_library (Poco::Data::ODBC ALIAS _poco_data_odbc) + + target_compile_options (_poco_data_odbc PRIVATE -Wno-unused-variable) + target_include_directories (_poco_data_odbc SYSTEM PUBLIC ${LIBRARY_DIR}/Data/ODBC/include) + target_link_libraries (_poco_data_odbc PUBLIC Poco::Data unixodbc) + else () + add_library (Poco::Data::ODBC UNKNOWN IMPORTED) + + find_library(LIBRARY_POCO_DATA_ODBC PocoDataODBC) + find_path(INCLUDE_POCO_DATA_ODBC Poco/Data/ODBC/ODBC.h) + set_target_properties (Poco::Data::ODBC PROPERTIES IMPORTED_LOCATION ${LIBRARY_POCO_DATA_ODBC}) + set_target_properties (Poco::Data::ODBC PROPERTIES INTERFACE_INCLUDE_DIRECTORIES ${INCLUDE_POCO_DATA_ODBC}) + + target_link_libraries (Poco::Data::ODBC INTERFACE Poco::Data) + endif () + + message (STATUS "Using Poco::Data::ODBC") +else () + add_library (_poco_data_odbc INTERFACE) + add_library (Poco::Data::ODBC ALIAS _poco_data_odbc) + target_link_libraries (_poco_data_odbc INTERFACE unixodbc) + + message (STATUS "Not using Poco::Data::ODBC") +endif () diff --git a/contrib/poco-cmake/Foundation/CMakeLists.txt b/contrib/poco-cmake/Foundation/CMakeLists.txt new file mode 100644 index 00000000000..740fe53db1b --- /dev/null +++ b/contrib/poco-cmake/Foundation/CMakeLists.txt @@ -0,0 +1,235 @@ +if (USE_INTERNAL_POCO_LIBRARY) + # Foundation (pcre) + + set (SRCS_PCRE + ${LIBRARY_DIR}/Foundation/src/pcre_config.c + ${LIBRARY_DIR}/Foundation/src/pcre_byte_order.c + ${LIBRARY_DIR}/Foundation/src/pcre_chartables.c + ${LIBRARY_DIR}/Foundation/src/pcre_compile.c + ${LIBRARY_DIR}/Foundation/src/pcre_exec.c + ${LIBRARY_DIR}/Foundation/src/pcre_fullinfo.c + ${LIBRARY_DIR}/Foundation/src/pcre_globals.c + ${LIBRARY_DIR}/Foundation/src/pcre_maketables.c + ${LIBRARY_DIR}/Foundation/src/pcre_newline.c + ${LIBRARY_DIR}/Foundation/src/pcre_ord2utf8.c + ${LIBRARY_DIR}/Foundation/src/pcre_study.c + ${LIBRARY_DIR}/Foundation/src/pcre_tables.c + ${LIBRARY_DIR}/Foundation/src/pcre_dfa_exec.c + ${LIBRARY_DIR}/Foundation/src/pcre_get.c + ${LIBRARY_DIR}/Foundation/src/pcre_jit_compile.c + 
${LIBRARY_DIR}/Foundation/src/pcre_refcount.c + ${LIBRARY_DIR}/Foundation/src/pcre_string_utils.c + ${LIBRARY_DIR}/Foundation/src/pcre_version.c + ${LIBRARY_DIR}/Foundation/src/pcre_ucd.c + ${LIBRARY_DIR}/Foundation/src/pcre_valid_utf8.c + ${LIBRARY_DIR}/Foundation/src/pcre_xclass.c + ) + + add_library (_poco_foundation_pcre ${SRCS_PCRE}) + add_library (Poco::Foundation::PCRE ALIAS _poco_foundation_pcre) + + target_compile_options (_poco_foundation_pcre PRIVATE -Wno-sign-compare) + + # Foundation + + set (SRCS + ${LIBRARY_DIR}/Foundation/src/AbstractObserver.cpp + ${LIBRARY_DIR}/Foundation/src/ActiveDispatcher.cpp + ${LIBRARY_DIR}/Foundation/src/ArchiveStrategy.cpp + ${LIBRARY_DIR}/Foundation/src/Ascii.cpp + ${LIBRARY_DIR}/Foundation/src/ASCIIEncoding.cpp + ${LIBRARY_DIR}/Foundation/src/AsyncChannel.cpp + ${LIBRARY_DIR}/Foundation/src/AtomicCounter.cpp + ${LIBRARY_DIR}/Foundation/src/Base32Decoder.cpp + ${LIBRARY_DIR}/Foundation/src/Base32Encoder.cpp + ${LIBRARY_DIR}/Foundation/src/Base64Decoder.cpp + ${LIBRARY_DIR}/Foundation/src/Base64Encoder.cpp + ${LIBRARY_DIR}/Foundation/src/BinaryReader.cpp + ${LIBRARY_DIR}/Foundation/src/BinaryWriter.cpp + ${LIBRARY_DIR}/Foundation/src/Bugcheck.cpp + ${LIBRARY_DIR}/Foundation/src/ByteOrder.cpp + ${LIBRARY_DIR}/Foundation/src/Channel.cpp + ${LIBRARY_DIR}/Foundation/src/Checksum.cpp + ${LIBRARY_DIR}/Foundation/src/Clock.cpp + ${LIBRARY_DIR}/Foundation/src/Condition.cpp + ${LIBRARY_DIR}/Foundation/src/Configurable.cpp + ${LIBRARY_DIR}/Foundation/src/ConsoleChannel.cpp + ${LIBRARY_DIR}/Foundation/src/CountingStream.cpp + ${LIBRARY_DIR}/Foundation/src/DateTime.cpp + ${LIBRARY_DIR}/Foundation/src/DateTimeFormat.cpp + ${LIBRARY_DIR}/Foundation/src/DateTimeFormatter.cpp + ${LIBRARY_DIR}/Foundation/src/DateTimeParser.cpp + ${LIBRARY_DIR}/Foundation/src/Debugger.cpp + ${LIBRARY_DIR}/Foundation/src/DeflatingStream.cpp + ${LIBRARY_DIR}/Foundation/src/DigestEngine.cpp + ${LIBRARY_DIR}/Foundation/src/DigestStream.cpp + ${LIBRARY_DIR}/Foundation/src/DirectoryIterator.cpp + ${LIBRARY_DIR}/Foundation/src/DirectoryIteratorStrategy.cpp + ${LIBRARY_DIR}/Foundation/src/DirectoryWatcher.cpp + ${LIBRARY_DIR}/Foundation/src/Environment.cpp + ${LIBRARY_DIR}/Foundation/src/Error.cpp + ${LIBRARY_DIR}/Foundation/src/ErrorHandler.cpp + ${LIBRARY_DIR}/Foundation/src/Event.cpp + ${LIBRARY_DIR}/Foundation/src/EventArgs.cpp + ${LIBRARY_DIR}/Foundation/src/EventChannel.cpp + ${LIBRARY_DIR}/Foundation/src/Exception.cpp + ${LIBRARY_DIR}/Foundation/src/FIFOBufferStream.cpp + ${LIBRARY_DIR}/Foundation/src/File.cpp + ${LIBRARY_DIR}/Foundation/src/FileChannel.cpp + ${LIBRARY_DIR}/Foundation/src/FileStream.cpp + ${LIBRARY_DIR}/Foundation/src/FileStreamFactory.cpp + ${LIBRARY_DIR}/Foundation/src/Format.cpp + ${LIBRARY_DIR}/Foundation/src/Formatter.cpp + ${LIBRARY_DIR}/Foundation/src/FormattingChannel.cpp + ${LIBRARY_DIR}/Foundation/src/FPEnvironment.cpp + ${LIBRARY_DIR}/Foundation/src/Glob.cpp + ${LIBRARY_DIR}/Foundation/src/Hash.cpp + ${LIBRARY_DIR}/Foundation/src/HashStatistic.cpp + ${LIBRARY_DIR}/Foundation/src/HexBinaryDecoder.cpp + ${LIBRARY_DIR}/Foundation/src/HexBinaryEncoder.cpp + ${LIBRARY_DIR}/Foundation/src/InflatingStream.cpp + ${LIBRARY_DIR}/Foundation/src/JSONString.cpp + ${LIBRARY_DIR}/Foundation/src/Latin1Encoding.cpp + ${LIBRARY_DIR}/Foundation/src/Latin2Encoding.cpp + ${LIBRARY_DIR}/Foundation/src/Latin9Encoding.cpp + ${LIBRARY_DIR}/Foundation/src/LineEndingConverter.cpp + ${LIBRARY_DIR}/Foundation/src/LocalDateTime.cpp + ${LIBRARY_DIR}/Foundation/src/LogFile.cpp + 
${LIBRARY_DIR}/Foundation/src/Logger.cpp + ${LIBRARY_DIR}/Foundation/src/LoggingFactory.cpp + ${LIBRARY_DIR}/Foundation/src/LoggingRegistry.cpp + ${LIBRARY_DIR}/Foundation/src/LogStream.cpp + ${LIBRARY_DIR}/Foundation/src/Manifest.cpp + ${LIBRARY_DIR}/Foundation/src/MD4Engine.cpp + ${LIBRARY_DIR}/Foundation/src/MD5Engine.cpp + ${LIBRARY_DIR}/Foundation/src/MemoryPool.cpp + ${LIBRARY_DIR}/Foundation/src/MemoryStream.cpp + ${LIBRARY_DIR}/Foundation/src/Message.cpp + ${LIBRARY_DIR}/Foundation/src/Mutex.cpp + ${LIBRARY_DIR}/Foundation/src/NamedEvent.cpp + ${LIBRARY_DIR}/Foundation/src/NamedMutex.cpp + ${LIBRARY_DIR}/Foundation/src/NestedDiagnosticContext.cpp + ${LIBRARY_DIR}/Foundation/src/Notification.cpp + ${LIBRARY_DIR}/Foundation/src/NotificationCenter.cpp + ${LIBRARY_DIR}/Foundation/src/NotificationQueue.cpp + ${LIBRARY_DIR}/Foundation/src/NullChannel.cpp + ${LIBRARY_DIR}/Foundation/src/NullStream.cpp + ${LIBRARY_DIR}/Foundation/src/NumberFormatter.cpp + ${LIBRARY_DIR}/Foundation/src/NumberParser.cpp + ${LIBRARY_DIR}/Foundation/src/NumericString.cpp + ${LIBRARY_DIR}/Foundation/src/Path.cpp + ${LIBRARY_DIR}/Foundation/src/PatternFormatter.cpp + ${LIBRARY_DIR}/Foundation/src/Pipe.cpp + ${LIBRARY_DIR}/Foundation/src/PipeImpl.cpp + ${LIBRARY_DIR}/Foundation/src/PipeStream.cpp + ${LIBRARY_DIR}/Foundation/src/PriorityNotificationQueue.cpp + ${LIBRARY_DIR}/Foundation/src/Process.cpp + ${LIBRARY_DIR}/Foundation/src/PurgeStrategy.cpp + ${LIBRARY_DIR}/Foundation/src/Random.cpp + ${LIBRARY_DIR}/Foundation/src/RandomStream.cpp + ${LIBRARY_DIR}/Foundation/src/RefCountedObject.cpp + ${LIBRARY_DIR}/Foundation/src/RegularExpression.cpp + ${LIBRARY_DIR}/Foundation/src/RotateStrategy.cpp + ${LIBRARY_DIR}/Foundation/src/Runnable.cpp + ${LIBRARY_DIR}/Foundation/src/RWLock.cpp + ${LIBRARY_DIR}/Foundation/src/Semaphore.cpp + ${LIBRARY_DIR}/Foundation/src/SHA1Engine.cpp + ${LIBRARY_DIR}/Foundation/src/SharedLibrary.cpp + ${LIBRARY_DIR}/Foundation/src/SharedMemory.cpp + ${LIBRARY_DIR}/Foundation/src/SignalHandler.cpp + ${LIBRARY_DIR}/Foundation/src/SimpleFileChannel.cpp + ${LIBRARY_DIR}/Foundation/src/SortedDirectoryIterator.cpp + ${LIBRARY_DIR}/Foundation/src/SplitterChannel.cpp + ${LIBRARY_DIR}/Foundation/src/Stopwatch.cpp + ${LIBRARY_DIR}/Foundation/src/StreamChannel.cpp + ${LIBRARY_DIR}/Foundation/src/StreamConverter.cpp + ${LIBRARY_DIR}/Foundation/src/StreamCopier.cpp + ${LIBRARY_DIR}/Foundation/src/StreamTokenizer.cpp + ${LIBRARY_DIR}/Foundation/src/String.cpp + ${LIBRARY_DIR}/Foundation/src/StringTokenizer.cpp + ${LIBRARY_DIR}/Foundation/src/SynchronizedObject.cpp + ${LIBRARY_DIR}/Foundation/src/SyslogChannel.cpp + ${LIBRARY_DIR}/Foundation/src/Task.cpp + ${LIBRARY_DIR}/Foundation/src/TaskManager.cpp + ${LIBRARY_DIR}/Foundation/src/TaskNotification.cpp + ${LIBRARY_DIR}/Foundation/src/TeeStream.cpp + ${LIBRARY_DIR}/Foundation/src/TemporaryFile.cpp + ${LIBRARY_DIR}/Foundation/src/TextBufferIterator.cpp + ${LIBRARY_DIR}/Foundation/src/TextConverter.cpp + ${LIBRARY_DIR}/Foundation/src/TextEncoding.cpp + ${LIBRARY_DIR}/Foundation/src/TextIterator.cpp + ${LIBRARY_DIR}/Foundation/src/Thread.cpp + ${LIBRARY_DIR}/Foundation/src/ThreadLocal.cpp + ${LIBRARY_DIR}/Foundation/src/ThreadPool.cpp + ${LIBRARY_DIR}/Foundation/src/ThreadTarget.cpp + ${LIBRARY_DIR}/Foundation/src/TimedNotificationQueue.cpp + ${LIBRARY_DIR}/Foundation/src/Timer.cpp + ${LIBRARY_DIR}/Foundation/src/Timespan.cpp + ${LIBRARY_DIR}/Foundation/src/Timestamp.cpp + ${LIBRARY_DIR}/Foundation/src/Timezone.cpp + ${LIBRARY_DIR}/Foundation/src/Token.cpp + 
${LIBRARY_DIR}/Foundation/src/Unicode.cpp + ${LIBRARY_DIR}/Foundation/src/UnicodeConverter.cpp + ${LIBRARY_DIR}/Foundation/src/URI.cpp + ${LIBRARY_DIR}/Foundation/src/URIStreamFactory.cpp + ${LIBRARY_DIR}/Foundation/src/URIStreamOpener.cpp + ${LIBRARY_DIR}/Foundation/src/UTF16Encoding.cpp + ${LIBRARY_DIR}/Foundation/src/UTF32Encoding.cpp + ${LIBRARY_DIR}/Foundation/src/UTF8Encoding.cpp + ${LIBRARY_DIR}/Foundation/src/UTF8String.cpp + ${LIBRARY_DIR}/Foundation/src/UUID.cpp + ${LIBRARY_DIR}/Foundation/src/UUIDGenerator.cpp + ${LIBRARY_DIR}/Foundation/src/Var.cpp + ${LIBRARY_DIR}/Foundation/src/VarHolder.cpp + ${LIBRARY_DIR}/Foundation/src/VarIterator.cpp + ${LIBRARY_DIR}/Foundation/src/Void.cpp + ${LIBRARY_DIR}/Foundation/src/Windows1250Encoding.cpp + ${LIBRARY_DIR}/Foundation/src/Windows1251Encoding.cpp + ${LIBRARY_DIR}/Foundation/src/Windows1252Encoding.cpp + ) + + add_library (_poco_foundation ${SRCS}) + add_library (Poco::Foundation ALIAS _poco_foundation) + + if (COMPILER_GCC) + target_compile_options (_poco_foundation + PRIVATE + -Wno-suggest-override + ) + elseif (COMPILER_CLANG) + target_compile_options (_poco_foundation + PRIVATE + -Wno-atomic-implicit-seq-cst + -Wno-deprecated + -Wno-extra-semi-stmt + -Wno-zero-as-null-pointer-constant + -Wno-implicit-int-float-conversion + -Wno-thread-safety-analysis + -Wno-thread-safety-negative + ) + endif () + target_compile_options (_poco_foundation + PRIVATE + -Wno-sign-compare + -Wno-unused-parameter + ) + target_compile_definitions (_poco_foundation + PRIVATE + POCO_UNBUNDLED + POCO_UNBUNDLED_ZLIB + PUBLIC + POCO_ENABLE_CPP11 + POCO_OS_FAMILY_UNIX + ) + target_include_directories (_poco_foundation SYSTEM PUBLIC ${LIBRARY_DIR}/Foundation/include) + target_link_libraries (_poco_foundation PRIVATE Poco::Foundation::PCRE zlib) +else () + add_library (Poco::Foundation UNKNOWN IMPORTED GLOBAL) + + find_library (LIBRARY_POCO_FOUNDATION PocoFoundation) + find_path (INCLUDE_POCO_FOUNDATION Poco/Foundation.h) + set_target_properties (Poco::Foundation PROPERTIES IMPORTED_LOCATION ${LIBRARY_POCO_FOUNDATION}) + set_target_properties (Poco::Foundation PROPERTIES INTERFACE_INCLUDE_DIRECTORIES ${INCLUDE_POCO_FOUNDATION}) + + message (STATUS "Using Poco::Foundation: ${LIBRARY_POCO_FOUNDATION} ${INCLUDE_POCO_FOUNDATION}") +endif () diff --git a/contrib/poco-cmake/JSON/CMakeLists.txt b/contrib/poco-cmake/JSON/CMakeLists.txt new file mode 100644 index 00000000000..89054cf225d --- /dev/null +++ b/contrib/poco-cmake/JSON/CMakeLists.txt @@ -0,0 +1,42 @@ +if (USE_INTERNAL_POCO_LIBRARY) + # Poco::JSON (pdjson) + + set (SRCS_PDJSON + ${LIBRARY_DIR}/JSON/src/pdjson.c + ) + + add_library (_poco_json_pdjson ${SRCS_PDJSON}) + add_library (Poco::JSON::Pdjson ALIAS _poco_json_pdjson) + + # Poco::JSON + + set (SRCS + ${LIBRARY_DIR}/JSON/src/Array.cpp + ${LIBRARY_DIR}/JSON/src/Handler.cpp + ${LIBRARY_DIR}/JSON/src/JSONException.cpp + ${LIBRARY_DIR}/JSON/src/Object.cpp + ${LIBRARY_DIR}/JSON/src/ParseHandler.cpp + ${LIBRARY_DIR}/JSON/src/Parser.cpp + ${LIBRARY_DIR}/JSON/src/ParserImpl.cpp + ${LIBRARY_DIR}/JSON/src/PrintHandler.cpp + ${LIBRARY_DIR}/JSON/src/Query.cpp + ${LIBRARY_DIR}/JSON/src/Stringifier.cpp + ${LIBRARY_DIR}/JSON/src/Template.cpp + ${LIBRARY_DIR}/JSON/src/TemplateCache.cpp + ) + + add_library (_poco_json ${SRCS}) + add_library (Poco::JSON ALIAS _poco_json) + + target_include_directories (_poco_json SYSTEM PUBLIC ${LIBRARY_DIR}/JSON/include) + target_link_libraries (_poco_json PUBLIC Poco::Foundation Poco::JSON::Pdjson) +else () + add_library (Poco::JSON 
UNKNOWN IMPORTED GLOBAL) + + find_library (LIBRARY_POCO_JSON PocoJSON) + find_path (INCLUDE_POCO_JSON Poco/JSON/JSON.h) + set_target_properties (Poco::JSON PROPERTIES IMPORTED_LOCATION ${LIBRARY_POCO_JSON}) + set_target_properties (Poco::JSON PROPERTIES INTERFACE_INCLUDE_DIRECTORIES ${INCLUDE_POCO_JSON}) + + message (STATUS "Using Poco::JSON: ${LIBRARY_POCO_JSON} ${INCLUDE_POCO_JSON}") +endif () diff --git a/contrib/poco-cmake/MongoDB/CMakeLists.txt b/contrib/poco-cmake/MongoDB/CMakeLists.txt new file mode 100644 index 00000000000..0d79f680a64 --- /dev/null +++ b/contrib/poco-cmake/MongoDB/CMakeLists.txt @@ -0,0 +1,40 @@ +if (USE_INTERNAL_POCO_LIBRARY) + set (SRCS + ${LIBRARY_DIR}/MongoDB/src/Array.cpp + ${LIBRARY_DIR}/MongoDB/src/Binary.cpp + ${LIBRARY_DIR}/MongoDB/src/Connection.cpp + ${LIBRARY_DIR}/MongoDB/src/Cursor.cpp + ${LIBRARY_DIR}/MongoDB/src/Database.cpp + ${LIBRARY_DIR}/MongoDB/src/DeleteRequest.cpp + ${LIBRARY_DIR}/MongoDB/src/Document.cpp + ${LIBRARY_DIR}/MongoDB/src/Element.cpp + ${LIBRARY_DIR}/MongoDB/src/GetMoreRequest.cpp + ${LIBRARY_DIR}/MongoDB/src/InsertRequest.cpp + ${LIBRARY_DIR}/MongoDB/src/JavaScriptCode.cpp + ${LIBRARY_DIR}/MongoDB/src/KillCursorsRequest.cpp + ${LIBRARY_DIR}/MongoDB/src/Message.cpp + ${LIBRARY_DIR}/MongoDB/src/MessageHeader.cpp + ${LIBRARY_DIR}/MongoDB/src/ObjectId.cpp + ${LIBRARY_DIR}/MongoDB/src/QueryRequest.cpp + ${LIBRARY_DIR}/MongoDB/src/RegularExpression.cpp + ${LIBRARY_DIR}/MongoDB/src/ReplicaSet.cpp + ${LIBRARY_DIR}/MongoDB/src/RequestMessage.cpp + ${LIBRARY_DIR}/MongoDB/src/ResponseMessage.cpp + ${LIBRARY_DIR}/MongoDB/src/UpdateRequest.cpp + ) + + add_library (_poco_mongodb ${SRCS}) + add_library (Poco::MongoDB ALIAS _poco_mongodb) + + target_include_directories (_poco_mongodb SYSTEM PUBLIC ${LIBRARY_DIR}/MongoDB/include) + target_link_libraries (_poco_mongodb PUBLIC Poco::Net) +else () + add_library (Poco::MongoDB UNKNOWN IMPORTED GLOBAL) + + find_library (LIBRARY_POCO_MONGODB PocoMongoDB) + find_path (INCLUDE_POCO_MONGODB Poco/MongoDB/MongoDB.h) + set_target_properties (Poco::MongoDB PROPERTIES IMPORTED_LOCATION ${LIBRARY_POCO_MONGODB}) + set_target_properties (Poco::MongoDB PROPERTIES INTERFACE_INCLUDE_DIRECTORIES ${INCLUDE_POCO_MONGODB}) + + message (STATUS "Using Poco::MongoDB: ${LIBRARY_POCO_MONGODB} ${INCLUDE_POCO_MONGODB}") +endif () diff --git a/contrib/poco-cmake/Net/CMakeLists.txt b/contrib/poco-cmake/Net/CMakeLists.txt new file mode 100644 index 00000000000..9bc06e52e05 --- /dev/null +++ b/contrib/poco-cmake/Net/CMakeLists.txt @@ -0,0 +1,139 @@ +if (USE_INTERNAL_POCO_LIBRARY) + set (SRCS + ${LIBRARY_DIR}/Net/src/AbstractHTTPRequestHandler.cpp + ${LIBRARY_DIR}/Net/src/DatagramSocket.cpp + ${LIBRARY_DIR}/Net/src/DatagramSocketImpl.cpp + ${LIBRARY_DIR}/Net/src/DialogSocket.cpp + ${LIBRARY_DIR}/Net/src/DNS.cpp + ${LIBRARY_DIR}/Net/src/FilePartSource.cpp + ${LIBRARY_DIR}/Net/src/FTPClientSession.cpp + ${LIBRARY_DIR}/Net/src/FTPStreamFactory.cpp + ${LIBRARY_DIR}/Net/src/HostEntry.cpp + ${LIBRARY_DIR}/Net/src/HTMLForm.cpp + ${LIBRARY_DIR}/Net/src/HTTPAuthenticationParams.cpp + ${LIBRARY_DIR}/Net/src/HTTPBasicCredentials.cpp + ${LIBRARY_DIR}/Net/src/HTTPBufferAllocator.cpp + ${LIBRARY_DIR}/Net/src/HTTPChunkedStream.cpp + ${LIBRARY_DIR}/Net/src/HTTPClientSession.cpp + ${LIBRARY_DIR}/Net/src/HTTPCookie.cpp + ${LIBRARY_DIR}/Net/src/HTTPCredentials.cpp + ${LIBRARY_DIR}/Net/src/HTTPDigestCredentials.cpp + ${LIBRARY_DIR}/Net/src/HTTPFixedLengthStream.cpp + ${LIBRARY_DIR}/Net/src/HTTPHeaderStream.cpp + ${LIBRARY_DIR}/Net/src/HTTPIOStream.cpp 
+ ${LIBRARY_DIR}/Net/src/HTTPMessage.cpp + ${LIBRARY_DIR}/Net/src/HTTPRequest.cpp + ${LIBRARY_DIR}/Net/src/HTTPRequestHandler.cpp + ${LIBRARY_DIR}/Net/src/HTTPRequestHandlerFactory.cpp + ${LIBRARY_DIR}/Net/src/HTTPResponse.cpp + ${LIBRARY_DIR}/Net/src/HTTPServer.cpp + ${LIBRARY_DIR}/Net/src/HTTPServerConnection.cpp + ${LIBRARY_DIR}/Net/src/HTTPServerConnectionFactory.cpp + ${LIBRARY_DIR}/Net/src/HTTPServerParams.cpp + ${LIBRARY_DIR}/Net/src/HTTPServerRequest.cpp + ${LIBRARY_DIR}/Net/src/HTTPServerRequestImpl.cpp + ${LIBRARY_DIR}/Net/src/HTTPServerResponse.cpp + ${LIBRARY_DIR}/Net/src/HTTPServerResponseImpl.cpp + ${LIBRARY_DIR}/Net/src/HTTPServerSession.cpp + ${LIBRARY_DIR}/Net/src/HTTPSession.cpp + ${LIBRARY_DIR}/Net/src/HTTPSessionFactory.cpp + ${LIBRARY_DIR}/Net/src/HTTPSessionInstantiator.cpp + ${LIBRARY_DIR}/Net/src/HTTPStream.cpp + ${LIBRARY_DIR}/Net/src/HTTPStreamFactory.cpp + ${LIBRARY_DIR}/Net/src/ICMPClient.cpp + ${LIBRARY_DIR}/Net/src/ICMPEventArgs.cpp + ${LIBRARY_DIR}/Net/src/ICMPPacket.cpp + ${LIBRARY_DIR}/Net/src/ICMPPacketImpl.cpp + ${LIBRARY_DIR}/Net/src/ICMPSocket.cpp + ${LIBRARY_DIR}/Net/src/ICMPSocketImpl.cpp + ${LIBRARY_DIR}/Net/src/ICMPv4PacketImpl.cpp + ${LIBRARY_DIR}/Net/src/IPAddress.cpp + ${LIBRARY_DIR}/Net/src/IPAddressImpl.cpp + ${LIBRARY_DIR}/Net/src/MailMessage.cpp + ${LIBRARY_DIR}/Net/src/MailRecipient.cpp + ${LIBRARY_DIR}/Net/src/MailStream.cpp + ${LIBRARY_DIR}/Net/src/MediaType.cpp + ${LIBRARY_DIR}/Net/src/MessageHeader.cpp + ${LIBRARY_DIR}/Net/src/MulticastSocket.cpp + ${LIBRARY_DIR}/Net/src/MultipartReader.cpp + ${LIBRARY_DIR}/Net/src/MultipartWriter.cpp + ${LIBRARY_DIR}/Net/src/NameValueCollection.cpp + ${LIBRARY_DIR}/Net/src/Net.cpp + ${LIBRARY_DIR}/Net/src/NetException.cpp + ${LIBRARY_DIR}/Net/src/NetworkInterface.cpp + ${LIBRARY_DIR}/Net/src/NTPClient.cpp + ${LIBRARY_DIR}/Net/src/NTPEventArgs.cpp + ${LIBRARY_DIR}/Net/src/NTPPacket.cpp + ${LIBRARY_DIR}/Net/src/NullPartHandler.cpp + ${LIBRARY_DIR}/Net/src/OAuth10Credentials.cpp + ${LIBRARY_DIR}/Net/src/OAuth20Credentials.cpp + ${LIBRARY_DIR}/Net/src/PartHandler.cpp + ${LIBRARY_DIR}/Net/src/PartSource.cpp + ${LIBRARY_DIR}/Net/src/PartStore.cpp + ${LIBRARY_DIR}/Net/src/PollSet.cpp + ${LIBRARY_DIR}/Net/src/POP3ClientSession.cpp + ${LIBRARY_DIR}/Net/src/QuotedPrintableDecoder.cpp + ${LIBRARY_DIR}/Net/src/QuotedPrintableEncoder.cpp + ${LIBRARY_DIR}/Net/src/RawSocket.cpp + ${LIBRARY_DIR}/Net/src/RawSocketImpl.cpp + ${LIBRARY_DIR}/Net/src/RemoteSyslogChannel.cpp + ${LIBRARY_DIR}/Net/src/RemoteSyslogListener.cpp + ${LIBRARY_DIR}/Net/src/ServerSocket.cpp + ${LIBRARY_DIR}/Net/src/ServerSocketImpl.cpp + ${LIBRARY_DIR}/Net/src/SMTPChannel.cpp + ${LIBRARY_DIR}/Net/src/SMTPClientSession.cpp + ${LIBRARY_DIR}/Net/src/Socket.cpp + ${LIBRARY_DIR}/Net/src/SocketAddress.cpp + ${LIBRARY_DIR}/Net/src/SocketAddressImpl.cpp + ${LIBRARY_DIR}/Net/src/SocketImpl.cpp + ${LIBRARY_DIR}/Net/src/SocketNotification.cpp + ${LIBRARY_DIR}/Net/src/SocketNotifier.cpp + ${LIBRARY_DIR}/Net/src/SocketReactor.cpp + ${LIBRARY_DIR}/Net/src/SocketStream.cpp + ${LIBRARY_DIR}/Net/src/StreamSocket.cpp + ${LIBRARY_DIR}/Net/src/StreamSocketImpl.cpp + ${LIBRARY_DIR}/Net/src/StringPartSource.cpp + ${LIBRARY_DIR}/Net/src/TCPServer.cpp + ${LIBRARY_DIR}/Net/src/TCPServerConnection.cpp + ${LIBRARY_DIR}/Net/src/TCPServerConnectionFactory.cpp + ${LIBRARY_DIR}/Net/src/TCPServerDispatcher.cpp + ${LIBRARY_DIR}/Net/src/TCPServerParams.cpp + ${LIBRARY_DIR}/Net/src/WebSocket.cpp + ${LIBRARY_DIR}/Net/src/WebSocketImpl.cpp + ) + + add_library (_poco_net ${SRCS}) + 
add_library (Poco::Net ALIAS _poco_net) + + if (OS_LINUX) + target_compile_definitions (_poco_net PUBLIC POCO_HAVE_FD_EPOLL) + elseif (OS_DARWIN OR OS_FREEBSD) + target_compile_definitions (_poco_net PUBLIC POCO_HAVE_FD_POLL) + endif () + + if (COMPILER_CLANG) + # clang-specific warnings + target_compile_options (_poco_net + PRIVATE + -Wno-atomic-implicit-seq-cst + -Wno-extra-semi-stmt + -Wno-extra-semi + ) + endif () + target_compile_options (_poco_net + PRIVATE + -Wno-deprecated + -Wno-extra-semi + ) + target_include_directories (_poco_net SYSTEM PUBLIC ${LIBRARY_DIR}/Net/include) + target_link_libraries (_poco_net PUBLIC Poco::Foundation) +else () + add_library (Poco::Net UNKNOWN IMPORTED GLOBAL) + + find_library (LIBRARY_POCO_NET PocoNet) + find_path (INCLUDE_POCO_NET Poco/Net/Net.h) + set_target_properties (Poco::Net PROPERTIES IMPORTED_LOCATION ${LIBRARY_POCO_NET}) + set_target_properties (Poco::Net PROPERTIES INTERFACE_INCLUDE_DIRECTORIES ${INCLUDE_POCO_NET}) + + message (STATUS "Using Poco::Net: ${LIBRARY_POCO_NET} ${INCLUDE_POCO_NET}") +endif () diff --git a/contrib/poco-cmake/Net/SSL/CMakeLists.txt b/contrib/poco-cmake/Net/SSL/CMakeLists.txt new file mode 100644 index 00000000000..7cc71f441c7 --- /dev/null +++ b/contrib/poco-cmake/Net/SSL/CMakeLists.txt @@ -0,0 +1,53 @@ +if (ENABLE_SSL) + if (USE_INTERNAL_POCO_LIBRARY) + set (SRCS + ${LIBRARY_DIR}/NetSSL_OpenSSL/src/AcceptCertificateHandler.cpp + ${LIBRARY_DIR}/NetSSL_OpenSSL/src/CertificateHandlerFactory.cpp + ${LIBRARY_DIR}/NetSSL_OpenSSL/src/CertificateHandlerFactoryMgr.cpp + ${LIBRARY_DIR}/NetSSL_OpenSSL/src/ConsoleCertificateHandler.cpp + ${LIBRARY_DIR}/NetSSL_OpenSSL/src/Context.cpp + ${LIBRARY_DIR}/NetSSL_OpenSSL/src/HTTPSClientSession.cpp + ${LIBRARY_DIR}/NetSSL_OpenSSL/src/HTTPSSessionInstantiator.cpp + ${LIBRARY_DIR}/NetSSL_OpenSSL/src/HTTPSStreamFactory.cpp + ${LIBRARY_DIR}/NetSSL_OpenSSL/src/InvalidCertificateHandler.cpp + ${LIBRARY_DIR}/NetSSL_OpenSSL/src/KeyConsoleHandler.cpp + ${LIBRARY_DIR}/NetSSL_OpenSSL/src/KeyFileHandler.cpp + ${LIBRARY_DIR}/NetSSL_OpenSSL/src/PrivateKeyFactory.cpp + ${LIBRARY_DIR}/NetSSL_OpenSSL/src/PrivateKeyFactoryMgr.cpp + ${LIBRARY_DIR}/NetSSL_OpenSSL/src/PrivateKeyPassphraseHandler.cpp + ${LIBRARY_DIR}/NetSSL_OpenSSL/src/RejectCertificateHandler.cpp + ${LIBRARY_DIR}/NetSSL_OpenSSL/src/SecureServerSocket.cpp + ${LIBRARY_DIR}/NetSSL_OpenSSL/src/SecureServerSocketImpl.cpp + ${LIBRARY_DIR}/NetSSL_OpenSSL/src/SecureSMTPClientSession.cpp + ${LIBRARY_DIR}/NetSSL_OpenSSL/src/SecureSocketImpl.cpp + ${LIBRARY_DIR}/NetSSL_OpenSSL/src/SecureStreamSocket.cpp + ${LIBRARY_DIR}/NetSSL_OpenSSL/src/SecureStreamSocketImpl.cpp + ${LIBRARY_DIR}/NetSSL_OpenSSL/src/Session.cpp + ${LIBRARY_DIR}/NetSSL_OpenSSL/src/SSLException.cpp + ${LIBRARY_DIR}/NetSSL_OpenSSL/src/SSLManager.cpp + ${LIBRARY_DIR}/NetSSL_OpenSSL/src/Utility.cpp + ${LIBRARY_DIR}/NetSSL_OpenSSL/src/VerificationErrorArgs.cpp + ${LIBRARY_DIR}/NetSSL_OpenSSL/src/X509Certificate.cpp + ) + + add_library (_poco_net_ssl ${SRCS}) + add_library (Poco::Net::SSL ALIAS _poco_net_ssl) + + target_include_directories (_poco_net_ssl SYSTEM PUBLIC ${LIBRARY_DIR}/NetSSL_OpenSSL/include) + target_link_libraries (_poco_net_ssl PUBLIC Poco::Crypto Poco::Net Poco::Util) + else () + add_library (Poco::Net::SSL UNKNOWN IMPORTED GLOBAL) + + find_library (LIBRARY_POCO_NET_SSL PocoNetSSL) + find_path (INCLUDE_POCO_NET_SSL Poco/Net/NetSSL.h) + set_target_properties (Poco::Net::SSL PROPERTIES IMPORTED_LOCATION ${LIBRARY_POCO_NET_SSL}) + set_target_properties (Poco::Net::SSL 
PROPERTIES INTERFACE_INCLUDE_DIRECTORIES ${INCLUDE_POCO_NET_SSL}) + + message (STATUS "Using Poco::Net::SSL: ${LIBRARY_POCO_NET_SSL} ${INCLUDE_POCO_NET_SSL}") + endif () +else () + add_library (_poco_net_ssl INTERFACE) + add_library (Poco::Net::SSL ALIAS _poco_net_ssl) + + message (STATUS "Not using Poco::Net::SSL") +endif () diff --git a/contrib/poco-cmake/Redis/CMakeLists.txt b/contrib/poco-cmake/Redis/CMakeLists.txt new file mode 100644 index 00000000000..43d0009101c --- /dev/null +++ b/contrib/poco-cmake/Redis/CMakeLists.txt @@ -0,0 +1,34 @@ +if (USE_INTERNAL_POCO_LIBRARY) + set (SRCS + ${LIBRARY_DIR}/Redis/src/Array.cpp + ${LIBRARY_DIR}/Redis/src/AsyncReader.cpp + ${LIBRARY_DIR}/Redis/src/Client.cpp + ${LIBRARY_DIR}/Redis/src/Command.cpp + ${LIBRARY_DIR}/Redis/src/Error.cpp + ${LIBRARY_DIR}/Redis/src/Exception.cpp + ${LIBRARY_DIR}/Redis/src/RedisEventArgs.cpp + ${LIBRARY_DIR}/Redis/src/RedisStream.cpp + ${LIBRARY_DIR}/Redis/src/Type.cpp + ) + + add_library (_poco_redis ${SRCS}) + add_library (Poco::Redis ALIAS _poco_redis) + + if (COMPILER_GCC) + target_compile_options (_poco_redis PRIVATE -Wno-deprecated-copy) + endif () + target_compile_options (_poco_redis PRIVATE -Wno-shadow) + target_include_directories (_poco_redis SYSTEM PUBLIC ${LIBRARY_DIR}/Redis/include) + target_link_libraries (_poco_redis PUBLIC Poco::Net) +else () + add_library (Poco::Redis UNKNOWN IMPORTED GLOBAL) + + find_library (LIBRARY_POCO_REDIS PocoRedis) + find_path (INCLUDE_POCO_REDIS Poco/Redis/Redis.h) + set_target_properties (Poco::Redis PROPERTIES IMPORTED_LOCATION ${LIBRARY_POCO_REDIS}) + set_target_properties (Poco::Redis PROPERTIES INTERFACE_INCLUDE_DIRECTORIES ${INCLUDE_POCO_REDIS}) + + target_link_libraries (Poco::Redis INTERFACE Poco::Net) + + message (STATUS "Using Poco::Redis: ${LIBRARY_POCO_REDIS} ${INCLUDE_POCO_REDIS}") +endif () diff --git a/contrib/poco-cmake/Util/CMakeLists.txt b/contrib/poco-cmake/Util/CMakeLists.txt new file mode 100644 index 00000000000..f5af3a5793c --- /dev/null +++ b/contrib/poco-cmake/Util/CMakeLists.txt @@ -0,0 +1,46 @@ +if (USE_INTERNAL_POCO_LIBRARY) + set (SRCS + ${LIBRARY_DIR}/Util/src/AbstractConfiguration.cpp + ${LIBRARY_DIR}/Util/src/Application.cpp + ${LIBRARY_DIR}/Util/src/ConfigurationMapper.cpp + ${LIBRARY_DIR}/Util/src/ConfigurationView.cpp + ${LIBRARY_DIR}/Util/src/FilesystemConfiguration.cpp + ${LIBRARY_DIR}/Util/src/HelpFormatter.cpp + ${LIBRARY_DIR}/Util/src/IniFileConfiguration.cpp + ${LIBRARY_DIR}/Util/src/IntValidator.cpp + ${LIBRARY_DIR}/Util/src/JSONConfiguration.cpp + ${LIBRARY_DIR}/Util/src/LayeredConfiguration.cpp + ${LIBRARY_DIR}/Util/src/LoggingConfigurator.cpp + ${LIBRARY_DIR}/Util/src/LoggingSubsystem.cpp + ${LIBRARY_DIR}/Util/src/MapConfiguration.cpp + ${LIBRARY_DIR}/Util/src/Option.cpp + ${LIBRARY_DIR}/Util/src/OptionCallback.cpp + ${LIBRARY_DIR}/Util/src/OptionException.cpp + ${LIBRARY_DIR}/Util/src/OptionProcessor.cpp + ${LIBRARY_DIR}/Util/src/OptionSet.cpp + ${LIBRARY_DIR}/Util/src/PropertyFileConfiguration.cpp + ${LIBRARY_DIR}/Util/src/RegExpValidator.cpp + ${LIBRARY_DIR}/Util/src/ServerApplication.cpp + ${LIBRARY_DIR}/Util/src/Subsystem.cpp + ${LIBRARY_DIR}/Util/src/SystemConfiguration.cpp + ${LIBRARY_DIR}/Util/src/Timer.cpp + ${LIBRARY_DIR}/Util/src/TimerTask.cpp + ${LIBRARY_DIR}/Util/src/Validator.cpp + ${LIBRARY_DIR}/Util/src/XMLConfiguration.cpp + ) + + add_library (_poco_util ${SRCS}) + add_library (Poco::Util ALIAS _poco_util) + + target_include_directories (_poco_util SYSTEM PUBLIC ${LIBRARY_DIR}/Util/include) + 
target_link_libraries (_poco_util PUBLIC Poco::JSON Poco::XML) +else () + add_library (Poco::Util UNKNOWN IMPORTED GLOBAL) + + find_library (LIBRARY_POCO_UTIL PocoUtil) + find_path (INCLUDE_POCO_UTIL Poco/Util/Util.h) + set_target_properties (Poco::Util PROPERTIES IMPORTED_LOCATION ${LIBRARY_POCO_UTIL}) + set_target_properties (Poco::Util PROPERTIES INTERFACE_INCLUDE_DIRECTORIES ${INCLUDE_POCO_UTIL}) + + message (STATUS "Using Poco::Util: ${LIBRARY_POCO_UTIL} ${INCLUDE_POCO_UTIL}") +endif () diff --git a/contrib/poco-cmake/XML/CMakeLists.txt b/contrib/poco-cmake/XML/CMakeLists.txt new file mode 100644 index 00000000000..448b7e22c7c --- /dev/null +++ b/contrib/poco-cmake/XML/CMakeLists.txt @@ -0,0 +1,110 @@ +if (USE_INTERNAL_POCO_LIBRARY) + # Poco::XML (expat) + + set (SRCS_EXPAT + ${LIBRARY_DIR}/XML/src/xmlrole.c + ${LIBRARY_DIR}/XML/src/xmltok_impl.c + ${LIBRARY_DIR}/XML/src/xmltok_ns.c + ${LIBRARY_DIR}/XML/src/xmltok.c + ) + + add_library (_poco_xml_expat ${SRCS_EXPAT}) + add_library (Poco::XML::Expat ALIAS _poco_xml_expat) + + target_include_directories (_poco_xml_expat PUBLIC ${LIBRARY_DIR}/XML/include) + + # Poco::XML + + set (SRCS + ${LIBRARY_DIR}/XML/src/AbstractContainerNode.cpp + ${LIBRARY_DIR}/XML/src/AbstractNode.cpp + ${LIBRARY_DIR}/XML/src/Attr.cpp + ${LIBRARY_DIR}/XML/src/Attributes.cpp + ${LIBRARY_DIR}/XML/src/AttributesImpl.cpp + ${LIBRARY_DIR}/XML/src/AttrMap.cpp + ${LIBRARY_DIR}/XML/src/CDATASection.cpp + ${LIBRARY_DIR}/XML/src/CharacterData.cpp + ${LIBRARY_DIR}/XML/src/ChildNodesList.cpp + ${LIBRARY_DIR}/XML/src/Comment.cpp + ${LIBRARY_DIR}/XML/src/ContentHandler.cpp + ${LIBRARY_DIR}/XML/src/DeclHandler.cpp + ${LIBRARY_DIR}/XML/src/DefaultHandler.cpp + ${LIBRARY_DIR}/XML/src/Document.cpp + ${LIBRARY_DIR}/XML/src/DocumentEvent.cpp + ${LIBRARY_DIR}/XML/src/DocumentFragment.cpp + ${LIBRARY_DIR}/XML/src/DocumentType.cpp + ${LIBRARY_DIR}/XML/src/DOMBuilder.cpp + ${LIBRARY_DIR}/XML/src/DOMException.cpp + ${LIBRARY_DIR}/XML/src/DOMImplementation.cpp + ${LIBRARY_DIR}/XML/src/DOMObject.cpp + ${LIBRARY_DIR}/XML/src/DOMParser.cpp + ${LIBRARY_DIR}/XML/src/DOMSerializer.cpp + ${LIBRARY_DIR}/XML/src/DOMWriter.cpp + ${LIBRARY_DIR}/XML/src/DTDHandler.cpp + ${LIBRARY_DIR}/XML/src/DTDMap.cpp + ${LIBRARY_DIR}/XML/src/Element.cpp + ${LIBRARY_DIR}/XML/src/ElementsByTagNameList.cpp + ${LIBRARY_DIR}/XML/src/Entity.cpp + ${LIBRARY_DIR}/XML/src/EntityReference.cpp + ${LIBRARY_DIR}/XML/src/EntityResolver.cpp + ${LIBRARY_DIR}/XML/src/EntityResolverImpl.cpp + ${LIBRARY_DIR}/XML/src/ErrorHandler.cpp + ${LIBRARY_DIR}/XML/src/Event.cpp + ${LIBRARY_DIR}/XML/src/EventDispatcher.cpp + ${LIBRARY_DIR}/XML/src/EventException.cpp + ${LIBRARY_DIR}/XML/src/EventListener.cpp + ${LIBRARY_DIR}/XML/src/EventTarget.cpp + ${LIBRARY_DIR}/XML/src/InputSource.cpp + ${LIBRARY_DIR}/XML/src/LexicalHandler.cpp + ${LIBRARY_DIR}/XML/src/Locator.cpp + ${LIBRARY_DIR}/XML/src/LocatorImpl.cpp + ${LIBRARY_DIR}/XML/src/MutationEvent.cpp + ${LIBRARY_DIR}/XML/src/Name.cpp + ${LIBRARY_DIR}/XML/src/NamedNodeMap.cpp + ${LIBRARY_DIR}/XML/src/NamePool.cpp + ${LIBRARY_DIR}/XML/src/NamespaceStrategy.cpp + ${LIBRARY_DIR}/XML/src/NamespaceSupport.cpp + ${LIBRARY_DIR}/XML/src/Node.cpp + ${LIBRARY_DIR}/XML/src/NodeAppender.cpp + ${LIBRARY_DIR}/XML/src/NodeFilter.cpp + ${LIBRARY_DIR}/XML/src/NodeIterator.cpp + ${LIBRARY_DIR}/XML/src/NodeList.cpp + ${LIBRARY_DIR}/XML/src/Notation.cpp + ${LIBRARY_DIR}/XML/src/ParserEngine.cpp + ${LIBRARY_DIR}/XML/src/ProcessingInstruction.cpp + ${LIBRARY_DIR}/XML/src/QName.cpp + 
${LIBRARY_DIR}/XML/src/SAXException.cpp + ${LIBRARY_DIR}/XML/src/SAXParser.cpp + ${LIBRARY_DIR}/XML/src/Text.cpp + ${LIBRARY_DIR}/XML/src/TreeWalker.cpp + ${LIBRARY_DIR}/XML/src/ValueTraits.cpp + ${LIBRARY_DIR}/XML/src/WhitespaceFilter.cpp + ${LIBRARY_DIR}/XML/src/XMLException.cpp + ${LIBRARY_DIR}/XML/src/XMLFilter.cpp + ${LIBRARY_DIR}/XML/src/XMLFilterImpl.cpp + ${LIBRARY_DIR}/XML/src/XMLReader.cpp + ${LIBRARY_DIR}/XML/src/XMLStreamParser.cpp + ${LIBRARY_DIR}/XML/src/XMLStreamParserException.cpp + ${LIBRARY_DIR}/XML/src/XMLString.cpp + ${LIBRARY_DIR}/XML/src/XMLWriter.cpp + + # expat + ${LIBRARY_DIR}/XML/src/xmlparse.cpp + ) + + add_library (_poco_xml ${SRCS}) + add_library (Poco::XML ALIAS _poco_xml) + + target_compile_options (_poco_xml PRIVATE -Wno-old-style-cast) + target_include_directories (_poco_xml SYSTEM PUBLIC ${LIBRARY_DIR}/XML/include) + target_link_libraries (_poco_xml PUBLIC Poco::Foundation Poco::XML::Expat) +else () + add_library (Poco::XML UNKNOWN IMPORTED GLOBAL) + + find_library (LIBRARY_POCO_XML PocoXML) + find_path (INCLUDE_POCO_XML Poco/XML/XML.h) + set_target_properties (Poco::XML PROPERTIES IMPORTED_LOCATION ${LIBRARY_POCO_XML}) + set_target_properties (Poco::XML PROPERTIES INTERFACE_INCLUDE_DIRECTORIES ${INCLUDE_POCO_XML}) + + message (STATUS "Using Poco::XML: ${LIBRARY_POCO_XML} ${INCLUDE_POCO_XML}") +endif () diff --git a/contrib/rapidjson b/contrib/rapidjson index 01950eb7ace..8f4c021fa2f 160000 --- a/contrib/rapidjson +++ b/contrib/rapidjson @@ -1 +1 @@ -Subproject commit 01950eb7acec78818d68b762efc869bba2420d82 +Subproject commit 8f4c021fa2f1e001d2376095928fc0532adf2ae6 diff --git a/contrib/replxx b/contrib/replxx index 07cbfbec550..f1332626639 160000 --- a/contrib/replxx +++ b/contrib/replxx @@ -1 +1 @@ -Subproject commit 07cbfbec550133b88c91c4073fa5af2ae2ae6a9a +Subproject commit f1332626639d6492eaf170758642da14fbbda7bf diff --git a/contrib/replxx-cmake/CMakeLists.txt b/contrib/replxx-cmake/CMakeLists.txt index cc428d957cf..48d7e8bb36b 100644 --- a/contrib/replxx-cmake/CMakeLists.txt +++ b/contrib/replxx-cmake/CMakeLists.txt @@ -20,14 +20,14 @@ if (ENABLE_REPLXX) ) add_library (replxx ${SRCS}) - target_include_directories(replxx PUBLIC ${LIBRARY_DIR}/include) + target_include_directories(replxx SYSTEM PUBLIC ${LIBRARY_DIR}/include) else () find_library(LIBRARY_REPLXX NAMES replxx replxx-static) find_path(INCLUDE_REPLXX replxx.hxx) add_library(replxx UNKNOWN IMPORTED) set_property(TARGET replxx PROPERTY IMPORTED_LOCATION ${LIBRARY_REPLXX}) - target_include_directories(replxx PUBLIC ${INCLUDE_REPLXX}) + target_include_directories(replxx SYSTEM PUBLIC ${INCLUDE_REPLXX}) set(CMAKE_REQUIRED_LIBRARIES replxx) check_cxx_source_compiles( @@ -45,8 +45,8 @@ if (ENABLE_REPLXX) endif () endif () - if (NOT (COMPILER_GCC AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 9)) - target_compile_options(replxx PUBLIC -Wno-documentation) + if (COMPILER_CLANG) + target_compile_options(replxx PRIVATE -Wno-documentation) endif () target_compile_definitions(replxx PUBLIC USE_REPLXX=1) diff --git a/contrib/unixodbc-cmake/CMakeLists.txt b/contrib/unixodbc-cmake/CMakeLists.txt index 1715747191c..6d1922075a6 100644 --- a/contrib/unixodbc-cmake/CMakeLists.txt +++ b/contrib/unixodbc-cmake/CMakeLists.txt @@ -1,288 +1,318 @@ -set(ODBC_SOURCE_DIR ${ClickHouse_SOURCE_DIR}/contrib/unixodbc) -set(ODBC_BINARY_DIR ${ClickHouse_BINARY_DIR}/contrib/unixodbc) +option (ENABLE_ODBC "Enable ODBC library" ${ENABLE_LIBRARIES}) +if (NOT OS_LINUX) + set (ENABLE_ODBC OFF CACHE INTERNAL "") +endif () -set(SRCS 
-${ODBC_SOURCE_DIR}/libltdl/lt__alloc.c -${ODBC_SOURCE_DIR}/libltdl/lt__strl.c -${ODBC_SOURCE_DIR}/libltdl/ltdl.c -${ODBC_SOURCE_DIR}/libltdl/lt_dlloader.c -${ODBC_SOURCE_DIR}/libltdl/slist.c -${ODBC_SOURCE_DIR}/libltdl/lt_error.c -${ODBC_SOURCE_DIR}/libltdl/loaders/dlopen.c -${ODBC_SOURCE_DIR}/libltdl/loaders/preopen.c -#${ODBC_SOURCE_DIR}/libltdl/lt__dirent.c -#${ODBC_SOURCE_DIR}/libltdl/lt__argz.c -#${ODBC_SOURCE_DIR}/libltdl/loaders/dld_link.c -#${ODBC_SOURCE_DIR}/libltdl/loaders/load_add_on.c -#${ODBC_SOURCE_DIR}/libltdl/loaders/shl_load.c -#${ODBC_SOURCE_DIR}/libltdl/loaders/loadlibrary.c -#${ODBC_SOURCE_DIR}/libltdl/loaders/dyld.c +if (ENABLE_ODBC) + option (USE_INTERNAL_ODBC_LIBRARY "Use internal ODBC library" ${NOT_UNBUNDLED}) -# This file is generated by 'libtool' inside libltdl directory and then removed. -${CMAKE_CURRENT_SOURCE_DIR}/linux_x86_64/libltdl/libltdlcS.c -) + if (USE_INTERNAL_ODBC_LIBRARY) + set (LIBRARY_DIR ${ClickHouse_SOURCE_DIR}/contrib/unixodbc) -add_library(ltdl ${SRCS}) + # ltdl -target_include_directories(ltdl PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/linux_x86_64/libltdl) -target_include_directories(ltdl PUBLIC ${ODBC_SOURCE_DIR}/libltdl) -target_include_directories(ltdl PUBLIC ${ODBC_SOURCE_DIR}/libltdl/libltdl) + set (SRCS_LTDL + # This file is generated by 'libtool' inside libltdl directory and then removed. + linux_x86_64/libltdl/libltdlcS.c -target_compile_definitions(ltdl PRIVATE -DHAVE_CONFIG_H -DLTDL -DLTDLOPEN=libltdlc) + ${LIBRARY_DIR}/libltdl/lt__alloc.c + ${LIBRARY_DIR}/libltdl/lt__strl.c + ${LIBRARY_DIR}/libltdl/ltdl.c + ${LIBRARY_DIR}/libltdl/lt_dlloader.c + ${LIBRARY_DIR}/libltdl/slist.c + ${LIBRARY_DIR}/libltdl/lt_error.c + ${LIBRARY_DIR}/libltdl/loaders/dlopen.c + ${LIBRARY_DIR}/libltdl/loaders/preopen.c + ) -target_compile_options(ltdl PRIVATE -Wno-constant-logical-operand -Wno-unknown-warning-option -O2) + add_library (ltdl ${SRCS_LTDL}) + target_include_directories(ltdl + PRIVATE + linux_x86_64/libltdl + PUBLIC + ${LIBRARY_DIR}/libltdl + ${LIBRARY_DIR}/libltdl/libltdl + ) + target_compile_definitions(ltdl PRIVATE -DHAVE_CONFIG_H -DLTDL -DLTDLOPEN=libltdlc) + target_compile_options(ltdl PRIVATE -Wno-constant-logical-operand -Wno-unknown-warning-option -O2) -set(SRCS -${ODBC_SOURCE_DIR}/DriverManager/__attribute.c -${ODBC_SOURCE_DIR}/DriverManager/__connection.c -${ODBC_SOURCE_DIR}/DriverManager/__handles.c -${ODBC_SOURCE_DIR}/DriverManager/__info.c -${ODBC_SOURCE_DIR}/DriverManager/__stats.c -${ODBC_SOURCE_DIR}/DriverManager/SQLAllocConnect.c -${ODBC_SOURCE_DIR}/DriverManager/SQLAllocEnv.c -${ODBC_SOURCE_DIR}/DriverManager/SQLAllocHandle.c -${ODBC_SOURCE_DIR}/DriverManager/SQLAllocHandleStd.c -${ODBC_SOURCE_DIR}/DriverManager/SQLAllocStmt.c -${ODBC_SOURCE_DIR}/DriverManager/SQLBindCol.c -${ODBC_SOURCE_DIR}/DriverManager/SQLBindParam.c -${ODBC_SOURCE_DIR}/DriverManager/SQLBindParameter.c -${ODBC_SOURCE_DIR}/DriverManager/SQLBrowseConnect.c -${ODBC_SOURCE_DIR}/DriverManager/SQLBrowseConnectW.c -${ODBC_SOURCE_DIR}/DriverManager/SQLBulkOperations.c -${ODBC_SOURCE_DIR}/DriverManager/SQLCancel.c -${ODBC_SOURCE_DIR}/DriverManager/SQLCancelHandle.c -${ODBC_SOURCE_DIR}/DriverManager/SQLCloseCursor.c -${ODBC_SOURCE_DIR}/DriverManager/SQLColAttribute.c -${ODBC_SOURCE_DIR}/DriverManager/SQLColAttributes.c -${ODBC_SOURCE_DIR}/DriverManager/SQLColAttributesW.c -${ODBC_SOURCE_DIR}/DriverManager/SQLColAttributeW.c -${ODBC_SOURCE_DIR}/DriverManager/SQLColumnPrivileges.c -${ODBC_SOURCE_DIR}/DriverManager/SQLColumnPrivilegesW.c 
-${ODBC_SOURCE_DIR}/DriverManager/SQLColumns.c -${ODBC_SOURCE_DIR}/DriverManager/SQLColumnsW.c -${ODBC_SOURCE_DIR}/DriverManager/SQLConnect.c -${ODBC_SOURCE_DIR}/DriverManager/SQLConnectW.c -${ODBC_SOURCE_DIR}/DriverManager/SQLCopyDesc.c -${ODBC_SOURCE_DIR}/DriverManager/SQLDataSources.c -${ODBC_SOURCE_DIR}/DriverManager/SQLDataSourcesW.c -${ODBC_SOURCE_DIR}/DriverManager/SQLDescribeCol.c -${ODBC_SOURCE_DIR}/DriverManager/SQLDescribeColW.c -${ODBC_SOURCE_DIR}/DriverManager/SQLDescribeParam.c -${ODBC_SOURCE_DIR}/DriverManager/SQLDisconnect.c -${ODBC_SOURCE_DIR}/DriverManager/SQLDriverConnect.c -${ODBC_SOURCE_DIR}/DriverManager/SQLDriverConnectW.c -${ODBC_SOURCE_DIR}/DriverManager/SQLDrivers.c -${ODBC_SOURCE_DIR}/DriverManager/SQLDriversW.c -${ODBC_SOURCE_DIR}/DriverManager/SQLEndTran.c -${ODBC_SOURCE_DIR}/DriverManager/SQLError.c -${ODBC_SOURCE_DIR}/DriverManager/SQLErrorW.c -${ODBC_SOURCE_DIR}/DriverManager/SQLExecDirect.c -${ODBC_SOURCE_DIR}/DriverManager/SQLExecDirectW.c -${ODBC_SOURCE_DIR}/DriverManager/SQLExecute.c -${ODBC_SOURCE_DIR}/DriverManager/SQLExtendedFetch.c -${ODBC_SOURCE_DIR}/DriverManager/SQLFetch.c -${ODBC_SOURCE_DIR}/DriverManager/SQLFetchScroll.c -${ODBC_SOURCE_DIR}/DriverManager/SQLForeignKeys.c -${ODBC_SOURCE_DIR}/DriverManager/SQLForeignKeysW.c -${ODBC_SOURCE_DIR}/DriverManager/SQLFreeConnect.c -${ODBC_SOURCE_DIR}/DriverManager/SQLFreeEnv.c -${ODBC_SOURCE_DIR}/DriverManager/SQLFreeHandle.c -${ODBC_SOURCE_DIR}/DriverManager/SQLFreeStmt.c -${ODBC_SOURCE_DIR}/DriverManager/SQLGetConnectAttr.c -${ODBC_SOURCE_DIR}/DriverManager/SQLGetConnectAttrW.c -${ODBC_SOURCE_DIR}/DriverManager/SQLGetConnectOption.c -${ODBC_SOURCE_DIR}/DriverManager/SQLGetConnectOptionW.c -${ODBC_SOURCE_DIR}/DriverManager/SQLGetCursorName.c -${ODBC_SOURCE_DIR}/DriverManager/SQLGetCursorNameW.c -${ODBC_SOURCE_DIR}/DriverManager/SQLGetData.c -${ODBC_SOURCE_DIR}/DriverManager/SQLGetDescField.c -${ODBC_SOURCE_DIR}/DriverManager/SQLGetDescFieldW.c -${ODBC_SOURCE_DIR}/DriverManager/SQLGetDescRec.c -${ODBC_SOURCE_DIR}/DriverManager/SQLGetDescRecW.c -${ODBC_SOURCE_DIR}/DriverManager/SQLGetDiagField.c -${ODBC_SOURCE_DIR}/DriverManager/SQLGetDiagFieldW.c -${ODBC_SOURCE_DIR}/DriverManager/SQLGetDiagRec.c -${ODBC_SOURCE_DIR}/DriverManager/SQLGetDiagRecW.c -${ODBC_SOURCE_DIR}/DriverManager/SQLGetEnvAttr.c -${ODBC_SOURCE_DIR}/DriverManager/SQLGetFunctions.c -${ODBC_SOURCE_DIR}/DriverManager/SQLGetInfo.c -${ODBC_SOURCE_DIR}/DriverManager/SQLGetInfoW.c -${ODBC_SOURCE_DIR}/DriverManager/SQLGetStmtAttr.c -${ODBC_SOURCE_DIR}/DriverManager/SQLGetStmtAttrW.c -${ODBC_SOURCE_DIR}/DriverManager/SQLGetStmtOption.c -${ODBC_SOURCE_DIR}/DriverManager/SQLGetTypeInfo.c -${ODBC_SOURCE_DIR}/DriverManager/SQLGetTypeInfoW.c -${ODBC_SOURCE_DIR}/DriverManager/SQLMoreResults.c -${ODBC_SOURCE_DIR}/DriverManager/SQLNativeSql.c -${ODBC_SOURCE_DIR}/DriverManager/SQLNativeSqlW.c -${ODBC_SOURCE_DIR}/DriverManager/SQLNumParams.c -${ODBC_SOURCE_DIR}/DriverManager/SQLNumResultCols.c -${ODBC_SOURCE_DIR}/DriverManager/SQLParamData.c -${ODBC_SOURCE_DIR}/DriverManager/SQLParamOptions.c -${ODBC_SOURCE_DIR}/DriverManager/SQLPrepare.c -${ODBC_SOURCE_DIR}/DriverManager/SQLPrepareW.c -${ODBC_SOURCE_DIR}/DriverManager/SQLPrimaryKeys.c -${ODBC_SOURCE_DIR}/DriverManager/SQLPrimaryKeysW.c -${ODBC_SOURCE_DIR}/DriverManager/SQLProcedureColumns.c -${ODBC_SOURCE_DIR}/DriverManager/SQLProcedureColumnsW.c -${ODBC_SOURCE_DIR}/DriverManager/SQLProcedures.c -${ODBC_SOURCE_DIR}/DriverManager/SQLProceduresW.c -${ODBC_SOURCE_DIR}/DriverManager/SQLPutData.c 
-${ODBC_SOURCE_DIR}/DriverManager/SQLRowCount.c -${ODBC_SOURCE_DIR}/DriverManager/SQLSetConnectAttr.c -${ODBC_SOURCE_DIR}/DriverManager/SQLSetConnectAttrW.c -${ODBC_SOURCE_DIR}/DriverManager/SQLSetConnectOption.c -${ODBC_SOURCE_DIR}/DriverManager/SQLSetConnectOptionW.c -${ODBC_SOURCE_DIR}/DriverManager/SQLSetCursorName.c -${ODBC_SOURCE_DIR}/DriverManager/SQLSetCursorNameW.c -${ODBC_SOURCE_DIR}/DriverManager/SQLSetDescField.c -${ODBC_SOURCE_DIR}/DriverManager/SQLSetDescFieldW.c -${ODBC_SOURCE_DIR}/DriverManager/SQLSetDescRec.c -${ODBC_SOURCE_DIR}/DriverManager/SQLSetEnvAttr.c -${ODBC_SOURCE_DIR}/DriverManager/SQLSetParam.c -${ODBC_SOURCE_DIR}/DriverManager/SQLSetPos.c -${ODBC_SOURCE_DIR}/DriverManager/SQLSetScrollOptions.c -${ODBC_SOURCE_DIR}/DriverManager/SQLSetStmtAttr.c -${ODBC_SOURCE_DIR}/DriverManager/SQLSetStmtAttrW.c -${ODBC_SOURCE_DIR}/DriverManager/SQLSetStmtOption.c -${ODBC_SOURCE_DIR}/DriverManager/SQLSetStmtOptionW.c -${ODBC_SOURCE_DIR}/DriverManager/SQLSpecialColumns.c -${ODBC_SOURCE_DIR}/DriverManager/SQLSpecialColumnsW.c -${ODBC_SOURCE_DIR}/DriverManager/SQLStatistics.c -${ODBC_SOURCE_DIR}/DriverManager/SQLStatisticsW.c -${ODBC_SOURCE_DIR}/DriverManager/SQLTablePrivileges.c -${ODBC_SOURCE_DIR}/DriverManager/SQLTablePrivilegesW.c -${ODBC_SOURCE_DIR}/DriverManager/SQLTables.c -${ODBC_SOURCE_DIR}/DriverManager/SQLTablesW.c -${ODBC_SOURCE_DIR}/DriverManager/SQLTransact.c + # odbc -${ODBC_SOURCE_DIR}/odbcinst/_logging.c -${ODBC_SOURCE_DIR}/odbcinst/_odbcinst_ConfigModeINI.c -${ODBC_SOURCE_DIR}/odbcinst/ODBCINSTConstructProperties.c -${ODBC_SOURCE_DIR}/odbcinst/ODBCINSTDestructProperties.c -${ODBC_SOURCE_DIR}/odbcinst/_odbcinst_GetEntries.c -${ODBC_SOURCE_DIR}/odbcinst/_odbcinst_GetSections.c -${ODBC_SOURCE_DIR}/odbcinst/ODBCINSTSetProperty.c -${ODBC_SOURCE_DIR}/odbcinst/_odbcinst_SystemINI.c -${ODBC_SOURCE_DIR}/odbcinst/_odbcinst_UserINI.c -${ODBC_SOURCE_DIR}/odbcinst/ODBCINSTValidateProperties.c -${ODBC_SOURCE_DIR}/odbcinst/ODBCINSTValidateProperty.c -${ODBC_SOURCE_DIR}/odbcinst/SQLConfigDataSource.c -${ODBC_SOURCE_DIR}/odbcinst/SQLConfigDriver.c -${ODBC_SOURCE_DIR}/odbcinst/SQLCreateDataSource.c -${ODBC_SOURCE_DIR}/odbcinst/_SQLDriverConnectPrompt.c -${ODBC_SOURCE_DIR}/odbcinst/SQLGetAvailableDrivers.c -${ODBC_SOURCE_DIR}/odbcinst/SQLGetConfigMode.c -${ODBC_SOURCE_DIR}/odbcinst/_SQLGetInstalledDrivers.c -${ODBC_SOURCE_DIR}/odbcinst/SQLGetInstalledDrivers.c -${ODBC_SOURCE_DIR}/odbcinst/SQLGetPrivateProfileString.c -${ODBC_SOURCE_DIR}/odbcinst/SQLGetTranslator.c -${ODBC_SOURCE_DIR}/odbcinst/SQLInstallDriverEx.c -${ODBC_SOURCE_DIR}/odbcinst/SQLInstallDriverManager.c -${ODBC_SOURCE_DIR}/odbcinst/SQLInstallerError.c -${ODBC_SOURCE_DIR}/odbcinst/SQLInstallODBC.c -${ODBC_SOURCE_DIR}/odbcinst/SQLInstallTranslatorEx.c -${ODBC_SOURCE_DIR}/odbcinst/SQLManageDataSources.c -${ODBC_SOURCE_DIR}/odbcinst/SQLPostInstallerError.c -${ODBC_SOURCE_DIR}/odbcinst/SQLReadFileDSN.c -${ODBC_SOURCE_DIR}/odbcinst/SQLRemoveDriver.c -${ODBC_SOURCE_DIR}/odbcinst/SQLRemoveDriverManager.c -${ODBC_SOURCE_DIR}/odbcinst/SQLRemoveDSNFromIni.c -${ODBC_SOURCE_DIR}/odbcinst/SQLRemoveTranslator.c -${ODBC_SOURCE_DIR}/odbcinst/SQLSetConfigMode.c -${ODBC_SOURCE_DIR}/odbcinst/SQLValidDSN.c -${ODBC_SOURCE_DIR}/odbcinst/SQLWriteDSNToIni.c -${ODBC_SOURCE_DIR}/odbcinst/SQLWriteFileDSN.c -${ODBC_SOURCE_DIR}/odbcinst/_SQLWriteInstalledDrivers.c -${ODBC_SOURCE_DIR}/odbcinst/SQLWritePrivateProfileString.c + set (SRCS + ${LIBRARY_DIR}/DriverManager/__attribute.c + ${LIBRARY_DIR}/DriverManager/__connection.c + 
${LIBRARY_DIR}/DriverManager/__handles.c + ${LIBRARY_DIR}/DriverManager/__info.c + ${LIBRARY_DIR}/DriverManager/__stats.c + ${LIBRARY_DIR}/DriverManager/SQLAllocConnect.c + ${LIBRARY_DIR}/DriverManager/SQLAllocEnv.c + ${LIBRARY_DIR}/DriverManager/SQLAllocHandle.c + ${LIBRARY_DIR}/DriverManager/SQLAllocHandleStd.c + ${LIBRARY_DIR}/DriverManager/SQLAllocStmt.c + ${LIBRARY_DIR}/DriverManager/SQLBindCol.c + ${LIBRARY_DIR}/DriverManager/SQLBindParam.c + ${LIBRARY_DIR}/DriverManager/SQLBindParameter.c + ${LIBRARY_DIR}/DriverManager/SQLBrowseConnect.c + ${LIBRARY_DIR}/DriverManager/SQLBrowseConnectW.c + ${LIBRARY_DIR}/DriverManager/SQLBulkOperations.c + ${LIBRARY_DIR}/DriverManager/SQLCancel.c + ${LIBRARY_DIR}/DriverManager/SQLCancelHandle.c + ${LIBRARY_DIR}/DriverManager/SQLCloseCursor.c + ${LIBRARY_DIR}/DriverManager/SQLColAttribute.c + ${LIBRARY_DIR}/DriverManager/SQLColAttributes.c + ${LIBRARY_DIR}/DriverManager/SQLColAttributesW.c + ${LIBRARY_DIR}/DriverManager/SQLColAttributeW.c + ${LIBRARY_DIR}/DriverManager/SQLColumnPrivileges.c + ${LIBRARY_DIR}/DriverManager/SQLColumnPrivilegesW.c + ${LIBRARY_DIR}/DriverManager/SQLColumns.c + ${LIBRARY_DIR}/DriverManager/SQLColumnsW.c + ${LIBRARY_DIR}/DriverManager/SQLConnect.c + ${LIBRARY_DIR}/DriverManager/SQLConnectW.c + ${LIBRARY_DIR}/DriverManager/SQLCopyDesc.c + ${LIBRARY_DIR}/DriverManager/SQLDataSources.c + ${LIBRARY_DIR}/DriverManager/SQLDataSourcesW.c + ${LIBRARY_DIR}/DriverManager/SQLDescribeCol.c + ${LIBRARY_DIR}/DriverManager/SQLDescribeColW.c + ${LIBRARY_DIR}/DriverManager/SQLDescribeParam.c + ${LIBRARY_DIR}/DriverManager/SQLDisconnect.c + ${LIBRARY_DIR}/DriverManager/SQLDriverConnect.c + ${LIBRARY_DIR}/DriverManager/SQLDriverConnectW.c + ${LIBRARY_DIR}/DriverManager/SQLDrivers.c + ${LIBRARY_DIR}/DriverManager/SQLDriversW.c + ${LIBRARY_DIR}/DriverManager/SQLEndTran.c + ${LIBRARY_DIR}/DriverManager/SQLError.c + ${LIBRARY_DIR}/DriverManager/SQLErrorW.c + ${LIBRARY_DIR}/DriverManager/SQLExecDirect.c + ${LIBRARY_DIR}/DriverManager/SQLExecDirectW.c + ${LIBRARY_DIR}/DriverManager/SQLExecute.c + ${LIBRARY_DIR}/DriverManager/SQLExtendedFetch.c + ${LIBRARY_DIR}/DriverManager/SQLFetch.c + ${LIBRARY_DIR}/DriverManager/SQLFetchScroll.c + ${LIBRARY_DIR}/DriverManager/SQLForeignKeys.c + ${LIBRARY_DIR}/DriverManager/SQLForeignKeysW.c + ${LIBRARY_DIR}/DriverManager/SQLFreeConnect.c + ${LIBRARY_DIR}/DriverManager/SQLFreeEnv.c + ${LIBRARY_DIR}/DriverManager/SQLFreeHandle.c + ${LIBRARY_DIR}/DriverManager/SQLFreeStmt.c + ${LIBRARY_DIR}/DriverManager/SQLGetConnectAttr.c + ${LIBRARY_DIR}/DriverManager/SQLGetConnectAttrW.c + ${LIBRARY_DIR}/DriverManager/SQLGetConnectOption.c + ${LIBRARY_DIR}/DriverManager/SQLGetConnectOptionW.c + ${LIBRARY_DIR}/DriverManager/SQLGetCursorName.c + ${LIBRARY_DIR}/DriverManager/SQLGetCursorNameW.c + ${LIBRARY_DIR}/DriverManager/SQLGetData.c + ${LIBRARY_DIR}/DriverManager/SQLGetDescField.c + ${LIBRARY_DIR}/DriverManager/SQLGetDescFieldW.c + ${LIBRARY_DIR}/DriverManager/SQLGetDescRec.c + ${LIBRARY_DIR}/DriverManager/SQLGetDescRecW.c + ${LIBRARY_DIR}/DriverManager/SQLGetDiagField.c + ${LIBRARY_DIR}/DriverManager/SQLGetDiagFieldW.c + ${LIBRARY_DIR}/DriverManager/SQLGetDiagRec.c + ${LIBRARY_DIR}/DriverManager/SQLGetDiagRecW.c + ${LIBRARY_DIR}/DriverManager/SQLGetEnvAttr.c + ${LIBRARY_DIR}/DriverManager/SQLGetFunctions.c + ${LIBRARY_DIR}/DriverManager/SQLGetInfo.c + ${LIBRARY_DIR}/DriverManager/SQLGetInfoW.c + ${LIBRARY_DIR}/DriverManager/SQLGetStmtAttr.c + ${LIBRARY_DIR}/DriverManager/SQLGetStmtAttrW.c + 
${LIBRARY_DIR}/DriverManager/SQLGetStmtOption.c + ${LIBRARY_DIR}/DriverManager/SQLGetTypeInfo.c + ${LIBRARY_DIR}/DriverManager/SQLGetTypeInfoW.c + ${LIBRARY_DIR}/DriverManager/SQLMoreResults.c + ${LIBRARY_DIR}/DriverManager/SQLNativeSql.c + ${LIBRARY_DIR}/DriverManager/SQLNativeSqlW.c + ${LIBRARY_DIR}/DriverManager/SQLNumParams.c + ${LIBRARY_DIR}/DriverManager/SQLNumResultCols.c + ${LIBRARY_DIR}/DriverManager/SQLParamData.c + ${LIBRARY_DIR}/DriverManager/SQLParamOptions.c + ${LIBRARY_DIR}/DriverManager/SQLPrepare.c + ${LIBRARY_DIR}/DriverManager/SQLPrepareW.c + ${LIBRARY_DIR}/DriverManager/SQLPrimaryKeys.c + ${LIBRARY_DIR}/DriverManager/SQLPrimaryKeysW.c + ${LIBRARY_DIR}/DriverManager/SQLProcedureColumns.c + ${LIBRARY_DIR}/DriverManager/SQLProcedureColumnsW.c + ${LIBRARY_DIR}/DriverManager/SQLProcedures.c + ${LIBRARY_DIR}/DriverManager/SQLProceduresW.c + ${LIBRARY_DIR}/DriverManager/SQLPutData.c + ${LIBRARY_DIR}/DriverManager/SQLRowCount.c + ${LIBRARY_DIR}/DriverManager/SQLSetConnectAttr.c + ${LIBRARY_DIR}/DriverManager/SQLSetConnectAttrW.c + ${LIBRARY_DIR}/DriverManager/SQLSetConnectOption.c + ${LIBRARY_DIR}/DriverManager/SQLSetConnectOptionW.c + ${LIBRARY_DIR}/DriverManager/SQLSetCursorName.c + ${LIBRARY_DIR}/DriverManager/SQLSetCursorNameW.c + ${LIBRARY_DIR}/DriverManager/SQLSetDescField.c + ${LIBRARY_DIR}/DriverManager/SQLSetDescFieldW.c + ${LIBRARY_DIR}/DriverManager/SQLSetDescRec.c + ${LIBRARY_DIR}/DriverManager/SQLSetEnvAttr.c + ${LIBRARY_DIR}/DriverManager/SQLSetParam.c + ${LIBRARY_DIR}/DriverManager/SQLSetPos.c + ${LIBRARY_DIR}/DriverManager/SQLSetScrollOptions.c + ${LIBRARY_DIR}/DriverManager/SQLSetStmtAttr.c + ${LIBRARY_DIR}/DriverManager/SQLSetStmtAttrW.c + ${LIBRARY_DIR}/DriverManager/SQLSetStmtOption.c + ${LIBRARY_DIR}/DriverManager/SQLSetStmtOptionW.c + ${LIBRARY_DIR}/DriverManager/SQLSpecialColumns.c + ${LIBRARY_DIR}/DriverManager/SQLSpecialColumnsW.c + ${LIBRARY_DIR}/DriverManager/SQLStatistics.c + ${LIBRARY_DIR}/DriverManager/SQLStatisticsW.c + ${LIBRARY_DIR}/DriverManager/SQLTablePrivileges.c + ${LIBRARY_DIR}/DriverManager/SQLTablePrivilegesW.c + ${LIBRARY_DIR}/DriverManager/SQLTables.c + ${LIBRARY_DIR}/DriverManager/SQLTablesW.c + ${LIBRARY_DIR}/DriverManager/SQLTransact.c + ${LIBRARY_DIR}/ini/_iniDump.c + ${LIBRARY_DIR}/ini/_iniObjectRead.c + ${LIBRARY_DIR}/ini/_iniPropertyRead.c + ${LIBRARY_DIR}/ini/_iniScanUntilObject.c + ${LIBRARY_DIR}/ini/iniAllTrim.c + ${LIBRARY_DIR}/ini/iniAppend.c + ${LIBRARY_DIR}/ini/iniClose.c + ${LIBRARY_DIR}/ini/iniCommit.c + ${LIBRARY_DIR}/ini/iniCursor.c + ${LIBRARY_DIR}/ini/iniDelete.c + ${LIBRARY_DIR}/ini/iniElement.c + ${LIBRARY_DIR}/ini/iniElementCount.c + ${LIBRARY_DIR}/ini/iniGetBookmark.c + ${LIBRARY_DIR}/ini/iniGotoBookmark.c + ${LIBRARY_DIR}/ini/iniObject.c + ${LIBRARY_DIR}/ini/iniObjectDelete.c + ${LIBRARY_DIR}/ini/iniObjectEOL.c + ${LIBRARY_DIR}/ini/iniObjectFirst.c + ${LIBRARY_DIR}/ini/iniObjectInsert.c + ${LIBRARY_DIR}/ini/iniObjectLast.c + ${LIBRARY_DIR}/ini/iniObjectNext.c + ${LIBRARY_DIR}/ini/iniObjectSeek.c + ${LIBRARY_DIR}/ini/iniObjectSeekSure.c + ${LIBRARY_DIR}/ini/iniObjectUpdate.c + ${LIBRARY_DIR}/ini/iniOpen.c + ${LIBRARY_DIR}/ini/iniProperty.c + ${LIBRARY_DIR}/ini/iniPropertyDelete.c + ${LIBRARY_DIR}/ini/iniPropertyEOL.c + ${LIBRARY_DIR}/ini/iniPropertyFirst.c + ${LIBRARY_DIR}/ini/iniPropertyInsert.c + ${LIBRARY_DIR}/ini/iniPropertyLast.c + ${LIBRARY_DIR}/ini/iniPropertyNext.c + ${LIBRARY_DIR}/ini/iniPropertySeek.c + ${LIBRARY_DIR}/ini/iniPropertySeekSure.c + ${LIBRARY_DIR}/ini/iniPropertyUpdate.c + 
${LIBRARY_DIR}/ini/iniPropertyValue.c + ${LIBRARY_DIR}/ini/iniToUpper.c + ${LIBRARY_DIR}/ini/iniValue.c + ${LIBRARY_DIR}/log/_logFreeMsg.c + ${LIBRARY_DIR}/log/logClear.c + ${LIBRARY_DIR}/log/logClose.c + ${LIBRARY_DIR}/log/logOn.c + ${LIBRARY_DIR}/log/logOpen.c + ${LIBRARY_DIR}/log/logPeekMsg.c + ${LIBRARY_DIR}/log/logPopMsg.c + ${LIBRARY_DIR}/log/logPushMsg.c + ${LIBRARY_DIR}/lst/_lstAdjustCurrent.c + ${LIBRARY_DIR}/lst/_lstDump.c + ${LIBRARY_DIR}/lst/_lstFreeItem.c + ${LIBRARY_DIR}/lst/_lstNextValidItem.c + ${LIBRARY_DIR}/lst/_lstPrevValidItem.c + ${LIBRARY_DIR}/lst/_lstVisible.c + ${LIBRARY_DIR}/lst/lstAppend.c + ${LIBRARY_DIR}/lst/lstClose.c + ${LIBRARY_DIR}/lst/lstDelete.c + ${LIBRARY_DIR}/lst/lstEOL.c + ${LIBRARY_DIR}/lst/lstFirst.c + ${LIBRARY_DIR}/lst/lstGet.c + ${LIBRARY_DIR}/lst/lstGetBookMark.c + ${LIBRARY_DIR}/lst/lstGoto.c + ${LIBRARY_DIR}/lst/lstGotoBookMark.c + ${LIBRARY_DIR}/lst/lstInsert.c + ${LIBRARY_DIR}/lst/lstLast.c + ${LIBRARY_DIR}/lst/lstNext.c + ${LIBRARY_DIR}/lst/lstOpen.c + ${LIBRARY_DIR}/lst/lstOpenCursor.c + ${LIBRARY_DIR}/lst/lstPrev.c + ${LIBRARY_DIR}/lst/lstSeek.c + ${LIBRARY_DIR}/lst/lstSeekItem.c + ${LIBRARY_DIR}/lst/lstSet.c + ${LIBRARY_DIR}/lst/lstSetFreeFunc.c + ${LIBRARY_DIR}/odbcinst/_logging.c + ${LIBRARY_DIR}/odbcinst/_odbcinst_ConfigModeINI.c + ${LIBRARY_DIR}/odbcinst/_odbcinst_GetEntries.c + ${LIBRARY_DIR}/odbcinst/_odbcinst_GetSections.c + ${LIBRARY_DIR}/odbcinst/_odbcinst_SystemINI.c + ${LIBRARY_DIR}/odbcinst/_odbcinst_UserINI.c + ${LIBRARY_DIR}/odbcinst/_SQLDriverConnectPrompt.c + ${LIBRARY_DIR}/odbcinst/_SQLGetInstalledDrivers.c + ${LIBRARY_DIR}/odbcinst/_SQLWriteInstalledDrivers.c + ${LIBRARY_DIR}/odbcinst/ODBCINSTConstructProperties.c + ${LIBRARY_DIR}/odbcinst/ODBCINSTDestructProperties.c + ${LIBRARY_DIR}/odbcinst/ODBCINSTSetProperty.c + ${LIBRARY_DIR}/odbcinst/ODBCINSTValidateProperties.c + ${LIBRARY_DIR}/odbcinst/ODBCINSTValidateProperty.c + ${LIBRARY_DIR}/odbcinst/SQLConfigDataSource.c + ${LIBRARY_DIR}/odbcinst/SQLConfigDriver.c + ${LIBRARY_DIR}/odbcinst/SQLCreateDataSource.c + ${LIBRARY_DIR}/odbcinst/SQLGetAvailableDrivers.c + ${LIBRARY_DIR}/odbcinst/SQLGetConfigMode.c + ${LIBRARY_DIR}/odbcinst/SQLGetInstalledDrivers.c + ${LIBRARY_DIR}/odbcinst/SQLGetPrivateProfileString.c + ${LIBRARY_DIR}/odbcinst/SQLGetTranslator.c + ${LIBRARY_DIR}/odbcinst/SQLInstallDriverEx.c + ${LIBRARY_DIR}/odbcinst/SQLInstallDriverManager.c + ${LIBRARY_DIR}/odbcinst/SQLInstallerError.c + ${LIBRARY_DIR}/odbcinst/SQLInstallODBC.c + ${LIBRARY_DIR}/odbcinst/SQLInstallTranslatorEx.c + ${LIBRARY_DIR}/odbcinst/SQLManageDataSources.c + ${LIBRARY_DIR}/odbcinst/SQLPostInstallerError.c + ${LIBRARY_DIR}/odbcinst/SQLReadFileDSN.c + ${LIBRARY_DIR}/odbcinst/SQLRemoveDriver.c + ${LIBRARY_DIR}/odbcinst/SQLRemoveDriverManager.c + ${LIBRARY_DIR}/odbcinst/SQLRemoveDSNFromIni.c + ${LIBRARY_DIR}/odbcinst/SQLRemoveTranslator.c + ${LIBRARY_DIR}/odbcinst/SQLSetConfigMode.c + ${LIBRARY_DIR}/odbcinst/SQLValidDSN.c + ${LIBRARY_DIR}/odbcinst/SQLWriteDSNToIni.c + ${LIBRARY_DIR}/odbcinst/SQLWriteFileDSN.c + ${LIBRARY_DIR}/odbcinst/SQLWritePrivateProfileString.c + ) -${ODBC_SOURCE_DIR}/ini/iniAllTrim.c -${ODBC_SOURCE_DIR}/ini/iniAppend.c -${ODBC_SOURCE_DIR}/ini/iniClose.c -${ODBC_SOURCE_DIR}/ini/iniCommit.c -${ODBC_SOURCE_DIR}/ini/iniCursor.c -${ODBC_SOURCE_DIR}/ini/iniDelete.c -${ODBC_SOURCE_DIR}/ini/_iniDump.c -${ODBC_SOURCE_DIR}/ini/iniElement.c -${ODBC_SOURCE_DIR}/ini/iniElementCount.c -${ODBC_SOURCE_DIR}/ini/iniGetBookmark.c -${ODBC_SOURCE_DIR}/ini/iniGotoBookmark.c 
-${ODBC_SOURCE_DIR}/ini/iniObject.c -${ODBC_SOURCE_DIR}/ini/iniObjectDelete.c -${ODBC_SOURCE_DIR}/ini/iniObjectEOL.c -${ODBC_SOURCE_DIR}/ini/iniObjectFirst.c -${ODBC_SOURCE_DIR}/ini/iniObjectInsert.c -${ODBC_SOURCE_DIR}/ini/iniObjectLast.c -${ODBC_SOURCE_DIR}/ini/iniObjectNext.c -${ODBC_SOURCE_DIR}/ini/_iniObjectRead.c -${ODBC_SOURCE_DIR}/ini/iniObjectSeek.c -${ODBC_SOURCE_DIR}/ini/iniObjectSeekSure.c -${ODBC_SOURCE_DIR}/ini/iniObjectUpdate.c -${ODBC_SOURCE_DIR}/ini/iniOpen.c -${ODBC_SOURCE_DIR}/ini/iniProperty.c -${ODBC_SOURCE_DIR}/ini/iniPropertyDelete.c -${ODBC_SOURCE_DIR}/ini/iniPropertyEOL.c -${ODBC_SOURCE_DIR}/ini/iniPropertyFirst.c -${ODBC_SOURCE_DIR}/ini/iniPropertyInsert.c -${ODBC_SOURCE_DIR}/ini/iniPropertyLast.c -${ODBC_SOURCE_DIR}/ini/iniPropertyNext.c -${ODBC_SOURCE_DIR}/ini/_iniPropertyRead.c -${ODBC_SOURCE_DIR}/ini/iniPropertySeek.c -${ODBC_SOURCE_DIR}/ini/iniPropertySeekSure.c -${ODBC_SOURCE_DIR}/ini/iniPropertyUpdate.c -${ODBC_SOURCE_DIR}/ini/iniPropertyValue.c -${ODBC_SOURCE_DIR}/ini/_iniScanUntilObject.c -${ODBC_SOURCE_DIR}/ini/iniToUpper.c -${ODBC_SOURCE_DIR}/ini/iniValue.c + add_library (unixodbc ${SRCS}) -${ODBC_SOURCE_DIR}/log/logClear.c -${ODBC_SOURCE_DIR}/log/logClose.c -${ODBC_SOURCE_DIR}/log/_logFreeMsg.c -${ODBC_SOURCE_DIR}/log/logOn.c -${ODBC_SOURCE_DIR}/log/logOpen.c -${ODBC_SOURCE_DIR}/log/logPeekMsg.c -${ODBC_SOURCE_DIR}/log/logPopMsg.c -${ODBC_SOURCE_DIR}/log/logPushMsg.c + target_link_libraries (unixodbc PRIVATE ltdl) -${ODBC_SOURCE_DIR}/lst/_lstAdjustCurrent.c -${ODBC_SOURCE_DIR}/lst/lstAppend.c -${ODBC_SOURCE_DIR}/lst/lstClose.c -${ODBC_SOURCE_DIR}/lst/lstDelete.c -${ODBC_SOURCE_DIR}/lst/_lstDump.c -${ODBC_SOURCE_DIR}/lst/lstEOL.c -${ODBC_SOURCE_DIR}/lst/lstFirst.c -${ODBC_SOURCE_DIR}/lst/_lstFreeItem.c -${ODBC_SOURCE_DIR}/lst/lstGetBookMark.c -${ODBC_SOURCE_DIR}/lst/lstGet.c -${ODBC_SOURCE_DIR}/lst/lstGotoBookMark.c -${ODBC_SOURCE_DIR}/lst/lstGoto.c -${ODBC_SOURCE_DIR}/lst/lstInsert.c -${ODBC_SOURCE_DIR}/lst/lstLast.c -${ODBC_SOURCE_DIR}/lst/lstNext.c -${ODBC_SOURCE_DIR}/lst/_lstNextValidItem.c -${ODBC_SOURCE_DIR}/lst/lstOpen.c -${ODBC_SOURCE_DIR}/lst/lstOpenCursor.c -${ODBC_SOURCE_DIR}/lst/lstPrev.c -${ODBC_SOURCE_DIR}/lst/_lstPrevValidItem.c -${ODBC_SOURCE_DIR}/lst/lstSeek.c -${ODBC_SOURCE_DIR}/lst/lstSeekItem.c -${ODBC_SOURCE_DIR}/lst/lstSet.c -${ODBC_SOURCE_DIR}/lst/lstSetFreeFunc.c -${ODBC_SOURCE_DIR}/lst/_lstVisible.c -) + # SYSTEM_FILE_PATH was changed to /etc -add_library(unixodbc ${SRCS}) + target_include_directories (unixodbc + PRIVATE + linux_x86_64/private + PUBLIC + linux_x86_64 + ${LIBRARY_DIR}/include + ) + target_compile_definitions (unixodbc PRIVATE -DHAVE_CONFIG_H) + target_compile_options (unixodbc + PRIVATE + -Wno-dangling-else + -Wno-parentheses + -Wno-misleading-indentation + -Wno-unknown-warning-option + -Wno-reserved-id-macro + -O2 + ) + else () + add_library (unixodbc UNKNOWN IMPORTED) -target_link_libraries(unixodbc PRIVATE ltdl) + find_library (LIBRARY_ODBC unixodbc) + find_path (INCLUDE_ODBC sql.h) + set_target_properties (unixodbc PROPERTIES IMPORTED_LOCATION ${LIBRARY_ODBC}) + set_target_properties (unixodbc PROPERTIES INTERFACE_INCLUDE_DIRECTORIES ${INCLUDE_ODBC}) + endif () -# SYSTEM_FILE_PATH was changed to /etc + target_compile_definitions (unixodbc PUBLIC USE_ODBC=1) -target_include_directories(unixodbc SYSTEM PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/linux_x86_64/private) -target_include_directories(unixodbc SYSTEM PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/linux_x86_64) -target_include_directories(unixodbc SYSTEM PUBLIC 
${ODBC_SOURCE_DIR}/include) + message (STATUS "Using unixodbc") +else () + add_library (unixodbc INTERFACE) + target_compile_definitions (unixodbc INTERFACE USE_ODBC=0) -target_compile_definitions(unixodbc PRIVATE -DHAVE_CONFIG_H) - -target_compile_options(unixodbc PRIVATE -Wno-dangling-else -Wno-parentheses -Wno-misleading-indentation -Wno-unknown-warning-option -O2) + message (STATUS "Not using unixodbc") +endif () diff --git a/contrib/zstd b/contrib/zstd index 255597502c3..10f0e6993f9 160000 --- a/contrib/zstd +++ b/contrib/zstd @@ -1 +1 @@ -Subproject commit 255597502c3a4ef150abc964e376d4202a8c2929 +Subproject commit 10f0e6993f9d2f682da6d04aa2385b7d53cbb4ee diff --git a/contrib/zstd-cmake/CMakeLists.txt b/contrib/zstd-cmake/CMakeLists.txt index 6d8ecce2a44..58a827761ea 100644 --- a/contrib/zstd-cmake/CMakeLists.txt +++ b/contrib/zstd-cmake/CMakeLists.txt @@ -49,7 +49,10 @@ FILE(READ ${LIBRARY_DIR}/zstd.h HEADER_CONTENT) GetLibraryVersion("${HEADER_CONTENT}" LIBVER_MAJOR LIBVER_MINOR LIBVER_RELEASE) MESSAGE(STATUS "ZSTD VERSION ${LIBVER_MAJOR}.${LIBVER_MINOR}.${LIBVER_RELEASE}") +# cd contrib/zstd/lib +# find . -name '*.c' | grep -vP 'deprecated|legacy' | sort | sed 's/^\./ ${LIBRARY_DIR}/' SET(Sources + ${LIBRARY_DIR}/common/debug.c ${LIBRARY_DIR}/common/entropy_common.c ${LIBRARY_DIR}/common/error_private.c ${LIBRARY_DIR}/common/fse_decompress.c @@ -58,8 +61,11 @@ SET(Sources ${LIBRARY_DIR}/common/xxhash.c ${LIBRARY_DIR}/common/zstd_common.c ${LIBRARY_DIR}/compress/fse_compress.c + ${LIBRARY_DIR}/compress/hist.c ${LIBRARY_DIR}/compress/huf_compress.c ${LIBRARY_DIR}/compress/zstd_compress.c + ${LIBRARY_DIR}/compress/zstd_compress_literals.c + ${LIBRARY_DIR}/compress/zstd_compress_sequences.c ${LIBRARY_DIR}/compress/zstd_double_fast.c ${LIBRARY_DIR}/compress/zstd_fast.c ${LIBRARY_DIR}/compress/zstd_lazy.c @@ -67,16 +73,21 @@ SET(Sources ${LIBRARY_DIR}/compress/zstdmt_compress.c ${LIBRARY_DIR}/compress/zstd_opt.c ${LIBRARY_DIR}/decompress/huf_decompress.c + ${LIBRARY_DIR}/decompress/zstd_ddict.c + ${LIBRARY_DIR}/decompress/zstd_decompress_block.c ${LIBRARY_DIR}/decompress/zstd_decompress.c - ${LIBRARY_DIR}/deprecated/zbuff_common.c - ${LIBRARY_DIR}/deprecated/zbuff_compress.c - ${LIBRARY_DIR}/deprecated/zbuff_decompress.c ${LIBRARY_DIR}/dictBuilder/cover.c ${LIBRARY_DIR}/dictBuilder/divsufsort.c + ${LIBRARY_DIR}/dictBuilder/fastcover.c ${LIBRARY_DIR}/dictBuilder/zdict.c) +# cd contrib/zstd/lib +# find . 
-name '*.h' | grep -vP 'deprecated|legacy' | sort | sed 's/^\./ ${LIBRARY_DIR}/' SET(Headers ${LIBRARY_DIR}/common/bitstream.h + ${LIBRARY_DIR}/common/compiler.h + ${LIBRARY_DIR}/common/cpu.h + ${LIBRARY_DIR}/common/debug.h ${LIBRARY_DIR}/common/error_private.h ${LIBRARY_DIR}/common/fse.h ${LIBRARY_DIR}/common/huf.h @@ -86,14 +97,21 @@ SET(Headers ${LIBRARY_DIR}/common/xxhash.h ${LIBRARY_DIR}/common/zstd_errors.h ${LIBRARY_DIR}/common/zstd_internal.h + ${LIBRARY_DIR}/compress/hist.h + ${LIBRARY_DIR}/compress/zstd_compress_internal.h + ${LIBRARY_DIR}/compress/zstd_compress_literals.h + ${LIBRARY_DIR}/compress/zstd_compress_sequences.h + ${LIBRARY_DIR}/compress/zstd_cwksp.h ${LIBRARY_DIR}/compress/zstd_double_fast.h ${LIBRARY_DIR}/compress/zstd_fast.h ${LIBRARY_DIR}/compress/zstd_lazy.h ${LIBRARY_DIR}/compress/zstd_ldm.h ${LIBRARY_DIR}/compress/zstdmt_compress.h ${LIBRARY_DIR}/compress/zstd_opt.h - ${LIBRARY_DIR}/compress/zstd_ldm.h - ${LIBRARY_DIR}/deprecated/zbuff.h + ${LIBRARY_DIR}/decompress/zstd_ddict.h + ${LIBRARY_DIR}/decompress/zstd_decompress_block.h + ${LIBRARY_DIR}/decompress/zstd_decompress_internal.h + ${LIBRARY_DIR}/dictBuilder/cover.h ${LIBRARY_DIR}/dictBuilder/divsufsort.h ${LIBRARY_DIR}/dictBuilder/zdict.h ${LIBRARY_DIR}/zstd.h) @@ -128,3 +146,4 @@ ENDIF (ZSTD_LEGACY_SUPPORT) ADD_LIBRARY(zstd ${Sources} ${Headers}) target_include_directories (zstd PUBLIC ${LIBRARY_DIR}) +target_compile_options(zstd PRIVATE -fno-sanitize=undefined) diff --git a/dbms/CMakeLists.txt b/dbms/CMakeLists.txt deleted file mode 100644 index 821cf3f6654..00000000000 --- a/dbms/CMakeLists.txt +++ /dev/null @@ -1,602 +0,0 @@ -set(ConfigIncludePath ${CMAKE_CURRENT_BINARY_DIR}/includes/configs CACHE INTERNAL "Path to generated configuration files.") -include_directories(${ConfigIncludePath}) - -if (USE_INCLUDE_WHAT_YOU_USE) - set (CMAKE_CXX_INCLUDE_WHAT_YOU_USE ${IWYU_PATH}) -endif () - -if (USE_CLANG_TIDY) - set (CMAKE_CXX_CLANG_TIDY "${DO_CLANG_TIDY}") -endif () - -if(COMPILER_PIPE) - set(MAX_COMPILER_MEMORY 2500) -else() - set(MAX_COMPILER_MEMORY 1500) -endif() -if(MAKE_STATIC_LIBRARIES) - set(MAX_LINKER_MEMORY 3500) -else() - set(MAX_LINKER_MEMORY 2500) -endif() -include(../cmake/limit_jobs.cmake) - -include(cmake/find_vectorclass.cmake) - -set (CONFIG_VERSION ${CMAKE_CURRENT_BINARY_DIR}/src/Common/config_version.h) -set (CONFIG_COMMON ${CMAKE_CURRENT_BINARY_DIR}/src/Common/config.h) - -include (cmake/version.cmake) -message (STATUS "Will build ${VERSION_FULL} revision ${VERSION_REVISION} ${VERSION_OFFICIAL}") -configure_file (src/Common/config.h.in ${CONFIG_COMMON}) -configure_file (src/Common/config_version.h.in ${CONFIG_VERSION}) -configure_file (src/Core/config_core.h.in ${CMAKE_CURRENT_BINARY_DIR}/src/Core/include/config_core.h) - -if (NOT MSVC) - set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wextra") -endif () - -if (USE_DEBUG_HELPERS) - set (INCLUDE_DEBUG_HELPERS "-I${ClickHouse_SOURCE_DIR}/base -include ${ClickHouse_SOURCE_DIR}/dbms/src/Core/iostream_debug_helpers.h") - set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${INCLUDE_DEBUG_HELPERS}") -endif () - -# Add some warnings that are not available even with -Wall -Wextra -Wpedantic. - -option (WEVERYTHING "Enables -Weverything option with some exceptions. This is intended for exploration of new compiler warnings that may be found to be useful. Only makes sense for clang." 
ON) - -if (COMPILER_CLANG) - add_warning(pedantic) - no_warning(gnu-anonymous-struct) - no_warning(nested-anon-types) - no_warning(vla-extension) - no_warning(zero-length-array) - - add_warning(comma) - add_warning(conditional-uninitialized) - add_warning(covered-switch-default) - add_warning(deprecated) - add_warning(embedded-directive) - add_warning(empty-init-stmt) # linux-only - add_warning(extra-semi-stmt) # linux-only - add_warning(extra-semi) - add_warning(gnu-case-range) - add_warning(inconsistent-missing-destructor-override) - add_warning(newline-eof) - add_warning(old-style-cast) - add_warning(range-loop-analysis) - add_warning(redundant-parens) - add_warning(reserved-id-macro) - add_warning(shadow-field) # clang 8+ - add_warning(shadow-uncaptured-local) - add_warning(shadow) - add_warning(string-plus-int) # clang 8+ - add_warning(undef) - add_warning(unreachable-code-return) - add_warning(unreachable-code) - add_warning(unused-exception-parameter) - add_warning(unused-macros) - add_warning(unused-member-function) - add_warning(zero-as-null-pointer-constant) - - if (WEVERYTHING) - add_warning(everything) - no_warning(c++98-compat-pedantic) - no_warning(c++98-compat) - no_warning(c99-extensions) - no_warning(conversion) - no_warning(ctad-maybe-unsupported) # clang 9+, linux-only - no_warning(deprecated-dynamic-exception-spec) - no_warning(disabled-macro-expansion) - no_warning(documentation-unknown-command) - no_warning(double-promotion) - no_warning(exit-time-destructors) - no_warning(float-equal) - no_warning(global-constructors) - no_warning(gnu-anonymous-struct) - no_warning(missing-prototypes) - no_warning(missing-variable-declarations) - no_warning(nested-anon-types) - no_warning(packed) - no_warning(padded) - no_warning(return-std-move-in-c++11) # clang 7+ - no_warning(shift-sign-overflow) - no_warning(sign-conversion) - no_warning(switch-enum) - no_warning(undefined-func-template) - no_warning(unused-template) - no_warning(vla-extension) - no_warning(vla) - no_warning(weak-template-vtables) - no_warning(weak-vtables) - no_warning(zero-length-array) - - # TODO Enable conversion, sign-conversion, double-promotion warnings. - endif () -elseif (COMPILER_GCC) - # Add compiler options only to c++ compiler - function(add_cxx_compile_options option) - add_compile_options("$<$,CXX>:${option}>") - endfunction() - # Warn about boolean expression compared with an integer value different from true/false - add_cxx_compile_options(-Wbool-compare) - # Warn whenever a pointer is cast such that the required alignment of the target is increased. - add_cxx_compile_options(-Wcast-align) - # Warn whenever a pointer is cast so as to remove a type qualifier from the target type. - add_cxx_compile_options(-Wcast-qual) - # Warn when deleting a pointer to incomplete type, which may cause undefined behavior at runtime - add_cxx_compile_options(-Wdelete-incomplete) - # Warn if a requested optimization pass is disabled. 
Code is too big or too complex - add_cxx_compile_options(-Wdisabled-optimization) - # Warn about duplicated conditions in an if-else-if chain - add_cxx_compile_options(-Wduplicated-cond) - # Warn about a comparison between values of different enumerated types - add_cxx_compile_options(-Wenum-compare) - # Warn about uninitialized variables that are initialized with themselves - add_cxx_compile_options(-Winit-self) - # Warn about logical not used on the left hand side operand of a comparison - add_cxx_compile_options(-Wlogical-not-parentheses) - # Warn about suspicious uses of logical operators in expressions - add_cxx_compile_options(-Wlogical-op) - # Warn if there exists a path from the function entry to a use of the variable that is uninitialized. - add_cxx_compile_options(-Wmaybe-uninitialized) - # Warn when the indentation of the code does not reflect the block structure - add_cxx_compile_options(-Wmisleading-indentation) - # Warn if a global function is defined without a previous declaration - disabled because of build times - # add_cxx_compile_options(-Wmissing-declarations) - # Warn if a user-supplied include directory does not exist - add_cxx_compile_options(-Wmissing-include-dirs) - # Obvious - add_cxx_compile_options(-Wnon-virtual-dtor) - # Obvious - add_cxx_compile_options(-Wno-return-local-addr) - # This warning is disabled due to false positives if compiled with libc++: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=90037 - #add_cxx_compile_options(-Wnull-dereference) - # Obvious - add_cxx_compile_options(-Wodr) - # Obvious - add_cxx_compile_options(-Wold-style-cast) - # Warn when a function declaration hides virtual functions from a base class - # add_cxx_compile_options(-Woverloaded-virtual) - # Warn about placement new expressions with undefined behavior - add_cxx_compile_options(-Wplacement-new=2) - # Warn about anything that depends on the “size of” a function type or of void - add_cxx_compile_options(-Wpointer-arith) - # Warn if anything is declared more than once in the same scope - add_cxx_compile_options(-Wredundant-decls) - # Member initialization reordering - add_cxx_compile_options(-Wreorder) - # Obvious - add_cxx_compile_options(-Wshadow) - # Warn if left shifting a negative value - add_cxx_compile_options(-Wshift-negative-value) - # Warn about a definition of an unsized deallocation function - add_cxx_compile_options(-Wsized-deallocation) - # Warn when the sizeof operator is applied to a parameter that is declared as an array in a function definition - add_cxx_compile_options(-Wsizeof-array-argument) - # Warn for suspicious length parameters to certain string and memory built-in functions if the argument uses sizeof - add_cxx_compile_options(-Wsizeof-pointer-memaccess) - - if (CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 9) - # Warn about overriding virtual functions that are not marked with the override keyword - add_cxx_compile_options(-Wsuggest-override) - endif () - - # Warn whenever a switch statement has an index of boolean type and the case values are outside the range of a boolean type - add_cxx_compile_options(-Wswitch-bool) - # Warn if a self-comparison always evaluates to true or false - add_cxx_compile_options(-Wtautological-compare) - # Warn about trampolines generated for pointers to nested functions - add_cxx_compile_options(-Wtrampolines) - # Obvious - add_cxx_compile_options(-Wunused) - # Warn if vector operation is not implemented via SIMD capabilities of the architecture - add_cxx_compile_options(-Wvector-operation-performance) -endif () - -if 
(COMPILER_GCC) - # If we leave this optimization enabled, gcc-7 replaces a pair of SSE intrinsics (16 byte load, store) with a call to memcpy. - # It leads to slow code. This is compiler bug. It looks like this: - # - # (gdb) bt - #0 memcpy (destination=0x7faa6e9f1638, source=0x7faa81d9e9a8, size=16) at ../libs/libmemcpy/memcpy.h:11 - #1 0x0000000005341c5f in _mm_storeu_si128 (__B=..., __P=) at /usr/lib/gcc/x86_64-linux-gnu/7/include/emmintrin.h:720 - #2 memcpySmallAllowReadWriteOverflow15Impl (n=, src=, dst=) at ../dbms/src/Common/memcpySmall.h:37 - - add_definitions ("-fno-tree-loop-distribute-patterns") -endif () - -add_subdirectory (src) - -set(dbms_headers) -set(dbms_sources) - -add_headers_and_sources(clickhouse_common_io src/Common) -add_headers_and_sources(clickhouse_common_io src/Common/HashTable) -add_headers_and_sources(clickhouse_common_io src/IO) -list (REMOVE_ITEM clickhouse_common_io_sources src/Common/malloc.cpp src/Common/new_delete.cpp) - -if(USE_RDKAFKA) - add_headers_and_sources(dbms src/Storages/Kafka) -endif() - - -list (APPEND clickhouse_common_io_sources ${CONFIG_BUILD}) -list (APPEND clickhouse_common_io_headers ${CONFIG_VERSION} ${CONFIG_COMMON}) - -list (APPEND dbms_sources src/Functions/IFunction.cpp src/Functions/FunctionFactory.cpp src/Functions/FunctionHelpers.cpp src/Functions/extractTimeZoneFromFunctionArguments.cpp) -list (APPEND dbms_headers src/Functions/IFunctionImpl.h src/Functions/FunctionFactory.h src/Functions/FunctionHelpers.h src/Functions/extractTimeZoneFromFunctionArguments.h) - -list (APPEND dbms_sources - src/AggregateFunctions/AggregateFunctionFactory.cpp - src/AggregateFunctions/AggregateFunctionCombinatorFactory.cpp - src/AggregateFunctions/AggregateFunctionState.cpp - src/AggregateFunctions/parseAggregateFunctionParameters.cpp) - -list (APPEND dbms_headers - src/AggregateFunctions/IAggregateFunction.h - src/AggregateFunctions/IAggregateFunctionCombinator.h - src/AggregateFunctions/AggregateFunctionFactory.h - src/AggregateFunctions/AggregateFunctionCombinatorFactory.h - src/AggregateFunctions/AggregateFunctionState.h - src/AggregateFunctions/FactoryHelpers.h - src/AggregateFunctions/parseAggregateFunctionParameters.h) - -list (APPEND dbms_sources src/TableFunctions/ITableFunction.cpp src/TableFunctions/TableFunctionFactory.cpp) -list (APPEND dbms_headers src/TableFunctions/ITableFunction.h src/TableFunctions/TableFunctionFactory.h) -list (APPEND dbms_sources src/Dictionaries/DictionaryFactory.cpp src/Dictionaries/DictionarySourceFactory.cpp src/Dictionaries/DictionaryStructure.cpp src/Dictionaries/getDictionaryConfigurationFromAST.cpp) -list (APPEND dbms_headers src/Dictionaries/DictionaryFactory.h src/Dictionaries/DictionarySourceFactory.h src/Dictionaries/DictionaryStructure.h src/Dictionaries/getDictionaryConfigurationFromAST.h) - -if (NOT ENABLE_SSL) - list (REMOVE_ITEM clickhouse_common_io_sources src/Common/OpenSSLHelpers.cpp) - list (REMOVE_ITEM clickhouse_common_io_headers src/Common/OpenSSLHelpers.h) -endif () - -add_library(clickhouse_common_io ${clickhouse_common_io_headers} ${clickhouse_common_io_sources}) - -add_library (clickhouse_malloc OBJECT src/Common/malloc.cpp) -set_source_files_properties(src/Common/malloc.cpp PROPERTIES COMPILE_FLAGS "-fno-builtin") - -add_library (clickhouse_new_delete STATIC src/Common/new_delete.cpp) -target_link_libraries (clickhouse_new_delete PRIVATE clickhouse_common_io) - -if (OS_FREEBSD) - target_compile_definitions (clickhouse_common_io PUBLIC CLOCK_MONOTONIC_COARSE=CLOCK_MONOTONIC_FAST) 
-endif () - -add_subdirectory(src/Common/ZooKeeper) -add_subdirectory(src/Common/Config) - -set (all_modules) -macro(add_object_library name common_path) - if (MAKE_STATIC_LIBRARIES OR NOT SPLIT_SHARED_LIBRARIES) - add_headers_and_sources(dbms ${common_path}) - else () - list (APPEND all_modules ${name}) - add_headers_and_sources(${name} ${common_path}) - add_library(${name} SHARED ${${name}_sources} ${${name}_headers}) - target_link_libraries (${name} PRIVATE -Wl,--unresolved-symbols=ignore-all) - endif () -endmacro() - -add_object_library(clickhouse_access src/Access) -add_object_library(clickhouse_core src/Core) -add_object_library(clickhouse_compression src/Compression) -add_object_library(clickhouse_datastreams src/DataStreams) -add_object_library(clickhouse_datatypes src/DataTypes) -add_object_library(clickhouse_databases src/Databases) -add_object_library(clickhouse_disks src/Disks) -add_object_library(clickhouse_interpreters src/Interpreters) -add_object_library(clickhouse_interpreters_clusterproxy src/Interpreters/ClusterProxy) -add_object_library(clickhouse_columns src/Columns) -add_object_library(clickhouse_storages src/Storages) -add_object_library(clickhouse_storages_distributed src/Storages/Distributed) -add_object_library(clickhouse_storages_mergetree src/Storages/MergeTree) -add_object_library(clickhouse_storages_liveview src/Storages/LiveView) -add_object_library(clickhouse_client src/Client) -add_object_library(clickhouse_formats src/Formats) -add_object_library(clickhouse_processors src/Processors) -add_object_library(clickhouse_processors_executors src/Processors/Executors) -add_object_library(clickhouse_processors_formats src/Processors/Formats) -add_object_library(clickhouse_processors_formats_impl src/Processors/Formats/Impl) -add_object_library(clickhouse_processors_transforms src/Processors/Transforms) -add_object_library(clickhouse_processors_sources src/Processors/Sources) - - -if (MAKE_STATIC_LIBRARIES OR NOT SPLIT_SHARED_LIBRARIES) - add_library (dbms STATIC ${dbms_headers} ${dbms_sources}) - set (all_modules dbms) -else() - add_library (dbms SHARED ${dbms_headers} ${dbms_sources}) - target_link_libraries (dbms PUBLIC ${all_modules}) - list (APPEND all_modules dbms) - # force all split libs to be linked - set (CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -Wl,--no-as-needed") -endif () - -macro (dbms_target_include_directories) - foreach (module ${all_modules}) - target_include_directories (${module} ${ARGN}) - endforeach () -endmacro () - -macro (dbms_target_link_libraries) - foreach (module ${all_modules}) - target_link_libraries (${module} ${ARGN}) - endforeach () -endmacro () - -if (USE_EMBEDDED_COMPILER) - dbms_target_link_libraries (PRIVATE ${REQUIRED_LLVM_LIBRARIES}) - dbms_target_include_directories (SYSTEM BEFORE PUBLIC ${LLVM_INCLUDE_DIRS}) -endif () - -if (CMAKE_BUILD_TYPE_UC STREQUAL "RELEASE" OR CMAKE_BUILD_TYPE_UC STREQUAL "RELWITHDEBINFO" OR CMAKE_BUILD_TYPE_UC STREQUAL "MINSIZEREL") - # Won't generate debug info for files with heavy template instantiation to achieve faster linking and lower size. 
- set_source_files_properties( - src/Dictionaries/FlatDictionary.cpp - src/Dictionaries/HashedDictionary.cpp - src/Dictionaries/CacheDictionary.cpp - src/Dictionaries/TrieDictionary.cpp - src/Dictionaries/RangeHashedDictionary.cpp - src/Dictionaries/ComplexKeyHashedDictionary.cpp - src/Dictionaries/ComplexKeyCacheDictionary.cpp - src/Dictionaries/ComplexKeyCacheDictionary_generate1.cpp - src/Dictionaries/ComplexKeyCacheDictionary_generate2.cpp - src/Dictionaries/ComplexKeyCacheDictionary_generate3.cpp - src/Dictionaries/ODBCBlockInputStream.cpp - src/Dictionaries/HTTPDictionarySource.cpp - src/Dictionaries/LibraryDictionarySource.cpp - src/Dictionaries/ExecutableDictionarySource.cpp - src/Dictionaries/ClickHouseDictionarySource.cpp - PROPERTIES COMPILE_FLAGS -g0) -endif () - -# Otherwise it will slow down stack traces printing too much. -set_source_files_properties( - src/Common/Elf.cpp - src/Common/Dwarf.cpp - src/Common/SymbolIndex.cpp - PROPERTIES COMPILE_FLAGS "-O3 ${WITHOUT_COVERAGE}") - -target_link_libraries (clickhouse_common_io - PUBLIC - common - PRIVATE - string_utils - widechar_width - ${LINK_LIBRARIES_ONLY_ON_X86_64} - PUBLIC - ${DOUBLE_CONVERSION_LIBRARIES} - ryu - PUBLIC - ${Poco_Net_LIBRARY} - ${Poco_Util_LIBRARY} - ${Poco_Foundation_LIBRARY} - ${Poco_XML_LIBRARY} -) - -if(RE2_LIBRARY) - target_link_libraries(clickhouse_common_io PUBLIC ${RE2_LIBRARY}) -endif() -if(RE2_ST_LIBRARY) - target_link_libraries(clickhouse_common_io PUBLIC ${RE2_ST_LIBRARY}) -endif() - -target_link_libraries(clickhouse_common_io - PUBLIC - ${CITYHASH_LIBRARIES} - PRIVATE - ${Poco_XML_LIBRARY} - ${ZLIB_LIBRARIES} - ${EXECINFO_LIBRARIES} - PUBLIC - ${Boost_SYSTEM_LIBRARY} - ${Boost_PROGRAM_OPTIONS_LIBRARY} - PUBLIC - roaring -) - -if (USE_RDKAFKA) - dbms_target_link_libraries(PRIVATE ${CPPKAFKA_LIBRARY} ${RDKAFKA_LIBRARY}) - if(NOT USE_INTERNAL_RDKAFKA_LIBRARY) - dbms_target_include_directories(SYSTEM BEFORE PRIVATE ${RDKAFKA_INCLUDE_DIR}) - endif() -endif() - - -if(RE2_INCLUDE_DIR) - target_include_directories(clickhouse_common_io SYSTEM BEFORE PUBLIC ${RE2_INCLUDE_DIR}) -endif() - -if(CPUID_LIBRARY) - target_link_libraries(clickhouse_common_io PRIVATE ${CPUID_LIBRARY}) -endif() - -if(CPUINFO_LIBRARY) - target_link_libraries(clickhouse_common_io PRIVATE ${CPUINFO_LIBRARY}) -endif() - -dbms_target_link_libraries ( - PRIVATE - clickhouse_parsers - clickhouse_common_config - clickhouse_common_zookeeper - string_utils # FIXME: not sure if it's private - PUBLIC - clickhouse_common_io - PRIVATE - clickhouse_dictionaries_embedded - ${LZ4_LIBRARY} - PUBLIC - ${MYSQLXX_LIBRARY} - PRIVATE - ${BTRIE_LIBRARIES} - ${Boost_PROGRAM_OPTIONS_LIBRARY} - ${Boost_FILESYSTEM_LIBRARY} - PUBLIC - ${Boost_SYSTEM_LIBRARY} -) - -target_include_directories(clickhouse_common_io PUBLIC ${CMAKE_CURRENT_BINARY_DIR}/src/Core/include) # uses some includes from core -dbms_target_include_directories(PUBLIC ${CMAKE_CURRENT_BINARY_DIR}/src/Core/include) - -target_include_directories(clickhouse_common_io SYSTEM PUBLIC ${PCG_RANDOM_INCLUDE_DIR}) -dbms_target_include_directories(SYSTEM PUBLIC ${PCG_RANDOM_INCLUDE_DIR}) - -dbms_target_include_directories(SYSTEM BEFORE PUBLIC ${PDQSORT_INCLUDE_DIR}) - -if (NOT USE_INTERNAL_LZ4_LIBRARY AND LZ4_INCLUDE_DIR) - dbms_target_include_directories(SYSTEM BEFORE PRIVATE ${LZ4_INCLUDE_DIR}) -endif () - -if (ZSTD_LIBRARY) - dbms_target_link_libraries(PRIVATE ${ZSTD_LIBRARY}) - if (NOT USE_INTERNAL_ZSTD_LIBRARY AND ZSTD_INCLUDE_DIR) - dbms_target_include_directories(SYSTEM BEFORE PRIVATE 
${ZSTD_INCLUDE_DIR}) - endif () -endif() - -if (NOT USE_INTERNAL_BOOST_LIBRARY) - target_include_directories (clickhouse_common_io SYSTEM BEFORE PUBLIC ${Boost_INCLUDE_DIRS}) -endif () - -if (Poco_SQL_FOUND AND NOT USE_INTERNAL_POCO_LIBRARY) - target_include_directories (clickhouse_common_io SYSTEM PRIVATE ${Poco_SQL_INCLUDE_DIR}) - dbms_target_include_directories (SYSTEM PRIVATE ${Poco_SQL_INCLUDE_DIR}) -endif() - -if (USE_POCO_SQLODBC) - target_link_libraries (clickhouse_common_io PRIVATE ${Poco_SQL_LIBRARY}) - dbms_target_link_libraries (PRIVATE ${Poco_SQLODBC_LIBRARY} ${Poco_SQL_LIBRARY}) - if (NOT USE_INTERNAL_POCO_LIBRARY) - target_include_directories (clickhouse_common_io SYSTEM PRIVATE ${ODBC_INCLUDE_DIRS} ${Poco_SQL_INCLUDE_DIR}) - dbms_target_include_directories (SYSTEM PRIVATE ${ODBC_INCLUDE_DIRS} ${Poco_SQLODBC_INCLUDE_DIR} SYSTEM PUBLIC ${Poco_SQL_INCLUDE_DIR}) - endif() -endif() - -if (Poco_Data_FOUND) - target_include_directories (clickhouse_common_io SYSTEM PRIVATE ${Poco_Data_INCLUDE_DIR}) - dbms_target_include_directories (SYSTEM PRIVATE ${Poco_Data_INCLUDE_DIR}) -endif() - -if (USE_POCO_DATAODBC) - target_link_libraries (clickhouse_common_io PRIVATE ${Poco_Data_LIBRARY}) - dbms_target_link_libraries (PRIVATE ${Poco_DataODBC_LIBRARY}) - if (NOT USE_INTERNAL_POCO_LIBRARY) - dbms_target_include_directories (SYSTEM PRIVATE ${ODBC_INCLUDE_DIRS} ${Poco_DataODBC_INCLUDE_DIR}) - endif() -endif() - -if (USE_POCO_MONGODB) - dbms_target_link_libraries (PRIVATE ${Poco_MongoDB_LIBRARY}) -endif() - -if (USE_POCO_REDIS) - dbms_target_link_libraries (PRIVATE ${Poco_Redis_LIBRARY}) -endif() - -if (USE_POCO_NETSSL) - target_link_libraries (clickhouse_common_io PRIVATE ${Poco_NetSSL_LIBRARY} ${Poco_Crypto_LIBRARY}) - dbms_target_link_libraries (PRIVATE ${Poco_NetSSL_LIBRARY} ${Poco_Crypto_LIBRARY}) -endif() - -if (USE_POCO_JSON) - dbms_target_link_libraries (PRIVATE ${Poco_JSON_LIBRARY}) -endif() - -dbms_target_link_libraries (PRIVATE ${Poco_Foundation_LIBRARY}) - -if (USE_ICU) - dbms_target_link_libraries (PRIVATE ${ICU_LIBRARIES}) - dbms_target_include_directories (SYSTEM PRIVATE ${ICU_INCLUDE_DIRS}) -endif () - -if (USE_CAPNP) - dbms_target_link_libraries (PRIVATE ${CAPNP_LIBRARIES}) -endif () - -if (USE_PARQUET) - dbms_target_link_libraries(PRIVATE ${PARQUET_LIBRARY}) - if (NOT USE_INTERNAL_PARQUET_LIBRARY OR USE_INTERNAL_PARQUET_LIBRARY_NATIVE_CMAKE) - dbms_target_include_directories (SYSTEM BEFORE PRIVATE ${PARQUET_INCLUDE_DIR} ${ARROW_INCLUDE_DIR}) - endif () -endif () - -if (USE_AVRO) - dbms_target_link_libraries(PRIVATE ${AVROCPP_LIBRARY}) - dbms_target_include_directories (SYSTEM BEFORE PRIVATE ${AVROCPP_INCLUDE_DIR}) -endif () - -if (OPENSSL_CRYPTO_LIBRARY) - dbms_target_link_libraries (PRIVATE ${OPENSSL_CRYPTO_LIBRARY}) - target_link_libraries (clickhouse_common_io PRIVATE ${OPENSSL_CRYPTO_LIBRARY}) -endif () - -dbms_target_include_directories (SYSTEM BEFORE PRIVATE ${DIVIDE_INCLUDE_DIR}) -dbms_target_include_directories (SYSTEM BEFORE PRIVATE ${SPARSEHASH_INCLUDE_DIR}) - -if (USE_PROTOBUF) - dbms_target_link_libraries (PRIVATE ${Protobuf_LIBRARY}) - dbms_target_include_directories (SYSTEM BEFORE PRIVATE ${Protobuf_INCLUDE_DIR}) -endif () - -if (USE_HDFS) - target_link_libraries (clickhouse_common_io PUBLIC ${HDFS3_LIBRARY}) - target_include_directories (clickhouse_common_io SYSTEM BEFORE PUBLIC ${HDFS3_INCLUDE_DIR}) -endif() - -if (USE_AWS_S3) - target_link_libraries (clickhouse_common_io PUBLIC ${AWS_S3_LIBRARY}) - target_include_directories (clickhouse_common_io SYSTEM 
BEFORE PUBLIC ${AWS_S3_CORE_INCLUDE_DIR}) - target_include_directories (clickhouse_common_io SYSTEM BEFORE PUBLIC ${AWS_S3_INCLUDE_DIR}) -endif() - -if (USE_BROTLI) - target_link_libraries (clickhouse_common_io PRIVATE ${BROTLI_LIBRARY}) - target_include_directories (clickhouse_common_io SYSTEM BEFORE PRIVATE ${BROTLI_INCLUDE_DIR}) -endif() - -if (USE_JEMALLOC) - dbms_target_include_directories (SYSTEM BEFORE PRIVATE ${JEMALLOC_INCLUDE_DIR}) # used in Interpreters/AsynchronousMetrics.cpp - target_include_directories (clickhouse_new_delete SYSTEM BEFORE PRIVATE ${JEMALLOC_INCLUDE_DIR}) - - if(NOT MAKE_STATIC_LIBRARIES AND ${JEMALLOC_LIBRARIES} MATCHES "${CMAKE_STATIC_LIBRARY_SUFFIX}$") - # mallctl in dbms/src/Interpreters/AsynchronousMetrics.cpp - # Actually we link JEMALLOC to almost all libraries. - # This is just hotfix for some uninvestigated problem. - target_link_libraries(clickhouse_interpreters PRIVATE ${JEMALLOC_LIBRARIES}) - endif() -endif () - -dbms_target_include_directories (PUBLIC ${DBMS_INCLUDE_DIR}) -target_include_directories (clickhouse_common_io PUBLIC ${DBMS_INCLUDE_DIR}) - -target_include_directories (clickhouse_common_io SYSTEM BEFORE PUBLIC ${DOUBLE_CONVERSION_INCLUDE_DIR}) - -add_subdirectory (programs) -add_subdirectory (tests) - -if (ENABLE_TESTS AND USE_GTEST) - macro (grep_gtest_sources BASE_DIR DST_VAR) - # Cold match files that are not in tests/ directories - file(GLOB_RECURSE "${DST_VAR}" RELATIVE "${BASE_DIR}" "gtest*.cpp") - endmacro() - - # attach all dbms gtest sources - grep_gtest_sources(${ClickHouse_SOURCE_DIR}/dbms dbms_gtest_sources) - add_executable(unit_tests_dbms ${dbms_gtest_sources}) - - # gtest framework has substandard code - target_compile_options(unit_tests_dbms PRIVATE - -Wno-zero-as-null-pointer-constant - -Wno-undef - -Wno-sign-compare - -Wno-used-but-marked-unused - -Wno-missing-noreturn - -Wno-gnu-zero-variadic-macro-arguments - ) - - target_link_libraries(unit_tests_dbms PRIVATE ${GTEST_BOTH_LIBRARIES} clickhouse_functions clickhouse_parsers dbms clickhouse_common_zookeeper string_utils) - add_check(unit_tests_dbms) -endif () diff --git a/dbms/cmake/find_vectorclass.cmake b/dbms/cmake/find_vectorclass.cmake deleted file mode 100644 index 021929a4090..00000000000 --- a/dbms/cmake/find_vectorclass.cmake +++ /dev/null @@ -1,15 +0,0 @@ - -option (ENABLE_VECTORCLASS "Faster math functions with vectorclass lib" OFF) - -if (ENABLE_VECTORCLASS) - - set (VECTORCLASS_INCLUDE_PATHS "${ClickHouse_SOURCE_DIR}/contrib/vectorclass" CACHE STRING "Path of vectorclass library") - find_path (VECTORCLASS_INCLUDE_DIR NAMES vectorf128.h PATHS ${VECTORCLASS_INCLUDE_PATHS}) - - if (VECTORCLASS_INCLUDE_DIR) - set (USE_VECTORCLASS 1) - endif () - - message (STATUS "Using vectorclass=${USE_VECTORCLASS}: ${VECTORCLASS_INCLUDE_DIR}") - -endif () diff --git a/dbms/programs/CMakeLists.txt b/dbms/programs/CMakeLists.txt deleted file mode 100644 index eea7a5f4fbe..00000000000 --- a/dbms/programs/CMakeLists.txt +++ /dev/null @@ -1,214 +0,0 @@ -# 'clickhouse' binary is a multi purpose tool, -# that contain multiple execution modes (client, server, etc.) -# each of them is built and linked as a separate library, defined below. 
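For context on the file being removed here: the clickhouse_program_add* helper macros it defines are consumed by each tool's own subdirectory CMakeLists, roughly as in the sketch below. The tool name `example` and its source file are hypothetical placeholders; the variable-naming convention (CLICKHOUSE_<NAME>_SOURCES / _LINK / _INCLUDE) follows the clickhouse_program_add_library macro in this file and the removed dbms/programs/benchmark/CMakeLists.txt shown later in this diff.

    # Hypothetical tool "example": how a tool subdirectory hooks into the
    # clickhouse_program_add* macros defined in this file.
    set (CLICKHOUSE_EXAMPLE_SOURCES ${CMAKE_CURRENT_SOURCE_DIR}/Example.cpp)   # hypothetical source list
    set (CLICKHOUSE_EXAMPLE_LINK PRIVATE dbms clickhouse_common_config)        # libraries this mode links against
    set (CLICKHOUSE_EXAMPLE_INCLUDE SYSTEM PRIVATE ${PCG_RANDOM_INCLUDE_DIR})  # optional extra include dirs

    # Registers clickhouse-example-lib and, when CLICKHOUSE_SPLIT_BINARY is set,
    # a standalone clickhouse-example executable; in the single-binary build the
    # mode would instead be exposed as a clickhouse-example symlink to the one
    # clickhouse binary, as the per-tool targets below do.
    clickhouse_program_add (example)

Both build flavours below (CLICKHOUSE_SPLIT_BINARY and CLICKHOUSE_ONE_SHARED) are driven by these per-tool SOURCES/LINK/INCLUDE variables.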
- -option (ENABLE_CLICKHOUSE_ALL "Enable all tools" ON) -option (ENABLE_CLICKHOUSE_SERVER "Enable clickhouse-server" ${ENABLE_CLICKHOUSE_ALL}) -option (ENABLE_CLICKHOUSE_CLIENT "Enable clickhouse-client" ${ENABLE_CLICKHOUSE_ALL}) -option (ENABLE_CLICKHOUSE_LOCAL "Enable clickhouse-local" ${ENABLE_CLICKHOUSE_ALL}) -option (ENABLE_CLICKHOUSE_BENCHMARK "Enable clickhouse-benchmark" ${ENABLE_CLICKHOUSE_ALL}) -option (ENABLE_CLICKHOUSE_PERFORMANCE_TEST "Enable clickhouse-performance-test" ${ENABLE_CLICKHOUSE_ALL}) -option (ENABLE_CLICKHOUSE_EXTRACT_FROM_CONFIG "Enable clickhouse-extract-from-config" ${ENABLE_CLICKHOUSE_ALL}) -option (ENABLE_CLICKHOUSE_COMPRESSOR "Enable clickhouse-compressor" ${ENABLE_CLICKHOUSE_ALL}) -option (ENABLE_CLICKHOUSE_COPIER "Enable clickhouse-copier" ${ENABLE_CLICKHOUSE_ALL}) -option (ENABLE_CLICKHOUSE_FORMAT "Enable clickhouse-format" ${ENABLE_CLICKHOUSE_ALL}) -option (ENABLE_CLICKHOUSE_OBFUSCATOR "Enable clickhouse-obfuscator" ${ENABLE_CLICKHOUSE_ALL}) -option (ENABLE_CLICKHOUSE_ODBC_BRIDGE "Enable clickhouse-odbc-bridge" ${ENABLE_CLICKHOUSE_ALL}) - -if(NOT (MAKE_STATIC_LIBRARIES OR SPLIT_SHARED_LIBRARIES)) - set(CLICKHOUSE_ONE_SHARED 1) -endif() - -configure_file (config_tools.h.in ${ConfigIncludePath}/config_tools.h) - - -macro(clickhouse_target_link_split_lib target name) - if(NOT CLICKHOUSE_ONE_SHARED) - target_link_libraries(${target} PRIVATE clickhouse-${name}-lib) - else() - target_link_libraries(${target} PRIVATE clickhouse-lib) - endif() -endmacro() - -macro(clickhouse_program_link_split_binary name) - clickhouse_target_link_split_lib(clickhouse-${name} ${name}) -endmacro() - -macro(clickhouse_program_add_library name) - string(TOUPPER ${name} name_uc) - string(REPLACE "-" "_" name_uc ${name_uc}) - - # Some dark magic - set(CLICKHOUSE_${name_uc}_SOURCES ${CLICKHOUSE_${name_uc}_SOURCES} PARENT_SCOPE) - set(CLICKHOUSE_${name_uc}_LINK ${CLICKHOUSE_${name_uc}_LINK} PARENT_SCOPE) - set(CLICKHOUSE_${name_uc}_INCLUDE ${CLICKHOUSE_${name_uc}_INCLUDE} PARENT_SCOPE) - - if(NOT CLICKHOUSE_ONE_SHARED) - add_library(clickhouse-${name}-lib ${CLICKHOUSE_${name_uc}_SOURCES}) - - set(_link ${CLICKHOUSE_${name_uc}_LINK}) # can't use ${} in if() - if(_link) - target_link_libraries(clickhouse-${name}-lib ${CLICKHOUSE_${name_uc}_LINK}) - endif() - - set(_include ${CLICKHOUSE_${name_uc}_INCLUDE}) # can't use ${} in if() - if (_include) - target_include_directories(clickhouse-${name}-lib ${CLICKHOUSE_${name_uc}_INCLUDE}) - endif() - endif() -endmacro() - -macro(clickhouse_program_add_executable name) - if(CLICKHOUSE_SPLIT_BINARY) - add_executable(clickhouse-${name} clickhouse-${name}.cpp) - clickhouse_program_link_split_binary(${name}) - install(TARGETS clickhouse-${name} ${CLICKHOUSE_ALL_TARGETS} RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse) - endif() -endmacro() - -macro(clickhouse_program_add name) - clickhouse_program_add_library(${name}) - clickhouse_program_add_executable(${name}) -endmacro() - - -add_subdirectory (server) -add_subdirectory (client) -add_subdirectory (local) -add_subdirectory (benchmark) -add_subdirectory (performance-test) -add_subdirectory (extract-from-config) -add_subdirectory (compressor) -add_subdirectory (copier) -add_subdirectory (format) -add_subdirectory (obfuscator) - -if (ENABLE_CLICKHOUSE_ODBC_BRIDGE) - add_subdirectory (odbc-bridge) -endif () - -if (CLICKHOUSE_ONE_SHARED) - add_library(clickhouse-lib SHARED ${CLICKHOUSE_SERVER_SOURCES} ${CLICKHOUSE_CLIENT_SOURCES} ${CLICKHOUSE_LOCAL_SOURCES} ${CLICKHOUSE_BENCHMARK_SOURCES} 
${CLICKHOUSE_PERFORMANCE_TEST_SOURCES} ${CLICKHOUSE_COPIER_SOURCES} ${CLICKHOUSE_EXTRACT_FROM_CONFIG_SOURCES} ${CLICKHOUSE_COMPRESSOR_SOURCES} ${CLICKHOUSE_FORMAT_SOURCES} ${CLICKHOUSE_OBFUSCATOR_SOURCES} ${CLICKHOUSE_ODBC_BRIDGE_SOURCES}) - target_link_libraries(clickhouse-lib ${CLICKHOUSE_SERVER_LINK} ${CLICKHOUSE_CLIENT_LINK} ${CLICKHOUSE_LOCAL_LINK} ${CLICKHOUSE_BENCHMARK_LINK} ${CLICKHOUSE_PERFORMANCE_TEST_LINK} ${CLICKHOUSE_COPIER_LINK} ${CLICKHOUSE_EXTRACT_FROM_CONFIG_LINK} ${CLICKHOUSE_COMPRESSOR_LINK} ${CLICKHOUSE_FORMAT_LINK} ${CLICKHOUSE_OBFUSCATOR_LINK} ${CLICKHOUSE_ODBC_BRIDGE_LINK}) - target_include_directories(clickhouse-lib ${CLICKHOUSE_SERVER_INCLUDE} ${CLICKHOUSE_CLIENT_INCLUDE} ${CLICKHOUSE_LOCAL_INCLUDE} ${CLICKHOUSE_BENCHMARK_INCLUDE} ${CLICKHOUSE_PERFORMANCE_TEST_INCLUDE} ${CLICKHOUSE_COPIER_INCLUDE} ${CLICKHOUSE_EXTRACT_FROM_CONFIG_INCLUDE} ${CLICKHOUSE_COMPRESSOR_INCLUDE} ${CLICKHOUSE_FORMAT_INCLUDE} ${CLICKHOUSE_OBFUSCATOR_INCLUDE} ${CLICKHOUSE_ODBC_BRIDGE_INCLUDE}) - set_target_properties(clickhouse-lib PROPERTIES SOVERSION ${VERSION_MAJOR}.${VERSION_MINOR} VERSION ${VERSION_SO} OUTPUT_NAME clickhouse DEBUG_POSTFIX "") - install (TARGETS clickhouse-lib LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} COMPONENT clickhouse) -endif() - -if (CLICKHOUSE_SPLIT_BINARY) - set (CLICKHOUSE_ALL_TARGETS clickhouse-server clickhouse-client clickhouse-local clickhouse-benchmark clickhouse-performance-test - clickhouse-extract-from-config clickhouse-compressor clickhouse-format clickhouse-obfuscator clickhouse-copier) - - if (ENABLE_CLICKHOUSE_ODBC_BRIDGE) - list (APPEND CLICKHOUSE_ALL_TARGETS clickhouse-odbc-bridge) - endif () - - set_target_properties(${CLICKHOUSE_ALL_TARGETS} PROPERTIES RUNTIME_OUTPUT_DIRECTORY ..) - - add_custom_target (clickhouse-bundle ALL DEPENDS ${CLICKHOUSE_ALL_TARGETS}) - add_custom_target (clickhouse ALL DEPENDS clickhouse-bundle) - - install(PROGRAMS clickhouse-split-helper DESTINATION ${CMAKE_INSTALL_BINDIR} RENAME clickhouse COMPONENT clickhouse) -else () - add_executable (clickhouse main.cpp) - target_link_libraries (clickhouse PRIVATE clickhouse_common_io string_utils) - target_include_directories (clickhouse PRIVATE ${CMAKE_CURRENT_BINARY_DIR}) - - if (ENABLE_CLICKHOUSE_SERVER) - clickhouse_target_link_split_lib(clickhouse server) - endif () - if (ENABLE_CLICKHOUSE_CLIENT) - clickhouse_target_link_split_lib(clickhouse client) - endif () - if (ENABLE_CLICKHOUSE_LOCAL) - clickhouse_target_link_split_lib(clickhouse local) - endif () - if (ENABLE_CLICKHOUSE_BENCHMARK) - clickhouse_target_link_split_lib(clickhouse benchmark) - endif () - if (ENABLE_CLICKHOUSE_PERFORMANCE_TEST) - clickhouse_target_link_split_lib(clickhouse performance-test) - endif () - if (ENABLE_CLICKHOUSE_COPIER) - clickhouse_target_link_split_lib(clickhouse copier) - endif () - if (ENABLE_CLICKHOUSE_EXTRACT_FROM_CONFIG) - clickhouse_target_link_split_lib(clickhouse extract-from-config) - endif () - if (ENABLE_CLICKHOUSE_COMPRESSOR) - clickhouse_target_link_split_lib(clickhouse compressor) - endif () - if (ENABLE_CLICKHOUSE_FORMAT) - clickhouse_target_link_split_lib(clickhouse format) - endif () - if (ENABLE_CLICKHOUSE_OBFUSCATOR) - clickhouse_target_link_split_lib(clickhouse obfuscator) - endif () - - set (CLICKHOUSE_BUNDLE) - if (ENABLE_CLICKHOUSE_SERVER) - add_custom_target (clickhouse-server ALL COMMAND ${CMAKE_COMMAND} -E create_symlink clickhouse clickhouse-server DEPENDS clickhouse) - install (FILES ${CMAKE_CURRENT_BINARY_DIR}/clickhouse-server DESTINATION ${CMAKE_INSTALL_BINDIR} 
COMPONENT clickhouse) - list(APPEND CLICKHOUSE_BUNDLE clickhouse-server) - endif () - if (ENABLE_CLICKHOUSE_CLIENT) - add_custom_target (clickhouse-client ALL COMMAND ${CMAKE_COMMAND} -E create_symlink clickhouse clickhouse-client DEPENDS clickhouse) - install (FILES ${CMAKE_CURRENT_BINARY_DIR}/clickhouse-client DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse) - list(APPEND CLICKHOUSE_BUNDLE clickhouse-client) - endif () - if (ENABLE_CLICKHOUSE_LOCAL) - add_custom_target (clickhouse-local ALL COMMAND ${CMAKE_COMMAND} -E create_symlink clickhouse clickhouse-local DEPENDS clickhouse) - install (FILES ${CMAKE_CURRENT_BINARY_DIR}/clickhouse-local DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse) - list(APPEND CLICKHOUSE_BUNDLE clickhouse-local) - endif () - if (ENABLE_CLICKHOUSE_BENCHMARK) - add_custom_target (clickhouse-benchmark ALL COMMAND ${CMAKE_COMMAND} -E create_symlink clickhouse clickhouse-benchmark DEPENDS clickhouse) - install (FILES ${CMAKE_CURRENT_BINARY_DIR}/clickhouse-benchmark DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse) - list(APPEND CLICKHOUSE_BUNDLE clickhouse-benchmark) - endif () - if (ENABLE_CLICKHOUSE_PERFORMANCE_TEST) - add_custom_target (clickhouse-performance-test ALL COMMAND ${CMAKE_COMMAND} -E create_symlink clickhouse clickhouse-performance-test DEPENDS clickhouse) - install (FILES ${CMAKE_CURRENT_BINARY_DIR}/clickhouse-performance-test DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse) - list(APPEND CLICKHOUSE_BUNDLE clickhouse-performance-test) - endif () - if (ENABLE_CLICKHOUSE_COPIER) - add_custom_target (clickhouse-copier ALL COMMAND ${CMAKE_COMMAND} -E create_symlink clickhouse clickhouse-copier DEPENDS clickhouse) - install (FILES ${CMAKE_CURRENT_BINARY_DIR}/clickhouse-copier DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse) - list(APPEND CLICKHOUSE_BUNDLE clickhouse-copier) - endif () - if (ENABLE_CLICKHOUSE_EXTRACT_FROM_CONFIG) - add_custom_target (clickhouse-extract-from-config ALL COMMAND ${CMAKE_COMMAND} -E create_symlink clickhouse clickhouse-extract-from-config DEPENDS clickhouse) - install (FILES ${CMAKE_CURRENT_BINARY_DIR}/clickhouse-extract-from-config DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse) - list(APPEND CLICKHOUSE_BUNDLE clickhouse-extract-from-config) - endif () - if (ENABLE_CLICKHOUSE_COMPRESSOR) - add_custom_target (clickhouse-compressor ALL COMMAND ${CMAKE_COMMAND} -E create_symlink clickhouse clickhouse-compressor DEPENDS clickhouse) - install (FILES ${CMAKE_CURRENT_BINARY_DIR}/clickhouse-compressor DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse) - list(APPEND CLICKHOUSE_BUNDLE clickhouse-compressor) - endif () - if (ENABLE_CLICKHOUSE_FORMAT) - add_custom_target (clickhouse-format ALL COMMAND ${CMAKE_COMMAND} -E create_symlink clickhouse clickhouse-format DEPENDS clickhouse) - install (FILES ${CMAKE_CURRENT_BINARY_DIR}/clickhouse-format DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse) - list(APPEND CLICKHOUSE_BUNDLE clickhouse-format) - endif () - if (ENABLE_CLICKHOUSE_OBFUSCATOR) - add_custom_target (clickhouse-obfuscator ALL COMMAND ${CMAKE_COMMAND} -E create_symlink clickhouse clickhouse-obfuscator DEPENDS clickhouse) - install (FILES ${CMAKE_CURRENT_BINARY_DIR}/clickhouse-obfuscator DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse) - list(APPEND CLICKHOUSE_BUNDLE clickhouse-obfuscator) - endif () - if(ENABLE_CLICKHOUSE_ODBC_BRIDGE) - list(APPEND CLICKHOUSE_BUNDLE clickhouse-odbc-bridge) - endif() - - install (TARGETS clickhouse RUNTIME DESTINATION 
${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse) - - add_custom_target (clickhouse-bundle ALL DEPENDS ${CLICKHOUSE_BUNDLE}) - - if (USE_GDB_ADD_INDEX) - add_custom_command(TARGET clickhouse POST_BUILD COMMAND ${GDB_ADD_INDEX_EXE} clickhouse COMMENT "Adding .gdb-index to clickhouse" VERBATIM) - endif() -endif () - -if (TARGET clickhouse-server AND TARGET copy-headers) - add_dependencies(clickhouse-server copy-headers) -endif () diff --git a/dbms/programs/benchmark/CMakeLists.txt b/dbms/programs/benchmark/CMakeLists.txt deleted file mode 100644 index 58096985037..00000000000 --- a/dbms/programs/benchmark/CMakeLists.txt +++ /dev/null @@ -1,9 +0,0 @@ -set(CLICKHOUSE_BENCHMARK_SOURCES ${CMAKE_CURRENT_SOURCE_DIR}/Benchmark.cpp) -set(CLICKHOUSE_BENCHMARK_LINK PRIVATE dbms clickhouse_aggregate_functions clickhouse_common_config ${Boost_PROGRAM_OPTIONS_LIBRARY}) -set(CLICKHOUSE_BENCHMARK_INCLUDE SYSTEM PRIVATE ${PCG_RANDOM_INCLUDE_DIR}) - -clickhouse_program_add(benchmark) - -if(NOT CLICKHOUSE_ONE_SHARED) - target_link_libraries (clickhouse-benchmark-lib PRIVATE clickhouse-client-lib) -endif() diff --git a/dbms/programs/client/Suggest.cpp b/dbms/programs/client/Suggest.cpp deleted file mode 100644 index cc31eed53e3..00000000000 --- a/dbms/programs/client/Suggest.cpp +++ /dev/null @@ -1,161 +0,0 @@ -#include "Suggest.h" - -#include -#include - -namespace DB -{ -namespace ErrorCodes -{ - extern const int LOGICAL_ERROR; - extern const int UNKNOWN_PACKET_FROM_SERVER; -} - -void Suggest::load(const ConnectionParameters & connection_parameters, size_t suggestion_limit) -{ - loading_thread = std::thread([connection_parameters, suggestion_limit, this] - { - try - { - Connection connection( - connection_parameters.host, - connection_parameters.port, - connection_parameters.default_database, - connection_parameters.user, - connection_parameters.password, - "client", - connection_parameters.compression, - connection_parameters.security); - - loadImpl(connection, connection_parameters.timeouts, suggestion_limit); - } - catch (...) - { - std::cerr << "Cannot load data for command line suggestions: " << getCurrentExceptionMessage(false, true) << "\n"; - } - - /// Note that keyword suggestions are available even if we cannot load data from server. - - if (case_insensitive) - std::sort(words.begin(), words.end(), [](const std::string & str1, const std::string & str2) - { - return std::lexicographical_compare(begin(str1), end(str1), begin(str2), end(str2), [](const char char1, const char char2) - { - return std::tolower(char1) < std::tolower(char2); - }); - }); - else - std::sort(words.begin(), words.end()); - - ready = true; - }); -} - -Suggest::Suggest() -{ - /// Keywords may be not up to date with ClickHouse parser. 
- words = {"CREATE", "DATABASE", "IF", "NOT", "EXISTS", "TEMPORARY", "TABLE", "ON", "CLUSTER", "DEFAULT", - "MATERIALIZED", "ALIAS", "ENGINE", "AS", "VIEW", "POPULATE", "SETTINGS", "ATTACH", "DETACH", "DROP", - "RENAME", "TO", "ALTER", "ADD", "MODIFY", "CLEAR", "COLUMN", "AFTER", "COPY", "PROJECT", - "PRIMARY", "KEY", "CHECK", "PARTITION", "PART", "FREEZE", "FETCH", "FROM", "SHOW", "INTO", - "OUTFILE", "FORMAT", "TABLES", "DATABASES", "LIKE", "PROCESSLIST", "CASE", "WHEN", "THEN", "ELSE", - "END", "DESCRIBE", "DESC", "USE", "SET", "OPTIMIZE", "FINAL", "DEDUPLICATE", "INSERT", "VALUES", - "SELECT", "DISTINCT", "SAMPLE", "ARRAY", "JOIN", "GLOBAL", "LOCAL", "ANY", "ALL", "INNER", - "LEFT", "RIGHT", "FULL", "OUTER", "CROSS", "USING", "PREWHERE", "WHERE", "GROUP", "BY", - "WITH", "TOTALS", "HAVING", "ORDER", "COLLATE", "LIMIT", "UNION", "AND", "OR", "ASC", - "IN", "KILL", "QUERY", "SYNC", "ASYNC", "TEST", "BETWEEN", "TRUNCATE"}; -} - -void Suggest::loadImpl(Connection & connection, const ConnectionTimeouts & timeouts, size_t suggestion_limit) -{ - std::stringstream query; - query << "SELECT DISTINCT arrayJoin(extractAll(name, '[\\\\w_]{2,}')) AS res FROM (" - "SELECT name FROM system.functions" - " UNION ALL " - "SELECT name FROM system.table_engines" - " UNION ALL " - "SELECT name FROM system.formats" - " UNION ALL " - "SELECT name FROM system.table_functions" - " UNION ALL " - "SELECT name FROM system.data_type_families" - " UNION ALL " - "SELECT name FROM system.merge_tree_settings" - " UNION ALL " - "SELECT name FROM system.settings" - " UNION ALL " - "SELECT cluster FROM system.clusters" - " UNION ALL " - "SELECT concat(func.name, comb.name) FROM system.functions AS func CROSS JOIN system.aggregate_function_combinators AS comb WHERE is_aggregate"; - - /// The user may disable loading of databases, tables, columns by setting suggestion_limit to zero. 
- if (suggestion_limit > 0) - { - String limit_str = toString(suggestion_limit); - query << - " UNION ALL " - "SELECT name FROM system.databases LIMIT " << limit_str - << " UNION ALL " - "SELECT DISTINCT name FROM system.tables LIMIT " << limit_str - << " UNION ALL " - "SELECT DISTINCT name FROM system.columns LIMIT " << limit_str; - } - - query << ") WHERE notEmpty(res)"; - - fetch(connection, timeouts, query.str()); -} - -void Suggest::fetch(Connection & connection, const ConnectionTimeouts & timeouts, const std::string & query) -{ - connection.sendQuery(timeouts, query); - - while (true) - { - Packet packet = connection.receivePacket(); - switch (packet.type) - { - case Protocol::Server::Data: - fillWordsFromBlock(packet.block); - continue; - - case Protocol::Server::Progress: - continue; - case Protocol::Server::ProfileInfo: - continue; - case Protocol::Server::Totals: - continue; - case Protocol::Server::Extremes: - continue; - case Protocol::Server::Log: - continue; - - case Protocol::Server::Exception: - packet.exception->rethrow(); - return; - - case Protocol::Server::EndOfStream: - return; - - default: - throw Exception("Unknown packet from server", ErrorCodes::UNKNOWN_PACKET_FROM_SERVER); - } - } -} - -void Suggest::fillWordsFromBlock(const Block & block) -{ - if (!block) - return; - - if (block.columns() != 1) - throw Exception("Wrong number of columns received for query to read words for suggestion", ErrorCodes::LOGICAL_ERROR); - - const ColumnString & column = typeid_cast(*block.getByPosition(0).column); - - size_t rows = block.rows(); - for (size_t i = 0; i < rows; ++i) - words.emplace_back(column.getDataAt(i).toString()); -} - -} diff --git a/dbms/programs/client/readpassphrase/readpassphrase.c b/dbms/programs/client/readpassphrase/readpassphrase.c deleted file mode 100644 index 8c56877196c..00000000000 --- a/dbms/programs/client/readpassphrase/readpassphrase.c +++ /dev/null @@ -1,211 +0,0 @@ -/* $OpenBSD: readpassphrase.c,v 1.26 2016/10/18 12:47:18 millert Exp $ */ - -/* - * Copyright (c) 2000-2002, 2007, 2010 - * Todd C. Miller - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - * - * Sponsored in part by the Defense Advanced Research Projects - * Agency (DARPA) and Air Force Research Laboratory, Air Force - * Materiel Command, USAF, under agreement number F39502-99-1-0512. - */ - -/* OPENBSD ORIGINAL: lib/libc/gen/readpassphrase.c */ - -#include "includes.h" - -#ifndef HAVE_READPASSPHRASE - -#include -#include -#include -#include -#include -#include -#include -#include - -#ifndef TCSASOFT -/* If we don't have TCSASOFT define it so that ORing it it below is a no-op. 
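 * (On BSD-derived systems TCSASOFT asks tcsetattr() to leave the hardware parameters
 * untouched; where the macro is missing, defining it as 0 keeps the OR a no-op.)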
*/ -# define TCSASOFT 0 -#endif - -/* SunOS 4.x which lacks _POSIX_VDISABLE, but has VDISABLE */ -#if !defined(_POSIX_VDISABLE) && defined(VDISABLE) -# define _POSIX_VDISABLE VDISABLE -#endif - -static volatile sig_atomic_t signo[NSIG]; - -static void handler(int); - -char * -readpassphrase(const char *prompt, char *buf, size_t bufsiz, int flags) -{ - ssize_t nr; - int input, output, save_errno, i, need_restart; - char ch, *p, *end; - struct termios term, oterm; - struct sigaction sa, savealrm, saveint, savehup, savequit, saveterm; - struct sigaction savetstp, savettin, savettou, savepipe; - - /* I suppose we could alloc on demand in this case (XXX). */ - if (bufsiz == 0) { - errno = EINVAL; - return(NULL); - } - -restart: - for (i = 0; i < NSIG; i++) - signo[i] = 0; - nr = -1; - save_errno = 0; - need_restart = 0; - /* - * Read and write to /dev/tty if available. If not, read from - * stdin and write to stderr unless a tty is required. - */ - if ((flags & RPP_STDIN) || - (input = output = open(_PATH_TTY, O_RDWR)) == -1) { - if (flags & RPP_REQUIRE_TTY) { - errno = ENOTTY; - return(NULL); - } - input = STDIN_FILENO; - output = STDERR_FILENO; - } - - /* - * Turn off echo if possible. - * If we are using a tty but are not the foreground pgrp this will - * generate SIGTTOU, so do it *before* installing the signal handlers. - */ - if (input != STDIN_FILENO && tcgetattr(input, &oterm) == 0) { - memcpy(&term, &oterm, sizeof(term)); - if (!(flags & RPP_ECHO_ON)) - term.c_lflag &= ~(ECHO | ECHONL); -#ifdef VSTATUS - if (term.c_cc[VSTATUS] != _POSIX_VDISABLE) - term.c_cc[VSTATUS] = _POSIX_VDISABLE; -#endif - (void)tcsetattr(input, TCSAFLUSH|TCSASOFT, &term); - } else { - memset(&term, 0, sizeof(term)); - term.c_lflag |= ECHO; - memset(&oterm, 0, sizeof(oterm)); - oterm.c_lflag |= ECHO; - } - - /* - * Catch signals that would otherwise cause the user to end - * up with echo turned off in the shell. Don't worry about - * things like SIGXCPU and SIGVTALRM for now. - */ - sigemptyset(&sa.sa_mask); - sa.sa_flags = 0; /* don't restart system calls */ - sa.sa_handler = handler; - (void)sigaction(SIGALRM, &sa, &savealrm); - (void)sigaction(SIGHUP, &sa, &savehup); - (void)sigaction(SIGINT, &sa, &saveint); - (void)sigaction(SIGPIPE, &sa, &savepipe); - (void)sigaction(SIGQUIT, &sa, &savequit); - (void)sigaction(SIGTERM, &sa, &saveterm); - (void)sigaction(SIGTSTP, &sa, &savetstp); - (void)sigaction(SIGTTIN, &sa, &savettin); - (void)sigaction(SIGTTOU, &sa, &savettou); - - if (!(flags & RPP_STDIN)) - (void)write(output, prompt, strlen(prompt)); - end = buf + bufsiz - 1; - p = buf; - while ((nr = read(input, &ch, 1)) == 1 && ch != '\n' && ch != '\r') { - if (p < end) { - if ((flags & RPP_SEVENBIT)) - ch &= 0x7f; - if (isalpha((unsigned char)ch)) { - if ((flags & RPP_FORCELOWER)) - ch = (char)tolower((unsigned char)ch); - if ((flags & RPP_FORCEUPPER)) - ch = (char)toupper((unsigned char)ch); - } - *p++ = ch; - } - } - *p = '\0'; - save_errno = errno; - if (!(term.c_lflag & ECHO)) - (void)write(output, "\n", 1); - - /* Restore old terminal settings and signals. */ - if (memcmp(&term, &oterm, sizeof(term)) != 0) { - const int sigttou = signo[SIGTTOU]; - - /* Ignore SIGTTOU generated when we are not the fg pgrp. 
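 * (tcsetattr() from a background process raises SIGTTOU; the loop below retries while
 * the call is interrupted and the signal has not fired, then restores the remembered flag.)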
*/ - while (tcsetattr(input, TCSAFLUSH|TCSASOFT, &oterm) == -1 && - errno == EINTR && !signo[SIGTTOU]) - continue; - signo[SIGTTOU] = sigttou; - } - (void)sigaction(SIGALRM, &savealrm, NULL); - (void)sigaction(SIGHUP, &savehup, NULL); - (void)sigaction(SIGINT, &saveint, NULL); - (void)sigaction(SIGQUIT, &savequit, NULL); - (void)sigaction(SIGPIPE, &savepipe, NULL); - (void)sigaction(SIGTERM, &saveterm, NULL); - (void)sigaction(SIGTSTP, &savetstp, NULL); - (void)sigaction(SIGTTIN, &savettin, NULL); - (void)sigaction(SIGTTOU, &savettou, NULL); - if (input != STDIN_FILENO) - (void)close(input); - - /* - * If we were interrupted by a signal, resend it to ourselves - * now that we have restored the signal handlers. - */ - for (i = 0; i < NSIG; i++) { - if (signo[i]) { - kill(getpid(), i); - switch (i) { - case SIGTSTP: - case SIGTTIN: - case SIGTTOU: - need_restart = 1; - } - } - } - if (need_restart) - goto restart; - - if (save_errno) - errno = save_errno; - return(nr == -1 ? NULL : buf); -} -//DEF_WEAK(readpassphrase); - -#if 0 -char * -getpass(const char *prompt) -{ - static char buf[_PASSWORD_LEN + 1]; - - return(readpassphrase(prompt, buf, sizeof(buf), RPP_ECHO_OFF)); -} -#endif - -static void handler(int s) -{ - - signo[s] = 1; -} -#endif /* HAVE_READPASSPHRASE */ diff --git a/dbms/programs/copier/CMakeLists.txt b/dbms/programs/copier/CMakeLists.txt deleted file mode 100644 index ff9ba2f250f..00000000000 --- a/dbms/programs/copier/CMakeLists.txt +++ /dev/null @@ -1,17 +0,0 @@ -set(CLICKHOUSE_COPIER_SOURCES - ${CMAKE_CURRENT_SOURCE_DIR}/ClusterCopierApp.cpp - ${CMAKE_CURRENT_SOURCE_DIR}/ClusterCopier.cpp - ${CMAKE_CURRENT_SOURCE_DIR}/Internals.cpp) - -set(CLICKHOUSE_COPIER_LINK PRIVATE - clickhouse_common_zookeeper - clickhouse_parsers - clickhouse_functions - clickhouse_table_functions - clickhouse_aggregate_functions - clickhouse_dictionaries - string_utils ${Poco_XML_LIBRARY} PUBLIC daemon) - -set(CLICKHOUSE_COPIER_INCLUDE SYSTEM PRIVATE ${PCG_RANDOM_INCLUDE_DIR}) - -clickhouse_program_add(copier) diff --git a/dbms/programs/copier/ClusterCopier.cpp b/dbms/programs/copier/ClusterCopier.cpp deleted file mode 100644 index 35c112da4d1..00000000000 --- a/dbms/programs/copier/ClusterCopier.cpp +++ /dev/null @@ -1,1388 +0,0 @@ -#include "ClusterCopier.h" - -#include "Internals.h" - -#include -#include - -namespace DB -{ - -namespace ErrorCodes -{ - extern const int NOT_IMPLEMENTED; - extern const int LOGICAL_ERROR; - extern const int UNFINISHED; - extern const int BAD_ARGUMENTS; -} - - -void ClusterCopier::init() -{ - auto zookeeper = context.getZooKeeper(); - - task_description_watch_callback = [this] (const Coordination::WatchResponse & response) - { - if (response.error != Coordination::ZOK) - return; - UInt64 version = ++task_descprtion_version; - LOG_DEBUG(log, "Task description should be updated, local version " << version); - }; - - task_description_path = task_zookeeper_path + "/description"; - task_cluster = std::make_unique(task_zookeeper_path, working_database_name); - - reloadTaskDescription(); - task_cluster_initial_config = task_cluster_current_config; - - task_cluster->loadTasks(*task_cluster_initial_config); - context.setClustersConfig(task_cluster_initial_config, task_cluster->clusters_prefix); - - /// Set up shards and their priority - task_cluster->random_engine.seed(task_cluster->random_device()); - for (auto & task_table : task_cluster->table_tasks) - { - task_table.cluster_pull = context.getCluster(task_table.cluster_pull_name); - task_table.cluster_push = 
context.getCluster(task_table.cluster_push_name); - task_table.initShards(task_cluster->random_engine); - } - - LOG_DEBUG(log, "Will process " << task_cluster->table_tasks.size() << " table tasks"); - - /// Do not initialize tables, will make deferred initialization in process() - - zookeeper->createAncestors(getWorkersPathVersion() + "/"); - zookeeper->createAncestors(getWorkersPath() + "/"); -} - -template -decltype(auto) ClusterCopier::retry(T && func, UInt64 max_tries) -{ - std::exception_ptr exception; - - for (UInt64 try_number = 1; try_number <= max_tries; ++try_number) - { - try - { - return func(); - } - catch (...) - { - exception = std::current_exception(); - if (try_number < max_tries) - { - tryLogCurrentException(log, "Will retry"); - std::this_thread::sleep_for(default_sleep_time); - } - } - } - - std::rethrow_exception(exception); -} - - -void ClusterCopier::discoverShardPartitions(const ConnectionTimeouts & timeouts, const TaskShardPtr & task_shard) -{ - TaskTable & task_table = task_shard->task_table; - - LOG_INFO(log, "Discover partitions of shard " << task_shard->getDescription()); - - auto get_partitions = [&] () { return getShardPartitions(timeouts, *task_shard); }; - auto existing_partitions_names = retry(get_partitions, 60); - Strings filtered_partitions_names; - Strings missing_partitions; - - /// Check that user specified correct partition names - auto check_partition_format = [] (const DataTypePtr & type, const String & partition_text_quoted) - { - MutableColumnPtr column_dummy = type->createColumn(); - ReadBufferFromString rb(partition_text_quoted); - - try - { - type->deserializeAsTextQuoted(*column_dummy, rb, FormatSettings()); - } - catch (Exception & e) - { - throw Exception("Partition " + partition_text_quoted + " has incorrect format. 
" + e.displayText(), ErrorCodes::BAD_ARGUMENTS); - } - }; - - if (task_table.has_enabled_partitions) - { - /// Process partition in order specified by - for (const String & partition_name : task_table.enabled_partitions) - { - /// Check that user specified correct partition names - check_partition_format(task_shard->partition_key_column.type, partition_name); - - auto it = existing_partitions_names.find(partition_name); - - /// Do not process partition if it is not in enabled_partitions list - if (it == existing_partitions_names.end()) - { - missing_partitions.emplace_back(partition_name); - continue; - } - - filtered_partitions_names.emplace_back(*it); - } - - for (const String & partition_name : existing_partitions_names) - { - if (!task_table.enabled_partitions_set.count(partition_name)) - { - LOG_DEBUG(log, "Partition " << partition_name << " will not be processed, since it is not in " - << "enabled_partitions of " << task_table.table_id); - } - } - } - else - { - for (const String & partition_name : existing_partitions_names) - filtered_partitions_names.emplace_back(partition_name); - } - - for (const String & partition_name : filtered_partitions_names) - { - task_shard->partition_tasks.emplace(partition_name, ShardPartition(*task_shard, partition_name)); - task_shard->checked_partitions.emplace(partition_name, true); - } - - if (!missing_partitions.empty()) - { - std::stringstream ss; - for (const String & missing_partition : missing_partitions) - ss << " " << missing_partition; - - LOG_WARNING(log, "There are no " << missing_partitions.size() << " partitions from enabled_partitions in shard " - << task_shard->getDescription() << " :" << ss.str()); - } - - LOG_DEBUG(log, "Will copy " << task_shard->partition_tasks.size() << " partitions from shard " << task_shard->getDescription()); -} - -void ClusterCopier::discoverTablePartitions(const ConnectionTimeouts & timeouts, TaskTable & task_table, UInt64 num_threads) -{ - /// Fetch partitions list from a shard - { - ThreadPool thread_pool(num_threads ? num_threads : 2 * getNumberOfPhysicalCPUCores()); - - for (const TaskShardPtr & task_shard : task_table.all_shards) - thread_pool.scheduleOrThrowOnError([this, timeouts, task_shard]() { discoverShardPartitions(timeouts, task_shard); }); - - LOG_DEBUG(log, "Waiting for " << thread_pool.active() << " setup jobs"); - thread_pool.wait(); - } -} - -void ClusterCopier::uploadTaskDescription(const std::string & task_path, const std::string & task_file, const bool force) -{ - auto local_task_description_path = task_path + "/description"; - - String task_config_str; - { - ReadBufferFromFile in(task_file); - readStringUntilEOF(task_config_str, in); - } - if (task_config_str.empty()) - return; - - auto zookeeper = context.getZooKeeper(); - - zookeeper->createAncestors(local_task_description_path); - auto code = zookeeper->tryCreate(local_task_description_path, task_config_str, zkutil::CreateMode::Persistent); - if (code && force) - zookeeper->createOrUpdate(local_task_description_path, task_config_str, zkutil::CreateMode::Persistent); - - LOG_DEBUG(log, "Task description " << ((code && !force) ? 
"not " : "") << "uploaded to " << local_task_description_path << " with result " << code << " ("<< zookeeper->error2string(code) << ")"); -} - -void ClusterCopier::reloadTaskDescription() -{ - auto zookeeper = context.getZooKeeper(); - task_description_watch_zookeeper = zookeeper; - - String task_config_str; - Coordination::Stat stat; - int code; - - zookeeper->tryGetWatch(task_description_path, task_config_str, &stat, task_description_watch_callback, &code); - if (code) - throw Exception("Can't get description node " + task_description_path, ErrorCodes::BAD_ARGUMENTS); - - LOG_DEBUG(log, "Loading description, zxid=" << task_descprtion_current_stat.czxid); - auto config = getConfigurationFromXMLString(task_config_str); - - /// Setup settings - task_cluster->reloadSettings(*config); - context.getSettingsRef() = task_cluster->settings_common; - - task_cluster_current_config = config; - task_descprtion_current_stat = stat; -} - -void ClusterCopier::updateConfigIfNeeded() -{ - UInt64 version_to_update = task_descprtion_version; - bool is_outdated_version = task_descprtion_current_version != version_to_update; - bool is_expired_session = !task_description_watch_zookeeper || task_description_watch_zookeeper->expired(); - - if (!is_outdated_version && !is_expired_session) - return; - - LOG_DEBUG(log, "Updating task description"); - reloadTaskDescription(); - - task_descprtion_current_version = version_to_update; -} - -void ClusterCopier::process(const ConnectionTimeouts & timeouts) -{ - for (TaskTable & task_table : task_cluster->table_tasks) - { - LOG_INFO(log, "Process table task " << task_table.table_id << " with " - << task_table.all_shards.size() << " shards, " << task_table.local_shards.size() << " of them are local ones"); - - if (task_table.all_shards.empty()) - continue; - - /// Discover partitions of each shard and total set of partitions - if (!task_table.has_enabled_partitions) - { - /// If there are no specified enabled_partitions, we must discover them manually - discoverTablePartitions(timeouts, task_table); - - /// After partitions of each shard are initialized, initialize cluster partitions - for (const TaskShardPtr & task_shard : task_table.all_shards) - { - for (const auto & partition_elem : task_shard->partition_tasks) - { - const String & partition_name = partition_elem.first; - task_table.cluster_partitions.emplace(partition_name, ClusterPartition{}); - } - } - - for (auto & partition_elem : task_table.cluster_partitions) - { - const String & partition_name = partition_elem.first; - - for (const TaskShardPtr & task_shard : task_table.all_shards) - task_shard->checked_partitions.emplace(partition_name); - - task_table.ordered_partition_names.emplace_back(partition_name); - } - } - else - { - /// If enabled_partitions are specified, assume that each shard has all partitions - /// We will refine partition set of each shard in future - - for (const String & partition_name : task_table.enabled_partitions) - { - task_table.cluster_partitions.emplace(partition_name, ClusterPartition{}); - task_table.ordered_partition_names.emplace_back(partition_name); - } - } - - task_table.watch.restart(); - - /// Retry table processing - bool table_is_done = false; - for (UInt64 num_table_tries = 0; num_table_tries < max_table_tries; ++num_table_tries) - { - if (tryProcessTable(timeouts, task_table)) - { - table_is_done = true; - break; - } - } - - if (!table_is_done) - { - throw Exception("Too many tries to process table " + task_table.table_id + ". 
Abort remaining execution", - ErrorCodes::UNFINISHED); - } - } -} - -/// Protected section - -zkutil::EphemeralNodeHolder::Ptr ClusterCopier::createTaskWorkerNodeAndWaitIfNeed( - const zkutil::ZooKeeperPtr & zookeeper, - const String & description, - bool unprioritized) -{ - std::chrono::milliseconds current_sleep_time = default_sleep_time; - static constexpr std::chrono::milliseconds max_sleep_time(30000); // 30 sec - - if (unprioritized) - std::this_thread::sleep_for(current_sleep_time); - - String workers_version_path = getWorkersPathVersion(); - String workers_path = getWorkersPath(); - String current_worker_path = getCurrentWorkerNodePath(); - - UInt64 num_bad_version_errors = 0; - - while (true) - { - updateConfigIfNeeded(); - - Coordination::Stat stat; - zookeeper->get(workers_version_path, &stat); - auto version = stat.version; - zookeeper->get(workers_path, &stat); - - if (static_cast(stat.numChildren) >= task_cluster->max_workers) - { - LOG_DEBUG(log, "Too many workers (" << stat.numChildren << ", maximum " << task_cluster->max_workers << ")" - << ". Postpone processing " << description); - - if (unprioritized) - current_sleep_time = std::min(max_sleep_time, current_sleep_time + default_sleep_time); - - std::this_thread::sleep_for(current_sleep_time); - num_bad_version_errors = 0; - } - else - { - Coordination::Requests ops; - ops.emplace_back(zkutil::makeSetRequest(workers_version_path, description, version)); - ops.emplace_back(zkutil::makeCreateRequest(current_worker_path, description, zkutil::CreateMode::Ephemeral)); - Coordination::Responses responses; - auto code = zookeeper->tryMulti(ops, responses); - - if (code == Coordination::ZOK || code == Coordination::ZNODEEXISTS) - return std::make_shared(current_worker_path, *zookeeper, false, false, description); - - if (code == Coordination::ZBADVERSION) - { - ++num_bad_version_errors; - - /// Try to make fast retries - if (num_bad_version_errors > 3) - { - LOG_DEBUG(log, "A concurrent worker has just been added, will check free worker slots again"); - std::chrono::milliseconds random_sleep_time(std::uniform_int_distribution(1, 1000)(task_cluster->random_engine)); - std::this_thread::sleep_for(random_sleep_time); - num_bad_version_errors = 0; - } - } - else - throw Coordination::Exception(code); - } - } -} - -/** Checks that the whole partition of a table was copied. We should do it carefully due to dirty lock. - * State of some task could change during the processing. - * We have to ensure that all shards have the finished state and there is no dirty flag. - * Moreover, we have to check status twice and check zxid, because state can change during the checking. 
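 * Concretely: read every shard status node and remember its pzxid, verify the partition's
 * clean state clock, then re-read the same nodes; if any pzxid changed in between,
 * the partition has to be rechecked.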
- */ -bool ClusterCopier::checkPartitionIsDone(const TaskTable & task_table, const String & partition_name, const TasksShard & shards_with_partition) -{ - LOG_DEBUG(log, "Check that all shards processed partition " << partition_name << " successfully"); - - auto zookeeper = context.getZooKeeper(); - - Strings status_paths; - for (auto & shard : shards_with_partition) - { - ShardPartition & task_shard_partition = shard->partition_tasks.find(partition_name)->second; - status_paths.emplace_back(task_shard_partition.getShardStatusPath()); - } - - std::vector zxid1, zxid2; - - try - { - std::vector get_futures; - for (const String & path : status_paths) - get_futures.emplace_back(zookeeper->asyncGet(path)); - - // Check that state is Finished and remember zxid - for (auto & future : get_futures) - { - auto res = future.get(); - - TaskStateWithOwner status = TaskStateWithOwner::fromString(res.data); - if (status.state != TaskState::Finished) - { - LOG_INFO(log, "The task " << res.data << " is being rewritten by " << status.owner << ". Partition will be rechecked"); - return false; - } - - zxid1.push_back(res.stat.pzxid); - } - - // Check that partition is not dirty - { - CleanStateClock clean_state_clock ( - zookeeper, - task_table.getPartitionIsDirtyPath(partition_name), - task_table.getPartitionIsCleanedPath(partition_name) - ); - Coordination::Stat stat; - LogicalClock task_start_clock; - if (zookeeper->exists(task_table.getPartitionTaskStatusPath(partition_name), &stat)) - task_start_clock = LogicalClock(stat.mzxid); - zookeeper->get(task_table.getPartitionTaskStatusPath(partition_name), &stat); - if (!clean_state_clock.is_clean() || task_start_clock <= clean_state_clock.discovery_zxid) - { - LOG_INFO(log, "Partition " << partition_name << " become dirty"); - return false; - } - } - - get_futures.clear(); - for (const String & path : status_paths) - get_futures.emplace_back(zookeeper->asyncGet(path)); - - // Remember zxid of states again - for (auto & future : get_futures) - { - auto res = future.get(); - zxid2.push_back(res.stat.pzxid); - } - } - catch (const Coordination::Exception & e) - { - LOG_INFO(log, "A ZooKeeper error occurred while checking partition " << partition_name - << ". Will recheck the partition. Error: " << e.displayText()); - return false; - } - - // If all task is finished and zxid is not changed then partition could not become dirty again - for (UInt64 shard_num = 0; shard_num < status_paths.size(); ++shard_num) - { - if (zxid1[shard_num] != zxid2[shard_num]) - { - LOG_INFO(log, "The task " << status_paths[shard_num] << " is being modified now. 
Partition will be rechecked"); - return false; - } - } - - LOG_INFO(log, "Partition " << partition_name << " is copied successfully"); - return true; -} - -ASTPtr ClusterCopier::removeAliasColumnsFromCreateQuery(const ASTPtr & query_ast) -{ - const ASTs & column_asts = query_ast->as().columns_list->columns->children; - auto new_columns = std::make_shared(); - - for (const ASTPtr & column_ast : column_asts) - { - const auto & column = column_ast->as(); - - if (!column.default_specifier.empty()) - { - ColumnDefaultKind kind = columnDefaultKindFromString(column.default_specifier); - if (kind == ColumnDefaultKind::Materialized || kind == ColumnDefaultKind::Alias) - continue; - } - - new_columns->children.emplace_back(column_ast->clone()); - } - - ASTPtr new_query_ast = query_ast->clone(); - auto & new_query = new_query_ast->as(); - - auto new_columns_list = std::make_shared(); - new_columns_list->set(new_columns_list->columns, new_columns); - if (auto indices = query_ast->as()->columns_list->indices) - new_columns_list->set(new_columns_list->indices, indices->clone()); - - new_query.replace(new_query.columns_list, new_columns_list); - - return new_query_ast; -} - -std::shared_ptr ClusterCopier::rewriteCreateQueryStorage(const ASTPtr & create_query_ast, const DatabaseAndTableName & new_table, const ASTPtr & new_storage_ast) -{ - const auto & create = create_query_ast->as(); - auto res = std::make_shared(create); - - if (create.storage == nullptr || new_storage_ast == nullptr) - throw Exception("Storage is not specified", ErrorCodes::LOGICAL_ERROR); - - res->database = new_table.first; - res->table = new_table.second; - - res->children.clear(); - res->set(res->columns_list, create.columns_list->clone()); - res->set(res->storage, new_storage_ast->clone()); - - return res; -} - - -bool ClusterCopier::tryDropPartition(ShardPartition & task_partition, const zkutil::ZooKeeperPtr & zookeeper, const CleanStateClock & clean_state_clock) -{ - if (is_safe_mode) - throw Exception("DROP PARTITION is prohibited in safe mode", ErrorCodes::NOT_IMPLEMENTED); - - TaskTable & task_table = task_partition.task_shard.task_table; - - const String current_shards_path = task_partition.getPartitionShardsPath(); - const String current_partition_active_workers_dir = task_partition.getPartitionActiveWorkersPath(); - const String is_dirty_flag_path = task_partition.getCommonPartitionIsDirtyPath(); - const String dirt_cleaner_path = is_dirty_flag_path + "/cleaner"; - const String is_dirt_cleaned_path = task_partition.getCommonPartitionIsCleanedPath(); - - zkutil::EphemeralNodeHolder::Ptr cleaner_holder; - try - { - cleaner_holder = zkutil::EphemeralNodeHolder::create(dirt_cleaner_path, *zookeeper, host_id); - } - catch (const Coordination::Exception & e) - { - if (e.code == Coordination::ZNODEEXISTS) - { - LOG_DEBUG(log, "Partition " << task_partition.name << " is cleaning now by somebody, sleep"); - std::this_thread::sleep_for(default_sleep_time); - return false; - } - - throw; - } - - Coordination::Stat stat; - if (zookeeper->exists(current_partition_active_workers_dir, &stat)) - { - if (stat.numChildren != 0) - { - LOG_DEBUG(log, "Partition " << task_partition.name << " contains " << stat.numChildren << " active workers while trying to drop it. 
Going to sleep."); - std::this_thread::sleep_for(default_sleep_time); - return false; - } - else - { - zookeeper->remove(current_partition_active_workers_dir); - } - } - - { - zkutil::EphemeralNodeHolder::Ptr active_workers_lock; - try - { - active_workers_lock = zkutil::EphemeralNodeHolder::create(current_partition_active_workers_dir, *zookeeper, host_id); - } - catch (const Coordination::Exception & e) - { - if (e.code == Coordination::ZNODEEXISTS) - { - LOG_DEBUG(log, "Partition " << task_partition.name << " is being filled now by somebody, sleep"); - return false; - } - - throw; - } - - // Lock the dirty flag - zookeeper->set(is_dirty_flag_path, host_id, clean_state_clock.discovery_version.value()); - zookeeper->tryRemove(task_partition.getPartitionCleanStartPath()); - CleanStateClock my_clock(zookeeper, is_dirty_flag_path, is_dirt_cleaned_path); - - /// Remove all status nodes - { - Strings children; - if (zookeeper->tryGetChildren(current_shards_path, children) == Coordination::ZOK) - for (const auto & child : children) - { - zookeeper->removeRecursive(current_shards_path + "/" + child); - } - } - - String query = "ALTER TABLE " + getQuotedTable(task_table.table_push); - query += " DROP PARTITION " + task_partition.name + ""; - - /// TODO: use this statement after servers will be updated up to 1.1.54310 - // query += " DROP PARTITION ID '" + task_partition.name + "'"; - - ClusterPtr & cluster_push = task_table.cluster_push; - Settings settings_push = task_cluster->settings_push; - - /// It is important, DROP PARTITION must be done synchronously - settings_push.replication_alter_partitions_sync = 2; - - LOG_DEBUG(log, "Execute distributed DROP PARTITION: " << query); - /// Limit number of max executing replicas to 1 - UInt64 num_shards = executeQueryOnCluster(cluster_push, query, nullptr, &settings_push, PoolMode::GET_ONE, 1); - - if (num_shards < cluster_push->getShardCount()) - { - LOG_INFO(log, "DROP PARTITION wasn't successfully executed on " << cluster_push->getShardCount() - num_shards << " shards"); - return false; - } - - /// Update the locking node - if (!my_clock.is_stale()) - { - zookeeper->set(is_dirty_flag_path, host_id, my_clock.discovery_version.value()); - if (my_clock.clean_state_version) - zookeeper->set(is_dirt_cleaned_path, host_id, my_clock.clean_state_version.value()); - else - zookeeper->create(is_dirt_cleaned_path, host_id, zkutil::CreateMode::Persistent); - } - else - { - LOG_DEBUG(log, "Clean state is altered when dropping the partition, cowardly bailing"); - /// clean state is stale - return false; - } - - LOG_INFO(log, "Partition " << task_partition.name << " was dropped on cluster " << task_table.cluster_push_name); - if (zookeeper->tryCreate(current_shards_path, host_id, zkutil::CreateMode::Persistent) == Coordination::ZNODEEXISTS) - zookeeper->set(current_shards_path, host_id); - } - - LOG_INFO(log, "Partition " << task_partition.name << " is safe for work now."); - return true; -} - -bool ClusterCopier::tryProcessTable(const ConnectionTimeouts & timeouts, TaskTable & task_table) -{ - /// An heuristic: if previous shard is already done, then check next one without sleeps due to max_workers constraint - bool previous_shard_is_instantly_finished = false; - - /// Process each partition that is present in cluster - for (const String & partition_name : task_table.ordered_partition_names) - { - if (!task_table.cluster_partitions.count(partition_name)) - throw Exception("There are no expected partition " + partition_name + ". 
It is a bug", ErrorCodes::LOGICAL_ERROR); - - ClusterPartition & cluster_partition = task_table.cluster_partitions[partition_name]; - - Stopwatch watch; - TasksShard expected_shards; - UInt64 num_failed_shards = 0; - - ++cluster_partition.total_tries; - - LOG_DEBUG(log, "Processing partition " << partition_name << " for the whole cluster"); - - /// Process each source shard having current partition and copy current partition - /// NOTE: shards are sorted by "distance" to current host - bool has_shard_to_process = false; - for (const TaskShardPtr & shard : task_table.all_shards) - { - /// Does shard have a node with current partition? - if (shard->partition_tasks.count(partition_name) == 0) - { - /// If not, did we check existence of that partition previously? - if (shard->checked_partitions.count(partition_name) == 0) - { - auto check_shard_has_partition = [&] () { return checkShardHasPartition(timeouts, *shard, partition_name); }; - bool has_partition = retry(check_shard_has_partition); - - shard->checked_partitions.emplace(partition_name); - - if (has_partition) - { - shard->partition_tasks.emplace(partition_name, ShardPartition(*shard, partition_name)); - LOG_DEBUG(log, "Discovered partition " << partition_name << " in shard " << shard->getDescription()); - } - else - { - LOG_DEBUG(log, "Found that shard " << shard->getDescription() << " does not contain current partition " << partition_name); - continue; - } - } - else - { - /// We have already checked that partition, but did not discover it - previous_shard_is_instantly_finished = true; - continue; - } - } - - auto it_shard_partition = shard->partition_tasks.find(partition_name); - if (it_shard_partition == shard->partition_tasks.end()) - throw Exception("There are no such partition in a shard. This is a bug.", ErrorCodes::LOGICAL_ERROR); - auto & partition = it_shard_partition->second; - - expected_shards.emplace_back(shard); - - /// Do not sleep if there is a sequence of already processed shards to increase startup - bool is_unprioritized_task = !previous_shard_is_instantly_finished && shard->priority.is_remote; - PartitionTaskStatus task_status = PartitionTaskStatus::Error; - bool was_error = false; - has_shard_to_process = true; - for (UInt64 try_num = 0; try_num < max_shard_partition_tries; ++try_num) - { - task_status = tryProcessPartitionTask(timeouts, partition, is_unprioritized_task); - - /// Exit if success - if (task_status == PartitionTaskStatus::Finished) - break; - - was_error = true; - - /// Skip if the task is being processed by someone - if (task_status == PartitionTaskStatus::Active) - break; - - /// Repeat on errors - std::this_thread::sleep_for(default_sleep_time); - } - - if (task_status == PartitionTaskStatus::Error) - ++num_failed_shards; - - previous_shard_is_instantly_finished = !was_error; - } - - cluster_partition.elapsed_time_seconds += watch.elapsedSeconds(); - - /// Check that whole cluster partition is done - /// Firstly check the number of failed partition tasks, then look into ZooKeeper and ensure that each partition is done - bool partition_is_done = num_failed_shards == 0; - try - { - partition_is_done = - !has_shard_to_process - || (partition_is_done && checkPartitionIsDone(task_table, partition_name, expected_shards)); - } - catch (...) 
- { - tryLogCurrentException(log); - partition_is_done = false; - } - - if (partition_is_done) - { - task_table.finished_cluster_partitions.emplace(partition_name); - - task_table.bytes_copied += cluster_partition.bytes_copied; - task_table.rows_copied += cluster_partition.rows_copied; - double elapsed = cluster_partition.elapsed_time_seconds; - - LOG_INFO(log, "It took " << std::fixed << std::setprecision(2) << elapsed << " seconds to copy partition " << partition_name - << ": " << formatReadableSizeWithDecimalSuffix(cluster_partition.bytes_copied) << " uncompressed bytes" - << ", " << formatReadableQuantity(cluster_partition.rows_copied) << " rows" - << " and " << cluster_partition.blocks_copied << " source blocks are copied"); - - if (cluster_partition.rows_copied) - { - LOG_INFO(log, "Average partition speed: " - << formatReadableSizeWithDecimalSuffix(cluster_partition.bytes_copied / elapsed) << " per second."); - } - - if (task_table.rows_copied) - { - LOG_INFO(log, "Average table " << task_table.table_id << " speed: " - << formatReadableSizeWithDecimalSuffix(task_table.bytes_copied / elapsed) << " per second."); - } - } - } - - UInt64 required_partitions = task_table.cluster_partitions.size(); - UInt64 finished_partitions = task_table.finished_cluster_partitions.size(); - bool table_is_done = finished_partitions >= required_partitions; - - if (!table_is_done) - { - LOG_INFO(log, "Table " + task_table.table_id + " is not processed yet." - << "Copied " << finished_partitions << " of " << required_partitions << ", will retry"); - } - - return table_is_done; -} - - -PartitionTaskStatus ClusterCopier::tryProcessPartitionTask(const ConnectionTimeouts & timeouts, ShardPartition & task_partition, bool is_unprioritized_task) -{ - PartitionTaskStatus res; - - try - { - res = processPartitionTaskImpl(timeouts, task_partition, is_unprioritized_task); - } - catch (...) - { - tryLogCurrentException(log, "An error occurred while processing partition " + task_partition.name); - res = PartitionTaskStatus::Error; - } - - /// At the end of each task check if the config is updated - try - { - updateConfigIfNeeded(); - } - catch (...) 
- { - tryLogCurrentException(log, "An error occurred while updating the config"); - } - - return res; -} - -PartitionTaskStatus ClusterCopier::processPartitionTaskImpl(const ConnectionTimeouts & timeouts, ShardPartition & task_partition, bool is_unprioritized_task) -{ - TaskShard & task_shard = task_partition.task_shard; - TaskTable & task_table = task_shard.task_table; - ClusterPartition & cluster_partition = task_table.getClusterPartition(task_partition.name); - - /// We need to update table definitions for each partition, it could be changed after ALTER - createShardInternalTables(timeouts, task_shard); - - auto zookeeper = context.getZooKeeper(); - - const String is_dirty_flag_path = task_partition.getCommonPartitionIsDirtyPath(); - const String is_dirt_cleaned_path = task_partition.getCommonPartitionIsCleanedPath(); - const String current_task_is_active_path = task_partition.getActiveWorkerPath(); - const String current_task_status_path = task_partition.getShardStatusPath(); - - /// Auxiliary functions: - - /// Creates is_dirty node to initialize DROP PARTITION - auto create_is_dirty_node = [&, this] (const CleanStateClock & clock) - { - if (clock.is_stale()) - LOG_DEBUG(log, "Clean state clock is stale while setting dirty flag, cowardly bailing"); - else if (!clock.is_clean()) - LOG_DEBUG(log, "Thank you, Captain Obvious"); - else if (clock.discovery_version) - { - LOG_DEBUG(log, "Updating clean state clock"); - zookeeper->set(is_dirty_flag_path, host_id, clock.discovery_version.value()); - } - else - { - LOG_DEBUG(log, "Creating clean state clock"); - zookeeper->create(is_dirty_flag_path, host_id, zkutil::CreateMode::Persistent); - } - }; - - /// Returns SELECT query filtering current partition and applying user filter - auto get_select_query = [&] (const DatabaseAndTableName & from_table, const String & fields, String limit = "") - { - String query; - query += "SELECT " + fields + " FROM " + getQuotedTable(from_table); - /// TODO: Bad, it is better to rewrite with ASTLiteral(partition_key_field) - query += " WHERE (" + queryToString(task_table.engine_push_partition_key_ast) + " = (" + task_partition.name + " AS partition_key))"; - if (!task_table.where_condition_str.empty()) - query += " AND (" + task_table.where_condition_str + ")"; - if (!limit.empty()) - query += " LIMIT " + limit; - - ParserQuery p_query(query.data() + query.size()); - return parseQuery(p_query, query, 0); - }; - - /// Load balancing - auto worker_node_holder = createTaskWorkerNodeAndWaitIfNeed(zookeeper, current_task_status_path, is_unprioritized_task); - - LOG_DEBUG(log, "Processing " << current_task_status_path); - - CleanStateClock clean_state_clock (zookeeper, is_dirty_flag_path, is_dirt_cleaned_path); - - LogicalClock task_start_clock; - { - Coordination::Stat stat; - if (zookeeper->exists(task_partition.getPartitionShardsPath(), &stat)) - task_start_clock = LogicalClock(stat.mzxid); - } - - /// Do not start if partition is dirty, try to clean it - if (clean_state_clock.is_clean() - && (!task_start_clock.hasHappened() || clean_state_clock.discovery_zxid <= task_start_clock)) - { - LOG_DEBUG(log, "Partition " << task_partition.name << " appears to be clean"); - zookeeper->createAncestors(current_task_status_path); - } - else - { - LOG_DEBUG(log, "Partition " << task_partition.name << " is dirty, try to drop it"); - - try - { - tryDropPartition(task_partition, zookeeper, clean_state_clock); - } - catch (...) 
- { - tryLogCurrentException(log, "An error occurred when clean partition"); - } - - return PartitionTaskStatus::Error; - } - - /// Create ephemeral node to mark that we are active and process the partition - zookeeper->createAncestors(current_task_is_active_path); - zkutil::EphemeralNodeHolderPtr partition_task_node_holder; - try - { - partition_task_node_holder = zkutil::EphemeralNodeHolder::create(current_task_is_active_path, *zookeeper, host_id); - } - catch (const Coordination::Exception & e) - { - if (e.code == Coordination::ZNODEEXISTS) - { - LOG_DEBUG(log, "Someone is already processing " << current_task_is_active_path); - return PartitionTaskStatus::Active; - } - - throw; - } - - /// Exit if task has been already processed; - /// create blocking node to signal cleaning up if it is abandoned - { - String status_data; - if (zookeeper->tryGet(current_task_status_path, status_data)) - { - TaskStateWithOwner status = TaskStateWithOwner::fromString(status_data); - if (status.state == TaskState::Finished) - { - LOG_DEBUG(log, "Task " << current_task_status_path << " has been successfully executed by " << status.owner); - return PartitionTaskStatus::Finished; - } - - // Task is abandoned, initialize DROP PARTITION - LOG_DEBUG(log, "Task " << current_task_status_path << " has not been successfully finished by " << status.owner << ". Partition will be dropped and refilled."); - - create_is_dirty_node(clean_state_clock); - return PartitionTaskStatus::Error; - } - } - - /// Check that destination partition is empty if we are first worker - /// NOTE: this check is incorrect if pull and push tables have different partition key! - String clean_start_status; - if (!zookeeper->tryGet(task_partition.getPartitionCleanStartPath(), clean_start_status) || clean_start_status != "ok") - { - zookeeper->createIfNotExists(task_partition.getPartitionCleanStartPath(), ""); - auto checker = zkutil::EphemeralNodeHolder::create(task_partition.getPartitionCleanStartPath() + "/checker", *zookeeper, host_id); - // Maybe we are the first worker - ASTPtr query_select_ast = get_select_query(task_shard.table_split_shard, "count()"); - UInt64 count; - { - Context local_context = context; - // Use pull (i.e. readonly) settings, but fetch data from destination servers - local_context.getSettingsRef() = task_cluster->settings_pull; - local_context.getSettingsRef().skip_unavailable_shards = true; - - Block block = getBlockWithAllStreamData(InterpreterFactory::get(query_select_ast, local_context)->execute().in); - count = (block) ? block.safeGetByPosition(0).column->getUInt(0) : 0; - } - - if (count != 0) - { - Coordination::Stat stat_shards; - zookeeper->get(task_partition.getPartitionShardsPath(), &stat_shards); - - /// NOTE: partition is still fresh if dirt discovery happens before cleaning - if (stat_shards.numChildren == 0) - { - LOG_WARNING(log, "There are no workers for partition " << task_partition.name - << ", but destination table contains " << count << " rows" - << ". 
Partition will be dropped and refilled."); - - create_is_dirty_node(clean_state_clock); - return PartitionTaskStatus::Error; - } - } - zookeeper->set(task_partition.getPartitionCleanStartPath(), "ok"); - } - /// At this point, we need to sync that the destination table is clean - /// before any actual work - - /// Try start processing, create node about it - { - String start_state = TaskStateWithOwner::getData(TaskState::Started, host_id); - CleanStateClock new_clean_state_clock (zookeeper, is_dirty_flag_path, is_dirt_cleaned_path); - if (clean_state_clock != new_clean_state_clock) - { - LOG_INFO(log, "Partition " << task_partition.name << " clean state changed, cowardly bailing"); - return PartitionTaskStatus::Error; - } - else if (!new_clean_state_clock.is_clean()) - { - LOG_INFO(log, "Partition " << task_partition.name << " is dirty and will be dropped and refilled"); - create_is_dirty_node(new_clean_state_clock); - return PartitionTaskStatus::Error; - } - zookeeper->create(current_task_status_path, start_state, zkutil::CreateMode::Persistent); - } - - /// Try create table (if not exists) on each shard - { - auto create_query_push_ast = rewriteCreateQueryStorage(task_shard.current_pull_table_create_query, task_table.table_push, task_table.engine_push_ast); - create_query_push_ast->as().if_not_exists = true; - String query = queryToString(create_query_push_ast); - - LOG_DEBUG(log, "Create destination tables. Query: " << query); - UInt64 shards = executeQueryOnCluster(task_table.cluster_push, query, create_query_push_ast, &task_cluster->settings_push, - PoolMode::GET_MANY); - LOG_DEBUG(log, "Destination tables " << getQuotedTable(task_table.table_push) << " have been created on " << shards - << " shards of " << task_table.cluster_push->getShardCount()); - } - - /// Do the copying - { - bool inject_fault = false; - if (copy_fault_probability > 0) - { - double value = std::uniform_real_distribution<>(0, 1)(task_table.task_cluster.random_engine); - inject_fault = value < copy_fault_probability; - } - - // Select all fields - ASTPtr query_select_ast = get_select_query(task_shard.table_read_shard, "*", inject_fault ? 
"1" : ""); - - LOG_DEBUG(log, "Executing SELECT query and pull from " << task_shard.getDescription() - << " : " << queryToString(query_select_ast)); - - ASTPtr query_insert_ast; - { - String query; - query += "INSERT INTO " + getQuotedTable(task_shard.table_split_shard) + " VALUES "; - - ParserQuery p_query(query.data() + query.size()); - query_insert_ast = parseQuery(p_query, query, 0); - - LOG_DEBUG(log, "Executing INSERT query: " << query); - } - - try - { - /// Custom INSERT SELECT implementation - Context context_select = context; - context_select.getSettingsRef() = task_cluster->settings_pull; - - Context context_insert = context; - context_insert.getSettingsRef() = task_cluster->settings_push; - - BlockInputStreamPtr input; - BlockOutputStreamPtr output; - { - BlockIO io_select = InterpreterFactory::get(query_select_ast, context_select)->execute(); - BlockIO io_insert = InterpreterFactory::get(query_insert_ast, context_insert)->execute(); - - input = io_select.in; - output = io_insert.out; - } - - /// Fail-fast optimization to abort copying when the current clean state expires - std::future future_is_dirty_checker; - - Stopwatch watch(CLOCK_MONOTONIC_COARSE); - constexpr UInt64 check_period_milliseconds = 500; - - /// Will asynchronously check that ZooKeeper connection and is_dirty flag appearing while copying data - auto cancel_check = [&] () - { - if (zookeeper->expired()) - throw Exception("ZooKeeper session is expired, cancel INSERT SELECT", ErrorCodes::UNFINISHED); - - if (!future_is_dirty_checker.valid()) - future_is_dirty_checker = zookeeper->asyncExists(is_dirty_flag_path); - - /// check_period_milliseconds should less than average insert time of single block - /// Otherwise, the insertion will slow a little bit - if (watch.elapsedMilliseconds() >= check_period_milliseconds) - { - Coordination::ExistsResponse status = future_is_dirty_checker.get(); - - if (status.error != Coordination::ZNONODE) - { - LogicalClock dirt_discovery_epoch (status.stat.mzxid); - if (dirt_discovery_epoch == clean_state_clock.discovery_zxid) - return false; - throw Exception("Partition is dirty, cancel INSERT SELECT", ErrorCodes::UNFINISHED); - } - } - - return false; - }; - - /// Update statistics - /// It is quite rough: bytes_copied don't take into account DROP PARTITION. - auto update_stats = [&cluster_partition] (const Block & block) - { - cluster_partition.bytes_copied += block.bytes(); - cluster_partition.rows_copied += block.rows(); - cluster_partition.blocks_copied += 1; - }; - - /// Main work is here - copyData(*input, *output, cancel_check, update_stats); - - // Just in case - if (future_is_dirty_checker.valid()) - future_is_dirty_checker.get(); - - if (inject_fault) - throw Exception("Copy fault injection is activated", ErrorCodes::UNFINISHED); - } - catch (...) 
- { - tryLogCurrentException(log, "An error occurred during copying, partition will be marked as dirty"); - return PartitionTaskStatus::Error; - } - } - - /// Finalize the processing, change state of current partition task (and also check is_dirty flag) - { - String state_finished = TaskStateWithOwner::getData(TaskState::Finished, host_id); - CleanStateClock new_clean_state_clock (zookeeper, is_dirty_flag_path, is_dirt_cleaned_path); - if (clean_state_clock != new_clean_state_clock) - { - LOG_INFO(log, "Partition " << task_partition.name << " clean state changed, cowardly bailing"); - return PartitionTaskStatus::Error; - } - else if (!new_clean_state_clock.is_clean()) - { - LOG_INFO(log, "Partition " << task_partition.name << " became dirty and will be dropped and refilled"); - create_is_dirty_node(new_clean_state_clock); - return PartitionTaskStatus::Error; - } - zookeeper->set(current_task_status_path, state_finished, 0); - } - - LOG_INFO(log, "Partition " << task_partition.name << " copied"); - return PartitionTaskStatus::Finished; -} - -void ClusterCopier::dropAndCreateLocalTable(const ASTPtr & create_ast) -{ - const auto & create = create_ast->as(); - dropLocalTableIfExists({create.database, create.table}); - - InterpreterCreateQuery interpreter(create_ast, context); - interpreter.execute(); -} - -void ClusterCopier::dropLocalTableIfExists(const DatabaseAndTableName & table_name) const -{ - auto drop_ast = std::make_shared(); - drop_ast->if_exists = true; - drop_ast->database = table_name.first; - drop_ast->table = table_name.second; - - InterpreterDropQuery interpreter(drop_ast, context); - interpreter.execute(); -} - -String ClusterCopier::getRemoteCreateTable(const DatabaseAndTableName & table, Connection & connection, const Settings * settings) -{ - String query = "SHOW CREATE TABLE " + getQuotedTable(table); - Block block = getBlockWithAllStreamData(std::make_shared( - connection, query, InterpreterShowCreateQuery::getSampleBlock(), context, settings)); - - return typeid_cast(*block.safeGetByPosition(0).column).getDataAt(0).toString(); -} - -ASTPtr ClusterCopier::getCreateTableForPullShard(const ConnectionTimeouts & timeouts, TaskShard & task_shard) -{ - /// Fetch and parse (possibly) new definition - auto connection_entry = task_shard.info.pool->get(timeouts, &task_cluster->settings_pull); - String create_query_pull_str = getRemoteCreateTable( - task_shard.task_table.table_pull, - *connection_entry, - &task_cluster->settings_pull); - - ParserCreateQuery parser_create_query; - return parseQuery(parser_create_query, create_query_pull_str, 0); -} - -void ClusterCopier::createShardInternalTables(const ConnectionTimeouts & timeouts, TaskShard & task_shard, bool create_split) -{ - TaskTable & task_table = task_shard.task_table; - - /// We need to update table definitions for each part, it could be changed after ALTER - task_shard.current_pull_table_create_query = getCreateTableForPullShard(timeouts, task_shard); - - /// Create local Distributed tables: - /// a table fetching data from current shard and a table inserting data to the whole destination cluster - String read_shard_prefix = ".read_shard_" + toString(task_shard.indexInCluster()) + "."; - String split_shard_prefix = ".split."; - task_shard.table_read_shard = DatabaseAndTableName(working_database_name, read_shard_prefix + task_table.table_id); - task_shard.table_split_shard = DatabaseAndTableName(working_database_name, split_shard_prefix + task_table.table_id); - - /// Create special cluster with single shard - String 
shard_read_cluster_name = read_shard_prefix + task_table.cluster_pull_name; - ClusterPtr cluster_pull_current_shard = task_table.cluster_pull->getClusterWithSingleShard(task_shard.indexInCluster()); - context.setCluster(shard_read_cluster_name, cluster_pull_current_shard); - - auto storage_shard_ast = createASTStorageDistributed(shard_read_cluster_name, task_table.table_pull.first, task_table.table_pull.second); - const auto & storage_split_ast = task_table.engine_split_ast; - - auto create_query_ast = removeAliasColumnsFromCreateQuery(task_shard.current_pull_table_create_query); - auto create_table_pull_ast = rewriteCreateQueryStorage(create_query_ast, task_shard.table_read_shard, storage_shard_ast); - auto create_table_split_ast = rewriteCreateQueryStorage(create_query_ast, task_shard.table_split_shard, storage_split_ast); - - dropAndCreateLocalTable(create_table_pull_ast); - - if (create_split) - dropAndCreateLocalTable(create_table_split_ast); -} - - -std::set ClusterCopier::getShardPartitions(const ConnectionTimeouts & timeouts, TaskShard & task_shard) -{ - createShardInternalTables(timeouts, task_shard, false); - - TaskTable & task_table = task_shard.task_table; - - String query; - { - WriteBufferFromOwnString wb; - wb << "SELECT DISTINCT " << queryToString(task_table.engine_push_partition_key_ast) << " AS partition FROM" - << " " << getQuotedTable(task_shard.table_read_shard) << " ORDER BY partition DESC"; - query = wb.str(); - } - - ParserQuery parser_query(query.data() + query.size()); - ASTPtr query_ast = parseQuery(parser_query, query, 0); - - LOG_DEBUG(log, "Computing destination partition set, executing query: " << query); - - Context local_context = context; - local_context.setSettings(task_cluster->settings_pull); - Block block = getBlockWithAllStreamData(InterpreterFactory::get(query_ast, local_context)->execute().in); - - std::set res; - if (block) - { - ColumnWithTypeAndName & column = block.getByPosition(0); - task_shard.partition_key_column = column; - - for (size_t i = 0; i < column.column->size(); ++i) - { - WriteBufferFromOwnString wb; - column.type->serializeAsTextQuoted(*column.column, i, wb, FormatSettings()); - res.emplace(wb.str()); - } - } - - LOG_DEBUG(log, "There are " << res.size() << " destination partitions in shard " << task_shard.getDescription()); - - return res; -} - -bool ClusterCopier::checkShardHasPartition(const ConnectionTimeouts & timeouts, TaskShard & task_shard, const String & partition_quoted_name) -{ - createShardInternalTables(timeouts, task_shard, false); - - TaskTable & task_table = task_shard.task_table; - - std::string query = "SELECT 1 FROM " + getQuotedTable(task_shard.table_read_shard) - + " WHERE (" + queryToString(task_table.engine_push_partition_key_ast) + " = (" + partition_quoted_name + " AS partition_key))"; - - if (!task_table.where_condition_str.empty()) - query += " AND (" + task_table.where_condition_str + ")"; - - query += " LIMIT 1"; - - LOG_DEBUG(log, "Checking shard " << task_shard.getDescription() << " for partition " - << partition_quoted_name << " existence, executing query: " << query); - - ParserQuery parser_query(query.data() + query.size()); - ASTPtr query_ast = parseQuery(parser_query, query, 0); - - Context local_context = context; - local_context.setSettings(task_cluster->settings_pull); - return InterpreterFactory::get(query_ast, local_context)->execute().in->read().rows() != 0; -} - - -UInt64 ClusterCopier::executeQueryOnCluster( - const ClusterPtr & cluster, - const String & query, - const ASTPtr & 
query_ast_, - const Settings * settings, - PoolMode pool_mode, - UInt64 max_successful_executions_per_shard) const -{ - auto num_shards = cluster->getShardsInfo().size(); - std::vector per_shard_num_successful_replicas(num_shards, 0); - - ASTPtr query_ast; - if (query_ast_ == nullptr) - { - ParserQuery p_query(query.data() + query.size()); - query_ast = parseQuery(p_query, query, 0); - } - else - query_ast = query_ast_; - - - /// We need to execute query on one replica at least - auto do_for_shard = [&] (UInt64 shard_index) - { - const Cluster::ShardInfo & shard = cluster->getShardsInfo().at(shard_index); - UInt64 & num_successful_executions = per_shard_num_successful_replicas.at(shard_index); - num_successful_executions = 0; - - auto increment_and_check_exit = [&] () - { - ++num_successful_executions; - return max_successful_executions_per_shard && num_successful_executions >= max_successful_executions_per_shard; - }; - - UInt64 num_replicas = cluster->getShardsAddresses().at(shard_index).size(); - UInt64 num_local_replicas = shard.getLocalNodeCount(); - UInt64 num_remote_replicas = num_replicas - num_local_replicas; - - /// In that case we don't have local replicas, but do it just in case - for (UInt64 i = 0; i < num_local_replicas; ++i) - { - auto interpreter = InterpreterFactory::get(query_ast, context); - interpreter->execute(); - - if (increment_and_check_exit()) - return; - } - - /// Will try to make as many as possible queries - if (shard.hasRemoteConnections()) - { - Settings current_settings = settings ? *settings : task_cluster->settings_common; - current_settings.max_parallel_replicas = num_remote_replicas ? num_remote_replicas : 1; - - auto timeouts = ConnectionTimeouts::getTCPTimeoutsWithFailover(current_settings).getSaturated(current_settings.max_execution_time); - auto connections = shard.pool->getMany(timeouts, ¤t_settings, pool_mode); - - for (auto & connection : connections) - { - if (connection.isNull()) - continue; - - try - { - /// CREATE TABLE and DROP PARTITION queries return empty block - RemoteBlockInputStream stream{*connection, query, Block{}, context, ¤t_settings}; - NullBlockOutputStream output{Block{}}; - copyData(stream, output); - - if (increment_and_check_exit()) - return; - } - catch (const Exception &) - { - LOG_INFO(log, getCurrentExceptionMessage(false, true)); - } - } - } - }; - - { - ThreadPool thread_pool(std::min(num_shards, getNumberOfPhysicalCPUCores())); - - for (UInt64 shard_index = 0; shard_index < num_shards; ++shard_index) - thread_pool.scheduleOrThrowOnError([=] { do_for_shard(shard_index); }); - - thread_pool.wait(); - } - - UInt64 successful_shards = 0; - for (UInt64 num_replicas : per_shard_num_successful_replicas) - successful_shards += (num_replicas > 0); - - return successful_shards; -} - -} diff --git a/dbms/programs/copier/ClusterCopier.h b/dbms/programs/copier/ClusterCopier.h deleted file mode 100644 index 90a003a2528..00000000000 --- a/dbms/programs/copier/ClusterCopier.h +++ /dev/null @@ -1,175 +0,0 @@ -#pragma once - -#include "Aliases.h" -#include "Internals.h" -#include "TaskCluster.h" -#include "TaskTableAndShard.h" -#include "ShardPartition.h" -#include "ZooKeeperStaff.h" - -namespace DB -{ - -class ClusterCopier -{ -public: - - ClusterCopier(const String & task_path_, - const String & host_id_, - const String & proxy_database_name_, - Context & context_) - : - task_zookeeper_path(task_path_), - host_id(host_id_), - working_database_name(proxy_database_name_), - context(context_), - log(&Poco::Logger::get("ClusterCopier")) 
{} - - void init(); - - template - decltype(auto) retry(T && func, UInt64 max_tries = 100); - - void discoverShardPartitions(const ConnectionTimeouts & timeouts, const TaskShardPtr & task_shard) ; - - /// Compute set of partitions, assume set of partitions aren't changed during the processing - void discoverTablePartitions(const ConnectionTimeouts & timeouts, TaskTable & task_table, UInt64 num_threads = 0); - - void uploadTaskDescription(const std::string & task_path, const std::string & task_file, const bool force); - - void reloadTaskDescription(); - - void updateConfigIfNeeded(); - - void process(const ConnectionTimeouts & timeouts); - - /// Disables DROP PARTITION commands that used to clear data after errors - void setSafeMode(bool is_safe_mode_ = true) - { - is_safe_mode = is_safe_mode_; - } - - void setCopyFaultProbability(double copy_fault_probability_) - { - copy_fault_probability = copy_fault_probability_; - } - - -protected: - - String getWorkersPath() const - { - return task_cluster->task_zookeeper_path + "/task_active_workers"; - } - - String getWorkersPathVersion() const - { - return getWorkersPath() + "_version"; - } - - String getCurrentWorkerNodePath() const - { - return getWorkersPath() + "/" + host_id; - } - - zkutil::EphemeralNodeHolder::Ptr createTaskWorkerNodeAndWaitIfNeed( - const zkutil::ZooKeeperPtr &zookeeper, - const String &description, - bool unprioritized); - - /** Checks that the whole partition of a table was copied. We should do it carefully due to dirty lock. - * State of some task could change during the processing. - * We have to ensure that all shards have the finished state and there is no dirty flag. - * Moreover, we have to check status twice and check zxid, because state can change during the checking. - */ - bool checkPartitionIsDone(const TaskTable & task_table, const String & partition_name, - const TasksShard & shards_with_partition); - - /// Removes MATERIALIZED and ALIAS columns from create table query - static ASTPtr removeAliasColumnsFromCreateQuery(const ASTPtr &query_ast); - - /// Replaces ENGINE and table name in a create query - std::shared_ptr - rewriteCreateQueryStorage(const ASTPtr & create_query_ast, const DatabaseAndTableName & new_table, - const ASTPtr & new_storage_ast); - - bool tryDropPartition(ShardPartition & task_partition, - const zkutil::ZooKeeperPtr & zookeeper, - const CleanStateClock & clean_state_clock); - - - static constexpr UInt64 max_table_tries = 1000; - static constexpr UInt64 max_shard_partition_tries = 600; - - bool tryProcessTable(const ConnectionTimeouts & timeouts, TaskTable & task_table); - - PartitionTaskStatus tryProcessPartitionTask(const ConnectionTimeouts & timeouts, - ShardPartition & task_partition, - bool is_unprioritized_task); - - PartitionTaskStatus processPartitionTaskImpl(const ConnectionTimeouts & timeouts, - ShardPartition & task_partition, - bool is_unprioritized_task); - - void dropAndCreateLocalTable(const ASTPtr & create_ast); - - void dropLocalTableIfExists (const DatabaseAndTableName & table_name) const; - - String getRemoteCreateTable(const DatabaseAndTableName & table, - Connection & connection, - const Settings * settings = nullptr); - - ASTPtr getCreateTableForPullShard(const ConnectionTimeouts & timeouts, - TaskShard & task_shard); - - void createShardInternalTables(const ConnectionTimeouts & timeouts, - TaskShard & task_shard, - bool create_split = true); - - std::set getShardPartitions(const ConnectionTimeouts & timeouts, - TaskShard & task_shard); - - bool 
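The header declares a generic retry(func, max_tries) helper whose body is not visible in this hunk. The following is only a plausible sketch of such a wrapper; the fixed 100 ms backoff and the rethrow-last-error behaviour are assumptions, not the actual implementation:

#include <chrono>
#include <exception>
#include <iostream>
#include <stdexcept>
#include <thread>

// Call `func` until it succeeds or `max_tries` attempts have been made,
// rethrowing the last error if every attempt fails.
template <typename F>
decltype(auto) retry(F && func, unsigned max_tries = 100)
{
    std::exception_ptr last;
    for (unsigned attempt = 1; attempt <= max_tries; ++attempt)
    {
        try
        {
            return func();
        }
        catch (...)
        {
            last = std::current_exception();
            std::this_thread::sleep_for(std::chrono::milliseconds(100));  // assumed backoff
        }
    }
    std::rethrow_exception(last);
}

int main()
{
    int calls = 0;
    int value = retry([&]
    {
        if (++calls < 3)
            throw std::runtime_error("transient");
        return 42;
    }, 5);
    std::cout << value << " after " << calls << " calls\n";
}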
checkShardHasPartition(const ConnectionTimeouts & timeouts, - TaskShard & task_shard, - const String & partition_quoted_name); - - /** Executes simple query (without output streams, for example DDL queries) on each shard of the cluster - * Returns number of shards for which at least one replica executed query successfully - */ - UInt64 executeQueryOnCluster( - const ClusterPtr & cluster, - const String & query, - const ASTPtr & query_ast_ = nullptr, - const Settings * settings = nullptr, - PoolMode pool_mode = PoolMode::GET_ALL, - UInt64 max_successful_executions_per_shard = 0) const; - -private: - String task_zookeeper_path; - String task_description_path; - String host_id; - String working_database_name; - - /// Auto update config stuff - UInt64 task_descprtion_current_version = 1; - std::atomic task_descprtion_version{1}; - Coordination::WatchCallback task_description_watch_callback; - /// ZooKeeper session used to set the callback - zkutil::ZooKeeperPtr task_description_watch_zookeeper; - - ConfigurationPtr task_cluster_initial_config; - ConfigurationPtr task_cluster_current_config; - Coordination::Stat task_descprtion_current_stat{}; - - std::unique_ptr task_cluster; - - bool is_safe_mode = false; - double copy_fault_probability = 0.0; - - Context & context; - Poco::Logger * log; - - std::chrono::milliseconds default_sleep_time{1000}; -}; - -} diff --git a/dbms/programs/copier/ClusterCopierApp.cpp b/dbms/programs/copier/ClusterCopierApp.cpp deleted file mode 100644 index 93b6ff6da50..00000000000 --- a/dbms/programs/copier/ClusterCopierApp.cpp +++ /dev/null @@ -1,172 +0,0 @@ -#include "ClusterCopierApp.h" - -namespace DB -{ - -/// ClusterCopierApp - -void ClusterCopierApp::initialize(Poco::Util::Application & self) -{ - is_help = config().has("help"); - if (is_help) - return; - - config_xml_path = config().getString("config-file"); - task_path = config().getString("task-path"); - log_level = config().getString("log-level", "trace"); - is_safe_mode = config().has("safe-mode"); - if (config().has("copy-fault-probability")) - copy_fault_probability = std::max(std::min(config().getDouble("copy-fault-probability"), 1.0), 0.0); - base_dir = (config().has("base-dir")) ? 
config().getString("base-dir") : Poco::Path::current(); - // process_id is '#_' - time_t timestamp = Poco::Timestamp().epochTime(); - auto curr_pid = Poco::Process::id(); - - process_id = std::to_string(DateLUT::instance().toNumYYYYMMDDhhmmss(timestamp)) + "_" + std::to_string(curr_pid); - host_id = escapeForFileName(getFQDNOrHostName()) + '#' + process_id; - process_path = Poco::Path(base_dir + "/clickhouse-copier_" + process_id).absolute().toString(); - Poco::File(process_path).createDirectories(); - - /// Override variables for BaseDaemon - if (config().has("log-level")) - config().setString("logger.level", config().getString("log-level")); - - if (config().has("base-dir") || !config().has("logger.log")) - config().setString("logger.log", process_path + "/log.log"); - - if (config().has("base-dir") || !config().has("logger.errorlog")) - config().setString("logger.errorlog", process_path + "/log.err.log"); - - Base::initialize(self); -} - - -void ClusterCopierApp::handleHelp(const std::string &, const std::string &) -{ - Poco::Util::HelpFormatter helpFormatter(options()); - helpFormatter.setCommand(commandName()); - helpFormatter.setHeader("Copies tables from one cluster to another"); - helpFormatter.setUsage("--config-file --task-path "); - helpFormatter.format(std::cerr); - - stopOptionsProcessing(); -} - - -void ClusterCopierApp::defineOptions(Poco::Util::OptionSet & options) -{ - Base::defineOptions(options); - - options.addOption(Poco::Util::Option("task-path", "", "path to task in ZooKeeper") - .argument("task-path").binding("task-path")); - options.addOption(Poco::Util::Option("task-file", "", "path to task file for uploading in ZooKeeper to task-path") - .argument("task-file").binding("task-file")); - options.addOption(Poco::Util::Option("task-upload-force", "", "Force upload task-file even node already exists") - .argument("task-upload-force").binding("task-upload-force")); - options.addOption(Poco::Util::Option("safe-mode", "", "disables ALTER DROP PARTITION in case of errors") - .binding("safe-mode")); - options.addOption(Poco::Util::Option("copy-fault-probability", "", "the copying fails with specified probability (used to test partition state recovering)") - .argument("copy-fault-probability").binding("copy-fault-probability")); - options.addOption(Poco::Util::Option("log-level", "", "sets log level") - .argument("log-level").binding("log-level")); - options.addOption(Poco::Util::Option("base-dir", "", "base directory for copiers, consecutive copier launches will populate /base-dir/launch_id/* directories") - .argument("base-dir").binding("base-dir")); - - using Me = std::decay_t; - options.addOption(Poco::Util::Option("help", "", "produce this help message").binding("help") - .callback(Poco::Util::OptionCallback(this, &Me::handleHelp))); -} - - -void ClusterCopierApp::mainImpl() -{ - StatusFile status_file(process_path + "/status"); - ThreadStatus thread_status; - - auto log = &logger(); - LOG_INFO(log, "Starting clickhouse-copier (" - << "id " << process_id << ", " - << "host_id " << host_id << ", " - << "path " << process_path << ", " - << "revision " << ClickHouseRevision::get() << ")"); - - auto context = std::make_unique(Context::createGlobal()); - context->makeGlobalContext(); - SCOPE_EXIT(context->shutdown()); - - context->setConfig(loaded_config.configuration); - context->setApplicationType(Context::ApplicationType::LOCAL); - context->setPath(process_path); - - registerFunctions(); - registerAggregateFunctions(); - registerTableFunctions(); - registerStorages(); - 
registerDictionaries(); - registerDisks(); - - static const std::string default_database = "_local"; - context->addDatabase(default_database, std::make_shared(default_database)); - context->setCurrentDatabase(default_database); - - /// Initialize query scope just in case. - CurrentThread::QueryScope query_scope(*context); - - auto copier = std::make_unique(task_path, host_id, default_database, *context); - copier->setSafeMode(is_safe_mode); - copier->setCopyFaultProbability(copy_fault_probability); - - auto task_file = config().getString("task-file", ""); - if (!task_file.empty()) - copier->uploadTaskDescription(task_path, task_file, config().getBool("task-upload-force", false)); - - copier->init(); - copier->process(ConnectionTimeouts::getTCPTimeoutsWithoutFailover(context->getSettingsRef())); - - /// Reset ZooKeeper before removing ClusterCopier. - /// Otherwise zookeeper watch can call callback which use already removed ClusterCopier object. - context->resetZooKeeper(); -} - - -int ClusterCopierApp::main(const std::vector &) -{ - if (is_help) - return 0; - - try - { - mainImpl(); - } - catch (...) - { - tryLogCurrentException(&Poco::Logger::root(), __PRETTY_FUNCTION__); - auto code = getCurrentExceptionCode(); - - return (code) ? code : -1; - } - - return 0; -} - - -} - -#pragma GCC diagnostic ignored "-Wunused-function" -#pragma GCC diagnostic ignored "-Wmissing-declarations" - -int mainEntryClickHouseClusterCopier(int argc, char ** argv) -{ - try - { - DB::ClusterCopierApp app; - return app.run(argc, argv); - } - catch (...) - { - std::cerr << DB::getCurrentExceptionMessage(true) << "\n"; - auto code = DB::getCurrentExceptionCode(); - - return (code) ? code : -1; - } -} diff --git a/dbms/programs/copier/Internals.cpp b/dbms/programs/copier/Internals.cpp deleted file mode 100644 index c7fb8405ec9..00000000000 --- a/dbms/programs/copier/Internals.cpp +++ /dev/null @@ -1,145 +0,0 @@ -#include "Internals.h" - -namespace DB -{ -namespace ErrorCodes -{ - extern const int BAD_ARGUMENTS; -} - -ConfigurationPtr getConfigurationFromXMLString(const std::string & xml_data) -{ - std::stringstream ss(xml_data); - Poco::XML::InputSource input_source{ss}; - return {new Poco::Util::XMLConfiguration{&input_source}}; -} - - -String getQuotedTable(const String & database, const String & table) -{ - if (database.empty()) - return backQuoteIfNeed(table); - - return backQuoteIfNeed(database) + "." 
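The getQuotedTable() helper above joins back-quoted database and table names. A rough standalone sketch of the quoting decision it relies on (the real backQuoteIfNeed also escapes backticks inside the identifier; only the "quote when not a plain word" rule is shown, and the function names here are illustrative):

#include <cctype>
#include <iostream>
#include <string>

// Wrap an identifier in backticks unless it is already a plain
// [A-Za-z_][A-Za-z0-9_]* word.
static std::string backQuoteIfNeeded(const std::string & name)
{
    bool plain = !name.empty()
        && (std::isalpha(static_cast<unsigned char>(name[0])) || name[0] == '_');
    for (char c : name)
        plain = plain && (std::isalnum(static_cast<unsigned char>(c)) || c == '_');
    return plain ? name : "`" + name + "`";
}

static std::string quotedTable(const std::string & database, const std::string & table)
{
    if (database.empty())
        return backQuoteIfNeeded(table);
    return backQuoteIfNeeded(database) + "." + backQuoteIfNeeded(table);
}

int main()
{
    std::cout << quotedTable("db", "hits") << '\n';          // db.hits
    std::cout << quotedTable("db", "strange name") << '\n';  // db.`strange name`
}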
+ backQuoteIfNeed(table); -} - -String getQuotedTable(const DatabaseAndTableName & db_and_table) -{ - return getQuotedTable(db_and_table.first, db_and_table.second); -} - - -// Creates AST representing 'ENGINE = Distributed(cluster, db, table, [sharding_key]) -std::shared_ptr createASTStorageDistributed( - const String & cluster_name, const String & database, const String & table, - const ASTPtr & sharding_key_ast) -{ - auto args = std::make_shared(); - args->children.emplace_back(std::make_shared(cluster_name)); - args->children.emplace_back(std::make_shared(database)); - args->children.emplace_back(std::make_shared(table)); - if (sharding_key_ast) - args->children.emplace_back(sharding_key_ast); - - auto engine = std::make_shared(); - engine->name = "Distributed"; - engine->arguments = args; - - auto storage = std::make_shared(); - storage->set(storage->engine, engine); - - return storage; -} - - -BlockInputStreamPtr squashStreamIntoOneBlock(const BlockInputStreamPtr & stream) -{ - return std::make_shared( - stream, - std::numeric_limits::max(), - std::numeric_limits::max()); -} - -Block getBlockWithAllStreamData(const BlockInputStreamPtr & stream) -{ - return squashStreamIntoOneBlock(stream)->read(); -} - - -bool isExtendedDefinitionStorage(const ASTPtr & storage_ast) -{ - const auto & storage = storage_ast->as(); - return storage.partition_by || storage.order_by || storage.sample_by; -} - -ASTPtr extractPartitionKey(const ASTPtr & storage_ast) -{ - String storage_str = queryToString(storage_ast); - - const auto & storage = storage_ast->as(); - const auto & engine = storage.engine->as(); - - if (!endsWith(engine.name, "MergeTree")) - { - throw Exception( - "Unsupported engine was specified in " + storage_str + ", only *MergeTree engines are supported", - ErrorCodes::BAD_ARGUMENTS); - } - - if (isExtendedDefinitionStorage(storage_ast)) - { - if (storage.partition_by) - return storage.partition_by->clone(); - - static const char * all = "all"; - return std::make_shared(Field(all, strlen(all))); - } - else - { - bool is_replicated = startsWith(engine.name, "Replicated"); - size_t min_args = is_replicated ? 3 : 1; - - if (!engine.arguments) - throw Exception("Expected arguments in " + storage_str, ErrorCodes::BAD_ARGUMENTS); - - ASTPtr arguments_ast = engine.arguments->clone(); - ASTs & arguments = arguments_ast->children; - - if (arguments.size() < min_args) - throw Exception("Expected at least " + toString(min_args) + " arguments in " + storage_str, - ErrorCodes::BAD_ARGUMENTS); - - ASTPtr & month_arg = is_replicated ? 
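The AST built by createASTStorageDistributed() above corresponds to an "ENGINE = Distributed(cluster, db, table[, sharding_key])" storage definition. A trivial string-level illustration of that mapping (the real code emits AST nodes and literals, not text):

#include <iostream>
#include <string>

// Textual form of the storage definition represented by the AST.
std::string distributedEngineDefinition(const std::string & cluster,
                                        const std::string & database,
                                        const std::string & table,
                                        const std::string & sharding_key = "")
{
    std::string def = "ENGINE = Distributed(" + cluster + ", " + database + ", " + table;
    if (!sharding_key.empty())
        def += ", " + sharding_key;
    return def + ")";
}

int main()
{
    std::cout << distributedEngineDefinition("dst_cluster", "db", "hits", "rand()") << '\n';
}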
arguments[2] : arguments[1]; - return makeASTFunction("toYYYYMM", month_arg->clone()); - } -} - -ShardPriority getReplicasPriority(const Cluster::Addresses & replicas, const std::string & local_hostname, UInt8 random) -{ - ShardPriority res; - - if (replicas.empty()) - return res; - - res.is_remote = 1; - for (auto & replica : replicas) - { - if (isLocalAddress(DNSResolver::instance().resolveHost(replica.host_name))) - { - res.is_remote = 0; - break; - } - } - - res.hostname_difference = std::numeric_limits::max(); - for (auto & replica : replicas) - { - size_t difference = getHostNameDifference(local_hostname, replica.host_name); - res.hostname_difference = std::min(difference, res.hostname_difference); - } - - res.random = random; - return res; -} - -} diff --git a/dbms/programs/copier/Internals.h b/dbms/programs/copier/Internals.h deleted file mode 100644 index 6326e17dd08..00000000000 --- a/dbms/programs/copier/Internals.h +++ /dev/null @@ -1,180 +0,0 @@ -#pragma once - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "Aliases.h" - -namespace DB -{ - -namespace ErrorCodes -{ - extern const int LOGICAL_ERROR; -} - - -ConfigurationPtr getConfigurationFromXMLString(const std::string & xml_data); - -String getQuotedTable(const String & database, const String & table); - -String getQuotedTable(const DatabaseAndTableName & db_and_table); - - -enum class TaskState -{ - Started = 0, - Finished, - Unknown -}; - -/// Used to mark status of shard partition tasks -struct TaskStateWithOwner -{ - TaskStateWithOwner() = default; - - TaskStateWithOwner(TaskState state_, const String & owner_) : state(state_), owner(owner_) {} - - TaskState state{TaskState::Unknown}; - String owner; - - static String getData(TaskState state, const String &owner) - { - return TaskStateWithOwner(state, owner).toString(); - } - - String toString() - { - WriteBufferFromOwnString wb; - wb << static_cast(state) << "\n" << escape << owner; - return wb.str(); - } - - static TaskStateWithOwner fromString(const String & data) - { - ReadBufferFromString rb(data); - TaskStateWithOwner res; - UInt32 state; - - rb >> state >> "\n" >> escape >> res.owner; - - if (state >= static_cast(TaskState::Unknown)) - throw Exception("Unknown state " + data, ErrorCodes::LOGICAL_ERROR); - - res.state = static_cast(state); - return res; - } -}; - - - -struct ShardPriority -{ - UInt8 is_remote = 1; - size_t hostname_difference = 0; - UInt8 random = 0; - - static bool greaterPriority(const ShardPriority & current, const ShardPriority & other) - { - return std::forward_as_tuple(current.is_remote, current.hostname_difference, current.random) - < std::forward_as_tuple(other.is_remote, other.hostname_difference, other.random); - } -}; - -/// Execution status of a task -enum class PartitionTaskStatus -{ - Active, - Finished, - Error, -}; - - -struct MultiTransactionInfo -{ - int32_t code; - Coordination::Requests requests; - 
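TaskStateWithOwner above is persisted as two lines: the numeric state followed by the escaped owner id. A stripped-down round trip of that format, ignoring the escaping performed by the `escape` manipulator (this struct is a sketch, not the real class):

#include <iostream>
#include <sstream>
#include <string>

struct TaskStateSketch
{
    unsigned state = 2;   // 0 = Started, 1 = Finished, 2 = Unknown
    std::string owner;

    std::string toString() const
    {
        return std::to_string(state) + "\n" + owner;
    }

    static TaskStateSketch fromString(const std::string & data)
    {
        TaskStateSketch res;
        std::istringstream in(data);
        in >> res.state;                 // numeric state on the first line
        in.ignore(1);                    // skip the separating '\n'
        std::getline(in, res.owner);     // owner id on the second line
        return res;
    }
};

int main()
{
    TaskStateSketch s;
    s.state = 0;
    s.owner = "worker-1#20200101";
    auto restored = TaskStateSketch::fromString(s.toString());
    std::cout << restored.state << " " << restored.owner << '\n';
}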
Coordination::Responses responses; -}; - -// Creates AST representing 'ENGINE = Distributed(cluster, db, table, [sharding_key]) -std::shared_ptr createASTStorageDistributed( - const String & cluster_name, const String & database, const String & table, - const ASTPtr & sharding_key_ast = nullptr); - - -BlockInputStreamPtr squashStreamIntoOneBlock(const BlockInputStreamPtr & stream); - -Block getBlockWithAllStreamData(const BlockInputStreamPtr & stream); - -bool isExtendedDefinitionStorage(const ASTPtr & storage_ast); - -ASTPtr extractPartitionKey(const ASTPtr & storage_ast); - -ShardPriority getReplicasPriority(const Cluster::Addresses & replicas, const std::string & local_hostname, UInt8 random); - -} diff --git a/dbms/programs/copier/ShardPartition.h b/dbms/programs/copier/ShardPartition.h deleted file mode 100644 index a299cf496d8..00000000000 --- a/dbms/programs/copier/ShardPartition.h +++ /dev/null @@ -1,68 +0,0 @@ -#pragma once - -#include "Aliases.h" - -namespace DB -{ - -/// Just destination partition of a shard -struct ShardPartition -{ - ShardPartition(TaskShard & parent, const String & name_quoted_) : task_shard(parent), name(name_quoted_) {} - - String getPartitionPath() const; - String getPartitionCleanStartPath() const; - String getCommonPartitionIsDirtyPath() const; - String getCommonPartitionIsCleanedPath() const; - String getPartitionActiveWorkersPath() const; - String getActiveWorkerPath() const; - String getPartitionShardsPath() const; - String getShardStatusPath() const; - - TaskShard & task_shard; - String name; -}; - -inline String ShardPartition::getPartitionCleanStartPath() const -{ - return getPartitionPath() + "/clean_start"; -} - -inline String ShardPartition::getPartitionPath() const -{ - return task_shard.task_table.getPartitionPath(name); -} - -inline String ShardPartition::getShardStatusPath() const -{ - // schema: //tables/
//shards/ - // e.g. /root/table_test.hits/201701/shards/1 - return getPartitionShardsPath() + "/" + toString(task_shard.numberInCluster()); -} - -inline String ShardPartition::getPartitionShardsPath() const -{ - return getPartitionPath() + "/shards"; -} - -inline String ShardPartition::getPartitionActiveWorkersPath() const -{ - return getPartitionPath() + "/partition_active_workers"; -} - -inline String ShardPartition::getActiveWorkerPath() const -{ - return getPartitionActiveWorkersPath() + "/" + toString(task_shard.numberInCluster()); -} - -inline String ShardPartition::getCommonPartitionIsDirtyPath() const -{ - return getPartitionPath() + "/is_dirty"; -} - -inline String ShardPartition::getCommonPartitionIsCleanedPath() const -{ - return getCommonPartitionIsDirtyPath() + "/cleaned"; -} - -} diff --git a/dbms/programs/copier/TaskTableAndShard.h b/dbms/programs/copier/TaskTableAndShard.h deleted file mode 100644 index cb4753264ed..00000000000 --- a/dbms/programs/copier/TaskTableAndShard.h +++ /dev/null @@ -1,279 +0,0 @@ -#pragma once - -#include "Aliases.h" -#include "Internals.h" -#include "ClusterPartition.h" - -namespace DB -{ -namespace ErrorCodes -{ - extern const int UNKNOWN_ELEMENT_IN_CONFIG; - extern const int LOGICAL_ERROR; -} - -struct TaskShard; - -struct TaskTable -{ - TaskTable(TaskCluster & parent, const Poco::Util::AbstractConfiguration & config, const String & prefix, - const String & table_key); - - TaskCluster & task_cluster; - - String getPartitionPath(const String & partition_name) const; - String getPartitionIsDirtyPath(const String & partition_name) const; - String getPartitionIsCleanedPath(const String & partition_name) const; - String getPartitionTaskStatusPath(const String & partition_name) const; - - String name_in_config; - - /// Used as task ID - String table_id; - - /// Source cluster and table - String cluster_pull_name; - DatabaseAndTableName table_pull; - - /// Destination cluster and table - String cluster_push_name; - DatabaseAndTableName table_push; - - /// Storage of destination table - String engine_push_str; - ASTPtr engine_push_ast; - ASTPtr engine_push_partition_key_ast; - - /// A Distributed table definition used to split data - String sharding_key_str; - ASTPtr sharding_key_ast; - ASTPtr engine_split_ast; - - /// Additional WHERE expression to filter input data - String where_condition_str; - ASTPtr where_condition_ast; - - /// Resolved clusters - ClusterPtr cluster_pull; - ClusterPtr cluster_push; - - /// Filter partitions that should be copied - bool has_enabled_partitions = false; - Strings enabled_partitions; - NameSet enabled_partitions_set; - - /// Prioritized list of shards - TasksShard all_shards; - TasksShard local_shards; - - ClusterPartitions cluster_partitions; - NameSet finished_cluster_partitions; - - /// Parition names to process in user-specified order - Strings ordered_partition_names; - - ClusterPartition & getClusterPartition(const String & partition_name) - { - auto it = cluster_partitions.find(partition_name); - if (it == cluster_partitions.end()) - throw Exception("There are no cluster partition " + partition_name + " in " + table_id, ErrorCodes::LOGICAL_ERROR); - return it->second; - } - - Stopwatch watch; - UInt64 bytes_copied = 0; - UInt64 rows_copied = 0; - - template - void initShards(RandomEngine && random_engine); -}; - - -struct TaskShard -{ - TaskShard(TaskTable &parent, const ShardInfo &info_) : task_table(parent), info(info_) {} - - TaskTable & task_table; - - ShardInfo info; - - UInt32 numberInCluster() const { 
return info.shard_num; } - - UInt32 indexInCluster() const { return info.shard_num - 1; } - - String getDescription() const; - - String getHostNameExample() const; - - /// Used to sort clusters by their proximity - ShardPriority priority; - - /// Column with unique destination partitions (computed from engine_push_partition_key expr.) in the shard - ColumnWithTypeAndName partition_key_column; - - /// There is a task for each destination partition - TasksPartition partition_tasks; - - /// Which partitions have been checked for existence - /// If some partition from this lists is exists, it is in partition_tasks - std::set checked_partitions; - - /// Last CREATE TABLE query of the table of the shard - ASTPtr current_pull_table_create_query; - - /// Internal distributed tables - DatabaseAndTableName table_read_shard; - DatabaseAndTableName table_split_shard; -}; - - -inline String TaskTable::getPartitionPath(const String & partition_name) const -{ - return task_cluster.task_zookeeper_path // root - + "/tables/" + table_id // tables/dst_cluster.merge.hits - + "/" + escapeForFileName(partition_name); // 201701 -} - -inline String TaskTable::getPartitionIsDirtyPath(const String & partition_name) const -{ - return getPartitionPath(partition_name) + "/is_dirty"; -} - -inline String TaskTable::getPartitionIsCleanedPath(const String & partition_name) const -{ - return getPartitionIsDirtyPath(partition_name) + "/cleaned"; -} - -inline String TaskTable::getPartitionTaskStatusPath(const String & partition_name) const -{ - return getPartitionPath(partition_name) + "/shards"; -} - -inline TaskTable::TaskTable(TaskCluster & parent, const Poco::Util::AbstractConfiguration & config, const String & prefix_, - const String & table_key) - : task_cluster(parent) -{ - String table_prefix = prefix_ + "." + table_key + "."; - - name_in_config = table_key; - - cluster_pull_name = config.getString(table_prefix + "cluster_pull"); - cluster_push_name = config.getString(table_prefix + "cluster_push"); - - table_pull.first = config.getString(table_prefix + "database_pull"); - table_pull.second = config.getString(table_prefix + "table_pull"); - - table_push.first = config.getString(table_prefix + "database_push"); - table_push.second = config.getString(table_prefix + "table_push"); - - /// Used as node name in ZooKeeper - table_id = escapeForFileName(cluster_push_name) - + "." + escapeForFileName(table_push.first) - + "." 
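The ZooKeeper layout implied by the path helpers of ShardPartition and TaskTable above, composed as plain strings (escapeForFileName of the partition name is omitted, and the example values are illustrative):

#include <iostream>
#include <string>

//   <task_zookeeper_path>/tables/<table_id>/<partition>    - partition root
//   .../is_dirty, .../is_dirty/cleaned                     - dirty flags
//   .../shards, .../shards/<shard_number>                  - per-shard status
std::string partitionPath(const std::string & task_root,
                          const std::string & table_id,
                          const std::string & partition)
{
    return task_root + "/tables/" + table_id + "/" + partition;
}

std::string shardStatusPath(const std::string & partition_path, unsigned shard_number)
{
    return partition_path + "/shards/" + std::to_string(shard_number);
}

int main()
{
    auto p = partitionPath("/clickhouse-copier/task1", "dst_cluster.db.hits", "201701");
    std::cout << shardStatusPath(p, 1) << '\n';
    // /clickhouse-copier/task1/tables/dst_cluster.db.hits/201701/shards/1
}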
+ escapeForFileName(table_push.second); - - engine_push_str = config.getString(table_prefix + "engine"); - { - ParserStorage parser_storage; - engine_push_ast = parseQuery(parser_storage, engine_push_str, 0); - engine_push_partition_key_ast = extractPartitionKey(engine_push_ast); - } - - sharding_key_str = config.getString(table_prefix + "sharding_key"); - { - ParserExpressionWithOptionalAlias parser_expression(false); - sharding_key_ast = parseQuery(parser_expression, sharding_key_str, 0); - engine_split_ast = createASTStorageDistributed(cluster_push_name, table_push.first, table_push.second, sharding_key_ast); - } - - where_condition_str = config.getString(table_prefix + "where_condition", ""); - if (!where_condition_str.empty()) - { - ParserExpressionWithOptionalAlias parser_expression(false); - where_condition_ast = parseQuery(parser_expression, where_condition_str, 0); - - // Will use canonical expression form - where_condition_str = queryToString(where_condition_ast); - } - - String enabled_partitions_prefix = table_prefix + "enabled_partitions"; - has_enabled_partitions = config.has(enabled_partitions_prefix); - - if (has_enabled_partitions) - { - Strings keys; - config.keys(enabled_partitions_prefix, keys); - - if (keys.empty()) - { - /// Parse list of partition from space-separated string - String partitions_str = config.getString(table_prefix + "enabled_partitions"); - boost::trim_if(partitions_str, isWhitespaceASCII); - boost::split(enabled_partitions, partitions_str, isWhitespaceASCII, boost::token_compress_on); - } - else - { - /// Parse sequence of ... - for (const String & key : keys) - { - if (!startsWith(key, "partition")) - throw Exception("Unknown key " + key + " in " + enabled_partitions_prefix, ErrorCodes::UNKNOWN_ELEMENT_IN_CONFIG); - - enabled_partitions.emplace_back(config.getString(enabled_partitions_prefix + "." 
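For orientation, this is the shape of one table entry that the TaskTable constructor above parses; the element names come from the config keys read there, while the enclosing <tables> element, the "hits" key and all values are illustrative assumptions (shown as a C++ raw string to keep the example self-contained):

#include <iostream>
#include <string>

int main()
{
    static const std::string task_table_fragment = R"(
<tables>
    <hits>
        <cluster_pull>source_cluster</cluster_pull>
        <database_pull>db</database_pull>
        <table_pull>hits</table_pull>

        <cluster_push>destination_cluster</cluster_push>
        <database_push>db</database_push>
        <table_push>hits_new</table_push>

        <engine>ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/hits_new', '{replica}') PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate)</engine>
        <sharding_key>rand()</sharding_key>
        <where_condition>CounterID != 0</where_condition>

        <enabled_partitions>
            <partition>201701</partition>
            <partition>201702</partition>
        </enabled_partitions>
    </hits>
</tables>
)";
    std::cout << task_table_fragment << '\n';
}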
+ key)); - } - } - - std::copy(enabled_partitions.begin(), enabled_partitions.end(), std::inserter(enabled_partitions_set, enabled_partitions_set.begin())); - } -} - -template -inline void TaskTable::initShards(RandomEngine && random_engine) -{ - const String & fqdn_name = getFQDNOrHostName(); - std::uniform_int_distribution get_urand(0, std::numeric_limits::max()); - - // Compute the priority - for (auto & shard_info : cluster_pull->getShardsInfo()) - { - TaskShardPtr task_shard = std::make_shared(*this, shard_info); - const auto & replicas = cluster_pull->getShardsAddresses().at(task_shard->indexInCluster()); - task_shard->priority = getReplicasPriority(replicas, fqdn_name, get_urand(random_engine)); - - all_shards.emplace_back(task_shard); - } - - // Sort by priority - std::sort(all_shards.begin(), all_shards.end(), - [](const TaskShardPtr & lhs, const TaskShardPtr & rhs) - { - return ShardPriority::greaterPriority(lhs->priority, rhs->priority); - }); - - // Cut local shards - auto it_first_remote = std::lower_bound(all_shards.begin(), all_shards.end(), 1, - [](const TaskShardPtr & lhs, UInt8 is_remote) - { - return lhs->priority.is_remote < is_remote; - }); - - local_shards.assign(all_shards.begin(), it_first_remote); -} - - -inline String DB::TaskShard::getDescription() const -{ - std::stringstream ss; - ss << "N" << numberInCluster() - << " (having a replica " << getHostNameExample() - << ", pull table " + getQuotedTable(task_table.table_pull) - << " of cluster " + task_table.cluster_pull_name << ")"; - return ss.str(); -} - -inline String DB::TaskShard::getHostNameExample() const -{ - auto &replicas = task_table.cluster_pull->getShardsAddresses().at(indexInCluster()); - return replicas.at(0).readableString(); -} - -} diff --git a/dbms/programs/main.cpp b/dbms/programs/main.cpp deleted file mode 100644 index a063463d7c9..00000000000 --- a/dbms/programs/main.cpp +++ /dev/null @@ -1,167 +0,0 @@ -#include -#include -#include -#include -#include /// pair - -#if __has_include("config_tools.h") -#include "config_tools.h" -#endif -#if __has_include() /// "Arcadia" build system lacks configure files. 
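The shard ordering used by initShards() above, in miniature: sort by the (is_remote, hostname_difference, random) tuple ascending, then cut the leading run of local shards with lower_bound on is_remote. A self-contained sketch with plain std types:

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <tuple>
#include <vector>

struct Priority
{
    uint8_t is_remote = 1;
    size_t hostname_difference = 0;
    uint8_t random = 0;
};

// Lexicographic comparison mirroring ShardPriority::greaterPriority.
static bool higherPriority(const Priority & a, const Priority & b)
{
    return std::tie(a.is_remote, a.hostname_difference, a.random)
         < std::tie(b.is_remote, b.hostname_difference, b.random);
}

int main()
{
    std::vector<Priority> shards{{1, 4, 7}, {0, 0, 3}, {1, 2, 9}, {0, 1, 5}};

    std::sort(shards.begin(), shards.end(), higherPriority);

    // First shard whose priority says "remote"; everything before it is local.
    auto first_remote = std::lower_bound(shards.begin(), shards.end(), 1,
        [](const Priority & p, uint8_t is_remote) { return p.is_remote < is_remote; });

    std::cout << "local shards: " << (first_remote - shards.begin())
              << " of " << shards.size() << '\n';   // prints: local shards: 2 of 4
}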
-#include -#endif -#if __has_include("config_core.h") -#include "config_core.h" -#endif - -#include - -#include -#include - - -/// Universal executable for various clickhouse applications -#if ENABLE_CLICKHOUSE_SERVER || !defined(ENABLE_CLICKHOUSE_SERVER) -int mainEntryClickHouseServer(int argc, char ** argv); -#endif -#if ENABLE_CLICKHOUSE_CLIENT || !defined(ENABLE_CLICKHOUSE_CLIENT) -int mainEntryClickHouseClient(int argc, char ** argv); -#endif -#if ENABLE_CLICKHOUSE_LOCAL || !defined(ENABLE_CLICKHOUSE_LOCAL) -int mainEntryClickHouseLocal(int argc, char ** argv); -#endif -#if ENABLE_CLICKHOUSE_BENCHMARK || !defined(ENABLE_CLICKHOUSE_BENCHMARK) -int mainEntryClickHouseBenchmark(int argc, char ** argv); -#endif -#if ENABLE_CLICKHOUSE_PERFORMANCE_TEST || !defined(ENABLE_CLICKHOUSE_PERFORMANCE_TEST) -int mainEntryClickHousePerformanceTest(int argc, char ** argv); -#endif -#if ENABLE_CLICKHOUSE_EXTRACT_FROM_CONFIG || !defined(ENABLE_CLICKHOUSE_EXTRACT_FROM_CONFIG) -int mainEntryClickHouseExtractFromConfig(int argc, char ** argv); -#endif -#if ENABLE_CLICKHOUSE_COMPRESSOR || !defined(ENABLE_CLICKHOUSE_COMPRESSOR) -int mainEntryClickHouseCompressor(int argc, char ** argv); -#endif -#if ENABLE_CLICKHOUSE_FORMAT || !defined(ENABLE_CLICKHOUSE_FORMAT) -int mainEntryClickHouseFormat(int argc, char ** argv); -#endif -#if ENABLE_CLICKHOUSE_COPIER || !defined(ENABLE_CLICKHOUSE_COPIER) -int mainEntryClickHouseClusterCopier(int argc, char ** argv); -#endif -#if ENABLE_CLICKHOUSE_OBFUSCATOR || !defined(ENABLE_CLICKHOUSE_OBFUSCATOR) -int mainEntryClickHouseObfuscator(int argc, char ** argv); -#endif - - -namespace -{ - -using MainFunc = int (*)(int, char**); - - -/// Add an item here to register new application -std::pair clickhouse_applications[] = -{ -#if ENABLE_CLICKHOUSE_LOCAL || !defined(ENABLE_CLICKHOUSE_LOCAL) - {"local", mainEntryClickHouseLocal}, -#endif -#if ENABLE_CLICKHOUSE_CLIENT || !defined(ENABLE_CLICKHOUSE_CLIENT) - {"client", mainEntryClickHouseClient}, -#endif -#if ENABLE_CLICKHOUSE_BENCHMARK || !defined(ENABLE_CLICKHOUSE_BENCHMARK) - {"benchmark", mainEntryClickHouseBenchmark}, -#endif -#if ENABLE_CLICKHOUSE_SERVER || !defined(ENABLE_CLICKHOUSE_SERVER) - {"server", mainEntryClickHouseServer}, -#endif -#if ENABLE_CLICKHOUSE_PERFORMANCE_TEST || !defined(ENABLE_CLICKHOUSE_PERFORMANCE_TEST) - {"performance-test", mainEntryClickHousePerformanceTest}, -#endif -#if ENABLE_CLICKHOUSE_EXTRACT_FROM_CONFIG || !defined(ENABLE_CLICKHOUSE_EXTRACT_FROM_CONFIG) - {"extract-from-config", mainEntryClickHouseExtractFromConfig}, -#endif -#if ENABLE_CLICKHOUSE_COMPRESSOR || !defined(ENABLE_CLICKHOUSE_COMPRESSOR) - {"compressor", mainEntryClickHouseCompressor}, -#endif -#if ENABLE_CLICKHOUSE_FORMAT || !defined(ENABLE_CLICKHOUSE_FORMAT) - {"format", mainEntryClickHouseFormat}, -#endif -#if ENABLE_CLICKHOUSE_COPIER || !defined(ENABLE_CLICKHOUSE_COPIER) - {"copier", mainEntryClickHouseClusterCopier}, -#endif -#if ENABLE_CLICKHOUSE_OBFUSCATOR || !defined(ENABLE_CLICKHOUSE_OBFUSCATOR) - {"obfuscator", mainEntryClickHouseObfuscator}, -#endif -}; - - -int printHelp(int, char **) -{ - std::cerr << "Use one of the following commands:" << std::endl; - for (auto & application : clickhouse_applications) - std::cerr << "clickhouse " << application.first << " [args] " << std::endl; - return -1; -} - - -bool isClickhouseApp(const std::string & app_suffix, std::vector & argv) -{ - /// Use app if the first arg 'app' is passed (the arg should be quietly removed) - if (argv.size() >= 2) - { - auto first_arg = argv.begin() + 1; 
- - /// 'clickhouse --client ...' and 'clickhouse client ...' are Ok - if (*first_arg == "--" + app_suffix || *first_arg == app_suffix) - { - argv.erase(first_arg); - return true; - } - } - - /// Use app if clickhouse binary is run through symbolic link with name clickhouse-app - std::string app_name = "clickhouse-" + app_suffix; - return !argv.empty() && (app_name == argv[0] || endsWith(argv[0], "/" + app_name)); -} - -} - - -/// This allows to implement assert to forbid initialization of a class in static constructors. -/// Usage: -/// -/// extern bool inside_main; -/// class C { C() { assert(inside_main); } }; -bool inside_main = false; - - -int main(int argc_, char ** argv_) -{ - inside_main = true; - SCOPE_EXIT({ inside_main = false; }); - - /// Reset new handler to default (that throws std::bad_alloc) - /// It is needed because LLVM library clobbers it. - std::set_new_handler(nullptr); - - /// PHDR cache is required for query profiler to work reliably - /// It also speed up exception handling, but exceptions from dynamically loaded libraries (dlopen) - /// will work only after additional call of this function. - updatePHDRCache(); - - std::vector argv(argv_, argv_ + argc_); - - /// Print a basic help if nothing was matched - MainFunc main_func = printHelp; - - for (auto & application : clickhouse_applications) - { - if (isClickhouseApp(application.first, argv)) - { - main_func = application.second; - break; - } - } - - return main_func(static_cast(argv.size()), argv.data()); -} diff --git a/dbms/programs/odbc-bridge/CMakeLists.txt b/dbms/programs/odbc-bridge/CMakeLists.txt deleted file mode 100644 index 7bb128902e0..00000000000 --- a/dbms/programs/odbc-bridge/CMakeLists.txt +++ /dev/null @@ -1,59 +0,0 @@ -set(CLICKHOUSE_ODBC_BRIDGE_SOURCES - ${CMAKE_CURRENT_SOURCE_DIR}/ColumnInfoHandler.cpp - ${CMAKE_CURRENT_SOURCE_DIR}/getIdentifierQuote.cpp - ${CMAKE_CURRENT_SOURCE_DIR}/HandlerFactory.cpp - ${CMAKE_CURRENT_SOURCE_DIR}/IdentifierQuoteHandler.cpp - ${CMAKE_CURRENT_SOURCE_DIR}/MainHandler.cpp - ${CMAKE_CURRENT_SOURCE_DIR}/ODBCBlockInputStream.cpp - ${CMAKE_CURRENT_SOURCE_DIR}/ODBCBridge.cpp - ${CMAKE_CURRENT_SOURCE_DIR}/PingHandler.cpp - ${CMAKE_CURRENT_SOURCE_DIR}/validateODBCConnectionString.cpp -) - -set(CLICKHOUSE_ODBC_BRIDGE_LINK PRIVATE dbms clickhouse_parsers PUBLIC daemon) -set(CLICKHOUSE_ODBC_BRIDGE_INCLUDE PUBLIC ${ClickHouse_SOURCE_DIR}/base) - -if (USE_POCO_SQLODBC) - set(CLICKHOUSE_ODBC_BRIDGE_LINK ${CLICKHOUSE_ODBC_BRIDGE_LINK} PRIVATE ${Poco_SQLODBC_LIBRARY}) - # Wouldnt work anyway because of the way list variable got expanded in `target_include_directories` - # set(CLICKHOUSE_ODBC_BRIDGE_INCLUDE ${CLICKHOUSE_ODBC_BRIDGE_INCLUDE} SYSTEM PRIVATE ${ODBC_INCLUDE_DIRS} ${Poco_SQLODBC_INCLUDE_DIR}) -endif () -if (Poco_SQL_FOUND) - set(CLICKHOUSE_ODBC_BRIDGE_LINK ${CLICKHOUSE_ODBC_BRIDGE_LINK} PRIVATE ${Poco_SQL_LIBRARY}) - # Wouldnt work anyway because of the way list variable got expanded in `target_include_directories` - # set(CLICKHOUSE_ODBC_BRIDGE_INCLUDE ${CLICKHOUSE_ODBC_BRIDGE_INCLUDE} SYSTEM PRIVATE ${Poco_SQL_INCLUDE_DIR}) -endif () - -if (USE_POCO_DATAODBC) - set(CLICKHOUSE_ODBC_BRIDGE_LINK ${CLICKHOUSE_ODBC_BRIDGE_LINK} PRIVATE ${Poco_DataODBC_LIBRARY}) - # Wouldnt work anyway because of the way list variable got expanded in `target_include_directories` - # set(CLICKHOUSE_ODBC_BRIDGE_INCLUDE ${CLICKHOUSE_ODBC_BRIDGE_INCLUDE} SYSTEM PRIVATE ${ODBC_INCLUDE_DIRS} ${Poco_DataODBC_INCLUDE_DIR}) -endif() -if (Poco_Data_FOUND) - set(CLICKHOUSE_ODBC_BRIDGE_LINK 
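The dispatch rule of the combined binary above in miniature: "clickhouse client ..." and "clickhouse --client ..." select the tool by the first argument (which is then dropped), and a symlink named clickhouse-client selects it by argv[0]. A simplified standalone sketch:

#include <iostream>
#include <string>
#include <vector>

static bool isApp(const std::string & suffix, std::vector<std::string> & args)
{
    // First-argument form: 'clickhouse client ...' or 'clickhouse --client ...'
    if (args.size() >= 2 && (args[1] == suffix || args[1] == "--" + suffix))
    {
        args.erase(args.begin() + 1);
        return true;
    }

    // Symlink form: argv[0] is 'clickhouse-client' or ends with '/clickhouse-client'
    const std::string name = "clickhouse-" + suffix;
    return !args.empty()
        && (args[0] == name
            || (args[0].size() > name.size()
                && args[0].compare(args[0].size() - name.size() - 1, name.size() + 1, "/" + name) == 0));
}

int main()
{
    std::vector<std::string> a{"clickhouse", "client", "--query", "SELECT 1"};
    std::cout << isApp("client", a) << " remaining args: " << a.size() << '\n';  // 1 remaining args: 3

    std::vector<std::string> b{"/usr/bin/clickhouse-local", "--query", "SELECT 1"};
    std::cout << isApp("local", b) << '\n';  // 1
}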
${CLICKHOUSE_ODBC_BRIDGE_LINK} PRIVATE ${Poco_Data_LIBRARY}) - # Wouldnt work anyway because of the way list variable got expanded in `target_include_directories` - # set(CLICKHOUSE_ODBC_BRIDGE_INCLUDE ${CLICKHOUSE_ODBC_BRIDGE_INCLUDE} SYSTEM PRIVATE ${Poco_Data_INCLUDE_DIR}) -endif () - -clickhouse_program_add_library(odbc-bridge) - -if (OS_LINUX) - # clickhouse-odbc-bridge is always a separate binary. - # Reason: it must not export symbols from SSL, mariadb-client, etc. to not break ABI compatibility with ODBC drivers. - set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--no-export-dynamic") -endif () - -add_executable(clickhouse-odbc-bridge odbc-bridge.cpp) -set_target_properties(clickhouse-odbc-bridge PROPERTIES RUNTIME_OUTPUT_DIRECTORY ..) - -clickhouse_program_link_split_binary(odbc-bridge) - -if (USE_GDB_ADD_INDEX) - add_custom_command(TARGET clickhouse-odbc-bridge POST_BUILD COMMAND ${GDB_ADD_INDEX_EXE} ../clickhouse-odbc-bridge COMMENT "Adding .gdb-index to clickhouse-odbc-bridge" VERBATIM) -endif() - -install(TARGETS clickhouse-odbc-bridge RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse) - -if(ENABLE_TESTS) - add_subdirectory(tests) -endif() diff --git a/dbms/programs/odbc-bridge/ColumnInfoHandler.cpp b/dbms/programs/odbc-bridge/ColumnInfoHandler.cpp deleted file mode 100644 index 3dadc7632de..00000000000 --- a/dbms/programs/odbc-bridge/ColumnInfoHandler.cpp +++ /dev/null @@ -1,191 +0,0 @@ -#include "ColumnInfoHandler.h" -#include "getIdentifierQuote.h" -#if USE_POCO_SQLODBC || USE_POCO_DATAODBC - -#if USE_POCO_SQLODBC -#include -#include -#include -#define POCO_SQL_ODBC_CLASS Poco::SQL::ODBC -#endif -#if USE_POCO_DATAODBC -#include -#include -#include -#define POCO_SQL_ODBC_CLASS Poco::Data::ODBC -#endif - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "validateODBCConnectionString.h" - -namespace DB -{ -namespace -{ - DataTypePtr getDataType(SQLSMALLINT type) - { - const auto & factory = DataTypeFactory::instance(); - - switch (type) - { - case SQL_TINYINT: - return factory.get("Int8"); - case SQL_INTEGER: - return factory.get("Int32"); - case SQL_SMALLINT: - return factory.get("Int16"); - case SQL_BIGINT: - return factory.get("Int64"); - case SQL_FLOAT: - return factory.get("Float64"); - case SQL_REAL: - return factory.get("Float32"); - case SQL_DOUBLE: - return factory.get("Float64"); - case SQL_DATETIME: - return factory.get("DateTime"); - case SQL_TYPE_TIMESTAMP: - return factory.get("DateTime"); - case SQL_TYPE_DATE: - return factory.get("Date"); - default: - return factory.get("String"); - } - } -} - -namespace ErrorCodes -{ - extern const int ILLEGAL_TYPE_OF_ARGUMENT; -} - -void ODBCColumnsInfoHandler::handleRequest(Poco::Net::HTTPServerRequest & request, Poco::Net::HTTPServerResponse & response) -{ - Poco::Net::HTMLForm params(request, request.stream()); - LOG_TRACE(log, "Request URI: " + request.getURI()); - - auto process_error = [&response, this](const std::string & message) - { - response.setStatusAndReason(Poco::Net::HTTPResponse::HTTP_INTERNAL_SERVER_ERROR); - if (!response.sent()) - response.send() << message << std::endl; - LOG_WARNING(log, message); - }; - - if (!params.has("table")) - { - process_error("No 'table' param in request URL"); - return; - } - if (!params.has("connection_string")) - { - process_error("No 'connection_string' in request URL"); - return; - } - std::string schema_name = ""; - std::string table_name = params.get("table"); - 
std::string connection_string = params.get("connection_string"); - - if (params.has("schema")) - { - schema_name = params.get("schema"); - LOG_TRACE(log, "Will fetch info for table '" << schema_name + "." + table_name << "'"); - } - else - LOG_TRACE(log, "Will fetch info for table '" << table_name << "'"); - LOG_TRACE(log, "Got connection str '" << connection_string << "'"); - - try - { - const bool external_table_functions_use_nulls = Poco::NumberParser::parseBool(params.get("external_table_functions_use_nulls", "false")); - - POCO_SQL_ODBC_CLASS::SessionImpl session(validateODBCConnectionString(connection_string), DBMS_DEFAULT_CONNECT_TIMEOUT_SEC); - SQLHDBC hdbc = session.dbc().handle(); - - SQLHSTMT hstmt = nullptr; - - if (POCO_SQL_ODBC_CLASS::Utility::isError(SQLAllocStmt(hdbc, &hstmt))) - throw POCO_SQL_ODBC_CLASS::ODBCException("Could not allocate connection handle."); - - SCOPE_EXIT(SQLFreeStmt(hstmt, SQL_DROP)); - - /// TODO Why not do SQLColumns instead? - std::string name = schema_name.empty() ? table_name : schema_name + "." + table_name; - std::stringstream ss; - std::string input = "SELECT * FROM " + name + " WHERE 1 = 0"; - ParserQueryWithOutput parser; - ASTPtr select = parseQuery(parser, input.data(), input.data() + input.size(), "", 0); - - IAST::FormatSettings settings(ss, true); - settings.always_quote_identifiers = true; - - auto identifier_quote = getIdentifierQuote(hdbc); - if (identifier_quote.length() == 0) - settings.identifier_quoting_style = IdentifierQuotingStyle::None; - else if (identifier_quote[0] == '`') - settings.identifier_quoting_style = IdentifierQuotingStyle::Backticks; - else if (identifier_quote[0] == '"') - settings.identifier_quoting_style = IdentifierQuotingStyle::DoubleQuotes; - else - throw Exception("Can not map quote identifier '" + identifier_quote + "' to IdentifierQuotingStyle value", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); - - select->format(settings); - std::string query = ss.str(); - - LOG_TRACE(log, "Inferring structure with query '" << query << "'"); - - if (POCO_SQL_ODBC_CLASS::Utility::isError(POCO_SQL_ODBC_CLASS::SQLPrepare(hstmt, reinterpret_cast(query.data()), query.size()))) - throw POCO_SQL_ODBC_CLASS::DescriptorException(session.dbc()); - - if (POCO_SQL_ODBC_CLASS::Utility::isError(SQLExecute(hstmt))) - throw POCO_SQL_ODBC_CLASS::StatementException(hstmt); - - SQLSMALLINT cols = 0; - if (POCO_SQL_ODBC_CLASS::Utility::isError(SQLNumResultCols(hstmt, &cols))) - throw POCO_SQL_ODBC_CLASS::StatementException(hstmt); - - /// TODO cols not checked - - NamesAndTypesList columns; - for (SQLSMALLINT ncol = 1; ncol <= cols; ++ncol) - { - SQLSMALLINT type = 0; - /// TODO Why 301? - SQLCHAR column_name[301]; - - SQLSMALLINT is_nullable; - const auto result = POCO_SQL_ODBC_CLASS::SQLDescribeCol(hstmt, ncol, column_name, sizeof(column_name), nullptr, &type, nullptr, nullptr, &is_nullable); - if (POCO_SQL_ODBC_CLASS::Utility::isError(result)) - throw POCO_SQL_ODBC_CLASS::StatementException(hstmt); - - auto column_type = getDataType(type); - if (external_table_functions_use_nulls && is_nullable == SQL_NULLABLE) - { - column_type = std::make_shared(column_type); - } - - columns.emplace_back(reinterpret_cast(column_name), std::move(column_type)); - } - - WriteBufferFromHTTPServerResponse out(request, response, keep_alive_timeout); - writeStringBinary(columns.toString(), out); - } - catch (...) 
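The quote-character mapping used above, factored out on its own: an empty string reported by the ODBC driver means "no quoting", a backtick maps to MySQL-style backticks, a double quote maps to standard SQL double quotes, and anything else is rejected. A minimal sketch (enum and function names are illustrative):

#include <iostream>
#include <stdexcept>
#include <string>

enum class QuotingStyle { None, Backticks, DoubleQuotes };

QuotingStyle quotingStyleFor(const std::string & identifier_quote)
{
    if (identifier_quote.empty())
        return QuotingStyle::None;
    if (identifier_quote[0] == '`')
        return QuotingStyle::Backticks;
    if (identifier_quote[0] == '"')
        return QuotingStyle::DoubleQuotes;
    throw std::runtime_error("Cannot map quote identifier '" + identifier_quote + "'");
}

int main()
{
    std::cout << static_cast<int>(quotingStyleFor("\"")) << '\n';   // 2 (DoubleQuotes)
}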
- { - process_error("Error getting columns from ODBC '" + getCurrentExceptionMessage(false) + "'"); - tryLogCurrentException(log); - } -} -} -#endif diff --git a/dbms/programs/odbc-bridge/ColumnInfoHandler.h b/dbms/programs/odbc-bridge/ColumnInfoHandler.h deleted file mode 100644 index 426cea15b34..00000000000 --- a/dbms/programs/odbc-bridge/ColumnInfoHandler.h +++ /dev/null @@ -1,30 +0,0 @@ -#pragma once -#include -#include -#include -#include - -#if USE_POCO_SQLODBC || USE_POCO_DATAODBC -/** The structure of the table is taken from the query "SELECT * FROM table WHERE 1=0". - * TODO: It would be much better to utilize ODBC methods dedicated for columns description. - * If there is no such table, an exception is thrown. - */ -namespace DB -{ -class ODBCColumnsInfoHandler : public Poco::Net::HTTPRequestHandler -{ -public: - ODBCColumnsInfoHandler(size_t keep_alive_timeout_, std::shared_ptr context_) - : log(&Poco::Logger::get("ODBCColumnsInfoHandler")), keep_alive_timeout(keep_alive_timeout_), context(context_) - { - } - - void handleRequest(Poco::Net::HTTPServerRequest & request, Poco::Net::HTTPServerResponse & response) override; - -private: - Poco::Logger * log; - size_t keep_alive_timeout; - std::shared_ptr context; -}; -} -#endif diff --git a/dbms/programs/odbc-bridge/IdentifierQuoteHandler.cpp b/dbms/programs/odbc-bridge/IdentifierQuoteHandler.cpp deleted file mode 100644 index 303165a4d45..00000000000 --- a/dbms/programs/odbc-bridge/IdentifierQuoteHandler.cpp +++ /dev/null @@ -1,69 +0,0 @@ -#include "IdentifierQuoteHandler.h" -#if USE_POCO_SQLODBC || USE_POCO_DATAODBC - -#if USE_POCO_SQLODBC -#include -#include -#include -#define POCO_SQL_ODBC_CLASS Poco::SQL::ODBC -#endif -#if USE_POCO_DATAODBC -#include -#include -#include -#define POCO_SQL_ODBC_CLASS Poco::Data::ODBC -#endif - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "getIdentifierQuote.h" -#include "validateODBCConnectionString.h" - -namespace DB -{ -void IdentifierQuoteHandler::handleRequest(Poco::Net::HTTPServerRequest & request, Poco::Net::HTTPServerResponse & response) -{ - Poco::Net::HTMLForm params(request, request.stream()); - LOG_TRACE(log, "Request URI: " + request.getURI()); - - auto process_error = [&response, this](const std::string & message) - { - response.setStatusAndReason(Poco::Net::HTTPResponse::HTTP_INTERNAL_SERVER_ERROR); - if (!response.sent()) - response.send() << message << std::endl; - LOG_WARNING(log, message); - }; - - if (!params.has("connection_string")) - { - process_error("No 'connection_string' in request URL"); - return; - } - - try - { - std::string connection_string = params.get("connection_string"); - POCO_SQL_ODBC_CLASS::SessionImpl session(validateODBCConnectionString(connection_string), DBMS_DEFAULT_CONNECT_TIMEOUT_SEC); - SQLHDBC hdbc = session.dbc().handle(); - - auto identifier = getIdentifierQuote(hdbc); - - WriteBufferFromHTTPServerResponse out(request, response, keep_alive_timeout); - writeStringBinary(identifier, out); - } - catch (...) 
- { - process_error("Error getting identifier quote style from ODBC '" + getCurrentExceptionMessage(false) + "'"); - tryLogCurrentException(log); - } -} -} -#endif diff --git a/dbms/programs/odbc-bridge/IdentifierQuoteHandler.h b/dbms/programs/odbc-bridge/IdentifierQuoteHandler.h deleted file mode 100644 index 2d4cf0277be..00000000000 --- a/dbms/programs/odbc-bridge/IdentifierQuoteHandler.h +++ /dev/null @@ -1,28 +0,0 @@ -#pragma once -#include -#include -#include -#include - -#if USE_POCO_SQLODBC || USE_POCO_DATAODBC -/** This handler establish connection to database, and retrieve quote style identifier - */ -namespace DB -{ -class IdentifierQuoteHandler : public Poco::Net::HTTPRequestHandler -{ -public: - IdentifierQuoteHandler(size_t keep_alive_timeout_, std::shared_ptr context_) - : log(&Poco::Logger::get("IdentifierQuoteHandler")), keep_alive_timeout(keep_alive_timeout_), context(context_) - { - } - - void handleRequest(Poco::Net::HTTPServerRequest & request, Poco::Net::HTTPServerResponse & response) override; - -private: - Poco::Logger * log; - size_t keep_alive_timeout; - std::shared_ptr context; -}; -} -#endif diff --git a/dbms/programs/odbc-bridge/MainHandler.cpp b/dbms/programs/odbc-bridge/MainHandler.cpp deleted file mode 100644 index 3ae5f49f24b..00000000000 --- a/dbms/programs/odbc-bridge/MainHandler.cpp +++ /dev/null @@ -1,146 +0,0 @@ -#include "MainHandler.h" - -#include "validateODBCConnectionString.h" -#include -#include -#include -#include "ODBCBlockInputStream.h" -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -namespace DB -{ -namespace -{ - std::unique_ptr parseColumns(std::string && column_string) - { - std::unique_ptr sample_block = std::make_unique(); - auto names_and_types = NamesAndTypesList::parse(column_string); - for (const NameAndTypePair & column_data : names_and_types) - sample_block->insert({column_data.type, column_data.name}); - return sample_block; - } -} - -using PocoSessionPoolConstructor = std::function()>; -/** Is used to adjust max size of default Poco thread pool. See issue #750 - * Acquire the lock, resize pool and construct new Session. 
- */ -static std::shared_ptr createAndCheckResizePocoSessionPool(PocoSessionPoolConstructor pool_constr) -{ - static std::mutex mutex; - - Poco::ThreadPool & pool = Poco::ThreadPool::defaultPool(); - - /// NOTE: The lock don't guarantee that external users of the pool don't change its capacity - std::unique_lock lock(mutex); - - if (pool.available() == 0) - pool.addCapacity(2 * std::max(pool.capacity(), 1)); - - return pool_constr(); -} - -ODBCHandler::PoolPtr ODBCHandler::getPool(const std::string & connection_str) -{ - std::lock_guard lock(mutex); - if (!pool_map->count(connection_str)) - { - pool_map->emplace(connection_str, createAndCheckResizePocoSessionPool([connection_str] - { - return std::make_shared("ODBC", validateODBCConnectionString(connection_str)); - })); - } - return pool_map->at(connection_str); -} - -void ODBCHandler::handleRequest(Poco::Net::HTTPServerRequest & request, Poco::Net::HTTPServerResponse & response) -{ - Poco::Net::HTMLForm params(request, request.stream()); - LOG_TRACE(log, "Request URI: " + request.getURI()); - - auto process_error = [&response, this](const std::string & message) - { - response.setStatusAndReason(Poco::Net::HTTPResponse::HTTP_INTERNAL_SERVER_ERROR); - if (!response.sent()) - response.send() << message << std::endl; - LOG_WARNING(log, message); - }; - - if (!params.has("query")) - { - process_error("No 'query' in request body"); - return; - } - - if (!params.has("columns")) - { - process_error("No 'columns' in request URL"); - return; - } - - if (!params.has("connection_string")) - { - process_error("No 'connection_string' in request URL"); - return; - } - - UInt64 max_block_size = DEFAULT_BLOCK_SIZE; - if (params.has("max_block_size")) - { - std::string max_block_size_str = params.get("max_block_size", ""); - if (max_block_size_str.empty()) - { - process_error("Empty max_block_size specified"); - return; - } - max_block_size = parse(max_block_size_str); - } - - std::string columns = params.get("columns"); - std::unique_ptr sample_block; - try - { - sample_block = parseColumns(std::move(columns)); - } - catch (const Exception & ex) - { - process_error("Invalid 'columns' parameter in request body '" + ex.message() + "'"); - LOG_WARNING(log, ex.getStackTraceString()); - return; - } - - std::string format = params.get("format", "RowBinary"); - std::string query = params.get("query"); - LOG_TRACE(log, "Query: " << query); - - std::string connection_string = params.get("connection_string"); - LOG_TRACE(log, "Connection string: '" << connection_string << "'"); - - WriteBufferFromHTTPServerResponse out(request, response, keep_alive_timeout); - try - { - BlockOutputStreamPtr writer = FormatFactory::instance().getOutput(format, out, *sample_block, *context); - auto pool = getPool(connection_string); - ODBCBlockInputStream inp(pool->get(), query, *sample_block, max_block_size); - copyData(inp, *writer); - } - catch (...) 
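The pool-resizing rule guarding Poco::ThreadPool::defaultPool() above, reduced to its arithmetic: if no worker is available, grow capacity by 2 * max(capacity, 1) while holding a lock (which, as the original comment notes, still cannot stop other users of the pool from resizing it concurrently). A toy model with a sketched pool type:

#include <algorithm>
#include <iostream>
#include <mutex>

struct PoolSketch
{
    int capacity = 0;
    int used = 0;
    int available() const { return capacity - used; }
    void addCapacity(int n) { capacity += n; }
};

void ensureCapacity(PoolSketch & pool, std::mutex & m)
{
    std::unique_lock<std::mutex> lock(m);
    if (pool.available() == 0)
        pool.addCapacity(2 * std::max(pool.capacity, 1));
}

int main()
{
    PoolSketch pool;          // starts with zero capacity
    std::mutex m;
    ensureCapacity(pool, m);  // 0 available -> capacity becomes 2
    std::cout << pool.capacity << '\n';
}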
- { - auto message = getCurrentExceptionMessage(true); - response.setStatusAndReason( - Poco::Net::HTTPResponse::HTTP_INTERNAL_SERVER_ERROR); // can't call process_error, because of too soon response sending - writeStringBinary(message, out); - tryLogCurrentException(log); - } -} -} diff --git a/dbms/programs/odbc-bridge/getIdentifierQuote.cpp b/dbms/programs/odbc-bridge/getIdentifierQuote.cpp deleted file mode 100644 index 9ac0a56bfc6..00000000000 --- a/dbms/programs/odbc-bridge/getIdentifierQuote.cpp +++ /dev/null @@ -1,44 +0,0 @@ -#include "getIdentifierQuote.h" -#if USE_POCO_SQLODBC || USE_POCO_DATAODBC - -#if USE_POCO_SQLODBC -#include -#include -#include -#define POCO_SQL_ODBC_CLASS Poco::SQL::ODBC -#endif -#if USE_POCO_DATAODBC -#include -#include -#include -#define POCO_SQL_ODBC_CLASS Poco::Data::ODBC -#endif - - -namespace DB -{ -std::string getIdentifierQuote(SQLHDBC hdbc) -{ - std::string identifier; - - SQLSMALLINT t; - SQLRETURN r = POCO_SQL_ODBC_CLASS::SQLGetInfo(hdbc, SQL_IDENTIFIER_QUOTE_CHAR, nullptr, 0, &t); - - if (POCO_SQL_ODBC_CLASS::Utility::isError(r)) - throw POCO_SQL_ODBC_CLASS::ConnectionException(hdbc); - - if (t > 0) - { - // I have no idea, why to add '2' here, got from: contrib/poco/Data/ODBC/src/ODBCStatementImpl.cpp:60 (SQL_DRIVER_NAME) - identifier.resize(static_cast(t) + 2); - - if (POCO_SQL_ODBC_CLASS::Utility::isError(POCO_SQL_ODBC_CLASS::SQLGetInfo( - hdbc, SQL_IDENTIFIER_QUOTE_CHAR, &identifier[0], SQLSMALLINT((identifier.length() - 1) * sizeof(identifier[0])), &t))) - throw POCO_SQL_ODBC_CLASS::ConnectionException(hdbc); - - identifier.resize(static_cast(t)); - } - return identifier; -} -} -#endif diff --git a/dbms/programs/odbc-bridge/getIdentifierQuote.h b/dbms/programs/odbc-bridge/getIdentifierQuote.h deleted file mode 100644 index 30371b4060d..00000000000 --- a/dbms/programs/odbc-bridge/getIdentifierQuote.h +++ /dev/null @@ -1,22 +0,0 @@ -#pragma once - -#include -#include -#include -#include - -#if USE_POCO_SQLODBC || USE_POCO_DATAODBC - -#if USE_POCO_SQLODBC -#include -#endif -#if USE_POCO_DATAODBC -#include -#endif - -namespace DB -{ - -std::string getIdentifierQuote(SQLHDBC hdbc); -} -#endif diff --git a/dbms/programs/performance-test/CMakeLists.txt b/dbms/programs/performance-test/CMakeLists.txt deleted file mode 100644 index 94e346c83cd..00000000000 --- a/dbms/programs/performance-test/CMakeLists.txt +++ /dev/null @@ -1,18 +0,0 @@ -set(CLICKHOUSE_PERFORMANCE_TEST_SOURCES - ${CMAKE_CURRENT_SOURCE_DIR}/JSONString.cpp - ${CMAKE_CURRENT_SOURCE_DIR}/StopConditionsSet.cpp - ${CMAKE_CURRENT_SOURCE_DIR}/TestStopConditions.cpp - ${CMAKE_CURRENT_SOURCE_DIR}/TestStats.cpp - ${CMAKE_CURRENT_SOURCE_DIR}/ConfigPreprocessor.cpp - ${CMAKE_CURRENT_SOURCE_DIR}/PerformanceTest.cpp - ${CMAKE_CURRENT_SOURCE_DIR}/PerformanceTestInfo.cpp - ${CMAKE_CURRENT_SOURCE_DIR}/executeQuery.cpp - ${CMAKE_CURRENT_SOURCE_DIR}/applySubstitutions.cpp - ${CMAKE_CURRENT_SOURCE_DIR}/ReportBuilder.cpp - ${CMAKE_CURRENT_SOURCE_DIR}/PerformanceTestSuite.cpp - ) - -set(CLICKHOUSE_PERFORMANCE_TEST_LINK PRIVATE dbms clickhouse_common_config ${Boost_FILESYSTEM_LIBRARY} ${Boost_PROGRAM_OPTIONS_LIBRARY}) -set(CLICKHOUSE_PERFORMANCE_TEST_INCLUDE SYSTEM PRIVATE ${PCG_RANDOM_INCLUDE_DIR}) - -clickhouse_program_add(performance-test) diff --git a/dbms/programs/performance-test/ConfigPreprocessor.cpp b/dbms/programs/performance-test/ConfigPreprocessor.cpp deleted file mode 100644 index 5da9650454a..00000000000 --- a/dbms/programs/performance-test/ConfigPreprocessor.cpp +++ /dev/null @@ -1,95 +0,0 
@@ -#include "ConfigPreprocessor.h" -#include -#include -#include -namespace DB -{ -std::vector ConfigPreprocessor::processConfig( - const Strings & tests_tags, - const Strings & tests_names, - const Strings & tests_names_regexp, - const Strings & skip_tags, - const Strings & skip_names, - const Strings & skip_names_regexp) const -{ - - std::vector result; - for (const auto & path_str : paths) - { - auto test = XMLConfigurationPtr(new XMLConfiguration(path_str)); - result.push_back(test); - - const auto path = Poco::Path(path_str); - test->setString("path", path.absolute().toString()); - if (test->getString("name", "") == "") - test->setString("name", path.getBaseName()); - } - - /// Leave tests: - removeConfigurationsIf(result, FilterType::Tag, tests_tags, true); - removeConfigurationsIf(result, FilterType::Name, tests_names, true); - removeConfigurationsIf(result, FilterType::Name_regexp, tests_names_regexp, true); - - /// Skip tests - removeConfigurationsIf(result, FilterType::Tag, skip_tags, false); - removeConfigurationsIf(result, FilterType::Name, skip_names, false); - removeConfigurationsIf(result, FilterType::Name_regexp, skip_names_regexp, false); - return result; -} - -void ConfigPreprocessor::removeConfigurationsIf( - std::vector & configs, - ConfigPreprocessor::FilterType filter_type, - const Strings & values, - bool leave) const -{ - auto checker = [&filter_type, &values, &leave] (XMLConfigurationPtr & config) - { - if (values.size() == 0) - return false; - - bool remove_or_not = false; - - if (filter_type == FilterType::Tag) - { - Strings tags_keys; - config->keys("tags", tags_keys); - - Strings tags(tags_keys.size()); - for (size_t i = 0; i != tags_keys.size(); ++i) - tags[i] = config->getString("tags.tag[" + std::to_string(i) + "]"); - - for (const std::string & config_tag : tags) - { - if (std::find(values.begin(), values.end(), config_tag) != values.end()) - remove_or_not = true; - } - } - - if (filter_type == FilterType::Name) - { - remove_or_not = (std::find(values.begin(), values.end(), config->getString("name", "")) != values.end()); - } - - if (filter_type == FilterType::Name_regexp) - { - std::string config_name = config->getString("name", ""); - auto regex_checker = [&config_name](const std::string & name_regexp) - { - std::regex pattern(name_regexp); - return std::regex_search(config_name, pattern); - }; - - remove_or_not = config->has("name") ? 
(std::find_if(values.begin(), values.end(), regex_checker) != values.end()) : false; - } - - if (leave) - remove_or_not = !remove_or_not; - return remove_or_not; - }; - - auto new_end = std::remove_if(configs.begin(), configs.end(), checker); - configs.erase(new_end, configs.end()); -} - -} diff --git a/dbms/programs/performance-test/ConfigPreprocessor.h b/dbms/programs/performance-test/ConfigPreprocessor.h deleted file mode 100644 index 375bf9503cb..00000000000 --- a/dbms/programs/performance-test/ConfigPreprocessor.h +++ /dev/null @@ -1,50 +0,0 @@ -#pragma once - -#include -#include -#include -#include -#include - -namespace DB -{ - -using XMLConfiguration = Poco::Util::XMLConfiguration; -using XMLConfigurationPtr = Poco::AutoPtr; -using XMLDocumentPtr = Poco::AutoPtr; - -class ConfigPreprocessor -{ -public: - ConfigPreprocessor(const Strings & paths_) - : paths(paths_) - {} - - std::vector processConfig( - const Strings & tests_tags, - const Strings & tests_names, - const Strings & tests_names_regexp, - const Strings & skip_tags, - const Strings & skip_names, - const Strings & skip_names_regexp) const; - -private: - - enum class FilterType - { - Tag, - Name, - Name_regexp - }; - - /// Removes configurations that has a given value. - /// If leave is true, the logic is reversed. - void removeConfigurationsIf( - std::vector & configs, - FilterType filter_type, - const Strings & values, - bool leave = false) const; - - const Strings paths; -}; -} diff --git a/dbms/programs/performance-test/JSONString.cpp b/dbms/programs/performance-test/JSONString.cpp deleted file mode 100644 index d25e190be50..00000000000 --- a/dbms/programs/performance-test/JSONString.cpp +++ /dev/null @@ -1,66 +0,0 @@ -#include "JSONString.h" - -#include -#include -namespace DB -{ - -namespace -{ -std::string pad(size_t padding) -{ - return std::string(padding * 4, ' '); -} - -const std::regex NEW_LINE{"\n"}; -} - -void JSONString::set(const std::string & key, std::string value, bool wrap) -{ - if (value.empty()) - value = "null"; - - bool reserved = (value[0] == '[' || value[0] == '{' || value == "null"); - if (!reserved && wrap) - value = '"' + std::regex_replace(value, NEW_LINE, "\\n") + '"'; - - content[key] = value; -} - -void JSONString::set(const std::string & key, const std::vector & run_infos) -{ - std::ostringstream value; - value << "[\n"; - - for (size_t i = 0; i < run_infos.size(); ++i) - { - value << pad(padding + 1) + run_infos[i].asString(padding + 2); - if (i != run_infos.size() - 1) - value << ','; - - value << "\n"; - } - - value << pad(padding) << ']'; - content[key] = value.str(); -} - -std::string JSONString::asString(size_t cur_padding) const -{ - std::ostringstream repr; - repr << "{"; - - for (auto it = content.begin(); it != content.end(); ++it) - { - if (it != content.begin()) - repr << ','; - /// construct "key": "value" string with padding - repr << "\n" << pad(cur_padding) << '"' << it->first << '"' << ": " << it->second; - } - - repr << "\n" << pad(cur_padding - 1) << '}'; - return repr.str(); -} - - -} diff --git a/dbms/programs/performance-test/JSONString.h b/dbms/programs/performance-test/JSONString.h deleted file mode 100644 index ebd850877d7..00000000000 --- a/dbms/programs/performance-test/JSONString.h +++ /dev/null @@ -1,40 +0,0 @@ -#pragma once -#include - -#include -#include -#include -#include - -namespace DB -{ - -/// NOTE The code is totally wrong. 
-class JSONString -{ -private: - std::map content; - size_t padding; - -public: - explicit JSONString(size_t padding_ = 1) : padding(padding_) {} - - void set(const std::string & key, std::string value, bool wrap = true); - - template - std::enable_if_t> set(const std::string key, T value) - { - set(key, std::to_string(value), /*wrap= */ false); - } - - void set(const std::string & key, const std::vector & run_infos); - - std::string asString() const - { - return asString(padding); - } - - std::string asString(size_t cur_padding) const; -}; - -} diff --git a/dbms/programs/performance-test/PerformanceTest.cpp b/dbms/programs/performance-test/PerformanceTest.cpp deleted file mode 100644 index e2c5c0d8741..00000000000 --- a/dbms/programs/performance-test/PerformanceTest.cpp +++ /dev/null @@ -1,345 +0,0 @@ -#include "PerformanceTest.h" - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include "executeQuery.h" - - -namespace DB -{ - -namespace ErrorCodes -{ -extern const int NOT_IMPLEMENTED; -} - -namespace -{ -void waitQuery(Connection & connection) -{ - bool finished = false; - - while (true) - { - if (!connection.poll(1000000)) - continue; - - Packet packet = connection.receivePacket(); - switch (packet.type) - { - case Protocol::Server::EndOfStream: - finished = true; - break; - case Protocol::Server::Exception: - throw *packet.exception; - } - - if (finished) - break; - } -} -} - -namespace fs = std::filesystem; - -PerformanceTest::PerformanceTest( - const XMLConfigurationPtr & config_, - Connection & connection_, - const ConnectionTimeouts & timeouts_, - InterruptListener & interrupt_listener_, - const PerformanceTestInfo & test_info_, - Context & context_, - const std::vector & queries_to_run_) - : config(config_) - , connection(connection_) - , timeouts(timeouts_) - , interrupt_listener(interrupt_listener_) - , test_info(test_info_) - , context(context_) - , queries_to_run(queries_to_run_) - , log(&Poco::Logger::get("PerformanceTest")) -{ -} - -bool PerformanceTest::checkPreconditions() const -{ - if (!config->has("preconditions")) - return true; - - Strings preconditions; - config->keys("preconditions", preconditions); - size_t table_precondition_index = 0; - size_t cpu_precondition_index = 0; - - for (const std::string & precondition : preconditions) - { - if (precondition == "ram_size") - { - size_t ram_size_needed = config->getUInt64("preconditions.ram_size"); - size_t actual_ram = getMemoryAmount(); - if (!actual_ram) - throw Exception("ram_size precondition not available on this platform", ErrorCodes::NOT_IMPLEMENTED); - - if (ram_size_needed > actual_ram) - { - LOG_WARNING(log, "Not enough RAM: need = " << ram_size_needed << ", present = " << actual_ram); - return false; - } - } - - if (precondition == "table_exists") - { - std::string precondition_key = "preconditions.table_exists[" + std::to_string(table_precondition_index++) + "]"; - std::string table_to_check = config->getString(precondition_key); - std::string query = "EXISTS TABLE " + table_to_check + ";"; - - size_t exist = 0; - - connection.sendQuery(timeouts, query, "", QueryProcessingStage::Complete, &test_info.settings, nullptr, false); - - while (true) - { - Packet packet = connection.receivePacket(); - - if (packet.type == Protocol::Server::Data) - { - for (const ColumnWithTypeAndName & column : packet.block) - { - if (column.name == "result" && column.column->size() > 0) - { - exist = column.column->get64(0); - if (exist) - break; - } - } 
- } - - if (packet.type == Protocol::Server::Exception - || packet.type == Protocol::Server::EndOfStream) - break; - } - - if (!exist) - { - LOG_WARNING(log, "Table " << backQuote(table_to_check) << " doesn't exist"); - return false; - } - } - - if (precondition == "cpu") - { - std::string precondition_key = "preconditions.cpu[" + std::to_string(cpu_precondition_index++) + "]"; - std::string flag_to_check = config->getString(precondition_key); - - #define CHECK_CPU_PRECONDITION(OP) \ - if (flag_to_check == #OP) \ - { \ - if (!Cpu::CpuFlagsCache::have_##OP) \ - { \ - LOG_WARNING(log, "CPU doesn't support " << #OP); \ - return false; \ - } \ - } else - - CPU_ID_ENUMERATE(CHECK_CPU_PRECONDITION) - { - LOG_WARNING(log, "CPU doesn't support " << flag_to_check); - return false; - } - - #undef CHECK_CPU_PRECONDITION - } - } - - return true; -} - - -UInt64 PerformanceTest::calculateMaxExecTime() const -{ - - UInt64 result = 0; - for (const auto & stop_conditions : test_info.stop_conditions_by_run) - { - UInt64 condition_max_time = stop_conditions.getMaxExecTime(); - if (condition_max_time == 0) - return 0; - result += condition_max_time; - } - return result; -} - - -void PerformanceTest::prepare() const -{ - for (const auto & query : test_info.create_and_fill_queries) - { - LOG_INFO(log, "Executing create or fill query \"" << query << '\"'); - connection.sendQuery(timeouts, query, "", QueryProcessingStage::Complete, &test_info.settings, nullptr, false); - waitQuery(connection); - LOG_INFO(log, "Query finished"); - } - -} - -void PerformanceTest::finish() const -{ - for (const auto & query : test_info.drop_queries) - { - LOG_INFO(log, "Executing drop query \"" << query << '\"'); - connection.sendQuery(timeouts, query, "", QueryProcessingStage::Complete, &test_info.settings, nullptr, false); - waitQuery(connection); - LOG_INFO(log, "Query finished"); - } -} - -std::vector PerformanceTest::execute() -{ - std::vector statistics_by_run; - size_t query_count; - if (queries_to_run.empty()) - query_count = test_info.queries.size(); - else - query_count = queries_to_run.size(); - size_t total_runs = test_info.times_to_run * test_info.queries.size(); - statistics_by_run.resize(total_runs); - LOG_INFO(log, "Totally will run cases " << test_info.times_to_run * query_count << " times"); - UInt64 max_exec_time = calculateMaxExecTime(); - if (max_exec_time != 0) - LOG_INFO(log, "Test will be executed for a maximum of " << max_exec_time / 1000. << " seconds"); - else - LOG_INFO(log, "Test execution time cannot be determined"); - - for (size_t number_of_launch = 0; number_of_launch < test_info.times_to_run; ++number_of_launch) - { - QueriesWithIndexes queries_with_indexes; - - for (size_t query_index = 0; query_index < test_info.queries.size(); ++query_index) - { - if (queries_to_run.empty() || std::find(queries_to_run.begin(), queries_to_run.end(), query_index) != queries_to_run.end()) - { - size_t statistic_index = number_of_launch * test_info.queries.size() + query_index; - queries_with_indexes.push_back({test_info.queries[query_index], statistic_index}); - } - else - LOG_INFO(log, "Will skip query " << test_info.queries[query_index] << " by index"); - } - - if (got_SIGINT) - break; - - runQueries(queries_with_indexes, statistics_by_run); - } - - if (got_SIGINT) - { - return statistics_by_run; - } - - // Pull memory usage data from query log. The log is normally filled in - // background, so we have to flush it synchronously here to see all the - // previous queries. 
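The flush mentioned in the comment above is needed because system.query_log is filled asynchronously; the memory figures are then read back per query_id. Roughly, the two statements involved look like this ("type = 2" is the QueryFinish event; the query id below is a made-up placeholder):

    #include <iostream>
    #include <string>

    // Sketch only, not part of the patch.
    const std::string flush_logs = "SYSTEM FLUSH LOGS";

    std::string memoryUsageQuery(const std::string & query_id)
    {
        // The same query_id may have several finished runs; take the latest one.
        return "SELECT memory_usage, query_start_time FROM system.query_log "
               "WHERE type = 2 AND query_id = '" + query_id + "' "   // type = 2: QueryFinish
               "ORDER BY query_start_time DESC";
    }

    int main()
    {
        std::cout << flush_logs << ";\n" << memoryUsageQuery("placeholder-query-id") << ";\n";
    }
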
- { - NullBlockOutputStream null_output(Block{}); - RemoteBlockInputStream flush_log(connection, "system flush logs", - {} /* header */, context); - copyData(flush_log, null_output); - } - - for (auto & statistics : statistics_by_run) - { - if (statistics.query_id.empty()) - { - // We have statistics structs for skipped queries as well, so we - // have to filter them out. - continue; - } - - // We run some test queries several times, specifying the same query id, - // so this query to the log may return several records. Choose the - // last one, because this is when the query performance has stabilized. - RemoteBlockInputStream log_reader(connection, - "select memory_usage, query_start_time from system.query_log " - "where type = 2 and query_id = '" + statistics.query_id + "' " - "order by query_start_time desc", - {} /* header */, context); - - log_reader.readPrefix(); - Block block = log_reader.read(); - if (block.columns() == 0) - { - LOG_WARNING(log, "Query '" << statistics.query_id << "' is not found in query log."); - continue; - } - - auto column = block.getByName("memory_usage").column; - statistics.memory_usage = column->get64(0); - - log_reader.readSuffix(); - } - - return statistics_by_run; -} - -void PerformanceTest::runQueries( - const QueriesWithIndexes & queries_with_indexes, - std::vector & statistics_by_run) -{ - for (const auto & [query, run_index] : queries_with_indexes) - { - LOG_INFO(log, "[" << run_index<< "] Run query '" << query << "'"); - TestStopConditions & stop_conditions = test_info.stop_conditions_by_run[run_index]; - TestStats & statistics = statistics_by_run[run_index]; - statistics.startWatches(); - try - { - executeQuery(connection, query, statistics, stop_conditions, interrupt_listener, context, test_info.settings); - - if (test_info.exec_type == ExecutionType::Loop) - { - LOG_INFO(log, "Will run query in loop"); - for (size_t iteration = 1; !statistics.got_SIGINT; ++iteration) - { - stop_conditions.reportIterations(iteration); - if (stop_conditions.areFulfilled()) - { - LOG_INFO(log, "Stop conditions fulfilled"); - break; - } - - executeQuery(connection, query, statistics, stop_conditions, interrupt_listener, context, test_info.settings); - } - } - } - catch (const Exception & e) - { - statistics.exception = "Code: " + std::to_string(e.code()) + ", e.displayText() = " + e.displayText(); - LOG_WARNING(log, "Code: " << e.code() << ", e.displayText() = " << e.displayText() - << ", Stack trace:\n\n" << e.getStackTraceString()); - } - - if (!statistics.got_SIGINT) - statistics.ready = true; - else - { - got_SIGINT = true; - LOG_INFO(log, "Got SIGINT, will terminate as soon as possible"); - break; - } - } -} - - -} diff --git a/dbms/programs/performance-test/PerformanceTest.h b/dbms/programs/performance-test/PerformanceTest.h deleted file mode 100644 index 961a348d099..00000000000 --- a/dbms/programs/performance-test/PerformanceTest.h +++ /dev/null @@ -1,62 +0,0 @@ -#pragma once - -#include -#include -#include -#include - -#include -#include "PerformanceTestInfo.h" - -namespace DB -{ - -using XMLConfiguration = Poco::Util::XMLConfiguration; -using XMLConfigurationPtr = Poco::AutoPtr; -using QueriesWithIndexes = std::vector>; - -class PerformanceTest -{ -public: - PerformanceTest( - const XMLConfigurationPtr & config_, - Connection & connection_, - const ConnectionTimeouts & timeouts_, - InterruptListener & interrupt_listener_, - const PerformanceTestInfo & test_info_, - Context & context_, - const std::vector & queries_to_run_); - - bool checkPreconditions() 
const; - void prepare() const; - std::vector execute(); - void finish() const; - - bool checkSIGINT() const - { - return got_SIGINT; - } - -private: - void runQueries( - const QueriesWithIndexes & queries_with_indexes, - std::vector & statistics_by_run); - - UInt64 calculateMaxExecTime() const; - -private: - XMLConfigurationPtr config; - Connection & connection; - const ConnectionTimeouts & timeouts; - InterruptListener & interrupt_listener; - - PerformanceTestInfo test_info; - Context & context; - - std::vector queries_to_run; - Poco::Logger * log; - - bool got_SIGINT = false; -}; - -} diff --git a/dbms/programs/performance-test/PerformanceTestInfo.cpp b/dbms/programs/performance-test/PerformanceTestInfo.cpp deleted file mode 100644 index b0f877abfc7..00000000000 --- a/dbms/programs/performance-test/PerformanceTestInfo.cpp +++ /dev/null @@ -1,200 +0,0 @@ -#include "PerformanceTestInfo.h" -#include -#include -#include -#include -#include -#include "applySubstitutions.h" -#include -#include - - -namespace DB -{ -namespace ErrorCodes -{ -extern const int BAD_ARGUMENTS; -} - -namespace -{ - -void extractSettings( - const XMLConfigurationPtr & config, - const std::string & key, - const Strings & settings_list, - SettingsChanges & settings_to_apply) -{ - for (const std::string & setup : settings_list) - { - if (setup == "profile") - continue; - - std::string value = config->getString(key + "." + setup); - if (value.empty()) - value = "true"; - - settings_to_apply.emplace_back(SettingChange{setup, value}); - } -} - -} - - -namespace fs = std::filesystem; - -PerformanceTestInfo::PerformanceTestInfo( - XMLConfigurationPtr config, - const Settings & global_settings_) - : settings(global_settings_) -{ - path = config->getString("path"); - test_name = fs::path(path).stem().string(); - applySettings(config); - extractQueries(config); - extractAuxiliaryQueries(config); - processSubstitutions(config); - getExecutionType(config); - getStopConditions(config); -} - -void PerformanceTestInfo::applySettings(XMLConfigurationPtr config) -{ - if (config->has("settings")) - { - SettingsChanges settings_to_apply; - Strings config_settings; - config->keys("settings", config_settings); - extractSettings(config, "settings", config_settings, settings_to_apply); - settings.applyChanges(settings_to_apply); - } -} - -void PerformanceTestInfo::extractQueries(XMLConfigurationPtr config) -{ - if (config->has("query")) - queries = getMultipleValuesFromConfig(*config, "", "query"); - - if (config->has("query_file")) - { - const std::string filename = config->getString("query_file"); - if (filename.empty()) - throw Exception("Empty file name", ErrorCodes::BAD_ARGUMENTS); - - bool tsv = fs::path(filename).extension().string() == ".tsv"; - - ReadBufferFromFile query_file(filename); - std::string query; - - if (tsv) - { - while (!query_file.eof()) - { - readEscapedString(query, query_file); - assertChar('\n', query_file); - queries.push_back(query); - } - } - else - { - readStringUntilEOF(query, query_file); - queries.push_back(query); - } - } - - if (queries.empty()) - throw Exception("Did not find any query to execute: " + test_name, - ErrorCodes::BAD_ARGUMENTS); -} - -void PerformanceTestInfo::processSubstitutions(XMLConfigurationPtr config) -{ - if (config->has("substitutions")) - { - /// Make "subconfig" of inner xml block - ConfigurationPtr substitutions_view(config->createView("substitutions")); - constructSubstitutions(substitutions_view, substitutions); - - auto create_and_fill_queries_preformat = 
create_and_fill_queries; - create_and_fill_queries.clear(); - for (const auto & query : create_and_fill_queries_preformat) - { - auto formatted = formatQueries(query, substitutions); - create_and_fill_queries.insert(create_and_fill_queries.end(), formatted.begin(), formatted.end()); - } - - auto queries_preformat = queries; - queries.clear(); - for (const auto & query : queries_preformat) - { - auto formatted = formatQueries(query, substitutions); - queries.insert(queries.end(), formatted.begin(), formatted.end()); - } - - auto drop_queries_preformat = drop_queries; - drop_queries.clear(); - for (const auto & query : drop_queries_preformat) - { - auto formatted = formatQueries(query, substitutions); - drop_queries.insert(drop_queries.end(), formatted.begin(), formatted.end()); - } - } -} - -void PerformanceTestInfo::getExecutionType(XMLConfigurationPtr config) -{ - if (!config->has("type")) - throw Exception("Missing type property in config: " + test_name, - ErrorCodes::BAD_ARGUMENTS); - - std::string config_exec_type = config->getString("type"); - if (config_exec_type == "loop") - exec_type = ExecutionType::Loop; - else if (config_exec_type == "once") - exec_type = ExecutionType::Once; - else - throw Exception("Unknown type " + config_exec_type + " in :" + test_name, - ErrorCodes::BAD_ARGUMENTS); -} - - -void PerformanceTestInfo::getStopConditions(XMLConfigurationPtr config) -{ - TestStopConditions stop_conditions_template; - if (config->has("stop_conditions")) - { - ConfigurationPtr stop_conditions_config(config->createView("stop_conditions")); - stop_conditions_template.loadFromConfig(stop_conditions_config); - } - - if (stop_conditions_template.empty()) - throw Exception("No termination conditions were found in config", - ErrorCodes::BAD_ARGUMENTS); - - times_to_run = config->getUInt("times_to_run", 1); - - for (size_t i = 0; i < times_to_run * queries.size(); ++i) - stop_conditions_by_run.push_back(stop_conditions_template); - -} - -void PerformanceTestInfo::extractAuxiliaryQueries(XMLConfigurationPtr config) -{ - if (config->has("create_query")) - { - create_and_fill_queries = getMultipleValuesFromConfig(*config, "", "create_query"); - } - - if (config->has("fill_query")) - { - auto fill_queries = getMultipleValuesFromConfig(*config, "", "fill_query"); - create_and_fill_queries.insert(create_and_fill_queries.end(), fill_queries.begin(), fill_queries.end()); - } - - if (config->has("drop_query")) - { - drop_queries = getMultipleValuesFromConfig(*config, "", "drop_query"); - } -} - -} diff --git a/dbms/programs/performance-test/PerformanceTestInfo.h b/dbms/programs/performance-test/PerformanceTestInfo.h deleted file mode 100644 index 8e6b1c5f43a..00000000000 --- a/dbms/programs/performance-test/PerformanceTestInfo.h +++ /dev/null @@ -1,55 +0,0 @@ -#pragma once -#include -#include -#include -#include -#include -#include - -#include "StopConditionsSet.h" -#include "TestStopConditions.h" -#include "TestStats.h" - -namespace DB -{ -enum class ExecutionType -{ - Loop, - Once -}; - -using XMLConfiguration = Poco::Util::XMLConfiguration; -using XMLConfigurationPtr = Poco::AutoPtr; -using StringToVector = std::map; - -/// Class containing all info to run performance test -class PerformanceTestInfo -{ -public: - PerformanceTestInfo(XMLConfigurationPtr config, const Settings & global_settings_); - - std::string test_name; - std::string path; - - Strings queries; - - Settings settings; - ExecutionType exec_type; - StringToVector substitutions; - size_t times_to_run; - - std::vector 
stop_conditions_by_run; - - Strings create_and_fill_queries; - Strings drop_queries; - -private: - void applySettings(XMLConfigurationPtr config); - void extractQueries(XMLConfigurationPtr config); - void processSubstitutions(XMLConfigurationPtr config); - void getExecutionType(XMLConfigurationPtr config); - void getStopConditions(XMLConfigurationPtr config); - void extractAuxiliaryQueries(XMLConfigurationPtr config); -}; - -} diff --git a/dbms/programs/performance-test/PerformanceTestSuite.cpp b/dbms/programs/performance-test/PerformanceTestSuite.cpp deleted file mode 100644 index 73ae2ae90ed..00000000000 --- a/dbms/programs/performance-test/PerformanceTestSuite.cpp +++ /dev/null @@ -1,416 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#include - -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "TestStopConditions.h" -#include "TestStats.h" -#include "ConfigPreprocessor.h" -#include "PerformanceTest.h" -#include "ReportBuilder.h" - - -namespace fs = std::filesystem; -namespace po = boost::program_options; - -namespace DB -{ -namespace ErrorCodes -{ - extern const int BAD_ARGUMENTS; - extern const int FILE_DOESNT_EXIST; -} - -/** Tests launcher for ClickHouse. - * The tool walks through given or default folder in order to find files with - * tests' descriptions and launches it. - */ -class PerformanceTestSuite -{ -public: - - PerformanceTestSuite(const std::string & host_, - const UInt16 port_, - const bool secure_, - const std::string & default_database_, - const std::string & user_, - const std::string & password_, - const Settings & cmd_settings, - const bool lite_output_, - Strings && input_files_, - Strings && tests_tags_, - Strings && skip_tags_, - Strings && tests_names_, - Strings && skip_names_, - Strings && tests_names_regexp_, - Strings && skip_names_regexp_, - const std::unordered_map> query_indexes_, - const ConnectionTimeouts & timeouts_) - : connection(host_, port_, default_database_, user_, - password_, "performance-test", Protocol::Compression::Enable, - secure_ ? Protocol::Secure::Enable : Protocol::Secure::Disable) - , timeouts(timeouts_) - , tests_tags(std::move(tests_tags_)) - , tests_names(std::move(tests_names_)) - , tests_names_regexp(std::move(tests_names_regexp_)) - , skip_tags(std::move(skip_tags_)) - , skip_names(std::move(skip_names_)) - , skip_names_regexp(std::move(skip_names_regexp_)) - , query_indexes(query_indexes_) - , lite_output(lite_output_) - , input_files(input_files_) - , log(&Poco::Logger::get("PerformanceTestSuite")) - { - global_context.makeGlobalContext(); - global_context.getSettingsRef().copyChangesFrom(cmd_settings); - if (input_files.size() < 1) - throw Exception("No tests were specified", ErrorCodes::BAD_ARGUMENTS); - } - - int run() - { - std::string name; - UInt64 version_major; - UInt64 version_minor; - UInt64 version_patch; - UInt64 version_revision; - connection.getServerVersion(timeouts, name, version_major, version_minor, version_patch, version_revision); - - std::stringstream ss; - ss << version_major << "." << version_minor << "." 
<< version_patch; - server_version = ss.str(); - - report_builder = std::make_shared(server_version); - - processTestsConfigurations(input_files); - - return 0; - } - -private: - Connection connection; - const ConnectionTimeouts & timeouts; - - const Strings & tests_tags; - const Strings & tests_names; - const Strings & tests_names_regexp; - const Strings & skip_tags; - const Strings & skip_names; - const Strings & skip_names_regexp; - std::unordered_map> query_indexes; - - Context global_context = Context::createGlobal(); - std::shared_ptr report_builder; - - std::string server_version; - - InterruptListener interrupt_listener; - - using XMLConfiguration = Poco::Util::XMLConfiguration; - using XMLConfigurationPtr = Poco::AutoPtr; - - bool lite_output; - - Strings input_files; - std::vector tests_configurations; - Poco::Logger * log; - - void processTestsConfigurations(const Strings & paths) - { - LOG_INFO(log, "Preparing test configurations"); - ConfigPreprocessor config_prep(paths); - tests_configurations = config_prep.processConfig( - tests_tags, - tests_names, - tests_names_regexp, - skip_tags, - skip_names, - skip_names_regexp); - - LOG_INFO(log, "Test configurations prepared"); - - if (tests_configurations.size()) - { - Strings outputs; - - for (auto & test_config : tests_configurations) - { - auto [output, signal] = runTest(test_config); - if (!output.empty()) - { - if (lite_output) - std::cout << output; - else - outputs.push_back(output); - } - if (signal) - break; - } - - if (!lite_output && outputs.size()) - { - std::cout << "[" << std::endl; - - for (size_t i = 0; i != outputs.size(); ++i) - { - std::cout << outputs[i]; - if (i != outputs.size() - 1) - std::cout << ","; - - std::cout << std::endl; - } - - std::cout << "]" << std::endl; - } - } - } - - std::pair runTest(XMLConfigurationPtr & test_config) - { - PerformanceTestInfo info(test_config, global_context.getSettingsRef()); - LOG_INFO(log, "Config for test '" << info.test_name << "' parsed"); - PerformanceTest current(test_config, connection, timeouts, interrupt_listener, info, global_context, query_indexes[info.path]); - - if (current.checkPreconditions()) - { - LOG_INFO(log, "Preconditions for test '" << info.test_name << "' are fulfilled"); - LOG_INFO( - log, - "Preparing for run, have " << info.create_and_fill_queries.size() << " create and fill queries"); - current.prepare(); - LOG_INFO(log, "Prepared"); - LOG_INFO(log, "Running test '" << info.test_name << "'"); - auto result = current.execute(); - LOG_INFO(log, "Test '" << info.test_name << "' finished"); - - LOG_INFO(log, "Running post run queries"); - current.finish(); - LOG_INFO(log, "Postqueries finished"); - if (lite_output) - return {report_builder->buildCompactReport(info, result, query_indexes[info.path]), current.checkSIGINT()}; - else - return {report_builder->buildFullReport(info, result, query_indexes[info.path]), current.checkSIGINT()}; - } - else - LOG_INFO(log, "Preconditions for test '" << info.test_name << "' are not fulfilled, skip run"); - - return {"", current.checkSIGINT()}; - } -}; - -} - -static void getFilesFromDir(const fs::path & dir, std::vector & input_files, const bool recursive = false) -{ - Poco::Logger * log = &Poco::Logger::get("PerformanceTestSuite"); - if (dir.extension().string() == ".xml") - LOG_WARNING(log, dir.string() + "' is a directory, but has .xml extension"); - - fs::directory_iterator end; - for (fs::directory_iterator it(dir); it != end; ++it) - { - const fs::path file = (*it); - if (recursive && 
fs::is_directory(file)) - getFilesFromDir(file, input_files, recursive); - else if (!fs::is_directory(file) && file.extension().string() == ".xml") - input_files.push_back(file.string()); - } -} - -static std::vector getInputFiles(const po::variables_map & options, Poco::Logger * log) -{ - std::vector input_files; - bool recursive = options.count("recursive"); - - if (!options.count("input-files")) - { - LOG_INFO(log, "Trying to find test scenario files in the current folder..."); - fs::path curr_dir("."); - - getFilesFromDir(curr_dir, input_files, recursive); - - if (input_files.empty()) - throw DB::Exception("Did not find any xml files", DB::ErrorCodes::BAD_ARGUMENTS); - } - else - { - input_files = options["input-files"].as>(); - - std::vector collected_files; - for (const std::string & filename : input_files) - { - fs::path file(filename); - - if (!fs::exists(file)) - throw DB::Exception("File '" + filename + "' does not exist", DB::ErrorCodes::FILE_DOESNT_EXIST); - - if (fs::is_directory(file)) - { - getFilesFromDir(file, collected_files, recursive); - } - else - { - if (file.extension().string() != ".xml") - throw DB::Exception("File '" + filename + "' does not have .xml extension", DB::ErrorCodes::BAD_ARGUMENTS); - collected_files.push_back(filename); - } - } - - input_files = std::move(collected_files); - } - - LOG_INFO(log, "Found " + std::to_string(input_files.size()) + " input files"); - std::sort(input_files.begin(), input_files.end()); - return input_files; -} - -static std::unordered_map> getTestQueryIndexes(const po::basic_parsed_options & parsed_opts) -{ - std::unordered_map> result; - const auto & options = parsed_opts.options; - if (options.empty()) - return result; - for (size_t i = 0; i < options.size() - 1; ++i) - { - const auto & opt = options[i]; - if (opt.string_key == "input-files") - { - if (options[i + 1].string_key == "query-indexes") - { - const std::string & test_path = Poco::Path(opt.value[0]).absolute().toString(); - for (const auto & query_num_str : options[i + 1].value) - { - size_t query_num = std::stoul(query_num_str); - result[test_path].push_back(query_num); - } - } - } - } - return result; -} - -#pragma GCC diagnostic ignored "-Wunused-function" -#pragma GCC diagnostic ignored "-Wmissing-declarations" - -int mainEntryClickHousePerformanceTest(int argc, char ** argv) -try -{ - using po::value; - using Strings = DB::Strings; - - po::options_description desc = createOptionsDescription("Allowed options", getTerminalWidth()); - desc.add_options() - ("help", "produce help message") - ("lite", "use lite version of output") - ("host,h", value()->default_value("localhost"), "") - ("port", value()->default_value(9000), "") - ("secure,s", "Use TLS connection") - ("database", value()->default_value("default"), "") - ("user", value()->default_value("default"), "") - ("password", value()->default_value(""), "") - ("log-level", value()->default_value("information"), "Set log level") - ("tags", value()->multitoken(), "Run only tests with tag") - ("skip-tags", value()->multitoken(), "Do not run tests with tag") - ("names", value()->multitoken(), "Run tests with specific name") - ("skip-names", value()->multitoken(), "Do not run tests with name") - ("names-regexp", value()->multitoken(), "Run tests with names matching regexp") - ("skip-names-regexp", value()->multitoken(), "Do not run tests with names matching regexp") - ("input-files", value()->multitoken(), "Input .xml files") - ("query-indexes", value>()->multitoken(), "Input query indexes") - ("recursive,r", 
"Recurse in directories to find all xml's") - ; - - DB::Settings cmd_settings; - cmd_settings.addProgramOptions(desc); - - po::options_description cmdline_options; - cmdline_options.add(desc); - - po::variables_map options; - po::basic_parsed_options parsed = po::command_line_parser(argc, argv).options(cmdline_options).run(); - auto queries_with_indexes = getTestQueryIndexes(parsed); - po::store(parsed, options); - - po::notify(options); - - Poco::AutoPtr formatter(new Poco::PatternFormatter("%Y.%m.%d %H:%M:%S.%F <%p> %s: %t")); - Poco::AutoPtr console_channel(new Poco::ConsoleChannel); - Poco::AutoPtr channel(new Poco::FormattingChannel(formatter, console_channel)); - - Poco::Logger::root().setLevel(options["log-level"].as()); - Poco::Logger::root().setChannel(channel); - - Poco::Logger * log = &Poco::Logger::get("PerformanceTestSuite"); - if (options.count("help")) - { - std::cout << "Usage: " << argv[0] << " [options]\n"; - std::cout << desc << "\n"; - return 0; - } - - Strings input_files = getInputFiles(options, log); - - Strings tests_tags = options.count("tags") ? options["tags"].as() : Strings({}); - Strings skip_tags = options.count("skip-tags") ? options["skip-tags"].as() : Strings({}); - Strings tests_names = options.count("names") ? options["names"].as() : Strings({}); - Strings skip_names = options.count("skip-names") ? options["skip-names"].as() : Strings({}); - Strings tests_names_regexp = options.count("names-regexp") ? options["names-regexp"].as() : Strings({}); - Strings skip_names_regexp = options.count("skip-names-regexp") ? options["skip-names-regexp"].as() : Strings({}); - - auto timeouts = DB::ConnectionTimeouts::getTCPTimeoutsWithoutFailover(DB::Settings()); - - DB::UseSSL use_ssl; - - DB::PerformanceTestSuite performance_test_suite( - options["host"].as(), - options["port"].as(), - options.count("secure"), - options["database"].as(), - options["user"].as(), - options["password"].as(), - cmd_settings, - options.count("lite") > 0, - std::move(input_files), - std::move(tests_tags), - std::move(skip_tags), - std::move(tests_names), - std::move(skip_names), - std::move(tests_names_regexp), - std::move(skip_names_regexp), - queries_with_indexes, - timeouts); - return performance_test_suite.run(); -} -catch (...) -{ - std::cout << DB::getCurrentExceptionMessage(/*with stacktrace = */ true) << std::endl; - int code = DB::getCurrentExceptionCode(); - return code ? 
code : 1; -} diff --git a/dbms/programs/performance-test/ReportBuilder.cpp b/dbms/programs/performance-test/ReportBuilder.cpp deleted file mode 100644 index c95b4d56a1e..00000000000 --- a/dbms/programs/performance-test/ReportBuilder.cpp +++ /dev/null @@ -1,210 +0,0 @@ -#include "ReportBuilder.h" - -#include -#include -#include -#include - -#include -#include -#include -#include - -#include "JSONString.h" - -namespace DB -{ - -namespace -{ -std::string getMainMetric(const PerformanceTestInfo & test_info) -{ - if (test_info.exec_type == ExecutionType::Loop) - return "min_time"; - else - return "rows_per_second"; -} - -bool isASCIIString(const std::string & str) -{ - return std::all_of(str.begin(), str.end(), isASCII); -} - -String jsonString(const String & str, FormatSettings & settings) -{ - WriteBufferFromOwnString buffer; - writeJSONString(str, buffer, settings); - return std::move(buffer.str()); -} -} - -ReportBuilder::ReportBuilder(const std::string & server_version_) - : server_version(server_version_) - , hostname(getFQDNOrHostName()) - , num_cores(getNumberOfPhysicalCPUCores()) - , num_threads(std::thread::hardware_concurrency()) - , ram(getMemoryAmount()) -{ -} - -std::string ReportBuilder::getCurrentTime() const -{ - return DateLUT::instance().timeToString(time(nullptr)); -} - -std::string ReportBuilder::buildFullReport( - const PerformanceTestInfo & test_info, - std::vector & stats, - const std::vector & queries_to_run) const -{ - FormatSettings settings; - - JSONString json_output; - - json_output.set("hostname", hostname); - json_output.set("num_cores", num_cores); - json_output.set("num_threads", num_threads); - json_output.set("ram", ram); - json_output.set("server_version", server_version); - json_output.set("time", getCurrentTime()); - json_output.set("test_name", test_info.test_name); - json_output.set("path", test_info.path); - - if (!test_info.substitutions.empty()) - { - JSONString json_parameters(2); /// here, 2 is the size of \t padding - - for (auto & [parameter, values] : test_info.substitutions) - { - std::ostringstream array_string; - array_string << "["; - for (size_t i = 0; i != values.size(); ++i) - { - array_string << jsonString(values[i], settings); - if (i != values.size() - 1) - { - array_string << ", "; - } - } - array_string << ']'; - - json_parameters.set(parameter, array_string.str()); - } - - json_output.set("parameters", json_parameters.asString()); - } - - std::vector run_infos; - for (size_t query_index = 0; query_index < test_info.queries.size(); ++query_index) - { - if (!queries_to_run.empty() && std::find(queries_to_run.begin(), queries_to_run.end(), query_index) == queries_to_run.end()) - continue; - - for (size_t number_of_launch = 0; number_of_launch < test_info.times_to_run; ++number_of_launch) - { - size_t stat_index = number_of_launch * test_info.queries.size() + query_index; - TestStats & statistics = stats[stat_index]; - - if (!statistics.ready) - continue; - - JSONString runJSON; - - runJSON.set("query", jsonString(test_info.queries[query_index], settings), false); - runJSON.set("query_index", query_index); - if (!statistics.exception.empty()) - { - if (isASCIIString(statistics.exception)) - runJSON.set("exception", jsonString(statistics.exception, settings), false); - else - runJSON.set("exception", "Some exception occurred with non ASCII message. This may produce invalid JSON. 
Try reproduce locally."); - } - - if (test_info.exec_type == ExecutionType::Loop) - { - /// in seconds - runJSON.set("min_time", statistics.min_time / double(1000)); - - if (statistics.sampler.size() != 0) - { - JSONString quantiles(4); /// here, 4 is the size of \t padding - for (double percent = 10; percent <= 90; percent += 10) - { - std::string quantile_key = std::to_string(percent / 100.0); - while (quantile_key.back() == '0') - quantile_key.pop_back(); - - quantiles.set(quantile_key, - statistics.sampler.quantileInterpolated(percent / 100.0)); - } - quantiles.set("0.95", - statistics.sampler.quantileInterpolated(95 / 100.0)); - quantiles.set("0.99", - statistics.sampler.quantileInterpolated(99 / 100.0)); - quantiles.set("0.999", - statistics.sampler.quantileInterpolated(99.9 / 100.0)); - quantiles.set("0.9999", - statistics.sampler.quantileInterpolated(99.99 / 100.0)); - - runJSON.set("quantiles", quantiles.asString()); - } - - runJSON.set("total_time", statistics.total_time); - - if (statistics.total_time != 0) - { - runJSON.set("queries_per_second", static_cast(statistics.queries) / statistics.total_time); - runJSON.set("rows_per_second", static_cast(statistics.total_rows_read) / statistics.total_time); - runJSON.set("bytes_per_second", static_cast(statistics.total_bytes_read) / statistics.total_time); - } - } - else - { - runJSON.set("max_rows_per_second", statistics.max_rows_speed); - runJSON.set("max_bytes_per_second", statistics.max_bytes_speed); - runJSON.set("avg_rows_per_second", statistics.avg_rows_speed_value); - runJSON.set("avg_bytes_per_second", statistics.avg_bytes_speed_value); - } - - runJSON.set("memory_usage", statistics.memory_usage); - - run_infos.push_back(runJSON); - } - } - - json_output.set("runs", run_infos); - - return json_output.asString(); -} - -std::string ReportBuilder::buildCompactReport( - const PerformanceTestInfo & test_info, - std::vector & stats, - const std::vector & queries_to_run) const -{ - FormatSettings settings; - std::ostringstream output; - - for (size_t query_index = 0; query_index < test_info.queries.size(); ++query_index) - { - if (!queries_to_run.empty() && std::find(queries_to_run.begin(), queries_to_run.end(), query_index) == queries_to_run.end()) - continue; - - for (size_t number_of_launch = 0; number_of_launch < test_info.times_to_run; ++number_of_launch) - { - if (test_info.queries.size() > 1) - output << "query " << jsonString(test_info.queries[query_index], settings) << ", "; - - output << "run " << std::to_string(number_of_launch + 1) << ": "; - - std::string main_metric = getMainMetric(test_info); - - output << main_metric << " = "; - size_t index = number_of_launch * test_info.queries.size() + query_index; - output << stats[index].getStatisticByName(main_metric); - output << "\n"; - } - } - return output.str(); -} -} diff --git a/dbms/programs/performance-test/ReportBuilder.h b/dbms/programs/performance-test/ReportBuilder.h deleted file mode 100644 index 473ba42b728..00000000000 --- a/dbms/programs/performance-test/ReportBuilder.h +++ /dev/null @@ -1,36 +0,0 @@ -#pragma once -#include "PerformanceTestInfo.h" -#include -#include - -namespace DB -{ - -class ReportBuilder -{ -public: - ReportBuilder(const std::string & server_version_); - std::string buildFullReport( - const PerformanceTestInfo & test_info, - std::vector & stats, - const std::vector & queries_to_run) const; - - - std::string buildCompactReport( - const PerformanceTestInfo & test_info, - std::vector & stats, - const std::vector & queries_to_run) const; - 
-private: - std::string server_version; - std::string hostname; - size_t num_cores; - size_t num_threads; - size_t ram; - -private: - std::string getCurrentTime() const; - -}; - -} diff --git a/dbms/programs/performance-test/StopConditionsSet.cpp b/dbms/programs/performance-test/StopConditionsSet.cpp deleted file mode 100644 index 58d3383e81c..00000000000 --- a/dbms/programs/performance-test/StopConditionsSet.cpp +++ /dev/null @@ -1,64 +0,0 @@ -#include "StopConditionsSet.h" -#include - -namespace DB -{ - -namespace ErrorCodes -{ -extern const int LOGICAL_ERROR; -} - -void StopConditionsSet::loadFromConfig(const ConfigurationPtr & stop_conditions_view) -{ - Strings keys; - stop_conditions_view->keys(keys); - - for (const std::string & key : keys) - { - if (key == "total_time_ms") - total_time_ms.value = stop_conditions_view->getUInt64(key); - else if (key == "rows_read") - rows_read.value = stop_conditions_view->getUInt64(key); - else if (key == "bytes_read_uncompressed") - bytes_read_uncompressed.value = stop_conditions_view->getUInt64(key); - else if (key == "iterations") - iterations.value = stop_conditions_view->getUInt64(key); - else if (key == "min_time_not_changing_for_ms") - min_time_not_changing_for_ms.value = stop_conditions_view->getUInt64(key); - else if (key == "max_speed_not_changing_for_ms") - max_speed_not_changing_for_ms.value = stop_conditions_view->getUInt64(key); - else if (key == "average_speed_not_changing_for_ms") - average_speed_not_changing_for_ms.value = stop_conditions_view->getUInt64(key); - else - throw Exception("Met unknown stop condition: " + key, ErrorCodes::LOGICAL_ERROR); - - ++initialized_count; - } -} - -void StopConditionsSet::reset() -{ - total_time_ms.fulfilled = false; - rows_read.fulfilled = false; - bytes_read_uncompressed.fulfilled = false; - iterations.fulfilled = false; - min_time_not_changing_for_ms.fulfilled = false; - max_speed_not_changing_for_ms.fulfilled = false; - average_speed_not_changing_for_ms.fulfilled = false; - - fulfilled_count = 0; -} - -void StopConditionsSet::report(UInt64 value, StopConditionsSet::StopCondition & condition) -{ - if (condition.value && !condition.fulfilled && value >= condition.value) - { - condition.fulfilled = true; - ++fulfilled_count; - } -} - - - -} diff --git a/dbms/programs/performance-test/StopConditionsSet.h b/dbms/programs/performance-test/StopConditionsSet.h deleted file mode 100644 index ad29c748a76..00000000000 --- a/dbms/programs/performance-test/StopConditionsSet.h +++ /dev/null @@ -1,39 +0,0 @@ -#pragma once - -#include -#include - -namespace DB -{ - -using ConfigurationPtr = Poco::AutoPtr; - -/// A set of supported stop conditions. -struct StopConditionsSet -{ - void loadFromConfig(const ConfigurationPtr & stop_conditions_view); - void reset(); - - /// Note: only conditions with UInt64 minimal thresholds are supported. - /// I.e. condition is fulfilled when value is exceeded. 
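As a compact restatement of the threshold semantics described in the note above (a zero threshold means the condition is not configured, and a configured condition trips the first time the reported value reaches it), a self-contained sketch with an invented 10-second total-time condition:

    #include <cassert>
    #include <cstdint>

    // Sketch only, not part of the patch.
    struct StopCondition
    {
        std::uint64_t value = 0;   // 0 means "not configured"
        bool fulfilled = false;
    };

    void report(std::uint64_t observed, StopCondition & condition, unsigned & fulfilled_count)
    {
        if (condition.value && !condition.fulfilled && observed >= condition.value)
        {
            condition.fulfilled = true;
            ++fulfilled_count;
        }
    }

    int main()
    {
        StopCondition total_time_ms{10000, false};  // invented threshold: stop after 10 s
        unsigned fulfilled = 0;
        report(5000, total_time_ms, fulfilled);
        assert(!total_time_ms.fulfilled);           // below the threshold, keep running
        report(12000, total_time_ms, fulfilled);
        assert(total_time_ms.fulfilled && fulfilled == 1);
    }
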
- struct StopCondition - { - UInt64 value = 0; - bool fulfilled = false; - }; - - void report(UInt64 value, StopCondition & condition); - - StopCondition total_time_ms; - StopCondition rows_read; - StopCondition bytes_read_uncompressed; - StopCondition iterations; - StopCondition min_time_not_changing_for_ms; - StopCondition max_speed_not_changing_for_ms; - StopCondition average_speed_not_changing_for_ms; - - size_t initialized_count = 0; - size_t fulfilled_count = 0; -}; - -} diff --git a/dbms/programs/performance-test/TestStats.cpp b/dbms/programs/performance-test/TestStats.cpp deleted file mode 100644 index 4a3ec281d90..00000000000 --- a/dbms/programs/performance-test/TestStats.cpp +++ /dev/null @@ -1,165 +0,0 @@ -#include "TestStats.h" -#include -namespace DB -{ - -namespace -{ -const std::string FOUR_SPACES = " "; -} - -std::string TestStats::getStatisticByName(const std::string & statistic_name) -{ - if (statistic_name == "min_time") - return std::to_string(min_time) + "ms"; - - if (statistic_name == "quantiles") - { - std::string result = "\n"; - - for (double percent = 10; percent <= 90; percent += 10) - { - result += FOUR_SPACES + std::to_string((percent / 100)); - result += ": " + std::to_string(sampler.quantileInterpolated(percent / 100.0)); - result += "\n"; - } - result += FOUR_SPACES + "0.95: " + std::to_string(sampler.quantileInterpolated(95 / 100.0)) + "\n"; - result += FOUR_SPACES + "0.99: " + std::to_string(sampler.quantileInterpolated(99 / 100.0)) + "\n"; - result += FOUR_SPACES + "0.999: " + std::to_string(sampler.quantileInterpolated(99.9 / 100.)) + "\n"; - result += FOUR_SPACES + "0.9999: " + std::to_string(sampler.quantileInterpolated(99.99 / 100.)); - - return result; - } - if (statistic_name == "total_time") - return std::to_string(total_time) + "s"; - - if (statistic_name == "queries_per_second") - return std::to_string(queries / total_time); - - if (statistic_name == "rows_per_second") - return std::to_string(total_rows_read / total_time); - - if (statistic_name == "bytes_per_second") - return std::to_string(total_bytes_read / total_time); - - if (statistic_name == "max_rows_per_second") - return std::to_string(max_rows_speed); - - if (statistic_name == "max_bytes_per_second") - return std::to_string(max_bytes_speed); - - if (statistic_name == "avg_rows_per_second") - return std::to_string(avg_rows_speed_value); - - if (statistic_name == "avg_bytes_per_second") - return std::to_string(avg_bytes_speed_value); - - return ""; -} - - -void TestStats::update_min_time(UInt64 min_time_candidate) -{ - if (min_time_candidate < min_time) - { - min_time = min_time_candidate; - min_time_watch.restart(); - } -} - -void TestStats::update_max_speed( - size_t max_speed_candidate, - Stopwatch & max_speed_watch, - UInt64 & max_speed) -{ - if (max_speed_candidate > max_speed) - { - max_speed = max_speed_candidate; - max_speed_watch.restart(); - } -} - - -void TestStats::update_average_speed( - double new_speed_info, - Stopwatch & avg_speed_watch, - size_t & number_of_info_batches, - double precision, - double & avg_speed_first, - double & avg_speed_value) -{ - avg_speed_value = ((avg_speed_value * number_of_info_batches) + new_speed_info); - ++number_of_info_batches; - avg_speed_value /= number_of_info_batches; - - if (avg_speed_first == 0) - avg_speed_first = avg_speed_value; - - auto [min, max] = std::minmax(avg_speed_value, avg_speed_first); - if (1 - min / max >= precision) - { - avg_speed_first = avg_speed_value; - avg_speed_watch.restart(); - } -} - -void 
TestStats::add(size_t rows_read_inc, size_t bytes_read_inc) -{ - total_rows_read += rows_read_inc; - total_bytes_read += bytes_read_inc; - last_query_rows_read += rows_read_inc; - last_query_bytes_read += bytes_read_inc; - - double new_rows_speed = last_query_rows_read / watch_per_query.elapsedSeconds(); - double new_bytes_speed = last_query_bytes_read / watch_per_query.elapsedSeconds(); - - /// Update rows speed - update_max_speed(new_rows_speed, max_rows_speed_watch, max_rows_speed); - update_average_speed(new_rows_speed, - avg_rows_speed_watch, - number_of_rows_speed_info_batches, - avg_rows_speed_precision, - avg_rows_speed_first, - avg_rows_speed_value); - /// Update bytes speed - update_max_speed(new_bytes_speed, max_bytes_speed_watch, max_bytes_speed); - update_average_speed(new_bytes_speed, - avg_bytes_speed_watch, - number_of_bytes_speed_info_batches, - avg_bytes_speed_precision, - avg_bytes_speed_first, - avg_bytes_speed_value); -} - -void TestStats::updateQueryInfo() -{ - ++queries; - sampler.insert(watch_per_query.elapsedSeconds()); - update_min_time(watch_per_query.elapsed() / (1000 * 1000)); /// ns to ms -} - - -TestStats::TestStats() -{ - watch.reset(); - watch_per_query.reset(); - min_time_watch.reset(); - max_rows_speed_watch.reset(); - max_bytes_speed_watch.reset(); - avg_rows_speed_watch.reset(); - avg_bytes_speed_watch.reset(); -} - - -void TestStats::startWatches() -{ - watch.start(); - watch_per_query.start(); - min_time_watch.start(); - max_rows_speed_watch.start(); - max_bytes_speed_watch.start(); - avg_rows_speed_watch.start(); - avg_bytes_speed_watch.start(); -} - -} diff --git a/dbms/programs/performance-test/TestStats.h b/dbms/programs/performance-test/TestStats.h deleted file mode 100644 index b38ffa7386a..00000000000 --- a/dbms/programs/performance-test/TestStats.h +++ /dev/null @@ -1,90 +0,0 @@ -#pragma once - -#include -#include -#include -#include - -namespace DB -{ -struct TestStats -{ - TestStats(); - Stopwatch watch; - Stopwatch watch_per_query; - Stopwatch min_time_watch; - Stopwatch max_rows_speed_watch; - Stopwatch max_bytes_speed_watch; - Stopwatch avg_rows_speed_watch; - Stopwatch avg_bytes_speed_watch; - - bool last_query_was_cancelled = false; - std::string query_id; - - size_t queries = 0; - - size_t total_rows_read = 0; - size_t total_bytes_read = 0; - - size_t last_query_rows_read = 0; - size_t last_query_bytes_read = 0; - - using Sampler = ReservoirSampler; - Sampler sampler{1 << 16}; - - /// min_time in ms - UInt64 min_time = std::numeric_limits::max(); - double total_time = 0; - - UInt64 max_rows_speed = 0; - UInt64 max_bytes_speed = 0; - - double avg_rows_speed_value = 0; - double avg_rows_speed_first = 0; - static inline double avg_rows_speed_precision = 0.005; - - double avg_bytes_speed_value = 0; - double avg_bytes_speed_first = 0; - static inline double avg_bytes_speed_precision = 0.005; - - size_t number_of_rows_speed_info_batches = 0; - size_t number_of_bytes_speed_info_batches = 0; - - UInt64 memory_usage = 0; - - bool ready = false; // check if a query wasn't interrupted by SIGINT - std::string exception; - - /// Hack, actually this field doesn't required for statistics - bool got_SIGINT = false; - - std::string getStatisticByName(const std::string & statistic_name); - - void update_min_time(UInt64 min_time_candidate); - - void update_average_speed( - double new_speed_info, - Stopwatch & avg_speed_watch, - size_t & number_of_info_batches, - double precision, - double & avg_speed_first, - double & avg_speed_value); - - void 
update_max_speed( - size_t max_speed_candidate, - Stopwatch & max_speed_watch, - UInt64 & max_speed); - - void add(size_t rows_read_inc, size_t bytes_read_inc); - - void updateQueryInfo(); - - void setTotalTime() - { - total_time = watch.elapsedSeconds(); - } - - void startWatches(); -}; - -} diff --git a/dbms/programs/performance-test/TestStopConditions.cpp b/dbms/programs/performance-test/TestStopConditions.cpp deleted file mode 100644 index b88526b0261..00000000000 --- a/dbms/programs/performance-test/TestStopConditions.cpp +++ /dev/null @@ -1,38 +0,0 @@ -#include "TestStopConditions.h" - -namespace DB -{ - -void TestStopConditions::loadFromConfig(ConfigurationPtr & stop_conditions_config) -{ - if (stop_conditions_config->has("all_of")) - { - ConfigurationPtr config_all_of(stop_conditions_config->createView("all_of")); - conditions_all_of.loadFromConfig(config_all_of); - } - if (stop_conditions_config->has("any_of")) - { - ConfigurationPtr config_any_of(stop_conditions_config->createView("any_of")); - conditions_any_of.loadFromConfig(config_any_of); - } -} - -bool TestStopConditions::areFulfilled() const -{ - return (conditions_all_of.initialized_count && conditions_all_of.fulfilled_count >= conditions_all_of.initialized_count) - || (conditions_any_of.initialized_count && conditions_any_of.fulfilled_count); -} - -UInt64 TestStopConditions::getMaxExecTime() const -{ - UInt64 all_of_time = conditions_all_of.total_time_ms.value; - if (all_of_time == 0 && conditions_all_of.initialized_count != 0) /// max time is not set in all conditions - return 0; - else if(all_of_time != 0 && conditions_all_of.initialized_count > 1) /// max time is set, but we have other conditions - return 0; - - UInt64 any_of_time = conditions_any_of.total_time_ms.value; - return std::max(all_of_time, any_of_time); -} - -} diff --git a/dbms/programs/performance-test/TestStopConditions.h b/dbms/programs/performance-test/TestStopConditions.h deleted file mode 100644 index 2dcbcce4674..00000000000 --- a/dbms/programs/performance-test/TestStopConditions.h +++ /dev/null @@ -1,57 +0,0 @@ -#pragma once -#include "StopConditionsSet.h" -#include - -namespace DB -{ -/// Stop conditions for a test run. The running test will be terminated in either of two conditions: -/// 1. All conditions marked 'all_of' are fulfilled -/// or -/// 2. 
Any condition marked 'any_of' is fulfilled - -using ConfigurationPtr = Poco::AutoPtr; - -class TestStopConditions -{ -public: - void loadFromConfig(ConfigurationPtr & stop_conditions_config); - inline bool empty() const - { - return !conditions_all_of.initialized_count && !conditions_any_of.initialized_count; - } - -#define DEFINE_REPORT_FUNC(FUNC_NAME, CONDITION) \ - void FUNC_NAME(UInt64 value) \ - { \ - conditions_all_of.report(value, conditions_all_of.CONDITION); \ - conditions_any_of.report(value, conditions_any_of.CONDITION); \ - } - - DEFINE_REPORT_FUNC(reportTotalTime, total_time_ms) - DEFINE_REPORT_FUNC(reportRowsRead, rows_read) - DEFINE_REPORT_FUNC(reportBytesReadUncompressed, bytes_read_uncompressed) - DEFINE_REPORT_FUNC(reportIterations, iterations) - DEFINE_REPORT_FUNC(reportMinTimeNotChangingFor, min_time_not_changing_for_ms) - DEFINE_REPORT_FUNC(reportMaxSpeedNotChangingFor, max_speed_not_changing_for_ms) - DEFINE_REPORT_FUNC(reportAverageSpeedNotChangingFor, average_speed_not_changing_for_ms) - -#undef REPORT - - bool areFulfilled() const; - - void reset() - { - conditions_all_of.reset(); - conditions_any_of.reset(); - } - - /// Return max exec time for these conditions - /// Return zero if max time cannot be determined - UInt64 getMaxExecTime() const; - -private: - StopConditionsSet conditions_all_of; - StopConditionsSet conditions_any_of; -}; - -} diff --git a/dbms/programs/performance-test/applySubstitutions.cpp b/dbms/programs/performance-test/applySubstitutions.cpp deleted file mode 100644 index a18e066fb01..00000000000 --- a/dbms/programs/performance-test/applySubstitutions.cpp +++ /dev/null @@ -1,82 +0,0 @@ -#include "applySubstitutions.h" -#include -#include - -namespace DB -{ - -void constructSubstitutions(ConfigurationPtr & substitutions_view, StringToVector & out_substitutions) -{ - Strings xml_substitutions; - substitutions_view->keys(xml_substitutions); - - for (size_t i = 0; i != xml_substitutions.size(); ++i) - { - const ConfigurationPtr xml_substitution(substitutions_view->createView("substitution[" + std::to_string(i) + "]")); - - /// Property values for substitution will be stored in a vector - /// accessible by property name - Strings xml_values; - xml_substitution->keys("values", xml_values); - - std::string name = xml_substitution->getString("name"); - - for (size_t j = 0; j != xml_values.size(); ++j) - { - out_substitutions[name].push_back(xml_substitution->getString("values.value[" + std::to_string(j) + "]")); - } - } -} - -/// Recursive method which goes through all substitution blocks in xml -/// and replaces property {names} by their values -static void runThroughAllOptionsAndPush(StringToVector::iterator substitutions_left, - StringToVector::iterator substitutions_right, - const std::string & template_query, - Strings & out_queries) -{ - if (substitutions_left == substitutions_right) - { - out_queries.push_back(template_query); /// completely substituted query - return; - } - - std::string substitution_mask = "{" + substitutions_left->first + "}"; - - if (template_query.find(substitution_mask) == std::string::npos) /// nothing to substitute here - { - runThroughAllOptionsAndPush(std::next(substitutions_left), substitutions_right, template_query, out_queries); - return; - } - - for (const std::string & value : substitutions_left->second) - { - /// Copy query string for each unique permutation - std::string query = template_query; - size_t substr_pos = 0; - - while (substr_pos != std::string::npos) - { - substr_pos = query.find(substitution_mask); 
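Since the substitution machinery is deleted wholesale here, a small standalone sketch of the expansion that the comment above describes: every "{name}" placeholder is replaced by each of its configured values, producing the cartesian product of all substitutions (the placeholder names and the sample query are invented for illustration):

    #include <iostream>
    #include <iterator>
    #include <map>
    #include <string>
    #include <vector>

    using StringToVector = std::map<std::string, std::vector<std::string>>;

    // Sketch only: expand one substitution at a time, recursing over the rest.
    static void expand(StringToVector::const_iterator current, StringToVector::const_iterator end,
                       const std::string & templ, std::vector<std::string> & out)
    {
        if (current == end)
        {
            out.push_back(templ);  // fully substituted query
            return;
        }
        const std::string mask = "{" + current->first + "}";
        if (templ.find(mask) == std::string::npos)  // nothing to substitute here
        {
            expand(std::next(current), end, templ, out);
            return;
        }
        for (const auto & value : current->second)
        {
            std::string query = templ;
            for (std::string::size_type pos; (pos = query.find(mask)) != std::string::npos;)
                query.replace(pos, mask.size(), value);
            expand(std::next(current), end, query, out);
        }
    }

    int main()
    {
        StringToVector subs{{"table", {"hits_10m", "hits_100m"}}, {"func", {"sum", "avg"}}};
        std::vector<std::string> queries;
        expand(subs.begin(), subs.end(), "SELECT {func}(x) FROM {table}", queries);
        for (const auto & q : queries)
            std::cout << q << '\n';   // 4 queries: every {func} x {table} combination
    }
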
- - if (substr_pos != std::string::npos) - query.replace(substr_pos, substitution_mask.length(), value); - } - - runThroughAllOptionsAndPush(std::next(substitutions_left), substitutions_right, query, out_queries); - } -} - -Strings formatQueries(const std::string & query, StringToVector substitutions_to_generate) -{ - Strings queries_res; - runThroughAllOptionsAndPush( - substitutions_to_generate.begin(), - substitutions_to_generate.end(), - query, - queries_res); - return queries_res; -} - - -} diff --git a/dbms/programs/performance-test/applySubstitutions.h b/dbms/programs/performance-test/applySubstitutions.h deleted file mode 100644 index 3412167d6be..00000000000 --- a/dbms/programs/performance-test/applySubstitutions.h +++ /dev/null @@ -1,19 +0,0 @@ -#pragma once - -#include -#include -#include -#include -#include - -namespace DB -{ - -using StringToVector = std::map; -using ConfigurationPtr = Poco::AutoPtr; - -void constructSubstitutions(ConfigurationPtr & substitutions_view, StringToVector & out_substitutions); - -Strings formatQueries(const std::string & query, StringToVector substitutions_to_generate); - -} diff --git a/dbms/programs/performance-test/clickhouse-performance-test.cpp b/dbms/programs/performance-test/clickhouse-performance-test.cpp deleted file mode 100644 index e0efe3cb56a..00000000000 --- a/dbms/programs/performance-test/clickhouse-performance-test.cpp +++ /dev/null @@ -1,2 +0,0 @@ -int mainEntryClickHousePerformanceTest(int argc, char ** argv); -int main(int argc_, char ** argv_) { return mainEntryClickHousePerformanceTest(argc_, argv_); } diff --git a/dbms/programs/performance-test/executeQuery.cpp b/dbms/programs/performance-test/executeQuery.cpp deleted file mode 100644 index db82a48d0c1..00000000000 --- a/dbms/programs/performance-test/executeQuery.cpp +++ /dev/null @@ -1,82 +0,0 @@ -#include "executeQuery.h" -#include -#include -#include -#include - -namespace DB -{ - -namespace -{ - -void checkFulfilledConditionsAndUpdate( - const Progress & progress, RemoteBlockInputStream & stream, - TestStats & statistics, TestStopConditions & stop_conditions, - InterruptListener & interrupt_listener) -{ - statistics.add(progress.read_rows, progress.read_bytes); - - stop_conditions.reportRowsRead(statistics.total_rows_read); - stop_conditions.reportBytesReadUncompressed(statistics.total_bytes_read); - stop_conditions.reportTotalTime(statistics.watch.elapsed() / (1000 * 1000)); - stop_conditions.reportMinTimeNotChangingFor(statistics.min_time_watch.elapsed() / (1000 * 1000)); - stop_conditions.reportMaxSpeedNotChangingFor(statistics.max_rows_speed_watch.elapsed() / (1000 * 1000)); - stop_conditions.reportAverageSpeedNotChangingFor(statistics.avg_rows_speed_watch.elapsed() / (1000 * 1000)); - - if (stop_conditions.areFulfilled()) - { - statistics.last_query_was_cancelled = true; - stream.cancel(false); - } - - if (interrupt_listener.check()) - { - statistics.got_SIGINT = true; - statistics.last_query_was_cancelled = true; - stream.cancel(false); - } -} - -} // anonymous namespace - -void executeQuery( - Connection & connection, - const std::string & query, - TestStats & statistics, - TestStopConditions & stop_conditions, - InterruptListener & interrupt_listener, - Context & context, - const Settings & settings) -{ - static const std::string query_id_prefix - = Poco::UUIDGenerator::defaultGenerator().create().toString() + "-"; - static int next_query_id = 1; - - statistics.watch_per_query.restart(); - statistics.last_query_was_cancelled = false; - 
statistics.last_query_rows_read = 0; - statistics.last_query_bytes_read = 0; - statistics.query_id = query_id_prefix + std::to_string(next_query_id++); - - RemoteBlockInputStream stream(connection, query, {}, context, &settings); - stream.setQueryId(statistics.query_id); - - stream.setProgressCallback( - [&](const Progress & value) - { - checkFulfilledConditionsAndUpdate( - value, stream, statistics, - stop_conditions, interrupt_listener); - }); - stream.readPrefix(); - while (Block block = stream.read()); - stream.readSuffix(); - - if (!statistics.last_query_was_cancelled) - statistics.updateQueryInfo(); - - statistics.setTotalTime(); -} - -} diff --git a/dbms/programs/performance-test/executeQuery.h b/dbms/programs/performance-test/executeQuery.h deleted file mode 100644 index d3b37a6a678..00000000000 --- a/dbms/programs/performance-test/executeQuery.h +++ /dev/null @@ -1,20 +0,0 @@ -#pragma once -#include -#include "TestStats.h" -#include "TestStopConditions.h" -#include -#include -#include -#include - -namespace DB -{ -void executeQuery( - Connection & connection, - const std::string & query, - TestStats & statistics, - TestStopConditions & stop_conditions, - InterruptListener & interrupt_listener, - Context & context, - const Settings & settings); -} diff --git a/dbms/programs/server/CMakeLists.txt b/dbms/programs/server/CMakeLists.txt deleted file mode 100644 index f6e9d1ff702..00000000000 --- a/dbms/programs/server/CMakeLists.txt +++ /dev/null @@ -1,37 +0,0 @@ -set(CLICKHOUSE_SERVER_SOURCES - ${CMAKE_CURRENT_SOURCE_DIR}/HTTPHandler.cpp - ${CMAKE_CURRENT_SOURCE_DIR}/HTTPHandlerFactory.cpp - ${CMAKE_CURRENT_SOURCE_DIR}/InterserverIOHTTPHandler.cpp - ${CMAKE_CURRENT_SOURCE_DIR}/MetricsTransmitter.cpp - ${CMAKE_CURRENT_SOURCE_DIR}/NotFoundHandler.cpp - ${CMAKE_CURRENT_SOURCE_DIR}/PingRequestHandler.cpp - ${CMAKE_CURRENT_SOURCE_DIR}/PrometheusMetricsWriter.cpp - ${CMAKE_CURRENT_SOURCE_DIR}/PrometheusRequestHandler.cpp - ${CMAKE_CURRENT_SOURCE_DIR}/ReplicasStatusHandler.cpp - ${CMAKE_CURRENT_SOURCE_DIR}/RootRequestHandler.cpp - ${CMAKE_CURRENT_SOURCE_DIR}/Server.cpp - ${CMAKE_CURRENT_SOURCE_DIR}/TCPHandler.cpp - ) - -set(CLICKHOUSE_SERVER_SOURCES - ${CLICKHOUSE_SERVER_SOURCES} - ${CMAKE_CURRENT_SOURCE_DIR}/MySQLHandler.cpp - ${CMAKE_CURRENT_SOURCE_DIR}/MySQLHandlerFactory.cpp -) - -set(CLICKHOUSE_SERVER_LINK PRIVATE clickhouse_dictionaries clickhouse_common_io clickhouse_common_config clickhouse_common_zookeeper clickhouse_parsers string_utils PUBLIC daemon PRIVATE clickhouse_storages_system clickhouse_functions clickhouse_aggregate_functions clickhouse_table_functions ${Poco_Net_LIBRARY}) -if (USE_POCO_NETSSL) - set(CLICKHOUSE_SERVER_LINK ${CLICKHOUSE_SERVER_LINK} PRIVATE ${Poco_NetSSL_LIBRARY} ${Poco_Crypto_LIBRARY}) -endif () - -clickhouse_program_add(server) - -if (GLIBC_COMPATIBILITY) - set (GLIBC_MAX_REQUIRED 2.4 CACHE INTERNAL "") - # temporary disabled. 
to enable - change 'exit 0' to 'exit $a' - add_test(NAME GLIBC_required_version COMMAND bash -c "readelf -s ${CMAKE_CURRENT_BINARY_DIR}/../clickhouse-server | perl -nE 'END {exit 0 if $a} ++$a, print if /\\x40GLIBC_(\\S+)/ and pack(q{C*}, split /\\./, \$1) gt pack q{C*}, split /\\./, q{${GLIBC_MAX_REQUIRED}}'") - - #add_test(NAME GLIBC_required_version COMMAND bash -c "readelf -s ${CMAKE_CURRENT_BINARY_DIR}/../clickhouse-server | grep '@GLIBC' | grep -oP 'GLIBC_[\\d\\.]+' | sort | uniq | sort --version-sort --reverse | perl -lnE 'warn($_), exit 1 if $_ gt q{GLIBC_${GLIBC_MAX_REQUIRED}}'") # old -endif () - -install(FILES config.xml users.xml DESTINATION ${CLICKHOUSE_ETC_DIR}/clickhouse-server COMPONENT clickhouse) diff --git a/dbms/programs/server/HTTPHandler.cpp b/dbms/programs/server/HTTPHandler.cpp deleted file mode 100644 index be9404854ee..00000000000 --- a/dbms/programs/server/HTTPHandler.cpp +++ /dev/null @@ -1,729 +0,0 @@ -#include "HTTPHandler.h" - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -namespace DB -{ - -namespace ErrorCodes -{ - - extern const int LOGICAL_ERROR; - extern const int CANNOT_PARSE_TEXT; - extern const int CANNOT_PARSE_ESCAPE_SEQUENCE; - extern const int CANNOT_PARSE_QUOTED_STRING; - extern const int CANNOT_PARSE_DATE; - extern const int CANNOT_PARSE_DATETIME; - extern const int CANNOT_PARSE_NUMBER; - extern const int CANNOT_OPEN_FILE; - - extern const int UNKNOWN_ELEMENT_IN_AST; - extern const int UNKNOWN_TYPE_OF_AST_NODE; - extern const int TOO_DEEP_AST; - extern const int TOO_BIG_AST; - extern const int UNEXPECTED_AST_STRUCTURE; - - extern const int SYNTAX_ERROR; - - extern const int INCORRECT_DATA; - extern const int TYPE_MISMATCH; - - extern const int UNKNOWN_TABLE; - extern const int UNKNOWN_FUNCTION; - extern const int UNKNOWN_IDENTIFIER; - extern const int UNKNOWN_TYPE; - extern const int UNKNOWN_STORAGE; - extern const int UNKNOWN_DATABASE; - extern const int UNKNOWN_SETTING; - extern const int UNKNOWN_DIRECTION_OF_SORTING; - extern const int UNKNOWN_AGGREGATE_FUNCTION; - extern const int UNKNOWN_FORMAT; - extern const int UNKNOWN_DATABASE_ENGINE; - extern const int UNKNOWN_TYPE_OF_QUERY; - - extern const int QUERY_IS_TOO_LARGE; - - extern const int NOT_IMPLEMENTED; - extern const int SOCKET_TIMEOUT; - - extern const int UNKNOWN_USER; - extern const int WRONG_PASSWORD; - extern const int REQUIRED_PASSWORD; - - extern const int INVALID_SESSION_TIMEOUT; - extern const int HTTP_LENGTH_REQUIRED; -} - - -static Poco::Net::HTTPResponse::HTTPStatus exceptionCodeToHTTPStatus(int exception_code) -{ - using namespace Poco::Net; - - if (exception_code == ErrorCodes::REQUIRED_PASSWORD) - return HTTPResponse::HTTP_UNAUTHORIZED; - else if (exception_code == ErrorCodes::CANNOT_PARSE_TEXT || - exception_code == ErrorCodes::CANNOT_PARSE_ESCAPE_SEQUENCE || - exception_code == ErrorCodes::CANNOT_PARSE_QUOTED_STRING || - exception_code == ErrorCodes::CANNOT_PARSE_DATE || - exception_code == ErrorCodes::CANNOT_PARSE_DATETIME || - exception_code == ErrorCodes::CANNOT_PARSE_NUMBER || - - exception_code == ErrorCodes::UNKNOWN_ELEMENT_IN_AST || - exception_code == ErrorCodes::UNKNOWN_TYPE_OF_AST_NODE || - exception_code == ErrorCodes::TOO_DEEP_AST || - exception_code == 
ErrorCodes::TOO_BIG_AST || - exception_code == ErrorCodes::UNEXPECTED_AST_STRUCTURE || - - exception_code == ErrorCodes::SYNTAX_ERROR || - - exception_code == ErrorCodes::INCORRECT_DATA || - exception_code == ErrorCodes::TYPE_MISMATCH) - return HTTPResponse::HTTP_BAD_REQUEST; - else if (exception_code == ErrorCodes::UNKNOWN_TABLE || - exception_code == ErrorCodes::UNKNOWN_FUNCTION || - exception_code == ErrorCodes::UNKNOWN_IDENTIFIER || - exception_code == ErrorCodes::UNKNOWN_TYPE || - exception_code == ErrorCodes::UNKNOWN_STORAGE || - exception_code == ErrorCodes::UNKNOWN_DATABASE || - exception_code == ErrorCodes::UNKNOWN_SETTING || - exception_code == ErrorCodes::UNKNOWN_DIRECTION_OF_SORTING || - exception_code == ErrorCodes::UNKNOWN_AGGREGATE_FUNCTION || - exception_code == ErrorCodes::UNKNOWN_FORMAT || - exception_code == ErrorCodes::UNKNOWN_DATABASE_ENGINE || - - exception_code == ErrorCodes::UNKNOWN_TYPE_OF_QUERY) - return HTTPResponse::HTTP_NOT_FOUND; - else if (exception_code == ErrorCodes::QUERY_IS_TOO_LARGE) - return HTTPResponse::HTTP_REQUESTENTITYTOOLARGE; - else if (exception_code == ErrorCodes::NOT_IMPLEMENTED) - return HTTPResponse::HTTP_NOT_IMPLEMENTED; - else if (exception_code == ErrorCodes::SOCKET_TIMEOUT || - exception_code == ErrorCodes::CANNOT_OPEN_FILE) - return HTTPResponse::HTTP_SERVICE_UNAVAILABLE; - else if (exception_code == ErrorCodes::HTTP_LENGTH_REQUIRED) - return HTTPResponse::HTTP_LENGTH_REQUIRED; - - return HTTPResponse::HTTP_INTERNAL_SERVER_ERROR; -} - - -static std::chrono::steady_clock::duration parseSessionTimeout( - const Poco::Util::AbstractConfiguration & config, - const HTMLForm & params) -{ - unsigned session_timeout = config.getInt("default_session_timeout", 60); - - if (params.has("session_timeout")) - { - unsigned max_session_timeout = config.getUInt("max_session_timeout", 3600); - std::string session_timeout_str = params.get("session_timeout"); - - ReadBufferFromString buf(session_timeout_str); - if (!tryReadIntText(session_timeout, buf) || !buf.eof()) - throw Exception("Invalid session timeout: '" + session_timeout_str + "'", ErrorCodes::INVALID_SESSION_TIMEOUT); - - if (session_timeout > max_session_timeout) - throw Exception("Session timeout '" + session_timeout_str + "' is larger than max_session_timeout: " + toString(max_session_timeout) - + ". 
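exceptionCodeToHTTPStatus collapses server error codes into a few HTTP statuses: 400 for parse/AST/syntax problems, 404 for unknown objects, 413 for oversized queries, 501/503/411 for the remaining special cases, and 500 otherwise. A toy version of that bucketing, with invented numeric codes standing in for DB::ErrorCodes:

#include <iostream>
#include <unordered_map>

// Hypothetical error codes standing in for DB::ErrorCodes (values are arbitrary).
enum ErrorCode
{
    CANNOT_PARSE_TEXT = 1,
    SYNTAX_ERROR = 2,
    UNKNOWN_TABLE = 3,
    QUERY_IS_TOO_LARGE = 4,
    NOT_IMPLEMENTED = 5,
    SOCKET_TIMEOUT = 6,
    REQUIRED_PASSWORD = 7,
};

int exceptionCodeToHTTPStatus(int code)
{
    // Each bucket of related error codes maps to one HTTP status;
    // anything unrecognized becomes an internal server error.
    static const std::unordered_map<int, int> mapping = {
        {REQUIRED_PASSWORD, 401},
        {CANNOT_PARSE_TEXT, 400},
        {SYNTAX_ERROR, 400},
        {UNKNOWN_TABLE, 404},
        {QUERY_IS_TOO_LARGE, 413},
        {NOT_IMPLEMENTED, 501},
        {SOCKET_TIMEOUT, 503},
    };
    auto it = mapping.find(code);
    return it == mapping.end() ? 500 : it->second;
}

int main()
{
    std::cout << exceptionCodeToHTTPStatus(SYNTAX_ERROR) << '\n';   // 400
    std::cout << exceptionCodeToHTTPStatus(UNKNOWN_TABLE) << '\n';  // 404
    std::cout << exceptionCodeToHTTPStatus(12345) << '\n';          // 500
}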
Maximum session timeout could be modified in configuration file.", - ErrorCodes::INVALID_SESSION_TIMEOUT); - } - - return std::chrono::seconds(session_timeout); -} - - -void HTTPHandler::pushDelayedResults(Output & used_output) -{ - std::vector write_buffers; - std::vector read_buffers; - std::vector read_buffers_raw_ptr; - - auto cascade_buffer = typeid_cast(used_output.out_maybe_delayed_and_compressed.get()); - if (!cascade_buffer) - throw Exception("Expected CascadeWriteBuffer", ErrorCodes::LOGICAL_ERROR); - - cascade_buffer->getResultBuffers(write_buffers); - - if (write_buffers.empty()) - throw Exception("At least one buffer is expected to overwrite result into HTTP response", ErrorCodes::LOGICAL_ERROR); - - for (auto & write_buf : write_buffers) - { - IReadableWriteBuffer * write_buf_concrete; - ReadBufferPtr reread_buf; - - if (write_buf - && (write_buf_concrete = dynamic_cast(write_buf.get())) - && (reread_buf = write_buf_concrete->tryGetReadBuffer())) - { - read_buffers.emplace_back(reread_buf); - read_buffers_raw_ptr.emplace_back(reread_buf.get()); - } - } - - ConcatReadBuffer concat_read_buffer(read_buffers_raw_ptr); - copyData(concat_read_buffer, *used_output.out_maybe_compressed); -} - - -HTTPHandler::HTTPHandler(IServer & server_) - : server(server_) - , log(&Logger::get("HTTPHandler")) -{ - server_display_name = server.config().getString("display_name", getFQDNOrHostName()); -} - - -void HTTPHandler::processQuery( - Poco::Net::HTTPServerRequest & request, - HTMLForm & params, - Poco::Net::HTTPServerResponse & response, - Output & used_output) -{ - Context context = server.context(); - - CurrentThread::QueryScope query_scope(context); - - LOG_TRACE(log, "Request URI: " << request.getURI()); - - std::istream & istr = request.stream(); - - /// Part of the query can be passed in the 'query' parameter and the rest in the request body - /// (http method need not necessarily be POST). In this case the entire query consists of the - /// contents of the 'query' parameter, a line break and the request body. - std::string query_param = params.get("query", ""); - if (!query_param.empty()) - query_param += '\n'; - - /// The user and password can be passed by headers (similar to X-Auth-*), - /// which is used by load balancers to pass authentication information. - std::string user = request.get("X-ClickHouse-User", ""); - std::string password = request.get("X-ClickHouse-Key", ""); - std::string quota_key = request.get("X-ClickHouse-Quota", ""); - - if (user.empty() && password.empty() && quota_key.empty()) - { - /// User name and password can be passed using query parameters - /// or using HTTP Basic auth (both methods are insecure). - if (request.hasCredentials()) - { - Poco::Net::HTTPBasicCredentials credentials(request); - - user = credentials.getUsername(); - password = credentials.getPassword(); - } - else - { - user = params.get("user", "default"); - password = params.get("password", ""); - } - - quota_key = params.get("quota_key", ""); - } - else - { - /// It is prohibited to mix different authorization schemes. 
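parseSessionTimeout validates the session_timeout URL parameter: it must be an integer and may not exceed max_session_timeout from the server configuration. A minimal re-creation of that check, with the 60 s default and 3600 s maximum taken from the deleted code and a plain std::runtime_error in place of DB::Exception:

#include <chrono>
#include <iostream>
#include <optional>
#include <stdexcept>
#include <string>

// Parse an optional "session_timeout" parameter, falling back to a default
// and refusing anything above the configured maximum.
std::chrono::seconds parseSessionTimeout(const std::optional<std::string> & param,
                                         unsigned default_timeout = 60,
                                         unsigned max_timeout = 3600)
{
    unsigned timeout = default_timeout;
    if (param)
    {
        size_t consumed = 0;
        try
        {
            timeout = static_cast<unsigned>(std::stoul(*param, &consumed));
        }
        catch (const std::exception &)
        {
            consumed = 0;
        }
        if (consumed != param->size())
            throw std::runtime_error("Invalid session timeout: '" + *param + "'");

        if (timeout > max_timeout)
            throw std::runtime_error("Session timeout '" + *param
                + "' is larger than max_session_timeout: " + std::to_string(max_timeout));
    }
    return std::chrono::seconds(timeout);
}

int main()
{
    std::cout << parseSessionTimeout(std::nullopt).count() << '\n';       // 60
    std::cout << parseSessionTimeout(std::string("120")).count() << '\n'; // 120
    try
    {
        parseSessionTimeout(std::string("7200"));
    }
    catch (const std::exception & e)
    {
        std::cout << e.what() << '\n';   // rejected: above max_session_timeout
    }
}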
- if (request.hasCredentials() - || params.has("user") - || params.has("password") - || params.has("quota_key")) - { - throw Exception("Invalid authentication: it is not allowed to use X-ClickHouse HTTP headers and other authentication methods simultaneously", ErrorCodes::REQUIRED_PASSWORD); - } - } - - std::string query_id = params.get("query_id", ""); - context.setUser(user, password, request.clientAddress(), quota_key); - context.setCurrentQueryId(query_id); - - /// The user could specify session identifier and session timeout. - /// It allows to modify settings, create temporary tables and reuse them in subsequent requests. - - std::shared_ptr session; - String session_id; - std::chrono::steady_clock::duration session_timeout; - bool session_is_set = params.has("session_id"); - const auto & config = server.config(); - - if (session_is_set) - { - session_id = params.get("session_id"); - session_timeout = parseSessionTimeout(config, params); - std::string session_check = params.get("session_check", ""); - - session = context.acquireSession(session_id, session_timeout, session_check == "1"); - - context = *session; - context.setSessionContext(*session); - } - - SCOPE_EXIT({ - if (session_is_set) - session->releaseSession(session_id, session_timeout); - }); - - /// The client can pass a HTTP header indicating supported compression method (gzip or deflate). - String http_response_compression_methods = request.get("Accept-Encoding", ""); - CompressionMethod http_response_compression_method = CompressionMethod::None; - - if (!http_response_compression_methods.empty()) - { - /// If client supports brotli - it's preferred. - /// Both gzip and deflate are supported. If the client supports both, gzip is preferred. - /// NOTE parsing of the list of methods is slightly incorrect. - - if (std::string::npos != http_response_compression_methods.find("br")) - http_response_compression_method = CompressionMethod::Brotli; - else if (std::string::npos != http_response_compression_methods.find("gzip")) - http_response_compression_method = CompressionMethod::Gzip; - else if (std::string::npos != http_response_compression_methods.find("deflate")) - http_response_compression_method = CompressionMethod::Zlib; - } - - bool client_supports_http_compression = http_response_compression_method != CompressionMethod::None; - - /// Client can pass a 'compress' flag in the query string. In this case the query result is - /// compressed using internal algorithm. This is not reflected in HTTP headers. - bool internal_compression = params.getParsed("compress", false); - - /// At least, we should postpone sending of first buffer_size result bytes - size_t buffer_size_total = std::max( - params.getParsed("buffer_size", DBMS_DEFAULT_BUFFER_SIZE), static_cast(DBMS_DEFAULT_BUFFER_SIZE)); - - /// If it is specified, the whole result will be buffered. - /// First ~buffer_size bytes will be buffered in memory, the remaining bytes will be stored in temporary file. - bool buffer_until_eof = params.getParsed("wait_end_of_query", false); - - size_t buffer_size_http = DBMS_DEFAULT_BUFFER_SIZE; - size_t buffer_size_memory = (buffer_size_total > buffer_size_http) ? 
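Response compression is negotiated by scanning Accept-Encoding: brotli wins if present, then gzip, then deflate; as the original comment admits, this is substring matching rather than a real header parser. The same preference order in isolation:

#include <iostream>
#include <string>

enum class CompressionMethod { None, Brotli, Gzip, Zlib };

// Choose a response compression based on the Accept-Encoding header.
// Like the original, this is a substring check, not a full RFC 7231 parser.
CompressionMethod chooseResponseCompression(const std::string & accept_encoding)
{
    if (accept_encoding.find("br") != std::string::npos)
        return CompressionMethod::Brotli;
    if (accept_encoding.find("gzip") != std::string::npos)
        return CompressionMethod::Gzip;
    if (accept_encoding.find("deflate") != std::string::npos)
        return CompressionMethod::Zlib;
    return CompressionMethod::None;
}

int main()
{
    std::cout << static_cast<int>(chooseResponseCompression("gzip, deflate, br")) << '\n'; // 1 (Brotli)
    std::cout << static_cast<int>(chooseResponseCompression("gzip, deflate")) << '\n';     // 2 (Gzip)
    std::cout << static_cast<int>(chooseResponseCompression("")) << '\n';                  // 0 (None)
}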
buffer_size_total : 0; - - unsigned keep_alive_timeout = config.getUInt("keep_alive_timeout", 10); - - used_output.out = std::make_shared( - request, response, keep_alive_timeout, client_supports_http_compression, http_response_compression_method); - - if (internal_compression) - used_output.out_maybe_compressed = std::make_shared(*used_output.out); - else - used_output.out_maybe_compressed = used_output.out; - - if (buffer_size_memory > 0 || buffer_until_eof) - { - CascadeWriteBuffer::WriteBufferPtrs cascade_buffer1; - CascadeWriteBuffer::WriteBufferConstructors cascade_buffer2; - - if (buffer_size_memory > 0) - cascade_buffer1.emplace_back(std::make_shared(buffer_size_memory)); - - if (buffer_until_eof) - { - const std::string tmp_path(context.getTemporaryVolume()->getNextDisk()->getPath()); - const std::string tmp_path_template(tmp_path + "http_buffers/"); - - auto create_tmp_disk_buffer = [tmp_path_template] (const WriteBufferPtr &) - { - return WriteBufferFromTemporaryFile::create(tmp_path_template); - }; - - cascade_buffer2.emplace_back(std::move(create_tmp_disk_buffer)); - } - else - { - auto push_memory_buffer_and_continue = [next_buffer = used_output.out_maybe_compressed] (const WriteBufferPtr & prev_buf) - { - auto prev_memory_buffer = typeid_cast(prev_buf.get()); - if (!prev_memory_buffer) - throw Exception("Expected MemoryWriteBuffer", ErrorCodes::LOGICAL_ERROR); - - auto rdbuf = prev_memory_buffer->tryGetReadBuffer(); - copyData(*rdbuf , *next_buffer); - - return next_buffer; - }; - - cascade_buffer2.emplace_back(push_memory_buffer_and_continue); - } - - used_output.out_maybe_delayed_and_compressed = std::make_shared( - std::move(cascade_buffer1), std::move(cascade_buffer2)); - } - else - { - used_output.out_maybe_delayed_and_compressed = used_output.out_maybe_compressed; - } - - std::unique_ptr in_param = std::make_unique(query_param); - - std::unique_ptr in_post_raw = std::make_unique(istr); - - /// Request body can be compressed using algorithm specified in the Content-Encoding header. - String http_request_compression_method_str = request.get("Content-Encoding", ""); - std::unique_ptr in_post = wrapReadBufferWithCompressionMethod( - std::make_unique(istr), chooseCompressionMethod({}, http_request_compression_method_str)); - - /// The data can also be compressed using incompatible internal algorithm. This is indicated by - /// 'decompress' query parameter. - std::unique_ptr in_post_maybe_compressed; - bool in_post_compressed = false; - if (params.getParsed("decompress", false)) - { - in_post_maybe_compressed = std::make_unique(*in_post); - in_post_compressed = true; - } - else - in_post_maybe_compressed = std::move(in_post); - - std::unique_ptr in; - - static const NameSet reserved_param_names{"query", "compress", "decompress", "user", "password", "quota_key", "query_id", "stacktrace", - "buffer_size", "wait_end_of_query", "session_id", "session_timeout", "session_check"}; - - Names reserved_param_suffixes; - - auto param_could_be_skipped = [&] (const String & name) - { - if (reserved_param_names.count(name)) - return true; - - for (const String & suffix : reserved_param_suffixes) - { - if (endsWith(name, suffix)) - return true; - } - - return false; - }; - - /// Settings can be overridden in the query. - /// Some parameters (database, default_format, everything used in the code above) do not - /// belong to the Settings class. - - /// 'readonly' setting values mean: - /// readonly = 0 - any query is allowed, client can change any setting. 
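With wait_end_of_query the result is held back before anything reaches the client: roughly the first buffer_size bytes go to a memory buffer and the remainder to a temporary file, chained through a cascade of write buffers. A loose, standard-library-only analogy of that spill-to-disk behaviour (not the real CascadeWriteBuffer/MemoryWriteBuffer/WriteBufferFromTemporaryFile classes):

#include <cstdio>
#include <iostream>
#include <string>

// Buffer output in memory up to a limit, then spill everything else to a
// temporary file; the result could later be replayed into the HTTP response.
class SpillingBuffer
{
public:
    explicit SpillingBuffer(size_t memory_limit_) : memory_limit(memory_limit_) {}

    ~SpillingBuffer()
    {
        if (tmp)
            std::fclose(tmp);
    }

    void write(const std::string & data)
    {
        if (!tmp && memory.size() + data.size() <= memory_limit)
        {
            memory += data;          // still fits in memory
            return;
        }
        if (!tmp)
            tmp = std::tmpfile();    // first overflow: open a temporary file
        if (!tmp)
            return;                  // could not create a temp file; this sketch just drops the data
        std::fwrite(data.data(), 1, data.size(), tmp);
        spilled += data.size();
    }

    void report() const
    {
        std::cout << "in memory: " << memory.size()
                  << " bytes, spilled: " << spilled << " bytes\n";
    }

private:
    size_t memory_limit;
    std::string memory;
    std::FILE * tmp = nullptr;
    size_t spilled = 0;
};

int main()
{
    SpillingBuffer buffer(16);                        // tiny limit for demonstration
    buffer.write("first chunk, ");                    // 13 bytes, kept in memory
    buffer.write("second chunk that overflows");      // goes to the temp file
    buffer.report();                                  // in memory: 13 bytes, spilled: 27 bytes
}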
- /// readonly = 1 - only readonly queries are allowed, client can't change settings. - /// readonly = 2 - only readonly queries are allowed, client can change any setting except 'readonly'. - - /// In theory if initially readonly = 0, the client can change any setting and then set readonly - /// to some other value. - auto & settings = context.getSettingsRef(); - - /// Only readonly queries are allowed for HTTP GET requests. - if (request.getMethod() == Poco::Net::HTTPServerRequest::HTTP_GET) - { - if (settings.readonly == 0) - settings.readonly = 2; - } - - bool has_external_data = startsWith(request.getContentType().data(), "multipart/form-data"); - - if (has_external_data) - { - /// Skip unneeded parameters to avoid confusing them later with context settings or query parameters. - reserved_param_suffixes.reserve(3); - /// It is a bug and ambiguity with `date_time_input_format` and `low_cardinality_allow_in_native_format` formats/settings. - reserved_param_suffixes.emplace_back("_format"); - reserved_param_suffixes.emplace_back("_types"); - reserved_param_suffixes.emplace_back("_structure"); - } - - SettingsChanges settings_changes; - for (const auto & [key, value] : params) - { - if (key == "database") - { - context.setCurrentDatabase(value); - } - else if (key == "default_format") - { - context.setDefaultFormat(value); - } - else if (param_could_be_skipped(key)) - { - } - else if (startsWith(key, "param_")) - { - /// Save name and values of substitution in dictionary. - const String parameter_name = key.substr(strlen("param_")); - context.setQueryParameter(parameter_name, value); - } - else - { - /// All other query parameters are treated as settings. - settings_changes.push_back({key, value}); - } - } - - /// For external data we also want settings - context.checkSettingsConstraints(settings_changes); - context.applySettingsChanges(settings_changes); - - /// Used in case of POST request with form-data, but it isn't expected to be deleted after that scope. - std::string full_query; - - /// Support for "external data for query processing". - if (has_external_data) - { - ExternalTablesHandler handler(context, params); - params.load(request, istr, handler); - - /// Params are of both form params POST and uri (GET params) - for (const auto & it : params) - if (it.first == "query") - full_query += it.second; - - in = std::make_unique(full_query); - } - else - in = std::make_unique(*in_param, *in_post_maybe_compressed); - - - /// HTTP response compression is turned on only if the client signalled that they support it - /// (using Accept-Encoding header) and 'enable_http_compression' setting is turned on. - used_output.out->setCompression(client_supports_http_compression && settings.enable_http_compression); - if (client_supports_http_compression) - used_output.out->setCompressionLevel(settings.http_zlib_compression_level); - - used_output.out->setSendProgressInterval(settings.http_headers_progress_interval_ms); - - /// If 'http_native_compression_disable_checksumming_on_decompress' setting is turned on, - /// checksums of client data compressed with internal algorithm are not checked. - if (in_post_compressed && settings.http_native_compression_disable_checksumming_on_decompress) - static_cast(*in_post_maybe_compressed).disableChecksumming(); - - /// Add CORS header if 'add_http_cors_header' setting is turned on and the client passed - /// Origin header. 
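Each query-string key is dispatched by name: a fixed reserved list is skipped, database and default_format configure the context, param_* keys become query parameters, and anything else is collected as a settings change (checked against constraints before being applied). A compact sketch of that routing with simplified container types:

#include <iostream>
#include <map>
#include <set>
#include <string>
#include <vector>

struct ClassifiedParams
{
    std::map<std::string, std::string> query_parameters;   // param_* substitutions
    std::vector<std::pair<std::string, std::string>> settings_changes;
    std::string database;
    std::string default_format;
};

ClassifiedParams classify(const std::multimap<std::string, std::string> & params)
{
    static const std::set<std::string> reserved = {
        "query", "compress", "decompress", "user", "password", "quota_key",
        "query_id", "stacktrace", "buffer_size", "wait_end_of_query",
        "session_id", "session_timeout", "session_check"};

    ClassifiedParams result;
    for (const auto & [key, value] : params)
    {
        if (key == "database")
            result.database = value;
        else if (key == "default_format")
            result.default_format = value;
        else if (reserved.count(key))
            continue;                                          // handled elsewhere
        else if (key.rfind("param_", 0) == 0)
            result.query_parameters[key.substr(6)] = value;    // strip "param_"
        else
            result.settings_changes.emplace_back(key, value);  // everything else is a setting
    }
    return result;
}

int main()
{
    auto parsed = classify({{"database", "default"},
                            {"param_id", "42"},
                            {"max_threads", "8"},
                            {"session_id", "abc"}});
    std::cout << parsed.database << ' '
              << parsed.query_parameters.at("id") << ' '
              << parsed.settings_changes.size() << '\n';   // default 42 1
}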
- used_output.out->addHeaderCORS(settings.add_http_cors_header && !request.get("Origin", "").empty()); - - ClientInfo & client_info = context.getClientInfo(); - client_info.query_kind = ClientInfo::QueryKind::INITIAL_QUERY; - client_info.interface = ClientInfo::Interface::HTTP; - - /// Query sent through HTTP interface is initial. - client_info.initial_user = client_info.current_user; - client_info.initial_query_id = client_info.current_query_id; - client_info.initial_address = client_info.current_address; - - ClientInfo::HTTPMethod http_method = ClientInfo::HTTPMethod::UNKNOWN; - if (request.getMethod() == Poco::Net::HTTPServerRequest::HTTP_GET) - http_method = ClientInfo::HTTPMethod::GET; - else if (request.getMethod() == Poco::Net::HTTPServerRequest::HTTP_POST) - http_method = ClientInfo::HTTPMethod::POST; - - client_info.http_method = http_method; - client_info.http_user_agent = request.get("User-Agent", ""); - - auto appendCallback = [&context] (ProgressCallback callback) - { - auto prev = context.getProgressCallback(); - - context.setProgressCallback([prev, callback] (const Progress & progress) - { - if (prev) - prev(progress); - - callback(progress); - }); - }; - - /// While still no data has been sent, we will report about query execution progress by sending HTTP headers. - if (settings.send_progress_in_http_headers) - appendCallback([&used_output] (const Progress & progress) { used_output.out->onProgress(progress); }); - - if (settings.readonly > 0 && settings.cancel_http_readonly_queries_on_client_close) - { - Poco::Net::StreamSocket & socket = dynamic_cast(request).socket(); - - appendCallback([&context, &socket](const Progress &) - { - /// Assume that at the point this method is called no one is reading data from the socket any more. - /// True for read-only queries. - try - { - char b; - //FIXME looks like MSG_DONTWAIT is useless because of POCO_BROKEN_TIMEOUTS - int status = socket.receiveBytes(&b, 1, MSG_DONTWAIT | MSG_PEEK); - if (status == 0) - context.killCurrentQuery(); - } - catch (Poco::TimeoutException &) - { - } - catch (...) - { - context.killCurrentQuery(); - } - }); - } - - customizeContext(context); - - executeQuery(*in, *used_output.out_maybe_delayed_and_compressed, /* allow_into_outfile = */ false, context, - [&response] (const String & content_type, const String & format) - { - response.setContentType(content_type); - response.add("X-ClickHouse-Format", format); - }, - [&response] (const String & current_query_id) { response.add("X-ClickHouse-Query-Id", current_query_id); }); - - if (used_output.hasDelayed()) - { - /// TODO: set Content-Length if possible - pushDelayedResults(used_output); - } - - /// Send HTTP headers with code 200 if no exception happened and the data is still not sent to - /// the client. - used_output.out->finalize(); -} - -void HTTPHandler::trySendExceptionToClient(const std::string & s, int exception_code, - Poco::Net::HTTPServerRequest & request, Poco::Net::HTTPServerResponse & response, - Output & used_output) -{ - try - { - response.set("X-ClickHouse-Exception-Code", toString(exception_code)); - - /// If HTTP method is POST and Keep-Alive is turned on, we should read the whole request body - /// to avoid reading part of the current request body in the next request. 
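For read-only queries with cancel_http_readonly_queries_on_client_close, the progress callback peeks at the client socket: a zero-byte result from a non-blocking, non-consuming receive means the peer closed the connection and the running query should be killed. A POSIX/Linux-only sketch of that check, demonstrated on a socketpair:

#include <cerrno>
#include <iostream>
#include <sys/socket.h>
#include <sys/types.h>
#include <unistd.h>

// Returns true if the peer has closed its side of the connection.
// MSG_PEEK leaves any pending data in the socket buffer; MSG_DONTWAIT makes the
// call non-blocking, so EAGAIN/EWOULDBLOCK just means "still connected, nothing
// to read yet".
bool clientDisconnected(int fd)
{
    char byte;
    ssize_t n = recv(fd, &byte, 1, MSG_DONTWAIT | MSG_PEEK);
    if (n == 0)
        return true;                    // orderly shutdown by the peer
    if (n < 0 && errno != EAGAIN && errno != EWOULDBLOCK)
        return true;                    // treat hard errors as a disconnect
    return false;
}

int main()
{
    int fds[2];
    if (socketpair(AF_UNIX, SOCK_STREAM, 0, fds) != 0)
        return 1;

    std::cout << std::boolalpha << clientDisconnected(fds[0]) << '\n';   // false
    close(fds[1]);                                                       // peer "goes away"
    std::cout << clientDisconnected(fds[0]) << '\n';                     // true
    close(fds[0]);
}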
- if (request.getMethod() == Poco::Net::HTTPRequest::HTTP_POST - && response.getKeepAlive() - && !request.stream().eof() - && exception_code != ErrorCodes::HTTP_LENGTH_REQUIRED) - { - request.stream().ignore(std::numeric_limits::max()); - } - - bool auth_fail = exception_code == ErrorCodes::UNKNOWN_USER || - exception_code == ErrorCodes::WRONG_PASSWORD || - exception_code == ErrorCodes::REQUIRED_PASSWORD; - - if (auth_fail) - { - response.requireAuthentication("ClickHouse server HTTP API"); - } - else - { - response.setStatusAndReason(exceptionCodeToHTTPStatus(exception_code)); - } - - if (!response.sent() && !used_output.out_maybe_compressed) - { - /// If nothing was sent yet and we don't even know if we must compress the response. - response.send() << s << std::endl; - } - else if (used_output.out_maybe_compressed) - { - /// Destroy CascadeBuffer to actualize buffers' positions and reset extra references - if (used_output.hasDelayed()) - used_output.out_maybe_delayed_and_compressed.reset(); - - /// Send the error message into already used (and possibly compressed) stream. - /// Note that the error message will possibly be sent after some data. - /// Also HTTP code 200 could have already been sent. - - /// If buffer has data, and that data wasn't sent yet, then no need to send that data - bool data_sent = used_output.out->count() != used_output.out->offset(); - - if (!data_sent) - { - used_output.out_maybe_compressed->position() = used_output.out_maybe_compressed->buffer().begin(); - used_output.out->position() = used_output.out->buffer().begin(); - } - - writeString(s, *used_output.out_maybe_compressed); - writeChar('\n', *used_output.out_maybe_compressed); - - used_output.out_maybe_compressed->next(); - used_output.out->next(); - used_output.out->finalize(); - } - } - catch (...) - { - tryLogCurrentException(log, "Cannot send exception to client"); - } -} - - -void HTTPHandler::handleRequest(Poco::Net::HTTPServerRequest & request, Poco::Net::HTTPServerResponse & response) -{ - setThreadName("HTTPHandler"); - ThreadStatus thread_status; - - Output used_output; - - /// In case of exception, send stack trace to client. - bool with_stacktrace = false; - - try - { - response.setContentType("text/plain; charset=UTF-8"); - response.set("X-ClickHouse-Server-Display-Name", server_display_name); - /// For keep-alive to work. - if (request.getVersion() == Poco::Net::HTTPServerRequest::HTTP_1_1) - response.setChunkedTransferEncoding(true); - - HTMLForm params(request); - with_stacktrace = params.getParsed("stacktrace", false); - - /// Workaround. Poco does not detect 411 Length Required case. - if (request.getMethod() == Poco::Net::HTTPRequest::HTTP_POST && !request.getChunkedTransferEncoding() && - !request.hasContentLength()) - { - throw Exception("There is neither Transfer-Encoding header nor Content-Length header", ErrorCodes::HTTP_LENGTH_REQUIRED); - } - - processQuery(request, params, response, used_output); - LOG_INFO(log, "Done processing query"); - } - catch (...) - { - tryLogCurrentException(log); - - /** If exception is received from remote server, then stack trace is embedded in message. - * If exception is thrown on local server, then stack trace is in separate field. 
- */ - std::string exception_message = getCurrentExceptionMessage(with_stacktrace, true); - int exception_code = getCurrentExceptionCode(); - - trySendExceptionToClient(exception_message, exception_code, request, response, used_output); - } -} - - -} diff --git a/dbms/programs/server/HTTPHandler.h b/dbms/programs/server/HTTPHandler.h deleted file mode 100644 index fb6c9fb532c..00000000000 --- a/dbms/programs/server/HTTPHandler.h +++ /dev/null @@ -1,83 +0,0 @@ -#pragma once - -#include "IServer.h" - -#include - -#include -#include - - -namespace CurrentMetrics -{ - extern const Metric HTTPConnection; -} - -namespace Poco { class Logger; } - -namespace DB -{ - -class WriteBufferFromHTTPServerResponse; - - -class HTTPHandler : public Poco::Net::HTTPRequestHandler -{ -public: - explicit HTTPHandler(IServer & server_); - - void handleRequest(Poco::Net::HTTPServerRequest & request, Poco::Net::HTTPServerResponse & response) override; - - /// This method is called right before the query execution. - virtual void customizeContext(DB::Context& /* context */) {} - -private: - struct Output - { - /* Raw data - * ↓ - * CascadeWriteBuffer out_maybe_delayed_and_compressed (optional) - * ↓ (forwards data if an overflow is occur or explicitly via pushDelayedResults) - * CompressedWriteBuffer out_maybe_compressed (optional) - * ↓ - * WriteBufferFromHTTPServerResponse out - */ - - std::shared_ptr out; - /// Points to 'out' or to CompressedWriteBuffer(*out), depending on settings. - std::shared_ptr out_maybe_compressed; - /// Points to 'out' or to CompressedWriteBuffer(*out) or to CascadeWriteBuffer. - std::shared_ptr out_maybe_delayed_and_compressed; - - inline bool hasDelayed() const - { - return out_maybe_delayed_and_compressed != out_maybe_compressed; - } - }; - - IServer & server; - Poco::Logger * log; - - /// It is the name of the server that will be sent in an http-header X-ClickHouse-Server-Display-Name. - String server_display_name; - - CurrentMetrics::Increment metric_increment{CurrentMetrics::HTTPConnection}; - - /// Also initializes 'used_output'. - void processQuery( - Poco::Net::HTTPServerRequest & request, - HTMLForm & params, - Poco::Net::HTTPServerResponse & response, - Output & used_output); - - void trySendExceptionToClient( - const std::string & s, - int exception_code, - Poco::Net::HTTPServerRequest & request, - Poco::Net::HTTPServerResponse & response, - Output & used_output); - - void pushDelayedResults(Output & used_output); -}; - -} diff --git a/dbms/programs/server/HTTPHandlerFactory.cpp b/dbms/programs/server/HTTPHandlerFactory.cpp deleted file mode 100644 index ab8fb4f7336..00000000000 --- a/dbms/programs/server/HTTPHandlerFactory.cpp +++ /dev/null @@ -1,43 +0,0 @@ -#include "HTTPHandlerFactory.h" - - -namespace DB -{ - -HTTPRequestHandlerFactoryMain::HTTPRequestHandlerFactoryMain(IServer & server_, const std::string & name_) - : server(server_), log(&Logger::get(name_)), name(name_) -{ -} - -Poco::Net::HTTPRequestHandler * HTTPRequestHandlerFactoryMain::createRequestHandler( - const Poco::Net::HTTPServerRequest & request) // override -{ - LOG_TRACE(log, "HTTP Request for " << name << ". " - << "Method: " - << request.getMethod() - << ", Address: " - << request.clientAddress().toString() - << ", User-Agent: " - << (request.has("User-Agent") ? request.get("User-Agent") : "none") - << (request.hasContentLength() ? 
(", Length: " + std::to_string(request.getContentLength())) : ("")) - << ", Content Type: " << request.getContentType() - << ", Transfer Encoding: " << request.getTransferEncoding()); - - for (auto & handlerFactory: child_handler_factories) - { - auto handler = handlerFactory->createRequestHandler(request); - if (handler != nullptr) - return handler; - } - - if (request.getMethod() == Poco::Net::HTTPRequest::HTTP_GET - || request.getMethod() == Poco::Net::HTTPRequest::HTTP_HEAD - || request.getMethod() == Poco::Net::HTTPRequest::HTTP_POST) - { - return new NotFoundHandler; - } - - return nullptr; -} - -} diff --git a/dbms/programs/server/HTTPHandlerFactory.h b/dbms/programs/server/HTTPHandlerFactory.h deleted file mode 100644 index fcd7fb5d4a2..00000000000 --- a/dbms/programs/server/HTTPHandlerFactory.h +++ /dev/null @@ -1,127 +0,0 @@ -#pragma once - -#include -#include -#include -#include -#include "IServer.h" -#include "HTTPHandler.h" -#include "InterserverIOHTTPHandler.h" -#include "NotFoundHandler.h" -#include "PingRequestHandler.h" -#include "PrometheusRequestHandler.h" -#include "ReplicasStatusHandler.h" -#include "RootRequestHandler.h" - - -namespace DB -{ - -/// Handle request using child handlers -class HTTPRequestHandlerFactoryMain : public Poco::Net::HTTPRequestHandlerFactory -{ -private: - using TThis = HTTPRequestHandlerFactoryMain; - - IServer & server; - Logger * log; - std::string name; - - std::vector> child_handler_factories; - -public: - HTTPRequestHandlerFactoryMain(IServer & server_, const std::string & name_); - - Poco::Net::HTTPRequestHandler * createRequestHandler(const Poco::Net::HTTPServerRequest & request) override; - - template - TThis * addHandler(TArgs &&... args) - { - child_handler_factories.emplace_back(std::make_unique(server, std::forward(args)...)); - return this; - } -}; - - -/// Handle POST or GET with params -template -class HTTPQueryRequestHandlerFactory : public Poco::Net::HTTPRequestHandlerFactory -{ -private: - IServer & server; - -public: - HTTPQueryRequestHandlerFactory(IServer & server_) : server(server_) {} - - Poco::Net::HTTPRequestHandler * createRequestHandler(const Poco::Net::HTTPServerRequest & request) override - { - if (request.getURI().find('?') != std::string::npos || request.getMethod() == Poco::Net::HTTPRequest::HTTP_POST) - return new HandleType(server); - return nullptr; - } -}; - - -/// Handle GET or HEAD endpoint on specified path -template -class HTTPGetRequestHandlerFactory : public Poco::Net::HTTPRequestHandlerFactory -{ -private: - IServer & server; -public: - HTTPGetRequestHandlerFactory(IServer & server_) : server(server_) {} - - Poco::Net::HTTPRequestHandler * createRequestHandler(const Poco::Net::HTTPServerRequest & request) override - { - auto & method = request.getMethod(); - if (!(method == Poco::Net::HTTPRequest::HTTP_GET || method == Poco::Net::HTTPRequest::HTTP_HEAD)) - return nullptr; - - auto & uri = request.getURI(); - bool uri_match = TGetEndpoint::strict_path ? 
uri == TGetEndpoint::path : startsWith(uri, TGetEndpoint::path); - if (uri_match) - return new typename TGetEndpoint::HandleType(server); - - return nullptr; - } -}; - - -struct RootEndpoint -{ - static constexpr auto path = "/"; - static constexpr auto strict_path = true; - using HandleType = RootRequestHandler; -}; - -struct PingEndpoint -{ - static constexpr auto path = "/ping"; - static constexpr auto strict_path = true; - using HandleType = PingRequestHandler; -}; - -struct ReplicasStatusEndpoint -{ - static constexpr auto path = "/replicas_status"; - static constexpr auto strict_path = false; - using HandleType = ReplicasStatusHandler; -}; - -using HTTPRootRequestHandlerFactory = HTTPGetRequestHandlerFactory; -using HTTPPingRequestHandlerFactory = HTTPGetRequestHandlerFactory; -using HTTPReplicasStatusRequestHandlerFactory = HTTPGetRequestHandlerFactory; - -template -HTTPRequestHandlerFactoryMain * createDefaultHandlerFatory(IServer & server, const std::string & name) -{ - auto handlerFactory = new HTTPRequestHandlerFactoryMain(server, name); - handlerFactory->addHandler() - ->addHandler() - ->addHandler() - ->addHandler>(); - return handlerFactory; -} - - -} diff --git a/dbms/programs/server/PingRequestHandler.cpp b/dbms/programs/server/PingRequestHandler.cpp deleted file mode 100644 index 141161ef45c..00000000000 --- a/dbms/programs/server/PingRequestHandler.cpp +++ /dev/null @@ -1,31 +0,0 @@ -#include "PingRequestHandler.h" - -#include - -#include - -#include -#include - -namespace DB -{ - -void PingRequestHandler::handleRequest( - Poco::Net::HTTPServerRequest &, - Poco::Net::HTTPServerResponse & response) -{ - try - { - const auto & config = server.config(); - setResponseDefaultHeaders(response, config.getUInt("keep_alive_timeout", 10)); - - const char * data = "Ok.\n"; - response.sendBuffer(data, strlen(data)); - } - catch (...) - { - tryLogCurrentException("PingRequestHandler"); - } -} - -} diff --git a/dbms/programs/server/PingRequestHandler.h b/dbms/programs/server/PingRequestHandler.h deleted file mode 100644 index 3728fb40adb..00000000000 --- a/dbms/programs/server/PingRequestHandler.h +++ /dev/null @@ -1,27 +0,0 @@ -#pragma once - -#include "IServer.h" - -#include - - -namespace DB -{ - -/// Response with "Ok.\n". Used for availability checks. -class PingRequestHandler : public Poco::Net::HTTPRequestHandler -{ -private: - IServer & server; - -public: - explicit PingRequestHandler(IServer & server_) : server(server_) - { - } - - void handleRequest( - Poco::Net::HTTPServerRequest & request, - Poco::Net::HTTPServerResponse & response) override; -}; - -} diff --git a/dbms/programs/server/PrometheusMetricsWriter.cpp b/dbms/programs/server/PrometheusMetricsWriter.cpp deleted file mode 100644 index 11782710104..00000000000 --- a/dbms/programs/server/PrometheusMetricsWriter.cpp +++ /dev/null @@ -1,101 +0,0 @@ -#include "PrometheusMetricsWriter.h" - -#include - -#include - -namespace -{ - -template -void writeOutLine(DB::WriteBuffer & wb, T && val) -{ - DB::writeText(std::forward(val), wb); - DB::writeChar('\n', wb); -} - -template -void writeOutLine(DB::WriteBuffer & wb, T && val, TArgs &&... 
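The main handler factory asks each child factory in turn and falls back to a NotFound handler for GET/HEAD/POST; the per-endpoint factories match either an exact path (/, /ping) or a path prefix (/replicas_status). The pattern reduced to standard C++, with placeholder request/handler types instead of the Poco classes:

#include <functional>
#include <iostream>
#include <string>
#include <vector>

struct Request
{
    std::string method;
    std::string uri;
};

using Handler = std::function<std::string(const Request &)>;
// A child factory returns a handler if it matches the request, or an empty
// std::function otherwise.
using ChildFactory = std::function<Handler(const Request &)>;

class MainFactory
{
public:
    void addChild(ChildFactory factory) { children.push_back(std::move(factory)); }

    Handler create(const Request & request) const
    {
        for (const auto & child : children)
            if (Handler handler = child(request))
                return handler;   // first child that matches wins

        // Nobody matched: standard methods get a 404 handler, anything else nothing.
        if (request.method == "GET" || request.method == "HEAD" || request.method == "POST")
            return [](const Request &) { return std::string("404 Not Found\n"); };
        return {};
    }

private:
    std::vector<ChildFactory> children;
};

int main()
{
    MainFactory factory;
    factory.addChild([](const Request & req) -> Handler {
        // Exact-path endpoint (strict_path = true in the original).
        if (req.method == "GET" && req.uri == "/ping")
            return [](const Request &) { return std::string("Ok.\n"); };
        return {};
    });
    factory.addChild([](const Request & req) -> Handler {
        // Prefix-matched endpoint (strict_path = false in the original).
        if (req.method == "GET" && req.uri.rfind("/replicas_status", 0) == 0)
            return [](const Request &) { return std::string("replicas: ok\n"); };
        return {};
    });

    Request ping{"GET", "/ping"}, missing{"GET", "/missing"};
    std::cout << factory.create(ping)(ping);        // Ok.
    std::cout << factory.create(missing)(missing);  // 404 Not Found
}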
args) -{ - DB::writeText(std::forward(val), wb); - DB::writeChar(' ', wb); - writeOutLine(wb, std::forward(args)...); -} - -void replaceInvalidChars(std::string & metric_name) -{ - std::replace(metric_name.begin(), metric_name.end(), '.', '_'); -} - -} - - -namespace DB -{ - -PrometheusMetricsWriter::PrometheusMetricsWriter( - const Poco::Util::AbstractConfiguration & config, const std::string & config_name, - const AsynchronousMetrics & async_metrics_) - : async_metrics(async_metrics_) - , send_events(config.getBool(config_name + ".events", true)) - , send_metrics(config.getBool(config_name + ".metrics", true)) - , send_asynchronous_metrics(config.getBool(config_name + ".asynchronous_metrics", true)) -{ -} - -void PrometheusMetricsWriter::write(WriteBuffer & wb) const -{ - if (send_events) - { - for (size_t i = 0, end = ProfileEvents::end(); i < end; ++i) - { - const auto counter = ProfileEvents::global_counters[i].load(std::memory_order_relaxed); - - std::string metric_name{ProfileEvents::getName(static_cast(i))}; - std::string metric_doc{ProfileEvents::getDocumentation(static_cast(i))}; - - replaceInvalidChars(metric_name); - std::string key{profile_events_prefix + metric_name}; - - writeOutLine(wb, "# HELP", key, metric_doc); - writeOutLine(wb, "# TYPE", key, "counter"); - writeOutLine(wb, key, counter); - } - } - - if (send_metrics) - { - for (size_t i = 0, end = CurrentMetrics::end(); i < end; ++i) - { - const auto value = CurrentMetrics::values[i].load(std::memory_order_relaxed); - - std::string metric_name{CurrentMetrics::getName(static_cast(i))}; - std::string metric_doc{CurrentMetrics::getDocumentation(static_cast(i))}; - - replaceInvalidChars(metric_name); - std::string key{current_metrics_prefix + metric_name}; - - writeOutLine(wb, "# HELP", key, metric_doc); - writeOutLine(wb, "# TYPE", key, "gauge"); - writeOutLine(wb, key, value); - } - } - - if (send_asynchronous_metrics) - { - auto async_metrics_values = async_metrics.getValues(); - for (const auto & name_value : async_metrics_values) - { - std::string key{asynchronous_metrics_prefix + name_value.first}; - - replaceInvalidChars(key); - auto value = name_value.second; - - // TODO: add HELP section? 
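PrometheusMetricsWriter produces the Prometheus text exposition format: a "# HELP" line, a "# TYPE" line and the sample itself for every counter or gauge, with dots in metric names rewritten to underscores because dots are not valid in Prometheus identifiers. A compact standalone writer in the same spirit (metric names and values here are invented):

#include <algorithm>
#include <iostream>
#include <string>
#include <vector>

struct Metric
{
    std::string name;
    std::string help;
    std::string type;   // "counter" or "gauge"
    double value;
};

// Write metrics in the Prometheus text exposition format.
void writePrometheus(std::ostream & out, const std::vector<Metric> & metrics, const std::string & prefix)
{
    for (Metric metric : metrics)
    {
        // Dots are not allowed in Prometheus metric names.
        std::replace(metric.name.begin(), metric.name.end(), '.', '_');
        const std::string key = prefix + metric.name;

        out << "# HELP " << key << ' ' << metric.help << '\n';
        out << "# TYPE " << key << ' ' << metric.type << '\n';
        out << key << ' ' << metric.value << '\n';
    }
}

int main()
{
    writePrometheus(std::cout,
                    {{"Query", "Number of executing queries", "gauge", 3},
                     {"jemalloc.resident", "Resident memory, MiB", "gauge", 42.5}},
                    "ClickHouseMetrics_");
}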
asynchronous_metrics contains only key and value - writeOutLine(wb, "# TYPE", key, "gauge"); - writeOutLine(wb, key, value); - } - } -} - -} diff --git a/dbms/programs/server/PrometheusRequestHandler.h b/dbms/programs/server/PrometheusRequestHandler.h deleted file mode 100644 index d3d1dee88b1..00000000000 --- a/dbms/programs/server/PrometheusRequestHandler.h +++ /dev/null @@ -1,61 +0,0 @@ -#pragma once - -#include "IServer.h" -#include "PrometheusMetricsWriter.h" - -#include -#include -#include -#include - -namespace DB -{ - -class PrometheusRequestHandler : public Poco::Net::HTTPRequestHandler -{ -private: - IServer & server; - const PrometheusMetricsWriter & metrics_writer; - -public: - explicit PrometheusRequestHandler(IServer & server_, PrometheusMetricsWriter & metrics_writer_) - : server(server_) - , metrics_writer(metrics_writer_) - { - } - - void handleRequest( - Poco::Net::HTTPServerRequest & request, - Poco::Net::HTTPServerResponse & response) override; -}; - - -template -class PrometheusRequestHandlerFactory : public Poco::Net::HTTPRequestHandlerFactory -{ -private: - IServer & server; - std::string endpoint_path; - PrometheusMetricsWriter metrics_writer; - -public: - PrometheusRequestHandlerFactory(IServer & server_, const AsynchronousMetrics & async_metrics_) - : server(server_) - , endpoint_path(server_.config().getString("prometheus.endpoint", "/metrics")) - , metrics_writer(server_.config(), "prometheus", async_metrics_) - { - } - - Poco::Net::HTTPRequestHandler * createRequestHandler(const Poco::Net::HTTPServerRequest & request) override - { - if (request.getMethod() == Poco::Net::HTTPRequest::HTTP_GET - && request.getURI() == endpoint_path) - return new HandlerType(server, metrics_writer); - - return nullptr; - } -}; - -using PrometheusHandlerFactory = PrometheusRequestHandlerFactory; - -} diff --git a/dbms/programs/server/RootRequestHandler.cpp b/dbms/programs/server/RootRequestHandler.cpp deleted file mode 100644 index 86b57b63208..00000000000 --- a/dbms/programs/server/RootRequestHandler.cpp +++ /dev/null @@ -1,33 +0,0 @@ -#include "RootRequestHandler.h" - -#include - -#include - -#include -#include - -namespace DB -{ - -void RootRequestHandler::handleRequest( - Poco::Net::HTTPServerRequest &, - Poco::Net::HTTPServerResponse & response) -{ - try - { - const auto & config = server.config(); - setResponseDefaultHeaders(response, config.getUInt("keep_alive_timeout", 10)); - - response.setContentType("text/html; charset=UTF-8"); - - const std::string data = config.getString("http_server_default_response", "Ok.\n"); - response.sendBuffer(data.data(), data.size()); - } - catch (...) - { - tryLogCurrentException("RootRequestHandler"); - } -} - -} diff --git a/dbms/programs/server/RootRequestHandler.h b/dbms/programs/server/RootRequestHandler.h deleted file mode 100644 index 1b6d53eeeda..00000000000 --- a/dbms/programs/server/RootRequestHandler.h +++ /dev/null @@ -1,27 +0,0 @@ -#pragma once - -#include "IServer.h" - -#include - - -namespace DB -{ - -/// Response with custom string. Can be used for browser. 
-class RootRequestHandler : public Poco::Net::HTTPRequestHandler -{ -private: - IServer & server; - -public: - explicit RootRequestHandler(IServer & server_) : server(server_) - { - } - - void handleRequest( - Poco::Net::HTTPServerRequest & request, - Poco::Net::HTTPServerResponse & response) override; -}; - -} diff --git a/dbms/programs/server/TCPHandlerFactory.h b/dbms/programs/server/TCPHandlerFactory.h deleted file mode 100644 index 0eb8be13a2d..00000000000 --- a/dbms/programs/server/TCPHandlerFactory.h +++ /dev/null @@ -1,37 +0,0 @@ -#pragma once - -#include -#include -#include "IServer.h" -#include "TCPHandler.h" - -namespace Poco { class Logger; } - -namespace DB -{ - -class TCPHandlerFactory : public Poco::Net::TCPServerConnectionFactory -{ -private: - IServer & server; - Poco::Logger * log; - -public: - explicit TCPHandlerFactory(IServer & server_, bool secure_ = false) - : server(server_) - , log(&Logger::get(std::string("TCP") + (secure_ ? "S" : "") + "HandlerFactory")) - { - } - - Poco::Net::TCPServerConnection * createConnection(const Poco::Net::StreamSocket & socket) override - { - LOG_TRACE(log, - "TCP Request. " - << "Address: " - << socket.peerAddress().toString()); - - return new TCPHandler(server, socket); - } -}; - -} diff --git a/dbms/programs/server/clickhouse-server.cpp b/dbms/programs/server/clickhouse-server.cpp deleted file mode 100644 index 7e0e114f742..00000000000 --- a/dbms/programs/server/clickhouse-server.cpp +++ /dev/null @@ -1,2 +0,0 @@ -int mainEntryClickHouseServer(int argc, char ** argv); -int main(int argc_, char ** argv_) { return mainEntryClickHouseServer(argc_, argv_); } diff --git a/dbms/programs/server/config.xml b/dbms/programs/server/config.xml deleted file mode 100644 index ae15a583fcd..00000000000 --- a/dbms/programs/server/config.xml +++ /dev/null @@ -1,527 +0,0 @@ - - - - - - - - - - - - - trace - /var/log/clickhouse-server/clickhouse-server.log - /var/log/clickhouse-server/clickhouse-server.err.log - 1000M - 10 - - - - 8123 - 9000 - 9004 - - - - - - - - /etc/clickhouse-server/server.crt - /etc/clickhouse-server/server.key - - /etc/clickhouse-server/dhparam.pem - none - true - true - sslv2,sslv3 - true - - - - true - true - sslv2,sslv3 - true - - - - RejectCertificateHandler - - - - - - - - - 9009 - - - - - - - - - - - - - - - - - - - - 4096 - 3 - - - 100 - - - - - - 8589934592 - - - 5368709120 - - - - /var/lib/clickhouse/ - - - /var/lib/clickhouse/tmp/ - - - - - - /var/lib/clickhouse/user_files/ - - - users.xml - - - default - - - - - - default - - - - - - - - - false - - - - - - - - localhost - 9000 - - - - - - - localhost - 9000 - - - - - localhost - 9000 - - - - - - - 127.0.0.1 - 9000 - - - - - 127.0.0.2 - 9000 - - - - - - - localhost - 9440 - 1 - - - - - - - localhost - 9000 - - - - - localhost - 1 - - - - - - - - - - - - - - - - - - 3600 - - - - 3600 - - - 60 - - - - - - - - - - - - - system -
query_log
- - toYYYYMM(event_date) - - - - - 7500 - - - - - system - trace_log
- - toYYYYMM(event_date) - 7500 -
- - - - system - query_thread_log
- toYYYYMM(event_date) - 7500 -
- - - - - - - - system - metric_log
- 7500 - 1000 -
- - - - - - - - - - - - *_dictionary.xml - - - - - - - - - - /clickhouse/task_queue/ddl - - - - - - - - - - - - - - - - click_cost - any - - 0 - 3600 - - - 86400 - 60 - - - - max - - 0 - 60 - - - 3600 - 300 - - - 86400 - 3600 - - - - - - /var/lib/clickhouse/format_schemas/ - - - - - - - diff --git a/dbms/programs/server/users.xml b/dbms/programs/server/users.xml deleted file mode 100644 index d631fbb0f8a..00000000000 --- a/dbms/programs/server/users.xml +++ /dev/null @@ -1,107 +0,0 @@ - - - - - - - - 10000000000 - - - 0 - - - random - - - - - 1 - - - - - - - - - - - - - ::/0 - - - - default - - - default - - - - - - - - - - - 3600 - - - 0 - 0 - 0 - 0 - 0 - - - - diff --git a/dbms/src/Access/AccessControlManager.cpp b/dbms/src/Access/AccessControlManager.cpp deleted file mode 100644 index 5c1806a535b..00000000000 --- a/dbms/src/Access/AccessControlManager.cpp +++ /dev/null @@ -1,85 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include -#include - - -namespace DB -{ -namespace -{ - std::vector> createStorages() - { - std::vector> list; - list.emplace_back(std::make_unique()); - list.emplace_back(std::make_unique()); - return list; - } -} - - -AccessControlManager::AccessControlManager() - : MultipleAccessStorage(createStorages()), - access_rights_context_factory(std::make_unique(*this)), - role_context_factory(std::make_unique(*this)), - row_policy_context_factory(std::make_unique(*this)), - quota_context_factory(std::make_unique(*this)) -{ -} - - -AccessControlManager::~AccessControlManager() -{ -} - - -void AccessControlManager::loadFromConfig(const Poco::Util::AbstractConfiguration & users_config) -{ - auto & users_config_access_storage = dynamic_cast(getStorageByIndex(1)); - users_config_access_storage.loadFromConfig(users_config); -} - - -AccessRightsContextPtr AccessControlManager::getAccessRightsContext( - const UUID & user_id, - const std::vector & current_roles, - bool use_default_roles, - const Settings & settings, - const String & current_database, - const ClientInfo & client_info) const -{ - return access_rights_context_factory->createContext(user_id, current_roles, use_default_roles, settings, current_database, client_info); -} - - -RoleContextPtr AccessControlManager::getRoleContext( - const std::vector & current_roles, - const std::vector & current_roles_with_admin_option) const -{ - return role_context_factory->createContext(current_roles, current_roles_with_admin_option); -} - - -RowPolicyContextPtr AccessControlManager::getRowPolicyContext(const UUID & user_id, const std::vector & enabled_roles) const -{ - return row_policy_context_factory->createContext(user_id, enabled_roles); -} - - -QuotaContextPtr AccessControlManager::getQuotaContext( - const String & user_name, const UUID & user_id, const std::vector & enabled_roles, const Poco::Net::IPAddress & address, const String & custom_quota_key) const -{ - return quota_context_factory->createContext(user_name, user_id, enabled_roles, address, custom_quota_key); -} - - -std::vector AccessControlManager::getQuotaUsageInfo() const -{ - return quota_context_factory->getUsageInfo(); -} - -} diff --git a/dbms/src/Access/AccessControlManager.h b/dbms/src/Access/AccessControlManager.h deleted file mode 100644 index 22c3bafd25c..00000000000 --- a/dbms/src/Access/AccessControlManager.h +++ /dev/null @@ -1,82 +0,0 @@ -#pragma once - -#include -#include -#include - - -namespace Poco -{ - namespace Net - { - class IPAddress; - } - namespace Util - { - class AbstractConfiguration; - } -} - -namespace DB -{ -class 
AccessRightsContext; -using AccessRightsContextPtr = std::shared_ptr; -class AccessRightsContextFactory; -struct User; -using UserPtr = std::shared_ptr; -class RoleContext; -using RoleContextPtr = std::shared_ptr; -class RoleContextFactory; -class RowPolicyContext; -using RowPolicyContextPtr = std::shared_ptr; -class RowPolicyContextFactory; -class QuotaContext; -using QuotaContextPtr = std::shared_ptr; -class QuotaContextFactory; -struct QuotaUsageInfo; -class ClientInfo; -struct Settings; - - -/// Manages access control entities. -class AccessControlManager : public MultipleAccessStorage -{ -public: - AccessControlManager(); - ~AccessControlManager(); - - void loadFromConfig(const Poco::Util::AbstractConfiguration & users_config); - - AccessRightsContextPtr getAccessRightsContext( - const UUID & user_id, - const std::vector & current_roles, - bool use_default_roles, - const Settings & settings, - const String & current_database, - const ClientInfo & client_info) const; - - RoleContextPtr getRoleContext( - const std::vector & current_roles, - const std::vector & current_roles_with_admin_option) const; - - RowPolicyContextPtr getRowPolicyContext( - const UUID & user_id, - const std::vector & enabled_roles) const; - - QuotaContextPtr getQuotaContext( - const String & user_name, - const UUID & user_id, - const std::vector & enabled_roles, - const Poco::Net::IPAddress & address, - const String & custom_quota_key) const; - - std::vector getQuotaUsageInfo() const; - -private: - std::unique_ptr access_rights_context_factory; - std::unique_ptr role_context_factory; - std::unique_ptr row_policy_context_factory; - std::unique_ptr quota_context_factory; -}; - -} diff --git a/dbms/src/Access/AccessFlags.h b/dbms/src/Access/AccessFlags.h deleted file mode 100644 index f35c88f9831..00000000000 --- a/dbms/src/Access/AccessFlags.h +++ /dev/null @@ -1,498 +0,0 @@ -#pragma once - -#include -#include -#include -#include -#include -#include -#include - - -namespace DB -{ -/// Represents a combination of access types which can be granted globally, on databases, tables, columns, etc. -/// For example "SELECT, CREATE USER" is an access type. -class AccessFlags -{ -public: - AccessFlags(AccessType type); - - /// The same as AccessFlags(AccessType::NONE). - AccessFlags() = default; - - /// Constructs from a string like "SELECT". - AccessFlags(const std::string_view & keyword); - - /// Constructs from a list of strings like "SELECT, UPDATE, INSERT". - AccessFlags(const std::vector & keywords); - AccessFlags(const Strings & keywords); - - AccessFlags(const AccessFlags & src) = default; - AccessFlags(AccessFlags && src) = default; - AccessFlags & operator =(const AccessFlags & src) = default; - AccessFlags & operator =(AccessFlags && src) = default; - - /// Returns the access type which contains two specified access types. - AccessFlags & operator |=(const AccessFlags & other) { flags |= other.flags; return *this; } - friend AccessFlags operator |(const AccessFlags & left, const AccessFlags & right) { return AccessFlags(left) |= right; } - - /// Returns the access type which contains the common part of two access types. - AccessFlags & operator &=(const AccessFlags & other) { flags &= other.flags; return *this; } - friend AccessFlags operator &(const AccessFlags & left, const AccessFlags & right) { return AccessFlags(left) &= right; } - - /// Returns the access type which contains only the part of the first access type which is not the part of the second access type. - /// (lhs - rhs) is the same as (lhs & ~rhs). 
- AccessFlags & operator -=(const AccessFlags & other) { flags &= ~other.flags; return *this; } - friend AccessFlags operator -(const AccessFlags & left, const AccessFlags & right) { return AccessFlags(left) -= right; } - - AccessFlags operator ~() const { AccessFlags res; res.flags = ~flags; return res; } - - bool isEmpty() const { return flags.none(); } - explicit operator bool() const { return !isEmpty(); } - bool contains(const AccessFlags & other) const { return (flags & other.flags) == other.flags; } - - friend bool operator ==(const AccessFlags & left, const AccessFlags & right) { return left.flags == right.flags; } - friend bool operator !=(const AccessFlags & left, const AccessFlags & right) { return !(left == right); } - - void clear() { flags.reset(); } - - /// Returns a comma-separated list of keywords, like "SELECT, CREATE USER, UPDATE". - String toString() const; - - /// Returns a list of keywords. - std::vector toKeywords() const; - - /// Returns the access types which could be granted on the database level. - /// For example, SELECT can be granted on the database level, but CREATE_USER cannot. - static AccessFlags databaseLevel(); - - /// Returns the access types which could be granted on the table/dictionary level. - static AccessFlags tableLevel(); - - /// Returns the access types which could be granted on the column/attribute level. - static AccessFlags columnLevel(); - -private: - static constexpr size_t NUM_FLAGS = 128; - using Flags = std::bitset; - Flags flags; - - AccessFlags(const Flags & flags_) : flags(flags_) {} - - template - class Impl; -}; - - -namespace ErrorCodes -{ - extern const int UNKNOWN_ACCESS_TYPE; -} - -template -class AccessFlags::Impl -{ -public: - static const Impl & instance() - { - static const Impl res; - return res; - } - - Flags accessTypeToFlags(AccessType type) const - { - return access_type_to_flags_mapping[static_cast(type)]; - } - - Flags keywordToFlags(const std::string_view & keyword) const - { - auto it = keyword_to_flags_map.find(keyword); - if (it == keyword_to_flags_map.end()) - { - String uppercased_keyword{keyword}; - boost::to_upper(uppercased_keyword); - it = keyword_to_flags_map.find(uppercased_keyword); - if (it == keyword_to_flags_map.end()) - throw Exception("Unknown access type: " + String(keyword), ErrorCodes::UNKNOWN_ACCESS_TYPE); - } - return it->second; - } - - Flags keywordsToFlags(const std::vector & keywords) const - { - Flags res; - for (const auto & keyword : keywords) - res |= keywordToFlags(keyword); - return res; - } - - Flags keywordsToFlags(const Strings & keywords) const - { - Flags res; - for (const auto & keyword : keywords) - res |= keywordToFlags(keyword); - return res; - } - - std::vector flagsToKeywords(const Flags & flags_) const - { - std::vector keywords; - flagsToKeywordsRec(flags_, keywords, *flags_to_keyword_tree); - - if (keywords.empty()) - keywords.push_back("USAGE"); - - return keywords; - } - - String flagsToString(const Flags & flags_) const - { - String str; - for (const auto & keyword : flagsToKeywords(flags_)) - { - if (!str.empty()) - str += ", "; - str += keyword; - } - return str; - } - - const Flags & getDatabaseLevelFlags() const { return all_grantable_on_level[DATABASE_LEVEL]; } - const Flags & getTableLevelFlags() const { return all_grantable_on_level[TABLE_LEVEL]; } - const Flags & getColumnLevelFlags() const { return all_grantable_on_level[COLUMN_LEVEL]; } - -private: - enum Level - { - UNKNOWN_LEVEL = -1, - GLOBAL_LEVEL = 0, - DATABASE_LEVEL = 1, - TABLE_LEVEL = 2, - 
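AccessFlags is essentially a thin wrapper over a 128-bit std::bitset with set algebra: | unions grants, & intersects them, lhs - rhs is lhs & ~rhs, and contains() verifies that every bit of the argument is present. A trimmed-down illustration of those operators (flag indices and names are arbitrary, not the real AccessType values):

#include <bitset>
#include <iostream>

// Minimal flag set in the spirit of AccessFlags: each access type is one bit.
struct Flags
{
    static constexpr size_t NUM_FLAGS = 128;
    std::bitset<NUM_FLAGS> bits;

    Flags & operator|=(const Flags & other) { bits |= other.bits; return *this; }
    Flags & operator&=(const Flags & other) { bits &= other.bits; return *this; }
    Flags & operator-=(const Flags & other) { bits &= ~other.bits; return *this; }

    friend Flags operator|(Flags l, const Flags & r) { return l |= r; }
    friend Flags operator&(Flags l, const Flags & r) { return l &= r; }
    friend Flags operator-(Flags l, const Flags & r) { return l -= r; }

    // True if every flag set in `other` is also set here.
    bool contains(const Flags & other) const { return (bits & other.bits) == other.bits; }
    bool isEmpty() const { return bits.none(); }
};

Flags flag(size_t index)
{
    Flags f;
    f.bits.set(index);
    return f;
}

int main()
{
    const Flags SELECT = flag(0), INSERT = flag(1), CREATE_USER = flag(2);

    Flags granted = SELECT | INSERT;
    std::cout << std::boolalpha
              << granted.contains(SELECT) << ' '               // true
              << granted.contains(SELECT | CREATE_USER) << ' ' // false
              << (granted - INSERT).contains(INSERT) << '\n';  // false
}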
VIEW_LEVEL = 2, - DICTIONARY_LEVEL = 2, - COLUMN_LEVEL = 3, - }; - - struct Node; - using NodePtr = std::unique_ptr; - using Nodes = std::vector; - - template - static Nodes nodes(Args&& ... args) - { - Nodes res; - ext::push_back(res, std::move(args)...); - return res; - } - - struct Node - { - std::string_view keyword; - std::vector aliases; - Flags flags; - Level level = UNKNOWN_LEVEL; - Nodes children; - - Node(std::string_view keyword_, size_t flag_, Level level_) - : keyword(keyword_), level(level_) - { - flags.set(flag_); - } - - Node(std::string_view keyword_, Nodes children_) - : keyword(keyword_), children(std::move(children_)) - { - for (const auto & child : children) - flags |= child->flags; - } - - template - Node(std::string_view keyword_, NodePtr first_child, Args &&... other_children) - : Node(keyword_, nodes(std::move(first_child), std::move(other_children)...)) {} - }; - - static void flagsToKeywordsRec(const Flags & flags_, std::vector & keywords, const Node & start_node) - { - Flags matching_flags = (flags_ & start_node.flags); - if (matching_flags.any()) - { - if (matching_flags == start_node.flags) - { - keywords.push_back(start_node.keyword); - } - else - { - for (const auto & child : start_node.children) - flagsToKeywordsRec(flags_, keywords, *child); - } - } - } - - static void makeFlagsToKeywordTree(NodePtr & flags_to_keyword_tree_) - { - size_t next_flag = 0; - Nodes all; - - auto show = std::make_unique("SHOW", next_flag++, COLUMN_LEVEL); - auto exists = std::make_unique("EXISTS", next_flag++, COLUMN_LEVEL); - ext::push_back(all, std::move(show), std::move(exists)); - - auto select = std::make_unique("SELECT", next_flag++, COLUMN_LEVEL); - auto insert = std::make_unique("INSERT", next_flag++, COLUMN_LEVEL); - ext::push_back(all, std::move(select), std::move(insert)); - - auto update = std::make_unique("UPDATE", next_flag++, COLUMN_LEVEL); - ext::push_back(update->aliases, "ALTER UPDATE"); - auto delet = std::make_unique("DELETE", next_flag++, TABLE_LEVEL); - ext::push_back(delet->aliases, "ALTER DELETE"); - - auto add_column = std::make_unique("ADD COLUMN", next_flag++, COLUMN_LEVEL); - add_column->aliases.push_back("ALTER ADD COLUMN"); - auto modify_column = std::make_unique("MODIFY COLUMN", next_flag++, COLUMN_LEVEL); - modify_column->aliases.push_back("ALTER MODIFY COLUMN"); - auto drop_column = std::make_unique("DROP COLUMN", next_flag++, COLUMN_LEVEL); - drop_column->aliases.push_back("ALTER DROP COLUMN"); - auto comment_column = std::make_unique("COMMENT COLUMN", next_flag++, COLUMN_LEVEL); - comment_column->aliases.push_back("ALTER COMMENT COLUMN"); - auto clear_column = std::make_unique("CLEAR COLUMN", next_flag++, COLUMN_LEVEL); - clear_column->aliases.push_back("ALTER CLEAR COLUMN"); - auto alter_column = std::make_unique("ALTER COLUMN", std::move(add_column), std::move(modify_column), std::move(drop_column), std::move(comment_column), std::move(clear_column)); - - auto alter_order_by = std::make_unique("ALTER ORDER BY", next_flag++, TABLE_LEVEL); - alter_order_by->aliases.push_back("MODIFY ORDER BY"); - alter_order_by->aliases.push_back("ALTER MODIFY ORDER BY"); - auto add_index = std::make_unique("ADD INDEX", next_flag++, TABLE_LEVEL); - add_index->aliases.push_back("ALTER ADD INDEX"); - auto drop_index = std::make_unique("DROP INDEX", next_flag++, TABLE_LEVEL); - drop_index->aliases.push_back("ALTER DROP INDEX"); - auto materialize_index = std::make_unique("MATERIALIZE INDEX", next_flag++, TABLE_LEVEL); - materialize_index->aliases.push_back("ALTER 
MATERIALIZE INDEX"); - auto clear_index = std::make_unique("CLEAR INDEX", next_flag++, TABLE_LEVEL); - clear_index->aliases.push_back("ALTER CLEAR INDEX"); - auto index = std::make_unique("INDEX", std::move(alter_order_by), std::move(add_index), std::move(drop_index), std::move(materialize_index), std::move(clear_index)); - index->aliases.push_back("ALTER INDEX"); - - auto add_constraint = std::make_unique("ADD CONSTRAINT", next_flag++, TABLE_LEVEL); - add_constraint->aliases.push_back("ALTER ADD CONSTRAINT"); - auto drop_constraint = std::make_unique("DROP CONSTRAINT", next_flag++, TABLE_LEVEL); - drop_constraint->aliases.push_back("ALTER DROP CONSTRAINT"); - auto alter_constraint = std::make_unique("CONSTRAINT", std::move(add_constraint), std::move(drop_constraint)); - alter_constraint->aliases.push_back("ALTER CONSTRAINT"); - - auto modify_ttl = std::make_unique("MODIFY TTL", next_flag++, TABLE_LEVEL); - modify_ttl->aliases.push_back("ALTER MODIFY TTL"); - auto materialize_ttl = std::make_unique("MATERIALIZE TTL", next_flag++, TABLE_LEVEL); - materialize_ttl->aliases.push_back("ALTER MATERIALIZE TTL"); - - auto modify_setting = std::make_unique("MODIFY SETTING", next_flag++, TABLE_LEVEL); - modify_setting->aliases.push_back("ALTER MODIFY SETTING"); - - auto move_partition = std::make_unique("MOVE PARTITION", next_flag++, TABLE_LEVEL); - ext::push_back(move_partition->aliases, "ALTER MOVE PARTITION", "MOVE PART", "ALTER MOVE PART"); - auto fetch_partition = std::make_unique("FETCH PARTITION", next_flag++, TABLE_LEVEL); - ext::push_back(fetch_partition->aliases, "ALTER FETCH PARTITION"); - auto freeze_partition = std::make_unique("FREEZE PARTITION", next_flag++, TABLE_LEVEL); - ext::push_back(freeze_partition->aliases, "ALTER FREEZE PARTITION"); - - auto alter_table = std::make_unique("ALTER TABLE", std::move(update), std::move(delet), std::move(alter_column), std::move(index), std::move(alter_constraint), std::move(modify_ttl), std::move(materialize_ttl), std::move(modify_setting), std::move(move_partition), std::move(fetch_partition), std::move(freeze_partition)); - - auto refresh_view = std::make_unique("REFRESH VIEW", next_flag++, VIEW_LEVEL); - ext::push_back(refresh_view->aliases, "ALTER LIVE VIEW REFRESH"); - auto modify_view_query = std::make_unique("MODIFY VIEW QUERY", next_flag++, VIEW_LEVEL); - auto alter_view = std::make_unique("ALTER VIEW", std::move(refresh_view), std::move(modify_view_query)); - - auto alter = std::make_unique("ALTER", std::move(alter_table), std::move(alter_view)); - ext::push_back(all, std::move(alter)); - - auto create_database = std::make_unique("CREATE DATABASE", next_flag++, DATABASE_LEVEL); - auto create_table = std::make_unique("CREATE TABLE", next_flag++, TABLE_LEVEL); - auto create_view = std::make_unique("CREATE VIEW", next_flag++, VIEW_LEVEL); - auto create_dictionary = std::make_unique("CREATE DICTIONARY", next_flag++, DICTIONARY_LEVEL); - auto create = std::make_unique("CREATE", std::move(create_database), std::move(create_table), std::move(create_view), std::move(create_dictionary)); - ext::push_back(all, std::move(create)); - - auto create_temporary_table = std::make_unique("CREATE TEMPORARY TABLE", next_flag++, GLOBAL_LEVEL); - ext::push_back(all, std::move(create_temporary_table)); - - auto drop_database = std::make_unique("DROP DATABASE", next_flag++, DATABASE_LEVEL); - auto drop_table = std::make_unique("DROP TABLE", next_flag++, TABLE_LEVEL); - auto drop_view = std::make_unique("DROP VIEW", next_flag++, VIEW_LEVEL); - auto 
drop_dictionary = std::make_unique("DROP DICTIONARY", next_flag++, DICTIONARY_LEVEL); - auto drop = std::make_unique("DROP", std::move(drop_database), std::move(drop_table), std::move(drop_view), std::move(drop_dictionary)); - ext::push_back(all, std::move(drop)); - - auto truncate_table = std::make_unique("TRUNCATE TABLE", next_flag++, TABLE_LEVEL); - auto truncate_view = std::make_unique("TRUNCATE VIEW", next_flag++, VIEW_LEVEL); - auto truncate = std::make_unique("TRUNCATE", std::move(truncate_table), std::move(truncate_view)); - ext::push_back(all, std::move(truncate)); - - auto optimize = std::make_unique("OPTIMIZE", next_flag++, TABLE_LEVEL); - optimize->aliases.push_back("OPTIMIZE TABLE"); - ext::push_back(all, std::move(optimize)); - - auto kill_query = std::make_unique("KILL QUERY", next_flag++, GLOBAL_LEVEL); - auto kill_mutation = std::make_unique("KILL MUTATION", next_flag++, TABLE_LEVEL); - auto kill = std::make_unique("KILL", std::move(kill_query), std::move(kill_mutation)); - ext::push_back(all, std::move(kill)); - - auto create_user = std::make_unique("CREATE USER", next_flag++, GLOBAL_LEVEL); - auto alter_user = std::make_unique("ALTER USER", next_flag++, GLOBAL_LEVEL); - auto drop_user = std::make_unique("DROP USER", next_flag++, GLOBAL_LEVEL); - auto create_role = std::make_unique("CREATE ROLE", next_flag++, GLOBAL_LEVEL); - auto drop_role = std::make_unique("DROP ROLE", next_flag++, GLOBAL_LEVEL); - auto create_policy = std::make_unique("CREATE POLICY", next_flag++, GLOBAL_LEVEL); - auto alter_policy = std::make_unique("ALTER POLICY", next_flag++, GLOBAL_LEVEL); - auto drop_policy = std::make_unique("DROP POLICY", next_flag++, GLOBAL_LEVEL); - auto create_quota = std::make_unique("CREATE QUOTA", next_flag++, GLOBAL_LEVEL); - auto alter_quota = std::make_unique("ALTER QUOTA", next_flag++, GLOBAL_LEVEL); - auto drop_quota = std::make_unique("DROP QUOTA", next_flag++, GLOBAL_LEVEL); - auto role_admin = std::make_unique("ROLE ADMIN", next_flag++, GLOBAL_LEVEL); - ext::push_back(all, std::move(create_user), std::move(alter_user), std::move(drop_user), std::move(create_role), std::move(drop_role), std::move(create_policy), std::move(alter_policy), std::move(drop_policy), std::move(create_quota), std::move(alter_quota), std::move(drop_quota), std::move(role_admin)); - - auto shutdown = std::make_unique("SHUTDOWN", next_flag++, GLOBAL_LEVEL); - ext::push_back(shutdown->aliases, "SYSTEM SHUTDOWN", "SYSTEM KILL"); - auto drop_cache = std::make_unique("DROP CACHE", next_flag++, GLOBAL_LEVEL); - ext::push_back(drop_cache->aliases, "SYSTEM DROP CACHE", "DROP DNS CACHE", "SYSTEM DROP DNS CACHE", "DROP MARK CACHE", "SYSTEM DROP MARK CACHE", "DROP UNCOMPRESSED CACHE", "SYSTEM DROP UNCOMPRESSED CACHE", "DROP COMPILED EXPRESSION CACHE", "SYSTEM DROP COMPILED EXPRESSION CACHE"); - auto reload_config = std::make_unique("RELOAD CONFIG", next_flag++, GLOBAL_LEVEL); - ext::push_back(reload_config->aliases, "SYSTEM RELOAD CONFIG"); - auto reload_dictionary = std::make_unique("RELOAD DICTIONARY", next_flag++, GLOBAL_LEVEL); - ext::push_back(reload_dictionary->aliases, "SYSTEM RELOAD DICTIONARY", "RELOAD DICTIONARIES", "SYSTEM RELOAD DICTIONARIES", "RELOAD EMBEDDED DICTIONARIES", "SYSTEM RELOAD EMBEDDED DICTIONARIES"); - auto stop_merges = std::make_unique("STOP MERGES", next_flag++, TABLE_LEVEL); - ext::push_back(stop_merges->aliases, "SYSTEM STOP MERGES", "START MERGES", "SYSTEM START MERGES"); - auto stop_ttl_merges = std::make_unique("STOP TTL MERGES", next_flag++, TABLE_LEVEL); - 
ext::push_back(stop_ttl_merges->aliases, "SYSTEM STOP TTL MERGES", "START TTL MERGES", "SYSTEM START TTL MERGES"); - auto stop_fetches = std::make_unique("STOP FETCHES", next_flag++, TABLE_LEVEL); - ext::push_back(stop_fetches->aliases, "SYSTEM STOP FETCHES", "START FETCHES", "SYSTEM START FETCHES"); - auto stop_moves = std::make_unique("STOP MOVES", next_flag++, TABLE_LEVEL); - ext::push_back(stop_moves->aliases, "SYSTEM STOP MOVES", "START MOVES", "SYSTEM START MOVES"); - auto stop_distributed_sends = std::make_unique("STOP DISTRIBUTED SENDS", next_flag++, TABLE_LEVEL); - ext::push_back(stop_distributed_sends->aliases, "SYSTEM STOP DISTRIBUTED SENDS", "START DISTRIBUTED SENDS", "SYSTEM START DISTRIBUTED SENDS"); - auto stop_replicated_sends = std::make_unique("STOP REPLICATED SENDS", next_flag++, TABLE_LEVEL); - ext::push_back(stop_replicated_sends->aliases, "SYSTEM STOP REPLICATED SENDS", "START REPLICATED SENDS", "SYSTEM START REPLICATED SENDS"); - auto stop_replication_queues = std::make_unique("STOP REPLICATION QUEUES", next_flag++, TABLE_LEVEL); - ext::push_back(stop_replication_queues->aliases, "SYSTEM STOP REPLICATION QUEUES", "START REPLICATION QUEUES", "SYSTEM START REPLICATION QUEUES"); - auto sync_replica = std::make_unique("SYNC REPLICA", next_flag++, TABLE_LEVEL); - ext::push_back(sync_replica->aliases, "SYSTEM SYNC REPLICA"); - auto restart_replica = std::make_unique("RESTART REPLICA", next_flag++, TABLE_LEVEL); - ext::push_back(restart_replica->aliases, "SYSTEM RESTART REPLICA"); - auto flush_distributed = std::make_unique("FLUSH DISTRIBUTED", next_flag++, TABLE_LEVEL); - ext::push_back(flush_distributed->aliases, "SYSTEM FLUSH DISTRIBUTED"); - auto flush_logs = std::make_unique("FLUSH LOGS", next_flag++, GLOBAL_LEVEL); - ext::push_back(flush_logs->aliases, "SYSTEM FLUSH LOGS"); - auto system = std::make_unique("SYSTEM", std::move(shutdown), std::move(drop_cache), std::move(reload_config), std::move(reload_dictionary), std::move(stop_merges), std::move(stop_ttl_merges), std::move(stop_fetches), std::move(stop_moves), std::move(stop_distributed_sends), std::move(stop_replicated_sends), std::move(stop_replication_queues), std::move(sync_replica), std::move(restart_replica), std::move(flush_distributed), std::move(flush_logs)); - ext::push_back(all, std::move(system)); - - auto dict_get = std::make_unique("dictGet()", next_flag++, DICTIONARY_LEVEL); - dict_get->aliases.push_back("dictHas()"); - dict_get->aliases.push_back("dictGetHierarchy()"); - dict_get->aliases.push_back("dictIsIn()"); - ext::push_back(all, std::move(dict_get)); - - auto address_to_line = std::make_unique("addressToLine()", next_flag++, GLOBAL_LEVEL); - auto address_to_symbol = std::make_unique("addressToSymbol()", next_flag++, GLOBAL_LEVEL); - auto demangle = std::make_unique("demangle()", next_flag++, GLOBAL_LEVEL); - auto introspection = std::make_unique("INTROSPECTION", std::move(address_to_line), std::move(address_to_symbol), std::move(demangle)); - ext::push_back(introspection->aliases, "INTROSPECTION FUNCTIONS"); - ext::push_back(all, std::move(introspection)); - - auto file = std::make_unique("file()", next_flag++, GLOBAL_LEVEL); - auto url = std::make_unique("url()", next_flag++, GLOBAL_LEVEL); - auto input = std::make_unique("input()", next_flag++, GLOBAL_LEVEL); - auto values = std::make_unique("values()", next_flag++, GLOBAL_LEVEL); - auto numbers = std::make_unique("numbers()", next_flag++, GLOBAL_LEVEL); - auto merge = std::make_unique("merge()", next_flag++, DATABASE_LEVEL); - auto remote = 
std::make_unique("remote()", next_flag++, GLOBAL_LEVEL); - ext::push_back(remote->aliases, "remoteSecure()", "cluster()"); - auto mysql = std::make_unique("mysql()", next_flag++, GLOBAL_LEVEL); - auto odbc = std::make_unique("odbc()", next_flag++, GLOBAL_LEVEL); - auto jdbc = std::make_unique("jdbc()", next_flag++, GLOBAL_LEVEL); - auto hdfs = std::make_unique("hdfs()", next_flag++, GLOBAL_LEVEL); - auto s3 = std::make_unique("s3()", next_flag++, GLOBAL_LEVEL); - auto table_functions = std::make_unique("TABLE FUNCTIONS", std::move(file), std::move(url), std::move(input), std::move(values), std::move(numbers), std::move(merge), std::move(remote), std::move(mysql), std::move(odbc), std::move(jdbc), std::move(hdfs), std::move(s3)); - ext::push_back(all, std::move(table_functions)); - - flags_to_keyword_tree_ = std::make_unique("ALL", std::move(all)); - flags_to_keyword_tree_->aliases.push_back("ALL PRIVILEGES"); - } - - void makeKeywordToFlagsMap(std::unordered_map & keyword_to_flags_map_, Node * start_node = nullptr) - { - if (!start_node) - { - start_node = flags_to_keyword_tree.get(); - keyword_to_flags_map_["USAGE"] = {}; - keyword_to_flags_map_["NONE"] = {}; - keyword_to_flags_map_["NO PRIVILEGES"] = {}; - } - start_node->aliases.emplace_back(start_node->keyword); - for (auto & alias : start_node->aliases) - { - boost::to_upper(alias); - keyword_to_flags_map_[alias] = start_node->flags; - } - for (auto & child : start_node->children) - makeKeywordToFlagsMap(keyword_to_flags_map_, child.get()); - } - - void makeAccessTypeToFlagsMapping(std::vector & access_type_to_flags_mapping_) - { - access_type_to_flags_mapping_.resize(MAX_ACCESS_TYPE); - for (auto access_type : ext::range_with_static_cast(0, MAX_ACCESS_TYPE)) - { - auto str = toKeyword(access_type); - auto it = keyword_to_flags_map.find(str); - if (it == keyword_to_flags_map.end()) - { - String uppercased{str}; - boost::to_upper(uppercased); - it = keyword_to_flags_map.find(uppercased); - } - access_type_to_flags_mapping_[static_cast(access_type)] = it->second; - } - } - - void collectAllGrantableOnLevel(std::vector & all_grantable_on_level_, const Node * start_node = nullptr) - { - if (!start_node) - { - start_node = flags_to_keyword_tree.get(); - all_grantable_on_level.resize(COLUMN_LEVEL + 1); - } - for (int i = 0; i <= start_node->level; ++i) - all_grantable_on_level_[i] |= start_node->flags; - for (const auto & child : start_node->children) - collectAllGrantableOnLevel(all_grantable_on_level_, child.get()); - } - - Impl() - { - makeFlagsToKeywordTree(flags_to_keyword_tree); - makeKeywordToFlagsMap(keyword_to_flags_map); - makeAccessTypeToFlagsMapping(access_type_to_flags_mapping); - collectAllGrantableOnLevel(all_grantable_on_level); - } - - std::unique_ptr flags_to_keyword_tree; - std::unordered_map keyword_to_flags_map; - std::vector access_type_to_flags_mapping; - std::vector all_grantable_on_level; -}; - - -inline AccessFlags::AccessFlags(AccessType type) : flags(Impl<>::instance().accessTypeToFlags(type)) {} -inline AccessFlags::AccessFlags(const std::string_view & keyword) : flags(Impl<>::instance().keywordToFlags(keyword)) {} -inline AccessFlags::AccessFlags(const std::vector & keywords) : flags(Impl<>::instance().keywordsToFlags(keywords)) {} -inline AccessFlags::AccessFlags(const Strings & keywords) : flags(Impl<>::instance().keywordsToFlags(keywords)) {} -inline String AccessFlags::toString() const { return Impl<>::instance().flagsToString(flags); } -inline std::vector AccessFlags::toKeywords() const { return 
Impl<>::instance().flagsToKeywords(flags); } -inline AccessFlags AccessFlags::databaseLevel() { return Impl<>::instance().getDatabaseLevelFlags(); } -inline AccessFlags AccessFlags::tableLevel() { return Impl<>::instance().getTableLevelFlags(); } -inline AccessFlags AccessFlags::columnLevel() { return Impl<>::instance().getColumnLevelFlags(); } - -inline AccessFlags operator |(AccessType left, AccessType right) { return AccessFlags(left) | right; } -inline AccessFlags operator &(AccessType left, AccessType right) { return AccessFlags(left) & right; } -inline AccessFlags operator -(AccessType left, AccessType right) { return AccessFlags(left) - right; } -inline AccessFlags operator ~(AccessType x) { return ~AccessFlags(x); } - -} diff --git a/dbms/src/Access/AccessRights.cpp b/dbms/src/Access/AccessRights.cpp deleted file mode 100644 index 4d33463c3f1..00000000000 --- a/dbms/src/Access/AccessRights.cpp +++ /dev/null @@ -1,785 +0,0 @@ -#include -#include -#include -#include -#include - -namespace DB -{ -namespace ErrorCodes -{ - extern const int INVALID_GRANT; -} - - -namespace -{ - enum Level - { - GLOBAL_LEVEL, - DATABASE_LEVEL, - TABLE_LEVEL, - COLUMN_LEVEL, - }; - - enum RevokeMode - { - NORMAL_REVOKE_MODE, /// for AccessRights::revoke() - PARTIAL_REVOKE_MODE, /// for AccessRights::partialRevoke() - FULL_REVOKE_MODE, /// for AccessRights::fullRevoke() - }; - - struct Helper - { - static const Helper & instance() - { - static const Helper res; - return res; - } - - const AccessFlags database_level_flags = AccessFlags::databaseLevel(); - const AccessFlags table_level_flags = AccessFlags::tableLevel(); - const AccessFlags column_level_flags = AccessFlags::columnLevel(); - const AccessFlags show_flag = AccessType::SHOW; - const AccessFlags exists_flag = AccessType::EXISTS; - const AccessFlags create_table_flag = AccessType::CREATE_TABLE; - const AccessFlags create_temporary_table_flag = AccessType::CREATE_TEMPORARY_TABLE; - }; -} - - -struct AccessRights::Node -{ -public: - std::shared_ptr node_name; - Level level = GLOBAL_LEVEL; - AccessFlags explicit_grants; - AccessFlags partial_revokes; - AccessFlags inherited_access; /// the access inherited from the parent node - AccessFlags raw_access; /// raw_access = (inherited_access - partial_revokes) | explicit_grants - AccessFlags access; /// access = raw_access | implicit_access - AccessFlags min_access; /// min_access = access & child[0].access & ... | child[N-1].access - AccessFlags max_access; /// max_access = access | child[0].access | ... | child[N-1].access - std::unique_ptr> children; - - Node() = default; - Node(const Node & src) { *this = src; } - - Node & operator =(const Node & src) - { - node_name = src.node_name; - level = src.level; - inherited_access = src.inherited_access; - explicit_grants = src.explicit_grants; - partial_revokes = src.partial_revokes; - raw_access = src.raw_access; - access = src.access; - min_access = src.min_access; - max_access = src.max_access; - if (src.children) - children = std::make_unique>(*src.children); - else - children = nullptr; - return *this; - } - - void grant(AccessFlags access_to_grant, const Helper & helper) - { - if (!access_to_grant) - return; - - if (level == GLOBAL_LEVEL) - { - /// Everything can be granted on the global level. 
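Node::grant() above rejects flags that cannot apply at the node's depth (database/table/column) by intersecting the request with a per-level mask and throwing INVALID_GRANT if nothing grantable remains. A standalone sketch of that gating logic, using made-up masks rather than the real databaseLevel()/tableLevel()/columnLevel() sets:

#include <bitset>
#include <stdexcept>

using Flags = std::bitset<8>;

enum Level { GLOBAL_LEVEL, DATABASE_LEVEL, TABLE_LEVEL, COLUMN_LEVEL };

// Hypothetical per-level masks: which bits may be granted at or below each level.
const Flags database_mask{0b0111};
const Flags table_mask{0b0011};
const Flags column_mask{0b0001};

// Mirrors the shape of Node::grant(): trim the request to what the level allows,
// and refuse the grant entirely if nothing grantable is left.
Flags checkGrantableAtLevel(Flags requested, Level level)
{
    Flags mask = ~Flags{};                       // everything is grantable globally
    if (level == DATABASE_LEVEL) mask = database_mask;
    else if (level == TABLE_LEVEL) mask = table_mask;
    else if (level == COLUMN_LEVEL) mask = column_mask;

    Flags grantable = requested & mask;
    if (requested.any() && grantable.none())
        throw std::invalid_argument("flags cannot be granted on this level");
    return grantable;
}

int main()
{
    checkGrantableAtLevel(Flags{0b0001}, COLUMN_LEVEL);    // fine: bit 0 is column-level
    try
    {
        checkGrantableAtLevel(Flags{0b1000}, TABLE_LEVEL); // rejected: bit 3 is global-only
    }
    catch (const std::invalid_argument &) { /* expected */ }
    return 0;
}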
- } - else if (level == DATABASE_LEVEL) - { - AccessFlags grantable = access_to_grant & helper.database_level_flags; - if (!grantable) - throw Exception(access_to_grant.toString() + " cannot be granted on the database level", ErrorCodes::INVALID_GRANT); - access_to_grant = grantable; - } - else if (level == TABLE_LEVEL) - { - AccessFlags grantable = access_to_grant & helper.table_level_flags; - if (!grantable) - throw Exception(access_to_grant.toString() + " cannot be granted on the table level", ErrorCodes::INVALID_GRANT); - access_to_grant = grantable; - } - else if (level == COLUMN_LEVEL) - { - AccessFlags grantable = access_to_grant & helper.column_level_flags; - if (!grantable) - throw Exception(access_to_grant.toString() + " cannot be granted on the column level", ErrorCodes::INVALID_GRANT); - access_to_grant = grantable; - } - - AccessFlags new_explicit_grants = access_to_grant - partial_revokes; - if (level == TABLE_LEVEL) - removeExplicitGrantsRec(new_explicit_grants); - removePartialRevokesRec(access_to_grant); - explicit_grants |= new_explicit_grants; - - calculateAllAccessRec(helper); - } - - template - void grant(const AccessFlags & access_to_grant, const Helper & helper, const std::string_view & name, const Args &... subnames) - { - auto & child = getChild(name); - child.grant(access_to_grant, helper, subnames...); - eraseChildIfEmpty(child); - calculateImplicitAccess(helper); - calculateMinAndMaxAccess(); - } - - template - void grant(const AccessFlags & access_to_grant, const Helper & helper, const std::vector & names) - { - for (const auto & name : names) - { - auto & child = getChild(name); - child.grant(access_to_grant, helper); - eraseChildIfEmpty(child); - } - calculateImplicitAccess(helper); - calculateMinAndMaxAccess(); - } - - template - void revoke(const AccessFlags & access_to_revoke, const Helper & helper) - { - if constexpr (mode == NORMAL_REVOKE_MODE) - { - if (level == TABLE_LEVEL) - removeExplicitGrantsRec(access_to_revoke); - else - removeExplicitGrants(access_to_revoke); - } - else if constexpr (mode == PARTIAL_REVOKE_MODE) - { - AccessFlags new_partial_revokes = access_to_revoke - explicit_grants; - if (level == TABLE_LEVEL) - removeExplicitGrantsRec(access_to_revoke); - else - removeExplicitGrants(access_to_revoke); - removePartialRevokesRec(new_partial_revokes); - partial_revokes |= new_partial_revokes; - } - else /// mode == FULL_REVOKE_MODE - { - AccessFlags new_partial_revokes = access_to_revoke - explicit_grants; - removeExplicitGrantsRec(access_to_revoke); - removePartialRevokesRec(new_partial_revokes); - partial_revokes |= new_partial_revokes; - } - calculateAllAccessRec(helper); - } - - template - void revoke(const AccessFlags & access_to_revoke, const Helper & helper, const std::string_view & name, const Args &... 
subnames) - { - Node * child; - if (mode == NORMAL_REVOKE_MODE) - { - if (!(child = tryGetChild(name))) - return; - } - else - child = &getChild(name); - - child->revoke(access_to_revoke, helper, subnames...); - eraseChildIfEmpty(*child); - calculateImplicitAccess(helper); - calculateMinAndMaxAccess(); - } - - template - void revoke(const AccessFlags & access_to_revoke, const Helper & helper, const std::vector & names) - { - Node * child; - for (const auto & name : names) - { - if (mode == NORMAL_REVOKE_MODE) - { - if (!(child = tryGetChild(name))) - continue; - } - else - child = &getChild(name); - - child->revoke(access_to_revoke, helper); - eraseChildIfEmpty(*child); - } - calculateImplicitAccess(helper); - calculateMinAndMaxAccess(); - } - - bool isGranted(const AccessFlags & flags) const - { - return min_access.contains(flags); - } - - template - bool isGranted(AccessFlags flags, const std::string_view & name, const Args &... subnames) const - { - if (min_access.contains(flags)) - return true; - if (!max_access.contains(flags)) - return false; - - const Node * child = tryGetChild(name); - if (child) - return child->isGranted(flags, subnames...); - else - return access.contains(flags); - } - - template - bool isGranted(AccessFlags flags, const std::vector & names) const - { - if (min_access.contains(flags)) - return true; - if (!max_access.contains(flags)) - return false; - - for (const auto & name : names) - { - const Node * child = tryGetChild(name); - if (child) - { - if (!child->isGranted(flags, name)) - return false; - } - else - { - if (!access.contains(flags)) - return false; - } - } - return true; - } - - friend bool operator ==(const Node & left, const Node & right) - { - if ((left.explicit_grants != right.explicit_grants) || (left.partial_revokes != right.partial_revokes)) - return false; - - if (!left.children) - return !right.children; - - if (!right.children) - return false; - return *left.children == *right.children; - } - - friend bool operator!=(const Node & left, const Node & right) { return !(left == right); } - - bool isEmpty() const - { - return !explicit_grants && !partial_revokes && !children; - } - - void merge(const Node & other, const Helper & helper) - { - mergeRawAccessRec(other); - calculateGrantsAndPartialRevokesRec(); - calculateAllAccessRec(helper); - } - - void traceTree(Poco::Logger * log) const - { - LOG_TRACE(log, "Tree(" << level << "): name=" << (node_name ? *node_name : "NULL") - << ", explicit_grants=" << explicit_grants.toString() - << ", partial_revokes=" << partial_revokes.toString() - << ", inherited_access=" << inherited_access.toString() - << ", raw_access=" << raw_access.toString() - << ", access=" << access.toString() - << ", min_access=" << min_access.toString() - << ", max_access=" << max_access.toString() - << ", num_children=" << (children ? 
children->size() : 0)); - if (children) - { - for (auto & child : *children | boost::adaptors::map_values) - child.traceTree(log); - } - } - -private: - Node * tryGetChild(const std::string_view & name) - { - if (!children) - return nullptr; - auto it = children->find(name); - if (it == children->end()) - return nullptr; - return &it->second; - } - - const Node * tryGetChild(const std::string_view & name) const - { - if (!children) - return nullptr; - auto it = children->find(name); - if (it == children->end()) - return nullptr; - return &it->second; - } - - Node & getChild(const std::string_view & name) - { - auto * child = tryGetChild(name); - if (child) - return *child; - if (!children) - children = std::make_unique>(); - auto new_child_name = std::make_shared(name); - Node & new_child = (*children)[*new_child_name]; - new_child.node_name = std::move(new_child_name); - new_child.level = static_cast(level + 1); - new_child.inherited_access = raw_access; - new_child.raw_access = raw_access; - return new_child; - } - - void eraseChildIfEmpty(Node & child) - { - if (!child.isEmpty()) - return; - auto it = children->find(*child.node_name); - children->erase(it); - if (children->empty()) - children = nullptr; - } - - void calculateImplicitAccess(const Helper & helper) - { - access = raw_access; - if (access & helper.database_level_flags) - access |= helper.show_flag | helper.exists_flag; - else if ((level >= DATABASE_LEVEL) && children) - access |= helper.exists_flag; - - if ((level == GLOBAL_LEVEL) && (access & helper.create_table_flag)) - access |= helper.create_temporary_table_flag; - } - - void calculateMinAndMaxAccess() - { - min_access = access; - max_access = access; - if (children) - { - for (const auto & child : *children | boost::adaptors::map_values) - { - min_access &= child.min_access; - max_access |= child.max_access; - } - } - } - - void calculateAllAccessRec(const Helper & helper) - { - partial_revokes &= inherited_access; - raw_access = (inherited_access - partial_revokes) | explicit_grants; - - /// Traverse tree. 
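The min_access/max_access pair maintained by calculateMinAndMaxAccess() lets isGranted() answer most checks without descending the tree: if the intersection over the whole subtree already contains the flags the answer is yes, and if even the union does not, the answer is no. A standalone sketch of that pruning with a toy tree instead of the real Node:

#include <bitset>
#include <map>
#include <string>

using Flags = std::bitset<8>;

struct ToyNode
{
    Flags access;                          // access effective at this node
    Flags min_access, max_access;          // cached subtree intersection / union
    std::map<std::string, ToyNode> children;

    void recalc()
    {
        min_access = max_access = access;
        for (auto & entry : children)
        {
            ToyNode & child = entry.second;
            child.recalc();
            min_access &= child.min_access;   // bits granted everywhere in the subtree
            max_access |= child.max_access;   // bits granted somewhere in the subtree
        }
    }

    bool isGranted(Flags flags, const std::string & name) const
    {
        if ((min_access & flags) == flags) return true;   // fast path: whole subtree has it
        if ((max_access & flags) != flags) return false;  // fast path: nothing below has it
        auto it = children.find(name);                    // otherwise inspect the named child
        return it != children.end() ? (it->second.access & flags) == flags
                                    : (access & flags) == flags;
    }
};

int main()
{
    ToyNode db;
    db.access = Flags{0b0001};
    db.children["t1"].access = Flags{0b0011};
    db.recalc();
    return db.isGranted(Flags{0b0001}, "any_table") ? 0 : 1;   // true via the min_access fast path
}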
- if (children) - { - for (auto it = children->begin(); it != children->end();) - { - auto & child = it->second; - child.inherited_access = raw_access; - child.calculateAllAccessRec(helper); - if (child.isEmpty()) - it = children->erase(it); - else - ++it; - } - if (children->empty()) - children = nullptr; - } - - calculateImplicitAccess(helper); - calculateMinAndMaxAccess(); - } - - void removeExplicitGrants(const AccessFlags & change) - { - explicit_grants -= change; - } - - void removeExplicitGrantsRec(const AccessFlags & change) - { - removeExplicitGrants(change); - if (children) - { - for (auto & child : *children | boost::adaptors::map_values) - child.removeExplicitGrantsRec(change); - } - } - - void removePartialRevokesRec(const AccessFlags & change) - { - partial_revokes -= change; - if (children) - { - for (auto & child : *children | boost::adaptors::map_values) - child.removePartialRevokesRec(change); - } - } - - void mergeRawAccessRec(const Node & rhs) - { - if (rhs.children) - { - for (const auto & [rhs_childname, rhs_child] : *rhs.children) - getChild(rhs_childname).mergeRawAccessRec(rhs_child); - } - raw_access |= rhs.raw_access; - if (children) - { - for (auto & [lhs_childname, lhs_child] : *children) - { - lhs_child.inherited_access = raw_access; - if (!rhs.tryGetChild(lhs_childname)) - lhs_child.raw_access |= rhs.raw_access; - } - } - } - - void calculateGrantsAndPartialRevokesRec() - { - explicit_grants = raw_access - inherited_access; - partial_revokes = inherited_access - raw_access; - if (children) - { - for (auto & child : *children | boost::adaptors::map_values) - child.calculateGrantsAndPartialRevokesRec(); - } - } -}; - - -AccessRights::AccessRights() = default; -AccessRights::~AccessRights() = default; -AccessRights::AccessRights(AccessRights && src) = default; -AccessRights & AccessRights::operator =(AccessRights && src) = default; - - -AccessRights::AccessRights(const AccessRights & src) -{ - *this = src; -} - - -AccessRights & AccessRights::operator =(const AccessRights & src) -{ - if (src.root) - root = std::make_unique(*src.root); - else - root = nullptr; - return *this; -} - - -AccessRights::AccessRights(const AccessFlags & access) -{ - grant(access); -} - - -bool AccessRights::isEmpty() const -{ - return !root; -} - - -void AccessRights::clear() -{ - root = nullptr; -} - - -template -void AccessRights::grantImpl(const AccessFlags & access, const Args &... 
args) -{ - if (!root) - root = std::make_unique(); - root->grant(access, Helper::instance(), args...); - if (root->isEmpty()) - root = nullptr; -} - -void AccessRights::grantImpl(const AccessRightsElement & element, std::string_view current_database) -{ - if (element.any_database) - { - grantImpl(element.access_flags); - } - else if (element.any_table) - { - if (element.database.empty()) - grantImpl(element.access_flags, current_database); - else - grantImpl(element.access_flags, element.database); - } - else if (element.any_column) - { - if (element.database.empty()) - grantImpl(element.access_flags, current_database, element.table); - else - grantImpl(element.access_flags, element.database, element.table); - } - else - { - if (element.database.empty()) - grantImpl(element.access_flags, current_database, element.table, element.columns); - else - grantImpl(element.access_flags, element.database, element.table, element.columns); - } -} - -void AccessRights::grantImpl(const AccessRightsElements & elements, std::string_view current_database) -{ - for (const auto & element : elements) - grantImpl(element, current_database); -} - -void AccessRights::grant(const AccessFlags & access) { grantImpl(access); } -void AccessRights::grant(const AccessFlags & access, const std::string_view & database) { grantImpl(access, database); } -void AccessRights::grant(const AccessFlags & access, const std::string_view & database, const std::string_view & table) { grantImpl(access, database, table); } -void AccessRights::grant(const AccessFlags & access, const std::string_view & database, const std::string_view & table, const std::string_view & column) { grantImpl(access, database, table, column); } -void AccessRights::grant(const AccessFlags & access, const std::string_view & database, const std::string_view & table, const std::vector & columns) { grantImpl(access, database, table, columns); } -void AccessRights::grant(const AccessFlags & access, const std::string_view & database, const std::string_view & table, const Strings & columns) { grantImpl(access, database, table, columns); } -void AccessRights::grant(const AccessRightsElement & element, std::string_view current_database) { grantImpl(element, current_database); } -void AccessRights::grant(const AccessRightsElements & elements, std::string_view current_database) { grantImpl(elements, current_database); } - -template -void AccessRights::revokeImpl(const AccessFlags & access, const Args &... 
args) -{ - if (!root) - return; - root->revoke(access, Helper::instance(), args...); - if (root->isEmpty()) - root = nullptr; -} - -template -void AccessRights::revokeImpl(const AccessRightsElement & element, std::string_view current_database) -{ - if (element.any_database) - { - revokeImpl(element.access_flags); - } - else if (element.any_table) - { - if (element.database.empty()) - revokeImpl(element.access_flags, current_database); - else - revokeImpl(element.access_flags, element.database); - } - else if (element.any_column) - { - if (element.database.empty()) - revokeImpl(element.access_flags, current_database, element.table); - else - revokeImpl(element.access_flags, element.database, element.table); - } - else - { - if (element.database.empty()) - revokeImpl(element.access_flags, current_database, element.table, element.columns); - else - revokeImpl(element.access_flags, element.database, element.table, element.columns); - } -} - -template -void AccessRights::revokeImpl(const AccessRightsElements & elements, std::string_view current_database) -{ - for (const auto & element : elements) - revokeImpl(element, current_database); -} - -void AccessRights::revoke(const AccessFlags & access) { revokeImpl(access); } -void AccessRights::revoke(const AccessFlags & access, const std::string_view & database) { revokeImpl(access, database); } -void AccessRights::revoke(const AccessFlags & access, const std::string_view & database, const std::string_view & table) { revokeImpl(access, database, table); } -void AccessRights::revoke(const AccessFlags & access, const std::string_view & database, const std::string_view & table, const std::string_view & column) { revokeImpl(access, database, table, column); } -void AccessRights::revoke(const AccessFlags & access, const std::string_view & database, const std::string_view & table, const std::vector & columns) { revokeImpl(access, database, table, columns); } -void AccessRights::revoke(const AccessFlags & access, const std::string_view & database, const std::string_view & table, const Strings & columns) { revokeImpl(access, database, table, columns); } -void AccessRights::revoke(const AccessRightsElement & element, std::string_view current_database) { revokeImpl(element, current_database); } -void AccessRights::revoke(const AccessRightsElements & elements, std::string_view current_database) { revokeImpl(elements, current_database); } - -void AccessRights::partialRevoke(const AccessFlags & access) { revokeImpl(access); } -void AccessRights::partialRevoke(const AccessFlags & access, const std::string_view & database) { revokeImpl(access, database); } -void AccessRights::partialRevoke(const AccessFlags & access, const std::string_view & database, const std::string_view & table) { revokeImpl(access, database, table); } -void AccessRights::partialRevoke(const AccessFlags & access, const std::string_view & database, const std::string_view & table, const std::string_view & column) { revokeImpl(access, database, table, column); } -void AccessRights::partialRevoke(const AccessFlags & access, const std::string_view & database, const std::string_view & table, const std::vector & columns) { revokeImpl(access, database, table, columns); } -void AccessRights::partialRevoke(const AccessFlags & access, const std::string_view & database, const std::string_view & table, const Strings & columns) { revokeImpl(access, database, table, columns); } -void AccessRights::partialRevoke(const AccessRightsElement & element, std::string_view current_database) { revokeImpl(element, 
current_database); } -void AccessRights::partialRevoke(const AccessRightsElements & elements, std::string_view current_database) { revokeImpl(elements, current_database); } - -void AccessRights::fullRevoke(const AccessFlags & access) { revokeImpl(access); } -void AccessRights::fullRevoke(const AccessFlags & access, const std::string_view & database) { revokeImpl(access, database); } -void AccessRights::fullRevoke(const AccessFlags & access, const std::string_view & database, const std::string_view & table) { revokeImpl(access, database, table); } -void AccessRights::fullRevoke(const AccessFlags & access, const std::string_view & database, const std::string_view & table, const std::string_view & column) { revokeImpl(access, database, table, column); } -void AccessRights::fullRevoke(const AccessFlags & access, const std::string_view & database, const std::string_view & table, const std::vector & columns) { revokeImpl(access, database, table, columns); } -void AccessRights::fullRevoke(const AccessFlags & access, const std::string_view & database, const std::string_view & table, const Strings & columns) { revokeImpl(access, database, table, columns); } -void AccessRights::fullRevoke(const AccessRightsElement & element, std::string_view current_database) { revokeImpl(element, current_database); } -void AccessRights::fullRevoke(const AccessRightsElements & elements, std::string_view current_database) { revokeImpl(elements, current_database); } - - -AccessRights::Elements AccessRights::getElements() const -{ - if (!root) - return {}; - Elements res; - if (root->explicit_grants) - res.grants.push_back({root->explicit_grants}); - if (root->children) - { - for (const auto & [db_name, db_node] : *root->children) - { - if (db_node.partial_revokes) - res.partial_revokes.push_back({db_node.partial_revokes, db_name}); - if (db_node.explicit_grants) - res.grants.push_back({db_node.explicit_grants, db_name}); - if (db_node.children) - { - for (const auto & [table_name, table_node] : *db_node.children) - { - if (table_node.partial_revokes) - res.partial_revokes.push_back({table_node.partial_revokes, db_name, table_name}); - if (table_node.explicit_grants) - res.grants.push_back({table_node.explicit_grants, db_name, table_name}); - if (table_node.children) - { - for (const auto & [column_name, column_node] : *table_node.children) - { - if (column_node.partial_revokes) - res.partial_revokes.push_back({column_node.partial_revokes, db_name, table_name, column_name}); - if (column_node.explicit_grants) - res.grants.push_back({column_node.explicit_grants, db_name, table_name, column_name}); - } - } - } - } - } - } - return res; -} - - -String AccessRights::toString() const -{ - auto elements = getElements(); - String res; - if (!elements.grants.empty()) - { - res += "GRANT "; - res += elements.grants.toString(); - } - if (!elements.partial_revokes.empty()) - { - if (!res.empty()) - res += ", "; - res += "REVOKE "; - res += elements.partial_revokes.toString(); - } - if (res.empty()) - res = "GRANT USAGE ON *.*"; - return res; -} - - -template -bool AccessRights::isGrantedImpl(const AccessFlags & access, const Args &... 
args) const -{ - if (!root) - return access.isEmpty(); - return root->isGranted(access, args...); -} - -bool AccessRights::isGrantedImpl(const AccessRightsElement & element, std::string_view current_database) const -{ - if (element.any_database) - { - return isGrantedImpl(element.access_flags); - } - else if (element.any_table) - { - if (element.database.empty()) - return isGrantedImpl(element.access_flags, current_database); - else - return isGrantedImpl(element.access_flags, element.database); - } - else if (element.any_column) - { - if (element.database.empty()) - return isGrantedImpl(element.access_flags, current_database, element.table); - else - return isGrantedImpl(element.access_flags, element.database, element.table); - } - else - { - if (element.database.empty()) - return isGrantedImpl(element.access_flags, current_database, element.table, element.columns); - else - return isGrantedImpl(element.access_flags, element.database, element.table, element.columns); - } -} - -bool AccessRights::isGrantedImpl(const AccessRightsElements & elements, std::string_view current_database) const -{ - for (const auto & element : elements) - if (!isGrantedImpl(element, current_database)) - return false; - return true; -} - -bool AccessRights::isGranted(const AccessFlags & access) const { return isGrantedImpl(access); } -bool AccessRights::isGranted(const AccessFlags & access, const std::string_view & database) const { return isGrantedImpl(access, database); } -bool AccessRights::isGranted(const AccessFlags & access, const std::string_view & database, const std::string_view & table) const { return isGrantedImpl(access, database, table); } -bool AccessRights::isGranted(const AccessFlags & access, const std::string_view & database, const std::string_view & table, const std::string_view & column) const { return isGrantedImpl(access, database, table, column); } -bool AccessRights::isGranted(const AccessFlags & access, const std::string_view & database, const std::string_view & table, const std::vector & columns) const { return isGrantedImpl(access, database, table, columns); } -bool AccessRights::isGranted(const AccessFlags & access, const std::string_view & database, const std::string_view & table, const Strings & columns) const { return isGrantedImpl(access, database, table, columns); } -bool AccessRights::isGranted(const AccessRightsElement & element, std::string_view current_database) const { return isGrantedImpl(element, current_database); } -bool AccessRights::isGranted(const AccessRightsElements & elements, std::string_view current_database) const { return isGrantedImpl(elements, current_database); } - - -bool operator ==(const AccessRights & left, const AccessRights & right) -{ - if (!left.root) - return !right.root; - if (!right.root) - return false; - return *left.root == *right.root; -} - - -void AccessRights::merge(const AccessRights & other) -{ - if (!root) - { - *this = other; - return; - } - if (other.root) - { - root->merge(*other.root, Helper::instance()); - if (root->isEmpty()) - root = nullptr; - } -} - - -void AccessRights::traceTree() const -{ - auto * log = &Poco::Logger::get("AccessRights"); - if (root) - root->traceTree(log); - else - LOG_TRACE(log, "Tree: NULL"); -} -} diff --git a/dbms/src/Access/AccessRights.h b/dbms/src/Access/AccessRights.h deleted file mode 100644 index 5c5ed382f50..00000000000 --- a/dbms/src/Access/AccessRights.h +++ /dev/null @@ -1,139 +0,0 @@ -#pragma once - -#include -#include -#include -#include - - -namespace DB -{ -/// Represents a set of access 
types granted on databases, tables, columns, etc. -/// For example, "GRANT SELECT, UPDATE ON db.*, GRANT INSERT ON db2.mytbl2" are access rights. -class AccessRights -{ -public: - AccessRights(); - AccessRights(const AccessFlags & access); - ~AccessRights(); - AccessRights(const AccessRights & src); - AccessRights & operator =(const AccessRights & src); - AccessRights(AccessRights && src); - AccessRights & operator =(AccessRights && src); - - bool isEmpty() const; - - /// Revokes everything. It's the same as fullRevoke(AccessType::ALL). - void clear(); - - /// Grants access on a specified database/table/column. - /// Does nothing if the specified access has been already granted. - void grant(const AccessFlags & access); - void grant(const AccessFlags & access, const std::string_view & database); - void grant(const AccessFlags & access, const std::string_view & database, const std::string_view & table); - void grant(const AccessFlags & access, const std::string_view & database, const std::string_view & table, const std::string_view & column); - void grant(const AccessFlags & access, const std::string_view & database, const std::string_view & table, const std::vector & columns); - void grant(const AccessFlags & access, const std::string_view & database, const std::string_view & table, const Strings & columns); - void grant(const AccessRightsElement & access, std::string_view current_database = {}); - void grant(const AccessRightsElements & access, std::string_view current_database = {}); - - /// Revokes a specified access granted earlier on a specified database/table/column. - /// Does nothing if the specified access is not granted. - /// If the specified access is granted but on upper level (e.g. database for table, table for columns) - /// or lower level, the function also does nothing. - /// This function implements the standard SQL REVOKE behaviour. - void revoke(const AccessFlags & access); - void revoke(const AccessFlags & access, const std::string_view & database); - void revoke(const AccessFlags & access, const std::string_view & database, const std::string_view & table); - void revoke(const AccessFlags & access, const std::string_view & database, const std::string_view & table, const std::string_view & column); - void revoke(const AccessFlags & access, const std::string_view & database, const std::string_view & table, const std::vector & columns); - void revoke(const AccessFlags & access, const std::string_view & database, const std::string_view & table, const Strings & columns); - void revoke(const AccessRightsElement & access, std::string_view current_database = {}); - void revoke(const AccessRightsElements & access, std::string_view current_database = {}); - - /// Revokes a specified access granted earlier on a specified database/table/column or on lower levels. - /// The function also restricts access if it's granted on upper level. - /// For example, an access could be granted on a database and then revoked on a table in this database. - /// This function implements the MySQL REVOKE behaviour with partial_revokes is ON. 
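The distinction documented above (standard-SQL revoke() versus MySQL-style partialRevoke()) is easiest to see with a grant made on a whole database: a table-level revoke() leaves it untouched, while partialRevoke() carves out an exception for that one table. A standalone toy model of those semantics, not the deleted class itself:

#include <bitset>
#include <map>
#include <string>

using Flags = std::bitset<8>;
const Flags SELECT{0b0001};

// Toy model of one database: a database-wide grant plus per-table partial revokes.
struct ToyDatabaseRights
{
    Flags db_grant;                              // granted on db.*
    std::map<std::string, Flags> table_revokes;  // carved-out exceptions per table

    void grantOnDatabase(Flags f) { db_grant |= f; }

    // Standard SQL semantics: revoking at table level only removes a grant that was
    // made at table level; a database-wide grant is left untouched (here: a no-op).
    void revokeOnTable(const std::string &, Flags) {}

    // MySQL partial_revokes semantics: remember an exception that narrows the
    // database-wide grant for this one table.
    void partialRevokeOnTable(const std::string & table, Flags f) { table_revokes[table] |= f; }

    bool isGranted(const std::string & table, Flags f) const
    {
        Flags effective = db_grant;
        if (auto it = table_revokes.find(table); it != table_revokes.end())
            effective &= ~it->second;
        return (effective & f) == f;
    }
};

int main()
{
    ToyDatabaseRights rights;
    rights.grantOnDatabase(SELECT);                           // GRANT SELECT ON db.*

    rights.revokeOnTable("secret", SELECT);                   // standard REVOKE: no effect here
    bool still_granted = rights.isGranted("secret", SELECT);  // true

    rights.partialRevokeOnTable("secret", SELECT);            // partial revoke carves out db.secret
    bool now_denied = !rights.isGranted("secret", SELECT);    // true
    return (still_granted && now_denied) ? 0 : 1;
}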
- void partialRevoke(const AccessFlags & access); - void partialRevoke(const AccessFlags & access, const std::string_view & database); - void partialRevoke(const AccessFlags & access, const std::string_view & database, const std::string_view & table); - void partialRevoke(const AccessFlags & access, const std::string_view & database, const std::string_view & table, const std::string_view & column); - void partialRevoke(const AccessFlags & access, const std::string_view & database, const std::string_view & table, const std::vector & columns); - void partialRevoke(const AccessFlags & access, const std::string_view & database, const std::string_view & table, const Strings & columns); - void partialRevoke(const AccessRightsElement & access, std::string_view current_database = {}); - void partialRevoke(const AccessRightsElements & access, std::string_view current_database = {}); - - /// Revokes a specified access granted earlier on a specified database/table/column or on lower levels. - /// The function also restricts access if it's granted on upper level. - /// For example, fullRevoke(AccessType::ALL) revokes all grants at all, just like clear(); - /// fullRevoke(AccessType::SELECT, db) means it's not allowed to execute SELECT in that database anymore (from any table). - void fullRevoke(const AccessFlags & access); - void fullRevoke(const AccessFlags & access, const std::string_view & database); - void fullRevoke(const AccessFlags & access, const std::string_view & database, const std::string_view & table); - void fullRevoke(const AccessFlags & access, const std::string_view & database, const std::string_view & table, const std::string_view & column); - void fullRevoke(const AccessFlags & access, const std::string_view & database, const std::string_view & table, const std::vector & columns); - void fullRevoke(const AccessFlags & access, const std::string_view & database, const std::string_view & table, const Strings & columns); - void fullRevoke(const AccessRightsElement & access, std::string_view current_database = {}); - void fullRevoke(const AccessRightsElements & access, std::string_view current_database = {}); - - /// Returns the information about all the access granted. - struct Elements - { - AccessRightsElements grants; - AccessRightsElements partial_revokes; - }; - Elements getElements() const; - - /// Returns the information about all the access granted as a string. - String toString() const; - - /// Whether a specified access granted. 
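getElements() and toString(), declared above and implemented earlier in this diff, flatten the tree into two lists (explicit grants and partial revokes) and serialize them as a single "GRANT ..., REVOKE ..." string, falling back to "GRANT USAGE ON *.*" when nothing is granted. A standalone sketch of that serialization step with simplified element records:

#include <string>
#include <vector>

// Simplified stand-in for AccessRightsElement: one privilege on one object.
struct ToyElement
{
    std::string flags;     // e.g. "SELECT"
    std::string object;    // e.g. "db.table" or "*.*"
};

static std::string join(const std::vector<ToyElement> & elements)
{
    std::string out;
    for (const auto & e : elements)
    {
        if (!out.empty())
            out += ", ";
        out += e.flags + " ON " + e.object;
    }
    return out;
}

// Mirrors the shape of AccessRights::toString(): grants first, then partial revokes,
// with a USAGE fallback when both lists are empty.
std::string toString(const std::vector<ToyElement> & grants, const std::vector<ToyElement> & partial_revokes)
{
    std::string res;
    if (!grants.empty())
        res += "GRANT " + join(grants);
    if (!partial_revokes.empty())
    {
        if (!res.empty())
            res += ", ";
        res += "REVOKE " + join(partial_revokes);
    }
    if (res.empty())
        res = "GRANT USAGE ON *.*";
    return res;
}

int main()
{
    // Produces "GRANT SELECT ON db.*, REVOKE SELECT ON db.secret".
    std::string s = toString({{"SELECT", "db.*"}}, {{"SELECT", "db.secret"}});
    return s.empty() ? 1 : 0;
}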
- bool isGranted(const AccessFlags & access) const; - bool isGranted(const AccessFlags & access, const std::string_view & database) const; - bool isGranted(const AccessFlags & access, const std::string_view & database, const std::string_view & table) const; - bool isGranted(const AccessFlags & access, const std::string_view & database, const std::string_view & table, const std::string_view & column) const; - bool isGranted(const AccessFlags & access, const std::string_view & database, const std::string_view & table, const std::vector & columns) const; - bool isGranted(const AccessFlags & access, const std::string_view & database, const std::string_view & table, const Strings & columns) const; - bool isGranted(const AccessRightsElement & access, std::string_view current_database = {}) const; - bool isGranted(const AccessRightsElements & access, std::string_view current_database = {}) const; - - friend bool operator ==(const AccessRights & left, const AccessRights & right); - friend bool operator !=(const AccessRights & left, const AccessRights & right) { return !(left == right); } - - /// Merges two sets of access rights together. - /// It's used to combine access rights from multiple roles. - void merge(const AccessRights & other); - -private: - template - void grantImpl(const AccessFlags & access, const Args &... args); - - void grantImpl(const AccessRightsElement & access, std::string_view current_database); - void grantImpl(const AccessRightsElements & access, std::string_view current_database); - - template - void revokeImpl(const AccessFlags & access, const Args &... args); - - template - void revokeImpl(const AccessRightsElement & access, std::string_view current_database); - - template - void revokeImpl(const AccessRightsElements & access, std::string_view current_database); - - template - bool isGrantedImpl(const AccessFlags & access, const Args &... args) const; - - bool isGrantedImpl(const AccessRightsElement & access, std::string_view current_database) const; - bool isGrantedImpl(const AccessRightsElements & access, std::string_view current_database) const; - - template - AccessFlags getAccessImpl(const Args &... args) const; - - void traceTree() const; - - struct Node; - std::unique_ptr root; -}; - -} diff --git a/dbms/src/Access/AccessRightsContext.cpp b/dbms/src/Access/AccessRightsContext.cpp deleted file mode 100644 index 9a32a1234f0..00000000000 --- a/dbms/src/Access/AccessRightsContext.cpp +++ /dev/null @@ -1,576 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - - -namespace DB -{ -namespace ErrorCodes -{ - extern const int ACCESS_DENIED; - extern const int READONLY; - extern const int QUERY_IS_PROHIBITED; - extern const int FUNCTION_NOT_ALLOWED; - extern const int UNKNOWN_USER; -} - - -namespace -{ - enum CheckAccessRightsMode - { - RETURN_FALSE_IF_ACCESS_DENIED, - LOG_WARNING_IF_ACCESS_DENIED, - THROW_IF_ACCESS_DENIED, - }; - - - String formatSkippedMessage() - { - return ""; - } - - String formatSkippedMessage(const std::string_view & database) - { - return ". Skipped database " + backQuoteIfNeed(database); - } - - String formatSkippedMessage(const std::string_view & database, const std::string_view & table) - { - String str = ". 
Skipped table "; - if (!database.empty()) - str += backQuoteIfNeed(database) + "."; - str += backQuoteIfNeed(table); - return str; - } - - String formatSkippedMessage(const std::string_view & database, const std::string_view & table, const std::string_view & column) - { - String str = ". Skipped column " + backQuoteIfNeed(column) + " ON "; - if (!database.empty()) - str += backQuoteIfNeed(database) + "."; - str += backQuoteIfNeed(table); - return str; - } - - template - String formatSkippedMessage(const std::string_view & database, const std::string_view & table, const std::vector & columns) - { - if (columns.size() == 1) - return formatSkippedMessage(database, table, columns[0]); - - String str = ". Skipped columns "; - bool need_comma = false; - for (const auto & column : columns) - { - if (std::exchange(need_comma, true)) - str += ", "; - str += backQuoteIfNeed(column); - } - str += " ON "; - if (!database.empty()) - str += backQuoteIfNeed(database) + "."; - str += backQuoteIfNeed(table); - return str; - } -} - - -AccessRightsContext::AccessRightsContext() -{ - auto everything_granted = boost::make_shared(); - everything_granted->grant(AccessType::ALL); - boost::range::fill(result_access_cache, everything_granted); - - enabled_roles_with_admin_option = boost::make_shared>(); - - row_policy_context = std::make_shared(); - quota_context = std::make_shared(); -} - - -AccessRightsContext::AccessRightsContext(const AccessControlManager & manager_, const Params & params_) - : manager(&manager_) - , params(params_) -{ - subscription_for_user_change = manager->subscribeForChanges( - *params.user_id, [this](const UUID &, const AccessEntityPtr & entity) - { - UserPtr changed_user = entity ? typeid_cast(entity) : nullptr; - std::lock_guard lock{mutex}; - setUser(changed_user); - }); - - setUser(manager->read(*params.user_id)); -} - - -void AccessRightsContext::setUser(const UserPtr & user_) const -{ - user = user_; - if (!user) - { - /// User has been dropped. 
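AccessRightsContext keeps its cached user, roles, row-policy and quota objects in sync by subscribing to the manager and re-running setUser()/setRolesInfo() under its mutex whenever the entity changes, dropping to a "nothing granted" state when the user disappears (the branch above). A standalone sketch of that subscribe-and-recompute pattern, with hypothetical names rather than the real manager API:

#include <functional>
#include <memory>
#include <mutex>
#include <string>
#include <vector>

struct ToyUser { std::string name; };
using ToyUserPtr = std::shared_ptr<const ToyUser>;

// Hypothetical registry: stores one user and notifies subscribers on every change.
class ToyRegistry
{
public:
    using OnChange = std::function<void(const ToyUserPtr &)>;

    void subscribe(OnChange handler) { handlers.push_back(std::move(handler)); }

    void setUser(ToyUserPtr user)
    {
        current = std::move(user);          // nullptr means the user was dropped
        for (const auto & handler : handlers)
            handler(current);
    }

private:
    ToyUserPtr current;
    std::vector<OnChange> handlers;
};

// Hypothetical context: recomputes its cached state under a mutex on every callback,
// falling back to "nothing granted" when the user has been dropped.
class ToyContext
{
public:
    explicit ToyContext(ToyRegistry & registry)
    {
        registry.subscribe([this](const ToyUserPtr & user)
        {
            std::lock_guard<std::mutex> lock{mutex};
            cached_user = user;
            everything_denied = (user == nullptr);
        });
    }

    bool isDenied() const
    {
        std::lock_guard<std::mutex> lock{mutex};
        return everything_denied;
    }

private:
    mutable std::mutex mutex;
    ToyUserPtr cached_user;
    bool everything_denied = true;
};

int main()
{
    ToyRegistry registry;
    ToyContext context{registry};

    registry.setUser(std::make_shared<const ToyUser>(ToyUser{"alice"}));
    bool allowed = !context.isDenied();     // user present: context recomputed

    registry.setUser(nullptr);              // user dropped: back to "nothing granted"
    bool denied = context.isDenied();
    return (allowed && denied) ? 0 : 1;
}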
- auto nothing_granted = boost::make_shared(); - boost::range::fill(result_access_cache, nothing_granted); - subscription_for_user_change = {}; - subscription_for_roles_info_change = {}; - role_context = nullptr; - enabled_roles_with_admin_option = boost::make_shared>(); - row_policy_context = std::make_shared(); - quota_context = std::make_shared(); - return; - } - - user_name = user->getName(); - trace_log = &Poco::Logger::get("AccessRightsContext (" + user_name + ")"); - - std::vector current_roles, current_roles_with_admin_option; - if (params.use_default_roles) - { - for (const UUID & id : user->granted_roles) - { - if (user->default_roles.match(id)) - current_roles.push_back(id); - } - boost::range::set_intersection(current_roles, user->granted_roles_with_admin_option, - std::back_inserter(current_roles_with_admin_option)); - } - else - { - current_roles.reserve(params.current_roles.size()); - for (const auto & id : params.current_roles) - { - if (user->granted_roles.contains(id)) - current_roles.push_back(id); - if (user->granted_roles_with_admin_option.contains(id)) - current_roles_with_admin_option.push_back(id); - } - } - - subscription_for_roles_info_change = {}; - role_context = manager->getRoleContext(current_roles, current_roles_with_admin_option); - subscription_for_roles_info_change = role_context->subscribeForChanges([this](const CurrentRolesInfoPtr & roles_info_) - { - std::lock_guard lock{mutex}; - setRolesInfo(roles_info_); - }); - - setRolesInfo(role_context->getInfo()); -} - - -void AccessRightsContext::setRolesInfo(const CurrentRolesInfoPtr & roles_info_) const -{ - assert(roles_info_); - roles_info = roles_info_; - enabled_roles_with_admin_option.store(nullptr /* need to recalculate */); - boost::range::fill(result_access_cache, nullptr /* need recalculate */); - row_policy_context = manager->getRowPolicyContext(*params.user_id, roles_info->enabled_roles); - quota_context = manager->getQuotaContext(user_name, *params.user_id, roles_info->enabled_roles, params.address, params.quota_key); -} - - -void AccessRightsContext::checkPassword(const String & password) const -{ - std::lock_guard lock{mutex}; - if (!user) - throw Exception(user_name + ": User has been dropped", ErrorCodes::UNKNOWN_USER); - user->authentication.checkPassword(password, user_name); -} - -void AccessRightsContext::checkHostIsAllowed() const -{ - std::lock_guard lock{mutex}; - if (!user) - throw Exception(user_name + ": User has been dropped", ErrorCodes::UNKNOWN_USER); - user->allowed_client_hosts.checkContains(params.address, user_name); -} - - -template -bool AccessRightsContext::checkAccessImpl(Poco::Logger * log_, const AccessFlags & access, const Args &... args) const -{ - auto result_access = calculateResultAccess(grant_option); - bool is_granted = result_access->isGranted(access, args...); - - if (trace_log) - LOG_TRACE(trace_log, "Access " << (is_granted ? 
"granted" : "denied") << ": " << (AccessRightsElement{access, args...}.toString())); - - if (is_granted) - return true; - - if constexpr (mode == RETURN_FALSE_IF_ACCESS_DENIED) - return false; - - if constexpr (mode == LOG_WARNING_IF_ACCESS_DENIED) - { - if (!log_) - return false; - } - - auto show_error = [&](const String & msg, [[maybe_unused]] int error_code) - { - if constexpr (mode == THROW_IF_ACCESS_DENIED) - throw Exception(user_name + ": " + msg, error_code); - else if constexpr (mode == LOG_WARNING_IF_ACCESS_DENIED) - LOG_WARNING(log_, user_name + ": " + msg + formatSkippedMessage(args...)); - }; - - if (!user) - { - show_error("User has been dropped", ErrorCodes::UNKNOWN_USER); - } - else if (grant_option && calculateResultAccess(false, params.readonly, params.allow_ddl, params.allow_introspection)->isGranted(access, args...)) - { - show_error( - "Not enough privileges. " - "The required privileges have been granted, but without grant option. " - "To execute this query it's necessary to have the grant " - + AccessRightsElement{access, args...}.toString() + " WITH GRANT OPTION", - ErrorCodes::ACCESS_DENIED); - } - else if (params.readonly && calculateResultAccess(false, false, params.allow_ddl, params.allow_introspection)->isGranted(access, args...)) - { - if (params.interface == ClientInfo::Interface::HTTP && params.http_method == ClientInfo::HTTPMethod::GET) - show_error( - "Cannot execute query in readonly mode. " - "For queries over HTTP, method GET implies readonly. You should use method POST for modifying queries", - ErrorCodes::READONLY); - else - show_error("Cannot execute query in readonly mode", ErrorCodes::READONLY); - } - else if (!params.allow_ddl && calculateResultAccess(false, params.readonly, true, params.allow_introspection)->isGranted(access, args...)) - { - show_error("Cannot execute query. DDL queries are prohibited for the user", ErrorCodes::QUERY_IS_PROHIBITED); - } - else if (!params.allow_introspection && calculateResultAccess(false, params.readonly, params.allow_ddl, true)->isGranted(access, args...)) - { - show_error("Introspection functions are disabled, because setting 'allow_introspection_functions' is set to 0", ErrorCodes::FUNCTION_NOT_ALLOWED); - } - else - { - show_error( - "Not enough privileges. To execute this query it's necessary to have the grant " - + AccessRightsElement{access, args...}.toString() + (grant_option ? 
" WITH GRANT OPTION" : ""), - ErrorCodes::ACCESS_DENIED); - } - - return false; -} - - -template -bool AccessRightsContext::checkAccessImpl(Poco::Logger * log_, const AccessRightsElement & element) const -{ - if (element.any_database) - { - return checkAccessImpl(log_, element.access_flags); - } - else if (element.any_table) - { - if (element.database.empty()) - return checkAccessImpl(log_, element.access_flags, params.current_database); - else - return checkAccessImpl(log_, element.access_flags, element.database); - } - else if (element.any_column) - { - if (element.database.empty()) - return checkAccessImpl(log_, element.access_flags, params.current_database, element.table); - else - return checkAccessImpl(log_, element.access_flags, element.database, element.table); - } - else - { - if (element.database.empty()) - return checkAccessImpl(log_, element.access_flags, params.current_database, element.table, element.columns); - else - return checkAccessImpl(log_, element.access_flags, element.database, element.table, element.columns); - } -} - - -template -bool AccessRightsContext::checkAccessImpl(Poco::Logger * log_, const AccessRightsElements & elements) const -{ - for (const auto & element : elements) - if (!checkAccessImpl(log_, element)) - return false; - return true; -} - - -void AccessRightsContext::checkAccess(const AccessFlags & access) const { checkAccessImpl(nullptr, access); } -void AccessRightsContext::checkAccess(const AccessFlags & access, const std::string_view & database) const { checkAccessImpl(nullptr, access, database); } -void AccessRightsContext::checkAccess(const AccessFlags & access, const std::string_view & database, const std::string_view & table) const { checkAccessImpl(nullptr, access, database, table); } -void AccessRightsContext::checkAccess(const AccessFlags & access, const std::string_view & database, const std::string_view & table, const std::string_view & column) const { checkAccessImpl(nullptr, access, database, table, column); } -void AccessRightsContext::checkAccess(const AccessFlags & access, const std::string_view & database, const std::string_view & table, const std::vector & columns) const { checkAccessImpl(nullptr, access, database, table, columns); } -void AccessRightsContext::checkAccess(const AccessFlags & access, const std::string_view & database, const std::string_view & table, const Strings & columns) const { checkAccessImpl(nullptr, access, database, table, columns); } -void AccessRightsContext::checkAccess(const AccessRightsElement & access) const { checkAccessImpl(nullptr, access); } -void AccessRightsContext::checkAccess(const AccessRightsElements & access) const { checkAccessImpl(nullptr, access); } - -bool AccessRightsContext::isGranted(const AccessFlags & access) const { return checkAccessImpl(nullptr, access); } -bool AccessRightsContext::isGranted(const AccessFlags & access, const std::string_view & database) const { return checkAccessImpl(nullptr, access, database); } -bool AccessRightsContext::isGranted(const AccessFlags & access, const std::string_view & database, const std::string_view & table) const { return checkAccessImpl(nullptr, access, database, table); } -bool AccessRightsContext::isGranted(const AccessFlags & access, const std::string_view & database, const std::string_view & table, const std::string_view & column) const { return checkAccessImpl(nullptr, access, database, table, column); } -bool AccessRightsContext::isGranted(const AccessFlags & access, const std::string_view & database, const std::string_view & table, 
const std::vector & columns) const { return checkAccessImpl(nullptr, access, database, table, columns); } -bool AccessRightsContext::isGranted(const AccessFlags & access, const std::string_view & database, const std::string_view & table, const Strings & columns) const { return checkAccessImpl(nullptr, access, database, table, columns); } -bool AccessRightsContext::isGranted(const AccessRightsElement & access) const { return checkAccessImpl(nullptr, access); } -bool AccessRightsContext::isGranted(const AccessRightsElements & access) const { return checkAccessImpl(nullptr, access); } - -bool AccessRightsContext::isGranted(Poco::Logger * log_, const AccessFlags & access) const { return checkAccessImpl(log_, access); } -bool AccessRightsContext::isGranted(Poco::Logger * log_, const AccessFlags & access, const std::string_view & database) const { return checkAccessImpl(log_, access, database); } -bool AccessRightsContext::isGranted(Poco::Logger * log_, const AccessFlags & access, const std::string_view & database, const std::string_view & table) const { return checkAccessImpl(log_, access, database, table); } -bool AccessRightsContext::isGranted(Poco::Logger * log_, const AccessFlags & access, const std::string_view & database, const std::string_view & table, const std::string_view & column) const { return checkAccessImpl(log_, access, database, table, column); } -bool AccessRightsContext::isGranted(Poco::Logger * log_, const AccessFlags & access, const std::string_view & database, const std::string_view & table, const std::vector & columns) const { return checkAccessImpl(log_, access, database, table, columns); } -bool AccessRightsContext::isGranted(Poco::Logger * log_, const AccessFlags & access, const std::string_view & database, const std::string_view & table, const Strings & columns) const { return checkAccessImpl(log_, access, database, table, columns); } -bool AccessRightsContext::isGranted(Poco::Logger * log_, const AccessRightsElement & access) const { return checkAccessImpl(log_, access); } -bool AccessRightsContext::isGranted(Poco::Logger * log_, const AccessRightsElements & access) const { return checkAccessImpl(log_, access); } - -void AccessRightsContext::checkGrantOption(const AccessFlags & access) const { checkAccessImpl(nullptr, access); } -void AccessRightsContext::checkGrantOption(const AccessFlags & access, const std::string_view & database) const { checkAccessImpl(nullptr, access, database); } -void AccessRightsContext::checkGrantOption(const AccessFlags & access, const std::string_view & database, const std::string_view & table) const { checkAccessImpl(nullptr, access, database, table); } -void AccessRightsContext::checkGrantOption(const AccessFlags & access, const std::string_view & database, const std::string_view & table, const std::string_view & column) const { checkAccessImpl(nullptr, access, database, table, column); } -void AccessRightsContext::checkGrantOption(const AccessFlags & access, const std::string_view & database, const std::string_view & table, const std::vector & columns) const { checkAccessImpl(nullptr, access, database, table, columns); } -void AccessRightsContext::checkGrantOption(const AccessFlags & access, const std::string_view & database, const std::string_view & table, const Strings & columns) const { checkAccessImpl(nullptr, access, database, table, columns); } -void AccessRightsContext::checkGrantOption(const AccessRightsElement & access) const { checkAccessImpl(nullptr, access); } -void AccessRightsContext::checkGrantOption(const 
AccessRightsElements & access) const { checkAccessImpl(nullptr, access); } - - -void AccessRightsContext::checkAdminOption(const UUID & role_id) const -{ - if (isGranted(AccessType::ROLE_ADMIN)) - return; - - boost::shared_ptr> enabled_roles = enabled_roles_with_admin_option.load(); - if (!enabled_roles) - { - std::lock_guard lock{mutex}; - enabled_roles = enabled_roles_with_admin_option.load(); - if (!enabled_roles) - { - if (roles_info) - enabled_roles = boost::make_shared>(roles_info->enabled_roles_with_admin_option.begin(), roles_info->enabled_roles_with_admin_option.end()); - else - enabled_roles = boost::make_shared>(); - enabled_roles_with_admin_option.store(enabled_roles); - } - } - - if (enabled_roles->contains(role_id)) - return; - - std::optional role_name = manager->readName(role_id); - if (!role_name) - role_name = "ID {" + toString(role_id) + "}"; - throw Exception( - getUserName() + ": Not enough privileges. To execute this query it's necessary to have the grant " + backQuoteIfNeed(*role_name) - + " WITH ADMIN OPTION ", - ErrorCodes::ACCESS_DENIED); -} - - -boost::shared_ptr AccessRightsContext::calculateResultAccess(bool grant_option) const -{ - return calculateResultAccess(grant_option, params.readonly, params.allow_ddl, params.allow_introspection); -} - - -boost::shared_ptr AccessRightsContext::calculateResultAccess(bool grant_option, UInt64 readonly_, bool allow_ddl_, bool allow_introspection_) const -{ - size_t cache_index = static_cast(readonly_ != params.readonly) - + static_cast(allow_ddl_ != params.allow_ddl) * 2 + - + static_cast(allow_introspection_ != params.allow_introspection) * 3 - + static_cast(grant_option) * 4; - assert(cache_index < std::size(result_access_cache)); - auto cached = result_access_cache[cache_index].load(); - if (cached) - return cached; - - std::lock_guard lock{mutex}; - cached = result_access_cache[cache_index].load(); - if (cached) - return cached; - - auto result_ptr = boost::make_shared(); - auto & result = *result_ptr; - - if (grant_option) - { - result = user->access_with_grant_option; - if (roles_info) - result.merge(roles_info->access_with_grant_option); - } - else - { - result = user->access; - if (roles_info) - result.merge(roles_info->access); - } - - static const AccessFlags table_ddl = AccessType::CREATE_DATABASE | AccessType::CREATE_TABLE | AccessType::CREATE_VIEW - | AccessType::ALTER_TABLE | AccessType::ALTER_VIEW | AccessType::DROP_DATABASE | AccessType::DROP_TABLE | AccessType::DROP_VIEW - | AccessType::TRUNCATE; - static const AccessFlags dictionary_ddl = AccessType::CREATE_DICTIONARY | AccessType::DROP_DICTIONARY; - static const AccessFlags table_and_dictionary_ddl = table_ddl | dictionary_ddl; - static const AccessFlags write_table_access = AccessType::INSERT | AccessType::OPTIMIZE; - static const AccessFlags all_dcl = AccessType::CREATE_USER | AccessType::CREATE_ROLE | AccessType::CREATE_POLICY - | AccessType::CREATE_QUOTA | AccessType::ALTER_USER | AccessType::ALTER_POLICY | AccessType::ALTER_QUOTA | AccessType::DROP_USER - | AccessType::DROP_ROLE | AccessType::DROP_POLICY | AccessType::DROP_QUOTA | AccessType::ROLE_ADMIN; - - /// Anyone has access to the "system" database. 
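    /// For example, even a user with no explicit grants can still run the (illustrative) query
    ///     SELECT * FROM system.one
    /// because SELECT on the "system" database is granted unconditionally just below.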
- if (!result.isGranted(AccessType::SELECT, "system")) - result.grant(AccessType::SELECT, "system"); - - if (readonly_) - result.fullRevoke(write_table_access | all_dcl | AccessType::SYSTEM | AccessType::KILL); - - if (readonly_ || !allow_ddl_) - result.fullRevoke(table_and_dictionary_ddl); - - if (readonly_ && grant_option) - result.fullRevoke(AccessType::ALL); - - if (readonly_ == 1) - { - /// Table functions are forbidden in readonly mode. - /// For example, for readonly = 2 - allowed. - result.fullRevoke(AccessType::CREATE_TEMPORARY_TABLE | AccessType::TABLE_FUNCTIONS); - } - - if (!allow_introspection_) - result.fullRevoke(AccessType::INTROSPECTION); - - result_access_cache[cache_index].store(result_ptr); - - if (trace_log && (params.readonly == readonly_) && (params.allow_ddl == allow_ddl_) && (params.allow_introspection == allow_introspection_)) - { - LOG_TRACE(trace_log, "List of all grants: " << result_ptr->toString() << (grant_option ? " WITH GRANT OPTION" : "")); - if (roles_info && !roles_info->getCurrentRolesNames().empty()) - { - LOG_TRACE( - trace_log, - "Current_roles: " << boost::algorithm::join(roles_info->getCurrentRolesNames(), ", ") - << ", enabled_roles: " << boost::algorithm::join(roles_info->getEnabledRolesNames(), ", ")); - } - } - - return result_ptr; -} - - -UserPtr AccessRightsContext::getUser() const -{ - std::lock_guard lock{mutex}; - return user; -} - -String AccessRightsContext::getUserName() const -{ - std::lock_guard lock{mutex}; - return user_name; -} - -CurrentRolesInfoPtr AccessRightsContext::getRolesInfo() const -{ - std::lock_guard lock{mutex}; - return roles_info; -} - -std::vector AccessRightsContext::getCurrentRoles() const -{ - std::lock_guard lock{mutex}; - return roles_info ? roles_info->current_roles : std::vector{}; -} - -Strings AccessRightsContext::getCurrentRolesNames() const -{ - std::lock_guard lock{mutex}; - return roles_info ? roles_info->getCurrentRolesNames() : Strings{}; -} - -std::vector AccessRightsContext::getEnabledRoles() const -{ - std::lock_guard lock{mutex}; - return roles_info ? roles_info->enabled_roles : std::vector{}; -} - -Strings AccessRightsContext::getEnabledRolesNames() const -{ - std::lock_guard lock{mutex}; - return roles_info ? 
roles_info->getEnabledRolesNames() : Strings{}; -} - -RowPolicyContextPtr AccessRightsContext::getRowPolicy() const -{ - std::lock_guard lock{mutex}; - return row_policy_context; -} - -QuotaContextPtr AccessRightsContext::getQuota() const -{ - std::lock_guard lock{mutex}; - return quota_context; -} - - -bool operator <(const AccessRightsContext::Params & lhs, const AccessRightsContext::Params & rhs) -{ -#define ACCESS_RIGHTS_CONTEXT_PARAMS_COMPARE_HELPER(field) \ - if (lhs.field < rhs.field) \ - return true; \ - if (lhs.field > rhs.field) \ - return false - ACCESS_RIGHTS_CONTEXT_PARAMS_COMPARE_HELPER(user_id); - ACCESS_RIGHTS_CONTEXT_PARAMS_COMPARE_HELPER(current_roles); - ACCESS_RIGHTS_CONTEXT_PARAMS_COMPARE_HELPER(use_default_roles); - ACCESS_RIGHTS_CONTEXT_PARAMS_COMPARE_HELPER(address); - ACCESS_RIGHTS_CONTEXT_PARAMS_COMPARE_HELPER(quota_key); - ACCESS_RIGHTS_CONTEXT_PARAMS_COMPARE_HELPER(current_database); - ACCESS_RIGHTS_CONTEXT_PARAMS_COMPARE_HELPER(readonly); - ACCESS_RIGHTS_CONTEXT_PARAMS_COMPARE_HELPER(allow_ddl); - ACCESS_RIGHTS_CONTEXT_PARAMS_COMPARE_HELPER(allow_introspection); - ACCESS_RIGHTS_CONTEXT_PARAMS_COMPARE_HELPER(interface); - ACCESS_RIGHTS_CONTEXT_PARAMS_COMPARE_HELPER(http_method); - return false; -#undef ACCESS_RIGHTS_CONTEXT_PARAMS_COMPARE_HELPER -} - - -bool operator ==(const AccessRightsContext::Params & lhs, const AccessRightsContext::Params & rhs) -{ -#define ACCESS_RIGHTS_CONTEXT_PARAMS_COMPARE_HELPER(field) \ - if (lhs.field != rhs.field) \ - return false - ACCESS_RIGHTS_CONTEXT_PARAMS_COMPARE_HELPER(user_id); - ACCESS_RIGHTS_CONTEXT_PARAMS_COMPARE_HELPER(current_roles); - ACCESS_RIGHTS_CONTEXT_PARAMS_COMPARE_HELPER(use_default_roles); - ACCESS_RIGHTS_CONTEXT_PARAMS_COMPARE_HELPER(address); - ACCESS_RIGHTS_CONTEXT_PARAMS_COMPARE_HELPER(quota_key); - ACCESS_RIGHTS_CONTEXT_PARAMS_COMPARE_HELPER(current_database); - ACCESS_RIGHTS_CONTEXT_PARAMS_COMPARE_HELPER(readonly); - ACCESS_RIGHTS_CONTEXT_PARAMS_COMPARE_HELPER(allow_ddl); - ACCESS_RIGHTS_CONTEXT_PARAMS_COMPARE_HELPER(allow_introspection); - ACCESS_RIGHTS_CONTEXT_PARAMS_COMPARE_HELPER(interface); - ACCESS_RIGHTS_CONTEXT_PARAMS_COMPARE_HELPER(http_method); - return true; -#undef ACCESS_RIGHTS_CONTEXT_PARAMS_COMPARE_HELPER -} - -} diff --git a/dbms/src/Access/AccessRightsContext.h b/dbms/src/Access/AccessRightsContext.h deleted file mode 100644 index f129d70162d..00000000000 --- a/dbms/src/Access/AccessRightsContext.h +++ /dev/null @@ -1,157 +0,0 @@ -#pragma once - -#include -#include -#include -#include -#include -#include -#include -#include - - -namespace Poco { class Logger; } - -namespace DB -{ -struct User; -using UserPtr = std::shared_ptr; -struct CurrentRolesInfo; -using CurrentRolesInfoPtr = std::shared_ptr; -class RoleContext; -using RoleContextPtr = std::shared_ptr; -class RowPolicyContext; -using RowPolicyContextPtr = std::shared_ptr; -class QuotaContext; -using QuotaContextPtr = std::shared_ptr; -struct Settings; -class AccessControlManager; - - -class AccessRightsContext -{ -public: - struct Params - { - std::optional user_id; - std::vector current_roles; - bool use_default_roles = false; - UInt64 readonly = 0; - bool allow_ddl = false; - bool allow_introspection = false; - String current_database; - ClientInfo::Interface interface = ClientInfo::Interface::TCP; - ClientInfo::HTTPMethod http_method = ClientInfo::HTTPMethod::UNKNOWN; - Poco::Net::IPAddress address; - String quota_key; - - friend bool operator ==(const Params & lhs, const Params & rhs); - friend bool operator !=(const Params & 
lhs, const Params & rhs) { return !(lhs == rhs); } - friend bool operator <(const Params & lhs, const Params & rhs); - friend bool operator >(const Params & lhs, const Params & rhs) { return rhs < lhs; } - friend bool operator <=(const Params & lhs, const Params & rhs) { return !(rhs < lhs); } - friend bool operator >=(const Params & lhs, const Params & rhs) { return !(lhs < rhs); } - }; - - /// Default constructor creates access rights' context which allows everything. - AccessRightsContext(); - - const Params & getParams() const { return params; } - UserPtr getUser() const; - String getUserName() const; - - void checkPassword(const String & password) const; - void checkHostIsAllowed() const; - - CurrentRolesInfoPtr getRolesInfo() const; - std::vector getCurrentRoles() const; - Strings getCurrentRolesNames() const; - std::vector getEnabledRoles() const; - Strings getEnabledRolesNames() const; - - RowPolicyContextPtr getRowPolicy() const; - QuotaContextPtr getQuota() const; - - /// Checks if a specified access is granted, and throws an exception if not. - /// Empty database means the current database. - void checkAccess(const AccessFlags & access) const; - void checkAccess(const AccessFlags & access, const std::string_view & database) const; - void checkAccess(const AccessFlags & access, const std::string_view & database, const std::string_view & table) const; - void checkAccess(const AccessFlags & access, const std::string_view & database, const std::string_view & table, const std::string_view & column) const; - void checkAccess(const AccessFlags & access, const std::string_view & database, const std::string_view & table, const std::vector & columns) const; - void checkAccess(const AccessFlags & access, const std::string_view & database, const std::string_view & table, const Strings & columns) const; - void checkAccess(const AccessRightsElement & access) const; - void checkAccess(const AccessRightsElements & access) const; - - /// Checks if a specified access is granted. - bool isGranted(const AccessFlags & access) const; - bool isGranted(const AccessFlags & access, const std::string_view & database) const; - bool isGranted(const AccessFlags & access, const std::string_view & database, const std::string_view & table) const; - bool isGranted(const AccessFlags & access, const std::string_view & database, const std::string_view & table, const std::string_view & column) const; - bool isGranted(const AccessFlags & access, const std::string_view & database, const std::string_view & table, const std::vector & columns) const; - bool isGranted(const AccessFlags & access, const std::string_view & database, const std::string_view & table, const Strings & columns) const; - bool isGranted(const AccessRightsElement & access) const; - bool isGranted(const AccessRightsElements & access) const; - - /// Checks if a specified access is granted, and logs a warning if not. 
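    /// Illustrative sketch of the three check families declared in this header, assuming
    /// `context` is an AccessRightsContextPtr and `log` is a Poco::Logger *:
    ///
    ///     context->checkAccess(AccessType::SELECT, "db", "table");          /// throws if not granted
    ///     bool ok = context->isGranted(AccessType::INSERT, "db", "table");  /// only returns true/false
    ///     context->isGranted(log, AccessType::OPTIMIZE, "db", "table");     /// returns false and logs a warning when denied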
- bool isGranted(Poco::Logger * log_, const AccessFlags & access) const; - bool isGranted(Poco::Logger * log_, const AccessFlags & access, const std::string_view & database) const; - bool isGranted(Poco::Logger * log_, const AccessFlags & access, const std::string_view & database, const std::string_view & table) const; - bool isGranted(Poco::Logger * log_, const AccessFlags & access, const std::string_view & database, const std::string_view & table, const std::string_view & column) const; - bool isGranted(Poco::Logger * log_, const AccessFlags & access, const std::string_view & database, const std::string_view & table, const std::vector & columns) const; - bool isGranted(Poco::Logger * log_, const AccessFlags & access, const std::string_view & database, const std::string_view & table, const Strings & columns) const; - bool isGranted(Poco::Logger * log_, const AccessRightsElement & access) const; - bool isGranted(Poco::Logger * log_, const AccessRightsElements & access) const; - - /// Checks if a specified access is granted with grant option, and throws an exception if not. - void checkGrantOption(const AccessFlags & access) const; - void checkGrantOption(const AccessFlags & access, const std::string_view & database) const; - void checkGrantOption(const AccessFlags & access, const std::string_view & database, const std::string_view & table) const; - void checkGrantOption(const AccessFlags & access, const std::string_view & database, const std::string_view & table, const std::string_view & column) const; - void checkGrantOption(const AccessFlags & access, const std::string_view & database, const std::string_view & table, const std::vector & columns) const; - void checkGrantOption(const AccessFlags & access, const std::string_view & database, const std::string_view & table, const Strings & columns) const; - void checkGrantOption(const AccessRightsElement & access) const; - void checkGrantOption(const AccessRightsElements & access) const; - - /// Checks if a specified role is granted with admin option, and throws an exception if not. - void checkAdminOption(const UUID & role_id) const; - -private: - friend class AccessRightsContextFactory; - friend struct ext::shared_ptr_helper; - AccessRightsContext(const AccessControlManager & manager_, const Params & params_); /// AccessRightsContext should be created by AccessRightsContextFactory. - - void setUser(const UserPtr & user_) const; - void setRolesInfo(const CurrentRolesInfoPtr & roles_info_) const; - - template - bool checkAccessImpl(Poco::Logger * log_, const AccessFlags & access, const Args &... 
args) const; - - template - bool checkAccessImpl(Poco::Logger * log_, const AccessRightsElement & access) const; - - template - bool checkAccessImpl(Poco::Logger * log_, const AccessRightsElements & access) const; - - boost::shared_ptr calculateResultAccess(bool grant_option) const; - boost::shared_ptr calculateResultAccess(bool grant_option, UInt64 readonly_, bool allow_ddl_, bool allow_introspection_) const; - - const AccessControlManager * manager = nullptr; - const Params params; - mutable Poco::Logger * trace_log = nullptr; - mutable UserPtr user; - mutable String user_name; - mutable ext::scope_guard subscription_for_user_change; - mutable RoleContextPtr role_context; - mutable ext::scope_guard subscription_for_roles_info_change; - mutable CurrentRolesInfoPtr roles_info; - mutable boost::atomic_shared_ptr> enabled_roles_with_admin_option; - mutable boost::atomic_shared_ptr result_access_cache[7]; - mutable RowPolicyContextPtr row_policy_context; - mutable QuotaContextPtr quota_context; - mutable std::mutex mutex; -}; - -using AccessRightsContextPtr = std::shared_ptr; - -} diff --git a/dbms/src/Access/AccessRightsContextFactory.cpp b/dbms/src/Access/AccessRightsContextFactory.cpp deleted file mode 100644 index 8d542a5f439..00000000000 --- a/dbms/src/Access/AccessRightsContextFactory.cpp +++ /dev/null @@ -1,48 +0,0 @@ -#include -#include -#include - - -namespace DB -{ -AccessRightsContextFactory::AccessRightsContextFactory(const AccessControlManager & manager_) - : manager(manager_), cache(600000 /* 10 minutes */) {} - -AccessRightsContextFactory::~AccessRightsContextFactory() = default; - - -AccessRightsContextPtr AccessRightsContextFactory::createContext(const Params & params) -{ - std::lock_guard lock{mutex}; - auto x = cache.get(params); - if (x) - return *x; - auto res = ext::shared_ptr_helper::create(manager, params); - cache.add(params, res); - return res; -} - -AccessRightsContextPtr AccessRightsContextFactory::createContext( - const UUID & user_id, - const std::vector & current_roles, - bool use_default_roles, - const Settings & settings, - const String & current_database, - const ClientInfo & client_info) -{ - Params params; - params.user_id = user_id; - params.current_roles = current_roles; - params.use_default_roles = use_default_roles; - params.current_database = current_database; - params.readonly = settings.readonly; - params.allow_ddl = settings.allow_ddl; - params.allow_introspection = settings.allow_introspection_functions; - params.interface = client_info.interface; - params.http_method = client_info.http_method; - params.address = client_info.current_address.host(); - params.quota_key = client_info.quota_key; - return createContext(params); -} - -} diff --git a/dbms/src/Access/AccessRightsContextFactory.h b/dbms/src/Access/AccessRightsContextFactory.h deleted file mode 100644 index c480307757a..00000000000 --- a/dbms/src/Access/AccessRightsContextFactory.h +++ /dev/null @@ -1,29 +0,0 @@ -#pragma once - -#include -#include -#include - - -namespace DB -{ -class AccessControlManager; - - -class AccessRightsContextFactory -{ -public: - AccessRightsContextFactory(const AccessControlManager & manager_); - ~AccessRightsContextFactory(); - - using Params = AccessRightsContext::Params; - AccessRightsContextPtr createContext(const Params & params); - AccessRightsContextPtr createContext(const UUID & user_id, const std::vector & current_roles, bool use_default_roles, const Settings & settings, const String & current_database, const ClientInfo & client_info); - -private: - 
const AccessControlManager & manager; - Poco::ExpireCache cache; - std::mutex mutex; -}; - -} diff --git a/dbms/src/Access/AccessRightsElement.cpp b/dbms/src/Access/AccessRightsElement.cpp deleted file mode 100644 index b99cffc191c..00000000000 --- a/dbms/src/Access/AccessRightsElement.cpp +++ /dev/null @@ -1,86 +0,0 @@ -#include -#include -#include - - -namespace DB -{ -void AccessRightsElement::setDatabase(const String & new_database) -{ - database = new_database; - any_database = false; -} - - -void AccessRightsElement::replaceEmptyDatabase(const String & new_database) -{ - if (isEmptyDatabase()) - setDatabase(new_database); -} - - -bool AccessRightsElement::isEmptyDatabase() const -{ - return !any_database && database.empty(); -} - - -String AccessRightsElement::toString() const -{ - String columns_in_parentheses; - if (!any_column) - { - for (const auto & column : columns) - { - columns_in_parentheses += columns_in_parentheses.empty() ? "(" : ", "; - columns_in_parentheses += backQuoteIfNeed(column); - } - columns_in_parentheses += ")"; - } - - String msg; - for (const std::string_view & keyword : access_flags.toKeywords()) - { - if (!msg.empty()) - msg += ", "; - msg += String{keyword} + columns_in_parentheses; - } - - msg += " ON "; - - if (any_database) - msg += "*."; - else if (!database.empty() && (database != IDictionary::NO_DATABASE_TAG)) - msg += backQuoteIfNeed(database) + "."; - - if (any_table) - msg += "*"; - else - msg += backQuoteIfNeed(table); - return msg; -} - - -void AccessRightsElements::replaceEmptyDatabase(const String & new_database) -{ - for (auto & element : *this) - element.replaceEmptyDatabase(new_database); -} - - -String AccessRightsElements::toString() const -{ - String res; - bool need_comma = false; - for (auto & element : *this) - { - if (std::exchange(need_comma, true)) - res += ", "; - res += element.toString(); - } - - if (res.empty()) - res = "USAGE ON *.*"; - return res; -} -} diff --git a/dbms/src/Access/AccessRightsElement.h b/dbms/src/Access/AccessRightsElement.h deleted file mode 100644 index 3894b6f5157..00000000000 --- a/dbms/src/Access/AccessRightsElement.h +++ /dev/null @@ -1,100 +0,0 @@ -#pragma once - -#include - - -namespace DB -{ -/// An element of access rights which can be represented by single line -/// GRANT ... ON ... 
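/// For example (illustrative), an element built as
///     AccessRightsElement{AccessType::SELECT, "db", "table", Strings{"x", "y"}}
/// is rendered by toString() below roughly as "SELECT(x, y) ON db.table", while an element
/// holding only access_flags keeps any_database/any_table/any_column set and so applies to *.*.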
-struct AccessRightsElement -{ - AccessFlags access_flags; - String database; - String table; - Strings columns; - bool any_database = true; - bool any_table = true; - bool any_column = true; - - AccessRightsElement() = default; - AccessRightsElement(const AccessRightsElement &) = default; - AccessRightsElement & operator=(const AccessRightsElement &) = default; - AccessRightsElement(AccessRightsElement &&) = default; - AccessRightsElement & operator=(AccessRightsElement &&) = default; - - AccessRightsElement(AccessFlags access_flags_) : access_flags(access_flags_) {} - - AccessRightsElement(AccessFlags access_flags_, const std::string_view & database_) - : access_flags(access_flags_), database(database_), any_database(false) - { - } - - AccessRightsElement(AccessFlags access_flags_, const std::string_view & database_, const std::string_view & table_) - : access_flags(access_flags_), database(database_), table(table_), any_database(false), any_table(false) - { - } - - AccessRightsElement( - AccessFlags access_flags_, const std::string_view & database_, const std::string_view & table_, const std::string_view & column_) - : access_flags(access_flags_) - , database(database_) - , table(table_) - , columns({String{column_}}) - , any_database(false) - , any_table(false) - , any_column(false) - { - } - - AccessRightsElement( - AccessFlags access_flags_, - const std::string_view & database_, - const std::string_view & table_, - const std::vector & columns_) - : access_flags(access_flags_), database(database_), table(table_), any_database(false), any_table(false), any_column(false) - { - columns.resize(columns_.size()); - for (size_t i = 0; i != columns_.size(); ++i) - columns[i] = String{columns_[i]}; - } - - AccessRightsElement( - AccessFlags access_flags_, const std::string_view & database_, const std::string_view & table_, const Strings & columns_) - : access_flags(access_flags_) - , database(database_) - , table(table_) - , columns(columns_) - , any_database(false) - , any_table(false) - , any_column(false) - { - } - - /// Sets the database. - void setDatabase(const String & new_database); - - /// If the database is empty, replaces it with `new_database`. Otherwise does nothing. - void replaceEmptyDatabase(const String & new_database); - - bool isEmptyDatabase() const; - - /// Returns a human-readable representation like "SELECT, UPDATE(x, y) ON db.table". - /// The returned string isn't prefixed with the "GRANT" keyword. - String toString() const; -}; - - -/// Multiple elements of access rights. -class AccessRightsElements : public std::vector -{ -public: - /// Replaces the empty database with `new_database`. - void replaceEmptyDatabase(const String & new_database); - - /// Returns a human-readable representation like "SELECT, UPDATE(x, y) ON db.table". - /// The returned string isn't prefixed with the "GRANT" keyword. - String toString() const; -}; - -} diff --git a/dbms/src/Access/AccessType.h b/dbms/src/Access/AccessType.h deleted file mode 100644 index 4b65f949885..00000000000 --- a/dbms/src/Access/AccessType.h +++ /dev/null @@ -1,309 +0,0 @@ -#pragma once - -#include -#include -#include -#include - - -namespace DB -{ -/// Represents an access type which can be granted on databases, tables, columns, etc. -enum class AccessType -{ - NONE, /// no access - ALL, /// full access - - SHOW, /// allows to execute SHOW TABLES, SHOW CREATE TABLE, SHOW DATABASES and so on - /// (granted implicitly with any other grant) - - EXISTS, /// allows to execute EXISTS, USE, i.e. 
to check existence - /// (granted implicitly on the database level with any other grant on the database and lower levels, - /// e.g. "GRANT SELECT(x) ON db.table" also grants EXISTS on db.*) - - SELECT, - INSERT, - UPDATE, /// allows to execute ALTER UPDATE - DELETE, /// allows to execute ALTER DELETE - - ADD_COLUMN, - DROP_COLUMN, - MODIFY_COLUMN, - COMMENT_COLUMN, - CLEAR_COLUMN, - ALTER_COLUMN, /// allow to execute ALTER {ADD|DROP|MODIFY...} COLUMN - - ALTER_ORDER_BY, - ADD_INDEX, - DROP_INDEX, - MATERIALIZE_INDEX, - CLEAR_INDEX, - INDEX, /// allows to execute ALTER ORDER BY or ALTER {ADD|DROP...} INDEX - - ADD_CONSTRAINT, - DROP_CONSTRAINT, - ALTER_CONSTRAINT, /// allows to execute ALTER {ADD|DROP} CONSTRAINT - - MODIFY_TTL, /// allows to execute ALTER MODIFY TTL - MATERIALIZE_TTL, /// allows to execute ALTER MATERIALIZE TTL - MODIFY_SETTING, /// allows to execute ALTER MODIFY SETTING - - MOVE_PARTITION, - FETCH_PARTITION, - FREEZE_PARTITION, - - ALTER_TABLE, /// allows to execute ALTER TABLE ... - - REFRESH_VIEW, /// allows to execute ALTER LIVE VIEW REFRESH - MODIFY_VIEW_QUERY, /// allows to execute ALTER TABLE MODIFY QUERY - ALTER_VIEW, /// allows to execute ALTER LIVE VIEW REFRESH, ALTER TABLE MODIFY QUERY - - ALTER, /// allows to execute ALTER {TABLE|LIVE VIEW} ... - - CREATE_DATABASE, /// allows to execute {CREATE|ATTACH} DATABASE - CREATE_TABLE, /// allows to execute {CREATE|ATTACH} TABLE - CREATE_VIEW, /// allows to execute {CREATE|ATTACH} VIEW - CREATE_DICTIONARY, /// allows to execute {CREATE|ATTACH} DICTIONARY - CREATE_TEMPORARY_TABLE, /// allows to create and manipulate temporary tables and views. - CREATE, /// allows to execute {CREATE|ATTACH} [TEMPORARY] {DATABASE|TABLE|VIEW|DICTIONARY} - - DROP_DATABASE, - DROP_TABLE, - DROP_VIEW, - DROP_DICTIONARY, - DROP, /// allows to execute DROP {DATABASE|TABLE|VIEW|DICTIONARY} - - TRUNCATE_TABLE, - TRUNCATE_VIEW, - TRUNCATE, /// allows to execute TRUNCATE {TABLE|VIEW} - - OPTIMIZE, /// allows to execute OPTIMIZE TABLE - - KILL_QUERY, /// allows to kill a query started by another user (anyone can kill his own queries) - KILL_MUTATION, /// allows to kill a mutation - KILL, /// allows to execute KILL {MUTATION|QUERY} - - CREATE_USER, - ALTER_USER, - DROP_USER, - CREATE_ROLE, - DROP_ROLE, - CREATE_POLICY, - ALTER_POLICY, - DROP_POLICY, - CREATE_QUOTA, - ALTER_QUOTA, - DROP_QUOTA, - - ROLE_ADMIN, /// allows to grant and revoke any roles. 
- - SHUTDOWN, - DROP_CACHE, - RELOAD_CONFIG, - RELOAD_DICTIONARY, - STOP_MERGES, - STOP_TTL_MERGES, - STOP_FETCHES, - STOP_MOVES, - STOP_DISTRIBUTED_SENDS, - STOP_REPLICATED_SENDS, - STOP_REPLICATION_QUEUES, - SYNC_REPLICA, - RESTART_REPLICA, - FLUSH_DISTRIBUTED, - FLUSH_LOGS, - SYSTEM, /// allows to execute SYSTEM {SHUTDOWN|RELOAD CONFIG|...} - - dictGet, /// allows to execute functions dictGet, dictHas, dictGetHierarchy, dictIsIn - dictHas, /// allows to execute functions dictGet, dictHas, dictGetHierarchy, dictIsIn - dictGetHierarchy, /// allows to execute functions dictGet, dictHas, dictGetHierarchy, dictIsIn - dictIsIn, /// allows to execute functions dictGet, dictHas, dictGetHierarchy, dictIsIn - - addressToLine, /// allows to execute function addressToLine - addressToSymbol, /// allows to execute function addressToSymbol - demangle, /// allows to execute function demangle - INTROSPECTION, /// allows to execute functions addressToLine, addressToSymbol, demangle - - file, - url, - input, - values, - numbers, - merge, - remote, - mysql, - odbc, - jdbc, - hdfs, - s3, - TABLE_FUNCTIONS, /// allows to execute any table function -}; - -constexpr size_t MAX_ACCESS_TYPE = static_cast(AccessType::TABLE_FUNCTIONS) + 1; - -std::string_view toString(AccessType type); - - -namespace impl -{ - template - class AccessTypeToKeywordConverter - { - public: - static const AccessTypeToKeywordConverter & instance() - { - static const AccessTypeToKeywordConverter res; - return res; - } - - std::string_view convert(AccessType type) const - { - return access_type_to_keyword_mapping[static_cast(type)]; - } - - private: - void addToMapping(AccessType type, const std::string_view & str) - { - String str2{str}; - boost::replace_all(str2, "_", " "); - if (islower(str2[0])) - str2 += "()"; - access_type_to_keyword_mapping[static_cast(type)] = str2; - } - - AccessTypeToKeywordConverter() - { -#define ACCESS_TYPE_TO_KEYWORD_CASE(type) \ - addToMapping(AccessType::type, #type) - - ACCESS_TYPE_TO_KEYWORD_CASE(NONE); - ACCESS_TYPE_TO_KEYWORD_CASE(ALL); - ACCESS_TYPE_TO_KEYWORD_CASE(SHOW); - ACCESS_TYPE_TO_KEYWORD_CASE(EXISTS); - - ACCESS_TYPE_TO_KEYWORD_CASE(SELECT); - ACCESS_TYPE_TO_KEYWORD_CASE(INSERT); - ACCESS_TYPE_TO_KEYWORD_CASE(UPDATE); - ACCESS_TYPE_TO_KEYWORD_CASE(DELETE); - - ACCESS_TYPE_TO_KEYWORD_CASE(ADD_COLUMN); - ACCESS_TYPE_TO_KEYWORD_CASE(DROP_COLUMN); - ACCESS_TYPE_TO_KEYWORD_CASE(MODIFY_COLUMN); - ACCESS_TYPE_TO_KEYWORD_CASE(COMMENT_COLUMN); - ACCESS_TYPE_TO_KEYWORD_CASE(CLEAR_COLUMN); - ACCESS_TYPE_TO_KEYWORD_CASE(ALTER_COLUMN); - - ACCESS_TYPE_TO_KEYWORD_CASE(ALTER_ORDER_BY); - ACCESS_TYPE_TO_KEYWORD_CASE(ADD_INDEX); - ACCESS_TYPE_TO_KEYWORD_CASE(DROP_INDEX); - ACCESS_TYPE_TO_KEYWORD_CASE(MATERIALIZE_INDEX); - ACCESS_TYPE_TO_KEYWORD_CASE(CLEAR_INDEX); - ACCESS_TYPE_TO_KEYWORD_CASE(INDEX); - - ACCESS_TYPE_TO_KEYWORD_CASE(ADD_CONSTRAINT); - ACCESS_TYPE_TO_KEYWORD_CASE(DROP_CONSTRAINT); - ACCESS_TYPE_TO_KEYWORD_CASE(ALTER_CONSTRAINT); - - ACCESS_TYPE_TO_KEYWORD_CASE(MODIFY_TTL); - ACCESS_TYPE_TO_KEYWORD_CASE(MATERIALIZE_TTL); - ACCESS_TYPE_TO_KEYWORD_CASE(MODIFY_SETTING); - - ACCESS_TYPE_TO_KEYWORD_CASE(MOVE_PARTITION); - ACCESS_TYPE_TO_KEYWORD_CASE(FETCH_PARTITION); - ACCESS_TYPE_TO_KEYWORD_CASE(FREEZE_PARTITION); - - ACCESS_TYPE_TO_KEYWORD_CASE(ALTER_TABLE); - - ACCESS_TYPE_TO_KEYWORD_CASE(REFRESH_VIEW); - ACCESS_TYPE_TO_KEYWORD_CASE(MODIFY_VIEW_QUERY); - ACCESS_TYPE_TO_KEYWORD_CASE(ALTER_VIEW); - - ACCESS_TYPE_TO_KEYWORD_CASE(ALTER); - - ACCESS_TYPE_TO_KEYWORD_CASE(CREATE_DATABASE); - 
ACCESS_TYPE_TO_KEYWORD_CASE(CREATE_TABLE); - ACCESS_TYPE_TO_KEYWORD_CASE(CREATE_VIEW); - ACCESS_TYPE_TO_KEYWORD_CASE(CREATE_DICTIONARY); - ACCESS_TYPE_TO_KEYWORD_CASE(CREATE_TEMPORARY_TABLE); - ACCESS_TYPE_TO_KEYWORD_CASE(CREATE); - - ACCESS_TYPE_TO_KEYWORD_CASE(DROP_DATABASE); - ACCESS_TYPE_TO_KEYWORD_CASE(DROP_TABLE); - ACCESS_TYPE_TO_KEYWORD_CASE(DROP_VIEW); - ACCESS_TYPE_TO_KEYWORD_CASE(DROP_DICTIONARY); - ACCESS_TYPE_TO_KEYWORD_CASE(DROP); - - ACCESS_TYPE_TO_KEYWORD_CASE(TRUNCATE_TABLE); - ACCESS_TYPE_TO_KEYWORD_CASE(TRUNCATE_VIEW); - ACCESS_TYPE_TO_KEYWORD_CASE(TRUNCATE); - - ACCESS_TYPE_TO_KEYWORD_CASE(OPTIMIZE); - - ACCESS_TYPE_TO_KEYWORD_CASE(KILL_QUERY); - ACCESS_TYPE_TO_KEYWORD_CASE(KILL_MUTATION); - ACCESS_TYPE_TO_KEYWORD_CASE(KILL); - - ACCESS_TYPE_TO_KEYWORD_CASE(CREATE_USER); - ACCESS_TYPE_TO_KEYWORD_CASE(ALTER_USER); - ACCESS_TYPE_TO_KEYWORD_CASE(DROP_USER); - ACCESS_TYPE_TO_KEYWORD_CASE(CREATE_ROLE); - ACCESS_TYPE_TO_KEYWORD_CASE(DROP_ROLE); - ACCESS_TYPE_TO_KEYWORD_CASE(CREATE_POLICY); - ACCESS_TYPE_TO_KEYWORD_CASE(ALTER_POLICY); - ACCESS_TYPE_TO_KEYWORD_CASE(DROP_POLICY); - ACCESS_TYPE_TO_KEYWORD_CASE(CREATE_QUOTA); - ACCESS_TYPE_TO_KEYWORD_CASE(ALTER_QUOTA); - ACCESS_TYPE_TO_KEYWORD_CASE(DROP_QUOTA); - ACCESS_TYPE_TO_KEYWORD_CASE(ROLE_ADMIN); - - ACCESS_TYPE_TO_KEYWORD_CASE(SHUTDOWN); - ACCESS_TYPE_TO_KEYWORD_CASE(DROP_CACHE); - ACCESS_TYPE_TO_KEYWORD_CASE(RELOAD_CONFIG); - ACCESS_TYPE_TO_KEYWORD_CASE(RELOAD_DICTIONARY); - ACCESS_TYPE_TO_KEYWORD_CASE(STOP_MERGES); - ACCESS_TYPE_TO_KEYWORD_CASE(STOP_TTL_MERGES); - ACCESS_TYPE_TO_KEYWORD_CASE(STOP_FETCHES); - ACCESS_TYPE_TO_KEYWORD_CASE(STOP_MOVES); - ACCESS_TYPE_TO_KEYWORD_CASE(STOP_DISTRIBUTED_SENDS); - ACCESS_TYPE_TO_KEYWORD_CASE(STOP_REPLICATED_SENDS); - ACCESS_TYPE_TO_KEYWORD_CASE(STOP_REPLICATION_QUEUES); - ACCESS_TYPE_TO_KEYWORD_CASE(SYNC_REPLICA); - ACCESS_TYPE_TO_KEYWORD_CASE(RESTART_REPLICA); - ACCESS_TYPE_TO_KEYWORD_CASE(FLUSH_DISTRIBUTED); - ACCESS_TYPE_TO_KEYWORD_CASE(FLUSH_LOGS); - ACCESS_TYPE_TO_KEYWORD_CASE(SYSTEM); - - ACCESS_TYPE_TO_KEYWORD_CASE(dictGet); - ACCESS_TYPE_TO_KEYWORD_CASE(dictHas); - ACCESS_TYPE_TO_KEYWORD_CASE(dictGetHierarchy); - ACCESS_TYPE_TO_KEYWORD_CASE(dictIsIn); - - ACCESS_TYPE_TO_KEYWORD_CASE(addressToLine); - ACCESS_TYPE_TO_KEYWORD_CASE(addressToSymbol); - ACCESS_TYPE_TO_KEYWORD_CASE(demangle); - ACCESS_TYPE_TO_KEYWORD_CASE(INTROSPECTION); - - ACCESS_TYPE_TO_KEYWORD_CASE(file); - ACCESS_TYPE_TO_KEYWORD_CASE(url); - ACCESS_TYPE_TO_KEYWORD_CASE(input); - ACCESS_TYPE_TO_KEYWORD_CASE(values); - ACCESS_TYPE_TO_KEYWORD_CASE(numbers); - ACCESS_TYPE_TO_KEYWORD_CASE(merge); - ACCESS_TYPE_TO_KEYWORD_CASE(remote); - ACCESS_TYPE_TO_KEYWORD_CASE(mysql); - ACCESS_TYPE_TO_KEYWORD_CASE(odbc); - ACCESS_TYPE_TO_KEYWORD_CASE(jdbc); - ACCESS_TYPE_TO_KEYWORD_CASE(hdfs); - ACCESS_TYPE_TO_KEYWORD_CASE(s3); - ACCESS_TYPE_TO_KEYWORD_CASE(TABLE_FUNCTIONS); - -#undef ACCESS_TYPE_TO_KEYWORD_CASE - } - - std::array access_type_to_keyword_mapping; - }; -} - -inline std::string_view toKeyword(AccessType type) { return impl::AccessTypeToKeywordConverter<>::instance().convert(type); } - -} diff --git a/dbms/src/Access/CurrentRolesInfo.cpp b/dbms/src/Access/CurrentRolesInfo.cpp deleted file mode 100644 index f4cbd739021..00000000000 --- a/dbms/src/Access/CurrentRolesInfo.cpp +++ /dev/null @@ -1,34 +0,0 @@ -#include - - -namespace DB -{ - -Strings CurrentRolesInfo::getCurrentRolesNames() const -{ - Strings result; - result.reserve(current_roles.size()); - for (const auto & id : current_roles) - 
result.emplace_back(names_of_roles.at(id)); - return result; -} - - -Strings CurrentRolesInfo::getEnabledRolesNames() const -{ - Strings result; - result.reserve(enabled_roles.size()); - for (const auto & id : enabled_roles) - result.emplace_back(names_of_roles.at(id)); - return result; -} - - -bool operator==(const CurrentRolesInfo & lhs, const CurrentRolesInfo & rhs) -{ - return (lhs.current_roles == rhs.current_roles) && (lhs.enabled_roles == rhs.enabled_roles) - && (lhs.enabled_roles_with_admin_option == rhs.enabled_roles_with_admin_option) && (lhs.names_of_roles == rhs.names_of_roles) - && (lhs.access == rhs.access) && (lhs.access_with_grant_option == rhs.access_with_grant_option); -} - -} diff --git a/dbms/src/Access/CurrentRolesInfo.h b/dbms/src/Access/CurrentRolesInfo.h deleted file mode 100644 index a4dd26be0f7..00000000000 --- a/dbms/src/Access/CurrentRolesInfo.h +++ /dev/null @@ -1,31 +0,0 @@ -#pragma once - -#include -#include -#include -#include - - -namespace DB -{ - -/// Information about a role. -struct CurrentRolesInfo -{ - std::vector current_roles; - std::vector enabled_roles; - std::vector enabled_roles_with_admin_option; - std::unordered_map names_of_roles; - AccessRights access; - AccessRights access_with_grant_option; - - Strings getCurrentRolesNames() const; - Strings getEnabledRolesNames() const; - - friend bool operator ==(const CurrentRolesInfo & lhs, const CurrentRolesInfo & rhs); - friend bool operator !=(const CurrentRolesInfo & lhs, const CurrentRolesInfo & rhs) { return !(lhs == rhs); } -}; - -using CurrentRolesInfoPtr = std::shared_ptr; - -} diff --git a/dbms/src/Access/GenericRoleSet.cpp b/dbms/src/Access/GenericRoleSet.cpp deleted file mode 100644 index f48c1221a46..00000000000 --- a/dbms/src/Access/GenericRoleSet.cpp +++ /dev/null @@ -1,292 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include -#include -#include - - -namespace DB -{ -namespace ErrorCodes -{ - extern const int LOGICAL_ERROR; -} -GenericRoleSet::GenericRoleSet() = default; -GenericRoleSet::GenericRoleSet(const GenericRoleSet & src) = default; -GenericRoleSet & GenericRoleSet::operator =(const GenericRoleSet & src) = default; -GenericRoleSet::GenericRoleSet(GenericRoleSet && src) = default; -GenericRoleSet & GenericRoleSet::operator =(GenericRoleSet && src) = default; - - -GenericRoleSet::GenericRoleSet(AllTag) -{ - all = true; -} - -GenericRoleSet::GenericRoleSet(const UUID & id) -{ - add(id); -} - - -GenericRoleSet::GenericRoleSet(const std::vector & ids_) -{ - add(ids_); -} - - -GenericRoleSet::GenericRoleSet(const boost::container::flat_set & ids_) -{ - add(ids_); -} - - -GenericRoleSet::GenericRoleSet(const ASTGenericRoleSet & ast, const AccessControlManager & manager, const std::optional & current_user_id) -{ - all = ast.all; - - if (!ast.names.empty() && !all) - { - ids.reserve(ast.names.size()); - for (const String & name : ast.names) - { - auto id = manager.find(name); - if (!id) - id = manager.getID(name); - ids.insert(*id); - } - } - - if (ast.current_user && !all) - { - if (!current_user_id) - throw Exception("Current user is unknown", ErrorCodes::LOGICAL_ERROR); - ids.insert(*current_user_id); - } - - if (!ast.except_names.empty()) - { - except_ids.reserve(ast.except_names.size()); - for (const String & except_name : ast.except_names) - { - auto except_id = manager.find(except_name); - if (!except_id) - except_id = manager.getID(except_name); - except_ids.insert(*except_id); - } - } - - if (ast.except_current_user) - { - if (!current_user_id) - throw 
Exception("Current user is unknown", ErrorCodes::LOGICAL_ERROR); - except_ids.insert(*current_user_id); - } - - for (const UUID & except_id : except_ids) - ids.erase(except_id); -} - -std::shared_ptr GenericRoleSet::toAST(const AccessControlManager & manager) const -{ - auto ast = std::make_shared(); - ast->all = all; - - if (!ids.empty()) - { - ast->names.reserve(ids.size()); - for (const UUID & id : ids) - { - auto name = manager.tryReadName(id); - if (name) - ast->names.emplace_back(std::move(*name)); - } - boost::range::sort(ast->names); - } - - if (!except_ids.empty()) - { - ast->except_names.reserve(except_ids.size()); - for (const UUID & except_id : except_ids) - { - auto except_name = manager.tryReadName(except_id); - if (except_name) - ast->except_names.emplace_back(std::move(*except_name)); - } - boost::range::sort(ast->except_names); - } - - return ast; -} - - -String GenericRoleSet::toString(const AccessControlManager & manager) const -{ - auto ast = toAST(manager); - return serializeAST(*ast); -} - - -Strings GenericRoleSet::toStrings(const AccessControlManager & manager) const -{ - if (all || !except_ids.empty()) - return {toString(manager)}; - - Strings names; - names.reserve(ids.size()); - for (const UUID & id : ids) - { - auto name = manager.tryReadName(id); - if (name) - names.emplace_back(std::move(*name)); - } - boost::range::sort(names); - return names; -} - - -bool GenericRoleSet::empty() const -{ - return ids.empty() && !all; -} - - -void GenericRoleSet::clear() -{ - ids.clear(); - all = false; - except_ids.clear(); -} - - -void GenericRoleSet::add(const UUID & id) -{ - ids.insert(id); -} - - -void GenericRoleSet::add(const std::vector & ids_) -{ - for (const auto & id : ids_) - add(id); -} - - -void GenericRoleSet::add(const boost::container::flat_set & ids_) -{ - for (const auto & id : ids_) - add(id); -} - - -bool GenericRoleSet::match(const UUID & id) const -{ - return (all || ids.contains(id)) && !except_ids.contains(id); -} - - -bool GenericRoleSet::match(const UUID & user_id, const std::vector & enabled_roles) const -{ - if (!all && !ids.contains(user_id)) - { - bool found_enabled_role = std::any_of( - enabled_roles.begin(), enabled_roles.end(), [this](const UUID & enabled_role) { return ids.contains(enabled_role); }); - if (!found_enabled_role) - return false; - } - - if (except_ids.contains(user_id)) - return false; - - bool in_except_list = std::any_of( - enabled_roles.begin(), enabled_roles.end(), [this](const UUID & enabled_role) { return except_ids.contains(enabled_role); }); - if (in_except_list) - return false; - - return true; -} - - -bool GenericRoleSet::match(const UUID & user_id, const boost::container::flat_set & enabled_roles) const -{ - if (!all && !ids.contains(user_id)) - { - bool found_enabled_role = std::any_of( - enabled_roles.begin(), enabled_roles.end(), [this](const UUID & enabled_role) { return ids.contains(enabled_role); }); - if (!found_enabled_role) - return false; - } - - if (except_ids.contains(user_id)) - return false; - - bool in_except_list = std::any_of( - enabled_roles.begin(), enabled_roles.end(), [this](const UUID & enabled_role) { return except_ids.contains(enabled_role); }); - if (in_except_list) - return false; - - return true; -} - - -std::vector GenericRoleSet::getMatchingIDs() const -{ - if (all) - throw Exception("getAllMatchingIDs() can't get ALL ids", ErrorCodes::LOGICAL_ERROR); - std::vector res; - boost::range::set_difference(ids, except_ids, std::back_inserter(res)); - return res; -} - - -std::vector 
GenericRoleSet::getMatchingUsers(const AccessControlManager & manager) const -{ - if (!all) - return getMatchingIDs(); - - std::vector res; - for (const UUID & id : manager.findAll()) - { - if (match(id)) - res.push_back(id); - } - return res; -} - - -std::vector GenericRoleSet::getMatchingRoles(const AccessControlManager & manager) const -{ - if (!all) - return getMatchingIDs(); - - std::vector res; - for (const UUID & id : manager.findAll()) - { - if (match(id)) - res.push_back(id); - } - return res; -} - - -std::vector GenericRoleSet::getMatchingUsersAndRoles(const AccessControlManager & manager) const -{ - if (!all) - return getMatchingIDs(); - - std::vector vec = getMatchingUsers(manager); - boost::range::push_back(vec, getMatchingRoles(manager)); - return vec; -} - - -bool operator ==(const GenericRoleSet & lhs, const GenericRoleSet & rhs) -{ - return (lhs.all == rhs.all) && (lhs.ids == rhs.ids) && (lhs.except_ids == rhs.except_ids); -} - -} diff --git a/dbms/src/Access/GenericRoleSet.h b/dbms/src/Access/GenericRoleSet.h deleted file mode 100644 index 2caee348813..00000000000 --- a/dbms/src/Access/GenericRoleSet.h +++ /dev/null @@ -1,66 +0,0 @@ -#pragma once - -#include -#include -#include -#include - - -namespace DB -{ -class ASTGenericRoleSet; -class AccessControlManager; - - -/// Represents a set of users/roles like -/// {user_name | role_name | CURRENT_USER} [,...] | NONE | ALL | ALL EXCEPT {user_name | role_name | CURRENT_USER} [,...] -/// Similar to ASTGenericRoleSet, but with IDs instead of names. -struct GenericRoleSet -{ - GenericRoleSet(); - GenericRoleSet(const GenericRoleSet & src); - GenericRoleSet & operator =(const GenericRoleSet & src); - GenericRoleSet(GenericRoleSet && src); - GenericRoleSet & operator =(GenericRoleSet && src); - - struct AllTag {}; - GenericRoleSet(AllTag); - - GenericRoleSet(const UUID & id); - GenericRoleSet(const std::vector & ids_); - GenericRoleSet(const boost::container::flat_set & ids_); - - GenericRoleSet(const ASTGenericRoleSet & ast, const AccessControlManager & manager, const std::optional & current_user_id = {}); - std::shared_ptr toAST(const AccessControlManager & manager) const; - - String toString(const AccessControlManager & manager) const; - Strings toStrings(const AccessControlManager & manager) const; - - bool empty() const; - void clear(); - void add(const UUID & id); - void add(const std::vector & ids_); - void add(const boost::container::flat_set & ids_); - - /// Checks if a specified ID matches this GenericRoleSet. - bool match(const UUID & id) const; - bool match(const UUID & user_id, const std::vector & enabled_roles) const; - bool match(const UUID & user_id, const boost::container::flat_set & enabled_roles) const; - - /// Returns a list of matching IDs. The function must not be called if `all` == `true`. - std::vector getMatchingIDs() const; - - /// Returns a list of matching users. 
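    /// Illustrative sketch (hypothetical ids and manager) of how the set semantics compose:
    ///
    ///     GenericRoleSet set{GenericRoleSet::AllTag{}};    /// represents ALL
    ///     set.except_ids.insert(role1_id);                 /// now represents ALL EXCEPT role1
    ///     set.match(role1_id);                             /// false
    ///     set.match(some_other_id);                        /// true
    ///     auto users = set.getMatchingUsers(manager);      /// enumerates the storage, keeping only matching IDs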
- std::vector getMatchingUsers(const AccessControlManager & manager) const; - std::vector getMatchingRoles(const AccessControlManager & manager) const; - std::vector getMatchingUsersAndRoles(const AccessControlManager & manager) const; - - friend bool operator ==(const GenericRoleSet & lhs, const GenericRoleSet & rhs); - friend bool operator !=(const GenericRoleSet & lhs, const GenericRoleSet & rhs) { return !(lhs == rhs); } - - boost::container::flat_set ids; - bool all = false; - boost::container::flat_set except_ids; -}; - -} diff --git a/dbms/src/Access/IAccessEntity.cpp b/dbms/src/Access/IAccessEntity.cpp deleted file mode 100644 index 361946863b2..00000000000 --- a/dbms/src/Access/IAccessEntity.cpp +++ /dev/null @@ -1,25 +0,0 @@ -#include -#include -#include -#include -#include - - -namespace DB -{ -String IAccessEntity::getTypeName(std::type_index type) -{ - if (type == typeid(User)) - return "User"; - if (type == typeid(Quota)) - return "Quota"; - if (type == typeid(RowPolicy)) - return "Row policy"; - return demangle(type.name()); -} - -bool IAccessEntity::equal(const IAccessEntity & other) const -{ - return (full_name == other.full_name) && (getType() == other.getType()); -} -} diff --git a/dbms/src/Access/IAccessEntity.h b/dbms/src/Access/IAccessEntity.h deleted file mode 100644 index 272fde006ac..00000000000 --- a/dbms/src/Access/IAccessEntity.h +++ /dev/null @@ -1,49 +0,0 @@ -#pragma once - -#include -#include -#include -#include - - -namespace DB -{ -/// Access entity is a set of data which have a name and a type. Access entity control something related to the access control. -/// Entities can be stored to a file or another storage, see IAccessStorage. -struct IAccessEntity -{ - IAccessEntity() = default; - IAccessEntity(const IAccessEntity &) = default; - virtual ~IAccessEntity() = default; - virtual std::shared_ptr clone() const = 0; - - std::type_index getType() const { return typeid(*this); } - static String getTypeName(std::type_index type); - const String getTypeName() const { return getTypeName(getType()); } - - template - bool isTypeOf() const { return isTypeOf(typeid(EntityType)); } - bool isTypeOf(std::type_index type) const { return type == getType(); } - - virtual void setName(const String & name_) { full_name = name_; } - virtual String getName() const { return full_name; } - String getFullName() const { return full_name; } - - friend bool operator ==(const IAccessEntity & lhs, const IAccessEntity & rhs) { return lhs.equal(rhs); } - friend bool operator !=(const IAccessEntity & lhs, const IAccessEntity & rhs) { return !(lhs == rhs); } - -protected: - String full_name; - - virtual bool equal(const IAccessEntity & other) const; - - /// Helper function to define clone() in the derived classes. 
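    /// For example, a concrete entity would typically implement clone() in terms of this helper,
    /// e.g. (illustrative, using the User type mentioned in getTypeName() above):
    ///     std::shared_ptr<IAccessEntity> clone() const override { return cloneImpl<User>(); }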
- template - std::shared_ptr cloneImpl() const - { - return std::make_shared(typeid_cast(*this)); - } -}; - -using AccessEntityPtr = std::shared_ptr; -} diff --git a/dbms/src/Access/IAccessStorage.cpp b/dbms/src/Access/IAccessStorage.cpp deleted file mode 100644 index d3a0edf3ba0..00000000000 --- a/dbms/src/Access/IAccessStorage.cpp +++ /dev/null @@ -1,440 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include -#include - - -namespace DB -{ -namespace ErrorCodes -{ - extern const int BAD_CAST; - extern const int ACCESS_ENTITY_NOT_FOUND; - extern const int ACCESS_ENTITY_ALREADY_EXISTS; - extern const int ACCESS_ENTITY_STORAGE_READONLY; - extern const int UNKNOWN_USER; - extern const int UNKNOWN_ROLE; -} - - -std::vector IAccessStorage::findAll(std::type_index type) const -{ - return findAllImpl(type); -} - - -std::optional IAccessStorage::find(std::type_index type, const String & name) const -{ - return findImpl(type, name); -} - - -std::vector IAccessStorage::find(std::type_index type, const Strings & names) const -{ - std::vector ids; - ids.reserve(names.size()); - for (const String & name : names) - { - auto id = findImpl(type, name); - if (id) - ids.push_back(*id); - } - return ids; -} - - -UUID IAccessStorage::getID(std::type_index type, const String & name) const -{ - auto id = findImpl(type, name); - if (id) - return *id; - throwNotFound(type, name); -} - - -std::vector IAccessStorage::getIDs(std::type_index type, const Strings & names) const -{ - std::vector ids; - ids.reserve(names.size()); - for (const String & name : names) - ids.push_back(getID(type, name)); - return ids; -} - - -bool IAccessStorage::exists(const UUID & id) const -{ - return existsImpl(id); -} - - - -AccessEntityPtr IAccessStorage::tryReadBase(const UUID & id) const -{ - try - { - return readImpl(id); - } - catch (Exception &) - { - return nullptr; - } -} - - -String IAccessStorage::readName(const UUID & id) const -{ - return readNameImpl(id); -} - - -std::optional IAccessStorage::tryReadName(const UUID & id) const -{ - try - { - return readNameImpl(id); - } - catch (Exception &) - { - return {}; - } -} - - -UUID IAccessStorage::insert(const AccessEntityPtr & entity) -{ - return insertImpl(entity, false); -} - - -std::vector IAccessStorage::insert(const std::vector & multiple_entities) -{ - std::vector ids; - ids.reserve(multiple_entities.size()); - String error_message; - for (const auto & entity : multiple_entities) - { - try - { - ids.push_back(insertImpl(entity, false)); - } - catch (Exception & e) - { - if (e.code() != ErrorCodes::ACCESS_ENTITY_ALREADY_EXISTS) - throw; - error_message += (error_message.empty() ? "" : ". 
") + e.message(); - } - } - if (!error_message.empty()) - throw Exception(error_message, ErrorCodes::ACCESS_ENTITY_ALREADY_EXISTS); - return ids; -} - - -std::optional IAccessStorage::tryInsert(const AccessEntityPtr & entity) -{ - try - { - return insertImpl(entity, false); - } - catch (Exception &) - { - return {}; - } -} - - -std::vector IAccessStorage::tryInsert(const std::vector & multiple_entities) -{ - std::vector ids; - ids.reserve(multiple_entities.size()); - for (const auto & entity : multiple_entities) - { - try - { - ids.push_back(insertImpl(entity, false)); - } - catch (Exception &) - { - } - } - return ids; -} - - -UUID IAccessStorage::insertOrReplace(const AccessEntityPtr & entity) -{ - return insertImpl(entity, true); -} - - -std::vector IAccessStorage::insertOrReplace(const std::vector & multiple_entities) -{ - std::vector ids; - ids.reserve(multiple_entities.size()); - for (const auto & entity : multiple_entities) - ids.push_back(insertImpl(entity, true)); - return ids; -} - - -void IAccessStorage::remove(const UUID & id) -{ - removeImpl(id); -} - - -void IAccessStorage::remove(const std::vector & ids) -{ - String error_message; - for (const auto & id : ids) - { - try - { - removeImpl(id); - } - catch (Exception & e) - { - if (e.code() != ErrorCodes::ACCESS_ENTITY_NOT_FOUND) - throw; - error_message += (error_message.empty() ? "" : ". ") + e.message(); - } - } - if (!error_message.empty()) - throw Exception(error_message, ErrorCodes::ACCESS_ENTITY_NOT_FOUND); -} - - -bool IAccessStorage::tryRemove(const UUID & id) -{ - try - { - removeImpl(id); - return true; - } - catch (Exception &) - { - return false; - } -} - - -std::vector IAccessStorage::tryRemove(const std::vector & ids) -{ - std::vector removed; - removed.reserve(ids.size()); - for (const auto & id : ids) - { - try - { - removeImpl(id); - removed.push_back(id); - } - catch (Exception &) - { - } - } - return removed; -} - - -void IAccessStorage::update(const UUID & id, const UpdateFunc & update_func) -{ - updateImpl(id, update_func); -} - - -void IAccessStorage::update(const std::vector & ids, const UpdateFunc & update_func) -{ - String error_message; - for (const auto & id : ids) - { - try - { - updateImpl(id, update_func); - } - catch (Exception & e) - { - if (e.code() != ErrorCodes::ACCESS_ENTITY_NOT_FOUND) - throw; - error_message += (error_message.empty() ? "" : ". 
") + e.message(); - } - } - if (!error_message.empty()) - throw Exception(error_message, ErrorCodes::ACCESS_ENTITY_NOT_FOUND); -} - - -bool IAccessStorage::tryUpdate(const UUID & id, const UpdateFunc & update_func) -{ - try - { - updateImpl(id, update_func); - return true; - } - catch (Exception &) - { - return false; - } -} - - -std::vector IAccessStorage::tryUpdate(const std::vector & ids, const UpdateFunc & update_func) -{ - std::vector updated; - updated.reserve(ids.size()); - for (const auto & id : ids) - { - try - { - updateImpl(id, update_func); - updated.push_back(id); - } - catch (Exception &) - { - } - } - return updated; -} - - -ext::scope_guard IAccessStorage::subscribeForChanges(std::type_index type, const OnChangedHandler & handler) const -{ - return subscribeForChangesImpl(type, handler); -} - - -ext::scope_guard IAccessStorage::subscribeForChanges(const UUID & id, const OnChangedHandler & handler) const -{ - return subscribeForChangesImpl(id, handler); -} - - -ext::scope_guard IAccessStorage::subscribeForChanges(const std::vector & ids, const OnChangedHandler & handler) const -{ - ext::scope_guard subscriptions; - for (const auto & id : ids) - subscriptions.join(subscribeForChangesImpl(id, handler)); - return subscriptions; -} - - -bool IAccessStorage::hasSubscription(std::type_index type) const -{ - return hasSubscriptionImpl(type); -} - - -bool IAccessStorage::hasSubscription(const UUID & id) const -{ - return hasSubscriptionImpl(id); -} - - -void IAccessStorage::notify(const Notifications & notifications) -{ - for (const auto & [fn, id, new_entity] : notifications) - fn(id, new_entity); -} - - -UUID IAccessStorage::generateRandomID() -{ - static Poco::UUIDGenerator generator; - UUID id; - generator.createRandom().copyTo(reinterpret_cast(&id)); - return id; -} - - -Poco::Logger * IAccessStorage::getLogger() const -{ - Poco::Logger * ptr = log.load(); - if (!ptr) - log.store(ptr = &Poco::Logger::get("Access(" + storage_name + ")"), std::memory_order_relaxed); - return ptr; -} - - -void IAccessStorage::throwNotFound(const UUID & id) const -{ - throw Exception("ID {" + toString(id) + "} not found in " + getStorageName(), ErrorCodes::ACCESS_ENTITY_NOT_FOUND); -} - - -void IAccessStorage::throwNotFound(std::type_index type, const String & name) const -{ - int error_code; - if (type == typeid(User)) - error_code = ErrorCodes::UNKNOWN_USER; - else if (type == typeid(Role)) - error_code = ErrorCodes::UNKNOWN_ROLE; - else - error_code = ErrorCodes::ACCESS_ENTITY_NOT_FOUND; - - throw Exception(getTypeName(type) + " " + backQuote(name) + " not found in " + getStorageName(), error_code); -} - - -void IAccessStorage::throwBadCast(const UUID & id, std::type_index type, const String & name, std::type_index required_type) const -{ - throw Exception( - "ID {" + toString(id) + "}: " + getTypeName(type) + backQuote(name) + " expected to be of type " + getTypeName(required_type), - ErrorCodes::BAD_CAST); -} - - -void IAccessStorage::throwIDCollisionCannotInsert(const UUID & id, std::type_index type, const String & name, std::type_index existing_type, const String & existing_name) const -{ - throw Exception( - getTypeName(type) + " " + backQuote(name) + ": cannot insert because the ID {" + toString(id) + "} is already used by " - + getTypeName(existing_type) + " " + backQuote(existing_name) + " in " + getStorageName(), - ErrorCodes::ACCESS_ENTITY_ALREADY_EXISTS); -} - - -void IAccessStorage::throwNameCollisionCannotInsert(std::type_index type, const String & name) const -{ - throw Exception( 
- getTypeName(type) + " " + backQuote(name) + ": cannot insert because " + getTypeName(type) + " " + backQuote(name) - + " already exists in " + getStorageName(), - ErrorCodes::ACCESS_ENTITY_ALREADY_EXISTS); -} - - -void IAccessStorage::throwNameCollisionCannotRename(std::type_index type, const String & old_name, const String & new_name) const -{ - throw Exception( - getTypeName(type) + " " + backQuote(old_name) + ": cannot rename to " + backQuote(new_name) + " because " + getTypeName(type) + " " - + backQuote(new_name) + " already exists in " + getStorageName(), - ErrorCodes::ACCESS_ENTITY_ALREADY_EXISTS); -} - - -void IAccessStorage::throwReadonlyCannotInsert(std::type_index type, const String & name) const -{ - throw Exception( - "Cannot insert " + getTypeName(type) + " " + backQuote(name) + " to " + getStorageName() + " because this storage is readonly", - ErrorCodes::ACCESS_ENTITY_STORAGE_READONLY); -} - - -void IAccessStorage::throwReadonlyCannotUpdate(std::type_index type, const String & name) const -{ - throw Exception( - "Cannot update " + getTypeName(type) + " " + backQuote(name) + " in " + getStorageName() + " because this storage is readonly", - ErrorCodes::ACCESS_ENTITY_STORAGE_READONLY); -} - - -void IAccessStorage::throwReadonlyCannotRemove(std::type_index type, const String & name) const -{ - throw Exception( - "Cannot remove " + getTypeName(type) + " " + backQuote(name) + " from " + getStorageName() + " because this storage is readonly", - ErrorCodes::ACCESS_ENTITY_STORAGE_READONLY); -} -} diff --git a/dbms/src/Access/IAccessStorage.h b/dbms/src/Access/IAccessStorage.h deleted file mode 100644 index d0be38caf07..00000000000 --- a/dbms/src/Access/IAccessStorage.h +++ /dev/null @@ -1,203 +0,0 @@ -#pragma once - -#include -#include -#include -#include -#include -#include -#include -#include - - -namespace Poco { class Logger; } - -namespace DB -{ -/// Contains entities, i.e. instances of classes derived from IAccessEntity. -/// The implementations of this class MUST be thread-safe. -class IAccessStorage -{ -public: - IAccessStorage(const String & storage_name_) : storage_name(storage_name_) {} - virtual ~IAccessStorage() {} - - /// Returns the name of this storage. - const String & getStorageName() const { return storage_name; } - - /// Returns the identifiers of all the entities of a specified type contained in the storage. - std::vector findAll(std::type_index type) const; - - template - std::vector findAll() const { return findAll(typeid(EntityType)); } - - /// Searchs for an entity with specified type and name. Returns std::nullopt if not found. - std::optional find(std::type_index type, const String & name) const; - - template - std::optional find(const String & name) const { return find(typeid(EntityType), name); } - - std::vector find(std::type_index type, const Strings & names) const; - - template - std::vector find(const Strings & names) const { return find(typeid(EntityType), names); } - - /// Searchs for an entity with specified name and type. Throws an exception if not found. - UUID getID(std::type_index type, const String & name) const; - - template - UUID getID(const String & name) const { return getID(typeid(EntityType), name); } - - std::vector getIDs(std::type_index type, const Strings & names) const; - - template - std::vector getIDs(const Strings & names) const { return getIDs(typeid(EntityType), names); } - - /// Returns whether there is an entity with such identifier in the storage. - bool exists(const UUID & id) const; - - /// Reads an entity. 
Throws an exception if not found. - template - std::shared_ptr read(const UUID & id) const; - - template - std::shared_ptr read(const String & name) const; - - /// Reads an entity. Returns nullptr if not found. - template - std::shared_ptr tryRead(const UUID & id) const; - - template - std::shared_ptr tryRead(const String & name) const; - - /// Reads only name of an entity. - String readName(const UUID & id) const; - std::optional tryReadName(const UUID & id) const; - - /// Inserts an entity to the storage. Returns ID of a new entry in the storage. - /// Throws an exception if the specified name already exists. - UUID insert(const AccessEntityPtr & entity); - std::vector insert(const std::vector & multiple_entities); - - /// Inserts an entity to the storage. Returns ID of a new entry in the storage. - std::optional tryInsert(const AccessEntityPtr & entity); - std::vector tryInsert(const std::vector & multiple_entities); - - /// Inserts an entity to the storage. Return ID of a new entry in the storage. - /// Replaces an existing entry in the storage if the specified name already exists. - UUID insertOrReplace(const AccessEntityPtr & entity); - std::vector insertOrReplace(const std::vector & multiple_entities); - - /// Removes an entity from the storage. Throws an exception if couldn't remove. - void remove(const UUID & id); - void remove(const std::vector & ids); - - /// Removes an entity from the storage. Returns false if couldn't remove. - bool tryRemove(const UUID & id); - - /// Removes multiple entities from the storage. Returns the list of successfully dropped. - std::vector tryRemove(const std::vector & ids); - - using UpdateFunc = std::function; - - /// Updates an entity stored in the storage. Throws an exception if couldn't update. - void update(const UUID & id, const UpdateFunc & update_func); - void update(const std::vector & ids, const UpdateFunc & update_func); - - /// Updates an entity stored in the storage. Returns false if couldn't update. - bool tryUpdate(const UUID & id, const UpdateFunc & update_func); - - /// Updates multiple entities in the storage. Returns the list of successfully updated. - std::vector tryUpdate(const std::vector & ids, const UpdateFunc & update_func); - - using OnChangedHandler = std::function; - - /// Subscribes for all changes. - /// Can return nullptr if cannot subscribe (identifier not found) or if it doesn't make sense (the storage is read-only). - ext::scope_guard subscribeForChanges(std::type_index type, const OnChangedHandler & handler) const; - - template - ext::scope_guard subscribeForChanges(OnChangedHandler handler) const { return subscribeForChanges(typeid(EntityType), handler); } - - /// Subscribes for changes of a specific entry. - /// Can return nullptr if cannot subscribe (identifier not found) or if it doesn't make sense (the storage is read-only). 
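A recurring convention in the storage interface above is that every throwing accessor (getID, read, insert, remove, update) has a try* counterpart that reports failure through std::optional, nullptr, or bool instead of an exception. A small self-contained sketch of that dual API, with made-up names (Registry, tryGetID) rather than anything from this patch:

#include <iostream>
#include <map>
#include <optional>
#include <stdexcept>
#include <string>

// Toy registry with the same dual API: getID() throws on a missing name,
// find()/tryGetID() report the failure through std::optional instead.
class Registry
{
public:
    void insert(const std::string & name, int id) { ids.emplace(name, id); }

    std::optional<int> find(const std::string & name) const
    {
        auto it = ids.find(name);
        if (it == ids.end())
            return std::nullopt;
        return it->second;
    }

    int getID(const std::string & name) const
    {
        if (auto id = find(name))
            return *id;
        throw std::runtime_error("'" + name + "' not found");
    }

    std::optional<int> tryGetID(const std::string & name) const
    {
        try { return getID(name); }
        catch (...) { return std::nullopt; }
    }

private:
    std::map<std::string, int> ids;
};

int main()
{
    Registry registry;
    registry.insert("default", 1);
    std::cout << registry.getID("default") << '\n';                  // 1
    std::cout << registry.tryGetID("missing").has_value() << '\n';   // 0
}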
- ext::scope_guard subscribeForChanges(const UUID & id, const OnChangedHandler & handler) const; - ext::scope_guard subscribeForChanges(const std::vector & ids, const OnChangedHandler & handler) const; - - bool hasSubscription(std::type_index type) const; - bool hasSubscription(const UUID & id) const; - -protected: - virtual std::optional findImpl(std::type_index type, const String & name) const = 0; - virtual std::vector findAllImpl(std::type_index type) const = 0; - virtual bool existsImpl(const UUID & id) const = 0; - virtual AccessEntityPtr readImpl(const UUID & id) const = 0; - virtual String readNameImpl(const UUID & id) const = 0; - virtual UUID insertImpl(const AccessEntityPtr & entity, bool replace_if_exists) = 0; - virtual void removeImpl(const UUID & id) = 0; - virtual void updateImpl(const UUID & id, const UpdateFunc & update_func) = 0; - virtual ext::scope_guard subscribeForChangesImpl(const UUID & id, const OnChangedHandler & handler) const = 0; - virtual ext::scope_guard subscribeForChangesImpl(std::type_index type, const OnChangedHandler & handler) const = 0; - virtual bool hasSubscriptionImpl(const UUID & id) const = 0; - virtual bool hasSubscriptionImpl(std::type_index type) const = 0; - - static UUID generateRandomID(); - Poco::Logger * getLogger() const; - static String getTypeName(std::type_index type) { return IAccessEntity::getTypeName(type); } - [[noreturn]] void throwNotFound(const UUID & id) const; - [[noreturn]] void throwNotFound(std::type_index type, const String & name) const; - [[noreturn]] void throwBadCast(const UUID & id, std::type_index type, const String & name, std::type_index required_type) const; - [[noreturn]] void throwIDCollisionCannotInsert(const UUID & id, std::type_index type, const String & name, std::type_index existing_type, const String & existing_name) const; - [[noreturn]] void throwNameCollisionCannotInsert(std::type_index type, const String & name) const; - [[noreturn]] void throwNameCollisionCannotRename(std::type_index type, const String & old_name, const String & new_name) const; - [[noreturn]] void throwReadonlyCannotInsert(std::type_index type, const String & name) const; - [[noreturn]] void throwReadonlyCannotUpdate(std::type_index type, const String & name) const; - [[noreturn]] void throwReadonlyCannotRemove(std::type_index type, const String & name) const; - - using Notification = std::tuple; - using Notifications = std::vector; - static void notify(const Notifications & notifications); - -private: - AccessEntityPtr tryReadBase(const UUID & id) const; - - const String storage_name; - mutable std::atomic log = nullptr; -}; - - -template -std::shared_ptr IAccessStorage::read(const UUID & id) const -{ - auto entity = readImpl(id); - auto ptr = typeid_cast>(entity); - if (ptr) - return ptr; - throwBadCast(id, entity->getType(), entity->getFullName(), typeid(EntityType)); -} - - -template -std::shared_ptr IAccessStorage::read(const String & name) const -{ - return read(getID(name)); -} - - -template -std::shared_ptr IAccessStorage::tryRead(const UUID & id) const -{ - auto entity = tryReadBase(id); - if (!entity) - return nullptr; - return typeid_cast>(entity); -} - - -template -std::shared_ptr IAccessStorage::tryRead(const String & name) const -{ - auto id = find(name); - return id ? 
tryRead(*id) : nullptr; -} -} diff --git a/dbms/src/Access/MemoryAccessStorage.cpp b/dbms/src/Access/MemoryAccessStorage.cpp deleted file mode 100644 index 310f4b97fda..00000000000 --- a/dbms/src/Access/MemoryAccessStorage.cpp +++ /dev/null @@ -1,311 +0,0 @@ -#include -#include -#include - - -namespace DB -{ -MemoryAccessStorage::MemoryAccessStorage(const String & storage_name_) - : IAccessStorage(storage_name_) -{ -} - - -MemoryAccessStorage::~MemoryAccessStorage() {} - - -std::optional MemoryAccessStorage::findImpl(std::type_index type, const String & name) const -{ - std::lock_guard lock{mutex}; - auto it = names.find({name, type}); - if (it == names.end()) - return {}; - - Entry & entry = *(it->second); - return entry.id; -} - - -std::vector MemoryAccessStorage::findAllImpl(std::type_index type) const -{ - std::lock_guard lock{mutex}; - std::vector result; - result.reserve(entries.size()); - for (const auto & [id, entry] : entries) - if (entry.entity->isTypeOf(type)) - result.emplace_back(id); - return result; -} - - -bool MemoryAccessStorage::existsImpl(const UUID & id) const -{ - std::lock_guard lock{mutex}; - return entries.count(id); -} - - -AccessEntityPtr MemoryAccessStorage::readImpl(const UUID & id) const -{ - std::lock_guard lock{mutex}; - auto it = entries.find(id); - if (it == entries.end()) - throwNotFound(id); - const Entry & entry = it->second; - return entry.entity; -} - - -String MemoryAccessStorage::readNameImpl(const UUID & id) const -{ - return readImpl(id)->getFullName(); -} - - -UUID MemoryAccessStorage::insertImpl(const AccessEntityPtr & new_entity, bool replace_if_exists) -{ - Notifications notifications; - SCOPE_EXIT({ notify(notifications); }); - - UUID id = generateRandomID(); - std::lock_guard lock{mutex}; - insertNoLock(generateRandomID(), new_entity, replace_if_exists, notifications); - return id; -} - - -void MemoryAccessStorage::insertNoLock(const UUID & id, const AccessEntityPtr & new_entity, bool replace_if_exists, Notifications & notifications) -{ - const String & name = new_entity->getFullName(); - std::type_index type = new_entity->getType(); - - /// Check that we can insert. - auto it = entries.find(id); - if (it != entries.end()) - { - const auto & existing_entry = it->second; - throwIDCollisionCannotInsert(id, type, name, existing_entry.entity->getType(), existing_entry.entity->getFullName()); - } - - auto it2 = names.find({name, type}); - if (it2 != names.end()) - { - const auto & existing_entry = *(it2->second); - if (replace_if_exists) - removeNoLock(existing_entry.id, notifications); - else - throwNameCollisionCannotInsert(type, name); - } - - /// Do insertion. - auto & entry = entries[id]; - entry.id = id; - entry.entity = new_entity; - names[std::pair{name, type}] = &entry; - prepareNotifications(entry, false, notifications); -} - - -void MemoryAccessStorage::removeImpl(const UUID & id) -{ - Notifications notifications; - SCOPE_EXIT({ notify(notifications); }); - - std::lock_guard lock{mutex}; - removeNoLock(id, notifications); -} - - -void MemoryAccessStorage::removeNoLock(const UUID & id, Notifications & notifications) -{ - auto it = entries.find(id); - if (it == entries.end()) - throwNotFound(id); - - Entry & entry = it->second; - const String & name = entry.entity->getFullName(); - std::type_index type = entry.entity->getType(); - - prepareNotifications(entry, true, notifications); - - /// Do removing. 
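insertImpl() and removeImpl() above collect notifications while the mutex is held and only invoke the handlers once the lock has been released, which SCOPE_EXIT arranges by being declared before the lock_guard. A toy approximation of the same ordering, using a plain nested scope instead of SCOPE_EXIT and invented names:

#include <algorithm>
#include <functional>
#include <iostream>
#include <mutex>
#include <vector>

// Changes are recorded while the mutex is held, but the handlers only run
// after it has been released.
std::mutex mutex;
std::vector<int> entries;

void removeEntry(int id)
{
    std::vector<std::function<void()>> notifications;
    {
        std::lock_guard<std::mutex> lock{mutex};
        entries.erase(std::remove(entries.begin(), entries.end(), id), entries.end());
        notifications.push_back([id] { std::cout << "removed " << id << '\n'; });
    }
    for (const auto & notify : notifications)
        notify();   // runs unlocked, so handlers may safely call back into the storage
}

int main()
{
    entries = {1, 2, 3};
    removeEntry(2);
}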
- names.erase({name, type}); - entries.erase(it); -} - - -void MemoryAccessStorage::updateImpl(const UUID & id, const UpdateFunc & update_func) -{ - Notifications notifications; - SCOPE_EXIT({ notify(notifications); }); - - std::lock_guard lock{mutex}; - updateNoLock(id, update_func, notifications); -} - - -void MemoryAccessStorage::updateNoLock(const UUID & id, const UpdateFunc & update_func, Notifications & notifications) -{ - auto it = entries.find(id); - if (it == entries.end()) - throwNotFound(id); - - Entry & entry = it->second; - auto old_entity = entry.entity; - auto new_entity = update_func(old_entity); - - if (*new_entity == *old_entity) - return; - - entry.entity = new_entity; - - if (new_entity->getFullName() != old_entity->getFullName()) - { - auto it2 = names.find({new_entity->getFullName(), new_entity->getType()}); - if (it2 != names.end()) - throwNameCollisionCannotRename(old_entity->getType(), old_entity->getFullName(), new_entity->getFullName()); - - names.erase({old_entity->getFullName(), old_entity->getType()}); - names[std::pair{new_entity->getFullName(), new_entity->getType()}] = &entry; - } - - prepareNotifications(entry, false, notifications); -} - - -void MemoryAccessStorage::setAll(const std::vector & all_entities) -{ - std::vector> entities_with_ids; - entities_with_ids.reserve(all_entities.size()); - for (const auto & entity : all_entities) - entities_with_ids.emplace_back(generateRandomID(), entity); - setAll(entities_with_ids); -} - - -void MemoryAccessStorage::setAll(const std::vector> & all_entities) -{ - Notifications notifications; - SCOPE_EXIT({ notify(notifications); }); - - std::lock_guard lock{mutex}; - setAllNoLock(all_entities, notifications); -} - - -void MemoryAccessStorage::setAllNoLock(const std::vector> & all_entities, Notifications & notifications) -{ - /// Get list of the currently used IDs. Later we will remove those of them which are not used anymore. - std::unordered_set not_used_ids; - for (const auto & id_and_entry : entries) - not_used_ids.emplace(id_and_entry.first); - - /// Remove conflicting entities. - for (const auto & [id, entity] : all_entities) - { - auto it = entries.find(id); - if (it != entries.end()) - { - not_used_ids.erase(id); /// ID is used. - Entry & entry = it->second; - if (entry.entity->getType() != entity->getType()) - { - removeNoLock(id, notifications); - continue; - } - } - auto it2 = names.find({entity->getFullName(), entity->getType()}); - if (it2 != names.end()) - { - Entry & entry = *(it2->second); - if (entry.id != id) - removeNoLock(id, notifications); - } - } - - /// Remove entities which are not used anymore. - for (const auto & id : not_used_ids) - removeNoLock(id, notifications); - - /// Insert or update entities. - for (const auto & [id, entity] : all_entities) - { - auto it = entries.find(id); - if (it != entries.end()) - { - if (*(it->second.entity) != *entity) - { - const AccessEntityPtr & changed_entity = entity; - updateNoLock(id, [&changed_entity](const AccessEntityPtr &) { return changed_entity; }, notifications); - } - } - else - insertNoLock(id, entity, false, notifications); - } -} - - -void MemoryAccessStorage::prepareNotifications(const Entry & entry, bool remove, Notifications & notifications) const -{ - for (const auto & handler : entry.handlers_by_id) - notifications.push_back({handler, entry.id, remove ? 
nullptr : entry.entity}); - - auto range = handlers_by_type.equal_range(entry.entity->getType()); - for (auto it = range.first; it != range.second; ++it) - notifications.push_back({it->second, entry.id, remove ? nullptr : entry.entity}); -} - - -ext::scope_guard MemoryAccessStorage::subscribeForChangesImpl(std::type_index type, const OnChangedHandler & handler) const -{ - std::lock_guard lock{mutex}; - auto handler_it = handlers_by_type.emplace(type, handler); - - return [this, handler_it] - { - std::lock_guard lock2{mutex}; - handlers_by_type.erase(handler_it); - }; -} - - -ext::scope_guard MemoryAccessStorage::subscribeForChangesImpl(const UUID & id, const OnChangedHandler & handler) const -{ - std::lock_guard lock{mutex}; - auto it = entries.find(id); - if (it == entries.end()) - return {}; - const Entry & entry = it->second; - auto handler_it = entry.handlers_by_id.insert(entry.handlers_by_id.end(), handler); - - return [this, id, handler_it] - { - std::lock_guard lock2{mutex}; - auto it2 = entries.find(id); - if (it2 != entries.end()) - { - const Entry & entry2 = it2->second; - entry2.handlers_by_id.erase(handler_it); - } - }; -} - - -bool MemoryAccessStorage::hasSubscriptionImpl(const UUID & id) const -{ - auto it = entries.find(id); - if (it != entries.end()) - { - const Entry & entry = it->second; - return !entry.handlers_by_id.empty(); - } - return false; -} - - -bool MemoryAccessStorage::hasSubscriptionImpl(std::type_index type) const -{ - auto range = handlers_by_type.equal_range(type); - return range.first != range.second; -} -} diff --git a/dbms/src/Access/MemoryAccessStorage.h b/dbms/src/Access/MemoryAccessStorage.h deleted file mode 100644 index 93d1b24e9da..00000000000 --- a/dbms/src/Access/MemoryAccessStorage.h +++ /dev/null @@ -1,64 +0,0 @@ -#pragma once - -#include -#include -#include -#include -#include - - -namespace DB -{ -/// Implementation of IAccessStorage which keeps all data in memory. -class MemoryAccessStorage : public IAccessStorage -{ -public: - MemoryAccessStorage(const String & storage_name_ = "memory"); - ~MemoryAccessStorage() override; - - /// Sets all entities at once. 
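subscribeForChangesImpl() above registers a handler under the mutex and hands back an ext::scope_guard whose lambda removes exactly that handler again. A standalone sketch of the same subscribe/unsubscribe shape; here the handle is returned as a plain std::function instead of a scope guard, and all names are invented:

#include <functional>
#include <iostream>
#include <list>
#include <mutex>
#include <string>

using Handler = std::function<void(const std::string &)>;

std::mutex mutex;
std::list<Handler> handlers;

// Registers a handler and returns a callable that erases exactly that handler,
// the same shape as subscribeForChangesImpl() returning an ext::scope_guard.
std::function<void()> subscribe(Handler handler)
{
    std::lock_guard<std::mutex> lock{mutex};
    auto it = handlers.insert(handlers.end(), std::move(handler));
    return [it]
    {
        std::lock_guard<std::mutex> lock2{mutex};
        handlers.erase(it);
    };
}

void notifyAll(const std::string & event)
{
    std::lock_guard<std::mutex> lock{mutex};
    for (const auto & handler : handlers)
        handler(event);
}

int main()
{
    auto unsubscribe = subscribe([](const std::string & event) { std::cout << "got " << event << '\n'; });
    notifyAll("changed");   // handler fires
    unsubscribe();
    notifyAll("changed");   // nothing registered any more
}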
- void setAll(const std::vector & all_entities); - void setAll(const std::vector> & all_entities); - -private: - std::optional findImpl(std::type_index type, const String & name) const override; - std::vector findAllImpl(std::type_index type) const override; - bool existsImpl(const UUID & id) const override; - AccessEntityPtr readImpl(const UUID & id) const override; - String readNameImpl(const UUID & id) const override; - UUID insertImpl(const AccessEntityPtr & entity, bool replace_if_exists) override; - void removeImpl(const UUID & id) override; - void updateImpl(const UUID & id, const UpdateFunc & update_func) override; - ext::scope_guard subscribeForChangesImpl(const UUID & id, const OnChangedHandler & handler) const override; - ext::scope_guard subscribeForChangesImpl(std::type_index type, const OnChangedHandler & handler) const override; - bool hasSubscriptionImpl(const UUID & id) const override; - bool hasSubscriptionImpl(std::type_index type) const override; - - struct Entry - { - UUID id; - AccessEntityPtr entity; - mutable std::list handlers_by_id; - }; - - void insertNoLock(const UUID & id, const AccessEntityPtr & entity, bool replace_if_exists, Notifications & notifications); - void removeNoLock(const UUID & id, Notifications & notifications); - void updateNoLock(const UUID & id, const UpdateFunc & update_func, Notifications & notifications); - void setAllNoLock(const std::vector> & all_entities, Notifications & notifications); - void prepareNotifications(const Entry & entry, bool remove, Notifications & notifications) const; - - using NameTypePair = std::pair; - struct Hash - { - size_t operator()(const NameTypePair & key) const - { - return std::hash{}(key.first) - std::hash{}(key.second); - } - }; - - mutable std::mutex mutex; - std::unordered_map entries; /// We want to search entries both by ID and by the pair of name and type. - std::unordered_map names; /// and by the pair of name and type. - mutable std::unordered_multimap handlers_by_type; -}; -} diff --git a/dbms/src/Access/MultipleAccessStorage.h b/dbms/src/Access/MultipleAccessStorage.h deleted file mode 100644 index de617063e31..00000000000 --- a/dbms/src/Access/MultipleAccessStorage.h +++ /dev/null @@ -1,53 +0,0 @@ -#pragma once - -#include -#include -#include - - -namespace DB -{ -/// Implementation of IAccessStorage which contains multiple nested storages. 
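The MemoryAccessStorage header above keeps the same entries reachable through two indexes, one keyed by id and one keyed by the (name, type) pair with a hand-written hash. A tiny illustration of that dual-index layout; the hash combination and all names here are illustrative, not taken from the patch:

#include <iostream>
#include <string>
#include <typeindex>
#include <typeinfo>
#include <unordered_map>
#include <utility>

// Two indexes over the same entries: lookup by id and lookup by (name, type).
struct Entry
{
    int id;
    std::string name;
    std::type_index type;
};

using NameTypePair = std::pair<std::string, std::type_index>;

struct NameTypeHash
{
    size_t operator()(const NameTypePair & key) const
    {
        // Any way of mixing the two hashes will do here.
        return std::hash<std::string>{}(key.first) ^ (std::hash<std::type_index>{}(key.second) << 1);
    }
};

int main()
{
    std::unordered_map<int, Entry> by_id;
    std::unordered_map<NameTypePair, Entry *, NameTypeHash> by_name;

    auto it = by_id.emplace(42, Entry{42, "alice", std::type_index(typeid(int))}).first;
    by_name[{it->second.name, it->second.type}] = &it->second;

    std::cout << by_name.count({"alice", std::type_index(typeid(int))}) << '\n'; // 1
}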
-class MultipleAccessStorage : public IAccessStorage -{ -public: - using Storage = IAccessStorage; - - MultipleAccessStorage(std::vector> nested_storages_, size_t index_of_nested_storage_for_insertion_ = 0); - ~MultipleAccessStorage() override; - - std::vector findMultiple(std::type_index type, const String & name) const; - - template - std::vector findMultiple(const String & name) const { return findMultiple(EntityType::TYPE, name); } - - const Storage * findStorage(const UUID & id) const; - Storage * findStorage(const UUID & id); - const Storage & getStorage(const UUID & id) const; - Storage & getStorage(const UUID & id); - - Storage & getStorageByIndex(size_t i) { return *(nested_storages[i]); } - const Storage & getStorageByIndex(size_t i) const { return *(nested_storages[i]); } - -protected: - std::optional findImpl(std::type_index type, const String & name) const override; - std::vector findAllImpl(std::type_index type) const override; - bool existsImpl(const UUID & id) const override; - AccessEntityPtr readImpl(const UUID & id) const override; - String readNameImpl(const UUID &id) const override; - UUID insertImpl(const AccessEntityPtr & entity, bool replace_if_exists) override; - void removeImpl(const UUID & id) override; - void updateImpl(const UUID & id, const UpdateFunc & update_func) override; - ext::scope_guard subscribeForChangesImpl(const UUID & id, const OnChangedHandler & handler) const override; - ext::scope_guard subscribeForChangesImpl(std::type_index type, const OnChangedHandler & handler) const override; - bool hasSubscriptionImpl(const UUID & id) const override; - bool hasSubscriptionImpl(std::type_index type) const override; - -private: - std::vector> nested_storages; - IAccessStorage * nested_storage_for_insertion; - mutable LRUCache ids_cache; - mutable std::mutex ids_cache_mutex; -}; - -} diff --git a/dbms/src/Access/Quota.cpp b/dbms/src/Access/Quota.cpp deleted file mode 100644 index d9e9e0b35fc..00000000000 --- a/dbms/src/Access/Quota.cpp +++ /dev/null @@ -1,45 +0,0 @@ -#include -#include -#include - - -namespace DB -{ -Quota::Limits::Limits() -{ - boost::range::fill(max, 0); -} - - -bool operator ==(const Quota::Limits & lhs, const Quota::Limits & rhs) -{ - return boost::range::equal(lhs.max, rhs.max) && (lhs.duration == rhs.duration) - && (lhs.randomize_interval == rhs.randomize_interval); -} - - -bool Quota::equal(const IAccessEntity & other) const -{ - if (!IAccessEntity::equal(other)) - return false; - const auto & other_quota = typeid_cast(other); - return (all_limits == other_quota.all_limits) && (key_type == other_quota.key_type) && (roles == other_quota.roles); -} - - -const char * Quota::resourceTypeToColumnName(ResourceType resource_type) -{ - switch (resource_type) - { - case Quota::QUERIES: return "queries"; - case Quota::ERRORS: return "errors"; - case Quota::RESULT_ROWS: return "result_rows"; - case Quota::RESULT_BYTES: return "result_bytes"; - case Quota::READ_ROWS: return "read_rows"; - case Quota::READ_BYTES: return "read_bytes"; - case Quota::EXECUTION_TIME: return "execution_time"; - } - __builtin_unreachable(); -} -} - diff --git a/dbms/src/Access/Quota.h b/dbms/src/Access/Quota.h deleted file mode 100644 index 4778b119d1e..00000000000 --- a/dbms/src/Access/Quota.h +++ /dev/null @@ -1,140 +0,0 @@ -#pragma once - -#include -#include -#include - - - -namespace DB -{ -/** Quota for resources consumption for specific interval. - * Used to limit resource usage by user. 
- * Quota is applied "softly" - could be slightly exceed, because it is checked usually only on each block of processed data. - * Accumulated values are not persisted and are lost on server restart. - * Quota is local to server, - * but for distributed queries, accumulated values for read rows and bytes - * are collected from all participating servers and accumulated locally. - */ -struct Quota : public IAccessEntity -{ - enum ResourceType - { - QUERIES, /// Number of queries. - ERRORS, /// Number of queries with exceptions. - RESULT_ROWS, /// Number of rows returned as result. - RESULT_BYTES, /// Number of bytes returned as result. - READ_ROWS, /// Number of rows read from tables. - READ_BYTES, /// Number of bytes read from tables. - EXECUTION_TIME, /// Total amount of query execution time in nanoseconds. - }; - static constexpr size_t MAX_RESOURCE_TYPE = 7; - - using ResourceAmount = UInt64; - static constexpr ResourceAmount UNLIMITED = 0; /// 0 means unlimited. - - /// Amount of resources available to consume for each duration. - struct Limits - { - ResourceAmount max[MAX_RESOURCE_TYPE]; - std::chrono::seconds duration = std::chrono::seconds::zero(); - - /// Intervals can be randomized (to avoid DoS if intervals for many users end at one time). - bool randomize_interval = false; - - Limits(); - friend bool operator ==(const Limits & lhs, const Limits & rhs); - friend bool operator !=(const Limits & lhs, const Limits & rhs) { return !(lhs == rhs); } - }; - - std::vector all_limits; - - /// Key to share quota consumption. - /// Users with the same key share the same amount of resource. - enum class KeyType - { - NONE, /// All users share the same quota. - USER_NAME, /// Connections with the same user name share the same quota. - IP_ADDRESS, /// Connections from the same IP share the same quota. - CLIENT_KEY, /// Client should explicitly supply a key to use. - CLIENT_KEY_OR_USER_NAME, /// Same as CLIENT_KEY, but use USER_NAME if the client doesn't supply a key. - CLIENT_KEY_OR_IP_ADDRESS, /// Same as CLIENT_KEY, but use IP_ADDRESS if the client doesn't supply a key. - }; - static constexpr size_t MAX_KEY_TYPE = 6; - KeyType key_type = KeyType::NONE; - - /// Which roles or users should use this quota. 
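Two conventions above are worth spelling out: resource counters live in arrays indexed by the ResourceType enum, and a configured maximum of 0 (UNLIMITED) disables the check entirely. A short sketch of that "zero means unlimited" rule under assumed names:

#include <cstdint>
#include <iostream>

using ResourceAmount = std::uint64_t;
constexpr ResourceAmount UNLIMITED = 0;

enum ResourceType { QUERIES, ERRORS, MAX_RESOURCE_TYPE };

// A limit of 0 means "not configured", so only a non-zero maximum can be exceeded.
bool exceeded(ResourceAmount used, ResourceAmount max)
{
    return max != UNLIMITED && used > max;
}

int main()
{
    ResourceAmount max[MAX_RESOURCE_TYPE] = {100, UNLIMITED};  // per-interval maxima
    ResourceAmount used[MAX_RESOURCE_TYPE] = {150, 7};

    std::cout << exceeded(used[QUERIES], max[QUERIES]) << '\n'; // 1: over the 100-query limit
    std::cout << exceeded(used[ERRORS], max[ERRORS]) << '\n';   // 0: errors are unlimited here
}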
- GenericRoleSet roles; - - bool equal(const IAccessEntity & other) const override; - std::shared_ptr clone() const override { return cloneImpl(); } - - static const char * getNameOfResourceType(ResourceType resource_type); - static const char * resourceTypeToKeyword(ResourceType resource_type); - static const char * resourceTypeToColumnName(ResourceType resource_type); - static const char * getNameOfKeyType(KeyType key_type); - static double executionTimeToSeconds(ResourceAmount ns); - static ResourceAmount secondsToExecutionTime(double s); -}; - - -inline const char * Quota::getNameOfResourceType(ResourceType resource_type) -{ - switch (resource_type) - { - case Quota::QUERIES: return "queries"; - case Quota::ERRORS: return "errors"; - case Quota::RESULT_ROWS: return "result rows"; - case Quota::RESULT_BYTES: return "result bytes"; - case Quota::READ_ROWS: return "read rows"; - case Quota::READ_BYTES: return "read bytes"; - case Quota::EXECUTION_TIME: return "execution time"; - } - __builtin_unreachable(); -} - - -inline const char * Quota::resourceTypeToKeyword(ResourceType resource_type) -{ - switch (resource_type) - { - case Quota::QUERIES: return "QUERIES"; - case Quota::ERRORS: return "ERRORS"; - case Quota::RESULT_ROWS: return "RESULT ROWS"; - case Quota::RESULT_BYTES: return "RESULT BYTES"; - case Quota::READ_ROWS: return "READ ROWS"; - case Quota::READ_BYTES: return "READ BYTES"; - case Quota::EXECUTION_TIME: return "EXECUTION TIME"; - } - __builtin_unreachable(); -} - - -inline const char * Quota::getNameOfKeyType(KeyType key_type) -{ - switch (key_type) - { - case KeyType::NONE: return "none"; - case KeyType::USER_NAME: return "user name"; - case KeyType::IP_ADDRESS: return "ip address"; - case KeyType::CLIENT_KEY: return "client key"; - case KeyType::CLIENT_KEY_OR_USER_NAME: return "client key or user name"; - case KeyType::CLIENT_KEY_OR_IP_ADDRESS: return "client key or ip address"; - } - __builtin_unreachable(); -} - - -inline double Quota::executionTimeToSeconds(ResourceAmount ns) -{ - return std::chrono::duration_cast>(std::chrono::nanoseconds{ns}).count(); -} - -inline Quota::ResourceAmount Quota::secondsToExecutionTime(double s) -{ - return std::chrono::duration_cast(std::chrono::duration(s)).count(); -} - - -using QuotaPtr = std::shared_ptr; -} diff --git a/dbms/src/Access/QuotaContext.cpp b/dbms/src/Access/QuotaContext.cpp deleted file mode 100644 index 815d9440eaa..00000000000 --- a/dbms/src/Access/QuotaContext.cpp +++ /dev/null @@ -1,267 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include - - -namespace DB -{ -namespace ErrorCodes -{ - extern const int QUOTA_EXPIRED; -} - -struct QuotaContext::Impl -{ - [[noreturn]] static void throwQuotaExceed( - const String & user_name, - const String & quota_name, - ResourceType resource_type, - ResourceAmount used, - ResourceAmount max, - std::chrono::seconds duration, - std::chrono::system_clock::time_point end_of_interval) - { - std::function amount_to_string = [](UInt64 amount) { return std::to_string(amount); }; - if (resource_type == Quota::EXECUTION_TIME) - amount_to_string = [&](UInt64 amount) { return ext::to_string(std::chrono::nanoseconds(amount)); }; - - throw Exception( - "Quota for user " + backQuote(user_name) + " for " + ext::to_string(duration) + " has been exceeded: " - + Quota::getNameOfResourceType(resource_type) + " = " + amount_to_string(used) + "/" + amount_to_string(max) + ". " - + "Interval will end at " + ext::to_string(end_of_interval) + ". 
" + "Name of quota template: " + backQuote(quota_name), - ErrorCodes::QUOTA_EXPIRED); - } - - - static std::chrono::system_clock::time_point getEndOfInterval( - const Interval & interval, std::chrono::system_clock::time_point current_time, bool * counters_were_reset = nullptr) - { - auto & end_of_interval = interval.end_of_interval; - auto end_loaded = end_of_interval.load(); - auto end = std::chrono::system_clock::time_point{end_loaded}; - if (current_time < end) - { - if (counters_were_reset) - *counters_were_reset = false; - return end; - } - - const auto duration = interval.duration; - - do - { - end = end + (current_time - end + duration) / duration * duration; - if (end_of_interval.compare_exchange_strong(end_loaded, end.time_since_epoch())) - { - boost::range::fill(interval.used, 0); - break; - } - end = std::chrono::system_clock::time_point{end_loaded}; - } - while (current_time >= end); - - if (counters_were_reset) - *counters_were_reset = true; - return end; - } - - - static void used( - const String & user_name, - const Intervals & intervals, - ResourceType resource_type, - ResourceAmount amount, - std::chrono::system_clock::time_point current_time, - bool check_exceeded) - { - for (const auto & interval : intervals.intervals) - { - ResourceAmount used = (interval.used[resource_type] += amount); - ResourceAmount max = interval.max[resource_type]; - if (max == Quota::UNLIMITED) - continue; - if (used > max) - { - bool counters_were_reset = false; - auto end_of_interval = getEndOfInterval(interval, current_time, &counters_were_reset); - if (counters_were_reset) - { - used = (interval.used[resource_type] += amount); - if ((used > max) && check_exceeded) - throwQuotaExceed(user_name, intervals.quota_name, resource_type, used, max, interval.duration, end_of_interval); - } - else if (check_exceeded) - throwQuotaExceed(user_name, intervals.quota_name, resource_type, used, max, interval.duration, end_of_interval); - } - } - } - - static void checkExceeded( - const String & user_name, - const Intervals & intervals, - ResourceType resource_type, - std::chrono::system_clock::time_point current_time) - { - for (const auto & interval : intervals.intervals) - { - ResourceAmount used = interval.used[resource_type]; - ResourceAmount max = interval.max[resource_type]; - if (max == Quota::UNLIMITED) - continue; - if (used > max) - { - bool used_counters_reset = false; - std::chrono::system_clock::time_point end_of_interval = getEndOfInterval(interval, current_time, &used_counters_reset); - if (!used_counters_reset) - throwQuotaExceed(user_name, intervals.quota_name, resource_type, used, max, interval.duration, end_of_interval); - } - } - } - - static void checkExceeded( - const String & user_name, - const Intervals & intervals, - std::chrono::system_clock::time_point current_time) - { - for (auto resource_type : ext::range_with_static_cast(Quota::MAX_RESOURCE_TYPE)) - checkExceeded(user_name, intervals, resource_type, current_time); - } -}; - - -QuotaContext::Interval & QuotaContext::Interval::operator =(const Interval & src) -{ - randomize_interval = src.randomize_interval; - duration = src.duration; - end_of_interval.store(src.end_of_interval.load()); - for (auto resource_type : ext::range(MAX_RESOURCE_TYPE)) - { - max[resource_type] = src.max[resource_type]; - used[resource_type].store(src.used[resource_type].load()); - } - return *this; -} - - -QuotaUsageInfo QuotaContext::Intervals::getUsageInfo(std::chrono::system_clock::time_point current_time) const -{ - QuotaUsageInfo info; - 
info.quota_id = quota_id; - info.quota_name = quota_name; - info.quota_key = quota_key; - info.intervals.reserve(intervals.size()); - for (const auto & in : intervals) - { - info.intervals.push_back({}); - auto & out = info.intervals.back(); - out.duration = in.duration; - out.randomize_interval = in.randomize_interval; - out.end_of_interval = Impl::getEndOfInterval(in, current_time); - for (auto resource_type : ext::range(MAX_RESOURCE_TYPE)) - { - out.max[resource_type] = in.max[resource_type]; - out.used[resource_type] = in.used[resource_type]; - } - } - return info; -} - - -QuotaContext::QuotaContext() - : intervals(boost::make_shared()) /// Unlimited quota. -{ -} - - -QuotaContext::QuotaContext( - const String & user_name_, - const UUID & user_id_, - const std::vector & enabled_roles_, - const Poco::Net::IPAddress & address_, - const String & client_key_) - : user_name(user_name_), user_id(user_id_), enabled_roles(enabled_roles_), address(address_), client_key(client_key_) -{ -} - - -QuotaContext::~QuotaContext() = default; - - -void QuotaContext::used(ResourceType resource_type, ResourceAmount amount, bool check_exceeded) const -{ - used({resource_type, amount}, check_exceeded); -} - - -void QuotaContext::used(const std::pair & resource, bool check_exceeded) const -{ - auto loaded = intervals.load(); - auto current_time = std::chrono::system_clock::now(); - Impl::used(user_name, *loaded, resource.first, resource.second, current_time, check_exceeded); -} - - -void QuotaContext::used(const std::pair & resource1, const std::pair & resource2, bool check_exceeded) const -{ - auto loaded = intervals.load(); - auto current_time = std::chrono::system_clock::now(); - Impl::used(user_name, *loaded, resource1.first, resource1.second, current_time, check_exceeded); - Impl::used(user_name, *loaded, resource2.first, resource2.second, current_time, check_exceeded); -} - - -void QuotaContext::used(const std::pair & resource1, const std::pair & resource2, const std::pair & resource3, bool check_exceeded) const -{ - auto loaded = intervals.load(); - auto current_time = std::chrono::system_clock::now(); - Impl::used(user_name, *loaded, resource1.first, resource1.second, current_time, check_exceeded); - Impl::used(user_name, *loaded, resource2.first, resource2.second, current_time, check_exceeded); - Impl::used(user_name, *loaded, resource3.first, resource3.second, current_time, check_exceeded); -} - - -void QuotaContext::used(const std::vector> & resources, bool check_exceeded) const -{ - auto loaded = intervals.load(); - auto current_time = std::chrono::system_clock::now(); - for (const auto & resource : resources) - Impl::used(user_name, *loaded, resource.first, resource.second, current_time, check_exceeded); -} - - -void QuotaContext::checkExceeded() const -{ - auto loaded = intervals.load(); - Impl::checkExceeded(user_name, *loaded, std::chrono::system_clock::now()); -} - - -void QuotaContext::checkExceeded(ResourceType resource_type) const -{ - auto loaded = intervals.load(); - Impl::checkExceeded(user_name, *loaded, resource_type, std::chrono::system_clock::now()); -} - - -QuotaUsageInfo QuotaContext::getUsageInfo() const -{ - auto loaded = intervals.load(); - return loaded->getUsageInfo(std::chrono::system_clock::now()); -} - - -QuotaUsageInfo::QuotaUsageInfo() : quota_id(UUID(UInt128(0))) -{ -} - - -QuotaUsageInfo::Interval::Interval() -{ - boost::range::fill(used, 0); - boost::range::fill(max, 0); -} -} diff --git a/dbms/src/Access/QuotaContext.h b/dbms/src/Access/QuotaContext.h deleted file 
mode 100644 index d788a08ea17..00000000000 --- a/dbms/src/Access/QuotaContext.h +++ /dev/null @@ -1,113 +0,0 @@ -#pragma once - -#include -#include -#include -#include -#include -#include -#include -#include -#include - - -namespace DB -{ -struct QuotaUsageInfo; - - -/// Instances of `QuotaContext` are used to track resource consumption. -class QuotaContext : public boost::noncopyable -{ -public: - using ResourceType = Quota::ResourceType; - using ResourceAmount = Quota::ResourceAmount; - - /// Default constructors makes an unlimited quota. - QuotaContext(); - - ~QuotaContext(); - - /// Tracks resource consumption. If the quota exceeded and `check_exceeded == true`, throws an exception. - void used(ResourceType resource_type, ResourceAmount amount, bool check_exceeded = true) const; - void used(const std::pair & resource, bool check_exceeded = true) const; - void used(const std::pair & resource1, const std::pair & resource2, bool check_exceeded = true) const; - void used(const std::pair & resource1, const std::pair & resource2, const std::pair & resource3, bool check_exceeded = true) const; - void used(const std::vector> & resources, bool check_exceeded = true) const; - - /// Checks if the quota exceeded. If so, throws an exception. - void checkExceeded() const; - void checkExceeded(ResourceType resource_type) const; - - /// Returns the information about this quota context. - QuotaUsageInfo getUsageInfo() const; - -private: - friend class QuotaContextFactory; - friend struct ext::shared_ptr_helper; - - /// Instances of this class are created by QuotaContextFactory. - QuotaContext(const String & user_name_, const UUID & user_id_, const std::vector & enabled_roles_, const Poco::Net::IPAddress & address_, const String & client_key_); - - static constexpr size_t MAX_RESOURCE_TYPE = Quota::MAX_RESOURCE_TYPE; - - struct Interval - { - mutable std::atomic used[MAX_RESOURCE_TYPE]; - ResourceAmount max[MAX_RESOURCE_TYPE]; - std::chrono::seconds duration; - bool randomize_interval; - mutable std::atomic end_of_interval; - - Interval() {} - Interval(const Interval & src) { *this = src; } - Interval & operator =(const Interval & src); - }; - - struct Intervals - { - std::vector intervals; - UUID quota_id; - String quota_name; - String quota_key; - - QuotaUsageInfo getUsageInfo(std::chrono::system_clock::time_point current_time) const; - }; - - struct Impl; - - const String user_name; - const UUID user_id; - const std::vector enabled_roles; - const Poco::Net::IPAddress address; - const String client_key; - boost::atomic_shared_ptr intervals; /// atomically changed by QuotaUsageManager -}; - -using QuotaContextPtr = std::shared_ptr; - - -/// The information about a quota context. 
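The interval bookkeeping declared above relies on the rollover logic from getEndOfInterval() earlier in QuotaContext.cpp: once the current time passes the stored end of an interval, the end is advanced by whole durations and the counters start from zero. A single-threaded sketch of that idea; the deleted code achieves the same effect with a compare_exchange loop on an atomic, and the names below are illustrative:

#include <chrono>
#include <cstdint>
#include <iostream>

using Clock = std::chrono::system_clock;

struct Window
{
    std::chrono::seconds duration{60};
    Clock::time_point end_of_interval = Clock::now() + duration;
    std::uint64_t used = 0;

    void charge(std::uint64_t amount, Clock::time_point now)
    {
        if (now >= end_of_interval)
        {
            // Advance the end by whole durations until it lies in the future again.
            auto periods = (now - end_of_interval) / duration + 1;
            end_of_interval += periods * duration;
            used = 0;   // counters restart with the new interval
        }
        used += amount;
    }
};

int main()
{
    Window w;
    w.charge(5, Clock::now());
    w.charge(7, Clock::now() + std::chrono::seconds(120));  // past the old end: window rolls, count restarts
    std::cout << w.used << '\n';  // 7
}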
-struct QuotaUsageInfo -{ - using ResourceType = Quota::ResourceType; - using ResourceAmount = Quota::ResourceAmount; - static constexpr size_t MAX_RESOURCE_TYPE = Quota::MAX_RESOURCE_TYPE; - - struct Interval - { - ResourceAmount used[MAX_RESOURCE_TYPE]; - ResourceAmount max[MAX_RESOURCE_TYPE]; - std::chrono::seconds duration = std::chrono::seconds::zero(); - bool randomize_interval = false; - std::chrono::system_clock::time_point end_of_interval; - Interval(); - }; - - std::vector intervals; - UUID quota_id; - String quota_name; - String quota_key; - QuotaUsageInfo(); -}; -} diff --git a/dbms/src/Access/QuotaContextFactory.cpp b/dbms/src/Access/QuotaContextFactory.cpp deleted file mode 100644 index f986ee86c01..00000000000 --- a/dbms/src/Access/QuotaContextFactory.cpp +++ /dev/null @@ -1,290 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - - -namespace DB -{ -namespace ErrorCodes -{ - extern const int QUOTA_REQUIRES_CLIENT_KEY; -} - - -namespace -{ - std::chrono::system_clock::duration randomDuration(std::chrono::seconds max) - { - auto count = std::chrono::duration_cast(max).count(); - std::uniform_int_distribution distribution{0, count - 1}; - return std::chrono::system_clock::duration(distribution(thread_local_rng)); - } -} - - -void QuotaContextFactory::QuotaInfo::setQuota(const QuotaPtr & quota_, const UUID & quota_id_) -{ - quota = quota_; - quota_id = quota_id_; - roles = "a->roles; - rebuildAllIntervals(); -} - - -bool QuotaContextFactory::QuotaInfo::canUseWithContext(const QuotaContext & context) const -{ - return roles->match(context.user_id, context.enabled_roles); -} - - -String QuotaContextFactory::QuotaInfo::calculateKey(const QuotaContext & context) const -{ - using KeyType = Quota::KeyType; - switch (quota->key_type) - { - case KeyType::NONE: - return ""; - case KeyType::USER_NAME: - return context.user_name; - case KeyType::IP_ADDRESS: - return context.address.toString(); - case KeyType::CLIENT_KEY: - { - if (!context.client_key.empty()) - return context.client_key; - throw Exception( - "Quota " + quota->getName() + " (for user " + context.user_name + ") requires a client supplied key.", - ErrorCodes::QUOTA_REQUIRES_CLIENT_KEY); - } - case KeyType::CLIENT_KEY_OR_USER_NAME: - { - if (!context.client_key.empty()) - return context.client_key; - return context.user_name; - } - case KeyType::CLIENT_KEY_OR_IP_ADDRESS: - { - if (!context.client_key.empty()) - return context.client_key; - return context.address.toString(); - } - } - __builtin_unreachable(); -} - - -boost::shared_ptr QuotaContextFactory::QuotaInfo::getOrBuildIntervals(const String & key) -{ - auto it = key_to_intervals.find(key); - if (it != key_to_intervals.end()) - return it->second; - return rebuildIntervals(key); -} - - -void QuotaContextFactory::QuotaInfo::rebuildAllIntervals() -{ - for (const String & key : key_to_intervals | boost::adaptors::map_keys) - rebuildIntervals(key); -} - - -boost::shared_ptr QuotaContextFactory::QuotaInfo::rebuildIntervals(const String & key) -{ - auto new_intervals = boost::make_shared(); - new_intervals->quota_name = quota->getName(); - new_intervals->quota_id = quota_id; - new_intervals->quota_key = key; - auto & intervals = new_intervals->intervals; - intervals.reserve(quota->all_limits.size()); - constexpr size_t MAX_RESOURCE_TYPE = Quota::MAX_RESOURCE_TYPE; - for (const auto & limits : quota->all_limits) - { - intervals.emplace_back(); - auto & interval = intervals.back(); - interval.duration = 
limits.duration; - std::chrono::system_clock::time_point end_of_interval{}; - interval.randomize_interval = limits.randomize_interval; - if (limits.randomize_interval) - end_of_interval += randomDuration(limits.duration); - interval.end_of_interval = end_of_interval.time_since_epoch(); - for (auto resource_type : ext::range(MAX_RESOURCE_TYPE)) - { - interval.max[resource_type] = limits.max[resource_type]; - interval.used[resource_type] = 0; - } - } - - /// Order intervals by durations from largest to smallest. - /// To report first about largest interval on what quota was exceeded. - struct GreaterByDuration - { - bool operator()(const Interval & lhs, const Interval & rhs) const { return lhs.duration > rhs.duration; } - }; - boost::range::stable_sort(intervals, GreaterByDuration{}); - - auto it = key_to_intervals.find(key); - if (it == key_to_intervals.end()) - { - /// Just put new intervals into the map. - key_to_intervals.try_emplace(key, new_intervals); - } - else - { - /// We need to keep usage information from the old intervals. - const auto & old_intervals = it->second->intervals; - for (auto & new_interval : new_intervals->intervals) - { - /// Check if an interval with the same duration is already in use. - auto lower_bound = boost::range::lower_bound(old_intervals, new_interval, GreaterByDuration{}); - if ((lower_bound == old_intervals.end()) || (lower_bound->duration != new_interval.duration)) - continue; - - /// Found an interval with the same duration, we need to copy its usage information to `result`. - auto & current_interval = *lower_bound; - for (auto resource_type : ext::range(MAX_RESOURCE_TYPE)) - { - new_interval.used[resource_type].store(current_interval.used[resource_type].load()); - new_interval.end_of_interval.store(current_interval.end_of_interval.load()); - } - } - it->second = new_intervals; - } - - return new_intervals; -} - - -QuotaContextFactory::QuotaContextFactory(const AccessControlManager & access_control_manager_) - : access_control_manager(access_control_manager_) -{ -} - - -QuotaContextFactory::~QuotaContextFactory() -{ -} - - -QuotaContextPtr QuotaContextFactory::createContext(const String & user_name, const UUID & user_id, const std::vector & enabled_roles, const Poco::Net::IPAddress & address, const String & client_key) -{ - std::lock_guard lock{mutex}; - ensureAllQuotasRead(); - auto context = ext::shared_ptr_helper::create(user_name, user_id, enabled_roles, address, client_key); - contexts.push_back(context); - chooseQuotaForContext(context); - return context; -} - - -void QuotaContextFactory::ensureAllQuotasRead() -{ - /// `mutex` is already locked. 
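rebuildIntervals() above orders the intervals from the largest duration to the smallest, so the widest interval is reported first when a quota is exceeded, and it copies usage over from any old interval with a matching duration so counters survive a quota reload. A simplified sketch of that carry-over step, with plain integers instead of atomics and invented names:

#include <algorithm>
#include <chrono>
#include <cstdint>
#include <iostream>
#include <vector>

struct Interval
{
    std::chrono::seconds duration;
    std::uint64_t used = 0;
};

// Sort new intervals by descending duration, then keep what was already
// consumed in any old interval with the same duration.
void rebuild(std::vector<Interval> & new_intervals, const std::vector<Interval> & old_intervals)
{
    auto greater_by_duration = [](const Interval & lhs, const Interval & rhs) { return lhs.duration > rhs.duration; };
    std::stable_sort(new_intervals.begin(), new_intervals.end(), greater_by_duration);

    for (auto & interval : new_intervals)
    {
        auto it = std::lower_bound(old_intervals.begin(), old_intervals.end(), interval, greater_by_duration);
        if (it != old_intervals.end() && it->duration == interval.duration)
            interval.used = it->used;   // carry the old counter over
    }
}

int main()
{
    std::vector<Interval> old_intervals = {{std::chrono::seconds(3600), 42}, {std::chrono::seconds(60), 7}};
    std::vector<Interval> new_intervals = {{std::chrono::seconds(60)}, {std::chrono::seconds(3600)}};
    rebuild(new_intervals, old_intervals);
    std::cout << new_intervals[0].used << ' ' << new_intervals[1].used << '\n'; // 42 7
}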
- if (all_quotas_read) - return; - all_quotas_read = true; - - subscription = access_control_manager.subscribeForChanges( - [&](const UUID & id, const AccessEntityPtr & entity) - { - if (entity) - quotaAddedOrChanged(id, typeid_cast(entity)); - else - quotaRemoved(id); - }); - - for (const UUID & quota_id : access_control_manager.findAll()) - { - auto quota = access_control_manager.tryRead(quota_id); - if (quota) - all_quotas.emplace(quota_id, QuotaInfo(quota, quota_id)); - } -} - - -void QuotaContextFactory::quotaAddedOrChanged(const UUID & quota_id, const std::shared_ptr & new_quota) -{ - std::lock_guard lock{mutex}; - auto it = all_quotas.find(quota_id); - if (it == all_quotas.end()) - { - it = all_quotas.emplace(quota_id, QuotaInfo(new_quota, quota_id)).first; - } - else - { - if (it->second.quota == new_quota) - return; - } - - auto & info = it->second; - info.setQuota(new_quota, quota_id); - chooseQuotaForAllContexts(); -} - - -void QuotaContextFactory::quotaRemoved(const UUID & quota_id) -{ - std::lock_guard lock{mutex}; - all_quotas.erase(quota_id); - chooseQuotaForAllContexts(); -} - - -void QuotaContextFactory::chooseQuotaForAllContexts() -{ - /// `mutex` is already locked. - boost::range::remove_erase_if( - contexts, - [&](const std::weak_ptr & weak) - { - auto context = weak.lock(); - if (!context) - return true; // remove from the `contexts` list. - chooseQuotaForContext(context); - return false; // keep in the `contexts` list. - }); -} - -void QuotaContextFactory::chooseQuotaForContext(const std::shared_ptr & context) -{ - /// `mutex` is already locked. - boost::shared_ptr intervals; - for (auto & info : all_quotas | boost::adaptors::map_values) - { - if (info.canUseWithContext(*context)) - { - String key = info.calculateKey(*context); - intervals = info.getOrBuildIntervals(key); - break; - } - } - - if (!intervals) - intervals = boost::make_shared(); /// No quota == no limits. - - context->intervals.store(intervals); -} - - -std::vector QuotaContextFactory::getUsageInfo() const -{ - std::lock_guard lock{mutex}; - std::vector all_infos; - auto current_time = std::chrono::system_clock::now(); - for (const auto & info : all_quotas | boost::adaptors::map_values) - { - for (const auto & intervals : info.key_to_intervals | boost::adaptors::map_values) - all_infos.push_back(intervals->getUsageInfo(current_time)); - } - return all_infos; -} -} diff --git a/dbms/src/Access/QuotaContextFactory.h b/dbms/src/Access/QuotaContextFactory.h deleted file mode 100644 index c130da4f2cd..00000000000 --- a/dbms/src/Access/QuotaContextFactory.h +++ /dev/null @@ -1,60 +0,0 @@ -#pragma once - -#include -#include -#include -#include -#include -#include - - -namespace DB -{ -class AccessControlManager; - - -/// Stores information how much amount of resources have been consumed and how much are left. 
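chooseQuotaForAllContexts() above walks a list of weak_ptr references to live contexts, erasing the expired ones and re-evaluating the rest in a single pass over the vector. A standalone version of that pattern, using the std erase/remove idiom in place of boost::range::remove_erase_if and illustrative names:

#include <algorithm>
#include <iostream>
#include <memory>
#include <vector>

struct Context { int quota = 0; };

std::vector<std::weak_ptr<Context>> contexts;

// Drop expired observers and refresh the live ones in one pass.
void refreshAll(int new_quota)
{
    contexts.erase(
        std::remove_if(contexts.begin(), contexts.end(),
            [&](const std::weak_ptr<Context> & weak)
            {
                auto context = weak.lock();
                if (!context)
                    return true;        // expired: remove from the list
                context->quota = new_quota;
                return false;           // still alive: keep it
            }),
        contexts.end());
}

int main()
{
    auto a = std::make_shared<Context>();
    {
        auto b = std::make_shared<Context>();
        contexts = {a, b};
    }                                   // b dies here
    refreshAll(5);
    std::cout << contexts.size() << ' ' << a->quota << '\n'; // 1 5
}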
-class QuotaContextFactory -{ -public: - QuotaContextFactory(const AccessControlManager & access_control_manager_); - ~QuotaContextFactory(); - - QuotaContextPtr createContext(const String & user_name, const UUID & user_id, const std::vector & enabled_roles, const Poco::Net::IPAddress & address, const String & client_key); - std::vector getUsageInfo() const; - -private: - using Interval = QuotaContext::Interval; - using Intervals = QuotaContext::Intervals; - - struct QuotaInfo - { - QuotaInfo(const QuotaPtr & quota_, const UUID & quota_id_) { setQuota(quota_, quota_id_); } - void setQuota(const QuotaPtr & quota_, const UUID & quota_id_); - - bool canUseWithContext(const QuotaContext & context) const; - String calculateKey(const QuotaContext & context) const; - boost::shared_ptr getOrBuildIntervals(const String & key); - boost::shared_ptr rebuildIntervals(const String & key); - void rebuildAllIntervals(); - - QuotaPtr quota; - UUID quota_id; - const GenericRoleSet * roles = nullptr; - std::unordered_map> key_to_intervals; - }; - - void ensureAllQuotasRead(); - void quotaAddedOrChanged(const UUID & quota_id, const std::shared_ptr & new_quota); - void quotaRemoved(const UUID & quota_id); - void chooseQuotaForAllContexts(); - void chooseQuotaForContext(const std::shared_ptr & context); - - const AccessControlManager & access_control_manager; - mutable std::mutex mutex; - std::unordered_map all_quotas; - bool all_quotas_read = false; - ext::scope_guard subscription; - std::vector> contexts; -}; -} diff --git a/dbms/src/Access/Role.cpp b/dbms/src/Access/Role.cpp deleted file mode 100644 index 7b1a395feec..00000000000 --- a/dbms/src/Access/Role.cpp +++ /dev/null @@ -1,16 +0,0 @@ -#include - - -namespace DB -{ - -bool Role::equal(const IAccessEntity & other) const -{ - if (!IAccessEntity::equal(other)) - return false; - const auto & other_role = typeid_cast(other); - return (access == other_role.access) && (access_with_grant_option == other_role.access_with_grant_option) - && (granted_roles == other_role.granted_roles) && (granted_roles_with_admin_option == other_role.granted_roles_with_admin_option); -} - -} diff --git a/dbms/src/Access/Role.h b/dbms/src/Access/Role.h deleted file mode 100644 index eaeb8debd3a..00000000000 --- a/dbms/src/Access/Role.h +++ /dev/null @@ -1,24 +0,0 @@ -#pragma once - -#include -#include -#include -#include - - -namespace DB -{ - -struct Role : public IAccessEntity -{ - AccessRights access; - AccessRights access_with_grant_option; - boost::container::flat_set granted_roles; - boost::container::flat_set granted_roles_with_admin_option; - - bool equal(const IAccessEntity & other) const override; - std::shared_ptr clone() const override { return cloneImpl(); } -}; - -using RolePtr = std::shared_ptr; -} diff --git a/dbms/src/Access/RoleContext.cpp b/dbms/src/Access/RoleContext.cpp deleted file mode 100644 index 291b44027d4..00000000000 --- a/dbms/src/Access/RoleContext.cpp +++ /dev/null @@ -1,200 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include - - -namespace DB -{ -namespace -{ - void makeUnique(std::vector & vec) - { - boost::range::sort(vec); - vec.erase(std::unique(vec.begin(), vec.end()), vec.end()); - } -} - - -RoleContext::RoleContext(const AccessControlManager & manager_, const UUID & current_role_, bool with_admin_option_) - : manager(&manager_), current_role(current_role_), with_admin_option(with_admin_option_) -{ - update(); -} - - -RoleContext::RoleContext(std::vector && children_) - : children(std::move(children_)) -{ - update(); 
-} - - -RoleContext::~RoleContext() = default; - - -void RoleContext::update() -{ - std::vector handlers_to_notify; - CurrentRolesInfoPtr info_to_notify; - - { - std::lock_guard lock{mutex}; - auto old_info = info; - - updateImpl(); - - if (!handlers.empty() && (!old_info || (*old_info != *info))) - { - boost::range::copy(handlers, std::back_inserter(handlers_to_notify)); - info_to_notify = info; - } - } - - for (const auto & handler : handlers_to_notify) - handler(info_to_notify); -} - - -void RoleContext::updateImpl() -{ - if (!current_role && children.empty()) - { - info = std::make_shared(); - return; - } - - if (!children.empty()) - { - if (subscriptions_for_change_children.empty()) - { - for (const auto & child : children) - subscriptions_for_change_children.emplace_back( - child->subscribeForChanges([this](const CurrentRolesInfoPtr &) { update(); })); - } - - auto new_info = std::make_shared(); - auto & new_info_ref = *new_info; - - for (const auto & child : children) - { - auto child_info = child->getInfo(); - new_info_ref.access.merge(child_info->access); - new_info_ref.access_with_grant_option.merge(child_info->access_with_grant_option); - boost::range::copy(child_info->current_roles, std::back_inserter(new_info_ref.current_roles)); - boost::range::copy(child_info->enabled_roles, std::back_inserter(new_info_ref.enabled_roles)); - boost::range::copy(child_info->enabled_roles_with_admin_option, std::back_inserter(new_info_ref.enabled_roles_with_admin_option)); - boost::range::copy(child_info->names_of_roles, std::inserter(new_info_ref.names_of_roles, new_info_ref.names_of_roles.end())); - } - makeUnique(new_info_ref.current_roles); - makeUnique(new_info_ref.enabled_roles); - makeUnique(new_info_ref.enabled_roles_with_admin_option); - info = new_info; - return; - } - - assert(current_role); - traverseRoles(*current_role, with_admin_option); - - auto new_info = std::make_shared(); - auto & new_info_ref = *new_info; - - for (auto it = roles_map.begin(); it != roles_map.end();) - { - const auto & id = it->first; - auto & entry = it->second; - if (!entry.in_use) - { - it = roles_map.erase(it); - continue; - } - - if (id == *current_role) - new_info_ref.current_roles.push_back(id); - - new_info_ref.enabled_roles.push_back(id); - - if (entry.with_admin_option) - new_info_ref.enabled_roles_with_admin_option.push_back(id); - - new_info_ref.access.merge(entry.role->access); - new_info_ref.access_with_grant_option.merge(entry.role->access_with_grant_option); - new_info_ref.names_of_roles[id] = entry.role->getName(); - - entry.in_use = false; - entry.with_admin_option = false; - ++it; - } - - info = new_info; -} - - -void RoleContext::traverseRoles(const UUID & id_, bool with_admin_option_) -{ - auto it = roles_map.find(id_); - if (it == roles_map.end()) - { - assert(manager); - auto subscription = manager->subscribeForChanges(id_, [this, id_](const UUID &, const AccessEntityPtr & entity) - { - { - std::lock_guard lock{mutex}; - auto it2 = roles_map.find(id_); - if (it2 == roles_map.end()) - return; - if (entity) - it2->second.role = typeid_cast(entity); - else - roles_map.erase(it2); - } - update(); - }); - - auto role = manager->tryRead(id_); - if (!role) - return; - - RoleEntry new_entry; - new_entry.role = role; - new_entry.subscription_for_change_role = std::move(subscription); - it = roles_map.emplace(id_, std::move(new_entry)).first; - } - - RoleEntry & entry = it->second; - entry.with_admin_option |= with_admin_option_; - if (entry.in_use) - return; - - entry.in_use = true; - for 
(const auto & granted_role : entry.role->granted_roles) - traverseRoles(granted_role, false); - - for (const auto & granted_role : entry.role->granted_roles_with_admin_option) - traverseRoles(granted_role, true); -} - - -CurrentRolesInfoPtr RoleContext::getInfo() const -{ - std::lock_guard lock{mutex}; - return info; -} - - -ext::scope_guard RoleContext::subscribeForChanges(const OnChangeHandler & handler) const -{ - std::lock_guard lock{mutex}; - handlers.push_back(handler); - auto it = std::prev(handlers.end()); - - return [this, it] - { - std::lock_guard lock2{mutex}; - handlers.erase(it); - }; -} -} diff --git a/dbms/src/Access/RoleContext.h b/dbms/src/Access/RoleContext.h deleted file mode 100644 index 5f19adc56de..00000000000 --- a/dbms/src/Access/RoleContext.h +++ /dev/null @@ -1,64 +0,0 @@ -#pragma once - -#include -#include -#include -#include -#include -#include -#include - - -namespace DB -{ -struct Role; -using RolePtr = std::shared_ptr; -struct CurrentRolesInfo; -using CurrentRolesInfoPtr = std::shared_ptr; -class AccessControlManager; - - -class RoleContext -{ -public: - ~RoleContext(); - - /// Returns all the roles specified in the constructor. - CurrentRolesInfoPtr getInfo() const; - - using OnChangeHandler = std::function; - - /// Called when either the specified roles or the roles granted to the specified roles are changed. - ext::scope_guard subscribeForChanges(const OnChangeHandler & handler) const; - -private: - friend struct ext::shared_ptr_helper; - RoleContext(const AccessControlManager & manager_, const UUID & current_role_, bool with_admin_option_); - RoleContext(std::vector> && children_); - - void update(); - void updateImpl(); - - void traverseRoles(const UUID & id_, bool with_admin_option_); - - const AccessControlManager * manager = nullptr; - std::optional current_role; - bool with_admin_option = false; - std::vector> children; - std::vector subscriptions_for_change_children; - - struct RoleEntry - { - RolePtr role; - ext::scope_guard subscription_for_change_role; - bool with_admin_option = false; - bool in_use = false; - }; - mutable std::unordered_map roles_map; - mutable CurrentRolesInfoPtr info; - mutable std::list handlers; - mutable std::mutex mutex; -}; - -using RoleContextPtr = std::shared_ptr; -} diff --git a/dbms/src/Access/RoleContextFactory.cpp b/dbms/src/Access/RoleContextFactory.cpp deleted file mode 100644 index 3356bc238db..00000000000 --- a/dbms/src/Access/RoleContextFactory.cpp +++ /dev/null @@ -1,52 +0,0 @@ -#include -#include - - -namespace DB -{ - -RoleContextFactory::RoleContextFactory(const AccessControlManager & manager_) - : manager(manager_), cache(600000 /* 10 minutes */) {} - - -RoleContextFactory::~RoleContextFactory() = default; - - -RoleContextPtr RoleContextFactory::createContext( - const std::vector & roles, const std::vector & roles_with_admin_option) -{ - if (roles.size() == 1 && roles_with_admin_option.empty()) - return createContextImpl(roles[0], false); - - if (roles.size() == 1 && roles_with_admin_option == roles) - return createContextImpl(roles[0], true); - - std::vector children; - children.reserve(roles.size()); - for (const auto & role : roles_with_admin_option) - children.push_back(createContextImpl(role, true)); - - boost::container::flat_set roles_with_admin_option_set{roles_with_admin_option.begin(), roles_with_admin_option.end()}; - for (const auto & role : roles) - { - if (!roles_with_admin_option_set.contains(role)) - children.push_back(createContextImpl(role, false)); - } - - return 
ext::shared_ptr_helper::create(std::move(children)); -} - - -RoleContextPtr RoleContextFactory::createContextImpl(const UUID & id, bool with_admin_option) -{ - std::lock_guard lock{mutex}; - auto key = std::make_pair(id, with_admin_option); - auto x = cache.get(key); - if (x) - return *x; - auto res = ext::shared_ptr_helper::create(manager, id, with_admin_option); - cache.add(key, res); - return res; -} - -} diff --git a/dbms/src/Access/RoleContextFactory.h b/dbms/src/Access/RoleContextFactory.h deleted file mode 100644 index 659c9a218a1..00000000000 --- a/dbms/src/Access/RoleContextFactory.h +++ /dev/null @@ -1,29 +0,0 @@ -#pragma once - -#include -#include -#include - - -namespace DB -{ -class AccessControlManager; - - -class RoleContextFactory -{ -public: - RoleContextFactory(const AccessControlManager & manager_); - ~RoleContextFactory(); - - RoleContextPtr createContext(const std::vector & roles, const std::vector & roles_with_admin_option); - -private: - RoleContextPtr createContextImpl(const UUID & id, bool with_admin_option); - - const AccessControlManager & manager; - Poco::ExpireCache, RoleContextPtr> cache; - std::mutex mutex; -}; - -} diff --git a/dbms/src/Access/RowPolicy.cpp b/dbms/src/Access/RowPolicy.cpp deleted file mode 100644 index d5a28d14bb8..00000000000 --- a/dbms/src/Access/RowPolicy.cpp +++ /dev/null @@ -1,111 +0,0 @@ -#include -#include -#include -#include - - -namespace DB -{ -namespace -{ - void generateFullNameImpl(const String & database_, const String & table_name_, const String & policy_name_, String & full_name_) - { - full_name_.clear(); - full_name_.reserve(database_.length() + table_name_.length() + policy_name_.length() + 6); - full_name_ += backQuoteIfNeed(policy_name_); - full_name_ += " ON "; - if (!database_.empty()) - { - full_name_ += backQuoteIfNeed(database_); - full_name_ += '.'; - } - full_name_ += backQuoteIfNeed(table_name_); - } -} - - -String RowPolicy::FullNameParts::getFullName() const -{ - String full_name; - generateFullNameImpl(database, table_name, policy_name, full_name); - return full_name; -} - - -String RowPolicy::FullNameParts::getFullName(const Context & context) const -{ - String full_name; - generateFullNameImpl(database.empty() ? 
context.getCurrentDatabase() : database, table_name, policy_name, full_name); - return full_name; -} - - -void RowPolicy::setDatabase(const String & database_) -{ - database = database_; - generateFullNameImpl(database, table_name, policy_name, full_name); -} - - -void RowPolicy::setTableName(const String & table_name_) -{ - table_name = table_name_; - generateFullNameImpl(database, table_name, policy_name, full_name); -} - - -void RowPolicy::setName(const String & policy_name_) -{ - policy_name = policy_name_; - generateFullNameImpl(database, table_name, policy_name, full_name); -} - - -void RowPolicy::setFullName(const String & database_, const String & table_name_, const String & policy_name_) -{ - database = database_; - table_name = table_name_; - policy_name = policy_name_; - generateFullNameImpl(database, table_name, policy_name, full_name); -} - - -bool RowPolicy::equal(const IAccessEntity & other) const -{ - if (!IAccessEntity::equal(other)) - return false; - const auto & other_policy = typeid_cast(other); - return (database == other_policy.database) && (table_name == other_policy.table_name) && (policy_name == other_policy.policy_name) - && boost::range::equal(conditions, other_policy.conditions) && restrictive == other_policy.restrictive - && (roles == other_policy.roles); -} - - -const char * RowPolicy::conditionIndexToString(ConditionIndex index) -{ - switch (index) - { - case SELECT_FILTER: return "SELECT_FILTER"; - case INSERT_CHECK: return "INSERT_CHECK"; - case UPDATE_FILTER: return "UPDATE_FILTER"; - case UPDATE_CHECK: return "UPDATE_CHECK"; - case DELETE_FILTER: return "DELETE_FILTER"; - } - __builtin_unreachable(); -} - - -const char * RowPolicy::conditionIndexToColumnName(ConditionIndex index) -{ - switch (index) - { - case SELECT_FILTER: return "select_filter"; - case INSERT_CHECK: return "insert_check"; - case UPDATE_FILTER: return "update_filter"; - case UPDATE_CHECK: return "update_check"; - case DELETE_FILTER: return "delete_filter"; - } - __builtin_unreachable(); -} - -} diff --git a/dbms/src/Access/RowPolicy.h b/dbms/src/Access/RowPolicy.h deleted file mode 100644 index 6bc51a2481c..00000000000 --- a/dbms/src/Access/RowPolicy.h +++ /dev/null @@ -1,80 +0,0 @@ -#pragma once - -#include -#include - - -namespace DB -{ -class Context; - - -/** Represents a row level security policy for a table. - */ -struct RowPolicy : public IAccessEntity -{ - void setDatabase(const String & database_); - void setTableName(const String & table_name_); - void setName(const String & policy_name_) override; - void setFullName(const String & database_, const String & table_name_, const String & policy_name_); - - String getDatabase() const { return database; } - String getTableName() const { return table_name; } - String getName() const override { return policy_name; } - - struct FullNameParts - { - String database; - String table_name; - String policy_name; - String getFullName() const; - String getFullName(const Context & context) const; - }; - - /// Filter is a SQL conditional expression used to figure out which rows should be visible - /// for user or available for modification. If the expression returns NULL or false for some rows - /// those rows are silently suppressed. - /// Check is a SQL condition expression used to check whether a row can be written into - /// the table. If the expression returns NULL or false an exception is thrown. - /// If a conditional expression here is empty it means no filtering is applied. 
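The combination rule for these row policy conditions, implemented by ConditionsMixer in RowPolicyContextFactory.cpp further below, is: a row is visible only if at least one permissive policy passes and every restrictive policy passes. A minimal sketch of that rule over plain bools (illustrative only; the real code combines ASTs):

#include <vector>

/// Sketch of the ConditionsMixer rule: permissive conditions are OR-ed together,
/// restrictive conditions are AND-ed, and the two results are combined with AND.
/// An empty permissive set yields false (OR over nothing) and an empty restrictive
/// set yields true (AND over nothing), matching applyFunctionOR()/applyFunctionAND()
/// on empty argument lists.
static bool rowPasses(const std::vector<bool> & permissive, const std::vector<bool> & restrictive)
{
    bool any_permissive = false;
    for (bool condition : permissive)
        any_permissive = any_permissive || condition;

    bool all_restrictive = true;
    for (bool condition : restrictive)
        all_restrictive = all_restrictive && condition;

    return any_permissive && all_restrictive;
}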
- enum ConditionIndex - { - SELECT_FILTER, - INSERT_CHECK, - UPDATE_FILTER, - UPDATE_CHECK, - DELETE_FILTER, - }; - static constexpr size_t MAX_CONDITION_INDEX = 5; - static const char * conditionIndexToString(ConditionIndex index); - static const char * conditionIndexToColumnName(ConditionIndex index); - - String conditions[MAX_CONDITION_INDEX]; - - /// Sets that the policy is permissive. - /// A row is only accessible if at least one of the permissive policies passes, - /// in addition to all the restrictive policies. - void setPermissive(bool permissive_ = true) { setRestrictive(!permissive_); } - bool isPermissive() const { return !isRestrictive(); } - - /// Sets that the policy is restrictive. - /// A row is only accessible if at least one of the permissive policies passes, - /// in addition to all the restrictive policies. - void setRestrictive(bool restrictive_ = true) { restrictive = restrictive_; } - bool isRestrictive() const { return restrictive; } - - bool equal(const IAccessEntity & other) const override; - std::shared_ptr clone() const override { return cloneImpl(); } - - /// Which roles or users should use this row policy. - GenericRoleSet roles; - -private: - String database; - String table_name; - String policy_name; - bool restrictive = false; -}; - -using RowPolicyPtr = std::shared_ptr; -} diff --git a/dbms/src/Access/RowPolicyContext.cpp b/dbms/src/Access/RowPolicyContext.cpp deleted file mode 100644 index 661a6cb4b5f..00000000000 --- a/dbms/src/Access/RowPolicyContext.cpp +++ /dev/null @@ -1,79 +0,0 @@ -#include -#include -#include -#include -#include -#include - - -namespace DB -{ -size_t RowPolicyContext::Hash::operator()(const DatabaseAndTableNameRef & database_and_table_name) const -{ - return std::hash{}(database_and_table_name.first) - std::hash{}(database_and_table_name.second); -} - - -RowPolicyContext::RowPolicyContext() - : map_of_mixed_conditions(boost::make_shared()) -{ -} - - -RowPolicyContext::~RowPolicyContext() = default; - - -RowPolicyContext::RowPolicyContext(const UUID & user_id_, const std::vector & enabled_roles_) - : user_id(user_id_), enabled_roles(enabled_roles_) -{} - - -ASTPtr RowPolicyContext::getCondition(const String & database, const String & table_name, ConditionIndex index) const -{ - /// We don't lock `mutex` here. - auto loaded = map_of_mixed_conditions.load(); - auto it = loaded->find({database, table_name}); - if (it == loaded->end()) - return {}; - return it->second.mixed_conditions[index]; -} - - -ASTPtr RowPolicyContext::combineConditionsUsingAnd(const ASTPtr & lhs, const ASTPtr & rhs) -{ - if (!lhs) - return rhs; - if (!rhs) - return lhs; - auto function = std::make_shared(); - auto exp_list = std::make_shared(); - function->name = "and"; - function->arguments = exp_list; - function->children.push_back(exp_list); - exp_list->children.push_back(lhs); - exp_list->children.push_back(rhs); - return function; -} - - -std::vector RowPolicyContext::getCurrentPolicyIDs() const -{ - /// We don't lock `mutex` here. - auto loaded = map_of_mixed_conditions.load(); - std::vector policy_ids; - for (const auto & mixed_conditions : *loaded | boost::adaptors::map_values) - boost::range::copy(mixed_conditions.policy_ids, std::back_inserter(policy_ids)); - return policy_ids; -} - - -std::vector RowPolicyContext::getCurrentPolicyIDs(const String & database, const String & table_name) const -{ - /// We don't lock `mutex` here. 
- auto loaded = map_of_mixed_conditions.load(); - auto it = loaded->find({database, table_name}); - if (it == loaded->end()) - return {}; - return it->second.policy_ids; -} -} diff --git a/dbms/src/Access/RowPolicyContext.h b/dbms/src/Access/RowPolicyContext.h deleted file mode 100644 index 2042b85bf7a..00000000000 --- a/dbms/src/Access/RowPolicyContext.h +++ /dev/null @@ -1,70 +0,0 @@ -#pragma once - -#include -#include -#include -#include -#include -#include - - -namespace DB -{ -class IAST; -using ASTPtr = std::shared_ptr; - - -/// Provides fast access to row policies' conditions for a specific user and tables. -class RowPolicyContext -{ -public: - /// Default constructor makes a row policy usage context which restricts nothing. - RowPolicyContext(); - - ~RowPolicyContext(); - - using ConditionIndex = RowPolicy::ConditionIndex; - - /// Returns prepared filter for a specific table and operations. - /// The function can return nullptr, that means there is no filters applied. - /// The returned filter can be a combination of the filters defined by multiple row policies. - ASTPtr getCondition(const String & database, const String & table_name, ConditionIndex index) const; - - /// Combines two conditions into one by using the logical AND operator. - static ASTPtr combineConditionsUsingAnd(const ASTPtr & lhs, const ASTPtr & rhs); - - /// Returns IDs of all the policies used by the current user. - std::vector getCurrentPolicyIDs() const; - - /// Returns IDs of the policies used by a concrete table. - std::vector getCurrentPolicyIDs(const String & database, const String & table_name) const; - -private: - friend class RowPolicyContextFactory; - friend struct ext::shared_ptr_helper; - RowPolicyContext(const UUID & user_id_, const std::vector & enabled_roles_); /// RowPolicyContext should be created by RowPolicyContextFactory. - - using DatabaseAndTableName = std::pair; - using DatabaseAndTableNameRef = std::pair; - struct Hash - { - size_t operator()(const DatabaseAndTableNameRef & database_and_table_name) const; - }; - static constexpr size_t MAX_CONDITION_INDEX = RowPolicy::MAX_CONDITION_INDEX; - using ParsedConditions = std::array; - struct MixedConditions - { - std::unique_ptr database_and_table_name_keeper; - ParsedConditions mixed_conditions; - std::vector policy_ids; - }; - using MapOfMixedConditions = std::unordered_map; - - const UUID user_id; - const std::vector enabled_roles; - mutable boost::atomic_shared_ptr map_of_mixed_conditions; -}; - - -using RowPolicyContextPtr = std::shared_ptr; -} diff --git a/dbms/src/Access/RowPolicyContextFactory.cpp b/dbms/src/Access/RowPolicyContextFactory.cpp deleted file mode 100644 index 49a23c4d61a..00000000000 --- a/dbms/src/Access/RowPolicyContextFactory.cpp +++ /dev/null @@ -1,304 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - - -namespace DB -{ -namespace -{ - bool tryGetLiteralBool(const IAST & ast, bool & value) - { - try - { - if (const ASTLiteral * literal = ast.as()) - { - value = !literal->value.isNull() && applyVisitor(FieldVisitorConvertToNumber(), literal->value); - return true; - } - return false; - } - catch (...) 
- { - return false; - } - } - - ASTPtr applyFunctionAND(ASTs arguments) - { - bool const_arguments = true; - boost::range::remove_erase_if(arguments, [&](const ASTPtr & argument) -> bool - { - bool b; - if (!tryGetLiteralBool(*argument, b)) - return false; - const_arguments &= b; - return true; - }); - - if (!const_arguments) - return std::make_shared(Field{UInt8(0)}); - if (arguments.empty()) - return std::make_shared(Field{UInt8(1)}); - if (arguments.size() == 1) - return arguments[0]; - - auto function = std::make_shared(); - auto exp_list = std::make_shared(); - function->name = "and"; - function->arguments = exp_list; - function->children.push_back(exp_list); - exp_list->children = std::move(arguments); - return function; - } - - - ASTPtr applyFunctionOR(ASTs arguments) - { - bool const_arguments = false; - boost::range::remove_erase_if(arguments, [&](const ASTPtr & argument) -> bool - { - bool b; - if (!tryGetLiteralBool(*argument, b)) - return false; - const_arguments |= b; - return true; - }); - - if (const_arguments) - return std::make_shared(Field{UInt8(1)}); - if (arguments.empty()) - return std::make_shared(Field{UInt8(0)}); - if (arguments.size() == 1) - return arguments[0]; - - auto function = std::make_shared(); - auto exp_list = std::make_shared(); - function->name = "or"; - function->arguments = exp_list; - function->children.push_back(exp_list); - exp_list->children = std::move(arguments); - return function; - } - - - using ConditionIndex = RowPolicy::ConditionIndex; - static constexpr size_t MAX_CONDITION_INDEX = RowPolicy::MAX_CONDITION_INDEX; - - - /// Accumulates conditions from multiple row policies and joins them using the AND logical operation. - class ConditionsMixer - { - public: - void add(const ASTPtr & condition, bool is_restrictive) - { - if (is_restrictive) - restrictions.push_back(condition); - else - permissions.push_back(condition); - } - - ASTPtr getResult() && - { - /// Process permissive conditions. - restrictions.push_back(applyFunctionOR(std::move(permissions))); - - /// Process restrictive conditions. - return applyFunctionAND(std::move(restrictions)); - } - - private: - ASTs permissions; - ASTs restrictions; - }; -} - - -void RowPolicyContextFactory::PolicyInfo::setPolicy(const RowPolicyPtr & policy_) -{ - policy = policy_; - roles = &policy->roles; - - for (auto index : ext::range_with_static_cast(0, MAX_CONDITION_INDEX)) - { - parsed_conditions[index] = nullptr; - const String & condition = policy->conditions[index]; - if (condition.empty()) - continue; - - auto previous_range = std::pair(std::begin(policy->conditions), std::begin(policy->conditions) + index); - auto previous_it = std::find(previous_range.first, previous_range.second, condition); - if (previous_it != previous_range.second) - { - /// The condition is already parsed before. - parsed_conditions[index] = parsed_conditions[previous_it - previous_range.first]; - continue; - } - - /// Try to parse the condition. - try - { - ParserExpression parser; - parsed_conditions[index] = parseQuery(parser, condition, 0); - } - catch (...) 
- { - tryLogCurrentException( - &Poco::Logger::get("RowPolicy"), - String("Could not parse the condition ") + RowPolicy::conditionIndexToString(index) + " of row policy " - + backQuote(policy->getFullName())); - } - } -} - - -bool RowPolicyContextFactory::PolicyInfo::canUseWithContext(const RowPolicyContext & context) const -{ - return roles->match(context.user_id, context.enabled_roles); -} - - -RowPolicyContextFactory::RowPolicyContextFactory(const AccessControlManager & access_control_manager_) - : access_control_manager(access_control_manager_) -{ -} - -RowPolicyContextFactory::~RowPolicyContextFactory() = default; - - -RowPolicyContextPtr RowPolicyContextFactory::createContext(const UUID & user_id, const std::vector & enabled_roles) -{ - std::lock_guard lock{mutex}; - ensureAllRowPoliciesRead(); - auto context = ext::shared_ptr_helper::create(user_id, enabled_roles); - contexts.push_back(context); - mixConditionsForContext(*context); - return context; -} - - -void RowPolicyContextFactory::ensureAllRowPoliciesRead() -{ - /// `mutex` is already locked. - if (all_policies_read) - return; - all_policies_read = true; - - subscription = access_control_manager.subscribeForChanges( - [&](const UUID & id, const AccessEntityPtr & entity) - { - if (entity) - rowPolicyAddedOrChanged(id, typeid_cast(entity)); - else - rowPolicyRemoved(id); - }); - - for (const UUID & id : access_control_manager.findAll()) - { - auto quota = access_control_manager.tryRead(id); - if (quota) - all_policies.emplace(id, PolicyInfo(quota)); - } -} - - -void RowPolicyContextFactory::rowPolicyAddedOrChanged(const UUID & policy_id, const RowPolicyPtr & new_policy) -{ - std::lock_guard lock{mutex}; - auto it = all_policies.find(policy_id); - if (it == all_policies.end()) - { - it = all_policies.emplace(policy_id, PolicyInfo(new_policy)).first; - } - else - { - if (it->second.policy == new_policy) - return; - } - - auto & info = it->second; - info.setPolicy(new_policy); - mixConditionsForAllContexts(); -} - - -void RowPolicyContextFactory::rowPolicyRemoved(const UUID & policy_id) -{ - std::lock_guard lock{mutex}; - all_policies.erase(policy_id); - mixConditionsForAllContexts(); -} - - -void RowPolicyContextFactory::mixConditionsForAllContexts() -{ - /// `mutex` is already locked. - boost::range::remove_erase_if( - contexts, - [&](const std::weak_ptr & weak) - { - auto context = weak.lock(); - if (!context) - return true; // remove from the `contexts` list. - mixConditionsForContext(*context); - return false; // keep in the `contexts` list. - }); -} - - -void RowPolicyContextFactory::mixConditionsForContext(RowPolicyContext & context) -{ - /// `mutex` is already locked. 
- struct Mixers - { - ConditionsMixer mixers[MAX_CONDITION_INDEX]; - std::vector policy_ids; - }; - using MapOfMixedConditions = RowPolicyContext::MapOfMixedConditions; - using DatabaseAndTableName = RowPolicyContext::DatabaseAndTableName; - using DatabaseAndTableNameRef = RowPolicyContext::DatabaseAndTableNameRef; - using Hash = RowPolicyContext::Hash; - - std::unordered_map map_of_mixers; - - for (const auto & [policy_id, info] : all_policies) - { - const auto & policy = *info.policy; - auto & mixers = map_of_mixers[std::pair{policy.getDatabase(), policy.getTableName()}]; - if (info.canUseWithContext(context)) - { - mixers.policy_ids.push_back(policy_id); - for (auto index : ext::range(0, MAX_CONDITION_INDEX)) - if (info.parsed_conditions[index]) - mixers.mixers[index].add(info.parsed_conditions[index], policy.isRestrictive()); - } - } - - auto map_of_mixed_conditions = boost::make_shared(); - for (auto & [database_and_table_name, mixers] : map_of_mixers) - { - auto database_and_table_name_keeper = std::make_unique(); - database_and_table_name_keeper->first = database_and_table_name.first; - database_and_table_name_keeper->second = database_and_table_name.second; - auto & mixed_conditions = (*map_of_mixed_conditions)[DatabaseAndTableNameRef{database_and_table_name_keeper->first, - database_and_table_name_keeper->second}]; - mixed_conditions.database_and_table_name_keeper = std::move(database_and_table_name_keeper); - mixed_conditions.policy_ids = std::move(mixers.policy_ids); - for (auto index : ext::range(0, MAX_CONDITION_INDEX)) - mixed_conditions.mixed_conditions[index] = std::move(mixers.mixers[index]).getResult(); - } - - context.map_of_mixed_conditions.store(map_of_mixed_conditions); -} - -} diff --git a/dbms/src/Access/RowPolicyContextFactory.h b/dbms/src/Access/RowPolicyContextFactory.h deleted file mode 100644 index d93d1626b24..00000000000 --- a/dbms/src/Access/RowPolicyContextFactory.h +++ /dev/null @@ -1,50 +0,0 @@ -#pragma once - -#include -#include -#include -#include - - -namespace DB -{ -class AccessControlManager; - -/// Stores read and parsed row policies. 
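The `map_of_mixed_conditions.store(...)` call just above is the writer half of the lock-free read path used by RowPolicyContext::getCondition(): readers load an immutable snapshot of the whole condition map without taking the mutex, and the factory publishes a freshly built map in one atomic store. A minimal sketch of that publish/snapshot pattern, using the standard shared_ptr atomics in place of boost::atomic_shared_ptr (names are illustrative):

#include <memory>
#include <string>
#include <unordered_map>

/// Illustrative publish/snapshot pattern: readers never lock, they only load a
/// shared_ptr to the current immutable map; the writer builds a new map and
/// swaps it in with a single atomic store.
class ConditionSnapshot
{
public:
    using Map = std::unordered_map<std::string /* table */, std::string /* condition */>;

    std::string getCondition(const std::string & table) const
    {
        auto snapshot = std::atomic_load(&map);   /// lock-free read side
        auto it = snapshot->find(table);
        return (it == snapshot->end()) ? std::string{} : it->second;
    }

    void publish(Map fresh)                       /// called by the "factory" side
    {
        std::atomic_store(&map, std::make_shared<const Map>(std::move(fresh)));
    }

private:
    std::shared_ptr<const Map> map = std::make_shared<const Map>();
};

Writers still serialize on the factory's mutex; only the read side skips locking, which is what the "We don't lock `mutex` here" comments above refer to.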
-class RowPolicyContextFactory -{ -public: - RowPolicyContextFactory(const AccessControlManager & access_control_manager_); - ~RowPolicyContextFactory(); - - RowPolicyContextPtr createContext(const UUID & user_id, const std::vector & enabled_roles); - -private: - using ParsedConditions = RowPolicyContext::ParsedConditions; - - struct PolicyInfo - { - PolicyInfo(const RowPolicyPtr & policy_) { setPolicy(policy_); } - void setPolicy(const RowPolicyPtr & policy_); - bool canUseWithContext(const RowPolicyContext & context) const; - - RowPolicyPtr policy; - const GenericRoleSet * roles = nullptr; - ParsedConditions parsed_conditions; - }; - - void ensureAllRowPoliciesRead(); - void rowPolicyAddedOrChanged(const UUID & policy_id, const RowPolicyPtr & new_policy); - void rowPolicyRemoved(const UUID & policy_id); - void mixConditionsForAllContexts(); - void mixConditionsForContext(RowPolicyContext & context); - - const AccessControlManager & access_control_manager; - std::unordered_map all_policies; - bool all_policies_read = false; - ext::scope_guard subscription; - std::vector> contexts; - std::mutex mutex; -}; - -} diff --git a/dbms/src/Access/SettingsConstraints.cpp b/dbms/src/Access/SettingsConstraints.cpp deleted file mode 100644 index 538b062b1e0..00000000000 --- a/dbms/src/Access/SettingsConstraints.cpp +++ /dev/null @@ -1,279 +0,0 @@ -#include -#include -#include -#include -#include - - -namespace DB -{ -namespace ErrorCodes -{ - extern const int NOT_IMPLEMENTED; - extern const int READONLY; - extern const int QUERY_IS_PROHIBITED; - extern const int NO_ELEMENTS_IN_CONFIG; - extern const int SETTING_CONSTRAINT_VIOLATION; -} - -SettingsConstraints::SettingsConstraints() = default; -SettingsConstraints::SettingsConstraints(const SettingsConstraints & src) = default; -SettingsConstraints & SettingsConstraints::operator=(const SettingsConstraints & src) = default; -SettingsConstraints::SettingsConstraints(SettingsConstraints && src) = default; -SettingsConstraints & SettingsConstraints::operator=(SettingsConstraints && src) = default; -SettingsConstraints::~SettingsConstraints() = default; - - -void SettingsConstraints::clear() -{ - constraints_by_index.clear(); -} - - -void SettingsConstraints::setMinValue(const StringRef & name, const Field & min_value) -{ - size_t setting_index = Settings::findIndexStrict(name); - getConstraintRef(setting_index).min_value = Settings::valueToCorrespondingType(setting_index, min_value); -} - - -Field SettingsConstraints::getMinValue(const StringRef & name) const -{ - size_t setting_index = Settings::findIndexStrict(name); - const auto * ptr = tryGetConstraint(setting_index); - if (ptr) - return ptr->min_value; - else - return {}; -} - - -void SettingsConstraints::setMaxValue(const StringRef & name, const Field & max_value) -{ - size_t setting_index = Settings::findIndexStrict(name); - getConstraintRef(setting_index).max_value = Settings::valueToCorrespondingType(setting_index, max_value); -} - - -Field SettingsConstraints::getMaxValue(const StringRef & name) const -{ - size_t setting_index = Settings::findIndexStrict(name); - const auto * ptr = tryGetConstraint(setting_index); - if (ptr) - return ptr->max_value; - else - return {}; -} - - -void SettingsConstraints::setReadOnly(const StringRef & name, bool read_only) -{ - size_t setting_index = Settings::findIndexStrict(name); - getConstraintRef(setting_index).read_only = read_only; -} - - -bool SettingsConstraints::isReadOnly(const StringRef & name) const -{ - size_t setting_index = 
Settings::findIndexStrict(name); - const auto * ptr = tryGetConstraint(setting_index); - if (ptr) - return ptr->read_only; - else - return false; -} - - -void SettingsConstraints::set(const StringRef & name, const Field & min_value, const Field & max_value, bool read_only) -{ - size_t setting_index = Settings::findIndexStrict(name); - auto & ref = getConstraintRef(setting_index); - ref.min_value = min_value; - ref.max_value = max_value; - ref.read_only = read_only; -} - - -void SettingsConstraints::get(const StringRef & name, Field & min_value, Field & max_value, bool & read_only) const -{ - size_t setting_index = Settings::findIndexStrict(name); - const auto * ptr = tryGetConstraint(setting_index); - if (ptr) - { - min_value = ptr->min_value; - max_value = ptr->max_value; - read_only = ptr->read_only; - } - else - { - min_value = Field{}; - max_value = Field{}; - read_only = false; - } -} - - -void SettingsConstraints::merge(const SettingsConstraints & other) -{ - for (const auto & [setting_index, other_constraint] : other.constraints_by_index) - { - auto & constraint = constraints_by_index[setting_index]; - if (!other_constraint.min_value.isNull()) - constraint.min_value = other_constraint.min_value; - if (!other_constraint.max_value.isNull()) - constraint.max_value = other_constraint.max_value; - if (other_constraint.read_only) - constraint.read_only = true; - } -} - - -SettingsConstraints::Infos SettingsConstraints::getInfo() const -{ - Infos result; - result.reserve(constraints_by_index.size()); - for (const auto & [setting_index, constraint] : constraints_by_index) - { - result.emplace_back(); - Info & info = result.back(); - info.name = Settings::getName(setting_index); - info.min = constraint.min_value; - info.max = constraint.max_value; - info.read_only = constraint.read_only; - } - return result; -} - - -void SettingsConstraints::check(const Settings & current_settings, const SettingChange & change) const -{ - const String & name = change.name; - size_t setting_index = Settings::findIndex(name); - if (setting_index == Settings::npos) - return; - - Field new_value = Settings::valueToCorrespondingType(setting_index, change.value); - Field current_value = current_settings.get(setting_index); - - /// Setting isn't checked if value wasn't changed. - if (current_value == new_value) - return; - - if (!current_settings.allow_ddl && name == "allow_ddl") - throw Exception("Cannot modify 'allow_ddl' setting when DDL queries are prohibited for the user", ErrorCodes::QUERY_IS_PROHIBITED); - - /** The `readonly` value is understood as follows: - * 0 - everything allowed. - * 1 - only read queries can be made; you can not change the settings. - * 2 - You can only do read queries and you can change the settings, except for the `readonly` setting. 
- */ - if (current_settings.readonly == 1) - throw Exception("Cannot modify '" + name + "' setting in readonly mode", ErrorCodes::READONLY); - - if (current_settings.readonly > 1 && name == "readonly") - throw Exception("Cannot modify 'readonly' setting in readonly mode", ErrorCodes::READONLY); - - const Constraint * constraint = tryGetConstraint(setting_index); - if (constraint) - { - if (constraint->read_only) - throw Exception("Setting " + name + " should not be changed", ErrorCodes::SETTING_CONSTRAINT_VIOLATION); - - if (!constraint->min_value.isNull() && (new_value < constraint->min_value)) - throw Exception( - "Setting " + name + " shouldn't be less than " + applyVisitor(FieldVisitorToString(), constraint->min_value), - ErrorCodes::SETTING_CONSTRAINT_VIOLATION); - - if (!constraint->max_value.isNull() && (new_value > constraint->max_value)) - throw Exception( - "Setting " + name + " shouldn't be greater than " + applyVisitor(FieldVisitorToString(), constraint->max_value), - ErrorCodes::SETTING_CONSTRAINT_VIOLATION); - } -} - - -void SettingsConstraints::check(const Settings & current_settings, const SettingsChanges & changes) const -{ - for (const auto & change : changes) - check(current_settings, change); -} - - -SettingsConstraints::Constraint & SettingsConstraints::getConstraintRef(size_t index) -{ - auto it = constraints_by_index.find(index); - if (it == constraints_by_index.end()) - it = constraints_by_index.emplace(index, Constraint{}).first; - return it->second; -} - -const SettingsConstraints::Constraint * SettingsConstraints::tryGetConstraint(size_t index) const -{ - auto it = constraints_by_index.find(index); - if (it == constraints_by_index.end()) - return nullptr; - return &it->second; -} - - -void SettingsConstraints::setProfile(const String & profile_name, const Poco::Util::AbstractConfiguration & config) -{ - String elem = "profiles." + profile_name; - - Poco::Util::AbstractConfiguration::Keys config_keys; - config.keys(elem, config_keys); - - for (const std::string & key : config_keys) - { - if (key == "profile" || key.starts_with("profile[")) /// Inheritance of profiles from the current one. - setProfile(config.getString(elem + "." + key), config); - else - continue; - } - - String path_to_constraints = "profiles." + profile_name + ".constraints"; - if (config.has(path_to_constraints)) - loadFromConfig(path_to_constraints, config); -} - - -void SettingsConstraints::loadFromConfig(const String & path_to_constraints, const Poco::Util::AbstractConfiguration & config) -{ - if (!config.has(path_to_constraints)) - throw Exception("There is no path '" + path_to_constraints + "' in configuration file.", ErrorCodes::NO_ELEMENTS_IN_CONFIG); - - Poco::Util::AbstractConfiguration::Keys names; - config.keys(path_to_constraints, names); - - for (const String & name : names) - { - String path_to_name = path_to_constraints + "." + name; - Poco::Util::AbstractConfiguration::Keys constraint_types; - config.keys(path_to_name, constraint_types); - for (const String & constraint_type : constraint_types) - { - auto get_constraint_value = [&]{ return config.getString(path_to_name + "." 
+ constraint_type); }; - if (constraint_type == "min") - setMinValue(name, get_constraint_value()); - else if (constraint_type == "max") - setMaxValue(name, get_constraint_value()); - else if (constraint_type == "readonly") - setReadOnly(name, true); - else - throw Exception("Setting " + constraint_type + " value for " + name + " isn't supported", ErrorCodes::NOT_IMPLEMENTED); - } - } -} - - -bool SettingsConstraints::Constraint::operator==(const Constraint & rhs) const -{ - return (read_only == rhs.read_only) && (min_value == rhs.min_value) && (max_value == rhs.max_value); -} - - -bool operator ==(const SettingsConstraints & lhs, const SettingsConstraints & rhs) -{ - return lhs.constraints_by_index == rhs.constraints_by_index; -} -} diff --git a/dbms/src/Access/SettingsConstraints.h b/dbms/src/Access/SettingsConstraints.h deleted file mode 100644 index 3b4d0c28800..00000000000 --- a/dbms/src/Access/SettingsConstraints.h +++ /dev/null @@ -1,119 +0,0 @@ -#pragma once - -#include -#include - -namespace Poco -{ -namespace Util -{ - class AbstractConfiguration; -} -} - - -namespace DB -{ -struct Settings; - -/** Checks if specified changes of settings are allowed or not. - * If the changes are not allowed (i.e. violates some constraints) this class throws an exception. - * The constraints are set by editing the `users.xml` file. - * - * For examples, the following lines in `users.xml` will set that `max_memory_usage` cannot be greater than 20000000000, - * and `force_index_by_date` should be always equal to 0: - * - * - * - * 10000000000 - * 0 - * ... - * - * - * 200000 - * 20000000000 - * - * - * - * - * - * - * - * - * This class also checks that we are not in the read-only mode. - * If a setting cannot be change due to the read-only mode this class throws an exception. - * The value of `readonly` value is understood as follows: - * 0 - everything allowed. - * 1 - only read queries can be made; you can not change the settings. - * 2 - you can only do read queries and you can change the settings, except for the `readonly` setting. - */ -class SettingsConstraints -{ -public: - SettingsConstraints(); - SettingsConstraints(const SettingsConstraints & src); - SettingsConstraints & operator =(const SettingsConstraints & src); - SettingsConstraints(SettingsConstraints && src); - SettingsConstraints & operator =(SettingsConstraints && src); - ~SettingsConstraints(); - - void clear(); - bool empty() const { return constraints_by_index.empty(); } - - void setMinValue(const StringRef & name, const Field & min_value); - Field getMinValue(const StringRef & name) const; - - void setMaxValue(const StringRef & name, const Field & max_value); - Field getMaxValue(const StringRef & name) const; - - void setReadOnly(const StringRef & name, bool read_only); - bool isReadOnly(const StringRef & name) const; - - void set(const StringRef & name, const Field & min_value, const Field & max_value, bool read_only); - void get(const StringRef & name, Field & min_value, Field & max_value, bool & read_only) const; - - void merge(const SettingsConstraints & other); - - struct Info - { - StringRef name; - Field min; - Field max; - bool read_only = false; - }; - using Infos = std::vector; - - Infos getInfo() const; - - void check(const Settings & current_settings, const SettingChange & change) const; - void check(const Settings & current_settings, const SettingsChanges & changes) const; - - /** Set multiple settings from "profile" (in server configuration file (users.xml), profiles contain groups of multiple settings). 
- * The profile can also be set using the `set` functions, like the profile setting. - */ - void setProfile(const String & profile_name, const Poco::Util::AbstractConfiguration & config); - - /// Loads the constraints from configuration file, at "path" prefix in configuration. - void loadFromConfig(const String & path, const Poco::Util::AbstractConfiguration & config); - - friend bool operator ==(const SettingsConstraints & lhs, const SettingsConstraints & rhs); - friend bool operator !=(const SettingsConstraints & lhs, const SettingsConstraints & rhs) { return !(lhs == rhs); } - -private: - struct Constraint - { - bool read_only = false; - Field min_value; - Field max_value; - - bool operator ==(const Constraint & rhs) const; - bool operator !=(const Constraint & rhs) const { return !(*this == rhs); } - }; - - Constraint & getConstraintRef(size_t index); - const Constraint * tryGetConstraint(size_t) const; - - std::unordered_map constraints_by_index; -}; - -} diff --git a/dbms/src/Access/User.cpp b/dbms/src/Access/User.cpp deleted file mode 100644 index bc5b062db6a..00000000000 --- a/dbms/src/Access/User.cpp +++ /dev/null @@ -1,18 +0,0 @@ -#include - - -namespace DB -{ - -bool User::equal(const IAccessEntity & other) const -{ - if (!IAccessEntity::equal(other)) - return false; - const auto & other_user = typeid_cast(other); - return (authentication == other_user.authentication) && (allowed_client_hosts == other_user.allowed_client_hosts) - && (access == other_user.access) && (access_with_grant_option == other_user.access_with_grant_option) - && (granted_roles == other_user.granted_roles) && (granted_roles_with_admin_option == other_user.granted_roles_with_admin_option) - && (default_roles == other_user.default_roles) && (profile == other_user.profile); -} - -} diff --git a/dbms/src/Access/User.h b/dbms/src/Access/User.h deleted file mode 100644 index 3a9b3cd7014..00000000000 --- a/dbms/src/Access/User.h +++ /dev/null @@ -1,32 +0,0 @@ -#pragma once - -#include -#include -#include -#include -#include -#include -#include - - -namespace DB -{ -/** User and ACL. 
- */ -struct User : public IAccessEntity -{ - Authentication authentication; - AllowedClientHosts allowed_client_hosts = AllowedClientHosts::AnyHostTag{}; - AccessRights access; - AccessRights access_with_grant_option; - boost::container::flat_set granted_roles; - boost::container::flat_set granted_roles_with_admin_option; - GenericRoleSet default_roles = GenericRoleSet::AllTag{}; - String profile; - - bool equal(const IAccessEntity & other) const override; - std::shared_ptr clone() const override { return cloneImpl(); } -}; - -using UserPtr = std::shared_ptr; -} diff --git a/dbms/src/Access/UsersConfigAccessStorage.cpp b/dbms/src/Access/UsersConfigAccessStorage.cpp deleted file mode 100644 index a6bb2be467b..00000000000 --- a/dbms/src/Access/UsersConfigAccessStorage.cpp +++ /dev/null @@ -1,430 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - - -namespace DB -{ -namespace ErrorCodes -{ - extern const int BAD_ARGUMENTS; - extern const int UNKNOWN_ADDRESS_PATTERN_TYPE; -} - - -namespace -{ - char getTypeChar(std::type_index type) - { - if (type == typeid(User)) - return 'U'; - if (type == typeid(Quota)) - return 'Q'; - if (type == typeid(RowPolicy)) - return 'P'; - return 0; - } - - - UUID generateID(std::type_index type, const String & name) - { - Poco::MD5Engine md5; - md5.update(name); - char type_storage_chars[] = " USRSXML"; - type_storage_chars[0] = getTypeChar(type); - md5.update(type_storage_chars, strlen(type_storage_chars)); - UUID result; - memcpy(&result, md5.digest().data(), md5.digestLength()); - return result; - } - - - UUID generateID(const IAccessEntity & entity) { return generateID(entity.getType(), entity.getFullName()); } - - UserPtr parseUser(const Poco::Util::AbstractConfiguration & config, const String & user_name) - { - auto user = std::make_shared(); - user->setName(user_name); - - String user_config = "users." + user_name; - - bool has_password = config.has(user_config + ".password"); - bool has_password_sha256_hex = config.has(user_config + ".password_sha256_hex"); - bool has_password_double_sha1_hex = config.has(user_config + ".password_double_sha1_hex"); - - if (has_password + has_password_sha256_hex + has_password_double_sha1_hex > 1) - throw Exception("More than one field of 'password', 'password_sha256_hex', 'password_double_sha1_hex' is used to specify password for user " + user_name + ". Must be only one of them.", - ErrorCodes::BAD_ARGUMENTS); - - if (!has_password && !has_password_sha256_hex && !has_password_double_sha1_hex) - throw Exception("Either 'password' or 'password_sha256_hex' or 'password_double_sha1_hex' must be specified for user " + user_name + ".", ErrorCodes::BAD_ARGUMENTS); - - if (has_password) - { - user->authentication = Authentication{Authentication::PLAINTEXT_PASSWORD}; - user->authentication.setPassword(config.getString(user_config + ".password")); - } - else if (has_password_sha256_hex) - { - user->authentication = Authentication{Authentication::SHA256_PASSWORD}; - user->authentication.setPasswordHashHex(config.getString(user_config + ".password_sha256_hex")); - } - else if (has_password_double_sha1_hex) - { - user->authentication = Authentication{Authentication::DOUBLE_SHA1_PASSWORD}; - user->authentication.setPasswordHashHex(config.getString(user_config + ".password_double_sha1_hex")); - } - - user->profile = config.getString(user_config + ".profile"); - - /// Fill list of allowed hosts. 
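One detail worth noting in the network-key dispatch that follows: keys are matched by prefix, so `host_regexp` has to be tested before `host`, because every `host_regexp...` key also begins with `host`. A standalone sketch of the same dispatch (illustrative helper name, using the portable rfind idiom in place of starts_with):

#include <iostream>
#include <string>

/// Illustrative version of the prefix dispatch used for the "networks" keys below.
/// The longer prefix "host_regexp" must be checked before "host", otherwise a key
/// such as "host_regexp[1]" would be misclassified as a plain host name.
static std::string classifyNetworkKey(const std::string & key)
{
    if (key.rfind("ip", 0) == 0)
        return "subnet";
    if (key.rfind("host_regexp", 0) == 0)
        return "host name regexp";
    if (key.rfind("host", 0) == 0)
        return "host name";
    return "unknown";
}

int main()
{
    std::cout << classifyNetworkKey("host_regexp[1]") << '\n';   /// prints "host name regexp"
}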
- const auto networks_config = user_config + ".networks"; - if (config.has(networks_config)) - { - Poco::Util::AbstractConfiguration::Keys keys; - config.keys(networks_config, keys); - user->allowed_client_hosts.clear(); - for (const String & key : keys) - { - String value = config.getString(networks_config + "." + key); - if (key.starts_with("ip")) - user->allowed_client_hosts.addSubnet(value); - else if (key.starts_with("host_regexp")) - user->allowed_client_hosts.addNameRegexp(value); - else if (key.starts_with("host")) - user->allowed_client_hosts.addName(value); - else - throw Exception("Unknown address pattern type: " + key, ErrorCodes::UNKNOWN_ADDRESS_PATTERN_TYPE); - } - } - - /// Fill list of allowed databases. - const auto databases_config = user_config + ".allow_databases"; - std::optional databases; - if (config.has(databases_config)) - { - Poco::Util::AbstractConfiguration::Keys keys; - config.keys(databases_config, keys); - databases.emplace(); - databases->reserve(keys.size()); - for (const auto & key : keys) - { - const auto database_name = config.getString(databases_config + "." + key); - databases->push_back(database_name); - } - } - - /// Fill list of allowed dictionaries. - const auto dictionaries_config = user_config + ".allow_dictionaries"; - std::optional dictionaries; - if (config.has(dictionaries_config)) - { - Poco::Util::AbstractConfiguration::Keys keys; - config.keys(dictionaries_config, keys); - dictionaries.emplace(); - dictionaries->reserve(keys.size()); - for (const auto & key : keys) - { - const auto dictionary_name = config.getString(dictionaries_config + "." + key); - dictionaries->push_back(dictionary_name); - } - } - - user->access.grant(AccessType::ALL); /// By default all databases are accessible. - - if (databases) - { - user->access.fullRevoke(AccessFlags::databaseLevel()); - for (const String & database : *databases) - user->access.grant(AccessFlags::databaseLevel(), database); - } - - if (dictionaries) - { - user->access.fullRevoke(AccessType::dictGet, IDictionary::NO_DATABASE_TAG); - for (const String & dictionary : *dictionaries) - user->access.grant(AccessType::dictGet, IDictionary::NO_DATABASE_TAG, dictionary); - } - else if (databases) - user->access.grant(AccessType::dictGet, IDictionary::NO_DATABASE_TAG); - - user->access_with_grant_option = user->access; - - return user; - } - - - std::vector parseUsers(const Poco::Util::AbstractConfiguration & config, Poco::Logger * log) - { - Poco::Util::AbstractConfiguration::Keys user_names; - config.keys("users", user_names); - - std::vector users; - users.reserve(user_names.size()); - for (const auto & user_name : user_names) - { - try - { - users.push_back(parseUser(config, user_name)); - } - catch (...) - { - tryLogCurrentException(log, "Could not parse user " + backQuote(user_name)); - } - } - return users; - } - - - QuotaPtr parseQuota(const Poco::Util::AbstractConfiguration & config, const String & quota_name, const std::vector & user_ids) - { - auto quota = std::make_shared(); - quota->setName(quota_name); - - using KeyType = Quota::KeyType; - String quota_config = "quotas." 
+ quota_name; - if (config.has(quota_config + ".keyed_by_ip")) - quota->key_type = KeyType::IP_ADDRESS; - else if (config.has(quota_config + ".keyed")) - quota->key_type = KeyType::CLIENT_KEY_OR_USER_NAME; - else - quota->key_type = KeyType::USER_NAME; - - Poco::Util::AbstractConfiguration::Keys interval_keys; - config.keys(quota_config, interval_keys); - - for (const String & interval_key : interval_keys) - { - if (!startsWith(interval_key, "interval")) - continue; - - String interval_config = quota_config + "." + interval_key; - std::chrono::seconds duration{config.getInt(interval_config + ".duration", 0)}; - if (duration.count() <= 0) /// Skip quotas with non-positive duration. - continue; - - quota->all_limits.emplace_back(); - auto & limits = quota->all_limits.back(); - limits.duration = duration; - limits.randomize_interval = config.getBool(interval_config + ".randomize", false); - - using ResourceType = Quota::ResourceType; - limits.max[ResourceType::QUERIES] = config.getUInt64(interval_config + ".queries", Quota::UNLIMITED); - limits.max[ResourceType::ERRORS] = config.getUInt64(interval_config + ".errors", Quota::UNLIMITED); - limits.max[ResourceType::RESULT_ROWS] = config.getUInt64(interval_config + ".result_rows", Quota::UNLIMITED); - limits.max[ResourceType::RESULT_BYTES] = config.getUInt64(interval_config + ".result_bytes", Quota::UNLIMITED); - limits.max[ResourceType::READ_ROWS] = config.getUInt64(interval_config + ".read_rows", Quota::UNLIMITED); - limits.max[ResourceType::READ_BYTES] = config.getUInt64(interval_config + ".read_bytes", Quota::UNLIMITED); - limits.max[ResourceType::EXECUTION_TIME] = Quota::secondsToExecutionTime(config.getUInt64(interval_config + ".execution_time", Quota::UNLIMITED)); - } - - quota->roles.add(user_ids); - - return quota; - } - - - std::vector parseQuotas(const Poco::Util::AbstractConfiguration & config, Poco::Logger * log) - { - Poco::Util::AbstractConfiguration::Keys user_names; - config.keys("users", user_names); - std::unordered_map> quota_to_user_ids; - for (const auto & user_name : user_names) - { - if (config.has("users." + user_name + ".quota")) - quota_to_user_ids[config.getString("users." + user_name + ".quota")].push_back(generateID(typeid(User), user_name)); - } - - Poco::Util::AbstractConfiguration::Keys quota_names; - config.keys("quotas", quota_names); - std::vector quotas; - quotas.reserve(quota_names.size()); - for (const auto & quota_name : quota_names) - { - try - { - auto it = quota_to_user_ids.find(quota_name); - const std::vector & quota_users = (it != quota_to_user_ids.end()) ? std::move(it->second) : std::vector{}; - quotas.push_back(parseQuota(config, quota_name, quota_users)); - } - catch (...) - { - tryLogCurrentException(log, "Could not parse quota " + backQuote(quota_name)); - } - } - return quotas; - } - - - std::vector parseRowPolicies(const Poco::Util::AbstractConfiguration & config, Poco::Logger * log) - { - std::map, std::unordered_map> all_filters_map; - Poco::Util::AbstractConfiguration::Keys user_names; - - try - { - config.keys("users", user_names); - for (const String & user_name : user_names) - { - const String databases_config = "users." + user_name + ".databases"; - if (config.has(databases_config)) - { - Poco::Util::AbstractConfiguration::Keys databases; - config.keys(databases_config, databases); - - /// Read tables within databases - for (const String & database : databases) - { - const String database_config = databases_config + "." 
+ database; - Poco::Util::AbstractConfiguration::Keys keys_in_database_config; - config.keys(database_config, keys_in_database_config); - - /// Read table properties - for (const String & key_in_database_config : keys_in_database_config) - { - String table_name = key_in_database_config; - String filter_config = database_config + "." + table_name + ".filter"; - - if (key_in_database_config.starts_with("table[")) - { - const auto table_name_config = database_config + "." + table_name + "[@name]"; - if (config.has(table_name_config)) - { - table_name = config.getString(table_name_config); - filter_config = database_config + ".table[@name='" + table_name + "']"; - } - } - - all_filters_map[{database, table_name}][user_name] = config.getString(filter_config); - } - } - } - } - } - catch (...) - { - tryLogCurrentException(log, "Could not parse row policies"); - } - - std::vector policies; - for (auto & [database_and_table_name, user_to_filters] : all_filters_map) - { - const auto & [database, table_name] = database_and_table_name; - for (const String & user_name : user_names) - { - auto it = user_to_filters.find(user_name); - String filter = (it != user_to_filters.end()) ? it->second : "1"; - - auto policy = std::make_shared(); - policy->setFullName(database, table_name, user_name); - policy->conditions[RowPolicy::SELECT_FILTER] = filter; - policy->roles.add(generateID(typeid(User), user_name)); - policies.push_back(policy); - } - } - return policies; - } -} - - -UsersConfigAccessStorage::UsersConfigAccessStorage() : IAccessStorage("users.xml") -{ -} - - -UsersConfigAccessStorage::~UsersConfigAccessStorage() {} - - -void UsersConfigAccessStorage::loadFromConfig(const Poco::Util::AbstractConfiguration & config) -{ - std::vector> all_entities; - for (const auto & entity : parseUsers(config, getLogger())) - all_entities.emplace_back(generateID(*entity), entity); - for (const auto & entity : parseQuotas(config, getLogger())) - all_entities.emplace_back(generateID(*entity), entity); - for (const auto & entity : parseRowPolicies(config, getLogger())) - all_entities.emplace_back(generateID(*entity), entity); - memory_storage.setAll(all_entities); -} - - -std::optional UsersConfigAccessStorage::findImpl(std::type_index type, const String & name) const -{ - return memory_storage.find(type, name); -} - - -std::vector UsersConfigAccessStorage::findAllImpl(std::type_index type) const -{ - return memory_storage.findAll(type); -} - - -bool UsersConfigAccessStorage::existsImpl(const UUID & id) const -{ - return memory_storage.exists(id); -} - - -AccessEntityPtr UsersConfigAccessStorage::readImpl(const UUID & id) const -{ - return memory_storage.read(id); -} - - -String UsersConfigAccessStorage::readNameImpl(const UUID & id) const -{ - return memory_storage.readName(id); -} - - -UUID UsersConfigAccessStorage::insertImpl(const AccessEntityPtr & entity, bool) -{ - throwReadonlyCannotInsert(entity->getType(), entity->getFullName()); -} - - -void UsersConfigAccessStorage::removeImpl(const UUID & id) -{ - auto entity = read(id); - throwReadonlyCannotRemove(entity->getType(), entity->getFullName()); -} - - -void UsersConfigAccessStorage::updateImpl(const UUID & id, const UpdateFunc &) -{ - auto entity = read(id); - throwReadonlyCannotUpdate(entity->getType(), entity->getFullName()); -} - - -ext::scope_guard UsersConfigAccessStorage::subscribeForChangesImpl(const UUID & id, const OnChangedHandler & handler) const -{ - return memory_storage.subscribeForChanges(id, handler); -} - - -ext::scope_guard 
UsersConfigAccessStorage::subscribeForChangesImpl(std::type_index type, const OnChangedHandler & handler) const -{ - return memory_storage.subscribeForChanges(type, handler); -} - - -bool UsersConfigAccessStorage::hasSubscriptionImpl(const UUID & id) const -{ - return memory_storage.hasSubscription(id); -} - - -bool UsersConfigAccessStorage::hasSubscriptionImpl(std::type_index type) const -{ - return memory_storage.hasSubscription(type); -} -} diff --git a/dbms/src/Access/UsersConfigAccessStorage.h b/dbms/src/Access/UsersConfigAccessStorage.h deleted file mode 100644 index 45b46c8c179..00000000000 --- a/dbms/src/Access/UsersConfigAccessStorage.h +++ /dev/null @@ -1,42 +0,0 @@ -#pragma once - -#include - - -namespace Poco -{ - namespace Util - { - class AbstractConfiguration; - } -} - - -namespace DB -{ -/// Implementation of IAccessStorage which loads all from users.xml periodically. -class UsersConfigAccessStorage : public IAccessStorage -{ -public: - UsersConfigAccessStorage(); - ~UsersConfigAccessStorage() override; - - void loadFromConfig(const Poco::Util::AbstractConfiguration & config); - -private: - std::optional findImpl(std::type_index type, const String & name) const override; - std::vector findAllImpl(std::type_index type) const override; - bool existsImpl(const UUID & id) const override; - AccessEntityPtr readImpl(const UUID & id) const override; - String readNameImpl(const UUID & id) const override; - UUID insertImpl(const AccessEntityPtr & entity, bool replace_if_exists) override; - void removeImpl(const UUID & id) override; - void updateImpl(const UUID & id, const UpdateFunc & update_func) override; - ext::scope_guard subscribeForChangesImpl(const UUID & id, const OnChangedHandler & handler) const override; - ext::scope_guard subscribeForChangesImpl(std::type_index type, const OnChangedHandler & handler) const override; - bool hasSubscriptionImpl(const UUID & id) const override; - bool hasSubscriptionImpl(std::type_index type) const override; - - MemoryAccessStorage memory_storage; -}; -} diff --git a/dbms/src/AggregateFunctions/AggregateFunctionGroupUniqArray.cpp b/dbms/src/AggregateFunctions/AggregateFunctionGroupUniqArray.cpp deleted file mode 100644 index e7d6ea2528c..00000000000 --- a/dbms/src/AggregateFunctions/AggregateFunctionGroupUniqArray.cpp +++ /dev/null @@ -1,114 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include "registerAggregateFunctions.h" - - -namespace DB -{ - -namespace ErrorCodes -{ - extern const int ILLEGAL_TYPE_OF_ARGUMENT; - extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; - extern const int BAD_ARGUMENTS; -} - -namespace -{ - -/// Substitute return type for Date and DateTime -template -class AggregateFunctionGroupUniqArrayDate : public AggregateFunctionGroupUniqArray -{ -public: - AggregateFunctionGroupUniqArrayDate(const DataTypePtr & argument_type, UInt64 max_elems_ = std::numeric_limits::max()) : AggregateFunctionGroupUniqArray(argument_type, max_elems_) {} - DataTypePtr getReturnType() const override { return std::make_shared(std::make_shared()); } -}; - -template -class AggregateFunctionGroupUniqArrayDateTime : public AggregateFunctionGroupUniqArray -{ -public: - AggregateFunctionGroupUniqArrayDateTime(const DataTypePtr & argument_type, UInt64 max_elems_ = std::numeric_limits::max()) : AggregateFunctionGroupUniqArray(argument_type, max_elems_) {} - DataTypePtr getReturnType() const override { return std::make_shared(std::make_shared()); } -}; - -template -static IAggregateFunction * createWithExtraTypes(const 
-template
-static IAggregateFunction * createWithExtraTypes(const DataTypePtr & argument_type, TArgs && ... args)
-{
- WhichDataType which(argument_type);
- if (which.idx == TypeIndex::Date) return new AggregateFunctionGroupUniqArrayDate(argument_type, std::forward(args)...);
- else if (which.idx == TypeIndex::DateTime) return new AggregateFunctionGroupUniqArrayDateTime(argument_type, std::forward(args)...);
- else
- {
- /// Check that we can use plain version of AggregateFunctionGroupUniqArrayGeneric
- if (argument_type->isValueUnambiguouslyRepresentedInContiguousMemoryRegion())
- return new AggregateFunctionGroupUniqArrayGeneric(argument_type, std::forward(args)...);
- else
- return new AggregateFunctionGroupUniqArrayGeneric(argument_type, std::forward(args)...);
- }
-}
-
-template
-inline AggregateFunctionPtr createAggregateFunctionGroupUniqArrayImpl(const std::string & name, const DataTypePtr & argument_type, TArgs ... args)
-{
-
- AggregateFunctionPtr res(createWithNumericType(*argument_type, argument_type, std::forward(args)...));
-
- if (!res)
- res = AggregateFunctionPtr(createWithExtraTypes(argument_type, std::forward(args)...));
-
- if (!res)
- throw Exception("Illegal type " + argument_type->getName() +
- " of argument for aggregate function " + name, ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
-
- return res;
-
-}
-
-AggregateFunctionPtr createAggregateFunctionGroupUniqArray(const std::string & name, const DataTypes & argument_types, const Array & parameters)
-{
- assertUnary(name, argument_types);
-
- bool limit_size = false;
- UInt64 max_elems = std::numeric_limits::max();
-
- if (parameters.empty())
- {
- // no limit
- }
- else if (parameters.size() == 1)
- {
- auto type = parameters[0].getType();
- if (type != Field::Types::Int64 && type != Field::Types::UInt64)
- throw Exception("Parameter for aggregate function " + name + " should be positive number", ErrorCodes::BAD_ARGUMENTS);
-
- if ((type == Field::Types::Int64 && parameters[0].get() < 0) ||
- (type == Field::Types::UInt64 && parameters[0].get() == 0))
- throw Exception("Parameter for aggregate function " + name + " should be positive number", ErrorCodes::BAD_ARGUMENTS);
-
- limit_size = true;
- max_elems = parameters[0].get();
- }
- else
- throw Exception("Incorrect number of parameters for aggregate function " + name + ", should be 0 or 1",
- ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);
-
- if (!limit_size)
- return createAggregateFunctionGroupUniqArrayImpl(name, argument_types[0]);
- else
- return createAggregateFunctionGroupUniqArrayImpl(name, argument_types[0], max_elems);
-}
-
-}
-
-void registerAggregateFunctionGroupUniqArray(AggregateFunctionFactory & factory)
-{
- factory.registerFunction("groupUniqArray", createAggregateFunctionGroupUniqArray);
-}
-
-}
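The removed factory createAggregateFunctionGroupUniqArray above accepts either no parameter (unlimited) or exactly one strictly positive limit, and rejects everything else. A standalone sketch of that validation rule, using a hypothetical parameter representation instead of the real Field API:

// Sketch: zero parameters means "no limit"; one parameter must be strictly positive.
// parseMaxElems and its signed-integer parameter list are illustrative assumptions.
#include <cstdint>
#include <limits>
#include <stdexcept>
#include <vector>

std::uint64_t parseMaxElems(const std::vector<std::int64_t> & parameters)
{
    if (parameters.empty())
        return std::numeric_limits<std::uint64_t>::max(); // no limit

    if (parameters.size() != 1)
        throw std::invalid_argument("expected 0 or 1 parameters");

    if (parameters[0] <= 0)
        throw std::invalid_argument("parameter should be a positive number");

    return static_cast<std::uint64_t>(parameters[0]);
}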
diff --git a/dbms/src/AggregateFunctions/AggregateFunctionSumMap.cpp b/dbms/src/AggregateFunctions/AggregateFunctionSumMap.cpp
deleted file mode 100644
index 5bedf72c39b..00000000000
--- a/dbms/src/AggregateFunctions/AggregateFunctionSumMap.cpp
+++ /dev/null
@@ -1,133 +0,0 @@
-#include
-#include
-#include
-#include
-#include
-#include
-#include "registerAggregateFunctions.h"
-
-
-namespace DB
-{
-namespace ErrorCodes
-{
- extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
- extern const int ILLEGAL_TYPE_OF_ARGUMENT;
-}
-
-namespace
-{
-
-struct WithOverflowPolicy
-{
- /// Overflow, meaning that the returned type is the same as the input type.
- static DataTypePtr promoteType(const DataTypePtr & data_type) { return data_type; }
-};
-
-struct WithoutOverflowPolicy
-{
- /// No overflow, meaning we promote the types if necessary.
- static DataTypePtr promoteType(const DataTypePtr & data_type)
- {
- if (!data_type->canBePromoted())
- throw Exception{"Values to be summed are expected to be Numeric, Float or Decimal.", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT};
-
- return data_type->promoteNumericType();
- }
-};
-
-template
-using SumMapWithOverflow = AggregateFunctionSumMap;
-
-template
-using SumMapWithoutOverflow = AggregateFunctionSumMap;
-
-template
-using SumMapFilteredWithOverflow = AggregateFunctionSumMapFiltered;
-
-template
-using SumMapFilteredWithoutOverflow = AggregateFunctionSumMapFiltered;
-
-using SumMapArgs = std::pair;
-
-SumMapArgs parseArguments(const std::string & name, const DataTypes & arguments)
-{
- if (arguments.size() < 2)
- throw Exception("Aggregate function " + name + " requires at least two arguments of Array type.",
- ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);
-
- const auto * array_type = checkAndGetDataType(arguments[0].get());
- if (!array_type)
- throw Exception("First argument for function " + name + " must be an array.",
- ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
-
-
- DataTypePtr keys_type = array_type->getNestedType();
-
- DataTypes values_types;
- values_types.reserve(arguments.size() - 1);
- for (size_t i = 1; i < arguments.size(); ++i)
- {
- array_type = checkAndGetDataType(arguments[i].get());
- if (!array_type)
- throw Exception("Argument #" + toString(i) + " for function " + name + " must be an array.",
- ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
- values_types.push_back(array_type->getNestedType());
- }
-
- return {std::move(keys_type), std::move(values_types)};
-}
-
-template