Mirror of https://github.com/ClickHouse/ClickHouse.git
Merge branch 'master' into dynamic-constraints
Commit a0b394a6c2
.github/actions/debug/action.yml (vendored, 34 changes)

@@ -4,15 +4,31 @@ description: Prints workflow debug info
runs:
  using: "composite"
  steps:
    - name: Print envs
    - name: Envs, event.json and contexts
      shell: bash
      run: |
        echo "::group::Envs"
        env
        echo "::endgroup::"
    - name: Print Event.json
      shell: bash
      run: |
        echo "::group::Event.json"
        echo '::group::Environment variables'
        env | sort
        echo '::endgroup::'

        echo '::group::event.json'
        python3 -m json.tool "$GITHUB_EVENT_PATH"
        echo "::endgroup::"
        echo '::endgroup::'

        cat << 'EOF'
        ::group::github context
        ${{ toJSON(github) }}
        ::endgroup::

        ::group::env context
        ${{ toJSON(env) }}
        ::endgroup::

        ::group::runner context
        ${{ toJSON(runner) }}
        ::endgroup::

        ::group::job context
        ${{ toJSON(job) }}
        ::endgroup::
        EOF
.github/workflows/backport_branches.yml (vendored, 2 changes)

@@ -27,6 +27,8 @@ jobs:
          clear-repository: true # to ensure correct digests
          fetch-depth: 0 # to get version
          filter: tree:0
      - name: Debug Info
        uses: ./.github/actions/debug
      - name: Labels check
        run: |
          cd "$GITHUB_WORKSPACE/tests/ci"
.github/workflows/cherry_pick.yml (vendored, 2 changes)

@@ -33,6 +33,8 @@ jobs:
          clear-repository: true
          token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
          fetch-depth: 0
      - name: Debug Info
        uses: ./.github/actions/debug
      - name: Cherry pick
        run: |
          cd "$GITHUB_WORKSPACE/tests/ci"
.github/workflows/create_release.yml (vendored, 4 changes)

@@ -56,13 +56,13 @@ jobs:
      GH_TOKEN: ${{ secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN }}
    runs-on: [self-hosted, release-maker]
    steps:
      - name: DebugInfo
        uses: hmarr/debug-action@f7318c783045ac39ed9bb497e22ce835fdafbfe6
      - name: Check out repository code
        uses: ClickHouse/checkout@v1
        with:
          token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
          fetch-depth: 0
      - name: Debug Info
        uses: ./.github/actions/debug
      - name: Prepare Release Info
        shell: bash
        run: |
.github/workflows/docker_test_images.yml (vendored, 1 change)

@@ -11,6 +11,7 @@ name: Build docker images
        required: false
        type: boolean
        default: false

jobs:
  DockerBuildAarch64:
    runs-on: [self-hosted, style-checker-aarch64]
.github/workflows/jepsen.yml (vendored, 7 changes)

@@ -8,27 +8,28 @@ on: # yamllint disable-line rule:truthy
  schedule:
    - cron: '0 */6 * * *'
  workflow_dispatch:

jobs:
  RunConfig:
    runs-on: [self-hosted, style-checker-aarch64]
    outputs:
      data: ${{ steps.runconfig.outputs.CI_DATA }}
    steps:
      - name: DebugInfo
        uses: hmarr/debug-action@f7318c783045ac39ed9bb497e22ce835fdafbfe6
      - name: Check out repository code
        uses: ClickHouse/checkout@v1
        with:
          clear-repository: true # to ensure correct digests
          fetch-depth: 0 # to get version
          filter: tree:0
      - name: Debug Info
        uses: ./.github/actions/debug
      - name: PrepareRunConfig
        id: runconfig
        run: |
          echo "::group::configure CI run"
          python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --configure --workflow "$GITHUB_WORKFLOW" --outfile ${{ runner.temp }}/ci_run_data.json
          echo "::endgroup::"

          echo "::group::CI run configure results"
          python3 -m json.tool ${{ runner.temp }}/ci_run_data.json
          echo "::endgroup::"
.github/workflows/master.yml (vendored, 4 changes)

@@ -15,14 +15,14 @@ jobs:
    outputs:
      data: ${{ steps.runconfig.outputs.CI_DATA }}
    steps:
      - name: DebugInfo
        uses: hmarr/debug-action@f7318c783045ac39ed9bb497e22ce835fdafbfe6
      - name: Check out repository code
        uses: ClickHouse/checkout@v1
        with:
          clear-repository: true # to ensure correct digests
          fetch-depth: 0 # to get version
          filter: tree:0
      - name: Debug Info
        uses: ./.github/actions/debug
      - name: Merge sync PR
        run: |
          cd "$GITHUB_WORKSPACE/tests/ci"
.github/workflows/merge_queue.yml (vendored, 4 changes)

@@ -14,14 +14,14 @@ jobs:
    outputs:
      data: ${{ steps.runconfig.outputs.CI_DATA }}
    steps:
      - name: DebugInfo
        uses: hmarr/debug-action@f7318c783045ac39ed9bb497e22ce835fdafbfe6
      - name: Check out repository code
        uses: ClickHouse/checkout@v1
        with:
          clear-repository: true # to ensure correct digests
          fetch-depth: 0 # to get a version
          filter: tree:0
      - name: Debug Info
        uses: ./.github/actions/debug
      - name: Cancel PR workflow
        run: |
          python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --cancel-previous-run
.github/workflows/nightly.yml (vendored, 4 changes)

@@ -15,14 +15,14 @@ jobs:
    outputs:
      data: ${{ steps.runconfig.outputs.CI_DATA }}
    steps:
      - name: DebugInfo
        uses: hmarr/debug-action@f7318c783045ac39ed9bb497e22ce835fdafbfe6
      - name: Check out repository code
        uses: ClickHouse/checkout@v1
        with:
          clear-repository: true # to ensure correct digests
          fetch-depth: 0 # to get version
          filter: tree:0
      - name: Debug Info
        uses: ./.github/actions/debug
      - name: PrepareRunConfig
        id: runconfig
        run: |
.github/workflows/pull_request.yml (vendored, 4 changes)

@@ -25,14 +25,14 @@ jobs:
    outputs:
      data: ${{ steps.runconfig.outputs.CI_DATA }}
    steps:
      - name: DebugInfo
        uses: hmarr/debug-action@f7318c783045ac39ed9bb497e22ce835fdafbfe6
      - name: Check out repository code
        uses: ClickHouse/checkout@v1
        with:
          clear-repository: true # to ensure correct digests
          fetch-depth: 0 # to get a version
          filter: tree:0
      - name: Debug Info
        uses: ./.github/actions/debug
      - name: Cancel previous Sync PR workflow
        run: |
          python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --cancel-previous-run
.github/workflows/release_branches.yml (vendored, 2 changes)

@@ -24,6 +24,8 @@ jobs:
          clear-repository: true # to ensure correct digests
          fetch-depth: 0 # to get version
          filter: tree:0
      - name: Debug Info
        uses: ./.github/actions/debug
      - name: Labels check
        run: |
          cd "$GITHUB_WORKSPACE/tests/ci"
.github/workflows/reusable_simple_job.yml (vendored, 4 changes)

@@ -62,8 +62,6 @@ jobs:
    env:
      GITHUB_JOB_OVERRIDDEN: ${{inputs.test_name}}
    steps:
      - name: DebugInfo
        uses: hmarr/debug-action@f7318c783045ac39ed9bb497e22ce835fdafbfe6
      - name: Check out repository code
        uses: ClickHouse/checkout@v1
        with:
@@ -72,6 +70,8 @@ jobs:
          submodules: ${{inputs.submodules}}
          fetch-depth: ${{inputs.checkout_depth}}
          filter: tree:0
      - name: Debug Info
        uses: ./.github/actions/debug
      - name: Set build envs
        run: |
          cat >> "$GITHUB_ENV" << 'EOF'
.gitmodules (vendored, 6 changes)

@@ -170,9 +170,6 @@
[submodule "contrib/fast_float"]
    path = contrib/fast_float
    url = https://github.com/fastfloat/fast_float
[submodule "contrib/libpq"]
    path = contrib/libpq
    url = https://github.com/ClickHouse/libpq
[submodule "contrib/NuRaft"]
    path = contrib/NuRaft
    url = https://github.com/ClickHouse/NuRaft
@@ -369,3 +366,6 @@
[submodule "contrib/numactl"]
    path = contrib/numactl
    url = https://github.com/ClickHouse/numactl.git
[submodule "contrib/postgres"]
    path = contrib/postgres
    url = https://github.com/ClickHouse/postgres.git
CITATION.cff (new file, 31 lines)

@@ -0,0 +1,31 @@
# This CITATION.cff file was generated with cffinit.

cff-version: 1.2.0
title: "ClickHouse"
message: "If you use this software, please cite it as below."
type: software
authors:
  - family-names: "Milovidov"
    given-names: "Alexey"
repository-code: 'https://github.com/ClickHouse/ClickHouse'
url: 'https://clickhouse.com'
license: Apache-2.0
preferred-citation:
  type: article
  authors:
    - family-names: "Schulze"
      given-names: "Robert"
    - family-names: "Schreiber"
      given-names: "Tom"
    - family-names: "Yatsishin"
      given-names: "Ilya"
    - family-names: "Dahimene"
      given-names: "Ryadh"
    - family-names: "Milovidov"
      given-names: "Alexey"
  journal: "Proceedings of the VLDB Endowment"
  title: "ClickHouse - Lightning Fast Analytics for Everyone"
  year: 2024
  volume: 17
  issue: 12
  doi: 10.14778/3685800.3685802
README.md (33 changes)

@@ -40,30 +40,33 @@ Every month we get together with the community (users, contributors, customers,

Keep an eye out for upcoming meetups and events around the world. Somewhere else you want us to be? Please feel free to reach out to tyler `<at>` clickhouse `<dot>` com. You can also peruse [ClickHouse Events](https://clickhouse.com/company/news-events) for a list of all upcoming trainings, meetups, speaking engagements, etc.

The following upcoming meetups are featuring creator of ClickHouse & CTO, Alexey Milovidov:
Upcoming meetups

* [ClickHouse Guangzhou User Group Meetup](https://mp.weixin.qq.com/s/GSvo-7xUoVzCsuUvlLTpCw) - August 25
* [San Francisco Meetup (Cloudflare)](https://www.meetup.com/clickhouse-silicon-valley-meetup-group/events/302540575) - September 5
* [Raleigh Meetup (Deutsche Bank)](https://www.meetup.com/triangletechtalks/events/302723486/) - September 9
* [New York Meetup (Rokt)](https://www.meetup.com/clickhouse-new-york-user-group/events/302575342) - September 10
* [Chicago Meetup (Jump Capital)](https://lu.ma/43tvmrfw) - September 12

Other upcoming meetups
* [Seattle Meetup (Statsig)](https://www.meetup.com/clickhouse-seattle-user-group/events/302518075/) - August 27
* [Melbourne Meetup](https://www.meetup.com/clickhouse-australia-user-group/events/302732666/) - August 27
* [Sydney Meetup](https://www.meetup.com/clickhouse-australia-user-group/events/302862966/) - September 5
* [Zurich Meetup](https://www.meetup.com/clickhouse-switzerland-meetup-group/events/302267429/) - September 5
* [Toronto Meetup (Shopify)](https://www.meetup.com/clickhouse-toronto-user-group/events/301490855/) - September 10
* [Austin Meetup](https://www.meetup.com/clickhouse-austin-user-group/events/302558689/) - September 17
* [London Meetup](https://www.meetup.com/clickhouse-london-user-group/events/302977267) - September 17
* [Bangalore Meetup](https://www.meetup.com/clickhouse-bangalore-user-group/events/303208274/) - September 18
* [Tel Aviv Meetup](https://www.meetup.com/clickhouse-meetup-israel/events/303095121) - September 22
* [Jakarta Meetup](https://www.meetup.com/clickhouse-indonesia-user-group/events/303191359/) - October 1
* [Singapore Meetup](https://www.meetup.com/clickhouse-singapore-meetup-group/events/303212064/) - October 3
* [Madrid Meetup](https://www.meetup.com/clickhouse-spain-user-group/events/303096564/) - October 22
* [Barcelona Meetup](https://www.meetup.com/clickhouse-spain-user-group/events/303096876/) - October 29
* [Oslo Meetup](https://www.meetup.com/open-source-real-time-data-warehouse-real-time-analytics/events/302938622) - October 31
* [Ghent Meetup](https://www.meetup.com/clickhouse-belgium-user-group/events/303049405/) - November 19
* [Dubai Meetup](https://www.meetup.com/clickhouse-dubai-meetup-group/events/303096989/) - November 21
* [Paris Meetup](https://www.meetup.com/clickhouse-france-user-group/events/303096434) - November 26

Recently completed meetups

* [ClickHouse Guangzhou User Group Meetup](https://mp.weixin.qq.com/s/GSvo-7xUoVzCsuUvlLTpCw) - August 25
* [Seattle Meetup (Statsig)](https://www.meetup.com/clickhouse-seattle-user-group/events/302518075/) - August 27
* [Melbourne Meetup](https://www.meetup.com/clickhouse-australia-user-group/events/302732666/) - August 27
* [Sydney Meetup](https://www.meetup.com/clickhouse-australia-user-group/events/302862966/) - September 5
* [Zurich Meetup](https://www.meetup.com/clickhouse-switzerland-meetup-group/events/302267429/) - September 5
* [San Francisco Meetup (Cloudflare)](https://www.meetup.com/clickhouse-silicon-valley-meetup-group/events/302540575) - September 5
* [Raleigh Meetup (Deutsche Bank)](https://www.meetup.com/triangletechtalks/events/302723486/) - September 9
* [New York Meetup (Rokt)](https://www.meetup.com/clickhouse-new-york-user-group/events/302575342) - September 10
* [Toronto Meetup (Shopify)](https://www.meetup.com/clickhouse-toronto-user-group/events/301490855/) - September 10
* [Chicago Meetup (Jump Capital)](https://lu.ma/43tvmrfw) - September 12
* [London Meetup](https://www.meetup.com/clickhouse-london-user-group/events/302977267) - September 17
* [Austin Meetup](https://www.meetup.com/clickhouse-austin-user-group/events/302558689/) - September 17

## Recent Recordings
* **Recent Meetup Videos**: [Meetup Playlist](https://www.youtube.com/playlist?list=PL0Z2YDlm0b3iNDUzpY1S3L_iV4nARda_U) Whenever possible, recordings of the ClickHouse Community Meetups are edited and presented as individual talks. Currently featuring "Modern SQL in 2023", "Fast, Concurrent, and Consistent Asynchronous INSERTS in ClickHouse", and "Full-Text Indices: Design and Experiments".
@@ -66,13 +66,11 @@ TRAP(gethostbyname)
TRAP(gethostbyname2)
TRAP(gethostent)
TRAP(getlogin)
TRAP(getmntent)
TRAP(getnetbyaddr)
TRAP(getnetbyname)
TRAP(getnetent)
TRAP(getnetgrent)
TRAP(getnetgrent_r)
TRAP(getopt)
TRAP(getopt_long)
TRAP(getopt_long_only)
TRAP(getpass)
@@ -133,7 +131,6 @@ TRAP(nrand48)
TRAP(__ppc_get_timebase_freq)
TRAP(ptsname)
TRAP(putchar_unlocked)
TRAP(putenv)
TRAP(pututline)
TRAP(pututxline)
TRAP(putwchar_unlocked)
@@ -148,7 +145,6 @@ TRAP(sethostent)
TRAP(sethostid)
TRAP(setkey)
//TRAP(setlocale) // Used by replxx at startup
TRAP(setlogmask)
TRAP(setnetent)
TRAP(setnetgrent)
TRAP(setprotoent)
@@ -203,7 +199,6 @@ TRAP(lgammal)
TRAP(nftw)
TRAP(nl_langinfo)
TRAP(putc_unlocked)
TRAP(rand)
/** In the current POSIX.1 specification (POSIX.1-2008), readdir() is not required to be thread-safe. However, in modern
 * implementations (including the glibc implementation), concurrent calls to readdir() that specify different directory streams
 * are thread-safe. In cases where multiple threads must read from the same directory stream, using readdir() with external
@@ -288,4 +283,14 @@ TRAP(tss_get)
TRAP(tss_set)
TRAP(tss_delete)

#ifndef USE_MUSL
/// These produce duplicate symbol errors when statically linking with musl.
/// Maybe we can remove them from the musl fork.
TRAP(getopt)
TRAP(putenv)
TRAP(setlogmask)
TRAP(rand)
TRAP(getmntent)
#endif

#endif
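The TRAP lists above are ClickHouse's ban on non-thread-safe libc functions: each name becomes a strong symbol that overrides the libc one at link time, so an accidental call aborts loudly instead of racing silently. A minimal sketch of the idea in C++ (the macro body here is illustrative, not the project's exact definition):

    #include <cstdio>
    #include <cstdlib>

    /// Defining a function with C linkage and the same name as a libc symbol
    /// makes the linker prefer it; calling the banned function then aborts.
    #define TRAP(func) \
        extern "C" void func() \
        { \
            std::fprintf(stderr, "Trap: %s is not thread-safe and is banned\n", #func); \
            std::abort(); \
        }

    TRAP(getlogin) /// e.g. getlogin() returns a pointer to static storage

This also explains the new #ifndef USE_MUSL block: musl itself defines some of these symbols in the same objects, so redefining them there causes duplicate-symbol errors when linking statically.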
@@ -188,8 +188,9 @@ namespace Crypto
    pFile = fopen(keyFile.c_str(), "r");
    if (pFile)
    {
        pem_password_cb * pCB = pass.empty() ? (pem_password_cb *)0 : &passCB;
        void * pPassword = pass.empty() ? (void *)0 : (void *)pass.c_str();
        pem_password_cb * pCB = &passCB;
        static constexpr char * no_password = "";
        void * pPassword = pass.empty() ? (void *)no_password : (void *)pass.c_str();
        if (readFunc(pFile, &pKey, pCB, pPassword))
        {
            fclose(pFile);
@@ -225,6 +226,13 @@ namespace Crypto
error:
    if (pFile)
        fclose(pFile);
    if (*ppKey)
    {
        if constexpr (std::is_same_v<K, EVP_PKEY>)
            EVP_PKEY_free(*ppKey);
        else
            EC_KEY_free(*ppKey);
    }
    throw OpenSSLException("EVPKey::loadKey(string)");
}

@@ -286,6 +294,13 @@ namespace Crypto
error:
    if (pBIO)
        BIO_free(pBIO);
    if (*ppKey)
    {
        if constexpr (std::is_same_v<K, EVP_PKEY>)
            EVP_PKEY_free(*ppKey);
        else
            EC_KEY_free(*ppKey);
    }
    throw OpenSSLException("EVPKey::loadKey(stream)");
}
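The key-loading change always installs the password callback and substitutes an empty static password when none is given, instead of passing null pointers; with both null, OpenSSL's PEM reader can fall back to prompting on the terminal for encrypted keys. A hedged sketch of what such a callback looks like (this is the documented pem_password_cb shape from OpenSSL; Poco's passCB is defined elsewhere and assumed analogous):

    #include <cstring>
    #include <openssl/pem.h>

    /// Copies the password from userdata into OpenSSL's buffer and returns its
    /// length; returning 0 means "empty password", never an interactive prompt.
    static int examplePassCB(char * buf, int size, int /*rwflag*/, void * userdata)
    {
        const char * password = static_cast<const char *>(userdata);
        int len = static_cast<int>(std::strlen(password));
        if (len > size)
            len = size;
        std::memcpy(buf, password, len);
        return len;
    }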
@@ -48,25 +48,17 @@ std::string PathImpl::currentImpl()
std::string PathImpl::homeImpl()
{
    std::string path;
#if defined(_POSIX_C_SOURCE) || defined(_BSD_SOURCE) || defined(_POSIX_C_SOURCE)
    size_t buf_size = 1024; // Same as glibc use for getpwuid
    std::vector<char> buf(buf_size);
    struct passwd res;
    struct passwd* pwd = nullptr;

    getpwuid_r(getuid(), &res, buf.data(), buf_size, &pwd);
#else
    struct passwd* pwd = getpwuid(getuid());
#endif
    if (pwd)
        path = pwd->pw_dir;
    else
    {
#if defined(_POSIX_C_SOURCE) || defined(_BSD_SOURCE) || defined(_POSIX_C_SOURCE)
        getpwuid_r(getuid(), &res, buf.data(), buf_size, &pwd);
#else
        pwd = getpwuid(geteuid());
#endif
        if (pwd)
            path = pwd->pw_dir;
        else
@@ -82,7 +74,7 @@ std::string PathImpl::configHomeImpl()
{
    std::string path = PathImpl::homeImpl();
    std::string::size_type n = path.size();
    if (n > 0 && path[n - 1] == '/')
#if POCO_OS == POCO_OS_MAC_OS_X
    path.append("Library/Preferences/");
#else
@@ -97,7 +89,7 @@ std::string PathImpl::dataHomeImpl()
{
    std::string path = PathImpl::homeImpl();
    std::string::size_type n = path.size();
    if (n > 0 && path[n - 1] == '/')
#if POCO_OS == POCO_OS_MAC_OS_X
    path.append("Library/Application Support/");
#else
@@ -112,7 +104,7 @@ std::string PathImpl::cacheHomeImpl()
{
    std::string path = PathImpl::homeImpl();
    std::string::size_type n = path.size();
    if (n > 0 && path[n - 1] == '/')
#if POCO_OS == POCO_OS_MAC_OS_X
    path.append("Library/Caches/");
#else
@@ -127,7 +119,7 @@ std::string PathImpl::tempHomeImpl()
{
    std::string path = PathImpl::homeImpl();
    std::string::size_type n = path.size();
    if (n > 0 && path[n - 1] == '/')
#if POCO_OS == POCO_OS_MAC_OS_X
    path.append("Library/Caches/");
#else
@@ -159,7 +151,7 @@ std::string PathImpl::tempImpl()
std::string PathImpl::configImpl()
{
    std::string path;

#if POCO_OS == POCO_OS_MAC_OS_X
    path = "/Library/Preferences/";
#else
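The rewritten homeImpl() replaces the getpwuid()/geteuid() fallback chain with a single getpwuid_r() lookup into a caller-owned buffer, the thread-safe form of the call. A self-contained sketch of the idiom (with an ERANGE retry that the fixed 1024-byte Poco version omits):

    #include <cerrno>
    #include <pwd.h>
    #include <unistd.h>
    #include <string>
    #include <vector>

    std::string homeDirectory()
    {
        std::vector<char> buf(1024); // same initial size glibc uses for getpwuid
        passwd entry {};
        passwd * result = nullptr;
        // getpwuid_r returns the error number directly; grow the buffer when it
        // reports that the supplied one is too small.
        while (getpwuid_r(getuid(), &entry, buf.data(), buf.size(), &result) == ERANGE)
            buf.resize(buf.size() * 2);
        return result ? result->pw_dir : std::string();
    }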
@@ -19,6 +19,8 @@

#include <ios>
#include <memory>
#include <functional>
#include "Poco/Any.h"
#include "Poco/Buffer.h"
#include "Poco/Exception.h"
@@ -33,6 +35,27 @@ namespace Net
{

class IHTTPSessionDataHooks
    /// Interface to control the stream of data bytes being sent or received through the socket by HTTPSession.
    /// It allows monitoring, throttling and scheduling data streams with syscall granularity.
{
public:
    virtual ~IHTTPSessionDataHooks() = default;

    virtual void atStart(int bytes) = 0;
    /// Called before sending/receiving `bytes` to/from the socket.

    virtual void atFinish(int bytes) = 0;
    /// Called when sending/receiving of `bytes` has successfully finished.

    virtual void atFail() = 0;
    /// If an error occurs during send/receive, atFail() is called instead of atFinish().
};

using HTTPSessionDataHooksPtr = std::shared_ptr<IHTTPSessionDataHooks>;

class Net_API HTTPSession
    /// HTTPSession implements basic HTTP session management
    /// for both HTTP clients and HTTP servers.
@@ -73,6 +96,12 @@ namespace Net
    Poco::Timespan getReceiveTimeout() const;
    /// Returns receive timeout for the HTTP session.

    void setSendDataHooks(const HTTPSessionDataHooksPtr & sendDataHooks = {});
    /// Sets data hooks that will be called on every send to the socket.

    void setReceiveDataHooks(const HTTPSessionDataHooksPtr & receiveDataHooks = {});
    /// Sets data hooks that will be called on every receive from the socket.

    bool connected() const;
    /// Returns true if the underlying socket is connected.

@@ -211,6 +240,10 @@ namespace Net
    Poco::Exception * _pException;
    Poco::Any _data;

    // Data hooks
    HTTPSessionDataHooksPtr _sendDataHooks;
    HTTPSessionDataHooksPtr _receiveDataHooks;

    friend class HTTPStreamBuf;
    friend class HTTPHeaderStreamBuf;
    friend class HTTPFixedLengthStreamBuf;
@@ -246,6 +279,16 @@ namespace Net
    return _receiveTimeout;
}

inline void HTTPSession::setSendDataHooks(const HTTPSessionDataHooksPtr & sendDataHooks)
{
    _sendDataHooks = sendDataHooks;
}

inline void HTTPSession::setReceiveDataHooks(const HTTPSessionDataHooksPtr & receiveDataHooks)
{
    _receiveDataHooks = receiveDataHooks;
}

inline StreamSocket & HTTPSession::socket()
{
    return _socket;
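A minimal sketch of implementing the new interface, assuming only what the header above declares (the class name and byte-counting behaviour are hypothetical):

    #include <atomic>
    #include "Poco/Net/HTTPSession.h"

    /// Counts bytes as they pass through the socket, one callback per syscall.
    /// atStart() is the place where a throttler could also block or sleep.
    class ByteCountingHooks : public Poco::Net::IHTTPSessionDataHooks
    {
    public:
        void atStart(int /*bytes*/) override {}
        void atFinish(int bytes) override { total_bytes += bytes; }
        void atFail() override { ++failures; }

        std::atomic<long long> total_bytes{0};
        std::atomic<int> failures{0};
    };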
@@ -128,14 +128,14 @@ int HTTPSession::get()
{
    if (_pCurrent == _pEnd)
        refill();

    if (_pCurrent < _pEnd)
        return *_pCurrent++;
    else
        return std::char_traits<char>::eof();
}


int HTTPSession::peek()
{
    if (_pCurrent == _pEnd)
@@ -147,7 +147,7 @@ int HTTPSession::peek()
        return std::char_traits<char>::eof();
}


int HTTPSession::read(char* buffer, std::streamsize length)
{
    if (_pCurrent < _pEnd)
@@ -166,10 +166,17 @@ int HTTPSession::write(const char* buffer, std::streamsize length)
{
    try
    {
        return _socket.sendBytes(buffer, (int) length);
        if (_sendDataHooks)
            _sendDataHooks->atStart((int) length);
        int result = _socket.sendBytes(buffer, (int) length);
        if (_sendDataHooks)
            _sendDataHooks->atFinish(result);
        return result;
    }
    catch (Poco::Exception& exc)
    {
        if (_sendDataHooks)
            _sendDataHooks->atFail();
        setException(exc);
        throw;
    }
@@ -180,10 +187,17 @@ int HTTPSession::receive(char* buffer, int length)
{
    try
    {
        return _socket.receiveBytes(buffer, length);
        if (_receiveDataHooks)
            _receiveDataHooks->atStart(length);
        int result = _socket.receiveBytes(buffer, length);
        if (_receiveDataHooks)
            _receiveDataHooks->atFinish(result);
        return result;
    }
    catch (Poco::Exception& exc)
    {
        if (_receiveDataHooks)
            _receiveDataHooks->atFail();
        setException(exc);
        throw;
    }
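Wiring it up, using the ByteCountingHooks sketch from above (HTTPClientSession inherits the new setters from HTTPSession):

    #include <memory>
    #include "Poco/Net/HTTPClientSession.h"
    #include "Poco/Net/HTTPRequest.h"

    void countRequestTraffic()
    {
        Poco::Net::HTTPClientSession session("example.com");
        auto hooks = std::make_shared<ByteCountingHooks>();
        session.setSendDataHooks(hooks);    // wraps every sendBytes() call
        session.setReceiveDataHooks(hooks); // wraps every receiveBytes() call

        Poco::Net::HTTPRequest request(Poco::Net::HTTPRequest::HTTP_GET, "/");
        session.sendRequest(request);
        // hooks->total_bytes now reflects bytes moved by the socket syscalls;
        // on a socket error, atFail() bumps hooks->failures before the rethrow.
    }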
@@ -63,7 +63,7 @@ bool checkIsBrokenTimeout()

SocketImpl::SocketImpl():
    _sockfd(POCO_INVALID_SOCKET),
    _blocking(true),
    _isBrokenTimeout(checkIsBrokenTimeout())
{
}
@@ -82,7 +82,7 @@ SocketImpl::~SocketImpl()
    close();
}


SocketImpl* SocketImpl::acceptConnection(SocketAddress& clientAddr)
{
    if (_sockfd == POCO_INVALID_SOCKET) throw InvalidSocketException();
@@ -118,7 +118,7 @@ void SocketImpl::connect(const SocketAddress& address)
        rc = ::connect(_sockfd, address.addr(), address.length());
    }
    while (rc != 0 && lastError() == POCO_EINTR);
    if (rc != 0)
    {
        int err = lastError();
        error(err, address.toString());
@@ -205,7 +205,7 @@ void SocketImpl::bind6(const SocketAddress& address, bool reuseAddress, bool reu
#if defined(POCO_HAVE_IPv6)
    if (address.family() != SocketAddress::IPv6)
        throw Poco::InvalidArgumentException("SocketAddress must be an IPv6 address");

    if (_sockfd == POCO_INVALID_SOCKET)
    {
        init(address.af());
@@ -226,11 +226,11 @@ void SocketImpl::bind6(const SocketAddress& address, bool reuseAddress, bool reu
#endif
}


void SocketImpl::listen(int backlog)
{
    if (_sockfd == POCO_INVALID_SOCKET) throw InvalidSocketException();

    int rc = ::listen(_sockfd, backlog);
    if (rc != 0) error();
}
@@ -254,7 +254,7 @@ void SocketImpl::shutdownReceive()
    if (rc != 0) error();
}


void SocketImpl::shutdownSend()
{
    if (_sockfd == POCO_INVALID_SOCKET) throw InvalidSocketException();
@@ -263,7 +263,7 @@ void SocketImpl::shutdownSend()
    if (rc != 0) error();
}


void SocketImpl::shutdown()
{
    if (_sockfd == POCO_INVALID_SOCKET) throw InvalidSocketException();
@@ -318,7 +318,7 @@ int SocketImpl::receiveBytes(void* buffer, int length, int flags)
            throw TimeoutException();
        }
    }

    int rc;
    do
    {
@@ -326,7 +326,7 @@ int SocketImpl::receiveBytes(void* buffer, int length, int flags)
        rc = ::recv(_sockfd, reinterpret_cast<char*>(buffer), length, flags);
    }
    while (blocking && rc < 0 && lastError() == POCO_EINTR);
    if (rc < 0)
    {
        int err = lastError();
        if ((err == POCO_EAGAIN || err == POCO_EWOULDBLOCK) && !blocking)
@@ -364,7 +364,7 @@ int SocketImpl::receiveFrom(void* buffer, int length, SocketAddress& address, in
            throw TimeoutException();
        }
    }

    sockaddr_storage abuffer;
    struct sockaddr* pSA = reinterpret_cast<struct sockaddr*>(&abuffer);
    poco_socklen_t saLen = sizeof(abuffer);
@@ -451,7 +451,7 @@ bool SocketImpl::pollImpl(Poco::Timespan& remainingTime, int mode)
    }
    while (rc < 0 && lastError() == POCO_EINTR);
    if (rc < 0) error();
    return rc > 0;

#else

@@ -494,7 +494,7 @@ bool SocketImpl::pollImpl(Poco::Timespan& remainingTime, int mode)
    }
    while (rc < 0 && errorCode == POCO_EINTR);
    if (rc < 0) error(errorCode);
    return rc > 0;

#endif // POCO_HAVE_FD_POLL
}
@@ -504,13 +504,13 @@ bool SocketImpl::poll(const Poco::Timespan& timeout, int mode)
    Poco::Timespan remainingTime(timeout);
    return pollImpl(remainingTime, mode);
}

void SocketImpl::setSendBufferSize(int size)
{
    setOption(SOL_SOCKET, SO_SNDBUF, size);
}


int SocketImpl::getSendBufferSize()
{
    int result;
@@ -524,7 +524,7 @@ void SocketImpl::setReceiveBufferSize(int size)
    setOption(SOL_SOCKET, SO_RCVBUF, size);
}


int SocketImpl::getReceiveBufferSize()
{
    int result;
@@ -570,7 +570,7 @@ Poco::Timespan SocketImpl::getReceiveTimeout()
    return result;
}


SocketAddress SocketImpl::address()
{
    if (_sockfd == POCO_INVALID_SOCKET) throw InvalidSocketException();
@@ -581,7 +581,7 @@ SocketAddress SocketImpl::address()
    int rc = ::getsockname(_sockfd, pSA, &saLen);
    if (rc == 0)
        return SocketAddress(pSA, saLen);
    else
        error();
    return SocketAddress();
}
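The hunks in this file are whitespace-only, but they all sit inside one recurring POSIX pattern worth naming: retry the syscall while it reports EINTR, because an interrupting signal does not mean the operation failed. A generic restatement of the idiom (not Poco's exact wrapper):

    #include <cerrno>
    #include <sys/socket.h>
    #include <sys/types.h>

    ssize_t recvRetryingOnEintr(int fd, void * buf, size_t len, int flags)
    {
        ssize_t rc;
        do
        {
            rc = ::recv(fd, buf, len, flags);
        } while (rc < 0 && errno == EINTR); // interrupted by a signal: try again
        return rc; // < 0 here is a real error (or EAGAIN on a non-blocking socket)
    }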
@@ -248,6 +248,9 @@ namespace Net
    SSL_CTX * sslContext() const;
    /// Returns the underlying OpenSSL SSL Context object.

    SSL_CTX * takeSslContext();
    /// Takes ownership of the underlying OpenSSL SSL Context object.

    Usage usage() const;
    /// Returns whether the context is for use by a client or by a server
    /// and whether TLSv1 is required.
@@ -401,6 +404,13 @@ namespace Net
    return _pSSLContext;
}

inline SSL_CTX * Context::takeSslContext()
{
    auto * result = _pSSLContext;
    _pSSLContext = nullptr;
    return result;
}


inline bool Context::extendedCertificateVerificationEnabled() const
{
@@ -106,6 +106,11 @@ Context::Context(

Context::~Context()
{
    if (_pSSLContext == nullptr)
    {
        return;
    }

    try
    {
        SSL_CTX_free(_pSSLContext);
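These two changes belong together: takeSslContext() nulls the member as it hands the raw pointer out, and the destructor's new null check ensures the transferred SSL_CTX is not freed a second time. A sketch of the intended hand-off (function name hypothetical):

    #include <openssl/ssl.h>
    #include "Poco/Net/Context.h"

    /// After this call the Context no longer owns the SSL_CTX: its destructor
    /// sees nullptr and skips SSL_CTX_free(), so the new owner must free it.
    SSL_CTX * adoptSslContext(Poco::Net::Context & context)
    {
        SSL_CTX * raw = context.takeSslContext();
        return raw; // caller is now responsible for SSL_CTX_free(raw)
    }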
@@ -18,4 +18,4 @@ target_compile_options (_poco_util
    -Wno-zero-as-null-pointer-constant
)
target_include_directories (_poco_util SYSTEM PUBLIC "include")
target_link_libraries (_poco_util PUBLIC Poco::JSON Poco::XML)
target_link_libraries (_poco_util PUBLIC Poco::JSON Poco::XML Poco::Net)
@@ -241,6 +241,20 @@ namespace Util
    /// If the value contains references to other properties (${<property>}), these
    /// are expanded.

    std::string getHost(const std::string & key) const;
    /// Returns the string value of the host property with the given name.
    /// Throws a NotFoundException if the key does not exist.
    /// Throws a SyntaxException if the property is not a valid host (IP address or domain).
    /// If the value contains references to other properties (${<property>}), these
    /// are expanded.

    std::string getHost(const std::string & key, const std::string & defaultValue) const;
    /// If a property with the given key exists, returns the host property's string value,
    /// otherwise returns the given default value.
    /// Throws a SyntaxException if the property is not a valid host (IP address or domain).
    /// If the value contains references to other properties (${<property>}), these
    /// are expanded.

    virtual void setString(const std::string & key, const std::string & value);
    /// Sets the property with the given key to the given value.
    /// An already existing value for the key is overwritten.
@@ -339,12 +353,35 @@
    static bool parseBool(const std::string & value);
    void setRawWithEvent(const std::string & key, std::string value);

    static void checkHostValidity(const std::string & value);
    /// Throws a SyntaxException if the value is not a valid host (IP address or domain).

    virtual ~AbstractConfiguration();

private:
    std::string internalExpand(const std::string & value) const;
    std::string uncheckedExpand(const std::string & value) const;

    static bool isValidIPv4Address(const std::string & value);
    /// An IPv4 address is considered valid if it is "0.0.0.0" or one of those
    /// defined by inet_aton() or inet_addr().

    static bool isValidIPv6Address(const std::string & value);
    /// An IPv6 address is considered valid if it is "::" or one of those
    /// defined by inet_pton() with the AF_INET6 flag
    /// (in this case it may have a scope id and may be surrounded by '[', ']').

    static bool isValidDomainName(const std::string & value);
    /// <domain> ::= <subdomain> [ "." ]
    /// <subdomain> ::= <label> | <subdomain> "." <label>
    /// <label> ::= <letter> [ [ <ldh-str> ] <let-dig> ]
    /// <ldh-str> ::= <let-dig-hyp> | <let-dig-hyp> <ldh-str>
    /// <let-dig-hyp> ::= <let-dig> | "-"
    /// <let-dig> ::= <letter> | <digit>
    /// <letter> ::= any one of the 52 alphabetic characters A through Z in
    /// upper case and a through z in lower case
    /// <digit> ::= any one of the ten digits 0 through 9

    AbstractConfiguration(const AbstractConfiguration &);
    AbstractConfiguration & operator=(const AbstractConfiguration &);
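A short usage sketch of the new accessor through a concrete configuration class (MapConfiguration is an existing Poco::Util in-memory configuration; the keys and values here are illustrative):

    #include "Poco/AutoPtr.h"
    #include "Poco/Util/MapConfiguration.h"

    void hostConfigExample()
    {
        Poco::AutoPtr<Poco::Util::MapConfiguration> config = new Poco::Util::MapConfiguration;
        config->setString("listen_host", "0.0.0.0");

        std::string host = config->getHost("listen_host");        // valid IPv4, returned as-is
        std::string byDefault = config->getHost("missing", "::"); // the default is validated too
        // getHost() on a value like "not a host!" throws Poco::SyntaxException
    }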
@@ -18,6 +18,7 @@
#include "Poco/NumberParser.h"
#include "Poco/NumberFormatter.h"
#include "Poco/String.h"
#include "Poco/Net/IPAddressImpl.h"


using Poco::Mutex;
@@ -263,6 +264,41 @@ bool AbstractConfiguration::getBool(const std::string& key, bool defaultValue) c
}


std::string AbstractConfiguration::getHost(const std::string& key) const
{
    Mutex::ScopedLock lock(_mutex);

    std::string value;
    if (getRaw(key, value))
    {
        std::string expandedValue = internalExpand(value);
        checkHostValidity(expandedValue);
        return expandedValue;
    }
    else
        throw NotFoundException(key);
}


std::string AbstractConfiguration::getHost(const std::string& key, const std::string& defaultValue) const
{
    Mutex::ScopedLock lock(_mutex);

    std::string value;
    if (getRaw(key, value))
    {
        std::string expandedValue = internalExpand(value);
        checkHostValidity(expandedValue);
        return expandedValue;
    }
    else
    {
        checkHostValidity(defaultValue);
        return defaultValue;
    }
}


void AbstractConfiguration::setString(const std::string& key, const std::string& value)
{
    setRawWithEvent(key, value);
@@ -529,4 +565,68 @@ void AbstractConfiguration::setRawWithEvent(const std::string& key, std::string
}


void AbstractConfiguration::checkHostValidity(const std::string& value)
{
    if (!isValidIPv4Address(value) && !isValidIPv6Address(value) && !isValidDomainName(value))
    {
        throw SyntaxException("Property is not a valid host name", value);
    }
}


bool AbstractConfiguration::isValidIPv4Address(const std::string& value)
{
    using Poco::Net::Impl::IPv4AddressImpl;
    IPv4AddressImpl empty4 = IPv4AddressImpl();

    IPv4AddressImpl ipAddress = IPv4AddressImpl::parse(value);
    return ipAddress != empty4 || value == "0.0.0.0";
}


bool AbstractConfiguration::isValidIPv6Address(const std::string& value)
{
#if defined(POCO_HAVE_IPv6)
    using Poco::Net::Impl::IPv6AddressImpl;
    IPv6AddressImpl empty6 = IPv6AddressImpl();

    IPv6AddressImpl ipAddress = IPv6AddressImpl::parse(value);
    return ipAddress != empty6 || value == "::";
#else
    return false;
#endif
}


bool AbstractConfiguration::isValidDomainName(const std::string& value)
{
    if (value.empty() || value == "." || value.length() > 253)
        return false;
    int labelLength = 0;
    char oldChar = 0;

    for (char ch : value)
    {
        if (ch == '.')
        {
            if (labelLength == 0 || labelLength > 63 || oldChar == '-')
                return false;
            labelLength = 0;
        }
        else if (isalnum(ch) || ch == '-')
        {
            if (labelLength == 0 && (ch == '-' || isdigit(ch)))
                return false;
            ++labelLength;
        }
        else
        {
            return false;
        }
        oldChar = ch;
    }
    return oldChar == '.' || (labelLength > 0 && labelLength <= 63 && oldChar != '-');
}


} } // namespace Poco::Util
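The loop above enforces the RFC 1035 grammar quoted in the header one character at a time. Restated per label, as a simplified standalone mirror of the private isValidDomainName() (for illustration only, not part of the patch):

    #include <cctype>
    #include <string>

    bool looksLikeValidLabel(const std::string & label)
    {
        if (label.empty() || label.size() > 63)
            return false; // a label holds 1..63 characters
        if (!std::isalpha(static_cast<unsigned char>(label.front())))
            return false; // must start with a letter (digits and '-' are rejected)
        if (label.back() == '-')
            return false; // must not end with a hyphen
        for (char ch : label)
            if (!std::isalnum(static_cast<unsigned char>(ch)) && ch != '-')
                return false; // only letters, digits and '-'
        return true;
    }

    // "clickhouse" -> true; "-bad" and "9front" -> false. The full check also
    // allows one trailing '.' and caps the whole name at 253 characters.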
contrib/CMakeLists.txt (vendored, 9 changes)

@@ -145,8 +145,13 @@ add_contrib (isa-l-cmake isa-l)
add_contrib (libhdfs3-cmake libhdfs3) # requires: google-protobuf, krb5, isa-l
add_contrib (hive-metastore-cmake hive-metastore) # requires: thrift, avro, arrow, libhdfs3
add_contrib (cppkafka-cmake cppkafka)
add_contrib (libpqxx-cmake libpqxx)
add_contrib (libpq-cmake libpq)

option(ENABLE_LIBPQXX "Enable PostgreSQL" ${ENABLE_LIBRARIES})
if (ENABLE_LIBPQXX)
    add_contrib (postgres-cmake postgres)
    add_contrib (libpqxx-cmake libpqxx)
endif()

add_contrib (rocksdb-cmake rocksdb) # requires: jemalloc, snappy, zlib, lz4, zstd, liburing
add_contrib (nuraft-cmake NuRaft)
add_contrib (fast_float-cmake fast_float)
contrib/curl (vendored, 2 changes)

@@ -1 +1 @@
Subproject commit de7b3e89218467159a7af72d58cea8425946e97d
Subproject commit 83bedbd730d62b83744cc26fa0433d3f6e2e4cd6
contrib/grpc (vendored, 2 changes)

@@ -1 +1 @@
Subproject commit 1716359d2e28d304a250f9df0e6c0ccad03de8db
Subproject commit 7bc3abe952aba1dc7bce7f2f790dc781cb51a41e
@@ -15,162 +15,64 @@ set(ICUDATA_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/icudata/")
# These lists of sources were generated from build log of the original ICU build system (configure + make).

set(ICUUC_SOURCES
    "${ICU_SOURCE_DIR}/common/errorcode.cpp"
    "${ICU_SOURCE_DIR}/common/putil.cpp"
    "${ICU_SOURCE_DIR}/common/umath.cpp"
    "${ICU_SOURCE_DIR}/common/utypes.cpp"
    "${ICU_SOURCE_DIR}/common/uinvchar.cpp"
    "${ICU_SOURCE_DIR}/common/umutex.cpp"
    "${ICU_SOURCE_DIR}/common/ucln_cmn.cpp"
    "${ICU_SOURCE_DIR}/common/uinit.cpp"
    "${ICU_SOURCE_DIR}/common/uobject.cpp"
    "${ICU_SOURCE_DIR}/common/cmemory.cpp"
    "${ICU_SOURCE_DIR}/common/charstr.cpp"
    "${ICU_SOURCE_DIR}/common/cstr.cpp"
    "${ICU_SOURCE_DIR}/common/udata.cpp"
    "${ICU_SOURCE_DIR}/common/ucmndata.cpp"
    "${ICU_SOURCE_DIR}/common/udatamem.cpp"
    "${ICU_SOURCE_DIR}/common/umapfile.cpp"
    "${ICU_SOURCE_DIR}/common/udataswp.cpp"
    "${ICU_SOURCE_DIR}/common/utrie_swap.cpp"
    "${ICU_SOURCE_DIR}/common/ucol_swp.cpp"
    "${ICU_SOURCE_DIR}/common/utrace.cpp"
    "${ICU_SOURCE_DIR}/common/uhash.cpp"
    "${ICU_SOURCE_DIR}/common/uhash_us.cpp"
    "${ICU_SOURCE_DIR}/common/uenum.cpp"
    "${ICU_SOURCE_DIR}/common/ustrenum.cpp"
    "${ICU_SOURCE_DIR}/common/uvector.cpp"
    "${ICU_SOURCE_DIR}/common/ustack.cpp"
    "${ICU_SOURCE_DIR}/common/uvectr32.cpp"
    "${ICU_SOURCE_DIR}/common/uvectr64.cpp"
    "${ICU_SOURCE_DIR}/common/ucnv.cpp"
    "${ICU_SOURCE_DIR}/common/ucnv_bld.cpp"
    "${ICU_SOURCE_DIR}/common/ucnv_cnv.cpp"
    "${ICU_SOURCE_DIR}/common/ucnv_io.cpp"
    "${ICU_SOURCE_DIR}/common/ucnv_cb.cpp"
    "${ICU_SOURCE_DIR}/common/ucnv_err.cpp"
    "${ICU_SOURCE_DIR}/common/ucnvlat1.cpp"
    "${ICU_SOURCE_DIR}/common/ucnv_u7.cpp"
    "${ICU_SOURCE_DIR}/common/ucnv_u8.cpp"
    "${ICU_SOURCE_DIR}/common/ucnv_u16.cpp"
    "${ICU_SOURCE_DIR}/common/ucnv_u32.cpp"
    "${ICU_SOURCE_DIR}/common/ucnvscsu.cpp"
    "${ICU_SOURCE_DIR}/common/ucnvbocu.cpp"
    "${ICU_SOURCE_DIR}/common/ucnv_ext.cpp"
    "${ICU_SOURCE_DIR}/common/ucnvmbcs.cpp"
    "${ICU_SOURCE_DIR}/common/ucnv2022.cpp"
    "${ICU_SOURCE_DIR}/common/ucnvhz.cpp"
    "${ICU_SOURCE_DIR}/common/ucnv_lmb.cpp"
    "${ICU_SOURCE_DIR}/common/ucnvisci.cpp"
    "${ICU_SOURCE_DIR}/common/ucnvdisp.cpp"
    "${ICU_SOURCE_DIR}/common/ucnv_set.cpp"
    "${ICU_SOURCE_DIR}/common/ucnv_ct.cpp"
    "${ICU_SOURCE_DIR}/common/resource.cpp"
    "${ICU_SOURCE_DIR}/common/uresbund.cpp"
    "${ICU_SOURCE_DIR}/common/ures_cnv.cpp"
    "${ICU_SOURCE_DIR}/common/uresdata.cpp"
    "${ICU_SOURCE_DIR}/common/resbund.cpp"
    "${ICU_SOURCE_DIR}/common/resbund_cnv.cpp"
    "${ICU_SOURCE_DIR}/common/ucurr.cpp"
    "${ICU_SOURCE_DIR}/common/localebuilder.cpp"
    "${ICU_SOURCE_DIR}/common/localeprioritylist.cpp"
    "${ICU_SOURCE_DIR}/common/messagepattern.cpp"
    "${ICU_SOURCE_DIR}/common/ucat.cpp"
    "${ICU_SOURCE_DIR}/common/locmap.cpp"
    "${ICU_SOURCE_DIR}/common/uloc.cpp"
    "${ICU_SOURCE_DIR}/common/locid.cpp"
    "${ICU_SOURCE_DIR}/common/locutil.cpp"
    "${ICU_SOURCE_DIR}/common/locavailable.cpp"
    "${ICU_SOURCE_DIR}/common/locdispnames.cpp"
    "${ICU_SOURCE_DIR}/common/locdspnm.cpp"
    "${ICU_SOURCE_DIR}/common/loclikely.cpp"
    "${ICU_SOURCE_DIR}/common/locresdata.cpp"
    "${ICU_SOURCE_DIR}/common/lsr.cpp"
    "${ICU_SOURCE_DIR}/common/loclikelysubtags.cpp"
    "${ICU_SOURCE_DIR}/common/locdistance.cpp"
    "${ICU_SOURCE_DIR}/common/localematcher.cpp"
    "${ICU_SOURCE_DIR}/common/bytestream.cpp"
    "${ICU_SOURCE_DIR}/common/stringpiece.cpp"
    "${ICU_SOURCE_DIR}/common/bytesinkutil.cpp"
    "${ICU_SOURCE_DIR}/common/stringtriebuilder.cpp"
    "${ICU_SOURCE_DIR}/common/bytestriebuilder.cpp"
    "${ICU_SOURCE_DIR}/common/bytestrie.cpp"
    "${ICU_SOURCE_DIR}/common/bytestrieiterator.cpp"
    "${ICU_SOURCE_DIR}/common/ucharstrie.cpp"
    "${ICU_SOURCE_DIR}/common/ucharstriebuilder.cpp"
    "${ICU_SOURCE_DIR}/common/ucharstrieiterator.cpp"
    "${ICU_SOURCE_DIR}/common/dictionarydata.cpp"
    "${ICU_SOURCE_DIR}/common/edits.cpp"
    "${ICU_SOURCE_DIR}/common/appendable.cpp"
    "${ICU_SOURCE_DIR}/common/ustr_cnv.cpp"
    "${ICU_SOURCE_DIR}/common/unistr_cnv.cpp"
    "${ICU_SOURCE_DIR}/common/unistr.cpp"
    "${ICU_SOURCE_DIR}/common/unistr_case.cpp"
    "${ICU_SOURCE_DIR}/common/unistr_props.cpp"
    "${ICU_SOURCE_DIR}/common/utf_impl.cpp"
    "${ICU_SOURCE_DIR}/common/ustring.cpp"
    "${ICU_SOURCE_DIR}/common/ustrcase.cpp"
    "${ICU_SOURCE_DIR}/common/ucasemap.cpp"
    "${ICU_SOURCE_DIR}/common/ucasemap_titlecase_brkiter.cpp"
    "${ICU_SOURCE_DIR}/common/cstring.cpp"
    "${ICU_SOURCE_DIR}/common/ustrfmt.cpp"
    "${ICU_SOURCE_DIR}/common/ustrtrns.cpp"
    "${ICU_SOURCE_DIR}/common/ustr_wcs.cpp"
    "${ICU_SOURCE_DIR}/common/utext.cpp"
    "${ICU_SOURCE_DIR}/common/unistr_case_locale.cpp"
    "${ICU_SOURCE_DIR}/common/ustrcase_locale.cpp"
    "${ICU_SOURCE_DIR}/common/unistr_titlecase_brkiter.cpp"
    "${ICU_SOURCE_DIR}/common/ustr_titlecase_brkiter.cpp"
    "${ICU_SOURCE_DIR}/common/normalizer2impl.cpp"
    "${ICU_SOURCE_DIR}/common/normalizer2.cpp"
    "${ICU_SOURCE_DIR}/common/filterednormalizer2.cpp"
    "${ICU_SOURCE_DIR}/common/normlzr.cpp"
    "${ICU_SOURCE_DIR}/common/unorm.cpp"
    "${ICU_SOURCE_DIR}/common/unormcmp.cpp"
    "${ICU_SOURCE_DIR}/common/loadednormalizer2impl.cpp"
    "${ICU_SOURCE_DIR}/common/chariter.cpp"
    "${ICU_SOURCE_DIR}/common/schriter.cpp"
    "${ICU_SOURCE_DIR}/common/uchriter.cpp"
    "${ICU_SOURCE_DIR}/common/uiter.cpp"
    "${ICU_SOURCE_DIR}/common/patternprops.cpp"
    "${ICU_SOURCE_DIR}/common/uchar.cpp"
    "${ICU_SOURCE_DIR}/common/uprops.cpp"
    "${ICU_SOURCE_DIR}/common/ucase.cpp"
    "${ICU_SOURCE_DIR}/common/propname.cpp"
    "${ICU_SOURCE_DIR}/common/ubidi_props.cpp"
    "${ICU_SOURCE_DIR}/common/characterproperties.cpp"
    "${ICU_SOURCE_DIR}/common/ubidi.cpp"
    "${ICU_SOURCE_DIR}/common/ubidiwrt.cpp"
    "${ICU_SOURCE_DIR}/common/ubidiln.cpp"
    "${ICU_SOURCE_DIR}/common/ushape.cpp"
    "${ICU_SOURCE_DIR}/common/uscript.cpp"
    "${ICU_SOURCE_DIR}/common/uscript_props.cpp"
    "${ICU_SOURCE_DIR}/common/usc_impl.cpp"
    "${ICU_SOURCE_DIR}/common/unames.cpp"
    "${ICU_SOURCE_DIR}/common/utrie.cpp"
    "${ICU_SOURCE_DIR}/common/utrie2.cpp"
    "${ICU_SOURCE_DIR}/common/utrie2_builder.cpp"
    "${ICU_SOURCE_DIR}/common/ucptrie.cpp"
    "${ICU_SOURCE_DIR}/common/umutablecptrie.cpp"
    "${ICU_SOURCE_DIR}/common/bmpset.cpp"
    "${ICU_SOURCE_DIR}/common/unisetspan.cpp"
    "${ICU_SOURCE_DIR}/common/uset_props.cpp"
    "${ICU_SOURCE_DIR}/common/uniset_props.cpp"
    "${ICU_SOURCE_DIR}/common/uniset_closure.cpp"
    "${ICU_SOURCE_DIR}/common/uset.cpp"
    "${ICU_SOURCE_DIR}/common/uniset.cpp"
    "${ICU_SOURCE_DIR}/common/usetiter.cpp"
    "${ICU_SOURCE_DIR}/common/ruleiter.cpp"
    "${ICU_SOURCE_DIR}/common/caniter.cpp"
    "${ICU_SOURCE_DIR}/common/unifilt.cpp"
    "${ICU_SOURCE_DIR}/common/unifunct.cpp"
    "${ICU_SOURCE_DIR}/common/uarrsort.cpp"
    "${ICU_SOURCE_DIR}/common/brkiter.cpp"
    "${ICU_SOURCE_DIR}/common/ubrk.cpp"
    "${ICU_SOURCE_DIR}/common/brkeng.cpp"
    "${ICU_SOURCE_DIR}/common/brkiter.cpp"
    "${ICU_SOURCE_DIR}/common/bytesinkutil.cpp"
    "${ICU_SOURCE_DIR}/common/bytestream.cpp"
    "${ICU_SOURCE_DIR}/common/bytestrie.cpp"
    "${ICU_SOURCE_DIR}/common/bytestriebuilder.cpp"
    "${ICU_SOURCE_DIR}/common/bytestrieiterator.cpp"
    "${ICU_SOURCE_DIR}/common/caniter.cpp"
    "${ICU_SOURCE_DIR}/common/characterproperties.cpp"
    "${ICU_SOURCE_DIR}/common/chariter.cpp"
    "${ICU_SOURCE_DIR}/common/charstr.cpp"
    "${ICU_SOURCE_DIR}/common/cmemory.cpp"
    "${ICU_SOURCE_DIR}/common/cstr.cpp"
    "${ICU_SOURCE_DIR}/common/cstring.cpp"
    "${ICU_SOURCE_DIR}/common/cwchar.cpp"
    "${ICU_SOURCE_DIR}/common/dictbe.cpp"
    "${ICU_SOURCE_DIR}/common/dictionarydata.cpp"
    "${ICU_SOURCE_DIR}/common/dtintrv.cpp"
    "${ICU_SOURCE_DIR}/common/edits.cpp"
    "${ICU_SOURCE_DIR}/common/emojiprops.cpp"
    "${ICU_SOURCE_DIR}/common/errorcode.cpp"
    "${ICU_SOURCE_DIR}/common/filteredbrk.cpp"
    "${ICU_SOURCE_DIR}/common/filterednormalizer2.cpp"
    "${ICU_SOURCE_DIR}/common/icudataver.cpp"
    "${ICU_SOURCE_DIR}/common/icuplug.cpp"
    "${ICU_SOURCE_DIR}/common/loadednormalizer2impl.cpp"
    "${ICU_SOURCE_DIR}/common/localebuilder.cpp"
    "${ICU_SOURCE_DIR}/common/localematcher.cpp"
    "${ICU_SOURCE_DIR}/common/localeprioritylist.cpp"
    "${ICU_SOURCE_DIR}/common/locavailable.cpp"
    "${ICU_SOURCE_DIR}/common/locbased.cpp"
    "${ICU_SOURCE_DIR}/common/locdispnames.cpp"
    "${ICU_SOURCE_DIR}/common/locdistance.cpp"
    "${ICU_SOURCE_DIR}/common/locdspnm.cpp"
    "${ICU_SOURCE_DIR}/common/locid.cpp"
    "${ICU_SOURCE_DIR}/common/loclikely.cpp"
    "${ICU_SOURCE_DIR}/common/loclikelysubtags.cpp"
    "${ICU_SOURCE_DIR}/common/locmap.cpp"
    "${ICU_SOURCE_DIR}/common/locresdata.cpp"
    "${ICU_SOURCE_DIR}/common/locutil.cpp"
    "${ICU_SOURCE_DIR}/common/lsr.cpp"
    "${ICU_SOURCE_DIR}/common/lstmbe.cpp"
    "${ICU_SOURCE_DIR}/common/messagepattern.cpp"
    "${ICU_SOURCE_DIR}/common/mlbe.cpp"
    "${ICU_SOURCE_DIR}/common/normalizer2.cpp"
    "${ICU_SOURCE_DIR}/common/normalizer2impl.cpp"
    "${ICU_SOURCE_DIR}/common/normlzr.cpp"
    "${ICU_SOURCE_DIR}/common/parsepos.cpp"
    "${ICU_SOURCE_DIR}/common/patternprops.cpp"
    "${ICU_SOURCE_DIR}/common/pluralmap.cpp"
    "${ICU_SOURCE_DIR}/common/propname.cpp"
    "${ICU_SOURCE_DIR}/common/propsvec.cpp"
    "${ICU_SOURCE_DIR}/common/punycode.cpp"
    "${ICU_SOURCE_DIR}/common/putil.cpp"
    "${ICU_SOURCE_DIR}/common/rbbi.cpp"
    "${ICU_SOURCE_DIR}/common/rbbi_cache.cpp"
    "${ICU_SOURCE_DIR}/common/rbbidata.cpp"
    "${ICU_SOURCE_DIR}/common/rbbinode.cpp"
    "${ICU_SOURCE_DIR}/common/rbbirb.cpp"
@@ -178,166 +80,180 @@ set(ICUUC_SOURCES
    "${ICU_SOURCE_DIR}/common/rbbisetb.cpp"
    "${ICU_SOURCE_DIR}/common/rbbistbl.cpp"
    "${ICU_SOURCE_DIR}/common/rbbitblb.cpp"
    "${ICU_SOURCE_DIR}/common/rbbi_cache.cpp"
    "${ICU_SOURCE_DIR}/common/resbund.cpp"
    "${ICU_SOURCE_DIR}/common/resbund_cnv.cpp"
    "${ICU_SOURCE_DIR}/common/resource.cpp"
    "${ICU_SOURCE_DIR}/common/restrace.cpp"
    "${ICU_SOURCE_DIR}/common/ruleiter.cpp"
    "${ICU_SOURCE_DIR}/common/schriter.cpp"
    "${ICU_SOURCE_DIR}/common/serv.cpp"
    "${ICU_SOURCE_DIR}/common/servnotf.cpp"
    "${ICU_SOURCE_DIR}/common/servls.cpp"
    "${ICU_SOURCE_DIR}/common/servlk.cpp"
    "${ICU_SOURCE_DIR}/common/servlkf.cpp"
    "${ICU_SOURCE_DIR}/common/servls.cpp"
    "${ICU_SOURCE_DIR}/common/servnotf.cpp"
    "${ICU_SOURCE_DIR}/common/servrbf.cpp"
    "${ICU_SOURCE_DIR}/common/servslkf.cpp"
    "${ICU_SOURCE_DIR}/common/uidna.cpp"
    "${ICU_SOURCE_DIR}/common/usprep.cpp"
    "${ICU_SOURCE_DIR}/common/uts46.cpp"
    "${ICU_SOURCE_DIR}/common/punycode.cpp"
    "${ICU_SOURCE_DIR}/common/util.cpp"
    "${ICU_SOURCE_DIR}/common/util_props.cpp"
    "${ICU_SOURCE_DIR}/common/parsepos.cpp"
    "${ICU_SOURCE_DIR}/common/locbased.cpp"
    "${ICU_SOURCE_DIR}/common/cwchar.cpp"
    "${ICU_SOURCE_DIR}/common/wintz.cpp"
    "${ICU_SOURCE_DIR}/common/dtintrv.cpp"
    "${ICU_SOURCE_DIR}/common/ucnvsel.cpp"
    "${ICU_SOURCE_DIR}/common/propsvec.cpp"
    "${ICU_SOURCE_DIR}/common/ulist.cpp"
    "${ICU_SOURCE_DIR}/common/uloc_tag.cpp"
    "${ICU_SOURCE_DIR}/common/icudataver.cpp"
    "${ICU_SOURCE_DIR}/common/icuplug.cpp"
    "${ICU_SOURCE_DIR}/common/sharedobject.cpp"
    "${ICU_SOURCE_DIR}/common/simpleformatter.cpp"
    "${ICU_SOURCE_DIR}/common/unifiedcache.cpp"
    "${ICU_SOURCE_DIR}/common/uloc_keytype.cpp"
    "${ICU_SOURCE_DIR}/common/ubiditransform.cpp"
    "${ICU_SOURCE_DIR}/common/pluralmap.cpp"
    "${ICU_SOURCE_DIR}/common/static_unicode_sets.cpp"
    "${ICU_SOURCE_DIR}/common/restrace.cpp"
    "${ICU_SOURCE_DIR}/common/emojiprops.cpp"
    "${ICU_SOURCE_DIR}/common/lstmbe.cpp")
    "${ICU_SOURCE_DIR}/common/stringpiece.cpp"
    "${ICU_SOURCE_DIR}/common/stringtriebuilder.cpp"
    "${ICU_SOURCE_DIR}/common/uarrsort.cpp"
    "${ICU_SOURCE_DIR}/common/ubidi.cpp"
    "${ICU_SOURCE_DIR}/common/ubidi_props.cpp"
    "${ICU_SOURCE_DIR}/common/ubidiln.cpp"
    "${ICU_SOURCE_DIR}/common/ubiditransform.cpp"
    "${ICU_SOURCE_DIR}/common/ubidiwrt.cpp"
    "${ICU_SOURCE_DIR}/common/ubrk.cpp"
    "${ICU_SOURCE_DIR}/common/ucase.cpp"
    "${ICU_SOURCE_DIR}/common/ucasemap.cpp"
    "${ICU_SOURCE_DIR}/common/ucasemap_titlecase_brkiter.cpp"
    "${ICU_SOURCE_DIR}/common/ucat.cpp"
    "${ICU_SOURCE_DIR}/common/uchar.cpp"
    "${ICU_SOURCE_DIR}/common/ucharstrie.cpp"
    "${ICU_SOURCE_DIR}/common/ucharstriebuilder.cpp"
    "${ICU_SOURCE_DIR}/common/ucharstrieiterator.cpp"
    "${ICU_SOURCE_DIR}/common/uchriter.cpp"
    "${ICU_SOURCE_DIR}/common/ucln_cmn.cpp"
    "${ICU_SOURCE_DIR}/common/ucmndata.cpp"
    "${ICU_SOURCE_DIR}/common/ucnv.cpp"
    "${ICU_SOURCE_DIR}/common/ucnv2022.cpp"
    "${ICU_SOURCE_DIR}/common/ucnv_bld.cpp"
    "${ICU_SOURCE_DIR}/common/ucnv_cb.cpp"
    "${ICU_SOURCE_DIR}/common/ucnv_cnv.cpp"
    "${ICU_SOURCE_DIR}/common/ucnv_ct.cpp"
    "${ICU_SOURCE_DIR}/common/ucnv_err.cpp"
    "${ICU_SOURCE_DIR}/common/ucnv_ext.cpp"
    "${ICU_SOURCE_DIR}/common/ucnv_io.cpp"
    "${ICU_SOURCE_DIR}/common/ucnv_lmb.cpp"
    "${ICU_SOURCE_DIR}/common/ucnv_set.cpp"
    "${ICU_SOURCE_DIR}/common/ucnv_u16.cpp"
    "${ICU_SOURCE_DIR}/common/ucnv_u32.cpp"
    "${ICU_SOURCE_DIR}/common/ucnv_u7.cpp"
    "${ICU_SOURCE_DIR}/common/ucnv_u8.cpp"
    "${ICU_SOURCE_DIR}/common/ucnvbocu.cpp"
    "${ICU_SOURCE_DIR}/common/ucnvdisp.cpp"
    "${ICU_SOURCE_DIR}/common/ucnvhz.cpp"
    "${ICU_SOURCE_DIR}/common/ucnvisci.cpp"
    "${ICU_SOURCE_DIR}/common/ucnvlat1.cpp"
    "${ICU_SOURCE_DIR}/common/ucnvmbcs.cpp"
    "${ICU_SOURCE_DIR}/common/ucnvscsu.cpp"
    "${ICU_SOURCE_DIR}/common/ucnvsel.cpp"
    "${ICU_SOURCE_DIR}/common/ucol_swp.cpp"
    "${ICU_SOURCE_DIR}/common/ucptrie.cpp"
    "${ICU_SOURCE_DIR}/common/ucurr.cpp"
    "${ICU_SOURCE_DIR}/common/udata.cpp"
    "${ICU_SOURCE_DIR}/common/udatamem.cpp"
    "${ICU_SOURCE_DIR}/common/udataswp.cpp"
    "${ICU_SOURCE_DIR}/common/uenum.cpp"
    "${ICU_SOURCE_DIR}/common/uhash.cpp"
    "${ICU_SOURCE_DIR}/common/uhash_us.cpp"
    "${ICU_SOURCE_DIR}/common/uidna.cpp"
    "${ICU_SOURCE_DIR}/common/uinit.cpp"
    "${ICU_SOURCE_DIR}/common/uinvchar.cpp"
    "${ICU_SOURCE_DIR}/common/uiter.cpp"
    "${ICU_SOURCE_DIR}/common/ulist.cpp"
    "${ICU_SOURCE_DIR}/common/uloc.cpp"
    "${ICU_SOURCE_DIR}/common/uloc_keytype.cpp"
    "${ICU_SOURCE_DIR}/common/uloc_tag.cpp"
    "${ICU_SOURCE_DIR}/common/ulocale.cpp"
    "${ICU_SOURCE_DIR}/common/ulocbuilder.cpp"
    "${ICU_SOURCE_DIR}/common/umapfile.cpp"
    "${ICU_SOURCE_DIR}/common/umath.cpp"
    "${ICU_SOURCE_DIR}/common/umutablecptrie.cpp"
    "${ICU_SOURCE_DIR}/common/umutex.cpp"
    "${ICU_SOURCE_DIR}/common/unames.cpp"
    "${ICU_SOURCE_DIR}/common/unifiedcache.cpp"
    "${ICU_SOURCE_DIR}/common/unifilt.cpp"
    "${ICU_SOURCE_DIR}/common/unifunct.cpp"
    "${ICU_SOURCE_DIR}/common/uniset.cpp"
    "${ICU_SOURCE_DIR}/common/uniset_closure.cpp"
    "${ICU_SOURCE_DIR}/common/uniset_props.cpp"
    "${ICU_SOURCE_DIR}/common/unisetspan.cpp"
    "${ICU_SOURCE_DIR}/common/unistr.cpp"
    "${ICU_SOURCE_DIR}/common/unistr_case.cpp"
    "${ICU_SOURCE_DIR}/common/unistr_case_locale.cpp"
    "${ICU_SOURCE_DIR}/common/unistr_cnv.cpp"
    "${ICU_SOURCE_DIR}/common/unistr_props.cpp"
    "${ICU_SOURCE_DIR}/common/unistr_titlecase_brkiter.cpp"
    "${ICU_SOURCE_DIR}/common/unorm.cpp"
    "${ICU_SOURCE_DIR}/common/unormcmp.cpp"
    "${ICU_SOURCE_DIR}/common/uobject.cpp"
    "${ICU_SOURCE_DIR}/common/uprops.cpp"
    "${ICU_SOURCE_DIR}/common/ures_cnv.cpp"
    "${ICU_SOURCE_DIR}/common/uresbund.cpp"
    "${ICU_SOURCE_DIR}/common/uresdata.cpp"
    "${ICU_SOURCE_DIR}/common/usc_impl.cpp"
    "${ICU_SOURCE_DIR}/common/uscript.cpp"
    "${ICU_SOURCE_DIR}/common/uscript_props.cpp"
    "${ICU_SOURCE_DIR}/common/uset.cpp"
    "${ICU_SOURCE_DIR}/common/uset_props.cpp"
    "${ICU_SOURCE_DIR}/common/usetiter.cpp"
    "${ICU_SOURCE_DIR}/common/ushape.cpp"
    "${ICU_SOURCE_DIR}/common/usprep.cpp"
    "${ICU_SOURCE_DIR}/common/ustack.cpp"
    "${ICU_SOURCE_DIR}/common/ustr_cnv.cpp"
    "${ICU_SOURCE_DIR}/common/ustr_titlecase_brkiter.cpp"
    "${ICU_SOURCE_DIR}/common/ustr_wcs.cpp"
    "${ICU_SOURCE_DIR}/common/ustrcase.cpp"
    "${ICU_SOURCE_DIR}/common/ustrcase_locale.cpp"
    "${ICU_SOURCE_DIR}/common/ustrenum.cpp"
    "${ICU_SOURCE_DIR}/common/ustrfmt.cpp"
    "${ICU_SOURCE_DIR}/common/ustring.cpp"
    "${ICU_SOURCE_DIR}/common/ustrtrns.cpp"
    "${ICU_SOURCE_DIR}/common/utext.cpp"
    "${ICU_SOURCE_DIR}/common/utf_impl.cpp"
    "${ICU_SOURCE_DIR}/common/util.cpp"
    "${ICU_SOURCE_DIR}/common/util_props.cpp"
    "${ICU_SOURCE_DIR}/common/utrace.cpp"
    "${ICU_SOURCE_DIR}/common/utrie.cpp"
    "${ICU_SOURCE_DIR}/common/utrie2.cpp"
    "${ICU_SOURCE_DIR}/common/utrie2_builder.cpp"
    "${ICU_SOURCE_DIR}/common/utrie_swap.cpp"
    "${ICU_SOURCE_DIR}/common/uts46.cpp"
    "${ICU_SOURCE_DIR}/common/utypes.cpp"
    "${ICU_SOURCE_DIR}/common/uvector.cpp"
    "${ICU_SOURCE_DIR}/common/uvectr32.cpp"
    "${ICU_SOURCE_DIR}/common/uvectr64.cpp"
    "${ICU_SOURCE_DIR}/common/wintz.cpp")

set(ICUI18N_SOURCES
    "${ICU_SOURCE_DIR}/i18n/ucln_in.cpp"
    "${ICU_SOURCE_DIR}/i18n/fmtable.cpp"
    "${ICU_SOURCE_DIR}/i18n/format.cpp"
    "${ICU_SOURCE_DIR}/i18n/msgfmt.cpp"
    "${ICU_SOURCE_DIR}/i18n/umsg.cpp"
    "${ICU_SOURCE_DIR}/i18n/numfmt.cpp"
    "${ICU_SOURCE_DIR}/i18n/unum.cpp"
    "${ICU_SOURCE_DIR}/i18n/decimfmt.cpp"
    "${ICU_SOURCE_DIR}/i18n/dcfmtsym.cpp"
    "${ICU_SOURCE_DIR}/i18n/fmtable_cnv.cpp"
    "${ICU_SOURCE_DIR}/i18n/choicfmt.cpp"
    "${ICU_SOURCE_DIR}/i18n/datefmt.cpp"
    "${ICU_SOURCE_DIR}/i18n/smpdtfmt.cpp"
    "${ICU_SOURCE_DIR}/i18n/reldtfmt.cpp"
    "${ICU_SOURCE_DIR}/i18n/dtfmtsym.cpp"
    "${ICU_SOURCE_DIR}/i18n/udat.cpp"
    "${ICU_SOURCE_DIR}/i18n/dtptngen.cpp"
    "${ICU_SOURCE_DIR}/i18n/udatpg.cpp"
    "${ICU_SOURCE_DIR}/i18n/nfrs.cpp"
    "${ICU_SOURCE_DIR}/i18n/nfrule.cpp"
    "${ICU_SOURCE_DIR}/i18n/nfsubs.cpp"
    "${ICU_SOURCE_DIR}/i18n/rbnf.cpp"
    "${ICU_SOURCE_DIR}/i18n/numsys.cpp"
    "${ICU_SOURCE_DIR}/i18n/unumsys.cpp"
    "${ICU_SOURCE_DIR}/i18n/ucsdet.cpp"
    "${ICU_SOURCE_DIR}/i18n/ucal.cpp"
    "${ICU_SOURCE_DIR}/i18n/calendar.cpp"
    "${ICU_SOURCE_DIR}/i18n/gregocal.cpp"
    "${ICU_SOURCE_DIR}/i18n/timezone.cpp"
    "${ICU_SOURCE_DIR}/i18n/simpletz.cpp"
    "${ICU_SOURCE_DIR}/i18n/olsontz.cpp"
    "${ICU_SOURCE_DIR}/i18n/alphaindex.cpp"
    "${ICU_SOURCE_DIR}/i18n/anytrans.cpp"
    "${ICU_SOURCE_DIR}/i18n/astro.cpp"
    "${ICU_SOURCE_DIR}/i18n/taiwncal.cpp"
    "${ICU_SOURCE_DIR}/i18n/basictz.cpp"
    "${ICU_SOURCE_DIR}/i18n/bocsu.cpp"
    "${ICU_SOURCE_DIR}/i18n/brktrans.cpp"
    "${ICU_SOURCE_DIR}/i18n/buddhcal.cpp"
    "${ICU_SOURCE_DIR}/i18n/persncal.cpp"
    "${ICU_SOURCE_DIR}/i18n/islamcal.cpp"
    "${ICU_SOURCE_DIR}/i18n/japancal.cpp"
    "${ICU_SOURCE_DIR}/i18n/gregoimp.cpp"
    "${ICU_SOURCE_DIR}/i18n/hebrwcal.cpp"
    "${ICU_SOURCE_DIR}/i18n/indiancal.cpp"
    "${ICU_SOURCE_DIR}/i18n/chnsecal.cpp"
    "${ICU_SOURCE_DIR}/i18n/calendar.cpp"
    "${ICU_SOURCE_DIR}/i18n/casetrn.cpp"
    "${ICU_SOURCE_DIR}/i18n/cecal.cpp"
    "${ICU_SOURCE_DIR}/i18n/coptccal.cpp"
    "${ICU_SOURCE_DIR}/i18n/dangical.cpp"
    "${ICU_SOURCE_DIR}/i18n/ethpccal.cpp"
    "${ICU_SOURCE_DIR}/i18n/chnsecal.cpp"
    "${ICU_SOURCE_DIR}/i18n/choicfmt.cpp"
    "${ICU_SOURCE_DIR}/i18n/coleitr.cpp"
    "${ICU_SOURCE_DIR}/i18n/coll.cpp"
    "${ICU_SOURCE_DIR}/i18n/sortkey.cpp"
    "${ICU_SOURCE_DIR}/i18n/bocsu.cpp"
    "${ICU_SOURCE_DIR}/i18n/ucoleitr.cpp"
    "${ICU_SOURCE_DIR}/i18n/ucol.cpp"
    "${ICU_SOURCE_DIR}/i18n/ucol_res.cpp"
    "${ICU_SOURCE_DIR}/i18n/ucol_sit.cpp"
    "${ICU_SOURCE_DIR}/i18n/collation.cpp"
    "${ICU_SOURCE_DIR}/i18n/collationsettings.cpp"
    "${ICU_SOURCE_DIR}/i18n/collationbuilder.cpp"
    "${ICU_SOURCE_DIR}/i18n/collationcompare.cpp"
    "${ICU_SOURCE_DIR}/i18n/collationdata.cpp"
    "${ICU_SOURCE_DIR}/i18n/collationtailoring.cpp"
    "${ICU_SOURCE_DIR}/i18n/collationdatabuilder.cpp"
    "${ICU_SOURCE_DIR}/i18n/collationdatareader.cpp"
    "${ICU_SOURCE_DIR}/i18n/collationdatawriter.cpp"
    "${ICU_SOURCE_DIR}/i18n/collationfastlatin.cpp"
    "${ICU_SOURCE_DIR}/i18n/collationfastlatinbuilder.cpp"
    "${ICU_SOURCE_DIR}/i18n/collationfcd.cpp"
    "${ICU_SOURCE_DIR}/i18n/collationiterator.cpp"
    "${ICU_SOURCE_DIR}/i18n/utf16collationiterator.cpp"
    "${ICU_SOURCE_DIR}/i18n/utf8collationiterator.cpp"
    "${ICU_SOURCE_DIR}/i18n/uitercollationiterator.cpp"
    "${ICU_SOURCE_DIR}/i18n/collationsets.cpp"
    "${ICU_SOURCE_DIR}/i18n/collationcompare.cpp"
    "${ICU_SOURCE_DIR}/i18n/collationfastlatin.cpp"
    "${ICU_SOURCE_DIR}/i18n/collationkeys.cpp"
    "${ICU_SOURCE_DIR}/i18n/rulebasedcollator.cpp"
    "${ICU_SOURCE_DIR}/i18n/collationroot.cpp"
    "${ICU_SOURCE_DIR}/i18n/collationrootelements.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/collationdatabuilder.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/collationweights.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/collationruleparser.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/collationbuilder.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/collationfastlatinbuilder.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/listformatter.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/ulistformatter.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/strmatch.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/usearch.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/search.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/stsearch.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/translit.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/utrans.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/esctrn.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/unesctrn.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/funcrepl.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/strrepl.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/tridpars.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/collationsets.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/collationsettings.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/collationtailoring.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/collationweights.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/compactdecimalformat.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/coptccal.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/cpdtrans.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/rbt.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/rbt_data.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/rbt_pars.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/rbt_rule.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/rbt_set.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/nultrans.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/remtrans.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/casetrn.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/titletrn.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/tolowtrn.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/toupptrn.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/anytrans.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/name2uni.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/uni2name.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/nortrans.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/quant.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/transreg.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/brktrans.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/regexcmp.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/rematch.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/repattrn.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/regexst.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/regextxt.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/regeximp.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/uregex.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/uregexc.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/ulocdata.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/measfmt.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/currfmt.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/curramt.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/currunit.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/measure.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/utmscale.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/csdetect.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/csmatch.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/csr2022.cpp"
|
||||
@ -346,60 +262,80 @@ set(ICUI18N_SOURCES
|
||||
"${ICU_SOURCE_DIR}/i18n/csrsbcs.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/csrucode.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/csrutf8.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/inputext.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/wintzimpl.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/windtfmt.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/winnmfmt.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/basictz.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/dtrule.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/rbtz.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/tzrule.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/tztrans.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/vtzone.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/zonemeta.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/standardplural.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/upluralrules.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/plurrule.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/plurfmt.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/selfmt.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/curramt.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/currfmt.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/currpinf.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/currunit.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/dangical.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/datefmt.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/dayperiodrules.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/dcfmtsym.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/decContext.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/decNumber.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/decimfmt.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/displayoptions.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/double-conversion-bignum-dtoa.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/double-conversion-bignum.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/double-conversion-cached-powers.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/double-conversion-double-to-string.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/double-conversion-fast-dtoa.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/double-conversion-string-to-double.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/double-conversion-strtod.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/dtfmtsym.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/dtitvfmt.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/dtitvinf.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/udateintervalformat.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/tmunit.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/tmutamt.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/tmutfmt.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/currpinf.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/uspoof.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/uspoof_impl.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/uspoof_build.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/uspoof_conf.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/smpdtfst.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/ztrans.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/zrule.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/vzone.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/dtptngen.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/dtrule.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/erarules.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/esctrn.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/ethpccal.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/fmtable.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/fmtable_cnv.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/format.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/formatted_string_builder.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/formattedval_iterimpl.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/formattedval_sbimpl.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/formattedvalue.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/fphdlimp.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/fpositer.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/ufieldpositer.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/decNumber.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/decContext.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/alphaindex.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/tznames.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/tznames_impl.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/tzgnames.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/tzfmt.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/compactdecimalformat.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/funcrepl.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/gender.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/region.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/scriptset.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/uregion.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/reldatefmt.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/quantityformatter.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/gregocal.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/gregoimp.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/hebrwcal.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/indiancal.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/inputext.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/islamcal.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/iso8601cal.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/japancal.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/listformatter.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/measfmt.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/measunit.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/sharedbreakiterator.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/scientificnumberformatter.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/dayperiodrules.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/measunit_extra.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/measure.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/messageformat2.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/messageformat2_arguments.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/messageformat2_checker.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/messageformat2_data_model.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/messageformat2_errors.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/messageformat2_evaluation.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/messageformat2_formattable.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/messageformat2_formatter.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/messageformat2_function_registry.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/messageformat2_parser.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/messageformat2_serializer.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/msgfmt.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/name2uni.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/nfrs.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/nfrule.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/nfsubs.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/nortrans.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/nultrans.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/number_affixutils.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/number_asformat.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/number_capi.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/number_compact.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/number_currencysymbols.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/number_decimalquantity.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/number_decimfmtprops.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/number_fluent.cpp"
|
||||
@ -407,7 +343,9 @@ set(ICUI18N_SOURCES
|
||||
"${ICU_SOURCE_DIR}/i18n/number_grouping.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/number_integerwidth.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/number_longnames.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/number_mapper.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/number_modifiers.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/number_multiplier.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/number_notation.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/number_output.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/number_padding.cpp"
|
||||
@ -415,46 +353,125 @@ set(ICUI18N_SOURCES
|
||||
"${ICU_SOURCE_DIR}/i18n/number_patternstring.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/number_rounding.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/number_scientific.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/number_utils.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/number_asformat.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/number_mapper.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/number_multiplier.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/number_currencysymbols.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/number_simple.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/number_skeletons.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/number_capi.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/double-conversion-string-to-double.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/double-conversion-double-to-string.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/double-conversion-bignum-dtoa.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/double-conversion-bignum.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/double-conversion-cached-powers.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/double-conversion-fast-dtoa.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/double-conversion-strtod.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/string_segment.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/numparse_parsednumber.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/numparse_impl.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/numparse_symbols.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/numparse_decimal.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/numparse_scientific.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/numparse_currency.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/numparse_affixes.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/numparse_compositions.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/numparse_validators.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/numrange_fluent.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/numrange_impl.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/erarules.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/formattedvalue.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/formattedval_iterimpl.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/formattedval_sbimpl.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/formatted_string_builder.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/measunit_extra.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/number_symbolswrapper.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/number_usageprefs.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/number_utils.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/numfmt.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/numparse_affixes.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/numparse_compositions.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/numparse_currency.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/numparse_decimal.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/numparse_impl.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/numparse_parsednumber.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/numparse_scientific.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/numparse_symbols.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/numparse_validators.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/numrange_capi.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/numrange_fluent.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/numrange_impl.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/numsys.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/olsontz.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/persncal.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/pluralranges.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/plurfmt.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/plurrule.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/quant.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/quantityformatter.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/rbnf.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/rbt.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/rbt_data.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/rbt_pars.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/rbt_rule.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/rbt_set.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/rbtz.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/regexcmp.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/regeximp.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/regexst.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/regextxt.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/region.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/reldatefmt.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/reldtfmt.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/rematch.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/remtrans.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/repattrn.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/rulebasedcollator.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/scientificnumberformatter.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/scriptset.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/search.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/selfmt.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/sharedbreakiterator.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/simpletz.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/smpdtfmt.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/smpdtfst.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/sortkey.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/standardplural.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/string_segment.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/strmatch.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/strrepl.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/stsearch.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/taiwncal.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/timezone.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/titletrn.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/tmunit.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/tmutamt.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/tmutfmt.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/tolowtrn.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/toupptrn.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/translit.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/transreg.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/tridpars.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/tzfmt.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/tzgnames.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/tznames.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/tznames_impl.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/tzrule.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/tztrans.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/ucal.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/ucln_in.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/ucol.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/ucol_res.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/ucol_sit.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/ucoleitr.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/ucsdet.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/udat.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/udateintervalformat.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/udatpg.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/ufieldpositer.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/uitercollationiterator.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/ulistformatter.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/ulocdata.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/umsg.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/unesctrn.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/uni2name.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/units_complexconverter.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/units_converter.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/units_data.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/units_router.cpp")
|
||||
"${ICU_SOURCE_DIR}/i18n/units_router.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/unum.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/unumsys.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/upluralrules.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/uregex.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/uregexc.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/uregion.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/usearch.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/uspoof.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/uspoof_build.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/uspoof_conf.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/uspoof_impl.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/utf16collationiterator.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/utf8collationiterator.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/utmscale.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/utrans.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/vtzone.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/vzone.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/windtfmt.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/winnmfmt.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/wintzimpl.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/zonemeta.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/zrule.cpp"
|
||||
"${ICU_SOURCE_DIR}/i18n/ztrans.cpp")
|
||||
|
||||
file(GENERATE OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/empty.cpp" CONTENT " ")
|
||||
enable_language(ASM)
|
||||
@ -464,6 +481,11 @@ if (ARCH_S390X)
|
||||
else()
|
||||
set(ICUDATA_SOURCE_FILE "${ICUDATA_SOURCE_DIR}/icudt75l_dat.S" )
|
||||
endif()
|
||||
# ^^ you may wonder how the same assembly files can be used for different little-endian platforms (x86, ARM).
# These files are indeed assembly, but they only contain data ('.long' directives), which makes them portable across CPUs.
# Only the endianness and the character set (ASCII, EBCDIC) make a difference; see also
# https://unicode-org.github.io/icu/userguide/icu_data/#sharing-icu-data-between-platforms, 'Sharing ICU Data Between Platforms'.
# (As an experiment, try re-generating the data files on x86 vs. ARM ... you'll get exactly the same files.)

set(ICUDATA_SOURCES
"${ICUDATA_SOURCE_FILE}"
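To make the endianness remark above concrete: every word in the ICU data blob has the same byte layout on any little-endian CPU, which is why the single icudt75l file (the trailing "l" denotes little-endian) serves x86 and ARM alike. A small illustrative C program (hypothetical, not part of the build):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t word = 0x12345678;  /* one ".long" worth of ICU data */
    const uint8_t *bytes = (const uint8_t *) &word;
    /* Prints "78 56 34 12" on x86 and on little-endian ARM alike. */
    for (int i = 0; i < 4; ++i)
        printf("%02x ", bytes[i]);
    printf("\n");
    return 0;
}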
2
contrib/libarchive
vendored
2
contrib/libarchive
vendored
@ -1 +1 @@
Subproject commit ee45796171324519f0c0bfd012018dd099296336
Subproject commit 313aa1fa10b657de791e3202c168a6c833bc3543
@ -1,6 +1,6 @@
set (LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/libarchive")

set(SRCS
set(SRCS
"${LIBRARY_DIR}/libarchive/archive_acl.c"
"${LIBRARY_DIR}/libarchive/archive_blake2sp_ref.c"
"${LIBRARY_DIR}/libarchive/archive_blake2s_ref.c"
@ -135,7 +135,7 @@ set(SRCS
)

add_library(_libarchive ${SRCS})
target_include_directories(_libarchive PUBLIC
target_include_directories(_libarchive PUBLIC
${CMAKE_CURRENT_SOURCE_DIR}
"${LIBRARY_DIR}/libarchive"
)
@ -157,7 +157,7 @@ if (TARGET ch_contrib::zlib)
endif()

if (TARGET ch_contrib::zstd)
target_compile_definitions(_libarchive PUBLIC HAVE_ZSTD_H=1 HAVE_LIBZSTD=1 HAVE_LIBZSTD_COMPRESSOR=1)
target_compile_definitions(_libarchive PUBLIC HAVE_ZSTD_H=1 HAVE_LIBZSTD=1 HAVE_ZSTD_compressStream=1)
target_link_libraries(_libarchive PRIVATE ch_contrib::zstd)
endif()

@ -179,4 +179,4 @@ if (OS_LINUX)
)
endif()

add_library(ch_contrib::libarchive ALIAS _libarchive)
add_library(ch_contrib::libarchive ALIAS _libarchive)
@ -334,13 +334,16 @@ typedef uint64_t uintmax_t;
/* #undef ARCHIVE_XATTR_LINUX */

/* Version number of bsdcpio */
#define BSDCPIO_VERSION_STRING "3.7.0"
#define BSDCPIO_VERSION_STRING "3.7.4"

/* Version number of bsdtar */
#define BSDTAR_VERSION_STRING "3.7.0"
#define BSDTAR_VERSION_STRING "3.7.4"

/* Version number of bsdcat */
#define BSDCAT_VERSION_STRING "3.7.0"
#define BSDCAT_VERSION_STRING "3.7.4"

/* Version number of bsdunzip */
#define BSDUNZIP_VERSION_STRING "3.7.4"

/* Define to 1 if you have the `acl_create_entry' function. */
/* #undef HAVE_ACL_CREATE_ENTRY */
@ -642,8 +645,8 @@ typedef uint64_t uintmax_t;
/* Define to 1 if you have the `getgrnam_r' function. */
#define HAVE_GETGRNAM_R 1

/* Define to 1 if platform uses `optreset` to reset `getopt` */
#define HAVE_GETOPT_OPTRESET 1
/* Define to 1 if you have the `getline' function. */
#define HAVE_GETLINE 1

/* Define to 1 if you have the `getpid' function. */
#define HAVE_GETPID 1
@ -750,6 +753,12 @@ typedef uint64_t uintmax_t;
/* Define to 1 if you have the `pcreposix' library (-lpcreposix). */
/* #undef HAVE_LIBPCREPOSIX */

/* Define to 1 if you have the `pcre2-8' library (-lpcre2-8). */
/* #undef HAVE_LIBPCRE2 */

/* Define to 1 if you have the `pcreposix' library (-lpcre2posix). */
/* #undef HAVE_LIBPCRE2POSIX */

/* Define to 1 if you have the `xml2' library (-lxml2). */
#define HAVE_LIBXML2 1

@ -765,9 +774,8 @@ typedef uint64_t uintmax_t;
/* Define to 1 if you have the `zstd' library (-lzstd). */
/* #undef HAVE_LIBZSTD */

/* Define to 1 if you have the `zstd' library (-lzstd) with compression
   support. */
/* #undef HAVE_LIBZSTD_COMPRESSOR */
/* Define to 1 if you have the ZSTD_compressStream function. */
/* #undef HAVE_ZSTD_compressStream */

/* Define to 1 if you have the <limits.h> header file. */
#define HAVE_LIMITS_H 1
@ -923,6 +931,9 @@ typedef uint64_t uintmax_t;
/* Define to 1 if you have the <pcreposix.h> header file. */
/* #undef HAVE_PCREPOSIX_H */

/* Define to 1 if you have the <pcre2posix.h> header file. */
/* #undef HAVE_PCRE2POSIX_H */

/* Define to 1 if you have the `pipe' function. */
#define HAVE_PIPE 1

@ -1029,6 +1040,12 @@ typedef uint64_t uintmax_t;
/* Define to 1 if you have the `strrchr' function. */
#define HAVE_STRRCHR 1

/* Define to 1 if the system has the type `struct statfs'. */
/* #undef HAVE_STRUCT_STATFS */

/* Define to 1 if `f_iosize' is a member of `struct statfs'. */
/* #undef HAVE_STRUCT_STATFS_F_IOSIZE */

/* Define to 1 if `f_namemax' is a member of `struct statfs'. */
/* #undef HAVE_STRUCT_STATFS_F_NAMEMAX */

@ -1077,6 +1094,9 @@ typedef uint64_t uintmax_t;
/* Define to 1 if you have the `symlink' function. */
#define HAVE_SYMLINK 1

/* Define to 1 if you have the `sysconf' function. */
#define HAVE_SYSCONF 1

/* Define to 1 if you have the <sys/acl.h> header file. */
/* #undef HAVE_SYS_ACL_H */

@ -1273,13 +1293,13 @@ typedef uint64_t uintmax_t;
/* #undef HAVE__MKGMTIME */

/* Define as const if the declaration of iconv() needs const. */
#define ICONV_CONST
#define ICONV_CONST

/* Version number of libarchive as a single integer */
#define LIBARCHIVE_VERSION_NUMBER "3007000"
#define LIBARCHIVE_VERSION_NUMBER "3007004"

/* Version number of libarchive */
#define LIBARCHIVE_VERSION_STRING "3.7.0"
#define LIBARCHIVE_VERSION_STRING "3.7.4"

/* Define to 1 if `lstat' dereferences a symlink specified with a trailing
   slash. */
@ -1333,7 +1353,7 @@ typedef uint64_t uintmax_t;
#endif /* SAFE_TO_DEFINE_EXTENSIONS */

/* Version number of package */
#define VERSION "3.7.0"
#define VERSION "3.7.4"

/* Number of bits in a file offset, on hosts where this is settable. */
/* #undef _FILE_OFFSET_BITS */
2
contrib/libfiu
vendored
2
contrib/libfiu
vendored
@ -1 +1 @@
Subproject commit b85edbde4cf974b1b40d27828a56f0505f4e2ee5
Subproject commit a1290d8cd3d7b4541d6c976e0a54f572ac03f2a3
1
contrib/libpq
vendored
1
contrib/libpq
vendored
@ -1 +0,0 @@
Subproject commit 2446f2c85650b56df9d4ebc4c2ea7f4b01beee57
@ -1,78 +0,0 @@
if (NOT ENABLE_LIBPQXX)
return()
endif()

set(LIBPQ_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/libpq")

set(SRCS
"${LIBPQ_SOURCE_DIR}/fe-auth.c"
"${LIBPQ_SOURCE_DIR}/fe-auth-scram.c"
"${LIBPQ_SOURCE_DIR}/fe-connect.c"
"${LIBPQ_SOURCE_DIR}/fe-exec.c"
"${LIBPQ_SOURCE_DIR}/fe-lobj.c"
"${LIBPQ_SOURCE_DIR}/fe-misc.c"
"${LIBPQ_SOURCE_DIR}/fe-print.c"
"${LIBPQ_SOURCE_DIR}/fe-trace.c"
"${LIBPQ_SOURCE_DIR}/fe-protocol3.c"
"${LIBPQ_SOURCE_DIR}/fe-secure.c"
"${LIBPQ_SOURCE_DIR}/fe-secure-common.c"
"${LIBPQ_SOURCE_DIR}/fe-secure-openssl.c"
"${LIBPQ_SOURCE_DIR}/legacy-pqsignal.c"
"${LIBPQ_SOURCE_DIR}/libpq-events.c"
"${LIBPQ_SOURCE_DIR}/pqexpbuffer.c"

"${LIBPQ_SOURCE_DIR}/common/scram-common.c"
"${LIBPQ_SOURCE_DIR}/common/sha2.c"
"${LIBPQ_SOURCE_DIR}/common/sha1.c"
"${LIBPQ_SOURCE_DIR}/common/md5.c"
"${LIBPQ_SOURCE_DIR}/common/md5_common.c"
"${LIBPQ_SOURCE_DIR}/common/hmac_openssl.c"
"${LIBPQ_SOURCE_DIR}/common/cryptohash.c"
"${LIBPQ_SOURCE_DIR}/common/saslprep.c"
"${LIBPQ_SOURCE_DIR}/common/unicode_norm.c"
"${LIBPQ_SOURCE_DIR}/common/ip.c"
"${LIBPQ_SOURCE_DIR}/common/jsonapi.c"
"${LIBPQ_SOURCE_DIR}/common/wchar.c"
"${LIBPQ_SOURCE_DIR}/common/base64.c"
"${LIBPQ_SOURCE_DIR}/common/link-canary.c"
"${LIBPQ_SOURCE_DIR}/common/fe_memutils.c"
"${LIBPQ_SOURCE_DIR}/common/string.c"
"${LIBPQ_SOURCE_DIR}/common/pg_get_line.c"
"${LIBPQ_SOURCE_DIR}/common/stringinfo.c"
"${LIBPQ_SOURCE_DIR}/common/psprintf.c"
"${LIBPQ_SOURCE_DIR}/common/encnames.c"
"${LIBPQ_SOURCE_DIR}/common/logging.c"

"${LIBPQ_SOURCE_DIR}/port/snprintf.c"
"${LIBPQ_SOURCE_DIR}/port/strlcpy.c"
"${LIBPQ_SOURCE_DIR}/port/strerror.c"
"${LIBPQ_SOURCE_DIR}/port/inet_net_ntop.c"
"${LIBPQ_SOURCE_DIR}/port/getpeereid.c"
"${LIBPQ_SOURCE_DIR}/port/chklocale.c"
"${LIBPQ_SOURCE_DIR}/port/noblock.c"
"${LIBPQ_SOURCE_DIR}/port/pg_strong_random.c"
"${LIBPQ_SOURCE_DIR}/port/pgstrcasecmp.c"
"${LIBPQ_SOURCE_DIR}/port/thread.c"
"${LIBPQ_SOURCE_DIR}/port/path.c"
)

add_library(_libpq ${SRCS})

add_definitions(-DHAVE_BIO_METH_NEW)
add_definitions(-DHAVE_HMAC_CTX_NEW)
add_definitions(-DHAVE_HMAC_CTX_FREE)

target_include_directories (_libpq SYSTEM PUBLIC ${LIBPQ_SOURCE_DIR})
target_include_directories (_libpq SYSTEM PUBLIC "${LIBPQ_SOURCE_DIR}/include")
target_include_directories (_libpq SYSTEM PRIVATE "${LIBPQ_SOURCE_DIR}/configs")

# NOTE: this is a dirty hack; pg_config.h should instead be shipped per OS
# (as is done for jemalloc), not as one generic header for all OSes as now.
if (OS_DARWIN OR OS_FREEBSD OR USE_MUSL)
target_compile_definitions(_libpq PRIVATE -DSTRERROR_R_INT=1)
endif()

target_link_libraries (_libpq PRIVATE OpenSSL::SSL)

add_library(ch_contrib::libpq ALIAS _libpq)
@ -1,10 +1,3 @@
option(ENABLE_LIBPQXX "Enable libpqxx" ${ENABLE_LIBRARIES})

if (NOT ENABLE_LIBPQXX)
message(STATUS "Not using libpqxx")
return()
endif()

set (LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/libpqxx")

set (SRCS
2
contrib/libuv
vendored
2
contrib/libuv
vendored
@ -1 +1 @@
Subproject commit 4482964660c77eec1166cd7d14fb915e3dbd774a
Subproject commit 714b58b9849568211ade86b44dd91d37f8a2175e
@ -10,6 +10,7 @@ set(uv_sources
src/random.c
src/strscpy.c
src/strtok.c
src/thread-common.c
src/threadpool.c
src/timer.c
src/uv-common.c
@ -70,10 +71,7 @@ if(CMAKE_SYSTEM_NAME STREQUAL "Linux")
list(APPEND uv_defines _GNU_SOURCE _POSIX_C_SOURCE=200112)
list(APPEND uv_libraries rt)
list(APPEND uv_sources
src/unix/epoll.c
src/unix/linux-core.c
src/unix/linux-inotify.c
src/unix/linux-syscalls.c
src/unix/linux.c
src/unix/procfs-exepath.c
src/unix/random-getrandom.c
src/unix/random-sysctl-linux.c)
@ -140,6 +140,12 @@ if (CMAKE_CROSSCOMPILING)
message (STATUS "CROSS COMPILING SET LLVM HOST TRIPLE ${LLVM_HOST_TRIPLE}")
endif()

# llvm-project/llvm/cmake/config-ix.cmake does a weird thing: it defines _LARGEFILE64_SOURCE,
# then checks whether the lseek64() function exists, then undefines _LARGEFILE64_SOURCE.
# Then the actual code that uses this function *doesn't* define _LARGEFILE64_SOURCE, so lseek64()
# may not exist and compilation fails. This happens with musl.
add_compile_definitions("_LARGEFILE64_SOURCE")
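A minimal sketch of the failure mode described above, assuming the musl toolchain behavior at the time (illustrative, not from the LLVM tree):

/* Without _LARGEFILE64_SOURCE, musl's <unistd.h> does not declare lseek64,
   and a translation unit like this fails to compile; defining the macro
   globally, as add_compile_definitions() does above, keeps it visible. */
#define _LARGEFILE64_SOURCE
#include <unistd.h>

off64_t file_size(int fd)
{
    return lseek64(fd, 0, SEEK_END);
}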
add_subdirectory ("${LLVM_SOURCE_DIR}" "${LLVM_BINARY_DIR}")

set_directory_properties (PROPERTIES
2
contrib/openssl
vendored
2
contrib/openssl
vendored
@ -1 +1 @@
Subproject commit 66deddc1e53cda8706604a019777259372d1bd62
Subproject commit b3e62c440f390e12e77c80675f883af82ad3d5ed
1
contrib/postgres
vendored
Submodule
1
contrib/postgres
vendored
Submodule
@ -0,0 +1 @@
Subproject commit cfd77000af28469fcb650485bad65a35e7649e41
78
contrib/postgres-cmake/CMakeLists.txt
Normal file
78
contrib/postgres-cmake/CMakeLists.txt
Normal file
@ -0,0 +1,78 @@
# Build description for libpq which is part of the PostgreSQL sources

set(POSTGRES_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/postgres")
set(LIBPQ_SOURCE_DIR "${POSTGRES_SOURCE_DIR}/src/interfaces/libpq")
set(LIBPQ_CMAKE_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/postgres-cmake")

set(SRCS
"${LIBPQ_SOURCE_DIR}/fe-auth.c"
"${LIBPQ_SOURCE_DIR}/fe-auth-scram.c"
"${LIBPQ_SOURCE_DIR}/fe-connect.c"
"${LIBPQ_SOURCE_DIR}/fe-exec.c"
"${LIBPQ_SOURCE_DIR}/fe-lobj.c"
"${LIBPQ_SOURCE_DIR}/fe-misc.c"
"${LIBPQ_SOURCE_DIR}/fe-print.c"
"${LIBPQ_SOURCE_DIR}/fe-trace.c"
"${LIBPQ_SOURCE_DIR}/fe-protocol3.c"
"${LIBPQ_SOURCE_DIR}/fe-secure.c"
"${LIBPQ_SOURCE_DIR}/fe-secure-common.c"
"${LIBPQ_SOURCE_DIR}/fe-secure-openssl.c"
"${LIBPQ_SOURCE_DIR}/legacy-pqsignal.c"
"${LIBPQ_SOURCE_DIR}/libpq-events.c"
"${LIBPQ_SOURCE_DIR}/pqexpbuffer.c"

"${POSTGRES_SOURCE_DIR}/src/common/scram-common.c"
"${POSTGRES_SOURCE_DIR}/src/common/sha2.c"
"${POSTGRES_SOURCE_DIR}/src/common/sha1.c"
"${POSTGRES_SOURCE_DIR}/src/common/md5.c"
"${POSTGRES_SOURCE_DIR}/src/common/md5_common.c"
"${POSTGRES_SOURCE_DIR}/src/common/hmac_openssl.c"
"${POSTGRES_SOURCE_DIR}/src/common/cryptohash.c"
"${POSTGRES_SOURCE_DIR}/src/common/saslprep.c"
"${POSTGRES_SOURCE_DIR}/src/common/unicode_norm.c"
"${POSTGRES_SOURCE_DIR}/src/common/ip.c"
"${POSTGRES_SOURCE_DIR}/src/common/jsonapi.c"
"${POSTGRES_SOURCE_DIR}/src/common/wchar.c"
"${POSTGRES_SOURCE_DIR}/src/common/base64.c"
"${POSTGRES_SOURCE_DIR}/src/common/link-canary.c"
"${POSTGRES_SOURCE_DIR}/src/common/fe_memutils.c"
"${POSTGRES_SOURCE_DIR}/src/common/string.c"
"${POSTGRES_SOURCE_DIR}/src/common/pg_get_line.c"
"${POSTGRES_SOURCE_DIR}/src/common/stringinfo.c"
"${POSTGRES_SOURCE_DIR}/src/common/psprintf.c"
"${POSTGRES_SOURCE_DIR}/src/common/encnames.c"
"${POSTGRES_SOURCE_DIR}/src/common/logging.c"

"${POSTGRES_SOURCE_DIR}/src/port/snprintf.c"
"${POSTGRES_SOURCE_DIR}/src/port/strlcpy.c"
"${POSTGRES_SOURCE_DIR}/src/port/strerror.c"
"${POSTGRES_SOURCE_DIR}/src/port/inet_net_ntop.c"
"${POSTGRES_SOURCE_DIR}/src/port/getpeereid.c"
"${POSTGRES_SOURCE_DIR}/src/port/chklocale.c"
"${POSTGRES_SOURCE_DIR}/src/port/noblock.c"
"${POSTGRES_SOURCE_DIR}/src/port/pg_strong_random.c"
"${POSTGRES_SOURCE_DIR}/src/port/pgstrcasecmp.c"
"${POSTGRES_SOURCE_DIR}/src/port/thread.c"
"${POSTGRES_SOURCE_DIR}/src/port/path.c"
)

add_library(_libpq ${SRCS})

add_definitions(-DHAVE_BIO_METH_NEW)
add_definitions(-DHAVE_HMAC_CTX_NEW)
add_definitions(-DHAVE_HMAC_CTX_FREE)

target_include_directories (_libpq SYSTEM PUBLIC ${LIBPQ_SOURCE_DIR})
target_include_directories (_libpq SYSTEM PUBLIC "${POSTGRES_SOURCE_DIR}/src/include")
target_include_directories (_libpq SYSTEM PUBLIC "${LIBPQ_CMAKE_SOURCE_DIR}") # pre-generated headers

# NOTE: this is a dirty hack; pg_config.h should instead be shipped per OS
# (as is done for jemalloc), not as one generic header for all OSes as now.
if (OS_DARWIN OR OS_FREEBSD OR USE_MUSL)
target_compile_definitions(_libpq PRIVATE -DSTRERROR_R_INT=1)
endif()

target_link_libraries (_libpq PRIVATE OpenSSL::SSL)

add_library(ch_contrib::libpq ALIAS _libpq)
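For context on the STRERROR_R_INT define above: the XSI version of strerror_r() returns int, while the GNU version returns char *, so portable callers must branch on which flavor they get. A hypothetical caller (not from the PostgreSQL sources):

#include <stdio.h>
#include <string.h>

static void print_error(int err)
{
    char buf[256];
#ifdef STRERROR_R_INT
    /* XSI flavor: returns 0 on success and fills buf. */
    if (strerror_r(err, buf, sizeof(buf)) == 0)
        printf("%s\n", buf);
#else
    /* GNU flavor: returns a pointer, which may or may not be buf. */
    printf("%s\n", strerror_r(err, buf, sizeof(buf)));
#endif
}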
943
contrib/postgres-cmake/pg_config.h
Normal file
943
contrib/postgres-cmake/pg_config.h
Normal file
@ -0,0 +1,943 @@
/* src/include/pg_config.h. Generated from pg_config.h.in by configure. */
/* src/include/pg_config.h.in. Generated from configure.in by autoheader. */

/* Define if building universal (internal helper macro) */
/* #undef AC_APPLE_UNIVERSAL_BUILD */

/* The normal alignment of `double', in bytes. */
#define ALIGNOF_DOUBLE 4

/* The normal alignment of `int', in bytes. */
#define ALIGNOF_INT 4

/* The normal alignment of `long', in bytes. */
#define ALIGNOF_LONG 4

/* The normal alignment of `long long int', in bytes. */
#define ALIGNOF_LONG_LONG_INT 4

/* The normal alignment of `short', in bytes. */
#define ALIGNOF_SHORT 2

/* Size of a disk block --- this also limits the size of a tuple. You can set
   it bigger if you need bigger tuples (although TOAST should reduce the need
   to have large tuples, since fields can be spread across multiple tuples).
   BLCKSZ must be a power of 2. The maximum possible value of BLCKSZ is
   currently 2^15 (32768). This is determined by the 15-bit widths of the
   lp_off and lp_len fields in ItemIdData (see include/storage/itemid.h).
   Changing BLCKSZ requires an initdb. */
#define BLCKSZ 8192
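The constraints in the comment above lend themselves to a compile-time check; a hypothetical assertion placed after the define (not part of pg_config.h) could read:

_Static_assert((BLCKSZ & (BLCKSZ - 1)) == 0, "BLCKSZ must be a power of 2");
_Static_assert(BLCKSZ <= 32768, "BLCKSZ must fit the 15-bit lp_off/lp_len fields");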
/* Define to the default TCP port number on which the server listens and to
   which clients will try to connect. This can be overridden at run-time, but
   it's convenient if your clients have the right default compiled in.
   (--with-pgport=PORTNUM) */
#define DEF_PGPORT 5432

/* Define to the default TCP port number as a string constant. */
#define DEF_PGPORT_STR "5432"

/* Define to the file name extension of dynamically-loadable modules. */
#define DLSUFFIX ".so"

/* Define to build with GSSAPI support. (--with-gssapi) */
//#define ENABLE_GSS 0

/* Define to 1 if you want National Language Support. (--enable-nls) */
/* #undef ENABLE_NLS */

/* Define to 1 to build client libraries as thread-safe code.
   (--enable-thread-safety) */
#define ENABLE_THREAD_SAFETY 1

/* Define to nothing if C supports flexible array members, and to 1 if it does
   not. That way, with a declaration like `struct s { int n; double
   d[FLEXIBLE_ARRAY_MEMBER]; };', the struct hack can be used with pre-C99
   compilers. When computing the size of such an object, don't use 'sizeof
   (struct s)' as it overestimates the size. Use 'offsetof (struct s, d)'
   instead. Don't use 'offsetof (struct s, d[0])', as this doesn't work with
   MSVC and with C++ compilers. */
#define FLEXIBLE_ARRAY_MEMBER /**/
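The sizeof-versus-offsetof advice above is easy to get wrong; a small illustrative allocation (hypothetical, not from the PostgreSQL sources):

#include <stddef.h>
#include <stdlib.h>

struct s
{
    int n;
    double d[FLEXIBLE_ARRAY_MEMBER];  /* expands to nothing on C99 compilers */
};

struct s *make_s(int n)
{
    /* offsetof(struct s, d) is the size of the fixed part only;
       sizeof(struct s) may include trailing padding and overestimate it. */
    struct s *p = malloc(offsetof(struct s, d) + n * sizeof(double));
    if (p)
        p->n = n;
    return p;
}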
/* float4 values are passed by value if 'true', by reference if 'false' */
#define FLOAT4PASSBYVAL true

/* float8, int8, and related values are passed by value if 'true', by
   reference if 'false' */
#define FLOAT8PASSBYVAL false

/* Define to 1 if gettimeofday() takes only 1 argument. */
/* #undef GETTIMEOFDAY_1ARG */

#ifdef GETTIMEOFDAY_1ARG
# define gettimeofday(a,b) gettimeofday(a)
#endif

/* Define to 1 if you have the `append_history' function. */
/* #undef HAVE_APPEND_HISTORY */

/* Define to 1 if you want to use atomics if available. */
#define HAVE_ATOMICS 1

/* Define to 1 if you have the <atomic.h> header file. */
/* #undef HAVE_ATOMIC_H */

/* Define to 1 if you have the `cbrt' function. */
#define HAVE_CBRT 1

/* Define to 1 if you have the `class' function. */
/* #undef HAVE_CLASS */

/* Define to 1 if you have the <crtdefs.h> header file. */
/* #undef HAVE_CRTDEFS_H */

/* Define to 1 if you have the `crypt' function. */
#define HAVE_CRYPT 1

/* Define to 1 if you have the <crypt.h> header file. */
#define HAVE_CRYPT_H 1

/* Define to 1 if you have the declaration of `fdatasync', and to 0 if you
   don't. */
#define HAVE_DECL_FDATASYNC 1

/* Define to 1 if you have the declaration of `F_FULLFSYNC', and to 0 if you
   don't. */
#define HAVE_DECL_F_FULLFSYNC 0

/* Define to 1 if you have the declaration of `posix_fadvise', and to 0 if you
   don't. */
#define HAVE_DECL_POSIX_FADVISE 1

/* Define to 1 if you have the declaration of `snprintf', and to 0 if you
   don't. */
#define HAVE_DECL_SNPRINTF 1

/* Define to 1 if you have the declaration of `sigwait', and to 0 if you don't. */
#define HAVE_DECL_SIGWAIT 1

/* Define to 1 if you have the declaration of `strlcat', and to 0 if you
   don't. */
#if OS_DARWIN
#define HAVE_DECL_STRLCAT 1
#endif

/* Define to 1 if you have the declaration of `strlcpy', and to 0 if you
   don't. */
#if OS_DARWIN
#define HAVE_DECL_STRLCPY 1
#endif

/* Define to 1 if you have the declaration of `sys_siglist', and to 0 if you
   don't. */
#define HAVE_DECL_SYS_SIGLIST 1

/* Define to 1 if you have the declaration of `vsnprintf', and to 0 if you
   don't. */
#define HAVE_DECL_VSNPRINTF 1

/* Define to 1 if you have the <dld.h> header file. */
/* #undef HAVE_DLD_H */

/* Define to 1 if you have the `dlopen' function. */
#define HAVE_DLOPEN 1

/* Define to 1 if you have the <editline/history.h> header file. */
/* #undef HAVE_EDITLINE_HISTORY_H */

/* Define to 1 if you have the <editline/readline.h> header file. */
#define HAVE_EDITLINE_READLINE_H 1

/* Define to 1 if you have the `fdatasync' function. */
#define HAVE_FDATASYNC 1

/* Define to 1 if you have the `fls' function. */
/* #undef HAVE_FLS */

/* Define to 1 if you have the `fpclass' function. */
/* #undef HAVE_FPCLASS */

/* Define to 1 if you have the `fp_class' function. */
/* #undef HAVE_FP_CLASS */

/* Define to 1 if you have the `fp_class_d' function. */
/* #undef HAVE_FP_CLASS_D */

/* Define to 1 if you have the <fp_class.h> header file. */
/* #undef HAVE_FP_CLASS_H */

/* Define to 1 if fseeko (and presumably ftello) exists and is declared. */
#define HAVE_FSEEKO 1

/* Define to 1 if your compiler understands __func__. */
#define HAVE_FUNCNAME__FUNC 1

/* Define to 1 if your compiler understands __FUNCTION__. */
/* #undef HAVE_FUNCNAME__FUNCTION */

/* Define to 1 if you have __atomic_compare_exchange_n(int *, int *, int). */
/* #undef HAVE_GCC__ATOMIC_INT32_CAS */

/* Define to 1 if you have __atomic_compare_exchange_n(int64 *, int *, int64).
   */
/* #undef HAVE_GCC__ATOMIC_INT64_CAS */

/* Define to 1 if you have __sync_lock_test_and_set(char *) and friends. */
#define HAVE_GCC__SYNC_CHAR_TAS 1

/* Define to 1 if you have __sync_compare_and_swap(int *, int, int). */
/* #undef HAVE_GCC__SYNC_INT32_CAS */

/* Define to 1 if you have __sync_lock_test_and_set(int *) and friends. */
#define HAVE_GCC__SYNC_INT32_TAS 1

/* Define to 1 if you have __sync_compare_and_swap(int64 *, int64, int64). */
/* #undef HAVE_GCC__SYNC_INT64_CAS */

/* Define to 1 if you have the `getaddrinfo' function. */
#define HAVE_GETADDRINFO 1

/* Define to 1 if you have the `gethostbyname_r' function. */
#define HAVE_GETHOSTBYNAME_R 1

/* Define to 1 if you have the `getifaddrs' function. */
#define HAVE_GETIFADDRS 1

/* Define to 1 if you have the `getopt' function. */
#define HAVE_GETOPT 1

/* Define to 1 if you have the <getopt.h> header file. */
#define HAVE_GETOPT_H 1

/* Define to 1 if you have the `getopt_long' function. */
#define HAVE_GETOPT_LONG 1

/* Define to 1 if you have the `getpeereid' function. */
/* #undef HAVE_GETPEEREID */

/* Define to 1 if you have the `getpeerucred' function. */
/* #undef HAVE_GETPEERUCRED */

/* Define to 1 if you have the `getpwuid_r' function. */
#define HAVE_GETPWUID_R 1

/* Define to 1 if you have the `getrlimit' function. */
#define HAVE_GETRLIMIT 1

/* Define to 1 if you have the `getrusage' function. */
#define HAVE_GETRUSAGE 1

/* Define to 1 if you have the `gettimeofday' function. */
/* #undef HAVE_GETTIMEOFDAY */

/* Define to 1 if you have the <gssapi/gssapi.h> header file. */
//#define HAVE_GSSAPI_GSSAPI_H 0

/* Define to 1 if you have the <gssapi.h> header file. */
/* #undef HAVE_GSSAPI_H */

/* Define to 1 if you have the <history.h> header file. */
/* #undef HAVE_HISTORY_H */

/* Define to 1 if you have the `history_truncate_file' function. */
#define HAVE_HISTORY_TRUNCATE_FILE 1

/* Define to 1 if you have the <ieeefp.h> header file. */
/* #undef HAVE_IEEEFP_H */

/* Define to 1 if you have the <ifaddrs.h> header file. */
#define HAVE_IFADDRS_H 1

/* Define to 1 if you have the `inet_aton' function. */
#define HAVE_INET_ATON 1

/* Define to 1 if you have the `inet_pton' function. */
#define HAVE_INET_PTON 1

/* Define to 1 if the system has the type `int64'. */
/* #undef HAVE_INT64 */

/* Define to 1 if the system has the type `int8'. */
/* #undef HAVE_INT8 */

/* Define to 1 if the system has the type `intptr_t'. */
#define HAVE_INTPTR_T 1

/* Define to 1 if you have the <inttypes.h> header file. */
#define HAVE_INTTYPES_H 1

/* Define to 1 if you have the global variable 'int opterr'. */
#define HAVE_INT_OPTERR 1

/* Define to 1 if you have the global variable 'int optreset'. */
/* #undef HAVE_INT_OPTRESET */

/* Define to 1 if you have the global variable 'int timezone'. */
#define HAVE_INT_TIMEZONE 1

/* Define to 1 if you have support for IPv6. */
#define HAVE_IPV6 1

/* Define to 1 if you have isinf(). */
#define HAVE_ISINF 1

/* Define to 1 if you have the <langinfo.h> header file. */
#define HAVE_LANGINFO_H 1

/* Define to 1 if you have the <ldap.h> header file. */
//#define HAVE_LDAP_H 0

/* Define to 1 if you have the `crypto' library (-lcrypto). */
#define HAVE_LIBCRYPTO 1

/* Define to 1 if you have the `ldap' library (-lldap). */
//#define HAVE_LIBLDAP 0

/* Define to 1 if you have the `m' library (-lm). */
#define HAVE_LIBM 1

/* Define to 1 if you have the `pam' library (-lpam). */
#define HAVE_LIBPAM 1

/* Define if you have a function readline library */
#define HAVE_LIBREADLINE 1

/* Define to 1 if you have the `selinux' library (-lselinux). */
/* #undef HAVE_LIBSELINUX */

/* Define to 1 if you have the `ssl' library (-lssl). */
#define HAVE_LIBSSL 0

/* Define to 1 if you have the `wldap32' library (-lwldap32). */
/* #undef HAVE_LIBWLDAP32 */

/* Define to 1 if you have the `xml2' library (-lxml2). */
#define HAVE_LIBXML2 1

/* Define to 1 if you have the `xslt' library (-lxslt). */
#define HAVE_LIBXSLT 1

/* Define to 1 if you have the `z' library (-lz). */
#define HAVE_LIBZ 1

/* Define to 1 if you have the `zstd' library (-lzstd). */
/* #undef HAVE_LIBZSTD */

/* Define to 1 if constants of type 'long long int' should have the suffix LL.
   */
#define HAVE_LL_CONSTANTS 1

/* Define to 1 if the system has the type `locale_t'. */
#define HAVE_LOCALE_T 1

/* Define to 1 if `long int' works and is 64 bits. */
/* #undef HAVE_LONG_INT_64 */

/* Define to 1 if the system has the type `long long int'. */
#define HAVE_LONG_LONG_INT 1

/* Define to 1 if `long long int' works and is 64 bits. */
#define HAVE_LONG_LONG_INT_64 1

/* Define to 1 if you have the <mbarrier.h> header file. */
/* #undef HAVE_MBARRIER_H */

/* Define to 1 if you have the `mbstowcs_l' function. */
/* #undef HAVE_MBSTOWCS_L */

/* Define to 1 if you have the `memmove' function. */
#define HAVE_MEMMOVE 1

/* Define to 1 if you have the <memory.h> header file. */
#define HAVE_MEMORY_H 1

/* Define to 1 if the system has the type `MINIDUMP_TYPE'. */
/* #undef HAVE_MINIDUMP_TYPE */

/* Define to 1 if you have the `mkdtemp' function. */
#define HAVE_MKDTEMP 1

/* Define to 1 if you have the <netinet/in.h> header file. */
#define HAVE_NETINET_IN_H 1

/* Define to 1 if you have the <netinet/tcp.h> header file. */
#define HAVE_NETINET_TCP_H 1

/* Define to 1 if you have the <net/if.h> header file. */
#define HAVE_NET_IF_H 1

/* Define to 1 if you have the <ossp/uuid.h> header file. */
/* #undef HAVE_OSSP_UUID_H */

/* Define to 1 if you have the <pam/pam_appl.h> header file. */
/* #undef HAVE_PAM_PAM_APPL_H */

/* Define to 1 if you have the `poll' function. */
#define HAVE_POLL 1

/* Define to 1 if you have the <poll.h> header file. */
#define HAVE_POLL_H 1

/* Define to 1 if you have a POSIX-conforming sigwait declaration. */
/* #undef HAVE_POSIX_DECL_SIGWAIT */

/* Define to 1 if you have the `posix_fadvise' function. */
#define HAVE_POSIX_FADVISE 1

/* Define to 1 if you have the declaration of `preadv', and to 0 if you don't. */
/* #undef HAVE_DECL_PREADV */

/* Define to 1 if you have the declaration of `pwritev', and to 0 if you don't. */
/* #define HAVE_DECL_PWRITEV */

/* Define to 1 if you have the `X509_get_signature_info' function. */
/* #undef HAVE_X509_GET_SIGNATURE_INFO */

/* Define to 1 if you have the POSIX signal interface. */
#define HAVE_POSIX_SIGNALS 1

/* Define to 1 if the assembler supports PPC's LWARX mutex hint bit. */
/* #undef HAVE_PPC_LWARX_MUTEX_HINT */

/* Define to 1 if you have the `pstat' function. */
/* #undef HAVE_PSTAT */

/* Define to 1 if the PS_STRINGS thing exists. */
/* #undef HAVE_PS_STRINGS */

/* Define to 1 if you have the `pthread_is_threaded_np' function. */
/* #undef HAVE_PTHREAD_IS_THREADED_NP */

/* Define to 1 if you have the <pwd.h> header file. */
#define HAVE_PWD_H 1

/* Define to 1 if you have the <readline.h> header file. */
/* #undef HAVE_READLINE_H */

/* Define to 1 if you have the <readline/history.h> header file. */
#define HAVE_READLINE_HISTORY_H 1

/* Define to 1 if you have the <readline/readline.h> header file. */
/* #undef HAVE_READLINE_READLINE_H */

/* Define to 1 if you have the `readlink' function. */
#define HAVE_READLINK 1

/* Define to 1 if you have the `rint' function. */
#define HAVE_RINT 1

/* Define to 1 if you have the `rl_completion_matches' function. */
#define HAVE_RL_COMPLETION_MATCHES 1

/* Define to 1 if you have the `rl_filename_completion_function' function. */
#define HAVE_RL_FILENAME_COMPLETION_FUNCTION 1

/* Define to 1 if you have the `rl_reset_screen_size' function. */
/* #undef HAVE_RL_RESET_SCREEN_SIZE */

/* Define to 1 if you have the `rl_variable_bind' function. */
#define HAVE_RL_VARIABLE_BIND 1

/* Define to 1 if you have the <security/pam_appl.h> header file. */
#define HAVE_SECURITY_PAM_APPL_H 1

/* Define to 1 if you have the `setproctitle' function. */
/* #undef HAVE_SETPROCTITLE */

/* Define to 1 if you have the `setsid' function. */
#define HAVE_SETSID 1

/* Define to 1 if you have the `shm_open' function. */
#define HAVE_SHM_OPEN 1

/* Define to 1 if the system has the type `socklen_t'. */
#define HAVE_SOCKLEN_T 1

/* Define to 1 if you have the `sigprocmask' function. */
#define HAVE_SIGPROCMASK 1

/* Define to 1 if you have sigsetjmp(). */
#define HAVE_SIGSETJMP 1

/* Define to 1 if the system has the type `sig_atomic_t'. */
#define HAVE_SIG_ATOMIC_T 1

/* Define to 1 if you have the `snprintf' function. */
#define HAVE_SNPRINTF 1

/* Define to 1 if you have spinlocks. */
#define HAVE_SPINLOCKS 1

/* Define to 1 if you have the `SSL_CTX_set_num_tickets' function. */
/* #define HAVE_SSL_CTX_SET_NUM_TICKETS */

/* Define to 1 if you have the `SSL_get_current_compression' function. */
#define HAVE_SSL_GET_CURRENT_COMPRESSION 0

/* Define to 1 if you have the <stdint.h> header file. */
#define HAVE_STDINT_H 1

/* Define to 1 if you have the <stdlib.h> header file. */
#define HAVE_STDLIB_H 1

/* Define to 1 if you have the `strerror' function. */
#define HAVE_STRERROR 1

/* Define to 1 if you have the `strerror_r' function. */
#define HAVE_STRERROR_R 1

/* Define to 1 if you have the <strings.h> header file. */
//#define HAVE_STRINGS_H 1

/* Define to 1 if you have the <string.h> header file. */
#define HAVE_STRING_H 1

/* Define to 1 if you have the `strlcat' function. */
/* #undef HAVE_STRLCAT */

/* Define to 1 if you have the `strlcpy' function. */
/* #undef HAVE_STRLCPY */

/* Define to 1 if you have the `strtoll' function. */
#define HAVE_STRTOLL 1

#if (!OS_DARWIN)
#define HAVE_STRCHRNUL 1
#endif

/* Define to 1 if you have the `strtoq' function. */
/* #undef HAVE_STRTOQ */

/* Define to 1 if you have the `strtoull' function. */
#define HAVE_STRTOULL 1

/* Define to 1 if you have the `strtouq' function. */
/* #undef HAVE_STRTOUQ */

/* Define to 1 if the system has the type `struct addrinfo'. */
#define HAVE_STRUCT_ADDRINFO 1

/* Define to 1 if the system has the type `struct cmsgcred'. */
/* #undef HAVE_STRUCT_CMSGCRED */

/* Define to 1 if the system has the type `struct option'. */
#define HAVE_STRUCT_OPTION 1

/* Define to 1 if `sa_len' is a member of `struct sockaddr'. */
/* #undef HAVE_STRUCT_SOCKADDR_SA_LEN */

/* Define to 1 if the system has the type `struct sockaddr_storage'. */
#define HAVE_STRUCT_SOCKADDR_STORAGE 1

/* Define to 1 if `ss_family' is a member of `struct sockaddr_storage'. */
#define HAVE_STRUCT_SOCKADDR_STORAGE_SS_FAMILY 1

/* Define to 1 if `ss_len' is a member of `struct sockaddr_storage'. */
/* #undef HAVE_STRUCT_SOCKADDR_STORAGE_SS_LEN */

/* Define to 1 if `__ss_family' is a member of `struct sockaddr_storage'. */
/* #undef HAVE_STRUCT_SOCKADDR_STORAGE___SS_FAMILY */

/* Define to 1 if `__ss_len' is a member of `struct sockaddr_storage'. */
/* #undef HAVE_STRUCT_SOCKADDR_STORAGE___SS_LEN */

/* Define to 1 if `tm_zone' is a member of `struct tm'. */
#define HAVE_STRUCT_TM_TM_ZONE 1

/* Define to 1 if you have the `symlink' function. */
#define HAVE_SYMLINK 1

/* Define to 1 if you have the `sync_file_range' function. */
/* #undef HAVE_SYNC_FILE_RANGE */

/* Define to 1 if you have the syslog interface. */
#define HAVE_SYSLOG 1

/* Define to 1 if you have the <sys/ioctl.h> header file. */
#define HAVE_SYS_IOCTL_H 1

/* Define to 1 if you have the <sys/ipc.h> header file. */
#define HAVE_SYS_IPC_H 1

/* Define to 1 if you have the <sys/personality.h> header file. */
/* #undef HAVE_SYS_PERSONALITY_H */

/* Define to 1 if you have the <sys/poll.h> header file. */
#define HAVE_SYS_POLL_H 1

/* Define to 1 if you have the <sys/pstat.h> header file. */
/* #undef HAVE_SYS_PSTAT_H */

/* Define to 1 if you have the <sys/resource.h> header file. */
#define HAVE_SYS_RESOURCE_H 1

/* Define to 1 if you have the <sys/select.h> header file. */
#define HAVE_SYS_SELECT_H 1

/* Define to 1 if you have the <sys/sem.h> header file. */
#define HAVE_SYS_SEM_H 1

/* Define to 1 if you have the <sys/shm.h> header file. */
#define HAVE_SYS_SHM_H 1

/* Define to 1 if you have the <sys/signalfd.h> header file. */
/* #undef HAVE_SYS_SIGNALFD_H */

/* Define to 1 if you have the <sys/socket.h> header file. */
#define HAVE_SYS_SOCKET_H 1

/* Define to 1 if you have the <sys/sockio.h> header file. */
/* #undef HAVE_SYS_SOCKIO_H */

/* Define to 1 if you have the <sys/stat.h> header file. */
#define HAVE_SYS_STAT_H 1

/* Define to 1 if you have the <sys/tas.h> header file. */
/* #undef HAVE_SYS_TAS_H */

/* Define to 1 if you have the <sys/time.h> header file. */
#define HAVE_SYS_TIME_H 1

/* Define to 1 if you have the <sys/types.h> header file. */
#define HAVE_SYS_TYPES_H 1

/* Define to 1 if you have the <sys/ucred.h> header file. */
#if (OS_DARWIN || OS_FREEBSD)
#define HAVE_SYS_UCRED_H 1
#endif

/* Define to 1 if you have the <sys/un.h> header file. */
#define HAVE_SYS_UN_H 1
#define _GNU_SOURCE 1 /* Needed for glibc struct ucred */

/* Define to 1 if you have the <termios.h> header file. */
#define HAVE_TERMIOS_H 1

/* Define to 1 if your `struct tm' has `tm_zone'. Deprecated, use
   `HAVE_STRUCT_TM_TM_ZONE' instead. */
#define HAVE_TM_ZONE 1

/* Define to 1 if you have the `towlower' function. */
#define HAVE_TOWLOWER 1

/* Define to 1 if you have the external array `tzname'. */
#define HAVE_TZNAME 1

/* Define to 1 if you have the <ucred.h> header file. */
/* #undef HAVE_UCRED_H */
|
||||
|
||||
/* Define to 1 if the system has the type `uint64'. */
|
||||
/* #undef HAVE_UINT64 */
|
||||
|
||||
/* Define to 1 if the system has the type `uint8'. */
|
||||
/* #undef HAVE_UINT8 */
|
||||
|
||||
/* Define to 1 if the system has the type `uintptr_t'. */
|
||||
#define HAVE_UINTPTR_T 1
|
||||
|
||||
/* Define to 1 if the system has the type `union semun'. */
|
||||
/* #undef HAVE_UNION_SEMUN */
|
||||
|
||||
/* Define to 1 if you have the <unistd.h> header file. */
|
||||
#define HAVE_UNISTD_H 1
|
||||
|
||||
/* Define to 1 if you have unix sockets. */
|
||||
#define HAVE_UNIX_SOCKETS 1
|
||||
|
||||
/* Define to 1 if you have the `unsetenv' function. */
|
||||
#define HAVE_UNSETENV 1
|
||||
|
||||
/* Define to 1 if the system has the type `unsigned long long int'. */
|
||||
#define HAVE_UNSIGNED_LONG_LONG_INT 1
|
||||
|
||||
/* Define to 1 if you have the `utime' function. */
|
||||
#define HAVE_UTIME 1
|
||||
|
||||
/* Define to 1 if you have the `utimes' function. */
|
||||
#define HAVE_UTIMES 1
|
||||
|
||||
/* Define to 1 if you have the <utime.h> header file. */
|
||||
#define HAVE_UTIME_H 1
|
||||
|
||||
/* Define to 1 if you have BSD UUID support. */
|
||||
/* #undef HAVE_UUID_BSD */
|
||||
|
||||
/* Define to 1 if you have E2FS UUID support. */
|
||||
/* #undef HAVE_UUID_E2FS */
|
||||
|
||||
/* Define to 1 if you have the <uuid.h> header file. */
|
||||
#define HAVE_UUID_H 1
|
||||
|
||||
/* Define to 1 if you have OSSP UUID support. */
|
||||
#define HAVE_UUID_OSSP 1
|
||||
|
||||
/* Define to 1 if you have the <uuid/uuid.h> header file. */
|
||||
/* #undef HAVE_UUID_UUID_H */
|
||||
|
||||
/* Define to 1 if you have the `vsnprintf' function. */
|
||||
#define HAVE_VSNPRINTF 1
|
||||
|
||||
/* Define to 1 if you have the <wchar.h> header file. */
|
||||
#define HAVE_WCHAR_H 1
|
||||
|
||||
/* Define to 1 if you have the `wcstombs' function. */
|
||||
#define HAVE_WCSTOMBS 1
|
||||
|
||||
/* Define to 1 if you have the `wcstombs_l' function. */
|
||||
/* #undef HAVE_WCSTOMBS_L */
|
||||
|
||||
/* Define to 1 if you have the <wctype.h> header file. */
|
||||
#define HAVE_WCTYPE_H 1
|
||||
|
||||
/* Define to 1 if you have the <winldap.h> header file. */
|
||||
/* #undef HAVE_WINLDAP_H */
|
||||
|
||||
/* Define to 1 if your compiler understands __builtin_bswap32. */
|
||||
/* #undef HAVE__BUILTIN_BSWAP32 */
|
||||
|
||||
/* Define to 1 if your compiler understands __builtin_constant_p. */
|
||||
#define HAVE__BUILTIN_CONSTANT_P 1
|
||||
|
||||
/* Define to 1 if your compiler understands __builtin_frame_address. */
|
||||
/* #undef HAVE__BUILTIN_FRAME_ADDRESS */
|
||||
|
||||
/* Define to 1 if your compiler understands __builtin_types_compatible_p. */
|
||||
#define HAVE__BUILTIN_TYPES_COMPATIBLE_P 1
|
||||
|
||||
/* Define to 1 if your compiler understands __builtin_unreachable. */
|
||||
/* #undef HAVE__BUILTIN_UNREACHABLE */
|
||||
|
||||
/* Define to 1 if you have __cpuid. */
|
||||
/* #undef HAVE__CPUID */
|
||||
|
||||
/* Define to 1 if you have __get_cpuid. */
|
||||
/* #undef HAVE__GET_CPUID */
|
||||
|
||||
/* Define to 1 if your compiler understands _Static_assert. */
|
||||
/* #undef HAVE__STATIC_ASSERT */
|
||||
|
||||
/* Define to 1 if your compiler understands __VA_ARGS__ in macros. */
|
||||
#define HAVE__VA_ARGS 1
|
||||
|
||||
/* Define to the appropriate snprintf length modifier for 64-bit ints. */
|
||||
#define INT64_MODIFIER "ll"
|
||||
|
||||
/* Define to 1 if `locale_t' requires <xlocale.h>. */
|
||||
/* #undef LOCALE_T_IN_XLOCALE */
|
||||
|
||||
/* Define as the maximum alignment requirement of any C data type. */
|
||||
#define MAXIMUM_ALIGNOF 4
|
||||
|
||||
/* Define bytes to use libc memset(). */
|
||||
#define MEMSET_LOOP_LIMIT 1024
|
||||
|
||||
/* Define to the address where bug reports for this package should be sent. */
|
||||
#define PACKAGE_BUGREPORT "pgsql-bugs@postgresql.org"
|
||||
|
||||
/* Define to the full name of this package. */
|
||||
#define PACKAGE_NAME "PostgreSQL"
|
||||
|
||||
/* Define to the full name and version of this package. */
|
||||
#define PACKAGE_STRING "PostgreSQL 9.5.4"
|
||||
|
||||
/* Define to the one symbol short name of this package. */
|
||||
#define PACKAGE_TARNAME "postgresql"
|
||||
|
||||
/* Define to the home page for this package. */
|
||||
#define PACKAGE_URL ""
|
||||
|
||||
/* Define to the version of this package. */
|
||||
#define PACKAGE_VERSION "9.5.4"
|
||||
|
||||
/* Define to the name of a signed 128-bit integer type. */
|
||||
/* #undef PG_INT128_TYPE */
|
||||
|
||||
/* Define to the name of a signed 64-bit integer type. */
|
||||
#define PG_INT64_TYPE long long int
|
||||
|
||||
/* Define to the name of the default PostgreSQL service principal in Kerberos
|
||||
(GSSAPI). (--with-krb-srvnam=NAME) */
|
||||
#define PG_KRB_SRVNAM "postgres"
|
||||
|
||||
/* PostgreSQL major version as a string */
|
||||
#define PG_MAJORVERSION "9.5"
|
||||
|
||||
/* Define to gnu_printf if compiler supports it, else printf. */
|
||||
#define PG_PRINTF_ATTRIBUTE printf
|
||||
|
||||
/* Define to 1 if "static inline" works without unwanted warnings from
|
||||
compilations where static inline functions are defined but not called. */
|
||||
#define PG_USE_INLINE 1
|
||||
|
||||
/* PostgreSQL version as a string */
|
||||
#define PG_VERSION "9.5.4"
|
||||
|
||||
/* PostgreSQL version as a number */
|
||||
#define PG_VERSION_NUM 90504
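
As a quick aside (not part of the patch): PG_VERSION_NUM packs the version as major * 10000 + minor * 100 + patch, which is why 9.5.4 becomes 90504. A one-line shell check of that arithmetic:

```bash
# Sanity check of the PG_VERSION_NUM encoding for version 9.5.4.
echo $((9 * 10000 + 5 * 100 + 4))   # prints 90504
```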

/* A string containing the version number, platform, and C compiler */
#define PG_VERSION_STR "PostgreSQL 9.5.4 on i686-pc-linux-gnu, compiled by gcc (GCC) 4.1.2 20080704 (Red Hat 4.1.2-55), 32-bit"

/* Define to 1 to allow profiling output to be saved separately for each
   process. */
/* #undef PROFILE_PID_DIR */

/* RELSEG_SIZE is the maximum number of blocks allowed in one disk file. Thus,
   the maximum size of a single file is RELSEG_SIZE * BLCKSZ; relations bigger
   than that are divided into multiple files. RELSEG_SIZE * BLCKSZ must be
   less than your OS' limit on file size. This is often 2 GB or 4 GB in a
   32-bit operating system, unless you have large file support enabled. By
   default, we make the limit 1 GB to avoid any possible integer-overflow
   problems within the OS. A limit smaller than necessary only means we divide
   a large relation into more chunks than necessary, so it seems best to err
   in the direction of a small limit. A power-of-2 value is recommended to
   save a few cycles in md.c, but is not absolutely required. Changing
   RELSEG_SIZE requires an initdb. */
#define RELSEG_SIZE 131072
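
A quick worked example of the arithmetic in the comment above, assuming the stock BLCKSZ of 8192 defined earlier in this header: 131072 blocks of 8 KiB each give exactly 1 GiB per segment file, safely below the 2 GB limit mentioned for 32-bit systems.

```bash
# Sketch: maximum size of one relation segment file (RELSEG_SIZE * BLCKSZ).
echo $((131072 * 8192))                        # 1073741824 bytes
echo $((131072 * 8192 / 1024 / 1024 / 1024))   # 1 (GiB)
```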

/* The size of `long', as computed by sizeof. */
#define SIZEOF_LONG 4

/* The size of `off_t', as computed by sizeof. */
#define SIZEOF_OFF_T 8

/* The size of `size_t', as computed by sizeof. */
#define SIZEOF_SIZE_T 4

/* The size of `void *', as computed by sizeof. */
#define SIZEOF_VOID_P 4

/* Define to 1 if you have the ANSI C header files. */
#define STDC_HEADERS 1

/* Define to 1 if strerror_r() returns an int. */
/* #undef STRERROR_R_INT */

/* Define to 1 if your <sys/time.h> declares `struct tm'. */
/* #undef TM_IN_SYS_TIME */

/* Define to 1 to build with assertion checks. (--enable-cassert) */
/* #undef USE_ASSERT_CHECKING */

/* Define to 1 to build with Bonjour support. (--with-bonjour) */
/* #undef USE_BONJOUR */

/* Define to 1 if you want float4 values to be passed by value.
   (--enable-float4-byval) */
#define USE_FLOAT4_BYVAL 1

/* Define to 1 if you want float8, int8, etc values to be passed by value.
   (--enable-float8-byval) */
/* #undef USE_FLOAT8_BYVAL */

/* Define to 1 if you want 64-bit integer timestamp and interval support.
   (--enable-integer-datetimes) */
#define USE_INTEGER_DATETIMES 1

/* Define to 1 to build with LDAP support. (--with-ldap) */
//#define USE_LDAP 0

/* Define to 1 to build with XML support. (--with-libxml) */
#define USE_LIBXML 1

/* Define to 1 to use XSLT support when building contrib/xml2.
   (--with-libxslt) */
#define USE_LIBXSLT 1

/* Define to select named POSIX semaphores. */
/* #undef USE_NAMED_POSIX_SEMAPHORES */

/* Define to build with OpenSSL support. (--with-openssl) */
#define USE_OPENSSL 0

#define USE_OPENSSL_RANDOM 0

#define FRONTEND 1

/* Define to 1 to build with PAM support. (--with-pam) */
#define USE_PAM 1

/* Use replacement snprintf() functions. */
/* #undef USE_REPL_SNPRINTF */

/* Define to 1 to use software CRC-32C implementation (slicing-by-8). */
#define USE_SLICING_BY_8_CRC32C 1

/* Define to 1 to use Intel SSE 4.2 CRC instructions. */
/* #undef USE_SSE42_CRC32C */

/* Define to 1 to use Intel SSE 4.2 CRC instructions with a runtime check. */
/* #undef USE_SSE42_CRC32C_WITH_RUNTIME_CHECK */

/* Define to select SysV-style semaphores. */
#define USE_SYSV_SEMAPHORES 1

/* Define to select SysV-style shared memory. */
#define USE_SYSV_SHARED_MEMORY 1

/* Define to select unnamed POSIX semaphores. */
/* #undef USE_UNNAMED_POSIX_SEMAPHORES */

/* Define to select Win32-style semaphores. */
/* #undef USE_WIN32_SEMAPHORES */

/* Define to select Win32-style shared memory. */
/* #undef USE_WIN32_SHARED_MEMORY */

/* Define to 1 to build with ZSTD support. (--with-zstd) */
/* #undef USE_ZSTD */

/* Define to 1 if `wcstombs_l' requires <xlocale.h>. */
/* #undef WCSTOMBS_L_IN_XLOCALE */

/* Define WORDS_BIGENDIAN to 1 if your processor stores words with the most
   significant byte first (like Motorola and SPARC, unlike Intel). */
#if defined AC_APPLE_UNIVERSAL_BUILD
# if defined __BIG_ENDIAN__
#  define WORDS_BIGENDIAN 1
# endif
#else
# ifndef WORDS_BIGENDIAN
/* #  undef WORDS_BIGENDIAN */
# endif
#endif

/* Size of a WAL file block. This need have no particular relation to BLCKSZ.
   XLOG_BLCKSZ must be a power of 2, and if your system supports O_DIRECT I/O,
   XLOG_BLCKSZ must be a multiple of the alignment requirement for direct-I/O
   buffers, else direct I/O may fail. Changing XLOG_BLCKSZ requires an initdb. */
#define XLOG_BLCKSZ 8192

/* XLOG_SEG_SIZE is the size of a single WAL file. This must be a power of 2
   and larger than XLOG_BLCKSZ (preferably, a great deal larger than
   XLOG_BLCKSZ). Changing XLOG_SEG_SIZE requires an initdb. */
#define XLOG_SEG_SIZE (16 * 1024 * 1024)
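
Spelling out the constraints from the two comments above (a sketch, not part of the patch): the 16 MiB segment size is a power of two and an exact multiple of the 8 KiB WAL block size.

```bash
# Sketch: verify the WAL size constants satisfy the stated constraints.
seg=$((16 * 1024 * 1024))    # XLOG_SEG_SIZE = 16777216
blk=8192                     # XLOG_BLCKSZ
echo $((seg % blk))          # 0 -> segment is a whole number of WAL blocks
echo $((seg & (seg - 1)))    # 0 -> seg is a power of two
```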

/* Number of bits in a file offset, on hosts where this is settable. */
#define _FILE_OFFSET_BITS 64

/* Define to 1 to make fseeko visible on some hosts (e.g. glibc 2.2). */
/* #undef _LARGEFILE_SOURCE */

/* Define for large files, on AIX-style hosts. */
/* #undef _LARGE_FILES */

/* Define to `__inline__' or `__inline' if that's what the C compiler
   calls it, or to nothing if 'inline' is not supported under any name. */
#ifndef __cplusplus
/* #undef inline */
#endif

/* Define to the type of a signed integer type wide enough to hold a pointer,
   if such a type exists, and if the system does not define it. */
/* #undef intptr_t */

/* Define to empty if the C compiler does not understand signed types. */
/* #undef signed */

/* Define to the type of an unsigned integer type wide enough to hold a
   pointer, if such a type exists, and if the system does not define it. */
/* #undef uintptr_t */

7
contrib/postgres-cmake/pg_config_ext.h
Normal file
@@ -0,0 +1,7 @@
/*
 * src/include/pg_config_ext.h.in. This is generated manually, not by
 * autoheader, since we want to limit which symbols get defined here.
 */

/* Define to the name of a signed 64-bit integer type. */
#define PG_INT64_TYPE long long int

34
contrib/postgres-cmake/pg_config_os.h
Normal file
@@ -0,0 +1,34 @@
#if defined(OS_DARWIN)

/* src/include/port/darwin.h */
#define __darwin__ 1

#if HAVE_DECL_F_FULLFSYNC /* not present before macOS 10.3 */
#define HAVE_FSYNC_WRITETHROUGH
#endif

#else
/* src/include/port/linux.h */
/*
 * As of July 2007, all known versions of the Linux kernel will sometimes
 * return EIDRM for a shmctl() operation when EINVAL is correct (it happens
 * when the low-order 15 bits of the supplied shm ID match the slot number
 * assigned to a newer shmem segment). We deal with this by assuming that
 * EIDRM means EINVAL in PGSharedMemoryIsInUse(). This is reasonably safe
 * since in fact Linux has no excuse for ever returning EIDRM; it doesn't
 * track removed segments in a way that would allow distinguishing them from
 * private ones. But someday that code might get upgraded, and we'd have
 * to have a kernel version test here.
 */
#define HAVE_LINUX_EIDRM_BUG

/*
 * Set the default wal_sync_method to fdatasync. With recent Linux versions,
 * xlogdefs.h's normal rules will prefer open_datasync, which (a) doesn't
 * perform better and (b) causes outright failures on ext4 data=journal
 * filesystems, because those don't support O_DIRECT.
 */
#define PLATFORM_DEFAULT_SYNC_METHOD SYNC_METHOD_FDATASYNC

#endif

12
contrib/postgres-cmake/pg_config_paths.h
Normal file
@@ -0,0 +1,12 @@
#define PGBINDIR "/bin"
#define PGSHAREDIR "/share"
#define SYSCONFDIR "/etc"
#define INCLUDEDIR "/include"
#define PKGINCLUDEDIR "/include"
#define INCLUDEDIRSERVER "/include/server"
#define LIBDIR "/lib"
#define PKGLIBDIR "/lib"
#define LOCALEDIR "/share/locale"
#define DOCDIR "/doc"
#define HTMLDIR "/doc"
#define MANDIR "/man"

2
contrib/sysroot
vendored
@@ -1 +1 @@
Subproject commit cc385041b226d1fc28ead14dbab5d40a5f821dd8
Subproject commit 5be834147d5b5dd77ca2b821f356982029320513

@@ -34,7 +34,7 @@ RUN arch=${TARGETARCH:-amd64} \
# lts / testing / prestable / etc
ARG REPO_CHANNEL="stable"
ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
ARG VERSION="24.8.2.3"
ARG VERSION="24.8.4.13"
ARG PACKAGES="clickhouse-keeper"
ARG DIRECT_DOWNLOAD_URLS=""

@@ -32,7 +32,7 @@ RUN arch=${TARGETARCH:-amd64} \
# lts / testing / prestable / etc
ARG REPO_CHANNEL="stable"
ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
ARG VERSION="24.8.2.3"
ARG VERSION="24.8.4.13"
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
ARG DIRECT_DOWNLOAD_URLS=""

@@ -28,7 +28,7 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list

ARG REPO_CHANNEL="stable"
ARG REPOSITORY="deb [signed-by=/usr/share/keyrings/clickhouse-keyring.gpg] https://packages.clickhouse.com/deb ${REPO_CHANNEL} main"
ARG VERSION="24.8.2.3"
ARG VERSION="24.8.4.13"
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"

#docker-official-library:off

@@ -109,7 +109,7 @@ if [ -n "$CLICKHOUSE_USER" ] && [ "$CLICKHOUSE_USER" != "default" ] || [ -n "$CL
<networks>
    <ip>::/0</ip>
</networks>
<password>${CLICKHOUSE_PASSWORD}</password>
<password><![CDATA[${CLICKHOUSE_PASSWORD//]]>/]]]]><![CDATA[>}]]></password>
<quota>default</quota>
<access_management>${CLICKHOUSE_ACCESS_MANAGEMENT}</access_management>
</${CLICKHOUSE_USER}>
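
The replacement `<password>` line above is dense, so here is a hedged sketch of what it does: bash's `${var//pattern/replacement}` expansion rewrites every literal `]]>` in the password so that the sequence is split across two CDATA sections, preventing a password that contains `]]>` from terminating the CDATA block early. The password value below is invented for illustration.

```bash
CLICKHOUSE_PASSWORD='pa]]>ss'   # hypothetical password containing "]]>"

# Naive form: the embedded "]]>" would close the CDATA section early.
echo "<password><![CDATA[${CLICKHOUSE_PASSWORD}]]></password>"

# Escaped form, as in the patched entrypoint: each "]]>" is split
# across two CDATA sections, so the XML stays well-formed.
echo "<password><![CDATA[${CLICKHOUSE_PASSWORD//]]>/]]]]><![CDATA[>}]]></password>"
# -> <password><![CDATA[pa]]]]><![CDATA[>ss]]></password>
```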

@@ -124,6 +124,8 @@ function setup_logs_replication
check_logs_credentials || return 0
__set_connection_args

echo "My hostname is ${HOSTNAME}"

echo 'Create all configured system logs'
clickhouse-client --query "SYSTEM FLUSH LOGS"

@@ -184,7 +186,17 @@ function setup_logs_replication
/^TTL /d
')

echo -e "Creating remote destination table ${table}_${hash} with statement:\n${statement}" >&2
echo -e "Creating remote destination table ${table}_${hash} with statement:" >&2

echo "::group::${table}"
# this is the only way a big "$statement" can be printed without causing an EAGAIN error
# cat: write error: Resource temporarily unavailable
statement_print="${statement}"
if [ "${#statement_print}" -gt 4000 ]; then
    statement_print="${statement::1999}\n…\n${statement:${#statement}-1999}"
fi
echo -e "$statement_print"
echo "::endgroup::"

echo "$statement" | clickhouse-client --database_replicated_initial_query_timeout_sec=10 \
    --distributed_ddl_task_timeout=30 --distributed_ddl_output_mode=throw_only_active \
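
For reference, the truncation above relies on two bash substring expansions: `${var::N}` takes the first N characters and `${var:offset}` takes everything from the offset onward. A tiny standalone sketch with an invented oversized statement:

```bash
statement=$(printf 'x%.0s' {1..5000})   # hypothetical 5000-character DDL statement
statement_print="$statement"
if [ "${#statement_print}" -gt 4000 ]; then
    # keep the first and last 1999 characters, elide the middle
    statement_print="${statement::1999}\n…\n${statement:${#statement}-1999}"
fi
echo -e "$statement_print" | wc -c      # ~4000, small enough to print safely
```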

@@ -3,6 +3,8 @@

FROM alpine:3.18
RUN apk add --no-cache -U iproute2 \
    && for bin in iptables iptables-restore iptables-save; \
    && for bin in \
        iptables iptables-restore iptables-save \
        ip6tables ip6tables-restore ip6tables-save; \
    do ln -sf xtables-nft-multi "/sbin/$bin"; \
    done

@@ -13,7 +13,8 @@ entry="/usr/share/clickhouse-test/performance/scripts/entrypoint.sh"
# https://www.kernel.org/doc/Documentation/filesystems/tmpfs.txt
# Double-escaped backslashes are a tribute to the engineering wonder of docker --
# it gives '/bin/sh: 1: [bash,: not found' otherwise.
numactl --hardware
echo > compare.log
numactl --hardware | tee -a compare.log
node=$(( RANDOM % $(numactl --hardware | sed -n 's/^.*available:\(.*\)nodes.*$/\1/p') ));
echo Will bind to NUMA node $node;
echo Will bind to NUMA node $node | tee -a compare.log
numactl --cpunodebind=$node --membind=$node $entry

32
docs/changelogs/v24.3.10.33-lts.md
Normal file
@@ -0,0 +1,32 @@
---
sidebar_position: 1
sidebar_label: 2024
---

# 2024 Changelog

### ClickHouse release v24.3.10.33-lts (37b6502ebf0) FIXME as compared to v24.3.9.5-lts (a939270465e)

#### Improvement
* Backported in [#68870](https://github.com/ClickHouse/ClickHouse/issues/68870): Make allow_experimental_analyzer be controlled by the initiator for distributed queries. This ensures compatibility and correctness during operations in mixed version clusters. [#65777](https://github.com/ClickHouse/ClickHouse/pull/65777) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Backported in [#69095](https://github.com/ClickHouse/ClickHouse/issues/69095): Support for the Spanish language in the embedded dictionaries. [#69035](https://github.com/ClickHouse/ClickHouse/pull/69035) ([Vasily Okunev](https://github.com/VOkunev)).

#### Bug Fix (user-visible misbehavior in an official stable release)
* Backported in [#68995](https://github.com/ClickHouse/ClickHouse/issues/68995): Fix the upper bound of the function `fromModifiedJulianDay`. It was supposed to be `9999-12-31` but was mistakenly set to `9999-01-01`. [#67583](https://github.com/ClickHouse/ClickHouse/pull/67583) ([PHO](https://github.com/depressed-pho)).
* Backported in [#68844](https://github.com/ClickHouse/ClickHouse/issues/68844): Fixed crash in Parquet filtering when data types in the file substantially differ from requested types (e.g. `... FROM file('a.parquet', Parquet, 'x String')`, but the file has `x Int64`). Without this fix, use `input_format_parquet_filter_push_down = 0` as a workaround. [#68131](https://github.com/ClickHouse/ClickHouse/pull/68131) ([Michael Kolupaev](https://github.com/al13n321)).
* Backported in [#68881](https://github.com/ClickHouse/ClickHouse/issues/68881): Fixes [#50868](https://github.com/ClickHouse/ClickHouse/issues/50868). Small DateTime64 constant values returned by a nested subquery inside a distributed query were wrongly transformed to Nulls, thus causing errors and possible incorrect query results. [#68323](https://github.com/ClickHouse/ClickHouse/pull/68323) ([Shankar](https://github.com/shiyer7474)).
* Backported in [#69054](https://github.com/ClickHouse/ClickHouse/issues/69054): Added back virtual columns `_table` and `_database` to distributed tables. They were available until version 24.3. [#68672](https://github.com/ClickHouse/ClickHouse/pull/68672) ([Anton Popov](https://github.com/CurtizJ)).
* Backported in [#68856](https://github.com/ClickHouse/ClickHouse/issues/68856): Fix possible error `Size of permutation (0) is less than required (...)` during Variant column permutation. [#68681](https://github.com/ClickHouse/ClickHouse/pull/68681) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#69152](https://github.com/ClickHouse/ClickHouse/issues/69152): Fix possible wrong result during anyHeavy state merge. [#68950](https://github.com/ClickHouse/ClickHouse/pull/68950) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#69112](https://github.com/ClickHouse/ClickHouse/issues/69112): Fix logical error when we have empty async insert. [#69080](https://github.com/ClickHouse/ClickHouse/pull/69080) ([Han Fei](https://github.com/hanfei1991)).

#### NO CL CATEGORY

* Backported in [#68938](https://github.com/ClickHouse/ClickHouse/issues/68938):. [#68897](https://github.com/ClickHouse/ClickHouse/pull/68897) ([Alexander Gololobov](https://github.com/davenger)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Backported in [#68826](https://github.com/ClickHouse/ClickHouse/issues/68826): Turn off fault injection for insert in `01396_inactive_replica_cleanup_nodes_zookeeper`. [#68715](https://github.com/ClickHouse/ClickHouse/pull/68715) ([alesapin](https://github.com/alesapin)).
* Backported in [#68754](https://github.com/ClickHouse/ClickHouse/issues/68754): To make patch release possible from every commit on release branch, package_debug build is required and must not be skipped. [#68750](https://github.com/ClickHouse/ClickHouse/pull/68750) ([Max K.](https://github.com/maxknv)).
* Backported in [#69044](https://github.com/ClickHouse/ClickHouse/issues/69044): Fix 01114_database_atomic flakiness. [#68930](https://github.com/ClickHouse/ClickHouse/pull/68930) ([Raúl Marín](https://github.com/Algunenano)).

17
docs/changelogs/v24.3.11.7-lts.md
Normal file
@@ -0,0 +1,17 @@
---
sidebar_position: 1
sidebar_label: 2024
---

# 2024 Changelog

### ClickHouse release v24.3.11.7-lts (28795d0a47e) FIXME as compared to v24.3.10.33-lts (37b6502ebf0)

#### Bug Fix (user-visible misbehavior in an official stable release)
* Backported in [#67479](https://github.com/ClickHouse/ClickHouse/issues/67479): In rare cases ClickHouse could consider parts as broken because of some unexpected projections on disk. Now it's fixed. [#66898](https://github.com/ClickHouse/ClickHouse/pull/66898) ([alesapin](https://github.com/alesapin)).
* Backported in [#69243](https://github.com/ClickHouse/ClickHouse/issues/69243): `UNION` clause in subqueries wasn't handled correctly in queries with parallel replicas and led to LOGICAL_ERROR `Duplicate announcement received for replica`. [#69146](https://github.com/ClickHouse/ClickHouse/pull/69146) ([Igor Nikonov](https://github.com/devcrafter)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Backported in [#69221](https://github.com/ClickHouse/ClickHouse/issues/69221): Disable memory test with sanitizer. [#69193](https://github.com/ClickHouse/ClickHouse/pull/69193) ([alesapin](https://github.com/alesapin)).

29
docs/changelogs/v24.5.7.31-stable.md
Normal file
@@ -0,0 +1,29 @@
---
sidebar_position: 1
sidebar_label: 2024
---

# 2024 Changelog

### ClickHouse release v24.5.7.31-stable (6c185e9aec1) FIXME as compared to v24.5.6.45-stable (bdca8604c29)

#### Bug Fix (user-visible misbehavior in an official stable release)
* Backported in [#68564](https://github.com/ClickHouse/ClickHouse/issues/68564): Fix indexHint function case found by fuzzer. [#66286](https://github.com/ClickHouse/ClickHouse/pull/66286) ([Anton Popov](https://github.com/CurtizJ)).
* Backported in [#68996](https://github.com/ClickHouse/ClickHouse/issues/68996): Fix the upper bound of the function `fromModifiedJulianDay`. It was supposed to be `9999-12-31` but was mistakenly set to `9999-01-01`. [#67583](https://github.com/ClickHouse/ClickHouse/pull/67583) ([PHO](https://github.com/depressed-pho)).
* Backported in [#68865](https://github.com/ClickHouse/ClickHouse/issues/68865): Fixed crash in Parquet filtering when data types in the file substantially differ from requested types (e.g. `... FROM file('a.parquet', Parquet, 'x String')`, but the file has `x Int64`). Without this fix, use `input_format_parquet_filter_push_down = 0` as a workaround. [#68131](https://github.com/ClickHouse/ClickHouse/pull/68131) ([Michael Kolupaev](https://github.com/al13n321)).
* Backported in [#69004](https://github.com/ClickHouse/ClickHouse/issues/69004): After https://github.com/ClickHouse/ClickHouse/pull/61984 `schema_inference_make_columns_nullable=0` can still make columns `Nullable` in Parquet/Arrow formats. The change was backward incompatible and users noticed the changes in the behaviour. This PR makes `schema_inference_make_columns_nullable=0` work as before (no Nullable columns will be inferred) and introduces a new value `auto` for this setting that will make columns `Nullable` only if data has information about nullability. [#68298](https://github.com/ClickHouse/ClickHouse/pull/68298) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#68882](https://github.com/ClickHouse/ClickHouse/issues/68882): Fixes [#50868](https://github.com/ClickHouse/ClickHouse/issues/50868). Small DateTime64 constant values returned by a nested subquery inside a distributed query were wrongly transformed to Nulls, thus causing errors and possible incorrect query results. [#68323](https://github.com/ClickHouse/ClickHouse/pull/68323) ([Shankar](https://github.com/shiyer7474)).
* Backported in [#69023](https://github.com/ClickHouse/ClickHouse/issues/69023): Added back virtual columns `_table` and `_database` to distributed tables. They were available until version 24.3. [#68672](https://github.com/ClickHouse/ClickHouse/pull/68672) ([Anton Popov](https://github.com/CurtizJ)).
* Backported in [#68858](https://github.com/ClickHouse/ClickHouse/issues/68858): Fix possible error `Size of permutation (0) is less than required (...)` during Variant column permutation. [#68681](https://github.com/ClickHouse/ClickHouse/pull/68681) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#68784](https://github.com/ClickHouse/ClickHouse/issues/68784): Fix issue with materialized constant keys when hashing maps with arrays as keys in functions `sipHash(64/128)Keyed`. [#68731](https://github.com/ClickHouse/ClickHouse/pull/68731) ([Salvatore Mesoraca](https://github.com/aiven-sal)).
* Backported in [#69154](https://github.com/ClickHouse/ClickHouse/issues/69154): Fix possible wrong result during anyHeavy state merge. [#68950](https://github.com/ClickHouse/ClickHouse/pull/68950) ([Raúl Marín](https://github.com/Algunenano)).

#### NO CL CATEGORY

* Backported in [#68940](https://github.com/ClickHouse/ClickHouse/issues/68940):. [#68897](https://github.com/ClickHouse/ClickHouse/pull/68897) ([Alexander Gololobov](https://github.com/davenger)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Backported in [#68828](https://github.com/ClickHouse/ClickHouse/issues/68828): Turn off fault injection for insert in `01396_inactive_replica_cleanup_nodes_zookeeper`. [#68715](https://github.com/ClickHouse/ClickHouse/pull/68715) ([alesapin](https://github.com/alesapin)).
* Backported in [#69046](https://github.com/ClickHouse/ClickHouse/issues/69046): Fix 01114_database_atomic flakiness. [#68930](https://github.com/ClickHouse/ClickHouse/pull/68930) ([Raúl Marín](https://github.com/Algunenano)).

18
docs/changelogs/v24.5.8.10-stable.md
Normal file
@@ -0,0 +1,18 @@
---
sidebar_position: 1
sidebar_label: 2024
---

# 2024 Changelog

### ClickHouse release v24.5.8.10-stable (f11729638ea) FIXME as compared to v24.5.7.31-stable (6c185e9aec1)

#### Bug Fix (user-visible misbehavior in an official stable release)
* Backported in [#69295](https://github.com/ClickHouse/ClickHouse/issues/69295): TODO. [#68744](https://github.com/ClickHouse/ClickHouse/pull/68744) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#69245](https://github.com/ClickHouse/ClickHouse/issues/69245): `UNION` clause in subqueries wasn't handled correctly in queries with parallel replicas and led to LOGICAL_ERROR `Duplicate announcement received for replica`. [#69146](https://github.com/ClickHouse/ClickHouse/pull/69146) ([Igor Nikonov](https://github.com/devcrafter)).
* Fix crash when using `s3` table function with GLOB paths and filters. [#69176](https://github.com/ClickHouse/ClickHouse/pull/69176) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Backported in [#69223](https://github.com/ClickHouse/ClickHouse/issues/69223): Disable memory test with sanitizer. [#69193](https://github.com/ClickHouse/ClickHouse/pull/69193) ([alesapin](https://github.com/alesapin)).

29
docs/changelogs/v24.6.5.30-stable.md
Normal file
@@ -0,0 +1,29 @@
---
sidebar_position: 1
sidebar_label: 2024
---

# 2024 Changelog

### ClickHouse release v24.6.5.30-stable (e6e196c92d6) FIXME as compared to v24.6.4.42-stable (c534bb4b4dd)

#### Bug Fix (user-visible misbehavior in an official stable release)
* Backported in [#68969](https://github.com/ClickHouse/ClickHouse/issues/68969): Fix the upper bound of the function `fromModifiedJulianDay`. It was supposed to be `9999-12-31` but was mistakenly set to `9999-01-01`. [#67583](https://github.com/ClickHouse/ClickHouse/pull/67583) ([PHO](https://github.com/depressed-pho)).
* Backported in [#68814](https://github.com/ClickHouse/ClickHouse/issues/68814): Fixed crash in Parquet filtering when data types in the file substantially differ from requested types (e.g. `... FROM file('a.parquet', Parquet, 'x String')`, but the file has `x Int64`). Without this fix, use `input_format_parquet_filter_push_down = 0` as a workaround. [#68131](https://github.com/ClickHouse/ClickHouse/pull/68131) ([Michael Kolupaev](https://github.com/al13n321)).
* Backported in [#69005](https://github.com/ClickHouse/ClickHouse/issues/69005): After https://github.com/ClickHouse/ClickHouse/pull/61984 `schema_inference_make_columns_nullable=0` can still make columns `Nullable` in Parquet/Arrow formats. The change was backward incompatible and users noticed the changes in the behaviour. This PR makes `schema_inference_make_columns_nullable=0` work as before (no Nullable columns will be inferred) and introduces a new value `auto` for this setting that will make columns `Nullable` only if data has information about nullability. [#68298](https://github.com/ClickHouse/ClickHouse/pull/68298) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#68883](https://github.com/ClickHouse/ClickHouse/issues/68883): Fixes [#50868](https://github.com/ClickHouse/ClickHouse/issues/50868). Small DateTime64 constant values returned by a nested subquery inside a distributed query were wrongly transformed to Nulls, thus causing errors and possible incorrect query results. [#68323](https://github.com/ClickHouse/ClickHouse/pull/68323) ([Shankar](https://github.com/shiyer7474)).
* Backported in [#69025](https://github.com/ClickHouse/ClickHouse/issues/69025): Added back virtual columns `_table` and `_database` to distributed tables. They were available until version 24.3. [#68672](https://github.com/ClickHouse/ClickHouse/pull/68672) ([Anton Popov](https://github.com/CurtizJ)).
* Backported in [#68860](https://github.com/ClickHouse/ClickHouse/issues/68860): Fix possible error `Size of permutation (0) is less than required (...)` during Variant column permutation. [#68681](https://github.com/ClickHouse/ClickHouse/pull/68681) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#68786](https://github.com/ClickHouse/ClickHouse/issues/68786): Fix issue with materialized constant keys when hashing maps with arrays as keys in functions `sipHash(64/128)Keyed`. [#68731](https://github.com/ClickHouse/ClickHouse/pull/68731) ([Salvatore Mesoraca](https://github.com/aiven-sal)).
* Backported in [#69156](https://github.com/ClickHouse/ClickHouse/issues/69156): Fix possible wrong result during anyHeavy state merge. [#68950](https://github.com/ClickHouse/ClickHouse/pull/68950) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#69116](https://github.com/ClickHouse/ClickHouse/issues/69116): Fix logical error when we have empty async insert. [#69080](https://github.com/ClickHouse/ClickHouse/pull/69080) ([Han Fei](https://github.com/hanfei1991)).

#### NO CL CATEGORY

* Backported in [#68942](https://github.com/ClickHouse/ClickHouse/issues/68942):. [#68897](https://github.com/ClickHouse/ClickHouse/pull/68897) ([Alexander Gololobov](https://github.com/davenger)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Backported in [#68830](https://github.com/ClickHouse/ClickHouse/issues/68830): Turn off fault injection for insert in `01396_inactive_replica_cleanup_nodes_zookeeper`. [#68715](https://github.com/ClickHouse/ClickHouse/pull/68715) ([alesapin](https://github.com/alesapin)).
* Backported in [#69048](https://github.com/ClickHouse/ClickHouse/issues/69048): Fix 01114_database_atomic flakiness. [#68930](https://github.com/ClickHouse/ClickHouse/pull/68930) ([Raúl Marín](https://github.com/Algunenano)).

16
docs/changelogs/v24.6.6.6-stable.md
Normal file
@@ -0,0 +1,16 @@
---
sidebar_position: 1
sidebar_label: 2024
---

# 2024 Changelog

### ClickHouse release v24.6.6.6-stable (a4c4580e639) FIXME as compared to v24.6.5.30-stable (e6e196c92d6)

#### Bug Fix (user-visible misbehavior in an official stable release)
* Backported in [#69197](https://github.com/ClickHouse/ClickHouse/issues/69197): TODO. [#68744](https://github.com/ClickHouse/ClickHouse/pull/68744) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Backported in [#69225](https://github.com/ClickHouse/ClickHouse/issues/69225): Disable memory test with sanitizer. [#69193](https://github.com/ClickHouse/ClickHouse/pull/69193) ([alesapin](https://github.com/alesapin)).

17
docs/changelogs/v24.7.6.8-stable.md
Normal file
@@ -0,0 +1,17 @@
---
sidebar_position: 1
sidebar_label: 2024
---

# 2024 Changelog

### ClickHouse release v24.7.6.8-stable (7779883593a) FIXME as compared to v24.7.5.37-stable (f2533ca97be)

#### Bug Fix (user-visible misbehavior in an official stable release)
* Backported in [#69198](https://github.com/ClickHouse/ClickHouse/issues/69198): TODO. [#68744](https://github.com/ClickHouse/ClickHouse/pull/68744) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#69249](https://github.com/ClickHouse/ClickHouse/issues/69249): `UNION` clause in subqueries wasn't handled correctly in queries with parallel replicas and led to LOGICAL_ERROR `Duplicate announcement received for replica`. [#69146](https://github.com/ClickHouse/ClickHouse/pull/69146) ([Igor Nikonov](https://github.com/devcrafter)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Backported in [#69227](https://github.com/ClickHouse/ClickHouse/issues/69227): Disable memory test with sanitizer. [#69193](https://github.com/ClickHouse/ClickHouse/pull/69193) ([alesapin](https://github.com/alesapin)).

50
docs/changelogs/v24.8.3.59-lts.md
Normal file
@@ -0,0 +1,50 @@
---
sidebar_position: 1
sidebar_label: 2024
---

# 2024 Changelog

### ClickHouse release v24.8.3.59-lts (e729b9fa40e) FIXME as compared to v24.8.2.3-lts (b54f79ed323)

#### New Feature
* Backported in [#68710](https://github.com/ClickHouse/ClickHouse/issues/68710): Query cache entries can now be dropped by tag. For example, the query cache entry created by `SELECT 1 SETTINGS use_query_cache = true, query_cache_tag = 'abc'` can now be dropped by `SYSTEM DROP QUERY CACHE TAG 'abc'` (or of course just: `SYSTEM DROP QUERY CACHE` which will clear the entire query cache). [#68477](https://github.com/ClickHouse/ClickHouse/pull/68477) ([Michał Tabaszewski](https://github.com/pinsvin00)).

#### Improvement
* Backported in [#69097](https://github.com/ClickHouse/ClickHouse/issues/69097): Support for the Spanish language in the embedded dictionaries. [#69035](https://github.com/ClickHouse/ClickHouse/pull/69035) ([Vasily Okunev](https://github.com/VOkunev)).

#### Bug Fix (user-visible misbehavior in an official stable release)
* Backported in [#68973](https://github.com/ClickHouse/ClickHouse/issues/68973): Fix the upper bound of the function `fromModifiedJulianDay`. It was supposed to be `9999-12-31` but was mistakenly set to `9999-01-01`. [#67583](https://github.com/ClickHouse/ClickHouse/pull/67583) ([PHO](https://github.com/depressed-pho)).
* Backported in [#68818](https://github.com/ClickHouse/ClickHouse/issues/68818): Fixed crash in Parquet filtering when data types in the file substantially differ from requested types (e.g. `... FROM file('a.parquet', Parquet, 'x String')`, but the file has `x Int64`). Without this fix, use `input_format_parquet_filter_push_down = 0` as a workaround. [#68131](https://github.com/ClickHouse/ClickHouse/pull/68131) ([Michael Kolupaev](https://github.com/al13n321)).
* Backported in [#68893](https://github.com/ClickHouse/ClickHouse/issues/68893): After https://github.com/ClickHouse/ClickHouse/pull/61984 `schema_inference_make_columns_nullable=0` can still make columns `Nullable` in Parquet/Arrow formats. The change was backward incompatible and users noticed the changes in the behaviour. This PR makes `schema_inference_make_columns_nullable=0` work as before (no Nullable columns will be inferred) and introduces a new value `auto` for this setting that will make columns `Nullable` only if data has information about nullability. [#68298](https://github.com/ClickHouse/ClickHouse/pull/68298) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#68721](https://github.com/ClickHouse/ClickHouse/issues/68721): Fixes [#50868](https://github.com/ClickHouse/ClickHouse/issues/50868). Small DateTime64 constant values returned by a nested subquery inside a distributed query were wrongly transformed to Nulls, thus causing errors and possible incorrect query results. [#68323](https://github.com/ClickHouse/ClickHouse/pull/68323) ([Shankar](https://github.com/shiyer7474)).
* Backported in [#69029](https://github.com/ClickHouse/ClickHouse/issues/69029): Added back virtual columns `_table` and `_database` to distributed tables. They were available until version 24.3. [#68672](https://github.com/ClickHouse/ClickHouse/pull/68672) ([Anton Popov](https://github.com/CurtizJ)).
* Backported in [#68864](https://github.com/ClickHouse/ClickHouse/issues/68864): Fix possible error `Size of permutation (0) is less than required (...)` during Variant column permutation. [#68681](https://github.com/ClickHouse/ClickHouse/pull/68681) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#68854](https://github.com/ClickHouse/ClickHouse/issues/68854): Fix possible error `DB::Exception: Block structure mismatch in joined block stream: different columns:` with new JSON column. [#68686](https://github.com/ClickHouse/ClickHouse/pull/68686) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#68790](https://github.com/ClickHouse/ClickHouse/issues/68790): Fix issue with materialized constant keys when hashing maps with arrays as keys in functions `sipHash(64/128)Keyed`. [#68731](https://github.com/ClickHouse/ClickHouse/pull/68731) ([Salvatore Mesoraca](https://github.com/aiven-sal)).
* Backported in [#69108](https://github.com/ClickHouse/ClickHouse/issues/69108): TODO. [#68744](https://github.com/ClickHouse/ClickHouse/pull/68744) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#68850](https://github.com/ClickHouse/ClickHouse/issues/68850): Fix resolving dynamic subcolumns from subqueries in analyzer. [#68824](https://github.com/ClickHouse/ClickHouse/pull/68824) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#68911](https://github.com/ClickHouse/ClickHouse/issues/68911): Fix complex types metadata parsing in DeltaLake. Closes [#68739](https://github.com/ClickHouse/ClickHouse/issues/68739). [#68836](https://github.com/ClickHouse/ClickHouse/pull/68836) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Backported in [#69160](https://github.com/ClickHouse/ClickHouse/issues/69160): Fix possible wrong result during anyHeavy state merge. [#68950](https://github.com/ClickHouse/ClickHouse/pull/68950) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#69072](https://github.com/ClickHouse/ClickHouse/issues/69072): Fixed writing to Materialized Views with enabled setting `optimize_functions_to_subcolumns`. [#68951](https://github.com/ClickHouse/ClickHouse/pull/68951) ([Anton Popov](https://github.com/CurtizJ)).
* Backported in [#69016](https://github.com/ClickHouse/ClickHouse/issues/69016): Don't use serializations cache in const Dynamic column methods. It could lead to use of an uninitialized value or even a race condition during aggregations. [#68953](https://github.com/ClickHouse/ClickHouse/pull/68953) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#69120](https://github.com/ClickHouse/ClickHouse/issues/69120): Fix logical error when we have empty async insert. [#69080](https://github.com/ClickHouse/ClickHouse/pull/69080) ([Han Fei](https://github.com/hanfei1991)).

#### NO CL CATEGORY

* Backported in [#68947](https://github.com/ClickHouse/ClickHouse/issues/68947):. [#68897](https://github.com/ClickHouse/ClickHouse/pull/68897) ([Alexander Gololobov](https://github.com/davenger)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Backported in [#68704](https://github.com/ClickHouse/ClickHouse/issues/68704): Fix enumerating dynamic subcolumns. [#68582](https://github.com/ClickHouse/ClickHouse/pull/68582) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#69000](https://github.com/ClickHouse/ClickHouse/issues/69000): Prioritizing of virtual columns in hive partitioning. [#68606](https://github.com/ClickHouse/ClickHouse/pull/68606) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
* Backported in [#68799](https://github.com/ClickHouse/ClickHouse/issues/68799): CI: Disable SQLLogic job. [#68654](https://github.com/ClickHouse/ClickHouse/pull/68654) ([Max K.](https://github.com/maxknv)).
* Backported in [#68834](https://github.com/ClickHouse/ClickHouse/issues/68834): Turn off fault injection for insert in `01396_inactive_replica_cleanup_nodes_zookeeper`. [#68715](https://github.com/ClickHouse/ClickHouse/pull/68715) ([alesapin](https://github.com/alesapin)).
* Backported in [#68781](https://github.com/ClickHouse/ClickHouse/issues/68781): Fix flaky test 00989_parallel_parts_loading. [#68737](https://github.com/ClickHouse/ClickHouse/pull/68737) ([alesapin](https://github.com/alesapin)).
* Backported in [#68762](https://github.com/ClickHouse/ClickHouse/issues/68762): To make patch release possible from every commit on release branch, package_debug build is required and must not be skipped. [#68750](https://github.com/ClickHouse/ClickHouse/pull/68750) ([Max K.](https://github.com/maxknv)).
* Backported in [#68810](https://github.com/ClickHouse/ClickHouse/issues/68810): Try to disable rerun check if job triggered manually. [#68751](https://github.com/ClickHouse/ClickHouse/pull/68751) ([Max K.](https://github.com/maxknv)).
* Backported in [#68962](https://github.com/ClickHouse/ClickHouse/issues/68962): Fix 2477 timeout. [#68752](https://github.com/ClickHouse/ClickHouse/pull/68752) ([jsc0218](https://github.com/jsc0218)).
* Backported in [#68977](https://github.com/ClickHouse/ClickHouse/issues/68977): Check setting use_json_alias_for_old_object_type in runtime. [#68793](https://github.com/ClickHouse/ClickHouse/pull/68793) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#68852](https://github.com/ClickHouse/ClickHouse/issues/68852): Make dynamic structure selection more consistent. [#68802](https://github.com/ClickHouse/ClickHouse/pull/68802) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#69052](https://github.com/ClickHouse/ClickHouse/issues/69052): Fix 01114_database_atomic flakiness. [#68930](https://github.com/ClickHouse/ClickHouse/pull/68930) ([Raúl Marín](https://github.com/Algunenano)).

22
docs/changelogs/v24.8.4.13-lts.md
Normal file
@@ -0,0 +1,22 @@
---
sidebar_position: 1
sidebar_label: 2024
---

# 2024 Changelog

### ClickHouse release v24.8.4.13-lts (53195bc189b) FIXME as compared to v24.8.3.59-lts (e729b9fa40e)

#### Improvement
* Backported in [#68699](https://github.com/ClickHouse/ClickHouse/issues/68699): Delete the old named-collections code from dictionaries and substitute it with the new one, which allows using DDL-created named collections in dictionaries. Closes [#60936](https://github.com/ClickHouse/ClickHouse/issues/60936), closes [#36890](https://github.com/ClickHouse/ClickHouse/issues/36890). [#68412](https://github.com/ClickHouse/ClickHouse/pull/68412) ([Kseniia Sumarokova](https://github.com/kssenii)).

#### Bug Fix (user-visible misbehavior in an official stable release)
* Backported in [#69231](https://github.com/ClickHouse/ClickHouse/issues/69231): Fix parsing error when null should be inserted as default in some cases during JSON type parsing. [#68955](https://github.com/ClickHouse/ClickHouse/pull/68955) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#69251](https://github.com/ClickHouse/ClickHouse/issues/69251): `UNION` clause in subqueries wasn't handled correctly in queries with parallel replicas and led to LOGICAL_ERROR `Duplicate announcement received for replica`. [#69146](https://github.com/ClickHouse/ClickHouse/pull/69146) ([Igor Nikonov](https://github.com/devcrafter)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Backported in [#69189](https://github.com/ClickHouse/ClickHouse/issues/69189): Don't create Object type if use_json_alias_for_old_object_type=1 but allow_experimental_object_type=0. [#69150](https://github.com/ClickHouse/ClickHouse/pull/69150) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#69229](https://github.com/ClickHouse/ClickHouse/issues/69229): Disable memory test with sanitizer. [#69193](https://github.com/ClickHouse/ClickHouse/pull/69193) ([alesapin](https://github.com/alesapin)).
* Backported in [#69219](https://github.com/ClickHouse/ClickHouse/issues/69219): Disable perf-like test with sanitizers. [#69194](https://github.com/ClickHouse/ClickHouse/pull/69194) ([alesapin](https://github.com/alesapin)).

@@ -13,16 +13,17 @@ Here is a complete list of available database engines. Follow the links for more

- [Atomic](../../engines/database-engines/atomic.md)

- [MySQL](../../engines/database-engines/mysql.md)
- [Lazy](../../engines/database-engines/lazy.md)

- [MaterializedPostgreSQL](../../engines/database-engines/materialized-postgresql.md)

- [MaterializedMySQL](../../engines/database-engines/materialized-mysql.md)

- [Lazy](../../engines/database-engines/lazy.md)
- [MySQL](../../engines/database-engines/mysql.md)

- [PostgreSQL](../../engines/database-engines/postgresql.md)

- [MaterializedPostgreSQL](../../engines/database-engines/materialized-postgresql.md)

- [Replicated](../../engines/database-engines/replicated.md)

- [SQLite](../../engines/database-engines/sqlite.md)

@@ -155,6 +155,12 @@ Replication of [**TOAST**](https://www.postgresql.org/docs/9.5/storage-toast.htm

Sets a comma-separated list of PostgreSQL database tables, which will be replicated via [MaterializedPostgreSQL](../../engines/database-engines/materialized-postgresql.md) database engine.

Each table can have a subset of replicated columns in brackets. If the subset of columns is omitted, then all columns for the table will be replicated.

``` sql
materialized_postgresql_tables_list = 'table1(co1, col2),table2,table3(co3, col5, col7)'
```

Default value: empty list — means the whole PostgreSQL database will be replicated.

### `materialized_postgresql_schema` {#materialized-postgresql-schema}
72
docs/en/engines/table-engines/integrations/azure-queue.md
Normal file
72
docs/en/engines/table-engines/integrations/azure-queue.md
Normal file
@ -0,0 +1,72 @@
---
slug: /en/engines/table-engines/integrations/azure-queue
sidebar_position: 181
sidebar_label: AzureQueue
---

# AzureQueue Table Engine

This engine provides an integration with the [Azure Blob Storage](https://azure.microsoft.com/en-us/products/storage/blobs) ecosystem and allows streaming data import.

## Create Table {#creating-a-table}

``` sql
CREATE TABLE test (name String, value UInt32)
ENGINE = AzureQueue(...)
[SETTINGS]
[mode = '',]
[after_processing = 'keep',]
[keeper_path = '',]
...
```

**Engine parameters**

`AzureQueue` parameters are the same as the `AzureBlobStorage` table engine supports. See the parameters section [here](../../../engines/table-engines/integrations/azureBlobStorage.md).

**Example**

```sql
CREATE TABLE azure_queue_engine_table (name String, value UInt32)
ENGINE=AzureQueue('DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;BlobEndpoint=http://azurite1:10000/devstoreaccount1/data/')
SETTINGS
    mode = 'unordered'
```

## Settings {#settings}

The set of supported settings is the same as for the `S3Queue` table engine, but without the `s3queue_` prefix. See the [full list of settings](../../../engines/table-engines/integrations/s3queue.md#settings).

## Description {#description}

`SELECT` is not particularly useful for streaming import (except for debugging), because each file can be imported only once. It is more practical to create real-time threads using [materialized views](../../../sql-reference/statements/create/view.md). To do this:

1. Use the engine to create a table for consuming from the specified path in Azure Blob Storage and consider it a data stream.
2. Create a table with the desired structure.
3. Create a materialized view that converts data from the engine and puts it into a previously created table.

When the `MATERIALIZED VIEW` joins the engine, it starts collecting data in the background.

Example:

``` sql
CREATE TABLE azure_queue_engine_table (name String, value UInt32)
ENGINE=AzureQueue('<endpoint>', 'CSV', 'gzip')
SETTINGS
    mode = 'unordered';

CREATE TABLE stats (name String, value UInt32)
ENGINE = MergeTree() ORDER BY name;

CREATE MATERIALIZED VIEW consumer TO stats
AS SELECT name, value FROM azure_queue_engine_table;

SELECT * FROM stats ORDER BY name;
```

## Virtual columns {#virtual-columns}

- `_path` — Path to the file.
- `_file` — Name of the file.

For more information about virtual columns see [here](../../../engines/table-engines/index.md#table_engines-virtual_columns).
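
As a quick illustration (assuming the `azure_queue_engine_table` from the examples above), the virtual columns can be selected alongside the data. Note that on queue engines a direct `SELECT` consumes files, so a sketch like this is mainly for debugging:

```sql
-- Debugging only: each file is imported once, so this read consumes it.
SELECT _path, _file, name, value
FROM azure_queue_engine_table
LIMIT 5;
```
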
@ -112,7 +112,7 @@ Example:

```

The NATS server configuration can be added using the ClickHouse config file.
More specifically, you can add a NATS password for the NATS engine:

``` xml
<nats>

@ -167,7 +167,7 @@ If you want to change the target table by using `ALTER`, we recommend disabling

- `_subject` - NATS message subject. Data type: `String`.

Additional virtual columns when `nats_handle_error_mode='stream'`:

- `_raw_message` - Raw message that couldn't be parsed successfully. Data type: `Nullable(String)`.
- `_error` - Exception message raised during failed parsing. Data type: `Nullable(String)`.
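
A minimal sketch of how these error columns are typically consumed; the connection values below (`nats_url`, `nats_subjects`, the table name) are illustrative, not taken from this page:

```sql
-- Illustrative NATS table with error streaming enabled.
CREATE TABLE nats_stream (data String)
ENGINE = NATS
SETTINGS nats_url = 'localhost:4444',
         nats_subjects = 'subject1',
         nats_format = 'JSONEachRow',
         nats_handle_error_mode = 'stream';

-- Rows that failed to parse surface through the virtual columns.
SELECT _subject, _raw_message, _error
FROM nats_stream
WHERE _error IS NOT NULL;
```
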
@ -35,7 +35,7 @@ CREATE TABLE s3_engine_table (name String, value UInt32)
[SETTINGS ...]
```

### Engine parameters {#parameters}

- `path` — Bucket URL with a path to the file. Supports the following wildcards in readonly mode: `*`, `**`, `?`, `{abc,def}` and `{N..M}` where `N`, `M` — numbers, `'abc'`, `'def'` — strings. For more information see [below](#wildcards-in-path), and a sketch right after this list.
- `NOSIGN` - If this keyword is provided in place of credentials, the requests will not be signed.
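
For instance, a sketch of how the wildcards combine; the bucket name and object layout here are hypothetical:

```sql
-- Matches part-0.csv, part-1.csv, ... under data/2023/ and data/2024/.
CREATE TABLE s3_wildcard_table (name String, value UInt32)
ENGINE = S3('https://example-bucket.s3.amazonaws.com/data/{2023,2024}/part-*.csv', 'CSV');
```
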
@ -5,6 +5,7 @@ sidebar_label: S3Queue
---

# S3Queue Table Engine

This engine provides integration with the [Amazon S3](https://aws.amazon.com/s3/) ecosystem and allows streaming import. This engine is similar to the [Kafka](../../../engines/table-engines/integrations/kafka.md) and [RabbitMQ](../../../engines/table-engines/integrations/rabbitmq.md) engines, but provides S3-specific features.

## Create Table {#creating-a-table}

@ -16,27 +17,25 @@ CREATE TABLE s3_queue_engine_table (name String, value UInt32)
[mode = '',]
[after_processing = 'keep',]
[keeper_path = '',]
[loading_retries = 0,]
[processing_threads_num = 1,]
[enable_logging_to_s3queue_log = 0,]
[polling_min_timeout_ms = 1000,]
[polling_max_timeout_ms = 10000,]
[polling_backoff_ms = 0,]
[tracked_file_ttl_sec = 0,]
[tracked_files_limit = 1000,]
[cleanup_interval_min_ms = 10000,]
[cleanup_interval_max_ms = 30000,]
```

Starting with `24.7`, settings without the `s3queue_` prefix are also supported.

:::warning
Before `24.7`, it is required to use the `s3queue_` prefix for all settings apart from `mode`, `after_processing` and `keeper_path`.
:::
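
To make the version difference concrete, a sketch (the bucket URL is a placeholder):

```sql
-- 24.7 and later: bare setting names are accepted.
CREATE TABLE s3_queue_demo (name String, value UInt32)
ENGINE = S3Queue('https://example-bucket.s3.amazonaws.com/data/*.csv', 'CSV')
SETTINGS mode = 'unordered',
         processing_threads_num = 4;
-- Before 24.7 the same setting had to be spelled s3queue_processing_threads_num = 4.
```
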
**Engine parameters**

`S3Queue` parameters are the same as the `S3` table engine supports. See the parameters section [here](../../../engines/table-engines/integrations/s3.md#parameters).

**Example**

@ -107,19 +107,24 @@ The vector similarity index currently does not work with per-table, non-default
[here](https://github.com/ClickHouse/ClickHouse/pull/51325#issuecomment-1605920475)). If necessary, the value must be changed in config.xml.
:::

Vector index creation is known to be slow. To speed the process up, index creation can be parallelized. The maximum number of threads can be configured using the server configuration setting [max_build_vector_similarity_index_thread_pool_size](../../../operations/server-configuration-parameters/settings.md#server_configuration_parameters_max_build_vector_similarity_index_thread_pool_size).

ANN indexes are built during column insertion and merge. As a result, `INSERT` and `OPTIMIZE` statements will be slower than for ordinary tables. ANN indexes are ideally used only with immutable or rarely changed data, i.e. when there are far more read requests than write requests.

ANN indexes support this type of query:

``` sql
WITH [...] AS reference_vector
SELECT *
FROM table
WHERE ...                       -- WHERE clause is optional
ORDER BY Distance(vectors, reference_vector)
LIMIT N
```

:::tip
To avoid writing out large vectors, you can use [query
@ -989,19 +989,52 @@ ALTER TABLE tab DROP STATISTICS a;

These lightweight statistics aggregate information about the distribution of values in columns. Statistics are stored in every part and updated on every insert.
They can be used for prewhere optimization only if `allow_statistics_optimize = 1` is enabled.

### Available Types of Column Statistics {#available-types-of-column-statistics}

- `MinMax`

    The minimum and maximum column value, which allows estimating the selectivity of range filters on numeric columns.

    Syntax: `minmax`

- `TDigest`

    [TDigest](https://github.com/tdunning/t-digest) sketches which allow computing approximate percentiles (e.g. the 90th percentile) for numeric columns.

    Syntax: `tdigest`

- `Uniq`

    [HyperLogLog](https://en.wikipedia.org/wiki/HyperLogLog) sketches which provide an estimate of how many distinct values a column contains.

    Syntax: `uniq`

- `CountMin`

    [CountMin](https://en.wikipedia.org/wiki/Count%E2%80%93min_sketch) sketches which provide an approximate count of the frequency of each value in a column.

    Syntax: `countmin`

### Supported Data Types {#supported-data-types}

|          | (U)Int*, Float*, Decimal(*), Date*, Boolean, Enum* | String or FixedString |
|----------|----------------------------------------------------|-----------------------|
| CountMin | ✔                                                  | ✔                     |
| MinMax   | ✔                                                  | ✗                     |
| TDigest  | ✔                                                  | ✗                     |
| Uniq     | ✔                                                  | ✔                     |

### Supported Operations {#supported-operations}

|          | Equality filters (==) | Range filters (>, >=, <, <=) |
|----------|-----------------------|------------------------------|
| CountMin | ✔                     | ✗                            |
| MinMax   | ✗                     | ✔                            |
| TDigest  | ✗                     | ✔                            |
| Uniq     | ✔                     | ✗                            |
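
Putting the pieces together, a minimal sketch of declaring and using these statistics; the table and column names are placeholders, and the feature is experimental, so the corresponding settings are enabled first:

```sql
SET allow_experimental_statistics = 1;

CREATE TABLE tab
(
    a Int64 STATISTICS(minmax, tdigest),  -- range-filter friendly
    b String STATISTICS(uniq, countmin)   -- equality-filter friendly
)
ENGINE = MergeTree
ORDER BY a;

-- Let the optimizer use the statistics when reordering PREWHERE conditions.
SET allow_statistics_optimize = 1;
SELECT count() FROM tab WHERE a > 100 AND b = 'x';
```
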
## Column-level Settings {#column-level-settings}

@ -97,7 +97,7 @@ If you want to change the target table by using `ALTER`, we recommend disabling

- `_filename` - Name of the log file. Data type: `LowCardinality(String)`.
- `_offset` - Offset in the log file. Data type: `UInt64`.

Additional virtual columns when `handle_error_mode='stream'`:

- `_raw_record` - Raw record that couldn't be parsed successfully. Data type: `Nullable(String)`.
- `_error` - Exception message raised during failed parsing. Data type: `Nullable(String)`.

@ -39,6 +39,7 @@ The supported formats are:
| [JSONCompact](#jsoncompact)                         | ✔ | ✔ |
| [JSONCompactStrings](#jsoncompactstrings)           | ✗ | ✔ |
| [JSONCompactColumns](#jsoncompactcolumns)           | ✔ | ✔ |
| [JSONCompactWithProgress](#jsoncompactwithprogress) | ✗ | ✔ |
| [JSONEachRow](#jsoneachrow)                         | ✔ | ✔ |
| [PrettyJSONEachRow](#prettyjsoneachrow)             | ✗ | ✔ |
| [JSONEachRowWithProgress](#jsoneachrowwithprogress) | ✗ | ✔ |

@ -825,17 +826,17 @@ Result:

## JSONAsObject {#jsonasobject}

In this format, a single JSON object is interpreted as a single [JSON](/docs/en/sql-reference/data-types/newjson.md) value. If the input has several JSON objects (comma separated), they are interpreted as separate rows. If the input data is enclosed in square brackets, it is interpreted as an array of JSONs.

This format can only be parsed for a table with a single field of type [JSON](/docs/en/sql-reference/data-types/newjson.md). The remaining columns must be set to [DEFAULT](/docs/en/sql-reference/statements/create/table.md/#default) or [MATERIALIZED](/docs/en/sql-reference/statements/create/table.md/#materialized).

**Examples**

Query:

``` sql
SET allow_experimental_json_type = 1;
CREATE TABLE json_as_object (json JSON) ENGINE = Memory;
INSERT INTO json_as_object (json) FORMAT JSONAsObject {"foo":{"bar":{"x":"y"},"baz":1}},{},{"any json structure":1}
SELECT * FROM json_as_object FORMAT JSONEachRow;
```

@ -843,9 +844,9 @@ SELECT * FROM json_as_object FORMAT JSONEachRow;

Result:

``` response
{"json":{"foo":{"bar":{"x":"y"},"baz":"1"}}}
{"json":{}}
{"json":{"any json structure":"1"}}
```

**An array of JSON objects**

@ -853,35 +854,34 @@ Result:

Query:

``` sql
SET allow_experimental_json_type = 1;
CREATE TABLE json_square_brackets (field JSON) ENGINE = Memory;
INSERT INTO json_square_brackets FORMAT JSONAsObject [{"id": 1, "name": "name1"}, {"id": 2, "name": "name2"}];

SELECT * FROM json_square_brackets FORMAT JSONEachRow;
```

Result:

```response
{"field":{"id":"1","name":"name1"}}
{"field":{"id":"2","name":"name2"}}
```

**Columns with default values**

```sql
SET allow_experimental_json_type = 1;
CREATE TABLE json_as_object (json JSON, time DateTime MATERIALIZED now()) ENGINE = Memory;
INSERT INTO json_as_object (json) FORMAT JSONAsObject {"foo":{"bar":{"x":"y"},"baz":1}};
INSERT INTO json_as_object (json) FORMAT JSONAsObject {};
INSERT INTO json_as_object (json) FORMAT JSONAsObject {"any json structure":1}
SELECT time, json FROM json_as_object FORMAT JSONEachRow
```

```response
{"time":"2024-09-16 12:18:10","json":{}}
{"time":"2024-09-16 12:18:13","json":{"any json structure":"1"}}
{"time":"2024-09-16 12:18:08","json":{"foo":{"bar":{"x":"y"},"baz":"1"}}}
```

## JSONCompact {#jsoncompact}

@ -988,6 +988,59 @@ Example:

Columns that are not present in the block will be filled with default values (you can use the [input_format_defaults_for_omitted_fields](/docs/en/operations/settings/settings-formats.md/#input_format_defaults_for_omitted_fields) setting here)

## JSONCompactWithProgress {#jsoncompactwithprogress}

In this format, ClickHouse outputs each row as a separate, newline-delimited JSON object.

Each row is either a metadata object, a data object, progress information, or a statistics object:

1. **Metadata Object (`meta`)**
   - Describes the structure of the data rows.
   - Fields: `name` (column name), `type` (data type, e.g., `UInt32`, `String`, etc.).
   - Example: `{"meta": [{"name":"id", "type":"UInt32"}, {"name":"name", "type":"String"}]}`
   - Appears before any data objects.

2. **Data Object (`data`)**
   - Represents a row of query results.
   - Fields: An array with values corresponding to the columns defined in the metadata.
   - Example: `{"data":["1", "John Doe"]}`
   - Appears after the metadata object, one per row.

3. **Progress Information Object (`progress`)**
   - Provides real-time progress feedback during query execution.
   - Fields: `read_rows`, `read_bytes`, `written_rows`, `written_bytes`, `total_rows_to_read`, `result_rows`, `result_bytes`, `elapsed_ns`.
   - Example: `{"progress":{"read_rows":"8","read_bytes":"168"}}`
   - May appear intermittently.

4. **Statistics Object (`statistics`)**
   - Summarizes query execution statistics.
   - Fields: `rows`, `rows_before_limit_at_least`, `elapsed`, `rows_read`, `bytes_read`.
   - Example: `{"statistics": {"rows":2, "elapsed":0.001995, "rows_read":8}}`
   - Appears at the end.

5. **Exception Object (`exception`)**
   - Represents an error that occurred during query execution.
   - Fields: A single text field containing the error message.
   - Example: `{"exception": "Code: 395. DB::Exception: Value passed to 'throwIf' function is non-zero..."}`
   - Appears when an error is encountered.

6. **Totals Object (`totals`)**
   - Provides the totals for each numeric column in the result set.
   - Fields: An array with total values corresponding to the columns defined in the metadata.
   - Example: `{"totals": ["", "3"]}`
   - Appears at the end of the data rows, if applicable.

Example:

```json
{"meta": [{"name":"id", "type":"UInt32"}, {"name":"name", "type":"String"}]}
{"progress":{"read_rows":"8","read_bytes":"168","written_rows":"0","written_bytes":"0","total_rows_to_read":"2","result_rows":"0","result_bytes":"0","elapsed_ns":"0"}}
{"data":["1", "John Doe"]}
{"data":["2", "Joe Doe"]}
{"statistics": {"rows":2, "rows_before_limit_at_least":8, "elapsed":0.001995, "rows_read":8, "bytes_read":168}}
```
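
For context, output of this shape is produced by any query that requests the format explicitly; the table below is a hypothetical example:

```sql
-- Hypothetical table; the format is requested per query.
SELECT id, name
FROM users
FORMAT JSONCompactWithProgress;
```
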
## JSONEachRow {#jsoneachrow}

In this format, ClickHouse outputs each row as a separate, newline-delimited JSON object.

@ -1342,6 +1395,7 @@ SELECT * FROM json_each_row_nested

- [input_format_json_ignore_unknown_keys_in_named_tuple](/docs/en/operations/settings/settings-formats.md/#input_format_json_ignore_unknown_keys_in_named_tuple) - ignore unknown keys in JSON objects for named tuples. Default value - `false`.
- [input_format_json_compact_allow_variable_number_of_columns](/docs/en/operations/settings/settings-formats.md/#input_format_json_compact_allow_variable_number_of_columns) - allow a variable number of columns in the JSONCompact/JSONCompactEachRow format, ignore extra columns and use default values on missing columns. Default value - `false`.
- [input_format_json_throw_on_bad_escape_sequence](/docs/en/operations/settings/settings-formats.md/#input_format_json_throw_on_bad_escape_sequence) - throw an exception if a JSON string contains a bad escape sequence. If disabled, bad escape sequences will remain as is in the data. Default value - `true`.
- [input_format_json_empty_as_default](/docs/en/operations/settings/settings-formats.md/#input_format_json_empty_as_default) - treat empty fields in JSON input as default values. Default value - `false`. For complex default expressions [input_format_defaults_for_omitted_fields](/docs/en/operations/settings/settings-formats.md/#input_format_defaults_for_omitted_fields) must be enabled too.
- [output_format_json_quote_64bit_integers](/docs/en/operations/settings/settings-formats.md/#output_format_json_quote_64bit_integers) - controls quoting of 64-bit integers in JSON output format. Default value - `true`.
- [output_format_json_quote_64bit_floats](/docs/en/operations/settings/settings-formats.md/#output_format_json_quote_64bit_floats) - controls quoting of 64-bit floats in JSON output format. Default value - `false`.
- [output_format_json_quote_denormals](/docs/en/operations/settings/settings-formats.md/#output_format_json_quote_denormals) - enables '+nan', '-nan', '+inf', '-inf' outputs in JSON output format. Default value - `false`.

@ -58,7 +58,7 @@ Connection: Close
Content-Type: text/tab-separated-values; charset=UTF-8
X-ClickHouse-Server-Display-Name: clickhouse.ru-central1.internal
X-ClickHouse-Query-Id: 5abe861c-239c-467f-b955-8a201abb8b7f
X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334", "real_time_microseconds": "0"}

1
```

@ -472,7 +472,7 @@ $ curl -v 'http://localhost:8123/predefined_query'
< X-ClickHouse-Format: Template
< X-ClickHouse-Timezone: Asia/Shanghai
< Keep-Alive: timeout=10
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334", "real_time_microseconds":"0"}
<
# HELP "Query" "Number of executing queries"
# TYPE "Query" counter

@ -668,7 +668,7 @@ $ curl -vv -H 'XXX:xxx' 'http://localhost:8123/hi'
< Content-Type: text/html; charset=UTF-8
< Transfer-Encoding: chunked
< Keep-Alive: timeout=10
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334", "real_time_microseconds":"0"}
<
* Connection #0 to host localhost left intact
Say Hi!%

@ -708,7 +708,7 @@ $ curl -v -H 'XXX:xxx' 'http://localhost:8123/get_config_static_handler'
< Content-Type: text/plain; charset=UTF-8
< Transfer-Encoding: chunked
< Keep-Alive: timeout=10
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334", "real_time_microseconds":"0"}
<
* Connection #0 to host localhost left intact
<html ng-app="SMI2"><head><base href="http://ui.tabix.io/"></head><body><div ui-view="" class="content-ui"></div><script src="http://loader.tabix.io/master.js"></script></body></html>%

@ -766,7 +766,7 @@ $ curl -vv -H 'XXX:xxx' 'http://localhost:8123/get_absolute_path_static_handler'
< Content-Type: text/html; charset=UTF-8
< Transfer-Encoding: chunked
< Keep-Alive: timeout=10
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334", "real_time_microseconds":"0"}
<
<html><body>Absolute Path File</body></html>
* Connection #0 to host localhost left intact

@ -785,7 +785,7 @@ $ curl -vv -H 'XXX:xxx' 'http://localhost:8123/get_relative_path_static_handler'
< Content-Type: text/html; charset=UTF-8
< Transfer-Encoding: chunked
< Keep-Alive: timeout=10
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334", "real_time_microseconds":"0"}
<
<html><body>Relative Path File</body></html>
* Connection #0 to host localhost left intact

10
docs/en/interfaces/third-party/gui.md
vendored
@ -233,6 +233,16 @@ Features:

- Useful tools: Zookeeper data exploration, query EXPLAIN, kill queries, etc.
- Visualization metric charts: queries and resource usage, number of merges/mutations, merge performance, query performance, etc.

### CKibana {#ckibana}

[CKibana](https://github.com/TongchengOpenSource/ckibana) is a lightweight service that allows you to effortlessly search, explore, and visualize ClickHouse data using the native Kibana UI.

Features:

- Translates chart requests from the native Kibana UI into ClickHouse query syntax.
- Supports advanced features such as sampling and caching to enhance query performance.
- Minimizes the learning cost for users after migrating from ElasticSearch to ClickHouse.

## Commercial {#commercial}

### DataGrip {#datagrip}

@ -6,7 +6,7 @@ import SelfManaged from '@site/docs/en/_snippets/_self_managed_only_no_roadmap.m

<SelfManaged />

The [SSL 'strict' option](../server-configuration-parameters/settings.md#server_configuration_parameters-openssl) enables mandatory certificate validation for incoming connections. In this case, only connections with trusted certificates can be established; connections with untrusted certificates will be rejected. Thus, certificate validation makes it possible to uniquely authenticate an incoming connection. The `Common Name` or `subjectAltName extension` field of the certificate is used to identify the connected user. The `subjectAltName extension` supports the usage of one wildcard '*' in the server configuration. This makes it possible to associate multiple certificates with the same user. Additionally, reissuing and revoking certificates does not affect the ClickHouse configuration.

To enable SSL certificate authentication, a list of `Common Name`'s or `Subject Alt Name`'s for each ClickHouse user must be specified in the settings file `users.xml`:

@ -30,6 +30,12 @@ To enable SSL certificate authentication, a list of `Common Name`'s or `Subject
        </ssl_certificates>
        <!-- Other settings -->
    </user_name_2>
    <user_name_3>
        <ssl_certificates>
            <!-- Wildcard support -->
            <subject_alt_name>URI:spiffe://foo.com/*/bar</subject_alt_name>
        </ssl_certificates>
    </user_name_3>
</users>
</clickhouse>
```

@ -491,6 +491,14 @@ Type: Double

Default: 0.9

## max_build_vector_similarity_index_thread_pool_size {#server_configuration_parameters_max_build_vector_similarity_index_thread_pool_size}

The maximum number of threads to use for building vector indexes. 0 means all cores.

Type: UInt64

Default: 16

## cgroups_memory_usage_observer_wait_time

Interval in seconds during which the server's maximum allowed memory consumption is adjusted by the corresponding threshold in cgroups. (see

|
||||
|
||||
## logger {#logger}
|
||||
|
||||
Logging settings.
|
||||
The location and format of log messages.
|
||||
|
||||
Keys:
|
||||
|
||||
- `level` – Logging level. Acceptable values: `trace`, `debug`, `information`, `warning`, `error`.
|
||||
- `log` – The log file. Contains all the entries according to `level`.
|
||||
- `errorlog` – Error log file.
|
||||
- `size` – Size of the file. Applies to `log` and `errorlog`. Once the file reaches `size`, ClickHouse archives and renames it, and creates a new log file in its place.
|
||||
- `count` – The number of archived log files that ClickHouse stores.
|
||||
- `console` – Send `log` and `errorlog` to the console instead of file. To enable, set to `1` or `true`.
|
||||
- `console_log_level` – Logging level for console. Default to `level`.
|
||||
- `use_syslog` - Log to syslog as well.
|
||||
- `syslog_level` - Logging level for logging to syslog.
|
||||
- `stream_compress` – Compress `log` and `errorlog` with `lz4` stream compression. To enable, set to `1` or `true`.
|
||||
- `formatting` – Specify log format to be printed in console log (currently only `json` supported).
|
||||
- `level` – Log level. Acceptable values: `none` (turn logging off), `fatal`, `critical`, `error`, `warning`, `notice`, `information`,
|
||||
`debug`, `trace`, `test`
|
||||
- `log` – The path to the log file.
|
||||
- `errorlog` – The path to the error log file.
|
||||
- `size` – Rotation policy: Maximum size of the log files in bytes. Once the log file size exceeds this threshold, it is renamed and archived, and a new log file is created.
|
||||
- `count` – Rotation policy: How many historical log files Clickhouse are kept at most.
|
||||
- `stream_compress` – Compress log messages using LZ4. Set to `1` or `true` to enable.
|
||||
- `console` – Do not write log messages to log files, instead print them in the console. Set to `1` or `true` to enable. Default is
|
||||
`1` if Clickhouse does not run in daemon mode, `0` otherwise.
|
||||
- `console_log_level` – Log level for console output. Defaults to `level`.
|
||||
- `formatting` – Log format for console output. Currently, only `json` is supported).
|
||||
- `use_syslog` - Also forward log output to syslog.
|
||||
- `syslog_level` - Log level for logging to syslog.
|
||||
|
||||
Both log and error log file names (only file names, not directories) support date and time format specifiers.
|
||||
**Log format specifiers**
|
||||
|
||||
**Format specifiers**
|
||||
Using the following format specifiers, you can define a pattern for the resulting file name. “Example” column shows possible results for `2023-07-06 18:32:07`.
|
||||
File names in `log` and `errorLog` paths support below format specifiers for the resulting file name (the directory part does not support them).
|
||||
|
||||
Column “Example” shows the output at `2023-07-06 18:32:07`.
|
||||
|
||||
| Specifier | Description | Example |
|
||||
|-------------|---------------------------------------------------------------------------------------------------------------------|--------------------------|
|
||||
@ -1537,18 +1548,37 @@ Using the following format specifiers, you can define a pattern for the resultin
|
||||
</logger>
|
||||
```
|
||||
|
||||
Writing to the console can be configured. Config example:
|
||||
To print log messages only in the console:
|
||||
|
||||
``` xml
|
||||
<logger>
|
||||
<level>information</level>
|
||||
<console>1</console>
|
||||
<console>true</console>
|
||||
</logger>
|
||||
```
|
||||
|
||||
**Per-level Overrides**
|
||||
|
||||
The log level of individual log names can be overridden. For example, to mute all messages of loggers "Backup" and "RBAC".
|
||||
|
||||
```xml
|
||||
<logger>
|
||||
<levels>
|
||||
<logger>
|
||||
<name>Backup</name>
|
||||
<level>none</level>
|
||||
</logger>
|
||||
<logger>
|
||||
<name>RBAC</name>
|
||||
<level>none</level>
|
||||
</logger>
|
||||
</levels>
|
||||
</logger>
|
||||
```
|
||||
|
||||
### syslog
|
||||
|
||||
Writing to the syslog is also supported. Config example:
|
||||
To write log messages additionally to syslog:
|
||||
|
||||
``` xml
|
||||
<logger>
|
||||
@ -1562,14 +1592,12 @@ Writing to the syslog is also supported. Config example:
|
||||
</logger>
|
||||
```
|
||||
|
||||
Keys for syslog:
|
||||
Keys for `<syslog>`:
|
||||
|
||||
- use_syslog — Required setting if you want to write to the syslog.
|
||||
- address — The host\[:port\] of syslogd. If omitted, the local daemon is used.
|
||||
- hostname — Optional. The name of the host that logs are sent from.
|
||||
- facility — [The syslog facility keyword](https://en.wikipedia.org/wiki/Syslog#Facility) in uppercase letters with the “LOG_” prefix: (`LOG_USER`, `LOG_DAEMON`, `LOG_LOCAL3`, and so on).
|
||||
Default value: `LOG_USER` if `address` is specified, `LOG_DAEMON` otherwise.
|
||||
- format – Message format. Possible values: `bsd` and `syslog.`
|
||||
- `address` — The address of syslog in format `host\[:port\]`. If omitted, the local daemon is used.
|
||||
- `hostname` — The name of the host from which logs are send. Optional.
|
||||
- `facility` — The syslog [facility keyword](https://en.wikipedia.org/wiki/Syslog#Facility). Must be specified uppercase with a “LOG_” prefix, e.g. `LOG_USER`, `LOG_DAEMON`, `LOG_LOCAL3`, etc. Default value: `LOG_USER` if `address` is specified, `LOG_DAEMON` otherwise.
|
||||
- `format` – Log message format. Possible values: `bsd` and `syslog.`
|
||||
|
||||
### Log formats
|
||||
|
||||
@ -1588,6 +1616,7 @@ You can specify the log format that will be outputted in the console log. Curren
|
||||
"source_line": "192"
|
||||
}
|
||||
```
|
||||
|
||||
To enable JSON logging support, use the following snippet:
|
||||
|
||||
```xml
|
||||
@ -3121,3 +3150,15 @@ Default value: "default"
|
||||
|
||||
**See Also**
|
||||
- [Workload Scheduling](/docs/en/operations/workload-scheduling.md)
|
||||
|
||||
## max_authentication_methods_per_user {#max_authentication_methods_per_user}
|
||||
|
||||
The maximum number of authentication methods a user can be created with or altered to.
|
||||
Changing this setting does not affect existing users. Create/alter authentication-related queries will fail if they exceed the limit specified in this setting.
|
||||
Non authentication create/alter queries will succeed.
|
||||
|
||||
Type: UInt64
|
||||
|
||||
Default value: 100
|
||||
|
||||
Zero means unlimited
|
||||
|
@ -752,6 +752,17 @@ Possible values:

Default value: 0.

### input_format_json_empty_as_default {#input_format_json_empty_as_default}

When enabled, replace empty input fields in JSON with default values. For complex default expressions `input_format_defaults_for_omitted_fields` must be enabled too.

Possible values:

- 0 — Disabled.
- 1 — Enabled.

Default value: 0.
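
A minimal sketch of the behavior; the table and values are illustrative, and the expected result is an assumption based on the description above:

```sql
SET input_format_json_empty_as_default = 1;

-- Illustrative table with a non-trivial default.
CREATE TABLE t (x UInt32 DEFAULT 42) ENGINE = Memory;

-- With the setting enabled, the empty value should be replaced by the column default (42).
INSERT INTO t FORMAT JSONEachRow {"x": ""};

SELECT * FROM t;
```
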

## TSV format settings {#tsv-format-settings}

### input_format_tsv_empty_as_default {#input_format_tsv_empty_as_default}

@ -3226,7 +3226,7 @@ Default value: `0`.

## lightweight_deletes_sync {#lightweight_deletes_sync}

The same as [`mutations_sync`](#mutations_sync), but controls only execution of lightweight deletes.

Possible values:

@ -499,7 +499,7 @@ Required parameters:

- `type` — `encrypted`. Otherwise the encrypted disk is not created.
- `disk` — Type of disk for data storage.
- `key` — The key for encryption and decryption. Type: [Uint64](/docs/en/sql-reference/data-types/int-uint.md). You can use the `key_hex` parameter to encode the key in hexadecimal form.
  You can specify multiple keys using the `id` attribute (see example below).

Optional parameters:

@ -47,13 +47,15 @@ keeper foo bar

- `ls '[path]'` -- Lists the nodes for the given path (default: cwd)
- `cd '[path]'` -- Changes the working path (default `.`)
- `cp '<src>' '<dest>'` -- Copies 'src' node to 'dest' path
- `mv '<src>' '<dest>'` -- Moves 'src' node to the 'dest' path
- `exists '<path>'` -- Returns `1` if node exists, `0` otherwise
- `set '<path>' <value> [version]` -- Updates the node's value. Only updates if version matches (default: -1)
- `create '<path>' <value> [mode]` -- Creates new node with the set value
- `touch '<path>'` -- Creates new node with an empty string as value. Doesn't throw an exception if the node already exists
- `get '<path>'` -- Returns the node's value
- `rm '<path>' [version]` -- Removes the node only if version matches (default: -1)
- `rmr '<path>' [limit]` -- Recursively deletes path if the subtree size is smaller than the limit. Confirmation required (default limit = 100)
- `flwc <command>` -- Executes four-letter-word command
- `help` -- Prints this message
- `get_direct_children_number '[path]'` -- Gets the number of direct child nodes under a specific path

@ -104,7 +104,7 @@ Events that occur at the same second may lay in the sequence in an undefined ord

**Parameters**

- `pattern` — Pattern string. See [Pattern syntax](#pattern-syntax).

**Returned values**

@ -113,8 +113,7 @@ Events that occur at the same second may lay in the sequence in an undefined ord

Type: `UInt8`.

#### Pattern syntax

- `(?N)` — Matches the condition argument at position `N`. Conditions are numbered in the `[1, 32]` range. For example, `(?1)` matches the argument passed to the `cond1` parameter.

@ -196,7 +195,7 @@ sequenceCount(pattern)(timestamp, cond1, cond2, ...)

**Parameters**

- `pattern` — Pattern string. See [Pattern syntax](#pattern-syntax).

**Returned values**

@ -0,0 +1,44 @@
---
slug: /en/sql-reference/aggregate-functions/reference/distinctdynamictypes
sidebar_position: 215
---

# distinctDynamicTypes

Calculates the list of distinct data types stored in a [Dynamic](../../data-types/dynamic.md) column.

**Syntax**

```sql
distinctDynamicTypes(dynamic)
```

**Arguments**

- `dynamic` — [Dynamic](../../data-types/dynamic.md) column.

**Returned Value**

- The sorted list of data type names. [Array(String)](../../data-types/array.md).

**Example**

Query:

```sql
DROP TABLE IF EXISTS test_dynamic;
CREATE TABLE test_dynamic(d Dynamic) ENGINE = Memory;
INSERT INTO test_dynamic VALUES (42), (NULL), ('Hello'), ([1, 2, 3]), ('2020-01-01'), (map(1, 2)), (43), ([4, 5]), (NULL), ('World'), (map(3, 4))
```

```sql
SELECT distinctDynamicTypes(d) FROM test_dynamic;
```

Result:

```reference
┌─distinctDynamicTypes(d)──────────────────────────────────────┐
│ ['Array(Int64)','Date','Int64','Map(UInt8, UInt8)','String'] │
└──────────────────────────────────────────────────────────────┘
```
@ -0,0 +1,125 @@
---
slug: /en/sql-reference/aggregate-functions/reference/distinctjsonpaths
sidebar_position: 216
---

# distinctJSONPaths

Calculates the list of distinct paths stored in a [JSON](../../data-types/newjson.md) column.

**Syntax**

```sql
distinctJSONPaths(json)
```

**Arguments**

- `json` — [JSON](../../data-types/newjson.md) column.

**Returned Value**

- The sorted list of paths. [Array(String)](../../data-types/array.md).

**Example**

Query:

```sql
DROP TABLE IF EXISTS test_json;
CREATE TABLE test_json(json JSON) ENGINE = Memory;
INSERT INTO test_json VALUES ('{"a" : 42, "b" : "Hello"}'), ('{"b" : [1, 2, 3], "c" : {"d" : {"e" : "2020-01-01"}}}'), ('{"a" : 43, "c" : {"d" : {"f" : [{"g" : 42}]}}}')
```

```sql
SELECT distinctJSONPaths(json) FROM test_json;
```

Result:

```reference
┌─distinctJSONPaths(json)───┐
│ ['a','b','c.d.e','c.d.f'] │
└───────────────────────────┘
```

# distinctJSONPathsAndTypes

Calculates the list of distinct paths and their types stored in a [JSON](../../data-types/newjson.md) column.

**Syntax**

```sql
distinctJSONPathsAndTypes(json)
```

**Arguments**

- `json` — [JSON](../../data-types/newjson.md) column.

**Returned Value**

- The sorted map of paths and types. [Map(String, Array(String))](../../data-types/map.md).

**Example**

Query:

```sql
DROP TABLE IF EXISTS test_json;
CREATE TABLE test_json(json JSON) ENGINE = Memory;
INSERT INTO test_json VALUES ('{"a" : 42, "b" : "Hello"}'), ('{"b" : [1, 2, 3], "c" : {"d" : {"e" : "2020-01-01"}}}'), ('{"a" : 43, "c" : {"d" : {"f" : [{"g" : 42}]}}}')
```

```sql
SELECT distinctJSONPathsAndTypes(json) FROM test_json;
```

Result:

```reference
┌─distinctJSONPathsAndTypes(json)───────────────────────────────────────────────────────────────────────────────────────────────────────────────┐
│ {'a':['Int64'],'b':['Array(Nullable(Int64))','String'],'c.d.e':['Date'],'c.d.f':['Array(JSON(max_dynamic_types=16, max_dynamic_paths=256))']} │
└───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘
```

**Note**

If the JSON declaration contains paths with specified types, these paths will always be included in the result of the `distinctJSONPaths`/`distinctJSONPathsAndTypes` functions, even if the input data did not have values for these paths.

```sql
DROP TABLE IF EXISTS test_json;
CREATE TABLE test_json(json JSON(a UInt32)) ENGINE = Memory;
INSERT INTO test_json VALUES ('{"b" : "Hello"}'), ('{"b" : "World", "c" : [1, 2, 3]}');
```

```sql
SELECT json FROM test_json;
```

```text
┌─json──────────────────────────────────┐
│ {"a":0,"b":"Hello"}                   │
│ {"a":0,"b":"World","c":["1","2","3"]} │
└───────────────────────────────────────┘
```

```sql
SELECT distinctJSONPaths(json) FROM test_json;
```

```text
┌─distinctJSONPaths(json)─┐
│ ['a','b','c']           │
└─────────────────────────┘
```

```sql
SELECT distinctJSONPathsAndTypes(json) FROM test_json;
```

```text
┌─distinctJSONPathsAndTypes(json)────────────────────────────────┐
│ {'a':['UInt32'],'b':['String'],'c':['Array(Nullable(Int64))']} │
└────────────────────────────────────────────────────────────────┘
```
@ -9,7 +9,7 @@ Computes an approximate [quantile](https://en.wikipedia.org/wiki/Quantile) of a
**Syntax**

``` sql
quantileDD(relative_accuracy, [level])(expr)
```

**Arguments**

@ -453,8 +453,8 @@ As we can see, after inserting paths `e` and `f.g` the limit was reached and we

### During merges of data parts in MergeTree table engines

During a merge of several data parts in a MergeTree table, the `JSON` column in the resulting data part can reach the limit of dynamic paths and won't be able to store all paths from source parts as subcolumns.
In this case ClickHouse chooses what paths will remain as subcolumns after the merge and what paths will be stored in the shared data structure. In most cases ClickHouse tries to keep the paths that contain
the largest number of non-null values and move the rarest paths to the shared data structure, but it depends on the implementation.

Let's see an example of such a merge. First, let's create a table with a `JSON` column, set the limit of dynamic paths to `3` and insert values with `5` different paths:

@ -505,7 +505,130 @@ As we can see, ClickHouse kept the most frequent paths `a`, `b` and `c` and move

## Introspection functions

There are several functions that can help to inspect the content of the JSON column: [JSONAllPaths](../functions/json-functions.md#jsonallpaths), [JSONAllPathsWithTypes](../functions/json-functions.md#jsonallpathswithtypes), [JSONDynamicPaths](../functions/json-functions.md#jsondynamicpaths), [JSONDynamicPathsWithTypes](../functions/json-functions.md#jsondynamicpathswithtypes), [JSONSharedDataPaths](../functions/json-functions.md#jsonshareddatapaths), [JSONSharedDataPathsWithTypes](../functions/json-functions.md#jsonshareddatapathswithtypes), [distinctDynamicTypes](../aggregate-functions/reference/distinctdynamictypes.md), [distinctJSONPaths and distinctJSONPathsAndTypes](../aggregate-functions/reference/distinctjsonpaths.md).

**Examples**

Let's investigate the content of the [GH Archive](https://www.gharchive.org/) dataset for the date `2020-01-01`:

```sql
SELECT arrayJoin(distinctJSONPaths(json)) FROM s3('s3://clickhouse-public-datasets/gharchive/original/2020-01-01-*.json.gz', JSONAsObject)
```

```text
┌─arrayJoin(distinctJSONPaths(json))─────────────────────────┐
│ actor.avatar_url │
│ actor.display_login │
│ actor.gravatar_id │
│ actor.id │
│ actor.login │
│ actor.url │
│ created_at │
│ id │
│ org.avatar_url │
│ org.gravatar_id │
│ org.id │
│ org.login │
│ org.url │
│ payload.action │
│ payload.before │
│ payload.comment._links.html.href │
│ payload.comment._links.pull_request.href │
│ payload.comment._links.self.href │
│ payload.comment.author_association │
│ payload.comment.body │
│ payload.comment.commit_id │
│ payload.comment.created_at │
│ payload.comment.diff_hunk │
│ payload.comment.html_url │
│ payload.comment.id │
│ payload.comment.in_reply_to_id │
│ payload.comment.issue_url │
│ payload.comment.line │
│ payload.comment.node_id │
│ payload.comment.original_commit_id │
│ payload.comment.original_position │
│ payload.comment.path │
│ payload.comment.position │
│ payload.comment.pull_request_review_id │
...
│ payload.release.node_id │
│ payload.release.prerelease │
│ payload.release.published_at │
│ payload.release.tag_name │
│ payload.release.tarball_url │
│ payload.release.target_commitish │
│ payload.release.upload_url │
│ payload.release.url │
│ payload.release.zipball_url │
│ payload.size │
│ public │
│ repo.id │
│ repo.name │
│ repo.url │
│ type │
└─arrayJoin(distinctJSONPaths(json))─────────────────────────┘
```

```sql
SELECT arrayJoin(distinctJSONPathsAndTypes(json)) FROM s3('s3://clickhouse-public-datasets/gharchive/original/2020-01-01-*.json.gz', JSONAsObject) SETTINGS date_time_input_format='best_effort'
```

```text
┌─arrayJoin(distinctJSONPathsAndTypes(json))──────────────────┐
│ ('actor.avatar_url',['String']) │
│ ('actor.display_login',['String']) │
│ ('actor.gravatar_id',['String']) │
│ ('actor.id',['Int64']) │
│ ('actor.login',['String']) │
│ ('actor.url',['String']) │
│ ('created_at',['DateTime']) │
│ ('id',['String']) │
│ ('org.avatar_url',['String']) │
│ ('org.gravatar_id',['String']) │
│ ('org.id',['Int64']) │
│ ('org.login',['String']) │
│ ('org.url',['String']) │
│ ('payload.action',['String']) │
│ ('payload.before',['String']) │
│ ('payload.comment._links.html.href',['String']) │
│ ('payload.comment._links.pull_request.href',['String']) │
│ ('payload.comment._links.self.href',['String']) │
│ ('payload.comment.author_association',['String']) │
│ ('payload.comment.body',['String']) │
│ ('payload.comment.commit_id',['String']) │
│ ('payload.comment.created_at',['DateTime']) │
│ ('payload.comment.diff_hunk',['String']) │
│ ('payload.comment.html_url',['String']) │
│ ('payload.comment.id',['Int64']) │
│ ('payload.comment.in_reply_to_id',['Int64']) │
│ ('payload.comment.issue_url',['String']) │
│ ('payload.comment.line',['Int64']) │
│ ('payload.comment.node_id',['String']) │
│ ('payload.comment.original_commit_id',['String']) │
│ ('payload.comment.original_position',['Int64']) │
│ ('payload.comment.path',['String']) │
│ ('payload.comment.position',['Int64']) │
│ ('payload.comment.pull_request_review_id',['Int64']) │
...
│ ('payload.release.node_id',['String']) │
│ ('payload.release.prerelease',['Bool']) │
│ ('payload.release.published_at',['DateTime']) │
│ ('payload.release.tag_name',['String']) │
│ ('payload.release.tarball_url',['String']) │
│ ('payload.release.target_commitish',['String']) │
│ ('payload.release.upload_url',['String']) │
│ ('payload.release.url',['String']) │
│ ('payload.release.zipball_url',['String']) │
│ ('payload.size',['Int64']) │
│ ('public',['Bool']) │
│ ('repo.id',['Int64']) │
│ ('repo.name',['String']) │
│ ('repo.url',['String']) │
│ ('type',['String']) │
└─arrayJoin(distinctJSONPathsAndTypes(json))──────────────────┘
```

## Tips for better usage of the JSON type

@ -2035,6 +2035,7 @@ Query:
SELECT arrayZip(['a', 'b', 'c'], [5, 2, 1]);
```

Result:

``` text
@ -2043,6 +2044,43 @@ Result:
└──────────────────────────────────────┘
```

## arrayZipUnaligned

Combines multiple arrays into a single array, allowing for unaligned arrays. The resulting array contains the corresponding elements of the source arrays grouped into tuples in the listed order of arguments.

**Syntax**

``` sql
arrayZipUnaligned(arr1, arr2, ..., arrN)
```

**Arguments**

- `arrN` — [Array](../data-types/array.md).

The function can take any number of arrays of different types.

**Returned value**

- Array with elements from the source arrays grouped into [tuples](../data-types/tuple.md). Data types in the tuple are the same as the types of the input arrays and in the same order as the arrays are passed. [Array](../data-types/array.md). If the arrays have different sizes, the shorter arrays will be padded with `null` values.

**Example**

Query:

``` sql
SELECT arrayZipUnaligned(['a'], [1, 2, 3]);
```

Result:

``` text
┌─arrayZipUnaligned(['a'], [1, 2, 3])─┐
│ [('a',1),(NULL,2),(NULL,3)]         │
└─────────────────────────────────────┘
```

## arrayAUC

Calculates AUC (Area Under the Curve, a concept in machine learning; see more details: <https://en.wikipedia.org/wiki/Receiver_operating_characteristic#Area_under_the_curve>).

@ -1617,45 +1617,348 @@ The calculation is performed relative to specific points in time:
|
||||
|
||||
If unit `WEEK` was specified, `toStartOfInterval` assumes that weeks start on Monday. Note that this behavior is different from that of function `toStartOfWeek` in which weeks start by default on Sunday.
|
||||
|
||||
**See Also**
|
||||
**Syntax**
|
||||
|
||||
```sql
|
||||
toStartOfInterval(value, INTERVAL x unit[, time_zone])
|
||||
toStartOfInterval(value, INTERVAL x unit[, origin[, time_zone]])
|
||||
```
|
||||
|
||||
The second overload emulates TimescaleDB's `time_bucket()` function, respectively PostgreSQL's `date_bin()` function, e.g.
|
||||
|
||||
``` SQL
|
||||
SELECT toStartOfInterval(toDateTime('2023-01-01 14:45:00'), INTERVAL 1 MINUTE, toDateTime('2023-01-01 14:35:30'));
|
||||
```
|
||||
**See Also**
|
||||
- [date_trunc](#date_trunc)
|
||||
|
||||
## toTime
|
||||
|
||||
Converts a date with time to a certain fixed date, while preserving the time.
|
||||
|
||||
**Syntax**
|
||||
|
||||
```sql
|
||||
toTime(date[,timezone])
|
||||
```
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `date` — Date to convert to a time. [Date](../data-types/date.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).
|
||||
- `timezone` (optional) — Timezone for the returned value. [String](../data-types/string.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
- DateTime with date equated to `1970-01-02` while preserving the time. [DateTime](../data-types/datetime.md).
|
||||
|
||||
:::note
|
||||
If the `date` input argument contained sub-second components,
|
||||
they will be dropped in the returned `DateTime` value with second-accuracy.
|
||||
:::
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
```sql
|
||||
SELECT toTime(toDateTime64('1970-12-10 01:20:30.3000',3)) AS result, toTypeName(result);
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
```response
|
||||
┌──────────────result─┬─toTypeName(result)─┐
|
||||
│ 1970-01-02 01:20:30 │ DateTime │
|
||||
└─────────────────────┴────────────────────┘
|
||||
```
|
||||
|
||||
## toRelativeYearNum
|
||||
|
||||
Converts a date, or date with time, to the number of the year, starting from a certain fixed point in the past.
|
||||
Converts a date, or date with time, to the number of years elapsed since a certain fixed point in the past.
|
||||
|
||||
**Syntax**
|
||||
|
||||
```sql
|
||||
toRelativeYearNum(date)
|
||||
```
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `date` — Date or date with time. [Date](../data-types/date.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
- The number of years from a fixed reference point in the past. [UInt16](../data-types/int-uint.md).
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
```sql
|
||||
SELECT
|
||||
toRelativeYearNum(toDate('2002-12-08')) AS y1,
|
||||
toRelativeYearNum(toDate('2010-10-26')) AS y2
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
```response
|
||||
┌───y1─┬───y2─┐
|
||||
│ 2002 │ 2010 │
|
||||
└──────┴──────┘
|
||||
```

## toRelativeQuarterNum

Converts a date, or date with time, to the number of the quarter, starting from a certain fixed point in the past.
Converts a date, or date with time, to the number of quarters elapsed since a certain fixed point in the past.

**Syntax**

```sql
toRelativeQuarterNum(date)
```

**Arguments**

- `date` — Date or date with time. [Date](../data-types/date.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).

**Returned value**

- The number of quarters from a fixed reference point in the past. [UInt32](../data-types/int-uint.md).

**Example**

Query:

```sql
SELECT
    toRelativeQuarterNum(toDate('1993-11-25')) AS q1,
    toRelativeQuarterNum(toDate('2005-01-05')) AS q2
```

Result:

```response
┌───q1─┬───q2─┐
│ 7975 │ 8020 │
└──────┴──────┘
```

## toRelativeMonthNum

Converts a date, or date with time, to the number of the month, starting from a certain fixed point in the past.
Converts a date, or date with time, to the number of months elapsed since a certain fixed point in the past.

**Syntax**

```sql
toRelativeMonthNum(date)
```

**Arguments**

- `date` — Date or date with time. [Date](../data-types/date.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).

**Returned value**

- The number of months from a fixed reference point in the past. [UInt32](../data-types/int-uint.md).

**Example**

Query:

```sql
SELECT
    toRelativeMonthNum(toDate('2001-04-25')) AS m1,
    toRelativeMonthNum(toDate('2009-07-08')) AS m2
```

Result:

```response
┌────m1─┬────m2─┐
│ 24016 │ 24115 │
└───────┴───────┘
```

## toRelativeWeekNum

Converts a date, or date with time, to the number of the week, starting from a certain fixed point in the past.
Converts a date, or date with time, to the number of weeks elapsed since a certain fixed point in the past.

**Syntax**

```sql
toRelativeWeekNum(date)
```

**Arguments**

- `date` — Date or date with time. [Date](../data-types/date.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).

**Returned value**

- The number of weeks from a fixed reference point in the past. [UInt32](../data-types/int-uint.md).

**Example**

Query:

```sql
SELECT
    toRelativeWeekNum(toDate('2000-02-29')) AS w1,
    toRelativeWeekNum(toDate('2001-01-12')) AS w2
```

Result:

```response
┌───w1─┬───w2─┐
│ 1574 │ 1619 │
└──────┴──────┘
```

## toRelativeDayNum

Converts a date, or date with time, to the number of the day, starting from a certain fixed point in the past.
Converts a date, or date with time, to the number of days elapsed since a certain fixed point in the past.

**Syntax**

```sql
toRelativeDayNum(date)
```

**Arguments**

- `date` — Date or date with time. [Date](../data-types/date.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).

**Returned value**

- The number of days from a fixed reference point in the past. [UInt32](../data-types/int-uint.md).

**Example**

Query:

```sql
SELECT
    toRelativeDayNum(toDate('1993-10-05')) AS d1,
    toRelativeDayNum(toDate('2000-09-20')) AS d2
```

Result:

```response
┌───d1─┬────d2─┐
│ 8678 │ 11220 │
└──────┴───────┘
```

## toRelativeHourNum

Converts a date, or date with time, to the number of the hour, starting from a certain fixed point in the past.
Converts a date, or date with time, to the number of hours elapsed since a certain fixed point in the past.

**Syntax**

```sql
toRelativeHourNum(date)
```

**Arguments**

- `date` — Date or date with time. [Date](../data-types/date.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).

**Returned value**

- The number of hours from a fixed reference point in the past. [UInt32](../data-types/int-uint.md).

**Example**

Query:

```sql
SELECT
    toRelativeHourNum(toDateTime('1993-10-05 05:20:36')) AS h1,
    toRelativeHourNum(toDateTime('2000-09-20 14:11:29')) AS h2
```

Result:

```response
┌─────h1─┬─────h2─┐
│ 208276 │ 269292 │
└────────┴────────┘
```

## toRelativeMinuteNum

Converts a date, or date with time, to the number of the minute, starting from a certain fixed point in the past.
Converts a date, or date with time, to the number of minutes elapsed since a certain fixed point in the past.

**Syntax**

```sql
toRelativeMinuteNum(date)
```

**Arguments**

- `date` — Date or date with time. [Date](../data-types/date.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).

**Returned value**

- The number of minutes from a fixed reference point in the past. [UInt32](../data-types/int-uint.md).

**Example**

Query:

```sql
SELECT
    toRelativeMinuteNum(toDateTime('1993-10-05 05:20:36')) AS m1,
    toRelativeMinuteNum(toDateTime('2000-09-20 14:11:29')) AS m2
```

Result:

```response
┌───────m1─┬───────m2─┐
│ 12496580 │ 16157531 │
└──────────┴──────────┘
```

## toRelativeSecondNum

Converts a date, or date with time, to the number of the second, starting from a certain fixed point in the past.
Converts a date, or date with time, to the number of seconds elapsed since a certain fixed point in the past.

**Syntax**

```sql
toRelativeSecondNum(date)
```

**Arguments**

- `date` — Date or date with time. [Date](../data-types/date.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).

**Returned value**

- The number of seconds from a fixed reference point in the past. [UInt32](../data-types/int-uint.md).

**Example**

Query:

```sql
SELECT
    toRelativeSecondNum(toDateTime('1993-10-05 05:20:36')) AS s1,
    toRelativeSecondNum(toDateTime('2000-09-20 14:11:29')) AS s2
```

Result:

```response
┌────────s1─┬────────s2─┐
│ 749794836 │ 969451889 │
└───────────┴───────────┘
```

## toISOYear

@ -2019,7 +2322,7 @@ Alias: `dateTrunc`.

`unit` argument is case-insensitive.

- `value` — Date and time. [DateTime](../data-types/datetime.md) or [DateTime64](../data-types/datetime64.md).
- `value` — Date and time. [Date](../data-types/date.md), [Date32](../data-types/date32.md), [DateTime](../data-types/datetime.md) or [DateTime64](../data-types/datetime64.md).
- `timezone` — [Timezone name](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) for the returned value (optional). If not specified, the function uses the timezone of the `value` parameter. [String](../data-types/string.md).

**Returned value**

@ -3884,19 +4187,29 @@ Result:

└───────────────────────────────────────────────────────────────────────┘
```

## timeSlots(StartTime, Duration,\[, Size\])
## timeSlots

For a time interval starting at `StartTime` and continuing for `Duration` seconds, it returns an array of moments in time, consisting of points from this interval rounded down to the `Size` in seconds. `Size` is an optional parameter set to 1800 (30 minutes) by default.
This is necessary, for example, when searching for pageviews in the corresponding session.
Accepts DateTime and DateTime64 as the `StartTime` argument. For DateTime, the `Duration` and `Size` arguments must be `UInt32`. For `DateTime64` they must be `Decimal64`.
Returns an array of DateTime/DateTime64 (the return type matches the type of `StartTime`). For DateTime64, the return value's scale can differ from the scale of `StartTime`: the highest scale among all given arguments is taken.

Example:
**Syntax**

```sql
timeSlots(StartTime, Duration[, Size])
```

**Example**

```sql
SELECT timeSlots(toDateTime('2012-01-01 12:20:00'), toUInt32(600));
SELECT timeSlots(toDateTime('1980-12-12 21:01:02', 'UTC'), toUInt32(600), 299);
SELECT timeSlots(toDateTime64('1980-12-12 21:01:02.1234', 4, 'UTC'), toDecimal64(600.1, 1), toDecimal64(299, 0));
```

Result:

``` text
┌─timeSlots(toDateTime('2012-01-01 12:20:00'), toUInt32(600))─┐
│ ['2012-01-01 12:00:00','2012-01-01 12:30:00']               │
@ -20,10 +20,10 @@ overlay(s, replace, offset[, length])

**Parameters**

- `input`: A string type [String](../data-types/string.md).
- `s`: A string type [String](../data-types/string.md).
- `replace`: A string type [String](../data-types/string.md).
- `offset`: An integer type [Int](../data-types/int-uint.md). If `offset` is negative, it is counted from the end of the `input` string.
- `length`: Optional. An integer type [Int](../data-types/int-uint.md). `length` specifies the length of the snippet within input to be replaced. If `length` is not specified, the number of bytes removed from `input` equals the length of `replace`; otherwise `length` bytes are removed.
- `offset`: An integer type [Int](../data-types/int-uint.md) (1-based). If `offset` is negative, it is counted from the end of the string `s`.
- `length`: Optional. An integer type [Int](../data-types/int-uint.md). `length` specifies the length of the snippet within the input string `s` to be replaced. If `length` is not specified, the number of bytes removed from `s` equals the length of `replace`; otherwise `length` bytes are removed.

**Returned value**
@ -32,22 +32,35 @@ overlay(s, replace, offset[, length])

**Example**

```sql
SELECT overlay('ClickHouse SQL', 'CORE', 12) AS res;
SELECT overlay('My father is from Mexico.', 'mother', 4) AS res;
```

Result:

```text
┌─res─────────────┐
│ ClickHouse CORE │
└─────────────────┘
┌─res──────────────────────┐
│ My mother is from Mexico.│
└──────────────────────────┘
```

```sql
SELECT overlay('My father is from Mexico.', 'dad', 4, 6) AS res;
```

Result:

```text
┌─res───────────────────┐
│ My dad is from Mexico.│
└───────────────────────┘
```
## overlayUTF8

Replace part of the string `input` with another string `replace`, starting at the 1-based index `offset`.

Assumes that the string contains valid UTF-8 encoded text. If this assumption is violated, no exception is thrown and the result is undefined.
Assumes that the string contains valid UTF-8 encoded text.
If this assumption is violated, no exception is thrown and the result is undefined.

**Syntax**

@ -59,8 +72,8 @@ overlayUTF8(s, replace, offset[, length])

- `s`: A string type [String](../data-types/string.md).
- `replace`: A string type [String](../data-types/string.md).
- `offset`: An integer type [Int](../data-types/int-uint.md). If `offset` is negative, it is counted from the end of the `input` string.
- `length`: Optional. An integer type [Int](../data-types/int-uint.md). `length` specifies the length of the snippet within input to be replaced. If `length` is not specified, the number of characters removed from `input` equals the length of `replace`; otherwise `length` characters are removed.
- `offset`: An integer type [Int](../data-types/int-uint.md) (1-based). If `offset` is negative, it is counted from the end of the input string `s`.
- `length`: Optional. An integer type [Int](../data-types/int-uint.md). `length` specifies the length of the snippet within the input string `s` to be replaced. If `length` is not specified, the number of characters removed from `s` equals the length of `replace`; otherwise `length` characters are removed.

**Returned value**
@ -69,15 +82,15 @@ overlayUTF8(s, replace, offset[, length])

**Example**

```sql
SELECT overlayUTF8('ClickHouse是一款OLAP数据库', '开源', 12, 2) AS res;
SELECT overlayUTF8('Mein Vater ist aus Österreich.', 'der Türkei', 20) AS res;
```

Result:

```text
┌─res────────────────────────┐
│ ClickHouse是开源OLAP数据库 │
└────────────────────────────┘
┌─res───────────────────────────┐
│ Mein Vater ist aus der Türkei.│
└───────────────────────────────┘
```
## replaceOne

@ -49,6 +49,55 @@ SETTINGS cast_keep_nullable = 1

└──────────────────┴─────────────────────┴──────────────────┘
```
## toBool

Converts an input value to a value of type [`Bool`](../data-types/boolean.md). Throws an exception in case of an error.

**Syntax**

```sql
toBool(expr)
```

**Arguments**

- `expr` — Expression returning a number or a string. [Expression](../syntax.md/#syntax-expressions).

Supported arguments:
- Values of type (U)Int8/16/32/64/128/256.
- Values of type Float32/64.
- Strings `true` or `false` (case-insensitive).

**Returned value**

- Returns `true` or `false` based on evaluation of the argument. [Bool](../data-types/boolean.md).

**Example**

Query:

```sql
SELECT
    toBool(toUInt8(1)),
    toBool(toInt8(-1)),
    toBool(toFloat32(1.01)),
    toBool('true'),
    toBool('false'),
    toBool('FALSE')
FORMAT Vertical
```

Result:

```response
toBool(toUInt8(1)):      true
toBool(toInt8(-1)):      true
toBool(toFloat32(1.01)): true
toBool('true'):          true
toBool('false'):         false
toBool('FALSE'):         false
```
## toInt8

Converts an input value to a value of type [`Int8`](../data-types/int-uint.md). Throws an exception in case of an error.

@ -3857,7 +3906,7 @@ Result:

## toDateTime64

Converts the argument to the [DateTime64](../data-types/datetime64.md) data type.
Converts an input value to a value of type [DateTime64](../data-types/datetime64.md).

**Syntax**

@ -3869,7 +3918,7 @@ toDateTime64(expr, scale, [timezone])

- `expr` — The value. [String](../data-types/string.md), [UInt32](../data-types/int-uint.md), [Float](../data-types/float.md) or [DateTime](../data-types/datetime.md).
- `scale` - Tick size (precision): 10<sup>-precision</sup> seconds. Valid range: [ 0 : 9 ].
- `timezone` - Time zone of the specified datetime64 object.
- `timezone` (optional) - Time zone of the specified datetime64 object.

**Returned value**
@ -3928,10 +3977,137 @@ SELECT toDateTime64('2019-01-01 00:00:00', 3, 'Asia/Istanbul') AS value, toTypeN

## toDateTime64OrZero

Like [toDateTime64](#todatetime64), this function converts an input value to a value of type [DateTime64](../data-types/datetime64.md) but returns the min value of [DateTime64](../data-types/datetime64.md) if an invalid argument is received.

**Syntax**

``` sql
toDateTime64OrZero(expr, scale, [timezone])
```

**Arguments**

- `expr` — The value. [String](../data-types/string.md), [UInt32](../data-types/int-uint.md), [Float](../data-types/float.md) or [DateTime](../data-types/datetime.md).
- `scale` - Tick size (precision): 10<sup>-precision</sup> seconds. Valid range: [ 0 : 9 ].
- `timezone` (optional) - Time zone of the specified DateTime64 object.

**Returned value**

- A calendar date and time of day, with sub-second precision, otherwise the minimum value of `DateTime64`: `1970-01-01 01:00:00.000`. [DateTime64](../data-types/datetime64.md).

**Example**

Query:

```sql
SELECT toDateTime64OrZero('2008-10-12 00:00:00 00:30:30', 3) AS invalid_arg
```

Result:

```response
┌─────────────invalid_arg─┐
│ 1970-01-01 01:00:00.000 │
└─────────────────────────┘
```

**See also**

- [toDateTime64](#todatetime64).
- [toDateTime64OrNull](#todatetime64ornull).
- [toDateTime64OrDefault](#todatetime64ordefault).
## toDateTime64OrNull

Like [toDateTime64](#todatetime64), this function converts an input value to a value of type [DateTime64](../data-types/datetime64.md) but returns `NULL` if an invalid argument is received.

**Syntax**

``` sql
toDateTime64OrNull(expr, scale, [timezone])
```

**Arguments**

- `expr` — The value. [String](../data-types/string.md), [UInt32](../data-types/int-uint.md), [Float](../data-types/float.md) or [DateTime](../data-types/datetime.md).
- `scale` - Tick size (precision): 10<sup>-precision</sup> seconds. Valid range: [ 0 : 9 ].
- `timezone` (optional) - Time zone of the specified DateTime64 object.

**Returned value**

- A calendar date and time of day, with sub-second precision, otherwise `NULL`. [DateTime64](../data-types/datetime64.md)/[NULL](../data-types/nullable.md).

**Example**

Query:

```sql
SELECT
    toDateTime64OrNull('1976-10-18 00:00:00.30', 3) AS valid_arg,
    toDateTime64OrNull('1976-10-18 00:00:00 30', 3) AS invalid_arg
```

Result:

```response
┌───────────────valid_arg─┬─invalid_arg─┐
│ 1976-10-18 00:00:00.300 │ ᴺᵁᴸᴸ        │
└─────────────────────────┴─────────────┘
```

**See also**

- [toDateTime64](#todatetime64).
- [toDateTime64OrZero](#todatetime64orzero).
- [toDateTime64OrDefault](#todatetime64ordefault).
## toDateTime64OrDefault

Like [toDateTime64](#todatetime64), this function converts an input value to a value of type [DateTime64](../data-types/datetime64.md),
but returns either the minimum value of [DateTime64](../data-types/datetime64.md)
or the provided default if an invalid argument is received.

**Syntax**

``` sql
toDateTime64OrDefault(expr, scale, [timezone, default])
```

**Arguments**

- `expr` — The value. [String](../data-types/string.md), [UInt32](../data-types/int-uint.md), [Float](../data-types/float.md) or [DateTime](../data-types/datetime.md).
- `scale` - Tick size (precision): 10<sup>-precision</sup> seconds. Valid range: [ 0 : 9 ].
- `timezone` (optional) - Time zone of the specified DateTime64 object.
- `default` (optional) - Default value to return if an invalid argument is received. [DateTime64](../data-types/datetime64.md).

**Returned value**

- A calendar date and time of day, with sub-second precision, otherwise the minimum value of `DateTime64` or the `default` value if provided. [DateTime64](../data-types/datetime64.md).

**Example**

Query:

```sql
SELECT
    toDateTime64OrDefault('1976-10-18 00:00:00 30', 3) AS invalid_arg,
    toDateTime64OrDefault('1976-10-18 00:00:00 30', 3, 'UTC', toDateTime64('2001-01-01 00:00:00.00',3)) AS invalid_arg_with_default
```

Result:

```response
┌─────────────invalid_arg─┬─invalid_arg_with_default─┐
│ 1970-01-01 01:00:00.000 │  2000-12-31 23:00:00.000 │
└─────────────────────────┴──────────────────────────┘
```

**See also**

- [toDateTime64](#todatetime64).
- [toDateTime64OrZero](#todatetime64orzero).
- [toDateTime64OrNull](#todatetime64ornull).

## toDecimal32

Converts an input value to a value of type [`Decimal(9, S)`](../data-types/decimal.md) with scale of `S`. Throws an exception in case of an error.
@ -265,8 +265,6 @@ SELECT now() AS current_date_time, current_date_time + INTERVAL '4' day + INTERV

└─────────────────────┴────────────────────────────────────────────────────────────┘
```

You can work with dates without using `INTERVAL`, just by adding or subtracting seconds, minutes, and hours. For example, an interval of one day can be set by adding `60*60*24`.

:::note
The `INTERVAL` syntax or the `addDays` function is always preferred. Simple addition or subtraction (syntax like `now() + ...`) doesn't account for time settings such as daylight saving time.
:::
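
A minimal sketch of such second-based arithmetic (keeping the caveat above in mind):

```sql
-- One day expressed as seconds; prefer INTERVAL 1 DAY or addDays() in production queries.
SELECT now() AS current_time, now() + 60*60*24 AS same_time_tomorrow;
```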
@ -351,7 +351,7 @@ ALTER TABLE mt DELETE IN PARTITION ID '2' WHERE p = 2;

You can specify the partition expression in `ALTER ... PARTITION` queries in different ways:

- As a value from the `partition` column of the `system.parts` table. For example, `ALTER TABLE visits DETACH PARTITION 201901`.
- Using the keyword `ALL`. It can be used only with DROP/DETACH/ATTACH. For example, `ALTER TABLE visits ATTACH PARTITION ALL`.
- Using the keyword `ALL`. It can be used only with DROP/DETACH/ATTACH/ATTACH FROM. For example, `ALTER TABLE visits ATTACH PARTITION ALL`.
- As a tuple of expressions or constants that matches (in types) the table partitioning keys tuple. In the case of a single element partitioning key, the expression should be wrapped in the `tuple (...)` function. For example, `ALTER TABLE visits DETACH PARTITION tuple(toYYYYMM(toDate('2019-01-25')))`.
- Using the partition ID. The partition ID is a string identifier of the partition (human-readable, if possible) that is used as the name of the partition in the file system and in ZooKeeper. The partition ID must be specified in the `PARTITION ID` clause, in single quotes. For example, `ALTER TABLE visits DETACH PARTITION ID '201901'`.
- In the [ALTER ATTACH PART](#attach-partitionpart) and [DROP DETACHED PART](#drop-detached-partitionpart) queries, to specify the name of a part, use a string literal with a value from the `name` column of the [system.detached_parts](/docs/en/operations/system-tables/detached_parts.md/#system_tables-detached_parts) table. For example, `ALTER TABLE visits ATTACH PART '201901_1_1_0'`.
@ -12,9 +12,10 @@ Syntax:

``` sql
ALTER USER [IF EXISTS] name1 [ON CLUSTER cluster_name1] [RENAME TO new_name1]
    [, name2 [ON CLUSTER cluster_name2] [RENAME TO new_name2] ...]
    [NOT IDENTIFIED | IDENTIFIED {[WITH {no_password | plaintext_password | sha256_password | sha256_hash | double_sha1_password | double_sha1_hash}] BY {'password' | 'hash'}} | {WITH ldap SERVER 'server_name'} | {WITH kerberos [REALM 'realm']} | {WITH ssl_certificate CN 'common_name' | SAN 'TYPE:subject_alt_name'}]
    [NOT IDENTIFIED | IDENTIFIED | ADD IDENTIFIED {[WITH {no_password | plaintext_password | sha256_password | sha256_hash | double_sha1_password | double_sha1_hash}] BY {'password' | 'hash'}} | {WITH ldap SERVER 'server_name'} | {WITH kerberos [REALM 'realm']} | {WITH ssl_certificate CN 'common_name' | SAN 'TYPE:subject_alt_name'}]
    [[ADD | DROP] HOST {LOCAL | NAME 'name' | REGEXP 'name_regexp' | IP 'address' | LIKE 'pattern'} [,...] | ANY | NONE]
    [VALID UNTIL datetime]
    [RESET AUTHENTICATION METHODS TO NEW]
    [DEFAULT ROLE role [,...] | ALL | ALL EXCEPT role [,...] ]
    [GRANTEES {user | role | ANY | NONE} [,...] [EXCEPT {user | role} [,...]]]
    [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY | WRITABLE] | PROFILE 'profile_name'] [,...]
@ -62,3 +63,31 @@ Allows the user with `john` account to grant his privileges to the user with `ja

``` sql
ALTER USER john GRANTEES jack;
```

Adds new authentication methods to the user while keeping the existing ones:

``` sql
ALTER USER user1 ADD IDENTIFIED WITH plaintext_password by '1', bcrypt_password by '2', plaintext_password by '3'
```

Notes:
1. Older versions of ClickHouse might not support the syntax of multiple authentication methods. Therefore, if a ClickHouse server contains such users and is downgraded to a version that does not support it, such users become unusable and some user-related operations break. In order to downgrade gracefully, set all users to contain a single authentication method before downgrading. Alternatively, if the server was downgraded without the proper procedure, the faulty users should be dropped.
2. `no_password` cannot co-exist with other authentication methods for security reasons.
Because of that, it is not possible to `ADD` a `no_password` authentication method. The query below throws an error:

``` sql
ALTER USER user1 ADD IDENTIFIED WITH no_password
```

If you want to drop authentication methods for a user and rely on `no_password`, you must use the replacing form below.

Resets authentication methods and adds the ones specified in the query (the effect of a leading IDENTIFIED without the ADD keyword):

``` sql
ALTER USER user1 IDENTIFIED WITH plaintext_password by '1', bcrypt_password by '2', plaintext_password by '3'
```

Resets authentication methods and keeps the most recently added one:

``` sql
ALTER USER user1 RESET AUTHENTICATION METHODS TO NEW
```
@ -15,6 +15,7 @@ CREATE USER [IF NOT EXISTS | OR REPLACE] name1 [ON CLUSTER cluster_name1]

    [NOT IDENTIFIED | IDENTIFIED {[WITH {no_password | plaintext_password | sha256_password | sha256_hash | double_sha1_password | double_sha1_hash}] BY {'password' | 'hash'}} | {WITH ldap SERVER 'server_name'} | {WITH kerberos [REALM 'realm']} | {WITH ssl_certificate CN 'common_name' | SAN 'TYPE:subject_alt_name'} | {WITH ssh_key BY KEY 'public_key' TYPE 'ssh-rsa|...'} | {WITH http SERVER 'server_name' [SCHEME 'Basic']}]
    [HOST {LOCAL | NAME 'name' | REGEXP 'name_regexp' | IP 'address' | LIKE 'pattern'} [,...] | ANY | NONE]
    [VALID UNTIL datetime]
    [RESET AUTHENTICATION METHODS TO NEW]
    [IN access_storage_type]
    [DEFAULT ROLE role [,...]]
    [DEFAULT DATABASE database | NONE]
@ -144,6 +145,17 @@ In ClickHouse Cloud, by default, passwords must meet the following complexity re

The available password types are: `plaintext_password`, `sha256_password`, `double_sha1_password`.

7. Multiple authentication methods can be specified:

   ```sql
   CREATE USER user1 IDENTIFIED WITH plaintext_password by '1', bcrypt_password by '2', plaintext_password by '3'
   ```

Notes:
1. Older versions of ClickHouse might not support the syntax of multiple authentication methods. Therefore, if a ClickHouse server contains such users and is downgraded to a version that does not support it, such users become unusable and some user-related operations break. In order to downgrade gracefully, set all users to contain a single authentication method before downgrading. Alternatively, if the server was downgraded without the proper procedure, the faulty users should be dropped.
2. `no_password` cannot co-exist with other authentication methods for security reasons. Therefore, you can only specify
`no_password` if it is the only authentication method in the query.

## User Host

A user host is a host from which a connection to the ClickHouse server can be established. The host can be specified in the `HOST` query section in the following ways:
@ -10,10 +10,10 @@ title: The Lightweight DELETE Statement

The lightweight `DELETE` statement removes rows from the table `[db.]table` that match the expression `expr`. It is only available for the *MergeTree table engine family.

``` sql
DELETE FROM [db.]table [ON CLUSTER cluster] WHERE expr;
DELETE FROM [db.]table [ON CLUSTER cluster] [IN PARTITION partition_expr] WHERE expr;
```

It is called "lightweight `DELETE`" to contrast it to the [ALTER table DELETE](/en/sql-reference/statements/alter/delete) command, which is a heavyweight process.
It is called "lightweight `DELETE`" to contrast it to the [ALTER TABLE ... DELETE](/en/sql-reference/statements/alter/delete) command, which is a heavyweight process.

## Examples

@ -22,23 +22,27 @@ It is called "lightweight `DELETE`" to contrast it to the [ALTER table DELETE](/

DELETE FROM hits WHERE Title LIKE '%hello%';
```

## Lightweight `DELETE` does not delete data from storage immediately
## Lightweight `DELETE` does not delete data immediately

With lightweight `DELETE`, deleted rows are internally marked as deleted immediately and will be automatically filtered out of all subsequent queries. However, cleanup of data happens during the next merge. As a result, it is possible that for an unspecified period, data is not actually deleted from storage and is only marked as deleted.
Lightweight `DELETE` is implemented as a [mutation](/en/sql-reference/statements/alter#mutations) that marks rows as deleted but does not immediately physically delete them.

If you need to guarantee that your data is deleted from storage in a predictable time, consider using the [ALTER table DELETE](/en/sql-reference/statements/alter/delete) command. Note that deleting data using `ALTER table DELETE` may consume significant resources as it recreates all affected parts.
By default, `DELETE` statements wait until marking the rows as deleted is completed before returning. This can take a long time if the amount of data is large. Alternatively, you can run it asynchronously in the background using the setting [`lightweight_deletes_sync`](/en/operations/settings/settings#lightweight_deletes_sync). If disabled, the `DELETE` statement returns immediately, but the data can still be visible to queries until the background mutation is finished.

The mutation does not physically delete the rows that have been marked as deleted; this only happens during the next merge. As a result, it is possible that for an unspecified period, data is not actually deleted from storage and is only marked as deleted.

If you need to guarantee that your data is deleted from storage in a predictable time, consider using the table setting [`min_age_to_force_merge_seconds`](https://clickhouse.com/docs/en/operations/settings/merge-tree-settings#min_age_to_force_merge_seconds). Or you can use the [ALTER TABLE ... DELETE](/en/sql-reference/statements/alter/delete) command. Note that deleting data using `ALTER TABLE ... DELETE` may consume significant resources as it recreates all affected parts.
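
As a sketch of the asynchronous mode described above, assuming `lightweight_deletes_sync` is available as a query-level setting in your version:

```sql
-- Returns immediately; rows are marked as deleted by a background mutation
-- and may remain visible to queries until it finishes.
DELETE FROM hits WHERE Title LIKE '%hello%'
SETTINGS lightweight_deletes_sync = 0;
```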

## Deleting large amounts of data

Large deletes can negatively affect ClickHouse performance. If you are attempting to delete all rows from a table, consider using the [`TRUNCATE TABLE`](/en/sql-reference/statements/truncate) command.

If you anticipate frequent deletes, consider using a [custom partitioning key](/en/engines/table-engines/mergetree-family/custom-partitioning-key). You can then use the [`ALTER TABLE...DROP PARTITION`](/en/sql-reference/statements/alter/partition#drop-partitionpart) command to quickly drop all rows associated with that partition.
If you anticipate frequent deletes, consider using a [custom partitioning key](/en/engines/table-engines/mergetree-family/custom-partitioning-key). You can then use the [`ALTER TABLE ... DROP PARTITION`](/en/sql-reference/statements/alter/partition#drop-partitionpart) command to quickly drop all rows associated with that partition.
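
A minimal sketch of that pattern, using a hypothetical `events` table partitioned by month:

```sql
-- Partitioning by month makes deleting old data a cheap partition drop.
CREATE TABLE events
(
    d Date,
    payload String
)
ENGINE = MergeTree
PARTITION BY toYYYYMM(d)
ORDER BY d;

-- Drops every row in the January 2024 partition almost instantly.
ALTER TABLE events DROP PARTITION 202401;
```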

## Limitations of lightweight `DELETE`

### Lightweight `DELETE`s with projections

By default, `DELETE` does not work for tables with projections. This is because rows in a projection may be affected by a `DELETE` operation. But there is a [MergeTree setting](https://clickhouse.com/docs/en/operations/settings/merge-tree-settings) `lightweight_mutation_projection_mode` can change the behavior.
By default, `DELETE` does not work for tables with projections. This is because rows in a projection may be affected by a `DELETE` operation. But there is a [MergeTree setting](https://clickhouse.com/docs/en/operations/settings/merge-tree-settings) `lightweight_mutation_projection_mode` to change the behavior.

## Performance considerations when using lightweight `DELETE`

@ -48,7 +52,7 @@ The following can also negatively impact lightweight `DELETE` performance:

- A heavy `WHERE` condition in a `DELETE` query.
- If the mutations queue is filled with many other mutations, this can possibly lead to performance issues as all mutations on a table are executed sequentially.
- The affected table having a very large number of data parts.
- The affected table has a very large number of data parts.
- Having a lot of data in compact parts. In a Compact part, all columns are stored in one file.

## Delete permissions

@ -61,31 +65,31 @@ GRANT ALTER DELETE ON db.table to username;

## How lightweight DELETEs work internally in ClickHouse

1. A "mask" is applied to affected rows
1. **A "mask" is applied to affected rows**

   When a `DELETE FROM table ...` query is executed, ClickHouse saves a mask where each row is marked as either “existing” or as “deleted”. Those “deleted” rows are omitted for subsequent queries. However, rows are actually only removed later by subsequent merges. Writing this mask is much more lightweight than what is done by an `ALTER table DELETE` query.
   When a `DELETE FROM table ...` query is executed, ClickHouse saves a mask where each row is marked as either “existing” or as “deleted”. Those “deleted” rows are omitted for subsequent queries. However, rows are actually only removed later by subsequent merges. Writing this mask is much more lightweight than what is done by an `ALTER TABLE ... DELETE` query.

   The mask is implemented as a hidden `_row_exists` system column that stores `True` for all visible rows and `False` for deleted ones. This column is only present in a part if some rows in the part were deleted. This column does not exist when a part has all values equal to `True`.

2. `SELECT` queries are transformed to include the mask
2. **`SELECT` queries are transformed to include the mask**

   When a masked column is used in a query, the `SELECT ... FROM table WHERE condition` query internally is extended by the predicate on `_row_exists` and is transformed to:
   ```sql
   SELECT ... FROM table PREWHERE _row_exists WHERE condition
   ```
   At execution time, the column `_row_exists` is read to determine which rows should not be returned. If there are many deleted rows, ClickHouse can determine which granules can be fully skipped when reading the rest of the columns.

3. `DELETE` queries are transformed to `ALTER table UPDATE` queries
3. **`DELETE` queries are transformed to `ALTER TABLE ... UPDATE` queries**

   The `DELETE FROM table WHERE condition` is translated into an `ALTER table UPDATE _row_exists = 0 WHERE condition` mutation.
   The `DELETE FROM table WHERE condition` is translated into an `ALTER TABLE table UPDATE _row_exists = 0 WHERE condition` mutation.

   Internally, this mutation is executed in two steps:

   1. A `SELECT count() FROM table WHERE condition` command is executed for each individual part to determine if the part is affected.

   2. Based on the commands above, affected parts are then mutated, and hardlinks are created for unaffected parts. In the case of wide parts, the `_row_exists` column for each row is updated and all other columns' files are hardlinked. For compact parts, all columns are re-written because they are all stored together in one file.
   2. Based on the commands above, affected parts are then mutated, and hardlinks are created for unaffected parts. In the case of wide parts, the `_row_exists` column for each row is updated, and all other columns' files are hardlinked. For compact parts, all columns are re-written because they are all stored together in one file.

From the steps above, we can see that lightweight deletes using the masking technique improves performance over traditional `ALTER table DELETE` commands because `ALTER table DELETE` reads and re-writes all the columns' files for affected parts.
From the steps above, we can see that lightweight `DELETE` using the masking technique improves performance over traditional `ALTER TABLE ... DELETE` because it does not re-write all the columns' files for affected parts.
## Related content

@ -15,7 +15,14 @@ The `FROM` clause specifies the source to read data from:

A subquery is another `SELECT` query that may be specified in parentheses inside the `FROM` clause.

The `FROM` clause can contain multiple data sources, separated by commas, which is equivalent to performing a [CROSS JOIN](../../../sql-reference/statements/select/join.md) on them.

`FROM` can optionally appear before a `SELECT` clause. This is a ClickHouse-specific extension of standard SQL which makes `SELECT` statements easier to read. Example:

```sql
FROM table
SELECT *
```

## FINAL Modifier
@ -45,19 +52,19 @@ As an alternative to using `FINAL`, it is sometimes possible to use different qu

### Example Usage

**Using the `FINAL` keyword**
Using the `FINAL` keyword

```sql
SELECT x, y FROM mytable FINAL WHERE x > 1;
```

**Using `FINAL` as a query-level setting**
Using `FINAL` as a query-level setting

```sql
SELECT x, y FROM mytable WHERE x > 1 SETTINGS final = 1;
```

**Using `FINAL` as a session-level setting**
Using `FINAL` as a session-level setting

```sql
SET final = 1;

@ -8,14 +8,14 @@ slug: /en/guides/developer/transactional

This is transactional (ACID) if the inserted rows are packed and inserted as a single block (see Notes):
- Atomic: an INSERT succeeds or is rejected as a whole: if a confirmation is sent to the client, then all rows were inserted; if an error is sent to the client, then no rows were inserted.
- Consistent: if there are no table constraints violated, then all rows in an INSERT are inserted and the INSERT succeeds; if constraints are violated, then no rows are inserted.
- Isolated: concurrent clients observe a consistent snapshot of the table: the state of the table either as it was before the INSERT attempt, or after the successful INSERT; no partial state is seen
- Isolated: concurrent clients observe a consistent snapshot of the table: the state of the table either as it was before the INSERT attempt, or after the successful INSERT; no partial state is seen. Clients inside of another transaction have [snapshot isolation](https://en.wikipedia.org/wiki/Snapshot_isolation), while clients outside of a transaction have the [read uncommitted](https://en.wikipedia.org/wiki/Isolation_(database_systems)#Read_uncommitted) isolation level.
- Durable: a successful INSERT is written to the filesystem before answering the client, on a single replica or multiple replicas (controlled by the `insert_quorum` setting), and ClickHouse can ask the OS to sync the filesystem data on the storage media (controlled by the `fsync_after_insert` setting).
- INSERT into multiple tables with one statement is possible if materialized views are involved (the INSERT from the client is to a table which has associated materialized views).

## Case 2: INSERT into multiple partitions, of one table, of the MergeTree* family

Same as Case 1 above, with this detail:
- If a table has many partitions and INSERT covers many partitions, then insertion into every partition is transactional on its own

## Case 3: INSERT into one distributed table of the MergeTree* family

@ -38,7 +38,7 @@ Same as Case 1 above, with this detail:

- the insert format is column-based (like Native, Parquet, ORC, etc) and the data contains only one block of data
- the size of the inserted block in general may depend on many settings (for example: `max_block_size`, `max_insert_block_size`, `min_insert_block_size_rows`, `min_insert_block_size_bytes`, `preferred_block_size_bytes`, etc)
- if the client did not receive an answer from the server, the client does not know if the transaction succeeded, and it can repeat the transaction, using exactly-once insertion properties
- ClickHouse is using MVCC with snapshot isolation internally
- ClickHouse uses [MVCC](https://en.wikipedia.org/wiki/Multiversion_concurrency_control) with [snapshot isolation](https://en.wikipedia.org/wiki/Snapshot_isolation) internally for concurrent transactions
- all ACID properties are valid even in the case of server kill/crash
- either insert_quorum into different AZ or fsync should be enabled to ensure durable inserts in the typical setup
- "consistency" in ACID terms does not cover the semantics of distributed systems (see https://jepsen.io/consistency), which is controlled by different settings (select_sequential_consistency)
@ -260,7 +260,7 @@ FROM mergetree_table

### Transactions introspection

You can inspect transactions by querying the `system.transactions` table, but note that you cannot query that
table from a session that is in a transaction–open a second `clickhouse client` session to query that table.
table from a session that is in a transaction. Open a second `clickhouse client` session to query that table.

```sql
SELECT *

@ -50,7 +50,7 @@ Connection: Close

Content-Type: text/tab-separated-values; charset=UTF-8
X-ClickHouse-Server-Display-Name: clickhouse.ru-central1.internal
X-ClickHouse-Query-Id: 5abe861c-239c-467f-b955-8a201abb8b7f
X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334"}
X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334", "real_time_microseconds":"0"}

1
```

@ -367,7 +367,7 @@ $ curl -v 'http://localhost:8123/predefined_query'

< X-ClickHouse-Format: Template
< X-ClickHouse-Timezone: Asia/Shanghai
< Keep-Alive: timeout=10
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0"}
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0", "elapsed_ns":"662334", "real_time_microseconds":"0"}
<
# HELP "Query" "Number of executing queries"
# TYPE "Query" counter

@ -601,7 +601,7 @@ $ curl -v -H 'XXX:xxx' 'http://localhost:8123/get_config_static_handler'

< Content-Type: text/plain; charset=UTF-8
< Transfer-Encoding: chunked
< Keep-Alive: timeout=10
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334"}
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334", "real_time_microseconds":"0"}
<
* Connection #0 to host localhost left intact
<html ng-app="SMI2"><head><base href="http://ui.tabix.io/"></head><body><div ui-view="" class="content-ui"></div><script src="http://loader.tabix.io/master.js"></script></body></html>%

@ -659,7 +659,7 @@ $ curl -vv -H 'XXX:xxx' 'http://localhost:8123/get_absolute_path_static_handler'

< Content-Type: text/html; charset=UTF-8
< Transfer-Encoding: chunked
< Keep-Alive: timeout=10
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334"}
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334", "real_time_microseconds":"0"}
<
<html><body>Absolute Path File</body></html>
* Connection #0 to host localhost left intact

@ -678,7 +678,7 @@ $ curl -vv -H 'XXX:xxx' 'http://localhost:8123/get_relative_path_static_handler'

< Content-Type: text/html; charset=UTF-8
< Transfer-Encoding: chunked
< Keep-Alive: timeout=10
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334"}
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334", "real_time_microseconds":"0"}
<
<html><body>Relative Path File</body></html>
* Connection #0 to host localhost left intact