Merge branch 'master' into distinct_sorted_simplify

Anton Popov 2022-08-12 17:11:18 +02:00 committed by GitHub
commit 4bd50bb06c
1257 changed files with 14867 additions and 61849 deletions


@@ -13,7 +13,6 @@ concurrency:
 - master
 paths:
 - '.github/**'
-- 'benchmark/**'
 - 'docker/docs/release/**'
 - 'docs/**'
 - 'utils/list-versions/version_date.tsv'


@@ -3348,6 +3348,10 @@ jobs:
 ###################################### JEPSEN TESTS #########################################
 #############################################################################################
 Jepsen:
+# This is special test NOT INCLUDED in FinishCheck
+# When it's skipped, all dependent tasks will be skipped too.
+# DO NOT add it there
+if: contains(github.event.pull_request.labels.*.name, 'jepsen-test')
 needs: [BuilderBinRelease]
 uses: ./.github/workflows/jepsen.yml
@@ -3419,7 +3423,6 @@ jobs:
 - SharedBuildSmokeTest
 - CompatibilityCheck
 - IntegrationTestsFlakyCheck
-- Jepsen
 runs-on: [self-hosted, style-checker]
 steps:
 - name: Clear repository


@@ -29,7 +29,7 @@ jobs:
 fetch-depth: 0
 - name: Generate versions
 env:
-GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+GITHUB_TOKEN: ${{ secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN }}
 run: |
 ./utils/list-versions/list-versions.sh > ./utils/list-versions/version_date.tsv
 GID=$(id -g "${UID}")

.gitmodules vendored

@@ -201,7 +201,7 @@
 [submodule "contrib/boringssl"]
 path = contrib/boringssl
 url = https://github.com/ClickHouse/boringssl.git
-branch = MergeWithUpstream
+branch = unknown_branch_from_artur
 [submodule "contrib/NuRaft"]
 path = contrib/NuRaft
 url = https://github.com/ClickHouse/NuRaft.git


@@ -10,9 +10,10 @@ The following versions of ClickHouse server are currently being supported with s
 | Version | Supported |
 |:-|:-|
+| 22.7 | ✔️ |
 | 22.6 | ✔️ |
 | 22.5 | ✔️ |
-| 22.4 | ✔️ |
+| 22.4 | ❌ |
 | 22.3 | ✔️ |
 | 22.2 | ❌ |
 | 22.1 | ❌ |
@@ -57,5 +58,5 @@ As the security issue moves from triage, to identified fix, to release planning
 ## Public Disclosure Timing
-A public disclosure date is negotiated by the ClickHouse maintainers and the bug submitter. We prefer to fully disclose the bug as soon as possible once a user mitigation is available. It is reasonable to delay disclosure when the bug or the fix is not yet fully understood, the solution is not well-tested, or for vendor coordination. The timeframe for disclosure is from immediate (especially if it's already publicly known) to 90 days. For a vulnerability with a straightforward mitigation, we expect report date to disclosure date to be on the order of 7 days.
+A public disclosure date is negotiated by the ClickHouse maintainers and the bug submitter. We prefer to fully disclose the bug as soon as possible once a user mitigation is available. It is reasonable to delay disclosure when the bug or the fix is not yet fully understood, the solution is not well-tested, or for vendor coordination. The timeframe for disclosure is from immediate (especially if it's already publicly known) to 90 days. For a vulnerability with a straightforward mitigation, we expect the report date to disclosure date to be on the order of 7 days.


@@ -2,8 +2,47 @@
 #include <cstring>
 #include <type_traits>
+#include <bit>
+
+inline void reverseMemcpy(void * dst, const void * src, size_t size)
+{
+    uint8_t * uint_dst = reinterpret_cast<uint8_t *>(dst);
+    const uint8_t * uint_src = reinterpret_cast<const uint8_t *>(src);
+
+    uint_dst += size;
+    while (size)
+    {
+        --uint_dst;
+        *uint_dst = *uint_src;
+        ++uint_src;
+        --size;
+    }
+}
+
+template <typename T>
+inline T unalignedLoadLE(const void * address)
+{
+    T res {};
+    if constexpr (std::endian::native == std::endian::little)
+        memcpy(&res, address, sizeof(res));
+    else
+        reverseMemcpy(&res, address, sizeof(res));
+    return res;
+}
+
+template <typename T>
+inline void unalignedStoreLE(void * address,
+                             const typename std::enable_if<true, T>::type & src)
+{
+    static_assert(std::is_trivially_copyable_v<T>);
+    if constexpr (std::endian::native == std::endian::little)
+        memcpy(address, &src, sizeof(src));
+    else
+        reverseMemcpy(address, &src, sizeof(src));
+}
+
 template <typename T>
 inline T unalignedLoad(const void * address)
 {
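The `unalignedLoadLE` / `unalignedStoreLE` templates added above provide little-endian loads and stores from possibly misaligned addresses, falling back to `reverseMemcpy` on big-endian hosts. A minimal usage sketch follows; it assumes the definitions above are in scope (in the repository they live in base/unaligned.h), and the buffer and values are purely illustrative:

```cpp
// Minimal usage sketch, assuming the unalignedLoadLE / unalignedStoreLE
// templates shown above are in scope (in the repository they live in
// base/unaligned.h; this standalone snippet is illustrative only).
#include <cstdint>
#include <cstdio>

int main()
{
    unsigned char buf[8] = {};

    // Write at an odd offset: the pointer is deliberately misaligned and the
    // value is stored in little-endian byte order on any host. T is not
    // deducible through the enable_if parameter, so it is spelled explicitly.
    unalignedStoreLE<uint32_t>(buf + 1, 0x11223344u);

    // On a little-endian host this is a plain memcpy; on a big-endian host
    // reverseMemcpy swaps the bytes back, so the round trip is lossless.
    uint32_t v = unalignedLoadLE<uint32_t>(buf + 1);

    std::printf("%08x\n", static_cast<unsigned>(v)); // 11223344
    // buf[1..4] holds 44 33 22 11 regardless of native endianness.
    return 0;
}
```

Because the stored byte order is fixed, data written with these helpers stays portable between little- and big-endian machines.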


@@ -23,6 +23,7 @@ if (COMPILER_CLANG)
 no_warning(zero-length-array)
 no_warning(c++98-compat-pedantic)
 no_warning(c++98-compat)
+no_warning(c++20-compat) # Use constinit in C++20 without warnings
 no_warning(conversion)
 no_warning(ctad-maybe-unsupported) # clang 9+, linux-only
 no_warning(disabled-macro-expansion)

contrib/NuRaft vendored

@@ -1 +1 @@
-Subproject commit e1dc47c1cfd529801a8c94a396a3921a71ae3ccf
+Subproject commit 1b0af760b3506b8e35b50cb7df098cbad5064ff2

contrib/arrow vendored

@@ -1 +1 @@
-Subproject commit efdcd015cfdee1b6aa349c9ca227ca12c3d697f5
+Subproject commit 450a5638704386356f8e520080468fc9bc8bcaf8

contrib/azure vendored

@@ -1 +1 @@
-Subproject commit ac4b763d4ca40122275f1497cbdc5451337461d9
+Subproject commit ef75afc075fc71fbcd8fe28dcda3794ae265fd1c


@@ -1,6 +1,6 @@
 option (ENABLE_AZURE_BLOB_STORAGE "Enable Azure blob storage" ${ENABLE_LIBRARIES})
-if (NOT ENABLE_AZURE_BLOB_STORAGE)
+if (NOT ENABLE_AZURE_BLOB_STORAGE OR BUILD_STANDALONE_KEEPER OR OS_FREEBSD)
 message(STATUS "Not using Azure blob storage")
 return()
 endif()

contrib/boringssl vendored

@@ -1 +1 @@
-Subproject commit c1e01a441d6db234f4f12e63a7657d1f9e6db9c1
+Subproject commit 8061ac62d67953e61b793042e33baf1352e67510


@@ -44,6 +44,8 @@
 #define HAVE_SETJMP_H
 #define HAVE_SYS_STAT_H
 #define HAVE_UNISTD_H
+#define HAVE_POLL_H
+#define HAVE_PTHREAD_H
 #define ENABLE_IPV6
 #define USE_OPENSSL

contrib/krb5 vendored

@@ -1 +1 @@
-Subproject commit 5149dea4e2be0f67707383d2682b897c14631374
+Subproject commit d879821c7a4c70b0c3ad739d9951d1a2b1903df7

contrib/nats-io vendored

@@ -1 +1 @@
-Subproject commit 6b2227f36757da090321e2d317569d2bd42c4cc1
+Subproject commit 1e2597c54616015077e53a26d56b6bac448eb1b6


@@ -18,6 +18,8 @@ elseif(WIN32)
 set(NATS_PLATFORM_INCLUDE "apple")
 endif()
+
+add_definitions(-DNATS_HAS_TLS)
 file(GLOB PS_SOURCES "${NATS_IO_SOURCE_DIR}/${NATS_PLATFORM_INCLUDE}/*.c")
 set(SRCS
 "${NATS_IO_SOURCE_DIR}/asynccb.c"


@@ -29,6 +29,7 @@
 "docker/test/util": {
 "name": "clickhouse/test-util",
 "dependent": [
+"docker/packager/binary",
 "docker/test/base",
 "docker/test/fasttest"
 ]


@@ -1,62 +1,7 @@
 # rebuild in #33610
 # docker build -t clickhouse/binary-builder .
-FROM ubuntu:20.04
+ARG FROM_TAG=latest
+FROM clickhouse/test-util:$FROM_TAG
-# ARG for quick switch to a given ubuntu mirror
-ARG apt_archive="http://archive.ubuntu.com"
-RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list
-ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=14
-RUN apt-get update \
-&& apt-get install \
-apt-transport-https \
-apt-utils \
-ca-certificates \
-dnsutils \
-gnupg \
-iputils-ping \
-lsb-release \
-wget \
---yes --no-install-recommends --verbose-versions \
-&& export LLVM_PUBKEY_HASH="bda960a8da687a275a2078d43c111d66b1c6a893a3275271beedf266c1ff4a0cdecb429c7a5cccf9f486ea7aa43fd27f" \
-&& wget -nv -O /tmp/llvm-snapshot.gpg.key https://apt.llvm.org/llvm-snapshot.gpg.key \
-&& echo "${LLVM_PUBKEY_HASH} /tmp/llvm-snapshot.gpg.key" | sha384sum -c \
-&& apt-key add /tmp/llvm-snapshot.gpg.key \
-&& export CODENAME="$(lsb_release --codename --short | tr 'A-Z' 'a-z')" \
-&& echo "deb [trusted=yes] https://apt.llvm.org/${CODENAME}/ llvm-toolchain-${CODENAME}-${LLVM_VERSION} main" >> \
-/etc/apt/sources.list \
-&& apt-get clean
-# initial packages
-RUN apt-get update \
-&& apt-get install \
-bash \
-build-essential \
-ccache \
-clang-${LLVM_VERSION} \
-clang-tidy-${LLVM_VERSION} \
-cmake \
-curl \
-fakeroot \
-gdb \
-git \
-gperf \
-lld-${LLVM_VERSION} \
-llvm-${LLVM_VERSION} \
-llvm-${LLVM_VERSION}-dev \
-moreutils \
-ninja-build \
-pigz \
-rename \
-software-properties-common \
-tzdata \
-nasm \
---yes --no-install-recommends \
-&& apt-get clean
-# This symlink required by gcc to find lld compiler
-RUN ln -s /usr/bin/lld-${LLVM_VERSION} /usr/bin/ld.lld
 ENV CC=clang-${LLVM_VERSION}
 ENV CXX=clang++-${LLVM_VERSION}
@@ -119,18 +64,18 @@ ENV GOCACHE=/workdir/
 RUN mkdir /workdir && chmod 777 /workdir
 WORKDIR /workdir
-# FIXME: thread sanitizer is broken in clang-14, we have to build it with clang-13
+# NOTE: thread sanitizer is broken in clang-14, we have to build it with clang-15
 # https://github.com/ClickHouse/ClickHouse/pull/39450
 # https://github.com/google/sanitizers/issues/1540
 # https://github.com/google/sanitizers/issues/1552
 RUN export CODENAME="$(lsb_release --codename --short | tr 'A-Z' 'a-z')" \
-&& echo "deb [trusted=yes] https://apt.llvm.org/${CODENAME}/ llvm-toolchain-${CODENAME}-13 main" >> \
+&& echo "deb [trusted=yes] https://apt.llvm.org/${CODENAME}/ llvm-toolchain-${CODENAME}-15 main" >> \
 /etc/apt/sources.list.d/clang.list \
 && apt-get update \
 && apt-get install \
-clang-13 \
-clang-tidy-13 \
+clang-15 \
+clang-tidy-15 \
 --yes --no-install-recommends \
 && apt-get clean


@@ -3,7 +3,7 @@ set -x -e
 exec &> >(ts)
-cache_status () {
+ccache_status () {
 ccache --show-config ||:
 ccache --show-stats ||:
 }
@@ -48,7 +48,7 @@ if [ -n "$MAKE_DEB" ]; then
 fi
-cache_status
+ccache_status
 # clear cache stats
 ccache --zero-stats ||:
@@ -92,7 +92,7 @@ $SCAN_WRAPPER ninja $NINJA_FLAGS $BUILD_TARGET
 ls -la ./programs
-cache_status
+ccache_status
 if [ -n "$MAKE_DEB" ]; then
 # No quotes because I want it to expand to nothing if empty.
@@ -104,6 +104,7 @@ if [ -n "$MAKE_DEB" ]; then
 fi
 mv ./programs/clickhouse* /output
+[ -x ./programs/self-extracting/clickhouse ] && mv ./programs/self-extracting/clickhouse /output
 mv ./src/unit_tests_dbms /output ||: # may not exist for some binary builds
 find . -name '*.so' -print -exec mv '{}' /output \;
 find . -name '*.so.*' -print -exec mv '{}' /output \;
@@ -178,7 +179,8 @@ then
 mv "coverity-scan.tgz" /output
 fi
-cache_status
+ccache_status
+ccache --evict-older-than 1d
 if [ "${CCACHE_DEBUG:-}" == "1" ]
 then


@@ -62,7 +62,7 @@ def pre_build(repo_path: str, env_variables: List[str]):
 f"git -C {repo_path} fetch --no-recurse-submodules "
 "--no-tags origin master:master"
 )
-logging.info("Getting master branch for performance artifact: ''%s'", cmd)
+logging.info("Getting master branch for performance artifact: '%s'", cmd)
 subprocess.check_call(cmd, shell=True)
@@ -234,6 +234,7 @@ def parse_env_variables(
 if cache:
 result.append("CCACHE_DIR=/ccache")
+result.append("CCACHE_COMPRESSLEVEL=5")
 result.append("CCACHE_BASEDIR=/build")
 result.append("CCACHE_NOHASHDIR=true")
 result.append("CCACHE_COMPILERCHECK=content")
@@ -242,7 +243,6 @@ def parse_env_variables(
 # 15G is not enough for tidy build
 cache_maxsize = "25G"
 result.append(f"CCACHE_MAXSIZE={cache_maxsize}")
-# result.append("CCACHE_UMASK=777")
 if distcc_hosts:
 hosts_with_params = [f"{host}/24,lzo" for host in distcc_hosts] + [
@@ -333,7 +333,7 @@ if __name__ == "__main__":
 parser.add_argument(
 "--compiler",
 choices=(
-"clang-13",  # For TSAN builds, see #39450
+"clang-15",  # For TSAN builds, see #39450
 "clang-14",
 "clang-14-darwin",
 "clang-14-darwin-aarch64",


@@ -57,7 +57,15 @@ do
 # check if variable not empty
 [ -z "$dir" ] && continue
 # ensure directories exist
-if ! mkdir -p "$dir"; then
+if [ "$DO_CHOWN" = "1" ]; then
+mkdir="mkdir"
+else
+# if DO_CHOWN=0 it means that the system does not map root user to "admin" permissions
+# it mainly happens on NFS mounts where root==nobody for security reasons
+# thus mkdir MUST run with user id/gid and not from nobody that has zero permissions
+mkdir="/usr/bin/clickhouse su "${USER}:${GROUP}" mkdir"
+fi
+if ! $mkdir -p "$dir"; then
 echo "Couldn't create necessary directory: $dir"
 exit 1
 fi


@@ -3,59 +3,12 @@
 ARG FROM_TAG=latest
 FROM clickhouse/test-util:$FROM_TAG
-# ARG for quick switch to a given ubuntu mirror
-ARG apt_archive="http://archive.ubuntu.com"
-RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list
-ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=14
-RUN apt-get update \
-&& apt-get install ca-certificates lsb-release wget gnupg apt-transport-https \
---yes --no-install-recommends --verbose-versions \
-&& export LLVM_PUBKEY_HASH="bda960a8da687a275a2078d43c111d66b1c6a893a3275271beedf266c1ff4a0cdecb429c7a5cccf9f486ea7aa43fd27f" \
-&& wget -nv -O /tmp/llvm-snapshot.gpg.key https://apt.llvm.org/llvm-snapshot.gpg.key \
-&& echo "${LLVM_PUBKEY_HASH} /tmp/llvm-snapshot.gpg.key" | sha384sum -c \
-&& apt-key add /tmp/llvm-snapshot.gpg.key \
-&& export CODENAME="$(lsb_release --codename --short | tr 'A-Z' 'a-z')" \
-&& echo "deb [trusted=yes] http://apt.llvm.org/${CODENAME}/ llvm-toolchain-${CODENAME}-${LLVM_VERSION} main" >> \
-/etc/apt/sources.list
-# initial packages
 RUN apt-get update \
 && apt-get install \
-bash \
-fakeroot \
-ccache \
-curl \
-software-properties-common \
---yes --no-install-recommends
-# Architecture of the image when BuildKit/buildx is used
-ARG TARGETARCH
-# Special dpkg-deb (https://github.com/ClickHouse-Extras/dpkg) version which is able
-# to compress files using pigz (https://zlib.net/pigz/) instead of gzip.
-# Significantly increase deb packaging speed and compatible with old systems
-RUN arch=${TARGETARCH:-amd64} \
-&& curl -Lo /usr/bin/dpkg-deb https://github.com/ClickHouse-Extras/dpkg/releases/download/1.21.1-clickhouse/dpkg-deb-${arch}
-RUN apt-get update \
-&& apt-get install \
-clang-${LLVM_VERSION} \
-debhelper \
-devscripts \
-gdb \
-git \
-gperf \
 lcov \
-llvm-${LLVM_VERSION} \
+netbase \
-moreutils \
 perl \
-pigz \
-pkg-config \
-tzdata \
 pv \
-nasm \
 --yes --no-install-recommends
 # Sanitizer options for services (clickhouse-server)


@@ -3,83 +3,23 @@
 ARG FROM_TAG=latest
 FROM clickhouse/test-util:$FROM_TAG
-# ARG for quick switch to a given ubuntu mirror
-ARG apt_archive="http://archive.ubuntu.com"
-RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list
-ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=14
-RUN apt-get update \
-&& apt-get install ca-certificates lsb-release wget gnupg apt-transport-https \
---yes --no-install-recommends --verbose-versions \
-&& export LLVM_PUBKEY_HASH="bda960a8da687a275a2078d43c111d66b1c6a893a3275271beedf266c1ff4a0cdecb429c7a5cccf9f486ea7aa43fd27f" \
-&& wget -nv -O /tmp/llvm-snapshot.gpg.key https://apt.llvm.org/llvm-snapshot.gpg.key \
-&& echo "${LLVM_PUBKEY_HASH} /tmp/llvm-snapshot.gpg.key" | sha384sum -c \
-&& apt-key add /tmp/llvm-snapshot.gpg.key \
-&& export CODENAME="$(lsb_release --codename --short | tr 'A-Z' 'a-z')" \
-&& echo "deb [trusted=yes] https://apt.llvm.org/${CODENAME}/ llvm-toolchain-${CODENAME}-${LLVM_VERSION} main" >> \
-/etc/apt/sources.list
-# initial packages
 RUN apt-get update \
 && apt-get install \
-bash \
-fakeroot \
-ccache \
-curl \
-software-properties-common \
---yes --no-install-recommends
-# Architecture of the image when BuildKit/buildx is used
-ARG TARGETARCH
-# Special dpkg-deb (https://github.com/ClickHouse-Extras/dpkg) version which is able
-# to compress files using pigz (https://zlib.net/pigz/) instead of gzip.
-# Significantly increase deb packaging speed and compatible with old systems
-RUN arch=${TARGETARCH:-amd64} \
-&& curl -Lo /usr/bin/dpkg-deb https://github.com/ClickHouse-Extras/dpkg/releases/download/1.21.1-clickhouse/dpkg-deb-${arch}
-RUN apt-get update \
-&& apt-get install \
-apt-transport-https \
-bash \
 brotli \
-build-essential \
-ca-certificates \
-ccache \
-clang-${LLVM_VERSION} \
-clang-tidy-${LLVM_VERSION} \
-cmake \
-curl \
 expect \
-fakeroot \
+file \
-gdb \
-git \
-gperf \
-lld-${LLVM_VERSION} \
-llvm-${LLVM_VERSION} \
 lsof \
-moreutils \
-ninja-build \
 psmisc \
 python3 \
 python3-lxml \
 python3-pip \
 python3-requests \
 python3-termcolor \
-rename \
-software-properties-common \
-tzdata \
 unixodbc \
-file \
-nasm \
 --yes --no-install-recommends
 RUN pip3 install numpy scipy pandas Jinja2
-# This symlink required by gcc to find lld compiler
-RUN ln -s /usr/bin/lld-${LLVM_VERSION} /usr/bin/ld.lld
 ARG odbc_driver_url="https://github.com/ClickHouse/clickhouse-odbc/releases/download/v1.1.4.20200302/clickhouse-odbc-1.1.4-Linux.tar.gz"
 RUN mkdir -p /tmp/clickhouse-odbc-tmp \


@@ -160,9 +160,8 @@ function run_cmake
 "-DENABLE_REPLXX=1"
 )
-# TODO remove this? we don't use ccache anyway. An option would be to download it
-# from S3 simultaneously with cloning.
 export CCACHE_DIR="$FASTTEST_WORKSPACE/ccache"
+export CCACHE_COMPRESSLEVEL=5
 export CCACHE_BASEDIR="$FASTTEST_SOURCE"
 export CCACHE_NOHASHDIR=true
 export CCACHE_COMPILERCHECK=content
@@ -191,6 +190,7 @@ function build
 gzip "$FASTTEST_OUTPUT/clickhouse-stripped"
 fi
 ccache --show-stats ||:
+ccache --evict-older-than 1d ||:
 )
 }


@@ -69,6 +69,8 @@ function download
 wget_with_retry "$BINARY_URL_TO_DOWNLOAD"
 chmod +x clickhouse
+# clickhouse may be compressed - run once to decompress
+./clickhouse ||:
 ln -s ./clickhouse ./clickhouse-server
 ln -s ./clickhouse ./clickhouse-client


@@ -4,4 +4,8 @@ services:
 image: nats
 ports:
 - "${NATS_EXTERNAL_PORT}:${NATS_INTERNAL_PORT}"
-command: "-p 4444 --user click --pass house"
+command: "-p 4444 --user click --pass house --tls --tlscert=/etc/certs/server-cert.pem --tlskey=/etc/certs/server-key.pem"
+volumes:
+- type: bind
+source: "${NATS_CERT_DIR}/nats"
+target: /etc/certs


@@ -41,24 +41,9 @@ color_good = "#b0d050"
 header_template = """
 <!DOCTYPE html>
 <html lang="en">
-<link rel="preload" as="font" href="https://yastatic.net/adv-www/_/sUYVCPUAQE7ExrvMS7FoISoO83s.woff2" type="font/woff2" crossorigin="anonymous"/>
 <style>
-@font-face {{
-font-family:'Yandex Sans Display Web';
-src:url(https://yastatic.net/adv-www/_/H63jN0veW07XQUIA2317lr9UIm8.eot);
-src:url(https://yastatic.net/adv-www/_/H63jN0veW07XQUIA2317lr9UIm8.eot?#iefix) format('embedded-opentype'),
-url(https://yastatic.net/adv-www/_/sUYVCPUAQE7ExrvMS7FoISoO83s.woff2) format('woff2'),
-url(https://yastatic.net/adv-www/_/v2Sve_obH3rKm6rKrtSQpf-eB7U.woff) format('woff'),
-url(https://yastatic.net/adv-www/_/PzD8hWLMunow5i3RfJ6WQJAL7aI.ttf) format('truetype'),
-url(https://yastatic.net/adv-www/_/lF_KG5g4tpQNlYIgA0e77fBSZ5s.svg#YandexSansDisplayWeb-Regular) format('svg');
-font-weight:400;
-font-style:normal;
-font-stretch:normal;
-font-display: swap;
-}}
 body {{
-font-family: "Yandex Sans Display Web", Arial, sans-serif;
+font-family: "DejaVu Sans", "Noto Sans", Arial, sans-serif;
 background: #EEE;
 }}


@@ -58,6 +58,7 @@ if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]
 --tcp_port 19000 --tcp_port_secure 19440 --http_port 18123 --https_port 18443 --interserver_http_port 19009 --tcp_with_proxy_port 19010 \
 --mysql_port 19004 --postgresql_port 19005 \
 --keeper_server.tcp_port 19181 --keeper_server.server_id 2 \
+--prometheus.port 19988 \
 --macros.replica r2 # It doesn't work :(
 mkdir -p /var/run/clickhouse-server2
@@ -69,6 +70,7 @@ if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]
 --tcp_port 29000 --tcp_port_secure 29440 --http_port 28123 --https_port 28443 --interserver_http_port 29009 --tcp_with_proxy_port 29010 \
 --mysql_port 29004 --postgresql_port 29005 \
 --keeper_server.tcp_port 29181 --keeper_server.server_id 3 \
+--prometheus.port 29988 \
 --macros.shard s2 # It doesn't work :(
 MAX_RUN_TIME=$((MAX_RUN_TIME < 9000 ? MAX_RUN_TIME : 9000)) # min(MAX_RUN_TIME, 2.5 hours)


@@ -218,6 +218,12 @@ clickhouse-client --query "SELECT 'Server successfully started', 'OK'" >> /test_
 || (echo -e 'Server failed to start (see application_errors.txt and clickhouse-server.clean.log)\tFAIL' >> /test_output/test_results.tsv \
 && grep -a "<Error>.*Application" /var/log/clickhouse-server/clickhouse-server.log > /test_output/application_errors.txt)
+
+echo "Get previous release tag"
+previous_release_tag=$(clickhouse-client --query="SELECT version()" | get_previous_release_tag)
+echo $previous_release_tag
+
+stop
 [ -f /var/log/clickhouse-server/clickhouse-server.log ] || echo -e "Server log does not exist\tFAIL"
 [ -f /var/log/clickhouse-server/stderr.log ] || echo -e "Stderr log does not exist\tFAIL"
@@ -265,10 +271,6 @@ zgrep -Fa " received signal " /test_output/gdb.log > /dev/null \
 echo -e "Backward compatibility check\n"
-echo "Get previous release tag"
-previous_release_tag=$(clickhouse-client --query="SELECT version()" | get_previous_release_tag)
-echo $previous_release_tag
 echo "Clone previous release repository"
 git clone https://github.com/ClickHouse/ClickHouse.git --no-tags --progress --branch=$previous_release_tag --no-recurse-submodules --depth=1 previous_release_repository
@@ -278,7 +280,6 @@ mkdir previous_release_package_folder
 echo $previous_release_tag | download_release_packets && echo -e 'Download script exit code\tOK' >> /test_output/test_results.tsv \
 || echo -e 'Download script failed\tFAIL' >> /test_output/test_results.tsv
-stop
 mv /var/log/clickhouse-server/clickhouse-server.log /var/log/clickhouse-server/clickhouse-server.clean.log
 # Check if we cloned previous release repository successfully
@@ -455,3 +456,5 @@ for core in core.*; do
 pigz $core
 mv $core.gz /test_output/
 done
+
+dmesg -T > /test_output/dmesg.log


@@ -77,7 +77,7 @@ def run_func_test(
 pipes = []
 for i in range(0, len(output_paths)):
 f = open(output_paths[i], "w")
-full_command = "{} {} {} {} {}".format(
+full_command = "{} {} {} {} {} --stress".format(
 cmd,
 get_options(i, backward_compatibility_check),
 global_time_limit_option,


@@ -1,5 +1,82 @@
+# rebuild in #33610
 # docker build -t clickhouse/test-util .
 FROM ubuntu:20.04
+
+# ARG for quick switch to a given ubuntu mirror
+ARG apt_archive="http://archive.ubuntu.com"
+RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list
+
+ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=14
+
+RUN apt-get update \
+&& apt-get install \
+apt-transport-https \
+apt-utils \
+ca-certificates \
+dnsutils \
+gnupg \
+iputils-ping \
+lsb-release \
+wget \
+--yes --no-install-recommends --verbose-versions \
+&& export LLVM_PUBKEY_HASH="bda960a8da687a275a2078d43c111d66b1c6a893a3275271beedf266c1ff4a0cdecb429c7a5cccf9f486ea7aa43fd27f" \
+&& wget -nv -O /tmp/llvm-snapshot.gpg.key https://apt.llvm.org/llvm-snapshot.gpg.key \
+&& echo "${LLVM_PUBKEY_HASH} /tmp/llvm-snapshot.gpg.key" | sha384sum -c \
+&& apt-key add /tmp/llvm-snapshot.gpg.key \
+&& export CODENAME="$(lsb_release --codename --short | tr 'A-Z' 'a-z')" \
+&& echo "deb [trusted=yes] https://apt.llvm.org/${CODENAME}/ llvm-toolchain-${CODENAME}-${LLVM_VERSION} main" >> \
+/etc/apt/sources.list \
+&& apt-get clean
+
+# initial packages
+RUN apt-get update \
+&& apt-get install \
+bash \
+bsdmainutils \
+build-essential \
+clang-${LLVM_VERSION} \
+clang-tidy-${LLVM_VERSION} \
+cmake \
+curl \
+fakeroot \
+gdb \
+git \
+gperf \
+lld-${LLVM_VERSION} \
+llvm-${LLVM_VERSION} \
+llvm-${LLVM_VERSION}-dev \
+moreutils \
+nasm \
+ninja-build \
+pigz \
+rename \
+software-properties-common \
+tzdata \
+--yes --no-install-recommends \
+&& apt-get clean
+
+# This symlink required by gcc to find lld compiler
+RUN ln -s /usr/bin/lld-${LLVM_VERSION} /usr/bin/ld.lld
+
+ARG CCACHE_VERSION=4.6.1
+RUN mkdir /tmp/ccache \
+&& cd /tmp/ccache \
+&& curl -L \
+-O https://github.com/ccache/ccache/releases/download/v$CCACHE_VERSION/ccache-$CCACHE_VERSION.tar.xz \
+-O https://github.com/ccache/ccache/releases/download/v$CCACHE_VERSION/ccache-$CCACHE_VERSION.tar.xz.asc \
+&& gpg --recv-keys --keyserver hkps://keyserver.ubuntu.com 5A939A71A46792CF57866A51996DDA075594ADB8 \
+&& gpg --verify ccache-4.6.1.tar.xz.asc \
+&& tar xf ccache-$CCACHE_VERSION.tar.xz \
+&& cd /tmp/ccache/ccache-$CCACHE_VERSION \
+&& cmake -DCMAKE_INSTALL_PREFIX=/usr \
+-DCMAKE_BUILD_TYPE=None \
+-DZSTD_FROM_INTERNET=ON \
+-DREDIS_STORAGE_BACKEND=OFF \
+-Wno-dev \
+-B build \
+-S . \
+&& make VERBOSE=1 -C build \
+&& make install -C build \
+&& cd / \
+&& rm -rf /tmp/ccache
+
 COPY process_functional_tests_result.py /


@@ -86,7 +86,7 @@ def process_test_log(log_path):
 test_end = True
 test_results = [
-(test[0], test[1], test[2], "".join(test[3]))[:4096] for test in test_results
+(test[0], test[1], test[2], "".join(test[3])[:4096]) for test in test_results
 ]
 return (


@@ -1,29 +0,0 @@
---
toc_priority:
toc_title:
---
# data_type_name {#data_type-name}
Description.
**Parameters** (Optional)
- `x` — Description. [Type name](relative/path/to/type/dscr.md#type).
- `y` — Description. [Type name](relative/path/to/type/dscr.md#type).
**Examples**
```sql
```
## Additional Info {#additional-info} (Optional)
The name of an additional section can be any, for example, **Usage**.
**See Also** (Optional)
- [link](#)
[Original article](https://clickhouse.com/docs/en/data-types/<data-type-name>/) <!--hide-->


@@ -1,63 +0,0 @@
# EngineName {#enginename}
- What the Database/Table engine does.
- Relations with other engines if they exist.
## Creating a Database {#creating-a-database}
``` sql
CREATE DATABASE ...
```
or
## Creating a Table {#creating-a-table}
``` sql
CREATE TABLE ...
```
**Engine Parameters**
**Query Clauses** (for Table engines only)
## Virtual columns {#virtual-columns} (for Table engines only)
List and virtual columns with description, if they exist.
## Data Types Support {#data_types-support} (for Database engines only)
| EngineName | ClickHouse |
|-----------------------|------------------------------------|
| NativeDataTypeName | [ClickHouseDataTypeName](link#) |
## Specifics and recommendations {#specifics-and-recommendations}
Algorithms
Specifics of read and write processes
Examples of tasks
Recommendations for usage
Specifics of data storage
## Usage Example {#usage-example}
The example must show usage and use cases. The following text contains the recommended parts of this section.
Input table:
``` text
```
Query:
``` sql
```
Result:
``` text
```
Follow up with any text to clarify the example.
**See Also**
- [link](#)


@@ -1,51 +0,0 @@
## functionName {#functionname-in-lower-case}
Short description.
**Syntax** (without SELECT)
``` sql
<function syntax>
```
Alias: `<alias name>`. (Optional)
More text (Optional).
**Arguments** (Optional)
- `x` — Description. Optional (only for optional arguments). Possible values: <values list>. Default value: <value>. [Type name](relative/path/to/type/dscr.md#type).
- `y` — Description. Optional (only for optional arguments). Possible values: <values list>.Default value: <value>. [Type name](relative/path/to/type/dscr.md#type).
**Parameters** (Optional, only for parametric aggregate functions)
- `z` — Description. Optional (only for optional parameters). Possible values: <values list>. Default value: <value>. [Type name](relative/path/to/type/dscr.md#type).
**Returned value(s)**
- Returned values list.
Type: [Type name](relative/path/to/type/dscr.md#type).
**Example**
The example must show usage and/or a use cases. The following text contains recommended parts of an example.
Input table (Optional):
``` text
```
Query:
``` sql
```
Result:
``` text
```
**See Also** (Optional)
- [link](#)


@@ -1,33 +0,0 @@
## server_setting_name {#server_setting_name}
Description.
Describe what is configured in this section of settings.
Possible value: ...
Default value: ...
**Settings** (Optional)
If the section contains several settings, list them here. Specify possible values and default values:
- setting_1 — Description.
- setting_2 — Description.
**Example**
```xml
<server_setting_name>
<setting_1> ... </setting_1>
<setting_2> ... </setting_2>
</server_setting_name>
```
**Additional Info** (Optional)
The name of an additional section can be any, for example, **Usage**.
**See Also** (Optional)
- [link](#)


@@ -1,27 +0,0 @@
## setting_name {#setting_name}
Description.
For the switch setting, use the typical phrase: “Enables or disables something …”.
Possible values:
*For switcher setting:*
- 0 — Disabled.
- 1 — Enabled.
*For another setting (typical phrases):*
- Positive integer.
- 0 — Disabled or unlimited or something else.
Default value: `value`.
**Additional Info** (Optional)
The name of an additional section can be any, for example, **Usage**.
**See Also** (Optional)
- [link](#)


@@ -1,24 +0,0 @@
# Statement name (for example, SHOW USER) {#statement-name-in-lower-case}
Brief description of what the statement does.
**Syntax**
```sql
Syntax of the statement.
```
## Other necessary sections of the description (Optional) {#anchor}
Examples of descriptions with a complicated structure:
- https://clickhouse.com/docs/en/sql-reference/statements/grant/
- https://clickhouse.com/docs/en/sql-reference/statements/revoke/
- https://clickhouse.com/docs/en/sql-reference/statements/select/join/
**See Also** (Optional)
Links to related topics as a list.
- [link](#)


@@ -1,25 +0,0 @@
# system.table_name {#system-tables_table-name}
Description.
Columns:
- `column_name` ([data_type_name](path/to/data_type.md)) — Description.
**Example**
Query:
``` sql
SELECT * FROM system.table_name
```
Result:
``` text
Some output. It shouldn't be too long.
```
**See Also**
- [Article name](path/to/article_name.md) — Some words about referenced information.


@@ -5,7 +5,7 @@ sidebar_label: 2022
 # 2022 Changelog
-### ClickHouse release v22.3.1.1262-prestable FIXME as compared to v22.2.1.2139-prestable
+### ClickHouse release v22.3.1.1262-prestable (92ab33f560e) FIXME as compared to v22.2.1.2139-prestable (75366fc95e5)
 #### Backward Incompatible Change
 * Improvement the toDatetime function overflows. When the date string is very large, it will be converted to 1970. [#32898](https://github.com/ClickHouse/ClickHouse/pull/32898) ([HaiBo Li](https://github.com/marising)).


@@ -0,0 +1,30 @@
---
sidebar_position: 1
sidebar_label: 2022
---
# 2022 Changelog
### ClickHouse release v22.3.10.22-lts (25886f517d4) FIXME as compared to v22.3.9.19-lts (7976930b82e)
#### Bug Fix
* Backported in [#39761](https://github.com/ClickHouse/ClickHouse/issues/39761): Fix seeking while reading from encrypted disk. This PR fixes [#38381](https://github.com/ClickHouse/ClickHouse/issues/38381). [#39687](https://github.com/ClickHouse/ClickHouse/pull/39687) ([Vitaly Baranov](https://github.com/vitlibar)).
#### Bug Fix (user-visible misbehavior in official stable or prestable release)
* Backported in [#39206](https://github.com/ClickHouse/ClickHouse/issues/39206): Fix reading of sparse columns from `MergeTree` tables that store their data in S3. [#37978](https://github.com/ClickHouse/ClickHouse/pull/37978) ([Anton Popov](https://github.com/CurtizJ)).
* Backported in [#39381](https://github.com/ClickHouse/ClickHouse/issues/39381): Fixed error `Not found column Type in block` in selects with `PREWHERE` and read-in-order optimizations. [#39157](https://github.com/ClickHouse/ClickHouse/pull/39157) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Backported in [#39588](https://github.com/ClickHouse/ClickHouse/issues/39588): Fix data race and possible heap-buffer-overflow in Avro format. Closes [#39094](https://github.com/ClickHouse/ClickHouse/issues/39094) Closes [#33652](https://github.com/ClickHouse/ClickHouse/issues/33652). [#39498](https://github.com/ClickHouse/ClickHouse/pull/39498) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#39610](https://github.com/ClickHouse/ClickHouse/issues/39610): Fix bug with maxsplit argument for splitByChar, which was not working correctly. [#39552](https://github.com/ClickHouse/ClickHouse/pull/39552) ([filimonov](https://github.com/filimonov)).
* Backported in [#39834](https://github.com/ClickHouse/ClickHouse/issues/39834): Fix `CANNOT_READ_ALL_DATA` exception with `local_filesystem_read_method=pread_threadpool`. This bug affected only Linux kernel version 5.9 and 5.10 according to [man](https://manpages.debian.org/testing/manpages-dev/preadv2.2.en.html#BUGS). [#39800](https://github.com/ClickHouse/ClickHouse/pull/39800) ([Anton Popov](https://github.com/CurtizJ)).
#### Bug Fix (user-visible misbehaviour in official stable or prestable release)
* Backported in [#39238](https://github.com/ClickHouse/ClickHouse/issues/39238): Fix performance regression of scalar query optimization. [#35986](https://github.com/ClickHouse/ClickHouse/pull/35986) ([Amos Bird](https://github.com/amosbird)).
* Backported in [#39531](https://github.com/ClickHouse/ClickHouse/issues/39531): Fix some issues with async reads from remote filesystem which happened when reading low cardinality. [#36763](https://github.com/ClickHouse/ClickHouse/pull/36763) ([Kseniia Sumarokova](https://github.com/kssenii)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Replace MemoryTrackerBlockerInThread to LockMemoryExceptionInThread [#39619](https://github.com/ClickHouse/ClickHouse/pull/39619) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Change mysql-odbc url [#39702](https://github.com/ClickHouse/ClickHouse/pull/39702) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).


@@ -0,0 +1,20 @@
---
sidebar_position: 1
sidebar_label: 2022
---
# 2022 Changelog
### ClickHouse release v22.3.11.12-lts (137c5f72657) FIXME as compared to v22.3.10.22-lts (25886f517d4)
#### Build/Testing/Packaging Improvement
* Backported in [#39881](https://github.com/ClickHouse/ClickHouse/issues/39881): Former packages used to install systemd.service file to `/etc`. The files there are marked as `conf` and are not cleaned out, and not updated automatically. This PR cleans them out. [#39323](https://github.com/ClickHouse/ClickHouse/pull/39323) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
#### Bug Fix (user-visible misbehavior in official stable or prestable release)
* Backported in [#39336](https://github.com/ClickHouse/ClickHouse/issues/39336): Fix `parallel_view_processing=1` with `optimize_trivial_insert_select=1`. Fix `max_insert_threads` while pushing to views. [#38731](https://github.com/ClickHouse/ClickHouse/pull/38731) ([Azat Khuzhin](https://github.com/azat)).
#### NO CL ENTRY
* NO CL ENTRY: 'Revert "Backport [#39687](https://github.com/ClickHouse/ClickHouse/issues/39687) to 22.3: Fix seeking while reading from encrypted disk"'. [#40052](https://github.com/ClickHouse/ClickHouse/pull/40052) ([Alexey Milovidov](https://github.com/alexey-milovidov)).


@@ -5,5 +5,5 @@ sidebar_label: 2022
 # 2022 Changelog
-### ClickHouse release v22.3.2.2-lts FIXME as compared to v22.3.1.1262-prestable
+### ClickHouse release v22.3.2.2-lts (89a621679c6) FIXME as compared to v22.3.1.1262-prestable (92ab33f560e)


@@ -5,7 +5,7 @@ sidebar_label: 2022
 # 2022 Changelog
-### ClickHouse release v22.3.3.44-lts FIXME as compared to v22.3.2.2-lts
+### ClickHouse release v22.3.3.44-lts (abb756d3ca2) FIXME as compared to v22.3.2.2-lts (89a621679c6)
 #### Bug Fix
 * Backported in [#35928](https://github.com/ClickHouse/ClickHouse/issues/35928): Added settings `input_format_ipv4_default_on_conversion_error`, `input_format_ipv6_default_on_conversion_error` to allow insert of invalid ip address values as default into tables. Closes [#35726](https://github.com/ClickHouse/ClickHouse/issues/35726). [#35733](https://github.com/ClickHouse/ClickHouse/pull/35733) ([Maksim Kita](https://github.com/kitaisreal)).


@@ -5,7 +5,7 @@ sidebar_label: 2022
 # 2022 Changelog
-### ClickHouse release v22.3.4.20-lts FIXME as compared to v22.3.3.44-lts
+### ClickHouse release v22.3.4.20-lts (ecbaf001f49) FIXME as compared to v22.3.3.44-lts (abb756d3ca2)
 #### Build/Testing/Packaging Improvement
 * - Add `_le_` method for ClickHouseVersion - Fix auto_version for existing tag - docker_server now support getting version from tags - Add python unit tests to backport workflow. [#36028](https://github.com/ClickHouse/ClickHouse/pull/36028) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).


@@ -5,7 +5,7 @@ sidebar_label: 2022
 # 2022 Changelog
-### ClickHouse release v22.3.5.5-lts FIXME as compared to v22.3.4.20-lts
+### ClickHouse release v22.3.5.5-lts (438b4a81f77) FIXME as compared to v22.3.4.20-lts (ecbaf001f49)
 #### Bug Fix (user-visible misbehaviour in official stable or prestable release)


@@ -5,7 +5,7 @@ sidebar_label: 2022
 # 2022 Changelog
-### ClickHouse release v22.3.6.5-lts FIXME as compared to v22.3.5.5-lts
+### ClickHouse release v22.3.6.5-lts (3e44e824cff) FIXME as compared to v22.3.5.5-lts (438b4a81f77)
 #### Bug Fix (user-visible misbehaviour in official stable or prestable release)


@@ -5,7 +5,7 @@ sidebar_label: 2022
 # 2022 Changelog
-### ClickHouse release v22.3.7.28-lts FIXME as compared to v22.3.6.5-lts
+### ClickHouse release v22.3.7.28-lts (420bdfa2751) FIXME as compared to v22.3.6.5-lts (3e44e824cff)
 #### Bug Fix (user-visible misbehavior in official stable or prestable release)


@@ -0,0 +1,32 @@
---
sidebar_position: 1
sidebar_label: 2022
---
# 2022 Changelog
### ClickHouse release v22.3.8.39-lts (6bcf982f58b) FIXME as compared to v22.3.7.28-lts (420bdfa2751)
#### Build/Testing/Packaging Improvement
* Backported in [#38826](https://github.com/ClickHouse/ClickHouse/issues/38826): - Change `all|noarch` packages to architecture-dependent - Fix some documentation for it - Push aarch64|arm64 packages to artifactory and release assets - Fixes [#36443](https://github.com/ClickHouse/ClickHouse/issues/36443). [#38580](https://github.com/ClickHouse/ClickHouse/pull/38580) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
#### Bug Fix (user-visible misbehavior in official stable or prestable release)
* Backported in [#38453](https://github.com/ClickHouse/ClickHouse/issues/38453): Fix bug with nested short-circuit functions that led to execution of arguments even if condition is false. Closes [#38040](https://github.com/ClickHouse/ClickHouse/issues/38040). [#38173](https://github.com/ClickHouse/ClickHouse/pull/38173) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#38710](https://github.com/ClickHouse/ClickHouse/issues/38710): Fix incorrect result of distributed queries with `DISTINCT` and `LIMIT`. Fixes [#38282](https://github.com/ClickHouse/ClickHouse/issues/38282). [#38371](https://github.com/ClickHouse/ClickHouse/pull/38371) ([Anton Popov](https://github.com/CurtizJ)).
* Backported in [#38689](https://github.com/ClickHouse/ClickHouse/issues/38689): Now it's possible to start a clickhouse-server and attach/detach tables even for tables with the incorrect values of IPv4/IPv6 representation. Proper fix for issue [#35156](https://github.com/ClickHouse/ClickHouse/issues/35156). [#38590](https://github.com/ClickHouse/ClickHouse/pull/38590) ([alesapin](https://github.com/alesapin)).
* Backported in [#38776](https://github.com/ClickHouse/ClickHouse/issues/38776): `rankCorr` function will work correctly if some arguments are NaNs. This closes [#38396](https://github.com/ClickHouse/ClickHouse/issues/38396). [#38722](https://github.com/ClickHouse/ClickHouse/pull/38722) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Backported in [#38780](https://github.com/ClickHouse/ClickHouse/issues/38780): Fix use-after-free for Map combinator that leads to incorrect result. [#38748](https://github.com/ClickHouse/ClickHouse/pull/38748) ([Azat Khuzhin](https://github.com/azat)).
#### Bug Fix (user-visible misbehaviour in official stable or prestable release)
* Backported in [#36818](https://github.com/ClickHouse/ClickHouse/issues/36818): Fix projection analysis which might lead to wrong query result when IN subquery is used. This fixes [#35336](https://github.com/ClickHouse/ClickHouse/issues/35336). [#35631](https://github.com/ClickHouse/ClickHouse/pull/35631) ([Amos Bird](https://github.com/amosbird)).
* Backported in [#38467](https://github.com/ClickHouse/ClickHouse/issues/38467): - Fix potential error with literals in `WHERE` for join queries. Close [#36279](https://github.com/ClickHouse/ClickHouse/issues/36279). [#36542](https://github.com/ClickHouse/ClickHouse/pull/36542) ([Vladimir C](https://github.com/vdimir)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Try to fix some trash [#37303](https://github.com/ClickHouse/ClickHouse/pull/37303) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Update docker-compose to try get rid of v1 errors [#38394](https://github.com/ClickHouse/ClickHouse/pull/38394) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Trying backport useful features for CI [#38510](https://github.com/ClickHouse/ClickHouse/pull/38510) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix backports diff [#38703](https://github.com/ClickHouse/ClickHouse/pull/38703) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).


@@ -0,0 +1,24 @@
---
sidebar_position: 1
sidebar_label: 2022
---
# 2022 Changelog
### ClickHouse release v22.3.9.19-lts (7976930b82e) FIXME as compared to v22.3.8.39-lts (6bcf982f58b)
#### Bug Fix (user-visible misbehavior in official stable or prestable release)
* Backported in [#39097](https://github.com/ClickHouse/ClickHouse/issues/39097): Any allocations inside OvercommitTracker may lead to deadlock. Logging was not very informative so it's easier just to remove logging. Fixes [#37794](https://github.com/ClickHouse/ClickHouse/issues/37794). [#39030](https://github.com/ClickHouse/ClickHouse/pull/39030) ([Dmitry Novik](https://github.com/novikd)).
* Backported in [#39080](https://github.com/ClickHouse/ClickHouse/issues/39080): Fix bug in filesystem cache that could happen in some corner case which coincided with cache capacity hitting the limit. Closes [#39066](https://github.com/ClickHouse/ClickHouse/issues/39066). [#39070](https://github.com/ClickHouse/ClickHouse/pull/39070) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Backported in [#39149](https://github.com/ClickHouse/ClickHouse/issues/39149): Fix error `Block structure mismatch` which could happen for INSERT into table with attached MATERIALIZED VIEW and enabled setting `extremes = 1`. Closes [#29759](https://github.com/ClickHouse/ClickHouse/issues/29759) and [#38729](https://github.com/ClickHouse/ClickHouse/issues/38729). [#39125](https://github.com/ClickHouse/ClickHouse/pull/39125) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#39372](https://github.com/ClickHouse/ClickHouse/issues/39372): Declare RabbitMQ queue without default arguments `x-max-length` and `x-overflow`. [#39259](https://github.com/ClickHouse/ClickHouse/pull/39259) ([rnbondarenko](https://github.com/rnbondarenko)).
* Backported in [#39379](https://github.com/ClickHouse/ClickHouse/issues/39379): Fix segmentation fault in MaterializedPostgreSQL database engine, which could happen if some exception occurred at replication initialisation. Closes [#36939](https://github.com/ClickHouse/ClickHouse/issues/36939). [#39272](https://github.com/ClickHouse/ClickHouse/pull/39272) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Backported in [#39351](https://github.com/ClickHouse/ClickHouse/issues/39351): Fix incorrect fetch postgresql tables query fro PostgreSQL database engine. Closes [#33502](https://github.com/ClickHouse/ClickHouse/issues/33502). [#39283](https://github.com/ClickHouse/ClickHouse/pull/39283) ([Kseniia Sumarokova](https://github.com/kssenii)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Reproduce and a little bit better fix for LC dict right offset. [#36856](https://github.com/ClickHouse/ClickHouse/pull/36856) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Retry docker buildx commands with progressive sleep in between [#38898](https://github.com/ClickHouse/ClickHouse/pull/38898) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Add docker_server.py running to backport and release CIs [#39011](https://github.com/ClickHouse/ClickHouse/pull/39011) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).


@ -5,7 +5,7 @@ sidebar_label: 2022
# 2022 Changelog # 2022 Changelog
### ClickHouse release v22.4.1.2305-prestable FIXME as compared to v22.3.1.1262-prestable ### ClickHouse release v22.4.1.2305-prestable (77a82cc090d) FIXME as compared to v22.3.1.1262-prestable (92ab33f560e)
#### Backward Incompatible Change #### Backward Incompatible Change
* Function `yandexConsistentHash` (consistent hashing algorithm by Konstantin "kostik" Oblakov) is renamed to `kostikConsistentHash`. The old name is left as an alias for compatibility. Although this change is backward compatible, we may remove the alias in subsequent releases, that's why it's recommended to update the usages of this function in your apps. [#35553](https://github.com/ClickHouse/ClickHouse/pull/35553) ([Alexey Milovidov](https://github.com/alexey-milovidov)). * Function `yandexConsistentHash` (consistent hashing algorithm by Konstantin "kostik" Oblakov) is renamed to `kostikConsistentHash`. The old name is left as an alias for compatibility. Although this change is backward compatible, we may remove the alias in subsequent releases, that's why it's recommended to update the usages of this function in your apps. [#35553](https://github.com/ClickHouse/ClickHouse/pull/35553) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
@ -68,7 +68,7 @@ sidebar_label: 2022
* For lts releases packages will be pushed to both lts and stable repos. [#35382](https://github.com/ClickHouse/ClickHouse/pull/35382) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). * For lts releases packages will be pushed to both lts and stable repos. [#35382](https://github.com/ClickHouse/ClickHouse/pull/35382) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Support uuid for postgres engines. Closes [#35384](https://github.com/ClickHouse/ClickHouse/issues/35384). [#35403](https://github.com/ClickHouse/ClickHouse/pull/35403) ([Kseniia Sumarokova](https://github.com/kssenii)). * Support uuid for postgres engines. Closes [#35384](https://github.com/ClickHouse/ClickHouse/issues/35384). [#35403](https://github.com/ClickHouse/ClickHouse/pull/35403) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Add arguments `--user`, `--password`, `--host`, `--port` for clickhouse-diagnostics. [#35422](https://github.com/ClickHouse/ClickHouse/pull/35422) ([李扬](https://github.com/taiyang-li)). * Add arguments `--user`, `--password`, `--host`, `--port` for clickhouse-diagnostics. [#35422](https://github.com/ClickHouse/ClickHouse/pull/35422) ([李扬](https://github.com/taiyang-li)).
* fix INSERT INTO table FROM INFILE does not display progress bar. [#35429](https://github.com/ClickHouse/ClickHouse/pull/35429) ([xiedeyantu](https://github.com/xiedeyantu)). * fix INSERT INTO table FROM INFILE does not display progress bar. [#35429](https://github.com/ClickHouse/ClickHouse/pull/35429) ([chen](https://github.com/xiedeyantu)).
* Allow server to bind to low-numbered ports (e.g. 443). ClickHouse installation script will set `cap_net_bind_service` to the binary file. [#35451](https://github.com/ClickHouse/ClickHouse/pull/35451) ([Alexey Milovidov](https://github.com/alexey-milovidov)). * Allow server to bind to low-numbered ports (e.g. 443). ClickHouse installation script will set `cap_net_bind_service` to the binary file. [#35451](https://github.com/ClickHouse/ClickHouse/pull/35451) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add settings `input_format_orc_case_insensitive_column_matching`, `input_format_arrow_case_insensitive_column_matching`, and `input_format_parquet_case_insensitive_column_matching` which allows ClickHouse to use case insensitive matching of columns while reading data from ORC, Arrow or Parquet files. [#35459](https://github.com/ClickHouse/ClickHouse/pull/35459) ([Antonio Andelic](https://github.com/antonio2368)). * Add settings `input_format_orc_case_insensitive_column_matching`, `input_format_arrow_case_insensitive_column_matching`, and `input_format_parquet_case_insensitive_column_matching` which allows ClickHouse to use case insensitive matching of columns while reading data from ORC, Arrow or Parquet files. [#35459](https://github.com/ClickHouse/ClickHouse/pull/35459) ([Antonio Andelic](https://github.com/antonio2368)).
* - Add explicit table info to the scan node of query plan and pipeline. [#35460](https://github.com/ClickHouse/ClickHouse/pull/35460) ([何李夫](https://github.com/helifu)). * - Add explicit table info to the scan node of query plan and pipeline. [#35460](https://github.com/ClickHouse/ClickHouse/pull/35460) ([何李夫](https://github.com/helifu)).
@ -106,7 +106,7 @@ sidebar_label: 2022
* ASTPartition::formatImpl should output ALL while executing ALTER TABLE t DETACH PARTITION ALL. [#35987](https://github.com/ClickHouse/ClickHouse/pull/35987) ([awakeljw](https://github.com/awakeljw)). * ASTPartition::formatImpl should output ALL while executing ALTER TABLE t DETACH PARTITION ALL. [#35987](https://github.com/ClickHouse/ClickHouse/pull/35987) ([awakeljw](https://github.com/awakeljw)).
* `clickhouse-keeper` starts answering 4-letter commands before getting the quorum. [#35992](https://github.com/ClickHouse/ClickHouse/pull/35992) ([Antonio Andelic](https://github.com/antonio2368)). * `clickhouse-keeper` starts answering 4-letter commands before getting the quorum. [#35992](https://github.com/ClickHouse/ClickHouse/pull/35992) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix wrong assertion in replxx which happens when navigating back the history when the first line of input is a newline. Mark as improvement because it only affects debug build. This fixes [#34511](https://github.com/ClickHouse/ClickHouse/issues/34511). [#36007](https://github.com/ClickHouse/ClickHouse/pull/36007) ([Amos Bird](https://github.com/amosbird)). * Fix wrong assertion in replxx which happens when navigating back the history when the first line of input is a newline. Mark as improvement because it only affects debug build. This fixes [#34511](https://github.com/ClickHouse/ClickHouse/issues/34511). [#36007](https://github.com/ClickHouse/ClickHouse/pull/36007) ([Amos Bird](https://github.com/amosbird)).
* If someone writes DEFAULT NULL in table definition, make data type Nullable. [#35887](https://github.com/ClickHouse/ClickHouse/issues/35887). [#36058](https://github.com/ClickHouse/ClickHouse/pull/36058) ([xiedeyantu](https://github.com/xiedeyantu)). * If someone writes DEFAULT NULL in table definition, make data type Nullable. [#35887](https://github.com/ClickHouse/ClickHouse/issues/35887). [#36058](https://github.com/ClickHouse/ClickHouse/pull/36058) ([chen](https://github.com/xiedeyantu)).
* Added `thread_id` and `query_id` columns to `system.zookeeper_log` table. [#36074](https://github.com/ClickHouse/ClickHouse/pull/36074) ([Alexander Tokmakov](https://github.com/tavplubix)). * Added `thread_id` and `query_id` columns to `system.zookeeper_log` table. [#36074](https://github.com/ClickHouse/ClickHouse/pull/36074) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Auto assign numbers for Enum elements. [#36101](https://github.com/ClickHouse/ClickHouse/pull/36101) ([awakeljw](https://github.com/awakeljw)). * Auto assign numbers for Enum elements. [#36101](https://github.com/ClickHouse/ClickHouse/pull/36101) ([awakeljw](https://github.com/awakeljw)).
* Reset thread name in `ThreadPool` to `ThreadPoolIdle` after job is done. This is to avoid displaying the old thread name for idle threads. This closes [#36114](https://github.com/ClickHouse/ClickHouse/issues/36114). [#36115](https://github.com/ClickHouse/ClickHouse/pull/36115) ([Alexey Milovidov](https://github.com/alexey-milovidov)). * Reset thread name in `ThreadPool` to `ThreadPoolIdle` after job is done. This is to avoid displaying the old thread name for idle threads. This closes [#36114](https://github.com/ClickHouse/ClickHouse/issues/36114). [#36115](https://github.com/ClickHouse/ClickHouse/pull/36115) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
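The entries above about `DEFAULT NULL` and auto-assigned `Enum` numbers can be sketched as follows; the table name is made up and the resulting types are hedged guesses based on the descriptions:

```sql
CREATE TABLE t_defaults
(
    -- Per the entry above, DEFAULT NULL is expected to make this column Nullable(Int32).
    x Int32 DEFAULT NULL,
    -- Per the entry above, elements without explicit numbers are expected to get 1, 2, 3.
    e Enum('a', 'b', 'c')
)
ENGINE = Memory;

DESCRIBE TABLE t_defaults;
```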
@ -331,7 +331,7 @@ sidebar_label: 2022
* ci: replace directory system log tables artifacts with tsv [#35773](https://github.com/ClickHouse/ClickHouse/pull/35773) ([Azat Khuzhin](https://github.com/azat)). * ci: replace directory system log tables artifacts with tsv [#35773](https://github.com/ClickHouse/ClickHouse/pull/35773) ([Azat Khuzhin](https://github.com/azat)).
* One more try to resurrect build hash [#35774](https://github.com/ClickHouse/ClickHouse/pull/35774) ([alesapin](https://github.com/alesapin)). * One more try to resurrect build hash [#35774](https://github.com/ClickHouse/ClickHouse/pull/35774) ([alesapin](https://github.com/alesapin)).
* Refactoring QueryPipeline [#35789](https://github.com/ClickHouse/ClickHouse/pull/35789) ([Amos Bird](https://github.com/amosbird)). * Refactoring QueryPipeline [#35789](https://github.com/ClickHouse/ClickHouse/pull/35789) ([Amos Bird](https://github.com/amosbird)).
* Delete duplicate code [#35798](https://github.com/ClickHouse/ClickHouse/pull/35798) ([xiedeyantu](https://github.com/xiedeyantu)). * Delete duplicate code [#35798](https://github.com/ClickHouse/ClickHouse/pull/35798) ([chen](https://github.com/xiedeyantu)).
* remove unused variable [#35800](https://github.com/ClickHouse/ClickHouse/pull/35800) ([flynn](https://github.com/ucasfl)). * remove unused variable [#35800](https://github.com/ClickHouse/ClickHouse/pull/35800) ([flynn](https://github.com/ucasfl)).
* Make `SortDescription::column_name` always non-empty [#35805](https://github.com/ClickHouse/ClickHouse/pull/35805) ([Nikita Taranov](https://github.com/nickitat)). * Make `SortDescription::column_name` always non-empty [#35805](https://github.com/ClickHouse/ClickHouse/pull/35805) ([Nikita Taranov](https://github.com/nickitat)).
* Fix latest_error referenced before assignment [#35807](https://github.com/ClickHouse/ClickHouse/pull/35807) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). * Fix latest_error referenced before assignment [#35807](https://github.com/ClickHouse/ClickHouse/pull/35807) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
@ -417,7 +417,7 @@ sidebar_label: 2022
* Revert reverting "Fix crash in ParallelReadBuffer" [#36212](https://github.com/ClickHouse/ClickHouse/pull/36212) ([Kruglov Pavel](https://github.com/Avogar)). * Revert reverting "Fix crash in ParallelReadBuffer" [#36212](https://github.com/ClickHouse/ClickHouse/pull/36212) ([Kruglov Pavel](https://github.com/Avogar)).
* Make stateless tests with s3 always green [#36214](https://github.com/ClickHouse/ClickHouse/pull/36214) ([Alexander Tokmakov](https://github.com/tavplubix)). * Make stateless tests with s3 always green [#36214](https://github.com/ClickHouse/ClickHouse/pull/36214) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Add Tyler Hannan to contributors [#36216](https://github.com/ClickHouse/ClickHouse/pull/36216) ([Tyler Hannan](https://github.com/tylerhannan)). * Add Tyler Hannan to contributors [#36216](https://github.com/ClickHouse/ClickHouse/pull/36216) ([Tyler Hannan](https://github.com/tylerhannan)).
* Fix the repeated call of func to get the table when drop table [#36248](https://github.com/ClickHouse/ClickHouse/pull/36248) ([xiedeyantu](https://github.com/xiedeyantu)). * Fix the repeated call of func to get the table when drop table [#36248](https://github.com/ClickHouse/ClickHouse/pull/36248) ([chen](https://github.com/xiedeyantu)).
* Split test 01675_data_type_coroutine into 2 tests to prevent possible timeouts [#36250](https://github.com/ClickHouse/ClickHouse/pull/36250) ([Kruglov Pavel](https://github.com/Avogar)). * Split test 01675_data_type_coroutine into 2 tests to prevent possible timeouts [#36250](https://github.com/ClickHouse/ClickHouse/pull/36250) ([Kruglov Pavel](https://github.com/Avogar)).
* Merge TRUSTED_CONTRIBUTORS in lambda and import in check [#36252](https://github.com/ClickHouse/ClickHouse/pull/36252) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). * Merge TRUSTED_CONTRIBUTORS in lambda and import in check [#36252](https://github.com/ClickHouse/ClickHouse/pull/36252) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix exception "File segment can be completed only by downloader" in tests [#36253](https://github.com/ClickHouse/ClickHouse/pull/36253) ([Kseniia Sumarokova](https://github.com/kssenii)). * Fix exception "File segment can be completed only by downloader" in tests [#36253](https://github.com/ClickHouse/ClickHouse/pull/36253) ([Kseniia Sumarokova](https://github.com/kssenii)).

View File

@ -5,5 +5,5 @@ sidebar_label: 2022
# 2022 Changelog # 2022 Changelog
### ClickHouse release v22.4.2.1-stable FIXME as compared to v22.4.1.2305-prestable ### ClickHouse release v22.4.2.1-stable (b34ebdc36ae) FIXME as compared to v22.4.1.2305-prestable (77a82cc090d)

View File

@ -5,7 +5,7 @@ sidebar_label: 2022
# 2022 Changelog # 2022 Changelog
### ClickHouse release v22.4.3.3-stable FIXME as compared to v22.4.2.1-stable ### ClickHouse release v22.4.3.3-stable (def956d6299) FIXME as compared to v22.4.2.1-stable (b34ebdc36ae)
#### Bug Fix (user-visible misbehaviour in official stable or prestable release) #### Bug Fix (user-visible misbehaviour in official stable or prestable release)

View File

@ -5,7 +5,7 @@ sidebar_label: 2022
# 2022 Changelog # 2022 Changelog
### ClickHouse release v22.4.4.7-stable FIXME as compared to v22.4.3.3-stable ### ClickHouse release v22.4.4.7-stable (ba44414f9b3) FIXME as compared to v22.4.3.3-stable (def956d6299)
#### Bug Fix (user-visible misbehaviour in official stable or prestable release) #### Bug Fix (user-visible misbehaviour in official stable or prestable release)

View File

@ -5,7 +5,7 @@ sidebar_label: 2022
# 2022 Changelog # 2022 Changelog
### ClickHouse release v22.4.5.9-stable FIXME as compared to v22.4.4.7-stable ### ClickHouse release v22.4.5.9-stable (059ef6cadcd) FIXME as compared to v22.4.4.7-stable (ba44414f9b3)
#### Bug Fix (user-visible misbehaviour in official stable or prestable release) #### Bug Fix (user-visible misbehaviour in official stable or prestable release)

View File

@ -0,0 +1,44 @@
---
sidebar_position: 1
sidebar_label: 2022
---
# 2022 Changelog
### ClickHouse release v22.4.6.53-stable (0625731c940) FIXME as compared to v22.4.5.9-stable (059ef6cadcd)
#### New Feature
* Backported in [#38714](https://github.com/ClickHouse/ClickHouse/issues/38714): SALT is allowed for CREATE USER <user> IDENTIFIED WITH sha256_hash. [#37377](https://github.com/ClickHouse/ClickHouse/pull/37377) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
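A hedged sketch of the `SALT` clause from the entry above; the user name and the hex strings are placeholders, and the hash is assumed to be SHA-256 of the password concatenated with the salt:

```sql
-- Placeholder values: substitute a real hex-encoded salt and the matching hash.
CREATE USER alice
    IDENTIFIED WITH sha256_hash
    BY '<hex of SHA256(password || salt)>'
    SALT '<hex-encoded salt>';
```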
#### Build/Testing/Packaging Improvement
* Backported in [#38828](https://github.com/ClickHouse/ClickHouse/issues/38828): - Change `all|noarch` packages to architecture-dependent - Fix some documentation for it - Push aarch64|arm64 packages to artifactory and release assets - Fixes [#36443](https://github.com/ClickHouse/ClickHouse/issues/36443). [#38580](https://github.com/ClickHouse/ClickHouse/pull/38580) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
#### Bug Fix (user-visible misbehavior in official stable or prestable release)
* Backported in [#37717](https://github.com/ClickHouse/ClickHouse/issues/37717): Fix unexpected errors with a clash of constant strings in aggregate function, prewhere and join. Close [#36891](https://github.com/ClickHouse/ClickHouse/issues/36891). [#37336](https://github.com/ClickHouse/ClickHouse/pull/37336) ([Vladimir C](https://github.com/vdimir)).
* Backported in [#37512](https://github.com/ClickHouse/ClickHouse/issues/37512): Fix logical error in normalizeUTF8 functions. Closes [#37298](https://github.com/ClickHouse/ClickHouse/issues/37298). [#37443](https://github.com/ClickHouse/ClickHouse/pull/37443) ([Maksim Kita](https://github.com/kitaisreal)).
* Backported in [#37941](https://github.com/ClickHouse/ClickHouse/issues/37941): Fix setting cast_ipv4_ipv6_default_on_conversion_error for internal cast function. Closes [#35156](https://github.com/ClickHouse/ClickHouse/issues/35156). [#37761](https://github.com/ClickHouse/ClickHouse/pull/37761) ([Maksim Kita](https://github.com/kitaisreal)).
* Backported in [#38452](https://github.com/ClickHouse/ClickHouse/issues/38452): Fix bug with nested short-circuit functions that led to execution of arguments even if condition is false. Closes [#38040](https://github.com/ClickHouse/ClickHouse/issues/38040). [#38173](https://github.com/ClickHouse/ClickHouse/pull/38173) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#38711](https://github.com/ClickHouse/ClickHouse/issues/38711): Fix incorrect result of distributed queries with `DISTINCT` and `LIMIT`. Fixes [#38282](https://github.com/ClickHouse/ClickHouse/issues/38282). [#38371](https://github.com/ClickHouse/ClickHouse/pull/38371) ([Anton Popov](https://github.com/CurtizJ)).
* Backported in [#38593](https://github.com/ClickHouse/ClickHouse/issues/38593): Fix parts removal (will be left forever if they had not been removed on server shutdown) after incorrect server shutdown. [#38486](https://github.com/ClickHouse/ClickHouse/pull/38486) ([Azat Khuzhin](https://github.com/azat)).
* Backported in [#38596](https://github.com/ClickHouse/ClickHouse/issues/38596): Fix table creation to avoid replication issues with pre-22.4 replicas. [#38541](https://github.com/ClickHouse/ClickHouse/pull/38541) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#38686](https://github.com/ClickHouse/ClickHouse/issues/38686): Now it's possible to start a clickhouse-server and attach/detach tables even for tables with the incorrect values of IPv4/IPv6 representation. Proper fix for issue [#35156](https://github.com/ClickHouse/ClickHouse/issues/35156). [#38590](https://github.com/ClickHouse/ClickHouse/pull/38590) ([alesapin](https://github.com/alesapin)).
* Backported in [#38663](https://github.com/ClickHouse/ClickHouse/issues/38663): Adapt some more nodes to avoid issues with pre-22.4 replicas. [#38627](https://github.com/ClickHouse/ClickHouse/pull/38627) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#38777](https://github.com/ClickHouse/ClickHouse/issues/38777): `rankCorr` function will work correctly if some arguments are NaNs. This closes [#38396](https://github.com/ClickHouse/ClickHouse/issues/38396). [#38722](https://github.com/ClickHouse/ClickHouse/pull/38722) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Backported in [#38781](https://github.com/ClickHouse/ClickHouse/issues/38781): Fix use-after-free for Map combinator that leads to incorrect result. [#38748](https://github.com/ClickHouse/ClickHouse/pull/38748) ([Azat Khuzhin](https://github.com/azat)).
#### Bug Fix (user-visible misbehaviour in official stable or prestable release)
* Backported in [#37456](https://github.com/ClickHouse/ClickHouse/issues/37456): Server might fail to start if it cannot resolve hostname of external ClickHouse dictionary. It's fixed. Fixes [#36451](https://github.com/ClickHouse/ClickHouse/issues/36451). [#36463](https://github.com/ClickHouse/ClickHouse/pull/36463) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Backported in [#38468](https://github.com/ClickHouse/ClickHouse/issues/38468): - Fix potential error with literals in `WHERE` for join queries. Close [#36279](https://github.com/ClickHouse/ClickHouse/issues/36279). [#36542](https://github.com/ClickHouse/ClickHouse/pull/36542) ([Vladimir C](https://github.com/vdimir)).
* Backported in [#37363](https://github.com/ClickHouse/ClickHouse/issues/37363): Fixed problem with infs in `quantileTDigest`. Fixes [#32107](https://github.com/ClickHouse/ClickHouse/issues/32107). [#37021](https://github.com/ClickHouse/ClickHouse/pull/37021) ([Vladimir Chebotarev](https://github.com/excitoon)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Integration tests [#36866](https://github.com/ClickHouse/ClickHouse/pull/36866) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Try to fix some trash [#37303](https://github.com/ClickHouse/ClickHouse/pull/37303) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Update protobuf files for kafka and rabbitmq [fix integration tests] [#37884](https://github.com/ClickHouse/ClickHouse/pull/37884) ([Nikita Taranov](https://github.com/nickitat)).
* Try fix `test_grpc_protocol/test.py::test_progress` [#37908](https://github.com/ClickHouse/ClickHouse/pull/37908) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Update docker-compose to try get rid of v1 errors [#38394](https://github.com/ClickHouse/ClickHouse/pull/38394) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix backports diff [#38703](https://github.com/ClickHouse/ClickHouse/pull/38703) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

View File

@ -5,10 +5,10 @@ sidebar_label: 2022
# 2022 Changelog # 2022 Changelog
### ClickHouse release v22.5.1.2079-stable FIXME as compared to v22.4.1.2305-prestable ### ClickHouse release v22.5.1.2079-stable (df0cb062098) FIXME as compared to v22.4.1.2305-prestable (77a82cc090d)
#### Backward Incompatible Change #### Backward Incompatible Change
* Updated the BoringSSL module to the official FIPS compliant version. This makes ClickHouse FIPS compliant. [#35914](https://github.com/ClickHouse/ClickHouse/pull/35914) ([Meena-Renganathan](https://github.com/Meena-Renganathan)). * Updated the BoringSSL module to the official FIPS compliant version. This makes ClickHouse FIPS compliant. [#35914](https://github.com/ClickHouse/ClickHouse/pull/35914) ([Deleted user](https://github.com/ghost)).
* Now, background merges, mutations and `OPTIMIZE` will not increment `SelectedRows` and `SelectedBytes` metrics. They (still) will increment `MergedRows` and `MergedUncompressedBytes` as it was before. [#37040](https://github.com/ClickHouse/ClickHouse/pull/37040) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). * Now, background merges, mutations and `OPTIMIZE` will not increment `SelectedRows` and `SelectedBytes` metrics. They (still) will increment `MergedRows` and `MergedUncompressedBytes` as it was before. [#37040](https://github.com/ClickHouse/ClickHouse/pull/37040) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
#### New Feature #### New Feature
@ -20,7 +20,7 @@ sidebar_label: 2022
* Parse collations in CREATE TABLE, throw exception or ignore. closes [#35892](https://github.com/ClickHouse/ClickHouse/issues/35892). [#36271](https://github.com/ClickHouse/ClickHouse/pull/36271) ([yuuch](https://github.com/yuuch)). * Parse collations in CREATE TABLE, throw exception or ignore. closes [#35892](https://github.com/ClickHouse/ClickHouse/issues/35892). [#36271](https://github.com/ClickHouse/ClickHouse/pull/36271) ([yuuch](https://github.com/yuuch)).
* Add aliases JSONLines and NDJSON for JSONEachRow. Closes [#36303](https://github.com/ClickHouse/ClickHouse/issues/36303). [#36327](https://github.com/ClickHouse/ClickHouse/pull/36327) ([flynn](https://github.com/ucasfl)). * Add aliases JSONLines and NDJSON for JSONEachRow. Closes [#36303](https://github.com/ClickHouse/ClickHouse/issues/36303). [#36327](https://github.com/ClickHouse/ClickHouse/pull/36327) ([flynn](https://github.com/ucasfl)).
* Set parts_to_delay_insert and parts_to_throw_insert as query-level settings. If they are defined, they can override table-level settings. [#36371](https://github.com/ClickHouse/ClickHouse/pull/36371) ([Memo](https://github.com/Joeywzr)). * Set parts_to_delay_insert and parts_to_throw_insert as query-level settings. If they are defined, they can override table-level settings. [#36371](https://github.com/ClickHouse/ClickHouse/pull/36371) ([Memo](https://github.com/Joeywzr)).
* Temporary tables can now show total rows and total bytes. [#36401](https://github.com/ClickHouse/ClickHouse/issues/36401). [#36439](https://github.com/ClickHouse/ClickHouse/pull/36439) ([xiedeyantu](https://github.com/xiedeyantu)). * Temporary tables can now show total rows and total bytes. [#36401](https://github.com/ClickHouse/ClickHouse/issues/36401). [#36439](https://github.com/ClickHouse/ClickHouse/pull/36439) ([chen](https://github.com/xiedeyantu)).
* Added new hash function - wyHash64. [#36467](https://github.com/ClickHouse/ClickHouse/pull/36467) ([olevino](https://github.com/olevino)). * Added new hash function - wyHash64. [#36467](https://github.com/ClickHouse/ClickHouse/pull/36467) ([olevino](https://github.com/olevino)).
* Window function nth_value was added. [#36601](https://github.com/ClickHouse/ClickHouse/pull/36601) ([Nikolay](https://github.com/ndchikin)). * Window function nth_value was added. [#36601](https://github.com/ClickHouse/ClickHouse/pull/36601) ([Nikolay](https://github.com/ndchikin)).
* Add MySQLDump input format. It reads all data from INSERT queries belonging to one table in dump. If there are more than one table, by default it reads data from the first one. [#36667](https://github.com/ClickHouse/ClickHouse/pull/36667) ([Kruglov Pavel](https://github.com/Avogar)). * Add MySQLDump input format. It reads all data from INSERT queries belonging to one table in dump. If there are more than one table, by default it reads data from the first one. [#36667](https://github.com/ClickHouse/ClickHouse/pull/36667) ([Kruglov Pavel](https://github.com/Avogar)).
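The new `nth_value` window function mentioned above can be sketched like this, using the built-in `numbers` table function:

```sql
-- nth_value(x, n) returns x evaluated at the n-th row of the window frame.
-- With an unbounded frame, every row gets the value from the 2nd row (here 10).
SELECT
    number,
    nth_value(number * 10, 2) OVER (ORDER BY number ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS second_value
FROM numbers(5);
```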
@ -212,7 +212,7 @@ sidebar_label: 2022
* Update version after release [#36502](https://github.com/ClickHouse/ClickHouse/pull/36502) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). * Update version after release [#36502](https://github.com/ClickHouse/ClickHouse/pull/36502) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Followup on [#36172](https://github.com/ClickHouse/ClickHouse/issues/36172) password hash salt feature [#36510](https://github.com/ClickHouse/ClickHouse/pull/36510) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)). * Followup on [#36172](https://github.com/ClickHouse/ClickHouse/issues/36172) password hash salt feature [#36510](https://github.com/ClickHouse/ClickHouse/pull/36510) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Update version_date.tsv after v22.4.2.1-stable [#36533](https://github.com/ClickHouse/ClickHouse/pull/36533) ([github-actions[bot]](https://github.com/apps/github-actions)). * Update version_date.tsv after v22.4.2.1-stable [#36533](https://github.com/ClickHouse/ClickHouse/pull/36533) ([github-actions[bot]](https://github.com/apps/github-actions)).
* Fix log to print the 'from' path [#36535](https://github.com/ClickHouse/ClickHouse/pull/36535) ([xiedeyantu](https://github.com/xiedeyantu)). * Fix log to print the 'from' path [#36535](https://github.com/ClickHouse/ClickHouse/pull/36535) ([chen](https://github.com/xiedeyantu)).
* Add function bin tests for Int/UInt128/UInt256 [#36537](https://github.com/ClickHouse/ClickHouse/pull/36537) ([Memo](https://github.com/Joeywzr)). * Add function bin tests for Int/UInt128/UInt256 [#36537](https://github.com/ClickHouse/ClickHouse/pull/36537) ([Memo](https://github.com/Joeywzr)).
* Fix 01161_all_system_tables [#36539](https://github.com/ClickHouse/ClickHouse/pull/36539) ([Antonio Andelic](https://github.com/antonio2368)). * Fix 01161_all_system_tables [#36539](https://github.com/ClickHouse/ClickHouse/pull/36539) ([Antonio Andelic](https://github.com/antonio2368)).
* Update PULL_REQUEST_TEMPLATE.md [#36543](https://github.com/ClickHouse/ClickHouse/pull/36543) ([Ivan Blinkov](https://github.com/blinkov)). * Update PULL_REQUEST_TEMPLATE.md [#36543](https://github.com/ClickHouse/ClickHouse/pull/36543) ([Ivan Blinkov](https://github.com/blinkov)).

View File

@ -0,0 +1,40 @@
---
sidebar_position: 1
sidebar_label: 2022
---
# 2022 Changelog
### ClickHouse release v22.5.2.53-stable (5fd600fda9e) FIXME as compared to v22.5.1.2079-stable (df0cb062098)
#### New Feature
* Backported in [#38713](https://github.com/ClickHouse/ClickHouse/issues/38713): SALT is allowed for CREATE USER <user> IDENTIFIED WITH sha256_hash. [#37377](https://github.com/ClickHouse/ClickHouse/pull/37377) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
#### Build/Testing/Packaging Improvement
* Backported in [#38827](https://github.com/ClickHouse/ClickHouse/issues/38827): - Change `all|noarch` packages to architecture-dependent - Fix some documentation for it - Push aarch64|arm64 packages to artifactory and release assets - Fixes [#36443](https://github.com/ClickHouse/ClickHouse/issues/36443). [#38580](https://github.com/ClickHouse/ClickHouse/pull/38580) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
#### Bug Fix (user-visible misbehavior in official stable or prestable release)
* Backported in [#37716](https://github.com/ClickHouse/ClickHouse/issues/37716): Fix unexpected errors with a clash of constant strings in aggregate function, prewhere and join. Close [#36891](https://github.com/ClickHouse/ClickHouse/issues/36891). [#37336](https://github.com/ClickHouse/ClickHouse/pull/37336) ([Vladimir C](https://github.com/vdimir)).
* Backported in [#37408](https://github.com/ClickHouse/ClickHouse/issues/37408): Throw an exception when GROUPING SETS used with ROLLUP or CUBE. [#37367](https://github.com/ClickHouse/ClickHouse/pull/37367) ([Dmitry Novik](https://github.com/novikd)).
* Backported in [#37513](https://github.com/ClickHouse/ClickHouse/issues/37513): Fix logical error in normalizeUTF8 functions. Closes [#37298](https://github.com/ClickHouse/ClickHouse/issues/37298). [#37443](https://github.com/ClickHouse/ClickHouse/pull/37443) ([Maksim Kita](https://github.com/kitaisreal)).
* Backported in [#37942](https://github.com/ClickHouse/ClickHouse/issues/37942): Fix setting cast_ipv4_ipv6_default_on_conversion_error for internal cast function. Closes [#35156](https://github.com/ClickHouse/ClickHouse/issues/35156). [#37761](https://github.com/ClickHouse/ClickHouse/pull/37761) ([Maksim Kita](https://github.com/kitaisreal)).
* Backported in [#38451](https://github.com/ClickHouse/ClickHouse/issues/38451): Fix bug with nested short-circuit functions that led to execution of arguments even if condition is false. Closes [#38040](https://github.com/ClickHouse/ClickHouse/issues/38040). [#38173](https://github.com/ClickHouse/ClickHouse/pull/38173) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#38544](https://github.com/ClickHouse/ClickHouse/issues/38544): Do not allow recursive usage of OvercommitTracker during logging. Fixes [#37794](https://github.com/ClickHouse/ClickHouse/issues/37794) cc @tavplubix @davenger. [#38246](https://github.com/ClickHouse/ClickHouse/pull/38246) ([Dmitry Novik](https://github.com/novikd)).
* Backported in [#38708](https://github.com/ClickHouse/ClickHouse/issues/38708): Fix incorrect result of distributed queries with `DISTINCT` and `LIMIT`. Fixes [#38282](https://github.com/ClickHouse/ClickHouse/issues/38282). [#38371](https://github.com/ClickHouse/ClickHouse/pull/38371) ([Anton Popov](https://github.com/CurtizJ)).
* Backported in [#38595](https://github.com/ClickHouse/ClickHouse/issues/38595): Fix parts removal (will be left forever if they had not been removed on server shutdown) after incorrect server shutdown. [#38486](https://github.com/ClickHouse/ClickHouse/pull/38486) ([Azat Khuzhin](https://github.com/azat)).
* Backported in [#38598](https://github.com/ClickHouse/ClickHouse/issues/38598): Fix table creation to avoid replication issues with pre-22.4 replicas. [#38541](https://github.com/ClickHouse/ClickHouse/pull/38541) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#38688](https://github.com/ClickHouse/ClickHouse/issues/38688): Now it's possible to start a clickhouse-server and attach/detach tables even for tables with the incorrect values of IPv4/IPv6 representation. Proper fix for issue [#35156](https://github.com/ClickHouse/ClickHouse/issues/35156). [#38590](https://github.com/ClickHouse/ClickHouse/pull/38590) ([alesapin](https://github.com/alesapin)).
* Backported in [#38664](https://github.com/ClickHouse/ClickHouse/issues/38664): Adapt some more nodes to avoid issues with pre-22.4 replicas. [#38627](https://github.com/ClickHouse/ClickHouse/pull/38627) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#38779](https://github.com/ClickHouse/ClickHouse/issues/38779): `rankCorr` function will work correctly if some arguments are NaNs. This closes [#38396](https://github.com/ClickHouse/ClickHouse/issues/38396). [#38722](https://github.com/ClickHouse/ClickHouse/pull/38722) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Backported in [#38783](https://github.com/ClickHouse/ClickHouse/issues/38783): Fix use-after-free for Map combinator that leads to incorrect result. [#38748](https://github.com/ClickHouse/ClickHouse/pull/38748) ([Azat Khuzhin](https://github.com/azat)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Try to fix some trash [#37303](https://github.com/ClickHouse/ClickHouse/pull/37303) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Update protobuf files for kafka and rabbitmq [fix integration tests] [#37884](https://github.com/ClickHouse/ClickHouse/pull/37884) ([Nikita Taranov](https://github.com/nickitat)).
* Try fix `test_grpc_protocol/test.py::test_progress` [#37908](https://github.com/ClickHouse/ClickHouse/pull/37908) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Try to fix BC check [#38178](https://github.com/ClickHouse/ClickHouse/pull/38178) ([Kruglov Pavel](https://github.com/Avogar)).
* Update docker-compose to try get rid of v1 errors [#38394](https://github.com/ClickHouse/ClickHouse/pull/38394) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix backports diff [#38703](https://github.com/ClickHouse/ClickHouse/pull/38703) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

View File

@ -0,0 +1,24 @@
---
sidebar_position: 1
sidebar_label: 2022
---
# 2022 Changelog
### ClickHouse release v22.5.3.21-stable (e03724efec5) FIXME as compared to v22.5.2.53-stable (5fd600fda9e)
#### Bug Fix (user-visible misbehavior in official stable or prestable release)
* Backported in [#38241](https://github.com/ClickHouse/ClickHouse/issues/38241): Fix possible crash in `Distributed` async insert in case of removing a replica from config. [#38029](https://github.com/ClickHouse/ClickHouse/pull/38029) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#39098](https://github.com/ClickHouse/ClickHouse/issues/39098): Any allocations inside OvercommitTracker may lead to deadlock. Logging was not very informative so it's easier just to remove logging. Fixes [#37794](https://github.com/ClickHouse/ClickHouse/issues/37794). [#39030](https://github.com/ClickHouse/ClickHouse/pull/39030) ([Dmitry Novik](https://github.com/novikd)).
* Backported in [#39078](https://github.com/ClickHouse/ClickHouse/issues/39078): Fix bug in filesystem cache that could happen in some corner case which coincided with cache capacity hitting the limit. Closes [#39066](https://github.com/ClickHouse/ClickHouse/issues/39066). [#39070](https://github.com/ClickHouse/ClickHouse/pull/39070) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Backported in [#39152](https://github.com/ClickHouse/ClickHouse/issues/39152): Fix error `Block structure mismatch` which could happen for INSERT into table with attached MATERIALIZED VIEW and enabled setting `extremes = 1`. Closes [#29759](https://github.com/ClickHouse/ClickHouse/issues/29759) and [#38729](https://github.com/ClickHouse/ClickHouse/issues/38729). [#39125](https://github.com/ClickHouse/ClickHouse/pull/39125) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#39274](https://github.com/ClickHouse/ClickHouse/issues/39274): Fixed error `Not found column Type in block` in selects with `PREWHERE` and read-in-order optimizations. [#39157](https://github.com/ClickHouse/ClickHouse/pull/39157) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Backported in [#39369](https://github.com/ClickHouse/ClickHouse/issues/39369): Declare RabbitMQ queue without default arguments `x-max-length` and `x-overflow`. [#39259](https://github.com/ClickHouse/ClickHouse/pull/39259) ([rnbondarenko](https://github.com/rnbondarenko)).
* Backported in [#39350](https://github.com/ClickHouse/ClickHouse/issues/39350): Fix incorrect query for fetching PostgreSQL tables in the PostgreSQL database engine. Closes [#33502](https://github.com/ClickHouse/ClickHouse/issues/33502). [#39283](https://github.com/ClickHouse/ClickHouse/pull/39283) ([Kseniia Sumarokova](https://github.com/kssenii)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Retry docker buildx commands with progressive sleep in between [#38898](https://github.com/ClickHouse/ClickHouse/pull/38898) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Add docker_server.py running to backport and release CIs [#39011](https://github.com/ClickHouse/ClickHouse/pull/39011) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

View File

@ -0,0 +1,29 @@
---
sidebar_position: 1
sidebar_label: 2022
---
# 2022 Changelog
### ClickHouse release v22.5.4.19-stable (c893bba830e) FIXME as compared to v22.5.3.21-stable (e03724efec5)
#### Bug Fix
* Backported in [#39748](https://github.com/ClickHouse/ClickHouse/issues/39748): Fix seeking while reading from encrypted disk. This PR fixes [#38381](https://github.com/ClickHouse/ClickHouse/issues/38381). [#39687](https://github.com/ClickHouse/ClickHouse/pull/39687) ([Vitaly Baranov](https://github.com/vitlibar)).
#### Build/Testing/Packaging Improvement
* Backported in [#39882](https://github.com/ClickHouse/ClickHouse/issues/39882): Former packages used to install systemd.service file to `/etc`. The files there are marked as `conf` and are not cleaned out, and not updated automatically. This PR cleans them out. [#39323](https://github.com/ClickHouse/ClickHouse/pull/39323) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
#### Bug Fix (user-visible misbehavior in official stable or prestable release)
* Backported in [#39209](https://github.com/ClickHouse/ClickHouse/issues/39209): Fix reading of sparse columns from `MergeTree` tables that store their data in S3. [#37978](https://github.com/ClickHouse/ClickHouse/pull/37978) ([Anton Popov](https://github.com/CurtizJ)).
* Backported in [#39589](https://github.com/ClickHouse/ClickHouse/issues/39589): Fix data race and possible heap-buffer-overflow in Avro format. Closes [#39094](https://github.com/ClickHouse/ClickHouse/issues/39094) Closes [#33652](https://github.com/ClickHouse/ClickHouse/issues/33652). [#39498](https://github.com/ClickHouse/ClickHouse/pull/39498) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#39611](https://github.com/ClickHouse/ClickHouse/issues/39611): Fix bug with maxsplit argument for splitByChar, which was not working correctly. [#39552](https://github.com/ClickHouse/ClickHouse/pull/39552) ([filimonov](https://github.com/filimonov)).
* Backported in [#39790](https://github.com/ClickHouse/ClickHouse/issues/39790): Fix wrong index analysis with tuples and operator `IN`, which could lead to wrong query result. [#39752](https://github.com/ClickHouse/ClickHouse/pull/39752) ([Anton Popov](https://github.com/CurtizJ)).
* Backported in [#39835](https://github.com/ClickHouse/ClickHouse/issues/39835): Fix `CANNOT_READ_ALL_DATA` exception with `local_filesystem_read_method=pread_threadpool`. This bug affected only Linux kernel version 5.9 and 5.10 according to [man](https://manpages.debian.org/testing/manpages-dev/preadv2.2.en.html#BUGS). [#39800](https://github.com/ClickHouse/ClickHouse/pull/39800) ([Anton Popov](https://github.com/CurtizJ)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Fix reading from s3 in some corner cases [#38239](https://github.com/ClickHouse/ClickHouse/pull/38239) ([Anton Popov](https://github.com/CurtizJ)).
* Replace MemoryTrackerBlockerInThread to LockMemoryExceptionInThread [#39619](https://github.com/ClickHouse/ClickHouse/pull/39619) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Change mysql-odbc url [#39702](https://github.com/ClickHouse/ClickHouse/pull/39702) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

View File

@ -5,7 +5,7 @@ sidebar_label: 2022
# 2022 Changelog # 2022 Changelog
### ClickHouse release v22.6.1.1985-stable FIXME as compared to v22.5.1.2079-stable ### ClickHouse release v22.6.1.1985-stable (7000c4e0033) FIXME as compared to v22.5.1.2079-stable (df0cb062098)
#### Backward Incompatible Change #### Backward Incompatible Change
* Changes how settings using `seconds` as type are parsed to support floating point values (for example: `max_execution_time=0.5`). Infinity or NaN values will throw an exception. [#37187](https://github.com/ClickHouse/ClickHouse/pull/37187) ([Raúl Marín](https://github.com/Algunenano)). * Changes how settings using `seconds` as type are parsed to support floating point values (for example: `max_execution_time=0.5`). Infinity or NaN values will throw an exception. [#37187](https://github.com/ClickHouse/ClickHouse/pull/37187) ([Raúl Marín](https://github.com/Algunenano)).
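A one-line sketch of the behaviour described above; before this change, only integer values were accepted for such settings:

```sql
-- Allow the query to run for at most half a second.
SET max_execution_time = 0.5;
```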
@ -78,7 +78,7 @@ sidebar_label: 2022
* Allow to use String type instead of Binary in Arrow/Parquet/ORC formats. This PR introduces 3 new settings for it: `output_format_arrow_string_as_string`, `output_format_parquet_string_as_string`, `output_format_orc_string_as_string`. Default value for all settings is `false`. [#37327](https://github.com/ClickHouse/ClickHouse/pull/37327) ([Kruglov Pavel](https://github.com/Avogar)). * Allow to use String type instead of Binary in Arrow/Parquet/ORC formats. This PR introduces 3 new settings for it: `output_format_arrow_string_as_string`, `output_format_parquet_string_as_string`, `output_format_orc_string_as_string`. Default value for all settings is `false`. [#37327](https://github.com/ClickHouse/ClickHouse/pull/37327) ([Kruglov Pavel](https://github.com/Avogar)).
* Apply setting `input_format_max_rows_to_read_for_schema_inference` for all read rows in total from all files in globs. Previously setting `input_format_max_rows_to_read_for_schema_inference` was applied for each file in glob separately and in case of huge number of nulls we could read first `input_format_max_rows_to_read_for_schema_inference` rows from each file and get nothing. Also increase default value for this setting to 25000. [#37332](https://github.com/ClickHouse/ClickHouse/pull/37332) ([Kruglov Pavel](https://github.com/Avogar)). * Apply setting `input_format_max_rows_to_read_for_schema_inference` for all read rows in total from all files in globs. Previously setting `input_format_max_rows_to_read_for_schema_inference` was applied for each file in glob separately and in case of huge number of nulls we could read first `input_format_max_rows_to_read_for_schema_inference` rows from each file and get nothing. Also increase default value for this setting to 25000. [#37332](https://github.com/ClickHouse/ClickHouse/pull/37332) ([Kruglov Pavel](https://github.com/Avogar)).
* Allow providing `NULL`/`NOT NULL` right after the type in a column declaration. [#37337](https://github.com/ClickHouse/ClickHouse/pull/37337) ([Igor Nikonov](https://github.com/devcrafter)). * Allow providing `NULL`/`NOT NULL` right after the type in a column declaration. [#37337](https://github.com/ClickHouse/ClickHouse/pull/37337) ([Igor Nikonov](https://github.com/devcrafter)).
* Optimize getting a read buffer for a PARTIALLY_DOWNLOADED file segment. [#37338](https://github.com/ClickHouse/ClickHouse/pull/37338) ([xiedeyantu](https://github.com/xiedeyantu)). * Optimize getting a read buffer for a PARTIALLY_DOWNLOADED file segment. [#37338](https://github.com/ClickHouse/ClickHouse/pull/37338) ([chen](https://github.com/xiedeyantu)).
* Allow to prune the list of files via virtual columns such as `_file` and `_path` when reading from S3. This is for [#37174](https://github.com/ClickHouse/ClickHouse/issues/37174) , [#23494](https://github.com/ClickHouse/ClickHouse/issues/23494). [#37356](https://github.com/ClickHouse/ClickHouse/pull/37356) ([Amos Bird](https://github.com/amosbird)). * Allow to prune the list of files via virtual columns such as `_file` and `_path` when reading from S3. This is for [#37174](https://github.com/ClickHouse/ClickHouse/issues/37174) , [#23494](https://github.com/ClickHouse/ClickHouse/issues/23494). [#37356](https://github.com/ClickHouse/ClickHouse/pull/37356) ([Amos Bird](https://github.com/amosbird)).
* Try to improve short circuit functions processing to fix problems with stress tests. [#37384](https://github.com/ClickHouse/ClickHouse/pull/37384) ([Kruglov Pavel](https://github.com/Avogar)). * Try to improve short circuit functions processing to fix problems with stress tests. [#37384](https://github.com/ClickHouse/ClickHouse/pull/37384) ([Kruglov Pavel](https://github.com/Avogar)).
* Closes [#37395](https://github.com/ClickHouse/ClickHouse/issues/37395). [#37415](https://github.com/ClickHouse/ClickHouse/pull/37415) ([Memo](https://github.com/Joeywzr)). * Closes [#37395](https://github.com/ClickHouse/ClickHouse/issues/37395). [#37415](https://github.com/ClickHouse/ClickHouse/pull/37415) ([Memo](https://github.com/Joeywzr)).
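The `NULL`/`NOT NULL` column declaration entry above can be sketched as follows (hypothetical table name):

```sql
CREATE TABLE t_nullability
(
    a Int32 NULL,      -- expected to be equivalent to Nullable(Int32)
    b String NOT NULL  -- expected to stay a plain String
)
ENGINE = Memory;
```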
@ -117,7 +117,7 @@ sidebar_label: 2022
* Remove recursive submodules, because we don't need them and they can be confusing. Add style check to prevent recursive submodules. This closes [#32821](https://github.com/ClickHouse/ClickHouse/issues/32821). [#37616](https://github.com/ClickHouse/ClickHouse/pull/37616) ([Alexey Milovidov](https://github.com/alexey-milovidov)). * Remove recursive submodules, because we don't need them and they can be confusing. Add style check to prevent recursive submodules. This closes [#32821](https://github.com/ClickHouse/ClickHouse/issues/32821). [#37616](https://github.com/ClickHouse/ClickHouse/pull/37616) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add docs spellcheck to CI. [#37790](https://github.com/ClickHouse/ClickHouse/pull/37790) ([Vladimir C](https://github.com/vdimir)). * Add docs spellcheck to CI. [#37790](https://github.com/ClickHouse/ClickHouse/pull/37790) ([Vladimir C](https://github.com/vdimir)).
* Fix overly aggressive stripping which removed the embedded hash required for checking the consistency of the executable. [#37993](https://github.com/ClickHouse/ClickHouse/pull/37993) ([Robert Schulze](https://github.com/rschu1ze)). * Fix overly aggressive stripping which removed the embedded hash required for checking the consistency of the executable. [#37993](https://github.com/ClickHouse/ClickHouse/pull/37993) ([Robert Schulze](https://github.com/rschu1ze)).
* Fix macOS build: compressor failed. [#38007](https://github.com/ClickHouse/ClickHouse/pull/38007) ([xiedeyantu](https://github.com/xiedeyantu)). * Fix macOS build: compressor failed. [#38007](https://github.com/ClickHouse/ClickHouse/pull/38007) ([chen](https://github.com/xiedeyantu)).
#### Bug Fix (user-visible misbehavior in official stable or prestable release) #### Bug Fix (user-visible misbehavior in official stable or prestable release)
@ -166,7 +166,7 @@ sidebar_label: 2022
* Fix possible incorrect result of `SELECT ... WITH FILL` in the case when `ORDER BY` should be applied after `WITH FILL` result (e.g. for outer query). Incorrect result was caused by optimization for `ORDER BY` expressions ([#35623](https://github.com/ClickHouse/ClickHouse/issues/35623)). Closes [#37904](https://github.com/ClickHouse/ClickHouse/issues/37904). [#37959](https://github.com/ClickHouse/ClickHouse/pull/37959) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)). * Fix possible incorrect result of `SELECT ... WITH FILL` in the case when `ORDER BY` should be applied after `WITH FILL` result (e.g. for outer query). Incorrect result was caused by optimization for `ORDER BY` expressions ([#35623](https://github.com/ClickHouse/ClickHouse/issues/35623)). Closes [#37904](https://github.com/ClickHouse/ClickHouse/issues/37904). [#37959](https://github.com/ClickHouse/ClickHouse/pull/37959) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Add missing default columns when pushing to the target table in WindowView, fix [#37815](https://github.com/ClickHouse/ClickHouse/issues/37815). [#37965](https://github.com/ClickHouse/ClickHouse/pull/37965) ([vxider](https://github.com/Vxider)). * Add missing default columns when pushing to the target table in WindowView, fix [#37815](https://github.com/ClickHouse/ClickHouse/issues/37815). [#37965](https://github.com/ClickHouse/ClickHouse/pull/37965) ([vxider](https://github.com/Vxider)).
* Fixed a stack overflow issue that would cause compilation to fail. [#37996](https://github.com/ClickHouse/ClickHouse/pull/37996) ([Han Shukai](https://github.com/KinderRiven)). * Fixed a stack overflow issue that would cause compilation to fail. [#37996](https://github.com/ClickHouse/ClickHouse/pull/37996) ([Han Shukai](https://github.com/KinderRiven)).
* When `enable_filesystem_query_cache_limit` is enabled, throw "Reserved cache size exceeds the remaining cache size". [#38004](https://github.com/ClickHouse/ClickHouse/pull/38004) ([xiedeyantu](https://github.com/xiedeyantu)). * When `enable_filesystem_query_cache_limit` is enabled, throw "Reserved cache size exceeds the remaining cache size". [#38004](https://github.com/ClickHouse/ClickHouse/pull/38004) ([chen](https://github.com/xiedeyantu)).
* Query, containing ORDER BY ... WITH FILL, can generate extra rows when multiple WITH FILL columns are present. [#38074](https://github.com/ClickHouse/ClickHouse/pull/38074) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)). * Query, containing ORDER BY ... WITH FILL, can generate extra rows when multiple WITH FILL columns are present. [#38074](https://github.com/ClickHouse/ClickHouse/pull/38074) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
#### Bug Fix (user-visible misbehaviour in official stable or prestable release) #### Bug Fix (user-visible misbehaviour in official stable or prestable release)

View File

@ -5,7 +5,7 @@ sidebar_label: 2022
# 2022 Changelog # 2022 Changelog
### ClickHouse release v22.6.2.12-stable FIXME as compared to v22.6.1.1985-stable ### ClickHouse release v22.6.2.12-stable (1fc97f10cbf) FIXME as compared to v22.6.1.1985-stable (7000c4e0033)
#### Improvement #### Improvement
* Backported in [#38484](https://github.com/ClickHouse/ClickHouse/issues/38484): Improve the stability for hive storage integration test. Move the data prepare step into test.py. [#38260](https://github.com/ClickHouse/ClickHouse/pull/38260) ([lgbo](https://github.com/lgbo-ustc)). * Backported in [#38484](https://github.com/ClickHouse/ClickHouse/issues/38484): Improve the stability for hive storage integration test. Move the data prepare step into test.py. [#38260](https://github.com/ClickHouse/ClickHouse/pull/38260) ([lgbo](https://github.com/lgbo-ustc)).

View File

@ -5,7 +5,7 @@ sidebar_label: 2022
# 2022 Changelog # 2022 Changelog
### ClickHouse release v22.6.3.35-stable FIXME as compared to v22.6.2.12-stable ### ClickHouse release v22.6.3.35-stable (d5566f2f2dd) FIXME as compared to v22.6.2.12-stable (1fc97f10cbf)
#### Bug Fix #### Bug Fix
* Backported in [#38812](https://github.com/ClickHouse/ClickHouse/issues/38812): Fix crash when executing GRANT ALL ON *.* with ON CLUSTER. It was broken in https://github.com/ClickHouse/ClickHouse/pull/35767. This closes [#38618](https://github.com/ClickHouse/ClickHouse/issues/38618). [#38674](https://github.com/ClickHouse/ClickHouse/pull/38674) ([Vitaly Baranov](https://github.com/vitlibar)). * Backported in [#38812](https://github.com/ClickHouse/ClickHouse/issues/38812): Fix crash when executing GRANT ALL ON *.* with ON CLUSTER. It was broken in https://github.com/ClickHouse/ClickHouse/pull/35767. This closes [#38618](https://github.com/ClickHouse/ClickHouse/issues/38618). [#38674](https://github.com/ClickHouse/ClickHouse/pull/38674) ([Vitaly Baranov](https://github.com/vitlibar)).

View File

@ -5,7 +5,7 @@ sidebar_label: 2022
# 2022 Changelog # 2022 Changelog
### ClickHouse release v22.6.4.35-stable FIXME as compared to v22.6.3.35-stable ### ClickHouse release v22.6.4.35-stable (b9202cae6f4) FIXME as compared to v22.6.3.35-stable (d5566f2f2dd)
#### Build/Testing/Packaging Improvement #### Build/Testing/Packaging Improvement
* Backported in [#38822](https://github.com/ClickHouse/ClickHouse/issues/38822): - Change `all|noarch` packages to architecture-dependent - Fix some documentation for it - Push aarch64|arm64 packages to artifactory and release assets - Fixes [#36443](https://github.com/ClickHouse/ClickHouse/issues/36443). [#38580](https://github.com/ClickHouse/ClickHouse/pull/38580) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). * Backported in [#38822](https://github.com/ClickHouse/ClickHouse/issues/38822): - Change `all|noarch` packages to architecture-dependent - Fix some documentation for it - Push aarch64|arm64 packages to artifactory and release assets - Fixes [#36443](https://github.com/ClickHouse/ClickHouse/issues/36443). [#38580](https://github.com/ClickHouse/ClickHouse/pull/38580) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

View File

@ -0,0 +1,30 @@
---
sidebar_position: 1
sidebar_label: 2022
---
# 2022 Changelog
### ClickHouse release v22.6.5.22-stable (47ca5f14a34) FIXME as compared to v22.6.4.35-stable (b9202cae6f4)
#### Bug Fix
* Backported in [#39749](https://github.com/ClickHouse/ClickHouse/issues/39749): Fix seeking while reading from encrypted disk. This PR fixes [#38381](https://github.com/ClickHouse/ClickHouse/issues/38381). [#39687](https://github.com/ClickHouse/ClickHouse/pull/39687) ([Vitaly Baranov](https://github.com/vitlibar)).
#### Build/Testing/Packaging Improvement
* Backported in [#39883](https://github.com/ClickHouse/ClickHouse/issues/39883): Former packages used to install systemd.service file to `/etc`. The files there are marked as `conf` and are not cleaned out, and not updated automatically. This PR cleans them out. [#39323](https://github.com/ClickHouse/ClickHouse/pull/39323) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
#### Bug Fix (user-visible misbehavior in official stable or prestable release)
* Backported in [#39207](https://github.com/ClickHouse/ClickHouse/issues/39207): Fix reading of sparse columns from `MergeTree` tables that store their data in S3. [#37978](https://github.com/ClickHouse/ClickHouse/pull/37978) ([Anton Popov](https://github.com/CurtizJ)).
* Backported in [#38932](https://github.com/ClickHouse/ClickHouse/issues/38932): Fix `parallel_view_processing=1` with `optimize_trivial_insert_select=1`. Fix `max_insert_threads` while pushing to views. [#38731](https://github.com/ClickHouse/ClickHouse/pull/38731) ([Azat Khuzhin](https://github.com/azat)).
* Backported in [#39590](https://github.com/ClickHouse/ClickHouse/issues/39590): Fix data race and possible heap-buffer-overflow in Avro format. Closes [#39094](https://github.com/ClickHouse/ClickHouse/issues/39094) Closes [#33652](https://github.com/ClickHouse/ClickHouse/issues/33652). [#39498](https://github.com/ClickHouse/ClickHouse/pull/39498) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#39612](https://github.com/ClickHouse/ClickHouse/issues/39612): Fix bug with maxsplit argument for splitByChar, which was not working correctly. [#39552](https://github.com/ClickHouse/ClickHouse/pull/39552) ([filimonov](https://github.com/filimonov)).
* Backported in [#39791](https://github.com/ClickHouse/ClickHouse/issues/39791): Fix wrong index analysis with tuples and operator `IN`, which could lead to wrong query result. [#39752](https://github.com/ClickHouse/ClickHouse/pull/39752) ([Anton Popov](https://github.com/CurtizJ)).
* Backported in [#39836](https://github.com/ClickHouse/ClickHouse/issues/39836): Fix `CANNOT_READ_ALL_DATA` exception with `local_filesystem_read_method=pread_threadpool`. This bug affected only Linux kernel version 5.9 and 5.10 according to [man](https://manpages.debian.org/testing/manpages-dev/preadv2.2.en.html#BUGS). [#39800](https://github.com/ClickHouse/ClickHouse/pull/39800) ([Anton Popov](https://github.com/CurtizJ)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Fix reading from s3 in some corner cases [#38239](https://github.com/ClickHouse/ClickHouse/pull/38239) ([Anton Popov](https://github.com/CurtizJ)).
* Replace MemoryTrackerBlockerInThread to LockMemoryExceptionInThread [#39619](https://github.com/ClickHouse/ClickHouse/pull/39619) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Change mysql-odbc url [#39702](https://github.com/ClickHouse/ClickHouse/pull/39702) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

View File

@ -0,0 +1,24 @@
---
sidebar_position: 1
sidebar_label: 2022
---
# 2022 Changelog
### ClickHouse release v22.7.2.15-stable (f843089624e) FIXME as compared to v22.7.1.2484-stable (f4f05ec786a)
#### Bug Fix
* Backported in [#39750](https://github.com/ClickHouse/ClickHouse/issues/39750): Fix seeking while reading from encrypted disk. This PR fixes [#38381](https://github.com/ClickHouse/ClickHouse/issues/38381). [#39687](https://github.com/ClickHouse/ClickHouse/pull/39687) ([Vitaly Baranov](https://github.com/vitlibar)).
#### Bug Fix (user-visible misbehavior in official stable or prestable release)
* Backported in [#39591](https://github.com/ClickHouse/ClickHouse/issues/39591): Fix data race and possible heap-buffer-overflow in Avro format. Closes [#39094](https://github.com/ClickHouse/ClickHouse/issues/39094) Closes [#33652](https://github.com/ClickHouse/ClickHouse/issues/33652). [#39498](https://github.com/ClickHouse/ClickHouse/pull/39498) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#39613](https://github.com/ClickHouse/ClickHouse/issues/39613): Fix bug with maxsplit argument for splitByChar, which was not working correctly. [#39552](https://github.com/ClickHouse/ClickHouse/pull/39552) ([filimonov](https://github.com/filimonov)).
* Backported in [#39792](https://github.com/ClickHouse/ClickHouse/issues/39792): Fix wrong index analysis with tuples and operator `IN`, which could lead to wrong query result. [#39752](https://github.com/ClickHouse/ClickHouse/pull/39752) ([Anton Popov](https://github.com/CurtizJ)).
* Backported in [#39837](https://github.com/ClickHouse/ClickHouse/issues/39837): Fix `CANNOT_READ_ALL_DATA` exception with `local_filesystem_read_method=pread_threadpool`. This bug affected only Linux kernel version 5.9 and 5.10 according to [man](https://manpages.debian.org/testing/manpages-dev/preadv2.2.en.html#BUGS). [#39800](https://github.com/ClickHouse/ClickHouse/pull/39800) ([Anton Popov](https://github.com/CurtizJ)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Replace MemoryTrackerBlockerInThread to LockMemoryExceptionInThread [#39619](https://github.com/ClickHouse/ClickHouse/pull/39619) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Change mysql-odbc url [#39702](https://github.com/ClickHouse/ClickHouse/pull/39702) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

View File

@ -0,0 +1,18 @@
---
sidebar_position: 1
sidebar_label: 2022
---
# 2022 Changelog
### ClickHouse release v22.7.3.5-stable (e140b8b5f3a) FIXME as compared to v22.7.2.15-stable (f843089624e)
#### Build/Testing/Packaging Improvement
* Backported in [#39884](https://github.com/ClickHouse/ClickHouse/issues/39884): Former packages used to install systemd.service file to `/etc`. The files there are marked as `conf` and are not cleaned out, and not updated automatically. This PR cleans them out. [#39323](https://github.com/ClickHouse/ClickHouse/pull/39323) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
#### Bug Fix (user-visible misbehavior in official stable or prestable release)
* Backported in [#40045](https://github.com/ClickHouse/ClickHouse/issues/40045): Fix big memory usage during fetches. Fixes [#39915](https://github.com/ClickHouse/ClickHouse/issues/39915). [#39990](https://github.com/ClickHouse/ClickHouse/pull/39990) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).

View File

@ -285,4 +285,9 @@ If you are not interested in functionality provided by third-party libraries, yo
-DENABLE_LIBRARIES=0 -DENABLE_EMBEDDED_COMPILER=0 -DENABLE_LIBRARIES=0 -DENABLE_EMBEDDED_COMPILER=0
``` ```
Compressing the binary at the end of the build may take a while; to skip it, disable the self-extraction feature via
```
-DENABLE_CLICKHOUSE_SELF_EXTRACTING=0
```
In case of problems with any of the development options, you are on your own! In case of problems with any of the development options, you are on your own!

View File

@ -29,7 +29,7 @@ ENGINE = PostgreSQL('host:port', 'database', 'user', 'password'[, `schema`, `use
## Data Types Support {#data_types-support} ## Data Types Support {#data_types-support}
| PostgerSQL | ClickHouse | | PostgreSQL | ClickHouse |
|------------------|--------------------------------------------------------------| |------------------|--------------------------------------------------------------|
| DATE | [Date](../../sql-reference/data-types/date.md) | | DATE | [Date](../../sql-reference/data-types/date.md) |
| TIMESTAMP | [DateTime](../../sql-reference/data-types/datetime.md) | | TIMESTAMP | [DateTime](../../sql-reference/data-types/datetime.md) |
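A hedged usage sketch for the engine declaration and type mapping shown above; the host, database name and credentials are placeholders:

```sql
CREATE DATABASE pg_mirror
ENGINE = PostgreSQL('postgres-host:5432', 'shop', 'reader', 'secret');

-- Tables of the remote database become visible here; per the mapping above,
-- PostgreSQL DATE columns appear as Date and TIMESTAMP columns as DateTime.
SHOW TABLES FROM pg_mirror;
```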

View File

@ -878,8 +878,6 @@ User can assign new big parts to different disks of a [JBOD](https://en.wikipedi
`MergeTree` family table engines can store data to [S3](https://aws.amazon.com/s3/) using a disk with type `s3`. `MergeTree` family table engines can store data to [S3](https://aws.amazon.com/s3/) using a disk with type `s3`.
This feature is under development and not ready for production. There are known drawbacks such as very low performance.
Configuration markup: Configuration markup:
``` xml ``` xml
<storage_configuration> <storage_configuration>


@ -49,10 +49,17 @@ The supported formats are:
| [JSONCompactStringsEachRowWithNamesAndTypes](#jsoncompactstringseachrowwithnamesandtypes) | ✔ | ✔ | | [JSONCompactStringsEachRowWithNamesAndTypes](#jsoncompactstringseachrowwithnamesandtypes) | ✔ | ✔ |
| [TSKV](#tskv) | ✔ | ✔ | | [TSKV](#tskv) | ✔ | ✔ |
| [Pretty](#pretty) | ✗ | ✔ | | [Pretty](#pretty) | ✗ | ✔ |
| [PrettyCompact](#prettycompact) | ✗ | ✔ |
| [PrettyCompactMonoBlock](#prettycompactmonoblock) | ✗ | ✔ |
| [PrettyNoEscapes](#prettynoescapes) | ✗ | ✔ | | [PrettyNoEscapes](#prettynoescapes) | ✗ | ✔ |
| [PrettyMonoBlock](#prettymonoblock) | ✗ | ✔ |
| [PrettyNoEscapesMonoBlock](#prettynoescapesmonoblock) | ✗ | ✔ |
| [PrettyCompact](#prettycompact) | ✗ | ✔ |
| [PrettyCompactNoEscapes](#prettycompactnoescapes) | ✗ | ✔ |
| [PrettyCompactMonoBlock](#prettycompactmonoblock) | ✗ | ✔ |
| [PrettyCompactNoEscapesMonoBlock](#prettycompactnoescapesmonoblock) | ✗ | ✔ |
| [PrettySpace](#prettyspace) | ✗ | ✔ | | [PrettySpace](#prettyspace) | ✗ | ✔ |
| [PrettySpaceNoEscapes](#prettyspacenoescapes) | ✗ | ✔ |
| [PrettySpaceMonoBlock](#prettyspacemonoblock) | ✗ | ✔ |
| [PrettySpaceNoEscapesMonoBlock](#prettyspacenoescapesmonoblock) | ✗ | ✔ |
| [Prometheus](#prometheus) | ✗ | ✔ | | [Prometheus](#prometheus) | ✗ | ✔ |
| [Protobuf](#protobuf) | ✔ | ✔ | | [Protobuf](#protobuf) | ✔ | ✔ |
| [ProtobufSingle](#protobufsingle) | ✔ | ✔ | | [ProtobufSingle](#protobufsingle) | ✔ | ✔ |
@ -1198,18 +1205,9 @@ Extremes:
└────────────┴─────────┘ └────────────┴─────────┘
``` ```
## PrettyCompact {#prettycompact}
Differs from [Pretty](#pretty) in that the grid is drawn between rows and the result is more compact.
This format is used by default in the command-line client in interactive mode.
## PrettyCompactMonoBlock {#prettycompactmonoblock}
Differs from [PrettyCompact](#prettycompact) in that up to 10,000 rows are buffered, then output as a single table, not by blocks.
## PrettyNoEscapes {#prettynoescapes} ## PrettyNoEscapes {#prettynoescapes}
Differs from Pretty in that ANSI-escape sequences aren't used. This is necessary for displaying this format in a browser, as well as for using the watch command-line utility. Differs from [Pretty](#pretty) in that ANSI-escape sequences aren't used. This is necessary for displaying this format in a browser, as well as for using the watch command-line utility.
Example: Example:
@ -1219,19 +1217,49 @@ $ watch -n1 "clickhouse-client --query='SELECT event, value FROM system.events F
You can use the HTTP interface for displaying in the browser. You can use the HTTP interface for displaying in the browser.
### PrettyCompactNoEscapes {#prettycompactnoescapes} ## PrettyMonoBlock {#prettymonoblock}
The same as the previous setting. Differs from [Pretty](#pretty) in that up to 10,000 rows are buffered, then output as a single table, not by blocks.
### PrettySpaceNoEscapes {#prettyspacenoescapes} ## PrettyNoEscapesMonoBlock {#prettynoescapesmonoblock}
The same as the previous setting. Differs from [PrettyNoEscapes](#prettynoescapes) in that up to 10,000 rows are buffered, then output as a single table, not by blocks.
## PrettyCompact {#prettycompact}
Differs from [Pretty](#pretty) in that the grid is drawn between rows and the result is more compact.
This format is used by default in the command-line client in interactive mode.
## PrettyCompactNoEscapes {#prettycompactnoescapes}
Differs from [PrettyCompact](#prettycompact) in that ANSI-escape sequences aren't used. This is necessary for displaying this format in a browser, as well as for using the watch command-line utility.
## PrettyCompactMonoBlock {#prettycompactmonoblock}
Differs from [PrettyCompact](#prettycompact) in that up to 10,000 rows are buffered, then output as a single table, not by blocks.
## PrettyCompactNoEscapesMonoBlock {#prettycompactnoescapesmonoblock}
Differs from [PrettyCompactNoEscapes](#prettycompactnoescapes) in that up to 10,000 rows are buffered, then output as a single table, not by blocks.
## PrettySpace {#prettyspace} ## PrettySpace {#prettyspace}
Differs from [PrettyCompact](#prettycompact) in that whitespace (space characters) is used instead of the grid. Differs from [PrettyCompact](#prettycompact) in that whitespace (space characters) is used instead of the grid.
### Pretty formats settings {#pretty-formats-settings} ## PrettySpaceNoEscapes {#prettyspacenoescapes}
Differs from [PrettySpace](#prettyspace) in that ANSI-escape sequences aren't used. This is necessary for displaying this format in a browser, as well as for using the watch command-line utility.
## PrettySpaceMonoBlock {#prettyspacemonoblock}
Differs from [PrettySpace](#prettyspace) in that up to 10,000 rows are buffered, then output as a single table, not by blocks.
## PrettySpaceNoEscapesMonoBlock {#prettyspacenoescapesmonoblock}
Differs from [PrettySpaceNoEscapes](#prettyspacenoescapes) in that up to 10,000 rows are buffered, then output as a single table, not by blocks.
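As a quick illustration of the MonoBlock buffering described above, here is a hedged usage sketch; the query against `system.numbers` is illustrative only, and the exact frame rendering depends on the client and terminal.

``` sql
-- Buffer up to 10,000 rows and print one table instead of one table per block.
SELECT number AS n, toString(number) AS s
FROM system.numbers
LIMIT 5
FORMAT PrettyCompactMonoBlock;
```

Any other format name from the table above can be substituted after `FORMAT`; only the rendering changes.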
## Pretty formats settings {#pretty-formats-settings}
- [output_format_pretty_max_rows](../operations/settings/settings.md#output_format_pretty_max_rows) - rows limit for Pretty formats. Default value - `10000`. - [output_format_pretty_max_rows](../operations/settings/settings.md#output_format_pretty_max_rows) - rows limit for Pretty formats. Default value - `10000`.
- [output_format_pretty_max_column_pad_width](../operations/settings/settings.md#output_format_pretty_max_column_pad_width) - maximum width to pad all values in a column in Pretty formats. Default value - `250`. - [output_format_pretty_max_column_pad_width](../operations/settings/settings.md#output_format_pretty_max_column_pad_width) - maximum width to pad all values in a column in Pretty formats. Default value - `250`.


@ -5,7 +5,7 @@ sidebar_label: Date32
# Date32 # Date32
A date. Supports the same date range as [Datetime64](../../sql-reference/data-types/datetime64.md). Stored in four bytes as the number of days since 1925-01-01. Allows storing values up to 2283-11-11. A date. Supports the same date range as [Datetime64](../../sql-reference/data-types/datetime64.md). Stored in four bytes as the number of days since 1900-01-01. Allows storing values up to 2299-12-31.
**Examples** **Examples**
@ -36,5 +36,5 @@ SELECT * FROM new;
- [toDate32](../../sql-reference/functions/type-conversion-functions.md#todate32) - [toDate32](../../sql-reference/functions/type-conversion-functions.md#todate32)
- [toDate32OrZero](../../sql-reference/functions/type-conversion-functions.md#todate32-or-zero) - [toDate32OrZero](../../sql-reference/functions/type-conversion-functions.md#todate32-or-zero)
- [toDate32OrNull](../../sql-reference/functions/type-conversion-functions.md#todate32-or-null) - [toDate32OrNull](../../sql-reference/functions/type-conversion-functions.md#todate32-or-null)


@ -18,7 +18,7 @@ DateTime64(precision, [timezone])
Internally, stores data as a number of ticks since epoch start (1970-01-01 00:00:00 UTC) as Int64. The tick resolution is determined by the precision parameter. Additionally, the `DateTime64` type can store time zone that is the same for the entire column, that affects how the values of the `DateTime64` type values are displayed in text format and how the values specified as strings are parsed (2020-01-01 05:00:01.000). The time zone is not stored in the rows of the table (or in resultset), but is stored in the column metadata. See details in [DateTime](../../sql-reference/data-types/datetime.md). Internally, stores data as a number of ticks since epoch start (1970-01-01 00:00:00 UTC) as Int64. The tick resolution is determined by the precision parameter. Additionally, the `DateTime64` type can store time zone that is the same for the entire column, that affects how the values of the `DateTime64` type values are displayed in text format and how the values specified as strings are parsed (2020-01-01 05:00:01.000). The time zone is not stored in the rows of the table (or in resultset), but is stored in the column metadata. See details in [DateTime](../../sql-reference/data-types/datetime.md).
Supported range of values: \[1925-01-01 00:00:00, 2283-11-11 23:59:59.99999999\] (Note: The precision of the maximum value is 8). Supported range of values: \[1900-01-01 00:00:00, 2299-12-31 23:59:59.99999999\] (Note: The precision of the maximum value is 8).
## Examples ## Examples
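A minimal sketch of the precision and column-level time zone described above; the literal and time zone below are arbitrary illustrations, not values taken from this page.

``` sql
-- Precision 3 keeps milliseconds; the time zone is part of the column type, not of each value.
SELECT toDateTime64('2020-01-01 05:00:01.000', 3, 'Asia/Istanbul') AS dt64,
       toTypeName(dt64) AS type;
```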


@ -55,9 +55,9 @@ Differs from intDiv in that it returns zero when dividing by zero or when
## modulo(a, b), a % b operator ## modulo(a, b), a % b operator
Calculates the remainder after division. Calculates the remainder when dividing `a` by `b`.
If arguments are floating-point numbers, they are pre-converted to integers by dropping the decimal portion. The result type is an integer if both inputs are integers. If one of the inputs is a floating-point number, the result is a floating-point number.
The remainder is taken in the same sense as in C++. Truncated division is used for negative numbers. The remainder is computed like in C++. Truncated division is used for negative numbers.
An exception is thrown when dividing by zero or when dividing a minimal negative number by minus one. An exception is thrown when dividing by zero or when dividing a minimal negative number by minus one.
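A short sketch of the rules above; the expected values in the comments follow from truncated division and mixed integer/float typing, and are worth verifying on your own server.

``` sql
SELECT
    11 % 4         AS pos_remainder,   -- 3
    -11 % 4        AS neg_remainder,   -- -3, because truncated division is used
    modulo(11, 4)  AS function_form,   -- same as the % operator
    7.5 % 2        AS float_remainder; -- 1.5, a float because one input is a float
```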
## moduloOrZero(a, b) ## moduloOrZero(a, b)


@ -266,8 +266,8 @@ Result:
└────────────────┘ └────────────────┘
``` ```
:::note :::note
The return type of the `toStartOf*` functions described below is `Date` or `DateTime`. Though these functions can take `DateTime64` as an argument, passing them a `DateTime64` that is out of the normal range (years 1925 - 2283) will give an incorrect result. The return type of the `toStartOf*` functions described below is `Date` or `DateTime`. Though these functions can take `DateTime64` as an argument, passing them a `DateTime64` that is out of the normal range (years 1900 - 2299) will give an incorrect result.
::: :::
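For example, with a `DateTime64` argument inside the supported range the result is the expected `Date`; the timestamp below is an arbitrary illustration.

``` sql
SELECT toStartOfMonth(toDateTime64('2023-04-21 10:20:30.500', 3)) AS month_start; -- 2023-04-01
```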
## toStartOfYear ## toStartOfYear
@ -291,7 +291,7 @@ Returns the date.
Rounds down a date or date with time to the first day of the month. Rounds down a date or date with time to the first day of the month.
Returns the date. Returns the date.
:::note :::note
The behavior of parsing incorrect dates is implementation specific. ClickHouse may return zero date, throw an exception or do “natural” overflow. The behavior of parsing incorrect dates is implementation specific. ClickHouse may return zero date, throw an exception or do “natural” overflow.
::: :::


@ -22,7 +22,7 @@ Consider using the [sipHash64](#hash_functions-siphash64) function instead.
**Arguments** **Arguments**
The function takes a variable number of input parameters. Arguments can be any of the [supported data types](../../sql-reference/data-types/index.md). The function takes a variable number of input parameters. Arguments can be any of the [supported data types](../../sql-reference/data-types/index.md). For some data types, the calculated hash value may be the same for equal values even if the argument types differ (integers of different sizes, named and unnamed `Tuple` with the same data, `Map` and the corresponding `Array(Tuple(key, value))` type with the same data).
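A hedged probe of the note above; whether the two hashes actually coincide for a given pair of types depends on the server version, so treat the query as something to check rather than a guarantee.

``` sql
SELECT
    sipHash64(toUInt8(1))  AS h_uint8,
    sipHash64(toUInt64(1)) AS h_uint64,
    h_uint8 = h_uint64     AS may_collide; -- 1 if the differently typed arguments hash identically
```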
**Returned Value** **Returned Value**
@ -69,7 +69,7 @@ Function [interprets](../../sql-reference/functions/type-conversion-functions.md
**Arguments** **Arguments**
The function takes a variable number of input parameters. Arguments can be any of the [supported data types](../../sql-reference/data-types/index.md). The function takes a variable number of input parameters. Arguments can be any of the [supported data types](../../sql-reference/data-types/index.md). For some data types, the calculated hash value may be the same for equal values even if the argument types differ (integers of different sizes, named and unnamed `Tuple` with the same data, `Map` and the corresponding `Array(Tuple(key, value))` type with the same data).
**Returned Value** **Returned Value**
@ -99,7 +99,7 @@ sipHash128(par1,...)
**Arguments** **Arguments**
The function takes a variable number of input parameters. Arguments can be any of the [supported data types](../../sql-reference/data-types/index.md). The function takes a variable number of input parameters. Arguments can be any of the [supported data types](../../sql-reference/data-types/index.md). For some data types, the calculated hash value may be the same for equal values even if the argument types differ (integers of different sizes, named and unnamed `Tuple` with the same data, `Map` and the corresponding `Array(Tuple(key, value))` type with the same data).
**Returned value** **Returned value**
@ -135,7 +135,7 @@ This is a fast non-cryptographic hash function. It uses the CityHash algorithm f
**Arguments** **Arguments**
The function takes a variable number of input parameters. Arguments can be any of the [supported data types](../../sql-reference/data-types/index.md). The function takes a variable number of input parameters. Arguments can be any of the [supported data types](../../sql-reference/data-types/index.md). For some data types, the calculated hash value may be the same for equal values even if the argument types differ (integers of different sizes, named and unnamed `Tuple` with the same data, `Map` and the corresponding `Array(Tuple(key, value))` type with the same data).
**Returned Value** **Returned Value**
@ -275,7 +275,7 @@ These functions use the `Fingerprint64` and `Hash64` methods respectively from a
**Arguments** **Arguments**
The function takes a variable number of input parameters. Arguments can be any of the [supported data types](../../sql-reference/data-types/index.md). The function takes a variable number of input parameters. Arguments can be any of the [supported data types](../../sql-reference/data-types/index.md). For some data types, the calculated hash value may be the same for equal values even if the argument types differ (integers of different sizes, named and unnamed `Tuple` with the same data, `Map` and the corresponding `Array(Tuple(key, value))` type with the same data).
**Returned Value** **Returned Value**
@ -401,7 +401,7 @@ metroHash64(par1, ...)
**Arguments** **Arguments**
The function takes a variable number of input parameters. Arguments can be any of the [supported data types](../../sql-reference/data-types/index.md). The function takes a variable number of input parameters. Arguments can be any of the [supported data types](../../sql-reference/data-types/index.md). For some data types, the calculated hash value may be the same for equal values even if the argument types differ (integers of different sizes, named and unnamed `Tuple` with the same data, `Map` and the corresponding `Array(Tuple(key, value))` type with the same data).
**Returned Value** **Returned Value**
@ -436,7 +436,7 @@ murmurHash2_64(par1, ...)
**Arguments** **Arguments**
Both functions take a variable number of input parameters. Arguments can be any of the [supported data types](../../sql-reference/data-types/index.md). Both functions take a variable number of input parameters. Arguments can be any of the [supported data types](../../sql-reference/data-types/index.md). For some data types, the calculated hash value may be the same for equal values even if the argument types differ (integers of different sizes, named and unnamed `Tuple` with the same data, `Map` and the corresponding `Array(Tuple(key, value))` type with the same data).
**Returned Value** **Returned Value**
@ -504,7 +504,7 @@ murmurHash3_64(par1, ...)
**Arguments** **Arguments**
Both functions take a variable number of input parameters. Arguments can be any of the [supported data types](../../sql-reference/data-types/index.md). Both functions take a variable number of input parameters. Arguments can be any of the [supported data types](../../sql-reference/data-types/index.md). For some data types, the calculated hash value may be the same for equal values even if the argument types differ (integers of different sizes, named and unnamed `Tuple` with the same data, `Map` and the corresponding `Array(Tuple(key, value))` type with the same data).
**Returned Value** **Returned Value**


@ -218,23 +218,23 @@ SELECT toDate32('1955-01-01') AS value, toTypeName(value);
2. The value is outside the range: 2. The value is outside the range:
``` sql ``` sql
SELECT toDate32('1924-01-01') AS value, toTypeName(value); SELECT toDate32('1899-01-01') AS value, toTypeName(value);
``` ```
``` text ``` text
┌──────value─┬─toTypeName(toDate32('1925-01-01'))─┐ ┌──────value─┬─toTypeName(toDate32('1899-01-01'))─┐
│ 1925-01-01 │ Date32 │ │ 1900-01-01 │ Date32 │
└────────────┴────────────────────────────────────┘ └────────────┴────────────────────────────────────┘
``` ```
3. With `Date`-type argument: 3. With `Date`-type argument:
``` sql ``` sql
SELECT toDate32(toDate('1924-01-01')) AS value, toTypeName(value); SELECT toDate32(toDate('1899-01-01')) AS value, toTypeName(value);
``` ```
``` text ``` text
┌──────value─┬─toTypeName(toDate32(toDate('1924-01-01')))─┐ ┌──────value─┬─toTypeName(toDate32(toDate('1899-01-01')))─┐
│ 1970-01-01 │ Date32 │ │ 1970-01-01 │ Date32 │
└────────────┴────────────────────────────────────────────┘ └────────────┴────────────────────────────────────────────┘
``` ```
@ -248,14 +248,14 @@ The same as [toDate32](#todate32) but returns the min value of [Date32](../../sq
Query: Query:
``` sql ``` sql
SELECT toDate32OrZero('1924-01-01'), toDate32OrZero(''); SELECT toDate32OrZero('1899-01-01'), toDate32OrZero('');
``` ```
Result: Result:
``` text ``` text
┌─toDate32OrZero('1924-01-01')─┬─toDate32OrZero('')─┐ ┌─toDate32OrZero('1899-01-01')─┬─toDate32OrZero('')─┐
│ 1925-01-01 │ 1925-01-01 │ │ 1900-01-01 │ 1900-01-01 │
└──────────────────────────────┴────────────────────┘ └──────────────────────────────┴────────────────────┘
``` ```
@ -1072,7 +1072,7 @@ For all of the formats with separator the function parses months names expressed
Query: Query:
``` sql ``` sql
SELECT parseDateTimeBestEffort('12/12/2020 12:12:57') SELECT parseDateTimeBestEffort('23/10/2020 12:12:57')
AS parseDateTimeBestEffort; AS parseDateTimeBestEffort;
``` ```
@ -1080,7 +1080,7 @@ Result:
``` text ``` text
┌─parseDateTimeBestEffort─┐ ┌─parseDateTimeBestEffort─┐
│ 2020-12-12 12:12:57 │ │ 2020-10-23 12:12:57 │
└─────────────────────────┘ └─────────────────────────┘
``` ```
@ -1117,7 +1117,7 @@ Result:
Query: Query:
``` sql ``` sql
SELECT parseDateTimeBestEffort('2018-12-12 10:12:12') SELECT parseDateTimeBestEffort('2018-10-23 10:12:12')
AS parseDateTimeBestEffort; AS parseDateTimeBestEffort;
``` ```
@ -1125,7 +1125,7 @@ Result:
``` text ``` text
┌─parseDateTimeBestEffort─┐ ┌─parseDateTimeBestEffort─┐
│ 2018-12-12 10:12:12 │ │ 2018-10-23 10:12:12 │
└─────────────────────────┘ └─────────────────────────┘
``` ```
@ -1152,77 +1152,7 @@ Result:
## parseDateTimeBestEffortUS ## parseDateTimeBestEffortUS
This function is similar to [parseDateTimeBestEffort](#parsedatetimebesteffort), the only difference is that this function prefers US date format (`MM/DD/YYYY` etc.) in case of ambiguity. This function behaves like [parseDateTimeBestEffort](#parsedatetimebesteffort) for ISO date formats, e.g. `YYYY-MM-DD hh:mm:ss`, and other date formats where the month and date components can be unambiguously extracted, e.g. `YYYYMMDDhhmmss`, `YYYY-MM`, `DD hh`, or `YYYY-MM-DD hh:mm:ss ±h:mm`. If the month and the date components cannot be unambiguously extracted, e.g. `MM/DD/YYYY`, `MM-DD-YYYY`, or `MM-DD-YY`, it prefers the US date format instead of `DD/MM/YYYY`, `DD-MM-YYYY`, or `DD-MM-YY`. As an exception to the latter, if the month is greater than 12 and less than or equal to 31, this function falls back to the behavior of [parseDateTimeBestEffort](#parsedatetimebesteffort), e.g. `15/08/2020` is parsed as `2020-08-15`.
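A compact sketch of the three cases described above (unambiguous ISO input, ambiguous US-style input, and the month-greater-than-12 fallback); the expected values in the comments follow directly from that description.

``` sql
SELECT
    parseDateTimeBestEffortUS('2020-08-15 10:00:00') AS iso_input,      -- 2020-08-15 10:00:00
    parseDateTimeBestEffortUS('02/10/2021 21:12:57') AS us_preference,  -- 2021-02-10 21:12:57 (February 10)
    parseDateTimeBestEffortUS('15/08/2020 10:00:00') AS fallback;       -- 2020-08-15 10:00:00 (month > 12)
```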
**Syntax**
``` sql
parseDateTimeBestEffortUS(time_string [, time_zone])
```
**Arguments**
- `time_string` — String containing a date and time to convert. [String](../../sql-reference/data-types/string.md).
- `time_zone` — Time zone. The function parses `time_string` according to the time zone. [String](../../sql-reference/data-types/string.md).
**Supported non-standard formats**
- A string containing 9..10 digit [unix timestamp](https://en.wikipedia.org/wiki/Unix_time).
- A string with a date and a time component: `YYYYMMDDhhmmss`, `MM/DD/YYYY hh:mm:ss`, `MM-DD-YY hh:mm`, `YYYY-MM-DD hh:mm:ss`, etc.
- A string with a date, but no time component: `YYYY`, `YYYYMM`, `YYYY*MM`, `MM/DD/YYYY`, `MM-DD-YY` etc.
- A string with a day and time: `DD`, `DD hh`, `DD hh:mm`. In this case, `YYYY-MM` are substituted as `2000-01`.
- A string that includes the date and time along with time zone offset information: `YYYY-MM-DD hh:mm:ss ±h:mm`, etc. For example, `2020-12-12 17:36:00 -5:00`.
**Returned value**
- `time_string` converted to the `DateTime` data type.
**Examples**
Query:
``` sql
SELECT parseDateTimeBestEffortUS('09/12/2020 12:12:57')
AS parseDateTimeBestEffortUS;
```
Result:
``` text
┌─parseDateTimeBestEffortUS─┐
│ 2020-09-12 12:12:57 │
└─────────────────────────——┘
```
Query:
``` sql
SELECT parseDateTimeBestEffortUS('09-12-2020 12:12:57')
AS parseDateTimeBestEffortUS;
```
Result:
``` text
┌─parseDateTimeBestEffortUS─┐
│ 2020-09-12 12:12:57 │
└─────────────────────────——┘
```
Query:
``` sql
SELECT parseDateTimeBestEffortUS('09.12.2020 12:12:57')
AS parseDateTimeBestEffortUS;
```
Result:
``` text
┌─parseDateTimeBestEffortUS─┐
│ 2020-09-12 12:12:57 │
└─────────────────────────——┘
```
## parseDateTimeBestEffortOrNull ## parseDateTimeBestEffortOrNull
## parseDateTime32BestEffortOrNull ## parseDateTime32BestEffortOrNull
@ -1238,174 +1168,10 @@ Same as for [parseDateTimeBestEffort](#parsedatetimebesteffort) except that it r
Same as [parseDateTimeBestEffortUS](#parsedatetimebesteffortUS) function except that it returns `NULL` when it encounters a date format that cannot be processed. Same as [parseDateTimeBestEffortUS](#parsedatetimebesteffortUS) function except that it returns `NULL` when it encounters a date format that cannot be processed.
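Since the detailed examples are removed here, a minimal hedged sketch of the remaining one-line description: the first literal is a valid US-style date, the second is deliberately unparsable.

``` sql
SELECT
    parseDateTimeBestEffortUSOrNull('02/10/2021 21:12:57') AS parsed,    -- 2021-02-10 21:12:57
    parseDateTimeBestEffortUSOrNull('not a date')          AS unparsed;  -- NULL
```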
**Syntax**
``` sql
parseDateTimeBestEffortUSOrNull(time_string[, time_zone])
```
**Parameters**
- `time_string` — String containing a date or date with time to convert. The date must be in the US date format (`MM/DD/YYYY`, etc). [String](../../sql-reference/data-types/string.md).
- `time_zone` — [Timezone](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone). The function parses `time_string` according to the timezone. Optional. [String](../../sql-reference/data-types/string.md).
**Supported non-standard formats**
- A string containing 9..10 digit [unix timestamp](https://en.wikipedia.org/wiki/Unix_time).
- A string with a date and a time components: `YYYYMMDDhhmmss`, `MM/DD/YYYY hh:mm:ss`, `MM-DD-YY hh:mm`, `YYYY-MM-DD hh:mm:ss`, etc.
- A string with a date, but no time component: `YYYY`, `YYYYMM`, `YYYY*MM`, `MM/DD/YYYY`, `MM-DD-YY`, etc.
- A string with a day and time: `DD`, `DD hh`, `DD hh:mm`. In this case, `YYYY-MM` are substituted with `2000-01`.
- A string that includes date and time along with timezone offset information: `YYYY-MM-DD hh:mm:ss ±h:mm`, etc. For example, `2020-12-12 17:36:00 -5:00`.
**Returned values**
- `time_string` converted to the [DateTime](../../sql-reference/data-types/datetime.md) data type.
- `NULL` if the input string cannot be converted to the `DateTime` data type.
**Examples**
Query:
``` sql
SELECT parseDateTimeBestEffortUSOrNull('02/10/2021 21:12:57') AS parseDateTimeBestEffortUSOrNull;
```
Result:
``` text
┌─parseDateTimeBestEffortUSOrNull─┐
│ 2021-02-10 21:12:57 │
└─────────────────────────────────┘
```
Query:
``` sql
SELECT parseDateTimeBestEffortUSOrNull('02-10-2021 21:12:57 GMT', 'Asia/Istanbul') AS parseDateTimeBestEffortUSOrNull;
```
Result:
``` text
┌─parseDateTimeBestEffortUSOrNull─┐
│ 2021-02-11 00:12:57 │
└─────────────────────────────────┘
```
Query:
``` sql
SELECT parseDateTimeBestEffortUSOrNull('02.10.2021') AS parseDateTimeBestEffortUSOrNull;
```
Result:
``` text
┌─parseDateTimeBestEffortUSOrNull─┐
│ 2021-02-10 00:00:00 │
└─────────────────────────────────┘
```
Query:
``` sql
SELECT parseDateTimeBestEffortUSOrNull('10.2021') AS parseDateTimeBestEffortUSOrNull;
```
Result:
``` text
┌─parseDateTimeBestEffortUSOrNull─┐
│ ᴺᵁᴸᴸ │
└─────────────────────────────────┘
```
## parseDateTimeBestEffortUSOrZero ## parseDateTimeBestEffortUSOrZero
Same as [parseDateTimeBestEffortUS](#parsedatetimebesteffortUS) function except that it returns zero date (`1970-01-01`) or zero date with time (`1970-01-01 00:00:00`) when it encounters a date format that cannot be processed. Same as [parseDateTimeBestEffortUS](#parsedatetimebesteffortUS) function except that it returns zero date (`1970-01-01`) or zero date with time (`1970-01-01 00:00:00`) when it encounters a date format that cannot be processed.
**Syntax**
``` sql
parseDateTimeBestEffortUSOrZero(time_string[, time_zone])
```
**Parameters**
- `time_string` — String containing a date or date with time to convert. The date must be in the US date format (`MM/DD/YYYY`, etc). [String](../../sql-reference/data-types/string.md).
- `time_zone` — [Timezone](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone). The function parses `time_string` according to the timezone. Optional. [String](../../sql-reference/data-types/string.md).
**Supported non-standard formats**
- A string containing 9..10 digit [unix timestamp](https://en.wikipedia.org/wiki/Unix_time).
- A string with a date and a time components: `YYYYMMDDhhmmss`, `MM/DD/YYYY hh:mm:ss`, `MM-DD-YY hh:mm`, `YYYY-MM-DD hh:mm:ss`, etc.
- A string with a date, but no time component: `YYYY`, `YYYYMM`, `YYYY*MM`, `MM/DD/YYYY`, `MM-DD-YY`, etc.
- A string with a day and time: `DD`, `DD hh`, `DD hh:mm`. In this case, `YYYY-MM` are substituted with `2000-01`.
- A string that includes date and time along with timezone offset information: `YYYY-MM-DD hh:mm:ss ±h:mm`, etc. For example, `2020-12-12 17:36:00 -5:00`.
**Returned values**
- `time_string` converted to the [DateTime](../../sql-reference/data-types/datetime.md) data type.
- Zero date or zero date with time if the input string cannot be converted to the `DateTime` data type.
**Examples**
Query:
``` sql
SELECT parseDateTimeBestEffortUSOrZero('02/10/2021 21:12:57') AS parseDateTimeBestEffortUSOrZero;
```
Result:
``` text
┌─parseDateTimeBestEffortUSOrZero─┐
│ 2021-02-10 21:12:57 │
└─────────────────────────────────┘
```
Query:
``` sql
SELECT parseDateTimeBestEffortUSOrZero('02-10-2021 21:12:57 GMT', 'Asia/Istanbul') AS parseDateTimeBestEffortUSOrZero;
```
Result:
``` text
┌─parseDateTimeBestEffortUSOrZero─┐
│ 2021-02-11 00:12:57 │
└─────────────────────────────────┘
```
Query:
``` sql
SELECT parseDateTimeBestEffortUSOrZero('02.10.2021') AS parseDateTimeBestEffortUSOrZero;
```
Result:
``` text
┌─parseDateTimeBestEffortUSOrZero─┐
│ 2021-02-10 00:00:00 │
└─────────────────────────────────┘
```
Query:
``` sql
SELECT parseDateTimeBestEffortUSOrZero('02.2021') AS parseDateTimeBestEffortUSOrZero;
```
Result:
``` text
┌─parseDateTimeBestEffortUSOrZero─┐
│ 1970-01-01 00:00:00 │
└─────────────────────────────────┘
```
## parseDateTime64BestEffort ## parseDateTime64BestEffort
Same as the [parseDateTimeBestEffort](#parsedatetimebesteffort) function but also parses milliseconds and microseconds and returns the [DateTime](../../sql-reference/functions/type-conversion-functions.md#data_type-datetime) data type. Same as the [parseDateTimeBestEffort](#parsedatetimebesteffort) function but also parses milliseconds and microseconds and returns the [DateTime](../../sql-reference/functions/type-conversion-functions.md#data_type-datetime) data type.
@ -1452,18 +1218,30 @@ Result:
└────────────────────────────┴────────────────────────────────┘ └────────────────────────────┴────────────────────────────────┘
``` ```
## parseDateTime64BestEffortUS
Same as for [parseDateTime64BestEffort](#parsedatetime64besteffort), except that this function prefers US date format (`MM/DD/YYYY` etc.) in case of ambiguity.
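A hedged usage sketch for the new function; the optional precision argument is assumed to mirror [parseDateTime64BestEffort](#parsedatetime64besteffort), and the expected values follow from the US-format preference.

``` sql
SELECT parseDateTime64BestEffortUS('02/10/2021 21:12:57.123', 3) AS value, -- 2021-02-10 21:12:57.123
       toTypeName(value) AS type;                                          -- DateTime64(3)
```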
## parseDateTime64BestEffortOrNull ## parseDateTime64BestEffortOrNull
Same as for [parseDateTime64BestEffort](#parsedatetime64besteffort) except that it returns `NULL` when it encounters a date format that cannot be processed. Same as for [parseDateTime64BestEffort](#parsedatetime64besteffort) except that it returns `NULL` when it encounters a date format that cannot be processed.
## parseDateTime64BestEffortOrZero ## parseDateTime64BestEffortOrZero
Same as for [parseDateTime64BestEffort](#parsedatetimebesteffort) except that it returns zero date or zero date time when it encounters a date format that cannot be processed. Same as for [parseDateTime64BestEffort](#parsedatetime64besteffort) except that it returns zero date or zero date time when it encounters a date format that cannot be processed.
## parseDateTime64BestEffortUSOrNull
Same as for [parseDateTime64BestEffort](#parsedatetime64besteffort), except that this function prefers US date format (`MM/DD/YYYY` etc.) in case of ambiguity and returns `NULL` when it encounters a date format that cannot be processed.
## parseDateTime64BestEffortUSOrZero
Same as for [parseDateTime64BestEffort](#parsedatetime64besteffort), except that this function prefers US date format (`MM/DD/YYYY` etc.) in case of ambiguity and returns zero date or zero date time when it encounters a date format that cannot be processed.
## toLowCardinality ## toLowCardinality
Converts input parameter to the [LowCardianlity](../../sql-reference/data-types/lowcardinality.md) version of same data type. Converts input parameter to the [LowCardinality](../../sql-reference/data-types/lowcardinality.md) version of same data type.
To convert data from the `LowCardinality` data type use the [CAST](#type_conversion_function-cast) function. For example, `CAST(x as String)`. To convert data from the `LowCardinality` data type use the [CAST](#type_conversion_function-cast) function. For example, `CAST(x as String)`.
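A minimal round-trip sketch of the two conversions mentioned above.

``` sql
SELECT
    toLowCardinality('1')          AS lc,         -- wrapped value
    toTypeName(lc)                 AS lc_type,    -- LowCardinality(String)
    toTypeName(CAST(lc AS String)) AS plain_type; -- String again
```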


@ -34,7 +34,7 @@ CREATE TABLE table_with_ttl
) )
ENGINE MergeTree() ENGINE MergeTree()
ORDER BY tuple() ORDER BY tuple()
TTL event_time + INTERVAL 3 MONTH; TTL event_time + INTERVAL 3 MONTH
SETTINGS min_bytes_for_wide_part = 0; SETTINGS min_bytes_for_wide_part = 0;
INSERT INTO table_with_ttl VALUES (now(), 1, 'username1'); INSERT INTO table_with_ttl VALUES (now(), 1, 'username1');


@ -9,13 +9,13 @@ Table functions are methods for constructing tables.
You can use table functions in: You can use table functions in:
- [FROM](../../sql-reference/statements/select/from.md) clause of the `SELECT` query. - [FROM](../../sql-reference/statements/select/from.md) clause of the `SELECT` query.
The method for creating a temporary table that is available only in the current query. The table is deleted when the query finishes. The method for creating a temporary table that is available only in the current query. The table is deleted when the query finishes.
- [CREATE TABLE AS table_function()](../../sql-reference/statements/create/table.md) query. - [CREATE TABLE AS table_function()](../../sql-reference/statements/create/table.md) query.
It's one of the methods of creating a table. It's one of the methods of creating a table.
- [INSERT INTO TABLE FUNCTION](../../sql-reference/statements/insert-into.md#inserting-into-table-function) query. - [INSERT INTO TABLE FUNCTION](../../sql-reference/statements/insert-into.md#inserting-into-table-function) query.
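A hedged sketch of the three usages listed above, using the `file` table function; `data.csv` and `out.csv` are hypothetical files under the server's `user_files_path`, and the column structure is invented for the example.

``` sql
-- FROM clause: a temporary table that exists only for this query.
SELECT * FROM file('data.csv', 'CSV', 'id UInt32, name String') LIMIT 10;

-- CREATE TABLE AS table_function(): materialize the same source as a table.
CREATE TABLE csv_import AS file('data.csv', 'CSV', 'id UInt32, name String');

-- INSERT INTO TABLE FUNCTION: write through a table function.
INSERT INTO TABLE FUNCTION file('out.csv', 'CSV', 'id UInt32, name String') VALUES (1, 'a');
```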
@ -38,4 +38,3 @@ You cant use table functions if the [allow_ddl](../../operations/settings/per
| [s3](../../sql-reference/table-functions/s3.md) | Creates a [S3](../../engines/table-engines/integrations/s3.md)-engine table. | | [s3](../../sql-reference/table-functions/s3.md) | Creates a [S3](../../engines/table-engines/integrations/s3.md)-engine table. |
| [sqlite](../../sql-reference/table-functions/sqlite.md) | Creates a [sqlite](../../engines/table-engines/integrations/sqlite.md)-engine table. | | [sqlite](../../sql-reference/table-functions/sqlite.md) | Creates a [sqlite](../../engines/table-engines/integrations/sqlite.md)-engine table. |
[Original article](https://clickhouse.com/docs/en/sql-reference/table-functions/) <!--hide-->


@ -8,6 +8,7 @@ sidebar_label: Сборка на Mac OS X
:::info "Вам не нужно собирать ClickHouse самостоятельно" :::info "Вам не нужно собирать ClickHouse самостоятельно"
Вы можете установить предварительно собранный ClickHouse, как описано в [Быстром старте](https://clickhouse.com/#quick-start). Вы можете установить предварительно собранный ClickHouse, как описано в [Быстром старте](https://clickhouse.com/#quick-start).
Следуйте инструкциям по установке для `macOS (Intel)` или `macOS (Apple Silicon)`. Следуйте инструкциям по установке для `macOS (Intel)` или `macOS (Apple Silicon)`.
:::
Сборка должна запускаться с x86_64 (Intel) на macOS версии 10.15 (Catalina) и выше в последней версии компилятора Xcode's native AppleClang, Homebrew's vanilla Clang или в GCC-компиляторах. Сборка должна запускаться с x86_64 (Intel) на macOS версии 10.15 (Catalina) и выше в последней версии компилятора Xcode's native AppleClang, Homebrew's vanilla Clang или в GCC-компиляторах.
@ -90,6 +91,7 @@ $ /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/
:::info "Note" :::info "Note"
You will need the `sudo` command.
:::
1. Create the file `/Library/LaunchDaemons/limit.maxfiles.plist` and put the following into it:


@ -29,7 +29,7 @@ ENGINE = PostgreSQL('host:port', 'database', 'user', 'password'[, `schema`, `use
## Supported Data Types {#data_types-support}
| PostgerSQL | ClickHouse | | PostgreSQL | ClickHouse |
|------------------|--------------------------------------------------------------| |------------------|--------------------------------------------------------------|
| DATE | [Date](../../sql-reference/data-types/date.md) | | DATE | [Date](../../sql-reference/data-types/date.md) |
| TIMESTAMP | [DateTime](../../sql-reference/data-types/datetime.md) | | TIMESTAMP | [DateTime](../../sql-reference/data-types/datetime.md) |


@ -49,6 +49,7 @@ PostgreSQL массивы конвертируются в массивы ClickHo
:::info "Внимание" :::info "Внимание"
Будьте внимательны, в PostgreSQL массивы, созданные как `type_name[]`, являются многомерными и могут содержать в себе разное количество измерений в разных строках одной таблицы. Внутри ClickHouse допустимы только многомерные массивы с одинаковым кол-вом измерений во всех строках таблицы. Будьте внимательны, в PostgreSQL массивы, созданные как `type_name[]`, являются многомерными и могут содержать в себе разное количество измерений в разных строках одной таблицы. Внутри ClickHouse допустимы только многомерные массивы с одинаковым кол-вом измерений во всех строках таблицы.
:::
Поддерживает несколько реплик, которые должны быть перечислены через `|`. Например: Поддерживает несколько реплик, которые должны быть перечислены через `|`. Например:


@ -40,6 +40,7 @@ ORDER BY (CounterID, StartDate, intHash32(UserID));
:::info "Info" :::info "Info"
Не рекомендуется делать слишком гранулированное партиционирование то есть задавать партиции по столбцу, в котором будет слишком большой разброс значений (речь идет о порядке более тысячи партиций). Это приведет к скоплению большого числа файлов и файловых дескрипторов в системе, что может значительно снизить производительность запросов `SELECT`. Не рекомендуется делать слишком гранулированное партиционирование то есть задавать партиции по столбцу, в котором будет слишком большой разброс значений (речь идет о порядке более тысячи партиций). Это приведет к скоплению большого числа файлов и файловых дескрипторов в системе, что может значительно снизить производительность запросов `SELECT`.
:::
Чтобы получить набор кусков и партиций таблицы, можно воспользоваться системной таблицей [system.parts](../../../engines/table-engines/mergetree-family/custom-partitioning-key.md#system_tables-parts). В качестве примера рассмотрим таблицу `visits`, в которой задано партиционирование по месяцам. Выполним `SELECT` для таблицы `system.parts`: Чтобы получить набор кусков и партиций таблицы, можно воспользоваться системной таблицей [system.parts](../../../engines/table-engines/mergetree-family/custom-partitioning-key.md#system_tables-parts). В качестве примера рассмотрим таблицу `visits`, в которой задано партиционирование по месяцам. Выполним `SELECT` для таблицы `system.parts`:
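The documentation's own query lies outside this hunk; a hedged reconstruction of the idea, with the column list trimmed for brevity:

``` sql
SELECT partition, name, active
FROM system.parts
WHERE table = 'visits';
```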
@ -80,6 +81,7 @@ WHERE table = 'visits'
:::info "Info" :::info "Info"
Названия кусков для таблиц старого типа образуются следующим образом: `20190117_20190123_2_2_0` (минимальная дата _ максимальная дата _ номер минимального блока _ номер максимального блока _ уровень). Названия кусков для таблиц старого типа образуются следующим образом: `20190117_20190123_2_2_0` (минимальная дата _ максимальная дата _ номер минимального блока _ номер максимального блока _ уровень).
:::
Как видно из примера выше, таблица содержит несколько отдельных кусков для одной и той же партиции (например, куски `201901_1_3_1` и `201901_1_9_2` принадлежат партиции `201901`). Это означает, что эти куски еще не были объединены в файловой системе они хранятся отдельно. После того как будет выполнено автоматическое слияние данных (выполняется примерно спустя 10 минут после вставки данных), исходные куски будут объединены в один более крупный кусок и помечены как неактивные. Как видно из примера выше, таблица содержит несколько отдельных кусков для одной и той же партиции (например, куски `201901_1_3_1` и `201901_1_9_2` принадлежат партиции `201901`). Это означает, что эти куски еще не были объединены в файловой системе они хранятся отдельно. После того как будет выполнено автоматическое слияние данных (выполняется примерно спустя 10 минут после вставки данных), исходные куски будут объединены в один более крупный кусок и помечены как неактивные.


@ -14,3 +14,4 @@ sidebar_position: 10
:::info "Забавный факт" :::info "Забавный факт"
Спустя годы после того, как ClickHouse получил свое название, принцип комбинирования двух слов, каждое из которых имеет подходящий смысл, был признан лучшим способом назвать базу данных в [исследовании Andy Pavlo](https://www.cs.cmu.edu/~pavlo/blog/2020/03/on-naming-a-database-management-system.html), Associate Professor of Databases в Carnegie Mellon University. ClickHouse разделил награду "за лучшее название СУБД" с Postgres. Спустя годы после того, как ClickHouse получил свое название, принцип комбинирования двух слов, каждое из которых имеет подходящий смысл, был признан лучшим способом назвать базу данных в [исследовании Andy Pavlo](https://www.cs.cmu.edu/~pavlo/blog/2020/03/on-naming-a-database-management-system.html), Associate Professor of Databases в Carnegie Mellon University. ClickHouse разделил награду "за лучшее название СУБД" с Postgres.
:::


@ -20,5 +20,6 @@ sidebar_label: Общие вопросы
:::info "Если вы не нашли то, что искали:" :::info "Если вы не нашли то, что искали:"
Загляните в другие категории F.A.Q. или поищите в остальных разделах документации, ориентируясь по оглавлению слева. Загляните в другие категории F.A.Q. или поищите в остальных разделах документации, ориентируясь по оглавлению слева.
:::
[Original article](https://clickhouse.com/docs/ru/faq/general/) <!--hide--> [Original article](https://clickhouse.com/docs/ru/faq/general/) <!--hide-->


@ -60,3 +60,4 @@ sidebar_position: 8
- Rely on metrics collected while working with real data.
- Test performance as part of CI.
- Measure and analyze everything you possibly can.
:::


@ -15,5 +15,6 @@ sidebar_label: Интеграция
:::info "Если вы не нашли то, что искали" :::info "Если вы не нашли то, что искали"
Загляните в другие подразделы F.A.Q. или поищите в остальных разделах документации, ориентируйтесь по оглавлению слева. Загляните в другие подразделы F.A.Q. или поищите в остальных разделах документации, ориентируйтесь по оглавлению слева.
:::
[Original article](https://clickhouse.com/docs/ru/faq/integration/) [Original article](https://clickhouse.com/docs/ru/faq/integration/)


@ -14,5 +14,6 @@ sidebar_label: Операции
:::info "Если вы не нашли то, что искали" :::info "Если вы не нашли то, что искали"
Загляните в другие подразделы F.A.Q. или поищите в остальных разделах документации, ориентируйтесь по оглавлению слева. Загляните в другие подразделы F.A.Q. или поищите в остальных разделах документации, ориентируйтесь по оглавлению слева.
:::
[Original article](https://clickhouse.com/docs/en/faq/operations/) [Original article](https://clickhouse.com/docs/en/faq/operations/)


@ -293,6 +293,7 @@ $ clickhouse-client --query "SELECT COUNT(*) FROM datasets.trips_mergetree"
:::info "Info" :::info "Info"
Если вы собираетесь выполнять запросы, приведенные ниже, то к имени таблицы Если вы собираетесь выполнять запросы, приведенные ниже, то к имени таблицы
нужно добавить имя базы, `datasets.trips_mergetree`. нужно добавить имя базы, `datasets.trips_mergetree`.
:::
## Results on a Single Server {#rezultaty-na-odnom-servere}


@ -157,6 +157,7 @@ $ clickhouse-client --query "SELECT COUNT(*) FROM datasets.ontime"
:::info "Info" :::info "Info"
Если вы собираетесь выполнять запросы, приведенные ниже, то к имени таблицы Если вы собираетесь выполнять запросы, приведенные ниже, то к имени таблицы
нужно добавить имя базы, `datasets.ontime`. нужно добавить имя базы, `datasets.ontime`.
:::
## Queries: {#zaprosy}


@ -99,6 +99,7 @@ ClickHouse предоставляет возможность аутентифи
:::info "" :::info ""
Ещё раз отметим, что кроме `users.xml`, необходимо также включить Kerberos в `config.xml`. Ещё раз отметим, что кроме `users.xml`, необходимо также включить Kerberos в `config.xml`.
:::
### Enabling Kerberos using SQL {#enabling-kerberos-using-sql}


@ -174,6 +174,7 @@ ClickHouse проверяет условия для `min_part_size` и `min_part
:::info "Примечание" :::info "Примечание"
Жесткое ограничение настраивается с помощью системных инструментов. Жесткое ограничение настраивается с помощью системных инструментов.
:::
**Пример** **Пример**
@ -706,6 +707,7 @@ ClickHouse поддерживает динамическое изменение
:::info "Примечание" :::info "Примечание"
Параметры этих настроек могут быть изменены во время выполнения запросов и вступят в силу немедленно. Запросы, которые уже запущены, выполнятся без изменений. Параметры этих настроек могут быть изменены во время выполнения запросов и вступят в силу немедленно. Запросы, которые уже запущены, выполнятся без изменений.
:::
Возможные значения: Возможные значения:
@ -726,6 +728,7 @@ ClickHouse поддерживает динамическое изменение
:::info "Примечание" :::info "Примечание"
Параметры этих настроек могут быть изменены во время выполнения запросов и вступят в силу немедленно. Запросы, которые уже запущены, выполнятся без изменений. Параметры этих настроек могут быть изменены во время выполнения запросов и вступят в силу немедленно. Запросы, которые уже запущены, выполнятся без изменений.
:::
Возможные значения: Возможные значения:
@ -746,6 +749,7 @@ ClickHouse поддерживает динамическое изменение
:::info "Примечание" :::info "Примечание"
Параметры этих настроек могут быть изменены во время выполнения запросов и вступят в силу немедленно. Запросы, которые уже запущены, выполнятся без изменений. Параметры этих настроек могут быть изменены во время выполнения запросов и вступят в силу немедленно. Запросы, которые уже запущены, выполнятся без изменений.
:::
Возможные значения: Возможные значения:


@ -30,6 +30,7 @@
:::info "Замечание" :::info "Замечание"
Даже если `parts_to_do = 0`, для реплицированной таблицы возможна ситуация, когда мутация ещё не завершена из-за долго выполняющейся операции `INSERT`, которая добавляет данные, которые нужно будет мутировать. Даже если `parts_to_do = 0`, для реплицированной таблицы возможна ситуация, когда мутация ещё не завершена из-за долго выполняющейся операции `INSERT`, которая добавляет данные, которые нужно будет мутировать.
:::
Если во время мутации какого-либо куска возникли проблемы, заполняются следующие столбцы: Если во время мутации какого-либо куска возникли проблемы, заполняются следующие столбцы:


@ -8,6 +8,7 @@ sidebar_position: 141
:::info "Примечание" :::info "Примечание"
Чтобы эта функция работала должным образом, исходные данные должны быть отсортированы. В [материализованном представлении](../../../sql-reference/statements/create/view.md#materialized) вместо нее рекомендуется использовать [deltaSumTimestamp](../../../sql-reference/aggregate-functions/reference/deltasumtimestamp.md#agg_functions-deltasumtimestamp). Чтобы эта функция работала должным образом, исходные данные должны быть отсортированы. В [материализованном представлении](../../../sql-reference/statements/create/view.md#materialized) вместо нее рекомендуется использовать [deltaSumTimestamp](../../../sql-reference/aggregate-functions/reference/deltasumtimestamp.md#agg_functions-deltasumtimestamp).
:::
**Синтаксис** **Синтаксис**


@ -20,6 +20,7 @@ intervalLengthSum(start, end)
:::info "Примечание" :::info "Примечание"
Аргументы должны быть одного типа. В противном случае ClickHouse сгенерирует исключение. Аргументы должны быть одного типа. В противном случае ClickHouse сгенерирует исключение.
:::
**Возвращаемое значение** **Возвращаемое значение**


@ -5,7 +5,7 @@ sidebar_label: Date32
# Date32 {#data_type-datetime32} # Date32 {#data_type-datetime32}
A date. Supports the same date range as the [Datetime64](../../sql-reference/data-types/datetime64.md) type. Stored in four bytes as the number of days from 1925-01-01 to 2283-11-11. A date. Supports the same date range as the [Datetime64](../../sql-reference/data-types/datetime64.md) type. Stored in four bytes as the number of days from 1900-01-01 to 2299-12-31.
**Example**
@ -36,5 +36,5 @@ SELECT * FROM new;
- [toDate32](../../sql-reference/functions/type-conversion-functions.md#todate32) - [toDate32](../../sql-reference/functions/type-conversion-functions.md#todate32)
- [toDate32OrZero](../../sql-reference/functions/type-conversion-functions.md#todate32-or-zero) - [toDate32OrZero](../../sql-reference/functions/type-conversion-functions.md#todate32-or-zero)
- [toDate32OrNull](../../sql-reference/functions/type-conversion-functions.md#todate32-or-null) - [toDate32OrNull](../../sql-reference/functions/type-conversion-functions.md#todate32-or-null)


@ -18,7 +18,7 @@ DateTime64(precision, [timezone])
Internally, stores data as the number of 'ticks' since the start of the epoch (1970-01-01 00:00:00 UTC) as Int64. The tick resolution is determined by the precision parameter. Additionally, the `DateTime64` type can store a time zone that is the same for the entire column; it affects how `DateTime64` values are displayed in text form and how values specified as strings are parsed (2020-01-01 05:00:01.000). The time zone is not stored in the table rows (or in the result set) but in the column metadata. See [DateTime](datetime.md) for details.
Supported range of values: \[1925-01-01 00:00:00, 2283-11-11 23:59:59.99999999\] (note: the precision of the maximum value is 8). Supported range of values: \[1900-01-01 00:00:00, 2299-12-31 23:59:59.99999999\] (note: the precision of the maximum value is 8).
## Examples {#examples}


@ -26,6 +26,7 @@ sidebar_label: Nullable
:::info "Info" :::info "Info"
Почти всегда использование `Nullable` снижает производительность, учитывайте это при проектировании своих баз. Почти всегда использование `Nullable` снижает производительность, учитывайте это при проектировании своих баз.
:::
## Поиск NULL {#finding-null} ## Поиск NULL {#finding-null}


@ -464,6 +464,7 @@ SOURCE(ODBC(
:::info "Примечание" :::info "Примечание"
Поля `table` и `query` не могут быть использованы вместе. Также обязательно должен быть один из источников данных: `table` или `query`. Поля `table` и `query` не могут быть использованы вместе. Также обязательно должен быть один из источников данных: `table` или `query`.
:::
ClickHouse получает от ODBC-драйвера информацию о квотировании и квотирует настройки в запросах к драйверу, поэтому имя таблицы нужно указывать в соответствии с регистром имени таблицы в базе данных. ClickHouse получает от ODBC-драйвера информацию о квотировании и квотирует настройки в запросах к драйверу, поэтому имя таблицы нужно указывать в соответствии с регистром имени таблицы в базе данных.
@ -543,6 +544,7 @@ SOURCE(MYSQL(
:::info "Примечание" :::info "Примечание"
Поля `table` или `where` не могут быть использованы вместе с полем `query`. Также обязательно должен быть один из источников данных: `table` или `query`. Поля `table` или `where` не могут быть использованы вместе с полем `query`. Также обязательно должен быть один из источников данных: `table` или `query`.
Явный параметр `secure` отсутствует. Автоматически поддержана работа в обоих случаях: когда установка SSL-соединения необходима и когда нет. Явный параметр `secure` отсутствует. Автоматически поддержана работа в обоих случаях: когда установка SSL-соединения необходима и когда нет.
:::
MySQL можно подключить на локальном хосте через сокеты, для этого необходимо задать `host` и `socket`. MySQL можно подключить на локальном хосте через сокеты, для этого необходимо задать `host` и `socket`.
@ -633,6 +635,7 @@ SOURCE(CLICKHOUSE(
:::info "Примечание" :::info "Примечание"
Поля `table` или `where` не могут быть использованы вместе с полем `query`. Также обязательно должен быть один из источников данных: `table` или `query`. Поля `table` или `where` не могут быть использованы вместе с полем `query`. Также обязательно должен быть один из источников данных: `table` или `query`.
:::
### MongoDB {#dicts-external_dicts_dict_sources-mongodb} ### MongoDB {#dicts-external_dicts_dict_sources-mongodb}
@ -748,6 +751,7 @@ SOURCE(REDIS(
:::info "Примечание" :::info "Примечание"
Поля `column_family` или `where` не могут быть использованы вместе с полем `query`. Также обязательно должен быть один из источников данных: `column_family` или `query`. Поля `column_family` или `where` не могут быть использованы вместе с полем `query`. Также обязательно должен быть один из источников данных: `column_family` или `query`.
:::
### PostgreSQL {#dicts-external_dicts_dict_sources-postgresql} ### PostgreSQL {#dicts-external_dicts_dict_sources-postgresql}
@ -804,3 +808,4 @@ SOURCE(POSTGRESQL(
:::info "Примечание" :::info "Примечание"
Поля `table` или `where` не могут быть использованы вместе с полем `query`. Также обязательно должен быть один из источников данных: `table` или `query`. Поля `table` или `where` не могут быть использованы вместе с полем `query`. Также обязательно должен быть один из источников данных: `table` или `query`.
:::


@ -56,7 +56,7 @@ SELECT toTypeName(0), toTypeName(0 + 0), toTypeName(0 + 0 + 0), toTypeName(0 + 0
## modulo(a, b), the a % b operator {#modulo}
Calculates the remainder after division.
If the arguments are floating-point numbers, they are pre-converted to integers by dropping the fractional part. The result type is an integer if both arguments are integers; if one of the arguments is a floating-point number, the result is a floating-point number.
The remainder is taken in the same sense as in C++; in effect, truncated division is used for negative numbers.
An exception is thrown when dividing by zero or when dividing a minimal negative number by minus one.

Some files were not shown because too many files have changed in this diff.