Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-12-18 04:12:19 +00:00)

Commit 9270deb83e: Merge branch 'master' of github.com:ClickHouse/ClickHouse into fix_00098_primary_key_memory_allocated
@@ -1,14 +0,0 @@ (deleted file)
ARG FROM_TAG=latest
FROM clickhouse/stateless-test:$FROM_TAG

USER root

RUN apt-get update -y \
    && env DEBIAN_FRONTEND=noninteractive \
        apt-get install --yes --no-install-recommends \
            nodejs \
            npm \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/* \

USER clickhouse
@@ -1,117 +0,0 @@ (deleted file)
# docker build -t clickhouse/stateless-test .
FROM ubuntu:22.04

# ARG for quick switch to a given ubuntu mirror
ARG apt_archive="http://archive.ubuntu.com"
RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list

ARG odbc_driver_url="https://github.com/ClickHouse/clickhouse-odbc/releases/download/v1.1.6.20200320/clickhouse-odbc-1.1.6-Linux.tar.gz"


RUN mkdir /etc/clickhouse-server /etc/clickhouse-keeper /etc/clickhouse-client && chmod 777 /etc/clickhouse-* \
    && mkdir -p /var/lib/clickhouse /var/log/clickhouse-server && chmod 777 /var/log/clickhouse-server /var/lib/clickhouse

RUN addgroup --gid 1001 clickhouse && adduser --uid 1001 --gid 1001 --disabled-password clickhouse

# moreutils - provides ts fo FT
# expect, bzip2 - requried by FT
# bsdmainutils - provides hexdump for FT

# golang version 1.13 on Ubuntu 20 is enough for tests
RUN apt-get update -y \
    && env DEBIAN_FRONTEND=noninteractive \
        apt-get install --yes --no-install-recommends \
            awscli \
            brotli \
            lz4 \
            expect \
            moreutils \
            bzip2 \
            bsdmainutils \
            golang \
            lsof \
            mysql-client=8.0* \
            ncdu \
            netcat-openbsd \
            nodejs \
            npm \
            odbcinst \
            openjdk-11-jre-headless \
            openssl \
            postgresql-client \
            python3 \
            python3-pip \
            qemu-user-static \
            sqlite3 \
            sudo \
            tree \
            unixodbc \
            rustc \
            cargo \
            zstd \
            file \
            jq \
            pv \
            zip \
            unzip \
            p7zip-full \
            curl \
            wget \
            xz-utils \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*

ARG PROTOC_VERSION=25.1
RUN curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOC_VERSION}/protoc-${PROTOC_VERSION}-linux-x86_64.zip \
    && unzip protoc-${PROTOC_VERSION}-linux-x86_64.zip -d /usr/local \
    && rm protoc-${PROTOC_VERSION}-linux-x86_64.zip

COPY requirements.txt /
RUN pip3 install --no-cache-dir -r /requirements.txt

RUN mkdir -p /tmp/clickhouse-odbc-tmp \
    && cd /tmp/clickhouse-odbc-tmp \
    && curl -L ${odbc_driver_url} | tar --strip-components=1 -xz clickhouse-odbc-1.1.6-Linux \
    && mkdir /usr/local/lib64 -p \
    && cp /tmp/clickhouse-odbc-tmp/lib64/*.so /usr/local/lib64/ \
    && odbcinst -i -d -f /tmp/clickhouse-odbc-tmp/share/doc/clickhouse-odbc/config/odbcinst.ini.sample \
    && odbcinst -i -s -l -f /tmp/clickhouse-odbc-tmp/share/doc/clickhouse-odbc/config/odbc.ini.sample \
    && sed -i 's"=libclickhouseodbc"=/usr/local/lib64/libclickhouseodbc"' /etc/odbcinst.ini \
    && rm -rf /tmp/clickhouse-odbc-tmp

ENV TZ=Europe/Amsterdam
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone

ENV NUM_TRIES=1

# Unrelated to vars in setup_minio.sh, but should be the same there
# to have the same binaries for local running scenario
ARG MINIO_SERVER_VERSION=2024-08-03T04-33-23Z
ARG MINIO_CLIENT_VERSION=2024-07-31T15-58-33Z
ARG TARGETARCH

# Download Minio-related binaries
RUN arch=${TARGETARCH:-amd64} \
    && curl -L "https://dl.min.io/server/minio/release/linux-${arch}/archive/minio.RELEASE.${MINIO_SERVER_VERSION}" -o /minio \
    && curl -L "https://dl.min.io/client/mc/release/linux-${arch}/archive/mc.RELEASE.${MINIO_CLIENT_VERSION}" -o /mc \
    && chmod +x /mc /minio

ENV MINIO_ROOT_USER="clickhouse"
ENV MINIO_ROOT_PASSWORD="clickhouse"

# for minio to work without root
RUN chmod 777 /home
ENV HOME="/home"
ENV TEMP_DIR="/tmp/praktika"
ENV PATH="/wd/tests:/tmp/praktika/input:$PATH"

RUN curl -L --no-verbose -O 'https://archive.apache.org/dist/hadoop/common/hadoop-3.3.1/hadoop-3.3.1.tar.gz' \
    && tar -xvf hadoop-3.3.1.tar.gz \
    && rm -rf hadoop-3.3.1.tar.gz \
    && chmod 777 /hadoop-3.3.1


RUN npm install -g azurite@3.30.0 \
    && npm install -g tslib && npm install -g node

USER clickhouse
@@ -1,6 +0,0 @@ (deleted file)
Jinja2==3.1.3
numpy==1.26.4
requests==2.32.3
pandas==1.5.3
scipy==1.12.0
pyarrow==18.0.0
@@ -13,30 +13,11 @@ class JobStages(metaclass=MetaClasses.WithIter):

 def parse_args():
     parser = argparse.ArgumentParser(description="ClickHouse Build Job")
-    parser.add_argument(
-        "--build-type",
-        help="Type: <amd|arm>,<debug|release>,<asan|msan|..>",
-    )
-    parser.add_argument(
-        "--param",
-        help="Optional user-defined job start stage (for local run)",
-        default=None,
-    )
+    parser.add_argument("BUILD_TYPE", help="Type: <amd|arm_debug|release_sanitizer>")
+    parser.add_argument("--param", help="Optional custom job start stage", default=None)
     return parser.parse_args()


-CMAKE_CMD = """cmake --debug-trycompile -DCMAKE_VERBOSE_MAKEFILE=1 -LA \
--DCMAKE_BUILD_TYPE={BUILD_TYPE} \
--DSANITIZE={SANITIZER} \
--DENABLE_CHECK_HEAVY_BUILDS=1 -DENABLE_CLICKHOUSE_SELF_EXTRACTING=1 \
--DENABLE_UTILS=0 -DCMAKE_FIND_PACKAGE_NO_PACKAGE_REGISTRY=ON -DCMAKE_INSTALL_PREFIX=/usr \
--DCMAKE_INSTALL_SYSCONFDIR=/etc -DCMAKE_INSTALL_LOCALSTATEDIR=/var -DCMAKE_SKIP_INSTALL_ALL_DEPENDENCY=ON \
-{AUX_DEFS} \
--DCMAKE_C_COMPILER=clang-18 -DCMAKE_CXX_COMPILER=clang++-18 \
--DCOMPILER_CACHE={CACHE_TYPE} \
--DENABLE_BUILD_PROFILING=1 {DIR}"""
-
-
 def main():

     args = parse_args()
@@ -52,41 +33,23 @@ def main():
             stages.pop(0)
         stages.insert(0, stage)

-    build_type = args.build_type
-    assert (
-        build_type
-    ), "build_type must be provided either as input argument or as a parameter of parametrized job in CI"
-    build_type = build_type.lower()
-
-    CACHE_TYPE = "sccache"
-
-    BUILD_TYPE = "RelWithDebInfo"
-    SANITIZER = ""
-    AUX_DEFS = " -DENABLE_TESTS=0 "
-
-    if "debug" in build_type:
+    cmake_build_type = "Release"
+    sanitizer = ""
+
+    if "debug" in args.BUILD_TYPE.lower():
         print("Build type set: debug")
-        BUILD_TYPE = "Debug"
-        AUX_DEFS = " -DENABLE_TESTS=1 "
-    elif "release" in build_type:
-        print("Build type set: release")
-        AUX_DEFS = (
-            " -DENABLE_TESTS=0 -DSPLIT_DEBUG_SYMBOLS=ON -DBUILD_STANDALONE_KEEPER=1 "
-        )
-    elif "asan" in build_type:
+        cmake_build_type = "Debug"
+
+    if "asan" in args.BUILD_TYPE.lower():
         print("Sanitizer set: address")
-        SANITIZER = "address"
-    else:
-        assert False
+        sanitizer = "address"

-    cmake_cmd = CMAKE_CMD.format(
-        BUILD_TYPE=BUILD_TYPE,
-        CACHE_TYPE=CACHE_TYPE,
-        SANITIZER=SANITIZER,
-        AUX_DEFS=AUX_DEFS,
-        DIR=Utils.cwd(),
-    )
+    # if Environment.is_local_run():
+    #     build_cache_type = "disabled"
+    # else:
+    build_cache_type = "sccache"

+    current_directory = Utils.cwd()
     build_dir = f"{Settings.TEMP_DIR}/build"

     res = True
@@ -106,7 +69,12 @@ def main():
         results.append(
             Result.create_from_command_execution(
                 name="Cmake configuration",
-                command=cmake_cmd,
+                command=f"cmake --debug-trycompile -DCMAKE_VERBOSE_MAKEFILE=1 -LA -DCMAKE_BUILD_TYPE={cmake_build_type} \
+                -DSANITIZE={sanitizer} -DENABLE_CHECK_HEAVY_BUILDS=1 -DENABLE_CLICKHOUSE_SELF_EXTRACTING=1 -DENABLE_TESTS=0 \
+                -DENABLE_UTILS=0 -DCMAKE_FIND_PACKAGE_NO_PACKAGE_REGISTRY=ON -DCMAKE_INSTALL_PREFIX=/usr \
+                -DCMAKE_INSTALL_SYSCONFDIR=/etc -DCMAKE_INSTALL_LOCALSTATEDIR=/var -DCMAKE_SKIP_INSTALL_ALL_DEPENDENCY=ON \
+                -DCMAKE_C_COMPILER=clang-18 -DCMAKE_CXX_COMPILER=clang++-18 -DCOMPILER_CACHE={build_cache_type} -DENABLE_TESTS=1 \
+                -DENABLE_BUILD_PROFILING=1 {current_directory}",
                 workdir=build_dir,
                 with_log=True,
             )
@@ -127,7 +95,7 @@ def main():
     Shell.check(f"ls -l {build_dir}/programs/")
     res = results[-1].is_ok()

-    Result.create_from(results=results, stopwatch=stop_watch).complete_job()
+    Result.create_from(results=results, stopwatch=stop_watch).finish_job_accordingly()


 if __name__ == "__main__":
@@ -379,4 +379,4 @@ if __name__ == "__main__":
         )
     )

-    Result.create_from(results=results, stopwatch=stop_watch).complete_job()
+    Result.create_from(results=results, stopwatch=stop_watch).finish_job_accordingly()
@@ -1,13 +1,120 @@
 import argparse
+import threading
+from pathlib import Path

 from praktika.result import Result
 from praktika.settings import Settings
 from praktika.utils import MetaClasses, Shell, Utils

-from ci.jobs.scripts.clickhouse_proc import ClickHouseProc
 from ci.jobs.scripts.functional_tests_results import FTResultsProcessor


+class ClickHouseProc:
+    def __init__(self):
+        self.ch_config_dir = f"{Settings.TEMP_DIR}/etc/clickhouse-server"
+        self.pid_file = f"{self.ch_config_dir}/clickhouse-server.pid"
+        self.config_file = f"{self.ch_config_dir}/config.xml"
+        self.user_files_path = f"{self.ch_config_dir}/user_files"
+        self.test_output_file = f"{Settings.OUTPUT_DIR}/test_result.txt"
+        self.command = f"clickhouse-server --config-file {self.config_file} --pid-file {self.pid_file} -- --path {self.ch_config_dir} --user_files_path {self.user_files_path} --top_level_domains_path {self.ch_config_dir}/top_level_domains --keeper_server.storage_path {self.ch_config_dir}/coordination"
+        self.proc = None
+        self.pid = 0
+        nproc = int(Utils.cpu_count() / 2)
+        self.fast_test_command = f"clickhouse-test --hung-check --fast-tests-only --no-random-settings --no-random-merge-tree-settings --no-long --testname --shard --zookeeper --check-zookeeper-session --order random --print-time --report-logs-stats --jobs {nproc} -- '' | ts '%Y-%m-%d %H:%M:%S' \
+        | tee -a \"{self.test_output_file}\""
+        # TODO: store info in case of failure
+        self.info = ""
+        self.info_file = ""
+
+        Utils.set_env("CLICKHOUSE_CONFIG_DIR", self.ch_config_dir)
+        Utils.set_env("CLICKHOUSE_CONFIG", self.config_file)
+        Utils.set_env("CLICKHOUSE_USER_FILES", self.user_files_path)
+        Utils.set_env("CLICKHOUSE_SCHEMA_FILES", f"{self.ch_config_dir}/format_schemas")
+
+    def start(self):
+        print("Starting ClickHouse server")
+        Shell.check(f"rm {self.pid_file}")
+
+        def run_clickhouse():
+            self.proc = Shell.run_async(
+                self.command, verbose=True, suppress_output=True
+            )
+
+        thread = threading.Thread(target=run_clickhouse)
+        thread.daemon = True  # Allow program to exit even if thread is still running
+        thread.start()
+
+        # self.proc = Shell.run_async(self.command, verbose=True)
+
+        started = False
+        try:
+            for _ in range(5):
+                pid = Shell.get_output(f"cat {self.pid_file}").strip()
+                if not pid:
+                    Utils.sleep(1)
+                    continue
+                started = True
+                print(f"Got pid from fs [{pid}]")
+                _ = int(pid)
+                break
+        except Exception:
+            pass
+
+        if not started:
+            stdout = self.proc.stdout.read().strip() if self.proc.stdout else ""
+            stderr = self.proc.stderr.read().strip() if self.proc.stderr else ""
+            Utils.print_formatted_error("Failed to start ClickHouse", stdout, stderr)
+            return False
+
+        print(f"ClickHouse server started successfully, pid [{pid}]")
+        return True
+
+    def wait_ready(self):
+        res, out, err = 0, "", ""
+        attempts = 30
+        delay = 2
+        for attempt in range(attempts):
+            res, out, err = Shell.get_res_stdout_stderr(
+                'clickhouse-client --query "select 1"', verbose=True
+            )
+            if out.strip() == "1":
+                print("Server ready")
+                break
+            else:
+                print(f"Server not ready, wait")
+                Utils.sleep(delay)
+        else:
+            Utils.print_formatted_error(
+                f"Server not ready after [{attempts*delay}s]", out, err
+            )
+            return False
+        return True
+
+    def run_fast_test(self):
+        if Path(self.test_output_file).exists():
+            Path(self.test_output_file).unlink()
+        exit_code = Shell.run(self.fast_test_command)
+        return exit_code == 0
+
+    def terminate(self):
+        print("Terminate ClickHouse process")
+        timeout = 10
+        if self.proc:
+            Utils.terminate_process_group(self.proc.pid)
+
+            self.proc.terminate()
+            try:
+                self.proc.wait(timeout=10)
+                print(f"Process {self.proc.pid} terminated gracefully.")
+            except Exception:
+                print(
+                    f"Process {self.proc.pid} did not terminate in {timeout} seconds, killing it..."
+                )
+                Utils.terminate_process_group(self.proc.pid, force=True)
+                self.proc.wait()  # Wait for the process to be fully killed
+                print(f"Process {self.proc} was killed.")
+
+
 def clone_submodules():
     submodules_to_update = [
         "contrib/sysroot",
@@ -133,7 +240,7 @@ def main():
         Shell.check(f"rm -rf {build_dir} && mkdir -p {build_dir}")
         results.append(
             Result.create_from_command_execution(
-                name="Checkout Submodules",
+                name="Checkout Submodules for Minimal Build",
                 command=clone_submodules,
             )
         )
@@ -188,8 +295,8 @@ def main():
     if res and JobStages.CONFIG in stages:
         commands = [
             f"rm -rf {Settings.TEMP_DIR}/etc/ && mkdir -p {Settings.TEMP_DIR}/etc/clickhouse-client {Settings.TEMP_DIR}/etc/clickhouse-server",
-            f"cp ./programs/server/config.xml ./programs/server/users.xml {Settings.TEMP_DIR}/etc/clickhouse-server/",
-            f"./tests/config/install.sh {Settings.TEMP_DIR}/etc/clickhouse-server {Settings.TEMP_DIR}/etc/clickhouse-client --fast-test",
+            f"cp {current_directory}/programs/server/config.xml {current_directory}/programs/server/users.xml {Settings.TEMP_DIR}/etc/clickhouse-server/",
+            f"{current_directory}/tests/config/install.sh {Settings.TEMP_DIR}/etc/clickhouse-server {Settings.TEMP_DIR}/etc/clickhouse-client",
             # f"cp -a {current_directory}/programs/server/config.d/log_to_console.xml {Settings.TEMP_DIR}/etc/clickhouse-server/config.d/",
             f"rm -f {Settings.TEMP_DIR}/etc/clickhouse-server/config.d/secure_ports.xml",
             update_path_ch_config,
@@ -203,7 +310,7 @@ def main():
         )
         res = results[-1].is_ok()

-    CH = ClickHouseProc(fast_test=True)
+    CH = ClickHouseProc()
     if res and JobStages.TEST in stages:
         stop_watch_ = Utils.Stopwatch()
         step_name = "Start ClickHouse Server"
@@ -215,17 +322,15 @@ def main():
         )

     if res and JobStages.TEST in stages:
-        stop_watch_ = Utils.Stopwatch()
         step_name = "Tests"
         print(step_name)
         res = res and CH.run_fast_test()
         if res:
             results.append(FTResultsProcessor(wd=Settings.OUTPUT_DIR).run())
-            results[-1].set_timing(stopwatch=stop_watch_)

     CH.terminate()

-    Result.create_from(results=results, stopwatch=stop_watch).complete_job()
+    Result.create_from(results=results, stopwatch=stop_watch).finish_job_accordingly()


 if __name__ == "__main__":
@@ -1,170 +0,0 @@ (deleted file)
import argparse
import os
import time
from pathlib import Path

from praktika.result import Result
from praktika.settings import Settings
from praktika.utils import MetaClasses, Shell, Utils

from ci.jobs.scripts.clickhouse_proc import ClickHouseProc
from ci.jobs.scripts.functional_tests_results import FTResultsProcessor


class JobStages(metaclass=MetaClasses.WithIter):
    INSTALL_CLICKHOUSE = "install"
    START = "start"
    TEST = "test"


def parse_args():
    parser = argparse.ArgumentParser(description="ClickHouse Build Job")
    parser.add_argument(
        "--ch-path", help="Path to clickhouse binary", default=f"{Settings.INPUT_DIR}"
    )
    parser.add_argument(
        "--test-options",
        help="Comma separated option(s): parallel|non-parallel|BATCH_NUM/BTATCH_TOT|..",
        default="",
    )
    parser.add_argument("--param", help="Optional job start stage", default=None)
    parser.add_argument("--test", help="Optional test name pattern", default="")
    return parser.parse_args()


def run_test(
    no_parallel: bool, no_sequiential: bool, batch_num: int, batch_total: int, test=""
):
    test_output_file = f"{Settings.OUTPUT_DIR}/test_result.txt"

    test_command = f"clickhouse-test --jobs 2 --testname --shard --zookeeper --check-zookeeper-session --no-stateless \
                --hung-check --print-time \
                --capture-client-stacktrace --queries ./tests/queries -- '{test}' \
                | ts '%Y-%m-%d %H:%M:%S' | tee -a \"{test_output_file}\""
    if Path(test_output_file).exists():
        Path(test_output_file).unlink()
    Shell.run(test_command, verbose=True)


def main():

    args = parse_args()
    test_options = args.test_options.split(",")
    no_parallel = "non-parallel" in test_options
    no_sequential = "parallel" in test_options
    batch_num, total_batches = 0, 0
    for to in test_options:
        if "/" in to:
            batch_num, total_batches = map(int, to.split("/"))

    # os.environ["AZURE_CONNECTION_STRING"] = Shell.get_output(
    #     f"aws ssm get-parameter --region us-east-1 --name azure_connection_string --with-decryption --output text --query Parameter.Value",
    #     verbose=True,
    #     strict=True
    # )

    ch_path = args.ch_path
    assert Path(
        ch_path + "/clickhouse"
    ).is_file(), f"clickhouse binary not found under [{ch_path}]"

    stop_watch = Utils.Stopwatch()

    stages = list(JobStages)

    logs_to_attach = []

    stage = args.param or JobStages.INSTALL_CLICKHOUSE
    if stage:
        assert stage in JobStages, f"--param must be one of [{list(JobStages)}]"
        print(f"Job will start from stage [{stage}]")
        while stage in stages:
            stages.pop(0)
        stages.insert(0, stage)

    res = True
    results = []

    Utils.add_to_PATH(f"{ch_path}:tests")

    if res and JobStages.INSTALL_CLICKHOUSE in stages:
        commands = [
            f"rm -rf /tmp/praktika/var/log/clickhouse-server/clickhouse-server.*",
            f"chmod +x {ch_path}/clickhouse",
            f"ln -sf {ch_path}/clickhouse {ch_path}/clickhouse-server",
            f"ln -sf {ch_path}/clickhouse {ch_path}/clickhouse-client",
            f"ln -sf {ch_path}/clickhouse {ch_path}/clickhouse-compressor",
            f"ln -sf {ch_path}/clickhouse {ch_path}/clickhouse-local",
            f"rm -rf {Settings.TEMP_DIR}/etc/ && mkdir -p {Settings.TEMP_DIR}/etc/clickhouse-client {Settings.TEMP_DIR}/etc/clickhouse-server",
            f"cp programs/server/config.xml programs/server/users.xml {Settings.TEMP_DIR}/etc/clickhouse-server/",
            f"./tests/config/install.sh {Settings.TEMP_DIR}/etc/clickhouse-server {Settings.TEMP_DIR}/etc/clickhouse-client --s3-storage",
            # clickhouse benchmark segfaults with --config-path, so provide client config by its default location
            f"cp {Settings.TEMP_DIR}/etc/clickhouse-client/* /etc/clickhouse-client/",
            # update_path_ch_config,
            # f"sed -i 's|>/var/|>{Settings.TEMP_DIR}/var/|g; s|>/etc/|>{Settings.TEMP_DIR}/etc/|g' {Settings.TEMP_DIR}/etc/clickhouse-server/config.xml",
            # f"sed -i 's|>/etc/|>{Settings.TEMP_DIR}/etc/|g' {Settings.TEMP_DIR}/etc/clickhouse-server/config.d/ssl_certs.xml",
            f"for file in /tmp/praktika/etc/clickhouse-server/config.d/*.xml; do [ -f $file ] && echo Change config $file && sed -i 's|>/var/log|>{Settings.TEMP_DIR}/var/log|g; s|>/etc/|>{Settings.TEMP_DIR}/etc/|g' $(readlink -f $file); done",
            f"for file in /tmp/praktika/etc/clickhouse-server/*.xml; do [ -f $file ] && echo Change config $file && sed -i 's|>/var/log|>{Settings.TEMP_DIR}/var/log|g; s|>/etc/|>{Settings.TEMP_DIR}/etc/|g' $(readlink -f $file); done",
            f"for file in /tmp/praktika/etc/clickhouse-server/config.d/*.xml; do [ -f $file ] && echo Change config $file && sed -i 's|<path>local_disk|<path>{Settings.TEMP_DIR}/local_disk|g' $(readlink -f $file); done",
            f"clickhouse-server --version",
        ]
        results.append(
            Result.create_from_command_execution(
                name="Install ClickHouse", command=commands, with_log=True
            )
        )
        res = results[-1].is_ok()

    CH = ClickHouseProc()
    if res and JobStages.START in stages:
        stop_watch_ = Utils.Stopwatch()
        step_name = "Start ClickHouse Server"
        print(step_name)
        minio_log = "/tmp/praktika/output/minio.log"
        res = res and CH.start_minio(test_type="stateful", log_file_path=minio_log)
        logs_to_attach += [minio_log]
        time.sleep(10)
        Shell.check("ps -ef | grep minio", verbose=True)
        res = res and Shell.check(
            "aws s3 ls s3://test --endpoint-url http://localhost:11111/", verbose=True
        )
        res = res and CH.start()
        res = res and CH.wait_ready()
        if res:
            print("ch started")
        logs_to_attach += [
            "/tmp/praktika/var/log/clickhouse-server/clickhouse-server.log",
            "/tmp/praktika/var/log/clickhouse-server/clickhouse-server.err.log",
        ]
        results.append(
            Result.create_from(
                name=step_name,
                status=res,
                stopwatch=stop_watch_,
            )
        )
        res = results[-1].is_ok()

    if res and JobStages.TEST in stages:
        stop_watch_ = Utils.Stopwatch()
        step_name = "Tests"
        print(step_name)
        # assert Shell.check("clickhouse-client -q \"insert into system.zookeeper (name, path, value) values ('auxiliary_zookeeper2', '/test/chroot/', '')\"", verbose=True)
        run_test(
            no_parallel=no_parallel,
            no_sequiential=no_sequential,
            batch_num=batch_num,
            batch_total=total_batches,
            test=args.test,
        )
        results.append(FTResultsProcessor(wd=Settings.OUTPUT_DIR).run())
        results[-1].set_timing(stopwatch=stop_watch_)
        res = results[-1].is_ok()

    Result.create_from(
        results=results, stopwatch=stop_watch, files=logs_to_attach if not res else []
    ).complete_job()


if __name__ == "__main__":
    main()
@@ -1,183 +0,0 @@ (deleted file)
import argparse
import os
import time
from pathlib import Path

from praktika.result import Result
from praktika.settings import Settings
from praktika.utils import MetaClasses, Shell, Utils

from ci.jobs.scripts.clickhouse_proc import ClickHouseProc
from ci.jobs.scripts.functional_tests_results import FTResultsProcessor


class JobStages(metaclass=MetaClasses.WithIter):
    INSTALL_CLICKHOUSE = "install"
    START = "start"
    TEST = "test"


def parse_args():
    parser = argparse.ArgumentParser(description="ClickHouse Build Job")
    parser.add_argument(
        "--ch-path", help="Path to clickhouse binary", default=f"{Settings.INPUT_DIR}"
    )
    parser.add_argument(
        "--test-options",
        help="Comma separated option(s): parallel|non-parallel|BATCH_NUM/BTATCH_TOT|..",
        default="",
    )
    parser.add_argument("--param", help="Optional job start stage", default=None)
    parser.add_argument("--test", help="Optional test name pattern", default="")
    return parser.parse_args()


def run_stateless_test(
    no_parallel: bool, no_sequiential: bool, batch_num: int, batch_total: int, test=""
):
    assert not (no_parallel and no_sequiential)
    test_output_file = f"{Settings.OUTPUT_DIR}/test_result.txt"
    aux = ""
    nproc = int(Utils.cpu_count() / 2)
    if batch_num and batch_total:
        aux = f"--run-by-hash-total {batch_total} --run-by-hash-num {batch_num-1}"
    statless_test_command = f"clickhouse-test --testname --shard --zookeeper --check-zookeeper-session --hung-check --print-time \
                --no-drop-if-fail --capture-client-stacktrace --queries /repo/tests/queries --test-runs 1 --hung-check \
                {'--no-parallel' if no_parallel else ''} {'--no-sequential' if no_sequiential else ''} \
                --print-time --jobs {nproc} --report-coverage --report-logs-stats {aux} \
                --queries ./tests/queries -- '{test}' | ts '%Y-%m-%d %H:%M:%S' \
                | tee -a \"{test_output_file}\""
    if Path(test_output_file).exists():
        Path(test_output_file).unlink()
    Shell.run(statless_test_command, verbose=True)


def main():

    args = parse_args()
    test_options = args.test_options.split(",")
    no_parallel = "non-parallel" in test_options
    no_sequential = "parallel" in test_options
    batch_num, total_batches = 0, 0
    for to in test_options:
        if "/" in to:
            batch_num, total_batches = map(int, to.split("/"))

    # os.environ["AZURE_CONNECTION_STRING"] = Shell.get_output(
    #     f"aws ssm get-parameter --region us-east-1 --name azure_connection_string --with-decryption --output text --query Parameter.Value",
    #     verbose=True,
    #     strict=True
    # )

    ch_path = args.ch_path
    assert Path(
        ch_path + "/clickhouse"
    ).is_file(), f"clickhouse binary not found under [{ch_path}]"

    stop_watch = Utils.Stopwatch()

    stages = list(JobStages)

    logs_to_attach = []

    stage = args.param or JobStages.INSTALL_CLICKHOUSE
    if stage:
        assert stage in JobStages, f"--param must be one of [{list(JobStages)}]"
        print(f"Job will start from stage [{stage}]")
        while stage in stages:
            stages.pop(0)
        stages.insert(0, stage)

    res = True
    results = []

    Utils.add_to_PATH(f"{ch_path}:tests")

    if res and JobStages.INSTALL_CLICKHOUSE in stages:
        commands = [
            f"rm -rf /tmp/praktika/var/log/clickhouse-server/clickhouse-server.*",
            f"chmod +x {ch_path}/clickhouse",
            f"ln -sf {ch_path}/clickhouse {ch_path}/clickhouse-server",
            f"ln -sf {ch_path}/clickhouse {ch_path}/clickhouse-client",
            f"ln -sf {ch_path}/clickhouse {ch_path}/clickhouse-compressor",
            f"ln -sf {ch_path}/clickhouse {ch_path}/clickhouse-local",
            f"rm -rf {Settings.TEMP_DIR}/etc/ && mkdir -p {Settings.TEMP_DIR}/etc/clickhouse-client {Settings.TEMP_DIR}/etc/clickhouse-server",
            f"cp programs/server/config.xml programs/server/users.xml {Settings.TEMP_DIR}/etc/clickhouse-server/",
            # TODO: find a way to work with Azure secret so it's ok for local tests as well, for now keep azure disabled
            f"./tests/config/install.sh {Settings.TEMP_DIR}/etc/clickhouse-server {Settings.TEMP_DIR}/etc/clickhouse-client --s3-storage --no-azure",
            # clickhouse benchmark segfaults with --config-path, so provide client config by its default location
            f"cp {Settings.TEMP_DIR}/etc/clickhouse-client/* /etc/clickhouse-client/",
            # update_path_ch_config,
            # f"sed -i 's|>/var/|>{Settings.TEMP_DIR}/var/|g; s|>/etc/|>{Settings.TEMP_DIR}/etc/|g' {Settings.TEMP_DIR}/etc/clickhouse-server/config.xml",
            # f"sed -i 's|>/etc/|>{Settings.TEMP_DIR}/etc/|g' {Settings.TEMP_DIR}/etc/clickhouse-server/config.d/ssl_certs.xml",
            f"for file in /tmp/praktika/etc/clickhouse-server/config.d/*.xml; do [ -f $file ] && echo Change config $file && sed -i 's|>/var/log|>{Settings.TEMP_DIR}/var/log|g; s|>/etc/|>{Settings.TEMP_DIR}/etc/|g' $(readlink -f $file); done",
            f"for file in /tmp/praktika/etc/clickhouse-server/*.xml; do [ -f $file ] && echo Change config $file && sed -i 's|>/var/log|>{Settings.TEMP_DIR}/var/log|g; s|>/etc/|>{Settings.TEMP_DIR}/etc/|g' $(readlink -f $file); done",
            f"for file in /tmp/praktika/etc/clickhouse-server/config.d/*.xml; do [ -f $file ] && echo Change config $file && sed -i 's|<path>local_disk|<path>{Settings.TEMP_DIR}/local_disk|g' $(readlink -f $file); done",
            f"clickhouse-server --version",
        ]
        results.append(
            Result.create_from_command_execution(
                name="Install ClickHouse", command=commands, with_log=True
            )
        )
        res = results[-1].is_ok()

    CH = ClickHouseProc()
    if res and JobStages.START in stages:
        stop_watch_ = Utils.Stopwatch()
        step_name = "Start ClickHouse Server"
        print(step_name)
        hdfs_log = "/tmp/praktika/output/hdfs_mini.log"
        minio_log = "/tmp/praktika/output/minio.log"
        res = res and CH.start_hdfs(log_file_path=hdfs_log)
        res = res and CH.start_minio(test_type="stateful", log_file_path=minio_log)
        logs_to_attach += [minio_log, hdfs_log]
        time.sleep(10)
        Shell.check("ps -ef | grep minio", verbose=True)
        Shell.check("ps -ef | grep hdfs", verbose=True)
        res = res and Shell.check(
            "aws s3 ls s3://test --endpoint-url http://localhost:11111/", verbose=True
        )
        res = res and CH.start()
        res = res and CH.wait_ready()
        if res:
            print("ch started")
        logs_to_attach += [
            "/tmp/praktika/var/log/clickhouse-server/clickhouse-server.log",
            "/tmp/praktika/var/log/clickhouse-server/clickhouse-server.err.log",
        ]
        results.append(
            Result.create_from(
                name=step_name,
                status=res,
                stopwatch=stop_watch_,
            )
        )
        res = results[-1].is_ok()

    if res and JobStages.TEST in stages:
        stop_watch_ = Utils.Stopwatch()
        step_name = "Tests"
        print(step_name)
        assert Shell.check(
            "clickhouse-client -q \"insert into system.zookeeper (name, path, value) values ('auxiliary_zookeeper2', '/test/chroot/', '')\"",
            verbose=True,
        )
        run_stateless_test(
            no_parallel=no_parallel,
            no_sequiential=no_sequential,
            batch_num=batch_num,
            batch_total=total_batches,
            test=args.test,
        )
        results.append(FTResultsProcessor(wd=Settings.OUTPUT_DIR).run())
        results[-1].set_timing(stopwatch=stop_watch_)
        res = results[-1].is_ok()

    Result.create_from(
        results=results, stopwatch=stop_watch, files=logs_to_attach if not res else []
    ).complete_job()


if __name__ == "__main__":
    main()
@@ -1,142 +0,0 @@ (deleted file)
import subprocess
from pathlib import Path

from praktika.settings import Settings
from praktika.utils import Shell, Utils


class ClickHouseProc:
    BACKUPS_XML = """
<clickhouse>
    <backups>
        <type>local</type>
        <path>{CH_RUNTIME_DIR}/var/lib/clickhouse/disks/backups/</path>
    </backups>
</clickhouse>
"""

    def __init__(self, fast_test=False):
        self.ch_config_dir = f"{Settings.TEMP_DIR}/etc/clickhouse-server"
        self.pid_file = f"{self.ch_config_dir}/clickhouse-server.pid"
        self.config_file = f"{self.ch_config_dir}/config.xml"
        self.user_files_path = f"{self.ch_config_dir}/user_files"
        self.test_output_file = f"{Settings.OUTPUT_DIR}/test_result.txt"
        self.command = f"clickhouse-server --config-file {self.config_file} --pid-file {self.pid_file} -- --path {self.ch_config_dir} --user_files_path {self.user_files_path} --top_level_domains_path {self.ch_config_dir}/top_level_domains --keeper_server.storage_path {self.ch_config_dir}/coordination"
        self.proc = None
        self.pid = 0
        nproc = int(Utils.cpu_count() / 2)
        self.fast_test_command = f"clickhouse-test --hung-check --fast-tests-only --no-random-settings --no-random-merge-tree-settings --no-long --testname --shard --zookeeper --check-zookeeper-session --order random --print-time --report-logs-stats --jobs {nproc} -- '' | ts '%Y-%m-%d %H:%M:%S' \
        | tee -a \"{self.test_output_file}\""
        # TODO: store info in case of failure
        self.info = ""
        self.info_file = ""

        Utils.set_env("CLICKHOUSE_CONFIG_DIR", self.ch_config_dir)
        Utils.set_env("CLICKHOUSE_CONFIG", self.config_file)
        Utils.set_env("CLICKHOUSE_USER_FILES", self.user_files_path)
        # Utils.set_env("CLICKHOUSE_SCHEMA_FILES", f"{self.ch_config_dir}/format_schemas")

        # if not fast_test:
        #     with open(f"{self.ch_config_dir}/config.d/backups.xml", "w") as file:
        #         file.write(self.BACKUPS_XML)

        self.minio_proc = None

    def start_hdfs(self, log_file_path):
        command = ["./ci/jobs/scripts/functional_tests/setup_hdfs_minicluster.sh"]
        with open(log_file_path, "w") as log_file:
            process = subprocess.Popen(
                command, stdout=log_file, stderr=subprocess.STDOUT
            )
        print(
            f"Started setup_hdfs_minicluster.sh asynchronously with PID {process.pid}"
        )
        return True

    def start_minio(self, test_type, log_file_path):
        command = [
            "./ci/jobs/scripts/functional_tests/setup_minio.sh",
            test_type,
            "./tests",
        ]
        with open(log_file_path, "w") as log_file:
            process = subprocess.Popen(
                command, stdout=log_file, stderr=subprocess.STDOUT
            )
        print(f"Started setup_minio.sh asynchronously with PID {process.pid}")
        return True

    def start(self):
        print("Starting ClickHouse server")
        Shell.check(f"rm {self.pid_file}")
        self.proc = subprocess.Popen(self.command, stderr=subprocess.STDOUT, shell=True)
        started = False
        try:
            for _ in range(5):
                pid = Shell.get_output(f"cat {self.pid_file}").strip()
                if not pid:
                    Utils.sleep(1)
                    continue
                started = True
                print(f"Got pid from fs [{pid}]")
                _ = int(pid)
                break
        except Exception:
            pass

        if not started:
            stdout = self.proc.stdout.read().strip() if self.proc.stdout else ""
            stderr = self.proc.stderr.read().strip() if self.proc.stderr else ""
            Utils.print_formatted_error("Failed to start ClickHouse", stdout, stderr)
            return False

        print(f"ClickHouse server started successfully, pid [{pid}]")
        return True

    def wait_ready(self):
        res, out, err = 0, "", ""
        attempts = 30
        delay = 2
        for attempt in range(attempts):
            res, out, err = Shell.get_res_stdout_stderr(
                'clickhouse-client --query "select 1"', verbose=True
            )
            if out.strip() == "1":
                print("Server ready")
                break
            else:
                print(f"Server not ready, wait")
                Utils.sleep(delay)
        else:
            Utils.print_formatted_error(
                f"Server not ready after [{attempts*delay}s]", out, err
            )
            return False
        return True

    def run_fast_test(self):
        if Path(self.test_output_file).exists():
            Path(self.test_output_file).unlink()
        exit_code = Shell.run(self.fast_test_command)
        return exit_code == 0

    def terminate(self):
        print("Terminate ClickHouse process")
        timeout = 10
        if self.proc:
            Utils.terminate_process_group(self.proc.pid)

            self.proc.terminate()
            try:
                self.proc.wait(timeout=10)
                print(f"Process {self.proc.pid} terminated gracefully.")
            except Exception:
                print(
                    f"Process {self.proc.pid} did not terminate in {timeout} seconds, killing it..."
                )
                Utils.terminate_process_group(self.proc.pid, force=True)
                self.proc.wait()  # Wait for the process to be fully killed
                print(f"Process {self.proc} was killed.")

        if self.minio_proc:
            Utils.terminate_process_group(self.minio_proc.pid)
@@ -1,19 +0,0 @@ (deleted file)
#!/bin/bash
# shellcheck disable=SC2024

set -e -x -a -u

ls -lha

cd /hadoop-3.3.1

export JAVA_HOME=/usr
mkdir -p target/test/data

bin/mapred minicluster -format -nomr -nnport 12222 &

while ! nc -z localhost 12222; do
    sleep 1
done

lsof -i :12222
@@ -1,162 +0,0 @@ (deleted file)
#!/bin/bash

set -euxf -o pipefail

export MINIO_ROOT_USER=${MINIO_ROOT_USER:-clickhouse}
export MINIO_ROOT_PASSWORD=${MINIO_ROOT_PASSWORD:-clickhouse}
TEST_DIR=${2:-/repo/tests/}

if [ -d "$TEMP_DIR" ]; then
    TEST_DIR=$(readlink -f $TEST_DIR)
    cd "$TEMP_DIR"
    # add / for minio mc in docker
    PATH="/:.:$PATH"
fi

usage() {
    echo $"Usage: $0 <stateful|stateless> <test_path> (default path: /usr/share/clickhouse-test)"
    exit 1
}

check_arg() {
    local query_dir
    if [ ! $# -eq 1 ]; then
        if [ ! $# -eq 2 ]; then
            echo "ERROR: need either one or two arguments, <stateful|stateless> <test_path> (default path: /usr/share/clickhouse-test)"
            usage
        fi
    fi
    case "$1" in
        stateless)
            query_dir="0_stateless"
            ;;
        stateful)
            query_dir="1_stateful"
            ;;
        *)
            echo "unknown test type ${test_type}"
            usage
            ;;
    esac
    echo ${query_dir}
}

find_arch() {
    local arch
    case $(uname -m) in
        x86_64)
            arch="amd64"
            ;;
        aarch64)
            arch="arm64"
            ;;
        *)
            echo "unknown architecture $(uname -m)";
            exit 1
            ;;
    esac
    echo ${arch}
}

find_os() {
    local os
    os=$(uname -s | tr '[:upper:]' '[:lower:]')
    echo "${os}"
}

download_minio() {
    local os
    local arch
    local minio_server_version=${MINIO_SERVER_VERSION:-2024-08-03T04-33-23Z}
    local minio_client_version=${MINIO_CLIENT_VERSION:-2024-07-31T15-58-33Z}

    os=$(find_os)
    arch=$(find_arch)
    wget "https://dl.min.io/server/minio/release/${os}-${arch}/archive/minio.RELEASE.${minio_server_version}" -O ./minio
    wget "https://dl.min.io/client/mc/release/${os}-${arch}/archive/mc.RELEASE.${minio_client_version}" -O ./mc
    chmod +x ./mc ./minio
}

start_minio() {
    pwd
    mkdir -p ./minio_data
    minio --version
    nohup minio server --address ":11111" ./minio_data &
    wait_for_it
    lsof -i :11111
    sleep 5
}

setup_minio() {
    local test_type=$1
    echo "setup_minio(), test_type=$test_type"
    mc alias set clickminio http://localhost:11111 clickhouse clickhouse
    mc admin user add clickminio test testtest
    mc admin policy attach clickminio readwrite --user=test ||:
    mc mb --ignore-existing clickminio/test
    if [ "$test_type" = "stateless" ]; then
        echo "Create @test bucket in minio"
        mc anonymous set public clickminio/test
    fi
}

# uploads data to minio, by default after unpacking all tests
# will be in /usr/share/clickhouse-test/queries
upload_data() {
    local query_dir=$1
    local test_path=$2
    local data_path=${test_path}/queries/${query_dir}/data_minio
    echo "upload_data() data_path=$data_path"

    # iterating over globs will cause redundant file variable to be
    # a path to a file, not a filename
    # shellcheck disable=SC2045
    if [ -d "${data_path}" ]; then
        mc cp --recursive "${data_path}"/ clickminio/test/
    fi
}

setup_aws_credentials() {
    local minio_root_user=${MINIO_ROOT_USER:-clickhouse}
    local minio_root_password=${MINIO_ROOT_PASSWORD:-clickhouse}
    mkdir -p ~/.aws
    cat <<EOT >> ~/.aws/credentials
[default]
aws_access_key_id=${minio_root_user}
aws_secret_access_key=${minio_root_password}
EOT
}

wait_for_it() {
    local counter=0
    local max_counter=60
    local url="http://localhost:11111"
    local params=(
        --silent
        --verbose
    )
    while ! curl "${params[@]}" "${url}" 2>&1 | grep AccessDenied
    do
        if [[ ${counter} == "${max_counter}" ]]; then
            echo "failed to setup minio"
            exit 0
        fi
        echo "trying to connect to minio"
        sleep 1
        counter=$((counter + 1))
    done
}

main() {
    local query_dir
    query_dir=$(check_arg "$@")
    if ! (minio --version && mc --version); then
        download_minio
    fi
    start_minio
    setup_minio "$1"
    upload_data "${query_dir}" "$TEST_DIR"
    setup_aws_credentials
}

main "$@"
@@ -1,6 +1,7 @@
 import dataclasses
 from typing import List

+from praktika.environment import Environment
 from praktika.result import Result

 OK_SIGN = "[ OK "
@@ -232,8 +233,6 @@ class FTResultsProcessor:
         else:
             pass

-        info = f"Total: {s.total - s.skipped}, Failed: {s.failed}"
-
         # TODO: !!!
         # def test_result_comparator(item):
         #     # sort by status then by check name
@@ -251,11 +250,10 @@ class FTResultsProcessor:
         # test_results.sort(key=test_result_comparator)

         return Result.create_from(
-            name="Tests",
+            name=Environment.JOB_NAME,
             results=test_results,
             status=state,
             files=[self.tests_output_file],
-            info=info,
             with_info_from_results=False,
         )
@@ -37,30 +37,6 @@ def create_parser():
         type=str,
         default=None,
     )
-    run_parser.add_argument(
-        "--test",
-        help="Custom parameter to pass into a job script, it's up to job script how to use it, for local test",
-        type=str,
-        default="",
-    )
-    run_parser.add_argument(
-        "--pr",
-        help="PR number. Optional parameter for local run. Set if you want an required artifact to be uploaded from CI run in that PR",
-        type=int,
-        default=None,
-    )
-    run_parser.add_argument(
-        "--sha",
-        help="Commit sha. Optional parameter for local run. Set if you want an required artifact to be uploaded from CI run on that sha, head sha will be used if not set",
-        type=str,
-        default=None,
-    )
-    run_parser.add_argument(
-        "--branch",
-        help="Commit sha. Optional parameter for local run. Set if you want an required artifact to be uploaded from CI run on that branch, main branch name will be used if not set",
-        type=str,
-        default=None,
-    )
     run_parser.add_argument(
         "--ci",
         help="When not set - dummy env will be generated, for local test",
@@ -109,13 +85,9 @@ if __name__ == "__main__":
             workflow=workflow,
             job=job,
            docker=args.docker,
-            local_run=not args.ci,
+            dummy_env=not args.ci,
             no_docker=args.no_docker,
             param=args.param,
-            test=args.test,
-            pr=args.pr,
-            branch=args.branch,
-            sha=args.sha,
         )
     else:
         parser.print_help()
@@ -6,7 +6,7 @@ from types import SimpleNamespace
 from typing import Any, Dict, List, Type

 from praktika import Workflow
-from praktika.settings import Settings
+from praktika._settings import _Settings
 from praktika.utils import MetaClasses, T

@@ -30,12 +30,13 @@ class _Environment(MetaClasses.Serializable):
     INSTANCE_ID: str
     INSTANCE_LIFE_CYCLE: str
     LOCAL_RUN: bool = False
+    PARAMETER: Any = None
     REPORT_INFO: List[str] = dataclasses.field(default_factory=list)
     name = "environment"

     @classmethod
     def file_name_static(cls, _name=""):
-        return f"{Settings.TEMP_DIR}/{cls.name}.json"
+        return f"{_Settings.TEMP_DIR}/{cls.name}.json"

     @classmethod
     def from_dict(cls: Type[T], obj: Dict[str, Any]) -> T:
@@ -66,12 +67,12 @@ class _Environment(MetaClasses.Serializable):

     @staticmethod
     def get_needs_statuses():
-        if Path(Settings.WORKFLOW_STATUS_FILE).is_file():
-            with open(Settings.WORKFLOW_STATUS_FILE, "r", encoding="utf8") as f:
+        if Path(_Settings.WORKFLOW_STATUS_FILE).is_file():
+            with open(_Settings.WORKFLOW_STATUS_FILE, "r", encoding="utf8") as f:
                 return json.load(f)
         else:
             print(
-                f"ERROR: Status file [{Settings.WORKFLOW_STATUS_FILE}] does not exist"
+                f"ERROR: Status file [{_Settings.WORKFLOW_STATUS_FILE}] does not exist"
             )
             raise RuntimeError()

@@ -158,8 +159,7 @@ class _Environment(MetaClasses.Serializable):
     @classmethod
     def get_s3_prefix_static(cls, pr_number, branch, sha, latest=False):
         prefix = ""
-        assert sha or latest
-        if pr_number and pr_number > 0:
+        if pr_number > 0:
             prefix += f"{pr_number}"
         else:
             prefix += f"{branch}"
@@ -171,15 +171,18 @@ class _Environment(MetaClasses.Serializable):

     # TODO: find a better place for the function. This file should not import praktika.settings
     # as it's requires reading users config, that's why imports nested inside the function
-    def get_report_url(self, settings, latest=False):
+    def get_report_url(self):
         import urllib

-        path = settings.HTML_S3_PATH
-        for bucket, endpoint in settings.S3_BUCKET_TO_HTTP_ENDPOINT.items():
+        from praktika.settings import Settings
+        from praktika.utils import Utils
+
+        path = Settings.HTML_S3_PATH
+        for bucket, endpoint in Settings.S3_BUCKET_TO_HTTP_ENDPOINT.items():
             if bucket in path:
                 path = path.replace(bucket, endpoint)
                 break
-        REPORT_URL = f"https://{path}/{Path(settings.HTML_PAGE_FILE).name}?PR={self.PR_NUMBER}&sha={'latest' if latest else self.SHA}&name_0={urllib.parse.quote(self.WORKFLOW_NAME, safe='')}&name_1={urllib.parse.quote(self.JOB_NAME, safe='')}"
+        REPORT_URL = f"https://{path}/{Path(Settings.HTML_PAGE_FILE).name}?PR={self.PR_NUMBER}&sha={self.SHA}&name_0={urllib.parse.quote(self.WORKFLOW_NAME, safe='')}&name_1={urllib.parse.quote(self.JOB_NAME, safe='')}"
         return REPORT_URL

     def is_local_run(self):
ci/praktika/_settings.py (new file, 124 lines)
@@ -0,0 +1,124 @@
import dataclasses
from pathlib import Path
from typing import Dict, Iterable, List, Optional


@dataclasses.dataclass
class _Settings:
    ######################################
    # Pipeline generation settings #
    ######################################
    CI_PATH = "./ci"
    WORKFLOW_PATH_PREFIX: str = "./.github/workflows"
    WORKFLOWS_DIRECTORY: str = f"{CI_PATH}/workflows"
    SETTINGS_DIRECTORY: str = f"{CI_PATH}/settings"
    CI_CONFIG_JOB_NAME = "Config Workflow"
    DOCKER_BUILD_JOB_NAME = "Docker Builds"
    FINISH_WORKFLOW_JOB_NAME = "Finish Workflow"
    READY_FOR_MERGE_STATUS_NAME = "Ready for Merge"
    CI_CONFIG_RUNS_ON: Optional[List[str]] = None
    DOCKER_BUILD_RUNS_ON: Optional[List[str]] = None
    VALIDATE_FILE_PATHS: bool = True

    ######################################
    # Runtime Settings #
    ######################################
    MAX_RETRIES_S3 = 3
    MAX_RETRIES_GH = 3

    ######################################
    # S3 (artifact storage) settings #
    ######################################
    S3_ARTIFACT_PATH: str = ""

    ######################################
    # CI workspace settings #
    ######################################
    TEMP_DIR: str = "/tmp/praktika"
    OUTPUT_DIR: str = f"{TEMP_DIR}/output"
    INPUT_DIR: str = f"{TEMP_DIR}/input"
    PYTHON_INTERPRETER: str = "python3"
    PYTHON_PACKET_MANAGER: str = "pip3"
    PYTHON_VERSION: str = "3.9"
    INSTALL_PYTHON_FOR_NATIVE_JOBS: bool = False
    INSTALL_PYTHON_REQS_FOR_NATIVE_JOBS: str = "./ci/requirements.txt"
    ENVIRONMENT_VAR_FILE: str = f"{TEMP_DIR}/environment.json"
    RUN_LOG: str = f"{TEMP_DIR}/praktika_run.log"

    SECRET_GH_APP_ID: str = "GH_APP_ID"
    SECRET_GH_APP_PEM_KEY: str = "GH_APP_PEM_KEY"

    ENV_SETUP_SCRIPT: str = "/tmp/praktika_setup_env.sh"
    WORKFLOW_STATUS_FILE: str = f"{TEMP_DIR}/workflow_status.json"

    ######################################
    # CI Cache settings #
    ######################################
    CACHE_VERSION: int = 1
    CACHE_DIGEST_LEN: int = 20
    CACHE_S3_PATH: str = ""
    CACHE_LOCAL_PATH: str = f"{TEMP_DIR}/ci_cache"

    ######################################
    # Report settings #
    ######################################
    HTML_S3_PATH: str = ""
    HTML_PAGE_FILE: str = "./praktika/json.html"
    TEXT_CONTENT_EXTENSIONS: Iterable[str] = frozenset([".txt", ".log"])
    S3_BUCKET_TO_HTTP_ENDPOINT: Optional[Dict[str, str]] = None

    DOCKERHUB_USERNAME: str = ""
    DOCKERHUB_SECRET: str = ""
    DOCKER_WD: str = "/wd"

    ######################################
    # CI DB Settings #
    ######################################
    SECRET_CI_DB_URL: str = "CI_DB_URL"
    SECRET_CI_DB_PASSWORD: str = "CI_DB_PASSWORD"
    CI_DB_DB_NAME = ""
    CI_DB_TABLE_NAME = ""
    CI_DB_INSERT_TIMEOUT_SEC = 5


_USER_DEFINED_SETTINGS = [
    "S3_ARTIFACT_PATH",
    "CACHE_S3_PATH",
    "HTML_S3_PATH",
    "S3_BUCKET_TO_HTTP_ENDPOINT",
    "TEXT_CONTENT_EXTENSIONS",
    "TEMP_DIR",
    "OUTPUT_DIR",
    "INPUT_DIR",
    "CI_CONFIG_RUNS_ON",
    "DOCKER_BUILD_RUNS_ON",
    "CI_CONFIG_JOB_NAME",
    "PYTHON_INTERPRETER",
    "PYTHON_VERSION",
    "PYTHON_PACKET_MANAGER",
    "INSTALL_PYTHON_FOR_NATIVE_JOBS",
    "INSTALL_PYTHON_REQS_FOR_NATIVE_JOBS",
    "MAX_RETRIES_S3",
    "MAX_RETRIES_GH",
    "VALIDATE_FILE_PATHS",
    "DOCKERHUB_USERNAME",
    "DOCKERHUB_SECRET",
    "READY_FOR_MERGE_STATUS_NAME",
    "SECRET_CI_DB_URL",
    "SECRET_CI_DB_PASSWORD",
    "CI_DB_DB_NAME",
    "CI_DB_TABLE_NAME",
    "CI_DB_INSERT_TIMEOUT_SEC",
    "SECRET_GH_APP_PEM_KEY",
    "SECRET_GH_APP_ID",
]


class GHRunners:
    ubuntu = "ubuntu-latest"


if __name__ == "__main__":
    for setting in _USER_DEFINED_SETTINGS:
|
||||||
|
print(_Settings().__getattribute__(setting))
|
||||||
|
# print(dataclasses.asdict(_Settings()))
|
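A minimal sketch of how user-defined overrides could be layered on top of the _Settings defaults above. The override dict stands in for whatever a user settings module would export; the attribute names used here are real, the values and the trimmed-down dataclass are assumptions.

# Illustrative sketch only, assuming a hypothetical override source.
import dataclasses

@dataclasses.dataclass
class _SettingsSketch:
    TEMP_DIR: str = "/tmp/praktika"
    MAX_RETRIES_S3: int = 3

overrides = {"MAX_RETRIES_S3": 5}  # hypothetical content of a user settings module
settings = _SettingsSketch()
for key, value in overrides.items():
    assert hasattr(settings, key), f"unknown setting [{key}]"
    setattr(settings, key, value)
print(settings.MAX_RETRIES_S3)  # -> 5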
@@ -52,7 +52,7 @@ class CIDB:
             check_status=result.status,
             check_duration_ms=int(result.duration * 1000),
             check_start_time=Utils.timestamp_to_str(result.start_time),
-            report_url=env.get_report_url(settings=Settings),
+            report_url=env.get_report_url(),
             pull_request_url=env.CHANGE_URL,
             base_ref=env.BASE_BRANCH,
             base_repo=env.REPOSITORY,
@@ -23,7 +23,7 @@ class Digest:
         hash_string = hash_obj.hexdigest()
         return hash_string
 
-    def calc_job_digest(self, job_config: Job.Config, docker_digests):
+    def calc_job_digest(self, job_config: Job.Config):
         config = job_config.digest_config
         if not config:
             return "f" * Settings.CACHE_DIGEST_LEN
@@ -31,32 +31,32 @@
         cache_key = self._hash_digest_config(config)
 
         if cache_key in self.digest_cache:
-            print(
-                f"calc digest for job [{job_config.name}]: hash_key [{cache_key}] - from cache"
-            )
-            digest = self.digest_cache[cache_key]
-        else:
+            return self.digest_cache[cache_key]
+
         included_files = Utils.traverse_paths(
             job_config.digest_config.include_paths,
             job_config.digest_config.exclude_paths,
             sorted=True,
         )
 
         print(
             f"calc digest for job [{job_config.name}]: hash_key [{cache_key}], include [{len(included_files)}] files"
         )
+        # Sort files to ensure consistent hash calculation
+        included_files.sort()
+
+        # Calculate MD5 hash
+        res = ""
+        if not included_files:
+            res = "f" * Settings.CACHE_DIGEST_LEN
+            print(f"NOTE: empty digest config [{config}] - return dummy digest")
+        else:
             hash_md5 = hashlib.md5()
-            for i, file_path in enumerate(included_files):
-                hash_md5 = self._calc_file_digest(file_path, hash_md5)
-            digest = hash_md5.hexdigest()[: Settings.CACHE_DIGEST_LEN]
-            self.digest_cache[cache_key] = digest
-
-        if job_config.run_in_docker:
-            # respect docker digest in the job digest
-            docker_digest = docker_digests[job_config.run_in_docker.split("+")[0]]
-            digest = "-".join([docker_digest, digest])
-
-        return digest
+            for file_path in included_files:
+                res = self._calc_file_digest(file_path, hash_md5)
+        assert res
+        self.digest_cache[cache_key] = res
+        return res
 
     def calc_docker_digest(
         self,
@@ -103,10 +103,10 @@ class Digest:
             print(
                 f"WARNING: No valid file resolved by link {file_path} -> {resolved_path} - skipping digest calculation"
             )
-            return hash_md5
+            return hash_md5.hexdigest()[: Settings.CACHE_DIGEST_LEN]
 
         with open(resolved_path, "rb") as f:
             for chunk in iter(lambda: f.read(4096), b""):
                 hash_md5.update(chunk)
 
-        return hash_md5
+        return hash_md5.hexdigest()[: Settings.CACHE_DIGEST_LEN]
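A self-contained sketch of the digest technique the hunks above rely on: an incremental MD5 over sorted file contents, truncated to a fixed length, with a dummy digest for an empty file set. The function name and the hard-coded length are assumptions for illustration.

# Illustrative sketch only.
import hashlib

CACHE_DIGEST_LEN = 20  # stands in for Settings.CACHE_DIGEST_LEN

def files_digest(paths):
    if not paths:
        return "f" * CACHE_DIGEST_LEN  # dummy digest for an empty config
    md5 = hashlib.md5()
    for path in sorted(paths):  # sorted for a stable hash across runs
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(4096), b""):
                md5.update(chunk)
    return md5.hexdigest()[:CACHE_DIGEST_LEN]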
ci/praktika/environment.py (new file, 3 lines)
@@ -0,0 +1,3 @@
from praktika._environment import _Environment

Environment = _Environment.get()
@@ -18,7 +18,7 @@ class GH:
             ret_code, out, err = Shell.get_res_stdout_stderr(command, verbose=True)
             res = ret_code == 0
             if not res and "Validation Failed" in err:
-                print(f"ERROR: GH command validation error.")
+                print("ERROR: GH command validation error")
                 break
             if not res and "Bad credentials" in err:
                 print("ERROR: GH credentials/auth failure")
@@ -1,5 +1,6 @@
 from praktika._environment import _Environment
 from praktika.cache import Cache
+from praktika.mangle import _get_workflows
 from praktika.runtime import RunConfig
 from praktika.settings import Settings
 from praktika.utils import Utils
@@ -7,10 +8,11 @@ from praktika.utils import Utils
 
 
 class CacheRunnerHooks:
     @classmethod
-    def configure(cls, workflow):
-        workflow_config = RunConfig.from_fs(workflow.name)
-        docker_digests = workflow_config.digest_dockers
+    def configure(cls, _workflow):
+        workflow_config = RunConfig.from_fs(_workflow.name)
         cache = Cache()
+        assert _Environment.get().WORKFLOW_NAME
+        workflow = _get_workflows(name=_Environment.get().WORKFLOW_NAME)[0]
         print(f"Workflow Configure, workflow [{workflow.name}]")
         assert (
             workflow.enable_cache
@@ -18,13 +20,11 @@ class CacheRunnerHooks:
         artifact_digest_map = {}
         job_digest_map = {}
         for job in workflow.jobs:
-            digest = cache.digest.calc_job_digest(
-                job_config=job, docker_digests=docker_digests
-            )
             if not job.digest_config:
                 print(
                     f"NOTE: job [{job.name}] has no Config.digest_config - skip cache check, always run"
                 )
+            digest = cache.digest.calc_job_digest(job_config=job)
             job_digest_map[job.name] = digest
             if job.provides:
                 # assign the job digest also to the artifacts it provides
@@ -50,6 +50,7 @@ class CacheRunnerHooks:
         ), f"BUG, Workflow with enabled cache must have job digests after configuration, wf [{workflow.name}]"
 
         print("Check remote cache")
+        job_to_cache_record = {}
         for job_name, job_digest in workflow_config.digest_jobs.items():
             record = cache.fetch_success(job_name=job_name, job_digest=job_digest)
             if record:
@@ -59,7 +60,7 @@ class CacheRunnerHooks:
                 )
                 workflow_config.cache_success.append(job_name)
                 workflow_config.cache_success_base64.append(Utils.to_base64(job_name))
-                workflow_config.cache_jobs[job_name] = record
+                job_to_cache_record[job_name] = record
 
         print("Check artifacts to reuse")
         for job in workflow.jobs:
@@ -67,7 +68,7 @@ class CacheRunnerHooks:
             if job.provides:
                 for artifact_name in job.provides:
                     workflow_config.cache_artifacts[artifact_name] = (
-                        workflow_config.cache_jobs[job.name]
+                        job_to_cache_record[job.name]
                     )
 
         print(f"Write config to GH's job output")
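A small sketch of the reuse step shown in the last hunk above: once a job's digest hits the cache, its record is attached to every artifact that job provides. The job names, artifact names, and record value are made-up sample data.

# Illustrative sketch only, with hypothetical data.
job_to_cache_record = {"Build (amd_release)": "record-1"}
job_provides = {"Build (amd_release)": ["clickhouse", "unit_tests"]}

cache_artifacts = {}
for job, record in job_to_cache_record.items():
    for artifact in job_provides.get(job, []):
        cache_artifacts[artifact] = record
print(cache_artifacts)  # {'clickhouse': 'record-1', 'unit_tests': 'record-1'}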
@@ -1,125 +1,63 @@
 import dataclasses
 import json
+import urllib.parse
 from pathlib import Path
 from typing import List
 
 from praktika._environment import _Environment
 from praktika.gh import GH
 from praktika.parser import WorkflowConfigParser
-from praktika.result import Result, ResultInfo, _ResultS3
+from praktika.result import Result, ResultInfo
 from praktika.runtime import RunConfig
 from praktika.s3 import S3
 from praktika.settings import Settings
-from praktika.utils import Utils
+from praktika.utils import Shell, Utils
 
 
 @dataclasses.dataclass
 class GitCommit:
-    # date: str
-    # message: str
+    date: str
+    message: str
     sha: str
 
     @staticmethod
-    def from_json(file) -> List["GitCommit"]:
+    def from_json(json_data: str) -> List["GitCommit"]:
         commits = []
-        json_data = None
         try:
-            with open(file, "r", encoding="utf-8") as f:
-                json_data = json.load(f)
+            data = json.loads(json_data)
             commits = [
                 GitCommit(
-                    # message=commit["messageHeadline"],
-                    sha=commit["sha"],
-                    # date=commit["committedDate"],
+                    message=commit["messageHeadline"],
+                    sha=commit["oid"],
+                    date=commit["committedDate"],
                 )
-                for commit in json_data
+                for commit in data.get("commits", [])
             ]
         except Exception as e:
             print(
-                f"ERROR: Failed to deserialize commit's data [{json_data}], ex: [{e}]"
+                f"ERROR: Failed to deserialize commit's data: [{json_data}], ex: [{e}]"
            )
 
         return commits
 
-    @classmethod
-    def update_s3_data(cls):
-        env = _Environment.get()
-        sha = env.SHA
-        if not sha:
-            print("WARNING: Failed to retrieve commit sha")
-            return
-        commits = cls.pull_from_s3()
-        for commit in commits:
-            if sha == commit.sha:
-                print(
-                    f"INFO: Sha already present in commits data [{sha}] - skip data update"
-                )
-                return
-        commits.append(GitCommit(sha=sha))
-        cls.push_to_s3(commits)
-        return
-
-    @classmethod
-    def dump(cls, commits):
-        commits_ = []
-        for commit in commits:
-            commits_.append(dataclasses.asdict(commit))
-        with open(cls.file_name(), "w", encoding="utf8") as f:
-            json.dump(commits_, f)
-
-    @classmethod
-    def pull_from_s3(cls):
-        local_path = Path(cls.file_name())
-        file_name = local_path.name
-        env = _Environment.get()
-        s3_path = f"{Settings.HTML_S3_PATH}/{cls.get_s3_prefix(pr_number=env.PR_NUMBER, branch=env.BRANCH)}/{file_name}"
-        if not S3.copy_file_from_s3(s3_path=s3_path, local_path=local_path):
-            print(f"WARNING: failed to cp file [{s3_path}] from s3")
-            return []
-        return cls.from_json(local_path)
-
-    @classmethod
-    def push_to_s3(cls, commits):
-        print(f"INFO: push commits data to s3, commits num [{len(commits)}]")
-        cls.dump(commits)
-        local_path = Path(cls.file_name())
-        file_name = local_path.name
-        env = _Environment.get()
-        s3_path = f"{Settings.HTML_S3_PATH}/{cls.get_s3_prefix(pr_number=env.PR_NUMBER, branch=env.BRANCH)}/{file_name}"
-        if not S3.copy_file_to_s3(s3_path=s3_path, local_path=local_path, text=True):
-            print(f"WARNING: failed to cp file [{local_path}] to s3")
-
-    @classmethod
-    def get_s3_prefix(cls, pr_number, branch):
-        prefix = ""
-        assert pr_number or branch
-        if pr_number and pr_number > 0:
-            prefix += f"{pr_number}"
-        else:
-            prefix += f"{branch}"
-        return prefix
-
-    @classmethod
-    def file_name(cls):
-        return f"{Settings.TEMP_DIR}/commits.json"
-
-    # def _get_pr_commits(pr_number):
-    #     res = []
-    #     if not pr_number:
-    #         return res
-    #     output = Shell.get_output(f"gh pr view {pr_number} --json commits")
-    #     if output:
-    #         res = GitCommit.from_json(output)
-    #     return res
-
 
 class HtmlRunnerHooks:
     @classmethod
     def configure(cls, _workflow):
+        def _get_pr_commits(pr_number):
+            res = []
+            if not pr_number:
+                return res
+            output = Shell.get_output(f"gh pr view {pr_number} --json commits")
+            if output:
+                res = GitCommit.from_json(output)
+            return res
 
         # generate pending Results for all jobs in the workflow
         if _workflow.enable_cache:
             skip_jobs = RunConfig.from_fs(_workflow.name).cache_success
-            job_cache_records = RunConfig.from_fs(_workflow.name).cache_jobs
         else:
             skip_jobs = []
 
@@ -129,22 +67,36 @@
             if job.name not in skip_jobs:
                 result = Result.generate_pending(job.name)
             else:
-                result = Result.generate_skipped(job.name, job_cache_records[job.name])
+                result = Result.generate_skipped(job.name)
             results.append(result)
         summary_result = Result.generate_pending(_workflow.name, results=results)
-        summary_result.links.append(env.CHANGE_URL)
-        summary_result.links.append(env.RUN_URL)
+        summary_result.aux_links.append(env.CHANGE_URL)
+        summary_result.aux_links.append(env.RUN_URL)
         summary_result.start_time = Utils.timestamp()
+        page_url = "/".join(
+            ["https:/", Settings.HTML_S3_PATH, str(Path(Settings.HTML_PAGE_FILE).name)]
+        )
+        for bucket, endpoint in Settings.S3_BUCKET_TO_HTTP_ENDPOINT.items():
+            page_url = page_url.replace(bucket, endpoint)
+        # TODO: add support for non-PRs (use branch?)
+        page_url += f"?PR={env.PR_NUMBER}&sha=latest&name_0={urllib.parse.quote(env.WORKFLOW_NAME, safe='')}"
+        summary_result.html_link = page_url
+
+        # clean the previous latest results in PR if any
+        if env.PR_NUMBER:
+            S3.clean_latest_result()
+        S3.copy_result_to_s3(
+            summary_result,
+            unlock=False,
+        )
 
-        assert _ResultS3.copy_result_to_s3_with_version(summary_result, version=0)
-        page_url = env.get_report_url(settings=Settings)
         print(f"CI Status page url [{page_url}]")
 
         res1 = GH.post_commit_status(
             name=_workflow.name,
             status=Result.Status.PENDING,
             description="",
-            url=env.get_report_url(settings=Settings, latest=True),
+            url=page_url,
         )
         res2 = GH.post_pr_comment(
             comment_body=f"Workflow [[{_workflow.name}]({page_url})], commit [{_Environment.get().SHA[:8]}]",
@@ -154,15 +106,23 @@
             Utils.raise_with_error(
                 "Failed to set both GH commit status and PR comment with Workflow Status, cannot proceed"
             )
 
         if env.PR_NUMBER:
-            # TODO: enable for branch, add commit number limiting
-            GitCommit.update_s3_data()
+            commits = _get_pr_commits(env.PR_NUMBER)
+            # TODO: upload commits data to s3 to visualise it on a report page
+            print(commits)
 
     @classmethod
     def pre_run(cls, _workflow, _job):
         result = Result.from_fs(_job.name)
-        _ResultS3.update_workflow_results(
-            workflow_name=_workflow.name, new_sub_results=result
+        S3.copy_result_from_s3(
+            Result.file_name_static(_workflow.name),
+        )
+        workflow_result = Result.from_fs(_workflow.name)
+        workflow_result.update_sub_result(result)
+        S3.copy_result_to_s3(
+            workflow_result,
+            unlock=True,
         )
 
     @classmethod
@@ -172,13 +132,14 @@
     @classmethod
     def post_run(cls, _workflow, _job, info_errors):
         result = Result.from_fs(_job.name)
-        _ResultS3.upload_result_files_to_s3(result)
-        _ResultS3.copy_result_to_s3(result)
-
         env = _Environment.get()
+        S3.copy_result_from_s3(
+            Result.file_name_static(_workflow.name),
+            lock=True,
+        )
+        workflow_result = Result.from_fs(_workflow.name)
+        print(f"Workflow info [{workflow_result.info}], info_errors [{info_errors}]")
 
-        new_sub_results = [result]
-        new_result_info = ""
         env_info = env.REPORT_INFO
         if env_info:
             print(
@@ -190,8 +151,14 @@
             info_str = f"{_job.name}:\n"
             info_str += "\n".join(info_errors)
             print("Update workflow results with new info")
-            new_result_info = info_str
+            workflow_result.set_info(info_str)
+
+        old_status = workflow_result.status
+
+        S3.upload_result_files_to_s3(result)
+        workflow_result.update_sub_result(result)
 
+        skipped_job_results = []
         if not result.is_ok():
             print(
                 "Current job failed - find dependee jobs in the workflow and set their statuses to skipped"
@@ -204,7 +171,7 @@
                 print(
                     f"NOTE: Set job [{dependee_job.name}] status to [{Result.Status.SKIPPED}] due to current failure"
                 )
-                new_sub_results.append(
+                skipped_job_results.append(
                     Result(
                         name=dependee_job.name,
                         status=Result.Status.SKIPPED,
@@ -212,18 +179,20 @@
                         + f" [{_job.name}]",
                     )
                 )
+        for skipped_job_result in skipped_job_results:
+            workflow_result.update_sub_result(skipped_job_result)
 
-        updated_status = _ResultS3.update_workflow_results(
-            new_info=new_result_info,
-            new_sub_results=new_sub_results,
-            workflow_name=_workflow.name,
+        S3.copy_result_to_s3(
+            workflow_result,
+            unlock=True,
         )
-
-        if updated_status:
-            print(f"Update GH commit status [{result.name}]: [{updated_status}]")
+        if workflow_result.status != old_status:
+            print(
+                f"Update GH commit status [{result.name}]: [{old_status} -> {workflow_result.status}], link [{workflow_result.html_link}]"
+            )
             GH.post_commit_status(
-                name=_workflow.name,
-                status=GH.convert_to_gh_status(updated_status),
+                name=workflow_result.name,
+                status=GH.convert_to_gh_status(workflow_result.status),
                 description="",
-                url=env.get_report_url(settings=Settings, latest=True),
+                url=workflow_result.html_link,
             )
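A standalone sketch of the commit-parsing step used above: turning the JSON emitted by "gh pr view <N> --json commits" into GitCommit objects. The field names (oid, messageHeadline, committedDate) are the ones the new code reads; the sample payload and helper name are made up for illustration.

# Illustrative sketch only, with a made-up payload.
import dataclasses
import json
from typing import List

@dataclasses.dataclass
class GitCommit:
    date: str
    message: str
    sha: str

def commits_from_json(json_data: str) -> List[GitCommit]:
    data = json.loads(json_data)
    return [
        GitCommit(
            message=c["messageHeadline"],
            sha=c["oid"],
            date=c["committedDate"],
        )
        for c in data.get("commits", [])
    ]

sample = '{"commits": [{"oid": "abc123", "messageHeadline": "Fix test", "committedDate": "2024-11-06"}]}'
print(commits_from_json(sample))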
@@ -52,58 +52,30 @@ class Job:
             self,
             parameter: Optional[List[Any]] = None,
             runs_on: Optional[List[List[str]]] = None,
-            provides: Optional[List[List[str]]] = None,
-            requires: Optional[List[List[str]]] = None,
             timeout: Optional[List[int]] = None,
         ):
             assert (
                 parameter or runs_on
             ), "Either :parameter or :runs_on must be non empty list for parametrisation"
-            if runs_on:
-                assert isinstance(runs_on, list) and isinstance(runs_on[0], list)
             if not parameter:
                 parameter = [None] * len(runs_on)
             if not runs_on:
                 runs_on = [None] * len(parameter)
             if not timeout:
                 timeout = [None] * len(parameter)
-            if not provides:
-                provides = [None] * len(parameter)
-            if not requires:
-                requires = [None] * len(parameter)
             assert (
-                len(parameter)
-                == len(runs_on)
-                == len(timeout)
-                == len(provides)
-                == len(requires)
-            ), f"Parametrization lists must be of the same size [{len(parameter)}, {len(runs_on)}, {len(timeout)}, {len(provides)}, {len(requires)}]"
+                len(parameter) == len(runs_on) == len(timeout)
+            ), "Parametrization lists must be of the same size"
 
             res = []
-            for parameter_, runs_on_, timeout_, provides_, requires_ in zip(
-                parameter, runs_on, timeout, provides, requires
-            ):
+            for parameter_, runs_on_, timeout_ in zip(parameter, runs_on, timeout):
                 obj = copy.deepcopy(self)
-                assert (
-                    not obj.provides
-                ), "Job.Config.provides must be empty for parametrized jobs"
                 if parameter_:
                     obj.parameter = parameter_
-                    obj.command = obj.command.format(PARAMETER=parameter_)
                 if runs_on_:
                     obj.runs_on = runs_on_
                 if timeout_:
                     obj.timeout = timeout_
-                if provides_:
-                    assert (
-                        not obj.provides
-                    ), "Job.Config.provides must be empty for parametrized jobs"
-                    obj.provides = provides_
-                if requires_:
-                    assert (
-                        not obj.requires
-                    ), "Job.Config.requires and parametrize(requires=...) are both set"
-                    obj.requires = requires_
                 obj.name = obj.get_job_name_with_parameter()
                 res.append(obj)
             return res
@@ -112,16 +84,13 @@
         name, parameter, runs_on = self.name, self.parameter, self.runs_on
         res = name
         name_params = []
-        if parameter:
-            if isinstance(parameter, list) or isinstance(parameter, dict):
-                name_params.append(json.dumps(parameter))
-            else:
-                name_params.append(parameter)
-        elif runs_on:
+        if isinstance(parameter, list) or isinstance(parameter, dict):
+            name_params.append(json.dumps(parameter))
+        elif parameter is not None:
+            name_params.append(parameter)
+        if runs_on:
             assert isinstance(runs_on, list)
             name_params.append(json.dumps(runs_on))
-        else:
-            assert False
         if name_params:
             name_params = [str(param) for param in name_params]
             res += f" ({', '.join(name_params)})"
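A reduced sketch of the zip-based parametrisation pattern in the hunks above, using plain dicts instead of Job.Config so it runs on its own. The job name, parameters, and the name-suffix format are made-up examples, not the project's exact behaviour.

# Illustrative sketch only.
import copy
import json

def parametrize(base, parameter=None, runs_on=None, timeout=None):
    assert parameter or runs_on, "Either parameter or runs_on must be non empty"
    if not parameter:
        parameter = [None] * len(runs_on)
    if not runs_on:
        runs_on = [None] * len(parameter)
    if not timeout:
        timeout = [None] * len(parameter)
    assert len(parameter) == len(runs_on) == len(timeout)
    res = []
    for parameter_, runs_on_, timeout_ in zip(parameter, runs_on, timeout):
        obj = copy.deepcopy(base)
        if parameter_:
            obj["parameter"] = parameter_
        if runs_on_:
            obj["runs_on"] = runs_on_
        if timeout_:
            obj["timeout"] = timeout_
        obj["name"] = f"{base['name']} ({json.dumps(parameter_)})"
        res.append(obj)
    return res

print(parametrize({"name": "Stateless tests"}, parameter=["release", "debug"]))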
@@ -89,27 +89,15 @@
             letter-spacing: -0.5px;
         }
 
-        .dropdown-value {
-            width: 100px;
-            font-weight: normal;
-            font-family: inherit;
-            background-color: transparent;
-            color: inherit;
-            /*border: none;*/
-            /*outline: none;*/
-            /*cursor: pointer;*/
-        }
-
         #result-container {
             background-color: var(--tile-background);
             margin-left: calc(var(--status-width) + 20px);
-            padding: 0;
+            padding: 20px;
             box-sizing: border-box;
             text-align: center;
             font-size: 18px;
             font-weight: normal;
             flex-grow: 1;
-            margin-bottom: 40px;
         }
 
         #footer {
@@ -201,7 +189,10 @@
         }
 
         th.name-column, td.name-column {
-            min-width: 350px;
+            max-width: 400px; /* Set the maximum width for the column */
+            white-space: nowrap; /* Prevent text from wrapping */
+            overflow: hidden; /* Hide the overflowed text */
+            text-overflow: ellipsis; /* Show ellipsis (...) for overflowed text */
         }
 
         th.status-column, td.status-column {
@@ -291,12 +282,6 @@
             }
         }
 
-        function updateUrlParameter(paramName, paramValue) {
-            const url = new URL(window.location.href);
-            url.searchParams.set(paramName, paramValue);
-            window.location.href = url.toString();
-        }
-
         // Attach the toggle function to the click event of the icon
         document.getElementById('theme-toggle').addEventListener('click', toggleTheme);
 
@@ -306,14 +291,14 @@
             const monthNames = ["Jan", "Feb", "Mar", "Apr", "May", "Jun",
                 "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"];
             const month = monthNames[date.getMonth()];
-            //const year = date.getFullYear();
+            const year = date.getFullYear();
             const hours = String(date.getHours()).padStart(2, '0');
             const minutes = String(date.getMinutes()).padStart(2, '0');
             const seconds = String(date.getSeconds()).padStart(2, '0');
             //const milliseconds = String(date.getMilliseconds()).padStart(2, '0');
 
             return showDate
-                ? `${day}'${month} ${hours}:${minutes}:${seconds}`
+                ? `${day}-${month}-${year} ${hours}:${minutes}:${seconds}`
                 : `${hours}:${minutes}:${seconds}`;
         }
 
@@ -343,7 +328,7 @@
             const milliseconds = Math.floor((duration % 1) * 1000);
 
             const formattedSeconds = String(seconds);
-            const formattedMilliseconds = String(milliseconds).padStart(2, '0').slice(-2);
+            const formattedMilliseconds = String(milliseconds).padStart(3, '0');
 
             return `${formattedSeconds}.${formattedMilliseconds}`;
         }
@@ -361,7 +346,8 @@
             return 'status-other';
         }
 
-        function addKeyValueToStatus(key, value, options = null) {
+        function addKeyValueToStatus(key, value) {
+
             const statusContainer = document.getElementById('status-container');
 
             let keyValuePair = document.createElement('div');
@@ -371,40 +357,12 @@
             keyElement.className = 'json-key';
             keyElement.textContent = key + ':';
 
-            let valueElement;
-
-            if (options) {
-                // Create dropdown if options are provided
-                valueElement = document.createElement('select');
-                valueElement.className = 'dropdown-value';
-
-                options.forEach(optionValue => {
-                    const option = document.createElement('option');
-                    option.value = optionValue;
-                    option.textContent = optionValue.slice(0, 10);
-
-                    // Set the initially selected option
-                    if (optionValue === value) {
-                        option.selected = true;
-                    }
-
-                    valueElement.appendChild(option);
-                });
-
-                // Update the URL parameter when the selected value changes
-                valueElement.addEventListener('change', (event) => {
-                    const selectedValue = event.target.value;
-                    updateUrlParameter(key, selectedValue);
-                });
-            } else {
-                // Create a simple text display if no options are provided
-                valueElement = document.createElement('div');
-                valueElement.className = 'json-value';
-                valueElement.textContent = value || 'N/A'; // Display 'N/A' if value is null
-            }
+            const valueElement = document.createElement('div');
+            valueElement.className = 'json-value';
+            valueElement.textContent = value;
 
-            keyValuePair.appendChild(keyElement);
-            keyValuePair.appendChild(valueElement);
+            keyValuePair.appendChild(keyElement)
+            keyValuePair.appendChild(valueElement)
             statusContainer.appendChild(keyValuePair);
         }
 
@@ -528,12 +486,12 @@
         const columns = ['name', 'status', 'start_time', 'duration', 'info'];
 
         const columnSymbols = {
-            name: '🗂️',
-            status: '🧾',
+            name: '📂',
+            status: '✔️',
             start_time: '🕒',
             duration: '⏳',
-            info: '📝',
-            files: '📎'
+            info: 'ℹ️',
+            files: '📄'
         };
 
         function createResultsTable(results, nest_level) {
@@ -542,14 +500,16 @@
             const thead = document.createElement('thead');
             const tbody = document.createElement('tbody');
 
+            // Get the current URL parameters
+            const currentUrl = new URL(window.location.href);
+
             // Create table headers based on the fixed columns
             const headerRow = document.createElement('tr');
             columns.forEach(column => {
                 const th = document.createElement('th');
-                th.textContent = columnSymbols[column] || column;
+                th.textContent = th.textContent = columnSymbols[column] || column;
                 th.style.cursor = 'pointer'; // Make headers clickable
-                th.setAttribute('data-sort-direction', 'asc'); // Default sort direction
-                th.addEventListener('click', () => sortTable(results, column, columnSymbols[column] || column, tbody, nest_level, columns)); // Add click event to sort the table
+                th.addEventListener('click', () => sortTable(results, column, tbody, nest_level)); // Add click event to sort the table
                 headerRow.appendChild(th);
             });
             thead.appendChild(headerRow);
@@ -601,7 +561,8 @@
                     td.classList.add('time-column');
                     td.textContent = value ? formatDuration(value) : '';
                 } else if (column === 'info') {
-                    td.textContent = value.includes('\n') ? '↵' : (value || '');
+                    // For info and other columns, just display the value
+                    td.textContent = value || '';
                     td.classList.add('info-column');
                 }
 
@@ -612,33 +573,39 @@
             });
         }
 
-        function sortTable(results, column, key, tbody, nest_level, columns) {
+        function sortTable(results, key, tbody, nest_level) {
             // Find the table header element for the given key
-            const tableHeaders = document.querySelectorAll('th');
-            let th = Array.from(tableHeaders).find(header => header.textContent === key);
+            let th = null;
+            const tableHeaders = document.querySelectorAll('th'); // Select all table headers
+            tableHeaders.forEach(header => {
+                if (header.textContent.trim().toLowerCase() === key.toLowerCase()) {
+                    th = header;
+                }
+            });
 
             if (!th) {
                 console.error(`No table header found for key: ${key}`);
                 return;
             }
 
-            const ascending = th.getAttribute('data-sort-direction') === 'asc';
-            th.setAttribute('data-sort-direction', ascending ? 'desc' : 'asc');
+            // Determine the current sort direction
+            let ascending = th.getAttribute('data-sort-direction') === 'asc' ? false : true;
+
+            // Toggle the sort direction for the next click
+            th.setAttribute('data-sort-direction', ascending ? 'asc' : 'desc');
+
+            // Sort the results array by the given key
             results.sort((a, b) => {
-                if (a[column] < b[column]) return ascending ? -1 : 1;
-                if (a[column] > b[column]) return ascending ? 1 : -1;
+                if (a[key] < b[key]) return ascending ? -1 : 1;
+                if (a[key] > b[key]) return ascending ? 1 : -1;
                 return 0;
             });
 
-            // Clear the existing rows in tbody
-            tbody.innerHTML = '';
-
             // Re-populate the table with sorted data
             populateTableRows(tbody, results, columns, nest_level);
         }
 
-        function loadResultsJSON(PR, sha, nameParams) {
+        function loadJSON(PR, sha, nameParams) {
             const infoElement = document.getElementById('info-container');
             let lastModifiedTime = null;
             const task = nameParams[0].toLowerCase();
@@ -663,9 +630,12 @@
                 let targetData = navigatePath(data, nameParams);
                 let nest_level = nameParams.length;
 
-                // Add footer links from top-level Result
-                if (Array.isArray(data.links) && data.links.length > 0) {
-                    data.links.forEach(link => {
+                if (targetData) {
+                    infoElement.style.display = 'none';
+
+                    // Handle footer links if present
+                    if (Array.isArray(data.aux_links) && data.aux_links.length > 0) {
+                        data.aux_links.forEach(link => {
                             const a = document.createElement('a');
                             a.href = link;
                             a.textContent = link.split('/').pop();
@@ -674,10 +644,6 @@
                     });
                 }
 
-                if (targetData) {
-                    //infoElement.style.display = 'none';
-                    infoElement.innerHTML = (targetData.info || '').replace(/\n/g, '<br>');
-
                     addStatusToStatus(targetData.status, targetData.start_time, targetData.duration)
 
                     // Handle links
@@ -755,62 +721,22 @@
             }
         });
 
-        let path_commits_json = '';
-        let commitsArray = [];
-
         if (PR) {
-            addKeyValueToStatus("PR", PR);
-            const baseUrl = window.location.origin + window.location.pathname.replace('/json.html', '');
-            path_commits_json = `${baseUrl}/${encodeURIComponent(PR)}/commits.json`;
+            addKeyValueToStatus("PR", PR)
         } else {
-            // Placeholder for a different path when PR is missing
-            console.error("PR parameter is missing. Setting alternate commits path.");
-            path_commits_json = '/path/to/alternative/commits.json';
+            console.error("TODO")
         }
+        addKeyValueToStatus("sha", sha);
 
-        function loadCommitsArray(path) {
-            return fetch(path, { cache: "no-cache" })
-                .then(response => {
-                    if (!response.ok) {
-                        console.error(`HTTP error! status: ${response.status}`)
-                        return [];
-                    }
-                    return response.json();
-                })
-                .then(data => {
-                    if (Array.isArray(data) && data.every(item => typeof item === 'object' && item.hasOwnProperty('sha'))) {
-                        return data.map(item => item.sha);
-                    } else {
-                        throw new Error('Invalid data format: expected array of objects with a "sha" key');
-                    }
-                })
-                .catch(error => {
-                    console.error('Error loading commits JSON:', error);
-                    return []; // Return an empty array if an error occurs
-                });
-        }
-
-        loadCommitsArray(path_commits_json)
-            .then(data => {
-                commitsArray = data;
-            })
-            .finally(() => {
-                // Proceed with the rest of the initialization
-                addKeyValueToStatus("sha", sha || "latest", commitsArray.concat(["latest"]));
-
         if (nameParams[1]) {
             addKeyValueToStatus("job", nameParams[1]);
         }
         addKeyValueToStatus("workflow", nameParams[0]);
 
-        // Check if all required parameters are present to load JSON
         if (PR && sha && root_name) {
-            const shaToLoad = (sha === 'latest') ? commitsArray[commitsArray.length - 1] : sha;
-            loadResultsJSON(PR, shaToLoad, nameParams);
+            loadJSON(PR, sha, nameParams);
         } else {
             document.getElementById('title').textContent = 'Error: Missing required URL parameters: PR, sha, or name_0';
         }
-            });
     }
 
     window.onload = init;
|
|||||||
import copy
|
import copy
|
||||||
import importlib.util
|
import importlib.util
|
||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
|
from typing import Any, Dict
|
||||||
|
|
||||||
from praktika import Job
|
from praktika import Job
|
||||||
from praktika.settings import Settings
|
from praktika._settings import _USER_DEFINED_SETTINGS, _Settings
|
||||||
from praktika.utils import Utils
|
from praktika.utils import ContextManager, Utils
|
||||||
|
|
||||||
|
|
||||||
def _get_workflows(name=None, file=None):
|
def _get_workflows(name=None, file=None):
|
||||||
@ -13,13 +14,14 @@ def _get_workflows(name=None, file=None):
|
|||||||
"""
|
"""
|
||||||
res = []
|
res = []
|
||||||
|
|
||||||
directory = Path(Settings.WORKFLOWS_DIRECTORY)
|
with ContextManager.cd():
|
||||||
|
directory = Path(_Settings.WORKFLOWS_DIRECTORY)
|
||||||
for py_file in directory.glob("*.py"):
|
for py_file in directory.glob("*.py"):
|
||||||
if file and file not in str(py_file):
|
if file and file not in str(py_file):
|
||||||
continue
|
continue
|
||||||
module_name = py_file.name.removeprefix(".py")
|
module_name = py_file.name.removeprefix(".py")
|
||||||
spec = importlib.util.spec_from_file_location(
|
spec = importlib.util.spec_from_file_location(
|
||||||
module_name, f"{Settings.WORKFLOWS_DIRECTORY}/{module_name}"
|
module_name, f"{_Settings.WORKFLOWS_DIRECTORY}/{module_name}"
|
||||||
)
|
)
|
||||||
assert spec
|
assert spec
|
||||||
foo = importlib.util.module_from_spec(spec)
|
foo = importlib.util.module_from_spec(spec)
|
||||||
@ -56,6 +58,7 @@ def _update_workflow_artifacts(workflow):
|
|||||||
artifact_job = {}
|
artifact_job = {}
|
||||||
for job in workflow.jobs:
|
for job in workflow.jobs:
|
||||||
for artifact_name in job.provides:
|
for artifact_name in job.provides:
|
||||||
|
assert artifact_name not in artifact_job
|
||||||
artifact_job[artifact_name] = job.name
|
artifact_job[artifact_name] = job.name
|
||||||
for artifact in workflow.artifacts:
|
for artifact in workflow.artifacts:
|
||||||
artifact._provided_by = artifact_job[artifact.name]
|
artifact._provided_by = artifact_job[artifact.name]
|
||||||
@ -105,3 +108,30 @@ def _update_workflow_with_native_jobs(workflow):
|
|||||||
for job in workflow.jobs:
|
for job in workflow.jobs:
|
||||||
aux_job.requires.append(job.name)
|
aux_job.requires.append(job.name)
|
||||||
workflow.jobs.append(aux_job)
|
workflow.jobs.append(aux_job)
|
||||||
|
|
||||||
|
|
||||||
|
def _get_user_settings() -> Dict[str, Any]:
|
||||||
|
"""
|
||||||
|
Gets user's settings
|
||||||
|
"""
|
||||||
|
res = {} # type: Dict[str, Any]
|
||||||
|
|
||||||
|
directory = Path(_Settings.SETTINGS_DIRECTORY)
|
||||||
|
for py_file in directory.glob("*.py"):
|
||||||
|
module_name = py_file.name.removeprefix(".py")
|
||||||
|
spec = importlib.util.spec_from_file_location(
|
||||||
|
module_name, f"{_Settings.SETTINGS_DIRECTORY}/{module_name}"
|
||||||
|
)
|
||||||
|
assert spec
|
||||||
|
foo = importlib.util.module_from_spec(spec)
|
||||||
|
assert spec.loader
|
||||||
|
spec.loader.exec_module(foo)
|
||||||
|
for setting in _USER_DEFINED_SETTINGS:
|
||||||
|
try:
|
||||||
|
value = getattr(foo, setting)
|
||||||
|
res[setting] = value
|
||||||
|
print(f"Apply user defined setting [{setting} = {value}]")
|
||||||
|
except Exception as e:
|
||||||
|
pass
|
||||||
|
|
||||||
|
return res
|
||||||
|
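A compact sketch of the importlib pattern that _get_user_settings() above relies on: load every .py file in a directory as a module and pick out whitelisted attributes. The directory argument and the whitelist below are placeholders, not the real _USER_DEFINED_SETTINGS list.

# Illustrative sketch only, assuming a hypothetical whitelist.
import importlib.util
from pathlib import Path

WHITELIST = ["HTML_S3_PATH", "S3_ARTIFACT_PATH"]  # stand-in for _USER_DEFINED_SETTINGS

def load_settings(directory: str) -> dict:
    res = {}
    for py_file in Path(directory).glob("*.py"):
        spec = importlib.util.spec_from_file_location(py_file.stem, str(py_file))
        assert spec and spec.loader
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)  # execute the user file as a module
        for name in WHITELIST:
            if hasattr(module, name):
                res[name] = getattr(module, name)
    return res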
@@ -10,8 +10,9 @@ from praktika.gh import GH
 from praktika.hook_cache import CacheRunnerHooks
 from praktika.hook_html import HtmlRunnerHooks
 from praktika.mangle import _get_workflows
-from praktika.result import Result, ResultInfo, _ResultS3
+from praktika.result import Result, ResultInfo
 from praktika.runtime import RunConfig
+from praktika.s3 import S3
 from praktika.settings import Settings
 from praktika.utils import Shell, Utils
 
@@ -150,7 +151,7 @@ def _config_workflow(workflow: Workflow.Config, job_name):
         status = Result.Status.ERROR
         print("ERROR: ", info)
     else:
-        assert Shell.check(f"{Settings.PYTHON_INTERPRETER} -m praktika yaml")
+        Shell.check(f"{Settings.PYTHON_INTERPRETER} -m praktika --generate")
         exit_code, output, err = Shell.get_res_stdout_stderr(
             f"git diff-index HEAD -- {Settings.WORKFLOW_PATH_PREFIX}"
         )
@@ -224,7 +225,6 @@ def _config_workflow(workflow: Workflow.Config, job_name):
         cache_success=[],
         cache_success_base64=[],
         cache_artifacts={},
-        cache_jobs={},
     ).dump()
 
     # checks:
@@ -250,9 +250,6 @@ def _config_workflow(workflow: Workflow.Config, job_name):
             info_lines.append(job_name + ": " + info)
             results.append(result_)
 
-    if workflow.enable_merge_commit:
-        assert False, "NOT implemented"
-
     # config:
     if workflow.dockers:
         print("Calculate docker's digests")
@@ -310,8 +307,9 @@ def _finish_workflow(workflow, job_name):
     print(env.get_needs_statuses())
 
     print("Check Workflow results")
-    _ResultS3.copy_result_from_s3(
+    S3.copy_result_from_s3(
         Result.file_name_static(workflow.name),
+        lock=False,
     )
     workflow_result = Result.from_fs(workflow.name)
 
@@ -341,12 +339,10 @@ def _finish_workflow(workflow, job_name):
                 f"NOTE: Result for [{result.name}] has not ok status [{result.status}]"
             )
             ready_for_merge_status = Result.Status.FAILED
-            failed_results.append(result.name)
+            failed_results.append(result.name.split("(", maxsplit=1)[0])  # cut name
 
     if failed_results:
-        ready_for_merge_description = (
-            f'Failed {len(failed_results)} "Required for Merge" jobs'
-        )
+        ready_for_merge_description = f"failed: {', '.join(failed_results)}"
 
     if not GH.post_commit_status(
         name=Settings.READY_FOR_MERGE_STATUS_NAME + f" [{workflow.name}]",
@@ -358,11 +354,14 @@ def _finish_workflow(workflow, job_name):
         env.add_info(ResultInfo.GH_STATUS_ERROR)
 
     if update_final_report:
-        _ResultS3.copy_result_to_s3(
+        S3.copy_result_to_s3(
             workflow_result,
-        )
+            unlock=False,
+        )  # no lock - no unlock
 
-    Result.from_fs(job_name).set_status(Result.Status.SUCCESS)
+    Result.from_fs(job_name).set_status(Result.Status.SUCCESS).set_info(
+        ready_for_merge_description
+    )
 
 
 if __name__ == "__main__":
|
|||||||
import dataclasses
|
import dataclasses
|
||||||
import datetime
|
import datetime
|
||||||
import sys
|
import sys
|
||||||
|
from collections.abc import Container
|
||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
from typing import Any, Dict, List, Optional, Union
|
from typing import Any, Dict, List, Optional
|
||||||
|
|
||||||
from praktika._environment import _Environment
|
from praktika._environment import _Environment
|
||||||
from praktika.cache import Cache
|
from praktika._settings import _Settings
|
||||||
from praktika.s3 import S3
|
|
||||||
from praktika.settings import Settings
|
|
||||||
from praktika.utils import ContextManager, MetaClasses, Shell, Utils
|
from praktika.utils import ContextManager, MetaClasses, Shell, Utils
|
||||||
|
|
||||||
|
|
||||||
@ -28,6 +27,10 @@ class Result(MetaClasses.Serializable):
|
|||||||
files (List[str]): A list of file paths or names related to the result.
|
files (List[str]): A list of file paths or names related to the result.
|
||||||
links (List[str]): A list of URLs related to the result (e.g., links to reports or resources).
|
links (List[str]): A list of URLs related to the result (e.g., links to reports or resources).
|
||||||
info (str): Additional information about the result. Free-form text.
|
info (str): Additional information about the result. Free-form text.
|
||||||
|
# TODO: rename
|
||||||
|
aux_links (List[str]): A list of auxiliary links that provide additional context for the result.
|
||||||
|
# TODO: remove
|
||||||
|
html_link (str): A direct link to an HTML representation of the result (e.g., a detailed report page).
|
||||||
|
|
||||||
Inner Class:
|
Inner Class:
|
||||||
Status: Defines possible statuses for the task, such as "success", "failure", etc.
|
Status: Defines possible statuses for the task, such as "success", "failure", etc.
|
||||||
@ -49,6 +52,8 @@ class Result(MetaClasses.Serializable):
|
|||||||
files: List[str] = dataclasses.field(default_factory=list)
|
files: List[str] = dataclasses.field(default_factory=list)
|
||||||
links: List[str] = dataclasses.field(default_factory=list)
|
links: List[str] = dataclasses.field(default_factory=list)
|
||||||
info: str = ""
|
info: str = ""
|
||||||
|
aux_links: List[str] = dataclasses.field(default_factory=list)
|
||||||
|
html_link: str = ""
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def create_from(
|
def create_from(
|
||||||
@@ -57,15 +62,14 @@ class Result(MetaClasses.Serializable):
         stopwatch: Utils.Stopwatch = None,
         status="",
         files=None,
-        info: Union[List[str], str] = "",
+        info="",
         with_info_from_results=True,
     ):
         if isinstance(status, bool):
             status = Result.Status.SUCCESS if status else Result.Status.FAILED
         if not results and not status:
-            Utils.raise_with_error(
-                f"Either .results ({results}) or .status ({status}) must be provided"
-            )
+            print("ERROR: Either .results or .status must be provided")
+            raise
         if not name:
             name = _Environment.get().JOB_NAME
         if not name:
@@ -74,10 +78,10 @@ class Result(MetaClasses.Serializable):
         result_status = status or Result.Status.SUCCESS
         infos = []
         if info:
-            if isinstance(info, str):
-                infos += [info]
-            else:
+            if isinstance(info, Container):
                 infos += info
+            else:
+                infos.append(info)
         if results and not status:
             for result in results:
                 if result.status not in (Result.Status.SUCCESS, Result.Status.FAILED):
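A note on the `info` normalization in the hunk above: a Python `str` is itself a `collections.abc.Container`, so the Container-based branch folds a plain string into the list character by character, while the str-based branch keeps it whole. A minimal, self-contained sketch (independent of praktika) illustrating the difference between the two checks:

from collections.abc import Container


def normalize_info_by_container(info):
    # Mirrors the Container-based branch: a str satisfies Container,
    # so it is splatted into individual characters.
    infos = []
    if info:
        if isinstance(info, Container):
            infos += info
        else:
            infos.append(info)
    return infos


def normalize_info_by_str(info):
    # Mirrors the str-based branch: strings are kept intact,
    # other iterables are extended into the list.
    infos = []
    if info:
        if isinstance(info, str):
            infos += [info]
        else:
            infos += info
    return infos


if __name__ == "__main__":
    print(normalize_info_by_container("job failed"))  # ['j', 'o', 'b', ...]
    print(normalize_info_by_str("job failed"))        # ['job failed']
    print(normalize_info_by_str(["a", "b"]))          # ['a', 'b']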
@@ -108,7 +112,7 @@ class Result(MetaClasses.Serializable):
         return self.status not in (Result.Status.PENDING, Result.Status.RUNNING)
 
     def is_running(self):
-        return self.status in (Result.Status.RUNNING,)
+        return self.status not in (Result.Status.RUNNING,)
 
     def is_ok(self):
         return self.status in (Result.Status.SKIPPED, Result.Status.SUCCESS)
@@ -151,7 +155,7 @@ class Result(MetaClasses.Serializable):
 
     @classmethod
     def file_name_static(cls, name):
-        return f"{Settings.TEMP_DIR}/result_{Utils.normalize_string(name)}.json"
+        return f"{_Settings.TEMP_DIR}/result_{Utils.normalize_string(name)}.json"
 
     @classmethod
     def from_dict(cls, obj: Dict[str, Any]) -> "Result":
@@ -176,11 +180,6 @@ class Result(MetaClasses.Serializable):
         )
         return self
 
-    def set_timing(self, stopwatch: Utils.Stopwatch):
-        self.start_time = stopwatch.start_time
-        self.duration = stopwatch.duration
-        return self
-
     def update_sub_result(self, result: "Result"):
         assert self.results, "BUG?"
         for i, result_ in enumerate(self.results):
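The removed `set_timing` helper simply copies `start_time` and `duration` from a stopwatch object onto the result and returns `self` for chaining. A small self-contained sketch of that pattern, with a hypothetical `Stopwatch` standing in for `Utils.Stopwatch` (the stand-in class is assumed, not taken from the diff):

import dataclasses
import time


@dataclasses.dataclass
class Stopwatch:
    # Hypothetical stand-in for Utils.Stopwatch: records creation time
    # and exposes the elapsed duration on demand.
    start_time: float = dataclasses.field(default_factory=time.time)

    @property
    def duration(self) -> float:
        return time.time() - self.start_time


@dataclasses.dataclass
class TimedResult:
    name: str
    start_time: float = 0.0
    duration: float = 0.0

    def set_timing(self, stopwatch: Stopwatch) -> "TimedResult":
        # Same shape as the removed Result.set_timing: copy the stopwatch
        # fields and return self so calls can be chained.
        self.start_time = stopwatch.start_time
        self.duration = stopwatch.duration
        return self


if __name__ == "__main__":
    sw = Stopwatch()
    time.sleep(0.1)
    print(TimedResult(name="demo").set_timing(sw).duration)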
@@ -234,7 +233,7 @@ class Result(MetaClasses.Serializable):
         )
 
     @classmethod
-    def generate_skipped(cls, name, cache_record: Cache.CacheRecord, results=None):
+    def generate_skipped(cls, name, results=None):
         return Result(
             name=name,
             status=Result.Status.SKIPPED,
@@ -243,7 +242,7 @@ class Result(MetaClasses.Serializable):
             results=results or [],
             files=[],
             links=[],
-            info=f"from cache: sha [{cache_record.sha}], pr/branch [{cache_record.pr_number or cache_record.branch}]",
+            info="from cache",
         )
 
     @classmethod
@@ -277,7 +276,7 @@ class Result(MetaClasses.Serializable):
 
         # Set log file path if logging is enabled
         log_file = (
-            f"{Settings.TEMP_DIR}/{Utils.normalize_string(name)}.log"
+            f"{_Settings.TEMP_DIR}/{Utils.normalize_string(name)}.log"
             if with_log
             else None
        )
@@ -319,35 +318,18 @@ class Result(MetaClasses.Serializable):
             files=[log_file] if log_file else None,
         )
 
-    def complete_job(self):
+    def finish_job_accordingly(self):
         self.dump()
         if not self.is_ok():
             print("ERROR: Job Failed")
-            print(self.to_stdout_formatted())
+            for result in self.results:
+                if not result.is_ok():
+                    print("Failed checks:")
+                    print(" | ", result)
             sys.exit(1)
         else:
             print("ok")
 
-    def to_stdout_formatted(self, indent="", res=""):
-        if self.is_ok():
-            return res
-
-        res += f"{indent}Task [{self.name}] failed.\n"
-        fail_info = ""
-        sub_indent = indent + " "
-
-        if not self.results:
-            if not self.is_ok():
-                fail_info += f"{sub_indent}{self.name}:\n"
-                for line in self.info.splitlines():
-                    fail_info += f"{sub_indent}{sub_indent}{line}\n"
-            return res + fail_info
-
-        for sub_result in self.results:
-            res = sub_result.to_stdout_formatted(sub_indent, res)
-
-        return res
-
 
 class ResultInfo:
     SETUP_ENV_JOB_FAILED = (
@@ -370,202 +352,3 @@ class ResultInfo:
     )
 
     S3_ERROR = "S3 call failure"
-
-
-class _ResultS3:
-
-    @classmethod
-    def copy_result_to_s3(cls, result, unlock=False):
-        result.dump()
-        env = _Environment.get()
-        s3_path = f"{Settings.HTML_S3_PATH}/{env.get_s3_prefix()}"
-        s3_path_full = f"{s3_path}/{Path(result.file_name()).name}"
-        url = S3.copy_file_to_s3(s3_path=s3_path, local_path=result.file_name())
-        # if unlock:
-        #     if not cls.unlock(s3_path_full):
-        #         print(f"ERROR: File [{s3_path_full}] unlock failure")
-        #         assert False  # TODO: investigate
-        return url
-
-    @classmethod
-    def copy_result_from_s3(cls, local_path, lock=False):
-        env = _Environment.get()
-        file_name = Path(local_path).name
-        s3_path = f"{Settings.HTML_S3_PATH}/{env.get_s3_prefix()}/{file_name}"
-        # if lock:
-        #     cls.lock(s3_path)
-        if not S3.copy_file_from_s3(s3_path=s3_path, local_path=local_path):
-            print(f"ERROR: failed to cp file [{s3_path}] from s3")
-            raise
-
-    @classmethod
-    def copy_result_from_s3_with_version(cls, local_path):
-        env = _Environment.get()
-        file_name = Path(local_path).name
-        local_dir = Path(local_path).parent
-        file_name_pattern = f"{file_name}_*"
-        for file_path in local_dir.glob(file_name_pattern):
-            file_path.unlink()
-        s3_path = f"{Settings.HTML_S3_PATH}/{env.get_s3_prefix()}/"
-        if not S3.copy_file_from_s3_matching_pattern(
-            s3_path=s3_path, local_path=local_dir, include=file_name_pattern
-        ):
-            print(f"ERROR: failed to cp file [{s3_path}] from s3")
-            raise
-        result_files = []
-        for file_path in local_dir.glob(file_name_pattern):
-            result_files.append(file_path)
-        assert result_files, "No result files found"
-        result_files.sort()
-        version = int(result_files[-1].name.split("_")[-1])
-        Shell.check(f"cp {result_files[-1]} {local_path}", strict=True, verbose=True)
-        return version
-
-    @classmethod
-    def copy_result_to_s3_with_version(cls, result, version):
-        result.dump()
-        filename = Path(result.file_name()).name
-        file_name_versioned = f"{filename}_{str(version).zfill(3)}"
-        env = _Environment.get()
-        s3_path_versioned = (
-            f"{Settings.HTML_S3_PATH}/{env.get_s3_prefix()}/{file_name_versioned}"
-        )
-        s3_path = f"{Settings.HTML_S3_PATH}/{env.get_s3_prefix()}/"
-        if version == 0:
-            S3.clean_s3_directory(s3_path=s3_path)
-        if not S3.put(
-            s3_path=s3_path_versioned,
-            local_path=result.file_name(),
-            if_none_matched=True,
-        ):
-            print("Failed to put versioned Result")
-            return False
-        if not S3.put(s3_path=s3_path, local_path=result.file_name()):
-            print("Failed to put non-versioned Result")
-        return True
-
-    # @classmethod
-    # def lock(cls, s3_path, level=0):
-    #     env = _Environment.get()
-    #     s3_path_lock = s3_path + f".lock"
-    #     file_path_lock = f"{Settings.TEMP_DIR}/{Path(s3_path_lock).name}"
-    #     assert Shell.check(
-    #         f"echo '''{env.JOB_NAME}''' > {file_path_lock}", verbose=True
-    #     ), "Never"
-    #
-    #     i = 20
-    #     meta = S3.head_object(s3_path_lock)
-    #     while meta:
-    #         locked_by_job = meta.get("Metadata", {"job": ""}).get("job", "")
-    #         if locked_by_job:
-    #             decoded_bytes = base64.b64decode(locked_by_job)
-    #             locked_by_job = decoded_bytes.decode("utf-8")
-    #             print(
-    #                 f"WARNING: Failed to acquire lock, meta [{meta}], job [{locked_by_job}] - wait"
-    #             )
-    #         i -= 5
-    #         if i < 0:
-    #             info = f"ERROR: lock acquire failure - unlock forcefully"
-    #             print(info)
-    #             env.add_info(info)
-    #             break
-    #         time.sleep(5)
-    #
-    #     metadata = {"job": Utils.to_base64(env.JOB_NAME)}
-    #     S3.put(
-    #         s3_path=s3_path_lock,
-    #         local_path=file_path_lock,
-    #         metadata=metadata,
-    #         if_none_matched=True,
-    #     )
-    #     time.sleep(1)
-    #     obj = S3.head_object(s3_path_lock)
-    #     if not obj or not obj.has_tags(tags=metadata):
-    #         print(f"WARNING: locked by another job [{obj}]")
-    #         env.add_info("S3 lock file failure")
-    #         cls.lock(s3_path, level=level + 1)
-    #     print("INFO: lock acquired")
-    #
-    # @classmethod
-    # def unlock(cls, s3_path):
-    #     s3_path_lock = s3_path + ".lock"
-    #     env = _Environment.get()
-    #     obj = S3.head_object(s3_path_lock)
-    #     if not obj:
-    #         print("ERROR: lock file is removed")
-    #         assert False  # investigate
-    #     elif not obj.has_tags({"job": Utils.to_base64(env.JOB_NAME)}):
-    #         print("ERROR: lock file was acquired by another job")
-    #         assert False  # investigate
-    #
-    #     if not S3.delete(s3_path_lock):
-    #         print(f"ERROR: File [{s3_path_lock}] delete failure")
-    #     print("INFO: lock released")
-    #     return True
-
-    @classmethod
-    def upload_result_files_to_s3(cls, result):
-        if result.results:
-            for result_ in result.results:
-                cls.upload_result_files_to_s3(result_)
-        for file in result.files:
-            if not Path(file).is_file():
-                print(f"ERROR: Invalid file [{file}] in [{result.name}] - skip upload")
-                result.info += f"\nWARNING: Result file [{file}] was not found"
-                file_link = S3._upload_file_to_s3(file, upload_to_s3=False)
-            else:
-                is_text = False
-                for text_file_suffix in Settings.TEXT_CONTENT_EXTENSIONS:
-                    if file.endswith(text_file_suffix):
-                        print(
-                            f"File [{file}] matches Settings.TEXT_CONTENT_EXTENSIONS [{Settings.TEXT_CONTENT_EXTENSIONS}] - add text attribute for s3 object"
-                        )
-                        is_text = True
-                        break
-                file_link = S3._upload_file_to_s3(
-                    file,
-                    upload_to_s3=True,
-                    text=is_text,
-                    s3_subprefix=Utils.normalize_string(result.name),
-                )
-            result.links.append(file_link)
-        if result.files:
-            print(
-                f"Result files [{result.files}] uploaded to s3 [{result.links[-len(result.files):]}] - clean files list"
-            )
-            result.files = []
-        result.dump()
-
-    @classmethod
-    def update_workflow_results(cls, workflow_name, new_info="", new_sub_results=None):
-        assert new_info or new_sub_results
-
-        attempt = 1
-        prev_status = ""
-        new_status = ""
-        done = False
-        while attempt < 10:
-            version = cls.copy_result_from_s3_with_version(
-                Result.file_name_static(workflow_name)
-            )
-            workflow_result = Result.from_fs(workflow_name)
-            prev_status = workflow_result.status
-            if new_info:
-                workflow_result.set_info(new_info)
-            if new_sub_results:
-                if isinstance(new_sub_results, Result):
-                    new_sub_results = [new_sub_results]
-                for result_ in new_sub_results:
-                    workflow_result.update_sub_result(result_)
-            new_status = workflow_result.status
-            if cls.copy_result_to_s3_with_version(workflow_result, version=version + 1):
-                done = True
-                break
-            print(f"Attempt [{attempt}] to upload workflow result failed")
-            attempt += 1
-        assert done
-
-        if prev_status != new_status:
-            return new_status
-        else:
-            return None
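The removed `_ResultS3.update_workflow_results` above is an optimistic-concurrency loop: it downloads the highest-versioned copy of the workflow result, applies the update, and uploads version+1 with a conditional PUT (`if_none_matched`), so two jobs racing on the same version cannot both win. A self-contained sketch of the same read-modify-write pattern directly against boto3 (an assumed dependency here; the bucket, prefix, and helper names are placeholders and the conditional write requires an S3/boto3 version that supports it):

import json

import boto3
from botocore.exceptions import ClientError

BUCKET = "example-ci-bucket"   # placeholder, not from the diff
PREFIX = "results/workflow"    # placeholder, not from the diff

s3 = boto3.client("s3")


def read_latest_version() -> int:
    # Versioned copies are stored as <PREFIX>_000, <PREFIX>_001, ...
    listing = s3.list_objects_v2(Bucket=BUCKET, Prefix=f"{PREFIX}_")
    keys = [obj["Key"] for obj in listing.get("Contents", [])]
    return max((int(k.rsplit("_", 1)[-1]) for k in keys), default=-1)


def try_write_version(version: int, payload: dict) -> bool:
    # Conditional PUT: succeeds only if this versioned key does not exist yet,
    # which is what makes the read-modify-write loop safe against races.
    try:
        s3.put_object(
            Bucket=BUCKET,
            Key=f"{PREFIX}_{version:03d}",
            Body=json.dumps(payload).encode(),
            IfNoneMatch="*",
        )
        return True
    except ClientError as e:
        if e.response["Error"]["Code"] in ("PreconditionFailed", "412"):
            return False  # somebody else already wrote this version - retry
        raise


def update_workflow_result(mutate) -> None:
    for _ in range(10):
        version = read_latest_version()
        current = {}  # a real implementation would download <PREFIX>_{version:03d} here
        mutate(current)
        if try_write_version(version + 1, current):
            return
    raise RuntimeError("could not update workflow result after 10 attempts")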
@@ -19,7 +19,7 @@ from praktika.utils import Shell, TeePopen, Utils
 
 class Runner:
     @staticmethod
-    def generate_local_run_environment(workflow, job, pr=None, branch=None, sha=None):
+    def generate_dummy_environment(workflow, job):
         print("WARNING: Generate dummy env for local test")
         Shell.check(
             f"mkdir -p {Settings.TEMP_DIR} {Settings.INPUT_DIR} {Settings.OUTPUT_DIR}"
@@ -28,9 +28,9 @@ class Runner:
             WORKFLOW_NAME=workflow.name,
             JOB_NAME=job.name,
             REPOSITORY="",
-            BRANCH=branch or Settings.MAIN_BRANCH if not pr else "",
-            SHA=sha or Shell.get_output("git rev-parse HEAD"),
-            PR_NUMBER=pr or -1,
+            BRANCH="",
+            SHA="",
+            PR_NUMBER=-1,
             EVENT_TYPE="",
             JOB_OUTPUT_STREAM="",
             EVENT_FILE_PATH="",
@@ -52,7 +52,6 @@ class Runner:
             cache_success=[],
             cache_success_base64=[],
             cache_artifacts={},
-            cache_jobs={},
         )
         for docker in workflow.dockers:
             workflow_config.digest_dockers[docker.name] = Digest().calc_docker_digest(
@@ -81,12 +80,13 @@ class Runner:
         print("Read GH Environment")
         env = _Environment.from_env()
         env.JOB_NAME = job.name
+        env.PARAMETER = job.parameter
         env.dump()
         print(env)
 
         return 0
 
-    def _pre_run(self, workflow, job, local_run=False):
+    def _pre_run(self, workflow, job):
         env = _Environment.get()
 
         result = Result(
@@ -96,7 +96,6 @@
         )
         result.dump()
 
-        if not local_run:
         if workflow.enable_report and job.name != Settings.CI_CONFIG_JOB_NAME:
             print("Update Job and Workflow Report")
             HtmlRunnerHooks.pre_run(workflow, job)
@@ -124,48 +123,28 @@ class Runner:
 
         return 0
 
-    def _run(self, workflow, job, docker="", no_docker=False, param=None, test=""):
-        # re-set envs for local run
-        env = _Environment.get()
-        env.JOB_NAME = job.name
-        env.dump()
-
+    def _run(self, workflow, job, docker="", no_docker=False, param=None):
         if param:
             if not isinstance(param, str):
                 Utils.raise_with_error(
                     f"Custom param for local tests must be of type str, got [{type(param)}]"
                 )
+        env = _Environment.get()
+        env.dump()
 
         if job.run_in_docker and not no_docker:
-            job.run_in_docker, docker_settings = (
-                job.run_in_docker.split("+")[0],
-                job.run_in_docker.split("+")[1:],
-            )
-            from_root = "root" in docker_settings
-            settings = [s for s in docker_settings if s.startswith("--")]
-            if ":" in job.run_in_docker:
-                docker_name, docker_tag = job.run_in_docker.split(":")
-                print(
-                    f"WARNING: Job [{job.name}] use custom docker image with a tag - praktika won't control docker version"
-                )
-            else:
-                docker_name, docker_tag = (
-                    job.run_in_docker,
-                    RunConfig.from_fs(workflow.name).digest_dockers[job.run_in_docker],
-                )
-            docker = docker or f"{docker_name}:{docker_tag}"
-            cmd = f"docker run --rm --name praktika {'--user $(id -u):$(id -g)' if not from_root else ''} -e PYTHONPATH='{Settings.DOCKER_WD}:{Settings.DOCKER_WD}/ci' --volume ./:{Settings.DOCKER_WD} --volume {Settings.TEMP_DIR}:{Settings.TEMP_DIR} --workdir={Settings.DOCKER_WD} {' '.join(settings)} {docker} {job.command}"
+            # TODO: add support for any image, including not from ci config (e.g. ubuntu:latest)
+            docker_tag = RunConfig.from_fs(workflow.name).digest_dockers[
+                job.run_in_docker
+            ]
+            docker = docker or f"{job.run_in_docker}:{docker_tag}"
+            cmd = f"docker run --rm --user \"$(id -u):$(id -g)\" -e PYTHONPATH='{Settings.DOCKER_WD}:{Settings.DOCKER_WD}/ci' --volume ./:{Settings.DOCKER_WD} --volume {Settings.TEMP_DIR}:{Settings.TEMP_DIR} --workdir={Settings.DOCKER_WD} {docker} {job.command}"
         else:
             cmd = job.command
-        python_path = os.getenv("PYTHONPATH", ":")
-        os.environ["PYTHONPATH"] = f".:{python_path}"
 
         if param:
             print(f"Custom --param [{param}] will be passed to job's script")
             cmd += f" --param {param}"
-        if test:
-            print(f"Custom --test [{test}] will be passed to job's script")
-            cmd += f" --test {test}"
         print(f"--- Run command [{cmd}]")
 
         with TeePopen(cmd, timeout=job.timeout) as process:
@@ -240,10 +219,13 @@ class Runner:
                 print(info)
                 result.set_info(info).set_status(Result.Status.ERROR).dump()
 
-        if not result.is_ok():
         result.set_files(files=[Settings.RUN_LOG])
         result.update_duration().dump()
 
+        if result.info and result.status != Result.Status.SUCCESS:
+            # provide job info to workflow level
+            info_errors.append(result.info)
+
         if run_exit_code == 0:
             providing_artifacts = []
             if job.provides and workflow.artifacts:
@@ -303,24 +285,14 @@ class Runner:
         return True
 
     def run(
-        self,
-        workflow,
-        job,
-        docker="",
-        local_run=False,
-        no_docker=False,
-        param=None,
-        test="",
-        pr=None,
-        sha=None,
-        branch=None,
+        self, workflow, job, docker="", dummy_env=False, no_docker=False, param=None
     ):
         res = True
         setup_env_code = -10
         prerun_code = -10
         run_code = -10
 
-        if res and not local_run:
+        if res and not dummy_env:
             print(
                 f"\n\n=== Setup env script [{job.name}], workflow [{workflow.name}] ==="
             )
@@ -337,15 +309,13 @@ class Runner:
                 traceback.print_exc()
             print(f"=== Setup env finished ===\n\n")
         else:
-            self.generate_local_run_environment(
-                workflow, job, pr=pr, branch=branch, sha=sha
-            )
+            self.generate_dummy_environment(workflow, job)
 
-        if res and (not local_run or pr or sha or branch):
+        if res and not dummy_env:
             res = False
             print(f"=== Pre run script [{job.name}], workflow [{workflow.name}] ===")
             try:
-                prerun_code = self._pre_run(workflow, job, local_run=local_run)
+                prerun_code = self._pre_run(workflow, job)
                 res = prerun_code == 0
                 if not res:
                     print(f"ERROR: Pre-run failed with exit code [{prerun_code}]")
@@ -359,12 +329,7 @@ class Runner:
             print(f"=== Run script [{job.name}], workflow [{workflow.name}] ===")
             try:
                 run_code = self._run(
-                    workflow,
-                    job,
-                    docker=docker,
-                    no_docker=no_docker,
-                    param=param,
-                    test=test,
+                    workflow, job, docker=docker, no_docker=no_docker, param=param
                 )
                 res = run_code == 0
                 if not res:
@@ -374,7 +339,7 @@ class Runner:
                 traceback.print_exc()
             print(f"=== Run scrip finished ===\n\n")
 
-        if not local_run:
+        if not dummy_env:
             print(f"=== Post run script [{job.name}], workflow [{workflow.name}] ===")
             self._post_run(workflow, job, setup_env_code, prerun_code, run_code)
             print(f"=== Post run scrip finished ===")
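In the removed side of `_run` above, `job.run_in_docker` is a "+"-separated spec: the first element is the image name (optionally with an explicit `:tag`), and the remaining elements are either extra `docker run` options such as `--security-opt seccomp=unconfined` or the literal `root`. A standalone sketch of that parsing step (function and variable names are illustrative, not the project's API):

from typing import Dict, List, Tuple


def parse_run_in_docker(
    spec: str, digest_dockers: Dict[str, str]
) -> Tuple[str, List[str], bool]:
    """Split 'image+opt1+opt2' into (image:tag, extra docker options, run-as-root flag)."""
    image, *options = spec.split("+")
    run_as_root = "root" in options
    extra_opts = [o for o in options if o.startswith("--")]
    if ":" in image:
        # Explicit tag: digest pinning is skipped for such images.
        image_with_tag = image
    else:
        image_with_tag = f"{image}:{digest_dockers[image]}"
    return image_with_tag, extra_opts, run_as_root


if __name__ == "__main__":
    digests = {"clickhouse/stateless-test": "abc123"}
    print(
        parse_run_in_docker(
            "clickhouse/stateless-test+--security-opt seccomp=unconfined", digests
        )
    )
    # ('clickhouse/stateless-test:abc123', ['--security-opt seccomp=unconfined'], False)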
@@ -15,23 +15,17 @@ class RunConfig(MetaClasses.Serializable):
     # there are might be issue with special characters in job names if used directly in yaml syntax - create base64 encoded list to avoid this
     cache_success_base64: List[str]
     cache_artifacts: Dict[str, Cache.CacheRecord]
-    cache_jobs: Dict[str, Cache.CacheRecord]
     sha: str
 
     @classmethod
     def from_dict(cls, obj):
         cache_artifacts = obj["cache_artifacts"]
-        cache_jobs = obj["cache_jobs"]
         cache_artifacts_deserialized = {}
-        cache_jobs_deserialized = {}
         for artifact_name, cache_artifact in cache_artifacts.items():
             cache_artifacts_deserialized[artifact_name] = Cache.CacheRecord.from_dict(
                 cache_artifact
             )
         obj["cache_artifacts"] = cache_artifacts_deserialized
-        for job_name, cache_jobs in cache_jobs.items():
-            cache_jobs_deserialized[job_name] = Cache.CacheRecord.from_dict(cache_jobs)
-        obj["cache_jobs"] = cache_artifacts_deserialized
         return RunConfig(**obj)
 
     @classmethod
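The hunk above is the usual pattern for rebuilding nested records when a serialized config comes back as plain dicts: each inner mapping has to be converted element by element before the outer dataclass is constructed. A simplified, self-contained sketch of that shape (the `CacheRecord` stand-in and its fields are illustrative, not copied from praktika):

import dataclasses
from typing import Any, Dict


@dataclasses.dataclass
class CacheRecord:
    # Simplified stand-in for Cache.CacheRecord; field names are illustrative.
    sha: str = ""
    pr_number: int = 0

    @classmethod
    def from_dict(cls, obj: Dict[str, Any]) -> "CacheRecord":
        return cls(**obj)


@dataclasses.dataclass
class RunConfigSketch:
    name: str
    cache_artifacts: Dict[str, CacheRecord]
    cache_jobs: Dict[str, CacheRecord]

    @classmethod
    def from_dict(cls, obj: Dict[str, Any]) -> "RunConfigSketch":
        # Nested dicts arrive as plain JSON objects and must be converted
        # back into CacheRecord instances, one mapping at a time.
        obj = dict(obj)
        obj["cache_artifacts"] = {
            k: CacheRecord.from_dict(v) for k, v in obj["cache_artifacts"].items()
        }
        obj["cache_jobs"] = {
            k: CacheRecord.from_dict(v) for k, v in obj["cache_jobs"].items()
        }
        return cls(**obj)


if __name__ == "__main__":
    raw = {
        "name": "pr",
        "cache_artifacts": {"binary": {"sha": "abc", "pr_number": 1}},
        "cache_jobs": {"Build": {"sha": "abc", "pr_number": 1}},
    }
    print(RunConfigSketch.from_dict(raw))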
@@ -1,11 +1,12 @@
 import dataclasses
 import json
+import time
 from pathlib import Path
 from typing import Dict
 
 from praktika._environment import _Environment
 from praktika.settings import Settings
-from praktika.utils import Shell
+from praktika.utils import Shell, Utils
 
 
 class S3:
@@ -51,22 +52,23 @@ class S3:
             cmd += " --content-type text/plain"
         res = cls.run_command_with_retries(cmd)
         if not res:
-            raise RuntimeError()
+            raise
         bucket = s3_path.split("/")[0]
         endpoint = Settings.S3_BUCKET_TO_HTTP_ENDPOINT[bucket]
         assert endpoint
         return f"https://{s3_full_path}".replace(bucket, endpoint)
 
     @classmethod
-    def put(cls, s3_path, local_path, text=False, metadata=None, if_none_matched=False):
+    def put(cls, s3_path, local_path, text=False, metadata=None):
         assert Path(local_path).exists(), f"Path [{local_path}] does not exist"
         assert Path(s3_path), f"Invalid S3 Path [{s3_path}]"
         assert Path(
             local_path
         ).is_file(), f"Path [{local_path}] is not file. Only files are supported"
+        file_name = Path(local_path).name
         s3_full_path = s3_path
-        if s3_full_path.endswith("/"):
-            s3_full_path = f"{s3_path}{Path(local_path).name}"
+        if not s3_full_path.endswith(file_name):
+            s3_full_path = f"{s3_path}/{Path(local_path).name}"
 
         s3_full_path = str(s3_full_path).removeprefix("s3://")
         bucket, key = s3_full_path.split("/", maxsplit=1)
@@ -74,8 +76,6 @@ class S3:
         command = (
             f"aws s3api put-object --bucket {bucket} --key {key} --body {local_path}"
         )
-        if if_none_matched:
-            command += f' --if-none-match "*"'
         if metadata:
             for k, v in metadata.items():
                 command += f" --metadata {k}={v}"
@@ -84,7 +84,7 @@ class S3:
         if text:
             cmd += " --content-type text/plain"
         res = cls.run_command_with_retries(command)
-        return res
+        assert res
 
     @classmethod
     def run_command_with_retries(cls, command, retries=Settings.MAX_RETRIES_S3):
@@ -101,14 +101,6 @@ class S3:
                 elif "does not exist" in stderr:
                     print("ERROR: requested file does not exist")
                     break
-                elif "Unknown options" in stderr:
-                    print("ERROR: Invalid AWS CLI command or CLI client version:")
-                    print(f" | awc error: {stderr}")
-                    break
-                elif "PreconditionFailed" in stderr:
-                    print("ERROR: AWS API Call Precondition Failed")
-                    print(f" | awc error: {stderr}")
-                    break
                 if ret_code != 0:
                     print(
                         f"ERROR: aws s3 cp failed, stdout/stderr err: [{stderr}], out [{stdout}]"
@@ -116,6 +108,13 @@ class S3:
                 res = ret_code == 0
         return res
 
+    @classmethod
+    def get_link(cls, s3_path, local_path):
+        s3_full_path = f"{s3_path}/{Path(local_path).name}"
+        bucket = s3_path.split("/")[0]
+        endpoint = Settings.S3_BUCKET_TO_HTTP_ENDPOINT[bucket]
+        return f"https://{s3_full_path}".replace(bucket, endpoint)
+
     @classmethod
     def copy_file_from_s3(cls, s3_path, local_path):
         assert Path(s3_path), f"Invalid S3 Path [{s3_path}]"
@@ -129,19 +128,6 @@ class S3:
         res = cls.run_command_with_retries(cmd)
         return res
 
-    @classmethod
-    def copy_file_from_s3_matching_pattern(
-        cls, s3_path, local_path, include, exclude="*"
-    ):
-        assert Path(s3_path), f"Invalid S3 Path [{s3_path}]"
-        assert Path(
-            local_path
-        ).is_dir(), f"Path [{local_path}] does not exist or not a directory"
-        assert s3_path.endswith("/"), f"s3 path is invalid [{s3_path}]"
-        cmd = f'aws s3 cp s3://{s3_path} {local_path} --exclude "{exclude}" --include "{include}" --recursive'
-        res = cls.run_command_with_retries(cmd)
-        return res
-
     @classmethod
     def head_object(cls, s3_path):
         s3_path = str(s3_path).removeprefix("s3://")
@@ -162,6 +148,103 @@ class S3:
             verbose=True,
         )
 
+    # TODO: apparently should be placed into separate file to be used only inside praktika
+    # keeping this module clean from importing Settings, Environment and etc, making it easy for use externally
+    @classmethod
+    def copy_result_to_s3(cls, result, unlock=True):
+        result.dump()
+        env = _Environment.get()
+        s3_path = f"{Settings.HTML_S3_PATH}/{env.get_s3_prefix()}"
+        s3_path_full = f"{s3_path}/{Path(result.file_name()).name}"
+        url = S3.copy_file_to_s3(s3_path=s3_path, local_path=result.file_name())
+        if env.PR_NUMBER:
+            print("Duplicate Result for latest commit alias in PR")
+            s3_path = f"{Settings.HTML_S3_PATH}/{env.get_s3_prefix(latest=True)}"
+            url = S3.copy_file_to_s3(s3_path=s3_path, local_path=result.file_name())
+        if unlock:
+            if not cls.unlock(s3_path_full):
+                print(f"ERROR: File [{s3_path_full}] unlock failure")
+                assert False  # TODO: investigate
+        return url
+
+    @classmethod
+    def copy_result_from_s3(cls, local_path, lock=True):
+        env = _Environment.get()
+        file_name = Path(local_path).name
+        s3_path = f"{Settings.HTML_S3_PATH}/{env.get_s3_prefix()}/{file_name}"
+        if lock:
+            cls.lock(s3_path)
+        if not S3.copy_file_from_s3(s3_path=s3_path, local_path=local_path):
+            print(f"ERROR: failed to cp file [{s3_path}] from s3")
+            raise
+
+    @classmethod
+    def lock(cls, s3_path, level=0):
+        assert level < 3, "Never"
+        env = _Environment.get()
+        s3_path_lock = s3_path + f".lock"
+        file_path_lock = f"{Settings.TEMP_DIR}/{Path(s3_path_lock).name}"
+        assert Shell.check(
+            f"echo '''{env.JOB_NAME}''' > {file_path_lock}", verbose=True
+        ), "Never"
+
+        i = 20
+        meta = S3.head_object(s3_path_lock)
+        while meta:
+            print(f"WARNING: Failed to acquire lock, meta [{meta}] - wait")
+            i -= 5
+            if i < 0:
+                info = f"ERROR: lock acquire failure - unlock forcefully"
+                print(info)
+                env.add_info(info)
+                break
+            time.sleep(5)
+
+        metadata = {"job": Utils.to_base64(env.JOB_NAME)}
+        S3.put(
+            s3_path=s3_path_lock,
+            local_path=file_path_lock,
+            metadata=metadata,
+        )
+        time.sleep(1)
+        obj = S3.head_object(s3_path_lock)
+        if not obj or not obj.has_tags(tags=metadata):
+            print(f"WARNING: locked by another job [{obj}]")
+            env.add_info("S3 lock file failure")
+            cls.lock(s3_path, level=level + 1)
+        print("INFO: lock acquired")
+
+    @classmethod
+    def unlock(cls, s3_path):
+        s3_path_lock = s3_path + ".lock"
+        env = _Environment.get()
+        obj = S3.head_object(s3_path_lock)
+        if not obj:
+            print("ERROR: lock file is removed")
+            assert False  # investigate
+        elif not obj.has_tags({"job": Utils.to_base64(env.JOB_NAME)}):
+            print("ERROR: lock file was acquired by another job")
+            assert False  # investigate
+
+        if not S3.delete(s3_path_lock):
+            print(f"ERROR: File [{s3_path_lock}] delete failure")
+        print("INFO: lock released")
+        return True
+
+    @classmethod
+    def get_result_link(cls, result):
+        env = _Environment.get()
+        s3_path = f"{Settings.HTML_S3_PATH}/{env.get_s3_prefix(latest=True if env.PR_NUMBER else False)}"
+        return S3.get_link(s3_path=s3_path, local_path=result.file_name())
+
+    @classmethod
+    def clean_latest_result(cls):
+        env = _Environment.get()
+        env.SHA = "latest"
+        assert env.PR_NUMBER
+        s3_path = f"{Settings.HTML_S3_PATH}/{env.get_s3_prefix()}"
+        S3.clean_s3_directory(s3_path=s3_path)
+
     @classmethod
     def _upload_file_to_s3(
         cls, local_file_path, upload_to_s3: bool, text: bool = False, s3_subprefix=""
@@ -177,3 +260,36 @@ class S3:
         )
         return html_link
         return f"file://{Path(local_file_path).absolute()}"
+
+    @classmethod
+    def upload_result_files_to_s3(cls, result):
+        if result.results:
+            for result_ in result.results:
+                cls.upload_result_files_to_s3(result_)
+        for file in result.files:
+            if not Path(file).is_file():
+                print(f"ERROR: Invalid file [{file}] in [{result.name}] - skip upload")
+                result.info += f"\nWARNING: Result file [{file}] was not found"
+                file_link = cls._upload_file_to_s3(file, upload_to_s3=False)
+            else:
+                is_text = False
+                for text_file_suffix in Settings.TEXT_CONTENT_EXTENSIONS:
+                    if file.endswith(text_file_suffix):
+                        print(
+                            f"File [{file}] matches Settings.TEXT_CONTENT_EXTENSIONS [{Settings.TEXT_CONTENT_EXTENSIONS}] - add text attribute for s3 object"
+                        )
+                        is_text = True
+                        break
+                file_link = cls._upload_file_to_s3(
+                    file,
+                    upload_to_s3=True,
+                    text=is_text,
+                    s3_subprefix=Utils.normalize_string(result.name),
+                )
+            result.links.append(file_link)
+        if result.files:
+            print(
+                f"Result files [{result.files}] uploaded to s3 [{result.links[-len(result.files):]}] - clean files list"
+            )
+            result.files = []
+        result.dump()
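The added side above reintroduces a lock file (`<key>.lock`) kept next to the result object in S3: the lock is acquired with `head_object`/`put` plus metadata tagging and released with `delete`. A simplified, self-contained sketch of that acquire/release pattern, using an in-memory stand-in for the object store (the `InMemoryObjectStore` class and all names here are illustrative, not praktika's API):

import contextlib
import time
import uuid


class InMemoryObjectStore:
    # Minimal stand-in for the S3 calls used by the lock above
    # (head_object / put / delete); purely illustrative.
    def __init__(self):
        self._objects = {}

    def head(self, key):
        return self._objects.get(key)

    def put(self, key, value):
        self._objects[key] = value

    def delete(self, key):
        self._objects.pop(key, None)


@contextlib.contextmanager
def result_lock(store: InMemoryObjectStore, key: str, owner: str = ""):
    """Acquire <key>.lock before touching <key>, release it afterwards."""
    owner = owner or uuid.uuid4().hex
    lock_key = key + ".lock"
    while store.head(lock_key) is not None:
        time.sleep(0.1)  # somebody else holds the lock - wait
    store.put(lock_key, owner)
    try:
        yield
    finally:
        # Only the holder that wrote the lock object removes it.
        if store.head(lock_key) == owner:
            store.delete(lock_key)


if __name__ == "__main__":
    store = InMemoryObjectStore()
    with result_lock(store, "results/workflow.json"):
        store.put("results/workflow.json", "{...}")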
@@ -1,152 +1,8 @@
-import dataclasses
-import importlib.util
-from pathlib import Path
-from typing import Dict, Iterable, List, Optional
-
-
-@dataclasses.dataclass
-class _Settings:
-    ######################################
-    # Pipeline generation settings #
-    ######################################
-    MAIN_BRANCH = "main"
-    CI_PATH = "./ci"
-    WORKFLOW_PATH_PREFIX: str = "./.github/workflows"
-    WORKFLOWS_DIRECTORY: str = f"{CI_PATH}/workflows"
-    SETTINGS_DIRECTORY: str = f"{CI_PATH}/settings"
-    CI_CONFIG_JOB_NAME = "Config Workflow"
-    DOCKER_BUILD_JOB_NAME = "Docker Builds"
-    FINISH_WORKFLOW_JOB_NAME = "Finish Workflow"
-    READY_FOR_MERGE_STATUS_NAME = "Ready for Merge"
-    CI_CONFIG_RUNS_ON: Optional[List[str]] = None
-    DOCKER_BUILD_RUNS_ON: Optional[List[str]] = None
-    VALIDATE_FILE_PATHS: bool = True
-
-    ######################################
-    # Runtime Settings #
-    ######################################
-    MAX_RETRIES_S3 = 3
-    MAX_RETRIES_GH = 3
-
-    ######################################
-    # S3 (artifact storage) settings #
-    ######################################
-    S3_ARTIFACT_PATH: str = ""
-
-    ######################################
-    # CI workspace settings #
-    ######################################
-    TEMP_DIR: str = "/tmp/praktika"
-    OUTPUT_DIR: str = f"{TEMP_DIR}/output"
-    INPUT_DIR: str = f"{TEMP_DIR}/input"
-    PYTHON_INTERPRETER: str = "python3"
-    PYTHON_PACKET_MANAGER: str = "pip3"
-    PYTHON_VERSION: str = "3.9"
-    INSTALL_PYTHON_FOR_NATIVE_JOBS: bool = False
-    INSTALL_PYTHON_REQS_FOR_NATIVE_JOBS: str = "./ci/requirements.txt"
-    ENVIRONMENT_VAR_FILE: str = f"{TEMP_DIR}/environment.json"
-    RUN_LOG: str = f"{TEMP_DIR}/praktika_run.log"
-
-    SECRET_GH_APP_ID: str = "GH_APP_ID"
-    SECRET_GH_APP_PEM_KEY: str = "GH_APP_PEM_KEY"
-
-    ENV_SETUP_SCRIPT: str = "/tmp/praktika_setup_env.sh"
-    WORKFLOW_STATUS_FILE: str = f"{TEMP_DIR}/workflow_status.json"
-
-    ######################################
-    # CI Cache settings #
-    ######################################
-    CACHE_VERSION: int = 1
-    CACHE_DIGEST_LEN: int = 20
-    CACHE_S3_PATH: str = ""
-    CACHE_LOCAL_PATH: str = f"{TEMP_DIR}/ci_cache"
-
-    ######################################
-    # Report settings #
-    ######################################
-    HTML_S3_PATH: str = ""
-    HTML_PAGE_FILE: str = "./praktika/json.html"
-    TEXT_CONTENT_EXTENSIONS: Iterable[str] = frozenset([".txt", ".log"])
-    S3_BUCKET_TO_HTTP_ENDPOINT: Optional[Dict[str, str]] = None
-
-    DOCKERHUB_USERNAME: str = ""
-    DOCKERHUB_SECRET: str = ""
-    DOCKER_WD: str = "/wd"
-
-    ######################################
-    # CI DB Settings #
-    ######################################
-    SECRET_CI_DB_URL: str = "CI_DB_URL"
-    SECRET_CI_DB_PASSWORD: str = "CI_DB_PASSWORD"
-    CI_DB_DB_NAME = ""
-    CI_DB_TABLE_NAME = ""
-    CI_DB_INSERT_TIMEOUT_SEC = 5
-
-    DISABLE_MERGE_COMMIT = True
-
-
-_USER_DEFINED_SETTINGS = [
-    "S3_ARTIFACT_PATH",
-    "CACHE_S3_PATH",
-    "HTML_S3_PATH",
-    "S3_BUCKET_TO_HTTP_ENDPOINT",
-    "TEXT_CONTENT_EXTENSIONS",
-    "TEMP_DIR",
-    "OUTPUT_DIR",
-    "INPUT_DIR",
-    "CI_CONFIG_RUNS_ON",
-    "DOCKER_BUILD_RUNS_ON",
-    "CI_CONFIG_JOB_NAME",
-    "PYTHON_INTERPRETER",
-    "PYTHON_VERSION",
-    "PYTHON_PACKET_MANAGER",
-    "INSTALL_PYTHON_FOR_NATIVE_JOBS",
-    "INSTALL_PYTHON_REQS_FOR_NATIVE_JOBS",
-    "MAX_RETRIES_S3",
-    "MAX_RETRIES_GH",
-    "VALIDATE_FILE_PATHS",
-    "DOCKERHUB_USERNAME",
-    "DOCKERHUB_SECRET",
-    "READY_FOR_MERGE_STATUS_NAME",
-    "SECRET_CI_DB_URL",
-    "SECRET_CI_DB_PASSWORD",
-    "CI_DB_DB_NAME",
-    "CI_DB_TABLE_NAME",
-    "CI_DB_INSERT_TIMEOUT_SEC",
-    "SECRET_GH_APP_PEM_KEY",
-    "SECRET_GH_APP_ID",
-    "MAIN_BRANCH",
-    "DISABLE_MERGE_COMMIT",
-]
-
-
-def _get_settings() -> _Settings:
-    res = _Settings()
-
-    directory = Path(_Settings.SETTINGS_DIRECTORY)
-    for py_file in directory.glob("*.py"):
-        module_name = py_file.name.removeprefix(".py")
-        spec = importlib.util.spec_from_file_location(
-            module_name, f"{_Settings.SETTINGS_DIRECTORY}/{module_name}"
-        )
-        assert spec
-        foo = importlib.util.module_from_spec(spec)
-        assert spec.loader
-        spec.loader.exec_module(foo)
-        for setting in _USER_DEFINED_SETTINGS:
-            try:
-                value = getattr(foo, setting)
-                res.__setattr__(setting, value)
-                # print(f"- read user defined setting [{setting} = {value}]")
-            except Exception as e:
-                # print(f"Exception while read user settings: {e}")
-                pass
-
-    return res
-
-
-class GHRunners:
-    ubuntu = "ubuntu-latest"
-
-
-Settings = _get_settings()
+from praktika._settings import _Settings
+from praktika.mangle import _get_user_settings
+
+Settings = _Settings()
+
+user_settings = _get_user_settings()
+for setting, value in user_settings.items():
+    Settings.__setattr__(setting, value)
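The removed `_get_settings()` above builds the effective `Settings` object by executing every `*.py` file in the settings directory with `importlib` and copying any attribute whose name appears in `_USER_DEFINED_SETTINGS` onto the defaults. A compact, self-contained sketch of that override mechanism (the directory layout and the setting names in the sketch are examples, not praktika's list):

import dataclasses
import importlib.util
import tempfile
from pathlib import Path


@dataclasses.dataclass
class DefaultSettings:
    TEMP_DIR: str = "/tmp/praktika"
    MAX_RETRIES_S3: int = 3


OVERRIDABLE = ["TEMP_DIR", "MAX_RETRIES_S3"]


def load_settings(settings_dir: Path) -> DefaultSettings:
    res = DefaultSettings()
    for py_file in settings_dir.glob("*.py"):
        # Execute the user file as an anonymous module and pick up
        # any module-level constant with a known name.
        spec = importlib.util.spec_from_file_location(py_file.stem, py_file)
        assert spec and spec.loader
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)
        for name in OVERRIDABLE:
            if hasattr(module, name):
                setattr(res, name, getattr(module, name))
    return res


if __name__ == "__main__":
    with tempfile.TemporaryDirectory() as d:
        Path(d, "my_settings.py").write_text('TEMP_DIR = "/tmp/ci"\n')
        print(load_settings(Path(d)))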
@@ -17,6 +17,8 @@ from threading import Thread
 from types import SimpleNamespace
 from typing import Any, Dict, Iterator, List, Optional, Type, TypeVar, Union
 
+from praktika._settings import _Settings
+
 T = TypeVar("T", bound="Serializable")
 
 
@@ -79,25 +81,24 @@ class MetaClasses:
 class ContextManager:
     @staticmethod
     @contextmanager
-    def cd(to: Optional[Union[Path, str]]) -> Iterator[None]:
+    def cd(to: Optional[Union[Path, str]] = None) -> Iterator[None]:
         """
        changes current working directory to @path or `git root` if @path is None
         :param to:
         :return:
         """
-        # if not to:
-        #     try:
-        #         to = Shell.get_output_or_raise("git rev-parse --show-toplevel")
-        #     except:
-        #         pass
-        # if not to:
-        #     if Path(_Settings.DOCKER_WD).is_dir():
-        #         to = _Settings.DOCKER_WD
-        # if not to:
-        #     assert False, "FIX IT"
-        # assert to
+        if not to:
+            try:
+                to = Shell.get_output_or_raise("git rev-parse --show-toplevel")
+            except:
+                pass
+        if not to:
+            if Path(_Settings.DOCKER_WD).is_dir():
+                to = _Settings.DOCKER_WD
+        if not to:
+            assert False, "FIX IT"
+        assert to
         old_pwd = os.getcwd()
-        if to:
         os.chdir(to)
         try:
             yield
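Both versions of `ContextManager.cd` above are a chdir-and-restore context manager; the added side additionally falls back to the git root (or the docker workdir) when no target is given. A minimal standalone version of the same pattern; the git fallback via `subprocess` is an assumption about intent, not a copy of the project's `Shell` helper:

import os
import subprocess
from contextlib import contextmanager
from pathlib import Path
from typing import Iterator, Optional, Union


@contextmanager
def cd(to: Optional[Union[Path, str]] = None) -> Iterator[None]:
    """Temporarily change the working directory, defaulting to the git root."""
    if not to:
        try:
            to = subprocess.check_output(
                ["git", "rev-parse", "--show-toplevel"], text=True
            ).strip()
        except (OSError, subprocess.CalledProcessError):
            to = None
    old_pwd = os.getcwd()
    if to:
        os.chdir(to)
    try:
        yield
    finally:
        # Always restore the previous directory, even on exceptions.
        os.chdir(old_pwd)


if __name__ == "__main__":
    with cd():
        print("inside:", os.getcwd())
    print("restored:", os.getcwd())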
@@ -4,8 +4,10 @@ from itertools import chain
 from pathlib import Path
 
 from praktika import Workflow
+from praktika._settings import GHRunners
 from praktika.mangle import _get_workflows
-from praktika.settings import GHRunners, Settings
+from praktika.settings import Settings
+from praktika.utils import ContextManager
 
 
 class Validator:
@@ -117,6 +119,7 @@
     def validate_file_paths_in_run_command(cls, workflow: Workflow.Config) -> None:
         if not Settings.VALIDATE_FILE_PATHS:
             return
+        with ContextManager.cd():
             for job in workflow.jobs:
                 run_command = job.command
                 command_parts = run_command.split(" ")
@@ -132,6 +135,7 @@
     def validate_file_paths_in_digest_configs(cls, workflow: Workflow.Config) -> None:
         if not Settings.VALIDATE_FILE_PATHS:
             return
+        with ContextManager.cd():
             for job in workflow.jobs:
                 if not job.digest_config:
                     continue
@@ -149,6 +153,7 @@
 
     @classmethod
     def validate_requirements_txt_files(cls, workflow: Workflow.Config) -> None:
+        with ContextManager.cd():
             for job in workflow.jobs:
                 if job.job_requirements:
                     if job.job_requirements.python_requirements_txt:
@@ -166,7 +171,9 @@
                     "\n echo requests==2.32.3 >> ./ci/requirements.txt"
                 )
                 message += "\n echo https://clickhouse-builds.s3.amazonaws.com/packages/praktika-0.1-py3-none-any.whl >> ./ci/requirements.txt"
-                cls.evaluate_check(path.is_file(), message, job.name, workflow.name)
+                cls.evaluate_check(
+                    path.is_file(), message, job.name, workflow.name
+                )
 
     @classmethod
     def validate_dockers(cls, workflow: Workflow.Config):
@@ -31,7 +31,6 @@ class Workflow:
     enable_report: bool = False
     enable_merge_ready_status: bool = False
     enable_cidb: bool = False
-    enable_merge_commit: bool = False
 
     def is_event_pull_request(self):
         return self.event == Workflow.Event.PULL_REQUEST
@@ -80,8 +80,6 @@ jobs:
       steps:
         - name: Checkout code
          uses: actions/checkout@v4
-          with:
-            ref: ${{{{ github.head_ref }}}}
 {JOB_ADDONS}
         - name: Prepare env script
           run: |
@@ -104,11 +102,7 @@ jobs:
           run: |
             . /tmp/praktika_setup_env.sh
             set -o pipefail
-            if command -v ts &> /dev/null; then
-                python3 -m praktika run --job '''{JOB_NAME}''' --workflow "{WORKFLOW_NAME}" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee /tmp/praktika/praktika_run.log
-            else
-                python3 -m praktika run --job '''{JOB_NAME}''' --workflow "{WORKFLOW_NAME}" --ci |& tee /tmp/praktika/praktika_run.log
-            fi
+            {PYTHON} -m praktika run --job '''{JOB_NAME}''' --workflow "{WORKFLOW_NAME}" --ci |& tee {RUN_LOG}
 {UPLOADS_GITHUB}\
 """
 
@@ -190,9 +184,11 @@ jobs:
             False
         ), f"Workflow event not yet supported [{workflow_config.event}]"
 
+        with ContextManager.cd():
             with open(self._get_workflow_file_name(workflow_config.name), "w") as f:
                 f.write(yaml_workflow_str)
 
+        with ContextManager.cd():
             Shell.check("git add ./.github/workflows/*.yaml")
 
 
@@ -7,33 +7,24 @@ S3_BUCKET_HTTP_ENDPOINT = "clickhouse-builds.s3.amazonaws.com"
 class RunnerLabels:
     CI_SERVICES = "ci_services"
     CI_SERVICES_EBS = "ci_services_ebs"
-    BUILDER_AMD = "builder"
-    BUILDER_ARM = "builder-aarch64"
-    FUNC_TESTER_AMD = "func-tester"
-    FUNC_TESTER_ARM = "func-tester-aarch64"
+    BUILDER = "builder"
 
 
 BASE_BRANCH = "master"
 
-azure_secret = Secret.Config(
-    name="azure_connection_string",
-    type=Secret.Type.AWS_SSM_VAR,
-)
-
 SECRETS = [
     Secret.Config(
         name="dockerhub_robot_password",
         type=Secret.Type.AWS_SSM_VAR,
     ),
-    azure_secret,
-    # Secret.Config(
-    #     name="woolenwolf_gh_app.clickhouse-app-id",
-    #     type=Secret.Type.AWS_SSM_SECRET,
-    # ),
-    # Secret.Config(
-    #     name="woolenwolf_gh_app.clickhouse-app-key",
-    #     type=Secret.Type.AWS_SSM_SECRET,
-    # ),
+    Secret.Config(
+        name="woolenwolf_gh_app.clickhouse-app-id",
+        type=Secret.Type.AWS_SSM_SECRET,
+    ),
+    Secret.Config(
+        name="woolenwolf_gh_app.clickhouse-app-key",
+        type=Secret.Type.AWS_SSM_SECRET,
+    ),
 ]
 
 DOCKERS = [
@@ -127,18 +118,18 @@ DOCKERS = [
     #     platforms=Docker.Platforms.arm_amd,
     #     depends_on=["clickhouse/test-base"],
     # ),
-    Docker.Config(
-        name="clickhouse/stateless-test",
-        path="./ci/docker/stateless-test",
-        platforms=Docker.Platforms.arm_amd,
-        depends_on=[],
-    ),
-    Docker.Config(
-        name="clickhouse/stateful-test",
-        path="./ci/docker/stateful-test",
-        platforms=Docker.Platforms.arm_amd,
-        depends_on=["clickhouse/stateless-test"],
-    ),
+    # Docker.Config(
+    #     name="clickhouse/stateless-test",
+    #     path="./ci/docker/test/stateless",
+    #     platforms=Docker.Platforms.arm_amd,
+    #     depends_on=["clickhouse/test-base"],
+    # ),
+    # Docker.Config(
+    #     name="clickhouse/stateful-test",
+    #     path="./ci/docker/test/stateful",
+    #     platforms=Docker.Platforms.arm_amd,
+    #     depends_on=["clickhouse/stateless-test"],
+    # ),
     # Docker.Config(
     #     name="clickhouse/stress-test",
     #     path="./ci/docker/test/stress",
@@ -239,6 +230,4 @@ DOCKERS = [
 class JobNames:
     STYLE_CHECK = "Style Check"
     FAST_TEST = "Fast test"
-    BUILD = "Build"
-    STATELESS = "Stateless tests"
-    STATEFUL = "Stateful tests"
+    BUILD_AMD_DEBUG = "Build amd64 debug"
@@ -4,8 +4,6 @@ from ci.settings.definitions import (
     RunnerLabels,
 )
 
-MAIN_BRANCH = "master"
-
 S3_ARTIFACT_PATH = f"{S3_BUCKET_NAME}/artifacts"
 CI_CONFIG_RUNS_ON = [RunnerLabels.CI_SERVICES]
 DOCKER_BUILD_RUNS_ON = [RunnerLabels.CI_SERVICES_EBS]
@@ -1,3 +1,5 @@
+from typing import List
+
 from praktika import Artifact, Job, Workflow
 from praktika.settings import Settings
 
@@ -11,10 +13,7 @@ from ci.settings.definitions import (
 
 
 class ArtifactNames:
-    CH_AMD_DEBUG = "CH_AMD_DEBUG"
-    CH_AMD_RELEASE = "CH_AMD_RELEASE"
-    CH_ARM_RELEASE = "CH_ARM_RELEASE"
-    CH_ARM_ASAN = "CH_ARM_ASAN"
+    ch_debug_binary = "clickhouse_debug_binary"
 
 
 style_check_job = Job.Config(
@@ -26,7 +25,7 @@ style_check_job = Job.Config(
 
 fast_test_job = Job.Config(
     name=JobNames.FAST_TEST,
-    runs_on=[RunnerLabels.BUILDER_AMD],
+    runs_on=[RunnerLabels.BUILDER],
     command="python3 ./ci/jobs/fast_test.py",
     run_in_docker="clickhouse/fasttest",
     digest_config=Job.CacheDigestConfig(
@@ -38,13 +37,11 @@ fast_test_job = Job.Config(
     ),
 )
 
-build_jobs = Job.Config(
-    name=JobNames.BUILD,
-    runs_on=["...from params..."],
-    requires=[JobNames.FAST_TEST],
-    command="python3 ./ci/jobs/build_clickhouse.py --build-type {PARAMETER}",
+job_build_amd_debug = Job.Config(
+    name=JobNames.BUILD_AMD_DEBUG,
+    runs_on=[RunnerLabels.BUILDER],
+    command="python3 ./ci/jobs/build_clickhouse.py amd_debug",
     run_in_docker="clickhouse/fasttest",
-    timeout=3600 * 2,
     digest_config=Job.CacheDigestConfig(
         include_paths=[
             "./src",
@@ -57,85 +54,9 @@ build_jobs = Job.Config(
             "./docker/packager/packager",
             "./rust",
             "./tests/ci/version_helper.py",
-            "./ci/jobs/build_clickhouse.py",
         ],
     ),
-).parametrize(
-    parameter=["amd_debug", "amd_release", "arm_release", "arm_asan"],
-    provides=[
-        [ArtifactNames.CH_AMD_DEBUG],
-        [ArtifactNames.CH_AMD_RELEASE],
-        [ArtifactNames.CH_ARM_RELEASE],
-        [ArtifactNames.CH_ARM_ASAN],
-    ],
-    runs_on=[
-        [RunnerLabels.BUILDER_AMD],
-        [RunnerLabels.BUILDER_AMD],
-        [RunnerLabels.BUILDER_ARM],
-        [RunnerLabels.BUILDER_ARM],
-    ],
-)
-
-stateless_tests_jobs = Job.Config(
-    name=JobNames.STATELESS,
-    runs_on=[RunnerLabels.BUILDER_AMD],
-    command="python3 ./ci/jobs/functional_stateless_tests.py --test-options {PARAMETER}",
-    # many tests expect to see "/var/lib/clickhouse" in various output lines - add mount for now, consider creating this dir in docker file
-    run_in_docker="clickhouse/stateless-test+--security-opt seccomp=unconfined",
-    digest_config=Job.CacheDigestConfig(
-        include_paths=[
-            "./ci/jobs/functional_stateless_tests.py",
-        ],
-    ),
-).parametrize(
-    parameter=[
-        "amd_debug,parallel",
-        "amd_debug,non-parallel",
-        "amd_release,parallel",
-        "amd_release,non-parallel",
-        "arm_asan,parallel",
-        "arm_asan,non-parallel",
-    ],
-    runs_on=[
-        [RunnerLabels.BUILDER_AMD],
-        [RunnerLabels.FUNC_TESTER_AMD],
-        [RunnerLabels.BUILDER_AMD],
-        [RunnerLabels.FUNC_TESTER_AMD],
-        [RunnerLabels.BUILDER_ARM],
-        [RunnerLabels.FUNC_TESTER_ARM],
-    ],
-    requires=[
-        [ArtifactNames.CH_AMD_DEBUG],
-        [ArtifactNames.CH_AMD_DEBUG],
-        [ArtifactNames.CH_AMD_RELEASE],
-        [ArtifactNames.CH_AMD_RELEASE],
-        [ArtifactNames.CH_ARM_ASAN],
-        [ArtifactNames.CH_ARM_ASAN],
-    ],
-)
-
-stateful_tests_jobs = Job.Config(
-    name=JobNames.STATEFUL,
-    runs_on=[RunnerLabels.BUILDER_AMD],
-    command="python3 ./ci/jobs/functional_stateful_tests.py --test-options {PARAMETER}",
-    # many tests expect to see "/var/lib/clickhouse"
-    # some tests expect to see "/var/log/clickhouse"
-    run_in_docker="clickhouse/stateless-test+--security-opt seccomp=unconfined",
-    digest_config=Job.CacheDigestConfig(
-        include_paths=[
-            "./ci/jobs/functional_stateful_tests.py",
-        ],
-    ),
-).parametrize(
-    parameter=[
-        "amd_debug,parallel",
-    ],
-    runs_on=[
-        [RunnerLabels.BUILDER_AMD],
-    ],
-    requires=[
-        [ArtifactNames.CH_AMD_DEBUG],
-    ],
+    provides=[ArtifactNames.ch_debug_binary],
 )
 
 workflow = Workflow.Config(
@ -145,31 +66,14 @@ workflow = Workflow.Config(
     jobs=[
         style_check_job,
         fast_test_job,
-        *build_jobs,
-        *stateless_tests_jobs,
-        *stateful_tests_jobs,
+        job_build_amd_debug,
     ],
     artifacts=[
         Artifact.Config(
-            name=ArtifactNames.CH_AMD_DEBUG,
+            name=ArtifactNames.ch_debug_binary,
             type=Artifact.Type.S3,
             path=f"{Settings.TEMP_DIR}/build/programs/clickhouse",
-        ),
-        Artifact.Config(
-            name=ArtifactNames.CH_AMD_RELEASE,
-            type=Artifact.Type.S3,
-            path=f"{Settings.TEMP_DIR}/build/programs/clickhouse",
-        ),
-        Artifact.Config(
-            name=ArtifactNames.CH_ARM_RELEASE,
-            type=Artifact.Type.S3,
-            path=f"{Settings.TEMP_DIR}/build/programs/clickhouse",
-        ),
-        Artifact.Config(
-            name=ArtifactNames.CH_ARM_ASAN,
-            type=Artifact.Type.S3,
-            path=f"{Settings.TEMP_DIR}/build/programs/clickhouse",
-        ),
+        )
     ],
     dockers=DOCKERS,
     secrets=SECRETS,
@ -180,14 +84,11 @@ workflow = Workflow.Config(
 
 WORKFLOWS = [
     workflow,
-]
+] # type: List[Workflow.Config]
 
 
-# if __name__ == "__main__":
-#     # local job test inside praktika environment
-#     from praktika.runner import Runner
-#     from praktika.digest import Digest
-#
-#     print(Digest().calc_job_digest(amd_debug_build_job))
-#
-#     Runner().run(workflow, fast_test_job, docker="fasttest", local_run=True)
+if __name__ == "__main__":
+    # local job test inside praktika environment
+    from praktika.runner import Runner
+
+    Runner().run(workflow, fast_test_job, docker="fasttest", dummy_env=True)
@ -74,6 +74,7 @@ elseif (ARCH_AARCH64)
     #        introduced as optional, either in v8.2 [7] or in v8.4 [8].
     #  rcpc: Load-Acquire RCpc Register. Better support of release/acquire of atomics. Good for allocators and high contention code.
     #        Optional in v8.2, mandatory in v8.3 [9]. Supported in Graviton >=2, Azure and GCP instances.
+    #  bf16: Bfloat16, a half-precision floating point format developed by Google Brain. Optional in v8.2, mandatory in v8.6.
     #
     # [1] https://github.com/aws/aws-graviton-getting-started/blob/main/c-c%2B%2B.md
     # [2] https://community.arm.com/arm-community-blogs/b/tools-software-ides-blog/posts/making-the-most-of-the-arm-architecture-in-gcc-10
@ -4489,9 +4489,9 @@ Using replacement fields, you can define a pattern for the resulting string.
 | k | clockhour of day (1~24) | number | 24 |
 | m | minute of hour | number | 30 |
 | s | second of minute | number | 55 |
-| S | fraction of second (not supported yet) | number | 978 |
-| z | time zone (short name not supported yet) | text | Pacific Standard Time; PST |
-| Z | time zone offset/id (not supported yet) | zone | -0800; -08:00; America/Los_Angeles |
+| S | fraction of second | number | 978 |
+| z | time zone | text | Eastern Standard Time; EST |
+| Z | time zone offset | zone | -0800; -0812 |
 | ' | escape for text | delimiter | |
 | '' | single quote | literal | ' |
 
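The rows above drop the "(not supported yet)" notes for `S`, `z` and `Z`. Illustrative only, not part of the patch, and assuming a server that already contains this change: fractional seconds can then be rendered with Joda `S` placeholders.

```sql
-- Format a DateTime64(3) value with milliseconds using Joda syntax.
SELECT formatDateTimeInJodaSyntax(toDateTime64('2024-11-25 17:30:15.123', 3, 'UTC'), 'yyyy-MM-dd HH:mm:ss.SSS');
-- expected output: 2024-11-25 17:30:15.123
```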
@ -6867,9 +6867,53 @@ Same as for [parseDateTimeInJodaSyntax](#parsedatetimeinjodasyntax) except that
 
 Same as for [parseDateTimeInJodaSyntax](#parsedatetimeinjodasyntax) except that it returns `NULL` when it encounters a date format that cannot be processed.
 
+## parseDateTime64
+
+Converts a [String](../data-types/string.md) to [DateTime64](../data-types/datetime64.md) according to a [MySQL format string](https://dev.mysql.com/doc/refman/8.0/en/date-and-time-functions.html#function_date-format).
+
+**Syntax**
+
+``` sql
+parseDateTime64(str[, format[, timezone]])
+```
+
+**Arguments**
+
+- `str` — The String to be parsed.
+- `format` — The format string. Optional. `%Y-%m-%d %H:%i:%s.%f` if not specified.
+- `timezone` — [Timezone](/docs/en/operations/server-configuration-parameters/settings.md#timezone). Optional.
+
+**Returned value(s)**
+
+Returns [DateTime64](../data-types/datetime64.md) type values parsed from input string according to a MySQL style format string.
+
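Not part of the patch: a minimal usage sketch for the function documented above, assuming a build that includes it. With MySQL syntax the default format carries exactly six fractional digits, so the result type is `DateTime64(6)`.

```sql
SELECT
    parseDateTime64('2024-11-25 17:30:15.123456', '%Y-%m-%d %H:%i:%s.%f') AS dt64,
    toTypeName(dt64) AS type;
-- expected: 2024-11-25 17:30:15.123456    DateTime64(6)

-- The OrNull variant documented below returns NULL instead of throwing on malformed input.
SELECT parseDateTime64OrNull('not a datetime', '%Y-%m-%d %H:%i:%s.%f');
```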
+## parseDateTime64OrZero
+
+Same as for [parseDateTime64](#parsedatetime64) except that it returns zero date when it encounters a date format that cannot be processed.
+
+## parseDateTime64OrNull
+
+Same as for [parseDateTime64](#parsedatetime64) except that it returns `NULL` when it encounters a date format that cannot be processed.
+
 ## parseDateTime64InJodaSyntax
 
-Similar to [parseDateTimeInJodaSyntax](#parsedatetimeinjodasyntax). Differently, it returns a value of type [DateTime64](../data-types/datetime64.md).
+Converts a [String](../data-types/string.md) to [DateTime64](../data-types/datetime64.md) according to a [Joda format string](https://joda-time.sourceforge.net/apidocs/org/joda/time/format/DateTimeFormat.html).
+
+**Syntax**
+
+``` sql
+parseDateTime64InJodaSyntax(str[, format[, timezone]])
+```
+
+**Arguments**
+
+- `str` — The String to be parsed.
+- `format` — The format string. Optional. `yyyy-MM-dd HH:mm:ss` if not specified.
+- `timezone` — [Timezone](/docs/en/operations/server-configuration-parameters/settings.md#timezone). Optional.
+
+**Returned value(s)**
+
+Returns [DateTime64](../data-types/datetime64.md) type values parsed from input string according to a joda style format string.
 
 ## parseDateTime64InJodaSyntaxOrZero
 
@ -161,6 +161,8 @@ Settings:
 - `actions` — Prints detailed information about step actions. Default: 0.
 - `json` — Prints query plan steps as a row in [JSON](../../interfaces/formats.md#json) format. Default: 0. It is recommended to use [TSVRaw](../../interfaces/formats.md#tabseparatedraw) format to avoid unnecessary escaping.
 
+When `json=1` step names will contain an additional suffix with unique step identifier.
+
 Example:
 
 ```sql
@ -194,30 +196,25 @@ EXPLAIN json = 1, description = 0 SELECT 1 UNION ALL SELECT 2 FORMAT TSVRaw;
|
|||||||
{
|
{
|
||||||
"Plan": {
|
"Plan": {
|
||||||
"Node Type": "Union",
|
"Node Type": "Union",
|
||||||
|
"Node Id": "Union_10",
|
||||||
"Plans": [
|
"Plans": [
|
||||||
{
|
{
|
||||||
"Node Type": "Expression",
|
"Node Type": "Expression",
|
||||||
|
"Node Id": "Expression_13",
|
||||||
"Plans": [
|
"Plans": [
|
||||||
{
|
{
|
||||||
"Node Type": "SettingQuotaAndLimits",
|
"Node Type": "ReadFromStorage",
|
||||||
"Plans": [
|
"Node Id": "ReadFromStorage_0"
|
||||||
{
|
|
||||||
"Node Type": "ReadFromStorage"
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"Node Type": "Expression",
|
"Node Type": "Expression",
|
||||||
|
"Node Id": "Expression_16",
|
||||||
"Plans": [
|
"Plans": [
|
||||||
{
|
{
|
||||||
"Node Type": "SettingQuotaAndLimits",
|
"Node Type": "ReadFromStorage",
|
||||||
"Plans": [
|
"Node Id": "ReadFromStorage_4"
|
||||||
{
|
|
||||||
"Node Type": "ReadFromStorage"
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
@ -249,6 +246,7 @@ EXPLAIN json = 1, description = 0, header = 1 SELECT 1, 2 + dummy;
|
|||||||
{
|
{
|
||||||
"Plan": {
|
"Plan": {
|
||||||
"Node Type": "Expression",
|
"Node Type": "Expression",
|
||||||
|
"Node Id": "Expression_5",
|
||||||
"Header": [
|
"Header": [
|
||||||
{
|
{
|
||||||
"Name": "1",
|
"Name": "1",
|
||||||
@ -259,18 +257,10 @@ EXPLAIN json = 1, description = 0, header = 1 SELECT 1, 2 + dummy;
|
|||||||
"Type": "UInt16"
|
"Type": "UInt16"
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
"Plans": [
|
|
||||||
{
|
|
||||||
"Node Type": "SettingQuotaAndLimits",
|
|
||||||
"Header": [
|
|
||||||
{
|
|
||||||
"Name": "dummy",
|
|
||||||
"Type": "UInt8"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"Plans": [
|
"Plans": [
|
||||||
{
|
{
|
||||||
"Node Type": "ReadFromStorage",
|
"Node Type": "ReadFromStorage",
|
||||||
|
"Node Id": "ReadFromStorage_0",
|
||||||
"Header": [
|
"Header": [
|
||||||
{
|
{
|
||||||
"Name": "dummy",
|
"Name": "dummy",
|
||||||
@ -280,8 +270,6 @@ EXPLAIN json = 1, description = 0, header = 1 SELECT 1, 2 + dummy;
|
|||||||
}
|
}
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
]
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
```
|
```
|
||||||
@ -351,17 +339,31 @@ EXPLAIN json = 1, actions = 1, description = 0 SELECT 1 FORMAT TSVRaw;
|
|||||||
{
|
{
|
||||||
"Plan": {
|
"Plan": {
|
||||||
"Node Type": "Expression",
|
"Node Type": "Expression",
|
||||||
|
"Node Id": "Expression_5",
|
||||||
"Expression": {
|
"Expression": {
|
||||||
"Inputs": [],
|
"Inputs": [
|
||||||
|
{
|
||||||
|
"Name": "dummy",
|
||||||
|
"Type": "UInt8"
|
||||||
|
}
|
||||||
|
],
|
||||||
"Actions": [
|
"Actions": [
|
||||||
{
|
{
|
||||||
"Node Type": "Column",
|
"Node Type": "INPUT",
|
||||||
"Result Type": "UInt8",
|
"Result Type": "UInt8",
|
||||||
"Result Type": "Column",
|
"Result Name": "dummy",
|
||||||
|
"Arguments": [0],
|
||||||
|
"Removed Arguments": [0],
|
||||||
|
"Result": 0
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"Node Type": "COLUMN",
|
||||||
|
"Result Type": "UInt8",
|
||||||
|
"Result Name": "1",
|
||||||
"Column": "Const(UInt8)",
|
"Column": "Const(UInt8)",
|
||||||
"Arguments": [],
|
"Arguments": [],
|
||||||
"Removed Arguments": [],
|
"Removed Arguments": [],
|
||||||
"Result": 0
|
"Result": 1
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
"Outputs": [
|
"Outputs": [
|
||||||
@ -370,17 +372,12 @@ EXPLAIN json = 1, actions = 1, description = 0 SELECT 1 FORMAT TSVRaw;
|
|||||||
"Type": "UInt8"
|
"Type": "UInt8"
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
"Positions": [0],
|
"Positions": [1]
|
||||||
"Project Input": true
|
|
||||||
},
|
},
|
||||||
"Plans": [
|
"Plans": [
|
||||||
{
|
{
|
||||||
"Node Type": "SettingQuotaAndLimits",
|
"Node Type": "ReadFromStorage",
|
||||||
"Plans": [
|
"Node Id": "ReadFromStorage_0"
|
||||||
{
|
|
||||||
"Node Type": "ReadFromStorage"
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
@ -396,6 +393,8 @@ Settings:
|
|||||||
- `graph` — Prints a graph described in the [DOT](https://en.wikipedia.org/wiki/DOT_(graph_description_language)) graph description language. Default: 0.
|
- `graph` — Prints a graph described in the [DOT](https://en.wikipedia.org/wiki/DOT_(graph_description_language)) graph description language. Default: 0.
|
||||||
- `compact` — Prints graph in compact mode if `graph` setting is enabled. Default: 1.
|
- `compact` — Prints graph in compact mode if `graph` setting is enabled. Default: 1.
|
||||||
|
|
||||||
|
When `compact=0` and `graph=1` processor names will contain an additional suffix with unique processor identifier.
|
||||||
|
|
||||||
Example:
|
Example:
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
@ -136,7 +136,7 @@ ClickHouse применяет настройку в тех случаях, ко
 - 0 — выключена.
 - 1 — включена.
 
-Значение по умолчанию: 0.
+Значение по умолчанию: 1.
 
 ## http_zlib_compression_level {#settings-http_zlib_compression_level}
 
@ -97,7 +97,7 @@ ClickHouse从表的过时副本中选择最相关的副本。
 - 0 — Disabled.
 - 1 — Enabled.
 
-默认值:0。
+默认值:1。
 
 ## http_zlib_compression_level {#settings-http_zlib_compression_level}
 
@ -22,6 +22,13 @@ namespace ErrorCodes
 namespace
 {
 
+/** Due to a lack of proper code review, this code was contributed with a multiplication of template instantiations
+  * over all pairs of data types, and we deeply regret that.
+  *
+  * We cannot remove all combinations, because the binary representation of serialized data has to remain the same,
+  * but we can partially heal the wound by treating unsigned and signed data types in the same way.
+  */
+
 template <typename ValueType, typename TimestampType>
 struct AggregationFunctionDeltaSumTimestampData
 {
@ -37,23 +44,22 @@ template <typename ValueType, typename TimestampType>
|
|||||||
class AggregationFunctionDeltaSumTimestamp final
|
class AggregationFunctionDeltaSumTimestamp final
|
||||||
: public IAggregateFunctionDataHelper<
|
: public IAggregateFunctionDataHelper<
|
||||||
AggregationFunctionDeltaSumTimestampData<ValueType, TimestampType>,
|
AggregationFunctionDeltaSumTimestampData<ValueType, TimestampType>,
|
||||||
AggregationFunctionDeltaSumTimestamp<ValueType, TimestampType>
|
AggregationFunctionDeltaSumTimestamp<ValueType, TimestampType>>
|
||||||
>
|
|
||||||
{
|
{
|
||||||
public:
|
public:
|
||||||
AggregationFunctionDeltaSumTimestamp(const DataTypes & arguments, const Array & params)
|
AggregationFunctionDeltaSumTimestamp(const DataTypes & arguments, const Array & params)
|
||||||
: IAggregateFunctionDataHelper<
|
: IAggregateFunctionDataHelper<
|
||||||
AggregationFunctionDeltaSumTimestampData<ValueType, TimestampType>,
|
AggregationFunctionDeltaSumTimestampData<ValueType, TimestampType>,
|
||||||
AggregationFunctionDeltaSumTimestamp<ValueType, TimestampType>
|
AggregationFunctionDeltaSumTimestamp<ValueType, TimestampType>>{arguments, params, createResultType()}
|
||||||
>{arguments, params, createResultType()}
|
{
|
||||||
{}
|
}
|
||||||
|
|
||||||
AggregationFunctionDeltaSumTimestamp()
|
AggregationFunctionDeltaSumTimestamp()
|
||||||
: IAggregateFunctionDataHelper<
|
: IAggregateFunctionDataHelper<
|
||||||
AggregationFunctionDeltaSumTimestampData<ValueType, TimestampType>,
|
AggregationFunctionDeltaSumTimestampData<ValueType, TimestampType>,
|
||||||
AggregationFunctionDeltaSumTimestamp<ValueType, TimestampType>
|
AggregationFunctionDeltaSumTimestamp<ValueType, TimestampType>>{}
|
||||||
>{}
|
{
|
||||||
{}
|
}
|
||||||
|
|
||||||
bool allocatesMemoryInArena() const override { return false; }
|
bool allocatesMemoryInArena() const override { return false; }
|
||||||
|
|
||||||
@ -63,8 +69,8 @@ public:
|
|||||||
|
|
||||||
void NO_SANITIZE_UNDEFINED ALWAYS_INLINE add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena *) const override
|
void NO_SANITIZE_UNDEFINED ALWAYS_INLINE add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena *) const override
|
||||||
{
|
{
|
||||||
auto value = assert_cast<const ColumnVector<ValueType> &>(*columns[0]).getData()[row_num];
|
auto value = unalignedLoad<ValueType>(columns[0]->getRawData().data() + row_num * sizeof(ValueType));
|
||||||
auto ts = assert_cast<const ColumnVector<TimestampType> &>(*columns[1]).getData()[row_num];
|
auto ts = unalignedLoad<TimestampType>(columns[1]->getRawData().data() + row_num * sizeof(TimestampType));
|
||||||
|
|
||||||
auto & data = this->data(place);
|
auto & data = this->data(place);
|
||||||
|
|
||||||
@ -172,10 +178,48 @@ public:
|
|||||||
|
|
||||||
void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override
|
void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override
|
||||||
{
|
{
|
||||||
assert_cast<ColumnVector<ValueType> &>(to).getData().push_back(this->data(place).sum);
|
static_cast<ColumnFixedSizeHelper &>(to).template insertRawData<sizeof(ValueType)>(
|
||||||
|
reinterpret_cast<const char *>(&this->data(place).sum));
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
||||||
|
template <typename FirstType, template <typename, typename> class AggregateFunctionTemplate, typename... TArgs>
|
||||||
|
IAggregateFunction * createWithTwoTypesSecond(const IDataType & second_type, TArgs && ... args)
|
||||||
|
{
|
||||||
|
WhichDataType which(second_type);
|
||||||
|
|
||||||
|
if (which.idx == TypeIndex::UInt32) return new AggregateFunctionTemplate<FirstType, UInt32>(args...);
|
||||||
|
if (which.idx == TypeIndex::UInt64) return new AggregateFunctionTemplate<FirstType, UInt64>(args...);
|
||||||
|
if (which.idx == TypeIndex::Int32) return new AggregateFunctionTemplate<FirstType, UInt32>(args...);
|
||||||
|
if (which.idx == TypeIndex::Int64) return new AggregateFunctionTemplate<FirstType, UInt64>(args...);
|
||||||
|
if (which.idx == TypeIndex::Float32) return new AggregateFunctionTemplate<FirstType, Float32>(args...);
|
||||||
|
if (which.idx == TypeIndex::Float64) return new AggregateFunctionTemplate<FirstType, Float64>(args...);
|
||||||
|
if (which.idx == TypeIndex::Date) return new AggregateFunctionTemplate<FirstType, UInt16>(args...);
|
||||||
|
if (which.idx == TypeIndex::DateTime) return new AggregateFunctionTemplate<FirstType, UInt32>(args...);
|
||||||
|
|
||||||
|
return nullptr;
|
||||||
|
}
|
||||||
|
|
||||||
|
template <template <typename, typename> class AggregateFunctionTemplate, typename... TArgs>
|
||||||
|
IAggregateFunction * createWithTwoTypes(const IDataType & first_type, const IDataType & second_type, TArgs && ... args)
|
||||||
|
{
|
||||||
|
WhichDataType which(first_type);
|
||||||
|
|
||||||
|
if (which.idx == TypeIndex::UInt8) return createWithTwoTypesSecond<UInt8, AggregateFunctionTemplate>(second_type, args...);
|
||||||
|
if (which.idx == TypeIndex::UInt16) return createWithTwoTypesSecond<UInt16, AggregateFunctionTemplate>(second_type, args...);
|
||||||
|
if (which.idx == TypeIndex::UInt32) return createWithTwoTypesSecond<UInt32, AggregateFunctionTemplate>(second_type, args...);
|
||||||
|
if (which.idx == TypeIndex::UInt64) return createWithTwoTypesSecond<UInt64, AggregateFunctionTemplate>(second_type, args...);
|
||||||
|
if (which.idx == TypeIndex::Int8) return createWithTwoTypesSecond<UInt8, AggregateFunctionTemplate>(second_type, args...);
|
||||||
|
if (which.idx == TypeIndex::Int16) return createWithTwoTypesSecond<UInt16, AggregateFunctionTemplate>(second_type, args...);
|
||||||
|
if (which.idx == TypeIndex::Int32) return createWithTwoTypesSecond<UInt32, AggregateFunctionTemplate>(second_type, args...);
|
||||||
|
if (which.idx == TypeIndex::Int64) return createWithTwoTypesSecond<UInt64, AggregateFunctionTemplate>(second_type, args...);
|
||||||
|
if (which.idx == TypeIndex::Float32) return createWithTwoTypesSecond<Float32, AggregateFunctionTemplate>(second_type, args...);
|
||||||
|
if (which.idx == TypeIndex::Float64) return createWithTwoTypesSecond<Float64, AggregateFunctionTemplate>(second_type, args...);
|
||||||
|
|
||||||
|
return nullptr;
|
||||||
|
}
|
||||||
|
|
||||||
AggregateFunctionPtr createAggregateFunctionDeltaSumTimestamp(
|
AggregateFunctionPtr createAggregateFunctionDeltaSumTimestamp(
|
||||||
const String & name,
|
const String & name,
|
||||||
const DataTypes & arguments,
|
const DataTypes & arguments,
|
||||||
@ -193,8 +237,14 @@ AggregateFunctionPtr createAggregateFunctionDeltaSumTimestamp(
|
|||||||
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of argument for aggregate function {}, "
|
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of argument for aggregate function {}, "
|
||||||
"must be Int, Float, Date, DateTime", arguments[1]->getName(), name);
|
"must be Int, Float, Date, DateTime", arguments[1]->getName(), name);
|
||||||
|
|
||||||
return AggregateFunctionPtr(createWithTwoNumericOrDateTypes<AggregationFunctionDeltaSumTimestamp>(
|
auto res = AggregateFunctionPtr(createWithTwoTypes<AggregationFunctionDeltaSumTimestamp>(
|
||||||
*arguments[0], *arguments[1], arguments, params));
|
*arguments[0], *arguments[1], arguments, params));
|
||||||
|
|
||||||
|
if (!res)
|
||||||
|
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of argument for aggregate function {}, "
|
||||||
|
"this type is not supported", arguments[0]->getName(), name);
|
||||||
|
|
||||||
|
return res;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -184,36 +184,8 @@ static IAggregateFunction * createWithDecimalType(const IDataType & argument_typ
|
|||||||
}
|
}
|
||||||
|
|
||||||
/** For template with two arguments.
|
/** For template with two arguments.
|
||||||
|
* This is an extremely dangerous for code bloat - do not use.
|
||||||
*/
|
*/
|
||||||
template <typename FirstType, template <typename, typename> class AggregateFunctionTemplate, typename... TArgs>
|
|
||||||
static IAggregateFunction * createWithTwoNumericTypesSecond(const IDataType & second_type, TArgs && ... args)
|
|
||||||
{
|
|
||||||
WhichDataType which(second_type);
|
|
||||||
#define DISPATCH(TYPE) \
|
|
||||||
if (which.idx == TypeIndex::TYPE) return new AggregateFunctionTemplate<FirstType, TYPE>(args...);
|
|
||||||
FOR_NUMERIC_TYPES(DISPATCH)
|
|
||||||
#undef DISPATCH
|
|
||||||
if (which.idx == TypeIndex::Enum8) return new AggregateFunctionTemplate<FirstType, Int8>(args...);
|
|
||||||
if (which.idx == TypeIndex::Enum16) return new AggregateFunctionTemplate<FirstType, Int16>(args...);
|
|
||||||
return nullptr;
|
|
||||||
}
|
|
||||||
|
|
||||||
template <template <typename, typename> class AggregateFunctionTemplate, typename... TArgs>
|
|
||||||
static IAggregateFunction * createWithTwoNumericTypes(const IDataType & first_type, const IDataType & second_type, TArgs && ... args)
|
|
||||||
{
|
|
||||||
WhichDataType which(first_type);
|
|
||||||
#define DISPATCH(TYPE) \
|
|
||||||
if (which.idx == TypeIndex::TYPE) \
|
|
||||||
return createWithTwoNumericTypesSecond<TYPE, AggregateFunctionTemplate>(second_type, args...);
|
|
||||||
FOR_NUMERIC_TYPES(DISPATCH)
|
|
||||||
#undef DISPATCH
|
|
||||||
if (which.idx == TypeIndex::Enum8)
|
|
||||||
return createWithTwoNumericTypesSecond<Int8, AggregateFunctionTemplate>(second_type, args...);
|
|
||||||
if (which.idx == TypeIndex::Enum16)
|
|
||||||
return createWithTwoNumericTypesSecond<Int16, AggregateFunctionTemplate>(second_type, args...);
|
|
||||||
return nullptr;
|
|
||||||
}
|
|
||||||
|
|
||||||
template <typename FirstType, template <typename, typename> class AggregateFunctionTemplate, typename... TArgs>
|
template <typename FirstType, template <typename, typename> class AggregateFunctionTemplate, typename... TArgs>
|
||||||
static IAggregateFunction * createWithTwoBasicNumericTypesSecond(const IDataType & second_type, TArgs && ... args)
|
static IAggregateFunction * createWithTwoBasicNumericTypesSecond(const IDataType & second_type, TArgs && ... args)
|
||||||
{
|
{
|
||||||
@ -237,46 +209,6 @@ static IAggregateFunction * createWithTwoBasicNumericTypes(const IDataType & fir
|
|||||||
return nullptr;
|
return nullptr;
|
||||||
}
|
}
|
||||||
|
|
||||||
template <typename FirstType, template <typename, typename> class AggregateFunctionTemplate, typename... TArgs>
|
|
||||||
static IAggregateFunction * createWithTwoNumericOrDateTypesSecond(const IDataType & second_type, TArgs && ... args)
|
|
||||||
{
|
|
||||||
WhichDataType which(second_type);
|
|
||||||
#define DISPATCH(TYPE) \
|
|
||||||
if (which.idx == TypeIndex::TYPE) return new AggregateFunctionTemplate<FirstType, TYPE>(args...);
|
|
||||||
FOR_NUMERIC_TYPES(DISPATCH)
|
|
||||||
#undef DISPATCH
|
|
||||||
if (which.idx == TypeIndex::Enum8) return new AggregateFunctionTemplate<FirstType, Int8>(args...);
|
|
||||||
if (which.idx == TypeIndex::Enum16) return new AggregateFunctionTemplate<FirstType, Int16>(args...);
|
|
||||||
|
|
||||||
/// expects that DataTypeDate based on UInt16, DataTypeDateTime based on UInt32
|
|
||||||
if (which.idx == TypeIndex::Date) return new AggregateFunctionTemplate<FirstType, UInt16>(args...);
|
|
||||||
if (which.idx == TypeIndex::DateTime) return new AggregateFunctionTemplate<FirstType, UInt32>(args...);
|
|
||||||
|
|
||||||
return nullptr;
|
|
||||||
}
|
|
||||||
|
|
||||||
template <template <typename, typename> class AggregateFunctionTemplate, typename... TArgs>
|
|
||||||
static IAggregateFunction * createWithTwoNumericOrDateTypes(const IDataType & first_type, const IDataType & second_type, TArgs && ... args)
|
|
||||||
{
|
|
||||||
WhichDataType which(first_type);
|
|
||||||
#define DISPATCH(TYPE) \
|
|
||||||
if (which.idx == TypeIndex::TYPE) \
|
|
||||||
return createWithTwoNumericOrDateTypesSecond<TYPE, AggregateFunctionTemplate>(second_type, args...);
|
|
||||||
FOR_NUMERIC_TYPES(DISPATCH)
|
|
||||||
#undef DISPATCH
|
|
||||||
if (which.idx == TypeIndex::Enum8)
|
|
||||||
return createWithTwoNumericOrDateTypesSecond<Int8, AggregateFunctionTemplate>(second_type, args...);
|
|
||||||
if (which.idx == TypeIndex::Enum16)
|
|
||||||
return createWithTwoNumericOrDateTypesSecond<Int16, AggregateFunctionTemplate>(second_type, args...);
|
|
||||||
|
|
||||||
/// expects that DataTypeDate based on UInt16, DataTypeDateTime based on UInt32
|
|
||||||
if (which.idx == TypeIndex::Date)
|
|
||||||
return createWithTwoNumericOrDateTypesSecond<UInt16, AggregateFunctionTemplate>(second_type, args...);
|
|
||||||
if (which.idx == TypeIndex::DateTime)
|
|
||||||
return createWithTwoNumericOrDateTypesSecond<UInt32, AggregateFunctionTemplate>(second_type, args...);
|
|
||||||
return nullptr;
|
|
||||||
}
|
|
||||||
|
|
||||||
template <template <typename> class AggregateFunctionTemplate, typename... TArgs>
|
template <template <typename> class AggregateFunctionTemplate, typename... TArgs>
|
||||||
static IAggregateFunction * createWithStringType(const IDataType & argument_type, TArgs && ... args)
|
static IAggregateFunction * createWithStringType(const IDataType & argument_type, TArgs && ... args)
|
||||||
{
|
{
|
||||||
@ -49,6 +49,7 @@
     M(TemporaryFilesForSort, "Number of temporary files created for external sorting") \
     M(TemporaryFilesForAggregation, "Number of temporary files created for external aggregation") \
     M(TemporaryFilesForJoin, "Number of temporary files created for JOIN") \
+    M(TemporaryFilesForMerge, "Number of temporary files for vertical merge") \
     M(TemporaryFilesUnknown, "Number of temporary files created without known purpose") \
     M(Read, "Number of read (read, pread, io_getevents, etc.) syscalls in fly") \
     M(RemoteRead, "Number of read with remote reader in fly") \
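Illustrative query, not part of the patch: on a server built with this change, the new metric appears next to the other temporary-file counters in `system.metrics`.

```sql
SELECT metric, value, description
FROM system.metrics
WHERE metric LIKE 'TemporaryFiles%';
```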
@ -204,6 +204,16 @@ bool ThreadStatus::isQueryCanceled() const
     return false;
 }
 
+size_t ThreadStatus::getNextPlanStepIndex() const
+{
+    return local_data.plan_step_index->fetch_add(1);
+}
+
+size_t ThreadStatus::getNextPipelineProcessorIndex() const
+{
+    return local_data.pipeline_processor_index->fetch_add(1);
+}
+
 ThreadStatus::~ThreadStatus()
 {
     flushUntrackedMemory();
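These counters feed the unique step and processor suffixes described in the EXPLAIN documentation changes earlier in this diff. A hedged illustration (exact identifiers will vary between queries and servers):

```sql
EXPLAIN json = 1, description = 0 SELECT 1 FORMAT TSVRaw;
-- each step now carries a "Node Id" such as "Expression_5" or "ReadFromStorage_0"
```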
@ -11,6 +11,7 @@
 
 #include <boost/noncopyable.hpp>
 
+#include <atomic>
 #include <functional>
 #include <memory>
 #include <mutex>
@ -90,6 +91,11 @@ public:
         String query_for_logs;
         UInt64 normalized_query_hash = 0;
 
+        // Since processors might be added on the fly within expand() function we use atomic_size_t.
+        // These two fields are used for EXPLAIN PLAN / PIPELINE.
+        std::shared_ptr<std::atomic_size_t> plan_step_index = std::make_shared<std::atomic_size_t>(0);
+        std::shared_ptr<std::atomic_size_t> pipeline_processor_index = std::make_shared<std::atomic_size_t>(0);
+
         QueryIsCanceledPredicate query_is_canceled_predicate = {};
     };
 
@ -313,6 +319,9 @@ public:
 
     void initGlobalProfiler(UInt64 global_profiler_real_time_period, UInt64 global_profiler_cpu_time_period);
 
+    size_t getNextPlanStepIndex() const;
+    size_t getNextPipelineProcessorIndex() const;
+
 private:
     void applyGlobalSettings();
     void applyQuerySettings();
@ -1794,7 +1794,7 @@ Possible values:
 
 - 0 — Disabled.
 - 1 — Enabled.
-)", 0) \
+)", 1) \
     DECLARE(Int64, http_zlib_compression_level, 3, R"(
 Sets the level of data compression in the response to an HTTP request if [enable_http_compression = 1](#enable_http_compression).
 
@ -4565,7 +4565,7 @@ Possible values:
 - 0 - Disable
 - 1 - Enable
 )", 0) \
-    DECLARE(Bool, query_plan_merge_filters, false, R"(
+    DECLARE(Bool, query_plan_merge_filters, true, R"(
 Allow to merge filters in the query plan
 )", 0) \
     DECLARE(Bool, query_plan_filter_push_down, true, R"(
@ -64,6 +64,7 @@ static std::initializer_list<std::pair<ClickHouseVersion, SettingsChangesHistory
     },
     {"24.11",
         {
+            {"enable_http_compression", false, true, "Improvement for read-only clients since they can't change settings"},
             {"validate_mutation_query", false, true, "New setting to validate mutation queries by default."},
             {"enable_job_stack_trace", false, true, "Enable by default collecting stack traces from job's scheduling."},
             {"allow_suspicious_types_in_group_by", true, false, "Don't allow Variant/Dynamic types in GROUP BY by default"},
@ -77,6 +78,7 @@ static std::initializer_list<std::pair<ClickHouseVersion, SettingsChangesHistory
             {"backup_restore_keeper_max_retries_while_initializing", 0, 20, "New setting."},
             {"backup_restore_keeper_max_retries_while_handling_error", 0, 20, "New setting."},
             {"backup_restore_finish_timeout_after_error_sec", 0, 180, "New setting."},
+            {"query_plan_merge_filters", false, true, "Allow to merge filters in the query plan. This is required to properly support filter-push-down with a new analyzer."},
             {"parallel_replicas_local_plan", false, true, "Use local plan for local replica in a query with parallel replicas"},
             {"allow_experimental_bfloat16_type", false, false, "Add new experimental BFloat16 type"},
             {"filesystem_cache_skip_download_if_exceeds_per_query_cache_write_limit", 1, 1, "Rename of setting skip_download_if_exceeds_query_cache_limit"},
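Illustrative check, not part of the patch: the two default changes recorded above can be confirmed from `system.settings` on a server built from this merge.

```sql
SELECT name, value, changed
FROM system.settings
WHERE name IN ('enable_http_compression', 'query_plan_merge_filters');
```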
@ -69,7 +69,7 @@ static void testCascadeBufferRedability(
         auto rbuf = wbuf_readable.tryGetReadBuffer();
         ASSERT_FALSE(!rbuf);
 
-        concat.appendBuffer(wrapReadBufferPointer(std::move(rbuf)));
+        concat.appendBuffer(std::move(rbuf));
     }
 
     std::string decoded_data;
@ -32,12 +32,12 @@ namespace Setting
 
 namespace ErrorCodes
 {
-    extern const int ILLEGAL_COLUMN;
-    extern const int NOT_IMPLEMENTED;
     extern const int BAD_ARGUMENTS;
-    extern const int VALUE_IS_OUT_OF_RANGE_OF_DATA_TYPE;
     extern const int CANNOT_PARSE_DATETIME;
+    extern const int ILLEGAL_COLUMN;
     extern const int NOT_ENOUGH_SPACE;
+    extern const int NOT_IMPLEMENTED;
+    extern const int VALUE_IS_OUT_OF_RANGE_OF_DATA_TYPE;
 }
 
 namespace
@ -57,6 +57,12 @@ namespace
|
|||||||
Null
|
Null
|
||||||
};
|
};
|
||||||
|
|
||||||
|
enum class ReturnType: uint8_t
|
||||||
|
{
|
||||||
|
DateTime,
|
||||||
|
DateTime64
|
||||||
|
};
|
||||||
|
|
||||||
constexpr Int32 minYear = 1970;
|
constexpr Int32 minYear = 1970;
|
||||||
constexpr Int32 maxYear = 2106;
|
constexpr Int32 maxYear = 2106;
|
||||||
|
|
||||||
@ -186,6 +192,7 @@ namespace
|
|||||||
Int32 minute = 0; /// range [0, 59]
|
Int32 minute = 0; /// range [0, 59]
|
||||||
Int32 second = 0; /// range [0, 59]
|
Int32 second = 0; /// range [0, 59]
|
||||||
Int32 microsecond = 0; /// range [0, 999999]
|
Int32 microsecond = 0; /// range [0, 999999]
|
||||||
|
UInt32 scale = 0; /// scale of the result DateTime64. Always 6 for ParseSytax == MySQL, [0, 6] for ParseSyntax == Joda.
|
||||||
|
|
||||||
bool is_am = true; /// If is_hour_of_half_day = true and is_am = false (i.e. pm) then add 12 hours to the result DateTime
|
bool is_am = true; /// If is_hour_of_half_day = true and is_am = false (i.e. pm) then add 12 hours to the result DateTime
|
||||||
bool hour_starts_at_1 = false; /// Whether the hour is clockhour
|
bool hour_starts_at_1 = false; /// Whether the hour is clockhour
|
||||||
@ -214,6 +221,7 @@ namespace
|
|||||||
minute = 0;
|
minute = 0;
|
||||||
second = 0;
|
second = 0;
|
||||||
microsecond = 0;
|
microsecond = 0;
|
||||||
|
scale = 0;
|
||||||
|
|
||||||
is_am = true;
|
is_am = true;
|
||||||
hour_starts_at_1 = false;
|
hour_starts_at_1 = false;
|
||||||
@ -449,6 +457,18 @@ namespace
|
|||||||
return {};
|
return {};
|
||||||
}
|
}
|
||||||
|
|
||||||
|
[[nodiscard]]
|
||||||
|
VoidOrError setScale(UInt8 scale_, ParseSyntax parse_syntax_)
|
||||||
|
{
|
||||||
|
if (parse_syntax_ == ParseSyntax::MySQL && scale_ != 6)
|
||||||
|
RETURN_ERROR(ErrorCodes::CANNOT_PARSE_DATETIME, "Value {} for scale must be 6 for MySQL parse syntax", std::to_string(scale_))
|
||||||
|
else if (parse_syntax_ == ParseSyntax::Joda && scale_ > 6)
|
||||||
|
RETURN_ERROR(ErrorCodes::CANNOT_PARSE_DATETIME, "Value {} for scale must be in the range [0, 6] for Joda syntax", std::to_string(scale_))
|
||||||
|
|
||||||
|
scale = scale_;
|
||||||
|
return {};
|
||||||
|
}
|
||||||
|
|
||||||
/// For debug
|
/// For debug
|
||||||
[[maybe_unused]] String toString() const
|
[[maybe_unused]] String toString() const
|
||||||
{
|
{
|
||||||
@ -571,7 +591,7 @@ namespace
|
|||||||
};
|
};
|
||||||
|
|
||||||
/// _FUNC_(str[, format, timezone])
|
/// _FUNC_(str[, format, timezone])
|
||||||
template <typename Name, ParseSyntax parse_syntax, ErrorHandling error_handling, bool parseDateTime64 = false>
|
template <typename Name, ParseSyntax parse_syntax, ReturnType return_type, ErrorHandling error_handling>
|
||||||
class FunctionParseDateTimeImpl : public IFunction
|
class FunctionParseDateTimeImpl : public IFunction
|
||||||
{
|
{
|
||||||
public:
|
public:
|
||||||
@ -601,93 +621,100 @@ namespace
|
|||||||
FunctionArgumentDescriptors mandatory_args{
|
FunctionArgumentDescriptors mandatory_args{
|
||||||
{"time", static_cast<FunctionArgumentDescriptor::TypeValidator>(&isString), nullptr, "String"}
|
{"time", static_cast<FunctionArgumentDescriptor::TypeValidator>(&isString), nullptr, "String"}
|
||||||
};
|
};
|
||||||
|
|
||||||
FunctionArgumentDescriptors optional_args{
|
FunctionArgumentDescriptors optional_args{
|
||||||
{"format", static_cast<FunctionArgumentDescriptor::TypeValidator>(&isString), nullptr, "String"},
|
{"format", static_cast<FunctionArgumentDescriptor::TypeValidator>(&isString), &isColumnConst, "const String"},
|
||||||
{"timezone", static_cast<FunctionArgumentDescriptor::TypeValidator>(&isString), &isColumnConst, "const String"}
|
{"timezone", static_cast<FunctionArgumentDescriptor::TypeValidator>(&isString), &isColumnConst, "const String"}
|
||||||
};
|
};
|
||||||
|
|
||||||
validateFunctionArguments(*this, arguments, mandatory_args, optional_args);
|
validateFunctionArguments(*this, arguments, mandatory_args, optional_args);
|
||||||
|
|
||||||
String time_zone_name = getTimeZone(arguments).getTimeZone();
|
String time_zone_name = getTimeZone(arguments).getTimeZone();
|
||||||
DataTypePtr date_type = nullptr;
|
DataTypePtr data_type;
|
||||||
if constexpr (parseDateTime64)
|
if constexpr (return_type == ReturnType::DateTime)
|
||||||
|
data_type = std::make_shared<DataTypeDateTime>(time_zone_name);
|
||||||
|
else
|
||||||
|
{
|
||||||
|
if constexpr (parse_syntax == ParseSyntax::MySQL)
|
||||||
|
data_type = std::make_shared<DataTypeDateTime64>(6, time_zone_name);
|
||||||
|
else
|
||||||
{
|
{
|
||||||
String format = getFormat(arguments);
|
String format = getFormat(arguments);
|
||||||
std::vector<Instruction> instructions = parseFormat(format);
|
std::vector<Instruction> instructions = parseFormat(format);
|
||||||
UInt32 scale = 0;
|
/// How many 'S' characters does the format string contain?
|
||||||
if (!instructions.empty())
|
UInt32 s_count = 0;
|
||||||
|
for (const auto & instruction : instructions)
|
||||||
{
|
{
|
||||||
for (const auto & ins : instructions)
|
const String fragment = instruction.getFragment();
|
||||||
|
for (char c : fragment)
|
||||||
{
|
{
|
||||||
if (scale > 0)
|
if (c == 'S')
|
||||||
break;
|
++s_count;
|
||||||
const String fragment = ins.getFragment();
|
else
|
||||||
for (char ch : fragment)
|
|
||||||
{
|
|
||||||
if (ch != 'S')
|
|
||||||
{
|
|
||||||
scale = 0;
|
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
else
|
if (s_count > 0)
|
||||||
scale++;
|
break;
|
||||||
|
}
|
||||||
|
/// Use s_count as DateTime64's scale.
|
||||||
|
data_type = std::make_shared<DataTypeDateTime64>(s_count, time_zone_name);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
|
||||||
date_type = std::make_shared<DataTypeDateTime64>(scale, time_zone_name);
|
|
||||||
}
|
|
||||||
else
|
|
||||||
date_type = std::make_shared<DataTypeDateTime>(time_zone_name);
|
|
||||||
if (error_handling == ErrorHandling::Null)
|
if (error_handling == ErrorHandling::Null)
|
||||||
return std::make_shared<DataTypeNullable>(date_type);
|
return std::make_shared<DataTypeNullable>(data_type);
|
||||||
return date_type;
|
return data_type;
|
||||||
}
|
}
|
||||||
|
|
||||||
ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count) const override
|
ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count) const override
|
||||||
{
|
{
|
||||||
ColumnUInt8::MutablePtr col_null_map;
|
DataTypePtr result_type_without_nullable;
|
||||||
if constexpr (error_handling == ErrorHandling::Null)
|
if constexpr (error_handling == ErrorHandling::Null)
|
||||||
col_null_map = ColumnUInt8::create(input_rows_count, 0);
|
result_type_without_nullable = removeNullable(result_type); /// Remove Nullable wrapper. It will be added back later.
|
||||||
if constexpr (parseDateTime64)
|
|
||||||
{
|
|
||||||
const DataTypeDateTime64 * datatime64_type = checkAndGetDataType<DataTypeDateTime64>(removeNullable(result_type).get());
|
|
||||||
auto col_res = ColumnDateTime64::create(input_rows_count, datatime64_type->getScale());
|
|
||||||
PaddedPODArray<DataTypeDateTime64::FieldType> & res_data = col_res->getData();
|
|
||||||
executeImpl2<DataTypeDateTime64::FieldType>(arguments, result_type, input_rows_count, res_data, col_null_map);
|
|
||||||
if constexpr (error_handling == ErrorHandling::Null)
|
|
||||||
return ColumnNullable::create(std::move(col_res), std::move(col_null_map));
|
|
||||||
else
|
else
|
||||||
return col_res;
|
result_type_without_nullable = result_type;
|
||||||
|
|
||||||
|
if constexpr (return_type == ReturnType::DateTime)
|
||||||
|
{
|
||||||
|
MutableColumnPtr col_res = ColumnDateTime::create(input_rows_count);
|
||||||
|
ColumnDateTime * col_datetime = assert_cast<ColumnDateTime *>(col_res.get());
|
||||||
|
return executeImpl2<DataTypeDateTime::FieldType>(arguments, result_type, input_rows_count, col_res, col_datetime->getData());
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
auto col_res = ColumnDateTime::create(input_rows_count);
|
const auto * result_type_without_nullable_casted = checkAndGetDataType<DataTypeDateTime64>(result_type_without_nullable.get());
|
||||||
PaddedPODArray<DataTypeDateTime::FieldType> & res_data = col_res->getData();
|
MutableColumnPtr col_res = ColumnDateTime64::create(input_rows_count, result_type_without_nullable_casted->getScale());
|
||||||
executeImpl2<DataTypeDateTime::FieldType>(arguments, result_type, input_rows_count, res_data, col_null_map);
|
ColumnDateTime64 * col_datetime64 = assert_cast<ColumnDateTime64 *>(col_res.get());
|
||||||
if constexpr (error_handling == ErrorHandling::Null)
|
return executeImpl2<DataTypeDateTime64::FieldType>(arguments, result_type, input_rows_count, col_res, col_datetime64->getData());
|
||||||
return ColumnNullable::create(std::move(col_res), std::move(col_null_map));
|
|
||||||
else
|
|
||||||
return col_res;
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
template<typename T>
|
template<typename T>
|
||||||
void executeImpl2(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count,
|
ColumnPtr executeImpl2(
|
||||||
PaddedPODArray<T> & res_data, ColumnUInt8::MutablePtr & col_null_map) const
|
const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count,
|
||||||
|
MutableColumnPtr & col_res, PaddedPODArray<T> & res_data) const
|
||||||
{
|
{
|
||||||
const auto * col_str = checkAndGetColumn<ColumnString>(arguments[0].column.get());
|
const auto * col_str = checkAndGetColumn<ColumnString>(arguments[0].column.get());
|
||||||
if (!col_str)
|
if (!col_str)
|
||||||
throw Exception(
|
throw Exception(
|
||||||
ErrorCodes::ILLEGAL_COLUMN,
|
ErrorCodes::ILLEGAL_COLUMN,
|
||||||
"Illegal column {} of first ('str') argument of function {}. Must be string.",
|
"Illegal type in 1st ('time') argument of function {}. Must be String.",
|
||||||
arguments[0].column->getName(),
|
|
||||||
getName());
|
getName());
|
||||||
|
|
||||||
String format = getFormat(arguments);
|
Int64 multiplier = 0;
|
||||||
const auto & time_zone = getTimeZone(arguments);
|
UInt32 scale = 0;
|
||||||
std::vector<Instruction> instructions = parseFormat(format);
|
if constexpr (return_type == ReturnType::DateTime64)
|
||||||
|
{
|
||||||
|
const DataTypeDateTime64 * result_type_without_nullable_casted = checkAndGetDataType<DataTypeDateTime64>(removeNullable(result_type).get());
|
||||||
|
scale = result_type_without_nullable_casted->getScale();
|
||||||
|
multiplier = DecimalUtils::scaleMultiplier<DateTime64>(scale);
|
||||||
|
}
|
||||||
|
|
||||||
|
ColumnUInt8::MutablePtr col_null_map;
|
||||||
|
if constexpr (error_handling == ErrorHandling::Null)
|
||||||
|
col_null_map = ColumnUInt8::create(input_rows_count, 0);
|
||||||
|
|
||||||
|
const String format = getFormat(arguments);
|
||||||
|
const std::vector<Instruction> instructions = parseFormat(format);
|
||||||
|
const auto & time_zone = getTimeZone(arguments);
|
||||||
/// Make datetime fit in a cache line.
|
/// Make datetime fit in a cache line.
|
||||||
alignas(64) DateTime<error_handling> datetime;
|
alignas(64) DateTime<error_handling> datetime;
|
||||||
for (size_t i = 0; i < input_rows_count; ++i)
|
for (size_t i = 0; i < input_rows_count; ++i)
|
||||||
@ -698,6 +725,15 @@ namespace
|
|||||||
Pos end = str_ref.data + str_ref.size;
|
Pos end = str_ref.data + str_ref.size;
|
||||||
bool error = false;
|
bool error = false;
|
||||||
|
|
||||||
|
if constexpr (return_type == ReturnType::DateTime64)
|
||||||
|
{
|
||||||
|
if (auto result = datetime.setScale(static_cast<UInt8>(scale), parse_syntax); !result.has_value())
|
||||||
|
{
|
||||||
|
const ErrorCodeAndMessage & err = result.error();
|
||||||
|
throw Exception(err.error_code, "Invalid scale value: {}, {}", std::to_string(scale), err.error_message);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
for (const auto & instruction : instructions)
|
for (const auto & instruction : instructions)
|
||||||
{
|
{
|
||||||
if (auto result = instruction.perform(cur, end, datetime); result.has_value())
|
if (auto result = instruction.perform(cur, end, datetime); result.has_value())
|
||||||
@ -732,9 +768,8 @@ namespace
|
|||||||
continue;
|
continue;
|
||||||
|
|
||||||
Int64OrError result = 0;
|
Int64OrError result = 0;
|
||||||
|
|
||||||
/// Ensure all input was consumed
|
/// Ensure all input was consumed
|
||||||
if (!parseDateTime64 && cur < end)
|
if (cur < end)
|
||||||
{
|
{
|
||||||
result = tl::unexpected(ErrorCodeAndMessage(
|
result = tl::unexpected(ErrorCodeAndMessage(
|
||||||
ErrorCodes::CANNOT_PARSE_DATETIME,
|
ErrorCodes::CANNOT_PARSE_DATETIME,
|
||||||
@ -747,14 +782,10 @@ namespace
|
|||||||
{
|
{
|
||||||
if (result = datetime.buildDateTime(time_zone); result.has_value())
|
if (result = datetime.buildDateTime(time_zone); result.has_value())
|
||||||
{
|
{
|
||||||
if constexpr (parseDateTime64)
|
if constexpr (return_type == ReturnType::DateTime)
|
||||||
{
|
|
||||||
const DataTypeDateTime64 * datatime64_type = checkAndGetDataType<DataTypeDateTime64>(removeNullable(result_type).get());
|
|
||||||
Int64 multiplier = DecimalUtils::scaleMultiplier<DateTime64>(datatime64_type->getScale());
|
|
||||||
res_data[i] = static_cast<Int64>(*result) * multiplier + datetime.microsecond;
|
|
||||||
}
|
|
||||||
else
|
|
||||||
res_data[i] = static_cast<UInt32>(*result);
|
res_data[i] = static_cast<UInt32>(*result);
|
||||||
|
else
|
||||||
|
res_data[i] = static_cast<Int64>(*result) * multiplier + datetime.microsecond;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -777,6 +808,10 @@ namespace
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if constexpr (error_handling == ErrorHandling::Null)
|
||||||
|
return ColumnNullable::create(std::move(col_res), std::move(col_null_map));
|
||||||
|
else
|
||||||
|
return std::move(col_res);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@ -808,7 +843,7 @@ namespace
|
|||||||
explicit Instruction(const String & literal_) : literal(literal_), fragment("LITERAL") { }
|
explicit Instruction(const String & literal_) : literal(literal_), fragment("LITERAL") { }
|
||||||
explicit Instruction(String && literal_) : literal(std::move(literal_)), fragment("LITERAL") { }
|
explicit Instruction(String && literal_) : literal(std::move(literal_)), fragment("LITERAL") { }
|
||||||
|
|
||||||
String getFragment() const { return fragment; }
|
const String & getFragment() const { return fragment; }
|
||||||
|
|
||||||
/// For debug
|
/// For debug
|
||||||
 [[maybe_unused]] String toString() const
@@ -885,6 +920,28 @@ namespace
 return cur;
 }

+template<typename T, NeedCheckSpace need_check_space>
+[[nodiscard]]
+static PosOrError readNumber6(Pos cur, Pos end, [[maybe_unused]] const String & fragment, T & res)
+{
+if constexpr (need_check_space == NeedCheckSpace::Yes)
+RETURN_ERROR_IF_FAILED(checkSpace(cur, end, 6, "readNumber6 requires size >= 6", fragment))
+
+res = (*cur - '0');
+++cur;
+res = res * 10 + (*cur - '0');
+++cur;
+res = res * 10 + (*cur - '0');
+++cur;
+res = res * 10 + (*cur - '0');
+++cur;
+res = res * 10 + (*cur - '0');
+++cur;
+res = res * 10 + (*cur - '0');
+++cur;
+return cur;
+}
+
 [[nodiscard]]
 static VoidOrError checkSpace(Pos cur, Pos end, size_t len, const String & msg, const String & fragment)
 {
@@ -1305,13 +1362,28 @@ namespace
 }

 [[nodiscard]]
-static PosOrError mysqlMicrosecond(Pos cur, Pos end, const String & fragment, DateTime<error_handling> & /*date*/)
+static PosOrError mysqlMicrosecond(Pos cur, Pos end, const String & fragment, DateTime<error_handling> & date)
+{
+if constexpr (return_type == ReturnType::DateTime)
 {
 RETURN_ERROR_IF_FAILED(checkSpace(cur, end, 6, "mysqlMicrosecond requires size >= 6", fragment))

 for (size_t i = 0; i < 6; ++i)
 ASSIGN_RESULT_OR_RETURN_ERROR(cur, (assertNumber<NeedCheckSpace::No>(cur, end, fragment)))
+}
+else
+{
+if (date.scale != 6)
+RETURN_ERROR(
+ErrorCodes::CANNOT_PARSE_DATETIME,
+"Unable to parse fragment {} from {} because the datetime scale {} is not 6",
+fragment,
+std::string_view(cur, end - cur),
+std::to_string(date.scale))
+Int32 microsecond = 0;
+ASSIGN_RESULT_OR_RETURN_ERROR(cur, (readNumber6<Int32, NeedCheckSpace::Yes>(cur, end, fragment, microsecond)))
+RETURN_ERROR_IF_FAILED(date.setMicrosecond(microsecond))
+}
 return cur;
 }

@@ -1695,7 +1767,7 @@ namespace
 }

 [[nodiscard]]
-static PosOrError jodaMicroSecondOfSecond(size_t repetitions, Pos cur, Pos end, const String & fragment, DateTime<error_handling> & date)
+static PosOrError jodaMicrosecondOfSecond(size_t repetitions, Pos cur, Pos end, const String & fragment, DateTime<error_handling> & date)
 {
 Int32 microsecond;
 ASSIGN_RESULT_OR_RETURN_ERROR(cur, (readNumberWithVariableLength(cur, end, false, false, false, repetitions, std::max(repetitions, 2uz), fragment, microsecond)))
@@ -1704,31 +1776,32 @@ namespace
 }

 [[nodiscard]]
-static PosOrError jodaTimezoneId(size_t, Pos cur, Pos end, const String &, DateTime<error_handling> & date)
+static PosOrError jodaTimezone(size_t, Pos cur, Pos end, const String &, DateTime<error_handling> & date)
 {
-String dateTimeZone;
+String read_time_zone;
 while (cur <= end)
 {
-dateTimeZone += *cur;
+read_time_zone += *cur;
 ++cur;
 }
-const DateLUTImpl & date_time_zone = DateLUT::instance(dateTimeZone);
+const DateLUTImpl & date_time_zone = DateLUT::instance(read_time_zone);
 const auto result = date.buildDateTime(date_time_zone);
 if (result.has_value())
 {
-const auto timezoneOffset = date_time_zone.timezoneOffset(*result);
+const DateLUTImpl::Time timezone_offset = date_time_zone.timezoneOffset(*result);
 date.has_time_zone_offset = true;
-date.time_zone_offset = timezoneOffset;
+date.time_zone_offset = timezone_offset;
 return cur;
 }
 else
-RETURN_ERROR(ErrorCodes::CANNOT_PARSE_DATETIME, "Unable to build date time from timezone {}", dateTimeZone)
+RETURN_ERROR(ErrorCodes::CANNOT_PARSE_DATETIME, "Unable to parse date time from timezone {}", read_time_zone)
 }

 [[nodiscard]]
 static PosOrError jodaTimezoneOffset(size_t repetitions, Pos cur, Pos end, const String & fragment, DateTime<error_handling> & date)
 {
 RETURN_ERROR_IF_FAILED(checkSpace(cur, end, 5, "jodaTimezoneOffset requires size >= 5", fragment))

 Int32 sign;
 if (*cur == '-')
 sign = -1;
@@ -1737,7 +1810,7 @@ namespace
 else
 RETURN_ERROR(
 ErrorCodes::CANNOT_PARSE_DATETIME,
-"Unable to parse fragment {} from {} because of unknown sign time zone offset: {}",
+"Unable to parse fragment {} from {} because of unknown sign in time zone offset: {}",
 fragment,
 std::string_view(cur, end - cur),
 std::string_view(cur, 1))
@@ -1745,8 +1818,22 @@ namespace

 Int32 hour;
 ASSIGN_RESULT_OR_RETURN_ERROR(cur, (readNumberWithVariableLength(cur, end, false, false, false, repetitions, std::max(repetitions, 2uz), fragment, hour)))
+if (hour < 0 || hour > 23)
+RETURN_ERROR(
+ErrorCodes::CANNOT_PARSE_DATETIME,
+"Unable to parse fragment {} from {} because the hour of datetime not in range [0, 23]: {}",
+fragment,
+std::string_view(cur, end - cur),
+std::string_view(cur, 1))
 Int32 minute;
 ASSIGN_RESULT_OR_RETURN_ERROR(cur, (readNumberWithVariableLength(cur, end, false, false, false, repetitions, std::max(repetitions, 2uz), fragment, minute)))
+if (minute < 0 || minute > 59)
+RETURN_ERROR(
+ErrorCodes::CANNOT_PARSE_DATETIME,
+"Unable to parse fragment {} from {} because the minute of datetime not in range [0, 59]: {}",
+fragment,
+std::string_view(cur, end - cur),
+std::string_view(cur, 1))
 date.has_time_zone_offset = true;
 date.time_zone_offset = sign * (hour * 3600 + minute * 60);
 return cur;
@@ -2133,10 +2220,10 @@ namespace
 instructions.emplace_back(ACTION_ARGS_WITH_BIND(Instruction::jodaSecondOfMinute, repetitions));
 break;
 case 'S':
-instructions.emplace_back(ACTION_ARGS_WITH_BIND(Instruction::jodaMicroSecondOfSecond, repetitions));
+instructions.emplace_back(ACTION_ARGS_WITH_BIND(Instruction::jodaMicrosecondOfSecond, repetitions));
 break;
 case 'z':
-instructions.emplace_back(ACTION_ARGS_WITH_BIND(Instruction::jodaTimezoneId, repetitions));
+instructions.emplace_back(ACTION_ARGS_WITH_BIND(Instruction::jodaTimezone, repetitions));
 break;
 case 'Z':
 instructions.emplace_back(ACTION_ARGS_WITH_BIND(Instruction::jodaTimezoneOffset, repetitions));
@@ -2161,21 +2248,22 @@ namespace
 if (arguments.size() == 1)
 {
 if constexpr (parse_syntax == ParseSyntax::MySQL)
+{
+if constexpr (return_type == ReturnType::DateTime)
 return "%Y-%m-%d %H:%i:%s";
+else
+return "%Y-%m-%d %H:%i:%s.%f";
+}
 else
 return "yyyy-MM-dd HH:mm:ss";
 }
 else
 {
-if (!arguments[1].column || !isColumnConst(*arguments[1].column))
-throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Argument at index {} for function {} must be constant", 1, getName());

 const auto * col_format = checkAndGetColumnConst<ColumnString>(arguments[1].column.get());
 if (!col_format)
 throw Exception(
 ErrorCodes::ILLEGAL_COLUMN,
-"Illegal column {} of second ('format') argument of function {}. Must be constant string.",
-arguments[1].column->getName(),
+"Illegal type in 'format' argument of function {}. Must be constant String.",
 getName());
 return col_format->getValue<String>();
 }
@@ -2190,8 +2278,7 @@ namespace
 if (!col)
 throw Exception(
 ErrorCodes::ILLEGAL_COLUMN,
-"Illegal column {} of third ('timezone') argument of function {}. Must be constant String.",
-arguments[2].column->getName(),
+"Illegal type in 'timezone' argument of function {}. Must be constant String.",
 getName());

 String time_zone = col->getValue<String>();
@@ -2229,6 +2316,21 @@ namespace
 static constexpr auto name = "parseDateTimeInJodaSyntaxOrNull";
 };

+struct NameParseDateTime64
+{
+static constexpr auto name = "parseDateTime64";
+};
+
+struct NameParseDateTime64OrZero
+{
+static constexpr auto name = "parseDateTime64OrZero";
+};
+
+struct NameParseDateTime64OrNull
+{
+static constexpr auto name = "parseDateTime64OrNull";
+};
+
 struct NameParseDateTime64InJodaSyntax
 {
 static constexpr auto name = "parseDateTime64InJodaSyntax";
@@ -2244,15 +2346,18 @@ namespace
 static constexpr auto name = "parseDateTime64InJodaSyntaxOrNull";
 };

-using FunctionParseDateTime = FunctionParseDateTimeImpl<NameParseDateTime, ParseSyntax::MySQL, ErrorHandling::Exception>;
+using FunctionParseDateTime = FunctionParseDateTimeImpl<NameParseDateTime, ParseSyntax::MySQL, ReturnType::DateTime, ErrorHandling::Exception>;
-using FunctionParseDateTimeOrZero = FunctionParseDateTimeImpl<NameParseDateTimeOrZero, ParseSyntax::MySQL, ErrorHandling::Zero>;
+using FunctionParseDateTimeOrZero = FunctionParseDateTimeImpl<NameParseDateTimeOrZero, ParseSyntax::MySQL, ReturnType::DateTime, ErrorHandling::Zero>;
-using FunctionParseDateTimeOrNull = FunctionParseDateTimeImpl<NameParseDateTimeOrNull, ParseSyntax::MySQL, ErrorHandling::Null>;
+using FunctionParseDateTimeOrNull = FunctionParseDateTimeImpl<NameParseDateTimeOrNull, ParseSyntax::MySQL, ReturnType::DateTime, ErrorHandling::Null>;
-using FunctionParseDateTimeInJodaSyntax = FunctionParseDateTimeImpl<NameParseDateTimeInJodaSyntax, ParseSyntax::Joda, ErrorHandling::Exception>;
+using FunctionParseDateTime64 = FunctionParseDateTimeImpl<NameParseDateTime64, ParseSyntax::MySQL, ReturnType::DateTime64, ErrorHandling::Exception>;
-using FunctionParseDateTimeInJodaSyntaxOrZero = FunctionParseDateTimeImpl<NameParseDateTimeInJodaSyntaxOrZero, ParseSyntax::Joda, ErrorHandling::Zero>;
+using FunctionParseDateTime64OrZero = FunctionParseDateTimeImpl<NameParseDateTime64OrZero, ParseSyntax::MySQL, ReturnType::DateTime64, ErrorHandling::Zero>;
-using FunctionParseDateTimeInJodaSyntaxOrNull = FunctionParseDateTimeImpl<NameParseDateTimeInJodaSyntaxOrNull, ParseSyntax::Joda, ErrorHandling::Null>;
+using FunctionParseDateTime64OrNull = FunctionParseDateTimeImpl<NameParseDateTime64OrNull, ParseSyntax::MySQL, ReturnType::DateTime64, ErrorHandling::Null>;
-using FunctionParseDateTime64InJodaSyntax = FunctionParseDateTimeImpl<NameParseDateTime64InJodaSyntax, ParseSyntax::Joda, ErrorHandling::Exception, true>;
+using FunctionParseDateTimeInJodaSyntax = FunctionParseDateTimeImpl<NameParseDateTimeInJodaSyntax, ParseSyntax::Joda, ReturnType::DateTime, ErrorHandling::Exception>;
-using FunctionParseDateTime64InJodaSyntaxOrZero = FunctionParseDateTimeImpl<NameParseDateTime64InJodaSyntaxOrZero, ParseSyntax::Joda, ErrorHandling::Zero, true>;
+using FunctionParseDateTimeInJodaSyntaxOrZero = FunctionParseDateTimeImpl<NameParseDateTimeInJodaSyntaxOrZero, ParseSyntax::Joda, ReturnType::DateTime, ErrorHandling::Zero>;
-using FunctionParseDateTime64InJodaSyntaxOrNull = FunctionParseDateTimeImpl<NameParseDateTime64InJodaSyntaxOrNull, ParseSyntax::Joda, ErrorHandling::Null, true>;
+using FunctionParseDateTimeInJodaSyntaxOrNull = FunctionParseDateTimeImpl<NameParseDateTimeInJodaSyntaxOrNull, ParseSyntax::Joda, ReturnType::DateTime, ErrorHandling::Null>;
+using FunctionParseDateTime64InJodaSyntax = FunctionParseDateTimeImpl<NameParseDateTime64InJodaSyntax, ParseSyntax::Joda, ReturnType::DateTime64, ErrorHandling::Exception>;
+using FunctionParseDateTime64InJodaSyntaxOrZero = FunctionParseDateTimeImpl<NameParseDateTime64InJodaSyntaxOrZero, ParseSyntax::Joda, ReturnType::DateTime64, ErrorHandling::Zero>;
+using FunctionParseDateTime64InJodaSyntaxOrNull = FunctionParseDateTimeImpl<NameParseDateTime64InJodaSyntaxOrNull, ParseSyntax::Joda, ReturnType::DateTime64, ErrorHandling::Null>;
 }

 REGISTER_FUNCTION(ParseDateTime)
@@ -2262,13 +2367,16 @@ REGISTER_FUNCTION(ParseDateTime)
 factory.registerFunction<FunctionParseDateTimeOrZero>();
 factory.registerFunction<FunctionParseDateTimeOrNull>();
 factory.registerAlias("str_to_date", FunctionParseDateTimeOrNull::name, FunctionFactory::Case::Insensitive);

 factory.registerFunction<FunctionParseDateTimeInJodaSyntax>();
 factory.registerFunction<FunctionParseDateTimeInJodaSyntaxOrZero>();
 factory.registerFunction<FunctionParseDateTimeInJodaSyntaxOrNull>();

 factory.registerFunction<FunctionParseDateTime64InJodaSyntax>();
 factory.registerFunction<FunctionParseDateTime64InJodaSyntaxOrZero>();
 factory.registerFunction<FunctionParseDateTime64InJodaSyntaxOrNull>();
+factory.registerFunction<FunctionParseDateTime64>();
+factory.registerFunction<FunctionParseDateTime64OrZero>();
+factory.registerFunction<FunctionParseDateTime64OrNull>();
 }

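As an illustration of the parsing idea in the hunks above (a standalone sketch with a hypothetical name, parse_six_digits; this is not ClickHouse code): the new readNumber6 / %f path consumes exactly six decimal digits for the microsecond component and fails when fewer than six characters remain.

#include <cstdint>
#include <optional>
#include <string_view>

// Sketch of a fixed-width six-digit parse, the same shape as the unrolled readNumber6 above.
std::optional<int32_t> parse_six_digits(std::string_view s)
{
    if (s.size() < 6)
        return std::nullopt; // mirrors the checkSpace(cur, end, 6, ...) guard

    int32_t res = 0;
    for (size_t i = 0; i < 6; ++i)
    {
        if (s[i] < '0' || s[i] > '9')
            return std::nullopt;
        res = res * 10 + (s[i] - '0'); // same accumulation the unrolled code performs
    }
    return res;
}
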
@@ -335,7 +335,7 @@ Aggregator::Aggregator(const Block & header_, const Params & params_)
 : header(header_)
 , keys_positions(calculateKeysPositions(header, params_))
 , params(params_)
-, tmp_data(params.tmp_data_scope ? std::make_unique<TemporaryDataOnDisk>(params.tmp_data_scope, CurrentMetrics::TemporaryFilesForAggregation) : nullptr)
+, tmp_data(params.tmp_data_scope ? params.tmp_data_scope->childScope(CurrentMetrics::TemporaryFilesForAggregation) : nullptr)
 , min_bytes_for_prefetch(getMinBytesForPrefetch())
 {
 /// Use query-level memory tracker
@@ -1519,10 +1519,15 @@ void Aggregator::writeToTemporaryFile(AggregatedDataVariants & data_variants, si
 Stopwatch watch;
 size_t rows = data_variants.size();

-auto & out_stream = tmp_data->createStream(getHeader(false), max_temp_file_size);
+auto & out_stream = [this, max_temp_file_size]() -> TemporaryBlockStreamHolder &
+{
+std::lock_guard lk(tmp_files_mutex);
+return tmp_files.emplace_back(getHeader(false), tmp_data.get(), max_temp_file_size);
+}();

 ProfileEvents::increment(ProfileEvents::ExternalAggregationWritePart);

-LOG_DEBUG(log, "Writing part of aggregation data into temporary file {}", out_stream.getPath());
+LOG_DEBUG(log, "Writing part of aggregation data into temporary file {}", out_stream.getHolder()->describeFilePath());

 /// Flush only two-level data and possibly overflow data.

@@ -1639,11 +1644,24 @@ Block Aggregator::convertOneBucketToBlock(AggregatedDataVariants & variants, Are
 return block;
 }

+std::list<TemporaryBlockStreamHolder> Aggregator::detachTemporaryData()
+{
+std::lock_guard lk(tmp_files_mutex);
+return std::move(tmp_files);
+}
+
+bool Aggregator::hasTemporaryData() const
+{
+std::lock_guard lk(tmp_files_mutex);
+return !tmp_files.empty();
+}
+
+
 template <typename Method>
 void Aggregator::writeToTemporaryFileImpl(
 AggregatedDataVariants & data_variants,
 Method & method,
-TemporaryFileStream & out) const
+TemporaryBlockStreamHolder & out) const
 {
 size_t max_temporary_block_size_rows = 0;
 size_t max_temporary_block_size_bytes = 0;
@@ -1660,14 +1678,14 @@ void Aggregator::writeToTemporaryFileImpl(
 for (UInt32 bucket = 0; bucket < Method::Data::NUM_BUCKETS; ++bucket)
 {
 Block block = convertOneBucketToBlock(data_variants, method, data_variants.aggregates_pool, false, bucket);
-out.write(block);
+out->write(block);
 update_max_sizes(block);
 }

 if (params.overflow_row)
 {
 Block block = prepareBlockAndFillWithoutKey(data_variants, false, true);
-out.write(block);
+out->write(block);
 update_max_sizes(block);
 }

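A rough sketch of the locking pattern the Aggregator hunks above switch to (hypothetical SpillRegistry type, standard library only; the real code stores TemporaryBlockStreamHolder objects): spilled streams accumulate in a mutex-guarded std::list and are handed over wholesale via std::move. std::list is the natural container here because emplace_back never invalidates references to existing elements, so the lambda in writeToTemporaryFile can safely return a reference to the newly added stream.

#include <list>
#include <mutex>
#include <string>
#include <utility>

class SpillRegistry
{
public:
    // Add one spill entry and return a stable reference to it (std::list never relocates elements).
    std::string & add(std::string name)
    {
        std::lock_guard lock(mutex);
        return files.emplace_back(std::move(name));
    }

    bool hasData() const
    {
        std::lock_guard lock(mutex);
        return !files.empty();
    }

    // Hand the accumulated entries to the caller and leave the registry empty.
    std::list<std::string> detach()
    {
        std::lock_guard lock(mutex);
        return std::move(files);
    }

private:
    mutable std::mutex mutex;
    std::list<std::string> files;
};
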
@@ -309,9 +309,9 @@ public:
 /// For external aggregation.
 void writeToTemporaryFile(AggregatedDataVariants & data_variants, size_t max_temp_file_size = 0) const;

-bool hasTemporaryData() const { return tmp_data && !tmp_data->empty(); }
+bool hasTemporaryData() const;

-const TemporaryDataOnDisk & getTemporaryData() const { return *tmp_data; }
+std::list<TemporaryBlockStreamHolder> detachTemporaryData();

 /// Get data structure of the result.
 Block getHeader(bool final) const;
@@ -355,7 +355,9 @@ private:
 LoggerPtr log = getLogger("Aggregator");

 /// For external aggregation.
-TemporaryDataOnDiskPtr tmp_data;
+TemporaryDataOnDiskScopePtr tmp_data;
+mutable std::mutex tmp_files_mutex;
+mutable std::list<TemporaryBlockStreamHolder> tmp_files TSA_GUARDED_BY(tmp_files_mutex);

 size_t min_bytes_for_prefetch = 0;

@@ -456,7 +458,7 @@ private:
 void writeToTemporaryFileImpl(
 AggregatedDataVariants & data_variants,
 Method & method,
-TemporaryFileStream & out) const;
+TemporaryBlockStreamHolder & out) const;

 /// Merge NULL key data from hash table `src` into `dst`.
 template <typename Method, typename Table>

@@ -364,6 +364,8 @@ struct ContextSharedPart : boost::noncopyable
 /// Child scopes for more fine-grained accounting are created per user/query/etc.
 /// Initialized once during server startup.
 TemporaryDataOnDiskScopePtr root_temp_data_on_disk TSA_GUARDED_BY(mutex);
+/// TODO: remove, use only root_temp_data_on_disk
+VolumePtr temporary_volume_legacy;

 mutable OnceFlag async_loader_initialized;
 mutable std::unique_ptr<AsyncLoader> async_loader; /// Thread pool for asynchronous initialization of arbitrary DAG of `LoadJob`s (used for tables loading)
@@ -799,10 +801,9 @@ struct ContextSharedPart : boost::noncopyable
 }

 /// Special volumes might also use disks that require shutdown.
-auto & tmp_data = root_temp_data_on_disk;
-if (tmp_data && tmp_data->getVolume())
+if (temporary_volume_legacy)
 {
-auto & disks = tmp_data->getVolume()->getDisks();
+auto & disks = temporary_volume_legacy->getDisks();
 for (auto & disk : disks)
 disk->shutdown();
 }
@@ -1184,8 +1185,8 @@ VolumePtr Context::getGlobalTemporaryVolume() const
 SharedLockGuard lock(shared->mutex);
 /// Calling this method we just bypass the `temp_data_on_disk` and write to the file on the volume directly.
 /// Volume is the same for `root_temp_data_on_disk` (always set) and `temp_data_on_disk` (if it's set).
-if (shared->root_temp_data_on_disk)
-return shared->root_temp_data_on_disk->getVolume();
+if (shared->temporary_volume_legacy)
+return shared->temporary_volume_legacy;
 return nullptr;
 }

@@ -1273,6 +1274,10 @@ try
 /// We skip directories (for example, 'http_buffers' - it's used for buffering of the results) and all other file types.
 }
 }
+else
+{
+fs::create_directories(path);
+}
 }
 catch (...)
 {
@@ -1306,7 +1311,8 @@ void Context::setTemporaryStoragePath(const String & path, size_t max_size)

 TemporaryDataOnDiskSettings temporary_data_on_disk_settings;
 temporary_data_on_disk_settings.max_size_on_disk = max_size;
-shared->root_temp_data_on_disk = std::make_shared<TemporaryDataOnDiskScope>(std::move(volume), std::move(temporary_data_on_disk_settings));
+shared->root_temp_data_on_disk = std::make_shared<TemporaryDataOnDiskScope>(volume, std::move(temporary_data_on_disk_settings));
+shared->temporary_volume_legacy = volume;
 }

 void Context::setTemporaryStoragePolicy(const String & policy_name, size_t max_size)
@@ -1354,7 +1360,8 @@ void Context::setTemporaryStoragePolicy(const String & policy_name, size_t max_s

 TemporaryDataOnDiskSettings temporary_data_on_disk_settings;
 temporary_data_on_disk_settings.max_size_on_disk = max_size;
-shared->root_temp_data_on_disk = std::make_shared<TemporaryDataOnDiskScope>(std::move(volume), std::move(temporary_data_on_disk_settings));
+shared->root_temp_data_on_disk = std::make_shared<TemporaryDataOnDiskScope>(volume, std::move(temporary_data_on_disk_settings));
+shared->temporary_volume_legacy = volume;
 }

 void Context::setTemporaryStorageInCache(const String & cache_disk_name, size_t max_size)
@@ -1378,7 +1385,8 @@ void Context::setTemporaryStorageInCache(const String & cache_disk_name, size_t

 TemporaryDataOnDiskSettings temporary_data_on_disk_settings;
 temporary_data_on_disk_settings.max_size_on_disk = max_size;
-shared->root_temp_data_on_disk = std::make_shared<TemporaryDataOnDiskScope>(std::move(volume), file_cache.get(), std::move(temporary_data_on_disk_settings));
+shared->root_temp_data_on_disk = std::make_shared<TemporaryDataOnDiskScope>(file_cache.get(), std::move(temporary_data_on_disk_settings));
+shared->temporary_volume_legacy = volume;
 }

 void Context::setFlagsPath(const String & path)
@@ -41,15 +41,15 @@ namespace
 class AccumulatedBlockReader
 {
 public:
-AccumulatedBlockReader(TemporaryFileStream & reader_,
+AccumulatedBlockReader(TemporaryBlockStreamReaderHolder reader_,
 std::mutex & mutex_,
 size_t result_block_size_ = 0)
-: reader(reader_)
+: reader(std::move(reader_))
 , mutex(mutex_)
 , result_block_size(result_block_size_)
 {
-if (!reader.isWriteFinished())
-throw Exception(ErrorCodes::LOGICAL_ERROR, "Reading not finished file");
+if (!reader)
+throw Exception(ErrorCodes::LOGICAL_ERROR, "Reader is nullptr");
 }

 Block read()
@@ -63,7 +63,7 @@ namespace
 size_t rows_read = 0;
 do
 {
-Block block = reader.read();
+Block block = reader->read();
 rows_read += block.rows();
 if (!block)
 {
@@ -81,7 +81,7 @@ namespace
 }

 private:
-TemporaryFileStream & reader;
+TemporaryBlockStreamReaderHolder reader;
 std::mutex & mutex;

 const size_t result_block_size;
@@ -124,12 +124,12 @@ class GraceHashJoin::FileBucket : boost::noncopyable
 public:
 using BucketLock = std::unique_lock<std::mutex>;

-explicit FileBucket(size_t bucket_index_, TemporaryFileStream & left_file_, TemporaryFileStream & right_file_, LoggerPtr log_)
+explicit FileBucket(size_t bucket_index_, TemporaryBlockStreamHolder left_file_, TemporaryBlockStreamHolder right_file_, LoggerPtr log_)
-: idx{bucket_index_}
+: idx(bucket_index_)
-, left_file{left_file_}
+, left_file(std::move(left_file_))
-, right_file{right_file_}
+, right_file(std::move(right_file_))
-, state{State::WRITING_BLOCKS}
+, state(State::WRITING_BLOCKS)
-, log{log_}
+, log(log_)
 {
 }

@@ -157,12 +157,6 @@ public:
 return addBlockImpl(block, right_file, lock);
 }

-bool finished() const
-{
-std::unique_lock<std::mutex> left_lock(left_file_mutex);
-return left_file.isEof();
-}
-
 bool empty() const { return is_empty.load(); }

 AccumulatedBlockReader startJoining()
@@ -172,24 +166,21 @@ public:
 std::unique_lock<std::mutex> left_lock(left_file_mutex);
 std::unique_lock<std::mutex> right_lock(right_file_mutex);

-left_file.finishWriting();
-right_file.finishWriting();
-
 state = State::JOINING_BLOCKS;
 }
-return AccumulatedBlockReader(right_file, right_file_mutex);
+return AccumulatedBlockReader(right_file.getReadStream(), right_file_mutex);
 }

 AccumulatedBlockReader getLeftTableReader()
 {
 ensureState(State::JOINING_BLOCKS);
-return AccumulatedBlockReader(left_file, left_file_mutex);
+return AccumulatedBlockReader(left_file.getReadStream(), left_file_mutex);
 }

 const size_t idx;

 private:
-bool addBlockImpl(const Block & block, TemporaryFileStream & writer, std::unique_lock<std::mutex> & lock)
+bool addBlockImpl(const Block & block, TemporaryBlockStreamHolder & writer, std::unique_lock<std::mutex> & lock)
 {
 ensureState(State::WRITING_BLOCKS);

@@ -199,7 +190,7 @@ private:
 if (block.rows())
 is_empty = false;

-writer.write(block);
+writer->write(block);
 return true;
 }

@@ -217,8 +208,8 @@ private:
 throw Exception(ErrorCodes::LOGICAL_ERROR, "Invalid state transition, expected {}, got {}", expected, state.load());
 }

-TemporaryFileStream & left_file;
-TemporaryFileStream & right_file;
+TemporaryBlockStreamHolder left_file;
+TemporaryBlockStreamHolder right_file;
 mutable std::mutex left_file_mutex;
 mutable std::mutex right_file_mutex;

@@ -274,7 +265,7 @@ GraceHashJoin::GraceHashJoin(
 , max_num_buckets{context->getSettingsRef()[Setting::grace_hash_join_max_buckets]}
 , left_key_names(table_join->getOnlyClause().key_names_left)
 , right_key_names(table_join->getOnlyClause().key_names_right)
-, tmp_data(std::make_unique<TemporaryDataOnDisk>(tmp_data_, CurrentMetrics::TemporaryFilesForJoin))
+, tmp_data(tmp_data_->childScope(CurrentMetrics::TemporaryFilesForJoin))
 , hash_join(makeInMemoryJoin("grace0"))
 , hash_join_sample_block(hash_join->savedBlockSample())
 {
@@ -398,10 +389,10 @@ void GraceHashJoin::addBuckets(const size_t bucket_count)
 for (size_t i = 0; i < bucket_count; ++i)
 try
 {
-auto & left_file = tmp_data->createStream(left_sample_block);
-auto & right_file = tmp_data->createStream(prepareRightBlock(right_sample_block));
+TemporaryBlockStreamHolder left_file(left_sample_block, tmp_data.get());
+TemporaryBlockStreamHolder right_file(prepareRightBlock(right_sample_block), tmp_data.get());

-BucketPtr new_bucket = std::make_shared<FileBucket>(current_size + i, left_file, right_file, log);
+BucketPtr new_bucket = std::make_shared<FileBucket>(current_size + i, std::move(left_file), std::move(right_file), log);
 tmp_buckets.emplace_back(std::move(new_bucket));
 }
 catch (...)
@@ -632,12 +623,9 @@ IBlocksStreamPtr GraceHashJoin::getDelayedBlocks()
 for (bucket_idx = bucket_idx + 1; bucket_idx < buckets.size(); ++bucket_idx)
 {
 current_bucket = buckets[bucket_idx].get();
-if (current_bucket->finished() || current_bucket->empty())
+if (current_bucket->empty())
 {
-LOG_TRACE(log, "Skipping {} {} bucket {}",
-current_bucket->finished() ? "finished" : "",
-current_bucket->empty() ? "empty" : "",
-bucket_idx);
+LOG_TRACE(log, "Skipping empty bucket {}", bucket_idx);
 continue;
 }

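The FileBucket changes above replace references to shared TemporaryFileStream objects with TemporaryBlockStreamHolder values owned by the bucket, and the explicit finishWriting() calls disappear because switching to the read side now implies it. A simplified, standard-library-only sketch of that shape (hypothetical Bucket type, strings instead of Blocks):

#include <sstream>
#include <string>

class Bucket
{
public:
    void write(const std::string & row)
    {
        out << row << '\n'; // the writer is owned by the bucket itself, not referenced from a shared pool
    }

    // Freezes the written data and hands out an independent reader,
    // analogous to left_file.getReadStream() in the diff above.
    std::istringstream startReading() const
    {
        return std::istringstream(out.str());
    }

private:
    std::ostringstream out;
};
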
@@ -132,7 +132,7 @@ private:
 Names left_key_names;
 Names right_key_names;

-TemporaryDataOnDiskPtr tmp_data;
+TemporaryDataOnDiskScopePtr tmp_data;

 Buckets buckets;
 mutable SharedMutex rehash_mutex;

@@ -35,11 +35,6 @@
 #include <Interpreters/HashJoin/HashJoinMethods.h>
 #include <Interpreters/HashJoin/JoinUsedFlags.h>

-namespace CurrentMetrics
-{
-extern const Metric TemporaryFilesForJoin;
-}
-
 namespace DB
 {

@@ -64,7 +59,7 @@ struct NotProcessedCrossJoin : public ExtraBlock
 {
 size_t left_position;
 size_t right_block;
-std::unique_ptr<TemporaryFileStream::Reader> reader;
+std::optional<TemporaryBlockStreamReaderHolder> reader;
 };


@@ -106,10 +101,7 @@ HashJoin::HashJoin(std::shared_ptr<TableJoin> table_join_, const Block & right_s
 , instance_id(instance_id_)
 , asof_inequality(table_join->getAsofInequality())
 , data(std::make_shared<RightTableData>())
-, tmp_data(
-table_join_->getTempDataOnDisk()
-? std::make_unique<TemporaryDataOnDisk>(table_join_->getTempDataOnDisk(), CurrentMetrics::TemporaryFilesForJoin)
-: nullptr)
+, tmp_data(table_join_->getTempDataOnDisk())
 , right_sample_block(right_sample_block_)
 , max_joined_block_rows(table_join->maxJoinedBlockRows())
 , instance_log_id(!instance_id_.empty() ? "(" + instance_id_ + ") " : "")
@@ -520,11 +512,10 @@ bool HashJoin::addBlockToJoin(const Block & source_block_, bool check_limits)
 && (tmp_stream || (max_bytes_in_join && getTotalByteCount() + block_to_save.allocatedBytes() >= max_bytes_in_join)
 || (max_rows_in_join && getTotalRowCount() + block_to_save.rows() >= max_rows_in_join)))
 {
-if (tmp_stream == nullptr)
-{
-tmp_stream = &tmp_data->createStream(right_sample_block);
-}
-tmp_stream->write(block_to_save);
+if (!tmp_stream)
+tmp_stream.emplace(right_sample_block, tmp_data.get());
+tmp_stream.value()->write(block_to_save);
 return true;
 }

@@ -730,13 +721,14 @@ void HashJoin::joinBlockImplCross(Block & block, ExtraBlockPtr & not_processed)
 {
 size_t start_left_row = 0;
 size_t start_right_block = 0;
-std::unique_ptr<TemporaryFileStream::Reader> reader = nullptr;
+std::optional<TemporaryBlockStreamReaderHolder> reader;
 if (not_processed)
 {
 auto & continuation = static_cast<NotProcessedCrossJoin &>(*not_processed);
 start_left_row = continuation.left_position;
 start_right_block = continuation.right_block;
-reader = std::move(continuation.reader);
+if (continuation.reader)
+reader = std::move(*continuation.reader);
 not_processed.reset();
 }

@@ -804,12 +796,10 @@ void HashJoin::joinBlockImplCross(Block & block, ExtraBlockPtr & not_processed)

 if (tmp_stream && rows_added <= max_joined_block_rows)
 {
-if (reader == nullptr)
-{
-tmp_stream->finishWritingAsyncSafe();
+if (!reader)
 reader = tmp_stream->getReadStream();
-}
-while (auto block_right = reader->read())
+while (auto block_right = reader.value()->read())
 {
 ++block_number;
 process_right_block(block_right);

@@ -423,8 +423,9 @@ private:
 std::vector<Sizes> key_sizes;

 /// Needed to do external cross join
-TemporaryDataOnDiskPtr tmp_data;
-TemporaryFileStream* tmp_stream{nullptr};
+TemporaryDataOnDiskScopePtr tmp_data;
+std::optional<TemporaryBlockStreamHolder> tmp_stream;
+mutable std::once_flag finish_writing;

 /// Block with columns from the right-side table.
 Block right_sample_block;

@@ -1,7 +1,6 @@
 #pragma once

 #include <memory>
-#include <vector>

 #include <Core/Names.h>
 #include <Core/Block.h>

@@ -48,6 +48,8 @@ ColumnsDescription ProcessorProfileLogElement::getColumnsDescription()
 {"input_bytes", std::make_shared<DataTypeUInt64>(), "The number of bytes consumed by processor."},
 {"output_rows", std::make_shared<DataTypeUInt64>(), "The number of rows generated by processor."},
 {"output_bytes", std::make_shared<DataTypeUInt64>(), "The number of bytes generated by processor."},
+{"processor_uniq_id", std::make_shared<DataTypeString>(), "The uniq processor id in pipeline."},
+{"step_uniq_id", std::make_shared<DataTypeString>(), "The uniq step id in plan."},
 };
 }

@@ -83,6 +85,8 @@ void ProcessorProfileLogElement::appendToBlock(MutableColumns & columns) const
 columns[i++]->insert(input_bytes);
 columns[i++]->insert(output_rows);
 columns[i++]->insert(output_bytes);
+columns[i++]->insert(processor_uniq_id);
+columns[i++]->insert(step_uniq_id);
 }

 void logProcessorProfile(ContextPtr context, const Processors & processors)
@@ -120,6 +124,8 @@ void logProcessorProfile(ContextPtr context, const Processors & processors)
 processor_elem.plan_step_name = processor->getPlanStepName();
 processor_elem.plan_step_description = processor->getPlanStepDescription();
 processor_elem.plan_group = processor->getQueryPlanStepGroup();
+processor_elem.processor_uniq_id = processor->getUniqID();
+processor_elem.step_uniq_id = processor->getStepUniqID();

 processor_elem.processor_name = processor->getName();

@@ -17,7 +17,7 @@ struct ProcessorProfileLogElement
 UInt64 id{};
 std::vector<UInt64> parent_ids;

-UInt64 plan_step{};
+UInt64 plan_step;
 UInt64 plan_group{};
 String plan_step_name;
 String plan_step_description;
@@ -25,6 +25,8 @@ struct ProcessorProfileLogElement
 String initial_query_id;
 String query_id;
 String processor_name;
+String processor_uniq_id;
+String step_uniq_id;

 /// Milliseconds spend in IProcessor::work()
 UInt64 elapsed_us{};

@@ -20,6 +20,11 @@
 #include <memory>
 #include <base/types.h>

+namespace CurrentMetrics
+{
+extern const Metric TemporaryFilesForJoin;
+}
+
 namespace DB
 {

@@ -265,7 +270,7 @@ public:

 VolumePtr getGlobalTemporaryVolume() { return tmp_volume; }

-TemporaryDataOnDiskScopePtr getTempDataOnDisk() { return tmp_data; }
+TemporaryDataOnDiskScopePtr getTempDataOnDisk() { return tmp_data ? tmp_data->childScope(CurrentMetrics::TemporaryFilesForJoin) : nullptr; }

 ActionsDAG createJoinedBlockActions(ContextPtr context) const;

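getTempDataOnDisk() above now hands each join its own child scope carrying the TemporaryFilesForJoin metric. A simplified sketch of the parent/child relationship (hypothetical Scope class; the real TemporaryDataOnDiskScope also carries settings and size accounting):

#include <memory>
#include <string>
#include <utility>

class Scope : public std::enable_shared_from_this<Scope>
{
public:
    explicit Scope(std::string metric_) : metric(std::move(metric_)) {}

    // The child keeps the parent alive so usage can roll up, while the metric label differs.
    // The calling object must itself be owned by a std::shared_ptr, as TemporaryDataOnDiskScope is.
    std::shared_ptr<Scope> childScope(std::string child_metric)
    {
        auto child = std::make_shared<Scope>(std::move(child_metric));
        child->parent = shared_from_this();
        return child;
    }

private:
    std::string metric;
    std::shared_ptr<Scope> parent;
};
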
|
@ -9,13 +9,16 @@
|
|||||||
#include <Interpreters/Cache/FileCache.h>
|
#include <Interpreters/Cache/FileCache.h>
|
||||||
#include <Formats/NativeWriter.h>
|
#include <Formats/NativeWriter.h>
|
||||||
#include <Core/ProtocolDefines.h>
|
#include <Core/ProtocolDefines.h>
|
||||||
|
#include <Disks/IDisk.h>
|
||||||
#include <Disks/SingleDiskVolume.h>
|
#include <Disks/SingleDiskVolume.h>
|
||||||
#include <Disks/DiskLocal.h>
|
#include <Disks/DiskLocal.h>
|
||||||
#include <Disks/IO/WriteBufferFromTemporaryFile.h>
|
#include <Disks/IO/WriteBufferFromTemporaryFile.h>
|
||||||
|
|
||||||
#include <Core/Defines.h>
|
#include <Core/Defines.h>
|
||||||
|
#include <Common/formatReadable.h>
|
||||||
|
#include <Common/NaNUtils.h>
|
||||||
#include <Interpreters/Cache/WriteBufferToFileSegment.h>
|
#include <Interpreters/Cache/WriteBufferToFileSegment.h>
|
||||||
#include "Common/Exception.h"
|
#include <Common/Exception.h>
|
||||||
|
|
||||||
namespace ProfileEvents
|
namespace ProfileEvents
|
||||||
{
|
{
|
||||||
@ -27,11 +30,293 @@ namespace DB
|
|||||||
|
|
||||||
namespace ErrorCodes
|
namespace ErrorCodes
|
||||||
{
|
{
|
||||||
extern const int TOO_MANY_ROWS_OR_BYTES;
|
extern const int INVALID_STATE;
|
||||||
extern const int LOGICAL_ERROR;
|
extern const int LOGICAL_ERROR;
|
||||||
extern const int NOT_ENOUGH_SPACE;
|
extern const int NOT_ENOUGH_SPACE;
|
||||||
|
extern const int TOO_MANY_ROWS_OR_BYTES;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
namespace
|
||||||
|
{
|
||||||
|
|
||||||
|
inline CompressionCodecPtr getCodec(const TemporaryDataOnDiskSettings & settings)
|
||||||
|
{
|
||||||
|
if (settings.compression_codec.empty())
|
||||||
|
return CompressionCodecFactory::instance().get("NONE");
|
||||||
|
|
||||||
|
return CompressionCodecFactory::instance().get(settings.compression_codec);
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
TemporaryFileHolder::TemporaryFileHolder()
|
||||||
|
{
|
||||||
|
ProfileEvents::increment(ProfileEvents::ExternalProcessingFilesTotal);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
class TemporaryFileInLocalCache : public TemporaryFileHolder
|
||||||
|
{
|
||||||
|
public:
|
||||||
|
explicit TemporaryFileInLocalCache(FileCache & file_cache, size_t reserve_size = 0)
|
||||||
|
{
|
||||||
|
const auto key = FileSegment::Key::random();
|
||||||
|
LOG_TRACE(getLogger("TemporaryFileInLocalCache"), "Creating temporary file in cache with key {}", key);
|
||||||
|
segment_holder = file_cache.set(
|
||||||
|
key, 0, std::max<size_t>(1, reserve_size),
|
||||||
|
CreateFileSegmentSettings(FileSegmentKind::Ephemeral), FileCache::getCommonUser());
|
||||||
|
|
||||||
|
chassert(segment_holder->size() == 1);
|
||||||
|
segment_holder->front().getKeyMetadata()->createBaseDirectory(/* throw_if_failed */true);
|
||||||
|
}
|
||||||
|
|
||||||
|
std::unique_ptr<WriteBuffer> write() override
|
||||||
|
{
|
||||||
|
return std::make_unique<WriteBufferToFileSegment>(&segment_holder->front());
|
||||||
|
}
|
||||||
|
|
||||||
|
std::unique_ptr<ReadBuffer> read(size_t buffer_size) const override
|
||||||
|
{
|
||||||
|
return std::make_unique<ReadBufferFromFile>(segment_holder->front().getPath(), /* buf_size = */ buffer_size);
|
||||||
|
}
|
||||||
|
|
||||||
|
String describeFilePath() const override
|
||||||
|
{
|
||||||
|
return fmt::format("fscache://{}", segment_holder->front().getPath());
|
||||||
|
}
|
||||||
|
|
||||||
|
private:
|
||||||
|
FileSegmentsHolderPtr segment_holder;
|
||||||
|
};
|
||||||
|
|
||||||
|
class TemporaryFileOnLocalDisk : public TemporaryFileHolder
|
||||||
|
{
|
||||||
|
public:
|
||||||
|
explicit TemporaryFileOnLocalDisk(VolumePtr volume, size_t reserve_size = 0)
|
||||||
|
: path_to_file("tmp" + toString(UUIDHelpers::generateV4()))
|
||||||
|
{
|
||||||
|
LOG_TRACE(getLogger("TemporaryFileOnLocalDisk"), "Creating temporary file '{}'", path_to_file);
|
||||||
|
if (reserve_size > 0)
|
||||||
|
{
|
||||||
|
auto reservation = volume->reserve(reserve_size);
|
||||||
|
if (!reservation)
|
||||||
|
{
|
||||||
|
auto disks = volume->getDisks();
|
||||||
|
Strings disks_info;
|
||||||
|
for (const auto & d : disks)
|
||||||
|
{
|
||||||
|
auto to_double = [](auto x) { return static_cast<double>(x); };
|
||||||
|
disks_info.push_back(fmt::format("{}: available: {} unreserved: {}, total: {}, keeping: {}",
|
||||||
|
d->getName(),
|
||||||
|
ReadableSize(d->getAvailableSpace().transform(to_double).value_or(NaNOrZero<double>())),
|
||||||
|
ReadableSize(d->getUnreservedSpace().transform(to_double).value_or(NaNOrZero<double>())),
|
||||||
|
ReadableSize(d->getTotalSpace().transform(to_double).value_or(NaNOrZero<double>())),
|
||||||
|
ReadableSize(d->getKeepingFreeSpace())));
|
||||||
|
}
|
||||||
|
|
||||||
|
throw Exception(ErrorCodes::NOT_ENOUGH_SPACE,
|
||||||
|
"Not enough space on temporary disk, cannot reserve {} bytes on [{}]",
|
||||||
|
reserve_size, fmt::join(disks_info, ", "));
|
||||||
|
}
|
||||||
|
disk = reservation->getDisk();
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
disk = volume->getDisk();
|
||||||
|
}
|
||||||
|
chassert(disk);
|
||||||
|
}
|
||||||
|
|
||||||
|
std::unique_ptr<WriteBuffer> write() override
|
||||||
|
{
|
||||||
|
return disk->writeFile(path_to_file);
|
||||||
|
}
|
||||||
|
|
||||||
|
std::unique_ptr<ReadBuffer> read(size_t buffer_size) const override
|
||||||
|
{
|
||||||
|
ReadSettings settings;
|
||||||
|
settings.local_fs_buffer_size = buffer_size;
|
||||||
|
settings.remote_fs_buffer_size = buffer_size;
|
||||||
|
settings.prefetch_buffer_size = buffer_size;
|
||||||
|
|
||||||
|
return disk->readFile(path_to_file, settings);
|
||||||
|
}
|
||||||
|
|
||||||
|
String describeFilePath() const override
|
||||||
|
{
|
||||||
|
return fmt::format("disk({})://{}/{}", disk->getName(), disk->getPath(), path_to_file);
|
||||||
|
}
|
||||||
|
|
||||||
|
~TemporaryFileOnLocalDisk() override
|
||||||
|
try
|
||||||
|
{
|
||||||
|
if (disk->existsFile(path_to_file))
|
||||||
|
{
|
||||||
|
LOG_TRACE(getLogger("TemporaryFileOnLocalDisk"), "Removing temporary file '{}'", path_to_file);
|
||||||
|
disk->removeRecursive(path_to_file);
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
LOG_WARNING(getLogger("TemporaryFileOnLocalDisk"), "Temporary path '{}' does not exist in '{}' on disk {}", path_to_file, disk->getPath(), disk->getName());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
catch (...)
|
||||||
|
{
|
||||||
|
tryLogCurrentException(__PRETTY_FUNCTION__);
|
||||||
|
}
|
||||||
|
|
||||||
|
private:
|
||||||
|
DiskPtr disk;
|
||||||
|
String path_to_file;
|
||||||
|
};
|
||||||
|
|
||||||
|
TemporaryFileProvider createTemporaryFileProvider(VolumePtr volume)
|
||||||
|
{
|
||||||
|
if (!volume)
|
||||||
|
throw Exception(ErrorCodes::LOGICAL_ERROR, "Volume is not initialized");
|
||||||
|
return [volume](size_t max_size) -> std::unique_ptr<TemporaryFileHolder>
|
||||||
|
{
|
||||||
|
return std::make_unique<TemporaryFileOnLocalDisk>(volume, max_size);
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
TemporaryFileProvider createTemporaryFileProvider(FileCache * file_cache)
|
||||||
|
{
|
||||||
|
if (!file_cache || !file_cache->isInitialized())
|
||||||
|
throw Exception(ErrorCodes::LOGICAL_ERROR, "File cache is not initialized");
|
||||||
|
return [file_cache](size_t max_size) -> std::unique_ptr<TemporaryFileHolder>
|
||||||
|
{
|
||||||
|
return std::make_unique<TemporaryFileInLocalCache>(*file_cache, max_size);
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
TemporaryDataOnDiskScopePtr TemporaryDataOnDiskScope::childScope(CurrentMetrics::Metric current_metric)
|
||||||
|
{
|
||||||
|
TemporaryDataOnDiskSettings child_settings = settings;
|
||||||
|
child_settings.current_metric = current_metric;
|
||||||
|
return std::make_shared<TemporaryDataOnDiskScope>(shared_from_this(), child_settings);
|
||||||
|
}
|
||||||
|
|
||||||
|
TemporaryDataReadBuffer::TemporaryDataReadBuffer(std::unique_ptr<ReadBuffer> in_)
|
||||||
|
: ReadBuffer(nullptr, 0)
|
||||||
|
, compressed_buf(std::move(in_))
|
||||||
|
{
|
||||||
|
BufferBase::set(compressed_buf->buffer().begin(), compressed_buf->buffer().size(), compressed_buf->offset());
|
||||||
|
}
|
||||||
|
|
||||||
|
bool TemporaryDataReadBuffer::nextImpl()
|
||||||
|
{
|
||||||
|
compressed_buf->position() = position();
|
||||||
|
if (!compressed_buf->next())
|
||||||
|
{
|
||||||
|
set(compressed_buf->position(), 0);
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
BufferBase::set(compressed_buf->buffer().begin(), compressed_buf->buffer().size(), compressed_buf->offset());
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
TemporaryDataBuffer::TemporaryDataBuffer(TemporaryDataOnDiskScope * parent_, size_t reserve_size)
|
||||||
|
: WriteBuffer(nullptr, 0)
|
||||||
|
, parent(parent_)
|
||||||
|
, file_holder(parent->file_provider(reserve_size))
|
||||||
|
, out_compressed_buf(file_holder->write(), getCodec(parent->getSettings()))
|
||||||
|
{
|
||||||
|
WriteBuffer::set(out_compressed_buf->buffer().begin(), out_compressed_buf->buffer().size());
|
||||||
|
}
|
||||||
|
|
||||||
|
void TemporaryDataBuffer::nextImpl()
|
||||||
|
{
|
||||||
|
if (!out_compressed_buf)
|
||||||
|
throw Exception(ErrorCodes::LOGICAL_ERROR, "Temporary file buffer writing has been finished");
|
||||||
|
|
||||||
|
out_compressed_buf->position() = position();
|
||||||
|
out_compressed_buf->next();
|
||||||
|
BufferBase::set(out_compressed_buf->buffer().begin(), out_compressed_buf->buffer().size(), out_compressed_buf->offset());
|
||||||
|
updateAllocAndCheck();
|
||||||
|
}
|
||||||
|
|
||||||
|
String TemporaryDataBuffer::describeFilePath() const
|
||||||
|
{
|
||||||
|
return file_holder->describeFilePath();
|
||||||
|
}
|
||||||
|
|
||||||
|
TemporaryDataBuffer::~TemporaryDataBuffer()
|
||||||
|
{
|
||||||
|
if (out_compressed_buf)
|
||||||
|
// read() nor finishWriting() was called
|
||||||
|
cancel();
|
||||||
|
}
|
||||||
|
|
||||||
|
void TemporaryDataBuffer::cancelImpl() noexcept
|
||||||
|
{
|
||||||
|
if (out_compressed_buf)
|
||||||
|
{
|
||||||
|
/// CompressedWriteBuffer doesn't call cancel/finalize for wrapped buffer
|
||||||
|
out_compressed_buf->cancel();
|
||||||
|
out_compressed_buf.getHolder()->cancel();
|
||||||
|
out_compressed_buf.reset();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void TemporaryDataBuffer::finalizeImpl()
|
||||||
|
{
|
||||||
|
if (!out_compressed_buf)
|
||||||
|
return;
|
||||||
|
|
||||||
|
/// CompressedWriteBuffer doesn't call cancel/finalize for wrapped buffer
|
||||||
|
out_compressed_buf->finalize();
|
||||||
|
out_compressed_buf.getHolder()->finalize();
|
||||||
|
|
||||||
|
updateAllocAndCheck();
|
||||||
|
out_compressed_buf.reset();
|
||||||
|
}
|
||||||
|
|
||||||
|
TemporaryDataBuffer::Stat TemporaryDataBuffer::finishWriting()
|
||||||
|
{
|
||||||
|
/// TemporaryDataBuffer::read can be called from multiple threads
|
||||||
|
std::call_once(write_finished, [this]
|
||||||
|
{
|
||||||
|
if (canceled)
|
||||||
|
throw Exception(ErrorCodes::INVALID_STATE, "Writing to temporary file buffer was not successful");
|
||||||
|
next();
|
||||||
|
finalize();
|
||||||
|
});
|
||||||
|
return stat;
|
||||||
|
}
|
||||||
|
|
||||||
|
std::unique_ptr<ReadBuffer> TemporaryDataBuffer::read()
|
||||||
|
{
|
||||||
|
finishWriting();
|
||||||
|
|
||||||
|
if (stat.compressed_size == 0 && stat.uncompressed_size == 0)
|
||||||
|
return std::make_unique<TemporaryDataReadBuffer>(std::make_unique<ReadBufferFromEmptyFile>());
|
||||||
|
|
||||||
|
/// Keep buffer size less that file size, to avoid memory overhead for large amounts of small files
|
||||||
|
size_t buffer_size = std::min<size_t>(stat.compressed_size, DBMS_DEFAULT_BUFFER_SIZE);
|
||||||
|
return std::make_unique<TemporaryDataReadBuffer>(file_holder->read(buffer_size));
|
||||||
|
}
|
||||||
|
|
||||||
|
void TemporaryDataBuffer::updateAllocAndCheck()
|
||||||
|
{
|
||||||
|
if (!out_compressed_buf)
|
||||||
|
throw Exception(ErrorCodes::LOGICAL_ERROR, "Temporary file buffer writing has been finished");
|
||||||
|
|
||||||
|
size_t new_compressed_size = out_compressed_buf->getCompressedBytes();
|
||||||
|
size_t new_uncompressed_size = out_compressed_buf->getUncompressedBytes();
|
||||||
|
|
||||||
|
if (unlikely(new_compressed_size < stat.compressed_size || new_uncompressed_size < stat.uncompressed_size))
|
||||||
|
{
|
||||||
|
throw Exception(ErrorCodes::LOGICAL_ERROR,
|
||||||
|
"Temporary file {} size decreased after write: compressed: {} -> {}, uncompressed: {} -> {}",
|
||||||
|
file_holder ? file_holder->describeFilePath() : "NULL",
|
||||||
|
new_compressed_size, stat.compressed_size, new_uncompressed_size, stat.uncompressed_size);
|
||||||
|
}
|
||||||
|
|
||||||
|
parent->deltaAllocAndCheck(new_compressed_size - stat.compressed_size, new_uncompressed_size - stat.uncompressed_size);
|
||||||
|
stat.compressed_size = new_compressed_size;
|
||||||
|
stat.uncompressed_size = new_uncompressed_size;
|
||||||
|
}
|
||||||
|
|
||||||
void TemporaryDataOnDiskScope::deltaAllocAndCheck(ssize_t compressed_delta, ssize_t uncompressed_delta)
|
void TemporaryDataOnDiskScope::deltaAllocAndCheck(ssize_t compressed_delta, ssize_t uncompressed_delta)
|
||||||
{
|
{
|
||||||
@@ -54,391 +339,25 @@ void TemporaryDataOnDiskScope::deltaAllocAndCheck(ssize_t compressed_delta, ssiz
     stat.uncompressed_size += uncompressed_delta;
 }

-TemporaryDataOnDisk::TemporaryDataOnDisk(TemporaryDataOnDiskScopePtr parent_)
-    : TemporaryDataOnDiskScope(parent_, parent_->getSettings())
-{}
-
-TemporaryDataOnDisk::TemporaryDataOnDisk(TemporaryDataOnDiskScopePtr parent_, CurrentMetrics::Metric metric_scope)
-    : TemporaryDataOnDiskScope(parent_, parent_->getSettings())
-    , current_metric_scope(metric_scope)
-{}
-
-std::unique_ptr<WriteBufferFromFileBase> TemporaryDataOnDisk::createRawStream(size_t max_file_size)
-{
-    if (file_cache && file_cache->isInitialized())
-    {
-        auto holder = createCacheFile(max_file_size);
-        return std::make_unique<WriteBufferToFileSegment>(std::move(holder));
-    }
-    if (volume)
-    {
-        auto tmp_file = createRegularFile(max_file_size);
-        return std::make_unique<WriteBufferFromTemporaryFile>(std::move(tmp_file));
-    }
-
-    throw Exception(ErrorCodes::LOGICAL_ERROR, "TemporaryDataOnDiskScope has no cache and no volume");
-}
-
-TemporaryFileStream & TemporaryDataOnDisk::createStream(const Block & header, size_t max_file_size)
-{
-    if (file_cache && file_cache->isInitialized())
-    {
-        auto holder = createCacheFile(max_file_size);
-
-        std::lock_guard lock(mutex);
-        TemporaryFileStreamPtr & tmp_stream = streams.emplace_back(std::make_unique<TemporaryFileStream>(std::move(holder), header, this));
-        return *tmp_stream;
-    }
-    if (volume)
-    {
-        auto tmp_file = createRegularFile(max_file_size);
-
-        std::lock_guard lock(mutex);
-        TemporaryFileStreamPtr & tmp_stream
-            = streams.emplace_back(std::make_unique<TemporaryFileStream>(std::move(tmp_file), header, this));
-        return *tmp_stream;
-    }
-
-    throw Exception(ErrorCodes::LOGICAL_ERROR, "TemporaryDataOnDiskScope has no cache and no volume");
-}
-
-FileSegmentsHolderPtr TemporaryDataOnDisk::createCacheFile(size_t max_file_size)
-{
-    if (!file_cache)
-        throw Exception(ErrorCodes::LOGICAL_ERROR, "TemporaryDataOnDiskScope has no cache");
-
-    ProfileEvents::increment(ProfileEvents::ExternalProcessingFilesTotal);
-
-    const auto key = FileSegment::Key::random();
-    auto holder = file_cache->set(
-        key, 0, std::max(10_MiB, max_file_size),
-        CreateFileSegmentSettings(FileSegmentKind::Ephemeral), FileCache::getCommonUser());
-
-    chassert(holder->size() == 1);
-    holder->back().getKeyMetadata()->createBaseDirectory(/* throw_if_failed */true);
-
-    return holder;
-}
-
-TemporaryFileOnDiskHolder TemporaryDataOnDisk::createRegularFile(size_t max_file_size)
-{
-    if (!volume)
-        throw Exception(ErrorCodes::LOGICAL_ERROR, "TemporaryDataOnDiskScope has no volume");
-
-    DiskPtr disk;
-    if (max_file_size > 0)
-    {
-        auto reservation = volume->reserve(max_file_size);
-        if (!reservation)
-            throw Exception(ErrorCodes::NOT_ENOUGH_SPACE, "Not enough space on temporary disk");
-        disk = reservation->getDisk();
-    }
-    else
-    {
-        disk = volume->getDisk();
-    }
-    /// We do not increment ProfileEvents::ExternalProcessingFilesTotal here because it is incremented in TemporaryFileOnDisk constructor.
-    return std::make_unique<TemporaryFileOnDisk>(disk, current_metric_scope);
-}
-
-std::vector<TemporaryFileStream *> TemporaryDataOnDisk::getStreams() const
-{
-    std::vector<TemporaryFileStream *> res;
-    std::lock_guard lock(mutex);
-    res.reserve(streams.size());
-    for (const auto & stream : streams)
-        res.push_back(stream.get());
-    return res;
-}
-
-bool TemporaryDataOnDisk::empty() const
-{
-    std::lock_guard lock(mutex);
-    return streams.empty();
-}
-
-static inline CompressionCodecPtr getCodec(const TemporaryDataOnDiskSettings & settings)
-{
-    if (settings.compression_codec.empty())
-        return CompressionCodecFactory::instance().get("NONE");
-
-    return CompressionCodecFactory::instance().get(settings.compression_codec);
-}
-
-struct TemporaryFileStream::OutputWriter
-{
-    OutputWriter(std::unique_ptr<WriteBuffer> out_buf_, const Block & header_, const TemporaryDataOnDiskSettings & settings)
-        : out_buf(std::move(out_buf_))
-        , out_compressed_buf(*out_buf, getCodec(settings))
-        , out_writer(out_compressed_buf, DBMS_TCP_PROTOCOL_VERSION, header_)
-    {
-    }
-
-    size_t write(const Block & block)
-    {
-        if (finalized)
-            throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot write to finalized stream");
-        size_t written_bytes = out_writer.write(block);
-        num_rows += block.rows();
-        return written_bytes;
-    }
-
-    void flush()
-    {
-        if (finalized)
-            throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot flush finalized stream");
-
-        out_compressed_buf.next();
-        out_buf->next();
-        out_writer.flush();
-    }
-
-    void finalize()
-    {
-        if (finalized)
-            return;
-
-        /// if we called finalize() explicitly, and got an exception,
-        /// we don't want to get it again in the destructor, so set finalized flag first
-        finalized = true;
-
-        out_writer.flush();
-        out_compressed_buf.finalize();
-        out_buf->finalize();
-    }
-
-    ~OutputWriter()
-    {
-        try
-        {
-            finalize();
-        }
-        catch (...)
-        {
-            tryLogCurrentException(__PRETTY_FUNCTION__);
-        }
-    }
-
-    std::unique_ptr<WriteBuffer> out_buf;
-    CompressedWriteBuffer out_compressed_buf;
-    NativeWriter out_writer;
-
-    std::atomic_size_t num_rows = 0;
-
-    bool finalized = false;
-};
-
-TemporaryFileStream::Reader::Reader(const String & path_, const Block & header_, size_t size_)
-    : path(path_)
-    , size(size_ ? std::min<size_t>(size_, DBMS_DEFAULT_BUFFER_SIZE) : DBMS_DEFAULT_BUFFER_SIZE)
-    , header(header_)
-{
-    LOG_TEST(getLogger("TemporaryFileStream"), "Reading {} from {}", header_.dumpStructure(), path);
-}
-
-TemporaryFileStream::Reader::Reader(const String & path_, size_t size_)
-    : path(path_)
-    , size(size_ ? std::min<size_t>(size_, DBMS_DEFAULT_BUFFER_SIZE) : DBMS_DEFAULT_BUFFER_SIZE)
-{
-    LOG_TEST(getLogger("TemporaryFileStream"), "Reading from {}", path);
-}
-
-Block TemporaryFileStream::Reader::read()
-{
-    if (!in_reader)
-    {
-        if (fs::exists(path))
-            in_file_buf = std::make_unique<ReadBufferFromFile>(path, size);
-        else
-            in_file_buf = std::make_unique<ReadBufferFromEmptyFile>();
-
-        in_compressed_buf = std::make_unique<CompressedReadBuffer>(*in_file_buf);
-        if (header.has_value())
-            in_reader = std::make_unique<NativeReader>(*in_compressed_buf, header.value(), DBMS_TCP_PROTOCOL_VERSION);
-        else
-            in_reader = std::make_unique<NativeReader>(*in_compressed_buf, DBMS_TCP_PROTOCOL_VERSION);
-    }
-    return in_reader->read();
-}
-
-TemporaryFileStream::TemporaryFileStream(TemporaryFileOnDiskHolder file_, const Block & header_, TemporaryDataOnDisk * parent_)
-    : parent(parent_)
-    , header(header_)
-    , file(std::move(file_))
-    , out_writer(std::make_unique<OutputWriter>(std::make_unique<WriteBufferFromFile>(file->getAbsolutePath()), header, parent->settings))
-{
-    LOG_TEST(getLogger("TemporaryFileStream"), "Writing to temporary file {}", file->getAbsolutePath());
-}
-
-TemporaryFileStream::TemporaryFileStream(FileSegmentsHolderPtr segments_, const Block & header_, TemporaryDataOnDisk * parent_)
-    : parent(parent_)
-    , header(header_)
-    , segment_holder(std::move(segments_))
-{
-    if (segment_holder->size() != 1)
-        throw Exception(ErrorCodes::LOGICAL_ERROR, "TemporaryFileStream can be created only from single segment");
-    auto out_buf = std::make_unique<WriteBufferToFileSegment>(&segment_holder->front());
-
-    LOG_TEST(getLogger("TemporaryFileStream"), "Writing to temporary file {}", out_buf->getFileName());
-    out_writer = std::make_unique<OutputWriter>(std::move(out_buf), header, parent_->settings);
-}
-
-size_t TemporaryFileStream::write(const Block & block)
-{
-    if (!out_writer)
-        throw Exception(ErrorCodes::LOGICAL_ERROR, "Writing has been finished");
-
-    updateAllocAndCheck();
-    size_t bytes_written = out_writer->write(block);
-    return bytes_written;
-}
-
-void TemporaryFileStream::flush()
-{
-    if (!out_writer)
-        throw Exception(ErrorCodes::LOGICAL_ERROR, "Writing has been finished");
-
-    out_writer->flush();
-}
-
-TemporaryFileStream::Stat TemporaryFileStream::finishWriting()
-{
-    if (isWriteFinished())
-        return stat;
-
-    if (out_writer)
-    {
-        out_writer->finalize();
-        /// The amount of written data can be changed after finalization, some buffers can be flushed
-        /// Need to update the stat
-        updateAllocAndCheck();
-        out_writer.reset();
-
-        /// reader will be created at the first read call, not to consume memory before it is needed
-    }
-    return stat;
-}
-
-TemporaryFileStream::Stat TemporaryFileStream::finishWritingAsyncSafe()
-{
-    std::call_once(finish_writing, [this]{ finishWriting(); });
-    return stat;
-}
-
-bool TemporaryFileStream::isWriteFinished() const
-{
-    assert(in_reader == nullptr || out_writer == nullptr);
-    return out_writer == nullptr;
-}
-
-Block TemporaryFileStream::read()
-{
-    if (!isWriteFinished())
-        throw Exception(ErrorCodes::LOGICAL_ERROR, "Writing has been not finished");
-
-    if (isEof())
-        return {};
-
-    if (!in_reader)
-    {
-        in_reader = std::make_unique<Reader>(getPath(), header, getSize());
-    }
-
-    Block block = in_reader->read();
-    if (!block)
-    {
-        /// finalize earlier to release resources, do not wait for the destructor
-        this->release();
-    }
-    return block;
-}
-
-std::unique_ptr<TemporaryFileStream::Reader> TemporaryFileStream::getReadStream()
-{
-    if (!isWriteFinished())
-        throw Exception(ErrorCodes::LOGICAL_ERROR, "Writing has been not finished");
-
-    if (isEof())
-        return nullptr;
-
-    return std::make_unique<Reader>(getPath(), header, getSize());
-}
-
-void TemporaryFileStream::updateAllocAndCheck()
-{
-    assert(out_writer);
-    size_t new_compressed_size = out_writer->out_compressed_buf.getCompressedBytes();
-    size_t new_uncompressed_size = out_writer->out_compressed_buf.getUncompressedBytes();
-
-    if (unlikely(new_compressed_size < stat.compressed_size || new_uncompressed_size < stat.uncompressed_size))
-    {
-        throw Exception(ErrorCodes::LOGICAL_ERROR,
-            "Temporary file {} size decreased after write: compressed: {} -> {}, uncompressed: {} -> {}",
-            getPath(), new_compressed_size, stat.compressed_size, new_uncompressed_size, stat.uncompressed_size);
-    }
-
-    parent->deltaAllocAndCheck(new_compressed_size - stat.compressed_size, new_uncompressed_size - stat.uncompressed_size);
-    stat.compressed_size = new_compressed_size;
-    stat.uncompressed_size = new_uncompressed_size;
-    stat.num_rows = out_writer->num_rows;
-}
-
-bool TemporaryFileStream::isEof() const
-{
-    return file == nullptr && !segment_holder;
-}
-
-void TemporaryFileStream::release()
-{
-    if (in_reader)
-        in_reader.reset();
-
-    if (out_writer)
-    {
-        out_writer->finalize();
-        out_writer.reset();
-    }
-
-    if (file)
-    {
-        file.reset();
-        parent->deltaAllocAndCheck(-stat.compressed_size, -stat.uncompressed_size);
-    }
-
-    if (segment_holder)
-        segment_holder.reset();
-}
-
-String TemporaryFileStream::getPath() const
-{
-    if (file)
-        return file->getAbsolutePath();
-    if (segment_holder && !segment_holder->empty())
-        return segment_holder->front().getPath();
-
-    throw Exception(ErrorCodes::LOGICAL_ERROR, "TemporaryFileStream has no file");
-}
-
-size_t TemporaryFileStream::getSize() const
-{
-    if (file)
-        return file->getDisk()->getFileSize(file->getRelativePath());
-    if (segment_holder && !segment_holder->empty())
-        return segment_holder->front().getReservedSize();
-
-    throw Exception(ErrorCodes::LOGICAL_ERROR, "TemporaryFileStream has no file");
-}
-
-TemporaryFileStream::~TemporaryFileStream()
-{
-    try
-    {
-        release();
-    }
-    catch (...)
-    {
-        tryLogCurrentException(__PRETTY_FUNCTION__);
-        assert(false); /// deltaAllocAndCheck with negative can't throw exception
-    }
-}
+TemporaryBlockStreamHolder::TemporaryBlockStreamHolder(const Block & header_, TemporaryDataOnDiskScope * parent_, size_t reserve_size)
+    : WrapperGuard(std::make_unique<TemporaryDataBuffer>(parent_, reserve_size), DBMS_TCP_PROTOCOL_VERSION, header_)
+    , header(header_)
+{}
+
+TemporaryDataBuffer::Stat TemporaryBlockStreamHolder::finishWriting() const
+{
+    if (!holder)
+        throw Exception(ErrorCodes::LOGICAL_ERROR, "Temporary block stream is not initialized");
+
+    impl->flush();
+    return holder->finishWriting();
+}
+
+TemporaryBlockStreamReaderHolder TemporaryBlockStreamHolder::getReadStream() const
+{
+    if (!holder)
+        throw Exception(ErrorCodes::LOGICAL_ERROR, "Temporary block stream is not initialized");
+    return TemporaryBlockStreamReaderHolder(holder->read(), header, DBMS_TCP_PROTOCOL_VERSION);
+}

 }
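For orientation, a hedged sketch of how a call site migrates from the removed TemporaryDataOnDisk::createStream() API to the TemporaryBlockStreamHolder implementation above (the scope and block are assumed to exist):

    void spillBlock(TemporaryDataOnDiskScope * scope, const Block & block)
    {
        /// Before: auto & stream = tmp_data->createStream(header); stream.write(block); stream.finishWriting();
        TemporaryBlockStreamHolder stream(block.cloneEmpty(), scope);   /// header block + scope
        stream->write(block);                         /// operator-> reaches the wrapped NativeWriter
        auto stat = stream.finishWriting();           /// flushes and returns compressed/uncompressed sizes

        auto reader = stream.getReadStream();         /// NativeReader over the temporary data
        while (true)
        {
            Block read = reader->read();
            if (!read)
                break;
            /// consume `read` ...
        }
    }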
@@ -4,15 +4,21 @@
 #include <mutex>
 #include <boost/noncopyable.hpp>

-#include <IO/ReadBufferFromFile.h>
+#include <Common/CurrentMetrics.h>
 #include <Compression/CompressedReadBuffer.h>
-#include <Formats/NativeReader.h>
-#include <Core/Block.h>
+#include <Compression/CompressedWriteBuffer.h>
+
 #include <Disks/IVolume.h>
 #include <Disks/TemporaryFileOnDisk.h>
-#include <Interpreters/Cache/FileSegment.h>
-#include <Common/CurrentMetrics.h>
+
+#include <Formats/NativeReader.h>
+#include <Formats/NativeWriter.h>
+
+#include <Interpreters/Cache/FileSegment.h>
+
+#include <IO/ReadBufferFromFile.h>
+
+class FileCacheTest_TemporaryDataReadBufferSize_Test;

 namespace CurrentMetrics
 {
@@ -25,11 +31,10 @@ namespace DB

 class TemporaryDataOnDiskScope;
 using TemporaryDataOnDiskScopePtr = std::shared_ptr<TemporaryDataOnDiskScope>;

-class TemporaryDataOnDisk;
-using TemporaryDataOnDiskPtr = std::unique_ptr<TemporaryDataOnDisk>;
+class TemporaryDataBuffer;
+using TemporaryDataBufferPtr = std::unique_ptr<TemporaryDataBuffer>;

-class TemporaryFileStream;
-using TemporaryFileStreamPtr = std::unique_ptr<TemporaryFileStream>;
+class TemporaryFileHolder;

 class FileCache;

@@ -40,15 +45,26 @@ struct TemporaryDataOnDiskSettings

     /// Compression codec for temporary data, if empty no compression will be used. LZ4 by default
     String compression_codec = "LZ4";
+
+    /// Read/Write internal buffer size
+    size_t buffer_size = DBMS_DEFAULT_BUFFER_SIZE;
+
+    /// Metrics counter to increment when temporary file in current scope are created
+    CurrentMetrics::Metric current_metric = CurrentMetrics::TemporaryFilesUnknown;
 };

+/// Creates temporary files located on specified resource (disk, fs_cache, etc.)
+using TemporaryFileProvider = std::function<std::unique_ptr<TemporaryFileHolder>(size_t)>;
+TemporaryFileProvider createTemporaryFileProvider(VolumePtr volume);
+TemporaryFileProvider createTemporaryFileProvider(FileCache * file_cache);
+
 /*
  * Used to account amount of temporary data written to disk.
  * If limit is set, throws exception if limit is exceeded.
  * Data can be nested, so parent scope accounts all data written by children.
  * Scopes are: global -> per-user -> per-query -> per-purpose (sorting, aggregation, etc).
  */
-class TemporaryDataOnDiskScope : boost::noncopyable
+class TemporaryDataOnDiskScope : boost::noncopyable, public std::enable_shared_from_this<TemporaryDataOnDiskScope>
 {
 public:
     struct StatAtomic
@@ -57,164 +73,156 @@ public:
         std::atomic<size_t> uncompressed_size;
     };

-    explicit TemporaryDataOnDiskScope(VolumePtr volume_, TemporaryDataOnDiskSettings settings_)
-        : volume(std::move(volume_))
-        , settings(std::move(settings_))
-    {}
-
-    explicit TemporaryDataOnDiskScope(VolumePtr volume_, FileCache * file_cache_, TemporaryDataOnDiskSettings settings_)
-        : volume(std::move(volume_))
-        , file_cache(file_cache_)
+    /// Root scope
+    template <typename T>
+    TemporaryDataOnDiskScope(T && storage, TemporaryDataOnDiskSettings settings_)
+        : file_provider(createTemporaryFileProvider(std::forward<T>(storage)))
         , settings(std::move(settings_))
     {}

-    explicit TemporaryDataOnDiskScope(TemporaryDataOnDiskScopePtr parent_, TemporaryDataOnDiskSettings settings_)
+    TemporaryDataOnDiskScope(TemporaryDataOnDiskScopePtr parent_, TemporaryDataOnDiskSettings settings_)
         : parent(std::move(parent_))
-        , volume(parent->volume)
-        , file_cache(parent->file_cache)
+        , file_provider(parent->file_provider)
         , settings(std::move(settings_))
     {}

-    /// TODO: remove
-    /// Refactor all code that uses volume directly to use TemporaryDataOnDisk.
-    VolumePtr getVolume() const { return volume; }
+    TemporaryDataOnDiskScopePtr childScope(CurrentMetrics::Metric current_metric);

     const TemporaryDataOnDiskSettings & getSettings() const { return settings; }

 protected:
+    friend class TemporaryDataBuffer;
+
     void deltaAllocAndCheck(ssize_t compressed_delta, ssize_t uncompressed_delta);

     TemporaryDataOnDiskScopePtr parent = nullptr;

-    VolumePtr volume = nullptr;
-    FileCache * file_cache = nullptr;
+    TemporaryFileProvider file_provider;

     StatAtomic stat;
     const TemporaryDataOnDiskSettings settings;
 };

-/*
- * Holds the set of temporary files.
- * New file stream is created with `createStream`.
- * Streams are owned by this object and will be deleted when it is deleted.
- * It's a leaf node in temporary data scope tree.
- */
-class TemporaryDataOnDisk : private TemporaryDataOnDiskScope
-{
-    friend class TemporaryFileStream; /// to allow it to call `deltaAllocAndCheck` to account data
-
-public:
-    using TemporaryDataOnDiskScope::StatAtomic;
-
-    explicit TemporaryDataOnDisk(TemporaryDataOnDiskScopePtr parent_);
-
-    explicit TemporaryDataOnDisk(TemporaryDataOnDiskScopePtr parent_, CurrentMetrics::Metric metric_scope);
-
-    /// If max_file_size > 0, then check that there's enough space on the disk and throw an exception in case of lack of free space
-    TemporaryFileStream & createStream(const Block & header, size_t max_file_size = 0);
-
-    /// Write raw data directly into buffer.
-    /// Differences from `createStream`:
-    /// 1) it doesn't account data in parent scope
-    /// 2) returned buffer owns resources (instead of TemporaryDataOnDisk itself)
-    /// If max_file_size > 0, then check that there's enough space on the disk and throw an exception in case of lack of free space
-    std::unique_ptr<WriteBufferFromFileBase> createRawStream(size_t max_file_size = 0);
-
-    std::vector<TemporaryFileStream *> getStreams() const;
-    bool empty() const;
-
-    const StatAtomic & getStat() const { return stat; }
-
-private:
-    FileSegmentsHolderPtr createCacheFile(size_t max_file_size);
-    TemporaryFileOnDiskHolder createRegularFile(size_t max_file_size);
-
-    mutable std::mutex mutex;
-    std::vector<TemporaryFileStreamPtr> streams TSA_GUARDED_BY(mutex);
-
-    typename CurrentMetrics::Metric current_metric_scope = CurrentMetrics::TemporaryFilesUnknown;
-};
-
-/*
- * Data can be written into this stream and then read.
- * After finish writing, call `finishWriting` and then either call `read` or 'getReadStream'(only one of the two) to read the data.
- * Account amount of data written to disk in parent scope.
- */
-class TemporaryFileStream : boost::noncopyable
-{
-public:
-    struct Reader
-    {
-        Reader(const String & path, const Block & header_, size_t size = 0);
-
-        explicit Reader(const String & path, size_t size = 0);
-
-        Block read();
-
-        const std::string path;
-        const size_t size;
-        const std::optional<Block> header;
-
-        std::unique_ptr<ReadBufferFromFileBase> in_file_buf;
-        std::unique_ptr<CompressedReadBuffer> in_compressed_buf;
-        std::unique_ptr<NativeReader> in_reader;
-    };
-
+/** Used to hold the wrapper and wrapped object together.
+ * This class provides a convenient way to manage the lifetime of both the wrapper and the wrapped object.
+ * The wrapper class (Impl) stores a reference to the wrapped object (Holder), and both objects are owned by this class.
+ * The lifetime of the wrapper and the wrapped object should be the same.
+ * This pattern is commonly used when the caller only needs to interact with the wrapper and doesn't need to be aware of the wrapped object.
+ * Examples: CompressedWriteBuffer and WriteBuffer, and NativeReader and ReadBuffer.
+ */
+template <typename Impl, typename Holder>
+class WrapperGuard
+{
+public:
+    template <typename ... Args>
+    explicit WrapperGuard(std::unique_ptr<Holder> holder_, Args && ... args)
+        : holder(std::move(holder_))
+        , impl(std::make_unique<Impl>(*holder, std::forward<Args>(args)...))
+    {
+        chassert(holder);
+        chassert(impl);
+    }
+
+    Impl * operator->() { chassert(impl); chassert(holder); return impl.get(); }
+    const Impl * operator->() const { chassert(impl); chassert(holder); return impl.get(); }
+    Impl & operator*() { chassert(impl); chassert(holder); return *impl; }
+    const Impl & operator*() const { chassert(impl); chassert(holder); return *impl; }
+    operator bool() const { return impl != nullptr; } /// NOLINT
+
+    const Holder * getHolder() const { return holder.get(); }
+    Holder * getHolder() { return holder.get(); }
+
+    void reset()
+    {
+        impl.reset();
+        holder.reset();
+    }
+
+protected:
+    std::unique_ptr<Holder> holder;
+    std::unique_ptr<Impl> impl;
+};
+
+/// Owns temporary file and provides access to it.
+/// On destruction, file is removed and all resources are freed.
+/// Lifetime of read/write buffers should be less than lifetime of TemporaryFileHolder.
+class TemporaryFileHolder
+{
+public:
+    TemporaryFileHolder();
+
+    virtual std::unique_ptr<WriteBuffer> write() = 0;
+    virtual std::unique_ptr<ReadBuffer> read(size_t buffer_size) const = 0;
+
+    /// Get location for logging
+    virtual String describeFilePath() const = 0;
+
+    virtual ~TemporaryFileHolder() = default;
+};
+
+/// Reads raw data from temporary file
+class TemporaryDataReadBuffer : public ReadBuffer
+{
+public:
+    explicit TemporaryDataReadBuffer(std::unique_ptr<ReadBuffer> in_);
+
+private:
+    friend class ::FileCacheTest_TemporaryDataReadBufferSize_Test;
+
+    bool nextImpl() override;
+
+    WrapperGuard<CompressedReadBuffer, ReadBuffer> compressed_buf;
+};
+
+/// Writes raw data to buffer provided by file_holder, and accounts amount of written data in parent scope.
+class TemporaryDataBuffer : public WriteBuffer
+{
+public:
     struct Stat
     {
-        /// Statistics for file
-        /// Non-atomic because we don't allow to `read` or `write` into single file from multiple threads
         size_t compressed_size = 0;
         size_t uncompressed_size = 0;
-        size_t num_rows = 0;
     };

-    TemporaryFileStream(TemporaryFileOnDiskHolder file_, const Block & header_, TemporaryDataOnDisk * parent_);
-    TemporaryFileStream(FileSegmentsHolderPtr segments_, const Block & header_, TemporaryDataOnDisk * parent_);
-
-    size_t write(const Block & block);
-    void flush();
+    explicit TemporaryDataBuffer(TemporaryDataOnDiskScope * parent_, size_t reserve_size = 0);
+    void nextImpl() override;
+    void finalizeImpl() override;
+    void cancelImpl() noexcept override;

+    std::unique_ptr<ReadBuffer> read();
     Stat finishWriting();
-    Stat finishWritingAsyncSafe();
-    bool isWriteFinished() const;

-    std::unique_ptr<Reader> getReadStream();
+    String describeFilePath() const;

-    Block read();
+    ~TemporaryDataBuffer() override;

-    String getPath() const;
-    size_t getSize() const;
-
-    Block getHeader() const { return header; }
-
-    /// Read finished and file released
-    bool isEof() const;
-
-    ~TemporaryFileStream();
-
 private:
     void updateAllocAndCheck();

-    /// Release everything, close reader and writer, delete file
-    void release();
-
-    TemporaryDataOnDisk * parent;
-
-    Block header;
-
-    /// Data can be stored in file directly or in the cache
-    TemporaryFileOnDiskHolder file;
-    FileSegmentsHolderPtr segment_holder;
+    TemporaryDataOnDiskScope * parent;
+    std::unique_ptr<TemporaryFileHolder> file_holder;
+    WrapperGuard<CompressedWriteBuffer, WriteBuffer> out_compressed_buf;
+    std::once_flag write_finished;

     Stat stat;
+};

-    std::once_flag finish_writing;
-
-    struct OutputWriter;
-    std::unique_ptr<OutputWriter> out_writer;
-
-    std::unique_ptr<Reader> in_reader;
+/// High level interfaces for reading and writing temporary data by blocks.
+using TemporaryBlockStreamReaderHolder = WrapperGuard<NativeReader, ReadBuffer>;
+
+class TemporaryBlockStreamHolder : public WrapperGuard<NativeWriter, TemporaryDataBuffer>
+{
+public:
+    TemporaryBlockStreamHolder(const Block & header_, TemporaryDataOnDiskScope * parent_, size_t reserve_size = 0);
+
+    TemporaryBlockStreamReaderHolder getReadStream() const;
+
+    TemporaryDataBuffer::Stat finishWriting() const;
+    const Block & getHeader() const { return header; }
+
+private:
+    Block header;
 };

 }
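The WrapperGuard template declared in the header above pairs a wrapper with the object it wraps so they share one lifetime. A small sketch of the pattern, assuming the usual constructors of CompressedWriteBuffer and WriteBufferFromFile (illustration only, not part of the diff):

    using CompressedFileWriter = WrapperGuard<CompressedWriteBuffer, WriteBufferFromFile>;

    void writeCompressedFile(const String & path, const char * data, size_t size)
    {
        /// The Holder (WriteBufferFromFile) is created first, the Impl (CompressedWriteBuffer) wraps it,
        /// and both are destroyed together, so the wrapped buffer cannot outlive its wrapper.
        CompressedFileWriter out(std::make_unique<WriteBufferFromFile>(path));
        out->write(data, size);            /// operator-> forwards to the CompressedWriteBuffer
        out->finalize();
        out.getHolder()->finalize();       /// CompressedWriteBuffer does not finalize the wrapped buffer itself
    }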
@@ -934,7 +934,7 @@ static Block generateBlock(size_t size = 0)
     return block;
 }

-static size_t readAllTemporaryData(TemporaryFileStream & stream)
+static size_t readAllTemporaryData(NativeReader & stream)
 {
     Block block;
     size_t read_rows = 0;
@@ -947,6 +947,7 @@ static size_t readAllTemporaryData(TemporaryFileStream & stream)
 }

 TEST_F(FileCacheTest, temporaryData)
+try
 {
     ServerUUID::setRandomForUnitTests();
     DB::FileCacheSettings settings;
@@ -959,7 +960,7 @@ TEST_F(FileCacheTest, temporaryData)
     file_cache.initialize();

     const auto user = FileCache::getCommonUser();
-    auto tmp_data_scope = std::make_shared<TemporaryDataOnDiskScope>(nullptr, &file_cache, TemporaryDataOnDiskSettings{});
+    auto tmp_data_scope = std::make_shared<TemporaryDataOnDiskScope>(&file_cache, TemporaryDataOnDiskSettings{});

     auto some_data_holder = file_cache.getOrSet(FileCacheKey::fromPath("some_data"), 0, 5_KiB, 5_KiB, CreateFileSegmentSettings{}, 0, user);

@@ -982,12 +983,17 @@ TEST_F(FileCacheTest, temporaryData)

     size_t size_used_with_temporary_data;
     size_t segments_used_with_temporary_data;

     {
-        auto tmp_data = std::make_unique<TemporaryDataOnDisk>(tmp_data_scope);
-
-        auto & stream = tmp_data->createStream(generateBlock());
-        ASSERT_GT(stream.write(generateBlock(100)), 0);
+        TemporaryBlockStreamHolder stream(generateBlock(), tmp_data_scope.get());
+        ASSERT_TRUE(stream);
+        /// Do nothing with stream, just create it and destroy.
+    }
+
+    {
+        TemporaryBlockStreamHolder stream(generateBlock(), tmp_data_scope.get());
+        ASSERT_GT(stream->write(generateBlock(100)), 0);

         ASSERT_GT(file_cache.getUsedCacheSize(), 0);
         ASSERT_GT(file_cache.getFileSegmentsNum(), 0);
@@ -995,22 +1001,22 @@ TEST_F(FileCacheTest, temporaryData)
         size_t used_size_before_attempt = file_cache.getUsedCacheSize();
         /// data can't be evicted because it is still held by `some_data_holder`
         ASSERT_THROW({
-            stream.write(generateBlock(2000));
-            stream.flush();
+            stream->write(generateBlock(2000));
+            stream.finishWriting();
         }, DB::Exception);

+        ASSERT_THROW(stream.finishWriting(), DB::Exception);
+
         ASSERT_EQ(file_cache.getUsedCacheSize(), used_size_before_attempt);
     }

     {
         size_t before_used_size = file_cache.getUsedCacheSize();
-        auto tmp_data = std::make_unique<TemporaryDataOnDisk>(tmp_data_scope);
-
-        auto write_buf_stream = tmp_data->createRawStream();
+        auto write_buf_stream = std::make_unique<TemporaryDataBuffer>(tmp_data_scope.get());

         write_buf_stream->write("1234567890", 10);
         write_buf_stream->write("abcde", 5);
-        auto read_buf = dynamic_cast<IReadableWriteBuffer *>(write_buf_stream.get())->tryGetReadBuffer();
+        auto read_buf = write_buf_stream->read();

         ASSERT_GT(file_cache.getUsedCacheSize(), before_used_size + 10);

@@ -1023,22 +1029,22 @@ TEST_F(FileCacheTest, temporaryData)
     }

     {
-        auto tmp_data = std::make_unique<TemporaryDataOnDisk>(tmp_data_scope);
-        auto & stream = tmp_data->createStream(generateBlock());
+        TemporaryBlockStreamHolder stream(generateBlock(), tmp_data_scope.get());

-        ASSERT_GT(stream.write(generateBlock(100)), 0);
+        ASSERT_GT(stream->write(generateBlock(100)), 0);

         some_data_holder.reset();

-        stream.write(generateBlock(2000));
+        stream->write(generateBlock(2000));

-        auto stat = stream.finishWriting();
+        stream.finishWriting();

-        ASSERT_TRUE(fs::exists(stream.getPath()));
-        ASSERT_GT(fs::file_size(stream.getPath()), 100);
+        String file_path = stream.getHolder()->describeFilePath().substr(strlen("fscache://"));

-        ASSERT_EQ(stat.num_rows, 2100);
-        ASSERT_EQ(readAllTemporaryData(stream), 2100);
+        ASSERT_TRUE(fs::exists(file_path)) << "File " << file_path << " should exist";
+        ASSERT_GT(fs::file_size(file_path), 100) << "File " << file_path << " should be larger than 100 bytes";
+
+        ASSERT_EQ(readAllTemporaryData(*stream.getReadStream()), 2100);

         size_used_with_temporary_data = file_cache.getUsedCacheSize();
         segments_used_with_temporary_data = file_cache.getFileSegmentsNum();
@@ -1054,6 +1060,11 @@ TEST_F(FileCacheTest, temporaryData)
     ASSERT_LE(file_cache.getUsedCacheSize(), size_used_before_temporary_data);
     ASSERT_LE(file_cache.getFileSegmentsNum(), segments_used_before_temporary_data);
 }
+catch (...)
+{
+    std::cerr << getCurrentExceptionMessage(true) << std::endl;
+    throw;
+}

 TEST_F(FileCacheTest, CachedReadBuffer)
 {
@@ -1148,18 +1159,22 @@ TEST_F(FileCacheTest, TemporaryDataReadBufferSize)
     DB::FileCache file_cache("cache", settings);
     file_cache.initialize();

-    auto tmp_data_scope = std::make_shared<TemporaryDataOnDiskScope>(/*volume=*/nullptr, &file_cache, /*settings=*/TemporaryDataOnDiskSettings{});
-
-    auto tmp_data = std::make_unique<TemporaryDataOnDisk>(tmp_data_scope);
+    auto tmp_data_scope = std::make_shared<TemporaryDataOnDiskScope>(&file_cache, TemporaryDataOnDiskSettings{});

     auto block = generateBlock(/*size=*/3);
-    auto & stream = tmp_data->createStream(block);
-    stream.write(block);
-    stream.finishWriting();
-
-    /// We allocate buffer of size min(getSize(), DBMS_DEFAULT_BUFFER_SIZE)
+    TemporaryBlockStreamHolder stream(block, tmp_data_scope.get());
+
+    stream->write(block);
+    auto stat = stream.finishWriting();
+
+    /// We allocate buffer of size min(stat.compressed_size, DBMS_DEFAULT_BUFFER_SIZE)
     /// We do care about buffer size because realistic external group by could generate 10^5 temporary files
-    ASSERT_EQ(stream.getSize(), 62);
+    ASSERT_EQ(stat.compressed_size, 62);
+
+    auto reader = stream.getReadStream();
+    auto * read_buf = reader.getHolder();
+    const auto & internal_buffer = static_cast<TemporaryDataReadBuffer *>(read_buf)->compressed_buf.getHolder()->internalBuffer();
+    ASSERT_EQ(internal_buffer.size(), 62);
 }

 /// Temporary data stored on disk
@@ -1170,16 +1185,14 @@ TEST_F(FileCacheTest, TemporaryDataReadBufferSize)
     disk = createDisk("temporary_data_read_buffer_size_test_dir");
     VolumePtr volume = std::make_shared<SingleDiskVolume>("volume", disk);

-    auto tmp_data_scope = std::make_shared<TemporaryDataOnDiskScope>(/*volume=*/volume, /*cache=*/nullptr, /*settings=*/TemporaryDataOnDiskSettings{});
-
-    auto tmp_data = std::make_unique<TemporaryDataOnDisk>(tmp_data_scope);
+    auto tmp_data_scope = std::make_shared<TemporaryDataOnDiskScope>(volume, TemporaryDataOnDiskSettings{});

     auto block = generateBlock(/*size=*/3);
-    auto & stream = tmp_data->createStream(block);
-    stream.write(block);
-    stream.finishWriting();
+    TemporaryBlockStreamHolder stream(block, tmp_data_scope.get());
+    stream->write(block);
+    auto stat = stream.finishWriting();

-    ASSERT_EQ(stream.getSize(), 62);
+    ASSERT_EQ(stat.compressed_size, 62);
 }
 }
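A short sketch of the two root-scope constructions exercised by the tests above (names from the diff; the cache, volume and block objects are assumed to exist):

    /// File-cache backed temporary data:
    auto cache_scope = std::make_shared<TemporaryDataOnDiskScope>(&file_cache, TemporaryDataOnDiskSettings{});
    /// Disk/volume backed temporary data:
    auto volume_scope = std::make_shared<TemporaryDataOnDiskScope>(volume, TemporaryDataOnDiskSettings{});

    TemporaryBlockStreamHolder stream(block, cache_scope.get());
    stream->write(block);
    auto stat = stream.finishWriting();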
@@ -31,7 +31,7 @@ CreateQueryUUIDs::CreateQueryUUIDs(const ASTCreateQuery & query, bool generate_r
     /// If we generate random UUIDs for already existing tables then those UUIDs will not be correct making those inner target table inaccessible.
     /// Thus it's not safe for example to replace
     /// "ATTACH MATERIALIZED VIEW mv AS SELECT a FROM b" with
-    /// "ATTACH MATERIALIZED VIEW mv TO INNER UUID "XXXX" AS SELECT a FROM b"
+    /// "ATTACH MATERIALIZED VIEW mv TO INNER UUID "248372b7-02c4-4c88-a5e1-282a83cc572a" AS SELECT a FROM b"
     /// This replacement is safe only for CREATE queries when inner target tables don't exist yet.
     if (!query.attach)
     {
@ -274,7 +274,7 @@ FiltersForTableExpressionMap collectFiltersForAnalysis(const QueryTreeNodePtr &
|
|||||||
return res;
|
return res;
|
||||||
}
|
}
|
||||||
|
|
||||||
FiltersForTableExpressionMap collectFiltersForAnalysis(const QueryTreeNodePtr & query_tree_node, SelectQueryOptions & select_query_options)
|
FiltersForTableExpressionMap collectFiltersForAnalysis(const QueryTreeNodePtr & query_tree_node, const SelectQueryOptions & select_query_options)
|
||||||
{
|
{
|
||||||
if (select_query_options.only_analyze)
|
if (select_query_options.only_analyze)
|
||||||
return {};
|
return {};
|
||||||
|
@ -659,6 +659,7 @@ std::unique_ptr<ExpressionStep> createComputeAliasColumnsStep(
|
|||||||
}
|
}
|
||||||
|
|
||||||
JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expression,
|
JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expression,
|
||||||
|
const QueryTreeNodePtr & parent_join_tree,
|
||||||
const SelectQueryInfo & select_query_info,
|
const SelectQueryInfo & select_query_info,
|
||||||
const SelectQueryOptions & select_query_options,
|
const SelectQueryOptions & select_query_options,
|
||||||
PlannerContextPtr & planner_context,
|
PlannerContextPtr & planner_context,
|
||||||
@ -696,8 +697,6 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres
|
|||||||
table_expression_query_info.table_expression = table_expression;
|
table_expression_query_info.table_expression = table_expression;
|
||||||
if (const auto & filter_actions = table_expression_data.getFilterActions())
|
if (const auto & filter_actions = table_expression_data.getFilterActions())
|
||||||
table_expression_query_info.filter_actions_dag = std::make_shared<const ActionsDAG>(filter_actions->clone());
|
table_expression_query_info.filter_actions_dag = std::make_shared<const ActionsDAG>(filter_actions->clone());
|
||||||
table_expression_query_info.current_table_chosen_for_reading_with_parallel_replicas
|
|
||||||
= table_node == planner_context->getGlobalPlannerContext()->parallel_replicas_table;
|
|
||||||
|
|
||||||
size_t max_streams = settings[Setting::max_threads];
|
size_t max_streams = settings[Setting::max_threads];
|
||||||
size_t max_threads_execute_query = settings[Setting::max_threads];
|
size_t max_threads_execute_query = settings[Setting::max_threads];
|
||||||
@ -912,12 +911,25 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres
|
|||||||
/// It is just a safety check needed until we have a proper sending plan to replicas.
|
/// It is just a safety check needed until we have a proper sending plan to replicas.
|
||||||
/// If we have a non-trivial storage like View it might create its own Planner inside read(), run findTableForParallelReplicas()
|
/// If we have a non-trivial storage like View it might create its own Planner inside read(), run findTableForParallelReplicas()
|
||||||
/// and find some other table that might be used for reading with parallel replicas. It will lead to errors.
|
/// and find some other table that might be used for reading with parallel replicas. It will lead to errors.
|
||||||
const bool other_table_already_chosen_for_reading_with_parallel_replicas
|
const bool no_tables_or_another_table_chosen_for_reading_with_parallel_replicas_mode
|
||||||
= planner_context->getGlobalPlannerContext()->parallel_replicas_table
|
= query_context->canUseParallelReplicasOnFollower()
|
||||||
&& !table_expression_query_info.current_table_chosen_for_reading_with_parallel_replicas;
|
&& table_node != planner_context->getGlobalPlannerContext()->parallel_replicas_table;
|
||||||
if (other_table_already_chosen_for_reading_with_parallel_replicas)
|
if (no_tables_or_another_table_chosen_for_reading_with_parallel_replicas_mode)
|
||||||
planner_context->getMutableQueryContext()->setSetting("allow_experimental_parallel_reading_from_replicas", Field(0));
|
{
|
||||||
|
auto mutable_context = Context::createCopy(query_context);
|
||||||
|
mutable_context->setSetting("allow_experimental_parallel_reading_from_replicas", Field(0));
|
||||||
|
storage->read(
|
||||||
|
query_plan,
|
||||||
|
columns_names,
|
||||||
|
storage_snapshot,
|
||||||
|
table_expression_query_info,
|
||||||
|
std::move(mutable_context),
|
||||||
|
from_stage,
|
||||||
|
max_block_size,
|
||||||
|
max_streams);
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
storage->read(
|
storage->read(
|
||||||
query_plan,
|
query_plan,
|
||||||
columns_names,
|
columns_names,
|
||||||
@ -927,6 +939,7 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres
|
|||||||
from_stage,
|
from_stage,
|
||||||
max_block_size,
|
max_block_size,
|
||||||
max_streams);
|
max_streams);
|
||||||
|
}
|
||||||
|
|
||||||
auto parallel_replicas_enabled_for_storage = [](const StoragePtr & table, const Settings & query_settings)
|
auto parallel_replicas_enabled_for_storage = [](const StoragePtr & table, const Settings & query_settings)
|
||||||
{
|
{
|
||||||
@ -942,6 +955,19 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres
|
|||||||
/// query_plan can be empty if there is nothing to read
|
/// query_plan can be empty if there is nothing to read
|
||||||
if (query_plan.isInitialized() && parallel_replicas_enabled_for_storage(storage, settings))
|
if (query_plan.isInitialized() && parallel_replicas_enabled_for_storage(storage, settings))
|
||||||
{
|
{
|
||||||
|
const bool allow_parallel_replicas_for_table_expression = [](const QueryTreeNodePtr & join_tree_node)
|
||||||
|
{
|
||||||
|
const JoinNode * join_node = join_tree_node->as<JoinNode>();
|
||||||
|
if (!join_node)
|
||||||
|
return true;
|
||||||
|
|
||||||
|
const auto join_kind = join_node->getKind();
|
||||||
|
if (join_kind == JoinKind::Left || join_kind == JoinKind::Right || join_kind == JoinKind::Inner)
|
||||||
|
return true;
|
||||||
|
|
||||||
|
return false;
|
||||||
|
}(parent_join_tree);
|
||||||
|
|
||||||
if (query_context->canUseParallelReplicasCustomKey() && query_context->getClientInfo().distributed_depth == 0)
|
if (query_context->canUseParallelReplicasCustomKey() && query_context->getClientInfo().distributed_depth == 0)
|
||||||
{
|
{
|
||||||
if (auto cluster = query_context->getClusterForParallelReplicas();
|
if (auto cluster = query_context->getClusterForParallelReplicas();
|
||||||
@ -964,7 +990,7 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres
|
|||||||
query_plan = std::move(query_plan_parallel_replicas);
|
query_plan = std::move(query_plan_parallel_replicas);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
else if (ClusterProxy::canUseParallelReplicasOnInitiator(query_context))
|
else if (ClusterProxy::canUseParallelReplicasOnInitiator(query_context) && allow_parallel_replicas_for_table_expression)
|
||||||
{
|
{
|
||||||
// (1) find read step
|
// (1) find read step
|
||||||
QueryPlan::Node * node = query_plan.getRootNode();
|
QueryPlan::Node * node = query_plan.getRootNode();
|
||||||
@ -1794,7 +1820,8 @@ JoinTreeQueryPlan buildJoinTreeQueryPlan(const QueryTreeNodePtr & query_node,
|
|||||||
const ColumnIdentifierSet & outer_scope_columns,
|
const ColumnIdentifierSet & outer_scope_columns,
|
||||||
PlannerContextPtr & planner_context)
|
PlannerContextPtr & planner_context)
|
||||||
{
|
{
|
||||||
auto table_expressions_stack = buildTableExpressionsStack(query_node->as<QueryNode &>().getJoinTree());
|
const QueryTreeNodePtr & join_tree_node = query_node->as<QueryNode &>().getJoinTree();
|
||||||
|
auto table_expressions_stack = buildTableExpressionsStack(join_tree_node);
|
||||||
size_t table_expressions_stack_size = table_expressions_stack.size();
|
size_t table_expressions_stack_size = table_expressions_stack.size();
|
||||||
bool is_single_table_expression = table_expressions_stack_size == 1;
|
bool is_single_table_expression = table_expressions_stack_size == 1;
|
||||||
|
|
||||||
@ -1829,7 +1856,9 @@ JoinTreeQueryPlan buildJoinTreeQueryPlan(const QueryTreeNodePtr & query_node,
|
|||||||
* Examples: Distributed, LiveView, Merge storages.
|
* Examples: Distributed, LiveView, Merge storages.
|
||||||
*/
|
*/
|
||||||
auto left_table_expression = table_expressions_stack.front();
|
auto left_table_expression = table_expressions_stack.front();
|
||||||
auto left_table_expression_query_plan = buildQueryPlanForTableExpression(left_table_expression,
|
auto left_table_expression_query_plan = buildQueryPlanForTableExpression(
|
||||||
|
left_table_expression,
|
||||||
|
join_tree_node,
|
||||||
select_query_info,
|
select_query_info,
|
||||||
select_query_options,
|
select_query_options,
|
||||||
planner_context,
|
planner_context,
|
||||||
@ -1902,7 +1931,9 @@ JoinTreeQueryPlan buildJoinTreeQueryPlan(const QueryTreeNodePtr & query_node,
|
|||||||
* table expression in subquery.
|
* table expression in subquery.
|
||||||
*/
|
*/
|
||||||
bool is_remote = planner_context->getTableExpressionDataOrThrow(table_expression).isRemote();
|
bool is_remote = planner_context->getTableExpressionDataOrThrow(table_expression).isRemote();
|
||||||
query_plans_stack.push_back(buildQueryPlanForTableExpression(table_expression,
|
query_plans_stack.push_back(buildQueryPlanForTableExpression(
|
||||||
|
table_expression,
|
||||||
|
join_tree_node,
|
||||||
select_query_info,
|
select_query_info,
|
||||||
select_query_options,
|
select_query_options,
|
||||||
planner_context,
|
planner_context,
|
||||||
|
@ -23,6 +23,8 @@
|
|||||||
#include <Storages/StorageMaterializedView.h>
|
#include <Storages/StorageMaterializedView.h>
|
||||||
#include <Storages/buildQueryTreeForShard.h>
|
#include <Storages/buildQueryTreeForShard.h>
|
||||||
|
|
||||||
|
#include <ranges>
|
||||||
|
|
||||||
namespace DB
|
namespace DB
|
||||||
{
|
{
|
||||||
namespace Setting
|
namespace Setting
|
||||||
@ -38,12 +40,12 @@ namespace ErrorCodes
|
|||||||
|
|
||||||
/// Returns a list of (sub)queries (candidates) which may support parallel replicas.
|
/// Returns a list of (sub)queries (candidates) which may support parallel replicas.
|
||||||
/// The rule is :
|
/// The rule is :
|
||||||
/// subquery has only LEFT or ALL INNER JOIN (or none), and left part is MergeTree table or subquery candidate as well.
|
/// subquery has only LEFT / RIGHT / ALL INNER JOIN (or none), and left / right part is MergeTree table or subquery candidate as well.
|
||||||
///
|
///
|
||||||
/// Additional checks are required, so we return many candidates. The innermost subquery is on top.
|
/// Additional checks are required, so we return many candidates. The innermost subquery is on top.
|
||||||
std::stack<const QueryNode *> getSupportingParallelReplicasQuery(const IQueryTreeNode * query_tree_node)
|
std::vector<const QueryNode *> getSupportingParallelReplicasQuery(const IQueryTreeNode * query_tree_node)
|
||||||
{
|
{
|
||||||
std::stack<const QueryNode *> res;
|
std::vector<const QueryNode *> res;
|
||||||
|
|
||||||
while (query_tree_node)
|
while (query_tree_node)
|
||||||
{
|
{
|
||||||
@ -75,7 +77,7 @@ std::stack<const QueryNode *> getSupportingParallelReplicasQuery(const IQueryTre
|
|||||||
{
|
{
|
||||||
const auto & query_node_to_process = query_tree_node->as<QueryNode &>();
|
const auto & query_node_to_process = query_tree_node->as<QueryNode &>();
|
||||||
query_tree_node = query_node_to_process.getJoinTree().get();
|
query_tree_node = query_node_to_process.getJoinTree().get();
|
||||||
res.push(&query_node_to_process);
|
res.push_back(&query_node_to_process);
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
case QueryTreeNodeType::UNION:
|
case QueryTreeNodeType::UNION:
|
||||||
@ -98,17 +100,16 @@ std::stack<const QueryNode *> getSupportingParallelReplicasQuery(const IQueryTre
|
|||||||
case QueryTreeNodeType::JOIN:
|
case QueryTreeNodeType::JOIN:
|
||||||
{
|
{
|
||||||
const auto & join_node = query_tree_node->as<JoinNode &>();
|
const auto & join_node = query_tree_node->as<JoinNode &>();
|
||||||
auto join_kind = join_node.getKind();
|
const auto join_kind = join_node.getKind();
|
||||||
auto join_strictness = join_node.getStrictness();
|
const auto join_strictness = join_node.getStrictness();
|
||||||
|
|
||||||
bool can_parallelize_join =
|
if (join_kind == JoinKind::Left || (join_kind == JoinKind::Inner && join_strictness == JoinStrictness::All))
|
||||||
join_kind == JoinKind::Left
|
query_tree_node = join_node.getLeftTableExpression().get();
|
||||||
|| (join_kind == JoinKind::Inner && join_strictness == JoinStrictness::All);
|
else if (join_kind == JoinKind::Right)
|
||||||
|
query_tree_node = join_node.getRightTableExpression().get();
|
||||||
if (!can_parallelize_join)
|
else
|
||||||
return {};
|
return {};
|
||||||
|
|
||||||
query_tree_node = join_node.getLeftTableExpression().get();
|
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
default:
|
default:
|
||||||
@ -163,14 +164,27 @@ QueryTreeNodePtr replaceTablesWithDummyTables(QueryTreeNodePtr query, const Cont
|
|||||||
return query->cloneAndReplace(visitor.replacement_map);
|
return query->cloneAndReplace(visitor.replacement_map);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#ifdef DUMP_PARALLEL_REPLICAS_QUERY_CANDIDATES
|
||||||
|
static void dumpStack(const std::vector<const QueryNode *> & stack)
|
||||||
|
{
|
||||||
|
std::ranges::reverse_view rv{stack};
|
||||||
|
for (const auto * node : rv)
|
||||||
|
LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "{}\n{}", CityHash_v1_0_2::Hash128to64(node->getTreeHash()), node->dumpTree());
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
/// Find the best candidate for parallel replicas execution by verifying query plan.
|
/// Find the best candidate for parallel replicas execution by verifying query plan.
|
||||||
/// If query plan has only Expression, Filter of Join steps, we can execute it fully remotely and check the next query.
|
/// If query plan has only Expression, Filter or Join steps, we can execute it fully remotely and check the next query.
|
||||||
/// Otherwise we can execute current query up to WithMergableStage only.
|
/// Otherwise we can execute current query up to WithMergableStage only.
|
||||||
const QueryNode * findQueryForParallelReplicas(
|
const QueryNode * findQueryForParallelReplicas(
|
||||||
std::stack<const QueryNode *> stack,
|
std::vector<const QueryNode *> stack,
|
||||||
const std::unordered_map<const QueryNode *, const QueryPlan::Node *> & mapping,
|
const std::unordered_map<const QueryNode *, const QueryPlan::Node *> & mapping,
|
||||||
const Settings & settings)
|
const Settings & settings)
|
||||||
{
|
{
|
||||||
|
#ifdef DUMP_PARALLEL_REPLICAS_QUERY_CANDIDATES
|
||||||
|
dumpStack(stack);
|
||||||
|
#endif
|
||||||
|
|
||||||
struct Frame
|
struct Frame
|
||||||
{
|
{
|
||||||
const QueryPlan::Node * node = nullptr;
|
const QueryPlan::Node * node = nullptr;
|
||||||
@ -189,8 +203,8 @@ const QueryNode * findQueryForParallelReplicas(
|
|||||||
|
|
||||||
while (!stack.empty())
|
while (!stack.empty())
|
||||||
{
|
{
|
||||||
const QueryNode * const subquery_node = stack.top();
|
const QueryNode * const subquery_node = stack.back();
|
||||||
stack.pop();
|
stack.pop_back();
|
||||||
|
|
||||||
auto it = mapping.find(subquery_node);
|
auto it = mapping.find(subquery_node);
|
||||||
/// This should not happen ideally.
|
/// This should not happen ideally.
|
||||||
@ -236,7 +250,7 @@ const QueryNode * findQueryForParallelReplicas(
|
|||||||
else
|
else
|
||||||
{
|
{
|
||||||
const auto * join = typeid_cast<JoinStep *>(step);
|
const auto * join = typeid_cast<JoinStep *>(step);
|
||||||
/// We've checked that JOIN is INNER/LEFT in query tree.
|
/// We've checked that JOIN is INNER/LEFT/RIGHT at the query tree level before.
|
||||||
/// Don't distribute UNION node.
|
/// Don't distribute UNION node.
|
||||||
if (!join)
|
if (!join)
|
||||||
return res;
|
return res;
|
||||||
@ -263,7 +277,7 @@ const QueryNode * findQueryForParallelReplicas(
|
|||||||
return res;
|
return res;
|
||||||
}
|
}
|
||||||
|
|
||||||
const QueryNode * findQueryForParallelReplicas(const QueryTreeNodePtr & query_tree_node, SelectQueryOptions & select_query_options)
|
const QueryNode * findQueryForParallelReplicas(const QueryTreeNodePtr & query_tree_node, const SelectQueryOptions & select_query_options)
|
||||||
{
|
{
|
||||||
if (select_query_options.only_analyze)
|
if (select_query_options.only_analyze)
|
||||||
return nullptr;
|
return nullptr;
|
||||||
@ -287,7 +301,7 @@ const QueryNode * findQueryForParallelReplicas(const QueryTreeNodePtr & query_tr
|
|||||||
return nullptr;
|
return nullptr;
|
||||||
|
|
||||||
/// We don't have any subquery and storage can process parallel replicas by itself.
|
/// We don't have any subquery and storage can process parallel replicas by itself.
|
||||||
if (stack.top() == query_tree_node.get())
|
if (stack.back() == query_tree_node.get())
|
||||||
return nullptr;
|
return nullptr;
|
||||||
|
|
||||||
/// This is needed to avoid infinite recursion.
|
/// This is needed to avoid infinite recursion.
|
||||||
@ -310,31 +324,33 @@ const QueryNode * findQueryForParallelReplicas(const QueryTreeNodePtr & query_tr
|
|||||||
const auto & mapping = planner.getQueryNodeToPlanStepMapping();
|
const auto & mapping = planner.getQueryNodeToPlanStepMapping();
|
||||||
const auto * res = findQueryForParallelReplicas(new_stack, mapping, context->getSettingsRef());
|
const auto * res = findQueryForParallelReplicas(new_stack, mapping, context->getSettingsRef());
|
||||||
|
|
||||||
/// Now, return a query from initial stack.
|
|
||||||
if (res)
|
if (res)
|
||||||
{
|
{
|
||||||
|
// find query in initial stack
|
||||||
while (!new_stack.empty())
|
while (!new_stack.empty())
|
||||||
{
|
{
|
||||||
if (res == new_stack.top())
|
if (res == new_stack.back())
|
||||||
return stack.top();
|
{
|
||||||
|
res = stack.back();
|
||||||
stack.pop();
|
break;
|
||||||
new_stack.pop();
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
stack.pop_back();
|
||||||
|
new_stack.pop_back();
|
||||||
|
}
|
||||||
|
}
|
||||||
return res;
|
return res;
|
||||||
}
|
}
|
||||||
|
|
||||||
static const TableNode * findTableForParallelReplicas(const IQueryTreeNode * query_tree_node)
|
static const TableNode * findTableForParallelReplicas(const IQueryTreeNode * query_tree_node)
|
||||||
{
|
{
|
||||||
std::stack<const IQueryTreeNode *> right_join_nodes;
|
std::stack<const IQueryTreeNode *> join_nodes;
|
||||||
while (query_tree_node || !right_join_nodes.empty())
|
while (query_tree_node || !join_nodes.empty())
|
||||||
{
|
{
|
||||||
if (!query_tree_node)
|
if (!query_tree_node)
|
||||||
{
|
{
|
||||||
query_tree_node = right_join_nodes.top();
|
query_tree_node = join_nodes.top();
|
||||||
right_join_nodes.pop();
|
join_nodes.pop();
|
||||||
}
|
}
|
||||||
|
|
||||||
auto join_tree_node_type = query_tree_node->getNodeType();
|
auto join_tree_node_type = query_tree_node->getNodeType();
|
||||||
@ -383,8 +399,23 @@ static const TableNode * findTableForParallelReplicas(const IQueryTreeNode * que
|
|||||||
case QueryTreeNodeType::JOIN:
|
case QueryTreeNodeType::JOIN:
|
||||||
{
|
{
|
||||||
const auto & join_node = query_tree_node->as<JoinNode &>();
|
const auto & join_node = query_tree_node->as<JoinNode &>();
|
||||||
|
const auto join_kind = join_node.getKind();
|
||||||
|
const auto join_strictness = join_node.getStrictness();
|
||||||
|
|
||||||
|
if (join_kind == JoinKind::Left || (join_kind == JoinKind::Inner && join_strictness == JoinStrictness::All))
|
||||||
|
{
|
||||||
query_tree_node = join_node.getLeftTableExpression().get();
|
query_tree_node = join_node.getLeftTableExpression().get();
|
||||||
right_join_nodes.push(join_node.getRightTableExpression().get());
|
join_nodes.push(join_node.getRightTableExpression().get());
|
||||||
|
}
|
||||||
|
else if (join_kind == JoinKind::Right)
|
||||||
|
{
|
||||||
|
query_tree_node = join_node.getRightTableExpression().get();
|
||||||
|
join_nodes.push(join_node.getLeftTableExpression().get());
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
return nullptr;
|
||||||
|
}
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
default:
|
default:
|
||||||
@ -400,7 +431,7 @@ static const TableNode * findTableForParallelReplicas(const IQueryTreeNode * que
|
|||||||
return nullptr;
|
return nullptr;
|
||||||
}
|
}
|
||||||
|
|
||||||
const TableNode * findTableForParallelReplicas(const QueryTreeNodePtr & query_tree_node, SelectQueryOptions & select_query_options)
|
const TableNode * findTableForParallelReplicas(const QueryTreeNodePtr & query_tree_node, const SelectQueryOptions & select_query_options)
|
||||||
{
|
{
|
||||||
if (select_query_options.only_analyze)
|
if (select_query_options.only_analyze)
|
||||||
return nullptr;
|
return nullptr;
|
||||||
|
@ -15,10 +15,10 @@ struct SelectQueryOptions;
|
|||||||
|
|
||||||
/// Find a query which can be executed with parallel replicas up to WithMergableStage.
|
/// Find a query which can be executed with parallel replicas up to WithMergableStage.
|
||||||
/// Returned query will always contain some (>1) subqueries, possibly with joins.
|
/// Returned query will always contain some (>1) subqueries, possibly with joins.
|
||||||
const QueryNode * findQueryForParallelReplicas(const QueryTreeNodePtr & query_tree_node, SelectQueryOptions & select_query_options);
|
const QueryNode * findQueryForParallelReplicas(const QueryTreeNodePtr & query_tree_node, const SelectQueryOptions & select_query_options);
|
||||||
|
|
||||||
/// Find a table from which we should read on follower replica. It's the left-most table within all JOINs and UNIONs.
|
/// Find a table from which we should read on follower replica. It's the left-most table within all JOINs and UNIONs.
|
||||||
const TableNode * findTableForParallelReplicas(const QueryTreeNodePtr & query_tree_node, SelectQueryOptions & select_query_options);
|
const TableNode * findTableForParallelReplicas(const QueryTreeNodePtr & query_tree_node, const SelectQueryOptions & select_query_options);
|
||||||
|
|
||||||
struct JoinTreeQueryPlan;
|
struct JoinTreeQueryPlan;
|
||||||
|
|
||||||
|
@ -79,7 +79,7 @@ bool ExecutionThreadContext::executeTask()
|
|||||||
|
|
||||||
if (trace_processors)
|
if (trace_processors)
|
||||||
{
|
{
|
||||||
span = std::make_unique<OpenTelemetry::SpanHolder>(node->processor->getName());
|
span = std::make_unique<OpenTelemetry::SpanHolder>(node->processor->getUniqID());
|
||||||
span->addAttribute("thread_number", thread_number);
|
span->addAttribute("thread_number", thread_number);
|
||||||
}
|
}
|
||||||
std::optional<Stopwatch> execution_time_watch;
|
std::optional<Stopwatch> execution_time_watch;
|
||||||
|
@ -10,6 +10,20 @@
|
|||||||
namespace DB
|
namespace DB
|
||||||
{
|
{
|
||||||
|
|
||||||
|
IProcessor::IProcessor()
|
||||||
|
{
|
||||||
|
processor_index = CurrentThread::isInitialized() ? CurrentThread::get().getNextPipelineProcessorIndex() : 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
IProcessor::IProcessor(InputPorts inputs_, OutputPorts outputs_) : inputs(std::move(inputs_)), outputs(std::move(outputs_))
|
||||||
|
{
|
||||||
|
for (auto & port : inputs)
|
||||||
|
port.processor = this;
|
||||||
|
for (auto & port : outputs)
|
||||||
|
port.processor = this;
|
||||||
|
processor_index = CurrentThread::isInitialized() ? CurrentThread::get().getNextPipelineProcessorIndex() : 0;
|
||||||
|
}
|
||||||
|
|
||||||
void IProcessor::setQueryPlanStep(IQueryPlanStep * step, size_t group)
|
void IProcessor::setQueryPlanStep(IQueryPlanStep * step, size_t group)
|
||||||
{
|
{
|
||||||
query_plan_step = step;
|
query_plan_step = step;
|
||||||
@ -18,6 +32,7 @@ void IProcessor::setQueryPlanStep(IQueryPlanStep * step, size_t group)
|
|||||||
{
|
{
|
||||||
plan_step_name = step->getName();
|
plan_step_name = step->getName();
|
||||||
plan_step_description = step->getStepDescription();
|
plan_step_description = step->getStepDescription();
|
||||||
|
step_uniq_id = step->getUniqID();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1,9 +1,12 @@
|
|||||||
#pragma once
|
#pragma once
|
||||||
|
|
||||||
#include <memory>
|
#include <Interpreters/Context.h>
|
||||||
#include <Processors/Port.h>
|
#include <Processors/Port.h>
|
||||||
|
#include <Processors/QueryPlan/IQueryPlanStep.h>
|
||||||
|
#include <Common/CurrentThread.h>
|
||||||
#include <Common/Stopwatch.h>
|
#include <Common/Stopwatch.h>
|
||||||
|
|
||||||
|
#include <memory>
|
||||||
|
|
||||||
class EventCounter;
|
class EventCounter;
|
||||||
|
|
||||||
@ -121,19 +124,14 @@ protected:
|
|||||||
OutputPorts outputs;
|
OutputPorts outputs;
|
||||||
|
|
||||||
public:
|
public:
|
||||||
IProcessor() = default;
|
IProcessor();
|
||||||
|
|
||||||
IProcessor(InputPorts inputs_, OutputPorts outputs_)
|
IProcessor(InputPorts inputs_, OutputPorts outputs_);
|
||||||
: inputs(std::move(inputs_)), outputs(std::move(outputs_))
|
|
||||||
{
|
|
||||||
for (auto & port : inputs)
|
|
||||||
port.processor = this;
|
|
||||||
for (auto & port : outputs)
|
|
||||||
port.processor = this;
|
|
||||||
}
|
|
||||||
|
|
||||||
virtual String getName() const = 0;
|
virtual String getName() const = 0;
|
||||||
|
|
||||||
|
String getUniqID() const { return fmt::format("{}_{}", getName(), processor_index); }
|
||||||
|
|
||||||
enum class Status : uint8_t
|
enum class Status : uint8_t
|
||||||
{
|
{
|
||||||
/// Processor needs some data at its inputs to proceed.
|
/// Processor needs some data at its inputs to proceed.
|
||||||
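The new getUniqID() combines the processor name with a per-query index handed out by CurrentThread. A small self-contained sketch of the same naming scheme, assuming a plain atomic counter in place of the query context:

#include <atomic>
#include <cstddef>
#include <string>

static std::atomic<std::size_t> next_processor_index{0};

struct DummyProcessor
{
    // Each instance grabs the next index at construction time, so two processors with the
    // same name still get distinct ids like "Expression_3" and "Expression_7".
    std::size_t processor_index = next_processor_index.fetch_add(1);
    std::string getName() const { return "Expression"; }
    std::string getUniqID() const { return getName() + "_" + std::to_string(processor_index); }
};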
@ -314,6 +312,7 @@ public:
|
|||||||
void setQueryPlanStep(IQueryPlanStep * step, size_t group = 0);
|
void setQueryPlanStep(IQueryPlanStep * step, size_t group = 0);
|
||||||
|
|
||||||
IQueryPlanStep * getQueryPlanStep() const { return query_plan_step; }
|
IQueryPlanStep * getQueryPlanStep() const { return query_plan_step; }
|
||||||
|
const String & getStepUniqID() const { return step_uniq_id; }
|
||||||
size_t getQueryPlanStepGroup() const { return query_plan_step_group; }
|
size_t getQueryPlanStepGroup() const { return query_plan_step_group; }
|
||||||
const String & getPlanStepName() const { return plan_step_name; }
|
const String & getPlanStepName() const { return plan_step_name; }
|
||||||
const String & getPlanStepDescription() const { return plan_step_description; }
|
const String & getPlanStepDescription() const { return plan_step_description; }
|
||||||
@ -407,7 +406,10 @@ private:
|
|||||||
size_t stream_number = NO_STREAM;
|
size_t stream_number = NO_STREAM;
|
||||||
|
|
||||||
IQueryPlanStep * query_plan_step = nullptr;
|
IQueryPlanStep * query_plan_step = nullptr;
|
||||||
|
String step_uniq_id;
|
||||||
size_t query_plan_step_group = 0;
|
size_t query_plan_step_group = 0;
|
||||||
|
|
||||||
|
size_t processor_index = 0;
|
||||||
String plan_step_name;
|
String plan_step_name;
|
||||||
String plan_step_description;
|
String plan_step_description;
|
||||||
};
|
};
|
||||||
|
@ -5,6 +5,7 @@
|
|||||||
#include <Interpreters/ExpressionActions.h>
|
#include <Interpreters/ExpressionActions.h>
|
||||||
#include <IO/Operators.h>
|
#include <IO/Operators.h>
|
||||||
#include <Common/JSONBuilder.h>
|
#include <Common/JSONBuilder.h>
|
||||||
|
#include <DataTypes/DataTypeFactory.h>
|
||||||
#include <DataTypes/DataTypeLowCardinality.h>
|
#include <DataTypes/DataTypeLowCardinality.h>
|
||||||
#include <DataTypes/DataTypesNumber.h>
|
#include <DataTypes/DataTypesNumber.h>
|
||||||
#include <Functions/IFunction.h>
|
#include <Functions/IFunction.h>
|
||||||
@ -52,7 +53,7 @@ static ActionsAndName splitSingleAndFilter(ActionsDAG & dag, const ActionsDAG::N
|
|||||||
auto filter_type = removeLowCardinality(split_filter_node->result_type);
|
auto filter_type = removeLowCardinality(split_filter_node->result_type);
|
||||||
if (!filter_type->onlyNull() && !isUInt8(removeNullable(filter_type)))
|
if (!filter_type->onlyNull() && !isUInt8(removeNullable(filter_type)))
|
||||||
{
|
{
|
||||||
DataTypePtr cast_type = std::make_shared<DataTypeUInt8>();
|
DataTypePtr cast_type = DataTypeFactory::instance().get("Bool");
|
||||||
if (filter_type->isNullable())
|
if (filter_type->isNullable())
|
||||||
cast_type = std::make_shared<DataTypeNullable>(std::move(cast_type));
|
cast_type = std::make_shared<DataTypeNullable>(std::move(cast_type));
|
||||||
|
|
||||||
|
@ -10,6 +10,11 @@ namespace ErrorCodes
|
|||||||
extern const int LOGICAL_ERROR;
|
extern const int LOGICAL_ERROR;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
IQueryPlanStep::IQueryPlanStep()
|
||||||
|
{
|
||||||
|
step_index = CurrentThread::isInitialized() ? CurrentThread::get().getNextPlanStepIndex() : 0;
|
||||||
|
}
|
||||||
|
|
||||||
void IQueryPlanStep::updateInputHeaders(Headers input_headers_)
|
void IQueryPlanStep::updateInputHeaders(Headers input_headers_)
|
||||||
{
|
{
|
||||||
input_headers = std::move(input_headers_);
|
input_headers = std::move(input_headers_);
|
||||||
|
@ -1,8 +1,13 @@
|
|||||||
#pragma once
|
#pragma once
|
||||||
|
|
||||||
|
#include <Common/CurrentThread.h>
|
||||||
#include <Core/Block.h>
|
#include <Core/Block.h>
|
||||||
#include <Core/SortDescription.h>
|
#include <Core/SortDescription.h>
|
||||||
|
#include <Interpreters/Context.h>
|
||||||
#include <Processors/QueryPlan/BuildQueryPipelineSettings.h>
|
#include <Processors/QueryPlan/BuildQueryPipelineSettings.h>
|
||||||
|
|
||||||
|
#include <fmt/core.h>
|
||||||
|
|
||||||
namespace DB
|
namespace DB
|
||||||
{
|
{
|
||||||
|
|
||||||
@ -26,6 +31,8 @@ using Headers = std::vector<Header>;
|
|||||||
class IQueryPlanStep
|
class IQueryPlanStep
|
||||||
{
|
{
|
||||||
public:
|
public:
|
||||||
|
IQueryPlanStep();
|
||||||
|
|
||||||
virtual ~IQueryPlanStep() = default;
|
virtual ~IQueryPlanStep() = default;
|
||||||
|
|
||||||
virtual String getName() const = 0;
|
virtual String getName() const = 0;
|
||||||
@ -77,6 +84,8 @@ public:
|
|||||||
|
|
||||||
/// Updates the input streams of the given step. Used during query plan optimizations.
|
/// Updates the input streams of the given step. Used during query plan optimizations.
|
||||||
/// It won't do any validation of new streams, so it is your responsibility to ensure that this update doesn't break anything
|
/// It won't do any validation of new streams, so it is your responsibility to ensure that this update doesn't break anything
|
||||||
|
String getUniqID() const { return fmt::format("{}_{}", getName(), step_index); }
|
||||||
|
|
||||||
/// (e.g. you correctly remove / add columns).
|
/// (e.g. you correctly remove / add columns).
|
||||||
void updateInputHeaders(Headers input_headers_);
|
void updateInputHeaders(Headers input_headers_);
|
||||||
void updateInputHeader(Header input_header, size_t idx = 0);
|
void updateInputHeader(Header input_header, size_t idx = 0);
|
||||||
@ -95,6 +104,9 @@ protected:
|
|||||||
Processors processors;
|
Processors processors;
|
||||||
|
|
||||||
static void describePipeline(const Processors & processors, FormatSettings & settings);
|
static void describePipeline(const Processors & processors, FormatSettings & settings);
|
||||||
|
|
||||||
|
private:
|
||||||
|
size_t step_index = 0;
|
||||||
};
|
};
|
||||||
|
|
||||||
using QueryPlanStepPtr = std::unique_ptr<IQueryPlanStep>;
|
using QueryPlanStepPtr = std::unique_ptr<IQueryPlanStep>;
|
||||||
|
@ -3,12 +3,15 @@
|
|||||||
#include <Common/checkStackSize.h>
|
#include <Common/checkStackSize.h>
|
||||||
#include <Interpreters/ActionsDAG.h>
|
#include <Interpreters/ActionsDAG.h>
|
||||||
#include <Interpreters/Context.h>
|
#include <Interpreters/Context.h>
|
||||||
|
#include <Interpreters/IJoin.h>
|
||||||
#include <Interpreters/InterpreterSelectQueryAnalyzer.h>
|
#include <Interpreters/InterpreterSelectQueryAnalyzer.h>
|
||||||
#include <Interpreters/StorageID.h>
|
#include <Interpreters/StorageID.h>
|
||||||
|
#include <Interpreters/TableJoin.h>
|
||||||
#include <Parsers/ASTFunction.h>
|
#include <Parsers/ASTFunction.h>
|
||||||
#include <Processors/QueryPlan/ConvertingActions.h>
|
#include <Processors/QueryPlan/ConvertingActions.h>
|
||||||
#include <Processors/QueryPlan/ExpressionStep.h>
|
#include <Processors/QueryPlan/ExpressionStep.h>
|
||||||
#include <Processors/QueryPlan/ISourceStep.h>
|
#include <Processors/QueryPlan/ISourceStep.h>
|
||||||
|
#include <Processors/QueryPlan/JoinStep.h>
|
||||||
#include <Processors/QueryPlan/ReadFromMergeTree.h>
|
#include <Processors/QueryPlan/ReadFromMergeTree.h>
|
||||||
#include <Processors/Sources/NullSource.h>
|
#include <Processors/Sources/NullSource.h>
|
||||||
#include <Processors/Transforms/ExpressionTransform.h>
|
#include <Processors/Transforms/ExpressionTransform.h>
|
||||||
@ -62,7 +65,14 @@ std::pair<std::unique_ptr<QueryPlan>, bool> createLocalPlanForParallelReplicas(
|
|||||||
break;
|
break;
|
||||||
|
|
||||||
if (!node->children.empty())
|
if (!node->children.empty())
|
||||||
|
{
|
||||||
|
// In case of RIGHT JOIN, reading from the right table is parallelized among replicas
|
||||||
|
const JoinStep * join = typeid_cast<JoinStep*>(node->step.get());
|
||||||
|
if (join && join->getJoin()->getTableJoin().kind() == JoinKind::Right)
|
||||||
|
node = node->children.at(1);
|
||||||
|
else
|
||||||
node = node->children.at(0);
|
node = node->children.at(0);
|
||||||
|
}
|
||||||
else
|
else
|
||||||
node = nullptr;
|
node = nullptr;
|
||||||
}
|
}
|
||||||
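The hunk above walks the local query plan and, when the step is a RIGHT JOIN, follows the second child, because that is the side whose reading is split across replicas. A rough stand-in version of that walk (PlanNode is a hypothetical simplification, not the real QueryPlan::Node):

#include <vector>

struct PlanNode
{
    bool is_right_join = false;                  // stand-in for typeid_cast<JoinStep *> + kind check
    std::vector<const PlanNode *> children;      // a join step always has two children
};

const PlanNode * descendToReadingStep(const PlanNode * node)
{
    while (node && !node->children.empty())
        node = node->is_right_join ? node->children.at(1) : node->children.at(0);
    return node;
}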
|
@ -207,6 +207,7 @@ QueryPipelineBuilderPtr QueryPlan::buildQueryPipeline(
|
|||||||
static void explainStep(const IQueryPlanStep & step, JSONBuilder::JSONMap & map, const QueryPlan::ExplainPlanOptions & options)
|
static void explainStep(const IQueryPlanStep & step, JSONBuilder::JSONMap & map, const QueryPlan::ExplainPlanOptions & options)
|
||||||
{
|
{
|
||||||
map.add("Node Type", step.getName());
|
map.add("Node Type", step.getName());
|
||||||
|
map.add("Node Id", step.getUniqID());
|
||||||
|
|
||||||
if (options.description)
|
if (options.description)
|
||||||
{
|
{
|
||||||
|
@ -282,9 +282,9 @@ void SortingStep::mergeSorting(
|
|||||||
if (increase_sort_description_compile_attempts)
|
if (increase_sort_description_compile_attempts)
|
||||||
increase_sort_description_compile_attempts = false;
|
increase_sort_description_compile_attempts = false;
|
||||||
|
|
||||||
auto tmp_data_on_disk = sort_settings.tmp_data
|
TemporaryDataOnDiskScopePtr tmp_data_on_disk = nullptr;
|
||||||
? std::make_unique<TemporaryDataOnDisk>(sort_settings.tmp_data, CurrentMetrics::TemporaryFilesForSort)
|
if (sort_settings.tmp_data)
|
||||||
: std::unique_ptr<TemporaryDataOnDisk>();
|
tmp_data_on_disk = sort_settings.tmp_data->childScope(CurrentMetrics::TemporaryFilesForSort);
|
||||||
|
|
||||||
return std::make_shared<MergeSortingTransform>(
|
return std::make_shared<MergeSortingTransform>(
|
||||||
header,
|
header,
|
||||||
|
@ -54,9 +54,9 @@ namespace
|
|||||||
class SourceFromNativeStream : public ISource
|
class SourceFromNativeStream : public ISource
|
||||||
{
|
{
|
||||||
public:
|
public:
|
||||||
explicit SourceFromNativeStream(TemporaryFileStream * tmp_stream_)
|
explicit SourceFromNativeStream(const Block & header, TemporaryBlockStreamReaderHolder tmp_stream_)
|
||||||
: ISource(tmp_stream_->getHeader())
|
: ISource(header)
|
||||||
, tmp_stream(tmp_stream_)
|
, tmp_stream(std::move(tmp_stream_))
|
||||||
{}
|
{}
|
||||||
|
|
||||||
String getName() const override { return "SourceFromNativeStream"; }
|
String getName() const override { return "SourceFromNativeStream"; }
|
||||||
@ -69,7 +69,7 @@ namespace
|
|||||||
auto block = tmp_stream->read();
|
auto block = tmp_stream->read();
|
||||||
if (!block)
|
if (!block)
|
||||||
{
|
{
|
||||||
tmp_stream = nullptr;
|
tmp_stream.reset();
|
||||||
return {};
|
return {};
|
||||||
}
|
}
|
||||||
return convertToChunk(block);
|
return convertToChunk(block);
|
||||||
@ -78,7 +78,7 @@ namespace
|
|||||||
std::optional<ReadProgress> getReadProgress() override { return std::nullopt; }
|
std::optional<ReadProgress> getReadProgress() override { return std::nullopt; }
|
||||||
|
|
||||||
private:
|
private:
|
||||||
TemporaryFileStream * tmp_stream;
|
TemporaryBlockStreamReaderHolder tmp_stream;
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
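SourceFromNativeStream now owns the reader holder and drops it once the stream is exhausted. A simplified sketch of that pull pattern, with toy types in place of the real temporary-stream classes:

#include <cstddef>
#include <memory>
#include <optional>
#include <vector>

using Block = std::vector<int>;   // stand-in for a real Block

struct TempReader
{
    std::vector<Block> blocks;
    std::size_t pos = 0;
    Block read() { return pos < blocks.size() ? blocks[pos++] : Block{}; }
};

struct SourceOverTempStream
{
    std::unique_ptr<TempReader> reader;

    std::optional<Block> generate()
    {
        if (!reader)
            return std::nullopt;
        auto block = reader->read();
        if (block.empty())
        {
            reader.reset();        // drop the reader so the temporary data can be released early
            return std::nullopt;
        }
        return block;
    }
};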
@ -811,15 +811,18 @@ void AggregatingTransform::initGenerate()
|
|||||||
|
|
||||||
Pipes pipes;
|
Pipes pipes;
|
||||||
/// Merge external data from all aggregators used in query.
|
/// Merge external data from all aggregators used in query.
|
||||||
for (const auto & aggregator : *params->aggregator_list_ptr)
|
for (auto & aggregator : *params->aggregator_list_ptr)
|
||||||
{
|
{
|
||||||
const auto & tmp_data = aggregator.getTemporaryData();
|
tmp_files = aggregator.detachTemporaryData();
|
||||||
for (auto * tmp_stream : tmp_data.getStreams())
|
num_streams += tmp_files.size();
|
||||||
pipes.emplace_back(Pipe(std::make_unique<SourceFromNativeStream>(tmp_stream)));
|
|
||||||
|
|
||||||
num_streams += tmp_data.getStreams().size();
|
for (auto & tmp_stream : tmp_files)
|
||||||
compressed_size += tmp_data.getStat().compressed_size;
|
{
|
||||||
uncompressed_size += tmp_data.getStat().uncompressed_size;
|
auto stat = tmp_stream.finishWriting();
|
||||||
|
compressed_size += stat.compressed_size;
|
||||||
|
uncompressed_size += stat.uncompressed_size;
|
||||||
|
pipes.emplace_back(Pipe(std::make_unique<SourceFromNativeStream>(tmp_stream.getHeader(), tmp_stream.getReadStream())));
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
LOG_DEBUG(
|
LOG_DEBUG(
|
||||||
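The loop above finalizes each detached temporary stream exactly once and accumulates its on-disk statistics before building the pipes. A hedged stand-alone sketch of the same accounting, with Stat/TempStream as invented stand-ins:

#include <cstddef>
#include <list>

struct Stat { std::size_t compressed_size = 0; std::size_t uncompressed_size = 0; };

struct TempStream
{
    Stat stat;
    Stat finishWriting() { return stat; }   // the real call flushes the stream and returns its sizes
};

struct Totals { std::size_t compressed = 0; std::size_t uncompressed = 0; std::size_t streams = 0; };

Totals finalizeTemporaryStreams(std::list<TempStream> & tmp_files)
{
    Totals totals;
    totals.streams = tmp_files.size();
    for (auto & stream : tmp_files)
    {
        auto stat = stream.finishWriting();
        totals.compressed += stat.compressed_size;
        totals.uncompressed += stat.uncompressed_size;
    }
    return totals;
}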
|
@ -216,6 +216,8 @@ private:
|
|||||||
|
|
||||||
RowsBeforeStepCounterPtr rows_before_aggregation;
|
RowsBeforeStepCounterPtr rows_before_aggregation;
|
||||||
|
|
||||||
|
std::list<TemporaryBlockStreamHolder> tmp_files;
|
||||||
|
|
||||||
void initGenerate();
|
void initGenerate();
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -27,15 +27,20 @@ namespace ProfileEvents
|
|||||||
namespace DB
|
namespace DB
|
||||||
{
|
{
|
||||||
|
|
||||||
|
namespace ErrorCodes
|
||||||
|
{
|
||||||
|
extern const int LOGICAL_ERROR;
|
||||||
|
}
|
||||||
|
|
||||||
class BufferingToFileTransform : public IAccumulatingTransform
|
class BufferingToFileTransform : public IAccumulatingTransform
|
||||||
{
|
{
|
||||||
public:
|
public:
|
||||||
BufferingToFileTransform(const Block & header, TemporaryFileStream & tmp_stream_, LoggerPtr log_)
|
BufferingToFileTransform(const Block & header, TemporaryBlockStreamHolder tmp_stream_, LoggerPtr log_)
|
||||||
: IAccumulatingTransform(header, header)
|
: IAccumulatingTransform(header, header)
|
||||||
, tmp_stream(tmp_stream_)
|
, tmp_stream(std::move(tmp_stream_))
|
||||||
, log(log_)
|
, log(log_)
|
||||||
{
|
{
|
||||||
LOG_INFO(log, "Sorting and writing part of data into temporary file {}", tmp_stream.getPath());
|
LOG_INFO(log, "Sorting and writing part of data into temporary file {}", tmp_stream.getHolder()->describeFilePath());
|
||||||
ProfileEvents::increment(ProfileEvents::ExternalSortWritePart);
|
ProfileEvents::increment(ProfileEvents::ExternalSortWritePart);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -44,14 +49,15 @@ public:
|
|||||||
void consume(Chunk chunk) override
|
void consume(Chunk chunk) override
|
||||||
{
|
{
|
||||||
Block block = getInputPort().getHeader().cloneWithColumns(chunk.detachColumns());
|
Block block = getInputPort().getHeader().cloneWithColumns(chunk.detachColumns());
|
||||||
tmp_stream.write(block);
|
tmp_stream->write(block);
|
||||||
}
|
}
|
||||||
|
|
||||||
Chunk generate() override
|
Chunk generate() override
|
||||||
{
|
{
|
||||||
if (!tmp_stream.isWriteFinished())
|
if (!tmp_read_stream)
|
||||||
{
|
{
|
||||||
auto stat = tmp_stream.finishWriting();
|
auto stat = tmp_stream.finishWriting();
|
||||||
|
tmp_read_stream = tmp_stream.getReadStream();
|
||||||
|
|
||||||
ProfileEvents::increment(ProfileEvents::ExternalProcessingCompressedBytesTotal, stat.compressed_size);
|
ProfileEvents::increment(ProfileEvents::ExternalProcessingCompressedBytesTotal, stat.compressed_size);
|
||||||
ProfileEvents::increment(ProfileEvents::ExternalProcessingUncompressedBytesTotal, stat.uncompressed_size);
|
ProfileEvents::increment(ProfileEvents::ExternalProcessingUncompressedBytesTotal, stat.uncompressed_size);
|
||||||
@ -59,10 +65,11 @@ public:
|
|||||||
ProfileEvents::increment(ProfileEvents::ExternalSortUncompressedBytes, stat.uncompressed_size);
|
ProfileEvents::increment(ProfileEvents::ExternalSortUncompressedBytes, stat.uncompressed_size);
|
||||||
|
|
||||||
LOG_INFO(log, "Done writing part of data into temporary file {}, compressed {}, uncompressed {} ",
|
LOG_INFO(log, "Done writing part of data into temporary file {}, compressed {}, uncompressed {} ",
|
||||||
tmp_stream.getPath(), ReadableSize(static_cast<double>(stat.compressed_size)), ReadableSize(static_cast<double>(stat.uncompressed_size)));
|
tmp_stream.getHolder()->describeFilePath(),
|
||||||
|
ReadableSize(static_cast<double>(stat.compressed_size)), ReadableSize(static_cast<double>(stat.uncompressed_size)));
|
||||||
}
|
}
|
||||||
|
|
||||||
Block block = tmp_stream.read();
|
Block block = tmp_read_stream.value()->read();
|
||||||
if (!block)
|
if (!block)
|
||||||
return {};
|
return {};
|
||||||
|
|
||||||
@ -71,7 +78,8 @@ public:
|
|||||||
}
|
}
|
||||||
|
|
||||||
private:
|
private:
|
||||||
TemporaryFileStream & tmp_stream;
|
TemporaryBlockStreamHolder tmp_stream;
|
||||||
|
std::optional<TemporaryBlockStreamReaderHolder> tmp_read_stream;
|
||||||
|
|
||||||
LoggerPtr log;
|
LoggerPtr log;
|
||||||
};
|
};
|
||||||
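BufferingToFileTransform now keeps the stream holder by value and opens the read side lazily on the first generate() call, after finishWriting(). A toy version of that write-then-read switch (Writer/Reader are hypothetical, not the real TemporaryBlockStream types):

#include <cstddef>
#include <memory>
#include <vector>

using Block = std::vector<int>;   // stand-in for a real Block

struct Reader
{
    std::vector<Block> data;
    std::size_t pos = 0;
    Block read() { return pos < data.size() ? data[pos++] : Block{}; }
};

struct Writer
{
    std::vector<Block> data;
    void write(Block block) { data.push_back(std::move(block)); }
    std::unique_ptr<Reader> finishWritingAndRead() { return std::make_unique<Reader>(Reader{data, 0}); }
};

struct BufferingTransform
{
    Writer writer;
    std::unique_ptr<Reader> reader;

    void consume(Block block) { writer.write(std::move(block)); }

    Block generate()
    {
        if (!reader)                                   // first call: stop writing, start reading
            reader = writer.finishWritingAndRead();
        return reader->read();                         // an empty block signals end of data
    }
};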
@ -86,7 +94,7 @@ MergeSortingTransform::MergeSortingTransform(
|
|||||||
size_t max_bytes_before_remerge_,
|
size_t max_bytes_before_remerge_,
|
||||||
double remerge_lowered_memory_bytes_ratio_,
|
double remerge_lowered_memory_bytes_ratio_,
|
||||||
size_t max_bytes_before_external_sort_,
|
size_t max_bytes_before_external_sort_,
|
||||||
TemporaryDataOnDiskPtr tmp_data_,
|
TemporaryDataOnDiskScopePtr tmp_data_,
|
||||||
size_t min_free_disk_space_)
|
size_t min_free_disk_space_)
|
||||||
: SortingTransform(header, description_, max_merged_block_size_, limit_, increase_sort_description_compile_attempts)
|
: SortingTransform(header, description_, max_merged_block_size_, limit_, increase_sort_description_compile_attempts)
|
||||||
, max_bytes_before_remerge(max_bytes_before_remerge_)
|
, max_bytes_before_remerge(max_bytes_before_remerge_)
|
||||||
@ -168,9 +176,13 @@ void MergeSortingTransform::consume(Chunk chunk)
|
|||||||
*/
|
*/
|
||||||
if (max_bytes_before_external_sort && sum_bytes_in_blocks > max_bytes_before_external_sort)
|
if (max_bytes_before_external_sort && sum_bytes_in_blocks > max_bytes_before_external_sort)
|
||||||
{
|
{
|
||||||
|
if (!tmp_data)
|
||||||
|
throw Exception(ErrorCodes::LOGICAL_ERROR, "TemporaryDataOnDisk is not set for MergeSortingTransform");
|
||||||
|
temporary_files_num++;
|
||||||
|
|
||||||
/// If there's less free disk space than reserve_size, an exception will be thrown
|
/// If there's less free disk space than reserve_size, an exception will be thrown
|
||||||
size_t reserve_size = sum_bytes_in_blocks + min_free_disk_space;
|
size_t reserve_size = sum_bytes_in_blocks + min_free_disk_space;
|
||||||
auto & tmp_stream = tmp_data->createStream(header_without_constants, reserve_size);
|
TemporaryBlockStreamHolder tmp_stream(header_without_constants, tmp_data.get(), reserve_size);
|
||||||
size_t max_merged_block_size = this->max_merged_block_size;
|
size_t max_merged_block_size = this->max_merged_block_size;
|
||||||
if (max_block_bytes > 0 && sum_rows_in_blocks > 0 && sum_bytes_in_blocks > 0)
|
if (max_block_bytes > 0 && sum_rows_in_blocks > 0 && sum_bytes_in_blocks > 0)
|
||||||
{
|
{
|
||||||
@ -179,7 +191,7 @@ void MergeSortingTransform::consume(Chunk chunk)
|
|||||||
max_merged_block_size = std::max(std::min(max_merged_block_size, max_block_bytes / avg_row_bytes), 128UL);
|
max_merged_block_size = std::max(std::min(max_merged_block_size, max_block_bytes / avg_row_bytes), 128UL);
|
||||||
}
|
}
|
||||||
merge_sorter = std::make_unique<MergeSorter>(header_without_constants, std::move(chunks), description, max_merged_block_size, limit);
|
merge_sorter = std::make_unique<MergeSorter>(header_without_constants, std::move(chunks), description, max_merged_block_size, limit);
|
||||||
auto current_processor = std::make_shared<BufferingToFileTransform>(header_without_constants, tmp_stream, log);
|
auto current_processor = std::make_shared<BufferingToFileTransform>(header_without_constants, std::move(tmp_stream), log);
|
||||||
|
|
||||||
processors.emplace_back(current_processor);
|
processors.emplace_back(current_processor);
|
||||||
|
|
||||||
@ -223,14 +235,14 @@ void MergeSortingTransform::generate()
|
|||||||
{
|
{
|
||||||
if (!generated_prefix)
|
if (!generated_prefix)
|
||||||
{
|
{
|
||||||
size_t num_tmp_files = tmp_data ? tmp_data->getStreams().size() : 0;
|
if (temporary_files_num == 0)
|
||||||
if (num_tmp_files == 0)
|
{
|
||||||
merge_sorter
|
merge_sorter = std::make_unique<MergeSorter>(header_without_constants, std::move(chunks), description, max_merged_block_size, limit);
|
||||||
= std::make_unique<MergeSorter>(header_without_constants, std::move(chunks), description, max_merged_block_size, limit);
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
ProfileEvents::increment(ProfileEvents::ExternalSortMerge);
|
ProfileEvents::increment(ProfileEvents::ExternalSortMerge);
|
||||||
LOG_INFO(log, "There are {} temporary sorted parts to merge", num_tmp_files);
|
LOG_INFO(log, "There are {} temporary sorted parts to merge", temporary_files_num);
|
||||||
|
|
||||||
processors.emplace_back(std::make_shared<MergeSorterSource>(
|
processors.emplace_back(std::make_shared<MergeSorterSource>(
|
||||||
header_without_constants, std::move(chunks), description, max_merged_block_size, limit));
|
header_without_constants, std::move(chunks), description, max_merged_block_size, limit));
|
||||||
|
@ -29,7 +29,7 @@ public:
|
|||||||
size_t max_bytes_before_remerge_,
|
size_t max_bytes_before_remerge_,
|
||||||
double remerge_lowered_memory_bytes_ratio_,
|
double remerge_lowered_memory_bytes_ratio_,
|
||||||
size_t max_bytes_before_external_sort_,
|
size_t max_bytes_before_external_sort_,
|
||||||
TemporaryDataOnDiskPtr tmp_data_,
|
TemporaryDataOnDiskScopePtr tmp_data_,
|
||||||
size_t min_free_disk_space_);
|
size_t min_free_disk_space_);
|
||||||
|
|
||||||
String getName() const override { return "MergeSortingTransform"; }
|
String getName() const override { return "MergeSortingTransform"; }
|
||||||
@ -45,7 +45,8 @@ private:
|
|||||||
size_t max_bytes_before_remerge;
|
size_t max_bytes_before_remerge;
|
||||||
double remerge_lowered_memory_bytes_ratio;
|
double remerge_lowered_memory_bytes_ratio;
|
||||||
size_t max_bytes_before_external_sort;
|
size_t max_bytes_before_external_sort;
|
||||||
TemporaryDataOnDiskPtr tmp_data;
|
TemporaryDataOnDiskScopePtr tmp_data;
|
||||||
|
size_t temporary_files_num = 0;
|
||||||
size_t min_free_disk_space;
|
size_t min_free_disk_space;
|
||||||
size_t max_block_bytes;
|
size_t max_block_bytes;
|
||||||
|
|
||||||
|
@ -398,10 +398,10 @@ std::unique_ptr<QueryPipelineBuilder> QueryPipelineBuilder::joinPipelinesRightLe
|
|||||||
|
|
||||||
left->pipe.collected_processors = collected_processors;
|
left->pipe.collected_processors = collected_processors;
|
||||||
|
|
||||||
/// Collect the NEW processors for the right pipeline.
|
|
||||||
QueryPipelineProcessorsCollector collector(*right);
|
|
||||||
/// Remember the last step of the right pipeline.
|
/// Remember the last step of the right pipeline.
|
||||||
IQueryPlanStep * step = right->pipe.processors->back()->getQueryPlanStep();
|
IQueryPlanStep * step = right->pipe.processors->back()->getQueryPlanStep();
|
||||||
|
/// Collect the NEW processors for the right pipeline.
|
||||||
|
QueryPipelineProcessorsCollector collector(*right, step);
|
||||||
|
|
||||||
/// In case joined subquery has totals, and we don't, add default chunk to totals.
|
/// In case joined subquery has totals, and we don't, add default chunk to totals.
|
||||||
bool default_totals = false;
|
bool default_totals = false;
|
||||||
|
@ -30,7 +30,7 @@ void printPipeline(const Processors & processors, const Statuses & statuses, Wri
|
|||||||
for (const auto & processor : processors)
|
for (const auto & processor : processors)
|
||||||
{
|
{
|
||||||
const auto & description = processor->getDescription();
|
const auto & description = processor->getDescription();
|
||||||
out << " n" << get_proc_id(*processor) << "[label=\"" << processor->getName() << (description.empty() ? "" : ":") << description;
|
out << " n" << get_proc_id(*processor) << "[label=\"" << processor->getUniqID() << (description.empty() ? "" : ":") << description;
|
||||||
|
|
||||||
if (statuses_iter != statuses.end())
|
if (statuses_iter != statuses.end())
|
||||||
{
|
{
|
||||||
|
@ -170,15 +170,16 @@ void HTTPHandler::pushDelayedResults(Output & used_output)
|
|||||||
|
|
||||||
for (auto & write_buf : write_buffers)
|
for (auto & write_buf : write_buffers)
|
||||||
{
|
{
|
||||||
if (!write_buf)
|
if (auto * write_buf_concrete = dynamic_cast<TemporaryDataBuffer *>(write_buf.get()))
|
||||||
continue;
|
|
||||||
|
|
||||||
IReadableWriteBuffer * write_buf_concrete = dynamic_cast<IReadableWriteBuffer *>(write_buf.get());
|
|
||||||
if (write_buf_concrete)
|
|
||||||
{
|
{
|
||||||
ReadBufferPtr reread_buf = write_buf_concrete->tryGetReadBuffer();
|
if (auto reread_buf = write_buf_concrete->read())
|
||||||
if (reread_buf)
|
read_buffers.emplace_back(std::move(reread_buf));
|
||||||
read_buffers.emplace_back(wrapReadBufferPointer(reread_buf));
|
}
|
||||||
|
|
||||||
|
if (auto * write_buf_concrete = dynamic_cast<IReadableWriteBuffer *>(write_buf.get()))
|
||||||
|
{
|
||||||
|
if (auto reread_buf = write_buf_concrete->tryGetReadBuffer())
|
||||||
|
read_buffers.emplace_back(std::move(reread_buf));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -321,21 +322,19 @@ void HTTPHandler::processQuery(
|
|||||||
|
|
||||||
if (buffer_size_memory > 0 || buffer_until_eof)
|
if (buffer_size_memory > 0 || buffer_until_eof)
|
||||||
{
|
{
|
||||||
CascadeWriteBuffer::WriteBufferPtrs cascade_buffer1;
|
CascadeWriteBuffer::WriteBufferPtrs cascade_buffers;
|
||||||
CascadeWriteBuffer::WriteBufferConstructors cascade_buffer2;
|
CascadeWriteBuffer::WriteBufferConstructors cascade_buffers_lazy;
|
||||||
|
|
||||||
if (buffer_size_memory > 0)
|
if (buffer_size_memory > 0)
|
||||||
cascade_buffer1.emplace_back(std::make_shared<MemoryWriteBuffer>(buffer_size_memory));
|
cascade_buffers.emplace_back(std::make_shared<MemoryWriteBuffer>(buffer_size_memory));
|
||||||
|
|
||||||
if (buffer_until_eof)
|
if (buffer_until_eof)
|
||||||
{
|
{
|
||||||
auto tmp_data = std::make_shared<TemporaryDataOnDisk>(server.context()->getTempDataOnDisk());
|
auto tmp_data = server.context()->getTempDataOnDisk();
|
||||||
|
cascade_buffers_lazy.emplace_back([tmp_data](const WriteBufferPtr &) -> WriteBufferPtr
|
||||||
auto create_tmp_disk_buffer = [tmp_data] (const WriteBufferPtr &) -> WriteBufferPtr {
|
{
|
||||||
return tmp_data->createRawStream();
|
return std::make_unique<TemporaryDataBuffer>(tmp_data.get());
|
||||||
};
|
});
|
||||||
|
|
||||||
cascade_buffer2.emplace_back(std::move(create_tmp_disk_buffer));
|
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
@ -351,10 +350,10 @@ void HTTPHandler::processQuery(
|
|||||||
return next_buffer;
|
return next_buffer;
|
||||||
};
|
};
|
||||||
|
|
||||||
cascade_buffer2.emplace_back(push_memory_buffer_and_continue);
|
cascade_buffers_lazy.emplace_back(push_memory_buffer_and_continue);
|
||||||
}
|
}
|
||||||
|
|
||||||
used_output.out_delayed_and_compressed_holder = std::make_unique<CascadeWriteBuffer>(std::move(cascade_buffer1), std::move(cascade_buffer2));
|
used_output.out_delayed_and_compressed_holder = std::make_unique<CascadeWriteBuffer>(std::move(cascade_buffers), std::move(cascade_buffers_lazy));
|
||||||
used_output.out_maybe_delayed_and_compressed = used_output.out_delayed_and_compressed_holder.get();
|
used_output.out_maybe_delayed_and_compressed = used_output.out_delayed_and_compressed_holder.get();
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
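The cascade above mixes eagerly created buffers with factory lambdas that materialize a spill-to-disk buffer only when the earlier buffers overflow. A minimal sketch of that idea, assuming simplified Sink types rather than the real WriteBuffer hierarchy:

#include <functional>
#include <memory>
#include <string>
#include <vector>

struct Sink
{
    virtual ~Sink() = default;
    virtual void write(const std::string & s) = 0;
};

struct MemorySink : Sink
{
    std::string data;
    void write(const std::string & s) override { data += s; }
};

using SinkPtr = std::shared_ptr<Sink>;
using SinkFactory = std::function<SinkPtr()>;

struct CascadeSink
{
    std::vector<SinkPtr> ready;       // created up front, e.g. an in-memory buffer
    std::vector<SinkFactory> lazy;    // created only on overflow, e.g. a temporary file on disk

    SinkPtr nextAfterOverflow()
    {
        if (lazy.empty())
            return nullptr;
        auto sink = lazy.front()();   // materialize the next buffer on demand
        lazy.erase(lazy.begin());
        ready.push_back(sink);
        return sink;
    }
};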
|
@ -65,6 +65,11 @@ namespace ProfileEvents
|
|||||||
extern const Event MergeProjectionStageExecuteMilliseconds;
|
extern const Event MergeProjectionStageExecuteMilliseconds;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
namespace CurrentMetrics
|
||||||
|
{
|
||||||
|
extern const Metric TemporaryFilesForMerge;
|
||||||
|
}
|
||||||
|
|
||||||
namespace DB
|
namespace DB
|
||||||
{
|
{
|
||||||
namespace Setting
|
namespace Setting
|
||||||
@ -124,6 +129,7 @@ static ColumnsStatistics getStatisticsForColumns(
|
|||||||
return all_statistics;
|
return all_statistics;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
/// Manages the "rows_sources" temporary file that is used during vertical merge.
|
/// Manages the "rows_sources" temporary file that is used during vertical merge.
|
||||||
class RowsSourcesTemporaryFile : public ITemporaryFileLookup
|
class RowsSourcesTemporaryFile : public ITemporaryFileLookup
|
||||||
{
|
{
|
||||||
@ -132,9 +138,7 @@ public:
|
|||||||
static constexpr auto FILE_ID = "rows_sources";
|
static constexpr auto FILE_ID = "rows_sources";
|
||||||
|
|
||||||
explicit RowsSourcesTemporaryFile(TemporaryDataOnDiskScopePtr temporary_data_on_disk_)
|
explicit RowsSourcesTemporaryFile(TemporaryDataOnDiskScopePtr temporary_data_on_disk_)
|
||||||
: tmp_disk(std::make_unique<TemporaryDataOnDisk>(temporary_data_on_disk_))
|
: temporary_data_on_disk(temporary_data_on_disk_->childScope(CurrentMetrics::TemporaryFilesForMerge))
|
||||||
, uncompressed_write_buffer(tmp_disk->createRawStream())
|
|
||||||
, tmp_file_name_on_disk(uncompressed_write_buffer->getFileName())
|
|
||||||
{
|
{
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -143,11 +147,11 @@ public:
|
|||||||
if (name != FILE_ID)
|
if (name != FILE_ID)
|
||||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected temporary file name requested: {}", name);
|
throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected temporary file name requested: {}", name);
|
||||||
|
|
||||||
if (write_buffer)
|
if (tmp_data_buffer)
|
||||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "Temporary file was already requested for writing, there musto be only one writer");
|
throw Exception(ErrorCodes::LOGICAL_ERROR, "Temporary file was already requested for writing, there musto be only one writer");
|
||||||
|
|
||||||
write_buffer = (std::make_unique<CompressedWriteBuffer>(*uncompressed_write_buffer));
|
tmp_data_buffer = std::make_unique<TemporaryDataBuffer>(temporary_data_on_disk.get());
|
||||||
return *write_buffer;
|
return *tmp_data_buffer;
|
||||||
}
|
}
|
||||||
|
|
||||||
std::unique_ptr<ReadBuffer> getTemporaryFileForReading(const String & name) override
|
std::unique_ptr<ReadBuffer> getTemporaryFileForReading(const String & name) override
|
||||||
@ -163,25 +167,24 @@ public:
|
|||||||
return std::make_unique<ReadBufferFromEmptyFile>();
|
return std::make_unique<ReadBufferFromEmptyFile>();
|
||||||
|
|
||||||
/// Reopen the file for each read so that multiple reads can be performed in parallel and there is no need to seek to the beginning.
|
/// Reopen the file for each read so that multiple reads can be performed in parallel and there is no need to seek to the beginning.
|
||||||
auto raw_file_read_buffer = std::make_unique<ReadBufferFromFile>(tmp_file_name_on_disk);
|
return tmp_data_buffer->read();
|
||||||
return std::make_unique<CompressedReadBufferFromFile>(std::move(raw_file_read_buffer));
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Returns written data size in bytes
|
/// Returns written data size in bytes
|
||||||
size_t finalizeWriting()
|
size_t finalizeWriting()
|
||||||
{
|
{
|
||||||
write_buffer->finalize();
|
if (!tmp_data_buffer)
|
||||||
uncompressed_write_buffer->finalize();
|
throw Exception(ErrorCodes::LOGICAL_ERROR, "Temporary file was not requested for writing");
|
||||||
|
|
||||||
|
auto stat = tmp_data_buffer->finishWriting();
|
||||||
finalized = true;
|
finalized = true;
|
||||||
final_size = write_buffer->count();
|
final_size = stat.uncompressed_size;
|
||||||
return final_size;
|
return final_size;
|
||||||
}
|
}
|
||||||
|
|
||||||
private:
|
private:
|
||||||
std::unique_ptr<TemporaryDataOnDisk> tmp_disk;
|
std::unique_ptr<TemporaryDataBuffer> tmp_data_buffer;
|
||||||
std::unique_ptr<WriteBufferFromFileBase> uncompressed_write_buffer;
|
TemporaryDataOnDiskScopePtr temporary_data_on_disk;
|
||||||
std::unique_ptr<WriteBuffer> write_buffer;
|
|
||||||
const String tmp_file_name_on_disk;
|
|
||||||
bool finalized = false;
|
bool finalized = false;
|
||||||
size_t final_size = 0;
|
size_t final_size = 0;
|
||||||
};
|
};
|
||||||
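RowsSourcesTemporaryFile enforces a single writer and reports the written size only after finalizeWriting(). A compact sketch of that contract, using an in-memory string where the real code uses TemporaryDataBuffer:

#include <cstddef>
#include <memory>
#include <stdexcept>
#include <string>

struct ToyTempFile
{
    std::unique_ptr<std::string> write_buffer;
    bool finalized = false;
    std::size_t final_size = 0;

    std::string & getForWriting()
    {
        if (write_buffer)
            throw std::logic_error("Temporary file already requested for writing, there must be only one writer");
        write_buffer = std::make_unique<std::string>();
        return *write_buffer;
    }

    std::size_t finalizeWriting()
    {
        if (!write_buffer)
            throw std::logic_error("Temporary file was not requested for writing");
        finalized = true;
        final_size = write_buffer->size();
        return final_size;
    }
};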
@ -874,6 +877,7 @@ bool MergeTask::VerticalMergeStage::prepareVerticalMergeForAllColumns() const
|
|||||||
/// In special case, when there is only one source part, and no rows were skipped, we may have
|
/// In special case, when there is only one source part, and no rows were skipped, we may have
|
||||||
/// skipped writing rows_sources file. Otherwise rows_sources_count must be equal to the total
|
/// skipped writing rows_sources file. Otherwise rows_sources_count must be equal to the total
|
||||||
/// number of input rows.
|
/// number of input rows.
|
||||||
|
/// Note that only one byte index is written for each row, so the number of rows equals the number of bytes written.
|
||||||
if ((rows_sources_count > 0 || global_ctx->future_part->parts.size() > 1) && sum_input_rows_exact != rows_sources_count + input_rows_filtered)
|
if ((rows_sources_count > 0 || global_ctx->future_part->parts.size() > 1) && sum_input_rows_exact != rows_sources_count + input_rows_filtered)
|
||||||
throw Exception(
|
throw Exception(
|
||||||
ErrorCodes::LOGICAL_ERROR,
|
ErrorCodes::LOGICAL_ERROR,
|
||||||
@ -881,6 +885,7 @@ bool MergeTask::VerticalMergeStage::prepareVerticalMergeForAllColumns() const
|
|||||||
"of bytes written to rows_sources file ({}). It is a bug.",
|
"of bytes written to rows_sources file ({}). It is a bug.",
|
||||||
sum_input_rows_exact, input_rows_filtered, rows_sources_count);
|
sum_input_rows_exact, input_rows_filtered, rows_sources_count);
|
||||||
|
|
||||||
|
|
||||||
ctx->it_name_and_type = global_ctx->gathering_columns.cbegin();
|
ctx->it_name_and_type = global_ctx->gathering_columns.cbegin();
|
||||||
|
|
||||||
const auto & settings = global_ctx->context->getSettingsRef();
|
const auto & settings = global_ctx->context->getSettingsRef();
|
||||||
|
@ -162,8 +162,6 @@ struct SelectQueryInfo
|
|||||||
/// It's guaranteed to be present in JOIN TREE of `query_tree`
|
/// It's guaranteed to be present in JOIN TREE of `query_tree`
|
||||||
QueryTreeNodePtr table_expression;
|
QueryTreeNodePtr table_expression;
|
||||||
|
|
||||||
bool current_table_chosen_for_reading_with_parallel_replicas = false;
|
|
||||||
|
|
||||||
/// Table expression modifiers for storage
|
/// Table expression modifiers for storage
|
||||||
std::optional<TableExpressionModifiers> table_expression_modifiers;
|
std::optional<TableExpressionModifiers> table_expression_modifiers;
|
||||||
|
|
||||||
|
@ -276,9 +276,7 @@ void StorageMergeTree::read(
|
|||||||
}
|
}
|
||||||
|
|
||||||
const bool enable_parallel_reading = local_context->canUseParallelReplicasOnFollower()
|
const bool enable_parallel_reading = local_context->canUseParallelReplicasOnFollower()
|
||||||
&& local_context->getSettingsRef()[Setting::parallel_replicas_for_non_replicated_merge_tree]
|
&& local_context->getSettingsRef()[Setting::parallel_replicas_for_non_replicated_merge_tree];
|
||||||
&& (!local_context->getSettingsRef()[Setting::allow_experimental_analyzer]
|
|
||||||
|| query_info.current_table_chosen_for_reading_with_parallel_replicas);
|
|
||||||
|
|
||||||
if (auto plan = reader.read(
|
if (auto plan = reader.read(
|
||||||
column_names,
|
column_names,
|
||||||
|
@ -5640,10 +5640,7 @@ void StorageReplicatedMergeTree::readLocalImpl(
|
|||||||
const size_t max_block_size,
|
const size_t max_block_size,
|
||||||
const size_t num_streams)
|
const size_t num_streams)
|
||||||
{
|
{
|
||||||
const bool enable_parallel_reading = local_context->canUseParallelReplicasOnFollower()
|
const bool enable_parallel_reading = local_context->canUseParallelReplicasOnFollower();
|
||||||
&& (!local_context->getSettingsRef()[Setting::allow_experimental_analyzer]
|
|
||||||
|| query_info.current_table_chosen_for_reading_with_parallel_replicas);
|
|
||||||
|
|
||||||
auto plan = reader.read(
|
auto plan = reader.read(
|
||||||
column_names, storage_snapshot, query_info,
|
column_names, storage_snapshot, query_info,
|
||||||
local_context, max_block_size, num_streams,
|
local_context, max_block_size, num_streams,
|
||||||
|
@ -314,6 +314,35 @@ TableNodePtr executeSubqueryNode(const QueryTreeNodePtr & subquery_node,
|
|||||||
return temporary_table_expression_node;
|
return temporary_table_expression_node;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
QueryTreeNodePtr getSubqueryFromTableExpression(
|
||||||
|
const QueryTreeNodePtr & join_table_expression,
|
||||||
|
const std::unordered_map<QueryTreeNodePtr, CollectColumnSourceToColumnsVisitor::Columns> & column_source_to_columns,
|
||||||
|
const ContextPtr & context)
|
||||||
|
{
|
||||||
|
auto join_table_expression_node_type = join_table_expression->getNodeType();
|
||||||
|
QueryTreeNodePtr subquery_node;
|
||||||
|
|
||||||
|
if (join_table_expression_node_type == QueryTreeNodeType::QUERY || join_table_expression_node_type == QueryTreeNodeType::UNION)
|
||||||
|
{
|
||||||
|
subquery_node = join_table_expression;
|
||||||
|
}
|
||||||
|
else if (
|
||||||
|
join_table_expression_node_type == QueryTreeNodeType::TABLE || join_table_expression_node_type == QueryTreeNodeType::TABLE_FUNCTION)
|
||||||
|
{
|
||||||
|
const auto & columns = column_source_to_columns.at(join_table_expression).columns;
|
||||||
|
subquery_node = buildSubqueryToReadColumnsFromTableExpression(columns, join_table_expression, context);
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
throw Exception(
|
||||||
|
ErrorCodes::LOGICAL_ERROR,
|
||||||
|
"Expected JOIN table expression to be table, table function, query or union node. Actual {}",
|
||||||
|
join_table_expression->formatASTForErrorMessage());
|
||||||
|
}
|
||||||
|
|
||||||
|
return subquery_node;
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
QueryTreeNodePtr buildQueryTreeForShard(const PlannerContextPtr & planner_context, QueryTreeNodePtr query_tree_to_modify)
|
QueryTreeNodePtr buildQueryTreeForShard(const PlannerContextPtr & planner_context, QueryTreeNodePtr query_tree_to_modify)
|
||||||
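getSubqueryFromTableExpression dispatches on the node type: queries and unions are used as-is, plain tables and table functions are wrapped into a subquery over the used columns, and anything else is a logical error. A rough SQL-string sketch of the same dispatch with invented toy types:

#include <stdexcept>
#include <string>
#include <vector>

enum class NodeType { Query, Union, Table, TableFunction, Other };

struct ToyNode
{
    NodeType type = NodeType::Other;
    std::string name;                        // table name or already-built subquery text
    std::vector<std::string> used_columns;
};

std::string subqueryForShard(const ToyNode & node)
{
    switch (node.type)
    {
        case NodeType::Query:
        case NodeType::Union:
            return node.name;                              // already a subquery, pass it through
        case NodeType::Table:
        case NodeType::TableFunction:
        {
            std::string cols;
            for (const auto & column : node.used_columns)
                cols += (cols.empty() ? "" : ", ") + column;
            return "(SELECT " + cols + " FROM " + node.name + ")";
        }
        default:
            throw std::logic_error("Expected table, table function, query or union node");
    }
}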
@ -335,37 +364,31 @@ QueryTreeNodePtr buildQueryTreeForShard(const PlannerContextPtr & planner_contex
|
|||||||
{
|
{
|
||||||
if (auto * join_node = global_in_or_join_node.query_node->as<JoinNode>())
|
if (auto * join_node = global_in_or_join_node.query_node->as<JoinNode>())
|
||||||
{
|
{
|
||||||
auto join_right_table_expression = join_node->getRightTableExpression();
|
QueryTreeNodePtr join_table_expression;
|
||||||
auto join_right_table_expression_node_type = join_right_table_expression->getNodeType();
|
const auto join_kind = join_node->getKind();
|
||||||
|
if (join_kind == JoinKind::Left || join_kind == JoinKind::Inner)
|
||||||
QueryTreeNodePtr subquery_node;
|
|
||||||
|
|
||||||
if (join_right_table_expression_node_type == QueryTreeNodeType::QUERY ||
|
|
||||||
join_right_table_expression_node_type == QueryTreeNodeType::UNION)
|
|
||||||
{
|
{
|
||||||
subquery_node = join_right_table_expression;
|
join_table_expression = join_node->getRightTableExpression();
|
||||||
}
|
}
|
||||||
else if (join_right_table_expression_node_type == QueryTreeNodeType::TABLE ||
|
else if (join_kind == JoinKind::Right)
|
||||||
join_right_table_expression_node_type == QueryTreeNodeType::TABLE_FUNCTION)
|
|
||||||
{
|
{
|
||||||
const auto & columns = column_source_to_columns.at(join_right_table_expression).columns;
|
join_table_expression = join_node->getLeftTableExpression();
|
||||||
subquery_node = buildSubqueryToReadColumnsFromTableExpression(columns,
|
|
||||||
join_right_table_expression,
|
|
||||||
planner_context->getQueryContext());
|
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
throw Exception(ErrorCodes::LOGICAL_ERROR,
|
throw Exception(
|
||||||
"Expected JOIN right table expression to be table, table function, query or union node. Actual {}",
|
ErrorCodes::LOGICAL_ERROR, "Unexpected join kind: {}", join_kind);
|
||||||
join_right_table_expression->formatASTForErrorMessage());
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
auto subquery_node
|
||||||
|
= getSubqueryFromTableExpression(join_table_expression, column_source_to_columns, planner_context->getQueryContext());
|
||||||
|
|
||||||
auto temporary_table_expression_node = executeSubqueryNode(subquery_node,
|
auto temporary_table_expression_node = executeSubqueryNode(subquery_node,
|
||||||
planner_context->getMutableQueryContext(),
|
planner_context->getMutableQueryContext(),
|
||||||
global_in_or_join_node.subquery_depth);
|
global_in_or_join_node.subquery_depth);
|
||||||
temporary_table_expression_node->setAlias(join_right_table_expression->getAlias());
|
temporary_table_expression_node->setAlias(join_table_expression->getAlias());
|
||||||
|
|
||||||
replacement_map.emplace(join_right_table_expression.get(), std::move(temporary_table_expression_node));
|
replacement_map.emplace(join_table_expression.get(), std::move(temporary_table_expression_node));
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
if (auto * in_function_node = global_in_or_join_node.query_node->as<FunctionNode>())
|
if (auto * in_function_node = global_in_or_join_node.query_node->as<FunctionNode>())
|
||||||
|
@ -2154,9 +2154,9 @@ class TestSuite:
|
|||||||
self.sequential_tests = []
|
self.sequential_tests = []
|
||||||
self.parallel_tests = []
|
self.parallel_tests = []
|
||||||
for test_name in self.all_tests:
|
for test_name in self.all_tests:
|
||||||
if self.is_sequential_test(test_name) and not args.no_sequential:
|
if self.is_sequential_test(test_name):
|
||||||
self.sequential_tests.append(test_name)
|
self.sequential_tests.append(test_name)
|
||||||
elif not args.no_parallel:
|
else:
|
||||||
self.parallel_tests.append(test_name)
|
self.parallel_tests.append(test_name)
|
||||||
|
|
||||||
def is_sequential_test(self, test_name):
|
def is_sequential_test(self, test_name):
|
||||||
@ -2620,6 +2620,7 @@ def run_tests_process(*args, **kwargs):
|
|||||||
|
|
||||||
|
|
||||||
def do_run_tests(jobs, test_suite: TestSuite):
|
def do_run_tests(jobs, test_suite: TestSuite):
|
||||||
|
if jobs > 1 and len(test_suite.parallel_tests) > 0:
|
||||||
print(
|
print(
|
||||||
"Found",
|
"Found",
|
||||||
len(test_suite.parallel_tests),
|
len(test_suite.parallel_tests),
|
||||||
@ -2627,7 +2628,6 @@ def do_run_tests(jobs, test_suite: TestSuite):
|
|||||||
len(test_suite.sequential_tests),
|
len(test_suite.sequential_tests),
|
||||||
"sequential tests",
|
"sequential tests",
|
||||||
)
|
)
|
||||||
if test_suite.parallel_tests:
|
|
||||||
tests_n = len(test_suite.parallel_tests)
|
tests_n = len(test_suite.parallel_tests)
|
||||||
jobs = min(jobs, tests_n)
|
jobs = min(jobs, tests_n)
|
||||||
|
|
||||||
@ -2640,7 +2640,6 @@ def do_run_tests(jobs, test_suite: TestSuite):
|
|||||||
# It makes it more difficult to detect real flaky tests,
|
# It makes it more difficult to detect real flaky tests,
|
||||||
# because the distribution and the amount
|
# because the distribution and the amount
|
||||||
# of failures will be nearly the same for all tests from the group.
|
# of failures will be nearly the same for all tests from the group.
|
||||||
# TODO: add shuffle for sequential tests
|
|
||||||
random.shuffle(test_suite.parallel_tests)
|
random.shuffle(test_suite.parallel_tests)
|
||||||
|
|
||||||
batch_size = len(test_suite.parallel_tests) // jobs
|
batch_size = len(test_suite.parallel_tests) // jobs
|
||||||
@ -2686,7 +2685,6 @@ def do_run_tests(jobs, test_suite: TestSuite):
|
|||||||
if not p.is_alive():
|
if not p.is_alive():
|
||||||
processes.remove(p)
|
processes.remove(p)
|
||||||
|
|
||||||
if test_suite.sequential_tests:
|
|
||||||
run_tests_array(
|
run_tests_array(
|
||||||
(
|
(
|
||||||
test_suite.sequential_tests,
|
test_suite.sequential_tests,
|
||||||
@ -2697,6 +2695,16 @@ def do_run_tests(jobs, test_suite: TestSuite):
|
|||||||
)
|
)
|
||||||
|
|
||||||
return len(test_suite.sequential_tests) + len(test_suite.parallel_tests)
|
return len(test_suite.sequential_tests) + len(test_suite.parallel_tests)
|
||||||
|
num_tests = len(test_suite.all_tests)
|
||||||
|
run_tests_array(
|
||||||
|
(
|
||||||
|
test_suite.all_tests,
|
||||||
|
num_tests,
|
||||||
|
test_suite,
|
||||||
|
False,
|
||||||
|
)
|
||||||
|
)
|
||||||
|
return num_tests
|
||||||
|
|
||||||
|
|
||||||
def is_test_from_dir(suite_dir, case):
|
def is_test_from_dir(suite_dir, case):
|
||||||
@ -3283,10 +3291,7 @@ def parse_args():
|
|||||||
help='Replace random database name with "default" in stderr',
|
help='Replace random database name with "default" in stderr',
|
||||||
)
|
)
|
||||||
parser.add_argument(
|
parser.add_argument(
|
||||||
"--no-sequential", action="store_true", help="Not run no-parallel"
|
"--parallel", default="1/1", help="One parallel test run number/total"
|
||||||
)
|
|
||||||
parser.add_argument(
|
|
||||||
"--no-parallel", action="store_true", help="Run only no-parallel"
|
|
||||||
)
|
)
|
||||||
parser.add_argument(
|
parser.add_argument(
|
||||||
"-j", "--jobs", default=1, nargs="?", type=int, help="Run all tests in parallel"
|
"-j", "--jobs", default=1, nargs="?", type=int, help="Run all tests in parallel"
|
||||||
@ -3335,7 +3340,7 @@ def parse_args():
|
|||||||
parser.add_argument(
|
parser.add_argument(
|
||||||
"--sequential",
|
"--sequential",
|
||||||
nargs="+",
|
nargs="+",
|
||||||
help="Run all tests sequentially",
|
help="Run these tests sequentially even if --parallel specified",
|
||||||
)
|
)
|
||||||
parser.add_argument(
|
parser.add_argument(
|
||||||
"--no-long", action="store_true", dest="no_long", help="Do not run long tests"
|
"--no-long", action="store_true", dest="no_long", help="Do not run long tests"
|
||||||
|
@ -6,7 +6,6 @@
|
|||||||
<cacheSessions>true</cacheSessions>
|
<cacheSessions>true</cacheSessions>
|
||||||
<disableProtocols>sslv2,sslv3</disableProtocols>
|
<disableProtocols>sslv2,sslv3</disableProtocols>
|
||||||
<preferServerCiphers>true</preferServerCiphers>
|
<preferServerCiphers>true</preferServerCiphers>
|
||||||
<verificationMode>none</verificationMode>
|
|
||||||
<invalidCertificateHandler>
|
<invalidCertificateHandler>
|
||||||
<name>AcceptCertificateHandler</name> <!-- For tests only-->
|
<name>AcceptCertificateHandler</name> <!-- For tests only-->
|
||||||
</invalidCertificateHandler>
|
</invalidCertificateHandler>
|
||||||
|
@@ -9,20 +9,6 @@ DEST_SERVER_PATH="${1:-/etc/clickhouse-server}"
 DEST_CLIENT_PATH="${2:-/etc/clickhouse-client}"
 SRC_PATH="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )"
 
-
-FAST_TEST=0
-NO_AZURE=0
-
-while [[ "$#" -gt 0 ]]; do
-    case $1 in
-        --fast-test) FAST_TEST=1 ;;
-        --s3-storage) EXPORT_S3_STORAGE_POLICIES=1 ;;
-        --no-azure) NO_AZURE=1 ;;
-        *) echo "Unknown option: $1" ; exit 1 ;;
-    esac
-    shift
-done
-
 echo "Going to install test configs from $SRC_PATH into $DEST_SERVER_PATH"
 
 mkdir -p $DEST_SERVER_PATH/config.d/
@@ -86,8 +72,9 @@ ln -sf $SRC_PATH/config.d/serverwide_trace_collector.xml $DEST_SERVER_PATH/confi
 ln -sf $SRC_PATH/config.d/rocksdb.xml $DEST_SERVER_PATH/config.d/
 
 # Not supported with fasttest.
-if [ "${DEST_SERVER_PATH}" = "/etc/clickhouse-server" ] || [ "$FAST_TEST" != "1" ]; then
-    ln -sf "$SRC_PATH/config.d/legacy_geobase.xml" "$DEST_SERVER_PATH/config.d/"
+if [ "${DEST_SERVER_PATH}" = "/etc/clickhouse-server" ]
+then
+    ln -sf $SRC_PATH/config.d/legacy_geobase.xml $DEST_SERVER_PATH/config.d/
 fi
 
 ln -sf $SRC_PATH/users.d/log_queries.xml $DEST_SERVER_PATH/users.d/
@@ -198,10 +185,8 @@ elif [[ "$USE_AZURE_STORAGE_FOR_MERGE_TREE" == "1" ]]; then
     ln -sf $SRC_PATH/config.d/azure_storage_policy_by_default.xml $DEST_SERVER_PATH/config.d/
 fi
 
-if [[ "$EXPORT_S3_STORAGE_POLICIES" == "1" ]]; then
-    if [[ "$NO_AZURE" != "1" ]]; then
+if [[ -n "$EXPORT_S3_STORAGE_POLICIES" ]]; then
     ln -sf $SRC_PATH/config.d/azure_storage_conf.xml $DEST_SERVER_PATH/config.d/
-    fi
     ln -sf $SRC_PATH/config.d/storage_conf.xml $DEST_SERVER_PATH/config.d/
     ln -sf $SRC_PATH/config.d/storage_conf_02944.xml $DEST_SERVER_PATH/config.d/
     ln -sf $SRC_PATH/config.d/storage_conf_02963.xml $DEST_SERVER_PATH/config.d/
@@ -210,7 +195,7 @@ if [[ "$EXPORT_S3_STORAGE_POLICIES" == "1" ]]; then
     ln -sf $SRC_PATH/users.d/s3_cache_new.xml $DEST_SERVER_PATH/users.d/
 fi
 
-if [[ "$USE_DATABASE_REPLICATED" == "1" ]]; then
+if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
     ln -sf $SRC_PATH/users.d/database_replicated.xml $DEST_SERVER_PATH/users.d/
     ln -sf $SRC_PATH/config.d/database_replicated.xml $DEST_SERVER_PATH/config.d/
     rm /etc/clickhouse-server/config.d/zookeeper.xml
@@ -35,8 +35,8 @@ def test_disk_selection(start_cluster):
 
     node.query(query, settings=settings)
     assert node.contains_in_log(
-        "Writing part of aggregation data into temporary file /disk1/"
+        "Writing part of aggregation data into temporary file.*/disk1/"
     )
     assert node.contains_in_log(
-        "Writing part of aggregation data into temporary file /disk2/"
+        "Writing part of aggregation data into temporary file.*/disk2/"
     )
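The two assertions above switch from a literal path to a pattern containing .*; that only helps if contains_in_log treats its argument as a regular expression, which is assumed here. A small illustration with a hypothetical log line that has extra path components before the disk name:

import re

# Hypothetical log line; the concrete path layout is assumed for illustration only.
log_line = (
    "Writing part of aggregation data into temporary file "
    "/var/lib/clickhouse/disks/disk1/tmp/tmpfile"
)

pattern = "Writing part of aggregation data into temporary file.*/disk1/"
assert re.search(pattern, log_line)              # the relaxed pattern still matches
assert "temporary file /disk1/" not in log_line  # the old literal substring would not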
@@ -163,7 +163,6 @@ Filter column: notEquals(__table1.y, 2_UInt8)
 > filter is pushed down before CreatingSets
 CreatingSets
   Filter
-  Filter
 1
 3
 > one condition of filter is pushed down before LEFT JOIN
Some files were not shown because too many files have changed in this diff.