Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-21 07:01:59 +00:00)

Merge branch 'master' into complex-merge-selector
Commit 7e3ac97a85
.gitmodules (vendored): 6 changed lines
@@ -227,12 +227,6 @@
[submodule "contrib/minizip-ng"]
    path = contrib/minizip-ng
    url = https://github.com/zlib-ng/minizip-ng
[submodule "contrib/qpl"]
    path = contrib/qpl
    url = https://github.com/intel/qpl
[submodule "contrib/idxd-config"]
    path = contrib/idxd-config
    url = https://github.com/intel/idxd-config
[submodule "contrib/QAT-ZSTD-Plugin"]
    path = contrib/QAT-ZSTD-Plugin
    url = https://github.com/intel/QAT-ZSTD-Plugin
@@ -369,11 +369,15 @@ namespace PackedZeroTraits
{
    template <typename Second, template <typename, typename> class PackedPairNoInit>
    inline bool check(const PackedPairNoInit<StringRef, Second> p)
    { return 0 == p.key.size; }
    {
        return 0 == p.key.size;
    }

    template <typename Second, template <typename, typename> class PackedPairNoInit>
    inline void set(PackedPairNoInit<StringRef, Second> & p)
    { p.key.size = 0; }
    {
        p.key.size = 0;
    }
}
@@ -952,6 +952,8 @@ private:
    static std::pair<LoggerMapIterator, bool> add(Logger * pLogger);
    static std::optional<LoggerMapIterator> find(const std::string & name);
    static Logger * findRawPtr(const std::string & name);
    void unsafeSetChannel(Channel * pChannel);
    Channel* unsafeGetChannel() const;

    Logger();
    Logger(const Logger &);
@@ -61,6 +61,13 @@ Logger::~Logger()


void Logger::setChannel(Channel* pChannel)
{
    std::lock_guard<std::mutex> lock(getLoggerMutex());
    unsafeSetChannel(pChannel);
}


void Logger::unsafeSetChannel(Channel* pChannel)
{
    if (_pChannel) _pChannel->release();
    _pChannel = pChannel;
@@ -69,6 +76,14 @@ void Logger::setChannel(Channel* pChannel)


Channel* Logger::getChannel() const
{
    std::lock_guard<std::mutex> lock(getLoggerMutex());

    return unsafeGetChannel();
}


Channel* Logger::unsafeGetChannel() const
{
    return _pChannel;
}
@@ -89,7 +104,7 @@ void Logger::setLevel(const std::string& level)
void Logger::setProperty(const std::string& name, const std::string& value)
{
    if (name == "channel")
        setChannel(LoggingRegistry::defaultRegistry().channelForName(value));
        unsafeSetChannel(LoggingRegistry::defaultRegistry().channelForName(value));
    else if (name == "level")
        setLevel(value);
    else
@@ -160,7 +175,7 @@ void Logger::setChannel(const std::string& name, Channel* pChannel)
        if (len == 0 ||
            (it.first.compare(0, len, name) == 0 && (it.first.length() == len || it.first[len] == '.')))
        {
            it.second.logger->setChannel(pChannel);
            it.second.logger->unsafeSetChannel(pChannel);
        }
    }
}
@@ -393,7 +408,7 @@ std::pair<Logger::LoggerMapIterator, bool> Logger::unsafeGet(const std::string&
    else
    {
        Logger& par = parent(name);
        logger = new Logger(name, par.getChannel(), par.getLevel());
        logger = new Logger(name, par.unsafeGetChannel(), par.getLevel());
    }

    return add(logger);
ci/README.md (new file): 1 added line
@@ -0,0 +1 @@
Note: This directory is under active development for CI improvements and is not currently in use within the scope of the existing CI pipeline.
ci/docker/fasttest/Dockerfile (new file): 105 added lines
@@ -0,0 +1,105 @@
# docker build -t clickhouse/fasttest .
FROM ubuntu:22.04

# ARG for quick switch to a given ubuntu mirror
ARG apt_archive="http://archive.ubuntu.com"
RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list

ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=18

RUN apt-get update \
    && apt-get install \
        apt-transport-https \
        apt-utils \
        ca-certificates \
        curl \
        gnupg \
        lsb-release \
        wget \
        git \
        --yes --no-install-recommends --verbose-versions \
    && export LLVM_PUBKEY_HASH="bda960a8da687a275a2078d43c111d66b1c6a893a3275271beedf266c1ff4a0cdecb429c7a5cccf9f486ea7aa43fd27f" \
    && wget -nv -O /tmp/llvm-snapshot.gpg.key https://apt.llvm.org/llvm-snapshot.gpg.key \
    && echo "${LLVM_PUBKEY_HASH} /tmp/llvm-snapshot.gpg.key" | sha384sum -c \
    && apt-key add /tmp/llvm-snapshot.gpg.key \
    && export CODENAME="$(lsb_release --codename --short | tr 'A-Z' 'a-z')" \
    && echo "deb https://apt.llvm.org/${CODENAME}/ llvm-toolchain-${CODENAME}-${LLVM_VERSION} main" >> \
        /etc/apt/sources.list \
    && apt-get update \
    && apt-get install --yes --no-install-recommends --verbose-versions llvm-${LLVM_VERSION} \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*

# moreutils - provides ts for FT
# expect, bzip2 - required by FT
# bsdmainutils - provides hexdump for FT

RUN apt-get update \
    && apt-get install \
        clang-${LLVM_VERSION} \
        cmake \
        libclang-${LLVM_VERSION}-dev \
        libclang-rt-${LLVM_VERSION}-dev \
        lld-${LLVM_VERSION} \
        llvm-${LLVM_VERSION}-dev \
        lsof \
        ninja-build \
        python3 \
        python3-pip \
        zstd \
        moreutils \
        expect \
        bsdmainutils \
        pv \
        jq \
        bzip2 \
        --yes --no-install-recommends \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*

COPY --from=clickhouse/cctools:0d6b90a7a490 /opt/gdb /opt/gdb
# Give suid to gdb to grant it attach permissions
RUN chmod u+s /opt/gdb/bin/gdb
ENV PATH="/opt/gdb/bin:${PATH}"

# This symlink is required by gcc to find the lld linker
RUN ln -s /usr/bin/lld-${LLVM_VERSION} /usr/bin/ld.lld
# FIXME: workaround for "The imported target "merge-fdata" references the file" error
# https://salsa.debian.org/pkg-llvm-team/llvm-toolchain/-/commit/992e52c0b156a5ba9c6a8a54f8c4857ddd3d371d
RUN sed -i '/_IMPORT_CHECK_FILES_FOR_\(mlir-\|llvm-bolt\|merge-fdata\|MLIR\)/ {s|^|#|}' /usr/lib/llvm-${LLVM_VERSION}/lib/cmake/llvm/LLVMExports-*.cmake

# LLVM changes paths for compiler-rt libraries. For some reason clang-18.1.8 cannot pick up the libraries from the default install path.
# It's a very dirty workaround; better to build the compiler and LLVM ourselves and use them. Details: https://github.com/llvm/llvm-project/issues/95792
RUN test ! -d /usr/lib/llvm-18/lib/clang/18/lib/x86_64-pc-linux-gnu || ln -s /usr/lib/llvm-18/lib/clang/18/lib/x86_64-pc-linux-gnu /usr/lib/llvm-18/lib/clang/18/lib/x86_64-unknown-linux-gnu

ARG TARGETARCH
ARG SCCACHE_VERSION=v0.7.7
ENV SCCACHE_IGNORE_SERVER_IO_ERROR=1
# sccache requires a value for the region. So by default we use The Default Region
ENV SCCACHE_REGION=us-east-1
RUN arch=${TARGETARCH} \
    && case $arch in \
        amd64) rarch=x86_64 ;; \
        arm64) rarch=aarch64 ;; \
    esac \
    && curl -Ls "https://github.com/mozilla/sccache/releases/download/$SCCACHE_VERSION/sccache-$SCCACHE_VERSION-$rarch-unknown-linux-musl.tar.gz" | \
        tar xz -C /tmp \
    && mv "/tmp/sccache-$SCCACHE_VERSION-$rarch-unknown-linux-musl/sccache" /usr/bin \
    && rm "/tmp/sccache-$SCCACHE_VERSION-$rarch-unknown-linux-musl" -r

COPY requirements.txt /
RUN pip3 install --no-cache-dir -r /requirements.txt

# chmod 777 to make the container user independent
RUN mkdir -p /var/lib/clickhouse \
    && chmod 777 /var/lib/clickhouse

ENV TZ=Europe/Amsterdam
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone

RUN groupadd --system --gid 1000 clickhouse \
    && useradd --system --gid 1000 --uid 1000 -m clickhouse \
    && mkdir -p /.cache/sccache && chmod 777 /.cache/sccache

ENV PYTHONPATH="/wd"
ENV PYTHONUNBUFFERED=1
ci/docker/fasttest/requirements.txt (new file): 6 added lines
@@ -0,0 +1,6 @@
Jinja2==3.1.3
numpy==1.26.4
requests==2.32.3
pandas==1.5.3
scipy==1.12.0
#https://clickhouse-builds.s3.amazonaws.com/packages/praktika-0.1-py3-none-any.whl
ci/docker/style-test/requirements.txt (new file): 5 added lines
@@ -0,0 +1,5 @@
requests==2.32.3
yamllint==1.26.3
codespell==2.2.1
#use praktika from CH repo
#https://clickhouse-builds.s3.amazonaws.com/packages/praktika-0.1-py3-none-any.whl
@@ -2,7 +2,6 @@ import math
import multiprocessing
import os
import re
import sys
from concurrent.futures import ProcessPoolExecutor
from pathlib import Path

@@ -51,25 +50,6 @@ def run_check_concurrent(check_name, check_function, files, nproc=NPROC):
    return result


def run_simple_check(check_name, check_function, **kwargs):
    stop_watch = Utils.Stopwatch()

    error = check_function(**kwargs)

    result = Result(
        name=check_name,
        status=Result.Status.SUCCESS if not error else Result.Status.FAILED,
        start_time=stop_watch.start_time,
        duration=stop_watch.duration,
        info=error,
    )
    return result


def run_check(check_name, check_function, files):
    return run_check_concurrent(check_name, check_function, files, nproc=1)


def check_duplicate_includes(file_path):
    includes = []
    with open(file_path, "r", encoding="utf-8", errors="ignore") as f:
@@ -117,7 +97,7 @@ def check_xmllint(file_paths):
def check_functional_test_cases(files):
    """
    Queries with event_date should have yesterday() not today()
    NOTE: it is not that accuate, but at least something.
    NOTE: it is not that accurate, but at least something.
    """

    patterns = [
@@ -345,66 +325,58 @@ if __name__ == "__main__":
        )
    )
    results.append(
        run_check(
            check_name="Check Tests Numbers",
            check_function=check_gaps_in_tests_numbers,
            files=functional_test_files,
        Result.create_from_command_execution(
            name="Check Tests Numbers",
            command=check_gaps_in_tests_numbers,
            command_args=[functional_test_files],
        )
    )
    results.append(
        run_simple_check(
            check_name="Check Broken Symlinks",
            check_function=check_broken_links,
            path="./",
            exclude_paths=["contrib/", "metadata/", "programs/server/data"],
        Result.create_from_command_execution(
            name="Check Broken Symlinks",
            command=check_broken_links,
            command_kwargs={
                "path": "./",
                "exclude_paths": ["contrib/", "metadata/", "programs/server/data"],
            },
        )
    )
    results.append(
        run_simple_check(
            check_name="Check CPP code",
            check_function=check_cpp_code,
        Result.create_from_command_execution(
            name="Check CPP code",
            command=check_cpp_code,
        )
    )
    results.append(
        run_simple_check(
            check_name="Check Submodules",
            check_function=check_repo_submodules,
        Result.create_from_command_execution(
            name="Check Submodules",
            command=check_repo_submodules,
        )
    )
    results.append(
        run_check(
            check_name="Check File Names",
            check_function=check_file_names,
            files=all_files,
        Result.create_from_command_execution(
            name="Check File Names",
            command=check_file_names,
            command_args=[all_files],
        )
    )
    results.append(
        run_simple_check(
            check_name="Check Many Different Things",
            check_function=check_other,
        Result.create_from_command_execution(
            name="Check Many Different Things",
            command=check_other,
        )
    )
    results.append(
        run_simple_check(
            check_name="Check Codespell",
            check_function=check_codespell,
        Result.create_from_command_execution(
            name="Check Codespell",
            command=check_codespell,
        )
    )
    results.append(
        run_simple_check(
            check_name="Check Aspell",
            check_function=check_aspell,
        Result.create_from_command_execution(
            name="Check Aspell",
            command=check_aspell,
        )
    )

    res = Result.create_from(results=results, stopwatch=stop_watch).dump()

    if not res.is_ok():
        print("Style check: failed")
        for result in results:
            if not result.is_ok():
                print("Failed check:")
                print(" | ", result)
        sys.exit(1)
    else:
        print("Style check: ok")
    Result.create_from(results=results, stopwatch=stop_watch).finish_job_accordingly()
ci/jobs/fast_test.py (new file): 329 added lines
@@ -0,0 +1,329 @@
|
||||
import threading
|
||||
from pathlib import Path
|
||||
|
||||
from ci_v2.jobs.scripts.functional_tests_results import FTResultsProcessor
|
||||
from praktika.environment import Environment
|
||||
from praktika.result import Result
|
||||
from praktika.settings import Settings
|
||||
from praktika.utils import MetaClasses, Shell, Utils
|
||||
|
||||
|
||||
class ClickHouseProc:
|
||||
def __init__(self):
|
||||
self.ch_config_dir = f"{Settings.TEMP_DIR}/etc/clickhouse-server"
|
||||
self.pid_file = f"{self.ch_config_dir}/clickhouse-server.pid"
|
||||
self.config_file = f"{self.ch_config_dir}/config.xml"
|
||||
self.user_files_path = f"{self.ch_config_dir}/user_files"
|
||||
self.test_output_file = f"{Settings.OUTPUT_DIR}/test_result.txt"
|
||||
self.command = f"clickhouse-server --config-file {self.config_file} --pid-file {self.pid_file} -- --path {self.ch_config_dir} --user_files_path {self.user_files_path} --top_level_domains_path {self.ch_config_dir}/top_level_domains --keeper_server.storage_path {self.ch_config_dir}/coordination"
|
||||
self.proc = None
|
||||
self.pid = 0
|
||||
nproc = int(Utils.cpu_count() / 2)
|
||||
self.fast_test_command = f"clickhouse-test --hung-check --fast-tests-only --no-random-settings --no-random-merge-tree-settings --no-long --testname --shard --zookeeper --check-zookeeper-session --order random --print-time --report-logs-stats --jobs {nproc} -- '' | ts '%Y-%m-%d %H:%M:%S' \
|
||||
| tee -a \"{self.test_output_file}\""
|
||||
# TODO: store info in case of failure
|
||||
self.info = ""
|
||||
self.info_file = ""
|
||||
|
||||
Utils.set_env("CLICKHOUSE_CONFIG_DIR", self.ch_config_dir)
|
||||
Utils.set_env("CLICKHOUSE_CONFIG", self.config_file)
|
||||
Utils.set_env("CLICKHOUSE_USER_FILES", self.user_files_path)
|
||||
Utils.set_env("CLICKHOUSE_SCHEMA_FILES", f"{self.ch_config_dir}/format_schemas")
|
||||
|
||||
def start(self):
|
||||
print("Starting ClickHouse server")
|
||||
Shell.check(f"rm {self.pid_file}")
|
||||
|
||||
def run_clickhouse():
|
||||
self.proc = Shell.run_async(
|
||||
self.command, verbose=True, suppress_output=True
|
||||
)
|
||||
|
||||
thread = threading.Thread(target=run_clickhouse)
|
||||
thread.daemon = True # Allow program to exit even if thread is still running
|
||||
thread.start()
|
||||
|
||||
# self.proc = Shell.run_async(self.command, verbose=True)
|
||||
|
||||
started = False
|
||||
try:
|
||||
for _ in range(5):
|
||||
pid = Shell.get_output(f"cat {self.pid_file}").strip()
|
||||
if not pid:
|
||||
Utils.sleep(1)
|
||||
continue
|
||||
started = True
|
||||
print(f"Got pid from fs [{pid}]")
|
||||
_ = int(pid)
|
||||
break
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
if not started:
|
||||
stdout = self.proc.stdout.read().strip() if self.proc.stdout else ""
|
||||
stderr = self.proc.stderr.read().strip() if self.proc.stderr else ""
|
||||
Utils.print_formatted_error("Failed to start ClickHouse", stdout, stderr)
|
||||
return False
|
||||
|
||||
print(f"ClickHouse server started successfully, pid [{pid}]")
|
||||
return True
|
||||
|
||||
def wait_ready(self):
|
||||
res, out, err = 0, "", ""
|
||||
attempts = 30
|
||||
delay = 2
|
||||
for attempt in range(attempts):
|
||||
res, out, err = Shell.get_res_stdout_stderr(
|
||||
'clickhouse-client --query "select 1"', verbose=True
|
||||
)
|
||||
if out.strip() == "1":
|
||||
print("Server ready")
|
||||
break
|
||||
else:
|
||||
print(f"Server not ready, wait")
|
||||
Utils.sleep(delay)
|
||||
else:
|
||||
Utils.print_formatted_error(
|
||||
f"Server not ready after [{attempts*delay}s]", out, err
|
||||
)
|
||||
return False
|
||||
return True
|
||||
|
||||
def run_fast_test(self):
|
||||
if Path(self.test_output_file).exists():
|
||||
Path(self.test_output_file).unlink()
|
||||
exit_code = Shell.run(self.fast_test_command)
|
||||
return exit_code == 0
|
||||
|
||||
def terminate(self):
|
||||
print("Terminate ClickHouse process")
|
||||
timeout = 10
|
||||
if self.proc:
|
||||
Utils.terminate_process_group(self.proc.pid)
|
||||
|
||||
self.proc.terminate()
|
||||
try:
|
||||
self.proc.wait(timeout=10)
|
||||
print(f"Process {self.proc.pid} terminated gracefully.")
|
||||
except Exception:
|
||||
print(
|
||||
f"Process {self.proc.pid} did not terminate in {timeout} seconds, killing it..."
|
||||
)
|
||||
Utils.terminate_process_group(self.proc.pid, force=True)
|
||||
self.proc.wait() # Wait for the process to be fully killed
|
||||
print(f"Process {self.proc} was killed.")
|
||||
|
||||
|
||||
def clone_submodules():
|
||||
submodules_to_update = [
|
||||
"contrib/sysroot",
|
||||
"contrib/magic_enum",
|
||||
"contrib/abseil-cpp",
|
||||
"contrib/boost",
|
||||
"contrib/zlib-ng",
|
||||
"contrib/libxml2",
|
||||
"contrib/libunwind",
|
||||
"contrib/fmtlib",
|
||||
"contrib/aklomp-base64",
|
||||
"contrib/cctz",
|
||||
"contrib/libcpuid",
|
||||
"contrib/libdivide",
|
||||
"contrib/double-conversion",
|
||||
"contrib/llvm-project",
|
||||
"contrib/lz4",
|
||||
"contrib/zstd",
|
||||
"contrib/fastops",
|
||||
"contrib/rapidjson",
|
||||
"contrib/re2",
|
||||
"contrib/sparsehash-c11",
|
||||
"contrib/croaring",
|
||||
"contrib/miniselect",
|
||||
"contrib/xz",
|
||||
"contrib/dragonbox",
|
||||
"contrib/fast_float",
|
||||
"contrib/NuRaft",
|
||||
"contrib/jemalloc",
|
||||
"contrib/replxx",
|
||||
"contrib/wyhash",
|
||||
"contrib/c-ares",
|
||||
"contrib/morton-nd",
|
||||
"contrib/xxHash",
|
||||
"contrib/expected",
|
||||
"contrib/simdjson",
|
||||
"contrib/liburing",
|
||||
"contrib/libfiu",
|
||||
"contrib/incbin",
|
||||
"contrib/yaml-cpp",
|
||||
]
|
||||
|
||||
res = Shell.check("git submodule sync", verbose=True, strict=True)
|
||||
res = res and Shell.check("git submodule init", verbose=True, strict=True)
|
||||
res = res and Shell.check(
|
||||
command=f"xargs --max-procs={min([Utils.cpu_count(), 20])} --null --no-run-if-empty --max-args=1 git submodule update --depth 1 --single-branch",
|
||||
stdin_str="\0".join(submodules_to_update) + "\0",
|
||||
timeout=120,
|
||||
retries=3,
|
||||
verbose=True,
|
||||
)
|
||||
res = res and Shell.check("git submodule foreach git reset --hard", verbose=True)
|
||||
res = res and Shell.check("git submodule foreach git checkout @ -f", verbose=True)
|
||||
res = res and Shell.check("git submodule foreach git clean -xfd", verbose=True)
|
||||
return res
|
||||
|
||||
|
||||
def update_path_ch_config(config_file_path=""):
|
||||
print("Updating path in clickhouse config")
|
||||
config_file_path = (
|
||||
config_file_path or f"{Settings.TEMP_DIR}/etc/clickhouse-server/config.xml"
|
||||
)
|
||||
ssl_config_file_path = (
|
||||
f"{Settings.TEMP_DIR}/etc/clickhouse-server/config.d/ssl_certs.xml"
|
||||
)
|
||||
try:
|
||||
with open(config_file_path, "r", encoding="utf-8") as file:
|
||||
content = file.read()
|
||||
|
||||
with open(ssl_config_file_path, "r", encoding="utf-8") as file:
|
||||
ssl_config_content = file.read()
|
||||
content = content.replace(">/var/", f">{Settings.TEMP_DIR}/var/")
|
||||
content = content.replace(">/etc/", f">{Settings.TEMP_DIR}/etc/")
|
||||
ssl_config_content = ssl_config_content.replace(
|
||||
">/etc/", f">{Settings.TEMP_DIR}/etc/"
|
||||
)
|
||||
with open(config_file_path, "w", encoding="utf-8") as file:
|
||||
file.write(content)
|
||||
with open(ssl_config_file_path, "w", encoding="utf-8") as file:
|
||||
file.write(ssl_config_content)
|
||||
except Exception as e:
|
||||
print(f"ERROR: failed to update config, exception: {e}")
|
||||
return False
|
||||
return True
|
||||
|
||||
|
||||
class JobStages(metaclass=MetaClasses.WithIter):
|
||||
CHECKOUT_SUBMODULES = "checkout"
|
||||
CMAKE = "cmake"
|
||||
BUILD = "build"
|
||||
CONFIG = "config"
|
||||
TEST = "test"
|
||||
|
||||
|
||||
def main():
|
||||
stop_watch = Utils.Stopwatch()
|
||||
|
||||
stages = list(JobStages)
|
||||
stage = Environment.LOCAL_RUN_PARAM or JobStages.CHECKOUT_SUBMODULES
|
||||
if stage:
|
||||
assert stage in JobStages, f"--param must be one of [{list(JobStages)}]"
|
||||
print(f"Job will start from stage [{stage}]")
|
||||
while stage in stages:
|
||||
stages.pop(0)
|
||||
stages.insert(0, stage)
|
||||
|
||||
current_directory = Utils.cwd()
|
||||
build_dir = f"{Settings.TEMP_DIR}/build"
|
||||
|
||||
Utils.add_to_PATH(f"{build_dir}/programs:{current_directory}/tests")
|
||||
|
||||
res = True
|
||||
results = []
|
||||
|
||||
if res and JobStages.CHECKOUT_SUBMODULES in stages:
|
||||
Shell.check(f"rm -rf {build_dir} && mkdir -p {build_dir}")
|
||||
results.append(
|
||||
Result.create_from_command_execution(
|
||||
name="Checkout Submodules for Minimal Build",
|
||||
command=clone_submodules,
|
||||
)
|
||||
)
|
||||
res = results[-1].is_ok()
|
||||
|
||||
if res and JobStages.CMAKE in stages:
|
||||
results.append(
|
||||
Result.create_from_command_execution(
|
||||
name="Cmake configuration",
|
||||
command=f"cmake {current_directory} -DCMAKE_CXX_COMPILER=clang++-18 -DCMAKE_C_COMPILER=clang-18 \
|
||||
-DCMAKE_TOOLCHAIN_FILE={current_directory}/cmake/linux/toolchain-x86_64-musl.cmake -DENABLE_LIBRARIES=0 \
|
||||
-DENABLE_TESTS=0 -DENABLE_UTILS=0 -DENABLE_THINLTO=0 -DENABLE_NURAFT=1 -DENABLE_SIMDJSON=1 \
|
||||
-DENABLE_JEMALLOC=1 -DENABLE_LIBURING=1 -DENABLE_YAML_CPP=1 -DCOMPILER_CACHE=sccache",
|
||||
workdir=build_dir,
|
||||
with_log=True,
|
||||
)
|
||||
)
|
||||
res = results[-1].is_ok()
|
||||
|
||||
if res and JobStages.BUILD in stages:
|
||||
Shell.check("sccache --show-stats")
|
||||
results.append(
|
||||
Result.create_from_command_execution(
|
||||
name="Build ClickHouse",
|
||||
command="ninja clickhouse-bundle clickhouse-stripped",
|
||||
workdir=build_dir,
|
||||
with_log=True,
|
||||
)
|
||||
)
|
||||
Shell.check("sccache --show-stats")
|
||||
res = results[-1].is_ok()
|
||||
|
||||
if res and JobStages.BUILD in stages:
|
||||
commands = [
|
||||
f"mkdir -p {Settings.OUTPUT_DIR}/binaries",
|
||||
f"cp ./programs/clickhouse {Settings.OUTPUT_DIR}/binaries/clickhouse",
|
||||
f"zstd --threads=0 --force programs/clickhouse-stripped -o {Settings.OUTPUT_DIR}/binaries/clickhouse-stripped.zst",
|
||||
"sccache --show-stats",
|
||||
"clickhouse-client --version",
|
||||
"clickhouse-test --help",
|
||||
]
|
||||
results.append(
|
||||
Result.create_from_command_execution(
|
||||
name="Check and Compress binary",
|
||||
command=commands,
|
||||
workdir=build_dir,
|
||||
with_log=True,
|
||||
)
|
||||
)
|
||||
res = results[-1].is_ok()
|
||||
|
||||
if res and JobStages.CONFIG in stages:
|
||||
commands = [
|
||||
f"rm -rf {Settings.TEMP_DIR}/etc/ && mkdir -p {Settings.TEMP_DIR}/etc/clickhouse-client {Settings.TEMP_DIR}/etc/clickhouse-server",
|
||||
f"cp {current_directory}/programs/server/config.xml {current_directory}/programs/server/users.xml {Settings.TEMP_DIR}/etc/clickhouse-server/",
|
||||
f"{current_directory}/tests/config/install.sh {Settings.TEMP_DIR}/etc/clickhouse-server {Settings.TEMP_DIR}/etc/clickhouse-client",
|
||||
# f"cp -a {current_directory}/programs/server/config.d/log_to_console.xml {Settings.TEMP_DIR}/etc/clickhouse-server/config.d/",
|
||||
f"rm -f {Settings.TEMP_DIR}/etc/clickhouse-server/config.d/secure_ports.xml",
|
||||
update_path_ch_config,
|
||||
]
|
||||
results.append(
|
||||
Result.create_from_command_execution(
|
||||
name="Install ClickHouse Config",
|
||||
command=commands,
|
||||
with_log=True,
|
||||
)
|
||||
)
|
||||
res = results[-1].is_ok()
|
||||
|
||||
CH = ClickHouseProc()
|
||||
if res and JobStages.TEST in stages:
|
||||
stop_watch_ = Utils.Stopwatch()
|
||||
step_name = "Start ClickHouse Server"
|
||||
print(step_name)
|
||||
res = CH.start()
|
||||
res = res and CH.wait_ready()
|
||||
results.append(
|
||||
Result.create_from(name=step_name, status=res, stopwatch=stop_watch_)
|
||||
)
|
||||
|
||||
if res and JobStages.TEST in stages:
|
||||
step_name = "Tests"
|
||||
print(step_name)
|
||||
res = res and CH.run_fast_test()
|
||||
if res:
|
||||
results.append(FTResultsProcessor(wd=Settings.OUTPUT_DIR).run())
|
||||
|
||||
CH.terminate()
|
||||
|
||||
Result.create_from(results=results, stopwatch=stop_watch).finish_job_accordingly()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
@ -14,7 +14,8 @@
|
||||
|
||||
LC_ALL="en_US.UTF-8"
|
||||
ROOT_PATH="."
|
||||
EXCLUDE_DIRS='build/|integration/|widechar_width/|glibc-compatibility/|poco/|memcpy/|consistent-hashing|benchmark|tests/.*.cpp|utils/keeper-bench/example.yaml'
|
||||
EXCLUDE='build/|integration/|widechar_width/|glibc-compatibility/|poco/|memcpy/|consistent-hashing|benchmark|tests/.*.cpp|utils/keeper-bench/example.yaml'
|
||||
EXCLUDE_DOCS='Settings\.cpp|FormatFactorySettingsDeclaration\.h'
|
||||
|
||||
# From [1]:
|
||||
# But since array_to_string_internal() in array.c still loops over array
|
||||
@ -31,7 +32,8 @@ function in_array()
|
||||
}
|
||||
|
||||
find $ROOT_PATH/{src,base,programs,utils} -name '*.h' -or -name '*.cpp' 2>/dev/null |
|
||||
grep -vP $EXCLUDE_DIRS |
|
||||
grep -vP $EXCLUDE |
|
||||
grep -vP $EXCLUDE_DOCS |
|
||||
xargs grep $@ -P '((class|struct|namespace|enum|if|for|while|else|throw|switch).*|\)(\s*const)?(\s*override)?\s*)\{$|\s$|^ {1,3}[^\* ]\S|\t|^\s*(if|else if|if constexpr|else if constexpr|for|while|catch|switch)\(|\( [^\s\\]|\S \)' |
|
||||
# a curly brace not in a new line, but not for the case of C++11 init or agg. initialization | trailing whitespace | number of ws not a multiple of 4, but not in the case of comment continuation | missing whitespace after for/if/while... before opening brace | whitespaces inside braces
|
||||
grep -v -P '(//|:\s+\*|\$\(\()| \)"'
|
||||
@ -39,12 +41,12 @@ find $ROOT_PATH/{src,base,programs,utils} -name '*.h' -or -name '*.cpp' 2>/dev/n
|
||||
|
||||
# Tabs
|
||||
find $ROOT_PATH/{src,base,programs,utils} -name '*.h' -or -name '*.cpp' 2>/dev/null |
|
||||
grep -vP $EXCLUDE_DIRS |
|
||||
xargs grep $@ -F $'\t'
|
||||
grep -vP $EXCLUDE |
|
||||
xargs grep $@ -F $'\t' && echo '^ tabs are not allowed'
|
||||
|
||||
# // namespace comments are unneeded
|
||||
find $ROOT_PATH/{src,base,programs,utils} -name '*.h' -or -name '*.cpp' 2>/dev/null |
|
||||
grep -vP $EXCLUDE_DIRS |
|
||||
grep -vP $EXCLUDE |
|
||||
xargs grep $@ -P '}\s*//+\s*namespace\s*'
|
||||
|
||||
# Broken symlinks
|
||||
@ -52,26 +54,26 @@ find -L $ROOT_PATH -type l 2>/dev/null | grep -v contrib && echo "^ Broken symli
|
||||
|
||||
# Duplicated or incorrect setting declarations
|
||||
SETTINGS_FILE=$(mktemp)
|
||||
cat $ROOT_PATH/src/Core/Settings.cpp $ROOT_PATH/src/Core/FormatFactorySettingsDeclaration.h | grep "M(" | awk '{print substr($2, 0, length($2) - 1) " " substr($1, 3, length($1) - 3) " SettingsDeclaration" }' > ${SETTINGS_FILE}
|
||||
find $ROOT_PATH/{src,base,programs,utils} -name '*.h' -or -name '*.cpp' | xargs grep "extern const Settings" -T | awk '{print substr($5, 0, length($5) -1) " " substr($4, 9) " " substr($1, 0, length($1) - 1)}' >> ${SETTINGS_FILE}
|
||||
ALL_DECLARATION_FILES="
|
||||
$ROOT_PATH/src/Core/Settings.cpp
|
||||
$ROOT_PATH/src/Storages/MergeTree/MergeTreeSettings.cpp
|
||||
$ROOT_PATH/src/Core/FormatFactorySettingsDeclaration.h"
|
||||
|
||||
# Duplicate extern declarations for settings
|
||||
awk '{if (seen[$0]++) print $3 " -> " $1 ;}' ${SETTINGS_FILE} | while read line;
|
||||
cat $ROOT_PATH/src/Core/Settings.cpp $ROOT_PATH/src/Core/FormatFactorySettingsDeclaration.h | grep "M(" | awk '{print substr($2, 0, length($2) - 1) " Settings" substr($1, 3, length($1) - 3) " SettingsDeclaration" }' | sort | uniq > ${SETTINGS_FILE}
|
||||
cat $ROOT_PATH/src/Storages/MergeTree/MergeTreeSettings.cpp | grep "M(" | awk '{print substr($2, 0, length($2) - 1) " MergeTreeSettings" substr($1, 3, length($1) - 3) " SettingsDeclaration" }' | sort | uniq >> ${SETTINGS_FILE}
|
||||
|
||||
# Check that if there are duplicated settings (declared in different objects) they all have the same type (it's simpler to validate style with that assert)
|
||||
for setting in $(awk '{print $1 " " $2}' ${SETTINGS_FILE} | sed -e 's/MergeTreeSettings//g' -e 's/Settings//g' | sort | uniq | awk '{ print $1 }' | uniq -d);
|
||||
do
|
||||
echo "Found duplicated setting declaration in: $line"
|
||||
echo "# Found multiple definitions of setting ${setting} with different types: "
|
||||
grep --line-number " ${setting}," ${ALL_DECLARATION_FILES} | awk '{print " > " $0 }'
|
||||
done
|
||||
|
||||
# Incorrect declarations for settings
|
||||
for setting in $(awk '{print $1 " " $2}' ${SETTINGS_FILE} | sort | uniq | awk '{ print $1 }' | sort | uniq -d);
|
||||
do
|
||||
expected=$(grep "^$setting " ${SETTINGS_FILE} | grep SettingsDeclaration | awk '{ print $2 }')
|
||||
grep "^$setting " ${SETTINGS_FILE} | grep -v " $expected" | awk '{ print $3 " found setting " $1 " with type " $2 }' | while read line;
|
||||
do
|
||||
echo "In $line but it should be $expected"
|
||||
done
|
||||
done
|
||||
# We append all uses of extern found in implementation files to validate them in a single pass and avoid reading the same files over and over
|
||||
find $ROOT_PATH/{src,base,programs,utils} -name '*.h' -or -name '*.cpp' | xargs grep -e "^\s*extern const Settings" -e "^\s**extern const MergeTreeSettings" -T | awk '{print substr($5, 0, length($5) -1) " " $4 " " substr($1, 0, length($1) - 1)}' >> ${SETTINGS_FILE}
|
||||
|
||||
rm ${SETTINGS_FILE}
|
||||
# Duplicated or incorrect setting declarations
|
||||
bash $ROOT_PATH/utils/check-style/check-settings-style
|
||||
|
||||
# Unused/Undefined/Duplicates ErrorCodes/ProfileEvents/CurrentMetrics
|
||||
declare -A EXTERN_TYPES
|
||||
@ -91,12 +93,14 @@ EXTERN_TYPES_EXCLUDES=(
|
||||
ProfileEvents::Timer
|
||||
ProfileEvents::Type
|
||||
ProfileEvents::TypeEnum
|
||||
ProfileEvents::ValueType
|
||||
ProfileEvents::dumpToMapColumn
|
||||
ProfileEvents::getProfileEvents
|
||||
ProfileEvents::ThreadIdToCountersSnapshot
|
||||
ProfileEvents::LOCAL_NAME
|
||||
ProfileEvents::keeper_profile_events
|
||||
ProfileEvents::CountersIncrement
|
||||
ProfileEvents::size
|
||||
|
||||
CurrentMetrics::add
|
||||
CurrentMetrics::sub
|
||||
@ -108,6 +112,7 @@ EXTERN_TYPES_EXCLUDES=(
|
||||
CurrentMetrics::values
|
||||
CurrentMetrics::Value
|
||||
CurrentMetrics::keeper_metrics
|
||||
CurrentMetrics::size
|
||||
|
||||
ErrorCodes::ErrorCode
|
||||
ErrorCodes::getName
|
||||
@ -130,7 +135,7 @@ for extern_type in ${!EXTERN_TYPES[@]}; do
|
||||
# and this matches with zkutil::CreateMode
|
||||
grep -v -e 'src/Common/ZooKeeper/Types.h' -e 'src/Coordination/KeeperConstants.cpp'
|
||||
} | {
|
||||
grep -vP $EXCLUDE_DIRS | xargs grep -l -P "extern const $type_of_extern $allowed_chars"
|
||||
grep -vP $EXCLUDE | xargs grep -l -P "extern const $type_of_extern $allowed_chars"
|
||||
} | while read file; do
|
||||
grep -P "extern const $type_of_extern $allowed_chars;" $file | sed -r -e "s/^.*?extern const $type_of_extern ($allowed_chars);.*?$/\1/" | while read val; do
|
||||
if ! grep -q "$extern_type::$val" $file; then
|
||||
@ -148,7 +153,7 @@ for extern_type in ${!EXTERN_TYPES[@]}; do
|
||||
# sed -i -r "0,/(\s*)extern const $type_of_extern [$allowed_chars]+/s//\1extern const $type_of_extern $val;\n&/" $file || \
|
||||
# awk '{ print; if (ns == 1) { ns = 2 }; if (ns == 2) { ns = 0; print "namespace $extern_type\n{\n extern const $type_of_extern '$val';\n}" } }; /namespace DB/ { ns = 1; };' < $file > ${file}.tmp && mv ${file}.tmp $file )
|
||||
find $ROOT_PATH/{src,base,programs,utils} -name '*.h' -or -name '*.cpp' | {
|
||||
grep -vP $EXCLUDE_DIRS | xargs grep -l -P "$extern_type::$allowed_chars"
|
||||
grep -vP $EXCLUDE | xargs grep -l -P "$extern_type::$allowed_chars"
|
||||
} | while read file; do
|
||||
grep -P "$extern_type::$allowed_chars" $file | grep -P -v '^\s*//' | sed -r -e "s/^.*?$extern_type::($allowed_chars).*?$/\1/" | while read val; do
|
||||
if ! grep -q "extern const $type_of_extern $val" $file; then
|
||||
@ -161,7 +166,7 @@ for extern_type in ${!EXTERN_TYPES[@]}; do
|
||||
|
||||
# Duplicates
|
||||
find $ROOT_PATH/{src,base,programs,utils} -name '*.h' -or -name '*.cpp' | {
|
||||
grep -vP $EXCLUDE_DIRS | xargs grep -l -P "$extern_type::$allowed_chars"
|
||||
grep -vP $EXCLUDE | xargs grep -l -P "$extern_type::$allowed_chars"
|
||||
} | while read file; do
|
||||
grep -P "extern const $type_of_extern $allowed_chars;" $file | sort | uniq -c | grep -v -P ' +1 ' && echo "Duplicate $extern_type in file $file"
|
||||
done
|
||||
@ -169,32 +174,32 @@ done
|
||||
|
||||
# Three or more consecutive empty lines
|
||||
find $ROOT_PATH/{src,base,programs,utils} -name '*.h' -or -name '*.cpp' |
|
||||
grep -vP $EXCLUDE_DIRS |
|
||||
grep -vP $EXCLUDE |
|
||||
while read file; do awk '/^$/ { ++i; if (i > 2) { print "More than two consecutive empty lines in file '$file'" } } /./ { i = 0 }' $file; done
|
||||
|
||||
# Check that every header file has #pragma once in first line
|
||||
find $ROOT_PATH/{src,programs,utils} -name '*.h' |
|
||||
grep -vP $EXCLUDE_DIRS |
|
||||
grep -vP $EXCLUDE |
|
||||
while read file; do [[ $(head -n1 $file) != '#pragma once' ]] && echo "File $file must have '#pragma once' in first line"; done
|
||||
|
||||
# Too many exclamation marks
|
||||
find $ROOT_PATH/{src,base,programs,utils} -name '*.h' -or -name '*.cpp' |
|
||||
grep -vP $EXCLUDE_DIRS |
|
||||
grep -vP $EXCLUDE |
|
||||
xargs grep -F '!!!' | grep -P '.' && echo "Too many exclamation marks (looks dirty, unconfident)."
|
||||
|
||||
# Exclamation mark in a message
|
||||
find $ROOT_PATH/{src,base,programs,utils} -name '*.h' -or -name '*.cpp' |
|
||||
grep -vP $EXCLUDE_DIRS |
|
||||
grep -vP $EXCLUDE |
|
||||
xargs grep -F '!",' | grep -P '.' && echo "No need for an exclamation mark (looks dirty, unconfident)."
|
||||
|
||||
# Trailing whitespaces
|
||||
find $ROOT_PATH/{src,base,programs,utils} -name '*.h' -or -name '*.cpp' |
|
||||
grep -vP $EXCLUDE_DIRS |
|
||||
grep -vP $EXCLUDE |
|
||||
xargs grep -n -P ' $' | grep -n -P '.' && echo "^ Trailing whitespaces."
|
||||
|
||||
# Forbid stringstream because it's easy to use them incorrectly and hard to debug possible issues
|
||||
find $ROOT_PATH/{src,programs,utils} -name '*.h' -or -name '*.cpp' |
|
||||
grep -vP $EXCLUDE_DIRS |
|
||||
grep -vP $EXCLUDE |
|
||||
xargs grep -P 'std::[io]?stringstream' | grep -v "STYLE_CHECK_ALLOW_STD_STRING_STREAM" && echo "Use WriteBufferFromOwnString or ReadBufferFromString instead of std::stringstream"
|
||||
|
||||
# Forbid std::cerr/std::cout in src (fine in programs/utils)
|
||||
@ -204,6 +209,7 @@ std_cerr_cout_excludes=(
|
||||
_fuzzer
|
||||
# OK
|
||||
src/Common/ProgressIndication.cpp
|
||||
src/Common/ProgressTable.cpp
|
||||
# only under #ifdef DBMS_HASH_MAP_DEBUG_RESIZES, that is used only in tests
|
||||
src/Common/HashTable/HashTable.h
|
||||
# SensitiveDataMasker::printStats()
|
||||
@ -230,11 +236,10 @@ std_cerr_cout_excludes=(
|
||||
)
|
||||
sources_with_std_cerr_cout=( $(
|
||||
find $ROOT_PATH/{src,base} -name '*.h' -or -name '*.cpp' | \
|
||||
grep -vP $EXCLUDE_DIRS | \
|
||||
grep -vP $EXCLUDE | \
|
||||
grep -F -v $(printf -- "-e %s " "${std_cerr_cout_excludes[@]}") | \
|
||||
xargs grep -F --with-filename -e std::cerr -e std::cout | cut -d: -f1 | sort -u
|
||||
) )
|
||||
|
||||
# Exclude comments
|
||||
for src in "${sources_with_std_cerr_cout[@]}"; do
|
||||
# suppress stderr, since it may contain a warning for #pragma once in headers
|
||||
@ -279,23 +284,23 @@ fi
|
||||
|
||||
# Forbid std::filesystem::is_symlink and std::filesystem::read_symlink, because it's easy to use them incorrectly
|
||||
find $ROOT_PATH/{src,programs,utils} -name '*.h' -or -name '*.cpp' |
|
||||
grep -vP $EXCLUDE_DIRS |
|
||||
grep -vP $EXCLUDE |
|
||||
xargs grep -P '::(is|read)_symlink' | grep -v "STYLE_CHECK_ALLOW_STD_FS_SYMLINK" && echo "Use DB::FS::isSymlink and DB::FS::readSymlink instead"
|
||||
|
||||
# Forbid __builtin_unreachable(), because it's hard to debug when it becomes reachable
|
||||
find $ROOT_PATH/{src,programs,utils} -name '*.h' -or -name '*.cpp' |
|
||||
grep -vP $EXCLUDE_DIRS |
|
||||
grep -vP $EXCLUDE |
|
||||
xargs grep -P '__builtin_unreachable' && echo "Use UNREACHABLE() from defines.h instead"
|
||||
|
||||
# Forbid mt19937() and random_device() which are outdated and slow
|
||||
find $ROOT_PATH/{src,programs,utils} -name '*.h' -or -name '*.cpp' |
|
||||
grep -vP $EXCLUDE_DIRS |
|
||||
grep -vP $EXCLUDE |
|
||||
xargs grep -P '(std::mt19937|std::mersenne_twister_engine|std::random_device)' && echo "Use pcg64_fast (from pcg_random.h) and randomSeed (from Common/randomSeed.h) instead"
|
||||
|
||||
# Require checking return value of close(),
|
||||
# since it can hide fd misuse and break other places.
|
||||
find $ROOT_PATH/{src,programs,utils} -name '*.h' -or -name '*.cpp' |
|
||||
grep -vP $EXCLUDE_DIRS |
|
||||
grep -vP $EXCLUDE |
|
||||
xargs grep -e ' close(.*fd' -e ' ::close(' | grep -v = && echo "Return value of close() should be checked"
|
||||
|
||||
# A small typo can lead to debug code in release builds, see https://github.com/ClickHouse/ClickHouse/pull/47647
|
||||
@ -322,18 +327,15 @@ ls -1d $ROOT_PATH/contrib/*-cmake | xargs -I@ find @ -name 'CMakeLists.txt' -or
|
||||
|
||||
# Wrong spelling of abbreviations, e.g. SQL is right, Sql is wrong. XMLHttpRequest is very wrong.
|
||||
find $ROOT_PATH/{src,base,programs,utils} -name '*.h' -or -name '*.cpp' |
|
||||
grep -vP $EXCLUDE_DIRS |
|
||||
grep -vP $EXCLUDE |
|
||||
xargs grep -P 'Sql|Html|Xml|Cpu|Tcp|Udp|Http|Db|Json|Yaml' | grep -v -P 'RabbitMQ|Azure|Aws|aws|Avro|IO/S3' &&
|
||||
echo "Abbreviations such as SQL, XML, HTTP, should be in all caps. For example, SQL is right, Sql is wrong. XMLHttpRequest is very wrong."
|
||||
|
||||
find $ROOT_PATH/{src,base,programs,utils} -name '*.h' -or -name '*.cpp' |
|
||||
grep -vP $EXCLUDE_DIRS |
|
||||
grep -vP $EXCLUDE |
|
||||
xargs grep -F -i 'ErrorCodes::LOGICAL_ERROR, "Logical error:' &&
|
||||
echo "If an exception has LOGICAL_ERROR code, there is no need to include the text 'Logical error' in the exception message, because then the phrase 'Logical error' will be printed twice."
|
||||
|
||||
# There shouldn't be any code snippets under GPL or LGPL
|
||||
find $ROOT_PATH/{src,base,programs} -name '*.h' -or -name '*.cpp' 2>/dev/null | xargs grep -i -F 'General Public License' && echo "There shouldn't be any code snippets under GPL or LGPL"
|
||||
|
||||
PATTERN="allow_";
|
||||
DIFF=$(comm -3 <(grep -o "\b$PATTERN\w*\b" $ROOT_PATH/src/Core/Settings.cpp | sort -u) <(grep -o -h "\b$PATTERN\w*\b" $ROOT_PATH/src/Databases/enableAllExperimentalSettings.cpp $ROOT_PATH/utils/check-style/experimental_settings_ignore.txt | sort -u));
|
||||
[ -n "$DIFF" ] && echo "$DIFF" && echo "^^ Detected 'allow_*' settings that might need to be included in src/Databases/enableAllExperimentalSettings.cpp" && echo "Alternatively, consider adding an exception to utils/check-style/experimental_settings_ignore.txt"
|
ci/jobs/scripts/functional_tests_results.py (new executable file): 284 added lines
@@ -0,0 +1,284 @@
|
||||
import dataclasses
|
||||
from typing import List
|
||||
|
||||
from praktika.environment import Environment
|
||||
from praktika.result import Result
|
||||
|
||||
OK_SIGN = "[ OK "
|
||||
FAIL_SIGN = "[ FAIL "
|
||||
TIMEOUT_SIGN = "[ Timeout! "
|
||||
UNKNOWN_SIGN = "[ UNKNOWN "
|
||||
SKIPPED_SIGN = "[ SKIPPED "
|
||||
HUNG_SIGN = "Found hung queries in processlist"
|
||||
SERVER_DIED_SIGN = "Server died, terminating all processes"
|
||||
SERVER_DIED_SIGN2 = "Server does not respond to health check"
|
||||
DATABASE_SIGN = "Database: "
|
||||
|
||||
SUCCESS_FINISH_SIGNS = ["All tests have finished", "No tests were run"]
|
||||
|
||||
RETRIES_SIGN = "Some tests were restarted"
|
||||
|
||||
|
||||
# def write_results(results_file, status_file, results, status):
|
||||
# with open(results_file, "w", encoding="utf-8") as f:
|
||||
# out = csv.writer(f, delimiter="\t")
|
||||
# out.writerows(results)
|
||||
# with open(status_file, "w", encoding="utf-8") as f:
|
||||
# out = csv.writer(f, delimiter="\t")
|
||||
# out.writerow(status)
|
||||
|
||||
BROKEN_TESTS_ANALYZER_TECH_DEBT = [
|
||||
"01624_soft_constraints",
|
||||
# Check after ConstantNode refactoring
|
||||
"02944_variant_as_common_type",
|
||||
]
|
||||
|
||||
|
||||
class FTResultsProcessor:
|
||||
@dataclasses.dataclass
|
||||
class Summary:
|
||||
total: int
|
||||
skipped: int
|
||||
unknown: int
|
||||
failed: int
|
||||
success: int
|
||||
test_results: List[Result]
|
||||
hung: bool = False
|
||||
server_died: bool = False
|
||||
retries: bool = False
|
||||
success_finish: bool = False
|
||||
test_end: bool = True
|
||||
|
||||
def __init__(self, wd):
|
||||
self.tests_output_file = f"{wd}/test_result.txt"
|
||||
# self.test_results_parsed_file = f"{wd}/test_result.tsv"
|
||||
# self.status_file = f"{wd}/check_status.tsv"
|
||||
self.broken_tests = BROKEN_TESTS_ANALYZER_TECH_DEBT
|
||||
|
||||
def _process_test_output(self):
|
||||
total = 0
|
||||
skipped = 0
|
||||
unknown = 0
|
||||
failed = 0
|
||||
success = 0
|
||||
hung = False
|
||||
server_died = False
|
||||
retries = False
|
||||
success_finish = False
|
||||
test_results = []
|
||||
test_end = True
|
||||
|
||||
with open(self.tests_output_file, "r", encoding="utf-8") as test_file:
|
||||
for line in test_file:
|
||||
original_line = line
|
||||
line = line.strip()
|
||||
|
||||
if any(s in line for s in SUCCESS_FINISH_SIGNS):
|
||||
success_finish = True
|
||||
# Ignore hung check report, since it may be quite large.
|
||||
# (and may break python parser which has limit of 128KiB for each row).
|
||||
if HUNG_SIGN in line:
|
||||
hung = True
|
||||
break
|
||||
if SERVER_DIED_SIGN in line or SERVER_DIED_SIGN2 in line:
|
||||
server_died = True
|
||||
if RETRIES_SIGN in line:
|
||||
retries = True
|
||||
if any(
|
||||
sign in line
|
||||
for sign in (OK_SIGN, FAIL_SIGN, UNKNOWN_SIGN, SKIPPED_SIGN)
|
||||
):
|
||||
test_name = line.split(" ")[2].split(":")[0]
|
||||
|
||||
test_time = ""
|
||||
try:
|
||||
time_token = line.split("]")[1].strip().split()[0]
|
||||
float(time_token)
|
||||
test_time = time_token
|
||||
except:
|
||||
pass
|
||||
|
||||
total += 1
|
||||
if TIMEOUT_SIGN in line:
|
||||
if test_name in self.broken_tests:
|
||||
success += 1
|
||||
test_results.append((test_name, "BROKEN", test_time, []))
|
||||
else:
|
||||
failed += 1
|
||||
test_results.append((test_name, "Timeout", test_time, []))
|
||||
elif FAIL_SIGN in line:
|
||||
if test_name in self.broken_tests:
|
||||
success += 1
|
||||
test_results.append((test_name, "BROKEN", test_time, []))
|
||||
else:
|
||||
failed += 1
|
||||
test_results.append((test_name, "FAIL", test_time, []))
|
||||
elif UNKNOWN_SIGN in line:
|
||||
unknown += 1
|
||||
test_results.append((test_name, "FAIL", test_time, []))
|
||||
elif SKIPPED_SIGN in line:
|
||||
skipped += 1
|
||||
test_results.append((test_name, "SKIPPED", test_time, []))
|
||||
else:
|
||||
if OK_SIGN in line and test_name in self.broken_tests:
|
||||
skipped += 1
|
||||
test_results.append(
|
||||
(
|
||||
test_name,
|
||||
"NOT_FAILED",
|
||||
test_time,
|
||||
[
|
||||
"This test passed. Update analyzer_tech_debt.txt.\n"
|
||||
],
|
||||
)
|
||||
)
|
||||
else:
|
||||
success += int(OK_SIGN in line)
|
||||
test_results.append((test_name, "OK", test_time, []))
|
||||
test_end = False
|
||||
elif (
|
||||
len(test_results) > 0
|
||||
and test_results[-1][1] == "FAIL"
|
||||
and not test_end
|
||||
):
|
||||
test_results[-1][3].append(original_line)
|
||||
# Database printed after everything else in case of failures,
|
||||
# so this is a stop marker for capturing test output.
|
||||
#
|
||||
# And it is handled after everything else to include line with database into the report.
|
||||
if DATABASE_SIGN in line:
|
||||
test_end = True
|
||||
|
||||
test_results = [
|
||||
Result(
|
||||
name=test[0],
|
||||
status=test[1],
|
||||
start_time=None,
|
||||
duration=float(test[2]),
|
||||
info="".join(test[3])[:8192],
|
||||
)
|
||||
for test in test_results
|
||||
]
|
||||
|
||||
s = self.Summary(
|
||||
total=total,
|
||||
skipped=skipped,
|
||||
unknown=unknown,
|
||||
failed=failed,
|
||||
success=success,
|
||||
test_results=test_results,
|
||||
hung=hung,
|
||||
server_died=server_died,
|
||||
success_finish=success_finish,
|
||||
retries=retries,
|
||||
)
|
||||
|
||||
return s
|
||||
|
||||
def run(self):
|
||||
state = Result.Status.SUCCESS
|
||||
s = self._process_test_output()
|
||||
test_results = s.test_results
|
||||
|
||||
# # Check test_results.tsv for sanitizer asserts, crashes and other critical errors.
|
||||
# # If the file is present, it's expected to be generated by stress_test.lib check for critical errors
|
||||
# # In the end this file will be fully regenerated, including both results from critical errors check and
|
||||
# # functional test results.
|
||||
# if test_results_path and os.path.exists(test_results_path):
|
||||
# with open(test_results_path, "r", encoding="utf-8") as test_results_file:
|
||||
# existing_test_results = list(
|
||||
# csv.reader(test_results_file, delimiter="\t")
|
||||
# )
|
||||
# for test in existing_test_results:
|
||||
# if len(test) < 2:
|
||||
# unknown += 1
|
||||
# else:
|
||||
# test_results.append(test)
|
||||
#
|
||||
# if test[1] != "OK":
|
||||
# failed += 1
|
||||
# else:
|
||||
# success += 1
|
||||
|
||||
# is_flaky_check = 1 < int(os.environ.get("NUM_TRIES", 1))
|
||||
# logging.info("Is flaky check: %s", is_flaky_check)
|
||||
# # If no tests were run (success == 0) it indicates an error (e.g. server did not start or crashed immediately)
|
||||
# # But it's Ok for "flaky checks" - they can contain just one test for check which is marked as skipped.
|
||||
# if failed != 0 or unknown != 0 or (success == 0 and (not is_flaky_check)):
|
||||
if s.failed != 0 or s.unknown != 0:
|
||||
state = Result.Status.FAILED
|
||||
|
||||
if s.hung:
|
||||
state = Result.Status.FAILED
|
||||
test_results.append(
|
||||
Result("Some queries hung", "FAIL", info="Some queries hung")
|
||||
)
|
||||
elif s.server_died:
|
||||
state = Result.Status.FAILED
|
||||
# When ClickHouse server crashes, some tests are still running
|
||||
# and fail because they cannot connect to server
|
||||
for result in test_results:
|
||||
if result.status == "FAIL":
|
||||
result.status = "SERVER_DIED"
|
||||
test_results.append(Result("Server died", "FAIL", info="Server died"))
|
||||
elif not s.success_finish:
|
||||
state = Result.Status.FAILED
|
||||
test_results.append(
|
||||
Result("Tests are not finished", "FAIL", info="Tests are not finished")
|
||||
)
|
||||
elif s.retries:
|
||||
test_results.append(
|
||||
Result("Some tests restarted", "SKIPPED", info="Some tests restarted")
|
||||
)
|
||||
else:
|
||||
pass
|
||||
|
||||
# TODO: !!!
|
||||
# def test_result_comparator(item):
|
||||
# # sort by status then by check name
|
||||
# order = {
|
||||
# "FAIL": 0,
|
||||
# "SERVER_DIED": 1,
|
||||
# "Timeout": 2,
|
||||
# "NOT_FAILED": 3,
|
||||
# "BROKEN": 4,
|
||||
# "OK": 5,
|
||||
# "SKIPPED": 6,
|
||||
# }
|
||||
# return order.get(item[1], 10), str(item[0]), item[1]
|
||||
#
|
||||
# test_results.sort(key=test_result_comparator)
|
||||
|
||||
return Result.create_from(
|
||||
name=Environment.JOB_NAME,
|
||||
results=test_results,
|
||||
status=state,
|
||||
files=[self.tests_output_file],
|
||||
with_info_from_results=False,
|
||||
)
|
||||
|
||||
|
||||
# if __name__ == "__main__":
|
||||
# logging.basicConfig(level=logging.INFO, format="%(asctime)s %(message)s")
|
||||
# parser = argparse.ArgumentParser(
|
||||
# description="ClickHouse script for parsing results of functional tests"
|
||||
# )
|
||||
#
|
||||
# parser.add_argument("--out-results-file", default="/test_output/test_results.tsv")
|
||||
# parser.add_argument("--out-status-file", default="/test_output/check_status.tsv")
|
||||
# args = parser.parse_args()
|
||||
#
|
||||
# broken_tests = []
|
||||
# state, description, test_results = process_result(
|
||||
# args.in_results_dir,
|
||||
# broken_tests,
|
||||
# args.in_test_result_file,
|
||||
# args.in_results_file,
|
||||
# )
|
||||
# logging.info("Result parsed")
|
||||
# status = (state, description)
|
||||
#
|
||||
#
|
||||
#
|
||||
# write_results(args.out_results_file, args.out_status_file, test_results, status)
|
||||
# logging.info("Result written")
|
ci/praktika/__init__.py (new file): 5 added lines
@@ -0,0 +1,5 @@
from .artifact import Artifact
from .docker import Docker
from .job import Job
from .secret import Secret
from .workflow import Workflow
ci/praktika/__main__.py (new file): 94 added lines
@@ -0,0 +1,94 @@
|
||||
import argparse
|
||||
import sys
|
||||
|
||||
from praktika.html_prepare import Html
|
||||
from praktika.utils import Utils
|
||||
from praktika.validator import Validator
|
||||
from praktika.yaml_generator import YamlGenerator
|
||||
|
||||
|
||||
def create_parser():
|
||||
parser = argparse.ArgumentParser(prog="python3 -m praktika")
|
||||
|
||||
subparsers = parser.add_subparsers(dest="command", help="Available subcommands")
|
||||
|
||||
run_parser = subparsers.add_parser("run", help="Job Runner")
|
||||
run_parser.add_argument("--job", help="Job Name", type=str, required=True)
|
||||
run_parser.add_argument(
|
||||
"--workflow",
|
||||
help="Workflow Name (required if job name is not uniq per config)",
|
||||
type=str,
|
||||
default="",
|
||||
)
|
||||
run_parser.add_argument(
|
||||
"--no-docker",
|
||||
help="Do not run job in docker even if job config says so, for local test",
|
||||
action="store_true",
|
||||
)
|
||||
run_parser.add_argument(
|
||||
"--docker",
|
||||
help="Custom docker image for job run, for local test",
|
||||
type=str,
|
||||
default="",
|
||||
)
|
||||
run_parser.add_argument(
|
||||
"--param",
|
||||
help="Custom parameter to pass into a job script, it's up to job script how to use it, for local test",
|
||||
type=str,
|
||||
default=None,
|
||||
)
|
||||
run_parser.add_argument(
|
||||
"--ci",
|
||||
help="When not set - dummy env will be generated, for local test",
|
||||
action="store_true",
|
||||
default="",
|
||||
)
|
||||
|
||||
_yaml_parser = subparsers.add_parser("yaml", help="Generates Yaml Workflows")
|
||||
|
||||
_html_parser = subparsers.add_parser("html", help="Uploads HTML page for reports")
|
||||
|
||||
return parser
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
parser = create_parser()
|
||||
args = parser.parse_args()
|
||||
|
||||
if args.command == "yaml":
|
||||
Validator().validate()
|
||||
YamlGenerator().generate()
|
||||
elif args.command == "html":
|
||||
Html.prepare()
|
||||
elif args.command == "run":
|
||||
from praktika.mangle import _get_workflows
|
||||
from praktika.runner import Runner
|
||||
|
||||
workflows = _get_workflows(name=args.workflow or None)
|
||||
job_workflow_pairs = []
|
||||
for workflow in workflows:
|
||||
job = workflow.find_job(args.job, lazy=True)
|
||||
if job:
|
||||
job_workflow_pairs.append((job, workflow))
|
||||
if not job_workflow_pairs:
|
||||
Utils.raise_with_error(
|
||||
f"Failed to find job [{args.job}] workflow [{args.workflow}]"
|
||||
)
|
||||
elif len(job_workflow_pairs) > 1:
|
||||
Utils.raise_with_error(
|
||||
f"More than one job [{args.job}] found - try specifying workflow name with --workflow"
|
||||
)
|
||||
else:
|
||||
job, workflow = job_workflow_pairs[0][0], job_workflow_pairs[0][1]
|
||||
print(f"Going to run job [{job.name}], workflow [{workflow.name}]")
|
||||
Runner().run(
|
||||
workflow=workflow,
|
||||
job=job,
|
||||
docker=args.docker,
|
||||
dummy_env=not args.ci,
|
||||
no_docker=args.no_docker,
|
||||
param=args.param,
|
||||
)
|
||||
else:
|
||||
parser.print_help()
|
||||
sys.exit(1)
|
ci/praktika/_environment.py (new file): 195 added lines
@@ -0,0 +1,195 @@
|
||||
import dataclasses
|
||||
import json
|
||||
import os
|
||||
from pathlib import Path
|
||||
from types import SimpleNamespace
|
||||
from typing import Any, Dict, List, Type
|
||||
|
||||
from praktika import Workflow
|
||||
from praktika._settings import _Settings
|
||||
from praktika.utils import MetaClasses, T
|
||||
|
||||
|
||||
@dataclasses.dataclass
|
||||
class _Environment(MetaClasses.Serializable):
|
||||
WORKFLOW_NAME: str
|
||||
JOB_NAME: str
|
||||
REPOSITORY: str
|
||||
BRANCH: str
|
||||
SHA: str
|
||||
PR_NUMBER: int
|
||||
EVENT_TYPE: str
|
||||
JOB_OUTPUT_STREAM: str
|
||||
EVENT_FILE_PATH: str
|
||||
CHANGE_URL: str
|
||||
COMMIT_URL: str
|
||||
BASE_BRANCH: str
|
||||
RUN_ID: str
|
||||
RUN_URL: str
|
||||
INSTANCE_TYPE: str
|
||||
INSTANCE_ID: str
|
||||
INSTANCE_LIFE_CYCLE: str
|
||||
PARAMETER: Any = None
|
||||
REPORT_INFO: List[str] = dataclasses.field(default_factory=list)
|
||||
LOCAL_RUN_PARAM: str = ""
|
||||
name = "environment"
|
||||
|
||||
@classmethod
|
||||
def file_name_static(cls, _name=""):
|
||||
return f"{_Settings.TEMP_DIR}/{cls.name}.json"
|
||||
|
||||
@classmethod
|
||||
def from_dict(cls: Type[T], obj: Dict[str, Any]) -> T:
|
||||
JOB_OUTPUT_STREAM = os.getenv("GITHUB_OUTPUT", "")
|
||||
obj["JOB_OUTPUT_STREAM"] = JOB_OUTPUT_STREAM
|
||||
if "PARAMETER" in obj:
|
||||
obj["PARAMETER"] = _to_object(obj["PARAMETER"])
|
||||
return cls(**obj)
|
||||
|
||||
def add_info(self, info):
|
||||
self.REPORT_INFO.append(info)
|
||||
self.dump()
|
||||
|
||||
@classmethod
|
||||
def get(cls):
|
||||
if Path(cls.file_name_static()).is_file():
|
||||
return cls.from_fs("environment")
|
||||
else:
|
||||
print("WARNING: Environment: get from env")
|
||||
env = cls.from_env()
|
||||
env.dump()
|
||||
return env
|
||||
|
||||
def set_job_name(self, job_name):
|
||||
self.JOB_NAME = job_name
|
||||
self.dump()
|
||||
return self
|
||||
|
||||
@staticmethod
|
||||
def get_needs_statuses():
|
||||
if Path(_Settings.WORKFLOW_STATUS_FILE).is_file():
|
||||
with open(_Settings.WORKFLOW_STATUS_FILE, "r", encoding="utf8") as f:
|
||||
return json.load(f)
|
||||
else:
|
||||
print(
|
||||
f"ERROR: Status file [{_Settings.WORKFLOW_STATUS_FILE}] does not exist"
|
||||
)
|
||||
raise RuntimeError()
|
||||
|
||||
@classmethod
|
||||
def from_env(cls) -> "_Environment":
|
||||
WORKFLOW_NAME = os.getenv("GITHUB_WORKFLOW", "")
|
||||
JOB_NAME = os.getenv("JOB_NAME", "")
|
||||
REPOSITORY = os.getenv("GITHUB_REPOSITORY", "")
|
||||
BRANCH = os.getenv("GITHUB_HEAD_REF", "")
|
||||
|
||||
EVENT_FILE_PATH = os.getenv("GITHUB_EVENT_PATH", "")
|
||||
JOB_OUTPUT_STREAM = os.getenv("GITHUB_OUTPUT", "")
|
||||
RUN_ID = os.getenv("GITHUB_RUN_ID", "0")
|
||||
RUN_URL = f"https://github.com/{REPOSITORY}/actions/runs/{RUN_ID}"
|
||||
BASE_BRANCH = os.getenv("GITHUB_BASE_REF", "")
|
||||
|
||||
if EVENT_FILE_PATH:
|
||||
with open(EVENT_FILE_PATH, "r", encoding="utf-8") as f:
|
||||
github_event = json.load(f)
|
||||
if "pull_request" in github_event:
|
||||
EVENT_TYPE = Workflow.Event.PULL_REQUEST
|
||||
PR_NUMBER = github_event["pull_request"]["number"]
|
||||
SHA = github_event["pull_request"]["head"]["sha"]
|
||||
CHANGE_URL = github_event["pull_request"]["html_url"]
|
||||
COMMIT_URL = CHANGE_URL + f"/commits/{SHA}"
|
||||
elif "commits" in github_event:
|
||||
EVENT_TYPE = Workflow.Event.PUSH
|
||||
SHA = github_event["after"]
|
||||
CHANGE_URL = github_event["head_commit"]["url"] # commit url
|
||||
PR_NUMBER = 0
|
||||
COMMIT_URL = CHANGE_URL
|
||||
else:
|
||||
assert False, "TODO: not supported"
|
||||
else:
|
||||
print("WARNING: Local execution - dummy Environment will be generated")
|
||||
SHA = "TEST"
|
||||
PR_NUMBER = -1
|
||||
EVENT_TYPE = Workflow.Event.PUSH
|
||||
CHANGE_URL = ""
|
||||
COMMIT_URL = ""
|
||||
|
||||
INSTANCE_TYPE = (
|
||||
os.getenv("INSTANCE_TYPE", None)
|
||||
# or Shell.get_output("ec2metadata --instance-type")
|
||||
or ""
|
||||
)
|
||||
INSTANCE_ID = (
|
||||
os.getenv("INSTANCE_ID", None)
|
||||
# or Shell.get_output("ec2metadata --instance-id")
|
||||
or ""
|
||||
)
|
||||
INSTANCE_LIFE_CYCLE = (
|
||||
os.getenv("INSTANCE_LIFE_CYCLE", None)
|
||||
# or Shell.get_output(
|
||||
# "curl -s --fail http://169.254.169.254/latest/meta-data/instance-life-cycle"
|
||||
# )
|
||||
or ""
|
||||
)
|
||||
|
||||
return _Environment(
|
||||
WORKFLOW_NAME=WORKFLOW_NAME,
|
||||
JOB_NAME=JOB_NAME,
|
||||
REPOSITORY=REPOSITORY,
|
||||
BRANCH=BRANCH,
|
||||
EVENT_FILE_PATH=EVENT_FILE_PATH,
|
||||
JOB_OUTPUT_STREAM=JOB_OUTPUT_STREAM,
|
||||
SHA=SHA,
|
||||
EVENT_TYPE=EVENT_TYPE,
|
||||
PR_NUMBER=PR_NUMBER,
|
||||
RUN_ID=RUN_ID,
|
||||
CHANGE_URL=CHANGE_URL,
|
||||
COMMIT_URL=COMMIT_URL,
|
||||
RUN_URL=RUN_URL,
|
||||
BASE_BRANCH=BASE_BRANCH,
|
||||
INSTANCE_TYPE=INSTANCE_TYPE,
|
||||
INSTANCE_ID=INSTANCE_ID,
|
||||
INSTANCE_LIFE_CYCLE=INSTANCE_LIFE_CYCLE,
|
||||
REPORT_INFO=[],
|
||||
)
|
||||
|
||||
def get_s3_prefix(self, latest=False):
|
||||
return self.get_s3_prefix_static(self.PR_NUMBER, self.BRANCH, self.SHA, latest)
|
||||
|
||||
@classmethod
|
||||
def get_s3_prefix_static(cls, pr_number, branch, sha, latest=False):
|
||||
prefix = ""
|
||||
if pr_number > 0:
|
||||
prefix += f"{pr_number}"
|
||||
else:
|
||||
prefix += f"{branch}"
|
||||
if latest:
|
||||
prefix += f"/latest"
|
||||
elif sha:
|
||||
prefix += f"/{sha}"
|
||||
return prefix
|
||||
|
||||
# TODO: find a better place for the function. This file should not import praktika.settings
|
||||
# as it requires reading the user's config; that's why the imports are nested inside the function
|
||||
def get_report_url(self):
|
||||
import urllib
|
||||
|
||||
from praktika.settings import Settings
|
||||
from praktika.utils import Utils
|
||||
|
||||
path = Settings.HTML_S3_PATH
|
||||
for bucket, endpoint in Settings.S3_BUCKET_TO_HTTP_ENDPOINT.items():
|
||||
if bucket in path:
|
||||
path = path.replace(bucket, endpoint)
|
||||
break
|
||||
REPORT_URL = f"https://{path}/{Path(Settings.HTML_PAGE_FILE).name}?PR={self.PR_NUMBER}&sha={self.SHA}&name_0={urllib.parse.quote(self.WORKFLOW_NAME, safe='')}&name_1={urllib.parse.quote(self.JOB_NAME, safe='')}"
|
||||
return REPORT_URL
|
||||
|
||||
|
||||
def _to_object(data):
|
||||
if isinstance(data, dict):
|
||||
return SimpleNamespace(**{k: _to_object(v) for k, v in data.items()})
|
||||
elif isinstance(data, list):
|
||||
return [_to_object(i) for i in data]
|
||||
else:
|
||||
return data
|
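For reference, a minimal sketch of how a job script could consume this class, assuming praktika is importable and the temp dir from _Settings exists; the note text passed to add_info is made up for illustration:

from praktika._environment import _Environment

env = _Environment.get()            # reads /tmp/praktika/environment.json, falls back to GITHUB_* env vars
print(env.get_s3_prefix())          # "<pr_number>/<sha>" for a PR, "<branch>/<sha>" for a push
env.add_info("hypothetical note")   # appended to REPORT_INFO and persisted via dump()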
128
ci/praktika/_settings.py
Normal file
@ -0,0 +1,128 @@
|
||||
import dataclasses
|
||||
from pathlib import Path
|
||||
from typing import Dict, Iterable, List, Optional
|
||||
|
||||
|
||||
@dataclasses.dataclass
|
||||
class _Settings:
|
||||
######################################
|
||||
# Pipeline generation settings #
|
||||
######################################
|
||||
if Path("./ci_v2").is_dir():
|
||||
# TODO: hack for CH, remove
|
||||
CI_PATH = "./ci_v2"
|
||||
else:
|
||||
CI_PATH = "./ci"
|
||||
WORKFLOW_PATH_PREFIX: str = "./.github/workflows"
|
||||
WORKFLOWS_DIRECTORY: str = f"{CI_PATH}/workflows"
|
||||
SETTINGS_DIRECTORY: str = f"{CI_PATH}/settings"
|
||||
CI_CONFIG_JOB_NAME = "Config Workflow"
|
||||
DOCKER_BUILD_JOB_NAME = "Docker Builds"
|
||||
FINISH_WORKFLOW_JOB_NAME = "Finish Workflow"
|
||||
READY_FOR_MERGE_STATUS_NAME = "Ready for Merge"
|
||||
CI_CONFIG_RUNS_ON: Optional[List[str]] = None
|
||||
DOCKER_BUILD_RUNS_ON: Optional[List[str]] = None
|
||||
VALIDATE_FILE_PATHS: bool = True
|
||||
|
||||
######################################
|
||||
# Runtime Settings #
|
||||
######################################
|
||||
MAX_RETRIES_S3 = 3
|
||||
MAX_RETRIES_GH = 3
|
||||
|
||||
######################################
|
||||
# S3 (artifact storage) settings #
|
||||
######################################
|
||||
S3_ARTIFACT_PATH: str = ""
|
||||
|
||||
######################################
|
||||
# CI workspace settings #
|
||||
######################################
|
||||
TEMP_DIR: str = "/tmp/praktika"
|
||||
OUTPUT_DIR: str = f"{TEMP_DIR}/output"
|
||||
INPUT_DIR: str = f"{TEMP_DIR}/input"
|
||||
PYTHON_INTERPRETER: str = "python3"
|
||||
PYTHON_PACKET_MANAGER: str = "pip3"
|
||||
PYTHON_VERSION: str = "3.9"
|
||||
INSTALL_PYTHON_FOR_NATIVE_JOBS: bool = False
|
||||
INSTALL_PYTHON_REQS_FOR_NATIVE_JOBS: str = "./ci/requirements.txt"
|
||||
ENVIRONMENT_VAR_FILE: str = f"{TEMP_DIR}/environment.json"
|
||||
RUN_LOG: str = f"{TEMP_DIR}/praktika_run.log"
|
||||
|
||||
SECRET_GH_APP_ID: str = "GH_APP_ID"
|
||||
SECRET_GH_APP_PEM_KEY: str = "GH_APP_PEM_KEY"
|
||||
|
||||
ENV_SETUP_SCRIPT: str = "/tmp/praktika_setup_env.sh"
|
||||
WORKFLOW_STATUS_FILE: str = f"{TEMP_DIR}/workflow_status.json"
|
||||
|
||||
######################################
|
||||
# CI Cache settings #
|
||||
######################################
|
||||
CACHE_VERSION: int = 1
|
||||
CACHE_DIGEST_LEN: int = 20
|
||||
CACHE_S3_PATH: str = ""
|
||||
CACHE_LOCAL_PATH: str = f"{TEMP_DIR}/ci_cache"
|
||||
|
||||
######################################
|
||||
# Report settings #
|
||||
######################################
|
||||
HTML_S3_PATH: str = ""
|
||||
HTML_PAGE_FILE: str = "./praktika/json.html"
|
||||
TEXT_CONTENT_EXTENSIONS: Iterable[str] = frozenset([".txt", ".log"])
|
||||
S3_BUCKET_TO_HTTP_ENDPOINT: Optional[Dict[str, str]] = None
|
||||
|
||||
DOCKERHUB_USERNAME: str = ""
|
||||
DOCKERHUB_SECRET: str = ""
|
||||
DOCKER_WD: str = "/wd"
|
||||
|
||||
######################################
|
||||
# CI DB Settings #
|
||||
######################################
|
||||
SECRET_CI_DB_URL: str = "CI_DB_URL"
|
||||
SECRET_CI_DB_PASSWORD: str = "CI_DB_PASSWORD"
|
||||
CI_DB_DB_NAME = ""
|
||||
CI_DB_TABLE_NAME = ""
|
||||
CI_DB_INSERT_TIMEOUT_SEC = 5
|
||||
|
||||
|
||||
_USER_DEFINED_SETTINGS = [
|
||||
"S3_ARTIFACT_PATH",
|
||||
"CACHE_S3_PATH",
|
||||
"HTML_S3_PATH",
|
||||
"S3_BUCKET_TO_HTTP_ENDPOINT",
|
||||
"TEXT_CONTENT_EXTENSIONS",
|
||||
"TEMP_DIR",
|
||||
"OUTPUT_DIR",
|
||||
"INPUT_DIR",
|
||||
"CI_CONFIG_RUNS_ON",
|
||||
"DOCKER_BUILD_RUNS_ON",
|
||||
"CI_CONFIG_JOB_NAME",
|
||||
"PYTHON_INTERPRETER",
|
||||
"PYTHON_VERSION",
|
||||
"PYTHON_PACKET_MANAGER",
|
||||
"INSTALL_PYTHON_FOR_NATIVE_JOBS",
|
||||
"INSTALL_PYTHON_REQS_FOR_NATIVE_JOBS",
|
||||
"MAX_RETRIES_S3",
|
||||
"MAX_RETRIES_GH",
|
||||
"VALIDATE_FILE_PATHS",
|
||||
"DOCKERHUB_USERNAME",
|
||||
"DOCKERHUB_SECRET",
|
||||
"READY_FOR_MERGE_STATUS_NAME",
|
||||
"SECRET_CI_DB_URL",
|
||||
"SECRET_CI_DB_PASSWORD",
|
||||
"CI_DB_DB_NAME",
|
||||
"CI_DB_TABLE_NAME",
|
||||
"CI_DB_INSERT_TIMEOUT_SEC",
|
||||
"SECRET_GH_APP_PEM_KEY",
|
||||
"SECRET_GH_APP_ID",
|
||||
]
|
||||
|
||||
|
||||
class GHRunners:
|
||||
ubuntu = "ubuntu-latest"
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
for setting in _USER_DEFINED_SETTINGS:
|
||||
print(_Settings().__getattribute__(setting))
|
||||
# print(dataclasses.asdict(_Settings()))
|
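A small sketch of how these defaults could be inspected; the attribute names are taken from the class above, nothing else is assumed:

from praktika._settings import _USER_DEFINED_SETTINGS, _Settings

defaults = _Settings()
# map of every setting a user config is allowed to override to its default value
overridable = {name: getattr(defaults, name) for name in _USER_DEFINED_SETTINGS}
print(overridable["TEMP_DIR"])   # "/tmp/praktika" unless a user settings file overrides it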
33
ci/praktika/artifact.py
Normal file
@ -0,0 +1,33 @@
|
||||
from dataclasses import dataclass
|
||||
|
||||
|
||||
class Artifact:
|
||||
class Type:
|
||||
GH = "github"
|
||||
S3 = "s3"
|
||||
PHONY = "phony"
|
||||
|
||||
@dataclass
|
||||
class Config:
|
||||
"""
|
||||
name - artifact name
|
||||
type - artifact type, see Artifact.Type
|
||||
path - file path or glob, e.g. "path/**/[abc]rtifac?/*"
|
||||
"""
|
||||
|
||||
name: str
|
||||
type: str
|
||||
path: str
|
||||
_provided_by: str = ""
|
||||
_s3_path: str = ""
|
||||
|
||||
def is_s3_artifact(self):
|
||||
return self.type == Artifact.Type.S3
|
||||
|
||||
@classmethod
|
||||
def define_artifact(cls, name, type, path):
|
||||
return cls.Config(name=name, type=type, path=path)
|
||||
|
||||
@classmethod
|
||||
def define_gh_artifact(cls, name, path):
|
||||
return cls.define_artifact(name=name, type=cls.Type.GH, path=path)
|
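A sketch of defining artifacts with this class; the artifact names and paths below are hypothetical:

from praktika.artifact import Artifact

build_binary = Artifact.Config(
    name="build_amd_binary",                  # hypothetical artifact name
    type=Artifact.Type.S3,
    path="/tmp/praktika/output/some_binary",  # hypothetical path produced by the job
)
assert build_binary.is_s3_artifact()

# GH-hosted artifact defined via the shortcut helper
report = Artifact.define_gh_artifact(name="test_report", path="/tmp/praktika/output/*.log")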
127
ci/praktika/cache.py
Normal file
@ -0,0 +1,127 @@
|
||||
import dataclasses
|
||||
import json
|
||||
from pathlib import Path
|
||||
|
||||
from praktika import Artifact, Job, Workflow
|
||||
from praktika._environment import _Environment
|
||||
from praktika.digest import Digest
|
||||
from praktika.s3 import S3
|
||||
from praktika.settings import Settings
|
||||
from praktika.utils import Utils
|
||||
|
||||
|
||||
class Cache:
|
||||
@dataclasses.dataclass
|
||||
class CacheRecord:
|
||||
class Type:
|
||||
SUCCESS = "success"
|
||||
|
||||
type: str
|
||||
sha: str
|
||||
pr_number: int
|
||||
branch: str
|
||||
|
||||
def dump(self, path):
|
||||
with open(path, "w", encoding="utf8") as f:
|
||||
json.dump(dataclasses.asdict(self), f)
|
||||
|
||||
@classmethod
|
||||
def from_fs(cls, path):
|
||||
with open(path, "r", encoding="utf8") as f:
|
||||
return Cache.CacheRecord(**json.load(f))
|
||||
|
||||
@classmethod
|
||||
def from_dict(cls, obj):
|
||||
return Cache.CacheRecord(**obj)
|
||||
|
||||
def __init__(self):
|
||||
self.digest = Digest()
|
||||
self.success = {} # type Dict[str, Any]
|
||||
|
||||
@classmethod
|
||||
def push_success_record(cls, job_name, job_digest, sha):
|
||||
type_ = Cache.CacheRecord.Type.SUCCESS
|
||||
record = Cache.CacheRecord(
|
||||
type=type_,
|
||||
sha=sha,
|
||||
pr_number=_Environment.get().PR_NUMBER,
|
||||
branch=_Environment.get().BRANCH,
|
||||
)
|
||||
assert (
|
||||
Settings.CACHE_S3_PATH
|
||||
), f"Setting CACHE_S3_PATH must be defined with enabled CI Cache"
|
||||
record_path = f"{Settings.CACHE_S3_PATH}/v{Settings.CACHE_VERSION}/{Utils.normalize_string(job_name)}/{job_digest}"
|
||||
record_file = Path(Settings.TEMP_DIR) / type_
|
||||
record.dump(record_file)
|
||||
S3.copy_file_to_s3(s3_path=record_path, local_path=record_file)
|
||||
record_file.unlink()
|
||||
|
||||
def fetch_success(self, job_name, job_digest):
|
||||
type_ = Cache.CacheRecord.Type.SUCCESS
|
||||
assert (
|
||||
Settings.CACHE_S3_PATH
|
||||
), f"Setting CACHE_S3_PATH must be defined with enabled CI Cache"
|
||||
record_path = f"{Settings.CACHE_S3_PATH}/v{Settings.CACHE_VERSION}/{Utils.normalize_string(job_name)}/{job_digest}/{type_}"
|
||||
record_file_local_dir = (
|
||||
f"{Settings.CACHE_LOCAL_PATH}/{Utils.normalize_string(job_name)}/"
|
||||
)
|
||||
Path(record_file_local_dir).mkdir(parents=True, exist_ok=True)
|
||||
|
||||
if S3.head_object(record_path):
|
||||
res = S3.copy_file_from_s3(
|
||||
s3_path=record_path, local_path=record_file_local_dir
|
||||
)
|
||||
else:
|
||||
res = None
|
||||
|
||||
if res:
|
||||
print(f"Cache record found, job [{job_name}], digest [{job_digest}]")
|
||||
self.success[job_name] = True
|
||||
return Cache.CacheRecord.from_fs(Path(record_file_local_dir) / type_)
|
||||
return None
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
# test
|
||||
c = Cache()
|
||||
workflow = Workflow.Config(
|
||||
name="TEST",
|
||||
event=Workflow.Event.PULL_REQUEST,
|
||||
jobs=[
|
||||
Job.Config(
|
||||
name="JobA",
|
||||
runs_on=["some"],
|
||||
command="python -m unittest ./ci/tests/example_1/test_example_produce_artifact.py",
|
||||
provides=["greet"],
|
||||
job_requirements=Job.Requirements(
|
||||
python_requirements_txt="./ci/requirements.txt"
|
||||
),
|
||||
digest_config=Job.CacheDigestConfig(
|
||||
# example: use glob to include files
|
||||
include_paths=["./ci/tests/example_1/test_example_consume*.py"],
|
||||
),
|
||||
),
|
||||
Job.Config(
|
||||
name="JobB",
|
||||
runs_on=["some"],
|
||||
command="python -m unittest ./ci/tests/example_1/test_example_consume_artifact.py",
|
||||
requires=["greet"],
|
||||
job_requirements=Job.Requirements(
|
||||
python_requirements_txt="./ci/requirements.txt"
|
||||
),
|
||||
digest_config=Job.CacheDigestConfig(
|
||||
# example: use dir to include files recursively
|
||||
include_paths=["./ci/tests/example_1"],
|
||||
# example: use glob to exclude files from digest
|
||||
exclude_paths=[
|
||||
"./ci/tests/example_1/test_example_consume*",
|
||||
"./**/*.pyc",
|
||||
],
|
||||
),
|
||||
),
|
||||
],
|
||||
artifacts=[Artifact.Config(type="s3", name="greet", path="hello")],
|
||||
enable_cache=True,
|
||||
)
|
||||
for job in workflow.jobs:
|
||||
print(c.digest.calc_job_digest(job))
|
136
ci/praktika/cidb.py
Normal file
@ -0,0 +1,136 @@
|
||||
import copy
|
||||
import dataclasses
|
||||
import json
|
||||
from typing import Optional
|
||||
|
||||
import requests
|
||||
from praktika._environment import _Environment
|
||||
from praktika.result import Result
|
||||
from praktika.settings import Settings
|
||||
from praktika.utils import Utils
|
||||
|
||||
|
||||
class CIDB:
|
||||
@dataclasses.dataclass
|
||||
class TableRecord:
|
||||
pull_request_number: int
|
||||
commit_sha: str
|
||||
commit_url: str
|
||||
check_name: str
|
||||
check_status: str
|
||||
check_duration_ms: int
|
||||
check_start_time: int
|
||||
report_url: str
|
||||
pull_request_url: str
|
||||
base_ref: str
|
||||
base_repo: str
|
||||
head_ref: str
|
||||
head_repo: str
|
||||
task_url: str
|
||||
instance_type: str
|
||||
instance_id: str
|
||||
test_name: str
|
||||
test_status: str
|
||||
test_duration_ms: Optional[int]
|
||||
test_context_raw: str
|
||||
|
||||
def __init__(self, url, passwd):
|
||||
self.url = url
|
||||
self.auth = {
|
||||
"X-ClickHouse-User": "default",
|
||||
"X-ClickHouse-Key": passwd,
|
||||
}
|
||||
|
||||
@classmethod
|
||||
def json_data_generator(cls, result: Result):
|
||||
env = _Environment.get()
|
||||
base_record = cls.TableRecord(
|
||||
pull_request_number=env.PR_NUMBER,
|
||||
commit_sha=env.SHA,
|
||||
commit_url=env.COMMIT_URL,
|
||||
check_name=result.name,
|
||||
check_status=result.status,
|
||||
check_duration_ms=int(result.duration * 1000),
|
||||
check_start_time=Utils.timestamp_to_str(result.start_time),
|
||||
report_url=env.get_report_url(),
|
||||
pull_request_url=env.CHANGE_URL,
|
||||
base_ref=env.BASE_BRANCH,
|
||||
base_repo=env.REPOSITORY,
|
||||
head_ref=env.BRANCH,
|
||||
# TODO: remove from table?
|
||||
head_repo=env.REPOSITORY,
|
||||
# TODO: remove from table?
|
||||
task_url="",
|
||||
instance_type=",".join([env.INSTANCE_TYPE, env.INSTANCE_LIFE_CYCLE]),
|
||||
instance_id=env.INSTANCE_ID,
|
||||
test_name="",
|
||||
test_status="",
|
||||
test_duration_ms=None,
|
||||
test_context_raw=result.info,
|
||||
)
|
||||
yield json.dumps(dataclasses.asdict(base_record))
|
||||
for result_ in result.results:
|
||||
record = copy.deepcopy(base_record)
|
||||
record.test_name = result_.name
|
||||
if result_.start_time:
|
||||
record.check_start_time = Utils.timestamp_to_str(result_.start_time)
|
||||
record.test_status = result_.status
|
||||
record.test_duration_ms = int(result_.duration * 1000)
|
||||
record.test_context_raw = result_.info
|
||||
yield json.dumps(dataclasses.asdict(record))
|
||||
|
||||
def insert(self, result: Result):
|
||||
# Create a session object
|
||||
params = {
|
||||
"database": Settings.CI_DB_DB_NAME,
|
||||
"query": f"INSERT INTO {Settings.CI_DB_TABLE_NAME} FORMAT JSONEachRow",
|
||||
"date_time_input_format": "best_effort",
|
||||
"send_logs_level": "warning",
|
||||
}
|
||||
|
||||
session = requests.Session()
|
||||
|
||||
for json_str in self.json_data_generator(result):
|
||||
try:
|
||||
response1 = session.post(
|
||||
url=self.url,
|
||||
params=params,
|
||||
data=json_str,
|
||||
headers=self.auth,
|
||||
timeout=Settings.CI_DB_INSERT_TIMEOUT_SEC,
|
||||
)
|
||||
except Exception as ex:
|
||||
raise ex
|
||||
|
||||
session.close()
|
||||
|
||||
def check(self):
|
||||
# Create a session object
|
||||
params = {
|
||||
"database": Settings.CI_DB_DB_NAME,
|
||||
"query": f"SELECT 1",
|
||||
}
|
||||
try:
|
||||
response = requests.post(
|
||||
url=self.url,
|
||||
params=params,
|
||||
data="",
|
||||
headers=self.auth,
|
||||
timeout=Settings.CI_DB_INSERT_TIMEOUT_SEC,
|
||||
)
|
||||
if not response.ok:
|
||||
print("ERROR: No connection to CI DB")
|
||||
return (
|
||||
False,
|
||||
f"ERROR: No connection to CI DB [{response.status_code}/{response.reason}]",
|
||||
)
|
||||
if not response.json() == 1:
|
||||
print("ERROR: CI DB smoke test failed select 1 == 1")
|
||||
return (
|
||||
False,
|
||||
f"ERROR: CI DB smoke test failed [select 1 ==> {response.json()}]",
|
||||
)
|
||||
except Exception as ex:
|
||||
print(f"ERROR: Exception [{ex}]")
|
||||
return False, "CIDB: ERROR: Exception [{ex}]"
|
||||
return True, ""
|
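A hedged sketch of how this class could be used; the URL and password below are placeholders, in real workflows they come from the SECRET_CI_DB_URL / SECRET_CI_DB_PASSWORD secrets:

from praktika.cidb import CIDB
from praktika.result import Result

cidb = CIDB(url="https://ci-db.example.com:8443", passwd="***")  # placeholder credentials
ok, err = cidb.check()                                           # smoke test: SELECT 1
if ok:
    cidb.insert(Result.from_fs("Some job"))                      # hypothetical job name
else:
    print(err)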
100
ci/praktika/digest.py
Normal file
@ -0,0 +1,100 @@
|
||||
import dataclasses
|
||||
import hashlib
|
||||
from hashlib import md5
|
||||
from typing import List
|
||||
|
||||
from praktika import Job
|
||||
from praktika.docker import Docker
|
||||
from praktika.settings import Settings
|
||||
from praktika.utils import Utils
|
||||
|
||||
|
||||
class Digest:
|
||||
def __init__(self):
|
||||
self.digest_cache = {}
|
||||
|
||||
@staticmethod
|
||||
def _hash_digest_config(digest_config: Job.CacheDigestConfig) -> str:
|
||||
data_dict = dataclasses.asdict(digest_config)
|
||||
hash_obj = md5()
|
||||
hash_obj.update(str(data_dict).encode())
|
||||
hash_string = hash_obj.hexdigest()
|
||||
return hash_string
|
||||
|
||||
def calc_job_digest(self, job_config: Job.Config):
|
||||
config = job_config.digest_config
|
||||
if not config:
|
||||
return "f" * Settings.CACHE_DIGEST_LEN
|
||||
|
||||
cache_key = self._hash_digest_config(config)
|
||||
|
||||
if cache_key in self.digest_cache:
|
||||
return self.digest_cache[cache_key]
|
||||
|
||||
included_files = Utils.traverse_paths(
|
||||
job_config.digest_config.include_paths,
|
||||
job_config.digest_config.exclude_paths,
|
||||
sorted=True,
|
||||
)
|
||||
|
||||
print(f"calc digest: hash_key [{cache_key}], include [{included_files}] files")
|
||||
# Sort files to ensure consistent hash calculation
|
||||
included_files.sort()
|
||||
|
||||
# Calculate MD5 hash
|
||||
res = ""
|
||||
if not included_files:
|
||||
res = "f" * Settings.CACHE_DIGEST_LEN
|
||||
print(f"NOTE: empty digest config [{config}] - return dummy digest")
|
||||
else:
|
||||
hash_md5 = hashlib.md5()
|
||||
for file_path in included_files:
|
||||
res = self._calc_file_digest(file_path, hash_md5)
|
||||
assert res
|
||||
self.digest_cache[cache_key] = res
|
||||
return res
|
||||
|
||||
def calc_docker_digest(
|
||||
self,
|
||||
docker_config: Docker.Config,
|
||||
dependency_configs: List[Docker.Config],
|
||||
hash_md5=None,
|
||||
):
|
||||
"""
|
||||
|
||||
:param hash_md5:
|
||||
:param dependency_configs: list of Docker.Config(s) that :param docker_config: depends on
|
||||
:param docker_config: Docker.Config to calculate digest for
|
||||
:return:
|
||||
"""
|
||||
print(f"Calculate digest for docker [{docker_config.name}]")
|
||||
paths = Utils.traverse_path(docker_config.path, sorted=True)
|
||||
if not hash_md5:
|
||||
hash_md5 = hashlib.md5()
|
||||
|
||||
dependencies = []
|
||||
for dependency_name in docker_config.depends_on:
|
||||
for dependency_config in dependency_configs:
|
||||
if dependency_config.name == dependency_name:
|
||||
print(
|
||||
f"Add docker [{dependency_config.name}] as dependency for docker [{docker_config.name}] digest calculation"
|
||||
)
|
||||
dependencies.append(dependency_config)
|
||||
|
||||
for dependency in dependencies:
|
||||
_ = self.calc_docker_digest(dependency, dependency_configs, hash_md5)
|
||||
|
||||
for path in paths:
|
||||
_ = self._calc_file_digest(path, hash_md5=hash_md5)
|
||||
|
||||
return hash_md5.hexdigest()[: Settings.CACHE_DIGEST_LEN]
|
||||
|
||||
@staticmethod
|
||||
def _calc_file_digest(file_path, hash_md5):
|
||||
# Calculate MD5 hash
|
||||
with open(file_path, "rb") as f:
|
||||
for chunk in iter(lambda: f.read(4096), b""):
|
||||
hash_md5.update(chunk)
|
||||
|
||||
res = hash_md5.hexdigest()[: Settings.CACHE_DIGEST_LEN]
|
||||
return res
|
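A minimal sketch of computing a job digest with the class above; the job definition itself is made up:

from praktika.digest import Digest
from praktika.job import Job

job = Job.Config(
    name="Style check",                                   # hypothetical job
    runs_on=["ubuntu-latest"],
    command="python3 -m unittest discover ./ci/tests",
    digest_config=Job.CacheDigestConfig(
        include_paths=["./ci"],
        exclude_paths=["./**/*.pyc"],
    ),
)
# prints an md5 prefix of CACHE_DIGEST_LEN chars, or a dummy "fff..." digest if nothing matches
print(Digest().calc_job_digest(job))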
60
ci/praktika/docker.py
Normal file
@ -0,0 +1,60 @@
|
||||
import dataclasses
|
||||
from typing import List
|
||||
|
||||
from praktika.utils import Shell
|
||||
|
||||
|
||||
class Docker:
|
||||
class Platforms:
|
||||
ARM = "linux/arm64"
|
||||
AMD = "linux/amd64"
|
||||
arm_amd = [ARM, AMD]
|
||||
|
||||
@dataclasses.dataclass
|
||||
class Config:
|
||||
name: str
|
||||
path: str
|
||||
depends_on: List[str]
|
||||
platforms: List[str]
|
||||
|
||||
@classmethod
|
||||
def build(cls, config: "Docker.Config", log_file, digests, add_latest):
|
||||
tags_substr = f" -t {config.name}:{digests[config.name]}"
|
||||
if add_latest:
|
||||
tags_substr = f" -t {config.name}:latest"
|
||||
|
||||
from_tag = ""
|
||||
if config.depends_on:
|
||||
assert (
|
||||
len(config.depends_on) == 1
|
||||
), f"Only one dependency in depends_on is currently supported, docker [{config}]"
|
||||
from_tag = f" --build-arg FROM_TAG={digests[config.depends_on[0]]}"
|
||||
|
||||
command = f"docker buildx build --platform {','.join(config.platforms)} {tags_substr} {from_tag} --cache-to type=inline --cache-from type=registry,ref={config.name} --push {config.path}"
|
||||
return Shell.run(command, log_file=log_file, verbose=True)
|
||||
|
||||
@classmethod
|
||||
def sort_in_build_order(cls, dockers: List["Docker.Config"]):
|
||||
ready_names = []
|
||||
i = 0
|
||||
while i < len(dockers):
|
||||
docker = dockers[i]
|
||||
if not docker.depends_on or all(
|
||||
dep in ready_names for dep in docker.depends_on
|
||||
):
|
||||
ready_names.append(docker.name)
|
||||
i += 1
|
||||
else:
|
||||
dockers.append(dockers.pop(i))
|
||||
return dockers
|
||||
|
||||
@classmethod
|
||||
def login(cls, user_name, user_password):
|
||||
print("Docker: log in to dockerhub")
|
||||
return Shell.check(
|
||||
f"docker login --username '{user_name}' --password-stdin",
|
||||
strict=True,
|
||||
stdin_str=user_password,
|
||||
encoding="utf-8",
|
||||
verbose=True,
|
||||
)
|
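A sketch of the dependency ordering helper; the image names and paths are illustrative only:

from praktika.docker import Docker

dockers = [
    Docker.Config(name="ci/tester", path="./ci/docker/tester",
                  depends_on=["ci/base"], platforms=Docker.Platforms.arm_amd),
    Docker.Config(name="ci/base", path="./ci/docker/base",
                  depends_on=[], platforms=Docker.Platforms.arm_amd),
]
ordered = Docker.sort_in_build_order(dockers)
print([d.name for d in ordered])   # ["ci/base", "ci/tester"]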
3
ci/praktika/environment.py
Normal file
@ -0,0 +1,3 @@
|
||||
from praktika._environment import _Environment
|
||||
|
||||
Environment = _Environment.get()
|
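This thin module resolves the environment once at import time, so user code can simply do, for example:

from praktika.environment import Environment   # Environment is an _Environment instance

print(Environment.WORKFLOW_NAME, Environment.SHA)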
0
ci/praktika/execution/__init__.py
Normal file
4
ci/praktika/execution/__main__.py
Normal file
@ -0,0 +1,4 @@
|
||||
from praktika.execution.machine_init import run
|
||||
|
||||
if __name__ == "__main__":
|
||||
run()
|
31
ci/praktika/execution/execution_settings.py
Normal file
@ -0,0 +1,31 @@
|
||||
import os
|
||||
|
||||
from praktika.utils import MetaClasses
|
||||
|
||||
|
||||
class ScalingType(metaclass=MetaClasses.WithIter):
|
||||
DISABLED = "disabled"
|
||||
AUTOMATIC_SCALE_DOWN = "scale_down"
|
||||
AUTOMATIC_SCALE_UP_DOWN = "scale"
|
||||
|
||||
|
||||
class DefaultExecutionSettings:
|
||||
GH_ACTIONS_DIRECTORY: str = "/home/ubuntu/gh_actions"
|
||||
RUNNER_SCALING_TYPE: str = ScalingType.AUTOMATIC_SCALE_UP_DOWN
|
||||
MAX_WAIT_TIME_BEFORE_SCALE_DOWN_SEC: int = 30
|
||||
|
||||
|
||||
class ExecutionSettings:
|
||||
GH_ACTIONS_DIRECTORY = os.getenv(
|
||||
"GH_ACTIONS_DIRECTORY", DefaultExecutionSettings.GH_ACTIONS_DIRECTORY
|
||||
)
|
||||
RUNNER_SCALING_TYPE = os.getenv(
|
||||
"RUNNER_SCALING_TYPE", DefaultExecutionSettings.RUNNER_SCALING_TYPE
|
||||
)
|
||||
MAX_WAIT_TIME_BEFORE_SCALE_DOWN_SEC = int(
|
||||
os.getenv(
|
||||
"MAX_WAIT_TIME_BEFORE_SCALE_DOWN_SEC",
|
||||
DefaultExecutionSettings.MAX_WAIT_TIME_BEFORE_SCALE_DOWN_SEC,
|
||||
)
|
||||
)
|
||||
LOCAL_EXECUTION = bool(os.getenv("CLOUD", "0") == "0")
|
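Since the values are read from the process environment at import time, any override has to be exported before the module is imported; a sketch:

import os

os.environ["RUNNER_SCALING_TYPE"] = "disabled"            # must be set before the import below
os.environ["MAX_WAIT_TIME_BEFORE_SCALE_DOWN_SEC"] = "60"

from praktika.execution.execution_settings import ExecutionSettings

print(ExecutionSettings.RUNNER_SCALING_TYPE, ExecutionSettings.LOCAL_EXECUTION)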
338
ci/praktika/execution/machine_init.py
Normal file
@ -0,0 +1,338 @@
|
||||
import os
|
||||
import platform
|
||||
import signal
|
||||
import time
|
||||
import traceback
|
||||
|
||||
import requests
|
||||
from praktika.execution.execution_settings import ExecutionSettings, ScalingType
|
||||
from praktika.utils import ContextManager, Shell
|
||||
|
||||
|
||||
class StateMachine:
|
||||
class StateNames:
|
||||
INIT = "init"
|
||||
WAIT = "wait"
|
||||
RUN = "run"
|
||||
|
||||
def __init__(self):
|
||||
self.state = self.StateNames.INIT
|
||||
self.scale_type = ExecutionSettings.RUNNER_SCALING_TYPE
|
||||
self.machine = Machine(scaling_type=self.scale_type).update_instance_info()
|
||||
self.state_updated_at = int(time.time())
|
||||
self.forked = False
|
||||
|
||||
def kick(self):
|
||||
if self.state == self.StateNames.INIT:
|
||||
self.machine.config_actions().run_actions_async()
|
||||
print("State Machine: INIT -> WAIT")
|
||||
self.state = self.StateNames.WAIT
|
||||
self.state_updated_at = int(time.time())
|
||||
# TODO: add monitoring
|
||||
if not self.machine.is_actions_process_healthy():
|
||||
print(f"ERROR: GH runner process unexpectedly died")
|
||||
self.machine.self_terminate(decrease_capacity=False)
|
||||
elif self.state == self.StateNames.WAIT:
|
||||
res = self.machine.check_job_assigned()
|
||||
if res:
|
||||
print("State Machine: WAIT -> RUN")
|
||||
self.state = self.StateNames.RUN
|
||||
self.state_updated_at = int(time.time())
|
||||
self.check_scale_up()
|
||||
else:
|
||||
self.check_scale_down()
|
||||
elif self.state == self.StateNames.RUN:
|
||||
res = self.machine.check_job_running()
|
||||
if res:
|
||||
pass
|
||||
else:
|
||||
print("State Machine: RUN -> INIT")
|
||||
self.state = self.StateNames.INIT
|
||||
self.state_updated_at = int(time.time())
|
||||
|
||||
def check_scale_down(self):
|
||||
if self.scale_type not in (
|
||||
ScalingType.AUTOMATIC_SCALE_DOWN,
|
||||
ScalingType.AUTOMATIC_SCALE_UP_DOWN,
|
||||
):
|
||||
return
|
||||
if self.scale_type == ScalingType.AUTOMATIC_SCALE_UP_DOWN and not self.forked:
|
||||
print(
|
||||
f"Scaling type is AUTOMATIC_SCALE_UP_DOWN and machine has not run a job - do not scale down"
|
||||
)
|
||||
return
|
||||
if (
|
||||
int(time.time()) - self.state_updated_at
|
||||
> ExecutionSettings.MAX_WAIT_TIME_BEFORE_SCALE_DOWN_SEC
|
||||
):
|
||||
print(
|
||||
f"No job assigned for more than MAX_WAIT_TIME_BEFORE_SCALE_DOWN_SEC [{ExecutionSettings.MAX_WAIT_TIME_BEFORE_SCALE_DOWN_SEC}] - scale down the instance"
|
||||
)
|
||||
if not ExecutionSettings.LOCAL_EXECUTION:
|
||||
self.machine.self_terminate(decrease_capacity=True)
|
||||
else:
|
||||
print("Local execution - skip scaling operation")
|
||||
|
||||
def check_scale_up(self):
|
||||
if self.scale_type not in (ScalingType.AUTOMATIC_SCALE_UP_DOWN,):
|
||||
return
|
||||
if self.forked:
|
||||
print("This instance already forked once - do not scale up")
|
||||
return
|
||||
self.machine.self_fork()
|
||||
self.forked = True
|
||||
|
||||
def run(self):
|
||||
self.machine.unconfig_actions()
|
||||
while True:
|
||||
self.kick()
|
||||
time.sleep(5)
|
||||
|
||||
def terminate(self):
|
||||
try:
|
||||
self.machine.unconfig_actions()
|
||||
except:
|
||||
print("WARNING: failed to unconfig runner")
|
||||
if not ExecutionSettings.LOCAL_EXECUTION:
|
||||
if self.machine is not None:
|
||||
self.machine.self_terminate(decrease_capacity=False)
|
||||
time.sleep(10)
|
||||
# wait termination
|
||||
print("ERROR: failed to terminate instance via aws cli - try os call")
|
||||
os.system("sudo shutdown now")
|
||||
else:
|
||||
print("NOTE: Local execution - machine won't be terminated")
|
||||
|
||||
|
||||
class Machine:
|
||||
@staticmethod
|
||||
def get_latest_gh_actions_release():
|
||||
url = f"https://api.github.com/repos/actions/runner/releases/latest"
|
||||
response = requests.get(url, timeout=5)
|
||||
if response.status_code == 200:
|
||||
latest_release = response.json()
|
||||
return latest_release["tag_name"].removeprefix("v")
|
||||
else:
|
||||
print(f"Failed to get the latest release: {response.status_code}")
|
||||
return None
|
||||
|
||||
def __init__(self, scaling_type):
|
||||
self.os_name = platform.system().lower()
|
||||
assert self.os_name == "linux", f"Unsupported OS [{self.os_name}]"
|
||||
if platform.machine() == "x86_64":
|
||||
self.arch = "x64"
|
||||
elif "aarch64" in platform.machine().lower():
|
||||
self.arch = "arm64"
|
||||
else:
|
||||
assert False, f"Unsupported arch [{platform.machine()}]"
|
||||
self.instance_id = None
|
||||
self.asg_name = None
|
||||
self.runner_api_endpoint = None
|
||||
self.runner_type = None
|
||||
self.labels = []
|
||||
self.proc = None
|
||||
assert scaling_type in ScalingType
|
||||
self.scaling_type = scaling_type
|
||||
|
||||
def install_gh_actions_runner(self):
|
||||
gh_actions_version = self.get_latest_gh_actions_release()
|
||||
assert self.os_name and gh_actions_version and self.arch
|
||||
Shell.check(
|
||||
f"rm -rf {ExecutionSettings.GH_ACTIONS_DIRECTORY}",
|
||||
strict=True,
|
||||
verbose=True,
|
||||
)
|
||||
Shell.check(
|
||||
f"mkdir {ExecutionSettings.GH_ACTIONS_DIRECTORY}", strict=True, verbose=True
|
||||
)
|
||||
with ContextManager.cd(ExecutionSettings.GH_ACTIONS_DIRECTORY):
|
||||
Shell.check(
|
||||
f"curl -O -L https://github.com/actions/runner/releases/download/v{gh_actions_version}/actions-runner-{self.os_name}-{self.arch}-{gh_actions_version}.tar.gz",
|
||||
strict=True,
|
||||
verbose=True,
|
||||
)
|
||||
Shell.check(f"tar xzf *tar.gz", strict=True, verbose=True)
|
||||
Shell.check(f"rm -f *tar.gz", strict=True, verbose=True)
|
||||
Shell.check(f"sudo ./bin/installdependencies.sh", strict=True, verbose=True)
|
||||
Shell.check(
|
||||
f"chown -R ubuntu:ubuntu {ExecutionSettings.GH_ACTIONS_DIRECTORY}",
|
||||
strict=True,
|
||||
verbose=True,
|
||||
)
|
||||
|
||||
def _get_gh_token_from_ssm(self):
|
||||
gh_token = Shell.get_output_or_raise(
|
||||
"/usr/local/bin/aws ssm get-parameter --name github_runner_registration_token --with-decryption --output text --query Parameter.Value"
|
||||
)
|
||||
return gh_token
|
||||
|
||||
def update_instance_info(self):
|
||||
self.instance_id = Shell.get_output_or_raise("ec2metadata --instance-id")
|
||||
assert self.instance_id
|
||||
self.asg_name = Shell.get_output(
|
||||
f"aws ec2 describe-instances --instance-id {self.instance_id} --query \"Reservations[].Instances[].Tags[?Key=='aws:autoscaling:groupName'].Value\" --output text"
|
||||
)
|
||||
# self.runner_type = Shell.get_output_or_raise(
|
||||
# f'/usr/local/bin/aws ec2 describe-tags --filters "Name=resource-id,Values={self.instance_id}" --query "Tags[?Key==\'github:runner-type\'].Value" --output text'
|
||||
# )
|
||||
self.runner_type = self.asg_name
|
||||
if (
|
||||
self.scaling_type != ScalingType.DISABLED
|
||||
and not ExecutionSettings.LOCAL_EXECUTION
|
||||
):
|
||||
assert (
|
||||
self.asg_name and self.runner_type
|
||||
), f"Failed to retrieve ASG name, which is required for scaling_type [{self.scaling_type}]"
|
||||
org = os.getenv("MY_ORG", "")
|
||||
assert (
|
||||
org
|
||||
), "MY_ORG env variable myst be set to use init script for runner machine"
|
||||
self.runner_api_endpoint = f"https://github.com/{org}"
|
||||
|
||||
self.labels = ["self-hosted", self.runner_type]
|
||||
return self
|
||||
|
||||
@classmethod
|
||||
def check_job_assigned(cls):
|
||||
runner_pid = Shell.get_output_or_raise("pgrep Runner.Listener")
|
||||
if not runner_pid:
|
||||
print("check_job_assigned: No runner pid")
|
||||
return False
|
||||
log_file = Shell.get_output_or_raise(
|
||||
f"lsof -p {runner_pid} | grep -o {ExecutionSettings.GH_ACTIONS_DIRECTORY}/_diag/Runner.*log"
|
||||
)
|
||||
if not log_file:
|
||||
print("check_job_assigned: No log file")
|
||||
return False
|
||||
return Shell.check(f"grep -q 'Terminal] .* Running job:' {log_file}")
|
||||
|
||||
def check_job_running(self):
|
||||
if self.proc is None:
|
||||
print(f"WARNING: No job started")
|
||||
return False
|
||||
exit_code = self.proc.poll()
|
||||
if exit_code is None:
|
||||
return True
|
||||
else:
|
||||
print(f"Job runner finished with exit code [{exit_code}]")
|
||||
self.proc = None
|
||||
return False
|
||||
|
||||
def config_actions(self):
|
||||
if not self.instance_id:
|
||||
self.update_instance_info()
|
||||
token = self._get_gh_token_from_ssm()
|
||||
assert token and self.instance_id and self.runner_api_endpoint and self.labels
|
||||
command = f"sudo -u ubuntu {ExecutionSettings.GH_ACTIONS_DIRECTORY}/config.sh --token {token} \
|
||||
--url {self.runner_api_endpoint} --ephemeral --unattended --replace \
|
||||
--runnergroup Default --labels {','.join(self.labels)} --work wd --name {self.instance_id}"
|
||||
res = 1
|
||||
i = 0
|
||||
while i < 10 and res != 0:
|
||||
res = Shell.run(command)
|
||||
i += 1
|
||||
if res != 0:
|
||||
print(
|
||||
f"ERROR: failed to configure GH actions runner after [{i}] attempts, exit code [{res}], retry after 10s"
|
||||
)
|
||||
time.sleep(10)
|
||||
self._get_gh_token_from_ssm()
|
||||
if res == 0:
|
||||
print("GH action runner has been configured")
|
||||
else:
|
||||
assert False, "GH actions runner configuration failed"
|
||||
return self
|
||||
|
||||
def unconfig_actions(self):
|
||||
token = self._get_gh_token_from_ssm()
|
||||
command = f"sudo -u ubuntu {ExecutionSettings.GH_ACTIONS_DIRECTORY}/config.sh remove --token {token}"
|
||||
Shell.check(command, strict=True)
|
||||
return self
|
||||
|
||||
def run_actions_async(self):
|
||||
command = f"sudo -u ubuntu {ExecutionSettings.GH_ACTIONS_DIRECTORY}/run.sh"
|
||||
self.proc = Shell.run_async(command)
|
||||
assert self.proc is not None
|
||||
return self
|
||||
|
||||
def is_actions_process_healthy(self):
|
||||
try:
|
||||
if self.proc.poll() is None:
|
||||
return True
|
||||
|
||||
stdout, stderr = self.proc.communicate()
|
||||
|
||||
if self.proc.returncode != 0:
|
||||
# Handle failure
|
||||
print(
|
||||
f"GH Action process failed with return code {self.proc.returncode}"
|
||||
)
|
||||
print(f"Error output: {stderr}")
|
||||
return False
|
||||
else:
|
||||
print(f"GH Action process is not running")
|
||||
return False
|
||||
except Exception as e:
|
||||
print(f"GH Action process exception: {e}")
|
||||
return False
|
||||
|
||||
def self_terminate(self, decrease_capacity):
|
||||
print(
|
||||
f"WARNING: Self terminate is called, decrease_capacity [{decrease_capacity}]"
|
||||
)
|
||||
traceback.print_stack()
|
||||
if not self.instance_id:
|
||||
self.update_instance_info()
|
||||
assert self.instance_id
|
||||
command = f"aws autoscaling terminate-instance-in-auto-scaling-group --instance-id {self.instance_id}"
|
||||
if decrease_capacity:
|
||||
command += " --should-decrement-desired-capacity"
|
||||
else:
|
||||
command += " --no-should-decrement-desired-capacity"
|
||||
Shell.check(
|
||||
command=command,
|
||||
verbose=True,
|
||||
)
|
||||
|
||||
def self_fork(self):
|
||||
current_capacity = Shell.get_output(
|
||||
f'aws autoscaling describe-auto-scaling-groups --auto-scaling-group-name {self.asg_name} \
|
||||
--query "AutoScalingGroups[0].DesiredCapacity" --output text'
|
||||
)
|
||||
current_capacity = int(current_capacity)
|
||||
if not current_capacity:
|
||||
print("ERROR: failed to get current capacity - cannot scale up")
|
||||
return
|
||||
desired_capacity = current_capacity + 1
|
||||
command = f"aws autoscaling set-desired-capacity --auto-scaling-group-name {self.asg_name} --desired-capacity {desired_capacity}"
|
||||
print(f"Increase capacity [{current_capacity} -> {desired_capacity}]")
|
||||
res = Shell.check(
|
||||
command=command,
|
||||
verbose=True,
|
||||
)
|
||||
if not res:
|
||||
print("ERROR: failed to increase capacity - cannot scale up")
|
||||
|
||||
|
||||
def handle_signal(signum, _frame):
|
||||
print(f"FATAL: Received signal {signum}")
|
||||
raise RuntimeError(f"killed by signal {signum}")
|
||||
|
||||
|
||||
def run():
|
||||
signal.signal(signal.SIGINT, handle_signal)
|
||||
signal.signal(signal.SIGTERM, handle_signal)
|
||||
m = None
|
||||
try:
|
||||
m = StateMachine()
|
||||
m.run()
|
||||
except Exception as e:
|
||||
print(f"FATAL: Exception [{e}] - terminate instance")
|
||||
time.sleep(10)
|
||||
if m:
|
||||
m.terminate()
|
||||
raise e
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
run()
|
102
ci/praktika/favicon/lambda_function.py
Normal file
@ -0,0 +1,102 @@
|
||||
import base64
|
||||
import random
|
||||
import struct
|
||||
import zlib
|
||||
|
||||
|
||||
def create_favicon():
|
||||
# Image dimensions
|
||||
width = 32
|
||||
height = 32
|
||||
|
||||
# Initialize a transparent background image (RGBA: 4 bytes per pixel)
|
||||
image_data = bytearray(
|
||||
[0, 0, 0, 0] * width * height
|
||||
) # Set alpha to 0 for transparency
|
||||
|
||||
# Draw 4 vertical lines with color #FAFF68 (RGB: 250, 255, 104)
|
||||
line_color = [250, 255, 104, 255] # RGBA for #FAFF68 with full opacity
|
||||
line_width = 4
|
||||
space_width = 3
|
||||
x_start = space_width
|
||||
line_number = 4
|
||||
|
||||
line_height = height - space_width
|
||||
|
||||
for i in range(line_number):
|
||||
# Randomly pick a starting y position for each line
|
||||
y_start = random.randint(0, height - 1)
|
||||
|
||||
# Draw the line with random shift along Y-axis
|
||||
for y in range(line_height):
|
||||
y_pos = (y + y_start) % height
|
||||
for x in range(line_width):
|
||||
pixel_index = (y_pos * width + x_start + x) * 4
|
||||
image_data[pixel_index : pixel_index + 4] = line_color
|
||||
|
||||
x_start += line_width + space_width
|
||||
|
||||
# Convert the RGBA image to PNG format
|
||||
png_data = create_png(width, height, image_data)
|
||||
|
||||
# Convert PNG to ICO format
|
||||
ico_data = create_ico(png_data)
|
||||
|
||||
return ico_data
|
||||
|
||||
|
||||
def create_png(width, height, image_data):
|
||||
def write_chunk(chunk_type, data):
|
||||
chunk_len = struct.pack(">I", len(data))
|
||||
chunk_crc = struct.pack(">I", zlib.crc32(chunk_type + data) & 0xFFFFFFFF)
|
||||
return chunk_len + chunk_type + data + chunk_crc
|
||||
|
||||
png_signature = b"\x89PNG\r\n\x1a\n"
|
||||
ihdr_chunk = struct.pack(">IIBBBBB", width, height, 8, 6, 0, 0, 0)
|
||||
idat_data = zlib.compress(
|
||||
b"".join(
|
||||
b"\x00" + image_data[y * width * 4 : (y + 1) * width * 4]
|
||||
for y in range(height)
|
||||
),
|
||||
9,
|
||||
)
|
||||
idat_chunk = write_chunk(b"IDAT", idat_data)
|
||||
iend_chunk = write_chunk(b"IEND", b"")
|
||||
|
||||
return png_signature + write_chunk(b"IHDR", ihdr_chunk) + idat_chunk + iend_chunk
|
||||
|
||||
|
||||
def create_ico(png_data):
|
||||
# ICO header: reserved (2 bytes), type (2 bytes), image count (2 bytes)
|
||||
ico_header = struct.pack("<HHH", 0, 1, 1)
|
||||
# ICO entry: width, height, color count, reserved, color planes, bits per pixel, size, offset
|
||||
ico_entry = struct.pack("<BBBBHHII", 32, 32, 0, 0, 1, 32, len(png_data), 22)
|
||||
return ico_header + ico_entry + png_data
|
||||
|
||||
|
||||
def save_favicon_to_disk(ico_data, file_path="favicon.ico"):
|
||||
with open(file_path, "wb") as f:
|
||||
f.write(ico_data)
|
||||
print(f"Favicon saved to {file_path}")
|
||||
|
||||
|
||||
def lambda_handler(event, context):
|
||||
# Generate the favicon
|
||||
favicon_data = create_favicon()
|
||||
|
||||
# Return the favicon as a binary response
|
||||
return {
|
||||
"statusCode": 200,
|
||||
"headers": {
|
||||
"Content-Type": "image/x-icon",
|
||||
"Content-Disposition": 'inline; filename="favicon.ico"',
|
||||
},
|
||||
"body": base64.b64encode(favicon_data).decode("utf-8"),
|
||||
"isBase64Encoded": True,
|
||||
}
|
||||
|
||||
|
||||
# Optional: Call the function directly to generate and save favicon locally (if running outside Lambda)
|
||||
if __name__ == "__main__":
|
||||
favicon_data = create_favicon()
|
||||
save_favicon_to_disk(favicon_data)
|
105
ci/praktika/gh.py
Normal file
@ -0,0 +1,105 @@
|
||||
import json
|
||||
import time
|
||||
|
||||
from praktika._environment import _Environment
|
||||
from praktika.result import Result
|
||||
from praktika.settings import Settings
|
||||
from praktika.utils import Shell
|
||||
|
||||
|
||||
class GH:
|
||||
@classmethod
|
||||
def do_command_with_retries(cls, command):
|
||||
res = False
|
||||
retry_count = 0
|
||||
out, err = "", ""
|
||||
|
||||
while retry_count < Settings.MAX_RETRIES_GH and not res:
|
||||
ret_code, out, err = Shell.get_res_stdout_stderr(command, verbose=True)
|
||||
res = ret_code == 0
|
||||
if not res and "Validation Failed" in err:
|
||||
print("ERROR: GH command validation error")
|
||||
break
|
||||
if not res and "Bad credentials" in err:
|
||||
print("ERROR: GH credentials/auth failure")
|
||||
break
|
||||
if not res:
|
||||
retry_count += 1
|
||||
time.sleep(5)
|
||||
|
||||
if not res:
|
||||
print(
|
||||
f"ERROR: Failed to execute gh command [{command}] out:[{out}] err:[{err}] after [{retry_count}] attempts"
|
||||
)
|
||||
return res
|
||||
|
||||
@classmethod
|
||||
def post_pr_comment(
|
||||
cls, comment_body, or_update_comment_with_substring, repo=None, pr=None
|
||||
):
|
||||
if not repo:
|
||||
repo = _Environment.get().REPOSITORY
|
||||
if not pr:
|
||||
pr = _Environment.get().PR_NUMBER
|
||||
if or_update_comment_with_substring:
|
||||
print(f"check comment [{comment_body}] created")
|
||||
cmd_check_created = f'gh api -H "Accept: application/vnd.github.v3+json" \
|
||||
"/repos/{repo}/issues/{pr}/comments" \
|
||||
--jq \'.[] | {{id: .id, body: .body}}\' | grep -F "{or_update_comment_with_substring}"'
|
||||
output = Shell.get_output(cmd_check_created)
|
||||
if output:
|
||||
comment_ids = []
|
||||
try:
|
||||
comment_ids = [
|
||||
json.loads(item.strip())["id"] for item in output.split("\n")
|
||||
]
|
||||
except Exception as ex:
|
||||
print(f"Failed to retrieve PR comments with [{ex}]")
|
||||
for id in comment_ids:
|
||||
cmd = f'gh api \
|
||||
-X PATCH \
|
||||
-H "Accept: application/vnd.github.v3+json" \
|
||||
"/repos/{repo}/issues/comments/{id}" \
|
||||
-f body=\'{comment_body}\''
|
||||
print(f"Update existing comments [{id}]")
|
||||
return cls.do_command_with_retries(cmd)
|
||||
|
||||
cmd = f'gh pr comment {pr} --body "{comment_body}"'
|
||||
return cls.do_command_with_retries(cmd)
|
||||
|
||||
@classmethod
|
||||
def post_commit_status(cls, name, status, description, url):
|
||||
status = cls.convert_to_gh_status(status)
|
||||
command = (
|
||||
f"gh api -X POST -H 'Accept: application/vnd.github.v3+json' "
|
||||
f"/repos/{_Environment.get().REPOSITORY}/statuses/{_Environment.get().SHA} "
|
||||
f"-f state='{status}' -f target_url='{url}' "
|
||||
f"-f description='{description}' -f context='{name}'"
|
||||
)
|
||||
return cls.do_command_with_retries(command)
|
||||
|
||||
@classmethod
|
||||
def convert_to_gh_status(cls, status):
|
||||
if status in (
|
||||
Result.Status.PENDING,
|
||||
Result.Status.SUCCESS,
|
||||
Result.Status.FAILED,
|
||||
Result.Status.ERROR,
|
||||
):
|
||||
return status
|
||||
if status == Result.Status.RUNNING:
|
||||
return Result.Status.PENDING
|
||||
else:
|
||||
assert (
|
||||
False
|
||||
), f"Invalid status [{status}] to be set as GH commit status.state"
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
# test
|
||||
GH.post_pr_comment(
|
||||
comment_body="foobar",
|
||||
or_update_comment_with_substring="CI",
|
||||
repo="ClickHouse/praktika",
|
||||
pr=15,
|
||||
)
|
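For completeness, a sketch of setting a commit status with the helper above; the status context, description, and URL are placeholders:

from praktika.gh import GH
from praktika.result import Result

GH.post_commit_status(
    name="Example check",             # placeholder status context
    status=Result.Status.SUCCESS,
    description="all good",
    url="https://example.com/report", # placeholder report link
)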
71
ci/praktika/gh_auth.py
Normal file
@ -0,0 +1,71 @@
|
||||
import sys
|
||||
import time
|
||||
from typing import List
|
||||
|
||||
import requests
|
||||
from jwt import JWT, jwk_from_pem
|
||||
from praktika import Workflow
|
||||
from praktika.mangle import _get_workflows
|
||||
from praktika.settings import Settings
|
||||
from praktika.utils import Shell
|
||||
|
||||
|
||||
class GHAuth:
|
||||
@staticmethod
|
||||
def _generate_jwt(client_id, pem):
|
||||
pem = str.encode(pem)
|
||||
signing_key = jwk_from_pem(pem)
|
||||
payload = {
|
||||
"iat": int(time.time()),
|
||||
"exp": int(time.time()) + 600,
|
||||
"iss": client_id,
|
||||
}
|
||||
# Create JWT
|
||||
jwt_instance = JWT()
|
||||
encoded_jwt = jwt_instance.encode(payload, signing_key, alg="RS256")
|
||||
return encoded_jwt
|
||||
|
||||
@staticmethod
|
||||
def _get_installation_id(jwt_token):
|
||||
headers = {
|
||||
"Authorization": f"Bearer {jwt_token}",
|
||||
"Accept": "application/vnd.github.v3+json",
|
||||
}
|
||||
response = requests.get(
|
||||
"https://api.github.com/app/installations", headers=headers, timeout=10
|
||||
)
|
||||
response.raise_for_status()
|
||||
installations = response.json()
|
||||
assert installations, "No installations found for the GitHub App"
|
||||
return installations[0]["id"]
|
||||
|
||||
@staticmethod
|
||||
def _get_access_token(jwt_token, installation_id):
|
||||
headers = {
|
||||
"Authorization": f"Bearer {jwt_token}",
|
||||
"Accept": "application/vnd.github.v3+json",
|
||||
}
|
||||
url = (
|
||||
f"https://api.github.com/app/installations/{installation_id}/access_tokens"
|
||||
)
|
||||
response = requests.post(url, headers=headers, timeout=10)
|
||||
response.raise_for_status()
|
||||
return response.json()["token"]
|
||||
|
||||
@classmethod
|
||||
def auth(cls, workflow_name) -> None:
|
||||
wf = _get_workflows(workflow_name) # type: List[Workflow.Config]
|
||||
pem = wf[0].get_secret(Settings.SECRET_GH_APP_PEM_KEY).get_value()
|
||||
assert pem
|
||||
app_id = wf[0].get_secret(Settings.SECRET_GH_APP_ID).get_value()
|
||||
# Generate JWT
|
||||
jwt_token = cls._generate_jwt(app_id, pem)
|
||||
# Get Installation ID
|
||||
installation_id = cls._get_installation_id(jwt_token)
|
||||
# Get Installation Access Token
|
||||
access_token = cls._get_access_token(jwt_token, installation_id)
|
||||
Shell.check(f"echo {access_token} | gh auth login --with-token", strict=True)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
GHAuth.auth(sys.argv[1])
|
124
ci/praktika/hook_cache.py
Normal file
@ -0,0 +1,124 @@
|
||||
from praktika._environment import _Environment
|
||||
from praktika.cache import Cache
|
||||
from praktika.mangle import _get_workflows
|
||||
from praktika.runtime import RunConfig
|
||||
from praktika.settings import Settings
|
||||
from praktika.utils import Utils
|
||||
|
||||
|
||||
class CacheRunnerHooks:
|
||||
@classmethod
|
||||
def configure(cls, _workflow):
|
||||
workflow_config = RunConfig.from_fs(_workflow.name)
|
||||
cache = Cache()
|
||||
assert _Environment.get().WORKFLOW_NAME
|
||||
workflow = _get_workflows(name=_Environment.get().WORKFLOW_NAME)[0]
|
||||
print(f"Workflow Configure, workflow [{workflow.name}]")
|
||||
assert (
|
||||
workflow.enable_cache
|
||||
), f"Outdated yaml pipelines or BUG. Configuration must be run only for workflow with enabled cache, workflow [{workflow.name}]"
|
||||
artifact_digest_map = {}
|
||||
job_digest_map = {}
|
||||
for job in workflow.jobs:
|
||||
if not job.digest_config:
|
||||
print(
|
||||
f"NOTE: job [{job.name}] has no Config.digest_config - skip cache check, always run"
|
||||
)
|
||||
digest = cache.digest.calc_job_digest(job_config=job)
|
||||
job_digest_map[job.name] = digest
|
||||
if job.provides:
|
||||
# assign the job digest also to the artifacts it provides
|
||||
for artifact in job.provides:
|
||||
artifact_digest_map[artifact] = digest
|
||||
for job in workflow.jobs:
|
||||
digests_combined_list = []
|
||||
if job.requires:
|
||||
# include digest of required artifact to the job digest, so that they affect job state
|
||||
for artifact_name in job.requires:
|
||||
if artifact_name not in [
|
||||
artifact.name for artifact in workflow.artifacts
|
||||
]:
|
||||
# phony artifact assumed to be not affecting jobs that depend on it
|
||||
continue
|
||||
digests_combined_list.append(artifact_digest_map[artifact_name])
|
||||
digests_combined_list.append(job_digest_map[job.name])
|
||||
final_digest = "-".join(digests_combined_list)
|
||||
workflow_config.digest_jobs[job.name] = final_digest
|
||||
|
||||
assert (
|
||||
workflow_config.digest_jobs
|
||||
), f"BUG, Workflow with enabled cache must have job digests after configuration, wf [{workflow.name}]"
|
||||
|
||||
print("Check remote cache")
|
||||
job_to_cache_record = {}
|
||||
for job_name, job_digest in workflow_config.digest_jobs.items():
|
||||
record = cache.fetch_success(job_name=job_name, job_digest=job_digest)
|
||||
if record:
|
||||
assert (
|
||||
Utils.normalize_string(job_name)
|
||||
not in workflow_config.cache_success
|
||||
)
|
||||
workflow_config.cache_success.append(job_name)
|
||||
workflow_config.cache_success_base64.append(Utils.to_base64(job_name))
|
||||
job_to_cache_record[job_name] = record
|
||||
|
||||
print("Check artifacts to reuse")
|
||||
for job in workflow.jobs:
|
||||
if job.name in workflow_config.cache_success:
|
||||
if job.provides:
|
||||
for artifact_name in job.provides:
|
||||
workflow_config.cache_artifacts[artifact_name] = (
|
||||
job_to_cache_record[job.name]
|
||||
)
|
||||
|
||||
print(f"Write config to GH's job output")
|
||||
with open(_Environment.get().JOB_OUTPUT_STREAM, "a", encoding="utf8") as f:
|
||||
print(
|
||||
f"DATA={workflow_config.to_json()}",
|
||||
file=f,
|
||||
)
|
||||
print(f"WorkflowRuntimeConfig: [{workflow_config.to_json(pretty=True)}]")
|
||||
print(
|
||||
"Dump WorkflowConfig to fs, the next hooks in this job might want to see it"
|
||||
)
|
||||
workflow_config.dump()
|
||||
|
||||
return workflow_config
|
||||
|
||||
@classmethod
|
||||
def pre_run(cls, _workflow, _job, _required_artifacts=None):
|
||||
path_prefixes = []
|
||||
if _job.name == Settings.CI_CONFIG_JOB_NAME:
|
||||
# SPECIAL handling
|
||||
return path_prefixes
|
||||
env = _Environment.get()
|
||||
runtime_config = RunConfig.from_fs(_workflow.name)
|
||||
required_artifacts = []
|
||||
if _required_artifacts:
|
||||
required_artifacts = _required_artifacts
|
||||
for artifact in required_artifacts:
|
||||
if artifact.name in runtime_config.cache_artifacts:
|
||||
record = runtime_config.cache_artifacts[artifact.name]
|
||||
print(f"Reuse artifact [{artifact.name}] from [{record}]")
|
||||
path_prefixes.append(
|
||||
env.get_s3_prefix_static(
|
||||
record.pr_number, record.branch, record.sha
|
||||
)
|
||||
)
|
||||
else:
|
||||
path_prefixes.append(env.get_s3_prefix())
|
||||
return path_prefixes
|
||||
|
||||
@classmethod
|
||||
def run(cls, workflow, job):
|
||||
pass
|
||||
|
||||
@classmethod
|
||||
def post_run(cls, workflow, job):
|
||||
if job.name == Settings.CI_CONFIG_JOB_NAME:
|
||||
return
|
||||
if job.digest_config:
|
||||
# cache is enabled, and it's a job that supposed to be cached (has defined digest config)
|
||||
workflow_runtime = RunConfig.from_fs(workflow.name)
|
||||
job_digest = workflow_runtime.digest_jobs[job.name]
|
||||
Cache.push_success_record(job.name, job_digest, workflow_runtime.sha)
|
153
ci/praktika/hook_html.py
Normal file
@ -0,0 +1,153 @@
|
||||
import urllib.parse
|
||||
from pathlib import Path
|
||||
|
||||
from praktika._environment import _Environment
|
||||
from praktika.gh import GH
|
||||
from praktika.parser import WorkflowConfigParser
|
||||
from praktika.result import Result, ResultInfo
|
||||
from praktika.runtime import RunConfig
|
||||
from praktika.s3 import S3
|
||||
from praktika.settings import Settings
|
||||
from praktika.utils import Utils
|
||||
|
||||
|
||||
class HtmlRunnerHooks:
|
||||
@classmethod
|
||||
def configure(cls, _workflow):
|
||||
# generate pending Results for all jobs in the workflow
|
||||
if _workflow.enable_cache:
|
||||
skip_jobs = RunConfig.from_fs(_workflow.name).cache_success
|
||||
else:
|
||||
skip_jobs = []
|
||||
|
||||
env = _Environment.get()
|
||||
results = []
|
||||
for job in _workflow.jobs:
|
||||
if job.name not in skip_jobs:
|
||||
result = Result.generate_pending(job.name)
|
||||
else:
|
||||
result = Result.generate_skipped(job.name)
|
||||
results.append(result)
|
||||
summary_result = Result.generate_pending(_workflow.name, results=results)
|
||||
summary_result.aux_links.append(env.CHANGE_URL)
|
||||
summary_result.aux_links.append(env.RUN_URL)
|
||||
summary_result.start_time = Utils.timestamp()
|
||||
page_url = "/".join(
|
||||
["https:/", Settings.HTML_S3_PATH, str(Path(Settings.HTML_PAGE_FILE).name)]
|
||||
)
|
||||
for bucket, endpoint in Settings.S3_BUCKET_TO_HTTP_ENDPOINT.items():
|
||||
page_url = page_url.replace(bucket, endpoint)
|
||||
# TODO: add support for non-PRs (use branch?)
|
||||
page_url += f"?PR={env.PR_NUMBER}&sha=latest&name_0={urllib.parse.quote(env.WORKFLOW_NAME, safe='')}"
|
||||
summary_result.html_link = page_url
|
||||
|
||||
# clean the previous latest results in PR if any
|
||||
if env.PR_NUMBER:
|
||||
S3.clean_latest_result()
|
||||
S3.copy_result_to_s3(
|
||||
summary_result,
|
||||
unlock=False,
|
||||
)
|
||||
|
||||
print(f"CI Status page url [{page_url}]")
|
||||
|
||||
res1 = GH.post_commit_status(
|
||||
name=_workflow.name,
|
||||
status=Result.Status.PENDING,
|
||||
description="",
|
||||
url=page_url,
|
||||
)
|
||||
res2 = GH.post_pr_comment(
|
||||
comment_body=f"Workflow [[{_workflow.name}]({page_url})], commit [{_Environment.get().SHA[:8]}]",
|
||||
or_update_comment_with_substring=f"Workflow [",
|
||||
)
|
||||
if not (res1 or res2):
|
||||
print(
|
||||
"ERROR: Failed to set both GH commit status and PR comment with Workflow Status, cannot proceed"
|
||||
)
|
||||
raise RuntimeError("Failed to post both the GH commit status and the PR comment with the workflow status")
|
||||
|
||||
@classmethod
|
||||
def pre_run(cls, _workflow, _job):
|
||||
result = Result.from_fs(_job.name)
|
||||
S3.copy_result_from_s3(
|
||||
Result.file_name_static(_workflow.name),
|
||||
)
|
||||
workflow_result = Result.from_fs(_workflow.name)
|
||||
workflow_result.update_sub_result(result)
|
||||
S3.copy_result_to_s3(
|
||||
workflow_result,
|
||||
unlock=True,
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def run(cls, _workflow, _job):
|
||||
pass
|
||||
|
||||
@classmethod
|
||||
def post_run(cls, _workflow, _job, info_errors):
|
||||
result = Result.from_fs(_job.name)
|
||||
env = _Environment.get()
|
||||
S3.copy_result_from_s3(
|
||||
Result.file_name_static(_workflow.name),
|
||||
lock=True,
|
||||
)
|
||||
workflow_result = Result.from_fs(_workflow.name)
|
||||
print(f"Workflow info [{workflow_result.info}], info_errors [{info_errors}]")
|
||||
|
||||
env_info = env.REPORT_INFO
|
||||
if env_info:
|
||||
print(
|
||||
f"WARNING: some info lines are set in Environment - append to report [{env_info}]"
|
||||
)
|
||||
info_errors += env_info
|
||||
if info_errors:
|
||||
info_errors = [f" | {error}" for error in info_errors]
|
||||
info_str = f"{_job.name}:\n"
|
||||
info_str += "\n".join(info_errors)
|
||||
print("Update workflow results with new info")
|
||||
workflow_result.set_info(info_str)
|
||||
|
||||
old_status = workflow_result.status
|
||||
|
||||
S3.upload_result_files_to_s3(result)
|
||||
workflow_result.update_sub_result(result)
|
||||
|
||||
skipped_job_results = []
|
||||
if not result.is_ok():
|
||||
print(
|
||||
"Current job failed - find dependee jobs in the workflow and set their statuses to skipped"
|
||||
)
|
||||
workflow_config_parsed = WorkflowConfigParser(_workflow).parse()
|
||||
for dependee_job in workflow_config_parsed.workflow_yaml_config.jobs:
|
||||
if _job.name in dependee_job.needs:
|
||||
if _workflow.get_job(dependee_job.name).run_unless_cancelled:
|
||||
continue
|
||||
print(
|
||||
f"NOTE: Set job [{dependee_job.name}] status to [{Result.Status.SKIPPED}] due to current failure"
|
||||
)
|
||||
skipped_job_results.append(
|
||||
Result(
|
||||
name=dependee_job.name,
|
||||
status=Result.Status.SKIPPED,
|
||||
info=ResultInfo.SKIPPED_DUE_TO_PREVIOUS_FAILURE
|
||||
+ f" [{_job.name}]",
|
||||
)
|
||||
)
|
||||
for skipped_job_result in skipped_job_results:
|
||||
workflow_result.update_sub_result(skipped_job_result)
|
||||
|
||||
S3.copy_result_to_s3(
|
||||
workflow_result,
|
||||
unlock=True,
|
||||
)
|
||||
if workflow_result.status != old_status:
|
||||
print(
|
||||
f"Update GH commit status [{result.name}]: [{old_status} -> {workflow_result.status}], link [{workflow_result.html_link}]"
|
||||
)
|
||||
GH.post_commit_status(
|
||||
name=workflow_result.name,
|
||||
status=GH.convert_to_gh_status(workflow_result.status),
|
||||
description="",
|
||||
url=workflow_result.html_link,
|
||||
)
|
43
ci/praktika/hook_interface.py
Normal file
@ -0,0 +1,43 @@
from abc import ABC, abstractmethod

from praktika import Workflow


class HookInterface(ABC):
    @abstractmethod
    def pre_run(self, _workflow, _job):
        """
        runs in pre-run step
        :param _workflow:
        :param _job:
        :return:
        """
        pass

    @abstractmethod
    def run(self, _workflow, _job):
        """
        runs in run step
        :param _workflow:
        :param _job:
        :return:
        """
        pass

    @abstractmethod
    def post_run(self, _workflow, _job):
        """
        runs in post-run step
        :param _workflow:
        :param _job:
        :return:
        """
        pass

    @abstractmethod
    def configure(self, _workflow: Workflow.Config):
        """
        runs in initial WorkflowConfig job in run step
        :return:
        """
        pass
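
For illustration, a custom hook might subclass this interface as follows. This is a hypothetical sketch, not part of this commit; it assumes the module is importable as praktika.hook_interface and the hook only prints what each step does:

from praktika.hook_interface import HookInterface


class LoggingHook(HookInterface):
    # Minimal no-op hook that reports which stage is running.
    def configure(self, _workflow):
        print(f"configure workflow [{_workflow.name}]")

    def pre_run(self, _workflow, _job):
        print(f"pre-run for job [{_job.name}]")

    def run(self, _workflow, _job):
        pass

    def post_run(self, _workflow, _job):
        print(f"post-run for job [{_job.name}]")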
10
ci/praktika/html_prepare.py
Normal file
10
ci/praktika/html_prepare.py
Normal file
@ -0,0 +1,10 @@
from praktika.s3 import S3
from praktika.settings import Settings


class Html:
    @classmethod
    def prepare(cls):
        S3.copy_file_to_s3(
            s3_path=Settings.HTML_S3_PATH, local_path=Settings.HTML_PAGE_FILE
        )
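
Usage is a single call; a sketch of a possible call site (assuming Settings.HTML_S3_PATH and Settings.HTML_PAGE_FILE are configured and the module is importable as praktika.html_prepare):

from praktika.html_prepare import Html

Html.prepare()  # copies the local report page to the configured S3 location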
102
ci/praktika/job.py
Normal file
102
ci/praktika/job.py
Normal file
@ -0,0 +1,102 @@
import copy
import json
from dataclasses import dataclass, field
from typing import Any, List, Optional


class Job:
    @dataclass
    class Requirements:
        python: bool = False
        python_requirements_txt: str = ""

    @dataclass
    class CacheDigestConfig:
        include_paths: List[str] = field(default_factory=list)
        exclude_paths: List[str] = field(default_factory=list)

    @dataclass
    class Config:
        # Job Name
        name: str

        # Machine's label to run the job on, e.g. ["ubuntu-latest"] for the free GH runner
        runs_on: List[str]

        # Job Run Command
        command: str

        # What job requires
        # May be phony or physical names
        requires: List[str] = field(default_factory=list)

        # What job provides
        # May be phony or physical names
        provides: List[str] = field(default_factory=list)

        job_requirements: Optional["Job.Requirements"] = None

        timeout: int = 1 * 3600

        digest_config: Optional["Job.CacheDigestConfig"] = None

        run_in_docker: str = ""

        run_unless_cancelled: bool = False

        allow_merge_on_failure: bool = False

        parameter: Any = None

        def parametrize(
            self,
            parameter: Optional[List[Any]] = None,
            runs_on: Optional[List[List[str]]] = None,
            timeout: Optional[List[int]] = None,
        ):
            assert (
                parameter or runs_on
            ), "Either :parameter or :runs_on must be a non-empty list for parametrization"
            if not parameter:
                parameter = [None] * len(runs_on)
            if not runs_on:
                runs_on = [None] * len(parameter)
            if not timeout:
                timeout = [None] * len(parameter)
            assert (
                len(parameter) == len(runs_on) == len(timeout)
            ), "Parametrization lists must be of the same size"

            res = []
            for parameter_, runs_on_, timeout_ in zip(parameter, runs_on, timeout):
                obj = copy.deepcopy(self)
                if parameter_:
                    obj.parameter = parameter_
                if runs_on_:
                    obj.runs_on = runs_on_
                if timeout_:
                    obj.timeout = timeout_
                obj.name = obj.get_job_name_with_parameter()
                res.append(obj)
            return res

        def get_job_name_with_parameter(self):
            name, parameter, runs_on = self.name, self.parameter, self.runs_on
            res = name
            name_params = []
            if isinstance(parameter, list) or isinstance(parameter, dict):
                name_params.append(json.dumps(parameter))
            elif parameter is not None:
                name_params.append(parameter)
            if runs_on:
                assert isinstance(runs_on, list)
                name_params.append(json.dumps(runs_on))
            if name_params:
                name_params = [str(param) for param in name_params]
                res += f" ({', '.join(name_params)})"

            self.name = res
            return res

        def __repr__(self):
            return self.name
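
A short sketch of how parametrize() fans one Job.Config out into several named jobs (the job name, runner labels and command below are made up for illustration):

from praktika import Job

base = Job.Config(
    name="Stateless tests",
    runs_on=["ubuntu-latest"],
    command="python -m pytest",
)
jobs = base.parametrize(
    parameter=["asan", "tsan"],
    runs_on=[["builder"], ["builder"]],
    timeout=[3600, 7200],
)
# Each copy gets its parameter and runner list appended to the name, e.g.
# 'Stateless tests (asan, ["builder"])' and 'Stateless tests (tsan, ["builder"])'
print([job.name for job in jobs])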
727
ci/praktika/json.html
Normal file
727
ci/praktika/json.html
Normal file
@ -0,0 +1,727 @@
|
||||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="UTF-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||
<title>praktika report</title>
|
||||
<link rel="icon" href="https://w4z3pajszlbkfcw2wcylfei5km0xmwag.lambda-url.us-east-1.on.aws/" type="image/x-icon">
|
||||
<style>
|
||||
|
||||
/* Default (Day Theme) */
|
||||
:root {
|
||||
--background-color: white;
|
||||
--text-color: #000;
|
||||
--tile-background: #f9f9f9;
|
||||
--footer-background: #f1f1f1;
|
||||
--footer-text-color: #000;
|
||||
--status-width: 300px;
|
||||
}
|
||||
|
||||
body {
|
||||
background-color: var(--background-color);
|
||||
color: var(--text-color);
|
||||
height: 100%;
|
||||
margin: 0;
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
font-family: monospace, sans-serif;
|
||||
}
|
||||
|
||||
body.night-theme {
|
||||
--background-color: #1F1F1C;
|
||||
--text-color: #fff;
|
||||
--tile-background: black;
|
||||
}
|
||||
|
||||
#info-container {
|
||||
margin-left: calc(var(--status-width) + 20px);
|
||||
margin-bottom: 10px;
|
||||
background-color: var(--tile-background);
|
||||
padding: 10px;
|
||||
text-align: left;
|
||||
}
|
||||
|
||||
#status-container {
|
||||
position: fixed;
|
||||
top: 0;
|
||||
bottom: 0;
|
||||
left: 0;
|
||||
width: var(--status-width);
|
||||
background-color: var(--tile-background);
|
||||
padding: 20px;
|
||||
box-sizing: border-box;
|
||||
text-align: left;
|
||||
font-size: 18px;
|
||||
font-weight: bold;
|
||||
margin: 0; /* Remove margin */
|
||||
}
|
||||
|
||||
#status-container button {
|
||||
display: block; /* Stack buttons vertically */
|
||||
width: 100%; /* Full width of container */
|
||||
padding: 10px;
|
||||
margin-bottom: 10px; /* Space between buttons */
|
||||
background-color: #4CAF50; /* Green background color */
|
||||
color: white;
|
||||
border: none;
|
||||
border-radius: 5px;
|
||||
font-size: 16px;
|
||||
cursor: pointer;
|
||||
}
|
||||
|
||||
#status-container button:hover {
|
||||
background-color: #45a049; /* Darker green on hover */
|
||||
}
|
||||
|
||||
#result-container {
|
||||
background-color: var(--tile-background);
|
||||
margin-left: calc(var(--status-width) + 20px);
|
||||
padding: 20px;
|
||||
box-sizing: border-box;
|
||||
text-align: center;
|
||||
font-size: 18px;
|
||||
font-weight: normal;
|
||||
flex-grow: 1;
|
||||
}
|
||||
|
||||
#footer {
|
||||
padding: 10px;
|
||||
position: fixed;
|
||||
bottom: 0;
|
||||
left: 0;
|
||||
right: 0;
|
||||
background-color: #1F1F1C;
|
||||
color: white;
|
||||
font-size: 14px;
|
||||
display: flex;
|
||||
justify-content: space-between; /* Ensure the .left expands, and .right and .settings are aligned to the right */
|
||||
align-items: center;
|
||||
}
|
||||
|
||||
#footer a {
|
||||
color: white;
|
||||
text-decoration: none;
|
||||
}
|
||||
|
||||
#footer .left {
|
||||
flex-grow: 1; /* Takes up all the available space */
|
||||
}
|
||||
|
||||
/* make some space around '/' in the navigation line */
|
||||
#footer .left span.separator {
|
||||
margin-left: 5px;
|
||||
margin-right: 5px;
|
||||
}
|
||||
|
||||
#footer .right, #footer .settings {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
}
|
||||
|
||||
#footer .right a::before {
|
||||
content: "#";
|
||||
margin-left: 10px;
|
||||
color: #e0e0e0;
|
||||
}
|
||||
|
||||
#footer .right::before, #footer .settings::before {
|
||||
content: "|"; /* Add separator before right and settings sections */
|
||||
margin-left: 10px;
|
||||
margin-right: 10px;
|
||||
color: #e0e0e0;
|
||||
}
|
||||
|
||||
#theme-toggle {
|
||||
cursor: pointer;
|
||||
font-size: 20px;
|
||||
color: white;
|
||||
}
|
||||
|
||||
#theme-toggle:hover {
|
||||
color: #e0e0e0;
|
||||
}
|
||||
|
||||
#footer a:hover {
|
||||
text-decoration: underline;
|
||||
}
|
||||
|
||||
#links {
|
||||
margin-top: 10px;
|
||||
padding: 15px;
|
||||
border: 1px solid #ccc;
|
||||
border-radius: 5px;
|
||||
background-color: #f9f9f9;
|
||||
}
|
||||
|
||||
#links a {
|
||||
display: block;
|
||||
margin-bottom: 5px;
|
||||
padding: 5px 10px;
|
||||
background-color: #D5D5D5;
|
||||
color: black;
|
||||
text-decoration: none;
|
||||
border-radius: 5px;
|
||||
}
|
||||
|
||||
#links a:hover {
|
||||
background-color: #D5D5D5;
|
||||
}
|
||||
|
||||
table {
|
||||
width: 100%;
|
||||
border-collapse: collapse;
|
||||
}
|
||||
|
||||
th.name-column, td.name-column {
|
||||
max-width: 400px; /* Set the maximum width for the column */
|
||||
white-space: nowrap; /* Prevent text from wrapping */
|
||||
overflow: hidden; /* Hide the overflowed text */
|
||||
text-overflow: ellipsis; /* Show ellipsis (...) for overflowed text */
|
||||
}
|
||||
|
||||
th.status-column, td.status-column {
|
||||
max-width: 100px; /* Set the maximum width for the column */
|
||||
white-space: nowrap; /* Prevent text from wrapping */
|
||||
overflow: hidden; /* Hide the overflowed text */
|
||||
text-overflow: ellipsis; /* Show ellipsis (...) for overflowed text */
|
||||
}
|
||||
|
||||
th.time-column, td.time-column {
|
||||
max-width: 120px; /* Set the maximum width for the column */
|
||||
white-space: nowrap; /* Prevent text from wrapping */
|
||||
text-align: right;
|
||||
}
|
||||
|
||||
th.info-column, td.info-column {
|
||||
width: 100%; /* Allow the column to take all the remaining space */
|
||||
}
|
||||
|
||||
th, td {
|
||||
padding: 8px;
|
||||
border: 1px solid #ddd;
|
||||
text-align: left;
|
||||
}
|
||||
|
||||
th {
|
||||
background-color: #f4f4f4;
|
||||
}
|
||||
|
||||
.status-success {
|
||||
color: green;
|
||||
font-weight: bold;
|
||||
}
|
||||
|
||||
.status-fail {
|
||||
color: red;
|
||||
font-weight: bold;
|
||||
}
|
||||
|
||||
.status-pending {
|
||||
color: #d4a017;
|
||||
font-weight: bold;
|
||||
}
|
||||
|
||||
.status-broken {
|
||||
color: purple;
|
||||
font-weight: bold;
|
||||
}
|
||||
|
||||
.status-run {
|
||||
color: blue;
|
||||
font-weight: bold;
|
||||
}
|
||||
|
||||
.status-error {
|
||||
color: darkred;
|
||||
font-weight: bold;
|
||||
}
|
||||
|
||||
.status-other {
|
||||
color: grey;
|
||||
font-weight: bold;
|
||||
}
|
||||
|
||||
.json-key {
|
||||
font-weight: bold;
|
||||
margin-top: 10px;
|
||||
}
|
||||
|
||||
.json-value {
|
||||
margin-left: 20px;
|
||||
}
|
||||
|
||||
.json-value a {
|
||||
color: #007bff;
|
||||
}
|
||||
|
||||
.json-value a:hover {
|
||||
text-decoration: underline;
|
||||
}
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
<div id="info-container"></div>
|
||||
<div id="status-container"></div>
|
||||
<div id="result-container"></div>
|
||||
|
||||
<footer id="footer">
|
||||
<div class="left"></div>
|
||||
<div class="right"></div>
|
||||
<div class="settings">
|
||||
<span id="theme-toggle">☀️</span>
|
||||
</div>
|
||||
</footer>
|
||||
|
||||
<script>
|
||||
function toggleTheme() {
|
||||
document.body.classList.toggle('night-theme');
|
||||
const toggleIcon = document.getElementById('theme-toggle');
|
||||
if (document.body.classList.contains('night-theme')) {
|
||||
toggleIcon.textContent = '☾'; // Moon for night mode
|
||||
} else {
|
||||
toggleIcon.textContent = '☀️'; // Sun for day mode
|
||||
}
|
||||
}
|
||||
|
||||
// Attach the toggle function to the click event of the icon
|
||||
document.getElementById('theme-toggle').addEventListener('click', toggleTheme);
|
||||
|
||||
// Function to format timestamp to "DD-mmm-YYYY HH:MM:SS.MM"
|
||||
function formatTimestamp(timestamp, showDate = true) {
|
||||
const date = new Date(timestamp * 1000);
|
||||
const day = String(date.getDate()).padStart(2, '0');
|
||||
const monthNames = ["Jan", "Feb", "Mar", "Apr", "May", "Jun",
|
||||
"Jul", "Aug", "Sep", "Oct", "Nov", "Dec"];
|
||||
const month = monthNames[date.getMonth()];
|
||||
const year = date.getFullYear();
|
||||
const hours = String(date.getHours()).padStart(2, '0');
|
||||
const minutes = String(date.getMinutes()).padStart(2, '0');
|
||||
const seconds = String(date.getSeconds()).padStart(2, '0');
|
||||
//const milliseconds = String(date.getMilliseconds()).padStart(2, '0');
|
||||
|
||||
return showDate
|
||||
? `${day}-${month}-${year} ${hours}:${minutes}:${seconds}`
|
||||
: `${hours}:${minutes}:${seconds}`;
|
||||
}
|
||||
|
||||
// Function to determine status class based on value
|
||||
function getStatusClass(status) {
|
||||
const lowerStatus = status.toLowerCase();
|
||||
if (lowerStatus.includes('success') || lowerStatus === 'ok') return 'status-success';
|
||||
if (lowerStatus.includes('fail')) return 'status-fail';
|
||||
if (lowerStatus.includes('pending')) return 'status-pending';
|
||||
if (lowerStatus.includes('broken')) return 'status-broken';
|
||||
if (lowerStatus.includes('run')) return 'status-run';
|
||||
if (lowerStatus.includes('error')) return 'status-error';
|
||||
return 'status-other';
|
||||
}
|
||||
|
||||
// Function to format duration from seconds to "HH:MM:SS"
|
||||
function formatDuration(durationInSeconds) {
|
||||
// Check if the duration is empty, null, or not a number
|
||||
if (!durationInSeconds || isNaN(durationInSeconds)) {
|
||||
return '';
|
||||
}
|
||||
|
||||
// Ensure duration is a floating-point number
|
||||
const duration = parseFloat(durationInSeconds);
|
||||
|
||||
// Calculate seconds and milliseconds
|
||||
const seconds = Math.floor(duration); // Whole seconds
|
||||
const milliseconds = Math.floor((duration % 1) * 1000); // Convert fraction to milliseconds
|
||||
|
||||
// Format seconds and milliseconds with leading zeros where needed
|
||||
const formattedSeconds = String(seconds);
|
||||
const formattedMilliseconds = String(milliseconds).padStart(3, '0');
|
||||
|
||||
// Return the formatted duration as seconds.milliseconds
|
||||
return `${formattedSeconds}.${formattedMilliseconds}`;
|
||||
}
|
||||
|
||||
function addKeyValueToStatus(key, value) {
|
||||
|
||||
const statusContainer = document.getElementById('status-container');
|
||||
|
||||
const keyElement = document.createElement('div');
|
||||
keyElement.className = 'json-key';
|
||||
keyElement.textContent = key + ':';
|
||||
|
||||
const valueElement = document.createElement('div');
|
||||
valueElement.className = 'json-value';
|
||||
valueElement.textContent = value;
|
||||
|
||||
statusContainer.appendChild(keyElement);
|
||||
statusContainer.appendChild(valueElement);
|
||||
}
|
||||
|
||||
function addFileButtonToStatus(key, links) {
|
||||
|
||||
if (links == null) {
|
||||
return
|
||||
}
|
||||
|
||||
const statusContainer = document.getElementById('status-container');
|
||||
|
||||
const keyElement = document.createElement('div');
|
||||
keyElement.className = 'json-key';
|
||||
keyElement.textContent = key + ':';
|
||||
statusContainer.appendChild(keyElement);
|
||||
|
||||
if (Array.isArray(links) && links.length > 0) {
|
||||
links.forEach(link => {
|
||||
// const a = document.createElement('a');
|
||||
// a.href = link;
|
||||
// a.textContent = link.split('/').pop();
|
||||
// a.target = '_blank';
|
||||
// statusContainer.appendChild(a);
|
||||
const button = document.createElement('button');
|
||||
button.textContent = link.split('/').pop();
|
||||
button.addEventListener('click', function () {
|
||||
window.location.href = link;
|
||||
});
|
||||
statusContainer.appendChild(button);
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
function addStatusToStatus(status, start_time, duration) {
|
||||
const statusContainer = document.getElementById('status-container');
|
||||
|
||||
let keyElement = document.createElement('div');
|
||||
let valueElement = document.createElement('div');
|
||||
keyElement.className = 'json-key';
|
||||
valueElement.className = 'json-value';
|
||||
keyElement.textContent = 'status:';
|
||||
valueElement.classList.add('status-value');
|
||||
valueElement.classList.add(getStatusClass(status));
|
||||
valueElement.textContent = status;
|
||||
statusContainer.appendChild(keyElement);
|
||||
statusContainer.appendChild(valueElement);
|
||||
|
||||
keyElement = document.createElement('div');
|
||||
valueElement = document.createElement('div');
|
||||
keyElement.className = 'json-key';
|
||||
valueElement.className = 'json-value';
|
||||
keyElement.textContent = 'start_time:';
|
||||
valueElement.textContent = formatTimestamp(start_time);
|
||||
statusContainer.appendChild(keyElement);
|
||||
statusContainer.appendChild(valueElement);
|
||||
|
||||
keyElement = document.createElement('div');
|
||||
valueElement = document.createElement('div');
|
||||
keyElement.className = 'json-key';
|
||||
valueElement.className = 'json-value';
|
||||
keyElement.textContent = 'duration:';
|
||||
if (duration === null) {
|
||||
// Set initial value to 0 and add a unique ID or data attribute to identify the duration element
|
||||
valueElement.textContent = '00:00:00';
|
||||
valueElement.setAttribute('id', 'duration-value');
|
||||
} else {
|
||||
// Format the duration if it's a valid number
|
||||
valueElement.textContent = formatDuration(duration);
|
||||
}
|
||||
statusContainer.appendChild(keyElement);
|
||||
statusContainer.appendChild(valueElement);
|
||||
}
|
||||
|
||||
function navigatePath(jsonObj, nameArray) {
|
||||
let baseParams = new URLSearchParams(window.location.search);
|
||||
let keysToDelete = [];
|
||||
baseParams.forEach((value, key) => {
|
||||
if (key.startsWith('name_')) {
|
||||
keysToDelete.push(key); // Collect the keys to delete
|
||||
}
|
||||
});
|
||||
keysToDelete.forEach((key) => baseParams.delete(key));
|
||||
let pathNames = [];
|
||||
let pathLinks = [];
|
||||
let currentObj = jsonObj;
|
||||
|
||||
// Add the first entry (root level)
|
||||
baseParams.set(`name_0`, currentObj.name);
|
||||
pathNames.push(currentObj.name);
|
||||
pathLinks.push(`<span class="separator">/</span><a href="${window.location.pathname}?${baseParams.toString()}">${currentObj.name}</a>`);
|
||||
// Iterate through the nameArray starting at index 0
|
||||
for (const [index, name] of nameArray.entries()) {
|
||||
if (index === 0) continue;
|
||||
if (currentObj && Array.isArray(currentObj.results)) {
|
||||
const nextResult = currentObj.results.find(result => result.name === name);
|
||||
if (nextResult) {
|
||||
baseParams.set(`name_${index}`, nextResult.name);
|
||||
pathNames.push(nextResult.name); // Correctly push nextResult name, not currentObj.name
|
||||
pathLinks.push(`<span class="separator">/</span><a href="${window.location.pathname}?${baseParams.toString()}">${nextResult.name}</a>`);
|
||||
currentObj = nextResult; // Move to the next object in the hierarchy
|
||||
} else {
|
||||
console.error(`Name "${name}" not found in results array.`);
|
||||
return null; // Name not found in results array
|
||||
}
|
||||
} else {
|
||||
console.error(`Current object is not structured as expected.`);
|
||||
return null; // Current object is not structured as expected
|
||||
}
|
||||
}
|
||||
const footerLeft = document.querySelector('#footer .left');
|
||||
footerLeft.innerHTML = pathLinks.join('');
|
||||
|
||||
return currentObj;
|
||||
}
|
||||
|
||||
// Define the fixed columns globally, so both functions can use it
|
||||
const columns = ['name', 'status', 'start_time', 'duration', 'info'];
|
||||
|
||||
const columnSymbols = {
|
||||
name: '👤',
|
||||
status: '✔️',
|
||||
start_time: '🕒',
|
||||
duration: '⏳',
|
||||
info: '⚠️'
|
||||
};
|
||||
|
||||
function createResultsTable(results, nest_level) {
|
||||
if (results && Array.isArray(results) && results.length > 0) {
|
||||
const table = document.createElement('table');
|
||||
const thead = document.createElement('thead');
|
||||
const tbody = document.createElement('tbody');
|
||||
|
||||
// Get the current URL parameters
|
||||
const currentUrl = new URL(window.location.href);
|
||||
|
||||
// Create table headers based on the fixed columns
|
||||
const headerRow = document.createElement('tr');
|
||||
columns.forEach(column => {
|
||||
const th = document.createElement('th');
|
||||
th.textContent = columnSymbols[column] || column;
|
||||
th.style.cursor = 'pointer'; // Make headers clickable
|
||||
th.addEventListener('click', () => sortTable(results, column, tbody, nest_level)); // Add click event to sort the table
|
||||
headerRow.appendChild(th);
|
||||
});
|
||||
thead.appendChild(headerRow);
|
||||
|
||||
// Create table rows
|
||||
populateTableRows(tbody, results, columns, nest_level);
|
||||
|
||||
table.appendChild(thead);
|
||||
table.appendChild(tbody);
|
||||
|
||||
return table;
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
function populateTableRows(tbody, results, columns, nest_level) {
|
||||
const currentUrl = new URL(window.location.href); // Get the current URL
|
||||
|
||||
// Clear existing rows if re-rendering (used in sorting)
|
||||
tbody.innerHTML = '';
|
||||
|
||||
results.forEach((result, index) => {
|
||||
const row = document.createElement('tr');
|
||||
|
||||
columns.forEach(column => {
|
||||
const td = document.createElement('td');
|
||||
const value = result[column];
|
||||
|
||||
if (column === 'name') {
|
||||
// Create a link for the name field, using name_X
|
||||
const link = document.createElement('a');
|
||||
const newUrl = new URL(currentUrl); // Create a fresh copy of the URL for each row
|
||||
newUrl.searchParams.set(`name_${nest_level}`, value); // Use backticks for string interpolation
|
||||
link.href = newUrl.toString();
|
||||
link.textContent = value;
|
||||
td.classList.add('name-column');
|
||||
td.appendChild(link);
|
||||
} else if (column === 'status') {
|
||||
// Apply status formatting
|
||||
const span = document.createElement('span');
|
||||
span.className = getStatusClass(value);
|
||||
span.textContent = value;
|
||||
td.classList.add('status-column');
|
||||
td.appendChild(span);
|
||||
} else if (column === 'start_time') {
|
||||
td.classList.add('time-column');
|
||||
td.textContent = value ? formatTimestamp(value, false) : '';
|
||||
} else if (column === 'duration') {
|
||||
td.classList.add('time-column');
|
||||
td.textContent = value ? formatDuration(value) : '';
|
||||
} else if (column === 'info') {
|
||||
// For info and other columns, just display the value
|
||||
td.textContent = value || '';
|
||||
td.classList.add('info-column');
|
||||
}
|
||||
|
||||
row.appendChild(td);
|
||||
});
|
||||
|
||||
tbody.appendChild(row);
|
||||
});
|
||||
}
|
||||
|
||||
function sortTable(results, key, tbody, nest_level) {
|
||||
// Find the table header element for the given key
|
||||
let th = null;
|
||||
const tableHeaders = document.querySelectorAll('th'); // Select all table headers
|
||||
tableHeaders.forEach(header => {
|
||||
if (header.textContent.trim().toLowerCase() === key.toLowerCase()) {
|
||||
th = header;
|
||||
}
|
||||
});
|
||||
|
||||
if (!th) {
|
||||
console.error(`No table header found for key: ${key}`);
|
||||
return;
|
||||
}
|
||||
|
||||
// Determine the current sort direction
|
||||
let ascending = th.getAttribute('data-sort-direction') === 'asc' ? false : true;
|
||||
|
||||
// Toggle the sort direction for the next click
|
||||
th.setAttribute('data-sort-direction', ascending ? 'asc' : 'desc');
|
||||
|
||||
// Sort the results array by the given key
|
||||
results.sort((a, b) => {
|
||||
if (a[key] < b[key]) return ascending ? -1 : 1;
|
||||
if (a[key] > b[key]) return ascending ? 1 : -1;
|
||||
return 0;
|
||||
});
|
||||
|
||||
// Re-populate the table with sorted data
|
||||
populateTableRows(tbody, results, columns, nest_level);
|
||||
}
|
||||
|
||||
function loadJSON(PR, sha, nameParams) {
|
||||
const infoElement = document.getElementById('info-container');
|
||||
let lastModifiedTime = null;
|
||||
const task = nameParams[0].toLowerCase();
|
||||
|
||||
// Construct the URL dynamically based on PR, sha, and name_X
|
||||
const baseUrl = window.location.origin + window.location.pathname.replace('/json.html', '');
|
||||
const path = `${baseUrl}/${encodeURIComponent(PR)}/${encodeURIComponent(sha)}/result_${task}.json`;
|
||||
|
||||
fetch(path, {cache: "no-cache"})
|
||||
.then(response => {
|
||||
if (!response.ok) {
|
||||
throw new Error(`HTTP error! status: ${response.status}`);
|
||||
}
|
||||
lastModifiedTime = response.headers.get('Last-Modified');
|
||||
return response.json();
|
||||
})
|
||||
.then(data => {
|
||||
const linksDiv = document.getElementById('links');
|
||||
const resultsDiv = document.getElementById('result-container');
|
||||
const footerRight = document.querySelector('#footer .right');
|
||||
|
||||
let targetData = navigatePath(data, nameParams);
|
||||
let nest_level = nameParams.length;
|
||||
|
||||
if (targetData) {
|
||||
infoElement.style.display = 'none';
|
||||
|
||||
// Handle footer links if present
|
||||
if (Array.isArray(data.aux_links) && data.aux_links.length > 0) {
|
||||
data.aux_links.forEach(link => {
|
||||
const a = document.createElement('a');
|
||||
a.href = link;
|
||||
a.textContent = link.split('/').pop();
|
||||
a.target = '_blank';
|
||||
footerRight.appendChild(a);
|
||||
});
|
||||
}
|
||||
addStatusToStatus(targetData.status, targetData.start_time, targetData.duration)
|
||||
|
||||
// Handle links
|
||||
addFileButtonToStatus('files', targetData.links)
|
||||
|
||||
|
||||
// Handle duration update if duration is null and start_time exists
|
||||
if (targetData.duration === null && targetData.start_time) {
|
||||
let duration = Math.floor(Date.now() / 1000 - targetData.start_time);
|
||||
const durationElement = document.getElementById('duration-value');
|
||||
|
||||
const intervalId = setInterval(() => {
|
||||
duration++;
|
||||
durationElement.textContent = formatDuration(duration);
|
||||
}, 1000);
|
||||
}
|
||||
|
||||
// If 'results' exists and is non-empty, create the table
|
||||
const resultsData = targetData.results;
|
||||
if (Array.isArray(resultsData) && resultsData.length > 0) {
|
||||
const table = createResultsTable(resultsData, nest_level);
|
||||
if (table) {
|
||||
resultsDiv.appendChild(table);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
infoElement.textContent = 'Object Not Found';
|
||||
infoElement.style.display = 'block';
|
||||
}
|
||||
|
||||
// Set up auto-reload if Last-Modified header is present
|
||||
if (lastModifiedTime) {
|
||||
setInterval(() => {
|
||||
checkForUpdate(path, lastModifiedTime);
|
||||
}, 30000); // 30000 milliseconds = 30 seconds
|
||||
}
|
||||
})
|
||||
.catch(error => {
|
||||
console.error('Error loading JSON:', error);
|
||||
infoElement.textContent = 'Error loading data';
|
||||
infoElement.style.display = 'block';
|
||||
});
|
||||
}
|
||||
|
||||
// Function to check if the JSON file is updated
|
||||
function checkForUpdate(path, lastModifiedTime) {
|
||||
fetch(path, {method: 'HEAD'})
|
||||
.then(response => {
|
||||
if (!response.ok) {
|
||||
throw new Error(`HTTP error! status: ${response.status}`);
|
||||
}
|
||||
const newLastModifiedTime = response.headers.get('Last-Modified');
|
||||
if (newLastModifiedTime && new Date(newLastModifiedTime) > new Date(lastModifiedTime)) {
|
||||
// If the JSON file has been updated, reload the page
|
||||
window.location.reload();
|
||||
}
|
||||
})
|
||||
.catch(error => {
|
||||
console.error('Error checking for update:', error);
|
||||
});
|
||||
}
|
||||
|
||||
// Initialize the page and load JSON from URL parameter
|
||||
function init() {
|
||||
const urlParams = new URLSearchParams(window.location.search);
|
||||
const PR = urlParams.get('PR');
|
||||
const sha = urlParams.get('sha');
|
||||
const root_name = urlParams.get('name_0');
|
||||
const nameParams = [];
|
||||
|
||||
urlParams.forEach((value, key) => {
|
||||
if (key.startsWith('name_')) {
|
||||
const index = parseInt(key.split('_')[1], 10);
|
||||
nameParams[index] = value;
|
||||
}
|
||||
});
|
||||
|
||||
if (PR) {
|
||||
addKeyValueToStatus("PR", PR)
|
||||
} else {
|
||||
console.error("TODO")
|
||||
}
|
||||
addKeyValueToStatus("sha", sha);
|
||||
if (nameParams[1]) {
|
||||
addKeyValueToStatus("job", nameParams[1]);
|
||||
}
|
||||
addKeyValueToStatus("workflow", nameParams[0]);
|
||||
|
||||
if (PR && sha && root_name) {
|
||||
loadJSON(PR, sha, nameParams);
|
||||
} else {
|
||||
document.getElementById('info-container').textContent = 'Error: Missing required URL parameters: PR, sha, or name_0';
|
||||
}
|
||||
}
|
||||
|
||||
window.onload = init;
|
||||
</script>
|
||||
</body>
|
||||
</html>
|
137
ci/praktika/mangle.py
Normal file
137
ci/praktika/mangle.py
Normal file
@ -0,0 +1,137 @@
|
||||
import copy
|
||||
import importlib.util
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict
|
||||
|
||||
from praktika import Job
|
||||
from praktika._settings import _USER_DEFINED_SETTINGS, _Settings
|
||||
from praktika.utils import ContextManager, Utils
|
||||
|
||||
|
||||
def _get_workflows(name=None, file=None):
|
||||
"""
|
||||
Gets user's workflow configs
|
||||
"""
|
||||
res = []
|
||||
|
||||
with ContextManager.cd():
|
||||
directory = Path(_Settings.WORKFLOWS_DIRECTORY)
|
||||
for py_file in directory.glob("*.py"):
|
||||
if file and file not in str(py_file):
|
||||
continue
|
||||
module_name = py_file.name.removeprefix(".py")
|
||||
spec = importlib.util.spec_from_file_location(
|
||||
module_name, f"{_Settings.WORKFLOWS_DIRECTORY}/{module_name}"
|
||||
)
|
||||
assert spec
|
||||
foo = importlib.util.module_from_spec(spec)
|
||||
assert spec.loader
|
||||
spec.loader.exec_module(foo)
|
||||
try:
|
||||
for workflow in foo.WORKFLOWS:
|
||||
if name:
|
||||
if name == workflow.name:
|
||||
print(f"Read workflow [{name}] config from [{module_name}]")
|
||||
res = [workflow]
|
||||
break
|
||||
else:
|
||||
continue
|
||||
else:
|
||||
res += foo.WORKFLOWS
|
||||
print(f"Read workflow configs from [{module_name}]")
|
||||
except Exception as e:
|
||||
print(
|
||||
f"WARNING: Failed to add WORKFLOWS config from [{module_name}], exception [{e}]"
|
||||
)
|
||||
if not res:
|
||||
Utils.raise_with_error(f"Failed to find workflow [{name or file}]")
|
||||
|
||||
for workflow in res:
|
||||
# add native jobs
|
||||
_update_workflow_with_native_jobs(workflow)
|
||||
# fill in artifact properties, e.g. _provided_by
|
||||
_update_workflow_artifacts(workflow)
|
||||
return res
|
||||
|
||||
|
||||
def _update_workflow_artifacts(workflow):
|
||||
artifact_job = {}
|
||||
for job in workflow.jobs:
|
||||
for artifact_name in job.provides:
|
||||
assert artifact_name not in artifact_job
|
||||
artifact_job[artifact_name] = job.name
|
||||
for artifact in workflow.artifacts:
|
||||
artifact._provided_by = artifact_job[artifact.name]
|
||||
|
||||
|
||||
def _update_workflow_with_native_jobs(workflow):
|
||||
if workflow.dockers:
|
||||
from praktika.native_jobs import _docker_build_job
|
||||
|
||||
print(f"Enable native job [{_docker_build_job.name}] for [{workflow.name}]")
|
||||
aux_job = copy.deepcopy(_docker_build_job)
|
||||
if workflow.enable_cache:
|
||||
print(
|
||||
f"Add automatic digest config for [{aux_job.name}] job since cache is enabled"
|
||||
)
|
||||
docker_digest_config = Job.CacheDigestConfig()
|
||||
for docker_config in workflow.dockers:
|
||||
docker_digest_config.include_paths.append(docker_config.path)
|
||||
aux_job.digest_config = docker_digest_config
|
||||
|
||||
workflow.jobs.insert(0, aux_job)
|
||||
for job in workflow.jobs[1:]:
|
||||
if not job.requires:
|
||||
job.requires = []
|
||||
job.requires.append(aux_job.name)
|
||||
|
||||
if (
|
||||
workflow.enable_cache
|
||||
or workflow.enable_report
|
||||
or workflow.enable_merge_ready_status
|
||||
):
|
||||
from praktika.native_jobs import _workflow_config_job
|
||||
|
||||
print(f"Enable native job [{_workflow_config_job.name}] for [{workflow.name}]")
|
||||
aux_job = copy.deepcopy(_workflow_config_job)
|
||||
workflow.jobs.insert(0, aux_job)
|
||||
for job in workflow.jobs[1:]:
|
||||
if not job.requires:
|
||||
job.requires = []
|
||||
job.requires.append(aux_job.name)
|
||||
|
||||
if workflow.enable_merge_ready_status:
|
||||
from praktika.native_jobs import _final_job
|
||||
|
||||
print(f"Enable native job [{_final_job.name}] for [{workflow.name}]")
|
||||
aux_job = copy.deepcopy(_final_job)
|
||||
for job in workflow.jobs:
|
||||
aux_job.requires.append(job.name)
|
||||
workflow.jobs.append(aux_job)
|
||||
|
||||
|
||||
def _get_user_settings() -> Dict[str, Any]:
|
||||
"""
|
||||
Gets user's settings
|
||||
"""
|
||||
res = {} # type: Dict[str, Any]
|
||||
|
||||
directory = Path(_Settings.SETTINGS_DIRECTORY)
|
||||
for py_file in directory.glob("*.py"):
|
||||
module_name = py_file.name.removeprefix(".py")
|
||||
spec = importlib.util.spec_from_file_location(
|
||||
module_name, f"{_Settings.SETTINGS_DIRECTORY}/{module_name}"
|
||||
)
|
||||
assert spec
|
||||
foo = importlib.util.module_from_spec(spec)
|
||||
assert spec.loader
|
||||
spec.loader.exec_module(foo)
|
||||
for setting in _USER_DEFINED_SETTINGS:
|
||||
try:
|
||||
value = getattr(foo, setting)
|
||||
res[setting] = value
|
||||
print(f"Apply user defined setting [{setting} = {value}]")
|
||||
except Exception as e:
|
||||
pass
|
||||
|
||||
return res
|
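
For context, _get_workflows() expects every *.py file in the workflows directory to expose a module-level WORKFLOWS list. Below is a minimal, hypothetical example of such a file (field names are inferred from how the parser and native jobs read Workflow.Config; the job command is invented for illustration):

from praktika import Job, Workflow

style_check = Job.Config(
    name="Style check",
    runs_on=["ubuntu-latest"],
    command="./ci/run_style_check.sh",  # hypothetical script
)

WORKFLOWS = [
    Workflow.Config(
        name="PR",
        event=Workflow.Event.PULL_REQUEST,
        base_branches=["master"],
        jobs=[style_check],
    ),
]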
378
ci/praktika/native_jobs.py
Normal file
378
ci/praktika/native_jobs.py
Normal file
@ -0,0 +1,378 @@
|
||||
import sys
|
||||
from typing import Dict
|
||||
|
||||
from praktika import Job, Workflow
|
||||
from praktika._environment import _Environment
|
||||
from praktika.cidb import CIDB
|
||||
from praktika.digest import Digest
|
||||
from praktika.docker import Docker
|
||||
from praktika.gh import GH
|
||||
from praktika.hook_cache import CacheRunnerHooks
|
||||
from praktika.hook_html import HtmlRunnerHooks
|
||||
from praktika.mangle import _get_workflows
|
||||
from praktika.result import Result, ResultInfo
|
||||
from praktika.runtime import RunConfig
|
||||
from praktika.s3 import S3
|
||||
from praktika.settings import Settings
|
||||
from praktika.utils import Shell, Utils
|
||||
|
||||
assert Settings.CI_CONFIG_RUNS_ON
|
||||
|
||||
_workflow_config_job = Job.Config(
|
||||
name=Settings.CI_CONFIG_JOB_NAME,
|
||||
runs_on=Settings.CI_CONFIG_RUNS_ON,
|
||||
job_requirements=(
|
||||
Job.Requirements(
|
||||
python=Settings.INSTALL_PYTHON_FOR_NATIVE_JOBS,
|
||||
python_requirements_txt=Settings.INSTALL_PYTHON_REQS_FOR_NATIVE_JOBS,
|
||||
)
|
||||
if Settings.INSTALL_PYTHON_REQS_FOR_NATIVE_JOBS
|
||||
else None
|
||||
),
|
||||
command=f"{Settings.PYTHON_INTERPRETER} -m praktika.native_jobs '{Settings.CI_CONFIG_JOB_NAME}'",
|
||||
)
|
||||
|
||||
_docker_build_job = Job.Config(
|
||||
name=Settings.DOCKER_BUILD_JOB_NAME,
|
||||
runs_on=Settings.DOCKER_BUILD_RUNS_ON,
|
||||
job_requirements=Job.Requirements(
|
||||
python=Settings.INSTALL_PYTHON_FOR_NATIVE_JOBS,
|
||||
python_requirements_txt="",
|
||||
),
|
||||
timeout=4 * 3600,
|
||||
command=f"{Settings.PYTHON_INTERPRETER} -m praktika.native_jobs '{Settings.DOCKER_BUILD_JOB_NAME}'",
|
||||
)
|
||||
|
||||
_final_job = Job.Config(
|
||||
name=Settings.FINISH_WORKFLOW_JOB_NAME,
|
||||
runs_on=Settings.CI_CONFIG_RUNS_ON,
|
||||
job_requirements=Job.Requirements(
|
||||
python=Settings.INSTALL_PYTHON_FOR_NATIVE_JOBS,
|
||||
python_requirements_txt="",
|
||||
),
|
||||
command=f"{Settings.PYTHON_INTERPRETER} -m praktika.native_jobs '{Settings.FINISH_WORKFLOW_JOB_NAME}'",
|
||||
run_unless_cancelled=True,
|
||||
)
|
||||
|
||||
|
||||
def _build_dockers(workflow, job_name):
|
||||
print(f"Start [{job_name}], workflow [{workflow.name}]")
|
||||
dockers = workflow.dockers
|
||||
ready = []
|
||||
results = []
|
||||
job_status = Result.Status.SUCCESS
|
||||
job_info = ""
|
||||
dockers = Docker.sort_in_build_order(dockers)
|
||||
docker_digests = {} # type: Dict[str, str]
|
||||
for docker in dockers:
|
||||
docker_digests[docker.name] = Digest().calc_docker_digest(docker, dockers)
|
||||
|
||||
if not Shell.check(
|
||||
"docker buildx inspect --bootstrap | grep -q docker-container", verbose=True
|
||||
):
|
||||
print("Install docker container driver")
|
||||
if not Shell.check(
|
||||
"docker buildx create --use --name mybuilder --driver docker-container",
|
||||
verbose=True,
|
||||
):
|
||||
job_status = Result.Status.FAILED
|
||||
job_info = "Failed to install docker buildx driver"
|
||||
|
||||
if job_status == Result.Status.SUCCESS:
|
||||
if not Docker.login(
|
||||
Settings.DOCKERHUB_USERNAME,
|
||||
user_password=workflow.get_secret(Settings.DOCKERHUB_SECRET).get_value(),
|
||||
):
|
||||
job_status = Result.Status.FAILED
|
||||
job_info = "Failed to login to dockerhub"
|
||||
|
||||
if job_status == Result.Status.SUCCESS:
|
||||
for docker in dockers:
|
||||
assert (
|
||||
docker.name not in ready
|
||||
), f"All docker names must be uniq [{dockers}]"
|
||||
stopwatch = Utils.Stopwatch()
|
||||
info = f"{docker.name}:{docker_digests[docker.name]}"
|
||||
log_file = f"{Settings.OUTPUT_DIR}/docker_{Utils.normalize_string(docker.name)}.log"
|
||||
files = []
|
||||
|
||||
code, out, err = Shell.get_res_stdout_stderr(
|
||||
f"docker manifest inspect {docker.name}:{docker_digests[docker.name]}"
|
||||
)
|
||||
print(
|
||||
f"Docker inspect results for {docker.name}:{docker_digests[docker.name]}: exit code [{code}], out [{out}], err [{err}]"
|
||||
)
|
||||
if "no such manifest" in err:
|
||||
ret_code = Docker.build(
|
||||
docker, log_file=log_file, digests=docker_digests, add_latest=False
|
||||
)
|
||||
if ret_code == 0:
|
||||
status = Result.Status.SUCCESS
|
||||
else:
|
||||
status = Result.Status.FAILED
|
||||
job_status = Result.Status.FAILED
|
||||
info += f", failed with exit code: {ret_code}, see log"
|
||||
files.append(log_file)
|
||||
else:
|
||||
print(
|
||||
f"Docker image [{docker.name}:{docker_digests[docker.name]} exists - skip build"
|
||||
)
|
||||
status = Result.Status.SKIPPED
|
||||
ready.append(docker.name)
|
||||
results.append(
|
||||
Result(
|
||||
name=docker.name,
|
||||
status=status,
|
||||
info=info,
|
||||
duration=stopwatch.duration,
|
||||
start_time=stopwatch.start_time,
|
||||
files=files,
|
||||
)
|
||||
)
|
||||
Result.from_fs(job_name).set_status(job_status).set_results(results).set_info(
|
||||
job_info
|
||||
)
|
||||
|
||||
if job_status != Result.Status.SUCCESS:
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
def _config_workflow(workflow: Workflow.Config, job_name):
|
||||
def _check_yaml_up_to_date():
|
||||
print("Check workflows are up to date")
|
||||
stop_watch = Utils.Stopwatch()
|
||||
exit_code, output, err = Shell.get_res_stdout_stderr(
|
||||
f"git diff-index HEAD -- {Settings.WORKFLOW_PATH_PREFIX}"
|
||||
)
|
||||
info = ""
|
||||
status = Result.Status.SUCCESS
|
||||
if exit_code != 0:
|
||||
info = f"workspace has uncommitted files unexpectedly [{output}]"
|
||||
status = Result.Status.ERROR
|
||||
print("ERROR: ", info)
|
||||
else:
|
||||
Shell.check(f"{Settings.PYTHON_INTERPRETER} -m praktika --generate")
|
||||
exit_code, output, err = Shell.get_res_stdout_stderr(
|
||||
f"git diff-index HEAD -- {Settings.WORKFLOW_PATH_PREFIX}"
|
||||
)
|
||||
if exit_code != 0:
|
||||
info = f"workspace has outdated workflows [{output}] - regenerate with [python -m praktika --generate]"
|
||||
status = Result.Status.ERROR
|
||||
print("ERROR: ", info)
|
||||
|
||||
return (
|
||||
Result(
|
||||
name="Check Workflows updated",
|
||||
status=status,
|
||||
start_time=stop_watch.start_time,
|
||||
duration=stop_watch.duration,
|
||||
info=info,
|
||||
),
|
||||
info,
|
||||
)
|
||||
|
||||
def _check_secrets(secrets):
|
||||
print("Check Secrets")
|
||||
stop_watch = Utils.Stopwatch()
|
||||
infos = []
|
||||
for secret_config in secrets:
|
||||
value = secret_config.get_value()
|
||||
if not value:
|
||||
info = f"ERROR: Failed to read secret [{secret_config.name}]"
|
||||
infos.append(info)
|
||||
print(info)
|
||||
|
||||
info = "\n".join(infos)
|
||||
return (
|
||||
Result(
|
||||
name="Check Secrets",
|
||||
status=(Result.Status.FAILED if infos else Result.Status.SUCCESS),
|
||||
start_time=stop_watch.start_time,
|
||||
duration=stop_watch.duration,
|
||||
info=info,
|
||||
),
|
||||
info,
|
||||
)
|
||||
|
||||
def _check_db(workflow):
|
||||
stop_watch = Utils.Stopwatch()
|
||||
res, info = CIDB(
|
||||
workflow.get_secret(Settings.SECRET_CI_DB_URL).get_value(),
|
||||
workflow.get_secret(Settings.SECRET_CI_DB_PASSWORD).get_value(),
|
||||
).check()
|
||||
return (
|
||||
Result(
|
||||
name="Check CI DB",
|
||||
status=(Result.Status.FAILED if not res else Result.Status.SUCCESS),
|
||||
start_time=stop_watch.start_time,
|
||||
duration=stop_watch.duration,
|
||||
info=info,
|
||||
),
|
||||
info,
|
||||
)
|
||||
|
||||
print(f"Start [{job_name}], workflow [{workflow.name}]")
|
||||
results = []
|
||||
files = []
|
||||
info_lines = []
|
||||
job_status = Result.Status.SUCCESS
|
||||
|
||||
workflow_config = RunConfig(
|
||||
name=workflow.name,
|
||||
digest_jobs={},
|
||||
digest_dockers={},
|
||||
sha=_Environment.get().SHA,
|
||||
cache_success=[],
|
||||
cache_success_base64=[],
|
||||
cache_artifacts={},
|
||||
).dump()
|
||||
|
||||
# checks:
|
||||
result_, info = _check_yaml_up_to_date()
|
||||
if result_.status != Result.Status.SUCCESS:
|
||||
print("ERROR: yaml files are outdated - regenerate, commit and push")
|
||||
job_status = Result.Status.ERROR
|
||||
info_lines.append(job_name + ": " + info)
|
||||
results.append(result_)
|
||||
|
||||
if workflow.secrets:
|
||||
result_, info = _check_secrets(workflow.secrets)
|
||||
if result_.status != Result.Status.SUCCESS:
|
||||
print(f"ERROR: Invalid secrets in workflow [{workflow.name}]")
|
||||
job_status = Result.Status.ERROR
|
||||
info_lines.append(job_name + ": " + info)
|
||||
results.append(result_)
|
||||
|
||||
if workflow.enable_cidb:
|
||||
result_, info = _check_db(workflow)
|
||||
if result_.status != Result.Status.SUCCESS:
|
||||
job_status = Result.Status.ERROR
|
||||
info_lines.append(job_name + ": " + info)
|
||||
results.append(result_)
|
||||
|
||||
# config:
|
||||
if workflow.dockers:
|
||||
print("Calculate docker's digests")
|
||||
dockers = workflow.dockers
|
||||
dockers = Docker.sort_in_build_order(dockers)
|
||||
for docker in dockers:
|
||||
workflow_config.digest_dockers[docker.name] = Digest().calc_docker_digest(
|
||||
docker, dockers
|
||||
)
|
||||
workflow_config.dump()
|
||||
|
||||
if workflow.enable_cache:
|
||||
print("Cache Lookup")
|
||||
stop_watch = Utils.Stopwatch()
|
||||
workflow_config = CacheRunnerHooks.configure(workflow)
|
||||
results.append(
|
||||
Result(
|
||||
name="Cache Lookup",
|
||||
status=Result.Status.SUCCESS,
|
||||
start_time=stop_watch.start_time,
|
||||
duration=stop_watch.duration,
|
||||
)
|
||||
)
|
||||
files.append(RunConfig.file_name_static(workflow.name))
|
||||
|
||||
workflow_config.dump()
|
||||
|
||||
if workflow.enable_report:
|
||||
print("Init report")
|
||||
stop_watch = Utils.Stopwatch()
|
||||
HtmlRunnerHooks.configure(workflow)
|
||||
results.append(
|
||||
Result(
|
||||
name="Init Report",
|
||||
status=Result.Status.SUCCESS,
|
||||
start_time=stop_watch.start_time,
|
||||
duration=stop_watch.duration,
|
||||
)
|
||||
)
|
||||
files.append(Result.file_name_static(workflow.name))
|
||||
|
||||
Result.from_fs(job_name).set_status(job_status).set_results(results).set_files(
|
||||
files
|
||||
).set_info("\n".join(info_lines))
|
||||
|
||||
if job_status != Result.Status.SUCCESS:
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
def _finish_workflow(workflow, job_name):
|
||||
print(f"Start [{job_name}], workflow [{workflow.name}]")
|
||||
env = _Environment.get()
|
||||
|
||||
print("Check Actions statuses")
|
||||
print(env.get_needs_statuses())
|
||||
|
||||
print("Check Workflow results")
|
||||
S3.copy_result_from_s3(
|
||||
Result.file_name_static(workflow.name),
|
||||
lock=False,
|
||||
)
|
||||
workflow_result = Result.from_fs(workflow.name)
|
||||
|
||||
ready_for_merge_status = Result.Status.SUCCESS
|
||||
ready_for_merge_description = ""
|
||||
failed_results = []
|
||||
update_final_report = False
|
||||
for result in workflow_result.results:
|
||||
if result.name == job_name or result.status in (
|
||||
Result.Status.SUCCESS,
|
||||
Result.Status.SKIPPED,
|
||||
):
|
||||
continue
|
||||
if not result.is_completed():
|
||||
print(
|
||||
f"ERROR: not finished job [{result.name}] in the workflow - set status to error"
|
||||
)
|
||||
result.status = Result.Status.ERROR
|
||||
# dump workflow result after update - to have an updated result in post
|
||||
workflow_result.dump()
|
||||
# add error into env - it should appear in the report
|
||||
env.add_info(ResultInfo.NOT_FINALIZED + f" [{result.name}]")
|
||||
update_final_report = True
|
||||
job = workflow.get_job(result.name)
|
||||
if not job or not job.allow_merge_on_failure:
|
||||
print(
|
||||
f"NOTE: Result for [{result.name}] has not ok status [{result.status}]"
|
||||
)
|
||||
ready_for_merge_status = Result.Status.FAILED
|
||||
failed_results.append(result.name.split("(", maxsplit=1)[0]) # cut name
|
||||
|
||||
if failed_results:
|
||||
ready_for_merge_description = f"failed: {', '.join(failed_results)}"
|
||||
|
||||
if not GH.post_commit_status(
|
||||
name=Settings.READY_FOR_MERGE_STATUS_NAME + f" [{workflow.name}]",
|
||||
status=ready_for_merge_status,
|
||||
description=ready_for_merge_description,
|
||||
url="",
|
||||
):
|
||||
print(f"ERROR: failed to set status [{Settings.READY_FOR_MERGE_STATUS_NAME}]")
|
||||
env.add_info(ResultInfo.GH_STATUS_ERROR)
|
||||
|
||||
if update_final_report:
|
||||
S3.copy_result_to_s3(
|
||||
workflow_result,
|
||||
unlock=False,
|
||||
) # no lock - no unlock
|
||||
|
||||
Result.from_fs(job_name).set_status(Result.Status.SUCCESS).set_info(
|
||||
ready_for_merge_description
|
||||
)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
job_name = sys.argv[1]
|
||||
assert job_name, "Job name must be provided as input argument"
|
||||
workflow = _get_workflows(name=_Environment.get().WORKFLOW_NAME)[0]
|
||||
if job_name == Settings.DOCKER_BUILD_JOB_NAME:
|
||||
_build_dockers(workflow, job_name)
|
||||
elif job_name == Settings.CI_CONFIG_JOB_NAME:
|
||||
_config_workflow(workflow, job_name)
|
||||
elif job_name == Settings.FINISH_WORKFLOW_JOB_NAME:
|
||||
_finish_workflow(workflow, job_name)
|
||||
else:
|
||||
assert False, f"BUG, job name [{job_name}]"
|
258
ci/praktika/parser.py
Normal file
258
ci/praktika/parser.py
Normal file
@ -0,0 +1,258 @@
|
||||
import dataclasses
|
||||
from typing import Any, Dict, List
|
||||
|
||||
from praktika import Artifact, Workflow
|
||||
from praktika.mangle import _get_workflows
|
||||
|
||||
|
||||
class AddonType:
|
||||
PY = "py"
|
||||
|
||||
|
||||
@dataclasses.dataclass
|
||||
class WorkflowYaml:
|
||||
@dataclasses.dataclass
|
||||
class JobYaml:
|
||||
name: str
|
||||
needs: List[str]
|
||||
runs_on: List[str]
|
||||
artifacts_gh_requires: List["WorkflowYaml.ArtifactYaml"]
|
||||
artifacts_gh_provides: List["WorkflowYaml.ArtifactYaml"]
|
||||
addons: List["WorkflowYaml.JobAddonYaml"]
|
||||
gh_app_auth: bool
|
||||
run_unless_cancelled: bool
|
||||
parameter: Any
|
||||
|
||||
def __repr__(self):
|
||||
return self.name
|
||||
|
||||
@dataclasses.dataclass
|
||||
class ArtifactYaml:
|
||||
name: str
|
||||
provided_by: str
|
||||
required_by: List[str]
|
||||
path: str
|
||||
type: str
|
||||
|
||||
def __repr__(self):
|
||||
return self.name
|
||||
|
||||
@dataclasses.dataclass
|
||||
class JobAddonYaml:
|
||||
install_python: bool
|
||||
requirements_txt_path: str
|
||||
|
||||
name: str
|
||||
event: str
|
||||
branches: List[str]
|
||||
jobs: List[JobYaml]
|
||||
job_to_config: Dict[str, JobYaml]
|
||||
artifact_to_config: Dict[str, ArtifactYaml]
|
||||
secret_names_gh: List[str]
|
||||
enable_cache: bool
|
||||
|
||||
|
||||
class WorkflowConfigParser:
|
||||
def __init__(self, config: Workflow.Config):
|
||||
self.workflow_name = config.name
|
||||
self.config = config
|
||||
self.requires_all = [] # type: List[str]
|
||||
self.provides_all = [] # type: List[str]
|
||||
self.job_names_all = [] # type: List[str]
|
||||
self.artifact_to_providing_job_map = {} # type: Dict[str, List[str]]
|
||||
self.artifact_to_job_requires_map = {} # type: Dict[str, List[str]]
|
||||
self.artifact_map = {} # type: Dict[str, List[Artifact.Config]]
|
||||
|
||||
self.job_to_provides_artifacts = {} # type: Dict[str, List[Artifact.Config]]
|
||||
self.job_to_requires_artifacts = {} # type: Dict[str, List[Artifact.Config]]
|
||||
|
||||
self.workflow_yaml_config = WorkflowYaml(
|
||||
name=self.workflow_name,
|
||||
event=config.event,
|
||||
branches=[],
|
||||
jobs=[],
|
||||
secret_names_gh=[],
|
||||
job_to_config={},
|
||||
artifact_to_config={},
|
||||
enable_cache=False,
|
||||
)
|
||||
|
||||
def parse(self):
|
||||
self.workflow_yaml_config.enable_cache = self.config.enable_cache
|
||||
|
||||
# populate WorkflowYaml.branches
|
||||
if self.config.event in (Workflow.Event.PUSH,):
|
||||
assert (
|
||||
self.config.branches
|
||||
), f'Workflow.Config.branches (e.g. ["main"]) must be set for workflow with event [{self.config.event}], workflow [{self.workflow_name}]'
|
||||
assert (
|
||||
not self.config.base_branches
|
||||
), f'Workflow.Config.base_branches (e.g. ["main"]) must not be set for workflow with event [{self.config.event}], workflow [{self.workflow_name}]'
|
||||
assert isinstance(
|
||||
self.config.branches, list
|
||||
), f'Workflow.Config.branches must be of type list (e.g. ["main"]), workflow [{self.workflow_name}]'
|
||||
self.workflow_yaml_config.branches = self.config.branches
|
||||
elif self.config.event in (Workflow.Event.PULL_REQUEST,):
|
||||
assert (
|
||||
self.config.base_branches
|
||||
), f'Workflow.Config.base_branches (e.g. ["main"]) must be set for workflow with event [{self.config.event}], workflow [{self.workflow_name}]'
|
||||
assert (
|
||||
not self.config.branches
|
||||
), f'Workflow.Config.branches (e.g. ["main"]) must not be set for workflow with event [{self.config.event}], workflow [{self.workflow_name}]'
|
||||
assert isinstance(
|
||||
self.config.base_branches, list
|
||||
), f'Workflow.Config.base_branches must be of type list (e.g. ["main"]), workflow [{self.workflow_name}]'
|
||||
self.workflow_yaml_config.branches = self.config.base_branches
|
||||
|
||||
# populate WorkflowYaml.artifact_to_config with phony artifacts
|
||||
for job in self.config.jobs:
|
||||
assert (
|
||||
job.name not in self.workflow_yaml_config.artifact_to_config
|
||||
), f"Not uniq Job name [{job.name}], workflow [{self.workflow_name}]"
|
||||
self.workflow_yaml_config.artifact_to_config[job.name] = (
|
||||
WorkflowYaml.ArtifactYaml(
|
||||
name=job.name,
|
||||
provided_by=job.name,
|
||||
required_by=[],
|
||||
path="",
|
||||
type=Artifact.Type.PHONY,
|
||||
)
|
||||
)
|
||||
|
||||
# populate jobs
|
||||
for job in self.config.jobs:
|
||||
job_yaml_config = WorkflowYaml.JobYaml(
|
||||
name=job.name,
|
||||
addons=[],
|
||||
artifacts_gh_requires=[],
|
||||
artifacts_gh_provides=[],
|
||||
needs=[],
|
||||
runs_on=[],
|
||||
gh_app_auth=False,
|
||||
run_unless_cancelled=job.run_unless_cancelled,
|
||||
parameter=None,
|
||||
)
|
||||
self.workflow_yaml_config.jobs.append(job_yaml_config)
|
||||
assert (
|
||||
job.name not in self.workflow_yaml_config.job_to_config
|
||||
), f"Job name [{job.name}] is not uniq, workflow [{self.workflow_name}]"
|
||||
self.workflow_yaml_config.job_to_config[job.name] = job_yaml_config
|
||||
|
||||
# populate WorkflowYaml.artifact_to_config
|
||||
if self.config.artifacts:
|
||||
for artifact in self.config.artifacts:
|
||||
assert (
|
||||
artifact.name not in self.workflow_yaml_config.artifact_to_config
|
||||
), f"Artifact name [{artifact.name}] is not uniq, workflow [{self.workflow_name}]"
|
||||
artifact_yaml_config = WorkflowYaml.ArtifactYaml(
|
||||
name=artifact.name,
|
||||
provided_by="",
|
||||
required_by=[],
|
||||
path=artifact.path,
|
||||
type=artifact.type,
|
||||
)
|
||||
self.workflow_yaml_config.artifact_to_config[artifact.name] = (
|
||||
artifact_yaml_config
|
||||
)
|
||||
|
||||
# populate ArtifactYaml.provided_by
|
||||
for job in self.config.jobs:
|
||||
if job.provides:
|
||||
for artifact_name in job.provides:
|
||||
assert (
|
||||
artifact_name in self.workflow_yaml_config.artifact_to_config
|
||||
), f"Artifact [{artifact_name}] has no config, job [{job.name}], workflow [{self.workflow_name}]"
|
||||
assert not self.workflow_yaml_config.artifact_to_config[
|
||||
artifact_name
|
||||
].provided_by, f"Artifact [{artifact_name}] provided by multiple jobs [{self.workflow_yaml_config.artifact_to_config[artifact_name].provided_by}] and [{job.name}]"
|
||||
self.workflow_yaml_config.artifact_to_config[
|
||||
artifact_name
|
||||
].provided_by = job.name
|
||||
|
||||
# populate ArtifactYaml.required_by
|
||||
for job in self.config.jobs:
|
||||
if job.requires:
|
||||
for artifact_name in job.requires:
|
||||
assert (
|
||||
artifact_name in self.workflow_yaml_config.artifact_to_config
|
||||
), f"Artifact [{artifact_name}] has no config, job [{job.name}], workflow [{self.workflow_name}]"
|
||||
assert self.workflow_yaml_config.artifact_to_config[
|
||||
artifact_name
|
||||
].provided_by, f"Artifact [{artifact_name}] has no job providing it, required by job [{job.name}], workflow [{self.workflow_name}]"
|
||||
self.workflow_yaml_config.artifact_to_config[
|
||||
artifact_name
|
||||
].required_by.append(job.name)
|
||||
|
||||
# populate JobYaml.addons
|
||||
for job in self.config.jobs:
|
||||
if job.job_requirements:
|
||||
addon_yaml = WorkflowYaml.JobAddonYaml(
|
||||
requirements_txt_path=job.job_requirements.python_requirements_txt,
|
||||
install_python=job.job_requirements.python,
|
||||
)
|
||||
self.workflow_yaml_config.job_to_config[job.name].addons.append(
|
||||
addon_yaml
|
||||
)
|
||||
|
||||
if self.config.enable_report:
|
||||
for job in self.config.jobs:
|
||||
# auth required for every job with enabled HTML, so that workflow summary status can be updated
|
||||
self.workflow_yaml_config.job_to_config[job.name].gh_app_auth = True
|
||||
|
||||
# populate JobYaml.runs_on
|
||||
for job in self.config.jobs:
|
||||
self.workflow_yaml_config.job_to_config[job.name].runs_on = job.runs_on
|
||||
|
||||
# populate JobYaml.artifacts_gh_requires, JobYaml.artifacts_gh_provides and JobYaml.needs
|
||||
for (
|
||||
artifact_name,
|
||||
artifact,
|
||||
) in self.workflow_yaml_config.artifact_to_config.items():
|
||||
# assert (
|
||||
# artifact.provided_by
|
||||
# and artifact.provided_by in self.workflow_yaml_config.job_to_config
|
||||
# ), f"Artifact [{artifact_name}] has no valid job providing it [{artifact.provided_by}]"
|
||||
for job_name in artifact.required_by:
|
||||
if (
|
||||
artifact.provided_by
|
||||
not in self.workflow_yaml_config.job_to_config[job_name].needs
|
||||
):
|
||||
self.workflow_yaml_config.job_to_config[job_name].needs.append(
|
||||
artifact.provided_by
|
||||
)
|
||||
if artifact.type in (Artifact.Type.GH,):
|
||||
self.workflow_yaml_config.job_to_config[
|
||||
job_name
|
||||
].artifacts_gh_requires.append(artifact)
|
||||
elif artifact.type in (Artifact.Type.PHONY, Artifact.Type.S3):
|
||||
pass
|
||||
else:
|
||||
assert (
|
||||
False
|
||||
), f"Artifact [{artifact_name}] has unsupported type [{artifact.type}]"
|
||||
if not artifact.required_by and artifact.type != Artifact.Type.PHONY:
|
||||
print(
|
||||
f"WARNING: Artifact [{artifact_name}] provided by job [{artifact.provided_by}] not required by any job in workflow [{self.workflow_name}]"
|
||||
)
|
||||
if artifact.type == Artifact.Type.GH:
|
||||
self.workflow_yaml_config.job_to_config[
|
||||
artifact.provided_by
|
||||
].artifacts_gh_provides.append(artifact)
|
||||
|
||||
# populate JobYaml.parametrize
|
||||
for job in self.config.jobs:
|
||||
self.workflow_yaml_config.job_to_config[job.name].parameter = job.parameter
|
||||
|
||||
# populate secrets
|
||||
for secret_config in self.config.secrets:
|
||||
if secret_config.is_gh():
|
||||
self.workflow_yaml_config.secret_names_gh.append(secret_config.name)
|
||||
|
||||
return self
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
# test
|
||||
workflows = _get_workflows()
|
||||
for workflow in workflows:
|
||||
WorkflowConfigParser(workflow).parse()
|
354
ci/praktika/result.py
Normal file
354
ci/praktika/result.py
Normal file
@ -0,0 +1,354 @@
|
||||
import dataclasses
|
||||
import datetime
|
||||
import sys
|
||||
from collections.abc import Container
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, List, Optional
|
||||
|
||||
from praktika._environment import _Environment
|
||||
from praktika._settings import _Settings
|
||||
from praktika.utils import ContextManager, MetaClasses, Shell, Utils
|
||||
|
||||
|
||||
@dataclasses.dataclass
|
||||
class Result(MetaClasses.Serializable):
|
||||
"""
|
||||
Represents the outcome of a workflow/job/task or any operation, along with associated metadata.
|
||||
|
||||
This class supports nesting of results to represent tasks with sub-tasks, and includes
|
||||
various attributes to track status, timing, files, and links.
|
||||
|
||||
Attributes:
|
||||
name (str): The name of the task.
|
||||
status (str): The current status of the task. Should be one of the values defined in the Status class.
|
||||
start_time (Optional[float]): The start time of the task in Unix timestamp format. None if not started.
|
||||
duration (Optional[float]): The duration of the task in seconds. None if not completed.
|
||||
results (List[Result]): A list of sub-results representing nested tasks.
|
||||
files (List[str]): A list of file paths or names related to the result.
|
||||
links (List[str]): A list of URLs related to the result (e.g., links to reports or resources).
|
||||
info (str): Additional information about the result. Free-form text.
|
||||
# TODO: rename
|
||||
aux_links (List[str]): A list of auxiliary links that provide additional context for the result.
|
||||
# TODO: remove
|
||||
html_link (str): A direct link to an HTML representation of the result (e.g., a detailed report page).
|
||||
|
||||
Inner Class:
|
||||
Status: Defines possible statuses for the task, such as "success", "failure", etc.
|
||||
"""
|
||||
|
||||
class Status:
|
||||
SKIPPED = "skipped"
|
||||
SUCCESS = "success"
|
||||
FAILED = "failure"
|
||||
PENDING = "pending"
|
||||
RUNNING = "running"
|
||||
ERROR = "error"
|
||||
|
||||
name: str
|
||||
status: str
|
||||
start_time: Optional[float] = None
|
||||
duration: Optional[float] = None
|
||||
results: List["Result"] = dataclasses.field(default_factory=list)
|
||||
files: List[str] = dataclasses.field(default_factory=list)
|
||||
links: List[str] = dataclasses.field(default_factory=list)
|
||||
info: str = ""
|
||||
aux_links: List[str] = dataclasses.field(default_factory=list)
|
||||
html_link: str = ""
|
||||
|
||||
@staticmethod
|
||||
def create_from(
|
||||
name="",
|
||||
results: List["Result"] = None,
|
||||
stopwatch: Utils.Stopwatch = None,
|
||||
status="",
|
||||
files=None,
|
||||
info="",
|
||||
with_info_from_results=True,
|
||||
):
|
||||
if isinstance(status, bool):
|
||||
status = Result.Status.SUCCESS if status else Result.Status.FAILED
|
||||
if not results and not status:
|
||||
print("ERROR: Either .results or .status must be provided")
|
||||
raise
|
||||
if not name:
|
||||
name = _Environment.get().JOB_NAME
|
||||
if not name:
|
||||
print("ERROR: Failed to guess the .name")
|
||||
raise
|
||||
result_status = status or Result.Status.SUCCESS
|
||||
infos = []
|
||||
if info:
|
||||
if isinstance(info, Container):
|
||||
infos += info
|
||||
else:
|
||||
infos.append(info)
|
||||
if results and not status:
|
||||
for result in results:
|
||||
if result.status not in (Result.Status.SUCCESS, Result.Status.FAILED):
|
||||
Utils.raise_with_error(
|
||||
f"Unexpected result status [{result.status}] for Result.create_from call"
|
||||
)
|
||||
if result.status != Result.Status.SUCCESS:
|
||||
result_status = Result.Status.FAILED
|
||||
if results:
|
||||
for result in results:
|
||||
if result.info and with_info_from_results:
|
||||
infos.append(f"{result.name}: {result.info}")
|
||||
return Result(
|
||||
name=name,
|
||||
status=result_status,
|
||||
start_time=stopwatch.start_time if stopwatch else None,
|
||||
duration=stopwatch.duration if stopwatch else None,
|
||||
info="\n".join(infos) if infos else "",
|
||||
results=results or [],
|
||||
files=files or [],
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def get():
|
||||
return Result.from_fs(_Environment.get().JOB_NAME)
|
||||
|
||||
def is_completed(self):
|
||||
return self.status not in (Result.Status.PENDING, Result.Status.RUNNING)
|
||||
|
||||
def is_running(self):
|
||||
return self.status in (Result.Status.RUNNING,)
|
||||
|
||||
def is_ok(self):
|
||||
return self.status in (Result.Status.SKIPPED, Result.Status.SUCCESS)
|
||||
|
||||
def set_status(self, status) -> "Result":
|
||||
self.status = status
|
||||
self.dump()
|
||||
return self
|
||||
|
||||
def set_success(self) -> "Result":
|
||||
return self.set_status(Result.Status.SUCCESS)
|
||||
|
||||
def set_results(self, results: List["Result"]) -> "Result":
|
||||
self.results = results
|
||||
self.dump()
|
||||
return self
|
||||
|
||||
def set_files(self, files) -> "Result":
|
||||
for file in files:
|
||||
assert Path(
|
||||
file
|
||||
).is_file(), f"Not valid file [{file}] from file list [{files}]"
|
||||
if not self.files:
|
||||
self.files = []
|
||||
self.files += files
|
||||
self.dump()
|
||||
return self
|
||||
|
||||
def set_info(self, info: str) -> "Result":
|
||||
if self.info:
|
||||
self.info += "\n"
|
||||
self.info += info
|
||||
self.dump()
|
||||
return self
|
||||
|
||||
def set_link(self, link) -> "Result":
|
||||
self.links.append(link)
|
||||
self.dump()
|
||||
return self
|
||||
|
||||
@classmethod
|
||||
def file_name_static(cls, name):
|
||||
return f"{_Settings.TEMP_DIR}/result_{Utils.normalize_string(name)}.json"
|
||||
|
||||
@classmethod
|
||||
def from_dict(cls, obj: Dict[str, Any]) -> "Result":
|
||||
sub_results = []
|
||||
for result_dict in obj["results"] or []:
|
||||
sub_res = cls.from_dict(result_dict)
|
||||
sub_results.append(sub_res)
|
||||
obj["results"] = sub_results
|
||||
return Result(**obj)
|
||||
|
||||
def update_duration(self):
|
||||
if not self.duration and self.start_time:
|
||||
self.duration = datetime.datetime.utcnow().timestamp() - self.start_time
|
||||
else:
|
||||
if not self.duration:
|
||||
print(
|
||||
f"NOTE: duration is set for job [{self.name}] Result - do not update by CI"
|
||||
)
|
||||
else:
|
||||
print(
|
||||
f"NOTE: start_time is not set for job [{self.name}] Result - do not update duration"
|
||||
)
|
||||
return self
|
||||
|
||||
def update_sub_result(self, result: "Result"):
|
||||
assert self.results, "BUG?"
|
||||
for i, result_ in enumerate(self.results):
|
||||
if result_.name == result.name:
|
||||
self.results[i] = result
|
||||
self._update_status()
|
||||
return self
|
||||
|
||||
def _update_status(self):
|
||||
was_pending = False
|
||||
was_running = False
|
||||
if self.status == self.Status.PENDING:
|
||||
was_pending = True
|
||||
if self.status == self.Status.RUNNING:
|
||||
was_running = True
|
||||
|
||||
has_pending, has_running, has_failed = False, False, False
|
||||
for result_ in self.results:
|
||||
if result_.status in (self.Status.RUNNING,):
|
||||
has_running = True
|
||||
if result_.status in (self.Status.PENDING,):
|
||||
has_pending = True
|
||||
if result_.status in (self.Status.ERROR, self.Status.FAILED):
|
||||
has_failed = True
|
||||
if has_running:
|
||||
self.status = self.Status.RUNNING
|
||||
elif has_pending:
|
||||
self.status = self.Status.PENDING
|
||||
elif has_failed:
|
||||
self.status = self.Status.FAILED
|
||||
else:
|
||||
self.status = self.Status.SUCCESS
|
||||
if (was_pending or was_running) and self.status not in (
|
||||
self.Status.PENDING,
|
||||
self.Status.RUNNING,
|
||||
):
|
||||
print("Pipeline finished")
|
||||
self.update_duration()
|
||||
|
||||
@classmethod
|
||||
def generate_pending(cls, name, results=None):
|
||||
return Result(
|
||||
name=name,
|
||||
status=Result.Status.PENDING,
|
||||
start_time=None,
|
||||
duration=None,
|
||||
results=results or [],
|
||||
files=[],
|
||||
links=[],
|
||||
info="",
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def generate_skipped(cls, name, results=None):
|
||||
return Result(
|
||||
name=name,
|
||||
status=Result.Status.SKIPPED,
|
||||
start_time=None,
|
||||
duration=None,
|
||||
results=results or [],
|
||||
files=[],
|
||||
links=[],
|
||||
info="from cache",
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def create_from_command_execution(
|
||||
cls,
|
||||
name,
|
||||
command,
|
||||
with_log=False,
|
||||
fail_fast=True,
|
||||
workdir=None,
|
||||
command_args=None,
|
||||
command_kwargs=None,
|
||||
):
|
||||
"""
|
||||
Executes shell commands or Python callables, optionally logging output, and handles errors.
|
||||
|
||||
:param name: Check name
|
||||
:param command: Shell command (str) or Python callable, or list of them.
|
||||
:param workdir: Optional working directory.
|
||||
:param with_log: Boolean flag to log output to a file.
|
||||
:param fail_fast: Boolean flag to stop execution if one command fails.
|
||||
:param command_args: Positional arguments for the callable command.
|
||||
:param command_kwargs: Keyword arguments for the callable command.
|
||||
:return: Result object with status and optional log file.
|
||||
"""
|
||||
|
||||
# Stopwatch to track execution time
|
||||
stop_watch_ = Utils.Stopwatch()
|
||||
command_args = command_args or []
|
||||
command_kwargs = command_kwargs or {}
|
||||
|
||||
# Set log file path if logging is enabled
|
||||
log_file = (
|
||||
f"{_Settings.TEMP_DIR}/{Utils.normalize_string(name)}.log"
|
||||
if with_log
|
||||
else None
|
||||
)
|
||||
|
||||
# Ensure the command is a list for consistent iteration
|
||||
if not isinstance(command, list):
|
||||
fail_fast = False
|
||||
command = [command]
|
||||
|
||||
print(f"> Starting execution for [{name}]")
|
||||
res = True # Track success/failure status
|
||||
error_infos = []
|
||||
for command_ in command:
|
||||
if callable(command_):
|
||||
# If command is a Python function, call it with provided arguments
|
||||
result = command_(*command_args, **command_kwargs)
|
||||
if isinstance(result, bool):
|
||||
res = result
|
||||
elif result:
|
||||
error_infos.append(str(result))
|
||||
res = False
|
||||
else:
|
||||
# Run shell command in a specified directory with logging and verbosity
|
||||
with ContextManager.cd(workdir):
|
||||
exit_code = Shell.run(command_, verbose=True, log_file=log_file)
|
||||
res = exit_code == 0
|
||||
|
||||
# If fail_fast is enabled, stop on first failure
|
||||
if not res and fail_fast:
|
||||
print(f"Execution stopped due to failure in [{command_}]")
|
||||
break
|
||||
|
||||
# Create and return the result object with status and log file (if any)
|
||||
return Result.create_from(
|
||||
name=name,
|
||||
status=res,
|
||||
stopwatch=stop_watch_,
|
||||
info=error_infos,
|
||||
files=[log_file] if log_file else None,
|
||||
)
|
||||
|
||||
def finish_job_accordingly(self):
|
||||
self.dump()
|
||||
if not self.is_ok():
|
||||
print("ERROR: Job Failed")
|
||||
for result in self.results:
|
||||
if not result.is_ok():
|
||||
print("Failed checks:")
|
||||
print(" | ", result)
|
||||
sys.exit(1)
|
||||
else:
|
||||
print("ok")
|
||||
|
||||
|
||||
class ResultInfo:
|
||||
SETUP_ENV_JOB_FAILED = (
|
||||
"Failed to set up job env, it's praktika bug or misconfiguration"
|
||||
)
|
||||
PRE_JOB_FAILED = (
|
||||
"Failed to do a job pre-run step, it's praktika bug or misconfiguration"
|
||||
)
|
||||
KILLED = "Job killed or terminated, no Result provided"
|
||||
NOT_FOUND_IMPOSSIBLE = (
|
||||
"No Result file (bug, or job misbehaviour, must not ever happen)"
|
||||
)
|
||||
SKIPPED_DUE_TO_PREVIOUS_FAILURE = "Skipped due to previous failure"
|
||||
TIMEOUT = "Timeout"
|
||||
|
||||
GH_STATUS_ERROR = "Failed to set GH commit status"
|
||||
|
||||
NOT_FINALIZED = (
|
||||
"Job did not not provide Result: job script bug, died CI runner or praktika bug"
|
||||
)
|
||||
|
||||
S3_ERROR = "S3 call failure"
|
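The Result API added above is easiest to see end to end with a short usage sketch. The snippet below is illustrative and not part of the diff: it assumes a praktika job script running in an already configured job environment, and only uses calls defined in result.py and utils.py (Utils.Stopwatch, Result.create_from_command_execution, Result.create_from, dump); the check names and shell commands are placeholders.

# Illustrative sketch: how a job script could report its outcome via Result.
from praktika.result import Result
from praktika.utils import Utils

sw = Utils.Stopwatch()

# Each call runs a command and returns a Result with status, duration and log file.
sub_results = [
    Result.create_from_command_execution(name="unit tests", command="pytest -q", with_log=True),
    Result.create_from_command_execution(name="style check", command="black --check .", with_log=True),
]

# Aggregate into the job-level Result; the overall status is derived from the sub-results.
Result.create_from(results=sub_results, stopwatch=sw).dump()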
348
ci/praktika/runner.py
Normal file
@ -0,0 +1,348 @@
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
import traceback
|
||||
from pathlib import Path
|
||||
|
||||
from praktika._environment import _Environment
|
||||
from praktika.artifact import Artifact
|
||||
from praktika.cidb import CIDB
|
||||
from praktika.digest import Digest
|
||||
from praktika.hook_cache import CacheRunnerHooks
|
||||
from praktika.hook_html import HtmlRunnerHooks
|
||||
from praktika.result import Result, ResultInfo
|
||||
from praktika.runtime import RunConfig
|
||||
from praktika.s3 import S3
|
||||
from praktika.settings import Settings
|
||||
from praktika.utils import Shell, TeePopen, Utils
|
||||
|
||||
|
||||
class Runner:
|
||||
@staticmethod
|
||||
def generate_dummy_environment(workflow, job):
|
||||
print("WARNING: Generate dummy env for local test")
|
||||
Shell.check(
|
||||
f"mkdir -p {Settings.TEMP_DIR} {Settings.INPUT_DIR} {Settings.OUTPUT_DIR}"
|
||||
)
|
||||
_Environment(
|
||||
WORKFLOW_NAME=workflow.name,
|
||||
JOB_NAME=job.name,
|
||||
REPOSITORY="",
|
||||
BRANCH="",
|
||||
SHA="",
|
||||
PR_NUMBER=-1,
|
||||
EVENT_TYPE="",
|
||||
JOB_OUTPUT_STREAM="",
|
||||
EVENT_FILE_PATH="",
|
||||
CHANGE_URL="",
|
||||
COMMIT_URL="",
|
||||
BASE_BRANCH="",
|
||||
RUN_URL="",
|
||||
RUN_ID="",
|
||||
INSTANCE_ID="",
|
||||
INSTANCE_TYPE="",
|
||||
INSTANCE_LIFE_CYCLE="",
|
||||
).dump()
|
||||
workflow_config = RunConfig(
|
||||
name=workflow.name,
|
||||
digest_jobs={},
|
||||
digest_dockers={},
|
||||
sha="",
|
||||
cache_success=[],
|
||||
cache_success_base64=[],
|
||||
cache_artifacts={},
|
||||
)
|
||||
for docker in workflow.dockers:
|
||||
workflow_config.digest_dockers[docker.name] = Digest().calc_docker_digest(
|
||||
docker, workflow.dockers
|
||||
)
|
||||
workflow_config.dump()
|
||||
|
||||
Result.generate_pending(job.name).dump()
|
||||
|
||||
def _setup_env(self, _workflow, job):
|
||||
# source env file to write data into fs (workflow config json, workflow status json)
|
||||
Shell.check(f". {Settings.ENV_SETUP_SCRIPT}", verbose=True, strict=True)
|
||||
|
||||
# parse the same env script and apply envs from python so that this process sees them
|
||||
with open(Settings.ENV_SETUP_SCRIPT, "r") as f:
|
||||
content = f.read()
|
||||
export_pattern = re.compile(
|
||||
r"export (\w+)=\$\(cat<<\'EOF\'\n(.*?)EOF\n\)", re.DOTALL
|
||||
)
|
||||
matches = export_pattern.findall(content)
|
||||
for key, value in matches:
|
||||
value = value.strip()
|
||||
os.environ[key] = value
|
||||
print(f"Set environment variable {key}.")
|
||||
|
||||
# TODO: remove
|
||||
os.environ["PYTHONPATH"] = os.getcwd()
|
||||
|
||||
print("Read GH Environment")
|
||||
env = _Environment.from_env()
|
||||
env.JOB_NAME = job.name
|
||||
env.PARAMETER = job.parameter
|
||||
env.dump()
|
||||
print(env)
|
||||
|
||||
return 0
|
||||
|
||||
def _pre_run(self, workflow, job):
|
||||
env = _Environment.get()
|
||||
|
||||
result = Result(
|
||||
name=job.name,
|
||||
status=Result.Status.RUNNING,
|
||||
start_time=Utils.timestamp(),
|
||||
)
|
||||
result.dump()
|
||||
|
||||
if workflow.enable_report and job.name != Settings.CI_CONFIG_JOB_NAME:
|
||||
print("Update Job and Workflow Report")
|
||||
HtmlRunnerHooks.pre_run(workflow, job)
|
||||
|
||||
print("Download required artifacts")
|
||||
required_artifacts = []
|
||||
if job.requires and workflow.artifacts:
|
||||
for requires_artifact_name in job.requires:
|
||||
for artifact in workflow.artifacts:
|
||||
if (
|
||||
artifact.name == requires_artifact_name
|
||||
and artifact.type == Artifact.Type.S3
|
||||
):
|
||||
required_artifacts.append(artifact)
|
||||
print(f"--- Job requires s3 artifacts [{required_artifacts}]")
|
||||
if workflow.enable_cache:
|
||||
prefixes = CacheRunnerHooks.pre_run(
|
||||
_job=job, _workflow=workflow, _required_artifacts=required_artifacts
|
||||
)
|
||||
else:
|
||||
prefixes = [env.get_s3_prefix()] * len(required_artifacts)
|
||||
for artifact, prefix in zip(required_artifacts, prefixes):
|
||||
s3_path = f"{Settings.S3_ARTIFACT_PATH}/{prefix}/{Utils.normalize_string(artifact._provided_by)}/{Path(artifact.path).name}"
|
||||
assert S3.copy_file_from_s3(s3_path=s3_path, local_path=Settings.INPUT_DIR)
|
||||
|
||||
return 0
|
||||
|
||||
def _run(self, workflow, job, docker="", no_docker=False, param=None):
|
||||
if param:
|
||||
if not isinstance(param, str):
|
||||
Utils.raise_with_error(
|
||||
f"Custom param for local tests must be of type str, got [{type(param)}]"
|
||||
)
|
||||
env = _Environment.get()
|
||||
env.LOCAL_RUN_PARAM = param
|
||||
env.dump()
|
||||
print(f"Custom param for local tests [{param}] dumped into Environment")
|
||||
|
||||
if job.run_in_docker and not no_docker:
|
||||
# TODO: add support for any image, including not from ci config (e.g. ubuntu:latest)
|
||||
docker_tag = RunConfig.from_fs(workflow.name).digest_dockers[
|
||||
job.run_in_docker
|
||||
]
|
||||
docker = docker or f"{job.run_in_docker}:{docker_tag}"
|
||||
cmd = f"docker run --rm --user \"$(id -u):$(id -g)\" -e PYTHONPATH='{Settings.DOCKER_WD}' --volume ./:{Settings.DOCKER_WD} --volume {Settings.TEMP_DIR}:{Settings.TEMP_DIR} --workdir={Settings.DOCKER_WD} {docker} {job.command}"
|
||||
else:
|
||||
cmd = job.command
|
||||
print(f"--- Run command [{cmd}]")
|
||||
|
||||
with TeePopen(cmd, timeout=job.timeout) as process:
|
||||
exit_code = process.wait()
|
||||
|
||||
result = Result.from_fs(job.name)
|
||||
if exit_code != 0:
|
||||
if not result.is_completed():
|
||||
if process.timeout_exceeded:
|
||||
print(
|
||||
f"WARNING: Job timed out: [{job.name}], timeout [{job.timeout}], exit code [{exit_code}]"
|
||||
)
|
||||
result.set_status(Result.Status.ERROR).set_info(
|
||||
ResultInfo.TIMEOUT
|
||||
)
|
||||
elif result.is_running():
|
||||
info = f"ERROR: Job terminated with an error, exit code [{exit_code}] - set status to [{Result.Status.ERROR}]"
|
||||
print(info)
|
||||
result.set_status(Result.Status.ERROR).set_info(info)
|
||||
else:
|
||||
info = f"ERROR: Invalid status [{result.status}] for exit code [{exit_code}] - switch to [{Result.Status.ERROR}]"
|
||||
print(info)
|
||||
result.set_status(Result.Status.ERROR).set_info(info)
|
||||
result.dump()
|
||||
|
||||
return exit_code
|
||||
|
||||
def _post_run(
|
||||
self, workflow, job, setup_env_exit_code, prerun_exit_code, run_exit_code
|
||||
):
|
||||
info_errors = []
|
||||
env = _Environment.get()
|
||||
result_exist = Result.exist(job.name)
|
||||
|
||||
if setup_env_exit_code != 0:
|
||||
info = f"ERROR: {ResultInfo.SETUP_ENV_JOB_FAILED}"
|
||||
print(info)
|
||||
# set Result with error and logs
|
||||
Result(
|
||||
name=job.name,
|
||||
status=Result.Status.ERROR,
|
||||
start_time=Utils.timestamp(),
|
||||
duration=0.0,
|
||||
info=info,
|
||||
).dump()
|
||||
elif prerun_exit_code != 0:
|
||||
info = f"ERROR: {ResultInfo.PRE_JOB_FAILED}"
|
||||
print(info)
|
||||
# set Result with error and logs
|
||||
Result(
|
||||
name=job.name,
|
||||
status=Result.Status.ERROR,
|
||||
start_time=Utils.timestamp(),
|
||||
duration=0.0,
|
||||
info=info,
|
||||
).dump()
|
||||
elif not result_exist:
|
||||
info = f"ERROR: {ResultInfo.NOT_FOUND_IMPOSSIBLE}"
|
||||
print(info)
|
||||
Result(
|
||||
name=job.name,
|
||||
start_time=Utils.timestamp(),
|
||||
duration=None,
|
||||
status=Result.Status.ERROR,
|
||||
info=ResultInfo.NOT_FOUND_IMPOSSIBLE,
|
||||
).dump()
|
||||
|
||||
result = Result.from_fs(job.name)
|
||||
|
||||
if not result.is_completed():
|
||||
info = f"ERROR: {ResultInfo.KILLED}"
|
||||
print(info)
|
||||
result.set_info(info).set_status(Result.Status.ERROR).dump()
|
||||
|
||||
result.set_files(files=[Settings.RUN_LOG])
|
||||
result.update_duration().dump()
|
||||
|
||||
if result.info and result.status != Result.Status.SUCCESS:
|
||||
# provide job info to workflow level
|
||||
info_errors.append(result.info)
|
||||
|
||||
if run_exit_code == 0:
|
||||
providing_artifacts = []
|
||||
if job.provides and workflow.artifacts:
|
||||
for provides_artifact_name in job.provides:
|
||||
for artifact in workflow.artifacts:
|
||||
if (
|
||||
artifact.name == provides_artifact_name
|
||||
and artifact.type == Artifact.Type.S3
|
||||
):
|
||||
providing_artifacts.append(artifact)
|
||||
if providing_artifacts:
|
||||
print(f"Job provides s3 artifacts [{providing_artifacts}]")
|
||||
for artifact in providing_artifacts:
|
||||
try:
|
||||
assert Shell.check(
|
||||
f"ls -l {artifact.path}", verbose=True
|
||||
), f"Artifact {artifact.path} not found"
|
||||
s3_path = f"{Settings.S3_ARTIFACT_PATH}/{env.get_s3_prefix()}/{Utils.normalize_string(env.JOB_NAME)}"
|
||||
link = S3.copy_file_to_s3(
|
||||
s3_path=s3_path, local_path=artifact.path
|
||||
)
|
||||
result.set_link(link)
|
||||
except Exception as e:
|
||||
error = (
|
||||
f"ERROR: Failed to upload artifact [{artifact}], ex [{e}]"
|
||||
)
|
||||
print(error)
|
||||
info_errors.append(error)
|
||||
result.set_status(Result.Status.ERROR)
|
||||
|
||||
if workflow.enable_cidb:
|
||||
print("Insert results to CIDB")
|
||||
try:
|
||||
CIDB(
|
||||
url=workflow.get_secret(Settings.SECRET_CI_DB_URL).get_value(),
|
||||
passwd=workflow.get_secret(
|
||||
Settings.SECRET_CI_DB_PASSWORD
|
||||
).get_value(),
|
||||
).insert(result)
|
||||
except Exception as ex:
|
||||
error = f"ERROR: Failed to insert data into CI DB, exception [{ex}]"
|
||||
print(error)
|
||||
info_errors.append(error)
|
||||
|
||||
result.dump()
|
||||
|
||||
# always in the end
|
||||
if workflow.enable_cache:
|
||||
print(f"Run CI cache hook")
|
||||
if result.is_ok():
|
||||
CacheRunnerHooks.post_run(workflow, job)
|
||||
|
||||
if workflow.enable_report:
|
||||
print(f"Run html report hook")
|
||||
HtmlRunnerHooks.post_run(workflow, job, info_errors)
|
||||
|
||||
return True
|
||||
|
||||
def run(
|
||||
self, workflow, job, docker="", dummy_env=False, no_docker=False, param=None
|
||||
):
|
||||
res = True
|
||||
setup_env_code = -10
|
||||
prerun_code = -10
|
||||
run_code = -10
|
||||
|
||||
if res and not dummy_env:
|
||||
print(
|
||||
f"\n\n=== Setup env script [{job.name}], workflow [{workflow.name}] ==="
|
||||
)
|
||||
try:
|
||||
setup_env_code = self._setup_env(workflow, job)
|
||||
# Source the bash script and capture the environment variables
|
||||
res = setup_env_code == 0
|
||||
if not res:
|
||||
print(
|
||||
f"ERROR: Setup env script failed with exit code [{setup_env_code}]"
|
||||
)
|
||||
except Exception as e:
|
||||
print(f"ERROR: Setup env script failed with exception [{e}]")
|
||||
traceback.print_exc()
|
||||
print(f"=== Setup env finished ===\n\n")
|
||||
else:
|
||||
self.generate_dummy_environment(workflow, job)
|
||||
|
||||
if res and not dummy_env:
|
||||
res = False
|
||||
print(f"=== Pre run script [{job.name}], workflow [{workflow.name}] ===")
|
||||
try:
|
||||
prerun_code = self._pre_run(workflow, job)
|
||||
res = prerun_code == 0
|
||||
if not res:
|
||||
print(f"ERROR: Pre-run failed with exit code [{prerun_code}]")
|
||||
except Exception as e:
|
||||
print(f"ERROR: Pre-run script failed with exception [{e}]")
|
||||
traceback.print_exc()
|
||||
print(f"=== Pre run finished ===\n\n")
|
||||
|
||||
if res:
|
||||
res = False
|
||||
print(f"=== Run script [{job.name}], workflow [{workflow.name}] ===")
|
||||
try:
|
||||
run_code = self._run(
|
||||
workflow, job, docker=docker, no_docker=no_docker, param=param
|
||||
)
|
||||
res = run_code == 0
|
||||
if not res:
|
||||
print(f"ERROR: Run failed with exit code [{run_code}]")
|
||||
except Exception as e:
|
||||
print(f"ERROR: Run script failed with exception [{e}]")
|
||||
traceback.print_exc()
|
||||
print(f"=== Run scrip finished ===\n\n")
|
||||
|
||||
if not dummy_env:
|
||||
print(f"=== Post run script [{job.name}], workflow [{workflow.name}] ===")
|
||||
self._post_run(workflow, job, setup_env_code, prerun_code, run_code)
|
||||
print(f"=== Post run scrip finished ===")
|
||||
|
||||
if not res:
|
||||
sys.exit(1)
|
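For local experiments the Runner above can be driven directly from Python. The sketch below is a hypothetical invocation (the workflow and job objects come from the user's praktika configuration, which is outside this file); it exercises the setup env -> pre run -> run -> post run sequence implemented in run(), with dummy_env=True so that a stub environment and RunConfig are generated instead of sourcing the real CI environment.

# Hypothetical local run of a single job; workflow/job come from the user config.
from praktika.mangle import _get_workflows
from praktika.runner import Runner

workflow = _get_workflows()[0]   # pick a workflow defined in the user's config
job = workflow.jobs[0]           # and one of its jobs

# dummy_env=True generates a stub _Environment/RunConfig; no_docker=True runs
# the job command on the host even if job.run_in_docker is set.
Runner().run(workflow, job, dummy_env=True, no_docker=True)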
35
ci/praktika/runtime.py
Normal file
@ -0,0 +1,35 @@
|
||||
from dataclasses import dataclass
|
||||
from typing import Dict, List
|
||||
|
||||
from praktika.cache import Cache
|
||||
from praktika.settings import Settings
|
||||
from praktika.utils import MetaClasses, Utils
|
||||
|
||||
|
||||
@dataclass
|
||||
class RunConfig(MetaClasses.Serializable):
|
||||
name: str
|
||||
digest_jobs: Dict[str, str]
|
||||
digest_dockers: Dict[str, str]
|
||||
cache_success: List[str]
|
||||
# there might be issues with special characters in job names if used directly in yaml syntax - create a base64-encoded list to avoid this
|
||||
cache_success_base64: List[str]
|
||||
cache_artifacts: Dict[str, Cache.CacheRecord]
|
||||
sha: str
|
||||
|
||||
@classmethod
|
||||
def from_dict(cls, obj):
|
||||
cache_artifacts = obj["cache_artifacts"]
|
||||
cache_artifacts_deserialized = {}
|
||||
for artifact_name, cache_artifact in cache_artifacts.items():
|
||||
cache_artifacts_deserialized[artifact_name] = Cache.CacheRecord.from_dict(
|
||||
cache_artifact
|
||||
)
|
||||
obj["cache_artifacts"] = cache_artifacts_deserialized
|
||||
return RunConfig(**obj)
|
||||
|
||||
@classmethod
|
||||
def file_name_static(cls, name):
|
||||
return (
|
||||
f"{Settings.TEMP_DIR}/workflow_config_{Utils.normalize_string(name)}.json"
|
||||
)
|
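RunConfig is a MetaClasses.Serializable, so it round-trips through a JSON file under Settings.TEMP_DIR. A minimal sketch (all field values below are placeholders):

# Minimal round-trip sketch for RunConfig; values are placeholders.
from praktika.runtime import RunConfig

RunConfig(
    name="example_workflow",
    digest_jobs={},
    digest_dockers={},
    cache_success=[],
    cache_success_base64=[],
    cache_artifacts={},
    sha="deadbeef",
).dump()  # writes workflow_config_example_workflow.json under Settings.TEMP_DIR

config = RunConfig.from_fs("example_workflow")  # reads it back via from_dict()
print(config.sha)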
295
ci/praktika/s3.py
Normal file
@ -0,0 +1,295 @@
|
||||
import dataclasses
|
||||
import json
|
||||
import time
|
||||
from pathlib import Path
|
||||
from typing import Dict
|
||||
|
||||
from praktika._environment import _Environment
|
||||
from praktika.settings import Settings
|
||||
from praktika.utils import Shell, Utils
|
||||
|
||||
|
||||
class S3:
|
||||
@dataclasses.dataclass
|
||||
class Object:
|
||||
AcceptRanges: str
|
||||
Expiration: str
|
||||
LastModified: str
|
||||
ContentLength: int
|
||||
ETag: str
|
||||
ContentType: str
|
||||
ServerSideEncryption: str
|
||||
Metadata: Dict
|
||||
|
||||
def has_tags(self, tags):
|
||||
meta = self.Metadata
|
||||
for k, v in tags.items():
|
||||
if k not in meta or meta[k] != v:
|
||||
print(f"tag [{k}={v}] does not match meta [{meta}]")
|
||||
return False
|
||||
return True
|
||||
|
||||
@classmethod
|
||||
def clean_s3_directory(cls, s3_path):
|
||||
assert len(s3_path.split("/")) > 2, "check to not delete too much"
|
||||
cmd = f"aws s3 rm s3://{s3_path} --recursive"
|
||||
cls.run_command_with_retries(cmd, retries=1)
|
||||
return
|
||||
|
||||
@classmethod
|
||||
def copy_file_to_s3(cls, s3_path, local_path, text=False):
|
||||
assert Path(local_path).exists(), f"Path [{local_path}] does not exist"
|
||||
assert Path(s3_path), f"Invalid S3 Path [{s3_path}]"
|
||||
assert Path(
|
||||
local_path
|
||||
).is_file(), f"Path [{local_path}] is not file. Only files are supported"
|
||||
file_name = Path(local_path).name
|
||||
s3_full_path = s3_path
|
||||
if not s3_full_path.endswith(file_name):
|
||||
s3_full_path = f"{s3_path}/{Path(local_path).name}"
|
||||
cmd = f"aws s3 cp {local_path} s3://{s3_full_path}"
|
||||
if text:
|
||||
cmd += " --content-type text/plain"
|
||||
res = cls.run_command_with_retries(cmd)
|
||||
if not res:
|
||||
raise
|
||||
bucket = s3_path.split("/")[0]
|
||||
endpoint = Settings.S3_BUCKET_TO_HTTP_ENDPOINT[bucket]
|
||||
assert endpoint
|
||||
return f"https://{s3_full_path}".replace(bucket, endpoint)
|
||||
|
||||
@classmethod
|
||||
def put(cls, s3_path, local_path, text=False, metadata=None):
|
||||
assert Path(local_path).exists(), f"Path [{local_path}] does not exist"
|
||||
assert Path(s3_path), f"Invalid S3 Path [{s3_path}]"
|
||||
assert Path(
|
||||
local_path
|
||||
).is_file(), f"Path [{local_path}] is not file. Only files are supported"
|
||||
file_name = Path(local_path).name
|
||||
s3_full_path = s3_path
|
||||
if not s3_full_path.endswith(file_name):
|
||||
s3_full_path = f"{s3_path}/{Path(local_path).name}"
|
||||
|
||||
s3_full_path = str(s3_full_path).removeprefix("s3://")
|
||||
bucket, key = s3_full_path.split("/", maxsplit=1)
|
||||
|
||||
command = (
|
||||
f"aws s3api put-object --bucket {bucket} --key {key} --body {local_path}"
|
||||
)
|
||||
if metadata:
|
||||
for k, v in metadata.items():
|
||||
command += f" --metadata {k}={v}"
|
||||
|
||||
cmd = f"aws s3 cp {local_path} s3://{s3_full_path}"
|
||||
if text:
|
||||
cmd += " --content-type text/plain"
|
||||
res = cls.run_command_with_retries(command)
|
||||
assert res
|
||||
|
||||
@classmethod
|
||||
def run_command_with_retries(cls, command, retries=Settings.MAX_RETRIES_S3):
|
||||
i = 0
|
||||
res = False
|
||||
while not res and i < retries:
|
||||
i += 1
|
||||
ret_code, stdout, stderr = Shell.get_res_stdout_stderr(
|
||||
command, verbose=True
|
||||
)
|
||||
if "aws sso login" in stderr:
|
||||
print("ERROR: aws login expired")
|
||||
break
|
||||
elif "does not exist" in stderr:
|
||||
print("ERROR: requested file does not exist")
|
||||
break
|
||||
if ret_code != 0:
|
||||
print(
|
||||
f"ERROR: aws s3 cp failed, stdout/stderr err: [{stderr}], out [{stdout}]"
|
||||
)
|
||||
res = ret_code == 0
|
||||
return res
|
||||
|
||||
@classmethod
|
||||
def get_link(cls, s3_path, local_path):
|
||||
s3_full_path = f"{s3_path}/{Path(local_path).name}"
|
||||
bucket = s3_path.split("/")[0]
|
||||
endpoint = Settings.S3_BUCKET_TO_HTTP_ENDPOINT[bucket]
|
||||
return f"https://{s3_full_path}".replace(bucket, endpoint)
|
||||
|
||||
@classmethod
|
||||
def copy_file_from_s3(cls, s3_path, local_path):
|
||||
assert Path(s3_path), f"Invalid S3 Path [{s3_path}]"
|
||||
if Path(local_path).is_dir():
|
||||
local_path = Path(local_path) / Path(s3_path).name
|
||||
else:
|
||||
assert Path(
|
||||
local_path
|
||||
).parent.is_dir(), f"Parent path for [{local_path}] does not exist"
|
||||
cmd = f"aws s3 cp s3://{s3_path} {local_path}"
|
||||
res = cls.run_command_with_retries(cmd)
|
||||
return res
|
||||
|
||||
@classmethod
|
||||
def head_object(cls, s3_path):
|
||||
s3_path = str(s3_path).removeprefix("s3://")
|
||||
bucket, key = s3_path.split("/", maxsplit=1)
|
||||
output = Shell.get_output(
|
||||
f"aws s3api head-object --bucket {bucket} --key {key}", verbose=True
|
||||
)
|
||||
if not output:
|
||||
return None
|
||||
else:
|
||||
return cls.Object(**json.loads(output))
|
||||
|
||||
@classmethod
|
||||
def delete(cls, s3_path):
|
||||
assert Path(s3_path), f"Invalid S3 Path [{s3_path}]"
|
||||
return Shell.check(
|
||||
f"aws s3 rm s3://{s3_path}",
|
||||
verbose=True,
|
||||
)
|
||||
|
||||
# TODO: apparently should be placed into separate file to be used only inside praktika
|
||||
# keeping this module clean from importing Settings, Environment and etc, making it easy for use externally
|
||||
@classmethod
|
||||
def copy_result_to_s3(cls, result, unlock=True):
|
||||
result.dump()
|
||||
env = _Environment.get()
|
||||
s3_path = f"{Settings.HTML_S3_PATH}/{env.get_s3_prefix()}"
|
||||
s3_path_full = f"{s3_path}/{Path(result.file_name()).name}"
|
||||
url = S3.copy_file_to_s3(s3_path=s3_path, local_path=result.file_name())
|
||||
if env.PR_NUMBER:
|
||||
print("Duplicate Result for latest commit alias in PR")
|
||||
s3_path = f"{Settings.HTML_S3_PATH}/{env.get_s3_prefix(latest=True)}"
|
||||
url = S3.copy_file_to_s3(s3_path=s3_path, local_path=result.file_name())
|
||||
if unlock:
|
||||
if not cls.unlock(s3_path_full):
|
||||
print(f"ERROR: File [{s3_path_full}] unlock failure")
|
||||
assert False # TODO: investigate
|
||||
return url
|
||||
|
||||
@classmethod
|
||||
def copy_result_from_s3(cls, local_path, lock=True):
|
||||
env = _Environment.get()
|
||||
file_name = Path(local_path).name
|
||||
s3_path = f"{Settings.HTML_S3_PATH}/{env.get_s3_prefix()}/{file_name}"
|
||||
if lock:
|
||||
cls.lock(s3_path)
|
||||
if not S3.copy_file_from_s3(s3_path=s3_path, local_path=local_path):
|
||||
print(f"ERROR: failed to cp file [{s3_path}] from s3")
|
||||
raise
|
||||
|
||||
@classmethod
|
||||
def lock(cls, s3_path, level=0):
|
||||
assert level < 3, "Never"
|
||||
env = _Environment.get()
|
||||
s3_path_lock = s3_path + f".lock"
|
||||
file_path_lock = f"{Settings.TEMP_DIR}/{Path(s3_path_lock).name}"
|
||||
assert Shell.check(
|
||||
f"echo '''{env.JOB_NAME}''' > {file_path_lock}", verbose=True
|
||||
), "Never"
|
||||
|
||||
i = 20
|
||||
meta = S3.head_object(s3_path_lock)
|
||||
while meta:
|
||||
print(f"WARNING: Failed to acquire lock, meta [{meta}] - wait")
|
||||
i -= 5
|
||||
if i < 0:
|
||||
info = f"ERROR: lock acquire failure - unlock forcefully"
|
||||
print(info)
|
||||
env.add_info(info)
|
||||
break
|
||||
time.sleep(5)
meta = S3.head_object(s3_path_lock)
|
||||
|
||||
metadata = {"job": Utils.to_base64(env.JOB_NAME)}
|
||||
S3.put(
|
||||
s3_path=s3_path_lock,
|
||||
local_path=file_path_lock,
|
||||
metadata=metadata,
|
||||
)
|
||||
time.sleep(1)
|
||||
obj = S3.head_object(s3_path_lock)
|
||||
if not obj or not obj.has_tags(tags=metadata):
|
||||
print(f"WARNING: locked by another job [{obj}]")
|
||||
env.add_info("S3 lock file failure")
|
||||
cls.lock(s3_path, level=level + 1)
|
||||
print("INFO: lock acquired")
|
||||
|
||||
@classmethod
|
||||
def unlock(cls, s3_path):
|
||||
s3_path_lock = s3_path + ".lock"
|
||||
env = _Environment.get()
|
||||
obj = S3.head_object(s3_path_lock)
|
||||
if not obj:
|
||||
print("ERROR: lock file is removed")
|
||||
assert False # investigate
|
||||
elif not obj.has_tags({"job": Utils.to_base64(env.JOB_NAME)}):
|
||||
print("ERROR: lock file was acquired by another job")
|
||||
assert False # investigate
|
||||
|
||||
if not S3.delete(s3_path_lock):
|
||||
print(f"ERROR: File [{s3_path_lock}] delete failure")
|
||||
print("INFO: lock released")
|
||||
return True
|
||||
|
||||
@classmethod
|
||||
def get_result_link(cls, result):
|
||||
env = _Environment.get()
|
||||
s3_path = f"{Settings.HTML_S3_PATH}/{env.get_s3_prefix(latest=True if env.PR_NUMBER else False)}"
|
||||
return S3.get_link(s3_path=s3_path, local_path=result.file_name())
|
||||
|
||||
@classmethod
|
||||
def clean_latest_result(cls):
|
||||
env = _Environment.get()
|
||||
env.SHA = "latest"
|
||||
assert env.PR_NUMBER
|
||||
s3_path = f"{Settings.HTML_S3_PATH}/{env.get_s3_prefix()}"
|
||||
S3.clean_s3_directory(s3_path=s3_path)
|
||||
|
||||
@classmethod
|
||||
def _upload_file_to_s3(
|
||||
cls, local_file_path, upload_to_s3: bool, text: bool = False, s3_subprefix=""
|
||||
) -> str:
|
||||
if upload_to_s3:
|
||||
env = _Environment.get()
|
||||
s3_path = f"{Settings.HTML_S3_PATH}/{env.get_s3_prefix()}"
|
||||
if s3_subprefix:
|
||||
s3_subprefix = s3_subprefix.removeprefix("/").removesuffix("/")
|
||||
s3_path += f"/{s3_subprefix}"
|
||||
html_link = S3.copy_file_to_s3(
|
||||
s3_path=s3_path, local_path=local_file_path, text=text
|
||||
)
|
||||
return html_link
|
||||
return f"file://{Path(local_file_path).absolute()}"
|
||||
|
||||
@classmethod
|
||||
def upload_result_files_to_s3(cls, result):
|
||||
if result.results:
|
||||
for result_ in result.results:
|
||||
cls.upload_result_files_to_s3(result_)
|
||||
for file in result.files:
|
||||
if not Path(file).is_file():
|
||||
print(f"ERROR: Invalid file [{file}] in [{result.name}] - skip upload")
|
||||
result.info += f"\nWARNING: Result file [{file}] was not found"
|
||||
file_link = cls._upload_file_to_s3(file, upload_to_s3=False)
|
||||
else:
|
||||
is_text = False
|
||||
for text_file_suffix in Settings.TEXT_CONTENT_EXTENSIONS:
|
||||
if file.endswith(text_file_suffix):
|
||||
print(
|
||||
f"File [{file}] matches Settings.TEXT_CONTENT_EXTENSIONS [{Settings.TEXT_CONTENT_EXTENSIONS}] - add text attribute for s3 object"
|
||||
)
|
||||
is_text = True
|
||||
break
|
||||
file_link = cls._upload_file_to_s3(
|
||||
file,
|
||||
upload_to_s3=True,
|
||||
text=is_text,
|
||||
s3_subprefix=Utils.normalize_string(result.name),
|
||||
)
|
||||
result.links.append(file_link)
|
||||
if result.files:
|
||||
print(
|
||||
f"Result files [{result.files}] uploaded to s3 [{result.links[-len(result.files):]}] - clean files list"
|
||||
)
|
||||
result.files = []
|
||||
result.dump()
|
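The S3 helpers above are thin wrappers around the aws CLI. A hedged sketch of the typical upload/download flow follows; the bucket and paths are made up, and the bucket must be present in Settings.S3_BUCKET_TO_HTTP_ENDPOINT for link generation to work.

# Hedged sketch; bucket name and paths are placeholders.
from praktika.s3 import S3

# Upload a local file; returns an https:// link derived from the bucket-to-endpoint mapping.
link = S3.copy_file_to_s3(s3_path="my-ci-bucket/reports/123", local_path="./report.html", text=True)
print(link)

# Download it elsewhere; passing a directory keeps the original file name.
S3.copy_file_from_s3(s3_path="my-ci-bucket/reports/123/report.html", local_path="/tmp")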
61
ci/praktika/secret.py
Normal file
@ -0,0 +1,61 @@
|
||||
import dataclasses
|
||||
import os
|
||||
|
||||
from praktika.utils import Shell
|
||||
|
||||
|
||||
class Secret:
|
||||
class Type:
|
||||
AWS_SSM_VAR = "aws parameter"
|
||||
AWS_SSM_SECRET = "aws secret"
|
||||
GH_SECRET = "gh secret"
|
||||
|
||||
@dataclasses.dataclass
|
||||
class Config:
|
||||
name: str
|
||||
type: str
|
||||
|
||||
def is_gh(self):
|
||||
return self.type == Secret.Type.GH_SECRET
|
||||
|
||||
def get_value(self):
|
||||
if self.type == Secret.Type.AWS_SSM_VAR:
|
||||
return self.get_aws_ssm_var()
|
||||
if self.type == Secret.Type.AWS_SSM_SECRET:
|
||||
return self.get_aws_ssm_secret()
|
||||
elif self.type == Secret.Type.GH_SECRET:
|
||||
return self.get_gh_secret()
|
||||
else:
|
||||
assert False, f"Not supported secret type, secret [{self}]"
|
||||
|
||||
def get_aws_ssm_var(self):
|
||||
res = Shell.get_output(
|
||||
f"aws ssm get-parameter --name {self.name} --with-decryption --output text --query Parameter.Value",
|
||||
)
|
||||
if not res:
|
||||
print(f"ERROR: Failed to get secret [{self.name}]")
|
||||
raise RuntimeError()
|
||||
return res
|
||||
|
||||
def get_aws_ssm_secret(self):
|
||||
name, secret_key_name = self.name, ""
|
||||
if "." in self.name:
|
||||
name, secret_key_name = self.name.split(".")
|
||||
cmd = f"aws secretsmanager get-secret-value --secret-id {name} --query SecretString --output text"
|
||||
if secret_key_name:
|
||||
cmd += f" | jq -r '.[\"{secret_key_name}\"]'"
|
||||
res = Shell.get_output(cmd, verbose=True)
|
||||
if not res:
|
||||
print(f"ERROR: Failed to get secret [{self.name}]")
|
||||
raise RuntimeError()
|
||||
return res
|
||||
|
||||
def get_gh_secret(self):
|
||||
res = os.getenv(f"{self.name}")
|
||||
if not res:
|
||||
print(f"ERROR: Failed to get secret [{self.name}]")
|
||||
raise RuntimeError()
|
||||
return res
|
||||
|
||||
def __repr__(self):
|
||||
return self.name
|
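Secrets are declared in the workflow configuration and resolved lazily through get_value(). An illustrative sketch (the secret names are placeholders; resolution shells out to aws or reads an environment variable depending on the type):

# Illustrative only; secret names below are placeholders.
from praktika.secret import Secret

ssm_secret = Secret.Config(name="ci_db_password", type=Secret.Type.AWS_SSM_SECRET)
gh_secret = Secret.Config(name="DOCKERHUB_TOKEN", type=Secret.Type.GH_SECRET)

password = ssm_secret.get_value()  # aws secretsmanager get-secret-value ...
token = gh_secret.get_value()      # read from the environment on the GH runner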
8
ci/praktika/settings.py
Normal file
@ -0,0 +1,8 @@
|
||||
from praktika._settings import _Settings
|
||||
from praktika.mangle import _get_user_settings
|
||||
|
||||
Settings = _Settings()
|
||||
|
||||
user_settings = _get_user_settings()
|
||||
for setting, value in user_settings.items():
|
||||
Settings.__setattr__(setting, value)
|
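Importing praktika.settings therefore yields a single _Settings instance with user overrides already applied on top of the defaults, for example (attribute names taken from their usages elsewhere in this change):

# The merged settings object is a plain attribute holder.
from praktika.settings import Settings

print(Settings.TEMP_DIR)        # default from _Settings unless overridden by user settings
print(Settings.MAX_RETRIES_S3)  # used by S3.run_command_with_retries above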
597
ci/praktika/utils.py
Normal file
@ -0,0 +1,597 @@
|
||||
import base64
|
||||
import dataclasses
|
||||
import glob
|
||||
import json
|
||||
import multiprocessing
|
||||
import os
|
||||
import re
|
||||
import signal
|
||||
import subprocess
|
||||
import sys
|
||||
import time
|
||||
from abc import ABC, abstractmethod
|
||||
from contextlib import contextmanager
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
from threading import Thread
|
||||
from types import SimpleNamespace
|
||||
from typing import Any, Dict, Iterator, List, Optional, Type, TypeVar, Union
|
||||
|
||||
from praktika._settings import _Settings
|
||||
|
||||
T = TypeVar("T", bound="Serializable")
|
||||
|
||||
|
||||
class MetaClasses:
|
||||
class WithIter(type):
|
||||
def __iter__(cls):
|
||||
return (v for k, v in cls.__dict__.items() if not k.startswith("_"))
|
||||
|
||||
@dataclasses.dataclass
|
||||
class Serializable(ABC):
|
||||
@classmethod
|
||||
def to_dict(cls, obj):
|
||||
if dataclasses.is_dataclass(obj):
|
||||
return {k: cls.to_dict(v) for k, v in dataclasses.asdict(obj).items()}
|
||||
elif isinstance(obj, SimpleNamespace):
|
||||
return {k: cls.to_dict(v) for k, v in vars(obj).items()}
|
||||
elif isinstance(obj, list):
|
||||
return [cls.to_dict(i) for i in obj]
|
||||
elif isinstance(obj, dict):
|
||||
return {k: cls.to_dict(v) for k, v in obj.items()}
|
||||
else:
|
||||
return obj
|
||||
|
||||
@classmethod
|
||||
def from_dict(cls: Type[T], obj: Dict[str, Any]) -> T:
|
||||
return cls(**obj)
|
||||
|
||||
@classmethod
|
||||
def from_fs(cls: Type[T], name) -> T:
|
||||
with open(cls.file_name_static(name), "r", encoding="utf8") as f:
|
||||
try:
|
||||
return cls.from_dict(json.load(f))
|
||||
except json.decoder.JSONDecodeError as ex:
|
||||
print(f"ERROR: failed to parse json, ex [{ex}]")
|
||||
print(f"JSON content [{cls.file_name_static(name)}]")
|
||||
Shell.check(f"cat {cls.file_name_static(name)}")
|
||||
raise ex
|
||||
|
||||
@classmethod
|
||||
@abstractmethod
|
||||
def file_name_static(cls, name):
|
||||
pass
|
||||
|
||||
def file_name(self):
|
||||
return self.file_name_static(self.name)
|
||||
|
||||
def dump(self):
|
||||
with open(self.file_name(), "w", encoding="utf8") as f:
|
||||
json.dump(self.to_dict(self), f, indent=4)
|
||||
return self
|
||||
|
||||
@classmethod
|
||||
def exist(cls, name):
|
||||
return Path(cls.file_name_static(name)).is_file()
|
||||
|
||||
def to_json(self, pretty=False):
|
||||
return json.dumps(dataclasses.asdict(self), indent=4 if pretty else None)
|
||||
|
||||
|
||||
class ContextManager:
|
||||
@staticmethod
|
||||
@contextmanager
|
||||
def cd(to: Optional[Union[Path, str]] = None) -> Iterator[None]:
|
||||
"""
|
||||
changes current working directory to @to or to the `git root` if @to is None
|
||||
:param to:
|
||||
:return:
|
||||
"""
|
||||
if not to:
|
||||
try:
|
||||
to = Shell.get_output_or_raise("git rev-parse --show-toplevel")
|
||||
except:
|
||||
pass
|
||||
if not to:
|
||||
if Path(_Settings.DOCKER_WD).is_dir():
|
||||
to = _Settings.DOCKER_WD
|
||||
if not to:
|
||||
assert False, "FIX IT"
|
||||
assert to
|
||||
old_pwd = os.getcwd()
|
||||
os.chdir(to)
|
||||
try:
|
||||
yield
|
||||
finally:
|
||||
os.chdir(old_pwd)
|
||||
|
||||
|
||||
class Shell:
|
||||
@classmethod
|
||||
def get_output_or_raise(cls, command, verbose=False):
|
||||
return cls.get_output(command, verbose=verbose, strict=True).strip()
|
||||
|
||||
@classmethod
|
||||
def get_output(cls, command, strict=False, verbose=False):
|
||||
if verbose:
|
||||
print(f"Run command [{command}]")
|
||||
res = subprocess.run(
|
||||
command,
|
||||
shell=True,
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE,
|
||||
text=True,
|
||||
)
|
||||
if res.stderr:
|
||||
print(f"WARNING: stderr: {res.stderr.strip()}")
|
||||
if strict and res.returncode != 0:
|
||||
raise RuntimeError(f"command failed with {res.returncode}")
|
||||
return res.stdout.strip()
|
||||
|
||||
@classmethod
|
||||
def get_res_stdout_stderr(cls, command, verbose=True):
|
||||
if verbose:
|
||||
print(f"Run command [{command}]")
|
||||
res = subprocess.run(
|
||||
command,
|
||||
shell=True,
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE,
|
||||
text=True,
|
||||
)
|
||||
return res.returncode, res.stdout.strip(), res.stderr.strip()
|
||||
|
||||
@classmethod
|
||||
def check(
|
||||
cls,
|
||||
command,
|
||||
log_file=None,
|
||||
strict=False,
|
||||
verbose=False,
|
||||
dry_run=False,
|
||||
stdin_str=None,
|
||||
timeout=None,
|
||||
retries=0,
|
||||
**kwargs,
|
||||
):
|
||||
return (
|
||||
cls.run(
|
||||
command,
|
||||
log_file,
|
||||
strict,
|
||||
verbose,
|
||||
dry_run,
|
||||
stdin_str,
|
||||
retries=retries,
|
||||
timeout=timeout,
|
||||
**kwargs,
|
||||
)
|
||||
== 0
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def run(
|
||||
cls,
|
||||
command,
|
||||
log_file=None,
|
||||
strict=False,
|
||||
verbose=False,
|
||||
dry_run=False,
|
||||
stdin_str=None,
|
||||
timeout=None,
|
||||
retries=0,
|
||||
**kwargs,
|
||||
):
|
||||
def _check_timeout(timeout, process) -> None:
|
||||
if not timeout:
|
||||
return
|
||||
time.sleep(timeout)
|
||||
print(
|
||||
f"WARNING: Timeout exceeded [{timeout}], sending SIGTERM to process group [{process.pid}]"
|
||||
)
|
||||
try:
|
||||
os.killpg(process.pid, signal.SIGTERM)
|
||||
except ProcessLookupError:
|
||||
print("Process already terminated.")
|
||||
return
|
||||
|
||||
time_wait = 0
|
||||
wait_interval = 5
|
||||
|
||||
# Wait for process to terminate
|
||||
while process.poll() is None and time_wait < 100:
|
||||
print("Waiting for process to exit...")
|
||||
time.sleep(wait_interval)
|
||||
time_wait += wait_interval
|
||||
|
||||
# Force kill if still running
|
||||
if process.poll() is None:
|
||||
print(f"WARNING: Process still running after SIGTERM, sending SIGKILL")
|
||||
try:
|
||||
os.killpg(process.pid, signal.SIGKILL)
|
||||
except ProcessLookupError:
|
||||
print("Process already terminated.")
|
||||
|
||||
# Dry-run
|
||||
if dry_run:
|
||||
print(f"Dry-run. Would run command [{command}]")
|
||||
return 0 # Return success for dry-run
|
||||
|
||||
if verbose:
|
||||
print(f"Run command: [{command}]")
|
||||
|
||||
log_file = log_file or "/dev/null"
|
||||
proc = None
|
||||
for retry in range(retries + 1):
|
||||
try:
|
||||
with open(log_file, "w") as log_fp:
|
||||
proc = subprocess.Popen(
|
||||
command,
|
||||
shell=True,
|
||||
stderr=subprocess.STDOUT,
|
||||
stdout=subprocess.PIPE,
|
||||
stdin=subprocess.PIPE if stdin_str else None,
|
||||
universal_newlines=True,
|
||||
start_new_session=True, # Start a new process group for signal handling
|
||||
bufsize=1, # Line-buffered
|
||||
errors="backslashreplace",
|
||||
**kwargs,
|
||||
)
|
||||
|
||||
# Start the timeout thread if specified
|
||||
if timeout:
|
||||
t = Thread(target=_check_timeout, args=(timeout, proc))
|
||||
t.daemon = True
|
||||
t.start()
|
||||
|
||||
# Write stdin if provided
|
||||
if stdin_str:
|
||||
proc.stdin.write(stdin_str)
|
||||
proc.stdin.close()
|
||||
|
||||
# Process output in real-time
|
||||
if proc.stdout:
|
||||
for line in proc.stdout:
|
||||
sys.stdout.write(line)
|
||||
log_fp.write(line)
|
||||
|
||||
proc.wait() # Wait for the process to finish
|
||||
|
||||
if proc.returncode == 0:
|
||||
break # Exit retry loop if success
|
||||
else:
|
||||
if verbose:
|
||||
print(
|
||||
f"ERROR: command [{command}] failed, exit code: {proc.returncode}, retry: {retry}/{retries}"
|
||||
)
|
||||
except Exception as e:
|
||||
if verbose:
|
||||
print(
|
||||
f"ERROR: command failed, exception: {e}, retry: {retry}/{retries}"
|
||||
)
|
||||
if proc:
|
||||
proc.kill()
|
||||
|
||||
# Handle strict mode (ensure process success or fail)
|
||||
if strict:
|
||||
assert (
|
||||
proc and proc.returncode == 0
|
||||
), f"Command failed with return code {proc.returncode}"
|
||||
|
||||
return proc.returncode if proc else 1 # Return 1 if process never started
|
||||
|
||||
@classmethod
|
||||
def run_async(
|
||||
cls,
|
||||
command,
|
||||
stdin_str=None,
|
||||
verbose=False,
|
||||
suppress_output=False,
|
||||
**kwargs,
|
||||
):
|
||||
if verbose:
|
||||
print(f"Run command in background [{command}]")
|
||||
proc = subprocess.Popen(
|
||||
command,
|
||||
shell=True,
|
||||
stderr=subprocess.STDOUT if not suppress_output else subprocess.DEVNULL,
|
||||
stdout=subprocess.PIPE if not suppress_output else subprocess.DEVNULL,
|
||||
stdin=subprocess.PIPE if stdin_str else None,
|
||||
universal_newlines=True,
|
||||
start_new_session=True,
|
||||
bufsize=1,
|
||||
errors="backslashreplace",
|
||||
**kwargs,
|
||||
)
|
||||
if proc.stdout:
|
||||
for line in proc.stdout:
|
||||
print(line, end="")
|
||||
return proc
|
||||
|
||||
|
||||
class Utils:
|
||||
@staticmethod
|
||||
def terminate_process_group(pid, force=False):
|
||||
if not force:
|
||||
os.killpg(os.getpgid(pid), signal.SIGTERM)
|
||||
else:
|
||||
os.killpg(os.getpgid(pid), signal.SIGKILL)
|
||||
|
||||
@staticmethod
|
||||
def set_env(key, val):
|
||||
os.environ[key] = val
|
||||
|
||||
@staticmethod
|
||||
def print_formatted_error(error_message, stdout="", stderr=""):
|
||||
stdout_lines = stdout.splitlines() if stdout else []
|
||||
stderr_lines = stderr.splitlines() if stderr else []
|
||||
print(f"ERROR: {error_message}")
|
||||
if stdout_lines:
|
||||
print(" Out:")
|
||||
for line in stdout_lines:
|
||||
print(f" | {line}")
|
||||
if stderr_lines:
|
||||
print(" Err:")
|
||||
for line in stderr_lines:
|
||||
print(f" | {line}")
|
||||
|
||||
@staticmethod
|
||||
def sleep(seconds):
|
||||
time.sleep(seconds)
|
||||
|
||||
@staticmethod
|
||||
def cwd():
|
||||
return Path.cwd()
|
||||
|
||||
@staticmethod
|
||||
def cpu_count():
|
||||
return multiprocessing.cpu_count()
|
||||
|
||||
@staticmethod
|
||||
def raise_with_error(error_message, stdout="", stderr=""):
|
||||
Utils.print_formatted_error(error_message, stdout, stderr)
|
||||
raise
|
||||
|
||||
@staticmethod
|
||||
def timestamp():
|
||||
return datetime.utcnow().timestamp()
|
||||
|
||||
@staticmethod
|
||||
def timestamp_to_str(timestamp):
|
||||
return datetime.utcfromtimestamp(timestamp).strftime("%Y-%m-%d %H:%M:%S")
|
||||
|
||||
@staticmethod
|
||||
def get_failed_tests_number(description: str) -> Optional[int]:
|
||||
description = description.lower()
|
||||
|
||||
pattern = r"fail:\s*(\d+)\s*(?=,|$)"
|
||||
match = re.search(pattern, description)
|
||||
if match:
|
||||
return int(match.group(1))
|
||||
return None
|
||||
|
||||
@staticmethod
|
||||
def is_killed_with_oom():
|
||||
if Shell.check(
|
||||
"sudo dmesg -T | grep -q -e 'Out of memory: Killed process' -e 'oom_reaper: reaped process' -e 'oom-kill:constraint=CONSTRAINT_NONE'"
|
||||
):
|
||||
return True
|
||||
return False
|
||||
|
||||
@staticmethod
|
||||
def clear_dmesg():
|
||||
Shell.check("sudo dmesg --clear", verbose=True)
|
||||
|
||||
@staticmethod
|
||||
def to_base64(value):
|
||||
assert isinstance(value, str), f"TODO: not supported for {type(value)}"
|
||||
string_bytes = value.encode("utf-8")
|
||||
base64_bytes = base64.b64encode(string_bytes)
|
||||
base64_string = base64_bytes.decode("utf-8")
|
||||
return base64_string
|
||||
|
||||
@staticmethod
|
||||
def is_hex(s):
|
||||
try:
|
||||
int(s, 16)
|
||||
return True
|
||||
except ValueError:
|
||||
return False
|
||||
|
||||
@staticmethod
|
||||
def normalize_string(string: str) -> str:
|
||||
res = string.lower()
|
||||
for r in (
|
||||
(" ", "_"),
|
||||
("(", ""),
|
||||
(")", ""),
|
||||
("{", ""),
|
||||
("}", ""),
|
||||
("'", ""),
|
||||
("[", ""),
|
||||
("]", ""),
|
||||
(",", ""),
|
||||
("/", "_"),
|
||||
("-", "_"),
|
||||
(":", ""),
|
||||
('"', ""),
|
||||
):
|
||||
res = res.replace(*r)
|
||||
return res
|
||||
|
||||
@staticmethod
|
||||
def traverse_path(path, file_suffixes=None, sorted=False, not_exists_ok=False):
|
||||
res = []
|
||||
|
||||
def is_valid_file(file):
|
||||
if file_suffixes is None:
|
||||
return True
|
||||
return any(file.endswith(suffix) for suffix in file_suffixes)
|
||||
|
||||
if os.path.isfile(path):
|
||||
if is_valid_file(path):
|
||||
res.append(path)
|
||||
elif os.path.isdir(path):
|
||||
for root, dirs, files in os.walk(path):
|
||||
for file in files:
|
||||
full_path = os.path.join(root, file)
|
||||
if is_valid_file(full_path):
|
||||
res.append(full_path)
|
||||
elif "*" in str(path):
|
||||
res.extend(
|
||||
[
|
||||
f
|
||||
for f in glob.glob(path, recursive=True)
|
||||
if os.path.isfile(f) and is_valid_file(f)
|
||||
]
|
||||
)
|
||||
else:
|
||||
if not_exists_ok:
|
||||
pass
|
||||
else:
|
||||
assert False, f"File does not exist or not valid [{path}]"
|
||||
|
||||
if sorted:
|
||||
res.sort(reverse=True)
|
||||
|
||||
return res
|
||||
|
||||
@classmethod
|
||||
def traverse_paths(
|
||||
cls,
|
||||
include_paths,
|
||||
exclude_paths,
|
||||
file_suffixes=None,
|
||||
sorted=False,
|
||||
not_exists_ok=False,
|
||||
) -> List["str"]:
|
||||
included_files_ = set()
|
||||
for path in include_paths:
|
||||
included_files_.update(cls.traverse_path(path, file_suffixes=file_suffixes))
|
||||
|
||||
excluded_files = set()
|
||||
for path in exclude_paths:
|
||||
res = cls.traverse_path(path, not_exists_ok=not_exists_ok)
|
||||
if not res:
|
||||
print(
|
||||
f"WARNING: Utils.traverse_paths excluded 0 files by path [{path}] in exclude_paths"
|
||||
)
|
||||
else:
|
||||
excluded_files.update(res)
|
||||
res = [f for f in included_files_ if f not in excluded_files]
|
||||
if sorted:
|
||||
res.sort(reverse=True)
|
||||
return res
|
||||
|
||||
@classmethod
|
||||
def add_to_PATH(cls, path):
|
||||
path_cur = os.getenv("PATH", "")
|
||||
if path_cur:
|
||||
path += ":" + path_cur
|
||||
os.environ["PATH"] = path
|
||||
|
||||
class Stopwatch:
|
||||
def __init__(self):
|
||||
self.start_time = datetime.utcnow().timestamp()
|
||||
|
||||
@property
|
||||
def duration(self) -> float:
|
||||
return datetime.utcnow().timestamp() - self.start_time
|
||||
|
||||
|
||||
class TeePopen:
|
||||
def __init__(
|
||||
self,
|
||||
command: str,
|
||||
log_file: Union[str, Path] = "",
|
||||
env: Optional[dict] = None,
|
||||
timeout: Optional[int] = None,
|
||||
):
|
||||
self.command = command
|
||||
self.log_file_name = log_file
|
||||
self.log_file = None
|
||||
self.env = env or os.environ.copy()
|
||||
self.process = None # type: Optional[subprocess.Popen]
|
||||
self.timeout = timeout
|
||||
self.timeout_exceeded = False
|
||||
self.terminated_by_sigterm = False
|
||||
self.terminated_by_sigkill = False
|
||||
|
||||
def _check_timeout(self) -> None:
|
||||
if self.timeout is None:
|
||||
return
|
||||
time.sleep(self.timeout)
|
||||
print(
|
||||
f"WARNING: Timeout exceeded [{self.timeout}], send SIGTERM to [{self.process.pid}] and give a chance for graceful termination"
|
||||
)
|
||||
self.send_signal(signal.SIGTERM)
|
||||
time_wait = 0
|
||||
self.terminated_by_sigterm = True
|
||||
self.timeout_exceeded = True
|
||||
while self.process.poll() is None and time_wait < 100:
|
||||
print("wait...")
|
||||
wait = 5
|
||||
time.sleep(wait)
|
||||
time_wait += wait
|
||||
while self.process.poll() is None:
|
||||
print(f"WARNING: Still running, send SIGKILL to [{self.process.pid}]")
|
||||
self.send_signal(signal.SIGKILL)
|
||||
self.terminated_by_sigkill = True
|
||||
time.sleep(2)
|
||||
|
||||
def __enter__(self) -> "TeePopen":
|
||||
if self.log_file_name:
|
||||
self.log_file = open(self.log_file_name, "w", encoding="utf-8")
|
||||
self.process = subprocess.Popen(
|
||||
self.command,
|
||||
shell=True,
|
||||
universal_newlines=True,
|
||||
env=self.env,
|
||||
start_new_session=True,  # signal will be sent to all children
|
||||
stderr=subprocess.STDOUT,
|
||||
stdout=subprocess.PIPE,
|
||||
bufsize=1,
|
||||
errors="backslashreplace",
|
||||
)
|
||||
time.sleep(1)
|
||||
print(f"Subprocess started, pid [{self.process.pid}]")
|
||||
if self.timeout is not None and self.timeout > 0:
|
||||
t = Thread(target=self._check_timeout)
|
||||
t.daemon = True # does not block the program from exit
|
||||
t.start()
|
||||
return self
|
||||
|
||||
def __exit__(self, exc_type, exc_value, traceback):
|
||||
self.wait()
|
||||
if self.log_file:
|
||||
self.log_file.close()
|
||||
|
||||
def wait(self) -> int:
|
||||
if self.process.stdout is not None:
|
||||
for line in self.process.stdout:
|
||||
sys.stdout.write(line)
|
||||
if self.log_file:
|
||||
self.log_file.write(line)
|
||||
|
||||
return self.process.wait()
|
||||
|
||||
def poll(self):
|
||||
return self.process.poll()
|
||||
|
||||
def send_signal(self, signal_num):
|
||||
os.killpg(self.process.pid, signal_num)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
||||
@dataclasses.dataclass
|
||||
class Test(MetaClasses.Serializable):
|
||||
name: str
|
||||
|
||||
@staticmethod
|
||||
def file_name_static(name):
|
||||
return f"/tmp/{Utils.normalize_string(name)}.json"
|
||||
|
||||
Test(name="dsada").dump()
|
||||
t = Test.from_fs("dsada")
|
||||
print(t)
|
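The Shell and Utils helpers are used throughout praktika; a short hedged sketch of the most common calls (the commands themselves are arbitrary examples, only the helper signatures come from utils.py):

# Arbitrary example commands; only the helper signatures come from utils.py.
from praktika.utils import Shell, Utils

ok = Shell.check("git status --short", verbose=True)        # True/False by exit code
exit_code = Shell.run("make build", log_file="/tmp/build.log", timeout=3600, retries=1)
rc, out, err = Shell.get_res_stdout_stderr("uname -a")

print(Utils.normalize_string("My Job (release)"))  # -> my_job_release
sw = Utils.Stopwatch()
print(sw.duration)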
208
ci/praktika/validator.py
Normal file
@ -0,0 +1,208 @@
|
||||
import glob
|
||||
import sys
|
||||
from itertools import chain
|
||||
from pathlib import Path
|
||||
|
||||
from praktika import Workflow
|
||||
from praktika._settings import GHRunners
|
||||
from praktika.mangle import _get_workflows
|
||||
from praktika.settings import Settings
|
||||
from praktika.utils import ContextManager
|
||||
|
||||
|
||||
class Validator:
|
||||
@classmethod
|
||||
def validate(cls):
|
||||
print("---Start validating Pipeline and settings---")
|
||||
workflows = _get_workflows()
|
||||
for workflow in workflows:
|
||||
print(f"Validating workflow [{workflow.name}]")
|
||||
|
||||
cls.validate_file_paths_in_run_command(workflow)
|
||||
cls.validate_file_paths_in_digest_configs(workflow)
|
||||
cls.validate_requirements_txt_files(workflow)
|
||||
cls.validate_dockers(workflow)
|
||||
|
||||
if workflow.artifacts:
|
||||
for artifact in workflow.artifacts:
|
||||
if artifact.is_s3_artifact():
|
||||
assert (
|
||||
Settings.S3_ARTIFACT_PATH
|
||||
), "Provide S3_ARTIFACT_PATH setting in any .py file in ./ci/settings/* to be able to use s3 for artifacts"
|
||||
|
||||
for job in workflow.jobs:
|
||||
if job.requires and workflow.artifacts:
|
||||
for require in job.requires:
|
||||
if (
|
||||
require in workflow.artifacts
|
||||
and workflow.artifacts[require].is_s3_artifact()
|
||||
):
|
||||
assert not any(
|
||||
[r in GHRunners for r in job.runs_on]
|
||||
), f"GH runners [{job.name}:{job.runs_on}] must not be used with S3 as artifact storage"
|
||||
|
||||
if job.allow_merge_on_failure:
|
||||
assert (
|
||||
workflow.enable_merge_ready_status
|
||||
), f"Job property allow_merge_on_failure must be used only with enabled workflow.enable_merge_ready_status, workflow [{workflow.name}], job [{job.name}]"
|
||||
|
||||
if workflow.enable_cache:
|
||||
assert (
|
||||
Settings.CI_CONFIG_RUNS_ON
|
||||
), f"Runner label to run workflow config job must be provided via CACHE_CONFIG_RUNS_ON setting if enable_cache=True, workflow [{workflow.name}]"
|
||||
|
||||
assert (
|
||||
Settings.CACHE_S3_PATH
|
||||
), f"CACHE_S3_PATH Setting must be defined if enable_cache=True, workflow [{workflow.name}]"
|
||||
|
||||
if workflow.dockers:
|
||||
cls.evaluate_check(
|
||||
Settings.DOCKER_BUILD_RUNS_ON,
|
||||
f"DOCKER_BUILD_RUNS_ON settings must be defined if workflow has dockers",
|
||||
workflow_name=workflow.name,
|
||||
)
|
||||
|
||||
if workflow.enable_report:
|
||||
assert (
|
||||
Settings.HTML_S3_PATH
|
||||
), f"HTML_S3_PATH Setting must be defined if enable_html=True, workflow [{workflow.name}]"
|
||||
assert (
|
||||
Settings.S3_BUCKET_TO_HTTP_ENDPOINT
|
||||
), f"S3_BUCKET_TO_HTTP_ENDPOINT Setting must be defined if enable_html=True, workflow [{workflow.name}]"
|
||||
assert (
|
||||
Settings.HTML_S3_PATH.split("/")[0]
|
||||
in Settings.S3_BUCKET_TO_HTTP_ENDPOINT
|
||||
), f"S3_BUCKET_TO_HTTP_ENDPOINT Setting must include bucket name [{Settings.HTML_S3_PATH}] from HTML_S3_PATH, workflow [{workflow.name}]"
|
||||
|
||||
if workflow.enable_cache:
|
||||
for artifact in workflow.artifacts or []:
|
||||
assert (
|
||||
artifact.is_s3_artifact()
|
||||
), f"All artifacts must be of S3 type if enable_cache|enable_html=True, artifact [{artifact.name}], type [{artifact.type}], workflow [{workflow.name}]"
|
||||
|
||||
if workflow.dockers:
|
||||
assert (
|
||||
Settings.DOCKERHUB_USERNAME
|
||||
), f"Settings.DOCKERHUB_USERNAME must be provided if workflow has dockers, workflow [{workflow.name}]"
|
||||
assert (
|
||||
Settings.DOCKERHUB_SECRET
|
||||
), f"Settings.DOCKERHUB_SECRET must be provided if workflow has dockers, workflow [{workflow.name}]"
|
||||
assert workflow.get_secret(
|
||||
Settings.DOCKERHUB_SECRET
|
||||
), f"Secret [{Settings.DOCKERHUB_SECRET}] must have configuration in workflow.secrets, workflow [{workflow.name}]"
|
||||
|
||||
if (
|
||||
workflow.enable_cache
|
||||
or workflow.enable_report
|
||||
or workflow.enable_merge_ready_status
|
||||
):
|
||||
for job in workflow.jobs:
|
||||
assert not any(
|
||||
job in ("ubuntu-latest",) for job in job.runs_on
|
||||
), f"GitHub Runners must not be used for workflow with enabled: workflow.enable_cache, workflow.enable_html or workflow.enable_merge_ready_status as s3 access is required, workflow [{workflow.name}], job [{job.name}]"
|
||||
|
||||
if workflow.enable_cidb:
|
||||
assert (
|
||||
Settings.SECRET_CI_DB_URL
|
||||
), f"Settings.CI_DB_URL_SECRET must be provided if workflow.enable_cidb=True, workflow [{workflow.name}]"
|
||||
assert (
|
||||
Settings.SECRET_CI_DB_PASSWORD
|
||||
), f"Settings.CI_DB_PASSWORD_SECRET must be provided if workflow.enable_cidb=True, workflow [{workflow.name}]"
|
||||
assert (
|
||||
Settings.CI_DB_DB_NAME
|
||||
), f"Settings.CI_DB_DB_NAME must be provided if workflow.enable_cidb=True, workflow [{workflow.name}]"
|
||||
assert (
|
||||
Settings.CI_DB_TABLE_NAME
|
||||
), f"Settings.CI_DB_TABLE_NAME must be provided if workflow.enable_cidb=True, workflow [{workflow.name}]"
|
||||
|
||||
@classmethod
|
||||
def validate_file_paths_in_run_command(cls, workflow: Workflow.Config) -> None:
|
||||
if not Settings.VALIDATE_FILE_PATHS:
|
||||
return
|
||||
with ContextManager.cd():
|
||||
for job in workflow.jobs:
|
||||
run_command = job.command
|
||||
command_parts = run_command.split(" ")
|
||||
for part in command_parts:
|
||||
if ">" in part:
|
||||
return
|
||||
if "/" in part:
|
||||
assert (
|
||||
Path(part).is_file() or Path(part).is_dir()
|
||||
), f"Apparently run command [{run_command}] for job [{job}] has invalid path [{part}]. Setting to disable check: VALIDATE_FILE_PATHS"
|
||||
|
||||
@classmethod
|
||||
def validate_file_paths_in_digest_configs(cls, workflow: Workflow.Config) -> None:
|
||||
if not Settings.VALIDATE_FILE_PATHS:
|
||||
return
|
||||
with ContextManager.cd():
|
||||
for job in workflow.jobs:
|
||||
if not job.digest_config:
|
||||
continue
|
||||
for include_path in chain(
|
||||
job.digest_config.include_paths, job.digest_config.exclude_paths
|
||||
):
|
||||
if "*" in include_path:
|
||||
assert glob.glob(
|
||||
include_path, recursive=True
|
||||
), f"Apparently file glob [{include_path}] in job [{job.name}] digest_config [{job.digest_config}] invalid, workflow [{workflow.name}]. Setting to disable check: VALIDATE_FILE_PATHS"
|
||||
else:
|
||||
assert (
|
||||
Path(include_path).is_file() or Path(include_path).is_dir()
|
||||
), f"Apparently file path [{include_path}] in job [{job.name}] digest_config [{job.digest_config}] invalid, workflow [{workflow.name}]. Setting to disable check: VALIDATE_FILE_PATHS"
|
||||
|
||||
@classmethod
|
||||
def validate_requirements_txt_files(cls, workflow: Workflow.Config) -> None:
|
||||
with ContextManager.cd():
|
||||
for job in workflow.jobs:
|
||||
if job.job_requirements:
|
||||
if job.job_requirements.python_requirements_txt:
|
||||
path = Path(job.job_requirements.python_requirements_txt)
|
||||
message = f"File with py requirement [{path}] does not exist"
|
||||
if job.name in (
|
||||
Settings.DOCKER_BUILD_JOB_NAME,
|
||||
Settings.CI_CONFIG_JOB_NAME,
|
||||
Settings.FINISH_WORKFLOW_JOB_NAME,
|
||||
):
|
||||
message += '\n If all requirements are already installed on your runners - add setting INSTALL_PYTHON_REQS_FOR_NATIVE_JOBS""'
|
||||
message += "\n If requirements needs to be installed - add requirements file (Settings.INSTALL_PYTHON_REQS_FOR_NATIVE_JOBS):"
|
||||
message += "\n echo jwt==1.3.1 > ./ci/requirements.txt"
|
||||
message += (
|
||||
"\n echo requests==2.32.3 >> ./ci/requirements.txt"
|
||||
)
|
||||
message += "\n echo https://clickhouse-builds.s3.amazonaws.com/packages/praktika-0.1-py3-none-any.whl >> ./ci/requirements.txt"
|
||||
cls.evaluate_check(
|
||||
path.is_file(), message, workflow.name, job.name  # matches evaluate_check(check_ok, message, workflow_name, job_name)
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def validate_dockers(cls, workflow: Workflow.Config):
|
||||
names = []
|
||||
for docker in workflow.dockers:
|
||||
cls.evaluate_check(
|
||||
docker.name not in names,
|
||||
f"Non uniq docker name [{docker.name}]",
|
||||
workflow_name=workflow.name,
|
||||
)
|
||||
names.append(docker.name)
|
||||
for docker in workflow.dockers:
|
||||
for docker_dep in docker.depends_on:
|
||||
cls.evaluate_check(
|
||||
docker_dep in names,
|
||||
f"Docker [{docker.name}] has invalid dependency [{docker_dep}]",
|
||||
workflow_name=workflow.name,
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def evaluate_check(cls, check_ok, message, workflow_name, job_name=""):
|
||||
message = message.split("\n")
|
||||
messages = [message] if not isinstance(message, list) else message
|
||||
if check_ok:
|
||||
return
|
||||
else:
|
||||
print(
|
||||
f"ERROR: Config validation failed: workflow [{workflow_name}], job [{job_name}]:"
|
||||
)
|
||||
for message in messages:
|
||||
print(" || " + message)
|
||||
sys.exit(1)
|
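As a side note, evaluate_check above is the common failure path for all of these assertions. A minimal sketch of how it behaves when a multi-line message is passed; the condition and names here are placeholders, not taken from a real workflow:

from praktika.validator import Validator  # class defined in the file above

Validator.evaluate_check(
    check_ok=False,  # placeholder failing condition
    message="Requirement file is missing\nCreate ./ci/requirements.txt first",
    workflow_name="PR",
    job_name="Fast test",
)
# Prints "ERROR: Config validation failed: workflow [PR], job [Fast test]:",
# then each message line prefixed with " || ", and exits with code 1.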
1
ci/praktika/version.py
Normal file
@ -0,0 +1 @@
|
||||
VERSION = 1
|
68
ci/praktika/workflow.py
Normal file
@ -0,0 +1,68 @@
|
||||
from dataclasses import dataclass, field
|
||||
from typing import List, Optional
|
||||
|
||||
from praktika import Artifact, Job
|
||||
from praktika.docker import Docker
|
||||
from praktika.secret import Secret
|
||||
from praktika.utils import Utils
|
||||
|
||||
|
||||
class Workflow:
|
||||
class Event:
|
||||
PULL_REQUEST = "pull_request"
|
||||
PUSH = "push"
|
||||
|
||||
@dataclass
|
||||
class Config:
|
||||
"""
|
||||
branches - List of branch names or patterns, for push trigger only
|
||||
base_branches - List of base branches (target branch), for pull_request trigger only
|
||||
"""
|
||||
|
||||
name: str
|
||||
event: str
|
||||
jobs: List[Job.Config]
|
||||
branches: List[str] = field(default_factory=list)
|
||||
base_branches: List[str] = field(default_factory=list)
|
||||
artifacts: List[Artifact.Config] = field(default_factory=list)
|
||||
dockers: List[Docker.Config] = field(default_factory=list)
|
||||
secrets: List[Secret.Config] = field(default_factory=list)
|
||||
enable_cache: bool = False
|
||||
enable_report: bool = False
|
||||
enable_merge_ready_status: bool = False
|
||||
enable_cidb: bool = False
|
||||
|
||||
def is_event_pull_request(self):
|
||||
return self.event == Workflow.Event.PULL_REQUEST
|
||||
|
||||
def is_event_push(self):
|
||||
return self.event == Workflow.Event.PUSH
|
||||
|
||||
def get_job(self, name):
|
||||
job = self.find_job(name)
|
||||
if not job:
|
||||
Utils.raise_with_error(
|
||||
f"Failed to find job [{name}], workflow [{self.name}]"
|
||||
)
|
||||
return job
|
||||
|
||||
def find_job(self, name, lazy=False):
|
||||
name = str(name)
|
||||
for job in self.jobs:
|
||||
if lazy:
|
||||
if name.lower() in job.name.lower():
|
||||
return job
|
||||
else:
|
||||
if job.name == name:
|
||||
return job
|
||||
return None
|
||||
|
||||
def get_secret(self, name) -> Optional[Secret.Config]:
|
||||
name = str(name)
|
||||
names = []
|
||||
for secret in self.secrets:
|
||||
if secret.name == name:
|
||||
return secret
|
||||
names.append(secret.name)
|
||||
print(f"ERROR: Failed to find secret [{name}], workflow secrets [{names}]")
|
||||
raise
|
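To make the Workflow.Config dataclass above concrete, here is a small illustrative configuration; the job name, runner label and command are placeholders, while the field names and helper methods come straight from the class:

# Illustrative sketch only: "Example Job", "my-runner-label" and the command path are placeholders.
from praktika import Job, Workflow

workflow = Workflow.Config(
    name="PR",
    event=Workflow.Event.PULL_REQUEST,
    base_branches=["master"],  # target branches, used for pull_request triggers
    jobs=[
        Job.Config(
            name="Example Job",
            runs_on=["my-runner-label"],
            command="python3 ./ci/jobs/example.py",
        ),
    ],
)

assert workflow.is_event_pull_request()
job = workflow.get_job("Example Job")  # raises via Utils.raise_with_error if the name is unknown
assert workflow.find_job("example", lazy=True) is job  # lazy lookup is case-insensitive substring match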
349
ci/praktika/yaml_generator.py
Normal file
@ -0,0 +1,349 @@
|
||||
import dataclasses
|
||||
from typing import List
|
||||
|
||||
from praktika import Artifact, Job, Workflow
|
||||
from praktika.mangle import _get_workflows
|
||||
from praktika.parser import WorkflowConfigParser
|
||||
from praktika.runtime import RunConfig
|
||||
from praktika.settings import Settings
|
||||
from praktika.utils import ContextManager, Shell, Utils
|
||||
|
||||
|
||||
class YamlGenerator:
|
||||
class Templates:
|
||||
TEMPLATE_PULL_REQUEST_0 = """\
|
||||
# generated by praktika
|
||||
|
||||
name: {NAME}
|
||||
|
||||
on:
|
||||
{EVENT}:
|
||||
branches: [{BRANCHES}]
|
||||
|
||||
# Cancel the previous wf run in PRs.
|
||||
concurrency:
|
||||
group: ${{{{{{{{ github.workflow }}}}}}}}-${{{{{{{{ github.ref }}}}}}}}
|
||||
cancel-in-progress: true
|
||||
|
||||
env:
|
||||
# Force the stdout and stderr streams to be unbuffered
|
||||
PYTHONUNBUFFERED: 1
|
||||
GH_TOKEN: ${{{{{{{{ github.token }}}}}}}}
|
||||
|
||||
# Allow updating GH commit statuses and PR comments to post an actual job reports link
|
||||
permissions: write-all
|
||||
|
||||
jobs:
|
||||
{JOBS}\
|
||||
"""
|
||||
|
||||
TEMPLATE_CALLABLE_WORKFLOW = """\
|
||||
# generated by praktika
|
||||
|
||||
name: {NAME}
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
config:
|
||||
type: string
|
||||
required: false
|
||||
default: ''
|
||||
secrets:
|
||||
{SECRETS}
|
||||
|
||||
env:
|
||||
PYTHONUNBUFFERED: 1
|
||||
|
||||
jobs:
|
||||
{JOBS}\
|
||||
"""
|
||||
|
||||
TEMPLATE_SECRET_CONFIG = """\
|
||||
{SECRET_NAME}:
|
||||
required: true
|
||||
"""
|
||||
|
||||
TEMPLATE_MATRIX = """
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
params: {PARAMS_LIST}\
|
||||
"""
|
||||
|
||||
TEMPLATE_JOB_0 = """
|
||||
{JOB_NAME_NORMALIZED}:
|
||||
runs-on: [{RUNS_ON}]
|
||||
needs: [{NEEDS}]{IF_EXPRESSION}
|
||||
name: "{JOB_NAME_GH}"
|
||||
outputs:
|
||||
data: ${{{{ steps.run.outputs.DATA }}}}
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
{JOB_ADDONS}
|
||||
- name: Prepare env script
|
||||
run: |
|
||||
export PYTHONPATH=.:$PYTHONPATH
|
||||
cat > {ENV_SETUP_SCRIPT} << 'ENV_SETUP_SCRIPT_EOF'
|
||||
{SETUP_ENVS}
|
||||
cat > {WORKFLOW_CONFIG_FILE} << 'EOF'
|
||||
${{{{ needs.{WORKFLOW_CONFIG_JOB_NAME}.outputs.data }}}}
|
||||
EOF
|
||||
cat > {WORKFLOW_STATUS_FILE} << 'EOF'
|
||||
${{{{ toJson(needs) }}}}
|
||||
EOF
|
||||
ENV_SETUP_SCRIPT_EOF
|
||||
|
||||
rm -rf {INPUT_DIR} {OUTPUT_DIR} {TEMP_DIR}
|
||||
mkdir -p {TEMP_DIR} {INPUT_DIR} {OUTPUT_DIR}
|
||||
{DOWNLOADS_GITHUB}
|
||||
- name: Run
|
||||
id: run
|
||||
run: |
|
||||
set -o pipefail
|
||||
{PYTHON} -m praktika run --job '''{JOB_NAME}''' --workflow "{WORKFLOW_NAME}" --ci |& tee {RUN_LOG}
|
||||
{UPLOADS_GITHUB}\
|
||||
"""
|
||||
|
||||
TEMPLATE_SETUP_ENV_SECRETS = """\
|
||||
export {SECRET_NAME}=$(cat<<'EOF'
|
||||
${{{{ secrets.{SECRET_NAME} }}}}
|
||||
EOF
|
||||
)\
|
||||
"""
|
||||
|
||||
TEMPLATE_PY_INSTALL = """
|
||||
- name: Set up Python
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: {PYTHON_VERSION}
|
||||
"""
|
||||
|
||||
TEMPLATE_PY_WITH_REQUIREMENTS = """
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
sudo apt-get update && sudo apt install -y python3-pip
|
||||
# TODO: --break-system-packages? otherwise ubuntu's apt/apt-get complains
|
||||
{PYTHON} -m pip install --upgrade pip --break-system-packages
|
||||
{PIP} install -r {REQUIREMENT_PATH} --break-system-packages
|
||||
"""
|
||||
|
||||
TEMPLATE_GH_UPLOAD = """
|
||||
- name: Upload artifact {NAME}
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: {NAME}
|
||||
path: {PATH}
|
||||
"""
|
||||
|
||||
TEMPLATE_GH_DOWNLOAD = """
|
||||
- name: Download artifact {NAME}
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: {NAME}
|
||||
path: {PATH}
|
||||
"""
|
||||
|
||||
TEMPLATE_IF_EXPRESSION = """
|
||||
if: ${{{{ !failure() && !cancelled() && !contains(fromJson(needs.{WORKFLOW_CONFIG_JOB_NAME}.outputs.data).cache_success_base64, '{JOB_NAME_BASE64}') }}}}\
|
||||
"""
|
||||
|
||||
TEMPLATE_IF_EXPRESSION_SKIPPED_OR_SUCCESS = """
|
||||
if: ${{ !failure() && !cancelled() }}\
|
||||
"""
|
||||
|
||||
TEMPLATE_IF_EXPRESSION_NOT_CANCELLED = """
|
||||
if: ${{ !cancelled() }}\
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
self.py_workflows = [] # type: List[Workflow.Config]
|
||||
|
||||
@classmethod
|
||||
def _get_workflow_file_name(cls, workflow_name):
|
||||
return f"{Settings.WORKFLOW_PATH_PREFIX}/{Utils.normalize_string(workflow_name)}.yaml"
|
||||
|
||||
def generate(self, workflow_file="", workflow_config=None):
|
||||
print("---Start generating yaml pipelines---")
|
||||
if workflow_config:
|
||||
self.py_workflows = [workflow_config]
|
||||
else:
|
||||
self.py_workflows = _get_workflows(file=workflow_file)
|
||||
assert self.py_workflows
|
||||
for workflow_config in self.py_workflows:
|
||||
print(f"Generate workflow [{workflow_config.name}]")
|
||||
parser = WorkflowConfigParser(workflow_config).parse()
|
||||
if (
|
||||
workflow_config.is_event_pull_request()
|
||||
or workflow_config.is_event_push()
|
||||
):
|
||||
yaml_workflow_str = PullRequestPushYamlGen(parser).generate()
|
||||
else:
|
||||
assert (
|
||||
False
|
||||
), f"Workflow event not yet supported [{workflow_config.event}]"
|
||||
|
||||
with ContextManager.cd():
|
||||
with open(self._get_workflow_file_name(workflow_config.name), "w") as f:
|
||||
f.write(yaml_workflow_str)
|
||||
|
||||
with ContextManager.cd():
|
||||
Shell.check("git add ./.github/workflows/*.yaml")
|
||||
|
||||
|
||||
class PullRequestPushYamlGen:
|
||||
def __init__(self, parser: WorkflowConfigParser):
|
||||
self.workflow_config = parser.workflow_yaml_config
|
||||
self.parser = parser
|
||||
|
||||
def generate(self):
|
||||
job_items = []
|
||||
for i, job in enumerate(self.workflow_config.jobs):
|
||||
job_name_normalized = Utils.normalize_string(job.name)
|
||||
needs = ", ".join(map(Utils.normalize_string, job.needs))
|
||||
job_name = job.name
|
||||
job_addons = []
|
||||
for addon in job.addons:
|
||||
if addon.install_python:
|
||||
job_addons.append(
|
||||
YamlGenerator.Templates.TEMPLATE_PY_INSTALL.format(
|
||||
PYTHON_VERSION=Settings.PYTHON_VERSION
|
||||
)
|
||||
)
|
||||
if addon.requirements_txt_path:
|
||||
job_addons.append(
|
||||
YamlGenerator.Templates.TEMPLATE_PY_WITH_REQUIREMENTS.format(
|
||||
PYTHON=Settings.PYTHON_INTERPRETER,
|
||||
PIP=Settings.PYTHON_PACKET_MANAGER,
|
||||
PYTHON_VERSION=Settings.PYTHON_VERSION,
|
||||
REQUIREMENT_PATH=addon.requirements_txt_path,
|
||||
)
|
||||
)
|
||||
uploads_github = []
|
||||
for artifact in job.artifacts_gh_provides:
|
||||
uploads_github.append(
|
||||
YamlGenerator.Templates.TEMPLATE_GH_UPLOAD.format(
|
||||
NAME=artifact.name, PATH=artifact.path
|
||||
)
|
||||
)
|
||||
downloads_github = []
|
||||
for artifact in job.artifacts_gh_requires:
|
||||
downloads_github.append(
|
||||
YamlGenerator.Templates.TEMPLATE_GH_DOWNLOAD.format(
|
||||
NAME=artifact.name, PATH=Settings.INPUT_DIR
|
||||
)
|
||||
)
|
||||
|
||||
config_job_name_normalized = Utils.normalize_string(
|
||||
Settings.CI_CONFIG_JOB_NAME
|
||||
)
|
||||
|
||||
if_expression = ""
|
||||
if (
|
||||
self.workflow_config.enable_cache
|
||||
and job_name_normalized != config_job_name_normalized
|
||||
):
|
||||
if_expression = YamlGenerator.Templates.TEMPLATE_IF_EXPRESSION.format(
|
||||
WORKFLOW_CONFIG_JOB_NAME=config_job_name_normalized,
|
||||
JOB_NAME_BASE64=Utils.to_base64(job_name),
|
||||
)
|
||||
if job.run_unless_cancelled:
|
||||
if_expression = (
|
||||
YamlGenerator.Templates.TEMPLATE_IF_EXPRESSION_NOT_CANCELLED
|
||||
)
|
||||
|
||||
secrets_envs = []
|
||||
for secret in self.workflow_config.secret_names_gh:
|
||||
secrets_envs.append(
|
||||
YamlGenerator.Templates.TEMPLATE_SETUP_ENV_SECRETS.format(
|
||||
SECRET_NAME=secret
|
||||
)
|
||||
)
|
||||
|
||||
job_item = YamlGenerator.Templates.TEMPLATE_JOB_0.format(
|
||||
JOB_NAME_NORMALIZED=job_name_normalized,
|
||||
WORKFLOW_CONFIG_JOB_NAME=config_job_name_normalized,
|
||||
IF_EXPRESSION=if_expression,
|
||||
RUNS_ON=", ".join(job.runs_on),
|
||||
NEEDS=needs,
|
||||
JOB_NAME_GH=job_name.replace('"', '\\"'),
|
||||
JOB_NAME=job_name.replace(
|
||||
"'", "'\\''"
|
||||
), # ' must be escaped so that yaml commands are properly parsed
|
||||
WORKFLOW_NAME=self.workflow_config.name,
|
||||
ENV_SETUP_SCRIPT=Settings.ENV_SETUP_SCRIPT,
|
||||
SETUP_ENVS="\n".join(secrets_envs),
|
||||
WORKFLOW_CONFIG_FILE=RunConfig.file_name_static(
|
||||
self.workflow_config.name
|
||||
),
|
||||
JOB_ADDONS="".join(job_addons),
|
||||
DOWNLOADS_GITHUB="\n".join(downloads_github),
|
||||
UPLOADS_GITHUB="\n".join(uploads_github),
|
||||
RUN_LOG=Settings.RUN_LOG,
|
||||
PYTHON=Settings.PYTHON_INTERPRETER,
|
||||
WORKFLOW_STATUS_FILE=Settings.WORKFLOW_STATUS_FILE,
|
||||
TEMP_DIR=Settings.TEMP_DIR,
|
||||
INPUT_DIR=Settings.INPUT_DIR,
|
||||
OUTPUT_DIR=Settings.OUTPUT_DIR,
|
||||
)
|
||||
job_items.append(job_item)
|
||||
|
||||
base_template = YamlGenerator.Templates.TEMPLATE_PULL_REQUEST_0
|
||||
template_1 = base_template.strip().format(
|
||||
NAME=self.workflow_config.name,
|
||||
BRANCHES=", ".join(
|
||||
[f"'{branch}'" for branch in self.workflow_config.branches]
|
||||
),
|
||||
EVENT=self.workflow_config.event,
|
||||
JOBS="{}" * len(job_items),
|
||||
)
|
||||
res = template_1.format(*job_items)
|
||||
|
||||
return res
|
||||
|
||||
|
||||
@dataclasses.dataclass
|
||||
class AuxConfig:
|
||||
# defines aux step to install dependencies
|
||||
addon: Job.Requirements
|
||||
# defines aux step(s) to upload GH artifacts
|
||||
uploads_gh: List[Artifact.Config]
|
||||
# defines aux step(s) to download GH artifacts
|
||||
downloads_gh: List[Artifact.Config]
|
||||
|
||||
def get_aux_workflow_name(self):
|
||||
suffix = ""
|
||||
if self.addon.python_requirements_txt:
|
||||
suffix += "_py"
|
||||
for _ in self.uploads_gh:
|
||||
suffix += "_uplgh"
|
||||
for _ in self.downloads_gh:
|
||||
suffix += "_dnlgh"
|
||||
return f"{Settings.WORKFLOW_PATH_PREFIX}/aux_job{suffix}.yaml"
|
||||
|
||||
def get_aux_workflow_input(self):
|
||||
res = ""
|
||||
if self.addon.python_requirements_txt:
|
||||
res += f" requirements_txt: {self.addon.python_requirements_txt}"
|
||||
return res
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
WFS = [
|
||||
Workflow.Config(
|
||||
name="PR",
|
||||
event=Workflow.Event.PULL_REQUEST,
|
||||
jobs=[
|
||||
Job.Config(
|
||||
name="Hello World",
|
||||
runs_on=["foo"],
|
||||
command="bar",
|
||||
job_requirements=Job.Requirements(
|
||||
python_requirements_txt="./requirement.txt"
|
||||
),
|
||||
)
|
||||
],
|
||||
enable_cache=True,
|
||||
)
|
||||
]
|
||||
YamlGenerator().generate(workflow_config=WFS)
|
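One non-obvious detail in the templates above is the brace escaping: the workflow-level template is passed through str.format twice (first with JOBS set to "{}" placeholders, then with the rendered job bodies), so GitHub Actions expressions are written with eight braces that collapse to the final two. A tiny sketch of that mechanism, using simplified stand-ins for the real templates:

# Two .format() passes halve the literal braces each time: 8 -> 4 -> 2.
outer = "group: ${{{{{{{{ github.workflow }}}}}}}}\njobs:\n{JOBS}"
first_pass = outer.format(JOBS="{}")  # leaves "{}" placeholders for the job bodies
result = first_pass.format("  example_job:\n    runs-on: [self-hosted]")
print(result)
# group: ${{ github.workflow }}
# jobs:
#   example_job:
#     runs-on: [self-hosted]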
@ -7,6 +7,7 @@ S3_BUCKET_HTTP_ENDPOINT = "clickhouse-builds.s3.amazonaws.com"
|
||||
class RunnerLabels:
|
||||
CI_SERVICES = "ci_services"
|
||||
CI_SERVICES_EBS = "ci_services_ebs"
|
||||
BUILDER = "builder"
|
||||
|
||||
|
||||
BASE_BRANCH = "master"
|
||||
@ -29,142 +30,122 @@ SECRETS = [
|
||||
DOCKERS = [
|
||||
# Docker.Config(
|
||||
# name="clickhouse/binary-builder",
|
||||
# path="./docker/packager/binary-builder",
|
||||
# arm64=True,
|
||||
# amd64=True,
|
||||
# path="./ci_v2/docker/packager/binary-builder",
|
||||
# platforms=Docker.Platforms.arm_amd,
|
||||
# depends_on=[],
|
||||
# ),
|
||||
# Docker.Config(
|
||||
# name="clickhouse/cctools",
|
||||
# path="./docker/packager/cctools",
|
||||
# arm64=True,
|
||||
# amd64=True,
|
||||
# path="./ci_v2/docker/packager/cctools",
|
||||
# platforms=Docker.Platforms.arm_amd,
|
||||
# depends_on=[],
|
||||
# ),
|
||||
# Docker.Config(
|
||||
# name="clickhouse/test-old-centos",
|
||||
# path="./docker/test/compatibility/centos",
|
||||
# arm64=True,
|
||||
# amd64=True,
|
||||
# path="./ci_v2/docker/test/compatibility/centos",
|
||||
# platforms=Docker.Platforms.arm_amd,
|
||||
# depends_on=[],
|
||||
# ),
|
||||
# Docker.Config(
|
||||
# name="clickhouse/test-old-ubuntu",
|
||||
# path="./docker/test/compatibility/ubuntu",
|
||||
# arm64=True,
|
||||
# amd64=True,
|
||||
# path="./ci_v2/docker/test/compatibility/ubuntu",
|
||||
# platforms=Docker.Platforms.arm_amd,
|
||||
# depends_on=[],
|
||||
# ),
|
||||
# Docker.Config(
|
||||
# name="clickhouse/test-util",
|
||||
# path="./docker/test/util",
|
||||
# arm64=True,
|
||||
# amd64=True,
|
||||
# path="./ci_v2/docker/test/util",
|
||||
# platforms=Docker.Platforms.arm_amd,
|
||||
# depends_on=[],
|
||||
# ),
|
||||
# Docker.Config(
|
||||
# name="clickhouse/integration-test",
|
||||
# path="./docker/test/integration/base",
|
||||
# arm64=True,
|
||||
# amd64=True,
|
||||
# path="./ci_v2/docker/test/integration/base",
|
||||
# platforms=Docker.Platforms.arm_amd,
|
||||
# depends_on=["clickhouse/test-base"],
|
||||
# ),
|
||||
# Docker.Config(
|
||||
# name="clickhouse/fuzzer",
|
||||
# path="./docker/test/fuzzer",
|
||||
# arm64=True,
|
||||
# amd64=True,
|
||||
# path="./ci_v2/docker/test/fuzzer",
|
||||
# platforms=Docker.Platforms.arm_amd,
|
||||
# depends_on=["clickhouse/test-base"],
|
||||
# ),
|
||||
# Docker.Config(
|
||||
# name="clickhouse/performance-comparison",
|
||||
# path="./docker/test/performance-comparison",
|
||||
# arm64=True,
|
||||
# amd64=True,
|
||||
# path="./ci_v2/docker/test/performance-comparison",
|
||||
# platforms=Docker.Platforms.arm_amd,
|
||||
# depends_on=[],
|
||||
# ),
|
||||
# Docker.Config(
|
||||
# name="clickhouse/fasttest",
|
||||
# path="./docker/test/fasttest",
|
||||
# arm64=True,
|
||||
# amd64=True,
|
||||
# depends_on=["clickhouse/test-util"],
|
||||
# ),
|
||||
Docker.Config(
|
||||
name="clickhouse/fasttest",
|
||||
path="./ci_v2/docker/fasttest",
|
||||
platforms=Docker.Platforms.arm_amd,
|
||||
depends_on=[],
|
||||
),
|
||||
# Docker.Config(
|
||||
# name="clickhouse/test-base",
|
||||
# path="./docker/test/base",
|
||||
# arm64=True,
|
||||
# amd64=True,
|
||||
# path="./ci_v2/docker/test/base",
|
||||
# platforms=Docker.Platforms.arm_amd,
|
||||
# depends_on=["clickhouse/test-util"],
|
||||
# ),
|
||||
# Docker.Config(
|
||||
# name="clickhouse/clickbench",
|
||||
# path="./docker/test/clickbench",
|
||||
# arm64=True,
|
||||
# amd64=True,
|
||||
# path="./ci_v2/docker/test/clickbench",
|
||||
# platforms=Docker.Platforms.arm_amd,
|
||||
# depends_on=["clickhouse/test-base"],
|
||||
# ),
|
||||
# Docker.Config(
|
||||
# name="clickhouse/keeper-jepsen-test",
|
||||
# path="./docker/test/keeper-jepsen",
|
||||
# arm64=True,
|
||||
# amd64=True,
|
||||
# path="./ci_v2/docker/test/keeper-jepsen",
|
||||
# platforms=Docker.Platforms.arm_amd,
|
||||
# depends_on=["clickhouse/test-base"],
|
||||
# ),
|
||||
# Docker.Config(
|
||||
# name="clickhouse/server-jepsen-test",
|
||||
# path="./docker/test/server-jepsen",
|
||||
# arm64=True,
|
||||
# amd64=True,
|
||||
# path="./ci_v2/docker/test/server-jepsen",
|
||||
# platforms=Docker.Platforms.arm_amd,
|
||||
# depends_on=["clickhouse/test-base"],
|
||||
# ),
|
||||
# Docker.Config(
|
||||
# name="clickhouse/sqllogic-test",
|
||||
# path="./docker/test/sqllogic",
|
||||
# arm64=True,
|
||||
# amd64=True,
|
||||
# path="./ci_v2/docker/test/sqllogic",
|
||||
# platforms=Docker.Platforms.arm_amd,
|
||||
# depends_on=["clickhouse/test-base"],
|
||||
# ),
|
||||
# Docker.Config(
|
||||
# name="clickhouse/sqltest",
|
||||
# path="./docker/test/sqltest",
|
||||
# arm64=True,
|
||||
# amd64=True,
|
||||
# path="./ci_v2/docker/test/sqltest",
|
||||
# platforms=Docker.Platforms.arm_amd,
|
||||
# depends_on=["clickhouse/test-base"],
|
||||
# ),
|
||||
# Docker.Config(
|
||||
# name="clickhouse/stateless-test",
|
||||
# path="./docker/test/stateless",
|
||||
# arm64=True,
|
||||
# amd64=True,
|
||||
# path="./ci_v2/docker/test/stateless",
|
||||
# platforms=Docker.Platforms.arm_amd,
|
||||
# depends_on=["clickhouse/test-base"],
|
||||
# ),
|
||||
# Docker.Config(
|
||||
# name="clickhouse/stateful-test",
|
||||
# path="./docker/test/stateful",
|
||||
# arm64=True,
|
||||
# amd64=True,
|
||||
# path="./ci_v2/docker/test/stateful",
|
||||
# platforms=Docker.Platforms.arm_amd,
|
||||
# depends_on=["clickhouse/stateless-test"],
|
||||
# ),
|
||||
# Docker.Config(
|
||||
# name="clickhouse/stress-test",
|
||||
# path="./docker/test/stress",
|
||||
# arm64=True,
|
||||
# amd64=True,
|
||||
# path="./ci_v2/docker/test/stress",
|
||||
# platforms=Docker.Platforms.arm_amd,
|
||||
# depends_on=["clickhouse/stateful-test"],
|
||||
# ),
|
||||
# Docker.Config(
|
||||
# name="clickhouse/unit-test",
|
||||
# path="./docker/test/unit",
|
||||
# arm64=True,
|
||||
# amd64=True,
|
||||
# path="./ci_v2/docker/test/unit",
|
||||
# platforms=Docker.Platforms.arm_amd,
|
||||
# depends_on=["clickhouse/test-base"],
|
||||
# ),
|
||||
# Docker.Config(
|
||||
# name="clickhouse/integration-tests-runner",
|
||||
# path="./docker/test/integration/runner",
|
||||
# arm64=True,
|
||||
# amd64=True,
|
||||
# path="./ci_v2/docker/test/integration/runner",
|
||||
# platforms=Docker.Platforms.arm_amd,
|
||||
# depends_on=["clickhouse/test-base"],
|
||||
# ),
|
||||
Docker.Config(
|
||||
@ -175,9 +156,8 @@ DOCKERS = [
|
||||
),
|
||||
# Docker.Config(
|
||||
# name="clickhouse/docs-builder",
|
||||
# path="./docker/docs/builder",
|
||||
# arm64=True,
|
||||
# amd64=True,
|
||||
# path="./ci_v2/docker/docs/builder",
|
||||
# platforms=Docker.Platforms.arm_amd,
|
||||
# depends_on=["clickhouse/test-base"],
|
||||
# ),
|
||||
]
|
||||
@ -249,3 +229,4 @@ DOCKERS = [
|
||||
|
||||
class JobNames:
|
||||
STYLE_CHECK = "Style Check"
|
||||
FAST_TEST = "Fast test"
|
@ -16,12 +16,20 @@ style_check_job = Job.Config(
|
||||
run_in_docker="clickhouse/style-test",
|
||||
)
|
||||
|
||||
fast_test_job = Job.Config(
|
||||
name=JobNames.FAST_TEST,
|
||||
runs_on=[RunnerLabels.BUILDER],
|
||||
command="python3 ./ci_v2/jobs/fast_test.py",
|
||||
run_in_docker="clickhouse/fasttest",
|
||||
)
|
||||
|
||||
workflow = Workflow.Config(
|
||||
name="PR",
|
||||
event=Workflow.Event.PULL_REQUEST,
|
||||
base_branches=[BASE_BRANCH],
|
||||
jobs=[
|
||||
style_check_job,
|
||||
fast_test_job,
|
||||
],
|
||||
dockers=DOCKERS,
|
||||
secrets=SECRETS,
|
||||
@ -36,9 +44,7 @@ WORKFLOWS = [
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
# example: local job test inside praktika environment
|
||||
# local job test inside praktika environment
|
||||
from praktika.runner import Runner
|
||||
|
||||
Runner.generate_dummy_environment(workflow, style_check_job)
|
||||
|
||||
Runner().run(workflow, style_check_job)
|
||||
Runner().run(workflow, fast_test_job, docker="fasttest", dummy_env=True)
|
@ -1,4 +0,0 @@
|
||||
requests==2.32.3
|
||||
yamllint==1.26.3
|
||||
codespell==2.2.1
|
||||
https://clickhouse-builds.s3.amazonaws.com/packages/praktika-0.1-py3-none-any.whl
|
22
contrib/CMakeLists.txt
vendored
@ -178,35 +178,13 @@ add_contrib (sqlite-cmake sqlite-amalgamation)
|
||||
add_contrib (s2geometry-cmake s2geometry)
|
||||
add_contrib (c-ares-cmake c-ares)
|
||||
|
||||
if (OS_LINUX AND ARCH_AMD64 AND ENABLE_SSE42)
|
||||
option (ENABLE_QPL "Enable Intel® Query Processing Library (QPL)" ${ENABLE_LIBRARIES})
|
||||
elseif(ENABLE_QPL)
|
||||
message (${RECONFIGURE_MESSAGE_LEVEL} "QPL library is only supported on x86_64 with SSE 4.2 or higher")
|
||||
endif()
|
||||
if (ENABLE_QPL)
|
||||
add_contrib (idxd-config-cmake idxd-config)
|
||||
add_contrib (qpl-cmake qpl) # requires: idxd-config
|
||||
else()
|
||||
message(STATUS "Not using QPL")
|
||||
endif ()
|
||||
|
||||
if (OS_LINUX AND ARCH_AMD64 AND NOT NO_SSE3_OR_HIGHER)
|
||||
option (ENABLE_QATLIB "Enable Intel® QuickAssist Technology Library (QATlib)" ${ENABLE_LIBRARIES})
|
||||
elseif(ENABLE_QATLIB)
|
||||
message (${RECONFIGURE_MESSAGE_LEVEL} "QATLib is only supported on x86_64")
|
||||
endif()
|
||||
if (ENABLE_QATLIB)
|
||||
option (ENABLE_QAT_USDM_DRIVER "A User Space DMA-able Memory (USDM) component which allocates/frees DMA-able memory" OFF)
|
||||
option (ENABLE_QAT_OUT_OF_TREE_BUILD "Using out-of-tree driver, user needs to customize ICP_ROOT variable" OFF)
|
||||
set(ICP_ROOT "" CACHE STRING "ICP_ROOT variable to define the path of out-of-tree driver package")
|
||||
if (ENABLE_QAT_OUT_OF_TREE_BUILD)
|
||||
if (ICP_ROOT STREQUAL "")
|
||||
message(FATAL_ERROR "Please define the path of out-of-tree driver package with -DICP_ROOT=xxx or disable out-of-tree build with -DENABLE_QAT_OUT_OF_TREE_BUILD=OFF; \
|
||||
If you want out-of-tree build but have no package available, please download and build ICP package from: https://www.intel.com/content/www/us/en/download/765501.html")
|
||||
endif ()
|
||||
else()
|
||||
add_contrib (qatlib-cmake qatlib) # requires: isa-l
|
||||
endif ()
|
||||
add_contrib (QAT-ZSTD-Plugin-cmake QAT-ZSTD-Plugin)
|
||||
else()
|
||||
message(STATUS "Not using QATLib")
|
||||
|
@ -1,35 +1,5 @@
|
||||
# Intel® QuickAssist Technology ZSTD Plugin (QAT ZSTD Plugin) is a plugin for Zstandard* (ZSTD*) that accelerates compression with QAT.
|
||||
# ENABLE_QAT_OUT_OF_TREE_BUILD = 1 means the kernel has no native support; the user builds and installs the driver from the external package: https://www.intel.com/content/www/us/en/download/765501.html
|
||||
# Meanwhile, the user needs to set the ICP_ROOT environment variable, which points to the root directory of the QAT driver source tree.
|
||||
# ENABLE_QAT_OUT_OF_TREE_BUILD = 0 means the kernel has a built-in QAT driver; QAT-ZSTD-Plugin then only depends on qatlib.
|
||||
|
||||
if (ENABLE_QAT_OUT_OF_TREE_BUILD)
|
||||
message(STATUS "Intel QATZSTD out-of-tree build, ICP_ROOT:${ICP_ROOT}")
|
||||
|
||||
set(QATZSTD_SRC_DIR "${ClickHouse_SOURCE_DIR}/contrib/QAT-ZSTD-Plugin/src")
|
||||
set(QATZSTD_SRC "${QATZSTD_SRC_DIR}/qatseqprod.c")
|
||||
set(ZSTD_LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/zstd/lib")
|
||||
set(QAT_INCLUDE_DIR "${ICP_ROOT}/quickassist/include")
|
||||
set(QAT_DC_INCLUDE_DIR "${ICP_ROOT}/quickassist/include/dc")
|
||||
set(QAT_AL_INCLUDE_DIR "${ICP_ROOT}/quickassist/lookaside/access_layer/include")
|
||||
set(QAT_USDM_INCLUDE_DIR "${ICP_ROOT}/quickassist/utilities/libusdm_drv")
|
||||
set(USDM_LIBRARY "${ICP_ROOT}/build/libusdm_drv_s.so")
|
||||
set(QAT_S_LIBRARY "${ICP_ROOT}/build/libqat_s.so")
|
||||
if (ENABLE_QAT_USDM_DRIVER)
|
||||
add_definitions(-DENABLE_USDM_DRV)
|
||||
endif()
|
||||
add_library(_qatzstd_plugin ${QATZSTD_SRC})
|
||||
target_link_libraries (_qatzstd_plugin PUBLIC ${USDM_LIBRARY} ${QAT_S_LIBRARY})
|
||||
target_include_directories(_qatzstd_plugin
|
||||
SYSTEM PUBLIC "${QATZSTD_SRC_DIR}"
|
||||
PRIVATE ${QAT_INCLUDE_DIR}
|
||||
${QAT_DC_INCLUDE_DIR}
|
||||
${QAT_AL_INCLUDE_DIR}
|
||||
${QAT_USDM_INCLUDE_DIR}
|
||||
${ZSTD_LIBRARY_DIR})
|
||||
target_compile_definitions(_qatzstd_plugin PRIVATE -DDEBUGLEVEL=0)
|
||||
add_library (ch_contrib::qatzstd_plugin ALIAS _qatzstd_plugin)
|
||||
else () # In-tree build
|
||||
message(STATUS "Intel QATZSTD in-tree build")
|
||||
set(QATZSTD_SRC_DIR "${ClickHouse_SOURCE_DIR}/contrib/QAT-ZSTD-Plugin/src")
|
||||
set(QATZSTD_SRC "${QATZSTD_SRC_DIR}/qatseqprod.c")
|
||||
@ -81,5 +51,3 @@ else () # In-tree build
|
||||
target_compile_definitions(_qatzstd_plugin PRIVATE -DDEBUGLEVEL=0 PUBLIC -DINTREE)
|
||||
target_include_directories(_qatzstd_plugin SYSTEM PUBLIC $<BUILD_INTERFACE:${QATZSTD_SRC_DIR}> $<INSTALL_INTERFACE:include>)
|
||||
add_library (ch_contrib::qatzstd_plugin ALIAS _qatzstd_plugin)
|
||||
endif ()
|
||||
|
||||
|
1
contrib/idxd-config
vendored
@ -1 +0,0 @@
|
||||
Subproject commit a836ce0e42052a69bffbbc14239ab4097f3b77f1
|
@ -1,23 +0,0 @@
|
||||
## accel_config is the utility library required by QPL-Deflate codec for controlling and configuring Intel® In-Memory Analytics Accelerator (Intel® IAA).
|
||||
set (LIBACCEL_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/idxd-config")
|
||||
set (UUID_DIR "${ClickHouse_SOURCE_DIR}/contrib/qpl-cmake")
|
||||
set (LIBACCEL_HEADER_DIR "${ClickHouse_SOURCE_DIR}/contrib/idxd-config-cmake/include")
|
||||
set (SRCS
|
||||
"${LIBACCEL_SOURCE_DIR}/accfg/lib/libaccfg.c"
|
||||
"${LIBACCEL_SOURCE_DIR}/util/log.c"
|
||||
"${LIBACCEL_SOURCE_DIR}/util/sysfs.c"
|
||||
)
|
||||
|
||||
add_library(_accel-config ${SRCS})
|
||||
|
||||
target_compile_options(_accel-config PRIVATE "-D_GNU_SOURCE")
|
||||
|
||||
target_include_directories(_accel-config BEFORE
|
||||
PRIVATE ${UUID_DIR}
|
||||
PRIVATE ${LIBACCEL_HEADER_DIR}
|
||||
PRIVATE ${LIBACCEL_SOURCE_DIR})
|
||||
|
||||
target_include_directories(_accel-config SYSTEM BEFORE
|
||||
PUBLIC ${LIBACCEL_SOURCE_DIR}/accfg)
|
||||
|
||||
add_library(ch_contrib::accel-config ALIAS _accel-config)
|
@ -1,159 +0,0 @@
|
||||
/* config.h. Generated from config.h.in by configure. */
|
||||
/* config.h.in. Generated from configure.ac by autoheader. */
|
||||
|
||||
/* Define if building universal (internal helper macro) */
|
||||
/* #undef AC_APPLE_UNIVERSAL_BUILD */
|
||||
|
||||
/* Debug messages. */
|
||||
/* #undef ENABLE_DEBUG */
|
||||
|
||||
/* Documentation / man pages. */
|
||||
/* #define ENABLE_DOCS */
|
||||
|
||||
/* System logging. */
|
||||
#define ENABLE_LOGGING 1
|
||||
|
||||
/* accfg test support */
|
||||
/* #undef ENABLE_TEST */
|
||||
|
||||
/* Define to 1 if big-endian-arch */
|
||||
/* #undef HAVE_BIG_ENDIAN */
|
||||
|
||||
/* Define to 1 if you have the <dlfcn.h> header file. */
|
||||
#define HAVE_DLFCN_H 1
|
||||
|
||||
/* Define to 1 if you have the <inttypes.h> header file. */
|
||||
#define HAVE_INTTYPES_H 1
|
||||
|
||||
/* Define to 1 if you have the <linux/version.h> header file. */
|
||||
#define HAVE_LINUX_VERSION_H 1
|
||||
|
||||
/* Define to 1 if little-endian-arch */
|
||||
#define HAVE_LITTLE_ENDIAN 1
|
||||
|
||||
/* Define to 1 if you have the <memory.h> header file. */
|
||||
#define HAVE_MEMORY_H 1
|
||||
|
||||
/* Define to 1 if you have the `secure_getenv' function. */
|
||||
#define HAVE_SECURE_GETENV 1
|
||||
|
||||
/* Define to 1 if you have statement expressions. */
|
||||
#define HAVE_STATEMENT_EXPR 1
|
||||
|
||||
/* Define to 1 if you have the <stdint.h> header file. */
|
||||
#define HAVE_STDINT_H 1
|
||||
|
||||
/* Define to 1 if you have the <stdlib.h> header file. */
|
||||
#define HAVE_STDLIB_H 1
|
||||
|
||||
/* Define to 1 if you have the <strings.h> header file. */
|
||||
#define HAVE_STRINGS_H 1
|
||||
|
||||
/* Define to 1 if you have the <string.h> header file. */
|
||||
#define HAVE_STRING_H 1
|
||||
|
||||
/* Define to 1 if you have the <sys/stat.h> header file. */
|
||||
#define HAVE_SYS_STAT_H 1
|
||||
|
||||
/* Define to 1 if you have the <sys/types.h> header file. */
|
||||
#define HAVE_SYS_TYPES_H 1
|
||||
|
||||
/* Define to 1 if typeof works with your compiler. */
|
||||
#define HAVE_TYPEOF 1
|
||||
|
||||
/* Define to 1 if you have the <unistd.h> header file. */
|
||||
#define HAVE_UNISTD_H 1
|
||||
|
||||
/* Define to 1 if using libuuid */
|
||||
#define HAVE_UUID 1
|
||||
|
||||
/* Define to 1 if you have the `__secure_getenv' function. */
|
||||
/* #undef HAVE___SECURE_GETENV */
|
||||
|
||||
/* Define to the sub-directory where libtool stores uninstalled libraries. */
|
||||
#define LT_OBJDIR ".libs/"
|
||||
|
||||
/* Name of package */
|
||||
#define PACKAGE "accel-config"
|
||||
|
||||
/* Define to the address where bug reports for this package should be sent. */
|
||||
#define PACKAGE_BUGREPORT "linux-dsa@lists.01.org"
|
||||
|
||||
/* Define to the full name of this package. */
|
||||
#define PACKAGE_NAME "accel-config"
|
||||
|
||||
/* Define to the full name and version of this package. */
|
||||
#define PACKAGE_STRING "accel-config 3.5.2.gitf6605c41"
|
||||
|
||||
/* Define to the one symbol short name of this package. */
|
||||
#define PACKAGE_TARNAME "accel-config"
|
||||
|
||||
/* Define to the home page for this package. */
|
||||
#define PACKAGE_URL "https://github.com/xxx/accel-config"
|
||||
|
||||
/* Define to the version of this package. */
|
||||
#define PACKAGE_VERSION "3.5.2.gitf6605c41"
|
||||
|
||||
/* Define to 1 if you have the ANSI C header files. */
|
||||
#define STDC_HEADERS 1
|
||||
|
||||
/* Enable extensions on AIX 3, Interix. */
|
||||
#ifndef _ALL_SOURCE
|
||||
# define _ALL_SOURCE 1
|
||||
#endif
|
||||
/* Enable GNU extensions on systems that have them. */
|
||||
#ifndef _GNU_SOURCE
|
||||
# define _GNU_SOURCE 1
|
||||
#endif
|
||||
/* Enable threading extensions on Solaris. */
|
||||
#ifndef _POSIX_PTHREAD_SEMANTICS
|
||||
# define _POSIX_PTHREAD_SEMANTICS 1
|
||||
#endif
|
||||
/* Enable extensions on HP NonStop. */
|
||||
#ifndef _TANDEM_SOURCE
|
||||
# define _TANDEM_SOURCE 1
|
||||
#endif
|
||||
/* Enable general extensions on Solaris. */
|
||||
#ifndef __EXTENSIONS__
|
||||
# define __EXTENSIONS__ 1
|
||||
#endif
|
||||
|
||||
|
||||
/* Version number of package */
|
||||
#define VERSION "3.5.2.gitf6605c41"
|
||||
|
||||
/* Define WORDS_BIGENDIAN to 1 if your processor stores words with the most
|
||||
significant byte first (like Motorola and SPARC, unlike Intel). */
|
||||
#if defined AC_APPLE_UNIVERSAL_BUILD
|
||||
# if defined __BIG_ENDIAN__
|
||||
# define WORDS_BIGENDIAN 1
|
||||
# endif
|
||||
#else
|
||||
# ifndef WORDS_BIGENDIAN
|
||||
/* # undef WORDS_BIGENDIAN */
|
||||
# endif
|
||||
#endif
|
||||
|
||||
/* Enable large inode numbers on Mac OS X 10.5. */
|
||||
#ifndef _DARWIN_USE_64_BIT_INODE
|
||||
# define _DARWIN_USE_64_BIT_INODE 1
|
||||
#endif
|
||||
|
||||
/* Number of bits in a file offset, on hosts where this is settable. */
|
||||
/* #undef _FILE_OFFSET_BITS */
|
||||
|
||||
/* Define for large files, on AIX-style hosts. */
|
||||
/* #undef _LARGE_FILES */
|
||||
|
||||
/* Define to 1 if on MINIX. */
|
||||
/* #undef _MINIX */
|
||||
|
||||
/* Define to 2 if the system does not provide POSIX.1 features except with
|
||||
this defined. */
|
||||
/* #undef _POSIX_1_SOURCE */
|
||||
|
||||
/* Define to 1 if you need to in order for `stat' and other things to work. */
|
||||
/* #undef _POSIX_SOURCE */
|
||||
|
||||
/* Define to __typeof__ if your compiler spells it that way. */
|
||||
/* #undef typeof */
|
1
contrib/qpl
vendored
@ -1 +0,0 @@
|
||||
Subproject commit c2ced94c53c1ee22191201a59878e9280bc9b9b8
|
@ -1,738 +0,0 @@
|
||||
## Intel® QPL provides high-performance implementations of data processing functions for the hardware accelerator, with a software fallback path when the accelerator is not available.
|
||||
set (UUID_DIR "${ClickHouse_SOURCE_DIR}/contrib/qpl-cmake")
|
||||
set (QPL_PROJECT_DIR "${ClickHouse_SOURCE_DIR}/contrib/qpl")
|
||||
set (QPL_SRC_DIR "${ClickHouse_SOURCE_DIR}/contrib/qpl/sources")
|
||||
set (QPL_BINARY_DIR "${ClickHouse_BINARY_DIR}/build/contrib/qpl")
|
||||
set (EFFICIENT_WAIT OFF)
|
||||
set (LOG_HW_INIT OFF)
|
||||
set (SANITIZE_MEMORY OFF)
|
||||
set (SANITIZE_THREADS OFF)
|
||||
set (LIB_FUZZING_ENGINE OFF)
|
||||
set (DYNAMIC_LOADING_LIBACCEL_CONFIG OFF)
|
||||
|
||||
function(GetLibraryVersion _content _outputVar)
|
||||
string(REGEX MATCHALL "QPL VERSION (.+) LANGUAGES" VERSION_REGEX "${_content}")
|
||||
SET(${_outputVar} ${CMAKE_MATCH_1} PARENT_SCOPE)
|
||||
endfunction()
|
||||
|
||||
set (QPL_VERSION 1.6.0)
|
||||
|
||||
message(STATUS "Intel QPL version: ${QPL_VERSION}")
|
||||
|
||||
# There are 5 source subdirectories under $QPL_SRC_DIR: c_api, core-iaa, core-sw, middle-layer and isal.
|
||||
# Generate 8 library targets: qpl_c_api, core_iaa, qplcore_px, qplcore_avx512, qplcore_sw_dispatcher, middle_layer_lib, isal and isal_asm,
|
||||
# which are then combined into static or shared qpl.
|
||||
# Output ch_contrib::qpl by linking with 8 library targets.
|
||||
|
||||
# Note, QPL has integrated a customized version of ISA-L to meet specific needs.
|
||||
# This version has been significantly modified and there are no plans to maintain compatibility with the upstream version
|
||||
# or upgrade the current copy.
|
||||
|
||||
## cmake/CompileOptions.cmake and automatic wrappers generation
|
||||
|
||||
# ==========================================================================
|
||||
# Copyright (C) 2022 Intel Corporation
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
# ==========================================================================
|
||||
|
||||
set(QPL_LINUX_TOOLCHAIN_CPP_EMBEDDED_FLAGS "-fno-exceptions;-fno-rtti")
|
||||
|
||||
function(modify_standard_language_flag)
|
||||
# Declaring function parameters
|
||||
set(OPTIONS "")
|
||||
set(ONE_VALUE_ARGS
|
||||
LANGUAGE_NAME
|
||||
FLAG_NAME
|
||||
NEW_FLAG_VALUE)
|
||||
set(MULTI_VALUE_ARGS "")
|
||||
|
||||
# Parsing function parameters
|
||||
cmake_parse_arguments(MODIFY
|
||||
"${OPTIONS}"
|
||||
"${ONE_VALUE_ARGS}"
|
||||
"${MULTI_VALUE_ARGS}"
|
||||
${ARGN})
|
||||
|
||||
# Variables
|
||||
set(FLAG_REGULAR_EXPRESSION "${MODIFY_FLAG_NAME}.*[ ]*")
|
||||
set(NEW_VALUE "${MODIFY_FLAG_NAME}${MODIFY_NEW_FLAG_VALUE}")
|
||||
|
||||
# Replacing specified flag with new value
|
||||
string(REGEX REPLACE
|
||||
${FLAG_REGULAR_EXPRESSION} ${NEW_VALUE}
|
||||
NEW_COMPILE_FLAGS
|
||||
"${CMAKE_${MODIFY_LANGUAGE_NAME}_FLAGS}")
|
||||
|
||||
# Returning the value
|
||||
set(CMAKE_${MODIFY_LANGUAGE_NAME}_FLAGS ${NEW_COMPILE_FLAGS} PARENT_SCOPE)
|
||||
endfunction()
|
||||
|
||||
function(get_function_name_with_default_bit_width in_function_name bit_width out_function_name)
|
||||
|
||||
if(in_function_name MATCHES ".*_i")
|
||||
|
||||
string(REPLACE "_i" "" in_function_name ${in_function_name})
|
||||
|
||||
set(${out_function_name} "${in_function_name}_${bit_width}_i" PARENT_SCOPE)
|
||||
|
||||
else()
|
||||
|
||||
set(${out_function_name} "${in_function_name}_${bit_width}" PARENT_SCOPE)
|
||||
|
||||
endif()
|
||||
|
||||
endfunction()
|
||||
|
||||
macro(get_list_of_supported_optimizations PLATFORMS_LIST)
|
||||
list(APPEND PLATFORMS_LIST "")
|
||||
list(APPEND PLATFORMS_LIST "px")
|
||||
list(APPEND PLATFORMS_LIST "avx512")
|
||||
endmacro(get_list_of_supported_optimizations)
|
||||
|
||||
function(generate_unpack_kernel_arrays current_directory PLATFORMS_LIST)
|
||||
list(APPEND UNPACK_POSTFIX_LIST "")
|
||||
list(APPEND UNPACK_PRLE_POSTFIX_LIST "")
|
||||
list(APPEND PACK_POSTFIX_LIST "")
|
||||
list(APPEND PACK_INDEX_POSTFIX_LIST "")
|
||||
list(APPEND SCAN_POSTFIX_LIST "")
|
||||
list(APPEND DEFAULT_BIT_WIDTH_FUNCTIONS_LIST "")
|
||||
list(APPEND DEFAULT_BIT_WIDTH_LIST "")
|
||||
|
||||
#create list of functions that use only 8u 16u 32u postfixes
|
||||
list(APPEND DEFAULT_BIT_WIDTH_FUNCTIONS_LIST "unpack_prle")
|
||||
list(APPEND DEFAULT_BIT_WIDTH_FUNCTIONS_LIST "extract")
|
||||
list(APPEND DEFAULT_BIT_WIDTH_FUNCTIONS_LIST "extract_i")
|
||||
list(APPEND DEFAULT_BIT_WIDTH_FUNCTIONS_LIST "select")
|
||||
list(APPEND DEFAULT_BIT_WIDTH_FUNCTIONS_LIST "select_i")
|
||||
list(APPEND DEFAULT_BIT_WIDTH_FUNCTIONS_LIST "expand")
|
||||
|
||||
#create default bit width list
|
||||
list(APPEND DEFAULT_BIT_WIDTH_LIST "8u")
|
||||
list(APPEND DEFAULT_BIT_WIDTH_LIST "16u")
|
||||
list(APPEND DEFAULT_BIT_WIDTH_LIST "32u")
|
||||
|
||||
#create scan kernel postfixes
|
||||
list(APPEND SCAN_COMPARATOR_LIST "")
|
||||
|
||||
list(APPEND SCAN_COMPARATOR_LIST "eq")
|
||||
list(APPEND SCAN_COMPARATOR_LIST "ne")
|
||||
list(APPEND SCAN_COMPARATOR_LIST "lt")
|
||||
list(APPEND SCAN_COMPARATOR_LIST "le")
|
||||
list(APPEND SCAN_COMPARATOR_LIST "gt")
|
||||
list(APPEND SCAN_COMPARATOR_LIST "ge")
|
||||
list(APPEND SCAN_COMPARATOR_LIST "range")
|
||||
list(APPEND SCAN_COMPARATOR_LIST "not_range")
|
||||
|
||||
foreach(SCAN_COMPARATOR IN LISTS SCAN_COMPARATOR_LIST)
|
||||
list(APPEND SCAN_POSTFIX_LIST "_${SCAN_COMPARATOR}_8u")
|
||||
list(APPEND SCAN_POSTFIX_LIST "_${SCAN_COMPARATOR}_16u8u")
|
||||
list(APPEND SCAN_POSTFIX_LIST "_${SCAN_COMPARATOR}_32u8u")
|
||||
endforeach()
|
||||
|
||||
# create unpack kernel postfixes
|
||||
foreach(input_width RANGE 1 32 1)
|
||||
if(input_width LESS 8 OR input_width EQUAL 8)
|
||||
list(APPEND UNPACK_POSTFIX_LIST "_${input_width}u8u")
|
||||
|
||||
elseif(input_width LESS 16 OR input_width EQUAL 16)
|
||||
list(APPEND UNPACK_POSTFIX_LIST "_${input_width}u16u")
|
||||
|
||||
else()
|
||||
list(APPEND UNPACK_POSTFIX_LIST "_${input_width}u32u")
|
||||
endif()
|
||||
endforeach()
|
||||
|
||||
# create pack kernel postfixes
|
||||
foreach(output_width RANGE 1 8 1)
|
||||
list(APPEND PACK_POSTFIX_LIST "_8u${output_width}u")
|
||||
endforeach()
|
||||
|
||||
foreach(output_width RANGE 9 16 1)
|
||||
list(APPEND PACK_POSTFIX_LIST "_16u${output_width}u")
|
||||
endforeach()
|
||||
|
||||
foreach(output_width RANGE 17 32 1)
|
||||
list(APPEND PACK_POSTFIX_LIST "_32u${output_width}u")
|
||||
endforeach()
|
||||
|
||||
list(APPEND PACK_POSTFIX_LIST "_8u16u")
|
||||
list(APPEND PACK_POSTFIX_LIST "_8u32u")
|
||||
list(APPEND PACK_POSTFIX_LIST "_16u32u")
|
||||
|
||||
# create pack index kernel postfixes
|
||||
list(APPEND PACK_INDEX_POSTFIX_LIST "_nu")
|
||||
list(APPEND PACK_INDEX_POSTFIX_LIST "_8u")
|
||||
list(APPEND PACK_INDEX_POSTFIX_LIST "_8u16u")
|
||||
list(APPEND PACK_INDEX_POSTFIX_LIST "_8u32u")
|
||||
|
||||
# write to file
|
||||
file(MAKE_DIRECTORY ${current_directory}/generated)
|
||||
|
||||
foreach(PLATFORM_VALUE IN LISTS PLATFORMS_LIST)
|
||||
set(directory "${current_directory}/generated")
|
||||
set(PLATFORM_PREFIX "${PLATFORM_VALUE}_")
|
||||
|
||||
#
|
||||
# Write unpack table
|
||||
#
|
||||
file(WRITE ${directory}/${PLATFORM_PREFIX}unpack.cpp "#include \"qplc_api.h\"\n")
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}unpack.cpp "#include \"dispatcher/dispatcher.hpp\"\n")
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}unpack.cpp "namespace qpl::core_sw::dispatcher\n{\n")
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}unpack.cpp "unpack_table_t ${PLATFORM_PREFIX}unpack_table = {\n")
|
||||
|
||||
#write LE kernels
|
||||
foreach(UNPACK_POSTFIX IN LISTS UNPACK_POSTFIX_LIST)
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}unpack.cpp "\t${PLATFORM_PREFIX}qplc_unpack${UNPACK_POSTFIX},\n")
|
||||
endforeach()
|
||||
|
||||
#write BE kernels
|
||||
|
||||
#get last element of the list
|
||||
set(LAST_ELEMENT "")
|
||||
list(GET UNPACK_POSTFIX_LIST -1 LAST_ELEMENT)
|
||||
|
||||
foreach(UNPACK_POSTFIX IN LISTS UNPACK_POSTFIX_LIST)
|
||||
|
||||
if(UNPACK_POSTFIX STREQUAL LAST_ELEMENT)
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}unpack.cpp "\t${PLATFORM_PREFIX}qplc_unpack_be${UNPACK_POSTFIX}};\n")
|
||||
else()
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}unpack.cpp "\t${PLATFORM_PREFIX}qplc_unpack_be${UNPACK_POSTFIX},\n")
|
||||
endif()
|
||||
endforeach()
|
||||
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}unpack.cpp "}\n")
|
||||
|
||||
#
|
||||
# Write pack table
|
||||
#
|
||||
file(WRITE ${directory}/${PLATFORM_PREFIX}pack.cpp "#include \"qplc_api.h\"\n")
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}pack.cpp "#include \"dispatcher/dispatcher.hpp\"\n")
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}pack.cpp "namespace qpl::core_sw::dispatcher\n{\n")
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}pack.cpp "pack_table_t ${PLATFORM_PREFIX}pack_table = {\n")
|
||||
|
||||
#write LE kernels
|
||||
foreach(PACK_POSTFIX IN LISTS PACK_POSTFIX_LIST)
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}pack.cpp "\t${PLATFORM_PREFIX}qplc_pack${PACK_POSTFIX},\n")
|
||||
endforeach()
|
||||
|
||||
#write BE kernels
|
||||
|
||||
#get last element of the list
|
||||
set(LAST_ELEMENT "")
|
||||
list(GET PACK_POSTFIX_LIST -1 LAST_ELEMENT)
|
||||
|
||||
foreach(PACK_POSTFIX IN LISTS PACK_POSTFIX_LIST)
|
||||
|
||||
if(PACK_POSTFIX STREQUAL LAST_ELEMENT)
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}pack.cpp "\t${PLATFORM_PREFIX}qplc_pack_be${PACK_POSTFIX}};\n")
|
||||
else()
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}pack.cpp "\t${PLATFORM_PREFIX}qplc_pack_be${PACK_POSTFIX},\n")
|
||||
endif()
|
||||
endforeach()
|
||||
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}pack.cpp "}\n")
|
||||
|
||||
#
|
||||
# Write scan table
|
||||
#
|
||||
file(WRITE ${directory}/${PLATFORM_PREFIX}scan.cpp "#include \"qplc_api.h\"\n")
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}scan.cpp "#include \"dispatcher/dispatcher.hpp\"\n")
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}scan.cpp "namespace qpl::core_sw::dispatcher\n{\n")
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}scan.cpp "scan_table_t ${PLATFORM_PREFIX}scan_table = {\n")
|
||||
|
||||
#get last element of the list
|
||||
set(LAST_ELEMENT "")
|
||||
list(GET SCAN_POSTFIX_LIST -1 LAST_ELEMENT)
|
||||
|
||||
foreach(SCAN_POSTFIX IN LISTS SCAN_POSTFIX_LIST)
|
||||
|
||||
if(SCAN_POSTFIX STREQUAL LAST_ELEMENT)
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}scan.cpp "\t${PLATFORM_PREFIX}qplc_scan${SCAN_POSTFIX}};\n")
|
||||
else()
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}scan.cpp "\t${PLATFORM_PREFIX}qplc_scan${SCAN_POSTFIX},\n")
|
||||
endif()
|
||||
endforeach()
|
||||
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}scan.cpp "}\n")
|
||||
|
||||
#
|
||||
# Write scan_i table
|
||||
#
|
||||
file(WRITE ${directory}/${PLATFORM_PREFIX}scan_i.cpp "#include \"qplc_api.h\"\n")
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}scan_i.cpp "#include \"dispatcher/dispatcher.hpp\"\n")
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}scan_i.cpp "namespace qpl::core_sw::dispatcher\n{\n")
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}scan_i.cpp "scan_i_table_t ${PLATFORM_PREFIX}scan_i_table = {\n")
|
||||
|
||||
#get last element of the list
|
||||
set(LAST_ELEMENT "")
|
||||
list(GET SCAN_POSTFIX_LIST -1 LAST_ELEMENT)
|
||||
|
||||
foreach(SCAN_POSTFIX IN LISTS SCAN_POSTFIX_LIST)
|
||||
|
||||
if(SCAN_POSTFIX STREQUAL LAST_ELEMENT)
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}scan_i.cpp "\t${PLATFORM_PREFIX}qplc_scan${SCAN_POSTFIX}_i};\n")
|
||||
else()
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}scan_i.cpp "\t${PLATFORM_PREFIX}qplc_scan${SCAN_POSTFIX}_i,\n")
|
||||
endif()
|
||||
endforeach()
|
||||
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}scan_i.cpp "}\n")
|
||||
|
||||
#
|
||||
# Write pack_index table
|
||||
#
|
||||
file(WRITE ${directory}/${PLATFORM_PREFIX}pack_index.cpp "#include \"qplc_api.h\"\n")
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}pack_index.cpp "#include \"dispatcher/dispatcher.hpp\"\n")
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}pack_index.cpp "namespace qpl::core_sw::dispatcher\n{\n")
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}pack_index.cpp "pack_index_table_t ${PLATFORM_PREFIX}pack_index_table = {\n")
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}pack_index.cpp "\t${PLATFORM_PREFIX}qplc_pack_bits_nu,\n")
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}pack_index.cpp "\t${PLATFORM_PREFIX}qplc_pack_index_8u,\n")
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}pack_index.cpp "\t${PLATFORM_PREFIX}qplc_pack_index_8u16u,\n")
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}pack_index.cpp "\t${PLATFORM_PREFIX}qplc_pack_index_8u32u,\n")
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}pack_index.cpp "\t${PLATFORM_PREFIX}qplc_pack_bits_be_nu,\n")
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}pack_index.cpp "\t${PLATFORM_PREFIX}qplc_pack_index_8u,\n")
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}pack_index.cpp "\t${PLATFORM_PREFIX}qplc_pack_index_be_8u16u,\n")
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}pack_index.cpp "\t${PLATFORM_PREFIX}qplc_pack_index_be_8u32u};\n")
|
||||
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}pack_index.cpp "}\n")
|
||||
|
||||
#
|
||||
# Write default bit width functions
|
||||
#
|
||||
foreach(DEAULT_BIT_WIDTH_FUNCTION IN LISTS DEFAULT_BIT_WIDTH_FUNCTIONS_LIST)
|
||||
file(WRITE ${directory}/${PLATFORM_PREFIX}${DEAULT_BIT_WIDTH_FUNCTION}.cpp "#include \"qplc_api.h\"\n")
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}${DEAULT_BIT_WIDTH_FUNCTION}.cpp "#include \"dispatcher/dispatcher.hpp\"\n")
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}${DEAULT_BIT_WIDTH_FUNCTION}.cpp "namespace qpl::core_sw::dispatcher\n{\n")
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}${DEAULT_BIT_WIDTH_FUNCTION}.cpp "${DEAULT_BIT_WIDTH_FUNCTION}_table_t ${PLATFORM_PREFIX}${DEAULT_BIT_WIDTH_FUNCTION}_table = {\n")
|
||||
|
||||
#get last element of the list
|
||||
set(LAST_ELEMENT "")
|
||||
list(GET DEFAULT_BIT_WIDTH_LIST -1 LAST_ELEMENT)
|
||||
|
||||
foreach(BIT_WIDTH IN LISTS DEFAULT_BIT_WIDTH_LIST)
|
||||
|
||||
set(FUNCTION_NAME "")
|
||||
get_function_name_with_default_bit_width(${DEAULT_BIT_WIDTH_FUNCTION} ${BIT_WIDTH} FUNCTION_NAME)
|
||||
|
||||
if(BIT_WIDTH STREQUAL LAST_ELEMENT)
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}${DEAULT_BIT_WIDTH_FUNCTION}.cpp "\t${PLATFORM_PREFIX}qplc_${FUNCTION_NAME}};\n")
|
||||
else()
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}${DEAULT_BIT_WIDTH_FUNCTION}.cpp "\t${PLATFORM_PREFIX}qplc_${FUNCTION_NAME},\n")
|
||||
endif()
|
||||
endforeach()
|
||||
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}${DEAULT_BIT_WIDTH_FUNCTION}.cpp "}\n")
|
||||
endforeach()
|
||||
|
||||
#
|
||||
# Write aggregates table
|
||||
#
|
||||
file(WRITE ${directory}/${PLATFORM_PREFIX}aggregates.cpp "#include \"qplc_api.h\"\n")
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}aggregates.cpp "#include \"dispatcher/dispatcher.hpp\"\n")
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}aggregates.cpp "namespace qpl::core_sw::dispatcher\n{\n")
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}aggregates.cpp "aggregates_table_t ${PLATFORM_PREFIX}aggregates_table = {\n")
|
||||
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}aggregates.cpp "\t${PLATFORM_PREFIX}qplc_bit_aggregates_8u,\n")
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}aggregates.cpp "\t${PLATFORM_PREFIX}qplc_aggregates_8u,\n")
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}aggregates.cpp "\t${PLATFORM_PREFIX}qplc_aggregates_16u,\n")
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}aggregates.cpp "\t${PLATFORM_PREFIX}qplc_aggregates_32u};\n")
|
||||
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}aggregates.cpp "}\n")
|
||||
|
||||
#
|
||||
# Write mem_copy functions table
|
||||
#
|
||||
file(WRITE ${directory}/${PLATFORM_PREFIX}memory_copy.cpp "#include \"qplc_api.h\"\n")
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}memory_copy.cpp "#include \"dispatcher/dispatcher.hpp\"\n")
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}memory_copy.cpp "namespace qpl::core_sw::dispatcher\n{\n")
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}memory_copy.cpp "memory_copy_table_t ${PLATFORM_PREFIX}memory_copy_table = {\n")
|
||||
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}memory_copy.cpp "\t${PLATFORM_PREFIX}qplc_copy_8u,\n")
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}memory_copy.cpp "\t${PLATFORM_PREFIX}qplc_copy_16u,\n")
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}memory_copy.cpp "\t${PLATFORM_PREFIX}qplc_copy_32u};\n")
|
||||
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}memory_copy.cpp "}\n")
|
||||
|
||||
#
|
||||
# Write zero functions table
|
||||
#
|
||||
file(WRITE ${directory}/${PLATFORM_PREFIX}zero.cpp "#include \"qplc_api.h\"\n")
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}zero.cpp "#include \"dispatcher/dispatcher.hpp\"\n")
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}zero.cpp "namespace qpl::core_sw::dispatcher\n{\n")
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}zero.cpp "zero_table_t ${PLATFORM_PREFIX}zero_table = {\n")
|
||||
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}zero.cpp "\t${PLATFORM_PREFIX}qplc_zero_8u};\n")
|
||||
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}zero.cpp "}\n")
|
||||
|
||||
#
|
||||
# Write move functions table
|
||||
#
|
||||
file(WRITE ${directory}/${PLATFORM_PREFIX}move.cpp "#include \"qplc_api.h\"\n")
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}move.cpp "#include \"dispatcher/dispatcher.hpp\"\n")
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}move.cpp "namespace qpl::core_sw::dispatcher\n{\n")
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}move.cpp "move_table_t ${PLATFORM_PREFIX}move_table = {\n")
|
||||
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}move.cpp "\t${PLATFORM_PREFIX}qplc_move_8u};\n")
|
||||
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}move.cpp "}\n")
|
||||
|
||||
#
|
||||
# Write crc64 function table
|
||||
#
|
||||
file(WRITE ${directory}/${PLATFORM_PREFIX}crc64.cpp "#include \"qplc_api.h\"\n")
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}crc64.cpp "#include \"dispatcher/dispatcher.hpp\"\n")
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}crc64.cpp "namespace qpl::core_sw::dispatcher\n{\n")
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}crc64.cpp "crc64_table_t ${PLATFORM_PREFIX}crc64_table = {\n")
|
||||
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}crc64.cpp "\t${PLATFORM_PREFIX}qplc_crc64};\n")
|
||||
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}crc64.cpp "}\n")
|
||||
|
||||
#
|
||||
# Write xor_checksum function table
|
||||
#
|
||||
file(WRITE ${directory}/${PLATFORM_PREFIX}xor_checksum.cpp "#include \"qplc_api.h\"\n")
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}xor_checksum.cpp "#include \"dispatcher/dispatcher.hpp\"\n")
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}xor_checksum.cpp "namespace qpl::core_sw::dispatcher\n{\n")
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}xor_checksum.cpp "xor_checksum_table_t ${PLATFORM_PREFIX}xor_checksum_table = {\n")
|
||||
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}xor_checksum.cpp "\t${PLATFORM_PREFIX}qplc_xor_checksum_8u};\n")
|
||||
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}xor_checksum.cpp "}\n")
|
||||
|
||||
#
|
||||
# Write deflate functions table
|
||||
#
|
||||
file(WRITE ${directory}/${PLATFORM_PREFIX}deflate.cpp "#include \"deflate_slow_icf.h\"\n")
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}deflate.cpp "#include \"deflate_hash_table.h\"\n")
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}deflate.cpp "#include \"deflate_histogram.h\"\n")
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}deflate.cpp "#include \"dispatcher/dispatcher.hpp\"\n")
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}deflate.cpp "namespace qpl::core_sw::dispatcher\n{\n")
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}deflate.cpp "deflate_table_t ${PLATFORM_PREFIX}deflate_table = {\n")
|
||||
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}deflate.cpp "\t reinterpret_cast<void *>(&${PLATFORM_PREFIX}slow_deflate_icf_body),\n")
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}deflate.cpp "\t reinterpret_cast<void *>(&${PLATFORM_PREFIX}deflate_histogram_reset),\n")
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}deflate.cpp "\t reinterpret_cast<void *>(&${PLATFORM_PREFIX}deflate_hash_table_reset)};\n")
|
||||
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}deflate.cpp "}\n")
|
||||
|
||||
#
|
||||
# Write deflate fix functions table
|
||||
#
|
||||
file(WRITE ${directory}/${PLATFORM_PREFIX}deflate_fix.cpp "#include \"deflate_slow.h\"\n")
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}deflate_fix.cpp "#include \"dispatcher/dispatcher.hpp\"\n")
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}deflate_fix.cpp "namespace qpl::core_sw::dispatcher\n{\n")
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}deflate_fix.cpp "deflate_fix_table_t ${PLATFORM_PREFIX}deflate_fix_table = {\n")
|
||||
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}deflate_fix.cpp "\t reinterpret_cast<void *>(&${PLATFORM_PREFIX}slow_deflate_body)};\n")
|
||||
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}deflate_fix.cpp "}\n")
|
||||
|
||||
#
|
||||
# Write setup_dictionary functions table
|
||||
#
|
||||
file(WRITE ${directory}/${PLATFORM_PREFIX}setup_dictionary.cpp "#include \"deflate_slow_utils.h\"\n")
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}setup_dictionary.cpp "#include \"dispatcher/dispatcher.hpp\"\n")
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}setup_dictionary.cpp "namespace qpl::core_sw::dispatcher\n{\n")
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}setup_dictionary.cpp "setup_dictionary_table_t ${PLATFORM_PREFIX}setup_dictionary_table = {\n")
|
||||
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}setup_dictionary.cpp "\t reinterpret_cast<void *>(&${PLATFORM_PREFIX}setup_dictionary)};\n")
|
||||
|
||||
file(APPEND ${directory}/${PLATFORM_PREFIX}setup_dictionary.cpp "}\n")
|
||||
|
||||
endforeach()
|
||||
endfunction()
|
||||
|
||||
# [SUBDIR]isal
|
||||
|
||||
enable_language(ASM_NASM)
|
||||
|
||||
set(ISAL_C_SRC ${QPL_SRC_DIR}/isal/igzip/adler32_base.c
|
||||
${QPL_SRC_DIR}/isal/igzip/huff_codes.c
|
||||
${QPL_SRC_DIR}/isal/igzip/hufftables_c.c
|
||||
${QPL_SRC_DIR}/isal/igzip/igzip.c
|
||||
${QPL_SRC_DIR}/isal/igzip/igzip_base.c
|
||||
${QPL_SRC_DIR}/isal/igzip/flatten_ll.c
|
||||
${QPL_SRC_DIR}/isal/igzip/encode_df.c
|
||||
${QPL_SRC_DIR}/isal/igzip/igzip_icf_base.c
|
||||
${QPL_SRC_DIR}/isal/igzip/igzip_inflate.c
|
||||
${QPL_SRC_DIR}/isal/igzip/igzip_icf_body.c
|
||||
${QPL_SRC_DIR}/isal/crc/crc_base.c
|
||||
${QPL_SRC_DIR}/isal/crc/crc64_base.c)
|
||||
|
||||
set(ISAL_ASM_SRC ${QPL_SRC_DIR}/isal/igzip/igzip_body.asm
|
||||
${QPL_SRC_DIR}/isal/igzip/igzip_gen_icf_map_lh1_04.asm
|
||||
${QPL_SRC_DIR}/isal/igzip/igzip_gen_icf_map_lh1_06.asm
|
||||
${QPL_SRC_DIR}/isal/igzip/igzip_decode_block_stateless_04.asm
|
||||
${QPL_SRC_DIR}/isal/igzip/igzip_finish.asm
|
||||
${QPL_SRC_DIR}/isal/igzip/encode_df_04.asm
|
||||
${QPL_SRC_DIR}/isal/igzip/encode_df_06.asm
|
||||
${QPL_SRC_DIR}/isal/igzip/igzip_decode_block_stateless_01.asm
|
||||
${QPL_SRC_DIR}/isal/igzip/proc_heap.asm
|
||||
${QPL_SRC_DIR}/isal/igzip/igzip_icf_body_h1_gr_bt.asm
|
||||
${QPL_SRC_DIR}/isal/igzip/igzip_icf_finish.asm
|
||||
${QPL_SRC_DIR}/isal/igzip/igzip_inflate_multibinary.asm
|
||||
${QPL_SRC_DIR}/isal/igzip/igzip_update_histogram_01.asm
|
||||
${QPL_SRC_DIR}/isal/igzip/igzip_update_histogram_04.asm
|
||||
${QPL_SRC_DIR}/isal/igzip/rfc1951_lookup.asm
|
||||
${QPL_SRC_DIR}/isal/igzip/adler32_sse.asm
|
||||
${QPL_SRC_DIR}/isal/igzip/adler32_avx2_4.asm
|
||||
${QPL_SRC_DIR}/isal/igzip/igzip_deflate_hash.asm
|
||||
${QPL_SRC_DIR}/isal/igzip/igzip_set_long_icf_fg_04.asm
|
||||
${QPL_SRC_DIR}/isal/igzip/igzip_set_long_icf_fg_06.asm
|
||||
${QPL_SRC_DIR}/isal/igzip/igzip_multibinary.asm
|
||||
${QPL_SRC_DIR}/isal/crc/crc_multibinary.asm
|
||||
${QPL_SRC_DIR}/isal/crc/crc32_gzip_refl_by8.asm
|
||||
${QPL_SRC_DIR}/isal/crc/crc32_gzip_refl_by8_02.asm
|
||||
${QPL_SRC_DIR}/isal/crc/crc32_gzip_refl_by16_10.asm
|
||||
${QPL_SRC_DIR}/isal/crc/crc32_ieee_01.asm
|
||||
${QPL_SRC_DIR}/isal/crc/crc32_ieee_02.asm
|
||||
${QPL_SRC_DIR}/isal/crc/crc32_ieee_by4.asm
|
||||
${QPL_SRC_DIR}/isal/crc/crc32_ieee_by16_10.asm
|
||||
${QPL_SRC_DIR}/isal/crc/crc32_iscsi_00.asm
|
||||
${QPL_SRC_DIR}/isal/crc/crc32_iscsi_01.asm
|
||||
${QPL_SRC_DIR}/isal/crc/crc32_iscsi_by16_10.asm)
|
||||
|
||||
# Adding ISA-L library target
|
||||
add_library(isal OBJECT ${ISAL_C_SRC})
|
||||
add_library(isal_asm OBJECT ${ISAL_ASM_SRC})
|
||||
|
||||
set_property(GLOBAL APPEND PROPERTY QPL_LIB_DEPS
|
||||
$<TARGET_OBJECTS:isal>)
|
||||
|
||||
set_property(GLOBAL APPEND PROPERTY QPL_LIB_DEPS
|
||||
$<TARGET_OBJECTS:isal_asm>)
|
||||
|
||||
# Setting external and internal interfaces for ISA-L library
|
||||
target_include_directories(isal
|
||||
PUBLIC $<BUILD_INTERFACE:${QPL_SRC_DIR}/isal/include>
|
||||
PUBLIC ${QPL_SRC_DIR}/isal/igzip)
|
||||
|
||||
set_target_properties(isal PROPERTIES
|
||||
CXX_STANDARD 11
|
||||
C_STANDARD 99)
|
||||
|
||||
# AS_FEATURE_LEVEL=10 means "Check SIMD capabilities of the target system at runtime and use up to AVX512 if available".
# HAVE_AS_KNOWS_AVX512 means we rely on the assembler knowing the AVX512 instruction set.
|
||||
target_compile_options(isal_asm PRIVATE "-I${QPL_SRC_DIR}/isal/include/"
|
||||
PRIVATE "-I${QPL_SRC_DIR}/isal/igzip/"
|
||||
PRIVATE "-I${QPL_SRC_DIR}/isal/crc/"
|
||||
PRIVATE "-DHAVE_AS_KNOWS_AVX512"
|
||||
PRIVATE "-DAS_FEATURE_LEVEL=10"
|
||||
PRIVATE "-DQPL_LIB")
|
||||
|
||||
# We must remove "-fno-sanitize=undefined" from COMPILE_OPTIONS here.
# Otherwise the nasm compiler would fail to proceed because it does not recognize "-fno-sanitize=undefined".
|
||||
if (SANITIZE STREQUAL "undefined")
|
||||
get_target_property(target_options isal_asm COMPILE_OPTIONS)
|
||||
list(REMOVE_ITEM target_options "-fno-sanitize=undefined")
|
||||
set_property(TARGET isal_asm PROPERTY COMPILE_OPTIONS ${target_options})
|
||||
endif()
|
||||
|
||||
target_compile_definitions(isal PUBLIC
|
||||
QPL_LIB
|
||||
NDEBUG)
|
||||
|
||||
# [SUBDIR]core-sw
|
||||
# Create a set of libraries for the SW fallback, one per supported platform, implemented with AVX512 and non-AVX512 instructions respectively.
# The upper-level QPL API will check the SIMD capabilities of the target system at runtime and decide whether to call the AVX512 or the non-AVX512 function.
# Hence, we don't need an ENABLE_AVX512 CMake switch here.
|
||||
|
||||
get_list_of_supported_optimizations(PLATFORMS_LIST)
|
||||
|
||||
foreach(PLATFORM_ID IN LISTS PLATFORMS_LIST)
|
||||
# Find Core Sources
|
||||
file(GLOB SOURCES
|
||||
${QPL_SRC_DIR}/core-sw/src/checksums/*.c
|
||||
${QPL_SRC_DIR}/core-sw/src/filtering/*.c
|
||||
${QPL_SRC_DIR}/core-sw/src/other/*.c
|
||||
${QPL_SRC_DIR}/core-sw/src/compression/*.c)
|
||||
|
||||
file(GLOB DATA_SOURCES
|
||||
${QPL_SRC_DIR}/core-sw/src/data/*.c)
|
||||
|
||||
# Create library
|
||||
add_library(qplcore_${PLATFORM_ID} OBJECT ${SOURCES})
|
||||
|
||||
set_property(GLOBAL APPEND PROPERTY QPL_LIB_DEPS
|
||||
$<TARGET_OBJECTS:qplcore_${PLATFORM_ID}>)
|
||||
|
||||
target_include_directories(qplcore_${PLATFORM_ID}
|
||||
PUBLIC $<BUILD_INTERFACE:${QPL_SRC_DIR}/core-sw>
|
||||
PUBLIC $<BUILD_INTERFACE:${QPL_SRC_DIR}/core-sw/include>
|
||||
PUBLIC $<BUILD_INTERFACE:${QPL_SRC_DIR}/core-sw/src/include>
|
||||
PUBLIC $<BUILD_INTERFACE:${QPL_SRC_DIR}/core-sw/src/compression/include>
|
||||
PRIVATE $<TARGET_PROPERTY:isal,INTERFACE_INCLUDE_DIRECTORIES>)
|
||||
|
||||
# Set specific compiler options and/or definitions based on a platform
|
||||
if (${PLATFORM_ID} MATCHES "avx512")
|
||||
target_compile_definitions(qplcore_${PLATFORM_ID} PRIVATE PLATFORM=2)
|
||||
target_compile_options(qplcore_${PLATFORM_ID} PRIVATE -march=skylake-avx512)
|
||||
else() # Create default px library
|
||||
target_compile_definitions(qplcore_${PLATFORM_ID} PRIVATE PLATFORM=0)
|
||||
endif()
|
||||
|
||||
target_link_libraries(qplcore_${PLATFORM_ID} isal)
|
||||
endforeach()
|
||||
|
||||
#
|
||||
# Create dispatcher between platforms and auto-generated wrappers
|
||||
#
|
||||
file(GLOB SW_DISPATCHER_SOURCES ${QPL_SRC_DIR}/core-sw/dispatcher/*.cpp)
|
||||
|
||||
add_library(qplcore_sw_dispatcher OBJECT ${SW_DISPATCHER_SOURCES})
|
||||
|
||||
set_property(GLOBAL APPEND PROPERTY QPL_LIB_DEPS
|
||||
$<TARGET_OBJECTS:qplcore_sw_dispatcher>)
|
||||
|
||||
target_include_directories(qplcore_sw_dispatcher
|
||||
PUBLIC $<BUILD_INTERFACE:${QPL_SRC_DIR}/core-sw/dispatcher>)
|
||||
|
||||
# Generate kernel wrappers
|
||||
generate_unpack_kernel_arrays(${QPL_BINARY_DIR} "${PLATFORMS_LIST}")
|
||||
|
||||
foreach(PLATFORM_ID IN LISTS PLATFORMS_LIST)
|
||||
file(GLOB GENERATED_${PLATFORM_ID}_TABLES_SRC ${QPL_BINARY_DIR}/generated/${PLATFORM_ID}_*.cpp)
|
||||
|
||||
target_sources(qplcore_sw_dispatcher PRIVATE ${GENERATED_${PLATFORM_ID}_TABLES_SRC})
|
||||
|
||||
# Set specific compiler options and/or definitions based on a platform
|
||||
if (${PLATFORM_ID} MATCHES "avx512")
|
||||
set_source_files_properties(${GENERATED_${PLATFORM_ID}_TABLES_SRC} PROPERTIES COMPILE_DEFINITIONS PLATFORM=2)
|
||||
else()
|
||||
set_source_files_properties(${GENERATED_${PLATFORM_ID}_TABLES_SRC} PROPERTIES COMPILE_DEFINITIONS PLATFORM=0)
|
||||
endif()
|
||||
|
||||
target_include_directories(qplcore_sw_dispatcher
|
||||
PUBLIC $<TARGET_PROPERTY:qplcore_${PLATFORM_ID},INTERFACE_INCLUDE_DIRECTORIES>)
|
||||
endforeach()
|
||||
|
||||
set_target_properties(qplcore_sw_dispatcher PROPERTIES CXX_STANDARD 17)
|
||||
|
||||
# Workaround for build compatibility with the ISA-L codebase
|
||||
target_compile_definitions(qplcore_sw_dispatcher PUBLIC -DQPL_LIB)
|
||||
|
||||
target_compile_options(qplcore_sw_dispatcher
|
||||
PRIVATE ${QPL_LINUX_TOOLCHAIN_CPP_EMBEDDED_FLAGS})
|
||||
|
||||
# [SUBDIR]core-iaa
|
||||
file(GLOB HW_PATH_SRC ${QPL_SRC_DIR}/core-iaa/sources/aecs/*.c
|
||||
${QPL_SRC_DIR}/core-iaa/sources/driver_loader/*.c
|
||||
${QPL_SRC_DIR}/core-iaa/sources/descriptors/*.c
|
||||
${QPL_SRC_DIR}/core-iaa/sources/*.c)
|
||||
|
||||
# Create library
|
||||
add_library(core_iaa OBJECT ${HW_PATH_SRC})
|
||||
|
||||
set_property(GLOBAL APPEND PROPERTY QPL_LIB_DEPS
|
||||
$<TARGET_OBJECTS:core_iaa>)
|
||||
|
||||
target_include_directories(core_iaa
|
||||
PRIVATE ${UUID_DIR}
|
||||
PUBLIC $<BUILD_INTERFACE:${QPL_SRC_DIR}/core-iaa/include>
|
||||
PUBLIC $<BUILD_INTERFACE:${QPL_SRC_DIR}/core-iaa/sources/include>
|
||||
PRIVATE $<BUILD_INTERFACE:${QPL_PROJECT_DIR}/include> # status.h in own_checkers.h
|
||||
PRIVATE $<TARGET_PROPERTY:qpl_c_api,INTERFACE_INCLUDE_DIRECTORIES> # for own_checkers.h
|
||||
PRIVATE $<TARGET_PROPERTY:qplcore_sw_dispatcher,INTERFACE_INCLUDE_DIRECTORIES>)
|
||||
|
||||
target_compile_features(core_iaa PRIVATE c_std_11)
|
||||
|
||||
target_compile_definitions(core_iaa PRIVATE QPL_BADARG_CHECK
|
||||
PRIVATE $<$<BOOL:${LOG_HW_INIT}>:LOG_HW_INIT>
|
||||
PRIVATE $<$<BOOL:${DYNAMIC_LOADING_LIBACCEL_CONFIG}>:DYNAMIC_LOADING_LIBACCEL_CONFIG>)
|
||||
|
||||
# [SUBDIR]middle-layer
|
||||
file(GLOB MIDDLE_LAYER_SRC
|
||||
${QPL_SRC_DIR}/middle-layer/accelerator/*.cpp
|
||||
${QPL_SRC_DIR}/middle-layer/analytics/*.cpp
|
||||
${QPL_SRC_DIR}/middle-layer/common/*.cpp
|
||||
${QPL_SRC_DIR}/middle-layer/compression/*.cpp
|
||||
${QPL_SRC_DIR}/middle-layer/compression/*/*.cpp
|
||||
${QPL_SRC_DIR}/middle-layer/compression/*/*/*.cpp
|
||||
${QPL_SRC_DIR}/middle-layer/dispatcher/*.cpp
|
||||
${QPL_SRC_DIR}/middle-layer/other/*.cpp
|
||||
${QPL_SRC_DIR}/middle-layer/util/*.cpp)
|
||||
|
||||
add_library(middle_layer_lib OBJECT
|
||||
${MIDDLE_LAYER_SRC})
|
||||
|
||||
set_property(GLOBAL APPEND PROPERTY QPL_LIB_DEPS
|
||||
$<TARGET_OBJECTS:middle_layer_lib>)
|
||||
|
||||
target_compile_options(middle_layer_lib
|
||||
PRIVATE $<$<C_COMPILER_ID:GNU,Clang>:$<$<CONFIG:Release>:-O3;-U_FORTIFY_SOURCE;-D_FORTIFY_SOURCE=2>>
|
||||
PRIVATE ${QPL_LINUX_TOOLCHAIN_CPP_EMBEDDED_FLAGS})
|
||||
|
||||
target_compile_definitions(middle_layer_lib
|
||||
PUBLIC QPL_VERSION="${QPL_VERSION}"
|
||||
PUBLIC $<$<BOOL:${LOG_HW_INIT}>:LOG_HW_INIT>
|
||||
PUBLIC $<$<BOOL:${EFFICIENT_WAIT}>:QPL_EFFICIENT_WAIT>
|
||||
PUBLIC QPL_BADARG_CHECK
|
||||
PUBLIC $<$<BOOL:${DYNAMIC_LOADING_LIBACCEL_CONFIG}>:DYNAMIC_LOADING_LIBACCEL_CONFIG>)
|
||||
|
||||
set_target_properties(middle_layer_lib PROPERTIES CXX_STANDARD 17)
|
||||
|
||||
target_include_directories(middle_layer_lib
|
||||
PRIVATE ${UUID_DIR}
|
||||
PUBLIC $<BUILD_INTERFACE:${QPL_SRC_DIR}/middle-layer>
|
||||
PUBLIC $<TARGET_PROPERTY:_qpl,INTERFACE_INCLUDE_DIRECTORIES>
|
||||
PRIVATE $<TARGET_PROPERTY:qpl_c_api,INTERFACE_INCLUDE_DIRECTORIES>
|
||||
PUBLIC $<TARGET_PROPERTY:qplcore_sw_dispatcher,INTERFACE_INCLUDE_DIRECTORIES>
|
||||
PUBLIC $<TARGET_PROPERTY:isal,INTERFACE_INCLUDE_DIRECTORIES>
|
||||
PUBLIC $<TARGET_PROPERTY:core_iaa,INTERFACE_INCLUDE_DIRECTORIES>)
|
||||
|
||||
target_compile_definitions(middle_layer_lib PUBLIC -DQPL_LIB)
|
||||
|
||||
# [SUBDIR]c_api
|
||||
file(GLOB QPL_C_API_SRC
|
||||
${QPL_SRC_DIR}/c_api/compression_operations/*.c
|
||||
${QPL_SRC_DIR}/c_api/compression_operations/*.cpp
|
||||
${QPL_SRC_DIR}/c_api/filter_operations/*.cpp
|
||||
${QPL_SRC_DIR}/c_api/legacy_hw_path/*.c
|
||||
${QPL_SRC_DIR}/c_api/legacy_hw_path/*.cpp
|
||||
${QPL_SRC_DIR}/c_api/other_operations/*.cpp
|
||||
${QPL_SRC_DIR}/c_api/serialization/*.cpp
|
||||
${QPL_SRC_DIR}/c_api/*.cpp)
|
||||
|
||||
add_library(qpl_c_api OBJECT ${QPL_C_API_SRC})
|
||||
|
||||
target_include_directories(qpl_c_api
|
||||
PUBLIC $<BUILD_INTERFACE:${QPL_SRC_DIR}/c_api/>
|
||||
PUBLIC $<BUILD_INTERFACE:${QPL_SRC_DIR}/include/> $<INSTALL_INTERFACE:include>
|
||||
PRIVATE $<TARGET_PROPERTY:middle_layer_lib,INTERFACE_INCLUDE_DIRECTORIES>)
|
||||
|
||||
set_target_properties(qpl_c_api PROPERTIES
|
||||
$<$<C_COMPILER_ID:GNU,Clang>:C_STANDARD 17
|
||||
CXX_STANDARD 17)
|
||||
|
||||
target_compile_options(qpl_c_api
|
||||
PRIVATE $<$<C_COMPILER_ID:GNU,Clang>:$<$<CONFIG:Release>:-O3;-U_FORTIFY_SOURCE;-D_FORTIFY_SOURCE=2>>
|
||||
PRIVATE $<$<COMPILE_LANG_AND_ID:CXX,GNU,Clang>:${QPL_LINUX_TOOLCHAIN_CPP_EMBEDDED_FLAGS}>)
|
||||
|
||||
target_compile_definitions(qpl_c_api
|
||||
PUBLIC -DQPL_BADARG_CHECK # own_checkers.h
|
||||
PUBLIC -DQPL_LIB # needed for middle_layer_lib
|
||||
PUBLIC $<$<BOOL:${LOG_HW_INIT}>:LOG_HW_INIT>) # needed for middle_layer_lib
|
||||
|
||||
set_property(GLOBAL APPEND PROPERTY QPL_LIB_DEPS
|
||||
$<TARGET_OBJECTS:qpl_c_api>)
|
||||
|
||||
# Final _qpl target
|
||||
|
||||
get_property(LIB_DEPS GLOBAL PROPERTY QPL_LIB_DEPS)
|
||||
|
||||
add_library(_qpl STATIC ${LIB_DEPS})
|
||||
|
||||
target_include_directories(_qpl
|
||||
PUBLIC $<BUILD_INTERFACE:${QPL_PROJECT_DIR}/include/> $<INSTALL_INTERFACE:include>)
|
||||
|
||||
target_link_libraries(_qpl
|
||||
PRIVATE ch_contrib::accel-config)
|
||||
|
||||
target_include_directories(_qpl SYSTEM BEFORE
|
||||
PUBLIC "${QPL_PROJECT_DIR}/include"
|
||||
PUBLIC ${UUID_DIR})
|
||||
|
||||
add_library (ch_contrib::qpl ALIAS _qpl)
|
@ -1,4 +0,0 @@
|
||||
#ifndef _QPL_UUID_UUID_H
|
||||
#define _QPL_UUID_UUID_H
|
||||
typedef unsigned char uuid_t[16];
|
||||
#endif /* _QPL_UUID_UUID_H */
|
@ -23,6 +23,7 @@ charset-normalizer==3.3.2
|
||||
click==8.1.7
|
||||
confluent-kafka==2.3.0
|
||||
cryptography==42.0.0
|
||||
datacompy==0.7.3
|
||||
dbus-python==1.2.18
|
||||
delta-spark==2.3.0
|
||||
deltalake==0.16.0
|
||||
@ -60,6 +61,7 @@ oauthlib==3.2.0
|
||||
packaging==24.0
|
||||
paramiko==3.4.0
|
||||
pika==1.2.0
|
||||
pandas==2.2.3
|
||||
pip==24.1.1
|
||||
pluggy==1.5.0
|
||||
protobuf==4.25.2
|
||||
|
@ -16,6 +16,7 @@ RUN apt-get update && env DEBIAN_FRONTEND=noninteractive apt-get install --yes \
|
||||
libxml2-utils \
|
||||
locales \
|
||||
moreutils \
|
||||
ripgrep \
|
||||
python3-pip \
|
||||
yamllint \
|
||||
zstd \
|
||||
|
@ -1,327 +0,0 @@
|
||||
---
|
||||
slug: /en/development/building_and_benchmarking_deflate_qpl
|
||||
sidebar_position: 73
|
||||
sidebar_label: Building and Benchmarking DEFLATE_QPL
|
||||
description: How to build ClickHouse and run benchmarks with the DEFLATE_QPL codec
|
||||
---
|
||||
|
||||
# Build ClickHouse with DEFLATE_QPL
|
||||
|
||||
- Make sure your host machine meets the QPL required [prerequisites](https://intel.github.io/qpl/documentation/get_started_docs/installation.html#prerequisites)
- deflate_qpl is enabled by default during the cmake build. In case you accidentally changed it, please double-check the build flag: ENABLE_QPL=1

- For generic requirements, please refer to the ClickHouse generic [build instructions](/docs/en/development/build.md)
|
||||
|
||||
# Run Benchmark with DEFLATE_QPL
|
||||
|
||||
## Files list
|
||||
|
||||
The folder `benchmark_sample` under [qpl-cmake](https://github.com/ClickHouse/ClickHouse/tree/master/contrib/qpl-cmake) gives an example of running the benchmark with Python scripts:

`client_scripts` contains Python scripts for running a typical benchmark, for example:
- `client_stressing_test.py`: The Python script for query stress testing with [1~4] server instances.
- `queries_ssb.sql`: The file listing all queries for the [Star Schema Benchmark](https://clickhouse.com/docs/en/getting-started/example-datasets/star-schema/)
- `allin1_ssb.sh`: This shell script executes the whole benchmark workflow automatically.

`database_files` stores the database files for the lz4/deflate/zstd codecs.
|
||||
|
||||
## Run benchmark automatically for Star Schema:
|
||||
|
||||
``` bash
|
||||
$ cd ./benchmark_sample/client_scripts
|
||||
$ sh run_ssb.sh
|
||||
```
|
||||
|
||||
After it completes, please check all the results in the folder `./output/`.

If you run into failures, please run the benchmark manually as described in the sections below.
|
||||
|
||||
## Definition
|
||||
|
||||
[CLICKHOUSE_EXE] means the path to the ClickHouse executable program.
|
||||
|
||||
## Environment
|
||||
|
||||
- CPU: Sapphire Rapids
- For OS requirements, refer to [System Requirements for QPL](https://intel.github.io/qpl/documentation/get_started_docs/installation.html#system-requirements)
- For IAA setup, refer to [Accelerator Configuration](https://intel.github.io/qpl/documentation/get_started_docs/installation.html#accelerator-configuration)
- Install Python modules (a short usage sketch follows the command below):
|
||||
|
||||
``` bash
|
||||
pip3 install clickhouse_driver numpy
|
||||
```
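
The sketch below only illustrates what these modules are used for: a client that runs each query once over the native protocol and reports per-query timings. The bundled `client_stressing_test.py` may differ; the query-splitting logic and the port are assumptions.

``` python
# Hypothetical sketch of a benchmark client built on clickhouse_driver.
# Assumptions: queries_ssb.sql holds ';'-separated queries, the server listens on 9000.
import time
from clickhouse_driver import Client

def run_queries(host: str, port: int, queries_file: str) -> None:
    client = Client(host=host, port=port)
    with open(queries_file) as f:
        queries = [q.strip() for q in f.read().split(";") if q.strip()]
    for i, query in enumerate(queries, start=1):
        started = time.perf_counter()
        client.execute(query)
        elapsed = time.perf_counter() - started
        print(f"Q{i}: {elapsed:.3f}s, QPS={1.0 / elapsed:.2f}")

if __name__ == "__main__":
    run_queries("localhost", 9000, "queries_ssb.sql")
```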
|
||||
|
||||
[Self-check for IAA]
|
||||
|
||||
``` bash
|
||||
$ accel-config list | grep -P 'iax|state'
|
||||
```
|
||||
|
||||
The expected output looks like this:
|
||||
``` bash
|
||||
"dev":"iax1",
|
||||
"state":"enabled",
|
||||
"state":"enabled",
|
||||
```
|
||||
|
||||
If there is no output, it means IAA is not ready to work. Please check the IAA setup again.
|
||||
|
||||
## Generate raw data
|
||||
|
||||
``` bash
|
||||
$ cd ./benchmark_sample
|
||||
$ mkdir rawdata_dir && cd rawdata_dir
|
||||
```
|
||||
|
||||
Use [`dbgen`](https://clickhouse.com/docs/en/getting-started/example-datasets/star-schema) to generate 100 million rows of data with the parameter:
-s 20

Files like `*.tbl` are expected to be output under `./benchmark_sample/rawdata_dir/ssb-dbgen`.
|
||||
|
||||
## Database setup
|
||||
|
||||
Set up database with LZ4 codec
|
||||
|
||||
``` bash
|
||||
$ cd ./database_dir/lz4
|
||||
$ [CLICKHOUSE_EXE] server -C config_lz4.xml >&/dev/null&
|
||||
$ [CLICKHOUSE_EXE] client
|
||||
```
|
||||
|
||||
Here you should see the message `Connected to ClickHouse server` in the console, which means the client successfully set up a connection with the server.

Complete the three steps below, as described in the [Star Schema Benchmark](https://clickhouse.com/docs/en/getting-started/example-datasets/star-schema):
- Creating tables in ClickHouse
- Inserting data. Use `./benchmark_sample/rawdata_dir/ssb-dbgen/*.tbl` as input data.
- Converting “star schema” to de-normalized “flat schema”
|
||||
|
||||
Set up database with IAA Deflate codec
|
||||
|
||||
``` bash
|
||||
$ cd ./database_dir/deflate
|
||||
$ [CLICKHOUSE_EXE] server -C config_deflate.xml >&/dev/null&
|
||||
$ [CLICKHOUSE_EXE] client
|
||||
```
|
||||
Complete the same three steps as for LZ4 above.
|
||||
|
||||
Set up database with ZSTD codec
|
||||
|
||||
``` bash
|
||||
$ cd ./database_dir/zstd
|
||||
$ [CLICKHOUSE_EXE] server -C config_zstd.xml >&/dev/null&
|
||||
$ [CLICKHOUSE_EXE] client
|
||||
```
|
||||
Complete the same three steps as for LZ4 above.
|
||||
|
||||
[Self-check]
For each codec (lz4/zstd/deflate), please execute the query below to make sure the databases were created successfully:
|
||||
```sql
|
||||
select count() from lineorder_flat
|
||||
```
|
||||
You are expected to see the following output:
|
||||
```sql
|
||||
┌───count()─┐
|
||||
│ 119994608 │
|
||||
└───────────┘
|
||||
```
|
||||
[Self-check for IAA Deflate codec]
|
||||
|
||||
The first time you execute an insertion or query from the client, the ClickHouse server console is expected to print this log:
|
||||
```text
|
||||
Hardware-assisted DeflateQpl codec is ready!
|
||||
```
|
||||
If you never see this, but instead see the log below:
|
||||
```text
|
||||
Initialization of hardware-assisted DeflateQpl codec failed
|
||||
```
|
||||
That means the IAA devices are not ready, and you need to check the IAA setup again.
|
||||
|
||||
## Benchmark with single instance
|
||||
|
||||
- Before starting the benchmark, please disable C6 and set the CPU frequency governor to `performance`:
|
||||
|
||||
``` bash
|
||||
$ cpupower idle-set -d 3
|
||||
$ cpupower frequency-set -g performance
|
||||
```
|
||||
|
||||
- To eliminate the impact of cross-socket memory access, we use `numactl` to bind the server to one socket and the client to another socket.
- Single instance means a single server connected to a single client.
|
||||
|
||||
Now run the benchmark for LZ4/Deflate/ZSTD respectively:
|
||||
|
||||
LZ4:
|
||||
|
||||
``` bash
|
||||
$ cd ./database_dir/lz4
|
||||
$ numactl -m 0 -N 0 [CLICKHOUSE_EXE] server -C config_lz4.xml >&/dev/null&
|
||||
$ cd ./client_scripts
|
||||
$ numactl -m 1 -N 1 python3 client_stressing_test.py queries_ssb.sql 1 > lz4.log
|
||||
```
|
||||
|
||||
IAA deflate:
|
||||
|
||||
``` bash
|
||||
$ cd ./database_dir/deflate
|
||||
$ numactl -m 0 -N 0 [CLICKHOUSE_EXE] server -C config_deflate.xml >&/dev/null&
|
||||
$ cd ./client_scripts
|
||||
$ numactl -m 1 -N 1 python3 client_stressing_test.py queries_ssb.sql 1 > deflate.log
|
||||
```
|
||||
|
||||
ZSTD:
|
||||
|
||||
``` bash
|
||||
$ cd ./database_dir/zstd
|
||||
$ numactl -m 0 -N 0 [CLICKHOUSE_EXE] server -C config_zstd.xml >&/dev/null&
|
||||
$ cd ./client_scripts
|
||||
$ numactl -m 1 -N 1 python3 client_stressing_test.py queries_ssb.sql 1 > zstd.log
|
||||
```
|
||||
|
||||
Now three logs should be output as expected:
|
||||
```text
|
||||
lz4.log
|
||||
deflate.log
|
||||
zstd.log
|
||||
```
|
||||
|
||||
How to check performance metrics:

We focus on QPS. Please search for the keyword `QPS_Final` and collect the statistics, for example with the helper sketched below.
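
A small helper along the following lines could collect those values; the exact log format written by `client_stressing_test.py` is an assumption here.

``` python
# Hypothetical sketch: pull QPS_Final values out of the benchmark logs.
import re
import sys

def collect_qps(log_paths):
    pattern = re.compile(r"QPS_Final[^0-9]*([0-9]+(?:\.[0-9]+)?)")
    for path in log_paths:
        with open(path) as f:
            values = [float(m.group(1)) for m in pattern.finditer(f.read())]
        print(f"{path}: {values}")

if __name__ == "__main__":
    collect_qps(sys.argv[1:] or ["lz4.log", "deflate.log", "zstd.log"])
```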
|
||||
|
||||
## Benchmark with multi-instances
|
||||
|
||||
- To reduce the impact of memory bandwidth limits with too many threads, we recommend running the benchmark with multiple instances.
- Multi-instance means multiple (2 or 4) servers, each connected to its own client.
- The cores of one socket need to be divided equally and assigned to the servers respectively.
- For multiple instances, you must create a new folder for each codec and insert the dataset following steps similar to the single instance.
|
||||
|
||||
There are 2 differences:
- On the client side, you need to use the port assigned to the instance during table creation and data insertion.
- On the server side, you need to launch the server with the specific XML config file in which the port has been assigned. All customized XML config files for multiple instances are provided under `./server_config`.

Here we assume there are 60 cores per socket and take 2 instances as an example.
Launch the server for the first instance.
|
||||
LZ4:
|
||||
|
||||
``` bash
|
||||
$ cd ./database_dir/lz4
|
||||
$ numactl -C 0-29,120-149 [CLICKHOUSE_EXE] server -C config_lz4.xml >&/dev/null&
|
||||
```
|
||||
|
||||
ZSTD:
|
||||
|
||||
``` bash
|
||||
$ cd ./database_dir/zstd
|
||||
$ numactl -C 0-29,120-149 [CLICKHOUSE_EXE] server -C config_zstd.xml >&/dev/null&
|
||||
```
|
||||
|
||||
IAA Deflate:
|
||||
|
||||
``` bash
|
||||
$ cd ./database_dir/deflate
|
||||
$ numactl -C 0-29,120-149 [CLICKHOUSE_EXE] server -C config_deflate.xml >&/dev/null&
|
||||
```
|
||||
|
||||
[Launch server for second instance]
|
||||
|
||||
LZ4:
|
||||
|
||||
``` bash
|
||||
$ cd ./database_dir && mkdir lz4_s2 && cd lz4_s2
|
||||
$ cp ../../server_config/config_lz4_s2.xml ./
|
||||
$ numactl -C 30-59,150-179 [CLICKHOUSE_EXE] server -C config_lz4_s2.xml >&/dev/null&
|
||||
```
|
||||
|
||||
ZSTD:
|
||||
|
||||
``` bash
|
||||
$ cd ./database_dir && mkdir zstd_s2 && cd zstd_s2
|
||||
$ cp ../../server_config/config_zstd_s2.xml ./
|
||||
$ numactl -C 30-59,150-179 [CLICKHOUSE_EXE] server -C config_zstd_s2.xml >&/dev/null&
|
||||
```
|
||||
|
||||
IAA Deflate:
|
||||
|
||||
``` bash
|
||||
$ cd ./database_dir && mkdir deflate_s2 && cd deflate_s2
|
||||
$ cp ../../server_config/config_deflate_s2.xml ./
|
||||
$ numactl -C 30-59,150-179 [CLICKHOUSE_EXE] server -C config_deflate_s2.xml >&/dev/null&
|
||||
```
|
||||
|
||||
Creating tables and inserting data for the second instance
|
||||
|
||||
Creating tables:
|
||||
|
||||
``` bash
|
||||
$ [CLICKHOUSE_EXE] client -m --port=9001
|
||||
```
|
||||
|
||||
Inserting data:
|
||||
|
||||
``` bash
|
||||
$ [CLICKHOUSE_EXE] client --query "INSERT INTO [TBL_FILE_NAME] FORMAT CSV" < [TBL_FILE_NAME].tbl --port=9001
|
||||
```
|
||||
|
||||
- [TBL_FILE_NAME] represents the name of a file matching the pattern `*.tbl` under `./benchmark_sample/rawdata_dir/ssb-dbgen`.
- `--port=9001` is the port assigned to the server instance, which is also defined in config_lz4_s2.xml/config_zstd_s2.xml/config_deflate_s2.xml. For even more instances, replace it with 9002/9003, which stand for the s3/s4 instances respectively. If you don't assign it, the port defaults to 9000, which is already used by the first instance. A sketch of looping over all `.tbl` files is shown below.
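
As a rough sketch (assuming each `*.tbl` file name matches its target table name, as produced by `ssb-dbgen`), loading all tables into the second instance could be scripted like this:

``` python
# Hypothetical sketch: insert every .tbl file into the second instance (port 9001)
# by shelling out to the clickhouse client, mirroring the manual command above.
import glob
import os
import subprocess

CLICKHOUSE_EXE = "clickhouse"  # replace with your actual executable path

for tbl in sorted(glob.glob("./benchmark_sample/rawdata_dir/ssb-dbgen/*.tbl")):
    table = os.path.splitext(os.path.basename(tbl))[0]
    with open(tbl, "rb") as data:
        subprocess.run(
            [CLICKHOUSE_EXE, "client", "--port=9001",
             "--query", f"INSERT INTO {table} FORMAT CSV"],
            stdin=data,
            check=True,
        )
```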
|
||||
|
||||
Benchmarking with 2 instances
|
||||
|
||||
LZ4:
|
||||
|
||||
``` bash
|
||||
$ cd ./database_dir/lz4
|
||||
$ numactl -C 0-29,120-149 [CLICKHOUSE_EXE] server -C config_lz4.xml >&/dev/null&
|
||||
$ cd ./database_dir/lz4_s2
|
||||
$ numactl -C 30-59,150-179 [CLICKHOUSE_EXE] server -C config_lz4_s2.xml >&/dev/null&
|
||||
$ cd ./client_scripts
|
||||
$ numactl -m 1 -N 1 python3 client_stressing_test.py queries_ssb.sql 2 > lz4_2insts.log
|
||||
```
|
||||
|
||||
ZSTD:
|
||||
|
||||
``` bash
|
||||
$ cd ./database_dir/zstd
|
||||
$ numactl -C 0-29,120-149 [CLICKHOUSE_EXE] server -C config_zstd.xml >&/dev/null&
|
||||
$ cd ./database_dir/zstd_s2
|
||||
$ numactl -C 30-59,150-179 [CLICKHOUSE_EXE] server -C config_zstd_s2.xml >&/dev/null&
|
||||
$ cd ./client_scripts
|
||||
$ numactl -m 1 -N 1 python3 client_stressing_test.py queries_ssb.sql 2 > zstd_2insts.log
|
||||
```
|
||||
|
||||
IAA deflate
|
||||
|
||||
``` bash
|
||||
$ cd ./database_dir/deflate
|
||||
$ numactl -C 0-29,120-149 [CLICKHOUSE_EXE] server -C config_deflate.xml >&/dev/null&
|
||||
$ cd ./database_dir/deflate_s2
|
||||
$ numactl -C 30-59,150-179 [CLICKHOUSE_EXE] server -C config_deflate_s2.xml >&/dev/null&
|
||||
$ cd ./client_scripts
|
||||
$ numactl -m 1 -N 1 python3 client_stressing_test.py queries_ssb.sql 2 > deflate_2insts.log
|
||||
```
|
||||
|
||||
Here the last argument `2` of client_stressing_test.py stands for the number of instances. For more instances, replace it with 3 or 4. The script supports up to 4 instances.
|
||||
|
||||
Now three logs should be output as expected:
|
||||
|
||||
``` text
|
||||
lz4_2insts.log
|
||||
deflate_2insts.log
|
||||
zstd_2insts.log
|
||||
```
|
||||
How to check performance metrics:

We focus on QPS. Please search for the keyword `QPS_Final` and collect the statistics.

The benchmark setup for 4 instances is similar to the 2-instance setup above.
We recommend using the 2-instance benchmark data as the final report for review.
|
||||
|
||||
## Tips
|
||||
|
||||
Each time before launching a new ClickHouse server, please make sure no background clickhouse process is running; check and kill the old one:
|
||||
|
||||
``` bash
|
||||
$ ps -aux| grep clickhouse
|
||||
$ kill -9 [PID]
|
||||
```
|
||||
By comparing the query list in ./client_scripts/queries_ssb.sql with the official [Star Schema Benchmark](https://clickhouse.com/docs/en/getting-started/example-datasets/star-schema), you will find that 3 queries are not included: Q1.2/Q1.3/Q3.4. This is because CPU utilization is very low (<10%) for these queries, which means they cannot demonstrate performance differences.
|
@ -18,7 +18,7 @@ SELECT library_name, license_type, license_path FROM system.licenses ORDER BY li
|
||||
Note that the listed libraries are the ones located in the `contrib/` directory of the ClickHouse repository.
|
||||
Depending on the build options, some of the libraries may not have been compiled, and, as a result, their functionality may not be available at runtime.
|
||||
|
||||
[Example](https://play.clickhouse.com/play?user=play#U0VMRUNUIGxpYnJhcnlfbmFtZSwgbGljZW5zZV90eXBlLCBsaWNlbnNlX3BhdGggRlJPTSBzeXN0ZW0ubGljZW5zZXMgT1JERVIgQlkgbGlicmFyeV9uYW1lIENPTExBVEUgJ2VuJw==)
|
||||
[Example](https://sql.clickhouse.com?query_id=478GCPU7LRTSZJBNY3EJT3)
|
||||
|
||||
## Adding and maintaining third-party libraries
|
||||
|
||||
|
@ -36,6 +36,7 @@ SETTINGS
|
||||
## Settings {#settings}
|
||||
|
||||
The set of supported settings is the same as for the `S3Queue` table engine, but without the `s3queue_` prefix. See the [full list of settings](../../../engines/table-engines/integrations/s3queue.md#settings).
To get a list of settings configured for the table, use the `system.s3_queue_settings` table. Available from `24.10`.
|
||||
|
||||
## Description {#description}
|
||||
|
||||
|
@ -69,6 +69,8 @@ SETTINGS
|
||||
|
||||
## Settings {#settings}
|
||||
|
||||
To get a list of settings configured for the table, use the `system.s3_queue_settings` table. Available from `24.10`.
|
||||
|
||||
### mode {#mode}
|
||||
|
||||
Possible values:
|
||||
|
@ -37,7 +37,7 @@ For a description of request parameters, see [request description](../../../sql-
|
||||
|
||||
**Query clauses**
|
||||
|
||||
When creating an `AggregatingMergeTree` table the same [clauses](../../../engines/table-engines/mergetree-family/mergetree.md) are required, as when creating a `MergeTree` table.
|
||||
When creating an `AggregatingMergeTree` table, the same [clauses](../../../engines/table-engines/mergetree-family/mergetree.md) are required as when creating a `MergeTree` table.
|
||||
|
||||
<details markdown="1">
|
||||
|
||||
@ -62,19 +62,19 @@ All of the parameters have the same meaning as in `MergeTree`.
|
||||
## SELECT and INSERT {#select-and-insert}
|
||||
|
||||
To insert data, use [INSERT SELECT](../../../sql-reference/statements/insert-into.md) query with aggregate -State- functions.
|
||||
When selecting data from `AggregatingMergeTree` table, use `GROUP BY` clause and the same aggregate functions as when inserting data, but using `-Merge` suffix.
|
||||
When selecting data from `AggregatingMergeTree` table, use `GROUP BY` clause and the same aggregate functions as when inserting data, but using the `-Merge` suffix.
|
||||
|
||||
In the results of `SELECT` query, the values of `AggregateFunction` type have implementation-specific binary representation for all of the ClickHouse output formats. If dump data into, for example, `TabSeparated` format with `SELECT` query then this dump can be loaded back using `INSERT` query.
|
||||
In the results of `SELECT` query, the values of `AggregateFunction` type have implementation-specific binary representation for all of the ClickHouse output formats. For example, if you dump data into `TabSeparated` format with a `SELECT` query, then this dump can be loaded back using an `INSERT` query.
|
||||
|
||||
## Example of an Aggregated Materialized View {#example-of-an-aggregated-materialized-view}
|
||||
|
||||
The following examples assumes that you have a database named `test` so make sure you create that if it doesn't already exist:
|
||||
The following example assumes that you have a database named `test`, so create it if it doesn't already exist:
|
||||
|
||||
```sql
|
||||
CREATE DATABASE test;
|
||||
```
|
||||
|
||||
We will create the table `test.visits` that contain the raw data:
|
||||
Now create the table `test.visits` that contains the raw data:
|
||||
|
||||
``` sql
|
||||
CREATE TABLE test.visits
|
||||
@ -86,9 +86,9 @@ CREATE TABLE test.visits
|
||||
) ENGINE = MergeTree ORDER BY (StartDate, CounterID);
|
||||
```
|
||||
|
||||
Next, we need to create an `AggregatingMergeTree` table that will store `AggregationFunction`s that keep track of the total number of visits and the number of unique users.
|
||||
Next, you need an `AggregatingMergeTree` table that will store `AggregationFunction`s that keep track of the total number of visits and the number of unique users.
|
||||
|
||||
`AggregatingMergeTree` materialized view that watches the `test.visits` table, and use the `AggregateFunction` type:
|
||||
Create an `AggregatingMergeTree` materialized view that watches the `test.visits` table, and uses the `AggregateFunction` type:
|
||||
|
||||
``` sql
|
||||
CREATE TABLE test.agg_visits (
|
||||
@ -100,7 +100,7 @@ CREATE TABLE test.agg_visits (
|
||||
ENGINE = AggregatingMergeTree() ORDER BY (StartDate, CounterID);
|
||||
```
|
||||
|
||||
And then let's create a materialized view that populates `test.agg_visits` from `test.visits` :
|
||||
Create a materialized view that populates `test.agg_visits` from `test.visits`:
|
||||
|
||||
```sql
|
||||
CREATE MATERIALIZED VIEW test.visits_mv TO test.agg_visits
|
||||
@ -113,7 +113,7 @@ FROM test.visits
|
||||
GROUP BY StartDate, CounterID;
|
||||
```
|
||||
|
||||
Inserting data into the `test.visits` table.
|
||||
Insert data into the `test.visits` table:
|
||||
|
||||
``` sql
|
||||
INSERT INTO test.visits (StartDate, CounterID, Sign, UserID)
|
||||
@ -122,7 +122,7 @@ INSERT INTO test.visits (StartDate, CounterID, Sign, UserID)
|
||||
|
||||
The data is inserted in both `test.visits` and `test.agg_visits`.
|
||||
|
||||
To get the aggregated data, we need to execute a query such as `SELECT ... GROUP BY ...` from the materialized view `test.mv_visits`:
|
||||
To get the aggregated data, execute a query such as `SELECT ... GROUP BY ...` from the materialized view `test.mv_visits`:
|
||||
|
||||
```sql
|
||||
SELECT
|
||||
@ -140,14 +140,14 @@ ORDER BY StartDate;
|
||||
└─────────────────────────┴────────┴───────┘
|
||||
```
|
||||
|
||||
And how about if we add another couple of records to `test.visits`, but this time we'll use a different timestamp for one of the records:
|
||||
Add another couple of records to `test.visits`, but this time try using a different timestamp for one of the records:
|
||||
|
||||
```sql
|
||||
INSERT INTO test.visits (StartDate, CounterID, Sign, UserID)
|
||||
VALUES (1669446031000, 2, 5, 10), (1667446031000, 3, 7, 5);
|
||||
```
|
||||
|
||||
If we then run the `SELECT` query again, we'll see the following output:
|
||||
Run the `SELECT` query again, which will return the following output:
|
||||
|
||||
```text
|
||||
┌───────────────StartDate─┬─Visits─┬─Users─┐
|
||||
|
@ -453,4 +453,4 @@ ORDER BY yr,
|
||||
mo;
|
||||
```
|
||||
|
||||
The data is also available for interactive queries in the [Playground](https://play.clickhouse.com/play?user=play), [example](https://play.clickhouse.com/play?user=play#U0VMRUNUIG1hY2hpbmVfbmFtZSwKICAgICAgIE1JTihjcHUpIEFTIGNwdV9taW4sCiAgICAgICBNQVgoY3B1KSBBUyBjcHVfbWF4LAogICAgICAgQVZHKGNwdSkgQVMgY3B1X2F2ZywKICAgICAgIE1JTihuZXRfaW4pIEFTIG5ldF9pbl9taW4sCiAgICAgICBNQVgobmV0X2luKSBBUyBuZXRfaW5fbWF4LAogICAgICAgQVZHKG5ldF9pbikgQVMgbmV0X2luX2F2ZywKICAgICAgIE1JTihuZXRfb3V0KSBBUyBuZXRfb3V0X21pbiwKICAgICAgIE1BWChuZXRfb3V0KSBBUyBuZXRfb3V0X21heCwKICAgICAgIEFWRyhuZXRfb3V0KSBBUyBuZXRfb3V0X2F2ZwpGUk9NICgKICBTRUxFQ1QgbWFjaGluZV9uYW1lLAogICAgICAgICBDT0FMRVNDRShjcHVfdXNlciwgMC4wKSBBUyBjcHUsCiAgICAgICAgIENPQUxFU0NFKGJ5dGVzX2luLCAwLjApIEFTIG5ldF9pbiwKICAgICAgICAgQ09BTEVTQ0UoYnl0ZXNfb3V0LCAwLjApIEFTIG5ldF9vdXQKICBGUk9NIG1nYmVuY2gubG9nczEKICBXSEVSRSBtYWNoaW5lX25hbWUgSU4gKCdhbmFuc2knLCdhcmFnb2cnLCd1cmQnKQogICAgQU5EIGxvZ190aW1lID49IFRJTUVTVEFNUCAnMjAxNy0wMS0xMSAwMDowMDowMCcKKSBBUyByCkdST1VQIEJZIG1hY2hpbmVfbmFtZQ==).
|
||||
The data is also available for interactive queries in the [Playground](https://sql.clickhouse.com), [example](https://sql.clickhouse.com?query_id=1MXMHASDLEQIP4P1D1STND).
|
||||
|
@ -360,9 +360,9 @@ This screenshot shows cell tower locations with LTE, UMTS, and GSM radios. The
|
||||
![Dashboard of cell towers by radio type in mcc 204](@site/docs/en/getting-started/example-datasets/images/superset-cell-tower-dashboard.png)
|
||||
|
||||
:::tip
|
||||
The data is also available for interactive queries in the [Playground](https://play.clickhouse.com/play?user=play).
|
||||
The data is also available for interactive queries in the [Playground](https://sql.clickhouse.com).
|
||||
|
||||
This [example](https://play.clickhouse.com/play?user=play#U0VMRUNUIG1jYywgY291bnQoKSBGUk9NIGNlbGxfdG93ZXJzIEdST1VQIEJZIG1jYyBPUkRFUiBCWSBjb3VudCgpIERFU0M=) will populate the username and even the query for you.
|
||||
This [example](https://sql.clickhouse.com?query_id=UV8M4MAGS2PWAUOAYAAARM) will populate the username and even the query for you.
|
||||
|
||||
Although you cannot create tables in the Playground, you can run all of the queries and even use Superset (adjust the host name and port number).
|
||||
:::
|
||||
|
@ -244,13 +244,13 @@ FROM s3('https://datasets-documentation.s3.amazonaws.com/github/commits/clickhou
|
||||
|
||||
The tool suggests several queries via its help output. We have answered these, in addition to some supplementary questions of interest. These queries are listed in approximately increasing complexity, rather than in the tool's arbitrary order.
|
||||
|
||||
This dataset is available in [play.clickhouse.com](https://play.clickhouse.com/play?user=play#U0hPVyBUQUJMRVMgSU4gZ2l0X2NsaWNraG91c2U=) in the `git_clickhouse` databases. We provide a link to this environment for all queries, adapting the database name as required. Note that play results may vary from the those presented here due to differences in time of data collection.
|
||||
This dataset is available in [play.clickhouse.com](https://sql.clickhouse.com?query_id=DCQPNPAIMAQXRLHYURLKVJ) in the `git_clickhouse` databases. We provide a link to this environment for all queries, adapting the database name as required. Note that play results may vary from those presented here due to differences in time of data collection.
|
||||
|
||||
## History of a single file
|
||||
|
||||
The simplest of queries. Here we look at all commit messages for the `StorageReplicatedMergeTree.cpp`. Since these are likely more interesting, we sort by the most recent messages first.
|
||||
|
||||
[play](https://play.clickhouse.com/play?user=play#U0VMRUNUCiAgICB0aW1lLAogICAgc3Vic3RyaW5nKGNvbW1pdF9oYXNoLCAxLCAxMSkgQVMgY29tbWl0LAogICAgY2hhbmdlX3R5cGUsCiAgICBhdXRob3IsCiAgICBwYXRoLAogICAgb2xkX3BhdGgsCiAgICBsaW5lc19hZGRlZCwKICAgIGxpbmVzX2RlbGV0ZWQsCiAgICBjb21taXRfbWVzc2FnZQpGUk9NIGdpdF9jbGlja2hvdXNlLmZpbGVfY2hhbmdlcwpXSEVSRSBwYXRoID0gJ3NyYy9TdG9yYWdlcy9TdG9yYWdlUmVwbGljYXRlZE1lcmdlVHJlZS5jcHAnCk9SREVSIEJZIHRpbWUgREVTQwpMSU1JVCAxMA==)
|
||||
[play](https://sql.clickhouse.com?query_id=COAZRFX2YFULDBXRQTCQ1S)
|
||||
|
||||
```sql
|
||||
SELECT
|
||||
@ -287,7 +287,7 @@ LIMIT 10
|
||||
|
||||
We can also review the line changes, excluding renames i.e. we won't show changes before a rename event when the file existed under a different name:
|
||||
|
||||
[play](https://play.clickhouse.com/play?user=play#U0VMRUNUCiAgICB0aW1lLAogICAgc3Vic3RyaW5nKGNvbW1pdF9oYXNoLCAxLCAxMSkgQVMgY29tbWl0LAogICAgc2lnbiwKICAgIGxpbmVfbnVtYmVyX29sZCwKICAgIGxpbmVfbnVtYmVyX25ldywKICAgIGF1dGhvciwKICAgIGxpbmUKRlJPTSBnaXRfY2xpY2tob3VzZS5saW5lX2NoYW5nZXMKV0hFUkUgcGF0aCA9ICdzcmMvU3RvcmFnZXMvU3RvcmFnZVJlcGxpY2F0ZWRNZXJnZVRyZWUuY3BwJwpPUkRFUiBCWSBsaW5lX251bWJlcl9uZXcgQVNDCkxJTUlUIDEw)
|
||||
[play](https://sql.clickhouse.com?query_id=AKS9SYLARFMZCHGAAQNEBN)
|
||||
|
||||
```sql
|
||||
SELECT
|
||||
@ -327,7 +327,7 @@ This is important for later analysis when we only want to consider the current f
|
||||
|
||||
**Note there appears to have been a broken commit history in relation to files under the `dbms`, `libs`, `tests/testflows/` directories during their renames. We also thus exclude these.**
|
||||
|
||||
[play](https://play.clickhouse.com/play?user=play#U0VMRUNUIHBhdGgKRlJPTQooCiAgICBTRUxFQ1QKICAgICAgICBvbGRfcGF0aCBBUyBwYXRoLAogICAgICAgIG1heCh0aW1lKSBBUyBsYXN0X3RpbWUsCiAgICAgICAgMiBBUyBjaGFuZ2VfdHlwZQogICAgRlJPTSBnaXRfY2xpY2tob3VzZS5maWxlX2NoYW5nZXMKICAgIEdST1VQIEJZIG9sZF9wYXRoCiAgICBVTklPTiBBTEwKICAgIFNFTEVDVAogICAgICAgIHBhdGgsCiAgICAgICAgbWF4KHRpbWUpIEFTIGxhc3RfdGltZSwKICAgICAgICBhcmdNYXgoY2hhbmdlX3R5cGUsIHRpbWUpIEFTIGNoYW5nZV90eXBlCiAgICBGUk9NIGdpdF9jbGlja2hvdXNlLmZpbGVfY2hhbmdlcwogICAgR1JPVVAgQlkgcGF0aAopCkdST1VQIEJZIHBhdGgKSEFWSU5HIChhcmdNYXgoY2hhbmdlX3R5cGUsIGxhc3RfdGltZSkgIT0gMikgQU5EIE5PVCBtYXRjaChwYXRoLCAnKF5kYm1zLyl8KF5saWJzLyl8KF50ZXN0cy90ZXN0Zmxvd3MvKXwoXnByb2dyYW1zL3NlcnZlci9zdG9yZS8pJykgT1JERVIgQlkgcGF0aApMSU1JVCAxMA==)
|
||||
[play](https://sql.clickhouse.com?query_id=2HNFWPCFWEEY92WTAPMA7W)
|
||||
|
||||
```sql
|
||||
SELECT path
|
||||
@ -369,7 +369,7 @@ LIMIT 10
|
||||
|
||||
Note that this allows for files to be renamed and then re-renamed to their original values. First we aggregate `old_path` for a list of deleted files as a result of renaming. We union this with the last operation for every `path`. Finally, we filter this list to those where the final event is not a `Delete`.
|
||||
|
||||
[play](https://play.clickhouse.com/play?user=play#U0VMRUNUIHVuaXEocGF0aCkKRlJPTQooCiAgICBTRUxFQ1QgcGF0aAogICAgRlJPTQogICAgKAogICAgICAgIFNFTEVDVAogICAgICAgICAgICBvbGRfcGF0aCBBUyBwYXRoLAogICAgICAgICAgICBtYXgodGltZSkgQVMgbGFzdF90aW1lLAogICAgICAgICAgICAyIEFTIGNoYW5nZV90eXBlCiAgICAgICAgRlJPTSBnaXRfY2xpY2tob3VzZS5maWxlX2NoYW5nZXMKICAgICAgICBHUk9VUCBCWSBvbGRfcGF0aAogICAgICAgIFVOSU9OIEFMTAogICAgICAgIFNFTEVDVAogICAgICAgICAgICBwYXRoLAogICAgICAgICAgICBtYXgodGltZSkgQVMgbGFzdF90aW1lLAogICAgICAgICAgICBhcmdNYXgoY2hhbmdlX3R5cGUsIHRpbWUpIEFTIGNoYW5nZV90eXBlCiAgICAgICAgRlJPTSBnaXRfY2xpY2tob3VzZS5maWxlX2NoYW5nZXMKICAgICAgICBHUk9VUCBCWSBwYXRoCiAgICApCiAgICBHUk9VUCBCWSBwYXRoCiAgICBIQVZJTkcgKGFyZ01heChjaGFuZ2VfdHlwZSwgbGFzdF90aW1lKSAhPSAyKSBBTkQgTk9UIG1hdGNoKHBhdGgsICcoXmRibXMvKXwoXmxpYnMvKXwoXnRlc3RzL3Rlc3RmbG93cy8pfChecHJvZ3JhbXMvc2VydmVyL3N0b3JlLyknKSBPUkRFUiBCWSBwYXRoCikK)
|
||||
[play](https://sql.clickhouse.com?query_id=1OXCKMOH2JVMSHD3NS2WW6)
|
||||
|
||||
```sql
|
||||
SELECT uniq(path)
|
||||
@ -419,7 +419,7 @@ The difference here is caused by a few factors:
|
||||
|
||||
- A rename can occur alongside other modifications to the file. These are listed as separate events in file_changes but with the same time. The `argMax` function has no way of distinguishing these - it picks the first value. The natural ordering of the inserts (the only means of knowing the correct order) is not maintained across the union so modified events can be selected. For example, below the `src/Functions/geometryFromColumn.h` file has several modifications before being renamed to `src/Functions/geometryConverters.h`. Our current solution may pick a Modify event as the latest change causing `src/Functions/geometryFromColumn.h` to be retained.
|
||||
|
||||
[play](https://play.clickhouse.com/play?user=play#U0VMRUNUCiAgICAgIGNoYW5nZV90eXBlLAogICAgICBwYXRoLAogICAgICBvbGRfcGF0aCwKICAgICAgdGltZSwKICAgICAgY29tbWl0X2hhc2gKICBGUk9NIGdpdF9jbGlja2hvdXNlLmZpbGVfY2hhbmdlcwogIFdIRVJFIChwYXRoID0gJ3NyYy9GdW5jdGlvbnMvZ2VvbWV0cnlGcm9tQ29sdW1uLmgnKSBPUiAob2xkX3BhdGggPSAnc3JjL0Z1bmN0aW9ucy9nZW9tZXRyeUZyb21Db2x1bW4uaCcpCg==)
|
||||
[play](https://sql.clickhouse.com?query_id=SCXWMR9GBMJ9UNZYQXQBFA)
|
||||
|
||||
```sql
|
||||
SELECT
|
||||
@ -454,7 +454,7 @@ These differences shouldn't meaningfully impact our analysis. **We welcome impro
|
||||
|
||||
Limiting to current files, we consider the number of modifications to be the sum of deletes and additions.
|
||||
|
||||
[play](https://play.clickhouse.com/play?user=play#V0lUSCBjdXJyZW50X2ZpbGVzIEFTCiAgICAoCiAgICAgICAgU0VMRUNUIHBhdGgKICAgICAgICBGUk9NCiAgICAgICAgKAogICAgICAgICAgICBTRUxFQ1QKICAgICAgICAgICAgICAgIG9sZF9wYXRoIEFTIHBhdGgsCiAgICAgICAgICAgICAgICBtYXgodGltZSkgQVMgbGFzdF90aW1lLAogICAgICAgICAgICAgICAgMiBBUyBjaGFuZ2VfdHlwZQogICAgICAgICAgICBGUk9NIGdpdF9jbGlja2hvdXNlLmZpbGVfY2hhbmdlcwogICAgICAgICAgICBHUk9VUCBCWSBvbGRfcGF0aAogICAgICAgICAgICBVTklPTiBBTEwKICAgICAgICAgICAgU0VMRUNUCiAgICAgICAgICAgICAgICBwYXRoLAogICAgICAgICAgICAgICAgbWF4KHRpbWUpIEFTIGxhc3RfdGltZSwKICAgICAgICAgICAgICAgIGFyZ01heChjaGFuZ2VfdHlwZSwgdGltZSkgQVMgY2hhbmdlX3R5cGUKICAgICAgICAgICAgRlJPTSBnaXRfY2xpY2tob3VzZS5maWxlX2NoYW5nZXMKICAgICAgICAgICAgR1JPVVAgQlkgcGF0aAogICAgICAgICkKICAgICAgICBHUk9VUCBCWSBwYXRoCiAgICAgICAgSEFWSU5HIChhcmdNYXgoY2hhbmdlX3R5cGUsIGxhc3RfdGltZSkgIT0gMikgQU5EIChOT1QgbWF0Y2gocGF0aCwgJyheZGJtcy8pfChebGlicy8pfChedGVzdHMvdGVzdGZsb3dzLyl8KF5wcm9ncmFtcy9zZXJ2ZXIvc3RvcmUvKScpKQogICAgICAgIE9SREVSIEJZIHBhdGggQVNDCiAgICApClNFTEVDVAogICAgcGF0aCwKICAgIHN1bShsaW5lc19hZGRlZCkgKyBzdW0obGluZXNfZGVsZXRlZCkgQVMgbW9kaWZpY2F0aW9ucwpGUk9NIGdpdF9jbGlja2hvdXNlLmZpbGVfY2hhbmdlcwpXSEVSRSAocGF0aCBJTiAoY3VycmVudF9maWxlcykpIEFORCAoZmlsZV9leHRlbnNpb24gSU4gKCdoJywgJ2NwcCcsICdzcWwnKSkKR1JPVVAgQlkgcGF0aApPUkRFUiBCWSBtb2RpZmljYXRpb25zIERFU0MKTElNSVQgMTA=)
|
||||
[play](https://sql.clickhouse.com?query_id=MHXPSBNPTDMJYR3OYSXVR7)
|
||||
|
||||
```sql
|
||||
WITH current_files AS
|
||||
@ -507,7 +507,7 @@ LIMIT 10
|
||||
|
||||
## What day of the week do commits usually occur?
|
||||
|
||||
[play](https://play.clickhouse.com/play?user=play#U0VMRUNUCiAgICBkYXlfb2Zfd2VlaywKICAgIGNvdW50KCkgQVMgYwpGUk9NIGdpdF9jbGlja2hvdXNlLmNvbW1pdHMKR1JPVVAgQlkgZGF5T2ZXZWVrKHRpbWUpIEFTIGRheV9vZl93ZWVrCg==)
|
||||
[play](https://sql.clickhouse.com?query_id=GED2STFSYJDRAA59H8RLIV)
|
||||
|
||||
```sql
|
||||
SELECT
|
||||
@ -534,7 +534,7 @@ This makes sense with some productivity drop-off on Fridays. Great to see people
|
||||
|
||||
This would produce a large query result that is unrealistic to show or visualize if unfiltered. We, therefore, allow a file or subdirectory to be filtered in the following example. Here we group by week using the `toStartOfWeek` function - adapt as required.
|
||||
|
||||
[play](https://play.clickhouse.com/play?user=play#U0VMRUNUCiAgICB3ZWVrLAogICAgc3VtKGxpbmVzX2FkZGVkKSBBUyBsaW5lc19hZGRlZCwKICAgIHN1bShsaW5lc19kZWxldGVkKSBBUyBsaW5lc19kZWxldGVkLAogICAgdW5pcShjb21taXRfaGFzaCkgQVMgbnVtX2NvbW1pdHMsCiAgICB1bmlxKGF1dGhvcikgQVMgYXV0aG9ycwpGUk9NIGdpdF9jbGlja2hvdXNlLmZpbGVfY2hhbmdlcwpXSEVSRSBwYXRoIExJS0UgJ3NyYy9TdG9yYWdlcyUnCkdST1VQIEJZIHRvU3RhcnRPZldlZWsodGltZSkgQVMgd2VlawpPUkRFUiBCWSB3ZWVrIEFTQwpMSU1JVCAxMAo=)
|
||||
[play](https://sql.clickhouse.com?query_id=REZRXDVU7CAWT5WKNJSTNY)
|
||||
|
||||
```sql
|
||||
SELECT
|
||||
@ -578,7 +578,7 @@ This data visualizes well. Below we use Superset.
|
||||
|
||||
Limit to current files only.
|
||||
|
||||
[play](https://play.clickhouse.com/play?user=play#V0lUSCBjdXJyZW50X2ZpbGVzIEFTCiAgICAoCiAgICAgICAgU0VMRUNUIHBhdGgKICAgICAgICBGUk9NCiAgICAgICAgKAogICAgICAgICAgICBTRUxFQ1QKICAgICAgICAgICAgICAgIG9sZF9wYXRoIEFTIHBhdGgsCiAgICAgICAgICAgICAgICBtYXgodGltZSkgQVMgbGFzdF90aW1lLAogICAgICAgICAgICAgICAgMiBBUyBjaGFuZ2VfdHlwZQogICAgICAgICAgICBGUk9NIGdpdF9jbGlja2hvdXNlLmZpbGVfY2hhbmdlcwogICAgICAgICAgICBHUk9VUCBCWSBvbGRfcGF0aAogICAgICAgICAgICBVTklPTiBBTEwKICAgICAgICAgICAgU0VMRUNUCiAgICAgICAgICAgICAgICBwYXRoLAogICAgICAgICAgICAgICAgbWF4KHRpbWUpIEFTIGxhc3RfdGltZSwKICAgICAgICAgICAgICAgIGFyZ01heChjaGFuZ2VfdHlwZSwgdGltZSkgQVMgY2hhbmdlX3R5cGUKICAgICAgICAgICAgRlJPTSBnaXRfY2xpY2tob3VzZS5maWxlX2NoYW5nZXMKICAgICAgICAgICAgR1JPVVAgQlkgcGF0aAogICAgICAgICkKICAgICAgICBHUk9VUCBCWSBwYXRoCiAgICAgICAgSEFWSU5HIChhcmdNYXgoY2hhbmdlX3R5cGUsIGxhc3RfdGltZSkgIT0gMikgQU5EIChOT1QgbWF0Y2gocGF0aCwgJyheZGJtcy8pfChebGlicy8pfChedGVzdHMvdGVzdGZsb3dzLyl8KF5wcm9ncmFtcy9zZXJ2ZXIvc3RvcmUvKScpKQogICAgICAgIE9SREVSIEJZIHBhdGggQVNDCiAgICApClNFTEVDVAogICAgcGF0aCwKICAgIHVuaXEoYXV0aG9yKSBBUyBudW1fYXV0aG9ycwpGUk9NIGdpdF9jbGlja2hvdXNlLmZpbGVfY2hhbmdlcwpXSEVSRSBwYXRoIElOIChjdXJyZW50X2ZpbGVzKQpHUk9VUCBCWSBwYXRoCk9SREVSIEJZIG51bV9hdXRob3JzIERFU0MKTElNSVQgMTA=)
|
||||
[play](https://sql.clickhouse.com?query_id=CYQFNQNK9TAMPU2OZ8KG5Y)
|
||||
|
||||
```sql
|
||||
WITH current_files AS
|
||||
@ -633,7 +633,7 @@ LIMIT 10
|
||||
|
||||
Limited to current files only.
|
||||
|
||||
[play](https://play.clickhouse.com/play?user=play#V0lUSCBjdXJyZW50X2ZpbGVzIEFTCiAgICAoCiAgICAgICAgU0VMRUNUIHBhdGgKICAgICAgICBGUk9NCiAgICAgICAgKAogICAgICAgICAgICBTRUxFQ1QKICAgICAgICAgICAgICAgIG9sZF9wYXRoIEFTIHBhdGgsCiAgICAgICAgICAgICAgICBtYXgodGltZSkgQVMgbGFzdF90aW1lLAogICAgICAgICAgICAgICAgMiBBUyBjaGFuZ2VfdHlwZQogICAgICAgICAgICBGUk9NIGdpdF9jbGlja2hvdXNlLmZpbGVfY2hhbmdlcwogICAgICAgICAgICBHUk9VUCBCWSBvbGRfcGF0aAogICAgICAgICAgICBVTklPTiBBTEwKICAgICAgICAgICAgU0VMRUNUCiAgICAgICAgICAgICAgICBwYXRoLAogICAgICAgICAgICAgICAgbWF4KHRpbWUpIEFTIGxhc3RfdGltZSwKICAgICAgICAgICAgICAgIGFyZ01heChjaGFuZ2VfdHlwZSwgdGltZSkgQVMgY2hhbmdlX3R5cGUKICAgICAgICAgICAgRlJPTSBnaXRfY2xpY2tob3VzZS5maWxlX2NoYW5nZXMKICAgICAgICAgICAgR1JPVVAgQlkgcGF0aAogICAgICAgICkKICAgICAgICBHUk9VUCBCWSBwYXRoCiAgICAgICAgSEFWSU5HIChhcmdNYXgoY2hhbmdlX3R5cGUsIGxhc3RfdGltZSkgIT0gMikgQU5EIChOT1QgbWF0Y2gocGF0aCwgJyheZGJtcy8pfChebGlicy8pfChedGVzdHMvdGVzdGZsb3dzLyl8KF5wcm9ncmFtcy9zZXJ2ZXIvc3RvcmUvKScpKQogICAgICAgIE9SREVSIEJZIHBhdGggQVNDCiAgICApClNFTEVDVAogICAgYW55KHBhdGgpIEFTIGZpbGVfcGF0aCwKICAgIGxpbmUsCiAgICBtYXgodGltZSkgQVMgbGF0ZXN0X2NoYW5nZSwKICAgIGFueShmaWxlX2NoYW5nZV90eXBlKQpGUk9NIGdpdF9jbGlja2hvdXNlLmxpbmVfY2hhbmdlcwpXSEVSRSBwYXRoIElOIChjdXJyZW50X2ZpbGVzKQpHUk9VUCBCWSBsaW5lCk9SREVSIEJZIGxhdGVzdF9jaGFuZ2UgQVNDCkxJTUlUIDEw)
|
||||
[play](https://sql.clickhouse.com?query_id=VWPBPGRZVGTHOCQYWNQZNT)
|
||||
|
||||
```sql
|
||||
WITH current_files AS
|
||||
@ -690,7 +690,7 @@ LIMIT 10
|
||||
|
||||
Limited to current files only.
|
||||
|
||||
[play](https://play.clickhouse.com/play?user=play#V0lUSCBjdXJyZW50X2ZpbGVzIEFTCiAgICAoCiAgICAgICAgU0VMRUNUIHBhdGgKICAgICAgICBGUk9NCiAgICAgICAgKAogICAgICAgICAgICBTRUxFQ1QKICAgICAgICAgICAgICAgIG9sZF9wYXRoIEFTIHBhdGgsCiAgICAgICAgICAgICAgICBtYXgodGltZSkgQVMgbGFzdF90aW1lLAogICAgICAgICAgICAgICAgMiBBUyBjaGFuZ2VfdHlwZQogICAgICAgICAgICBGUk9NIGdpdF9jbGlja2hvdXNlLmZpbGVfY2hhbmdlcwogICAgICAgICAgICBHUk9VUCBCWSBvbGRfcGF0aAogICAgICAgICAgICBVTklPTiBBTEwKICAgICAgICAgICAgU0VMRUNUCiAgICAgICAgICAgICAgICBwYXRoLAogICAgICAgICAgICAgICAgbWF4KHRpbWUpIEFTIGxhc3RfdGltZSwKICAgICAgICAgICAgICAgIGFyZ01heChjaGFuZ2VfdHlwZSwgdGltZSkgQVMgY2hhbmdlX3R5cGUKICAgICAgICAgICAgRlJPTSBnaXRfY2xpY2tob3VzZS5maWxlX2NoYW5nZXMKICAgICAgICAgICAgR1JPVVAgQlkgcGF0aAogICAgICAgICkKICAgICAgICBHUk9VUCBCWSBwYXRoCiAgICAgICAgSEFWSU5HIChhcmdNYXgoY2hhbmdlX3R5cGUsIGxhc3RfdGltZSkgIT0gMikgQU5EIChOT1QgbWF0Y2gocGF0aCwgJyheZGJtcy8pfChebGlicy8pfChedGVzdHMvdGVzdGZsb3dzLyl8KF5wcm9ncmFtcy9zZXJ2ZXIvc3RvcmUvKScpKQogICAgICAgIE9SREVSIEJZIHBhdGggQVNDCiAgICApClNFTEVDVAogICAgY291bnQoKSBBUyBjLAogICAgcGF0aCwKICAgIG1heCh0aW1lKSBBUyBsYXRlc3RfY2hhbmdlCkZST00gZ2l0X2NsaWNraG91c2UuZmlsZV9jaGFuZ2VzCldIRVJFIHBhdGggSU4gKGN1cnJlbnRfZmlsZXMpCkdST1VQIEJZIHBhdGgKT1JERVIgQlkgYyBERVNDCkxJTUlUIDEw)
|
||||
[play](https://sql.clickhouse.com?query_id=VWPBPGRZVGTHOCQYWNQZNT)
|
||||
|
||||
```sql
|
||||
WITH current_files AS
|
||||
@ -750,7 +750,7 @@ Our core data structure, the Merge Tree, is obviously under constant evolution w
|
||||
|
||||
Do we write more docs at certain times of the month e.g., around release dates? We can use the `countIf` function to compute a simple ratio, visualizing the result using the `bar` function.
|
||||
|
||||
[play](https://play.clickhouse.com/play?user=play#U0VMRUNUCiAgICBkYXksCiAgICBiYXIoZG9jc19yYXRpbyAqIDEwMDAsIDAsIDEwMCwgMTAwKSBBUyBiYXIKRlJPTQooCiAgICBTRUxFQ1QKICAgICAgICBkYXksCiAgICAgICAgY291bnRJZihmaWxlX2V4dGVuc2lvbiBJTiAoJ2gnLCAnY3BwJywgJ3NxbCcpKSBBUyBjb2RlLAogICAgICAgIGNvdW50SWYoZmlsZV9leHRlbnNpb24gPSAnbWQnKSBBUyBkb2NzLAogICAgICAgIGRvY3MgLyAoY29kZSArIGRvY3MpIEFTIGRvY3NfcmF0aW8KICAgIEZST00gZ2l0X2NsaWNraG91c2UubGluZV9jaGFuZ2VzCiAgICBXSEVSRSAoc2lnbiA9IDEpIEFORCAoZmlsZV9leHRlbnNpb24gSU4gKCdoJywgJ2NwcCcsICdzcWwnLCAnbWQnKSkKICAgIEdST1VQIEJZIGRheU9mTW9udGgodGltZSkgQVMgZGF5CikK)
|
||||
[play](https://sql.clickhouse.com?query_id=BA4RZUXUHNQBH9YK7F2T9J)
|
||||
|
||||
```sql
|
||||
SELECT
|
||||
@ -811,7 +811,7 @@ Maybe a little more near the end of the month, but overall we keep a good even d
|
||||
|
||||
We consider diversity here to be the number of unique files an author has contributed to.
|
||||
|
||||
[play](https://play.clickhouse.com/play?user=play#U0VMRUNUCiAgICBhdXRob3IsCiAgICB1bmlxKHBhdGgpIEFTIG51bV9maWxlcwpGUk9NIGdpdF9jbGlja2hvdXNlLmZpbGVfY2hhbmdlcwpXSEVSRSAoY2hhbmdlX3R5cGUgSU4gKCdBZGQnLCAnTW9kaWZ5JykpIEFORCAoZmlsZV9leHRlbnNpb24gSU4gKCdoJywgJ2NwcCcsICdzcWwnKSkKR1JPVVAgQlkgYXV0aG9yCk9SREVSIEJZIG51bV9maWxlcyBERVNDCkxJTUlUIDEw)
|
||||
[play](https://sql.clickhouse.com?query_id=MT8WBABUKYBYSBA78W5TML)
|
||||
|
||||
```sql
|
||||
SELECT
|
||||
@ -841,7 +841,7 @@ LIMIT 10
|
||||
|
||||
Let's see who has the most diverse commits in their recent work. Rather than limit by date, we'll restrict to an author's last N commits (in this case, we've used 3 but feel free to modify):
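The key trick here is `LIMIT N BY`, which keeps only the most recent N commits per author before aggregating. A sketch with N = 3 could look like this:

```sql
-- sketch: distinct files across each author's last 3 commits
SELECT
    author,
    sum(num_files_commit) AS num_files
FROM
(
    SELECT
        author,
        commit_hash,
        uniq(path) AS num_files_commit,
        max(time) AS commit_time
    FROM git_clickhouse.file_changes
    WHERE (change_type IN ('Add', 'Modify')) AND (file_extension IN ('h', 'cpp', 'sql'))
    GROUP BY author, commit_hash
    ORDER BY author ASC, commit_time DESC
    LIMIT 3 BY author
)
GROUP BY author
ORDER BY num_files DESC
LIMIT 10
```

`LIMIT 3 BY author` keeps the first three rows for each author after the ordering, which is what restricts the subquery to each author's most recent commits.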
|
||||
|
||||
[play](https://play.clickhouse.com/play?user=play#U0VMRUNUCiAgICBhdXRob3IsCiAgICBzdW0obnVtX2ZpbGVzX2NvbW1pdCkgQVMgbnVtX2ZpbGVzCkZST00KKAogICAgU0VMRUNUCiAgICAgICAgYXV0aG9yLAogICAgICAgIGNvbW1pdF9oYXNoLAogICAgICAgIHVuaXEocGF0aCkgQVMgbnVtX2ZpbGVzX2NvbW1pdCwKICAgICAgICBtYXgodGltZSkgQVMgY29tbWl0X3RpbWUKICAgIEZST00gZ2l0X2NsaWNraG91c2UuZmlsZV9jaGFuZ2VzCiAgICBXSEVSRSAoY2hhbmdlX3R5cGUgSU4gKCdBZGQnLCAnTW9kaWZ5JykpIEFORCAoZmlsZV9leHRlbnNpb24gSU4gKCdoJywgJ2NwcCcsICdzcWwnKSkKICAgIEdST1VQIEJZCiAgICAgICAgYXV0aG9yLAogICAgICAgIGNvbW1pdF9oYXNoCiAgICBPUkRFUiBCWQogICAgICAgIGF1dGhvciBBU0MsCiAgICAgICAgY29tbWl0X3RpbWUgREVTQwogICAgTElNSVQgMyBCWSBhdXRob3IKKQpHUk9VUCBCWSBhdXRob3IKT1JERVIgQlkgbnVtX2ZpbGVzIERFU0MKTElNSVQgMTA=)
|
||||
[play](https://sql.clickhouse.com?query_id=4Q3D67FWRIVWTY8EIDDE5U)
|
||||
|
||||
```sql
|
||||
SELECT
|
||||
@ -888,7 +888,7 @@ LIMIT 10
|
||||
|
||||
Here we select our founder [Alexey Milovidov](https://github.com/alexey-milovidov) and limit our analysis to current files.
|
||||
|
||||
[play](https://play.clickhouse.com/play?user=play#V0lUSCBjdXJyZW50X2ZpbGVzIEFTCiAgICAoCiAgICAgICAgU0VMRUNUIHBhdGgKICAgICAgICBGUk9NCiAgICAgICAgKAogICAgICAgICAgICBTRUxFQ1QKICAgICAgICAgICAgICAgIG9sZF9wYXRoIEFTIHBhdGgsCiAgICAgICAgICAgICAgICBtYXgodGltZSkgQVMgbGFzdF90aW1lLAogICAgICAgICAgICAgICAgMiBBUyBjaGFuZ2VfdHlwZQogICAgICAgICAgICBGUk9NIGdpdF9jbGlja2hvdXNlLmZpbGVfY2hhbmdlcwogICAgICAgICAgICBHUk9VUCBCWSBvbGRfcGF0aAogICAgICAgICAgICBVTklPTiBBTEwKICAgICAgICAgICAgU0VMRUNUCiAgICAgICAgICAgICAgICBwYXRoLAogICAgICAgICAgICAgICAgbWF4KHRpbWUpIEFTIGxhc3RfdGltZSwKICAgICAgICAgICAgICAgIGFyZ01heChjaGFuZ2VfdHlwZSwgdGltZSkgQVMgY2hhbmdlX3R5cGUKICAgICAgICAgICAgRlJPTSBnaXRfY2xpY2tob3VzZS5maWxlX2NoYW5nZXMKICAgICAgICAgICAgR1JPVVAgQlkgcGF0aAogICAgICAgICkKICAgICAgICBHUk9VUCBCWSBwYXRoCiAgICAgICAgSEFWSU5HIChhcmdNYXgoY2hhbmdlX3R5cGUsIGxhc3RfdGltZSkgIT0gMikgQU5EIChOT1QgbWF0Y2gocGF0aCwgJyheZGJtcy8pfChebGlicy8pfChedGVzdHMvdGVzdGZsb3dzLyl8KF5wcm9ncmFtcy9zZXJ2ZXIvc3RvcmUvKScpKQogICAgICAgIE9SREVSIEJZIHBhdGggQVNDCiAgICApClNFTEVDVAogICAgcGF0aCwKICAgIGNvdW50KCkgQVMgYwpGUk9NIGdpdF9jbGlja2hvdXNlLmZpbGVfY2hhbmdlcwpXSEVSRSAoYXV0aG9yID0gJ0FsZXhleSBNaWxvdmlkb3YnKSBBTkQgKHBhdGggSU4gKGN1cnJlbnRfZmlsZXMpKQpHUk9VUCBCWSBwYXRoCk9SREVSIEJZIGMgREVTQwpMSU1JVCAxMA==)
|
||||
[play](https://sql.clickhouse.com?query_id=OKGZBACRHVGCRAGCZAJKMF)
|
||||
|
||||
```sql
|
||||
WITH current_files AS
|
||||
@ -941,7 +941,7 @@ LIMIT 10
|
||||
|
||||
This makes sense because Alexey has been responsible for maintaining the changelog. But what if we use the base name of the file to identify his most frequently modified files - this accounts for renames and should focus on code contributions.
|
||||
|
||||
[play](https://play.clickhouse.com/play?user=play#U0VMRUNUCiAgICBiYXNlLAogICAgY291bnQoKSBBUyBjCkZST00gZ2l0X2NsaWNraG91c2UuZmlsZV9jaGFuZ2VzCldIRVJFIChhdXRob3IgPSAnQWxleGV5IE1pbG92aWRvdicpIEFORCAoZmlsZV9leHRlbnNpb24gSU4gKCdoJywgJ2NwcCcsICdzcWwnKSkKR1JPVVAgQlkgYmFzZW5hbWUocGF0aCkgQVMgYmFzZQpPUkRFUiBCWSBjIERFU0MKTElNSVQgMTA=)
|
||||
[play](https://sql.clickhouse.com?query_id=P9PBDZGOSVTKXEXU73ZNAJ)
|
||||
|
||||
```sql
|
||||
SELECT
|
||||
@ -976,7 +976,7 @@ For this, we first need to identify the largest files. Estimating this via a ful
|
||||
|
||||
To estimate this, assuming we restrict to current files, we sum line additions and subtract line deletions. We can then compute the ratio of file length to the number of authors.
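The core of the estimate, leaving out the `current_files` restriction used in the full query below, might look like this sketch:

```sql
-- sketch: estimated file length divided by the number of authors
SELECT
    path,
    sum(lines_added) - sum(lines_deleted) AS num_lines,
    uniqExact(author) AS num_authors,
    num_lines / num_authors AS lines_author_ratio
FROM git_clickhouse.file_changes
GROUP BY path
ORDER BY lines_author_ratio DESC
LIMIT 10
```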
|
||||
|
||||
[play](https://play.clickhouse.com/play?user=play#V0lUSCBjdXJyZW50X2ZpbGVzIEFTCiAgICAoCiAgICAgICAgU0VMRUNUIHBhdGgKICAgICAgICBGUk9NCiAgICAgICAgKAogICAgICAgICAgICBTRUxFQ1QKICAgICAgICAgICAgICAgIG9sZF9wYXRoIEFTIHBhdGgsCiAgICAgICAgICAgICAgICBtYXgodGltZSkgQVMgbGFzdF90aW1lLAogICAgICAgICAgICAgICAgMiBBUyBjaGFuZ2VfdHlwZQogICAgICAgICAgICBGUk9NIGdpdF9jbGlja2hvdXNlLmZpbGVfY2hhbmdlcwogICAgICAgICAgICBHUk9VUCBCWSBvbGRfcGF0aAogICAgICAgICAgICBVTklPTiBBTEwKICAgICAgICAgICAgU0VMRUNUCiAgICAgICAgICAgICAgICBwYXRoLAogICAgICAgICAgICAgICAgbWF4KHRpbWUpIEFTIGxhc3RfdGltZSwKICAgICAgICAgICAgICAgIGFyZ01heChjaGFuZ2VfdHlwZSwgdGltZSkgQVMgY2hhbmdlX3R5cGUKICAgICAgICAgICAgRlJPTSBnaXRfY2xpY2tob3VzZS5maWxlX2NoYW5nZXMKICAgICAgICAgICAgR1JPVVAgQlkgcGF0aAogICAgICAgICkKICAgICAgICBHUk9VUCBCWSBwYXRoCiAgICAgICAgSEFWSU5HIChhcmdNYXgoY2hhbmdlX3R5cGUsIGxhc3RfdGltZSkgIT0gMikgQU5EIChOT1QgbWF0Y2gocGF0aCwgJyheZGJtcy8pfChebGlicy8pfChedGVzdHMvdGVzdGZsb3dzLyl8KF5wcm9ncmFtcy9zZXJ2ZXIvc3RvcmUvKScpKQogICAgICAgIE9SREVSIEJZIHBhdGggQVNDCiAgICApClNFTEVDVAogICAgcGF0aCwKICAgIHN1bShsaW5lc19hZGRlZCkgLSBzdW0obGluZXNfZGVsZXRlZCkgQVMgbnVtX2xpbmVzLAogICAgdW5pcUV4YWN0KGF1dGhvcikgQVMgbnVtX2F1dGhvcnMsCiAgICBudW1fbGluZXMgLyBudW1fYXV0aG9ycyBBUyBsaW5lc19hdXRob3JfcmF0aW8KRlJPTSBnaXRfY2xpY2tob3VzZS5maWxlX2NoYW5nZXMKV0hFUkUgcGF0aCBJTiAoY3VycmVudF9maWxlcykKR1JPVVAgQlkgcGF0aApPUkRFUiBCWSBsaW5lc19hdXRob3JfcmF0aW8gREVTQwpMSU1JVCAxMA==)
|
||||
[play](https://sql.clickhouse.com?query_id=PVSDOHZYUMRDDUZFEYJC7J)
|
||||
|
||||
```sql
|
||||
WITH current_files AS
|
||||
@ -1031,7 +1031,7 @@ LIMIT 10
|
||||
|
||||
Text dictionaries probably aren't realistic here, so let's restrict to code only via a file extension filter!
|
||||
|
||||
[play](https://play.clickhouse.com/play?user=play#V0lUSCBjdXJyZW50X2ZpbGVzIEFTCiAgICAoCiAgICAgICAgU0VMRUNUIHBhdGgKICAgICAgICBGUk9NCiAgICAgICAgKAogICAgICAgICAgICBTRUxFQ1QKICAgICAgICAgICAgICAgIG9sZF9wYXRoIEFTIHBhdGgsCiAgICAgICAgICAgICAgICBtYXgodGltZSkgQVMgbGFzdF90aW1lLAogICAgICAgICAgICAgICAgMiBBUyBjaGFuZ2VfdHlwZQogICAgICAgICAgICBGUk9NIGdpdF9jbGlja2hvdXNlLmZpbGVfY2hhbmdlcwogICAgICAgICAgICBHUk9VUCBCWSBvbGRfcGF0aAogICAgICAgICAgICBVTklPTiBBTEwKICAgICAgICAgICAgU0VMRUNUCiAgICAgICAgICAgICAgICBwYXRoLAogICAgICAgICAgICAgICAgbWF4KHRpbWUpIEFTIGxhc3RfdGltZSwKICAgICAgICAgICAgICAgIGFyZ01heChjaGFuZ2VfdHlwZSwgdGltZSkgQVMgY2hhbmdlX3R5cGUKICAgICAgICAgICAgRlJPTSBnaXRfY2xpY2tob3VzZS5maWxlX2NoYW5nZXMKICAgICAgICAgICAgR1JPVVAgQlkgcGF0aAogICAgICAgICkKICAgICAgICBHUk9VUCBCWSBwYXRoCiAgICAgICAgSEFWSU5HIChhcmdNYXgoY2hhbmdlX3R5cGUsIGxhc3RfdGltZSkgIT0gMikgQU5EIChOT1QgbWF0Y2gocGF0aCwgJyheZGJtcy8pfChebGlicy8pfChedGVzdHMvdGVzdGZsb3dzLyl8KF5wcm9ncmFtcy9zZXJ2ZXIvc3RvcmUvKScpKQogICAgICAgIE9SREVSIEJZIHBhdGggQVNDCiAgICApClNFTEVDVAogICAgcGF0aCwKICAgIHN1bShsaW5lc19hZGRlZCkgLSBzdW0obGluZXNfZGVsZXRlZCkgQVMgbnVtX2xpbmVzLAogICAgdW5pcUV4YWN0KGF1dGhvcikgQVMgbnVtX2F1dGhvcnMsCiAgICBudW1fbGluZXMgLyBudW1fYXV0aG9ycyBBUyBsaW5lc19hdXRob3JfcmF0aW8KRlJPTSBnaXRfY2xpY2tob3VzZS5maWxlX2NoYW5nZXMKV0hFUkUgKHBhdGggSU4gKGN1cnJlbnRfZmlsZXMpKSBBTkQgKGZpbGVfZXh0ZW5zaW9uIElOICgnaCcsICdjcHAnLCAnc3FsJykpCkdST1VQIEJZIHBhdGgKT1JERVIgQlkgbGluZXNfYXV0aG9yX3JhdGlvIERFU0MKTElNSVQgMTA=)
|
||||
[play](https://sql.clickhouse.com?query_id=BZHGWUIZMPZZUHS5XRBK2M)
|
||||
|
||||
```sql
|
||||
WITH current_files AS
|
||||
@ -1085,7 +1085,7 @@ LIMIT 10
|
||||
|
||||
There is some recency bias in this - newer files have had fewer opportunities for commits. What if we restrict the analysis to files that are at least one year old?
|
||||
|
||||
[play](https://play.clickhouse.com/play?user=play#V0lUSCBjdXJyZW50X2ZpbGVzIEFTCiAgICAoCiAgICAgICAgU0VMRUNUIHBhdGgKICAgICAgICBGUk9NCiAgICAgICAgKAogICAgICAgICAgICBTRUxFQ1QKICAgICAgICAgICAgICAgIG9sZF9wYXRoIEFTIHBhdGgsCiAgICAgICAgICAgICAgICBtYXgodGltZSkgQVMgbGFzdF90aW1lLAogICAgICAgICAgICAgICAgMiBBUyBjaGFuZ2VfdHlwZQogICAgICAgICAgICBGUk9NIGdpdF9jbGlja2hvdXNlLmZpbGVfY2hhbmdlcwogICAgICAgICAgICBHUk9VUCBCWSBvbGRfcGF0aAogICAgICAgICAgICBVTklPTiBBTEwKICAgICAgICAgICAgU0VMRUNUCiAgICAgICAgICAgICAgICBwYXRoLAogICAgICAgICAgICAgICAgbWF4KHRpbWUpIEFTIGxhc3RfdGltZSwKICAgICAgICAgICAgICAgIGFyZ01heChjaGFuZ2VfdHlwZSwgdGltZSkgQVMgY2hhbmdlX3R5cGUKICAgICAgICAgICAgRlJPTSBnaXRfY2xpY2tob3VzZS5maWxlX2NoYW5nZXMKICAgICAgICAgICAgR1JPVVAgQlkgcGF0aAogICAgICAgICkKICAgICAgICBHUk9VUCBCWSBwYXRoCiAgICAgICAgSEFWSU5HIChhcmdNYXgoY2hhbmdlX3R5cGUsIGxhc3RfdGltZSkgIT0gMikgQU5EIChOT1QgbWF0Y2gocGF0aCwgJyheZGJtcy8pfChebGlicy8pfChedGVzdHMvdGVzdGZsb3dzLyl8KF5wcm9ncmFtcy9zZXJ2ZXIvc3RvcmUvKScpKQogICAgICAgIE9SREVSIEJZIHBhdGggQVNDCiAgICApClNFTEVDVAogICAgbWluKHRpbWUpIEFTIG1pbl9kYXRlLAogICAgcGF0aCwKICAgIHN1bShsaW5lc19hZGRlZCkgLSBzdW0obGluZXNfZGVsZXRlZCkgQVMgbnVtX2xpbmVzLAogICAgdW5pcUV4YWN0KGF1dGhvcikgQVMgbnVtX2F1dGhvcnMsCiAgICBudW1fbGluZXMgLyBudW1fYXV0aG9ycyBBUyBsaW5lc19hdXRob3JfcmF0aW8KRlJPTSBnaXRfY2xpY2tob3VzZS5maWxlX2NoYW5nZXMKV0hFUkUgKHBhdGggSU4gKGN1cnJlbnRfZmlsZXMpKSBBTkQgKGZpbGVfZXh0ZW5zaW9uIElOICgnaCcsICdjcHAnLCAnc3FsJykpCkdST1VQIEJZIHBhdGgKSEFWSU5HIG1pbl9kYXRlIDw9IChub3coKSAtIHRvSW50ZXJ2YWxZZWFyKDEpKQpPUkRFUiBCWSBsaW5lc19hdXRob3JfcmF0aW8gREVTQwpMSU1JVCAxMA==)
|
||||
[play](https://sql.clickhouse.com?query_id=RMHHZEDHFUCBGRQVQA2732)
|
||||
|
||||
```sql
|
||||
WITH current_files AS
|
||||
@ -1144,7 +1144,7 @@ LIMIT 10
|
||||
|
||||
We interpret this as the number of lines added and removed by day of the week. In this case, we focus on the [Functions directory](https://github.com/ClickHouse/ClickHouse/tree/master/src/Functions).
|
||||
|
||||
[play](https://play.clickhouse.com/play?user=play#U0VMRUNUCiAgICBkYXlPZldlZWssCiAgICB1bmlxKGNvbW1pdF9oYXNoKSBBUyBjb21taXRzLAogICAgc3VtKGxpbmVzX2FkZGVkKSBBUyBsaW5lc19hZGRlZCwKICAgIHN1bShsaW5lc19kZWxldGVkKSBBUyBsaW5lc19kZWxldGVkCkZST00gZ2l0X2NsaWNraG91c2UuZmlsZV9jaGFuZ2VzCldIRVJFIHBhdGggTElLRSAnc3JjL0Z1bmN0aW9ucyUnCkdST1VQIEJZIHRvRGF5T2ZXZWVrKHRpbWUpIEFTIGRheU9mV2Vlaw==)
|
||||
[play](https://sql.clickhouse.com?query_id=PF3KEMYG5CVLJGCFYQEGB1)
|
||||
|
||||
```sql
|
||||
SELECT
|
||||
@ -1171,7 +1171,7 @@ GROUP BY toDayOfWeek(time) AS dayOfWeek
|
||||
|
||||
And by time of day,
|
||||
|
||||
[play](https://play.clickhouse.com/play?user=play#U0VMRUNUCiAgICBob3VyT2ZEYXksCiAgICB1bmlxKGNvbW1pdF9oYXNoKSBBUyBjb21taXRzLAogICAgc3VtKGxpbmVzX2FkZGVkKSBBUyBsaW5lc19hZGRlZCwKICAgIHN1bShsaW5lc19kZWxldGVkKSBBUyBsaW5lc19kZWxldGVkCkZST00gZ2l0X2NsaWNraG91c2UuZmlsZV9jaGFuZ2VzCldIRVJFIHBhdGggTElLRSAnc3JjL0Z1bmN0aW9ucyUnCkdST1VQIEJZIHRvSG91cih0aW1lKSBBUyBob3VyT2ZEYXk=)
|
||||
[play](https://sql.clickhouse.com?query_id=Q4VDVKEGHHRBCUJHNCVTF1)
|
||||
|
||||
```sql
|
||||
SELECT
|
||||
@ -1215,7 +1215,7 @@ GROUP BY toHour(time) AS hourOfDay
|
||||
|
||||
This distribution makes sense given that most of our development team is in Amsterdam. The `bar` function helps us visualize these distributions:
|
||||
|
||||
[play](https://play.clickhouse.com/play?user=play#U0VMRUNUCiAgICBob3VyT2ZEYXksCiAgICBiYXIoY29tbWl0cywgMCwgNDAwLCA1MCkgQVMgY29tbWl0cywKICAgIGJhcihsaW5lc19hZGRlZCwgMCwgMzAwMDAsIDUwKSBBUyBsaW5lc19hZGRlZCwKICAgIGJhcihsaW5lc19kZWxldGVkLCAwLCAxNTAwMCwgNTApIEFTIGxpbmVzX2RlbGV0ZWQKRlJPTQooCiAgICBTRUxFQ1QKICAgICAgICBob3VyT2ZEYXksCiAgICAgICAgdW5pcShjb21taXRfaGFzaCkgQVMgY29tbWl0cywKICAgICAgICBzdW0obGluZXNfYWRkZWQpIEFTIGxpbmVzX2FkZGVkLAogICAgICAgIHN1bShsaW5lc19kZWxldGVkKSBBUyBsaW5lc19kZWxldGVkCiAgICBGUk9NIGdpdF9jbGlja2hvdXNlLmZpbGVfY2hhbmdlcwogICAgV0hFUkUgcGF0aCBMSUtFICdzcmMvRnVuY3Rpb25zJScKICAgIEdST1VQIEJZIHRvSG91cih0aW1lKSBBUyBob3VyT2ZEYXkKKQ==)
|
||||
[play](https://sql.clickhouse.com?query_id=9AZ8CENV8N91YGW7T6IB68)
|
||||
|
||||
```sql
|
||||
SELECT
|
||||
@ -1269,7 +1269,7 @@ FROM
|
||||
|
||||
The `sign = -1` indicates a code deletion. We exclude punctuation and the insertion of empty lines.
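A sketch of this filter, assuming the `sign`, `line_type`, `author` and `prev_author` columns of `line_changes`, might be:

```sql
-- sketch: who most often deletes whose code
SELECT
    prev_author,
    author,
    count() AS c
FROM git_clickhouse.line_changes
WHERE (sign = -1)
    AND (file_extension IN ('h', 'cpp'))
    AND (line_type NOT IN ('Punct', 'Empty'))
    AND (author != prev_author)
    AND (prev_author != '')
GROUP BY prev_author, author
ORDER BY c DESC
LIMIT 10
```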
|
||||
|
||||
[play](https://play.clickhouse.com/play?user=play#U0VMRUNUCiAgICBwcmV2X2F1dGhvciB8fCAnKGEpJyBhcyBhZGRfYXV0aG9yLAogICAgYXV0aG9yICB8fCAnKGQpJyBhcyBkZWxldGVfYXV0aG9yLAogICAgY291bnQoKSBBUyBjCkZST00gZ2l0X2NsaWNraG91c2UubGluZV9jaGFuZ2VzCldIRVJFIChzaWduID0gLTEpIEFORCAoZmlsZV9leHRlbnNpb24gSU4gKCdoJywgJ2NwcCcpKSBBTkQgKGxpbmVfdHlwZSBOT1QgSU4gKCdQdW5jdCcsICdFbXB0eScpKSBBTkQgKGF1dGhvciAhPSBwcmV2X2F1dGhvcikgQU5EIChwcmV2X2F1dGhvciAhPSAnJykKR1JPVVAgQlkKICAgIHByZXZfYXV0aG9yLAogICAgYXV0aG9yCk9SREVSIEJZIGMgREVTQwpMSU1JVCAxIEJZIHByZXZfYXV0aG9yCkxJTUlUIDEwMA==)
|
||||
[play](https://sql.clickhouse.com?query_id=448O8GWAHY3EM6ZZ7AGLAM)
|
||||
|
||||
```sql
|
||||
SELECT
|
||||
@ -1325,7 +1325,7 @@ Alexey clearly likes removing other peoples code. Lets exclude him for a more ba
|
||||
|
||||
If we consider just the number of commits:
|
||||
|
||||
[play](https://play.clickhouse.com/play?user=play#U0VMRUNUCiAgICBkYXlfb2Zfd2VlaywKICAgIGF1dGhvciwKICAgIGNvdW50KCkgQVMgYwpGUk9NIGdpdF9jbGlja2hvdXNlLmNvbW1pdHMKR1JPVVAgQlkKICAgIGRheU9mV2Vlayh0aW1lKSBBUyBkYXlfb2Zfd2VlaywKICAgIGF1dGhvcgpPUkRFUiBCWQogICAgZGF5X29mX3dlZWsgQVNDLAogICAgYyBERVNDCkxJTUlUIDEgQlkgZGF5X29mX3dlZWs=)
|
||||
[play](https://sql.clickhouse.com?query_id=WXPKFJCAHOKYKEVTWNFVCY)
|
||||
|
||||
```sql
|
||||
SELECT
|
||||
@ -1356,7 +1356,7 @@ LIMIT 1 BY day_of_week
|
||||
|
||||
OK, there is possibly some advantage here for the longest-standing contributor - our founder Alexey. Let's limit our analysis to the last year.
|
||||
|
||||
[play](https://play.clickhouse.com/play?user=play#U0VMRUNUCiAgICBkYXlfb2Zfd2VlaywKICAgIGF1dGhvciwKICAgIGNvdW50KCkgQVMgYwpGUk9NIGdpdF9jbGlja2hvdXNlLmNvbW1pdHMKV0hFUkUgdGltZSA+IChub3coKSAtIHRvSW50ZXJ2YWxZZWFyKDEpKQpHUk9VUCBCWQogICAgZGF5T2ZXZWVrKHRpbWUpIEFTIGRheV9vZl93ZWVrLAogICAgYXV0aG9yCk9SREVSIEJZCiAgICBkYXlfb2Zfd2VlayBBU0MsCiAgICBjIERFU0MKTElNSVQgMSBCWSBkYXlfb2Zfd2Vlaw==)
|
||||
[play](https://sql.clickhouse.com?query_id=8YRJGHFTNJAWJ96XCJKKEH)
|
||||
|
||||
```sql
|
||||
SELECT
|
||||
@ -1390,7 +1390,7 @@ This is still a little simple and doesn't reflect people's work.
|
||||
|
||||
A better metric might be who the top contributor is each day, as a fraction of the total work performed in the last year. Note that we treat deleting and adding code equally.
|
||||
|
||||
[play](https://play.clickhouse.com/play?user=play#U0VMRUNUCiAgICB0b3BfYXV0aG9yLmRheV9vZl93ZWVrLAogICAgdG9wX2F1dGhvci5hdXRob3IsCiAgICB0b3BfYXV0aG9yLmF1dGhvcl93b3JrIC8gYWxsX3dvcmsudG90YWxfd29yayBBUyB0b3BfYXV0aG9yX3BlcmNlbnQKRlJPTQooCiAgICBTRUxFQ1QKICAgICAgICBkYXlfb2Zfd2VlaywKICAgICAgICBhdXRob3IsCiAgICAgICAgc3VtKGxpbmVzX2FkZGVkKSArIHN1bShsaW5lc19kZWxldGVkKSBBUyBhdXRob3Jfd29yawogICAgRlJPTSBnaXRfY2xpY2tob3VzZS5maWxlX2NoYW5nZXMKICAgIFdIRVJFIHRpbWUgPiAobm93KCkgLSB0b0ludGVydmFsWWVhcigxKSkKICAgIEdST1VQIEJZCiAgICAgICAgYXV0aG9yLAogICAgICAgIGRheU9mV2Vlayh0aW1lKSBBUyBkYXlfb2Zfd2VlawogICAgT1JERVIgQlkKICAgICAgICBkYXlfb2Zfd2VlayBBU0MsCiAgICAgICAgYXV0aG9yX3dvcmsgREVTQwogICAgTElNSVQgMSBCWSBkYXlfb2Zfd2VlawopIEFTIHRvcF9hdXRob3IKSU5ORVIgSk9JTgooCiAgICBTRUxFQ1QKICAgICAgICBkYXlfb2Zfd2VlaywKICAgICAgICBzdW0obGluZXNfYWRkZWQpICsgc3VtKGxpbmVzX2RlbGV0ZWQpIEFTIHRvdGFsX3dvcmsKICAgIEZST00gZ2l0X2NsaWNraG91c2UuZmlsZV9jaGFuZ2VzCiAgICBXSEVSRSB0aW1lID4gKG5vdygpIC0gdG9JbnRlcnZhbFllYXIoMSkpCiAgICBHUk9VUCBCWSBkYXlPZldlZWsodGltZSkgQVMgZGF5X29mX3dlZWsKKSBBUyBhbGxfd29yayBVU0lORyAoZGF5X29mX3dlZWsp)
|
||||
[play](https://sql.clickhouse.com?query_id=VQF4KMRDSUEXGS1JFVDJHV)
|
||||
|
||||
```sql
|
||||
SELECT
|
||||
@ -1440,7 +1440,7 @@ INNER JOIN
|
||||
|
||||
We limit the analysis to the current files. For brevity, we restrict the results to a depth of 2 with 5 files per root folder. Adjust as required.
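The depth-2 grouping and the per-root limit can be sketched as follows; the file-age calculation and `current_files` restriction of the full query below are omitted here for brevity:

```sql
-- sketch: depth-2 folder grouping with 5 rows per root folder
SELECT
    concat(root, '/', sub_folder) AS folder,
    count() AS c
FROM git_clickhouse.file_changes
WHERE file_extension IN ('h', 'cpp', 'sql')
GROUP BY
    splitByChar('/', path)[1] AS root,
    splitByChar('/', path)[2] AS sub_folder
ORDER BY root ASC, c DESC
LIMIT 5 BY root
```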
|
||||
|
||||
[play](https://play.clickhouse.com/play?user=play#V0lUSCBjdXJyZW50X2ZpbGVzIEFTCiAgICAoCiAgICAgICAgU0VMRUNUIHBhdGgKICAgICAgICBGUk9NCiAgICAgICAgKAogICAgICAgICAgICBTRUxFQ1QKICAgICAgICAgICAgICAgIG9sZF9wYXRoIEFTIHBhdGgsCiAgICAgICAgICAgICAgICBtYXgodGltZSkgQVMgbGFzdF90aW1lLAogICAgICAgICAgICAgICAgMiBBUyBjaGFuZ2VfdHlwZQogICAgICAgICAgICBGUk9NIGdpdF9jbGlja2hvdXNlLmZpbGVfY2hhbmdlcwogICAgICAgICAgICBHUk9VUCBCWSBvbGRfcGF0aAogICAgICAgICAgICBVTklPTiBBTEwKICAgICAgICAgICAgU0VMRUNUCiAgICAgICAgICAgICAgICBwYXRoLAogICAgICAgICAgICAgICAgbWF4KHRpbWUpIEFTIGxhc3RfdGltZSwKICAgICAgICAgICAgICAgIGFyZ01heChjaGFuZ2VfdHlwZSwgdGltZSkgQVMgY2hhbmdlX3R5cGUKICAgICAgICAgICAgRlJPTSBnaXRfY2xpY2tob3VzZS5maWxlX2NoYW5nZXMKICAgICAgICAgICAgR1JPVVAgQlkgcGF0aAogICAgICAgICkKICAgICAgICBHUk9VUCBCWSBwYXRoCiAgICAgICAgSEFWSU5HIChhcmdNYXgoY2hhbmdlX3R5cGUsIGxhc3RfdGltZSkgIT0gMikgQU5EIChOT1QgbWF0Y2gocGF0aCwgJyheZGJtcy8pfChebGlicy8pfChedGVzdHMvdGVzdGZsb3dzLyl8KF5wcm9ncmFtcy9zZXJ2ZXIvc3RvcmUvKScpKQogICAgICAgIE9SREVSIEJZIHBhdGggQVNDCiAgICApClNFTEVDVAogICAgY29uY2F0KHJvb3QsICcvJywgc3ViX2ZvbGRlcikgQVMgZm9sZGVyLAogICAgcm91bmQoYXZnKGRheXNfcHJlc2VudCkpIEFTIGF2Z19hZ2Vfb2ZfZmlsZXMsCiAgICBtaW4oZGF5c19wcmVzZW50KSBBUyBtaW5fYWdlX2ZpbGVzLAogICAgbWF4KGRheXNfcHJlc2VudCkgQVMgbWF4X2FnZV9maWxlcywKICAgIGNvdW50KCkgQVMgYwpGUk9NCigKICAgIFNFTEVDVAogICAgICAgIHBhdGgsCiAgICAgICAgZGF0ZURpZmYoJ2RheScsIG1pbih0aW1lKSwgdG9EYXRlKCcyMDIyLTExLTAzJykpIEFTIGRheXNfcHJlc2VudAogICAgRlJPTSBnaXRfY2xpY2tob3VzZS5maWxlX2NoYW5nZXMKICAgIFdIRVJFIChwYXRoIElOIChjdXJyZW50X2ZpbGVzKSkgQU5EIChmaWxlX2V4dGVuc2lvbiBJTiAoJ2gnLCAnY3BwJywgJ3NxbCcpKQogICAgR1JPVVAgQlkgcGF0aAopCkdST1VQIEJZCiAgICBzcGxpdEJ5Q2hhcignLycsIHBhdGgpWzFdIEFTIHJvb3QsCiAgICBzcGxpdEJ5Q2hhcignLycsIHBhdGgpWzJdIEFTIHN1Yl9mb2xkZXIKT1JERVIgQlkKICAgIHJvb3QgQVNDLAogICAgYyBERVNDCkxJTUlUIDUgQlkgcm9vdAo=)
|
||||
[play](https://sql.clickhouse.com?query_id=6YWAUQYPZINZDJGBEZBNWG)
|
||||
|
||||
```sql
|
||||
WITH current_files AS
|
||||
@ -1523,7 +1523,7 @@ LIMIT 5 BY root
|
||||
|
||||
For this question, we need the number of lines written by an author divided by the total number of lines they have had removed by another contributor.
|
||||
|
||||
[play](https://play.clickhouse.com/play?user=play#U0VMRUNUCiAgICBrLAogICAgd3JpdHRlbl9jb2RlLmMsCiAgICByZW1vdmVkX2NvZGUuYywKICAgIHJlbW92ZWRfY29kZS5jIC8gd3JpdHRlbl9jb2RlLmMgQVMgcmVtb3ZlX3JhdGlvCkZST00KKAogICAgU0VMRUNUCiAgICAgICAgYXV0aG9yIEFTIGssCiAgICAgICAgY291bnQoKSBBUyBjCiAgICBGUk9NIGdpdF9jbGlja2hvdXNlLmxpbmVfY2hhbmdlcwogICAgV0hFUkUgKHNpZ24gPSAxKSBBTkQgKGZpbGVfZXh0ZW5zaW9uIElOICgnaCcsICdjcHAnKSkgQU5EIChsaW5lX3R5cGUgTk9UIElOICgnUHVuY3QnLCAnRW1wdHknKSkKICAgIEdST1VQIEJZIGsKKSBBUyB3cml0dGVuX2NvZGUKSU5ORVIgSk9JTgooCiAgICBTRUxFQ1QKICAgICAgICBwcmV2X2F1dGhvciBBUyBrLAogICAgICAgIGNvdW50KCkgQVMgYwogICAgRlJPTSBnaXRfY2xpY2tob3VzZS5saW5lX2NoYW5nZXMKICAgIFdIRVJFIChzaWduID0gLTEpIEFORCAoZmlsZV9leHRlbnNpb24gSU4gKCdoJywgJ2NwcCcpKSBBTkQgKGxpbmVfdHlwZSBOT1QgSU4gKCdQdW5jdCcsICdFbXB0eScpKSBBTkQgKGF1dGhvciAhPSBwcmV2X2F1dGhvcikKICAgIEdST1VQIEJZIGsKKSBBUyByZW1vdmVkX2NvZGUgVVNJTkcgKGspCldIRVJFIHdyaXR0ZW5fY29kZS5jID4gMTAwMApPUkRFUiBCWSByZW1vdmVfcmF0aW8gREVTQwpMSU1JVCAxMAo=)
|
||||
[play](https://sql.clickhouse.com?query_id=T4DTWTB36WFSEYAZLMGRNF)
|
||||
|
||||
```sql
|
||||
SELECT
|
||||
@ -1627,7 +1627,7 @@ This doesn't capture the notion of a "re-write" however, where a large portion o
|
||||
|
||||
The query is limited to the current files only. We list all file changes by grouping by `path` and `commit_hash`, returning the number of lines added and removed. Using a window function, we estimate the file's total size at any moment in time by performing a cumulative sum and estimating the impact of any change on file size as `lines added - lines removed`. Using this statistic, we can calculate the percentage of the file that has been added or removed for each change. Finally, we count the number of file changes that constitute a rewrite per file i.e. `(percent_add >= 0.5) AND (percent_delete >= 0.5) AND current_size > 50`. Note we require files to be more than 50 lines to avoid early contributions to a file being counted as a rewrite. This also avoids a bias to very small files, which may be more likely to be rewritten.
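The window-function step is the heart of this approach. Assuming a `changes` subquery that returns one row per `path` and `commit_hash` with `max_time`, `num_added` and `num_deleted` (as described above), the running size estimate might be sketched as:

```sql
-- sketch: cumulative file size and per-change add/delete percentages
SELECT
    path,
    commit_hash,
    max_time,
    num_added,
    num_deleted,
    sum(num_added - num_deleted) OVER (PARTITION BY path ORDER BY max_time ASC) AS current_size,
    if(current_size > 0, num_added / current_size, 0) AS percent_add,
    if(current_size > 0, num_deleted / current_size, 0) AS percent_delete
FROM changes
```

The default window frame gives a cumulative sum up to the current change, so `current_size` approximates the file size just after each commit.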
|
||||
|
||||
[play](https://play.clickhouse.com/play?user=play#V0lUSAogICAgY3VycmVudF9maWxlcyBBUwogICAgKAogICAgICAgIFNFTEVDVCBwYXRoCiAgICAgICAgRlJPTQogICAgICAgICgKICAgICAgICAgICAgU0VMRUNUCiAgICAgICAgICAgICAgICBvbGRfcGF0aCBBUyBwYXRoLAogICAgICAgICAgICAgICAgbWF4KHRpbWUpIEFTIGxhc3RfdGltZSwKICAgICAgICAgICAgICAgIDIgQVMgY2hhbmdlX3R5cGUKICAgICAgICAgICAgRlJPTSBnaXRfY2xpY2tob3VzZS5maWxlX2NoYW5nZXMKICAgICAgICAgICAgR1JPVVAgQlkgb2xkX3BhdGgKICAgICAgICAgICAgVU5JT04gQUxMCiAgICAgICAgICAgIFNFTEVDVAogICAgICAgICAgICAgICAgcGF0aCwKICAgICAgICAgICAgICAgIG1heCh0aW1lKSBBUyBsYXN0X3RpbWUsCiAgICAgICAgICAgICAgICBhcmdNYXgoY2hhbmdlX3R5cGUsIHRpbWUpIEFTIGNoYW5nZV90eXBlCiAgICAgICAgICAgIEZST00gZ2l0X2NsaWNraG91c2UuZmlsZV9jaGFuZ2VzCiAgICAgICAgICAgIEdST1VQIEJZIHBhdGgKICAgICAgICApCiAgICAgICAgR1JPVVAgQlkgcGF0aAogICAgICAgIEhBVklORyAoYXJnTWF4KGNoYW5nZV90eXBlLCBsYXN0X3RpbWUpICE9IDIpIEFORCAoTk9UIG1hdGNoKHBhdGgsICcoXmRibXMvKXwoXmxpYnMvKXwoXnRlc3RzL3Rlc3RmbG93cy8pfChecHJvZ3JhbXMvc2VydmVyL3N0b3JlLyknKSkKICAgICAgICBPUkRFUiBCWSBwYXRoIEFTQwogICAgKSwKICAgIGNoYW5nZXMgQVMKICAgICgKICAgICAgICBTRUxFQ1QKICAgICAgICAgICAgcGF0aCwKICAgICAgICAgICAgbWF4KHRpbWUpIEFTIG1heF90aW1lLAogICAgICAgICAgICBjb21taXRfaGFzaCwKICAgICAgICAgICAgYW55KGxpbmVzX2FkZGVkKSBBUyBudW1fYWRkZWQsCiAgICAgICAgICAgIGFueShsaW5lc19kZWxldGVkKSBBUyBudW1fZGVsZXRlZCwKICAgICAgICAgICAgYW55KGNoYW5nZV90eXBlKSBBUyB0eXBlCiAgICAgICAgRlJPTSBnaXRfY2xpY2tob3VzZS5maWxlX2NoYW5nZXMKICAgICAgICBXSEVSRSAoY2hhbmdlX3R5cGUgSU4gKCdBZGQnLCAnTW9kaWZ5JykpIEFORCAocGF0aCBJTiAoY3VycmVudF9maWxlcykpIEFORCAoZmlsZV9leHRlbnNpb24gSU4gKCdoJywgJ2NwcCcsICdzcWwnKSkKICAgICAgICBHUk9VUCBCWQogICAgICAgICAgICBwYXRoLAogICAgICAgICAgICBjb21taXRfaGFzaAogICAgICAgIE9SREVSIEJZCiAgICAgICAgICAgIHBhdGggQVNDLAogICAgICAgICAgICBtYXhfdGltZSBBU0MKICAgICksCiAgICByZXdyaXRlcyBBUwogICAgKAogICAgICAgIFNFTEVDVAogICAgICAgICAgICBwYXRoLAogICAgICAgICAgICBjb21taXRfaGFzaCwKICAgICAgICAgICAgbWF4X3RpbWUsCiAgICAgICAgICAgIHR5cGUsCiAgICAgICAgICAgIG51bV9hZGRlZCwKICAgICAgICAgICAgbnVtX2RlbGV0ZWQsCiAgICAgICAgICAgIHN1bShudW1fYWRkZWQgLSBudW1fZGVsZXRlZCkgT1ZFUiAoUEFSVElUSU9OIEJZIHBhdGggT1JERVIgQlkgbWF4X3RpbWUgQVNDKSBBUyBjdXJyZW50X3NpemUsCiAgICAgICAgICAgIGlmKGN1cnJlbnRfc2l6ZSA+IDAsIG51bV9hZGRlZCAvIGN1cnJlbnRfc2l6ZSwgMCkgQVMgcGVyY2VudF9hZGQsCiAgICAgICAgICAgIGlmKGN1cnJlbnRfc2l6ZSA+IDAsIG51bV9kZWxldGVkIC8gY3VycmVudF9zaXplLCAwKSBBUyBwZXJjZW50X2RlbGV0ZQogICAgICAgIEZST00gY2hhbmdlcwogICAgKQpTRUxFQ1QKICAgIHBhdGgsCiAgICBjb3VudCgpIEFTIG51bV9yZXdyaXRlcwpGUk9NIHJld3JpdGVzCldIRVJFICh0eXBlID0gJ01vZGlmeScpIEFORCAocGVyY2VudF9hZGQgPj0gMC41KSBBTkQgKHBlcmNlbnRfZGVsZXRlID49IDAuNSkgQU5EIChjdXJyZW50X3NpemUgPiA1MCkKR1JPVVAgQlkgcGF0aApPUkRFUiBCWSBudW1fcmV3cml0ZXMgREVTQwpMSU1JVCAxMA==)
|
||||
[play](https://sql.clickhouse.com?query_id=5PL1QLNSH6QQTR8H9HINNP)
|
||||
|
||||
```sql
|
||||
WITH
|
||||
@ -1719,7 +1719,7 @@ We query for lines added, joining this with the lines removed - filtering to cas
|
||||
|
||||
Finally, we aggregate across this dataset to compute the average number of days lines stay in the repository by the day of the week.
|
||||
|
||||
[play](https://play.clickhouse.com/play?user=play#U0VMRUNUCiAgICBkYXlfb2Zfd2Vla19hZGRlZCwKICAgIGNvdW50KCkgQVMgbnVtLAogICAgYXZnKGRheXNfcHJlc2VudCkgQVMgYXZnX2RheXNfcHJlc2VudApGUk9NCigKICAgIFNFTEVDVAogICAgICAgIGFkZGVkX2NvZGUubGluZSwKICAgICAgICBhZGRlZF9jb2RlLnRpbWUgQVMgYWRkZWRfZGF5LAogICAgICAgIGRhdGVEaWZmKCdkYXknLCBhZGRlZF9jb2RlLnRpbWUsIHJlbW92ZWRfY29kZS50aW1lKSBBUyBkYXlzX3ByZXNlbnQKICAgIEZST00KICAgICgKICAgICAgICBTRUxFQ1QKICAgICAgICAgICAgcGF0aCwKICAgICAgICAgICAgbGluZSwKICAgICAgICAgICAgbWF4KHRpbWUpIEFTIHRpbWUKICAgICAgICBGUk9NIGdpdF9jbGlja2hvdXNlLmxpbmVfY2hhbmdlcwogICAgICAgIFdIRVJFIChzaWduID0gMSkgQU5EIChsaW5lX3R5cGUgTk9UIElOICgnUHVuY3QnLCAnRW1wdHknKSkKICAgICAgICBHUk9VUCBCWQogICAgICAgICAgICBwYXRoLAogICAgICAgICAgICBsaW5lCiAgICApIEFTIGFkZGVkX2NvZGUKICAgIElOTkVSIEpPSU4KICAgICgKICAgICAgICBTRUxFQ1QKICAgICAgICAgICAgcGF0aCwKICAgICAgICAgICAgbGluZSwKICAgICAgICAgICAgbWF4KHRpbWUpIEFTIHRpbWUKICAgICAgICBGUk9NIGdpdF9jbGlja2hvdXNlLmxpbmVfY2hhbmdlcwogICAgICAgIFdIRVJFIChzaWduID0gLTEpIEFORCAobGluZV90eXBlIE5PVCBJTiAoJ1B1bmN0JywgJ0VtcHR5JykpCiAgICAgICAgR1JPVVAgQlkKICAgICAgICAgICAgcGF0aCwKICAgICAgICAgICAgbGluZQogICAgKSBBUyByZW1vdmVkX2NvZGUgVVNJTkcgKHBhdGgsIGxpbmUpCiAgICBXSEVSRSByZW1vdmVkX2NvZGUudGltZSA+IGFkZGVkX2NvZGUudGltZQopCkdST1VQIEJZIGRheU9mV2VlayhhZGRlZF9kYXkpIEFTIGRheV9vZl93ZWVrX2FkZGVk)
|
||||
[play](https://sql.clickhouse.com?query_id=GVF23LEZTNZI22BT8LZBBE)
|
||||
|
||||
```sql
|
||||
SELECT
|
||||
@ -1778,7 +1778,7 @@ GROUP BY dayOfWeek(added_day) AS day_of_week_added
|
||||
This query uses the same principle as [What weekday does the code have the highest chance to stay in the repository](#what-weekday-does-the-code-have-the-highest-chance-to-stay-in-the-repository) - by aiming to uniquely identify a line of code using the path and line contents.
|
||||
This allows us to identify the time between when a line was added and removed. We filter to current files and code only, however, and average the time for each file across lines.
|
||||
|
||||
[play](https://play.clickhouse.com/play?user=play#V0lUSAogICAgY3VycmVudF9maWxlcyBBUwogICAgKAogICAgICAgIFNFTEVDVCBwYXRoCiAgICAgICAgRlJPTQogICAgICAgICgKICAgICAgICAgICAgU0VMRUNUCiAgICAgICAgICAgICAgICBvbGRfcGF0aCBBUyBwYXRoLAogICAgICAgICAgICAgICAgbWF4KHRpbWUpIEFTIGxhc3RfdGltZSwKICAgICAgICAgICAgICAgIDIgQVMgY2hhbmdlX3R5cGUKICAgICAgICAgICAgRlJPTSBnaXRfY2xpY2tob3VzZS5maWxlX2NoYW5nZXMKICAgICAgICAgICAgR1JPVVAgQlkgb2xkX3BhdGgKICAgICAgICAgICAgVU5JT04gQUxMCiAgICAgICAgICAgIFNFTEVDVAogICAgICAgICAgICAgICAgcGF0aCwKICAgICAgICAgICAgICAgIG1heCh0aW1lKSBBUyBsYXN0X3RpbWUsCiAgICAgICAgICAgICAgICBhcmdNYXgoY2hhbmdlX3R5cGUsIHRpbWUpIEFTIGNoYW5nZV90eXBlCiAgICAgICAgICAgIEZST00gZ2l0X2NsaWNraG91c2UuZmlsZV9jaGFuZ2VzCiAgICAgICAgICAgIEdST1VQIEJZIHBhdGgKICAgICAgICApCiAgICAgICAgR1JPVVAgQlkgcGF0aAogICAgICAgIEhBVklORyAoYXJnTWF4KGNoYW5nZV90eXBlLCBsYXN0X3RpbWUpICE9IDIpIEFORCAoTk9UIG1hdGNoKHBhdGgsICcoXmRibXMvKXwoXmxpYnMvKXwoXnRlc3RzL3Rlc3RmbG93cy8pfChecHJvZ3JhbXMvc2VydmVyL3N0b3JlLyknKSkKICAgICAgICBPUkRFUiBCWSBwYXRoIEFTQwogICAgKSwKICAgIGxpbmVzX3JlbW92ZWQgQVMKICAgICgKICAgICAgICBTRUxFQ1QKICAgICAgICAgICAgYWRkZWRfY29kZS5wYXRoIEFTIHBhdGgsCiAgICAgICAgICAgIGFkZGVkX2NvZGUubGluZSwKICAgICAgICAgICAgYWRkZWRfY29kZS50aW1lIEFTIGFkZGVkX2RheSwKICAgICAgICAgICAgZGF0ZURpZmYoJ2RheScsIGFkZGVkX2NvZGUudGltZSwgcmVtb3ZlZF9jb2RlLnRpbWUpIEFTIGRheXNfcHJlc2VudAogICAgICAgIEZST00KICAgICAgICAoCiAgICAgICAgICAgIFNFTEVDVAogICAgICAgICAgICAgICAgcGF0aCwKICAgICAgICAgICAgICAgIGxpbmUsCiAgICAgICAgICAgICAgICBtYXgodGltZSkgQVMgdGltZSwKICAgICAgICAgICAgICAgIGFueShmaWxlX2V4dGVuc2lvbikgQVMgZmlsZV9leHRlbnNpb24KICAgICAgICAgICAgRlJPTSBnaXRfY2xpY2tob3VzZS5saW5lX2NoYW5nZXMKICAgICAgICAgICAgV0hFUkUgKHNpZ24gPSAxKSBBTkQgKGxpbmVfdHlwZSBOT1QgSU4gKCdQdW5jdCcsICdFbXB0eScpKQogICAgICAgICAgICBHUk9VUCBCWQogICAgICAgICAgICAgICAgcGF0aCwKICAgICAgICAgICAgICAgIGxpbmUKICAgICAgICApIEFTIGFkZGVkX2NvZGUKICAgICAgICBJTk5FUiBKT0lOCiAgICAgICAgKAogICAgICAgICAgICBTRUxFQ1QKICAgICAgICAgICAgICAgIHBhdGgsCiAgICAgICAgICAgICAgICBsaW5lLAogICAgICAgICAgICAgICAgbWF4KHRpbWUpIEFTIHRpbWUKICAgICAgICAgICAgRlJPTSBnaXRfY2xpY2tob3VzZS5saW5lX2NoYW5nZXMKICAgICAgICAgICAgV0hFUkUgKHNpZ24gPSAtMSkgQU5EIChsaW5lX3R5cGUgTk9UIElOICgnUHVuY3QnLCAnRW1wdHknKSkKICAgICAgICAgICAgR1JPVVAgQlkKICAgICAgICAgICAgICAgIHBhdGgsCiAgICAgICAgICAgICAgICBsaW5lCiAgICAgICAgKSBBUyByZW1vdmVkX2NvZGUgVVNJTkcgKHBhdGgsIGxpbmUpCiAgICAgICAgV0hFUkUgKHJlbW92ZWRfY29kZS50aW1lID4gYWRkZWRfY29kZS50aW1lKSBBTkQgKHBhdGggSU4gKGN1cnJlbnRfZmlsZXMpKSBBTkQgKGZpbGVfZXh0ZW5zaW9uIElOICgnaCcsICdjcHAnLCAnc3FsJykpCiAgICApClNFTEVDVAogICAgcGF0aCwKICAgIGF2ZyhkYXlzX3ByZXNlbnQpIEFTIGF2Z19jb2RlX2FnZQpGUk9NIGxpbmVzX3JlbW92ZWQKR1JPVVAgQlkgcGF0aApPUkRFUiBCWSBhdmdfY29kZV9hZ2UgREVTQwpMSU1JVCAxMA==)
|
||||
[play](https://sql.clickhouse.com?query_id=3CYYT7HEHWRFHVCM9JCKSU)
|
||||
|
||||
```sql
|
||||
WITH
|
||||
@ -1869,7 +1869,7 @@ There are a few ways we can address this question. Focusing on the code to test
|
||||
|
||||
Note that we limit to users with more than 20 changes to focus on regular committers and avoid a bias toward one-off contributions.
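A sketch of this ratio, assuming the `path` and `file_extension` columns of `file_changes`, might be:

```sql
-- sketch: code vs. test contributions per author
SELECT
    author,
    countIf((file_extension IN ('h', 'cpp', 'sql', 'sh', 'py', 'expect')) AND (path LIKE '%tests%')) AS test,
    countIf((file_extension IN ('h', 'cpp', 'sql')) AND (NOT (path LIKE '%tests%'))) AS code,
    code / (code + test) AS ratio_code
FROM git_clickhouse.file_changes
GROUP BY author
HAVING code > 20
ORDER BY code DESC
LIMIT 20
```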
|
||||
|
||||
[play](https://play.clickhouse.com/play?user=play#U0VMRUNUCiAgICBhdXRob3IsCiAgICBjb3VudElmKChmaWxlX2V4dGVuc2lvbiBJTiAoJ2gnLCAnY3BwJywgJ3NxbCcsICdzaCcsICdweScsICdleHBlY3QnKSkgQU5EIChwYXRoIExJS0UgJyV0ZXN0cyUnKSkgQVMgdGVzdCwKICAgIGNvdW50SWYoKGZpbGVfZXh0ZW5zaW9uIElOICgnaCcsICdjcHAnLCAnc3FsJykpIEFORCAoTk9UIChwYXRoIExJS0UgJyV0ZXN0cyUnKSkpIEFTIGNvZGUsCiAgICBjb2RlIC8gKGNvZGUgKyB0ZXN0KSBBUyByYXRpb19jb2RlCkZST00gZ2l0X2NsaWNraG91c2UuZmlsZV9jaGFuZ2VzCkdST1VQIEJZIGF1dGhvcgpIQVZJTkcgY29kZSA+IDIwCk9SREVSIEJZIGNvZGUgREVTQwpMSU1JVCAyMA==)
|
||||
[play](https://sql.clickhouse.com?query_id=JGKZSEQDPDTDKZXD3ZCGLE)
|
||||
|
||||
```sql
|
||||
SELECT
|
||||
@ -1911,7 +1911,7 @@ LIMIT 20
|
||||
|
||||
We can plot this distribution as a histogram.
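One way to do this is to wrap a per-author ratio, similar to the previous query, in the `histogram` aggregate function and unpack the buckets with `arrayJoin`. A condensed sketch:

```sql
-- sketch: 10-bucket histogram of the code ratio, rendered with bar()
WITH (
        SELECT histogram(10)(code / (code + test))
        FROM
        (
            SELECT
                countIf(path LIKE '%tests%') AS test,
                countIf(path NOT LIKE '%tests%') AS code
            FROM git_clickhouse.file_changes
            WHERE file_extension IN ('h', 'cpp', 'sql')
            GROUP BY author
            HAVING code > 20
        )
    ) AS hist
SELECT
    arrayJoin(hist).1 AS lower,
    arrayJoin(hist).2 AS upper,
    bar(arrayJoin(hist).3, 0, 100, 50) AS bar
```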
|
||||
|
||||
[play](https://play.clickhouse.com/play?user=play#V0lUSCAoCiAgICAgICAgU0VMRUNUIGhpc3RvZ3JhbSgxMCkocmF0aW9fY29kZSkgQVMgaGlzdAogICAgICAgIEZST00KICAgICAgICAoCiAgICAgICAgICAgIFNFTEVDVAogICAgICAgICAgICAgICAgYXV0aG9yLAogICAgICAgICAgICAgICAgY291bnRJZigoZmlsZV9leHRlbnNpb24gSU4gKCdoJywgJ2NwcCcsICdzcWwnLCAnc2gnLCAncHknLCAnZXhwZWN0JykpIEFORCAocGF0aCBMSUtFICcldGVzdHMlJykpIEFTIHRlc3QsCiAgICAgICAgICAgICAgICBjb3VudElmKChmaWxlX2V4dGVuc2lvbiBJTiAoJ2gnLCAnY3BwJywgJ3NxbCcpKSBBTkQgKE5PVCAocGF0aCBMSUtFICcldGVzdHMlJykpKSBBUyBjb2RlLAogICAgICAgICAgICAgICAgY29kZSAvIChjb2RlICsgdGVzdCkgQVMgcmF0aW9fY29kZQogICAgICAgICAgICBGUk9NIGdpdF9jbGlja2hvdXNlLmZpbGVfY2hhbmdlcwogICAgICAgICAgICBHUk9VUCBCWSBhdXRob3IKICAgICAgICAgICAgSEFWSU5HIGNvZGUgPiAyMAogICAgICAgICAgICBPUkRFUiBCWSBjb2RlIERFU0MKICAgICAgICAgICAgTElNSVQgMjAKICAgICAgICApCiAgICApIEFTIGhpc3QKU0VMRUNUCiAgICBhcnJheUpvaW4oaGlzdCkuMSBBUyBsb3dlciwKICAgIGFycmF5Sm9pbihoaXN0KS4yIEFTIHVwcGVyLAogICAgYmFyKGFycmF5Sm9pbihoaXN0KS4zLCAwLCAxMDAsIDUwMCkgQVMgYmFy)
|
||||
[play](https://sql.clickhouse.com?query_id=S5AJIIRGSUAY1JXEVHQDAK)
|
||||
|
||||
```sql
|
||||
WITH (
|
||||
@ -1954,7 +1954,7 @@ Most contributors write more code than tests, as you'd expect.
|
||||
|
||||
What about who adds the most comments when contributing code?
|
||||
|
||||
[play](https://play.clickhouse.com/play?user=play#U0VMRUNUCiAgICBhdXRob3IsCiAgICBhdmcocmF0aW9fY29tbWVudHMpIEFTIGF2Z19yYXRpb19jb21tZW50cywKICAgIHN1bShjb2RlKSBBUyBjb2RlCkZST00KKAogICAgU0VMRUNUCiAgICAgICAgYXV0aG9yLAogICAgICAgIGNvbW1pdF9oYXNoLAogICAgICAgIGNvdW50SWYobGluZV90eXBlID0gJ0NvbW1lbnQnKSBBUyBjb21tZW50cywKICAgICAgICBjb3VudElmKGxpbmVfdHlwZSA9ICdDb2RlJykgQVMgY29kZSwKICAgICAgICBpZihjb21tZW50cyA+IDAsIGNvbW1lbnRzIC8gKGNvbW1lbnRzICsgY29kZSksIDApIEFTIHJhdGlvX2NvbW1lbnRzCiAgICBGUk9NIGdpdF9jbGlja2hvdXNlLmxpbmVfY2hhbmdlcwogICAgR1JPVVAgQlkKICAgICAgICBhdXRob3IsCiAgICAgICAgY29tbWl0X2hhc2gKKQpHUk9VUCBCWSBhdXRob3IKT1JERVIgQlkgY29kZSBERVNDCkxJTUlUIDEwCg==)
|
||||
[play](https://sql.clickhouse.com?query_id=EXPHDIURBTOXXOK1TGNNYD)
|
||||
|
||||
```sql
|
||||
SELECT
|
||||
@ -2038,7 +2038,7 @@ To compute this, we first work out each author's comments ratio over time - simi
|
||||
|
||||
After calculating the average by-week offset across all authors, we sample these results by selecting every 10th week.
|
||||
|
||||
[play](https://play.clickhouse.com/play?user=play#V0lUSCBhdXRob3JfcmF0aW9zX2J5X29mZnNldCBBUwogICAgKAogICAgICAgIFNFTEVDVAogICAgICAgICAgICBhdXRob3IsCiAgICAgICAgICAgIGRhdGVEaWZmKCd3ZWVrJywgc3RhcnRfZGF0ZXMuc3RhcnRfZGF0ZSwgY29udHJpYnV0aW9ucy53ZWVrKSBBUyB3ZWVrX29mZnNldCwKICAgICAgICAgICAgcmF0aW9fY29kZQogICAgICAgIEZST00KICAgICAgICAoCiAgICAgICAgICAgIFNFTEVDVAogICAgICAgICAgICAgICAgYXV0aG9yLAogICAgICAgICAgICAgICAgdG9TdGFydE9mV2VlayhtaW4odGltZSkpIEFTIHN0YXJ0X2RhdGUKICAgICAgICAgICAgRlJPTSBnaXRfY2xpY2tob3VzZS5saW5lX2NoYW5nZXMKICAgICAgICAgICAgV0hFUkUgZmlsZV9leHRlbnNpb24gSU4gKCdoJywgJ2NwcCcsICdzcWwnKQogICAgICAgICAgICBHUk9VUCBCWSBhdXRob3IgQVMgc3RhcnRfZGF0ZXMKICAgICAgICApIEFTIHN0YXJ0X2RhdGVzCiAgICAgICAgSU5ORVIgSk9JTgogICAgICAgICgKICAgICAgICAgICAgU0VMRUNUCiAgICAgICAgICAgICAgICBhdXRob3IsCiAgICAgICAgICAgICAgICBjb3VudElmKGxpbmVfdHlwZSA9ICdDb2RlJykgQVMgY29kZSwKICAgICAgICAgICAgICAgIGNvdW50SWYoKGxpbmVfdHlwZSA9ICdDb21tZW50JykgT1IgKGxpbmVfdHlwZSA9ICdQdW5jdCcpKSBBUyBjb21tZW50cywKICAgICAgICAgICAgICAgIGNvbW1lbnRzIC8gKGNvbW1lbnRzICsgY29kZSkgQVMgcmF0aW9fY29kZSwKICAgICAgICAgICAgICAgIHRvU3RhcnRPZldlZWsodGltZSkgQVMgd2VlawogICAgICAgICAgICBGUk9NIGdpdF9jbGlja2hvdXNlLmxpbmVfY2hhbmdlcwogICAgICAgICAgICBXSEVSRSAoZmlsZV9leHRlbnNpb24gSU4gKCdoJywgJ2NwcCcsICdzcWwnKSkgQU5EIChzaWduID0gMSkKICAgICAgICAgICAgR1JPVVAgQlkKICAgICAgICAgICAgICAgIHRpbWUsCiAgICAgICAgICAgICAgICBhdXRob3IKICAgICAgICAgICAgSEFWSU5HIGNvZGUgPiAyMAogICAgICAgICAgICBPUkRFUiBCWQogICAgICAgICAgICAgICAgYXV0aG9yIEFTQywKICAgICAgICAgICAgICAgIHRpbWUgQVNDCiAgICAgICAgKSBBUyBjb250cmlidXRpb25zIFVTSU5HIChhdXRob3IpCiAgICApClNFTEVDVAogICAgd2Vla19vZmZzZXQsCiAgICBhdmcocmF0aW9fY29kZSkgQVMgYXZnX2NvZGVfcmF0aW8KRlJPTSBhdXRob3JfcmF0aW9zX2J5X29mZnNldApHUk9VUCBCWSB3ZWVrX29mZnNldApIQVZJTkcgKHdlZWtfb2Zmc2V0ICUgMTApID0gMApPUkRFUiBCWSB3ZWVrX29mZnNldCBBU0MKTElNSVQgMjAK)
|
||||
[play](https://sql.clickhouse.com?query_id=SBHEWR8XC4PRHY13HPPKCN)
|
||||
|
||||
```sql
|
||||
WITH author_ratios_by_offset AS
|
||||
@ -2116,7 +2116,7 @@ Encouragingly, our comment % is pretty constant and doesn't degrade the longer a
|
||||
|
||||
We can use the same principle as [List files that were rewritten most number of time or by most of authors](#list-files-that-were-rewritten-most-number-of-time-or-by-most-of-authors) to identify rewrites but consider all files. A window function is used to compute the time between rewrites for each file. From this, we can calculate an average and median across all files.
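The final aggregation can be sketched as below. `rewrites` is the CTE referenced in the full query, and `rewrite_days` is assumed to be the day-level `dateDiff` between consecutive rewrites of a file, obtained from the window function:

```sql
-- sketch: average and median (half-life) of days between rewrites
-- rewrite_days is assumed to be computed inside the rewrites CTE as
--   dateDiff('day', previous_rewrite, max_time)
SELECT
    avgIf(rewrite_days, rewrite_days > 0) AS avg_rewrite_time,
    quantilesTimingIf(0.5)(rewrite_days, rewrite_days > 0) AS half_life
FROM rewrites
```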
|
||||
|
||||
[play](https://play.clickhouse.com/play?user=play#V0lUSAogICAgY2hhbmdlcyBBUwogICAgKAogICAgICAgIFNFTEVDVAogICAgICAgICAgICBwYXRoLAogICAgICAgICAgICBjb21taXRfaGFzaCwKICAgICAgICAgICAgbWF4X3RpbWUsCiAgICAgICAgICAgIHR5cGUsCiAgICAgICAgICAgIG51bV9hZGRlZCwKICAgICAgICAgICAgbnVtX2RlbGV0ZWQsCiAgICAgICAgICAgIHN1bShudW1fYWRkZWQgLSBudW1fZGVsZXRlZCkgT1ZFUiAoUEFSVElUSU9OIEJZIHBhdGggT1JERVIgQlkgbWF4X3RpbWUgQVNDKSBBUyBjdXJyZW50X3NpemUsCiAgICAgICAgICAgIGlmKGN1cnJlbnRfc2l6ZSA+IDAsIG51bV9hZGRlZCAvIGN1cnJlbnRfc2l6ZSwgMCkgQVMgcGVyY2VudF9hZGQsCiAgICAgICAgICAgIGlmKGN1cnJlbnRfc2l6ZSA+IDAsIG51bV9kZWxldGVkIC8gY3VycmVudF9zaXplLCAwKSBBUyBwZXJjZW50X2RlbGV0ZQogICAgICAgIEZST00KICAgICAgICAoCiAgICAgICAgICAgIFNFTEVDVAogICAgICAgICAgICAgICAgcGF0aCwKICAgICAgICAgICAgICAgIG1heCh0aW1lKSBBUyBtYXhfdGltZSwKICAgICAgICAgICAgICAgIGNvbW1pdF9oYXNoLAogICAgICAgICAgICAgICAgYW55KGxpbmVzX2FkZGVkKSBBUyBudW1fYWRkZWQsCiAgICAgICAgICAgICAgICBhbnkobGluZXNfZGVsZXRlZCkgQVMgbnVtX2RlbGV0ZWQsCiAgICAgICAgICAgICAgICBhbnkoY2hhbmdlX3R5cGUpIEFTIHR5cGUKICAgICAgICAgICAgRlJPTSBnaXRfY2xpY2tob3VzZS5maWxlX2NoYW5nZXMKICAgICAgICAgICAgV0hFUkUgKGNoYW5nZV90eXBlIElOICgnQWRkJywgJ01vZGlmeScpKSBBTkQgKGZpbGVfZXh0ZW5zaW9uIElOICgnaCcsICdjcHAnLCAnc3FsJykpCiAgICAgICAgICAgIEdST1VQIEJZCiAgICAgICAgICAgICAgICBwYXRoLAogICAgICAgICAgICAgICAgY29tbWl0X2hhc2gKICAgICAgICAgICAgT1JERVIgQlkKICAgICAgICAgICAgICAgIHBhdGggQVNDLAogICAgICAgICAgICAgICAgbWF4X3RpbWUgQVNDCiAgICAgICAgKQogICAgKSwKICAgIHJld3JpdGVzIEFTCiAgICAoCiAgICAgICAgU0VMRUNUCiAgICAgICAgICAgICosCiAgICAgICAgICAgIGFueShtYXhfdGltZSkgT1ZFUiAoUEFSVElUSU9OIEJZIHBhdGggT1JERVIgQlkgbWF4X3RpbWUgQVNDIFJPV1MgQkVUV0VFTiAxIFBSRUNFRElORyBBTkQgQ1VSUkVOVCBST1cpIEFTIHByZXZpb3VzX3Jld3JpdGUsCiAgICAgICAgICAgIGRhdGVEaWZmKCdkYXknLCBwcmV2aW91c19yZXdyaXRlLCBtYXhfdGltZSkgQVMgcmV3cml0ZV9kYXlzCiAgICAgICAgRlJPTSBjaGFuZ2VzCiAgICAgICAgV0hFUkUgKHR5cGUgPSAnTW9kaWZ5JykgQU5EIChwZXJjZW50X2FkZCA+PSAwLjUpIEFORCAocGVyY2VudF9kZWxldGUgPj0gMC41KSBBTkQgKGN1cnJlbnRfc2l6ZSA+IDUwKQogICAgKQpTRUxFQ1QKICAgIGF2Z0lmKHJld3JpdGVfZGF5cywgcmV3cml0ZV9kYXlzID4gMCkgQVMgYXZnX3Jld3JpdGVfdGltZSwKICAgIHF1YW50aWxlc1RpbWluZ0lmKDAuNSkocmV3cml0ZV9kYXlzLCByZXdyaXRlX2RheXMgPiAwKSBBUyBoYWxmX2xpZmUKRlJPTSByZXdyaXRlcw==)
|
||||
[play](https://sql.clickhouse.com?query_id=WSHUEPJP9TNJUH7QITWWOR)
|
||||
|
||||
```sql
|
||||
WITH
|
||||
@ -2176,7 +2176,7 @@ FROM rewrites
|
||||
|
||||
Similar to [What is the average time before code will be rewritten and the median (half-life of code decay)?](#what-is-the-average-time-before-code-will-be-rewritten-and-the-median-half-life-of-code-decay) and [List files that were rewritten most number of time or by most of authors](#list-files-that-were-rewritten-most-number-of-time-or-by-most-of-authors), except we aggregate by day of week. Adjust as required e.g. month of year.
|
||||
|
||||
[play](https://play.clickhouse.com/play?user=play#V0lUSAogICAgY2hhbmdlcyBBUwogICAgKAogICAgICAgIFNFTEVDVAogICAgICAgICAgICBwYXRoLAogICAgICAgICAgICBjb21taXRfaGFzaCwKICAgICAgICAgICAgbWF4X3RpbWUsCiAgICAgICAgICAgIHR5cGUsCiAgICAgICAgICAgIG51bV9hZGRlZCwKICAgICAgICAgICAgbnVtX2RlbGV0ZWQsCiAgICAgICAgICAgIHN1bShudW1fYWRkZWQgLSBudW1fZGVsZXRlZCkgT1ZFUiAoUEFSVElUSU9OIEJZIHBhdGggT1JERVIgQlkgbWF4X3RpbWUgQVNDKSBBUyBjdXJyZW50X3NpemUsCiAgICAgICAgICAgIGlmKGN1cnJlbnRfc2l6ZSA+IDAsIG51bV9hZGRlZCAvIGN1cnJlbnRfc2l6ZSwgMCkgQVMgcGVyY2VudF9hZGQsCiAgICAgICAgICAgIGlmKGN1cnJlbnRfc2l6ZSA+IDAsIG51bV9kZWxldGVkIC8gY3VycmVudF9zaXplLCAwKSBBUyBwZXJjZW50X2RlbGV0ZQogICAgICAgIEZST00KICAgICAgICAoCiAgICAgICAgICAgIFNFTEVDVAogICAgICAgICAgICAgICAgcGF0aCwKICAgICAgICAgICAgICAgIG1heCh0aW1lKSBBUyBtYXhfdGltZSwKICAgICAgICAgICAgICAgIGNvbW1pdF9oYXNoLAogICAgICAgICAgICAgICAgYW55KGZpbGVfbGluZXNfYWRkZWQpIEFTIG51bV9hZGRlZCwKICAgICAgICAgICAgICAgIGFueShmaWxlX2xpbmVzX2RlbGV0ZWQpIEFTIG51bV9kZWxldGVkLAogICAgICAgICAgICAgICAgYW55KGZpbGVfY2hhbmdlX3R5cGUpIEFTIHR5cGUKICAgICAgICAgICAgRlJPTSBnaXRfY2xpY2tob3VzZS5saW5lX2NoYW5nZXMKICAgICAgICAgICAgV0hFUkUgKGZpbGVfY2hhbmdlX3R5cGUgSU4gKCdBZGQnLCAnTW9kaWZ5JykpIEFORCAoZmlsZV9leHRlbnNpb24gSU4gKCdoJywgJ2NwcCcsICdzcWwnKSkKICAgICAgICAgICAgR1JPVVAgQlkKICAgICAgICAgICAgICAgIHBhdGgsCiAgICAgICAgICAgICAgICBjb21taXRfaGFzaAogICAgICAgICAgICBPUkRFUiBCWQogICAgICAgICAgICAgICAgcGF0aCBBU0MsCiAgICAgICAgICAgICAgICBtYXhfdGltZSBBU0MKICAgICAgICApCiAgICApLAogICAgcmV3cml0ZXMgQVMKICAgICgKICAgICAgICBTRUxFQ1QgYW55KG1heF90aW1lKSBPVkVSIChQQVJUSVRJT04gQlkgcGF0aCBPUkRFUiBCWSBtYXhfdGltZSBBU0MgUk9XUyBCRVRXRUVOIDEgUFJFQ0VESU5HIEFORCBDVVJSRU5UIFJPVykgQVMgcHJldmlvdXNfcmV3cml0ZQogICAgICAgIEZST00gY2hhbmdlcwogICAgICAgIFdIRVJFICh0eXBlID0gJ01vZGlmeScpIEFORCAocGVyY2VudF9hZGQgPj0gMC41KSBBTkQgKHBlcmNlbnRfZGVsZXRlID49IDAuNSkgQU5EIChjdXJyZW50X3NpemUgPiA1MCkKICAgICkKU0VMRUNUCiAgICBkYXlPZldlZWsocHJldmlvdXNfcmV3cml0ZSkgQVMgZGF5T2ZXZWVrLAogICAgY291bnQoKSBBUyBudW1fcmVfd3JpdGVzCkZST00gcmV3cml0ZXMKR1JPVVAgQlkgZGF5T2ZXZWVr)
|
||||
[play](https://sql.clickhouse.com?query_id=8PQNWEWHAJTGN6FTX59KH2)
|
||||
|
||||
```sql
|
||||
WITH
|
||||
@ -2240,7 +2240,7 @@ GROUP BY dayOfWeek
|
||||
|
||||
We define "sticky" as how long does an author's code stay before its rewritten. Similar to the previous question [What is the average time before code will be rewritten and the median (half-life of code decay)?](#what-is-the-average-time-before-code-will-be-rewritten-and-the-median-half-life-of-code-decay) - using the same metric for rewrites i.e. 50% additions and 50% deletions to the file. We compute the average rewrite time per author and only consider contributors with more than two files.
|
||||
|
||||
[play](https://play.clickhouse.com/play?user=play#V0lUSAogICAgY2hhbmdlcyBBUwogICAgKAogICAgICAgIFNFTEVDVAogICAgICAgICAgICBwYXRoLAogICAgICAgICAgICBhdXRob3IsCiAgICAgICAgICAgIGNvbW1pdF9oYXNoLAogICAgICAgICAgICBtYXhfdGltZSwKICAgICAgICAgICAgdHlwZSwKICAgICAgICAgICAgbnVtX2FkZGVkLAogICAgICAgICAgICBudW1fZGVsZXRlZCwKICAgICAgICAgICAgc3VtKG51bV9hZGRlZCAtIG51bV9kZWxldGVkKSBPVkVSIChQQVJUSVRJT04gQlkgcGF0aCBPUkRFUiBCWSBtYXhfdGltZSBBU0MpIEFTIGN1cnJlbnRfc2l6ZSwKICAgICAgICAgICAgaWYoY3VycmVudF9zaXplID4gMCwgbnVtX2FkZGVkIC8gY3VycmVudF9zaXplLCAwKSBBUyBwZXJjZW50X2FkZCwKICAgICAgICAgICAgaWYoY3VycmVudF9zaXplID4gMCwgbnVtX2RlbGV0ZWQgLyBjdXJyZW50X3NpemUsIDApIEFTIHBlcmNlbnRfZGVsZXRlCiAgICAgICAgRlJPTQogICAgICAgICgKICAgICAgICAgICAgU0VMRUNUCiAgICAgICAgICAgICAgICBwYXRoLAogICAgICAgICAgICAgICAgYW55KGF1dGhvcikgQVMgYXV0aG9yLAogICAgICAgICAgICAgICAgbWF4KHRpbWUpIEFTIG1heF90aW1lLAogICAgICAgICAgICAgICAgY29tbWl0X2hhc2gsCiAgICAgICAgICAgICAgICBhbnkoZmlsZV9saW5lc19hZGRlZCkgQVMgbnVtX2FkZGVkLAogICAgICAgICAgICAgICAgYW55KGZpbGVfbGluZXNfZGVsZXRlZCkgQVMgbnVtX2RlbGV0ZWQsCiAgICAgICAgICAgICAgICBhbnkoZmlsZV9jaGFuZ2VfdHlwZSkgQVMgdHlwZQogICAgICAgICAgICBGUk9NIGdpdF9jbGlja2hvdXNlLmxpbmVfY2hhbmdlcwogICAgICAgICAgICBXSEVSRSAoZmlsZV9jaGFuZ2VfdHlwZSBJTiAoJ0FkZCcsICdNb2RpZnknKSkgQU5EIChmaWxlX2V4dGVuc2lvbiBJTiAoJ2gnLCAnY3BwJywgJ3NxbCcpKQogICAgICAgICAgICBHUk9VUCBCWQogICAgICAgICAgICAgICAgcGF0aCwKICAgICAgICAgICAgICAgIGNvbW1pdF9oYXNoCiAgICAgICAgICAgIE9SREVSIEJZCiAgICAgICAgICAgICAgICBwYXRoIEFTQywKICAgICAgICAgICAgICAgIG1heF90aW1lIEFTQwogICAgICAgICkKICAgICksCiAgICByZXdyaXRlcyBBUwogICAgKAogICAgICAgIFNFTEVDVAogICAgICAgICAgICAqLAogICAgICAgICAgICBhbnkobWF4X3RpbWUpIE9WRVIgKFBBUlRJVElPTiBCWSBwYXRoIE9SREVSIEJZIG1heF90aW1lIEFTQyBST1dTIEJFVFdFRU4gMSBQUkVDRURJTkcgQU5EIENVUlJFTlQgUk9XKSBBUyBwcmV2aW91c19yZXdyaXRlLAogICAgICAgICAgICBkYXRlRGlmZignZGF5JywgcHJldmlvdXNfcmV3cml0ZSwgbWF4X3RpbWUpIEFTIHJld3JpdGVfZGF5cywKICAgICAgICAgICAgYW55KGF1dGhvcikgT1ZFUiAoUEFSVElUSU9OIEJZIHBhdGggT1JERVIgQlkgbWF4X3RpbWUgQVNDIFJPV1MgQkVUV0VFTiAxIFBSRUNFRElORyBBTkQgQ1VSUkVOVCBST1cpIEFTIHByZXZfYXV0aG9yCiAgICAgICAgRlJPTSBjaGFuZ2VzCiAgICAgICAgV0hFUkUgKHR5cGUgPSAnTW9kaWZ5JykgQU5EIChwZXJjZW50X2FkZCA+PSAwLjUpIEFORCAocGVyY2VudF9kZWxldGUgPj0gMC41KSBBTkQgKGN1cnJlbnRfc2l6ZSA+IDUwKQogICAgKQpTRUxFQ1QKICAgIHByZXZfYXV0aG9yLAogICAgYXZnKHJld3JpdGVfZGF5cykgQVMgYywKICAgIHVuaXEocGF0aCkgQVMgbnVtX2ZpbGVzCkZST00gcmV3cml0ZXMKR1JPVVAgQlkgcHJldl9hdXRob3IKSEFWSU5HIG51bV9maWxlcyA+IDIKT1JERVIgQlkgYyBERVNDCkxJTUlUIDEwCg==)
|
||||
[play](https://sql.clickhouse.com?query_id=BKHLVVWN5SET1VTIFQ8JVK)
|
||||
|
||||
```sql
|
||||
WITH
|
||||
@ -2319,7 +2319,7 @@ This query first requires us to calculate the days when an author has committed.
|
||||
|
||||
Our subsequent array functions compute each author's longest sequence of consecutive ones. First, the `groupArray` function is used to collate all `consecutive_day` values for an author. This array of 1s and 0s is then split on 0 values into subarrays. Finally, we calculate the length of the longest subarray.
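The array step can be sketched as follows, assuming the `commit_days` CTE shown in the full query provides a `consecutive_day` flag per author and day:

```sql
-- sketch: longest run of consecutive commit days per author
-- relies on commit_days being ordered by author, day
SELECT
    author,
    arrayMax(arrayMap(x -> length(x), arraySplit(x -> (x = 0), groupArray(consecutive_day)))) AS max_consecutive_days
FROM commit_days
GROUP BY author
ORDER BY max_consecutive_days DESC
LIMIT 10
```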
|
||||
|
||||
[play](https://play.clickhouse.com/play?user=play#V0lUSCBjb21taXRfZGF5cyBBUwogICAgKAogICAgICAgIFNFTEVDVAogICAgICAgICAgICBhdXRob3IsCiAgICAgICAgICAgIGRheSwKICAgICAgICAgICAgYW55KGRheSkgT1ZFUiAoUEFSVElUSU9OIEJZIGF1dGhvciBPUkRFUiBCWSBkYXkgQVNDIFJPV1MgQkVUV0VFTiAxIFBSRUNFRElORyBBTkQgQ1VSUkVOVCBST1cpIEFTIHByZXZpb3VzX2NvbW1pdCwKICAgICAgICAgICAgZGF0ZURpZmYoJ2RheScsIHByZXZpb3VzX2NvbW1pdCwgZGF5KSBBUyBkYXlzX3NpbmNlX2xhc3QsCiAgICAgICAgICAgIGlmKGRheXNfc2luY2VfbGFzdCA9IDEsIDEsIDApIEFTIGNvbnNlY3V0aXZlX2RheQogICAgICAgIEZST00KICAgICAgICAoCiAgICAgICAgICAgIFNFTEVDVAogICAgICAgICAgICAgICAgYXV0aG9yLAogICAgICAgICAgICAgICAgdG9TdGFydE9mRGF5KHRpbWUpIEFTIGRheQogICAgICAgICAgICBGUk9NIGdpdF9jbGlja2hvdXNlLmNvbW1pdHMKICAgICAgICAgICAgR1JPVVAgQlkKICAgICAgICAgICAgICAgIGF1dGhvciwKICAgICAgICAgICAgICAgIGRheQogICAgICAgICAgICBPUkRFUiBCWQogICAgICAgICAgICAgICAgYXV0aG9yIEFTQywKICAgICAgICAgICAgICAgIGRheSBBU0MKICAgICAgICApCiAgICApClNFTEVDVAogICAgYXV0aG9yLAogICAgYXJyYXlNYXgoYXJyYXlNYXAoeCAtPiBsZW5ndGgoeCksIGFycmF5U3BsaXQoeCAtPiAoeCA9IDApLCBncm91cEFycmF5KGNvbnNlY3V0aXZlX2RheSkpKSkgQVMgbWF4X2NvbnNlY3V0aXZlX2RheXMKRlJPTSBjb21taXRfZGF5cwpHUk9VUCBCWSBhdXRob3IKT1JERVIgQlkgbWF4X2NvbnNlY3V0aXZlX2RheXMgREVTQwpMSU1JVCAxMA==)
|
||||
[play](https://sql.clickhouse.com?query_id=S3E64UYCAMDAYJRSXINVFR)
|
||||
|
||||
```sql
|
||||
WITH commit_days AS
|
||||
@ -2372,7 +2372,7 @@ LIMIT 10
|
||||
|
||||
Files can be renamed. When this occurs, we get a rename event, where the `path` column is set to the new path of the file and the `old_path` represents the previous location e.g.
|
||||
|
||||
[play](https://play.clickhouse.com/play?user=play#U0VMRUNUCiAgICB0aW1lLAogICAgcGF0aCwKICAgIG9sZF9wYXRoLAogICAgY29tbWl0X2hhc2gsCiAgICBjb21taXRfbWVzc2FnZQpGUk9NIGdpdF9jbGlja2hvdXNlLmZpbGVfY2hhbmdlcwpXSEVSRSAocGF0aCA9ICdzcmMvU3RvcmFnZXMvU3RvcmFnZVJlcGxpY2F0ZWRNZXJnZVRyZWUuY3BwJykgQU5EIChjaGFuZ2VfdHlwZSA9ICdSZW5hbWUnKQ==)
|
||||
[play](https://sql.clickhouse.com?query_id=AKTW3Z8JZAPQ4H9BH2ZFRX)
|
||||
|
||||
```sql
|
||||
SELECT
|
||||
@ -2410,8 +2410,6 @@ By calling `file_path_history('src/Storages/StorageReplicatedMergeTree.cpp')` we
|
||||
|
||||
For example,
|
||||
|
||||
[play](https://play.clickhouse.com/play?user=play#U0VMRUNUIGZpbGVfcGF0aF9oaXN0b3J5KCdzcmMvU3RvcmFnZXMvU3RvcmFnZVJlcGxpY2F0ZWRNZXJnZVRyZWUuY3BwJykgQVMgcGF0aHMK)
|
||||
|
||||
```sql
|
||||
SELECT file_path_history('src/Storages/StorageReplicatedMergeTree.cpp') AS paths
|
||||
|
||||
@ -2424,8 +2422,6 @@ SELECT file_path_history('src/Storages/StorageReplicatedMergeTree.cpp') AS paths
|
||||
|
||||
We can now use this capability to assemble the commits for the entire history of a file. In this example, we show one commit for each of the `path` values.
|
||||
|
||||
[play](https://play.clickhouse.com/play?user=play#U0VMRUNUCiAgICB0aW1lLAogICAgc3Vic3RyaW5nKGNvbW1pdF9oYXNoLCAxLCAxMSkgQVMgY29tbWl0LAogICAgY2hhbmdlX3R5cGUsCiAgICBhdXRob3IsCiAgICBwYXRoLAogICAgY29tbWl0X21lc3NhZ2UKRlJPTSBnaXRfY2xpY2tob3VzZS5maWxlX2NoYW5nZXMKV0hFUkUgcGF0aCBJTiBmaWxlX3BhdGhfaGlzdG9yeSgnc3JjL1N0b3JhZ2VzL1N0b3JhZ2VSZXBsaWNhdGVkTWVyZ2VUcmVlLmNwcCcpCk9SREVSIEJZIHRpbWUgREVTQwpMSU1JVCAxIEJZIHBhdGgKRk9STUFUIFByZXR0eUNvbXBhY3RNb25vQmxvY2s=)
|
||||
|
||||
```sql
|
||||
SELECT
|
||||
time,
|
||||
@ -2457,8 +2453,6 @@ This is particularly difficult to get an exact result due to the inability to cu
|
||||
|
||||
An approximate solution, sufficient for a high-level analysis, may look something like this:
|
||||
|
||||
[play](https://play.clickhouse.com/play?user=play#U0VMRUNUCiAgICBsaW5lX251bWJlcl9uZXcsCiAgICBhcmdNYXgoYXV0aG9yLCB0aW1lKSwKICAgIGFyZ01heChsaW5lLCB0aW1lKQpGUk9NIGdpdF9jbGlja2hvdXNlLmxpbmVfY2hhbmdlcwpXSEVSRSBwYXRoIElOIGZpbGVfcGF0aF9oaXN0b3J5KCdzcmMvU3RvcmFnZXMvU3RvcmFnZVJlcGxpY2F0ZWRNZXJnZVRyZWUuY3BwJykKR1JPVVAgQlkgbGluZV9udW1iZXJfbmV3Ck9SREVSIEJZIGxpbmVfbnVtYmVyX25ldyBBU0MKTElNSVQgMjA=)
|
||||
|
||||
```sql
|
||||
SELECT
|
||||
line_number_new,
|
||||
|
@ -354,4 +354,4 @@ At least they have caviar with vodka. Very nice.
|
||||
|
||||
## Online Playground {#playground}
|
||||
|
||||
The data is uploaded to ClickHouse Playground, [example](https://play.clickhouse.com/play?user=play#U0VMRUNUCiAgICByb3VuZCh0b1VJbnQzMk9yWmVybyhleHRyYWN0KG1lbnVfZGF0ZSwgJ15cXGR7NH0nKSksIC0xKSBBUyBkLAogICAgY291bnQoKSwKICAgIHJvdW5kKGF2ZyhwcmljZSksIDIpLAogICAgYmFyKGF2ZyhwcmljZSksIDAsIDUwLCAxMDApLAogICAgYW55KGRpc2hfbmFtZSkKRlJPTSBtZW51X2l0ZW1fZGVub3JtCldIRVJFIChtZW51X2N1cnJlbmN5IElOICgnRG9sbGFycycsICcnKSkgQU5EIChkID4gMCkgQU5EIChkIDwgMjAyMikgQU5EIChkaXNoX25hbWUgSUxJS0UgJyVjYXZpYXIlJykKR1JPVVAgQlkgZApPUkRFUiBCWSBkIEFTQw==).
|
||||
The data is uploaded to ClickHouse Playground, [example](https://sql.clickhouse.com?query_id=KB5KQJJFNBKHE5GBUJCP1B).
|
||||
|
@ -386,7 +386,7 @@ ORDER BY c DESC
|
||||
LIMIT 10;
|
||||
```
|
||||
|
||||
You can also play with the data in Playground, [example](https://play.clickhouse.com/play?user=play#U0VMRUNUIERheU9mV2VlaywgY291bnQoKikgQVMgYwpGUk9NIG9udGltZQpXSEVSRSBZZWFyPj0yMDAwIEFORCBZZWFyPD0yMDA4CkdST1VQIEJZIERheU9mV2VlawpPUkRFUiBCWSBjIERFU0M7Cg==).
|
||||
You can also play with the data in Playground, [example](https://sql.clickhouse.com?query_id=M4FSVBVMSHY98NKCQP8N4K).
|
||||
|
||||
This performance test was created by Vadim Tkachenko. See:
|
||||
|
||||
|
@ -417,4 +417,4 @@ Result:
|
||||
|
||||
### Online Playground {#playground}
|
||||
|
||||
You can test other queries to this data set using the interactive resource [Online Playground](https://play.clickhouse.com/play?user=play). For example, [like this](https://play.clickhouse.com/play?user=play#U0VMRUNUCiAgICBvcmlnaW4sCiAgICBjb3VudCgpLAogICAgcm91bmQoYXZnKGdlb0Rpc3RhbmNlKGxvbmdpdHVkZV8xLCBsYXRpdHVkZV8xLCBsb25naXR1ZGVfMiwgbGF0aXR1ZGVfMikpKSBBUyBkaXN0YW5jZSwKICAgIGJhcihkaXN0YW5jZSwgMCwgMTAwMDAwMDAsIDEwMCkgQVMgYmFyCkZST00gb3BlbnNreQpXSEVSRSBvcmlnaW4gIT0gJycKR1JPVVAgQlkgb3JpZ2luCk9SREVSIEJZIGNvdW50KCkgREVTQwpMSU1JVCAxMDA=). However, please note that you cannot create temporary tables here.
|
||||
You can test other queries to this data set using the interactive resource [Online Playground](https://sql.clickhouse.com). For example, [like this](https://sql.clickhouse.com?query_id=BIPDVQNIGVEZFQYFEFQB7O). However, please note that you cannot create temporary tables here.
|
||||
|
@ -335,4 +335,4 @@ Result:
|
||||
|
||||
### Online Playground
|
||||
|
||||
The dataset is also available in the [Online Playground](https://play.clickhouse.com/play?user=play#U0VMRUNUCiAgICBhcnJheUpvaW4oTkVSKSBBUyBrLAogICAgY291bnQoKSBBUyBjCkZST00gcmVjaXBlcwpHUk9VUCBCWSBrCk9SREVSIEJZIGMgREVTQwpMSU1JVCA1MA==).
|
||||
The dataset is also available in the [Online Playground](https://sql.clickhouse.com?query_id=HQXNQZE26Z1QWYP9KC76ML).
|
||||
|
@ -447,4 +447,4 @@ With projection: 100 rows in set. Elapsed: 0.336 sec. Processed 17.32 thousand r
|
||||
|
||||
### Test it in the Playground {#playground}
|
||||
|
||||
The dataset is also available in the [Online Playground](https://play.clickhouse.com/play?user=play#U0VMRUNUIHRvd24sIGRpc3RyaWN0LCBjb3VudCgpIEFTIGMsIHJvdW5kKGF2ZyhwcmljZSkpIEFTIHByaWNlLCBiYXIocHJpY2UsIDAsIDUwMDAwMDAsIDEwMCkgRlJPTSB1a19wcmljZV9wYWlkIFdIRVJFIGRhdGUgPj0gJzIwMjAtMDEtMDEnIEdST1VQIEJZIHRvd24sIGRpc3RyaWN0IEhBVklORyBjID49IDEwMCBPUkRFUiBCWSBwcmljZSBERVNDIExJTUlUIDEwMA==).
|
||||
The dataset is also available in the [Online Playground](https://sql.clickhouse.com?query_id=TRCWH5ZETY4SEEK8ISCCAX).
|
||||
|
@ -8,7 +8,7 @@ slug: /en/getting-started/playground
|
||||
|
||||
# ClickHouse Playground
|
||||
|
||||
[ClickHouse Playground](https://play.clickhouse.com/play?user=play) allows people to experiment with ClickHouse by running queries instantly, without setting up their server or cluster.
|
||||
[ClickHouse Playground](https://sql.clickhouse.com) allows people to experiment with ClickHouse by running queries instantly, without setting up their server or cluster.
|
||||
Several example datasets are available in Playground.
|
||||
|
||||
You can make queries to Playground using any HTTP client, for example [curl](https://curl.haxx.se) or [wget](https://www.gnu.org/software/wget/), or set up a connection using [JDBC](../interfaces/jdbc.md) or [ODBC](../interfaces/odbc.md) drivers. More information about software products that support ClickHouse is available [here](../integrations/index.mdx).
|
||||
|
@ -33,7 +33,7 @@ The tags or attributes are saved as two parallel arrays, containing the keys and
|
||||
|
||||
## Log-query-settings
|
||||
|
||||
ClickHouse allows you to log changes to query settings during query execution. When enabled, any modifications made to query settings will be recorded in the OpenTelemetry span log. This feature is particularly useful in production environments for tracking configuration changes that may affect query performance.
|
||||
The setting [log_query_settings](settings/settings.md) allows logging changes to query settings during query execution. When enabled, any modifications made to query settings will be recorded in the OpenTelemetry span log. This feature is particularly useful in production environments for tracking configuration changes that may affect query performance.
|
||||
|
||||
## Integration with monitoring systems
|
||||
|
||||
|
@ -1488,6 +1488,8 @@ Keys:
|
||||
- `formatting` – Log format for console output. Currently, only `json` is supported.
|
||||
- `use_syslog` - Also forward log output to syslog.
|
||||
- `syslog_level` - Log level for logging to syslog.
|
||||
- `message_regexp` - Only log messages that match this regular expression. Defaults to `""`, indicating no filtering.
|
||||
- `message_regexp_negative` - Only log messages that don't match this regular expression. Defaults to `""`, indicating no filtering.
|
||||
|
||||
**Log format specifiers**
|
||||
|
||||
@ -1576,6 +1578,28 @@ The log level of individual log names can be overridden. For example, to mute al
|
||||
</logger>
|
||||
```
|
||||
|
||||
**Regular Expression Filtering**
|
||||
|
||||
Logged messages can be filtered using regular expressions with `message_regexp` and `message_regexp_negative`. This can be done on a per-level basis or globally. If both a global and a logger-specific pattern are specified, the global pattern is overridden (ignored) and only the logger-specific pattern applies. The positive and negative patterns are considered independently in this situation. Note: using this feature may cause a slight slowdown in performance.
|
||||
|
||||
|
||||
```xml
|
||||
<logger>
|
||||
<level>trace</level>
|
||||
<!-- Global: Don't log Trace messages -->
|
||||
<message_regexp_negative>.*Trace.*</message_regexp_negative>
|
||||
|
||||
<message_regexps>
|
||||
<logger>
|
||||
<!-- For the executeQuery logger, only log if message has "Read", but not "from" -->
|
||||
<name>executeQuery</name>
|
||||
<message_regexp>.*Read.*</message_regexp>
|
||||
<message_regexp_negative>.*from.*</message_regexp_negative>
|
||||
</logger>
|
||||
</message_regexps>
|
||||
</logger>
|
||||
```
|
||||
|
||||
### syslog
|
||||
|
||||
To write log messages additionally to syslog:
|
||||
|
@ -1079,6 +1079,8 @@ Possible values:
|
||||
|
||||
Default value: 0 bytes.
|
||||
|
||||
Note that if both `min_free_disk_bytes_to_perform_insert` and `min_free_disk_ratio_to_perform_insert` are specified, ClickHouse will use the value that requires the larger amount of free disk space in order to perform inserts.
|
||||
|
||||
## min_free_disk_ratio_to_perform_insert
|
||||
|
||||
The minimum ratio of free to total disk space required to perform an `INSERT`. Must be a floating point value between 0 and 1. Note that this setting:
|
||||
|
20
docs/en/operations/system-tables/azure_queue_settings.md
Normal file
@ -0,0 +1,20 @@
|
||||
---
|
||||
slug: /en/operations/system-tables/azure_queue_settings
|
||||
---
|
||||
# azure_queue_settings
|
||||
|
||||
Contains information about settings of [AzureQueue](../../engines/table-engines/integrations/azure-queue.md) tables.
|
||||
Available from `24.10` server version.
|
||||
|
||||
Columns:
|
||||
|
||||
- `database` ([String](../../sql-reference/data-types/string.md)) — Database name.
|
||||
- `table` ([String](../../sql-reference/data-types/string.md)) — Table name.
|
||||
- `name` ([String](../../sql-reference/data-types/string.md)) — Setting name.
|
||||
- `value` ([String](../../sql-reference/data-types/string.md)) — Setting value.
|
||||
- `changed` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Whether the setting was explicitly defined in the config or explicitly changed.
|
||||
- `description` ([String](../../sql-reference/data-types/string.md)) — Setting description.
|
||||
- `alterable` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Shows whether the setting can be changed via `ALTER TABLE ... MODIFY SETTING`.
|
||||
- `0` — Current user can alter the setting.
|
||||
- `1` — Current user can’t alter the setting.
|
||||
- `type` ([String](../../sql-reference/data-types/string.md)) — Setting type (implementation specific string value).
|
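For example, the settings of a given AzureQueue table could be inspected with a query along these lines (the table name here is hypothetical):

```sql
-- 'my_azure_queue' is a hypothetical table name used for illustration
SELECT name, value, changed, alterable
FROM system.azure_queue_settings
WHERE (database = currentDatabase()) AND (table = 'my_azure_queue')
```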
@ -13,10 +13,12 @@ The `system.part_log` table contains the following columns:
|
||||
- `query_id` ([String](../../sql-reference/data-types/string.md)) — Identifier of the `INSERT` query that created this data part.
|
||||
- `event_type` ([Enum8](../../sql-reference/data-types/enum.md)) — Type of the event that occurred with the data part. Can have one of the following values:
|
||||
- `NewPart` — Inserting of a new data part.
|
||||
- `MergeParts` — Merging of data parts.
|
||||
- `MergePartsStart` — Merging of data parts has started.
|
||||
- `MergeParts` — Merging of data parts has finished.
|
||||
- `DownloadPart` — Downloading a data part.
|
||||
- `RemovePart` — Removing or detaching a data part using [DETACH PARTITION](../../sql-reference/statements/alter/partition.md#alter_detach-partition).
|
||||
- `MutatePart` — Mutating of a data part.
|
||||
- `MutatePartStart` — Mutating of a data part has started.
|
||||
- `MutatePart` — Mutating of a data part has finished.
|
||||
- `MovePart` — Moving the data part from the one disk to another one.
|
||||
- `merge_reason` ([Enum8](../../sql-reference/data-types/enum.md)) — The reason for the event with type `MERGE_PARTS`. Can have one of the following values:
|
||||
- `NotAMerge` — The current event has the type other than `MERGE_PARTS`.
|
||||
|
20
docs/en/operations/system-tables/s3_queue_settings.md
Normal file
@ -0,0 +1,20 @@
|
||||
---
|
||||
slug: /en/operations/system-tables/s3_queue_settings
|
||||
---
|
||||
# s3_queue_settings
|
||||
|
||||
Contains information about settings of [S3Queue](../../engines/table-engines/integrations/s3queue.md) tables.
|
||||
Available from `24.10` server version.
|
||||
|
||||
Columns:
|
||||
|
||||
- `database` ([String](../../sql-reference/data-types/string.md)) — Database name.
|
||||
- `table` ([String](../../sql-reference/data-types/string.md)) — Table name.
|
||||
- `name` ([String](../../sql-reference/data-types/string.md)) — Setting name.
|
||||
- `value` ([String](../../sql-reference/data-types/string.md)) — Setting value.
|
||||
- `changed` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Whether the setting was explicitly defined in the config or explicitly changed.
|
||||
- `description` ([String](../../sql-reference/data-types/string.md)) — Setting description.
|
||||
- `alterable` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Shows whether the setting can be changed via `ALTER TABLE ... MODIFY SETTING`.
|
||||
- `0` — Current user can alter the setting.
|
||||
- `1` — Current user can’t alter the setting.
|
||||
- `type` ([String](../../sql-reference/data-types/string.md)) — Setting type (implementation specific string value).
|
@ -427,19 +427,6 @@ High compression levels are useful for asymmetric scenarios, like compress once,
ZSTD_QAT is not available in ClickHouse Cloud.
:::

#### DEFLATE_QPL

`DEFLATE_QPL` — [Deflate compression algorithm](https://github.com/intel/qpl) implemented by the Intel® Query Processing Library. Some limitations apply:

- DEFLATE_QPL is disabled by default and can only be used after enabling the configuration setting [enable_deflate_qpl_codec](../../../operations/settings/settings.md#enable_deflate_qpl_codec).
- DEFLATE_QPL requires a ClickHouse build compiled with SSE 4.2 instructions (by default, this is the case). Refer to [Build ClickHouse with DEFLATE_QPL](/docs/en/development/building_and_benchmarking_deflate_qpl.md/#Build-Clickhouse-with-DEFLATE_QPL) for more details.
- DEFLATE_QPL works best if the system has an Intel® IAA (In-Memory Analytics Accelerator) offloading device. Refer to [Accelerator Configuration](https://intel.github.io/qpl/documentation/get_started_docs/installation.html#accelerator-configuration) and [Benchmark with DEFLATE_QPL](/docs/en/development/building_and_benchmarking_deflate_qpl.md/#Run-Benchmark-with-DEFLATE_QPL) for more details.
- DEFLATE_QPL-compressed data can only be transferred between ClickHouse nodes compiled with SSE 4.2 enabled.

:::note
DEFLATE_QPL is not available in ClickHouse Cloud.
:::
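For illustration, once the setting above is enabled the codec is declared like any other column codec — a sketch with made-up table and column names, assuming a build and host that satisfy the requirements listed above:

```sql
-- Enable the codec for the current session, then use it on a column.
SET enable_deflate_qpl_codec = 1;

CREATE TABLE qpl_demo
(
    id UInt64,
    payload String CODEC(DEFLATE_QPL)
)
ENGINE = MergeTree
ORDER BY id;
```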
### Specialized Codecs

These codecs are designed to make compression more effective by exploiting specific features of the data. Some of these codecs do not compress data themselves; instead, they preprocess the data so that a second compression stage using a general-purpose codec can achieve a higher compression rate.
@ -93,7 +93,7 @@ sidebar_label: "Используемые сторонние библиотеки
SELECT library_name, license_type, license_path FROM system.licenses ORDER BY library_name COLLATE 'en';
```

[Example](https://play.clickhouse.com/play?user=play#U0VMRUNUIGxpYnJhcnlfbmFtZSwgbGljZW5zZV90eXBlLCBsaWNlbnNlX3BhdGggRlJPTSBzeXN0ZW0ubGljZW5zZXMgT1JERVIgQlkgbGlicmFyeV9uYW1lIENPTExBVEUgJ2VuJw==)
[Example](https://sql.clickhouse.com?query_id=478GCPU7LRTSZJBNY3EJT3)

## Recommendations for adding third-party libraries and maintaining custom changes to them {#adding-third-party-libraries}
@ -412,4 +412,4 @@ ORDER BY yr,
mo;
```
Данные также доступны для работы с интерактивными запросами через [Playground](https://play.clickhouse.com/play?user=play), [пример](https://play.clickhouse.com/play?user=play#U0VMRUNUIG1hY2hpbmVfbmFtZSwKICAgICAgIE1JTihjcHUpIEFTIGNwdV9taW4sCiAgICAgICBNQVgoY3B1KSBBUyBjcHVfbWF4LAogICAgICAgQVZHKGNwdSkgQVMgY3B1X2F2ZywKICAgICAgIE1JTihuZXRfaW4pIEFTIG5ldF9pbl9taW4sCiAgICAgICBNQVgobmV0X2luKSBBUyBuZXRfaW5fbWF4LAogICAgICAgQVZHKG5ldF9pbikgQVMgbmV0X2luX2F2ZywKICAgICAgIE1JTihuZXRfb3V0KSBBUyBuZXRfb3V0X21pbiwKICAgICAgIE1BWChuZXRfb3V0KSBBUyBuZXRfb3V0X21heCwKICAgICAgIEFWRyhuZXRfb3V0KSBBUyBuZXRfb3V0X2F2ZwpGUk9NICgKICBTRUxFQ1QgbWFjaGluZV9uYW1lLAogICAgICAgICBDT0FMRVNDRShjcHVfdXNlciwgMC4wKSBBUyBjcHUsCiAgICAgICAgIENPQUxFU0NFKGJ5dGVzX2luLCAwLjApIEFTIG5ldF9pbiwKICAgICAgICAgQ09BTEVTQ0UoYnl0ZXNfb3V0LCAwLjApIEFTIG5ldF9vdXQKICBGUk9NIG1nYmVuY2gubG9nczEKICBXSEVSRSBtYWNoaW5lX25hbWUgSU4gKCdhbmFuc2knLCdhcmFnb2cnLCd1cmQnKQogICAgQU5EIGxvZ190aW1lID49IFRJTUVTVEFNUCAnMjAxNy0wMS0xMSAwMDowMDowMCcKKSBBUyByCkdST1VQIEJZIG1hY2hpbmVfbmFtZQ==).
Данные также доступны для работы с интерактивными запросами через [Playground](https://sql.clickhouse.com), [пример](https://sql.clickhouse.com?query_id=1MXMHASDLEQIP4P1D1STND).
@ -126,4 +126,4 @@ SELECT count() FROM cell_towers WHERE pointInPolygon((lon, lat), (SELECT * FROM
1 rows in set. Elapsed: 0.067 sec. Processed 43.28 million rows, 692.42 MB (645.83 million rows/s., 10.33 GB/s.)
```

You can try other queries using the interactive [Playground](https://play.clickhouse.com/play?user=play), for example [like this](https://play.clickhouse.com/play?user=play#U0VMRUNUIG1jYywgY291bnQoKSBGUk9NIGNlbGxfdG93ZXJzIEdST1VQIEJZIG1jYyBPUkRFUiBCWSBjb3VudCgpIERFU0M=). Note, however, that temporary tables cannot be created there.
You can try other queries using the interactive [Playground](https://sql.clickhouse.com), for example [like this](https://sql.clickhouse.com?query_id=UV8M4MAGS2PWAUOAYAAARM). Note, however, that temporary tables cannot be created there.
@ -338,4 +338,4 @@ WHERE title = 'Chocolate-Strawberry-Orange Wedding Cake';
### Online Playground

This dataset is available in the [Online Playground](https://play.clickhouse.com/play?user=play#U0VMRUNUCiAgICBhcnJheUpvaW4oTkVSKSBBUyBrLAogICAgY291bnQoKSBBUyBjCkZST00gcmVjaXBlcwpHUk9VUCBCWSBrCk9SREVSIEJZIGMgREVTQwpMSU1JVCA1MA==).
This dataset is available in the [Online Playground](https://sql.clickhouse.com?query_id=HQXNQZE26Z1QWYP9KC76ML).
@ -6,7 +6,7 @@ sidebar_label: Playground
# ClickHouse Playground {#clickhouse-playground}

[ClickHouse Playground](https://play.clickhouse.com/play?user=play) lets users experiment with ClickHouse by running queries instantly, without having to set up a server or cluster.
[ClickHouse Playground](https://sql.clickhouse.com) lets users experiment with ClickHouse by running queries instantly, without having to set up a server or cluster.
Several example datasets are available in the Playground.

You can query the Playground using any HTTP client, for example [curl](https://curl.haxx.se) or [wget](https://www.gnu.org/software/wget/), or set up a connection using the [JDBC](../interfaces/jdbc.md) or [ODBC](../interfaces/odbc.md) drivers. More information about software products that support ClickHouse is available [here](../interfaces/index.md).
@ -457,4 +457,4 @@ ORDER BY yr,
mo;
```
此数据集可在 [Playground](https://play.clickhouse.com/play?user=play) 中进行交互式的请求, [example](https://play.clickhouse.com/play?user=play#U0VMRUNUIG1hY2hpbmVfbmFtZSwKICAgICAgIE1JTihjcHUpIEFTIGNwdV9taW4sCiAgICAgICBNQVgoY3B1KSBBUyBjcHVfbWF4LAogICAgICAgQVZHKGNwdSkgQVMgY3B1X2F2ZywKICAgICAgIE1JTihuZXRfaW4pIEFTIG5ldF9pbl9taW4sCiAgICAgICBNQVgobmV0X2luKSBBUyBuZXRfaW5fbWF4LAogICAgICAgQVZHKG5ldF9pbikgQVMgbmV0X2luX2F2ZywKICAgICAgIE1JTihuZXRfb3V0KSBBUyBuZXRfb3V0X21pbiwKICAgICAgIE1BWChuZXRfb3V0KSBBUyBuZXRfb3V0X21heCwKICAgICAgIEFWRyhuZXRfb3V0KSBBUyBuZXRfb3V0X2F2ZwpGUk9NICgKICBTRUxFQ1QgbWFjaGluZV9uYW1lLAogICAgICAgICBDT0FMRVNDRShjcHVfdXNlciwgMC4wKSBBUyBjcHUsCiAgICAgICAgIENPQUxFU0NFKGJ5dGVzX2luLCAwLjApIEFTIG5ldF9pbiwKICAgICAgICAgQ09BTEVTQ0UoYnl0ZXNfb3V0LCAwLjApIEFTIG5ldF9vdXQKICBGUk9NIG1nYmVuY2gubG9nczEKICBXSEVSRSBtYWNoaW5lX25hbWUgSU4gKCdhbmFuc2knLCdhcmFnb2cnLCd1cmQnKQogICAgQU5EIGxvZ190aW1lID49IFRJTUVTVEFNUCAnMjAxNy0wMS0xMSAwMDowMDowMCcKKSBBUyByCkdST1VQIEJZIG1hY2hpbmVfbmFtZQ==).
此数据集可在 [Playground](https://sql.clickhouse.com) 中进行交互式的请求, [example](https://sql.clickhouse.com?query_id=1MXMHASDLEQIP4P1D1STND).
@ -228,5 +228,5 @@ WHERE pointInPolygon((lon, lat), (SELECT * FROM moscow))
1 rows in set. Elapsed: 0.067 sec. Processed 43.28 million rows, 692.42 MB (645.83 million rows/s., 10.33 GB/s.)
```

Although temporary tables cannot be created, this dataset is still available for interactive queries in the [Playground](https://play.clickhouse.com/play?user=play), [example](https://play.clickhouse.com/play?user=play#U0VMRUNUIG1jYywgY291bnQoKSBGUk9NIGNlbGxfdG93ZXJzIEdST1VQIEJZIG1jYyBPUkRFUiBCWSBjb3VudCgpIERFU0M=).
Although temporary tables cannot be created, this dataset is still available for interactive queries in the [Playground](https://sql.clickhouse.com), [example](https://sql.clickhouse.com?query_id=UV8M4MAGS2PWAUOAYAAARM).
@ -349,4 +349,4 @@ ORDER BY d ASC;
## Online Playground{#playground}
此数据集已经上传到了 ClickHouse Playground 中,[example](https://play.clickhouse.com/play?user=play#U0VMRUNUCiAgICByb3VuZCh0b1VJbnQzMk9yWmVybyhleHRyYWN0KG1lbnVfZGF0ZSwgJ15cXGR7NH0nKSksIC0xKSBBUyBkLAogICAgY291bnQoKSwKICAgIHJvdW5kKGF2ZyhwcmljZSksIDIpLAogICAgYmFyKGF2ZyhwcmljZSksIDAsIDUwLCAxMDApLAogICAgYW55KGRpc2hfbmFtZSkKRlJPTSBtZW51X2l0ZW1fZGVub3JtCldIRVJFIChtZW51X2N1cnJlbmN5IElOICgnRG9sbGFycycsICcnKSkgQU5EIChkID4gMCkgQU5EIChkIDwgMjAyMikgQU5EIChkaXNoX25hbWUgSUxJS0UgJyVjYXZpYXIlJykKR1JPVVAgQlkgZApPUkRFUiBCWSBkIEFTQw==)。
此数据集已经上传到了 ClickHouse Playground 中,[example](https://sql.clickhouse.com?query_id=KB5KQJJFNBKHE5GBUJCP1B)。
@ -413,4 +413,4 @@ ORDER BY k ASC;
### Online Playground {#playground}
你可以使用交互式资源 [Online Playground](https://play.clickhouse.com/play?user=play) 来尝试对此数据集的其他查询。 例如, [执行这个查询](https://play.clickhouse.com/play?user=play#U0VMRUNUCiAgICBvcmlnaW4sCiAgICBjb3VudCgpLAogICAgcm91bmQoYXZnKGdlb0Rpc3RhbmNlKGxvbmdpdHVkZV8xLCBsYXRpdHVkZV8xLCBsb25naXR1ZGVfMiwgbGF0aXR1ZGVfMikpKSBBUyBkaXN0YW5jZSwKICAgIGJhcihkaXN0YW5jZSwgMCwgMTAwMDAwMDAsIDEwMCkgQVMgYmFyCkZST00gb3BlbnNreQpXSEVSRSBvcmlnaW4gIT0gJycKR1JPVVAgQlkgb3JpZ2luCk9SREVSIEJZIGNvdW50KCkgREVTQwpMSU1JVCAxMDA=). 但是,请注意无法在 Playground 中创建临时表。
你可以使用交互式资源 [Online Playground](https://sql.clickhouse.com) 来尝试对此数据集的其他查询。 例如, [执行这个查询](https://sql.clickhouse.com?query_id=BIPDVQNIGVEZFQYFEFQB7O). 但是,请注意无法在 Playground 中创建临时表。
Some files were not shown because too many files have changed in this diff.