Merge branch 'master' into fix_integration_tests_parser_bug
Commit 83ba5cfddd

.github/workflows/debug.yml (vendored, 2 changes)

@@ -2,7 +2,7 @@
name: Debug

'on':
[push, pull_request, release]
[push, pull_request, release, workflow_dispatch]

jobs:
DebugInfo:
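The only change here is the extra `workflow_dispatch` trigger, which lets the Debug workflow be started by hand as well as by push, pull request and release events. As a minimal sketch (assuming the GitHub CLI `gh` is installed and authenticated against the repository), a manual run could be dispatched like this:

```bash
# Dispatch the Debug workflow manually; possible only because 'workflow_dispatch' is now in the trigger list.
gh workflow run debug.yml --ref master

# Inspect the run that was just started.
gh run list --workflow=debug.yml --limit 1
```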

debian/.gitignore (vendored, 18 changes)

@@ -1,18 +0,0 @@
control
copyright
tmp/
clickhouse-benchmark/
clickhouse-client.docs
clickhouse-client/
clickhouse-common-static-dbg/
clickhouse-common-static.docs
clickhouse-common-static/
clickhouse-server-base/
clickhouse-server-common/
clickhouse-server/
debhelper-build-stamp
files
*.debhelper.log
*.debhelper
*.substvars

debian/.pbuilderrc (vendored, 223 changes)

@@ -1,223 +0,0 @@
#
# sudo apt install pbuilder fakeroot debhelper debian-archive-keyring debian-keyring
#
# ubuntu:
# prepare old (trusty or earlier) host system:

# sudo ln -s gutsy /usr/share/debootstrap/scripts/eoan
# sudo ln -s gutsy /usr/share/debootstrap/scripts/disco
# sudo ln -s gutsy /usr/share/debootstrap/scripts/cosmic
# sudo ln -s gutsy /usr/share/debootstrap/scripts/artful
# sudo ln -s gutsy /usr/share/debootstrap/scripts/bionic
# sudo ln -s sid /usr/share/debootstrap/scripts/buster
# build ubuntu:
# sudo DIST=bionic pbuilder create --configfile debian/.pbuilderrc && DIST=bionic pdebuild --configfile debian/.pbuilderrc
# sudo DIST=cosmic pbuilder create --configfile debian/.pbuilderrc && DIST=cosmic pdebuild --configfile debian/.pbuilderrc
# sudo DIST=disco pbuilder create --configfile debian/.pbuilderrc && DIST=disco pdebuild --configfile debian/.pbuilderrc
# sudo DIST=eoan pbuilder create --configfile debian/.pbuilderrc && DIST=eoan pdebuild --configfile debian/.pbuilderrc
# sudo DIST=devel pbuilder create --configfile debian/.pbuilderrc && DIST=devel pdebuild --configfile debian/.pbuilderrc
# build debian:
# sudo DIST=stable pbuilder create --configfile debian/.pbuilderrc && DIST=stable pdebuild --configfile debian/.pbuilderrc
# sudo DIST=testing pbuilder create --configfile debian/.pbuilderrc && DIST=testing pdebuild --configfile debian/.pbuilderrc
# sudo DIST=unstable pbuilder create --configfile debian/.pbuilderrc && DIST=unstable pdebuild --configfile debian/.pbuilderrc
# sudo DIST=experimental pbuilder create --configfile debian/.pbuilderrc && DIST=experimental pdebuild --configfile debian/.pbuilderrc
# build i386 experimental:
# sudo DIST=trusty ARCH=i386 pbuilder create --configfile debian/.pbuilderrc && DIST=trusty ARCH=i386 pdebuild --configfile debian/.pbuilderrc
# sudo DIST=xenial ARCH=i386 pbuilder create --configfile debian/.pbuilderrc && DIST=xenial ARCH=i386 pdebuild --configfile debian/.pbuilderrc
# sudo DIST=zesty ARCH=i386 pbuilder create --configfile debian/.pbuilderrc && DIST=zesty ARCH=i386 pdebuild --configfile debian/.pbuilderrc
# sudo DIST=artful ARCH=i386 pbuilder create --configfile debian/.pbuilderrc && DIST=artful ARCH=i386 pdebuild --configfile debian/.pbuilderrc
# sudo DIST=bionic ARCH=i386 pbuilder create --configfile debian/.pbuilderrc && DIST=bionic ARCH=i386 pdebuild --configfile debian/.pbuilderrc
# sudo DIST=stable ARCH=i386 pbuilder create --configfile debian/.pbuilderrc && DIST=stable ARCH=i386 pdebuild --configfile debian/.pbuilderrc
# sudo DIST=testing ARCH=i386 pbuilder create --configfile debian/.pbuilderrc && DIST=testing ARCH=i386 pdebuild --configfile debian/.pbuilderrc
# sudo DIST=experimental ARCH=i386 pbuilder create --configfile debian/.pbuilderrc && DIST=experimental ARCH=i386 pdebuild --configfile debian/.pbuilderrc
# test gcc-9
# env DEB_CC=gcc-9 DEB_CXX=g++-9 EXTRAPACKAGES="g++-9 gcc-9" DIST=disco pdebuild --configfile debian/.pbuilderrc
# use only clang:
# env DEB_CC=clang-8 DEB_CXX=clang++-8 EXTRAPACKAGES=clang-8 DIST=disco pdebuild --configfile debian/.pbuilderrc
# env DEB_CC=clang-5.0 DEB_CXX=clang++-5.0 EXTRAPACKAGES=clang-5.0 DIST=artful pdebuild --configfile debian/.pbuilderrc
# clang+asan:
# env DEB_CC=clang-5.0 DEB_CXX=clang++-5.0 EXTRAPACKAGES="clang-5.0 libc++abi-dev libc++-dev" CMAKE_FLAGS="-DENABLE_TCMALLOC=0 -DENABLE_UNWIND=0 -DCMAKE_BUILD_TYPE=Asan" DIST=artful pdebuild --configfile debian/.pbuilderrc
# clang+tsan:
# env DEB_CC=clang-5.0 DEB_CXX=clang++-5.0 EXTRAPACKAGES="clang-5.0 libc++abi-dev libc++-dev" CMAKE_FLAGS="-DCMAKE_BUILD_TYPE=Tsan" DIST=artful pdebuild --configfile debian/.pbuilderrc
# without sse for old systems and some VM:
# env DH_VERBOSE=1 CMAKE_FLAGS="-DHAVE_SSE41=0 -DHAVE_SSE42=0 -DHAVE_POPCNT=0 -DHAVE_SSE2_INTRIN=0 -DSSE2FLAG=' ' -DHAVE_SSE42_INTRIN=0 -DSSE4FLAG=' ' -DHAVE_PCLMULQDQ_INTRIN=0 -DPCLMULFLAG=' '" DIST=artful pdebuild --configfile debian/.pbuilderrc

# Note: on trusty host creating some future dists can fail (debootstrap error).

# Your packages built here: /var/cache/pbuilder/*-*/result

# from https://wiki.debian.org/PbuilderTricks :

# Codenames for Debian suites according to their alias. Update these when
# needed.
UNSTABLE_CODENAME="sid"
TESTING_CODENAME="buster"
STABLE_CODENAME="stretch"
STABLE_BACKPORTS_SUITE="$STABLE_CODENAME-backports"

# List of Debian suites.
DEBIAN_SUITES=($UNSTABLE_CODENAME $TESTING_CODENAME $STABLE_CODENAME $STABLE_BACKPORTS_SUITE
"experimental" "unstable" "testing" "stable")

# List of Ubuntu suites. Update these when needed.
UBUNTU_SUITES=("eoan" "disco" "cosmic" "bionic" "artful" "zesty" "xenial" "trusty" "devel")

# Set a default distribution if none is used. Note that you can set your own default (i.e. ${DIST:="unstable"}).
HOST_DIST=`lsb_release --short --codename`
: ${DIST:="$HOST_DIST"}

# Optionally change Debian codenames in $DIST to their aliases.
case "$DIST" in
$UNSTABLE_CODENAME)
DIST="unstable"
;;
$TESTING_CODENAME)
DIST="testing"
;;
$STABLE_CODENAME)
DIST="stable"
;;
esac

# Optionally set the architecture to the host architecture if none set. Note
# that you can set your own default (i.e. ${ARCH:="i386"}).
: ${ARCH:="$(dpkg --print-architecture)"}

NAME="$DIST"
if [ -n "${ARCH}" ]; then
NAME="$NAME-$ARCH"
DEBOOTSTRAPOPTS=("--arch" "$ARCH" "${DEBOOTSTRAPOPTS[@]}")
fi

BASETGZ=${SET_BASETGZ}
BASETGZ=${BASETGZ:="/var/cache/pbuilder/$NAME-base.tgz"}
DISTRIBUTION="$DIST"
BUILDRESULT=${SET_BUILDRESULT}
BUILDRESULT=${BUILDRESULT:="/var/cache/pbuilder/$NAME/result/"}
APTCACHE="/var/cache/pbuilder/$NAME/aptcache/"
BUILDPLACE="/var/cache/pbuilder/build/"
ALLOWUNTRUSTED=${SET_ALLOWUNTRUSTED:=${ALLOWUNTRUSTED}}

#DEBOOTSTRAPOPTS=( '--variant=buildd' $SET_DEBOOTSTRAPOPTS )


if $(echo ${DEBIAN_SUITES[@]} | grep -q $DIST); then
# Debian configuration
OSNAME=debian
MIRRORSITE=${SET_MIRRORSITE="http://deb.debian.org/$OSNAME/"}
COMPONENTS="main contrib non-free"
if $(echo "$STABLE_CODENAME stable" | grep -q $DIST); then
OTHERMIRROR="$OTHERMIRROR | deb $MIRRORSITE $STABLE_BACKPORTS_SUITE $COMPONENTS"
fi
# APTKEYRINGS=/usr/share/keyrings/debian-archive-keyring.gpg

case "$HOST_DIST" in
"trusty" )
DEBOOTSTRAPOPTS+=( '--no-check-gpg' )
;;
*)
DEBOOTSTRAPOPTS+=( '--keyring' '/usr/share/keyrings/debian-archive-keyring.gpg' )
# DEBOOTSTRAPOPTS+=( '--keyring' '/usr/share/keyrings/debian-keyring.gpg' )
esac
elif $(echo ${UBUNTU_SUITES[@]} | grep -q $DIST); then
# Ubuntu configuration
OSNAME=ubuntu

if [[ "$ARCH" == "amd64" || "$ARCH" == "i386" ]]; then
MIRRORSITE=${SET_MIRRORSITE="http://archive.ubuntu.com/$OSNAME/"}
else
MIRRORSITE=${SET_MIRRORSITE="http://ports.ubuntu.com/ubuntu-ports/"}
fi

COMPONENTS="main restricted universe multiverse"

OTHERMIRROR="$OTHERMIRROR | deb $MIRRORSITE $DIST-updates main restricted universe multiverse"
OTHERMIRROR="$OTHERMIRROR | deb $MIRRORSITE $DIST-security main restricted universe multiverse"
OTHERMIRROR="$OTHERMIRROR | deb $MIRRORSITE $DIST-proposed main restricted universe multiverse"

case "$DIST" in
"trusty" | "xenial" )
OTHERMIRROR="$OTHERMIRROR | deb http://ppa.launchpad.net/ubuntu-toolchain-r/test/$OSNAME $DIST main"
ALLOWUNTRUSTED=yes
;;
esac

# deb http://apt.llvm.org/zesty/ llvm-toolchain-zesty-5.0 main
else
echo "Unknown distribution: $DIST"
exit 1
fi

echo "using $NAME $OSNAME $DIST $ARCH $LOGNAME $MIRRORSITE"

case "$DIST" in
"trusty")
# ccache broken
;;
*)
CCACHEDIR=${SET_CCACHEDIR:="/var/cache/pbuilder/ccache"}
;;
esac

# old systems with default gcc <= 6
case "$DIST" in
"trusty" | "xenial" | "stable" )
export DEB_CC=gcc-7
export DEB_CXX=g++-7
;;
esac

if [ "$ARCH" != arm64 ]; then
case "$DIST" in
# TODO: fix llvm-8 and use for "disco" and "eoan"
"experimental")
EXTRAPACKAGES+=" liblld-8-dev libclang-8-dev llvm-8-dev liblld-8 "
export CMAKE_FLAGS="-DLLVM_VERSION=8 $CMAKE_FLAGS"
;;
"eoan" | "disco" | "cosmic" | "testing" | "unstable")
EXTRAPACKAGES+=" liblld-7-dev libclang-7-dev llvm-7-dev liblld-7 "
export CMAKE_FLAGS="-DLLVM_VERSION=7 $CMAKE_FLAGS"
;;
"bionic")
EXTRAPACKAGES+=" liblld-6.0-dev libclang-6.0-dev liblld-6.0 "
export CMAKE_FLAGS="-DLLVM_VERSION=6 $CMAKE_FLAGS"
;;
"artful" )
EXTRAPACKAGES+=" liblld-5.0-dev libclang-5.0-dev liblld-5.0 "
;;
esac
else
export CMAKE_FLAGS="-DENABLE_EMBEDDED_COMPILER=0 $CMAKE_FLAGS"
fi

# Will test symbols
#EXTRAPACKAGES+=" gdb "

# For killall in pbuilder-hooks:
EXTRAPACKAGES+=" psmisc "

[[ $CCACHE_PREFIX == 'distcc' ]] && EXTRAPACKAGES+=" $CCACHE_PREFIX " && USENETWORK=yes && export DISTCC_DIR=/var/cache/pbuilder/distcc

[[ $ARCH == 'i386' ]] && EXTRAPACKAGES+=" libssl-dev "

export DEB_BUILD_OPTIONS=parallel=`nproc`

# Floating bug with permissions:
[ -n "$CCACHEDIR" ] && sudo mkdir -p $CCACHEDIR
[ -n "$CCACHEDIR" ] && sudo chmod -R a+rwx $CCACHEDIR || true
# chown -R $BUILDUSERID:$BUILDUSERID $CCACHEDIR


# Do not create source package inside pbuilder (-b)
# Use current dir to make package (by default should have src archive)
# echo "3.0 (native)" > debian/source/format
# OR
# pdebuild -b --debbuildopts "--source-option=--format=\"3.0 (native)\""
# OR
DEBBUILDOPTS="-b --source-option=--format=\"3.0 (native)\""

HOOKDIR="debian/pbuilder-hooks"

#echo "DEBOOTSTRAPOPTS=${DEBOOTSTRAPOPTS[@]}"
#echo "ALLOWUNTRUSTED=${ALLOWUNTRUSTED} OTHERMIRROR=${OTHERMIRROR}"
#echo "EXTRAPACKAGES=${EXTRAPACKAGES}"
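The header comments above already enumerate the supported invocations. For quick reference, a typical two-step build with this configuration (taken directly from the file's own examples, using bionic as the target) looks like:

```bash
# One-time: create the base chroot for the chosen distribution.
sudo DIST=bionic pbuilder create --configfile debian/.pbuilderrc

# Build the packages; results end up under /var/cache/pbuilder/ as described by BUILDRESULT above.
DIST=bionic pdebuild --configfile debian/.pbuilderrc
```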

debian/changelog (vendored, 5 changes)

@@ -1,5 +0,0 @@
clickhouse (22.1.1.1) unstable; urgency=low

* Modified source code

-- clickhouse-release <clickhouse-release@yandex-team.ru> Thu, 09 Dec 2021 00:32:58 +0300

debian/changelog.in (vendored, 5 changes)

@@ -1,5 +0,0 @@
clickhouse (@VERSION_STRING@) unstable; urgency=low

* Modified source code

-- @AUTHOR@ <@EMAIL@> @DATE@

debian/clickhouse-client.install (vendored, 7 changes)

@@ -1,7 +0,0 @@
usr/bin/clickhouse-client
usr/bin/clickhouse-local
usr/bin/clickhouse-compressor
usr/bin/clickhouse-benchmark
usr/bin/clickhouse-format
usr/bin/clickhouse-obfuscator
etc/clickhouse-client/config.xml

debian/clickhouse-common-static.install (vendored, 5 changes)

@@ -1,5 +0,0 @@
usr/bin/clickhouse
usr/bin/clickhouse-odbc-bridge
usr/bin/clickhouse-library-bridge
usr/bin/clickhouse-extract-from-config
usr/share/bash-completion/completions

debian/clickhouse-server.cron.d (vendored, 1 change)

@@ -1 +0,0 @@
#*/10 * * * * root ((which service > /dev/null 2>&1 && (service clickhouse-server condstart ||:)) || /etc/init.d/clickhouse-server condstart) > /dev/null 2>&1

debian/clickhouse-server.docs (vendored, 4 changes)

@@ -1,4 +0,0 @@
LICENSE
AUTHORS
README.md
CHANGELOG.md

debian/clickhouse-server.init (vendored, 227 changes)

@@ -1,227 +0,0 @@
#!/bin/sh
### BEGIN INIT INFO
# Provides: clickhouse-server
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Should-Start: $time $network
# Should-Stop: $network
# Short-Description: clickhouse-server daemon
### END INIT INFO
#
# NOTES:
# - Should-* -- script can start if the listed facilities are missing, unlike Required-*
#
# For the documentation [1]:
#
# [1]: https://wiki.debian.org/LSBInitScripts

CLICKHOUSE_USER=clickhouse
CLICKHOUSE_GROUP=${CLICKHOUSE_USER}
SHELL=/bin/bash
PROGRAM=clickhouse-server
CLICKHOUSE_GENERIC_PROGRAM=clickhouse
CLICKHOUSE_PROGRAM_ENV=""
EXTRACT_FROM_CONFIG=${CLICKHOUSE_GENERIC_PROGRAM}-extract-from-config
CLICKHOUSE_CONFDIR=/etc/$PROGRAM
CLICKHOUSE_LOGDIR=/var/log/clickhouse-server
CLICKHOUSE_LOGDIR_USER=root
CLICKHOUSE_DATADIR=/var/lib/clickhouse
if [ -d "/var/lock" ]; then
LOCALSTATEDIR=/var/lock
else
LOCALSTATEDIR=/run/lock
fi

if [ ! -d "$LOCALSTATEDIR" ]; then
mkdir -p "$LOCALSTATEDIR"
fi

CLICKHOUSE_BINDIR=/usr/bin
CLICKHOUSE_CRONFILE=/etc/cron.d/clickhouse-server
CLICKHOUSE_CONFIG=$CLICKHOUSE_CONFDIR/config.xml
LOCKFILE=$LOCALSTATEDIR/$PROGRAM
CLICKHOUSE_PIDDIR=/var/run/$PROGRAM
CLICKHOUSE_PIDFILE="$CLICKHOUSE_PIDDIR/$PROGRAM.pid"
# CLICKHOUSE_STOP_TIMEOUT=60 # Disabled by default. Place to /etc/default/clickhouse if you need.

# Some systems lack "flock"
command -v flock >/dev/null && FLOCK=flock

# Override defaults from optional config file
test -f /etc/default/clickhouse && . /etc/default/clickhouse


die()
{
echo $1 >&2
exit 1
}


# Check that configuration file is Ok.
check_config()
{
if [ -x "$CLICKHOUSE_BINDIR/$EXTRACT_FROM_CONFIG" ]; then
su -s $SHELL ${CLICKHOUSE_USER} -c "$CLICKHOUSE_BINDIR/$EXTRACT_FROM_CONFIG --config-file=\"$CLICKHOUSE_CONFIG\" --key=path" >/dev/null || die "Configuration file ${CLICKHOUSE_CONFIG} doesn't parse successfully. Won't restart server. You may use forcerestart if you are sure.";
fi
}


initdb()
{
${CLICKHOUSE_GENERIC_PROGRAM} install --user "${CLICKHOUSE_USER}" --pid-path "${CLICKHOUSE_PIDDIR}" --config-path "${CLICKHOUSE_CONFDIR}" --binary-path "${CLICKHOUSE_BINDIR}"
}


start()
{
${CLICKHOUSE_GENERIC_PROGRAM} start --user "${CLICKHOUSE_USER}" --pid-path "${CLICKHOUSE_PIDDIR}" --config-path "${CLICKHOUSE_CONFDIR}" --binary-path "${CLICKHOUSE_BINDIR}"
}


stop()
{
${CLICKHOUSE_GENERIC_PROGRAM} stop --pid-path "${CLICKHOUSE_PIDDIR}"
}


restart()
{
${CLICKHOUSE_GENERIC_PROGRAM} restart --user "${CLICKHOUSE_USER}" --pid-path "${CLICKHOUSE_PIDDIR}" --config-path "${CLICKHOUSE_CONFDIR}" --binary-path "${CLICKHOUSE_BINDIR}"
}


forcestop()
{
${CLICKHOUSE_GENERIC_PROGRAM} stop --force --pid-path "${CLICKHOUSE_PIDDIR}"
}


service_or_func()
{
if [ -x "/bin/systemctl" ] && [ -f /etc/systemd/system/clickhouse-server.service ] && [ -d /run/systemd/system ]; then
systemctl $1 $PROGRAM
else
$1
fi
}

forcerestart()
{
forcestop
# Should not use 'start' function if systemd active
service_or_func start
}

use_cron()
{
# 1. running systemd
if [ -x "/bin/systemctl" ] && [ -f /etc/systemd/system/clickhouse-server.service ] && [ -d /run/systemd/system ]; then
return 1
fi
# 2. disabled by config
if [ -z "$CLICKHOUSE_CRONFILE" ]; then
return 2
fi
return 0
}
# returns false if cron disabled (with systemd)
enable_cron()
{
use_cron && sed -i 's/^#*//' "$CLICKHOUSE_CRONFILE"
}
# returns false if cron disabled (with systemd)
disable_cron()
{
use_cron && sed -i 's/^#*/#/' "$CLICKHOUSE_CRONFILE"
}


is_cron_disabled()
{
use_cron || return 0

# Assumes that either no lines are commented or all lines are commented.
# Also please note, that currently cron file for ClickHouse has only one line (but some time ago there was more).
grep -q -E '^#' "$CLICKHOUSE_CRONFILE";
}


main()
{
# See how we were called.
EXIT_STATUS=0
case "$1" in
start)
service_or_func start && enable_cron
;;
stop)
disable_cron
service_or_func stop
;;
restart)
service_or_func restart && enable_cron
;;
forcestop)
disable_cron
forcestop
;;
forcerestart)
forcerestart && enable_cron
;;
reload)
service_or_func restart
;;
condstart)
service_or_func start
;;
condstop)
service_or_func stop
;;
condrestart)
service_or_func restart
;;
condreload)
service_or_func restart
;;
initdb)
initdb
;;
enable_cron)
enable_cron
;;
disable_cron)
disable_cron
;;
*)
echo "Usage: $0 {start|stop|status|restart|forcestop|forcerestart|reload|condstart|condstop|condrestart|condreload|initdb}"
exit 2
;;
esac

exit $EXIT_STATUS
}


status()
{
${CLICKHOUSE_GENERIC_PROGRAM} status --pid-path "${CLICKHOUSE_PIDDIR}"
}


# Running commands without need of locking
case "$1" in
status)
status
exit 0
;;
esac


(
if $FLOCK -n 9; then
main "$@"
else
echo "Init script is already running" && exit 1
fi
) 9> $LOCKFILE
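The case statement in main() defines the supported actions. A short usage sketch (assuming the package installed this script as /etc/init.d/clickhouse-server; on systemd hosts service_or_func forwards these calls to systemctl):

```bash
# Basic lifecycle through the init script.
sudo service clickhouse-server start
sudo service clickhouse-server status
sudo service clickhouse-server condstart    # start only if not already running
sudo service clickhouse-server forcerestart
```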

debian/clickhouse-server.install (vendored, 6 changes)

@@ -1,6 +0,0 @@
usr/bin/clickhouse-server
usr/bin/clickhouse-copier
usr/bin/clickhouse-report
etc/clickhouse-server/config.xml
etc/clickhouse-server/users.xml
etc/systemd/system/clickhouse-server.service

debian/clickhouse-server.postinst (vendored, 47 changes)

@@ -1,47 +0,0 @@
#!/bin/sh
set -e
# set -x

PROGRAM=clickhouse-server
CLICKHOUSE_USER=${CLICKHOUSE_USER:=clickhouse}
CLICKHOUSE_GROUP=${CLICKHOUSE_GROUP:=${CLICKHOUSE_USER}}
# Please note that we don't support paths with whitespaces. This is rather ignorant.
CLICKHOUSE_CONFDIR=${CLICKHOUSE_CONFDIR:=/etc/clickhouse-server}
CLICKHOUSE_DATADIR=${CLICKHOUSE_DATADIR:=/var/lib/clickhouse}
CLICKHOUSE_LOGDIR=${CLICKHOUSE_LOGDIR:=/var/log/clickhouse-server}
CLICKHOUSE_BINDIR=${CLICKHOUSE_BINDIR:=/usr/bin}
CLICKHOUSE_GENERIC_PROGRAM=${CLICKHOUSE_GENERIC_PROGRAM:=clickhouse}
EXTRACT_FROM_CONFIG=${CLICKHOUSE_GENERIC_PROGRAM}-extract-from-config
CLICKHOUSE_CONFIG=$CLICKHOUSE_CONFDIR/config.xml
CLICKHOUSE_PIDDIR=/var/run/$PROGRAM

[ -f /usr/share/debconf/confmodule ] && . /usr/share/debconf/confmodule
[ -f /etc/default/clickhouse ] && . /etc/default/clickhouse

if [ ! -f "/etc/debian_version" ]; then
not_deb_os=1
fi

if [ "$1" = configure ] || [ -n "$not_deb_os" ]; then

${CLICKHOUSE_GENERIC_PROGRAM} install --user "${CLICKHOUSE_USER}" --group "${CLICKHOUSE_GROUP}" --pid-path "${CLICKHOUSE_PIDDIR}" --config-path "${CLICKHOUSE_CONFDIR}" --binary-path "${CLICKHOUSE_BINDIR}" --log-path "${CLICKHOUSE_LOGDIR}" --data-path "${CLICKHOUSE_DATADIR}"

if [ -x "/bin/systemctl" ] && [ -f /etc/systemd/system/clickhouse-server.service ] && [ -d /run/systemd/system ]; then
# if old rc.d service present - remove it
if [ -x "/etc/init.d/clickhouse-server" ] && [ -x "/usr/sbin/update-rc.d" ]; then
/usr/sbin/update-rc.d clickhouse-server remove
fi

/bin/systemctl daemon-reload
/bin/systemctl enable clickhouse-server
else
# If you downgrading to version older than 1.1.54336 run: systemctl disable clickhouse-server
if [ -x "/etc/init.d/clickhouse-server" ]; then
if [ -x "/usr/sbin/update-rc.d" ]; then
/usr/sbin/update-rc.d clickhouse-server defaults 19 19 >/dev/null || exit $?
else
echo # Other OS
fi
fi
fi
fi

debian/clickhouse-server.service (vendored, 27 changes)

@@ -1,27 +0,0 @@
[Unit]
Description=ClickHouse Server (analytic DBMS for big data)
Requires=network-online.target
# NOTE: that After/Wants=time-sync.target is not enough, you need to ensure
# that the time was adjusted already, if you use systemd-timesyncd you are
# safe, but if you use ntp or some other daemon, you should configure it
# additionaly.
After=time-sync.target network-online.target
Wants=time-sync.target

[Service]
Type=simple
User=clickhouse
Group=clickhouse
Restart=always
RestartSec=30
RuntimeDirectory=clickhouse-server
ExecStart=/usr/bin/clickhouse-server --config=/etc/clickhouse-server/config.xml --pid-file=/run/clickhouse-server/clickhouse-server.pid
# Minus means that this file is optional.
EnvironmentFile=-/etc/default/clickhouse
LimitCORE=infinity
LimitNOFILE=500000
CapabilityBoundingSet=CAP_NET_ADMIN CAP_IPC_LOCK CAP_SYS_NICE CAP_NET_BIND_SERVICE

[Install]
# ClickHouse should not start from the rescue shell (rescue.target).
WantedBy=multi-user.target

debian/compat (vendored, 1 change)

@@ -1 +0,0 @@
10
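When systemd is available, the postinst above reloads unit files and enables the service. A hedged sketch of performing the same steps manually with the unit shown here:

```bash
# Mirror what the postinst does on a systemd host.
sudo systemctl daemon-reload
sudo systemctl enable clickhouse-server
sudo systemctl start clickhouse-server

# The unit raises core dump and file descriptor limits; check the effective values.
systemctl show clickhouse-server | grep -E 'LimitCORE|LimitNOFILE'
```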

debian/control (vendored, 58 changes)

@@ -1,58 +0,0 @@
Source: clickhouse
Section: database
Priority: optional
Maintainer: Alexey Milovidov <milovidov@clickhouse.com>
Build-Depends: debhelper (>= 9),
cmake | cmake3,
ninja-build,
clang-13,
llvm-13,
lld-13,
libc6-dev,
tzdata
Standards-Version: 3.9.8

Package: clickhouse-client
Architecture: all
Depends: ${shlibs:Depends}, ${misc:Depends}, clickhouse-common-static (= ${binary:Version})
Replaces: clickhouse-compressor
Conflicts: clickhouse-compressor
Description: Client binary for ClickHouse
ClickHouse is a column-oriented database management system
that allows generating analytical data reports in real time.
.
This package provides clickhouse-client , clickhouse-local and clickhouse-benchmark

Package: clickhouse-common-static
Architecture: any
Depends: ${shlibs:Depends}, ${misc:Depends}
Suggests: clickhouse-common-static-dbg
Replaces: clickhouse-common, clickhouse-server-base
Provides: clickhouse-common, clickhouse-server-base
Description: Common files for ClickHouse
ClickHouse is a column-oriented database management system
that allows generating analytical data reports in real time.
.
This package provides common files for both clickhouse server and client

Package: clickhouse-server
Architecture: all
Depends: ${shlibs:Depends}, ${misc:Depends}, clickhouse-common-static (= ${binary:Version}), adduser
Recommends: libcap2-bin
Replaces: clickhouse-server-common, clickhouse-server-base
Provides: clickhouse-server-common
Description: Server binary for ClickHouse
ClickHouse is a column-oriented database management system
that allows generating analytical data reports in real time.
.
This package provides clickhouse common configuration files

Package: clickhouse-common-static-dbg
Architecture: any
Section: debug
Priority: optional
Depends: ${misc:Depends}
Replaces: clickhouse-common-dbg
Conflicts: clickhouse-common-dbg
Description: debugging symbols for clickhouse-common-static
This package contains the debugging symbols for clickhouse-common.

debian/rules (vendored, 132 changes)

@@ -1,132 +0,0 @@
#!/usr/bin/make -f
# -*- makefile -*-

# Uncomment this to turn on verbose mode.
export DH_VERBOSE=1

# -pie only for static mode
export DEB_BUILD_MAINT_OPTIONS=hardening=-all

# because copy_headers.sh have hardcoded path to build/include_directories.txt
BUILDDIR = obj-$(DEB_HOST_GNU_TYPE)
CURDIR = $(shell pwd)
DESTDIR = $(CURDIR)/debian/tmp

DEB_HOST_MULTIARCH ?= $(shell dpkg-architecture -qDEB_HOST_MULTIARCH)

ifeq ($(CCACHE_PREFIX),distcc)
THREADS_COUNT=$(shell distcc -j)
endif
ifeq ($(THREADS_COUNT),)
THREADS_COUNT=$(shell nproc || grep -c ^processor /proc/cpuinfo || sysctl -n hw.ncpu || echo 4)
endif
DEB_BUILD_OPTIONS+=parallel=$(THREADS_COUNT)

ifndef ENABLE_TESTS
CMAKE_FLAGS += -DENABLE_TESTS=0
else
# To export binaries and from deb build we do not strip them. No need to run tests in deb build as we run them in CI
DEB_BUILD_OPTIONS+= nocheck
DEB_BUILD_OPTIONS+= nostrip
endif

ifndef MAKE_TARGET
MAKE_TARGET = clickhouse-bundle
endif

CMAKE_FLAGS += -DENABLE_UTILS=0

DEB_CC ?= $(shell which gcc-11 gcc-10 gcc-9 gcc | head -n1)
DEB_CXX ?= $(shell which g++-11 g++-10 g++-9 g++ | head -n1)

ifdef DEB_CXX
DEB_BUILD_GNU_TYPE := $(shell dpkg-architecture -qDEB_BUILD_GNU_TYPE)
DEB_HOST_GNU_TYPE := $(shell dpkg-architecture -qDEB_HOST_GNU_TYPE)
ifeq ($(DEB_BUILD_GNU_TYPE),$(DEB_HOST_GNU_TYPE))
CC := $(DEB_CC)
CXX := $(DEB_CXX)
else ifeq (clang,$(findstring clang,$(DEB_CXX)))
# If we crosscompile with clang, it knows what to do
CC := $(DEB_CC)
CXX := $(DEB_CXX)
else
CC := $(DEB_HOST_GNU_TYPE)-$(DEB_CC)
CXX := $(DEB_HOST_GNU_TYPE)-$(DEB_CXX)
endif
endif

ifdef CXX
CMAKE_FLAGS += -DCMAKE_CXX_COMPILER=`which $(CXX)`
endif
ifdef CC
CMAKE_FLAGS += -DCMAKE_C_COMPILER=`which $(CC)`
endif

ifndef DISABLE_NINJA
NINJA=$(shell which ninja)
ifneq ($(NINJA),)
CMAKE_FLAGS += -GNinja
export MAKE=$(NINJA) $(NINJA_FLAGS)
endif
endif

ifndef DH_VERBOSE
CMAKE_FLAGS += -DCMAKE_VERBOSE_MAKEFILE=0
endif

# Useful for bulding on low memory systems
ifndef DISABLE_PARALLEL
DH_FLAGS += --parallel
else
THREADS_COUNT = 1
endif

%:
    dh $@ $(DH_FLAGS) --buildsystem=cmake

override_dh_auto_configure:
    dh_auto_configure -- $(CMAKE_FLAGS)

override_dh_auto_build:
    # Fix for ninja. Do not add -O.
    $(MAKE) -j$(THREADS_COUNT) -C $(BUILDDIR) $(MAKE_TARGET)

override_dh_auto_test:
ifeq (,$(filter nocheck,$(DEB_BUILD_OPTIONS)))
    cd $(BUILDDIR) && ctest -j$(THREADS_COUNT) -V
endif

# Disable config.guess and config.sub update
override_dh_update_autotools_config:

override_dh_clean:
    rm -rf debian/copyright debian/clickhouse-client.docs debian/clickhouse-common-static.docs
    dh_clean # -X contrib

override_dh_strip:
    #https://www.debian.org/doc/debian-policy/ch-source.html#debian-rules-and-deb-build-options
ifeq (,$(filter nostrip,$(DEB_BUILD_OPTIONS)))
    dh_strip -pclickhouse-common-static --dbg-package=clickhouse-common-static-dbg
endif

override_dh_install:
    # Making docs
    cp LICENSE debian/copyright

    ln -sf clickhouse-server.docs debian/clickhouse-client.docs
    ln -sf clickhouse-server.docs debian/clickhouse-common-static.docs

    # systemd compatibility
    mkdir -p $(DESTDIR)/etc/systemd/system/
    cp debian/clickhouse-server.service $(DESTDIR)/etc/systemd/system/

    dh_install --list-missing --sourcedir=$(DESTDIR)

override_dh_auto_install:
    env DESTDIR=$(DESTDIR) $(MAKE) -j$(THREADS_COUNT) -C $(BUILDDIR) install

override_dh_shlibdeps:
    true # We depend only on libc and dh_shlibdeps gives us wrong (too strict) dependency.

override_dh_builddeb:
    dh_builddeb -- -Z gzip # Older systems don't have "xz", so use "gzip" instead.
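The rules file picks up DEB_CC/DEB_CXX and builds the clickhouse-bundle target through dh with the CMake build system. A sketch of a local, unsigned, binary-only package build (assumption: debhelper and the clang-13 toolchain from Build-Depends are installed; `-us -uc` match the DEBUILD_NOSIGN_OPTIONS used by the release script later in this diff):

```bash
# Build binary packages only, without signing, using an explicit compiler pair.
env DEB_CC=clang-13 DEB_CXX=clang++-13 debuild -b -us -uc
```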

debian/source/format (vendored, 1 change)

@@ -1 +0,0 @@
3.0 (quilt)

debian/source/options (vendored, 9 changes)

@@ -1,9 +0,0 @@
tar-ignore
tar-ignore="build_*/*"
tar-ignore="workspace/*"
tar-ignore="contrib/poco/openssl/*"
tar-ignore="contrib/poco/gradle/*"
tar-ignore="contrib/poco/Data/SQLite/*"
tar-ignore="contrib/poco/PDF/*"
compression-level=3
compression=gzip

debian/watch (vendored, 6 changes)

@@ -1,6 +0,0 @@
version=4

opts="filenamemangle=s%(?:.*?)?v?(\d[\d.]*)-stable\.tar\.gz%clickhouse-$1.tar.gz%" \
https://github.com/ClickHouse/ClickHouse/tags \
(?:.*?/)?v?(\d[\d.]*)-stable\.tar\.gz debian uupdate

@@ -362,19 +362,6 @@ function get_profiles
clickhouse-client --port $RIGHT_SERVER_PORT --query "select 1"
}

function build_log_column_definitions
{
# FIXME This loop builds column definitons from TSVWithNamesAndTypes in an
# absolutely atrocious way. This should be done by the file() function itself.
for x in {right,left}-{addresses,{query,query-thread,trace,{async-,}metric}-log}.tsv
do
paste -d' ' \
<(sed -n '1{s/\t/\n/g;p;q}' "$x" | sed 's/\(^.*$\)/"\1"/') \
<(sed -n '2{s/\t/\n/g;p;q}' "$x" ) \
| tr '\n' ', ' | sed 's/,$//' > "$x.columns"
done
}

# Build and analyze randomization distribution for all queries.
function analyze_queries
{

@@ -382,8 +369,6 @@ rm -v analyze-commands.txt analyze-errors.log all-queries.tsv unstable-queries.t
rm -rf analyze ||:
mkdir analyze analyze/tmp ||:

build_log_column_definitions

# Split the raw test output into files suitable for analysis.
# To debug calculations only for a particular test, substitute a suitable
# wildcard here, e.g. `for test_file in modulo-raw.tsv`.

@@ -422,12 +407,10 @@ create table partial_query_times engine File(TSVWithNamesAndTypes,

-- Process queries that were run normally, on both servers.
create view left_query_log as select *
from file('left-query-log.tsv', TSVWithNamesAndTypes,
'$(cat "left-query-log.tsv.columns")');
from file('left-query-log.tsv', TSVWithNamesAndTypes);

create view right_query_log as select *
from file('right-query-log.tsv', TSVWithNamesAndTypes,
'$(cat "right-query-log.tsv.columns")');
from file('right-query-log.tsv', TSVWithNamesAndTypes);

create view query_logs as
select 0 version, query_id, ProfileEvents,

@@ -645,8 +628,6 @@ mkdir report report/tmp ||:

rm ./*.{rep,svg} test-times.tsv test-dump.tsv unstable.tsv unstable-query-ids.tsv unstable-query-metrics.tsv changed-perf.tsv unstable-tests.tsv unstable-queries.tsv bad-tests.tsv slow-on-client.tsv all-queries.tsv run-errors.tsv ||:

build_log_column_definitions

cat analyze/errors.log >> report/errors.log ||:
cat profile-errors.log >> report/errors.log ||:

@@ -1028,8 +1009,7 @@ create table unstable_query_runs engine File(TSVWithNamesAndTypes,
;

create view query_log as select *
from file('$version-query-log.tsv', TSVWithNamesAndTypes,
'$(cat "$version-query-log.tsv.columns")');
from file('$version-query-log.tsv', TSVWithNamesAndTypes);

create table unstable_run_metrics engine File(TSVWithNamesAndTypes,
'unstable-run-metrics.$version.rep') as

@@ -1057,8 +1037,7 @@ create table unstable_run_metrics_2 engine File(TSVWithNamesAndTypes,
array join v, n;

create view trace_log as select *
from file('$version-trace-log.tsv', TSVWithNamesAndTypes,
'$(cat "$version-trace-log.tsv.columns")');
from file('$version-trace-log.tsv', TSVWithNamesAndTypes);

create view addresses_src as select addr,
-- Some functions change name between builds, e.g. '__clone' or 'clone' or

@@ -1067,8 +1046,7 @@ create view addresses_src as select addr,
[name, 'clone.S (filtered by script)', 'pthread_cond_timedwait (filtered by script)']
-- this line is a subscript operator of the above array
[1 + multiSearchFirstIndex(name, ['clone.S', 'pthread_cond_timedwait'])] name
from file('$version-addresses.tsv', TSVWithNamesAndTypes,
'$(cat "$version-addresses.tsv.columns")');
from file('$version-addresses.tsv', TSVWithNamesAndTypes);

create table addresses_join_$version engine Join(any, left, address) as
select addr address, name from addresses_src;

@@ -1195,15 +1173,12 @@ done

function report_metrics
{
build_log_column_definitions

rm -rf metrics ||:
mkdir metrics

clickhouse-local --query "
create view right_async_metric_log as
select * from file('right-async-metric-log.tsv', TSVWithNamesAndTypes,
'$(cat right-async-metric-log.tsv.columns)')
select * from file('right-async-metric-log.tsv', TSVWithNamesAndTypes)
;

-- Use the right log as time reference because it may have higher precision.

@@ -1211,8 +1186,7 @@ create table metrics engine File(TSV, 'metrics/metrics.tsv') as
with (select min(event_time) from right_async_metric_log) as min_time
select metric, r.event_time - min_time event_time, l.value as left, r.value as right
from right_async_metric_log r
asof join file('left-async-metric-log.tsv', TSVWithNamesAndTypes,
'$(cat left-async-metric-log.tsv.columns)') l
asof join file('left-async-metric-log.tsv', TSVWithNamesAndTypes) l
on l.metric = r.metric and r.event_time <= l.event_time
order by metric, event_time
;
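The changes above drop the explicit column lists that build_log_column_definitions used to assemble: with TSVWithNamesAndTypes, the file() table function can take the names and types from the file header itself. A small sketch of the simplified form (assuming a ClickHouse build recent enough to infer the structure, and a query-log dump produced by the comparison run):

```bash
# Read a log dump without passing an explicit structure string;
# column names and types come from the TSVWithNamesAndTypes header.
clickhouse-local --query "
    select query_id, query_duration_ms
    from file('right-query-log.tsv', TSVWithNamesAndTypes)
    limit 5"
```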

@@ -5,11 +5,10 @@ toc_title: HTTP Interface

# HTTP Interface {#http-interface}

The HTTP interface lets you use ClickHouse on any platform from any programming language. We use it for working from Java and Perl, as well as shell scripts. In other departments, the HTTP interface is used from Perl, Python, and Go. The HTTP interface is more limited than the native interface, but it has better compatibility.
The HTTP interface lets you use ClickHouse on any platform from any programming language in a form of REST API. The HTTP interface is more limited than the native interface, but it has better language support.

By default, `clickhouse-server` listens for HTTP on port 8123 (this can be changed in the config).

Sometimes, `curl` command is not available on user operating systems. On Ubuntu or Debian, run `sudo apt install curl`. Please refer this [documentation](https://curl.se/download.html) to install it before running the examples.
HTTPS can be enabled as well with port 8443 by default.

If you make a `GET /` request without parameters, it returns 200 response code and the string which defined in [http_server_default_response](../operations/server-configuration-parameters/settings.md#server_configuration_parameters-http_server_default_response) default value “Ok.” (with a line feed at the end)

@@ -18,11 +17,12 @@ $ curl 'http://localhost:8123/'
Ok.
```

Sometimes, `curl` command is not available on user operating systems. On Ubuntu or Debian, run `sudo apt install curl`. Please refer this [documentation](https://curl.se/download.html) to install it before running the examples.

Web UI can be accessed here: `http://localhost:8123/play`.

![Web UI](../images/play.png)

In health-check scripts use `GET /ping` request. This handler always returns “Ok.” (with a line feed at the end). Available from version 18.12.13. See also `/replicas_status` to check replica's delay.

``` bash

@@ -32,7 +32,7 @@ $ curl 'http://localhost:8123/replicas_status'
Ok.
```

Send the request as a URL ‘query’ parameter, or as a POST. Or send the beginning of the query in the ‘query’ parameter, and the rest in the POST (we’ll explain later why this is necessary). The size of the URL is limited to 16 KB, so keep this in mind when sending large queries.
Send the request as a URL ‘query’ parameter, or as a POST. Or send the beginning of the query in the ‘query’ parameter, and the rest in the POST (we’ll explain later why this is necessary). The size of the URL is limited to 1 MiB by default, this can be changed with the `http_max_uri_size` setting.

If successful, you receive the 200 response code and the result in the response body.
If an error occurs, you receive the 500 response code and an error description text in the response body.
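For the paragraph above about passing the query either in the URL or in the POST body, a short sketch of both forms against the default port (the statements are deliberately trivial):

```bash
# Query passed in the URL 'query' parameter.
curl 'http://localhost:8123/?query=SELECT%201'

# The same query sent as the POST body; preferable for large statements,
# since the URL size is limited (1 MiB by default, see http_max_uri_size).
echo 'SELECT 1' | curl 'http://localhost:8123/' --data-binary @-
```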

@@ -28,6 +28,7 @@ toc_title: Adopters
| <a href="https://badoo.com" class="favicon">Badoo</a> | Dating | Timeseries | — | 1.6 mln events/sec (2018) | [Slides in Russian, December 2019](https://presentations.clickhouse.com/meetup38/forecast.pdf) |
| <a href="https://beeline.ru/" class="favicon">Beeline</a> | Telecom | Data Platform | — | — | [Blog post, July 2021](https://habr.com/en/company/beeline/blog/567508/) |
| <a href="https://www.benocs.com/" class="favicon">Benocs</a> | Network Telemetry and Analytics | Main Product | — | — | [Slides in English, October 2017](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup9/lpm.pdf) |
| <a href="https://betterstack.com/" class="favicon">Better Stack</a> | Cloud, SaaS | Log Management | - | - | [Official Website](https://betterstack.com/logtail) |
| <a href="https://www.bigo.sg/" class="favicon">BIGO</a> | Video | Computing Platform | — | — | [Blog Article, August 2020](https://www.programmersought.com/article/44544895251/) |
| <a href="https://www.bilibili.com/" class="favicon">BiliBili</a> | Video sharing | — | — | — | [Blog post, June 2021](https://chowdera.com/2021/06/20210622012241476b.html) |
| <a href="https://www.bloomberg.com/">Bloomberg</a> | Finance, Media | Monitoring | — | — | [Job opening, September 2021](https://careers.bloomberg.com/job/detail/94913), [slides, May 2018](https://www.slideshare.net/Altinity/http-analytics-for-6m-requests-per-second-using-clickhouse-by-alexander-bocharov) |

@@ -112,7 +113,7 @@ toc_title: Adopters
| <a href="https://nlmk.com/en/" class="favicon">NLMK</a> | Steel | Monitoring | — | — | [Article in Russian, Jan 2022](https://habr.com/en/company/nlmk/blog/645943/) |
| <a href="https://getnoc.com/" class="favicon">NOC Project</a> | Network Monitoring | Analytics | Main Product | — | [Official Website](https://getnoc.com/features/big-data/) |
| <a href="https://www.noction.com" class="favicon">Noction</a> | Network Technology | Main Product | — | — | [Official Website](https://www.noction.com/news/irp-3-11-remote-triggered-blackholing-capability)
| <a href="https://www.ntop.org/" class="favicon">ntop</a> | Network Monitoning | Monitoring | — | — | [Official website, Jan 2022](https://www.ntop.org/ntop/historical-traffic-analysis-at-scale-using-clickhouse-with-ntopng/) |
| <a href="https://www.ntop.org/" class="favicon">ntop</a> | Network Monitoning | Monitoring | — | — | [Official website, January 2022](https://www.ntop.org/ntop/historical-traffic-analysis-at-scale-using-clickhouse-with-ntopng/) |
| <a href="https://www.nuna.com/" class="favicon">Nuna Inc.</a> | Health Data Analytics | — | — | — | [Talk in English, July 2020](https://youtu.be/GMiXCMFDMow?t=170) |
| <a href="https://ok.ru" class="favicon">Ok.ru</a> | Social Network | — | 72 servers | 810 TB compressed, 50bn rows/day, 1.5 TB/day | [SmartData conference, October 2021](https://assets.ctfassets.net/oxjq45e8ilak/4JPHkbJenLgZhBGGyyonFP/57472ec6987003ec4078d0941740703b/____________________ClickHouse_______________________.pdf) |
| <a href="https://omnicomm.ru/" class="favicon">Omnicomm</a> | Transportation Monitoring | — | — | — | [Facebook post, October 2021](https://www.facebook.com/OmnicommTeam/posts/2824479777774500) |

@@ -123,6 +124,7 @@ toc_title: Adopters
| <a href="https://panelbear.com/" class="favicon">Panelbear | Analytics | Monitoring and Analytics | — | — | [Tech Stack, November 2020](https://panelbear.com/blog/tech-stack/) |
| <a href="https://www.percent.cn/" class="favicon">Percent 百分点</a> | Analytics | Main Product | — | — | [Slides in Chinese, June 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup24/4.%20ClickHouse万亿数据双中心的设计与实践%20.pdf) |
| <a href="https://www.percona.com/" class="favicon">Percona</a> | Performance analysis | Percona Monitoring and Management | — | — | [Official website, Mar 2020](https://www.percona.com/blog/2020/03/30/advanced-query-analysis-in-percona-monitoring-and-management-with-direct-clickhouse-access/) |
| <a href="https://pingcap.com/" class="favicon">PingCAP</a> | Analytics | Real-Time Transactional and Analytical Processing | - | - | [GitHub, TiFlash/TiDB](https://github.com/pingcap/tiflash) |
| <a href="https://plausible.io/" class="favicon">Plausible</a> | Analytics | Main Product | — | — | [Blog post, June 2020](https://twitter.com/PlausibleHQ/status/1273889629087969280) |
| <a href="https://posthog.com/" class="favicon">PostHog</a> | Product Analytics | Main Product | — | — | [Release Notes, October 2020](https://posthog.com/blog/the-posthog-array-1-15-0), [Blog, November 2021](https://posthog.com/blog/how-we-turned-clickhouse-into-our-eventmansion) |
| <a href="https://postmates.com/" class="favicon">Postmates</a> | Delivery | — | — | — | [Talk in English, July 2020](https://youtu.be/GMiXCMFDMow?t=188) |

@@ -159,6 +161,7 @@ toc_title: Adopters
| <a href="https://www.suning.com/" class="favicon">Suning</a> | E-Commerce | User behaviour analytics | — | — | [Blog article](https://www.sohu.com/a/434152235_411876) |
| <a href="https://superwall.me/" class="favicon">Superwall</a> | Monetization Tooling | Main product | — | — | [Word of mouth, Jan 2022](https://github.com/ClickHouse/ClickHouse/pull/33573) |
| <a href="https://swetrix.com" class="favicon">Swetrix</a> | Analytics | Main Product | — | — | [Source code](https://github.com/swetrix/swetrix-api) |
| <a href="https://synpse.net/" class="favicon">Synpse</a> | Application Management | Main Product | - | - | [Tweet, January 2022](https://twitter.com/KRusenas/status/1483571168363880455) |
| <a href="https://www.teralytics.net/" class="favicon">Teralytics</a> | Mobility | Analytics | — | — | [Tech blog](https://www.teralytics.net/knowledge-hub/visualizing-mobility-data-the-scalability-challenge) |
| <a href="https://www.tencent.com" class="favicon">Tencent</a> | Big Data | Data processing | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/5.%20ClickHouse大数据集群应用_李俊飞腾讯网媒事业部.pdf) |
| <a href="https://www.tencent.com" class="favicon">Tencent</a> | Messaging | Logging | — | — | [Talk in Chinese, November 2019](https://youtu.be/T-iVQRuw-QY?t=5050) |

@@ -172,6 +175,7 @@ toc_title: Adopters
| <a href="https://hello.utmstat.com/" class="favicon">UTMSTAT</a> | Analytics | Main product | — | — | [Blog post, June 2020](https://vc.ru/tribuna/133956-striming-dannyh-iz-servisa-skvoznoy-analitiki-v-clickhouse) |
| <a href="https://vercel.com/" class="favicon">Vercel</a> | Traffic and Performance Analytics | — | — | — | Direct reference, October 2021 |
| <a href="https://vk.com" class="favicon">VKontakte</a> | Social Network | Statistics, Logging | — | — | [Slides in Russian, August 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup17/3_vk.pdf) |
| <a href="https://vkontech.com/" class="favicon">VKontech</a> | Distributed Systems | Migrating from MongoDB | - | - | [Blog, January 2022](https://vkontech.com/migrating-your-reporting-queries-from-a-general-purpose-db-mongodb-to-a-data-warehouse-clickhouse-performance-overview/) |
| <a href="https://www.vmware.com/" class="favicon">VMware</a> | Cloud | VeloCloud, SDN | — | — | [Product documentation](https://docs.vmware.com/en/vRealize-Operations-Manager/8.3/com.vmware.vcom.metrics.doc/GUID-A9AD72E1-C948-4CA2-971B-919385AB3CA8.html) |
| <a href="https://www.walmartlabs.com/" class="favicon">Walmart Labs</a> | Internet, Retail | — | — | — | [Talk in English, July 2020](https://youtu.be/GMiXCMFDMow?t=144) |
| <a href="https://wargaming.com/en/" class="favicon">Wargaming</a> | Games | | — | — | [Interview](https://habr.com/en/post/496954/) |

@@ -197,5 +201,6 @@ toc_title: Adopters
| <a href="https://domclick.ru/" class="favicon">ДомКлик</a> | Real Estate | — | — | — | [Article in Russian, October 2021](https://habr.com/ru/company/domclick/blog/585936/) |
| <a href="https://magenta-technology.ru/sistema-upravleniya-marshrutami-inkassacii-as-strela/" class="favicon">АС "Стрела"</a> | Transportation | — | — | — | [Job posting, Jan 2022](https://vk.com/topic-111905078_35689124?post=3553) |
| <a href="https://piwik.pro/" class="favicon">Piwik PRO</a> | Web Analytics | — | — | — | [Official website, Dec 2018](https://piwik.pro/blog/piwik-pro-clickhouse-faster-efficient-reports/) |
| <a href="https://www.deepglint.com/" class="favicon">Deepglint 格灵深瞳</a> | AI, Computer Vision | OLAP | — | — | [Official Website](https://www.deepglint.com/) |

[Original article](https://clickhouse.com/docs/en/introduction/adopters/) <!--hide-->

@@ -182,7 +182,7 @@ Marks numbers: 0 1 2 3 4 5 6 7 8

Разреженный индекс допускает чтение лишних строк. При чтении одного диапазона первичного ключа, может быть прочитано до `index_granularity * 2` лишних строк в каждом блоке данных.

Разреженный индекс почти всегда помещаеся в оперативную память и позволяет работать с очень большим количеством строк в таблицах.
Разреженный индекс почти всегда помещается в оперативную память и позволяет работать с очень большим количеством строк в таблицах.

ClickHouse не требует уникального первичного ключа. Можно вставить много строк с одинаковым первичным ключом.

@@ -5,9 +5,9 @@ toc_title: Nothing

# Nothing {#nothing}

Этот тип данных предназначен только для того, чтобы представлять [NULL](../../../sql-reference/data-types/special-data-types/nothing.md), т.е. отсутствие значения.
Этот тип данных предназначен только для того, чтобы представлять [NULL](../../../sql-reference/syntax.md#null-literal), т.е. отсутствие значения.

Невозможно создать значение типа `Nothing`, поэтому он используется там, где значение не подразумевается. Например, `NULL` записывается как `Nullable(Nothing)` ([Nullable](../../../sql-reference/data-types/special-data-types/nothing.md) — это тип данных, позволяющий хранить `NULL` в таблицах). Также тип `Nothing` используется для обозначения пустых массивов:
Невозможно создать значение типа `Nothing`, поэтому он используется там, где значение не подразумевается. Например, `NULL` записывается как `Nullable(Nothing)` ([Nullable](../../../sql-reference/data-types/nullable.md) — это тип данных, позволяющий хранить `NULL` в таблицах). Также тип `Nothing` используется для обозначения пустых массивов:

``` sql
SELECT toTypeName(Array())

@@ -2,7 +2,7 @@

此数据类型的唯一目的是表示不是期望值的情况。 所以不能创建一个 `Nothing` 类型的值。

例如,文本 [NULL](../../../sql-reference/data-types/special-data-types/nothing.md#null-literal) 的类型为 `Nullable(Nothing)`。详情请见 [可为空](../../../sql-reference/data-types/special-data-types/nothing.md)。
例如,字面量 [NULL](../../../sql-reference/syntax.md#null-literal) 的类型为 `Nullable(Nothing)`。详情请见 [可为空](../../../sql-reference/data-types/nullable.md)。

`Nothing` 类型也可以用来表示空数组:

@@ -66,40 +66,40 @@ int mainEntryClickHouseCompressor(int argc, char ** argv)
using namespace DB;
namespace po = boost::program_options;

po::options_description desc = createOptionsDescription("Allowed options", getTerminalWidth());
desc.add_options()
("help,h", "produce help message")
("input", po::value<std::string>()->value_name("INPUT"), "input file")
("output", po::value<std::string>()->value_name("OUTPUT"), "output file")
("decompress,d", "decompress")
("offset-in-compressed-file", po::value<size_t>()->default_value(0ULL), "offset to the compressed block (i.e. physical file offset)")
("offset-in-decompressed-block", po::value<size_t>()->default_value(0ULL), "offset to the decompressed block (i.e. virtual offset)")
("block-size,b", po::value<unsigned>()->default_value(DBMS_DEFAULT_BUFFER_SIZE), "compress in blocks of specified size")
("hc", "use LZ4HC instead of LZ4")
("zstd", "use ZSTD instead of LZ4")
("codec", po::value<std::vector<std::string>>()->multitoken(), "use codecs combination instead of LZ4")
("level", po::value<int>(), "compression level for codecs specified via flags")
("none", "use no compression instead of LZ4")
("stat", "print block statistics of compressed data")
;

po::positional_options_description positional_desc;
positional_desc.add("input", 1);
positional_desc.add("output", 1);

po::variables_map options;
po::store(po::command_line_parser(argc, argv).options(desc).positional(positional_desc).run(), options);

if (options.count("help"))
{
std::cout << "Usage: " << argv[0] << " [options] < INPUT > OUTPUT" << std::endl;
std::cout << "Usage: " << argv[0] << " [options] INPUT OUTPUT" << std::endl;
std::cout << desc << std::endl;
return 0;
}

try
{
po::options_description desc = createOptionsDescription("Allowed options", getTerminalWidth());
desc.add_options()
("help,h", "produce help message")
("input", po::value<std::string>()->value_name("INPUT"), "input file")
("output", po::value<std::string>()->value_name("OUTPUT"), "output file")
("decompress,d", "decompress")
("offset-in-compressed-file", po::value<size_t>()->default_value(0ULL), "offset to the compressed block (i.e. physical file offset)")
("offset-in-decompressed-block", po::value<size_t>()->default_value(0ULL), "offset to the decompressed block (i.e. virtual offset)")
("block-size,b", po::value<unsigned>()->default_value(DBMS_DEFAULT_BUFFER_SIZE), "compress in blocks of specified size")
("hc", "use LZ4HC instead of LZ4")
("zstd", "use ZSTD instead of LZ4")
("codec", po::value<std::vector<std::string>>()->multitoken(), "use codecs combination instead of LZ4")
("level", po::value<int>(), "compression level for codecs specified via flags")
("none", "use no compression instead of LZ4")
("stat", "print block statistics of compressed data")
;

po::positional_options_description positional_desc;
positional_desc.add("input", 1);
positional_desc.add("output", 1);

po::variables_map options;
po::store(po::command_line_parser(argc, argv).options(desc).positional(positional_desc).run(), options);

if (options.count("help"))
{
std::cout << "Usage: " << argv[0] << " [options] < INPUT > OUTPUT" << std::endl;
std::cout << "Usage: " << argv[0] << " [options] INPUT OUTPUT" << std::endl;
std::cout << desc << std::endl;
return 0;
}

bool decompress = options.count("decompress");
bool use_lz4hc = options.count("hc");
bool use_zstd = options.count("zstd");
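Based on the option list and the usage strings above, a minimal sketch of driving the tool (file names are illustrative):

```bash
# Compress stdin to stdout with the default LZ4 codec.
clickhouse-compressor < data.bin > data.bin.compressed

# Decompress it back.
clickhouse-compressor --decompress < data.bin.compressed > data.bin

# Use ZSTD instead of LZ4 and then print per-block statistics of the result.
clickhouse-compressor --zstd < data.bin > data.bin.zst
clickhouse-compressor --stat < data.bin.zst
```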
@ -44,40 +44,40 @@ int mainEntryClickHouseFormat(int argc, char ** argv)
|
||||
{
|
||||
using namespace DB;
|
||||
|
||||
boost::program_options::options_description desc = createOptionsDescription("Allowed options", getTerminalWidth());
|
||||
desc.add_options()
|
||||
("query", po::value<std::string>(), "query to format")
|
||||
("help,h", "produce help message")
|
||||
("hilite", "add syntax highlight with ANSI terminal escape sequences")
|
||||
("oneline", "format in single line")
|
||||
("quiet,q", "just check syntax, no output on success")
|
||||
("multiquery,n", "allow multiple queries in the same file")
|
||||
("obfuscate", "obfuscate instead of formatting")
|
||||
("backslash", "add a backslash at the end of each line of the formatted query")
|
||||
("allow_settings_after_format_in_insert", "Allow SETTINGS after FORMAT, but note, that this is not always safe")
|
||||
("seed", po::value<std::string>(), "seed (arbitrary string) that determines the result of obfuscation")
|
||||
;
|
||||
|
||||
Settings cmd_settings;
|
||||
for (const auto & field : cmd_settings.all())
|
||||
{
|
||||
if (field.getName() == "max_parser_depth" || field.getName() == "max_query_size")
|
||||
cmd_settings.addProgramOption(desc, field);
|
||||
}
|
||||
|
||||
boost::program_options::variables_map options;
|
||||
boost::program_options::store(boost::program_options::parse_command_line(argc, argv, desc), options);
|
||||
po::notify(options);
|
||||
|
||||
if (options.count("help"))
|
||||
{
|
||||
std::cout << "Usage: " << argv[0] << " [options] < query" << std::endl;
|
||||
std::cout << desc << std::endl;
|
||||
return 1;
|
||||
}
|
||||
|
||||
try
|
||||
{
|
||||
boost::program_options::options_description desc = createOptionsDescription("Allowed options", getTerminalWidth());
|
||||
desc.add_options()
|
||||
("query", po::value<std::string>(), "query to format")
|
||||
("help,h", "produce help message")
|
||||
("hilite", "add syntax highlight with ANSI terminal escape sequences")
|
||||
("oneline", "format in single line")
|
||||
("quiet,q", "just check syntax, no output on success")
|
||||
("multiquery,n", "allow multiple queries in the same file")
|
||||
("obfuscate", "obfuscate instead of formatting")
|
||||
("backslash", "add a backslash at the end of each line of the formatted query")
|
||||
("allow_settings_after_format_in_insert", "Allow SETTINGS after FORMAT, but note, that this is not always safe")
|
||||
("seed", po::value<std::string>(), "seed (arbitrary string) that determines the result of obfuscation")
|
||||
;
|
||||
|
||||
Settings cmd_settings;
|
||||
for (const auto & field : cmd_settings.all())
|
||||
{
|
||||
if (field.getName() == "max_parser_depth" || field.getName() == "max_query_size")
|
||||
cmd_settings.addProgramOption(desc, field);
|
||||
}
|
||||
|
||||
boost::program_options::variables_map options;
|
||||
boost::program_options::store(boost::program_options::parse_command_line(argc, argv, desc), options);
|
||||
po::notify(options);
|
||||
|
||||
if (options.count("help"))
|
||||
{
|
||||
std::cout << "Usage: " << argv[0] << " [options] < query" << std::endl;
|
||||
std::cout << desc << std::endl;
|
||||
return 1;
|
||||
}
|
||||
|
||||
bool hilite = options.count("hilite");
|
||||
bool oneline = options.count("oneline");
|
||||
bool quiet = options.count("quiet");
|
||||
|
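The options above also translate into ordinary command-line usage. A minimal sketch, assuming the entry point is exposed as "clickhouse-format" (it may also be available as "clickhouse format"):

# Pretty-print a query read from stdin (assumed binary name).
echo "select a,b from t where a=1" | clickhouse-format
# Format a query passed via --query on a single line, with ANSI highlighting.
clickhouse-format --query "SELECT a, b FROM t WHERE a = 1" --oneline --hilite
# Obfuscate a file with several queries instead of formatting; the seed makes the result deterministic.
clickhouse-format --obfuscate --multiquery --seed my_seed < queries.sql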
@ -1231,5 +1231,5 @@ try
|
||||
catch (...)
|
||||
{
|
||||
std::cerr << DB::getCurrentExceptionMessage(true) << '\n';
|
||||
throw;
|
||||
return DB::getCurrentExceptionCode();
|
||||
}
|
||||
|
@ -589,7 +589,7 @@
|
||||
stats.innerText = `Elapsed: ${seconds} sec, read ${formatted_rows} rows, ${formatted_bytes}.`;
|
||||
|
||||
/// We can also render graphs if the user ran EXPLAIN PIPELINE graph = 1 or EXPLAIN AST graph = 1
|
||||
if (response.data.length > 3 && response.data[0][0].startsWith("digraph") && document.getElementById('query').value.match(/^\s*EXPLAIN/i)) {
|
||||
if (response.data.length > 3 && document.getElementById('query').value.match(/^\s*EXPLAIN/i) && typeof(response.data[0][0]) === "string" && response.data[0][0].startsWith("digraph")) {
|
||||
renderGraph(response);
|
||||
} else {
|
||||
renderTable(response);
|
||||
|
99
release
@ -1,99 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# If you have "no space left" error, you can change the location of temporary files with BUILDPLACE environment variable.
|
||||
|
||||
# Version increment:
|
||||
# Default release: 18.1.2 -> 18.2.0:
|
||||
# ./release --version
|
||||
# or
|
||||
# ./release --version minor
|
||||
# Bugfix release (only with small patches to previous release): 18.1.2 -> 18.1.3:
|
||||
# ./release --version patch
|
||||
# Do this once per year: 18.1.2 -> 19.0.0:
|
||||
# ./release --version major
|
||||
|
||||
set -e
|
||||
|
||||
# Avoid dependency on locale
|
||||
LC_ALL=C
|
||||
|
||||
CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
|
||||
cd $CUR_DIR
|
||||
|
||||
source "./utils/release/release_lib.sh"
|
||||
|
||||
DEBUILD_NOSIGN_OPTIONS="-us -uc"
|
||||
DEBUILD_NODEPS_OPTIONS="-d"
|
||||
|
||||
if [ -z "$VERSION_STRING" ] ; then
|
||||
get_revision_author
|
||||
fi
|
||||
|
||||
while [[ $1 == --* ]]
|
||||
do
|
||||
if [[ $1 == '--test' ]]; then
|
||||
TEST='yes'
|
||||
VERSION_POSTFIX+=+test
|
||||
shift
|
||||
elif [[ $1 == '--check-build-dependencies' ]]; then
|
||||
DEBUILD_NODEPS_OPTIONS=""
|
||||
shift
|
||||
elif [[ $1 == '--version' ]]; then
|
||||
gen_revision_author $2
|
||||
exit 0
|
||||
elif [[ $1 == '--rpm' ]]; then
|
||||
MAKE_RPM=1
|
||||
shift
|
||||
elif [[ $1 == '--tgz' ]]; then
|
||||
MAKE_TGZ=1
|
||||
shift
|
||||
else
|
||||
echo "Unknown option $1"
|
||||
exit 2
|
||||
fi
|
||||
done
|
||||
|
||||
# Build options
|
||||
if [ -n "$SANITIZER" ]
|
||||
then
|
||||
if [[ "$SANITIZER" == "address" ]]; then VERSION_POSTFIX+="+asan"
|
||||
elif [[ "$SANITIZER" == "thread" ]]; then VERSION_POSTFIX+="+tsan"
|
||||
elif [[ "$SANITIZER" == "memory" ]]; then VERSION_POSTFIX+="+msan"
|
||||
elif [[ "$SANITIZER" == "undefined" ]]; then VERSION_POSTFIX+="+ubsan"
|
||||
else
|
||||
echo "Unknown value of SANITIZER variable: $SANITIZER"
|
||||
exit 3
|
||||
fi
|
||||
|
||||
export DEB_CC=${DEB_CC=clang-10}
|
||||
export DEB_CXX=${DEB_CXX=clang++-10}
|
||||
EXTRAPACKAGES="$EXTRAPACKAGES clang-10 lld-10"
|
||||
elif [[ $BUILD_TYPE == 'debug' ]]; then
|
||||
CMAKE_BUILD_TYPE=Debug
|
||||
VERSION_POSTFIX+="+debug"
|
||||
fi
|
||||
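The SANITIZER and BUILD_TYPE branches above only adjust the version postfix, compilers and CMake flags; a sketch of how this (since removed) release script was typically invoked with those variables:

# Plain release build producing .deb packages.
./release
# Sanitized build: the +asan/+tsan/+msan/+ubsan postfix is appended automatically.
SANITIZER=address ./release
# Debug build that additionally produces .rpm and .tgz packages via the flags parsed earlier.
BUILD_TYPE=debug ./release --rpm --tgz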
|
||||
CMAKE_FLAGS=" $MALLOC_OPTS -DSANITIZE=$SANITIZER -DENABLE_CHECK_HEAVY_BUILDS=1 $CMAKE_FLAGS"
|
||||
[[ -n "$CMAKE_BUILD_TYPE" ]] && CMAKE_FLAGS=" -DCMAKE_BUILD_TYPE=$CMAKE_BUILD_TYPE $CMAKE_FLAGS"
|
||||
|
||||
export CMAKE_FLAGS
|
||||
export EXTRAPACKAGES
|
||||
|
||||
VERSION_STRING+=$VERSION_POSTFIX
|
||||
echo -e "\nCurrent version is $VERSION_STRING"
|
||||
|
||||
if [ -z "$NO_BUILD" ] ; then
|
||||
gen_changelog "$VERSION_STRING" "" "$AUTHOR" ""
|
||||
# Build (only binary packages).
|
||||
debuild --preserve-env -e PATH \
|
||||
-e DEB_CC=$DEB_CC -e DEB_CXX=$DEB_CXX -e CMAKE_FLAGS="$CMAKE_FLAGS" \
|
||||
-b ${DEBUILD_NOSIGN_OPTIONS} ${DEBUILD_NODEPS_OPTIONS} ${DEB_ARCH_FLAG}
|
||||
fi
|
||||
|
||||
if [ -n "$MAKE_RPM" ]; then
|
||||
make_rpm
|
||||
fi
|
||||
|
||||
if [ -n "$MAKE_TGZ" ]; then
|
||||
make_tgz
|
||||
fi
|
@ -163,6 +163,11 @@ bool FileSegment::isDownloader() const
|
||||
return getCallerId() == downloader_id;
|
||||
}
|
||||
|
||||
bool FileSegment::isDownloaderImpl(std::lock_guard<std::mutex> & /* segment_lock */) const
|
||||
{
|
||||
return getCallerId() == downloader_id;
|
||||
}
|
||||
|
||||
FileSegment::RemoteFileReaderPtr FileSegment::getRemoteFileReader()
|
||||
{
|
||||
if (!isDownloader())
|
||||
@ -397,6 +402,9 @@ bool FileSegment::reserve(size_t size)
|
||||
|
||||
void FileSegment::setDownloaded(std::lock_guard<std::mutex> & /* segment_lock */)
|
||||
{
|
||||
if (is_downloaded)
|
||||
return;
|
||||
|
||||
download_state = State::DOWNLOADED;
|
||||
is_downloaded = true;
|
||||
downloader_id.clear();
|
||||
@ -426,8 +434,7 @@ void FileSegment::completeBatchAndResetDownloader()
|
||||
{
|
||||
std::lock_guard segment_lock(mutex);
|
||||
|
||||
bool is_downloader = downloader_id == getCallerId();
|
||||
if (!is_downloader)
|
||||
if (!isDownloaderImpl(segment_lock))
|
||||
{
|
||||
cv.notify_all();
|
||||
throw Exception(
|
||||
@ -448,7 +455,7 @@ void FileSegment::complete(State state)
|
||||
std::lock_guard cache_lock(cache->mutex);
|
||||
std::lock_guard segment_lock(mutex);
|
||||
|
||||
bool is_downloader = downloader_id == getCallerId();
|
||||
bool is_downloader = isDownloaderImpl(segment_lock);
|
||||
if (!is_downloader)
|
||||
{
|
||||
cv.notify_all();
|
||||
@ -465,6 +472,9 @@ void FileSegment::complete(State state)
|
||||
"Cannot complete file segment with state: {}", stateToString(state));
|
||||
}
|
||||
|
||||
if (state == State::DOWNLOADED)
|
||||
setDownloaded(segment_lock);
|
||||
|
||||
download_state = state;
|
||||
|
||||
assertNotDetached();
|
||||
@ -475,7 +485,7 @@ void FileSegment::complete(State state)
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
if (!downloader_id.empty() && downloader_id == getCallerIdImpl())
|
||||
if (!downloader_id.empty() && is_downloader)
|
||||
downloader_id.clear();
|
||||
|
||||
cv.notify_all();
|
||||
@ -492,8 +502,12 @@ void FileSegment::complete(std::lock_guard<std::mutex> & cache_lock)
|
||||
if (download_state == State::SKIP_CACHE || detached)
|
||||
return;
|
||||
|
||||
if (download_state != State::DOWNLOADED && getDownloadedSize(segment_lock) == range().size())
|
||||
if (isDownloaderImpl(segment_lock)
|
||||
&& download_state != State::DOWNLOADED
|
||||
&& getDownloadedSize(segment_lock) == range().size())
|
||||
{
|
||||
setDownloaded(segment_lock);
|
||||
}
|
||||
|
||||
assertNotDetached();
|
||||
|
||||
@ -502,7 +516,7 @@ void FileSegment::complete(std::lock_guard<std::mutex> & cache_lock)
|
||||
/// Segment state can be changed from DOWNLOADING or EMPTY only if the caller is the
|
||||
/// downloader or the only owner of the segment.
|
||||
|
||||
bool can_update_segment_state = downloader_id == getCallerIdImpl()
|
||||
bool can_update_segment_state = isDownloaderImpl(segment_lock)
|
||||
|| cache->isLastFileSegmentHolder(key(), offset(), cache_lock, segment_lock);
|
||||
|
||||
if (can_update_segment_state)
|
||||
@ -515,7 +529,7 @@ void FileSegment::complete(std::lock_guard<std::mutex> & cache_lock)
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
if (!downloader_id.empty() && downloader_id == getCallerIdImpl())
|
||||
if (!downloader_id.empty() && isDownloaderImpl(segment_lock))
|
||||
downloader_id.clear();
|
||||
|
||||
cv.notify_all();
|
||||
@ -561,7 +575,7 @@ void FileSegment::completeImpl(std::lock_guard<std::mutex> & cache_lock, std::lo
|
||||
}
|
||||
}
|
||||
|
||||
if (!downloader_id.empty() && (downloader_id == getCallerIdImpl() || is_last_holder))
|
||||
if (!downloader_id.empty() && (isDownloaderImpl(segment_lock) || is_last_holder))
|
||||
{
|
||||
LOG_TEST(log, "Clearing downloader id: {}, current state: {}", downloader_id, stateToString(download_state));
|
||||
downloader_id.clear();
|
||||
|
@ -154,6 +154,7 @@ private:
|
||||
|
||||
void setDownloaded(std::lock_guard<std::mutex> & segment_lock);
|
||||
void setDownloadFailed(std::lock_guard<std::mutex> & segment_lock);
|
||||
bool isDownloaderImpl(std::lock_guard<std::mutex> & segment_lock) const;
|
||||
|
||||
void wrapWithCacheInfo(Exception & e, const String & message, std::lock_guard<std::mutex> & segment_lock) const;
|
||||
|
||||
|
@ -613,7 +613,10 @@ bool CachedReadBufferFromRemoteFS::nextImplStep()
|
||||
{
|
||||
bool need_complete_file_segment = file_segment->isDownloader();
|
||||
if (need_complete_file_segment)
|
||||
{
|
||||
LOG_TEST(log, "Resetting downloader {} from scope exit", file_segment->getDownloader());
|
||||
file_segment->completeBatchAndResetDownloader();
|
||||
}
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
|
@ -193,7 +193,10 @@ void ClientInfo::setInitialQuery()
|
||||
{
|
||||
query_kind = QueryKind::INITIAL_QUERY;
|
||||
fillOSUserHostNameAndVersionInfo();
|
||||
client_name = (DBMS_NAME " ") + client_name;
|
||||
if (client_name.empty())
|
||||
client_name = DBMS_NAME;
|
||||
else
|
||||
client_name = (DBMS_NAME " ") + client_name;
|
||||
}
|
||||
|
||||
|
||||
|
@ -92,26 +92,11 @@ bool ParserDataType::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
|
||||
}
|
||||
else if (type_name_upper.find("INT") != std::string::npos)
|
||||
{
|
||||
/// Support SIGNED and UNSIGNED integer type modifiers for compatibility with MySQL
|
||||
/// Support SIGNED and UNSIGNED integer type modifiers for compatibility with MySQL.
|
||||
if (ParserKeyword("SIGNED").ignore(pos))
|
||||
type_name_suffix = "SIGNED";
|
||||
else if (ParserKeyword("UNSIGNED").ignore(pos))
|
||||
type_name_suffix = "UNSIGNED";
|
||||
else if (pos->type == TokenType::OpeningRoundBracket)
|
||||
{
|
||||
++pos;
|
||||
if (pos->type != TokenType::Number)
|
||||
return false;
|
||||
++pos;
|
||||
if (pos->type != TokenType::ClosingRoundBracket)
|
||||
return false;
|
||||
++pos;
|
||||
if (ParserKeyword("SIGNED").ignore(pos))
|
||||
type_name_suffix = "SIGNED";
|
||||
else if (ParserKeyword("UNSIGNED").ignore(pos))
|
||||
type_name_suffix = "UNSIGNED";
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
if (!type_name_suffix.empty())
|
||||
|
@ -2,6 +2,8 @@
|
||||
|
||||
from collections import namedtuple
|
||||
from typing import Any, Dict, List
|
||||
from threading import Thread
|
||||
from queue import Queue
|
||||
import json
|
||||
import time
|
||||
|
||||
@ -25,6 +27,24 @@ MAX_RETRY = 5
|
||||
DEBUG_INFO = {} # type: Dict[str, Any]
|
||||
|
||||
|
||||
class Worker(Thread):
|
||||
def __init__(self, request_queue: Queue, ignore_exception: bool = False):
|
||||
Thread.__init__(self)
|
||||
self.queue = request_queue
|
||||
self.ignore_exception = ignore_exception
|
||||
self.response = {} # type: Dict
|
||||
|
||||
def run(self):
|
||||
m = self.queue.get()
|
||||
try:
|
||||
self.response = _exec_get_with_retry(m)
|
||||
except Exception as e:
|
||||
if not self.ignore_exception:
|
||||
raise
|
||||
print(f"Exception occured, still continue: {e}")
|
||||
self.queue.task_done()
|
||||
|
||||
|
||||
def get_installation_id(jwt_token):
|
||||
headers = {
|
||||
"Authorization": f"Bearer {jwt_token}",
|
||||
@ -88,32 +108,37 @@ def _exec_get_with_retry(url):
|
||||
|
||||
|
||||
WorkflowDescription = namedtuple(
|
||||
"WorkflowDescription", ["run_id", "status", "rerun_url", "cancel_url"]
|
||||
"WorkflowDescription", ["run_id", "status", "rerun_url", "cancel_url", "conclusion"]
|
||||
)
|
||||
|
||||
|
||||
def get_workflows_description_for_pull_request(pull_request_event):
|
||||
def get_workflows_description_for_pull_request(
|
||||
pull_request_event,
|
||||
) -> List[WorkflowDescription]:
|
||||
head_repo = pull_request_event["head"]["repo"]["full_name"]
|
||||
head_branch = pull_request_event["head"]["ref"]
|
||||
head_sha = pull_request_event["head"]["sha"]
|
||||
print("PR", pull_request_event["number"], "has head ref", head_branch)
|
||||
workflows_data = []
|
||||
workflows = _exec_get_with_retry(
|
||||
API_URL + f"/actions/runs?branch={head_branch}&event=pull_request&page=1"
|
||||
)
|
||||
workflows_data += workflows["workflow_runs"]
|
||||
i = 2
|
||||
while len(workflows["workflow_runs"]) > 0:
|
||||
workflows = _exec_get_with_retry(
|
||||
API_URL + f"/actions/runs?branch={head_branch}&event=pull_request&page={i}"
|
||||
)
|
||||
workflows_data += workflows["workflow_runs"]
|
||||
i += 1
|
||||
if i > 30:
|
||||
print("Too many workflows found")
|
||||
break
|
||||
|
||||
DEBUG_INFO["workflows"] = [] # type: List[Dict[str, str]]
|
||||
workflows_data = []
|
||||
request_url = f"{API_URL}/actions/runs?per_page=100"
|
||||
# Get all workflows for the current branch
|
||||
for i in range(1, 11):
|
||||
workflows = _exec_get_with_retry(
|
||||
f"{request_url}&event=pull_request&branch={head_branch}&page={i}"
|
||||
)
|
||||
if not workflows["workflow_runs"]:
|
||||
break
|
||||
workflows_data += workflows["workflow_runs"]
|
||||
if i == 10:
|
||||
print("Too many workflows found")
|
||||
|
||||
if not workflows_data:
|
||||
print("No workflows found by filter")
|
||||
return []
|
||||
|
||||
print(f"Total workflows for the branch {head_branch} found: {len(workflows_data)}")
|
||||
|
||||
DEBUG_INFO["workflows"] = []
|
||||
workflow_descriptions = []
|
||||
for workflow in workflows_data:
|
||||
# Sometimes workflow["head_repository"]["full_name"] is None
|
||||
@ -123,13 +148,13 @@ def get_workflows_description_for_pull_request(pull_request_event):
|
||||
{
|
||||
"full_name": workflow["head_repository"]["full_name"],
|
||||
"name": workflow["name"],
|
||||
"branch": workflow["head_branch"],
|
||||
}
|
||||
)
|
||||
# Unfortunately, we cannot filter out workflows from forks in the API request,
|
||||
# so we do it manually
|
||||
if (
|
||||
workflow["head_sha"] == head_sha
|
||||
and workflow["head_repository"]["full_name"] == head_repo
|
||||
workflow["head_repository"]["full_name"] == head_repo
|
||||
and workflow["name"] in NEED_RERUN_OR_CANCELL_WORKFLOWS
|
||||
):
|
||||
workflow_descriptions.append(
|
||||
@ -138,19 +163,85 @@ def get_workflows_description_for_pull_request(pull_request_event):
|
||||
status=workflow["status"],
|
||||
rerun_url=workflow["rerun_url"],
|
||||
cancel_url=workflow["cancel_url"],
|
||||
conclusion=workflow["conclusion"],
|
||||
)
|
||||
)
|
||||
|
||||
return workflow_descriptions
|
||||
|
||||
|
||||
def get_workflow_description(workflow_id):
|
||||
def get_workflow_description_fallback(event_data) -> List[WorkflowDescription]:
|
||||
pull_request_event = event_data["pull_request"]
|
||||
head_repo = pull_request_event["head"]["repo"]["full_name"]
|
||||
head_branch = pull_request_event["head"]["ref"]
|
||||
head_sha = pull_request_event["head"]["sha"]
|
||||
print("Get last 500 workflows from API to search related there")
|
||||
# Fallback for the case when the branch is already deleted and no workflows were received
|
||||
request_url = f"{API_URL}/actions/runs?per_page=100"
|
||||
q = Queue() # type: Queue
|
||||
workers = []
|
||||
workflows_data = []
|
||||
i = 1
|
||||
for i in range(1, 6):
|
||||
q.put(f"{request_url}&page={i}")
|
||||
worker = Worker(q, True)
|
||||
worker.start()
|
||||
workers.append(worker)
|
||||
|
||||
for worker in workers:
|
||||
worker.join()
|
||||
if not worker.response:
|
||||
# We ignore GET errors, so the response can be empty
|
||||
continue
|
||||
# Prefilter workflows
|
||||
workflows_data += [
|
||||
wf
|
||||
for wf in worker.response["workflow_runs"]
|
||||
if wf["head_repository"] is not None
|
||||
and wf["head_repository"]["full_name"] == head_repo
|
||||
and wf["head_branch"] == head_branch
|
||||
and wf["name"] in NEED_RERUN_OR_CANCELL_WORKFLOWS
|
||||
]
|
||||
|
||||
print(f"Total workflows in last 500 actions matches: {len(workflows_data)}")
|
||||
|
||||
DEBUG_INFO["workflows"] = [
|
||||
{
|
||||
"full_name": wf["head_repository"]["full_name"],
|
||||
"name": wf["name"],
|
||||
"branch": wf["head_branch"],
|
||||
}
|
||||
for wf in workflows_data
|
||||
]
|
||||
if event_data["action"] == "synchronize":
|
||||
print(f"Leave only workflows with SHA but {head_sha} for updated PR")
|
||||
# Cancel all runs whose SHA differs from the current one
|
||||
workflows_data = list(
|
||||
filter(lambda x: x["head_sha"] != head_sha, workflows_data)
|
||||
)
|
||||
|
||||
workflow_descriptions = [
|
||||
WorkflowDescription(
|
||||
run_id=wf["id"],
|
||||
status=wf["status"],
|
||||
rerun_url=wf["rerun_url"],
|
||||
cancel_url=wf["cancel_url"],
|
||||
conclusion=wf["conclusion"],
|
||||
)
|
||||
for wf in workflows_data
|
||||
]
|
||||
|
||||
return workflow_descriptions
|
||||
|
||||
|
||||
def get_workflow_description(workflow_id) -> WorkflowDescription:
|
||||
workflow = _exec_get_with_retry(API_URL + f"/actions/runs/{workflow_id}")
|
||||
return WorkflowDescription(
|
||||
run_id=workflow["id"],
|
||||
status=workflow["status"],
|
||||
rerun_url=workflow["rerun_url"],
|
||||
cancel_url=workflow["cancel_url"],
|
||||
conclusion=workflow["conclusion"],
|
||||
)
|
||||
|
||||
|
||||
@ -189,15 +280,39 @@ def main(event):
|
||||
if action == "closed" or "do not test" in labels:
|
||||
print("PR merged/closed or manually labeled 'do not test' will kill workflows")
|
||||
workflow_descriptions = get_workflows_description_for_pull_request(pull_request)
|
||||
workflow_descriptions = (
|
||||
workflow_descriptions or get_workflow_description_fallback(event_data)
|
||||
)
|
||||
urls_to_cancel = []
|
||||
for workflow_description in workflow_descriptions:
|
||||
if workflow_description.status != "completed":
|
||||
if (
|
||||
workflow_description.status != "completed"
|
||||
and workflow_description.conclusion != "cancelled"
|
||||
):
|
||||
urls_to_cancel.append(workflow_description.cancel_url)
|
||||
print(f"Found {len(urls_to_cancel)} workflows to cancel")
|
||||
exec_workflow_url(urls_to_cancel, token)
|
||||
elif action == "synchronize":
|
||||
print("PR is synchronized, going to stop old actions")
|
||||
workflow_descriptions = get_workflows_description_for_pull_request(pull_request)
|
||||
workflow_descriptions = (
|
||||
workflow_descriptions or get_workflow_description_fallback(event_data)
|
||||
)
|
||||
urls_to_cancel = []
|
||||
for workflow_description in workflow_descriptions:
|
||||
if (
|
||||
workflow_description.status != "completed"
|
||||
and workflow_description.conclusion != "cancelled"
|
||||
):
|
||||
urls_to_cancel.append(workflow_description.cancel_url)
|
||||
print(f"Found {len(urls_to_cancel)} workflows to cancel")
|
||||
exec_workflow_url(urls_to_cancel, token)
|
||||
elif action == "labeled" and "can be tested" in labels:
|
||||
print("PR marked with can be tested label, rerun workflow")
|
||||
workflow_descriptions = get_workflows_description_for_pull_request(pull_request)
|
||||
workflow_descriptions = (
|
||||
workflow_descriptions or get_workflow_description_fallback(event_data)
|
||||
)
|
||||
if not workflow_descriptions:
|
||||
print("Not found any workflows")
|
||||
return
|
||||
@ -205,7 +320,10 @@ def main(event):
|
||||
sorted_workflows = list(sorted(workflow_descriptions, key=lambda x: x.run_id))
|
||||
most_recent_workflow = sorted_workflows[-1]
|
||||
print("Latest workflow", most_recent_workflow)
|
||||
if most_recent_workflow.status != "completed":
|
||||
if (
|
||||
most_recent_workflow.status != "completed"
|
||||
and most_recent_workflow.conclusion != "cancelled"
|
||||
):
|
||||
print("Latest workflow is not completed, cancelling")
|
||||
exec_workflow_url([most_recent_workflow.cancel_url], token)
|
||||
print("Cancelled")
|
||||
|
@ -1,12 +1,13 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
import requests
|
||||
import argparse
|
||||
import json
|
||||
|
||||
from threading import Thread
|
||||
from queue import Queue
|
||||
|
||||
import requests # type: ignore
|
||||
|
||||
|
||||
def get_org_team_members(token: str, org: str, team_slug: str) -> tuple:
|
||||
headers = {
|
||||
@ -37,7 +38,7 @@ def get_members_keys(members: tuple) -> str:
|
||||
self.results.append(f"# {m}\n{response.text}")
|
||||
self.queue.task_done()
|
||||
|
||||
q = Queue()
|
||||
q = Queue() # type: Queue
|
||||
workers = []
|
||||
for m in members:
|
||||
q.put(m)
|
||||
@ -61,7 +62,7 @@ def get_members_keys(members: tuple) -> str:
|
||||
|
||||
|
||||
def get_token_from_aws() -> str:
|
||||
import boto3
|
||||
import boto3 # type: ignore
|
||||
|
||||
secret_name = "clickhouse_robot_token"
|
||||
session = boto3.session.Session()
|
||||
@ -81,6 +82,8 @@ def main(token: str, org: str, team_slug: str) -> str:
|
||||
|
||||
|
||||
def handler(event, context):
|
||||
_ = context
|
||||
_ = event
|
||||
token = get_token_from_aws()
|
||||
result = {
|
||||
"statusCode": 200,
|
||||
|
@ -1,13 +1,13 @@
|
||||
FROM public.ecr.aws/lambda/python:3.9
|
||||
|
||||
# Copy function code
|
||||
COPY app.py ${LAMBDA_TASK_ROOT}
|
||||
|
||||
# Install the function's dependencies using file requirements.txt
|
||||
# from your project folder.
|
||||
|
||||
COPY requirements.txt .
|
||||
RUN pip3 install -r requirements.txt --target "${LAMBDA_TASK_ROOT}"
|
||||
|
||||
# Copy function code
|
||||
COPY app.py ${LAMBDA_TASK_ROOT}
|
||||
|
||||
# Set the CMD to your handler (could also be done as a parameter override outside of the Dockerfile)
|
||||
CMD [ "app.handler" ]
|
||||
|
@ -394,7 +394,7 @@ def rerun_workflow(workflow_description, token):
|
||||
def main(event):
|
||||
token = get_token_from_aws()
|
||||
event_data = json.loads(event["body"])
|
||||
print("The body received:", event_data)
|
||||
print("The body received:", event["body"])
|
||||
workflow_description = get_workflow_description_from_event(event_data)
|
||||
|
||||
print("Got workflow description", workflow_description)
|
||||
|
@ -1,5 +1 @@
|
||||
1
|
||||
CREATE TEMPORARY TABLE t3_00841\n(\n `x` UInt32\n)\nENGINE = Memory
|
||||
1
|
||||
CREATE TEMPORARY TABLE t4_00841\n(\n `x` Int32\n)\nENGINE = Memory
|
||||
1
|
||||
|
@ -3,13 +3,3 @@ INSERT INTO t1_00841 VALUES (1);
|
||||
SELECT * FROM t1_00841;
|
||||
|
||||
CREATE TEMPORARY TABLE test.t2_00841 (x UInt8); -- { serverError 442 }
|
||||
|
||||
CREATE TEMPORARY TABLE t3_00841 (x INT(11) UNSIGNED);
|
||||
SHOW CREATE TEMPORARY TABLE t3_00841;
|
||||
INSERT INTO t3_00841 VALUES (1);
|
||||
SELECT * FROM t3_00841;
|
||||
|
||||
CREATE TEMPORARY TABLE t4_00841 (x INT(11) SIGNED);
|
||||
SHOW CREATE TEMPORARY TABLE t4_00841;
|
||||
INSERT INTO t4_00841 VALUES (1);
|
||||
SELECT * FROM t4_00841;
|
||||
|
1
tests/queries/0_stateless/02270_client_name.reference
Normal file
@ -0,0 +1 @@
|
||||
"ClickHouse"
|
3
tests/queries/0_stateless/02270_client_name.sql
Normal file
@ -0,0 +1,3 @@
|
||||
select 1 settings log_queries=1, log_queries_min_type='QUERY_FINISH' format Null;
|
||||
system flush logs;
|
||||
select client_name from system.query_log where current_database = currentDatabase() and query like 'select 1%' format CSV;
|
@ -1,250 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
import argparse
|
||||
import subprocess
|
||||
import os
|
||||
import logging
|
||||
import shutil
|
||||
import base64
|
||||
import pexpect
|
||||
|
||||
|
||||
# Do nothing if keys are not provided
|
||||
class GpgKey(object):
|
||||
gnupg_dir = os.path.expanduser('~/.gnupg')
|
||||
TEMPGNUPG_DIR = os.path.expanduser('~/.local/tempgnupg')
|
||||
|
||||
def __init__(self, secret_key_path, public_key_path):
|
||||
if secret_key_path and public_key_path:
|
||||
with open(secret_key_path, 'r') as sec, open(public_key_path, 'r') as pub:
|
||||
self._secret_key = sec.read()
|
||||
self._public_key = pub.read()
|
||||
else:
|
||||
self._secret_key = None
|
||||
self._public_key = None
|
||||
|
||||
def __enter__(self):
|
||||
if self._secret_key and self._public_key:
|
||||
if os.path.exists(self.gnupg_dir):
|
||||
shutil.move(self.gnupg_dir, self.TEMPGNUPG_DIR)
|
||||
os.mkdir(self.gnupg_dir)
|
||||
open(os.path.join(self.gnupg_dir, 'secring.gpg'), 'wb').write(base64.b64decode(self._secret_key))
|
||||
open(os.path.join(self.gnupg_dir, 'pubring.gpg'), 'wb').write(base64.b64decode(self._public_key))
|
||||
return self
|
||||
|
||||
def __exit__(self, exc_type, exc_val, exc_tb):
|
||||
if self._secret_key and self._public_key:
|
||||
shutil.rmtree(self.gnupg_dir)
|
||||
if os.path.exists(self.TEMPGNUPG_DIR):
|
||||
shutil.move(self.TEMPGNUPG_DIR, self.gnupg_dir)
|
||||
|
||||
|
||||
class DebRelease(object):
|
||||
|
||||
DUPLOAD_CONF_TEMPLATE = '\n\t'.join((
|
||||
"$cfg{{'{title}'}} = {{",
|
||||
'fqdn => "{fqdn}",',
|
||||
'method => "{method}",',
|
||||
'login => "{login}",',
|
||||
'incoming => "{incoming}",',
|
||||
'options => "{options}",',
|
||||
'dinstall_runs => {dinstall_runs},\n}};',))
|
||||
DUPLOAD_CONF_PATH = os.path.expanduser('~/.dupload.conf')
|
||||
DUPLOAD_CONF_TMP_PATH = os.path.expanduser('~/.local/tmp_dupload.cnf')
|
||||
|
||||
def __init__(self, dupload_config, login, ssh_key_path):
|
||||
self.__config = {}
|
||||
for repo, conf in dupload_config.items():
|
||||
d = {
|
||||
"fqdn": conf["fqdn"],
|
||||
"method": "scpb",
|
||||
"login": login,
|
||||
"incoming": conf["incoming"],
|
||||
"dinstall_runs": 0,
|
||||
"options": "-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ConnectionAttempts=3",
|
||||
}
|
||||
d.update(conf)
|
||||
self.__config[repo] = d
|
||||
print(self.__config)
|
||||
self.ssh_key_path = ssh_key_path
|
||||
|
||||
def __enter__(self):
|
||||
if os.path.exists(self.DUPLOAD_CONF_PATH):
|
||||
shutil.move(self.DUPLOAD_CONF_PATH, self.DUPLOAD_CONF_TMP_PATH)
|
||||
self.__dupload_conf = open(self.DUPLOAD_CONF_PATH, 'w')
|
||||
self.__dupload_conf.write('package config;\n\n$default_host = undef;\n\n' + '\n\n'.join([
|
||||
self.DUPLOAD_CONF_TEMPLATE.format(title=title, **values)
|
||||
for title, values in self.__config.items()]))
|
||||
self.__dupload_conf.write('\n')
|
||||
self.__dupload_conf.close()
|
||||
if self.ssh_key_path:
|
||||
subprocess.check_call("ssh-add {}".format(self.ssh_key_path), shell=True)
|
||||
return self
|
||||
|
||||
def __exit__(self, exc_type, exc_val, exc_tb):
|
||||
if os.path.exists(self.DUPLOAD_CONF_TMP_PATH):
|
||||
shutil.move(self.DUPLOAD_CONF_TMP_PATH, self.DUPLOAD_CONF_PATH)
|
||||
else:
|
||||
os.unlink(self.DUPLOAD_CONF_PATH)
|
||||
|
||||
|
||||
class SSHConnection(object):
|
||||
def __init__(self, user, host, ssh_key=None):
|
||||
if ssh_key:
|
||||
key_str = "-i {}".format(ssh_key)
|
||||
else:
|
||||
key_str = ""
|
||||
|
||||
self.base_cmd = "ssh {key} {user}@{host}".format(
|
||||
key=key_str, user=user, host=host)
|
||||
|
||||
def execute(self, cmd):
|
||||
logging.info("Executing remote cmd %s", cmd)
|
||||
subprocess.check_call(self.base_cmd + ' "{cmd}"'.format(cmd=cmd),
|
||||
shell=True)
|
||||
|
||||
|
||||
def debsign(path, gpg_passphrase, gpg_sec_key_path, gpg_pub_key_path, gpg_user):
|
||||
try:
|
||||
with GpgKey(gpg_sec_key_path, gpg_pub_key_path):
|
||||
cmd = ('debsign -k \'{key}\' -p"gpg --verbose --no-use-agent --batch '
|
||||
'--no-tty --passphrase {passphrase}" {path}/*.changes').format(
|
||||
key=gpg_user, passphrase=gpg_passphrase, path=path)
|
||||
logging.info("Build debsign cmd '%s'", cmd)
|
||||
subprocess.check_call(cmd, shell=True)
|
||||
logging.info("debsign finished")
|
||||
except Exception as ex:
|
||||
logging.error("Cannot debsign packages on path %s, with user key", path)
|
||||
raise ex
|
||||
|
||||
def rpmsign(path, gpg_passphrase, gpg_sec_key_path, gpg_pub_key_path, gpg_user):
|
||||
try:
|
||||
with GpgKey(gpg_sec_key_path, gpg_pub_key_path):
|
||||
for package in os.listdir(path):
|
||||
package_path = os.path.join(path, package)
|
||||
logging.info("Signing %s", package_path)
|
||||
proc = pexpect.spawn('rpm --resign -D "_signature gpg" -D "_gpg_name {username}" {package}'.format(username=gpg_user, package=package_path))
|
||||
proc.expect_exact("Enter pass phrase: ")
|
||||
proc.sendline(gpg_passphrase)
|
||||
proc.expect(pexpect.EOF)
|
||||
logging.info("Signed successfully")
|
||||
except Exception as ex:
|
||||
logging.error("Cannot rpmsign packages on path %s, with user key", path)
|
||||
raise ex
|
||||
|
||||
def transfer_packages_scp(ssh_key, path, repo_user, repo_url, incoming_directory):
|
||||
logging.info("Transferring packages via scp to %s", repo_url)
|
||||
if ssh_key:
|
||||
key_str = "-i {}".format(ssh_key)
|
||||
else:
|
||||
key_str = ""
|
||||
subprocess.check_call('scp {key_str} {path}/* {user}@{repo}:{incoming}'.format(
|
||||
path=path, user=repo_user, repo=repo_url, key_str=key_str, incoming=incoming_directory), shell=True)
|
||||
logging.info("Transfer via scp finished")
|
||||
|
||||
def transfer_packages_dupload(ssh_key, path, repo_user, repo_url, incoming_directory):
|
||||
repo_short_name = repo_url.split('.')[0]
|
||||
config = {
|
||||
repo_short_name: {
|
||||
"fqdn": repo_url,
|
||||
"incoming": incoming_directory,
|
||||
}
|
||||
}
|
||||
with DebRelease(config, repo_user, ssh_key):
|
||||
logging.info("Duploading")
|
||||
subprocess.check_call("dupload -f --nomail --to {repo} {path}".format(repo=repo_short_name, path=path), shell=True)
|
||||
logging.info("Dupload finished")
|
||||
|
||||
|
||||
def clear_old_incoming_packages(ssh_connection, user):
|
||||
for pkg in ('deb', 'rpm', 'tgz'):
|
||||
for release_type in ('stable', 'testing', 'prestable', 'lts'):
|
||||
try:
|
||||
ssh_connection.execute("rm /home/{user}/incoming/clickhouse/{pkg}/{release_type}/*".format(
|
||||
user=user, pkg=pkg, release_type=release_type))
|
||||
except Exception:
|
||||
logging.info("rm is not required")
|
||||
|
||||
|
||||
def _get_incoming_path(repo_url, user=None, pkg_type=None, release_type=None):
|
||||
if repo_url == 'repo.mirror.yandex.net':
|
||||
return "/home/{user}/incoming/clickhouse/{pkg}/{release_type}".format(
|
||||
user=user, pkg=pkg_type, release_type=release_type)
|
||||
else:
|
||||
return "/repo/{0}/mini-dinstall/incoming/".format(repo_url.split('.')[0])
|
||||
|
||||
|
||||
def _fix_args(args):
|
||||
|
||||
if args.gpg_sec_key_path and not os.path.isabs(args.gpg_sec_key_path):
|
||||
args.gpg_sec_key_path = os.path.join(os.getcwd(), args.gpg_sec_key_path)
|
||||
|
||||
if args.gpg_pub_key_path and not os.path.isabs(args.gpg_pub_key_path):
|
||||
args.gpg_pub_key_path = os.path.join(os.getcwd(), args.gpg_pub_key_path)
|
||||
|
||||
if args.ssh_key_path and not os.path.isabs(args.ssh_key_path):
|
||||
args.ssh_key_path = os.path.join(os.getcwd(), args.ssh_key_path)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
|
||||
parser = argparse.ArgumentParser(description="Program to push clickhouse packages to repository")
|
||||
parser.add_argument('--deb-directory')
|
||||
parser.add_argument('--rpm-directory')
|
||||
parser.add_argument('--tgz-directory')
|
||||
parser.add_argument('--release-type', choices=('testing', 'stable', 'prestable', 'lts'), default='testing')
|
||||
parser.add_argument('--ssh-key-path')
|
||||
parser.add_argument('--gpg-passphrase', required=True)
|
||||
parser.add_argument('--gpg-sec-key-path')
|
||||
parser.add_argument('--gpg-pub-key-path')
|
||||
parser.add_argument('--gpg-key-user', default='robot-clickhouse')
|
||||
parser.add_argument('--repo-url', default='repo.mirror.yandex.net')
|
||||
parser.add_argument('--repo-user', default='buildfarm')
|
||||
|
||||
args = parser.parse_args()
|
||||
if args.deb_directory is None and args.rpm_directory is None and args.tgz_directory is None:
|
||||
parser.error('At least one package directory required')
|
||||
|
||||
_fix_args(args)
|
||||
|
||||
is_open_source = args.repo_url == 'repo.mirror.yandex.net'
|
||||
ssh_connection = SSHConnection(args.repo_user, args.repo_url, args.ssh_key_path)
|
||||
|
||||
packages = []
|
||||
if args.deb_directory:
|
||||
debsign(args.deb_directory, args.gpg_passphrase, args.gpg_sec_key_path, args.gpg_pub_key_path, args.gpg_key_user)
|
||||
packages.append((args.deb_directory, 'deb'))
|
||||
|
||||
if args.rpm_directory:
|
||||
if not is_open_source:
|
||||
raise Exception("Cannot upload .rpm package to {}".format(args.repo_url))
|
||||
rpmsign(args.rpm_directory, args.gpg_passphrase, args.gpg_sec_key_path, args.gpg_pub_key_path, args.gpg_key_user)
|
||||
packages.append((args.rpm_directory, 'rpm'))
|
||||
|
||||
if args.tgz_directory:
|
||||
if not is_open_source:
|
||||
raise Exception("Cannot upload .tgz package to {}".format(args.repo_url))
|
||||
packages.append((args.tgz_directory, 'tgz'))
|
||||
|
||||
if is_open_source:
|
||||
logging.info("Clearing old directory with incoming packages on buildfarm")
|
||||
clear_old_incoming_packages(ssh_connection, args.repo_user)
|
||||
logging.info("Incoming directory cleared")
|
||||
|
||||
for package_path, package_type in packages:
|
||||
logging.info("Processing path '%s' with package type %s", package_path, package_type)
|
||||
incoming_directory = _get_incoming_path(args.repo_url, args.repo_user, package_type, args.release_type)
|
||||
if package_type == "deb":
|
||||
transfer_packages_dupload(args.ssh_key_path, package_path, args.repo_user, args.repo_url, incoming_directory)
|
||||
else:
|
||||
transfer_packages_scp(args.ssh_key_path, package_path, args.repo_user, args.repo_url, incoming_directory)
|
||||
|
||||
logging.info("Running clickhouse install (it takes about (20-30 minutes)")
|
||||
ssh_connection.execute("sudo /usr/sbin/ya-clickhouse-{0}-install".format(package_type))
|
||||
logging.info("Clickhouse installed")
|
||||
logging.info("Pushing clickhouse to repo")
|
||||
ssh_connection.execute("/usr/sbin/push2publicrepo.sh clickhouse")
|
||||
logging.info("Push finished")
|
||||
logging.info("Package '%s' pushed", package_type)
|
||||
else:
|
||||
transfer_packages_dupload(args.ssh_key_path, args.deb_directory, args.repo_user, args.repo_url, _get_incoming_path(args.repo_url))
|
@ -1,300 +0,0 @@
|
||||
set +e
|
||||
# set -x
|
||||
|
||||
function gen_version_string {
|
||||
if [ -n "$TEST" ]; then
|
||||
VERSION_STRING="$VERSION_MAJOR.$VERSION_MINOR.$VERSION_PATCH.$VERSION_REVISION"
|
||||
else
|
||||
VERSION_STRING="$VERSION_MAJOR.$VERSION_MINOR.$VERSION_PATCH"
|
||||
fi
|
||||
}
|
||||
|
||||
function get_version {
|
||||
if [ -z "$VERSION_MAJOR" ] && [ -z "$VERSION_MINOR" ] && [ -z "$VERSION_PATCH" ]; then
|
||||
BASEDIR=$(dirname "${BASH_SOURCE[0]}")/../../
|
||||
VERSION_REVISION=`grep "SET(VERSION_REVISION" ${BASEDIR}/cmake/autogenerated_versions.txt | sed 's/^.*VERSION_REVISION \(.*\)$/\1/' | sed 's/[) ].*//'`
|
||||
VERSION_MAJOR=`grep "SET(VERSION_MAJOR" ${BASEDIR}/cmake/autogenerated_versions.txt | sed 's/^.*VERSION_MAJOR \(.*\)/\1/' | sed 's/[) ].*//'`
|
||||
VERSION_MINOR=`grep "SET(VERSION_MINOR" ${BASEDIR}/cmake/autogenerated_versions.txt | sed 's/^.*VERSION_MINOR \(.*\)/\1/' | sed 's/[) ].*//'`
|
||||
VERSION_PATCH=`grep "SET(VERSION_PATCH" ${BASEDIR}/cmake/autogenerated_versions.txt | sed 's/^.*VERSION_PATCH \(.*\)/\1/' | sed 's/[) ].*//'`
|
||||
fi
|
||||
VERSION_PREFIX="${VERSION_PREFIX:-v}"
|
||||
VERSION_POSTFIX_TAG="${VERSION_POSTFIX:--testing}"
|
||||
|
||||
gen_version_string
|
||||
}
|
||||
|
||||
function get_author {
|
||||
AUTHOR=$(git config --get user.name || echo ${USER})
|
||||
echo $AUTHOR
|
||||
}
|
||||
|
||||
# Generate revision number.
|
||||
# set environment variables REVISION, AUTHOR
|
||||
function gen_revision_author {
|
||||
TYPE=$1
|
||||
get_version
|
||||
|
||||
if [[ $STANDALONE != 'yes' ]]; then
|
||||
|
||||
git fetch --tags
|
||||
|
||||
succeeded=0
|
||||
attempts=0
|
||||
max_attempts=1000
|
||||
while [ $succeeded -eq 0 ] && [ $attempts -le $max_attempts ]; do
|
||||
attempts=$(($attempts + 1))
|
||||
|
||||
if [ "$TYPE" == "major" ]; then
|
||||
VERSION_REVISION=$(($VERSION_REVISION + 1))
|
||||
VERSION_MAJOR=$(($VERSION_MAJOR + 1))
|
||||
VERSION_MINOR=1
|
||||
# Version cannot be zero, otherwise it breaks CMake
|
||||
VERSION_PATCH=1
|
||||
elif [ "$TYPE" == "minor" ] || [ "$TYPE" == "" ]; then
|
||||
VERSION_REVISION=$(($VERSION_REVISION + 1))
|
||||
VERSION_MINOR=$(($VERSION_MINOR + 1))
|
||||
VERSION_PATCH=1
|
||||
elif [ "$TYPE" == "patch" ] || [ "$TYPE" == "bugfix" ]; then
|
||||
# VERSION_REVISION not incremented in new scheme.
|
||||
if [ "$VERSION_MAJOR" -eq "1" ] && [ "$VERSION_MINOR" -eq "1" ]; then
|
||||
VERSION_REVISION=$(($VERSION_REVISION + 1))
|
||||
fi
|
||||
|
||||
VERSION_PATCH=$(($VERSION_PATCH + 1))
|
||||
elif [ "$TYPE" == "env" ]; then
|
||||
echo "Will build revision from env variables -- $VERSION_MAJOR.$VERSION_MINOR.$VERSION_PATCH"
|
||||
else
|
||||
echo "Unknown version type $TYPE"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
gen_version_string
|
||||
|
||||
git_tag_grep=`git tag | grep "$VERSION_PREFIX$VERSION_STRING$VERSION_POSTFIX_TAG"`
|
||||
if [ "$git_tag_grep" == "" ]; then
|
||||
succeeded=1
|
||||
fi
|
||||
done
|
||||
if [ $succeeded -eq 0 ]; then
|
||||
echo "Fail to create revision up to $VERSION_REVISION"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
auto_message="Auto version update to"
|
||||
git_log_grep=`git log --oneline --max-count=1 | grep "$auto_message"`
|
||||
if [ "$git_log_grep" == "" ]; then
|
||||
tag="$VERSION_PREFIX$VERSION_STRING$VERSION_POSTFIX_TAG"
|
||||
|
||||
# First tag for correct git describe
|
||||
echo -e "\nTrying to create tag: $tag"
|
||||
git tag -a "$tag" -m "$tag"
|
||||
|
||||
git_describe=`git describe`
|
||||
git_hash=`git rev-parse HEAD`
|
||||
VERSION_DATE=`git show -s --format=%cs $git_hash`
|
||||
|
||||
sed -i -e "s/SET(VERSION_REVISION [^) ]*/SET(VERSION_REVISION $VERSION_REVISION/g;" \
|
||||
-e "s/SET(VERSION_DESCRIBE [^) ]*/SET(VERSION_DESCRIBE $git_describe/g;" \
|
||||
-e "s/SET(VERSION_GITHASH [^) ]*/SET(VERSION_GITHASH $git_hash/g;" \
|
||||
-e "s/SET(VERSION_DATE [^) ]*/SET(VERSION_DATE $VERSION_DATE/g;" \
|
||||
-e "s/SET(VERSION_MAJOR [^) ]*/SET(VERSION_MAJOR $VERSION_MAJOR/g;" \
|
||||
-e "s/SET(VERSION_MINOR [^) ]*/SET(VERSION_MINOR $VERSION_MINOR/g;" \
|
||||
-e "s/SET(VERSION_PATCH [^) ]*/SET(VERSION_PATCH $VERSION_PATCH/g;" \
|
||||
-e "s/SET(VERSION_STRING [^) ]*/SET(VERSION_STRING $VERSION_STRING/g;" \
|
||||
cmake/autogenerated_versions.txt
|
||||
|
||||
gen_changelog "$VERSION_STRING" "" "$AUTHOR" ""
|
||||
gen_dockerfiles "$VERSION_STRING"
|
||||
src/Storages/System/StorageSystemContributors.sh ||:
|
||||
utils/list-versions/list-versions.sh > utils/list-versions/version_date.tsv
|
||||
|
||||
git commit -m "$auto_message [$VERSION_STRING] [$VERSION_REVISION]" cmake/autogenerated_versions.txt debian/changelog docker/*/Dockerfile src/Storages/System/StorageSystemContributors.generated.cpp utils/list-versions/version_date.tsv
|
||||
if [ -z $NO_PUSH ]; then
|
||||
git push
|
||||
fi
|
||||
|
||||
echo "Generated version: ${VERSION_STRING}, revision: ${VERSION_REVISION}."
|
||||
|
||||
# Second tag for correct version information in autogenerated_versions.txt inside tag
|
||||
if git tag --force -a "$tag" -m "$tag"
|
||||
then
|
||||
if [ -z $NO_PUSH ]; then
|
||||
echo -e "\nTrying to push tag to origin: $tag"
|
||||
git push origin "$tag"
|
||||
if [ $? -ne 0 ]
|
||||
then
|
||||
git tag -d "$tag"
|
||||
echo "Fail to create tag"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
else
|
||||
get_version
|
||||
echo "Reusing old version $VERSION_STRING"
|
||||
fi
|
||||
fi
|
||||
|
||||
AUTHOR=$(git config --get user.name || echo ${USER})
|
||||
export AUTHOR
|
||||
}
|
||||
|
||||
function get_revision_author {
|
||||
get_version
|
||||
AUTHOR=$(get_author)
|
||||
export AUTHOR
|
||||
}
|
||||
|
||||
# Generate changelog from changelog.in.
|
||||
function gen_changelog {
|
||||
VERSION_STRING="$1"
|
||||
CHDATE="$2"
|
||||
AUTHOR="$3"
|
||||
CHLOG="$4"
|
||||
if [ -z "$VERSION_STRING" ] ; then
|
||||
get_revision_author
|
||||
fi
|
||||
|
||||
if [ -z "$CHLOG" ] ; then
|
||||
CHLOG=debian/changelog
|
||||
fi
|
||||
|
||||
if [ -z "$CHDATE" ] ; then
|
||||
CHDATE=$(LC_ALL=C date -R | sed -e 's/,/\\,/g') # Replace commas with '\,'
|
||||
fi
|
||||
|
||||
sed \
|
||||
-e "s/[@]VERSION_STRING[@]/$VERSION_STRING/g" \
|
||||
-e "s/[@]DATE[@]/$CHDATE/g" \
|
||||
-e "s/[@]AUTHOR[@]/$AUTHOR/g" \
|
||||
-e "s/[@]EMAIL[@]/$(whoami)@clickhouse.com/g" \
|
||||
< $CHLOG.in > $CHLOG
|
||||
}
|
||||
|
||||
# Change package versions that are installed for Docker images.
|
||||
function gen_dockerfiles {
|
||||
VERSION_STRING="$1"
|
||||
ls -1 docker/*/Dockerfile | xargs sed -i -r -e 's/ARG version=.+$/ARG version='$VERSION_STRING'/'
|
||||
}
|
||||
|
||||
function make_rpm {
|
||||
[ -z "$VERSION_STRING" ] && get_version && VERSION_STRING+=${VERSION_POSTFIX}
|
||||
VERSION_FULL="${VERSION_STRING}"
|
||||
PACKAGE_DIR=${PACKAGE_DIR=../}
|
||||
|
||||
function deb_unpack {
|
||||
rm -rf $PACKAGE-$VERSION_FULL
|
||||
alien --verbose --generate --to-rpm --scripts ${PACKAGE_DIR}${PACKAGE}_${VERSION_FULL}_${ARCH}.deb
|
||||
cd $PACKAGE-$VERSION_FULL
|
||||
mv ${PACKAGE}-$VERSION_FULL-2.spec ${PACKAGE}-$VERSION_FULL-2.spec.tmp
|
||||
cat ${PACKAGE}-$VERSION_FULL-2.spec.tmp \
|
||||
| grep -vF '%dir "/"' \
|
||||
| grep -vF '%dir "/usr/"' \
|
||||
| grep -vF '%dir "/usr/bin/"' \
|
||||
| grep -vF '%dir "/usr/lib/"' \
|
||||
| grep -vF '%dir "/usr/lib/debug/"' \
|
||||
| grep -vF '%dir "/usr/lib/.build-id/"' \
|
||||
| grep -vF '%dir "/usr/share/"' \
|
||||
| grep -vF '%dir "/usr/share/doc/"' \
|
||||
| grep -vF '%dir "/lib/"' \
|
||||
| grep -vF '%dir "/lib/systemd/"' \
|
||||
| grep -vF '%dir "/lib/systemd/system/"' \
|
||||
| grep -vF '%dir "/etc/"' \
|
||||
| grep -vF '%dir "/etc/security/"' \
|
||||
| grep -vF '%dir "/etc/security/limits.d/"' \
|
||||
| grep -vF '%dir "/etc/init.d/"' \
|
||||
| grep -vF '%dir "/etc/cron.d/"' \
|
||||
| grep -vF '%dir "/etc/systemd/system/"' \
|
||||
| grep -vF '%dir "/etc/systemd/"' \
|
||||
| sed -e 's|%config |%config(noreplace) |' \
|
||||
> ${PACKAGE}-$VERSION_FULL-2.spec
|
||||
}
|
||||
|
||||
function rpm_pack {
|
||||
rpmbuild --buildroot="$CUR_DIR/${PACKAGE}-$VERSION_FULL" -bb --target ${TARGET} "${PACKAGE}-$VERSION_FULL-2.spec"
|
||||
cd $CUR_DIR
|
||||
}
|
||||
|
||||
function unpack_pack {
|
||||
deb_unpack
|
||||
rpm_pack
|
||||
}
|
||||
|
||||
PACKAGE=clickhouse-server
|
||||
ARCH=all
|
||||
TARGET=noarch
|
||||
deb_unpack
|
||||
mv ${PACKAGE}-$VERSION_FULL-2.spec ${PACKAGE}-$VERSION_FULL-2.spec_tmp
|
||||
echo "Requires: clickhouse-common-static = $VERSION_FULL-2" >> ${PACKAGE}-$VERSION_FULL-2.spec
|
||||
echo "Requires: tzdata" >> ${PACKAGE}-$VERSION_FULL-2.spec
|
||||
echo "Requires: initscripts" >> ${PACKAGE}-$VERSION_FULL-2.spec
|
||||
echo "Obsoletes: clickhouse-server-common < $VERSION_FULL" >> ${PACKAGE}-$VERSION_FULL-2.spec
|
||||
|
||||
cat ${PACKAGE}-$VERSION_FULL-2.spec_tmp >> ${PACKAGE}-$VERSION_FULL-2.spec
|
||||
rpm_pack
|
||||
|
||||
PACKAGE=clickhouse-client
|
||||
ARCH=all
|
||||
TARGET=noarch
|
||||
deb_unpack
|
||||
mv ${PACKAGE}-$VERSION_FULL-2.spec ${PACKAGE}-$VERSION_FULL-2.spec_tmp
|
||||
echo "Requires: clickhouse-common-static = $VERSION_FULL-2" >> ${PACKAGE}-$VERSION_FULL-2.spec
|
||||
cat ${PACKAGE}-$VERSION_FULL-2.spec_tmp >> ${PACKAGE}-$VERSION_FULL-2.spec
|
||||
rpm_pack
|
||||
|
||||
PACKAGE=clickhouse-common-static
|
||||
ARCH=amd64
|
||||
TARGET=x86_64
|
||||
unpack_pack
|
||||
|
||||
PACKAGE=clickhouse-common-static-dbg
|
||||
ARCH=amd64
|
||||
TARGET=x86_64
|
||||
unpack_pack
|
||||
|
||||
mv clickhouse-*-${VERSION_FULL}-2.*.rpm ${PACKAGE_DIR}
|
||||
}
|
||||
|
||||
function make_tgz {
|
||||
[ -z "$VERSION_STRING" ] && get_version && VERSION_STRING+=${VERSION_POSTFIX}
|
||||
VERSION_FULL="${VERSION_STRING}"
|
||||
PACKAGE_DIR=${PACKAGE_DIR=../}
|
||||
|
||||
for PACKAGE in clickhouse-server clickhouse-client clickhouse-common-static clickhouse-common-static-dbg; do
|
||||
alien --verbose --scripts --generate --to-tgz ${PACKAGE_DIR}${PACKAGE}_${VERSION_FULL}_*.deb
|
||||
PKGDIR="./${PACKAGE}-${VERSION_FULL}"
|
||||
if [ ! -d "$PKGDIR/install" ]; then
|
||||
mkdir "$PKGDIR/install"
|
||||
fi
|
||||
|
||||
if [ ! -f "$PKGDIR/install/doinst.sh" ]; then
|
||||
echo '#!/bin/sh' > "$PKGDIR/install/doinst.sh"
|
||||
echo 'set -e' >> "$PKGDIR/install/doinst.sh"
|
||||
fi
|
||||
|
||||
SCRIPT_TEXT='
|
||||
SCRIPTPATH="$( cd "$(dirname "$0")" ; pwd -P )"
|
||||
for filepath in `find $SCRIPTPATH/.. -type f -or -type l | grep -v "\.\./install/"`; do
|
||||
destpath=${filepath##$SCRIPTPATH/..}
|
||||
mkdir -p $(dirname "$destpath")
|
||||
cp -r "$filepath" "$destpath"
|
||||
done
|
||||
'
|
||||
|
||||
echo "$SCRIPT_TEXT" | sed -i "2r /dev/stdin" "$PKGDIR/install/doinst.sh"
|
||||
|
||||
chmod +x "$PKGDIR/install/doinst.sh"
|
||||
|
||||
if [ -f "/usr/bin/pigz" ]; then
|
||||
tar --use-compress-program=pigz -cf "${PACKAGE}-${VERSION_FULL}.tgz" "$PKGDIR"
|
||||
else
|
||||
tar -czf "${PACKAGE}-${VERSION_FULL}.tgz" "$PKGDIR"
|
||||
fi
|
||||
|
||||
rm -r $PKGDIR
|
||||
done
|
||||
|
||||
|
||||
mv clickhouse-*-${VERSION_FULL}.tgz ${PACKAGE_DIR}
|
||||
}
|