Merge branch 'master' into cpuid-remove-trash

Alexey Milovidov 2022-04-17 16:30:30 +02:00
commit 27afc08d8f
32 changed files with 98 additions and 1539 deletions

18
debian/.gitignore vendored

@@ -1,18 +0,0 @@
control
copyright
tmp/
clickhouse-benchmark/
clickhouse-client.docs
clickhouse-client/
clickhouse-common-static-dbg/
clickhouse-common-static.docs
clickhouse-common-static/
clickhouse-server-base/
clickhouse-server-common/
clickhouse-server/
debhelper-build-stamp
files
*.debhelper.log
*.debhelper
*.substvars

223
debian/.pbuilderrc vendored

@@ -1,223 +0,0 @@
#
# sudo apt install pbuilder fakeroot debhelper debian-archive-keyring debian-keyring
#
# ubuntu:
# prepare old (trusty or earlier) host system:
# sudo ln -s gutsy /usr/share/debootstrap/scripts/eoan
# sudo ln -s gutsy /usr/share/debootstrap/scripts/disco
# sudo ln -s gutsy /usr/share/debootstrap/scripts/cosmic
# sudo ln -s gutsy /usr/share/debootstrap/scripts/artful
# sudo ln -s gutsy /usr/share/debootstrap/scripts/bionic
# sudo ln -s sid /usr/share/debootstrap/scripts/buster
# build ubuntu:
# sudo DIST=bionic pbuilder create --configfile debian/.pbuilderrc && DIST=bionic pdebuild --configfile debian/.pbuilderrc
# sudo DIST=cosmic pbuilder create --configfile debian/.pbuilderrc && DIST=cosmic pdebuild --configfile debian/.pbuilderrc
# sudo DIST=disco pbuilder create --configfile debian/.pbuilderrc && DIST=disco pdebuild --configfile debian/.pbuilderrc
# sudo DIST=eoan pbuilder create --configfile debian/.pbuilderrc && DIST=eoan pdebuild --configfile debian/.pbuilderrc
# sudo DIST=devel pbuilder create --configfile debian/.pbuilderrc && DIST=devel pdebuild --configfile debian/.pbuilderrc
# build debian:
# sudo DIST=stable pbuilder create --configfile debian/.pbuilderrc && DIST=stable pdebuild --configfile debian/.pbuilderrc
# sudo DIST=testing pbuilder create --configfile debian/.pbuilderrc && DIST=testing pdebuild --configfile debian/.pbuilderrc
# sudo DIST=unstable pbuilder create --configfile debian/.pbuilderrc && DIST=unstable pdebuild --configfile debian/.pbuilderrc
# sudo DIST=experimental pbuilder create --configfile debian/.pbuilderrc && DIST=experimental pdebuild --configfile debian/.pbuilderrc
# build i386 experimental:
# sudo DIST=trusty ARCH=i386 pbuilder create --configfile debian/.pbuilderrc && DIST=trusty ARCH=i386 pdebuild --configfile debian/.pbuilderrc
# sudo DIST=xenial ARCH=i386 pbuilder create --configfile debian/.pbuilderrc && DIST=xenial ARCH=i386 pdebuild --configfile debian/.pbuilderrc
# sudo DIST=zesty ARCH=i386 pbuilder create --configfile debian/.pbuilderrc && DIST=zesty ARCH=i386 pdebuild --configfile debian/.pbuilderrc
# sudo DIST=artful ARCH=i386 pbuilder create --configfile debian/.pbuilderrc && DIST=artful ARCH=i386 pdebuild --configfile debian/.pbuilderrc
# sudo DIST=bionic ARCH=i386 pbuilder create --configfile debian/.pbuilderrc && DIST=bionic ARCH=i386 pdebuild --configfile debian/.pbuilderrc
# sudo DIST=stable ARCH=i386 pbuilder create --configfile debian/.pbuilderrc && DIST=stable ARCH=i386 pdebuild --configfile debian/.pbuilderrc
# sudo DIST=testing ARCH=i386 pbuilder create --configfile debian/.pbuilderrc && DIST=testing ARCH=i386 pdebuild --configfile debian/.pbuilderrc
# sudo DIST=experimental ARCH=i386 pbuilder create --configfile debian/.pbuilderrc && DIST=experimental ARCH=i386 pdebuild --configfile debian/.pbuilderrc
# test gcc-9
# env DEB_CC=gcc-9 DEB_CXX=g++-9 EXTRAPACKAGES="g++-9 gcc-9" DIST=disco pdebuild --configfile debian/.pbuilderrc
# use only clang:
# env DEB_CC=clang-8 DEB_CXX=clang++-8 EXTRAPACKAGES=clang-8 DIST=disco pdebuild --configfile debian/.pbuilderrc
# env DEB_CC=clang-5.0 DEB_CXX=clang++-5.0 EXTRAPACKAGES=clang-5.0 DIST=artful pdebuild --configfile debian/.pbuilderrc
# clang+asan:
# env DEB_CC=clang-5.0 DEB_CXX=clang++-5.0 EXTRAPACKAGES="clang-5.0 libc++abi-dev libc++-dev" CMAKE_FLAGS="-DENABLE_TCMALLOC=0 -DENABLE_UNWIND=0 -DCMAKE_BUILD_TYPE=Asan" DIST=artful pdebuild --configfile debian/.pbuilderrc
# clang+tsan:
# env DEB_CC=clang-5.0 DEB_CXX=clang++-5.0 EXTRAPACKAGES="clang-5.0 libc++abi-dev libc++-dev" CMAKE_FLAGS="-DCMAKE_BUILD_TYPE=Tsan" DIST=artful pdebuild --configfile debian/.pbuilderrc
# without sse for old systems and some VM:
# env DH_VERBOSE=1 CMAKE_FLAGS="-DHAVE_SSE41=0 -DHAVE_SSE42=0 -DHAVE_POPCNT=0 -DHAVE_SSE2_INTRIN=0 -DSSE2FLAG=' ' -DHAVE_SSE42_INTRIN=0 -DSSE4FLAG=' ' -DHAVE_PCLMULQDQ_INTRIN=0 -DPCLMULFLAG=' '" DIST=artful pdebuild --configfile debian/.pbuilderrc
# Note: on trusty host creating some future dists can fail (debootstrap error).
# Your packages built here: /var/cache/pbuilder/*-*/result
# from https://wiki.debian.org/PbuilderTricks :
# Codenames for Debian suites according to their alias. Update these when
# needed.
UNSTABLE_CODENAME="sid"
TESTING_CODENAME="buster"
STABLE_CODENAME="stretch"
STABLE_BACKPORTS_SUITE="$STABLE_CODENAME-backports"
# List of Debian suites.
DEBIAN_SUITES=($UNSTABLE_CODENAME $TESTING_CODENAME $STABLE_CODENAME $STABLE_BACKPORTS_SUITE
"experimental" "unstable" "testing" "stable")
# List of Ubuntu suites. Update these when needed.
UBUNTU_SUITES=("eoan" "disco" "cosmic" "bionic" "artful" "zesty" "xenial" "trusty" "devel")
# Set a default distribution if none is used. Note that you can set your own default (i.e. ${DIST:="unstable"}).
HOST_DIST=`lsb_release --short --codename`
: ${DIST:="$HOST_DIST"}
# Optionally change Debian codenames in $DIST to their aliases.
case "$DIST" in
$UNSTABLE_CODENAME)
DIST="unstable"
;;
$TESTING_CODENAME)
DIST="testing"
;;
$STABLE_CODENAME)
DIST="stable"
;;
esac
# Optionally set the architecture to the host architecture if none set. Note
# that you can set your own default (i.e. ${ARCH:="i386"}).
: ${ARCH:="$(dpkg --print-architecture)"}
NAME="$DIST"
if [ -n "${ARCH}" ]; then
NAME="$NAME-$ARCH"
DEBOOTSTRAPOPTS=("--arch" "$ARCH" "${DEBOOTSTRAPOPTS[@]}")
fi
BASETGZ=${SET_BASETGZ}
BASETGZ=${BASETGZ:="/var/cache/pbuilder/$NAME-base.tgz"}
DISTRIBUTION="$DIST"
BUILDRESULT=${SET_BUILDRESULT}
BUILDRESULT=${BUILDRESULT:="/var/cache/pbuilder/$NAME/result/"}
APTCACHE="/var/cache/pbuilder/$NAME/aptcache/"
BUILDPLACE="/var/cache/pbuilder/build/"
ALLOWUNTRUSTED=${SET_ALLOWUNTRUSTED:=${ALLOWUNTRUSTED}}
#DEBOOTSTRAPOPTS=( '--variant=buildd' $SET_DEBOOTSTRAPOPTS )
if $(echo ${DEBIAN_SUITES[@]} | grep -q $DIST); then
# Debian configuration
OSNAME=debian
MIRRORSITE=${SET_MIRRORSITE="http://deb.debian.org/$OSNAME/"}
COMPONENTS="main contrib non-free"
if $(echo "$STABLE_CODENAME stable" | grep -q $DIST); then
OTHERMIRROR="$OTHERMIRROR | deb $MIRRORSITE $STABLE_BACKPORTS_SUITE $COMPONENTS"
fi
# APTKEYRINGS=/usr/share/keyrings/debian-archive-keyring.gpg
case "$HOST_DIST" in
"trusty" )
DEBOOTSTRAPOPTS+=( '--no-check-gpg' )
;;
*)
DEBOOTSTRAPOPTS+=( '--keyring' '/usr/share/keyrings/debian-archive-keyring.gpg' )
# DEBOOTSTRAPOPTS+=( '--keyring' '/usr/share/keyrings/debian-keyring.gpg' )
esac
elif $(echo ${UBUNTU_SUITES[@]} | grep -q $DIST); then
# Ubuntu configuration
OSNAME=ubuntu
if [[ "$ARCH" == "amd64" || "$ARCH" == "i386" ]]; then
MIRRORSITE=${SET_MIRRORSITE="http://archive.ubuntu.com/$OSNAME/"}
else
MIRRORSITE=${SET_MIRRORSITE="http://ports.ubuntu.com/ubuntu-ports/"}
fi
COMPONENTS="main restricted universe multiverse"
OTHERMIRROR="$OTHERMIRROR | deb $MIRRORSITE $DIST-updates main restricted universe multiverse"
OTHERMIRROR="$OTHERMIRROR | deb $MIRRORSITE $DIST-security main restricted universe multiverse"
OTHERMIRROR="$OTHERMIRROR | deb $MIRRORSITE $DIST-proposed main restricted universe multiverse"
case "$DIST" in
"trusty" | "xenial" )
OTHERMIRROR="$OTHERMIRROR | deb http://ppa.launchpad.net/ubuntu-toolchain-r/test/$OSNAME $DIST main"
ALLOWUNTRUSTED=yes
;;
esac
# deb http://apt.llvm.org/zesty/ llvm-toolchain-zesty-5.0 main
else
echo "Unknown distribution: $DIST"
exit 1
fi
echo "using $NAME $OSNAME $DIST $ARCH $LOGNAME $MIRRORSITE"
case "$DIST" in
"trusty")
# ccache broken
;;
*)
CCACHEDIR=${SET_CCACHEDIR:="/var/cache/pbuilder/ccache"}
;;
esac
# old systems with default gcc <= 6
case "$DIST" in
"trusty" | "xenial" | "stable" )
export DEB_CC=gcc-7
export DEB_CXX=g++-7
;;
esac
if [ "$ARCH" != arm64 ]; then
case "$DIST" in
# TODO: fix llvm-8 and use for "disco" and "eoan"
"experimental")
EXTRAPACKAGES+=" liblld-8-dev libclang-8-dev llvm-8-dev liblld-8 "
export CMAKE_FLAGS="-DLLVM_VERSION=8 $CMAKE_FLAGS"
;;
"eoan" | "disco" | "cosmic" | "testing" | "unstable")
EXTRAPACKAGES+=" liblld-7-dev libclang-7-dev llvm-7-dev liblld-7 "
export CMAKE_FLAGS="-DLLVM_VERSION=7 $CMAKE_FLAGS"
;;
"bionic")
EXTRAPACKAGES+=" liblld-6.0-dev libclang-6.0-dev liblld-6.0 "
export CMAKE_FLAGS="-DLLVM_VERSION=6 $CMAKE_FLAGS"
;;
"artful" )
EXTRAPACKAGES+=" liblld-5.0-dev libclang-5.0-dev liblld-5.0 "
;;
esac
else
export CMAKE_FLAGS="-DENABLE_EMBEDDED_COMPILER=0 $CMAKE_FLAGS"
fi
# Will test symbols
#EXTRAPACKAGES+=" gdb "
# For killall in pbuilder-hooks:
EXTRAPACKAGES+=" psmisc "
[[ $CCACHE_PREFIX == 'distcc' ]] && EXTRAPACKAGES+=" $CCACHE_PREFIX " && USENETWORK=yes && export DISTCC_DIR=/var/cache/pbuilder/distcc
[[ $ARCH == 'i386' ]] && EXTRAPACKAGES+=" libssl-dev "
export DEB_BUILD_OPTIONS=parallel=`nproc`
# Floating bug with permissions:
[ -n "$CCACHEDIR" ] && sudo mkdir -p $CCACHEDIR
[ -n "$CCACHEDIR" ] && sudo chmod -R a+rwx $CCACHEDIR || true
# chown -R $BUILDUSERID:$BUILDUSERID $CCACHEDIR
# Do not create source package inside pbuilder (-b)
# Use current dir to make package (by default should have src archive)
# echo "3.0 (native)" > debian/source/format
# OR
# pdebuild -b --debbuildopts "--source-option=--format=\"3.0 (native)\""
# OR
DEBBUILDOPTS="-b --source-option=--format=\"3.0 (native)\""
HOOKDIR="debian/pbuilder-hooks"
#echo "DEBOOTSTRAPOPTS=${DEBOOTSTRAPOPTS[@]}"
#echo "ALLOWUNTRUSTED=${ALLOWUNTRUSTED} OTHERMIRROR=${OTHERMIRROR}"
#echo "EXTRAPACKAGES=${EXTRAPACKAGES}"

5
debian/changelog vendored

@@ -1,5 +0,0 @@
clickhouse (22.1.1.1) unstable; urgency=low
* Modified source code
-- clickhouse-release <clickhouse-release@yandex-team.ru> Thu, 09 Dec 2021 00:32:58 +0300

5
debian/changelog.in vendored

@@ -1,5 +0,0 @@
clickhouse (@VERSION_STRING@) unstable; urgency=low
* Modified source code
-- @AUTHOR@ <@EMAIL@> @DATE@


@@ -1,7 +0,0 @@
usr/bin/clickhouse-client
usr/bin/clickhouse-local
usr/bin/clickhouse-compressor
usr/bin/clickhouse-benchmark
usr/bin/clickhouse-format
usr/bin/clickhouse-obfuscator
etc/clickhouse-client/config.xml


@@ -1,5 +0,0 @@
usr/bin/clickhouse
usr/bin/clickhouse-odbc-bridge
usr/bin/clickhouse-library-bridge
usr/bin/clickhouse-extract-from-config
usr/share/bash-completion/completions


@@ -1 +0,0 @@
#*/10 * * * * root ((which service > /dev/null 2>&1 && (service clickhouse-server condstart ||:)) || /etc/init.d/clickhouse-server condstart) > /dev/null 2>&1


@@ -1,4 +0,0 @@
LICENSE
AUTHORS
README.md
CHANGELOG.md


@@ -1,227 +0,0 @@
#!/bin/sh
### BEGIN INIT INFO
# Provides: clickhouse-server
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Should-Start: $time $network
# Should-Stop: $network
# Short-Description: clickhouse-server daemon
### END INIT INFO
#
# NOTES:
# - Should-* -- script can start if the listed facilities are missing, unlike Required-*
#
# For the documentation [1]:
#
# [1]: https://wiki.debian.org/LSBInitScripts
CLICKHOUSE_USER=clickhouse
CLICKHOUSE_GROUP=${CLICKHOUSE_USER}
SHELL=/bin/bash
PROGRAM=clickhouse-server
CLICKHOUSE_GENERIC_PROGRAM=clickhouse
CLICKHOUSE_PROGRAM_ENV=""
EXTRACT_FROM_CONFIG=${CLICKHOUSE_GENERIC_PROGRAM}-extract-from-config
CLICKHOUSE_CONFDIR=/etc/$PROGRAM
CLICKHOUSE_LOGDIR=/var/log/clickhouse-server
CLICKHOUSE_LOGDIR_USER=root
CLICKHOUSE_DATADIR=/var/lib/clickhouse
if [ -d "/var/lock" ]; then
LOCALSTATEDIR=/var/lock
else
LOCALSTATEDIR=/run/lock
fi
if [ ! -d "$LOCALSTATEDIR" ]; then
mkdir -p "$LOCALSTATEDIR"
fi
CLICKHOUSE_BINDIR=/usr/bin
CLICKHOUSE_CRONFILE=/etc/cron.d/clickhouse-server
CLICKHOUSE_CONFIG=$CLICKHOUSE_CONFDIR/config.xml
LOCKFILE=$LOCALSTATEDIR/$PROGRAM
CLICKHOUSE_PIDDIR=/var/run/$PROGRAM
CLICKHOUSE_PIDFILE="$CLICKHOUSE_PIDDIR/$PROGRAM.pid"
# CLICKHOUSE_STOP_TIMEOUT=60 # Disabled by default. Place it in /etc/default/clickhouse if you need it.
# Some systems lack "flock"
command -v flock >/dev/null && FLOCK=flock
# Override defaults from optional config file
test -f /etc/default/clickhouse && . /etc/default/clickhouse
die()
{
echo $1 >&2
exit 1
}
# Check that configuration file is Ok.
check_config()
{
if [ -x "$CLICKHOUSE_BINDIR/$EXTRACT_FROM_CONFIG" ]; then
su -s $SHELL ${CLICKHOUSE_USER} -c "$CLICKHOUSE_BINDIR/$EXTRACT_FROM_CONFIG --config-file=\"$CLICKHOUSE_CONFIG\" --key=path" >/dev/null || die "Configuration file ${CLICKHOUSE_CONFIG} doesn't parse successfully. Won't restart server. You may use forcerestart if you are sure.";
fi
}
initdb()
{
${CLICKHOUSE_GENERIC_PROGRAM} install --user "${CLICKHOUSE_USER}" --pid-path "${CLICKHOUSE_PIDDIR}" --config-path "${CLICKHOUSE_CONFDIR}" --binary-path "${CLICKHOUSE_BINDIR}"
}
start()
{
${CLICKHOUSE_GENERIC_PROGRAM} start --user "${CLICKHOUSE_USER}" --pid-path "${CLICKHOUSE_PIDDIR}" --config-path "${CLICKHOUSE_CONFDIR}" --binary-path "${CLICKHOUSE_BINDIR}"
}
stop()
{
${CLICKHOUSE_GENERIC_PROGRAM} stop --pid-path "${CLICKHOUSE_PIDDIR}"
}
restart()
{
${CLICKHOUSE_GENERIC_PROGRAM} restart --user "${CLICKHOUSE_USER}" --pid-path "${CLICKHOUSE_PIDDIR}" --config-path "${CLICKHOUSE_CONFDIR}" --binary-path "${CLICKHOUSE_BINDIR}"
}
forcestop()
{
${CLICKHOUSE_GENERIC_PROGRAM} stop --force --pid-path "${CLICKHOUSE_PIDDIR}"
}
service_or_func()
{
if [ -x "/bin/systemctl" ] && [ -f /etc/systemd/system/clickhouse-server.service ] && [ -d /run/systemd/system ]; then
systemctl $1 $PROGRAM
else
$1
fi
}
forcerestart()
{
forcestop
# Should not use 'start' function if systemd active
service_or_func start
}
use_cron()
{
# 1. running systemd
if [ -x "/bin/systemctl" ] && [ -f /etc/systemd/system/clickhouse-server.service ] && [ -d /run/systemd/system ]; then
return 1
fi
# 2. disabled by config
if [ -z "$CLICKHOUSE_CRONFILE" ]; then
return 2
fi
return 0
}
# returns false if cron disabled (with systemd)
enable_cron()
{
use_cron && sed -i 's/^#*//' "$CLICKHOUSE_CRONFILE"
}
# returns false if cron disabled (with systemd)
disable_cron()
{
use_cron && sed -i 's/^#*/#/' "$CLICKHOUSE_CRONFILE"
}
is_cron_disabled()
{
use_cron || return 0
# Assumes that either no lines are commented or all lines are commented.
# Also please note that currently the cron file for ClickHouse has only one line (but some time ago there were more).
grep -q -E '^#' "$CLICKHOUSE_CRONFILE";
}
main()
{
# See how we were called.
EXIT_STATUS=0
case "$1" in
start)
service_or_func start && enable_cron
;;
stop)
disable_cron
service_or_func stop
;;
restart)
service_or_func restart && enable_cron
;;
forcestop)
disable_cron
forcestop
;;
forcerestart)
forcerestart && enable_cron
;;
reload)
service_or_func restart
;;
condstart)
service_or_func start
;;
condstop)
service_or_func stop
;;
condrestart)
service_or_func restart
;;
condreload)
service_or_func restart
;;
initdb)
initdb
;;
enable_cron)
enable_cron
;;
disable_cron)
disable_cron
;;
*)
echo "Usage: $0 {start|stop|status|restart|forcestop|forcerestart|reload|condstart|condstop|condrestart|condreload|initdb}"
exit 2
;;
esac
exit $EXIT_STATUS
}
status()
{
${CLICKHOUSE_GENERIC_PROGRAM} status --pid-path "${CLICKHOUSE_PIDDIR}"
}
# Running commands without need of locking
case "$1" in
status)
status
exit 0
;;
esac
(
if $FLOCK -n 9; then
main "$@"
else
echo "Init script is already running" && exit 1
fi
) 9> $LOCKFILE


@@ -1,6 +0,0 @@
usr/bin/clickhouse-server
usr/bin/clickhouse-copier
usr/bin/clickhouse-report
etc/clickhouse-server/config.xml
etc/clickhouse-server/users.xml
etc/systemd/system/clickhouse-server.service


@@ -1,47 +0,0 @@
#!/bin/sh
set -e
# set -x
PROGRAM=clickhouse-server
CLICKHOUSE_USER=${CLICKHOUSE_USER:=clickhouse}
CLICKHOUSE_GROUP=${CLICKHOUSE_GROUP:=${CLICKHOUSE_USER}}
# Please note that we don't support paths with whitespaces. This is rather ignorant.
CLICKHOUSE_CONFDIR=${CLICKHOUSE_CONFDIR:=/etc/clickhouse-server}
CLICKHOUSE_DATADIR=${CLICKHOUSE_DATADIR:=/var/lib/clickhouse}
CLICKHOUSE_LOGDIR=${CLICKHOUSE_LOGDIR:=/var/log/clickhouse-server}
CLICKHOUSE_BINDIR=${CLICKHOUSE_BINDIR:=/usr/bin}
CLICKHOUSE_GENERIC_PROGRAM=${CLICKHOUSE_GENERIC_PROGRAM:=clickhouse}
EXTRACT_FROM_CONFIG=${CLICKHOUSE_GENERIC_PROGRAM}-extract-from-config
CLICKHOUSE_CONFIG=$CLICKHOUSE_CONFDIR/config.xml
CLICKHOUSE_PIDDIR=/var/run/$PROGRAM
[ -f /usr/share/debconf/confmodule ] && . /usr/share/debconf/confmodule
[ -f /etc/default/clickhouse ] && . /etc/default/clickhouse
if [ ! -f "/etc/debian_version" ]; then
not_deb_os=1
fi
if [ "$1" = configure ] || [ -n "$not_deb_os" ]; then
${CLICKHOUSE_GENERIC_PROGRAM} install --user "${CLICKHOUSE_USER}" --group "${CLICKHOUSE_GROUP}" --pid-path "${CLICKHOUSE_PIDDIR}" --config-path "${CLICKHOUSE_CONFDIR}" --binary-path "${CLICKHOUSE_BINDIR}" --log-path "${CLICKHOUSE_LOGDIR}" --data-path "${CLICKHOUSE_DATADIR}"
if [ -x "/bin/systemctl" ] && [ -f /etc/systemd/system/clickhouse-server.service ] && [ -d /run/systemd/system ]; then
# if old rc.d service present - remove it
if [ -x "/etc/init.d/clickhouse-server" ] && [ -x "/usr/sbin/update-rc.d" ]; then
/usr/sbin/update-rc.d clickhouse-server remove
fi
/bin/systemctl daemon-reload
/bin/systemctl enable clickhouse-server
else
# If you are downgrading to a version older than 1.1.54336, run: systemctl disable clickhouse-server
if [ -x "/etc/init.d/clickhouse-server" ]; then
if [ -x "/usr/sbin/update-rc.d" ]; then
/usr/sbin/update-rc.d clickhouse-server defaults 19 19 >/dev/null || exit $?
else
echo # Other OS
fi
fi
fi
fi


@@ -1,27 +0,0 @@
[Unit]
Description=ClickHouse Server (analytic DBMS for big data)
Requires=network-online.target
# NOTE: After/Wants=time-sync.target is not enough; you need to ensure
# that the time has already been adjusted. If you use systemd-timesyncd you are
# safe, but if you use ntp or some other daemon, you should configure it
# additionally.
After=time-sync.target network-online.target
Wants=time-sync.target
[Service]
Type=simple
User=clickhouse
Group=clickhouse
Restart=always
RestartSec=30
RuntimeDirectory=clickhouse-server
ExecStart=/usr/bin/clickhouse-server --config=/etc/clickhouse-server/config.xml --pid-file=/run/clickhouse-server/clickhouse-server.pid
# Minus means that this file is optional.
EnvironmentFile=-/etc/default/clickhouse
LimitCORE=infinity
LimitNOFILE=500000
CapabilityBoundingSet=CAP_NET_ADMIN CAP_IPC_LOCK CAP_SYS_NICE CAP_NET_BIND_SERVICE
[Install]
# ClickHouse should not start from the rescue shell (rescue.target).
WantedBy=multi-user.target

1
debian/compat vendored

@@ -1 +0,0 @@
10

58
debian/control vendored

@@ -1,58 +0,0 @@
Source: clickhouse
Section: database
Priority: optional
Maintainer: Alexey Milovidov <milovidov@clickhouse.com>
Build-Depends: debhelper (>= 9),
cmake | cmake3,
ninja-build,
clang-13,
llvm-13,
lld-13,
libc6-dev,
tzdata
Standards-Version: 3.9.8
Package: clickhouse-client
Architecture: all
Depends: ${shlibs:Depends}, ${misc:Depends}, clickhouse-common-static (= ${binary:Version})
Replaces: clickhouse-compressor
Conflicts: clickhouse-compressor
Description: Client binary for ClickHouse
ClickHouse is a column-oriented database management system
that allows generating analytical data reports in real time.
.
This package provides clickhouse-client, clickhouse-local and clickhouse-benchmark
Package: clickhouse-common-static
Architecture: any
Depends: ${shlibs:Depends}, ${misc:Depends}
Suggests: clickhouse-common-static-dbg
Replaces: clickhouse-common, clickhouse-server-base
Provides: clickhouse-common, clickhouse-server-base
Description: Common files for ClickHouse
ClickHouse is a column-oriented database management system
that allows generating analytical data reports in real time.
.
This package provides common files for both clickhouse server and client
Package: clickhouse-server
Architecture: all
Depends: ${shlibs:Depends}, ${misc:Depends}, clickhouse-common-static (= ${binary:Version}), adduser
Recommends: libcap2-bin
Replaces: clickhouse-server-common, clickhouse-server-base
Provides: clickhouse-server-common
Description: Server binary for ClickHouse
ClickHouse is a column-oriented database management system
that allows generating analytical data reports in real time.
.
This package provides clickhouse common configuration files
Package: clickhouse-common-static-dbg
Architecture: any
Section: debug
Priority: optional
Depends: ${misc:Depends}
Replaces: clickhouse-common-dbg
Conflicts: clickhouse-common-dbg
Description: debugging symbols for clickhouse-common-static
This package contains the debugging symbols for clickhouse-common.

132
debian/rules vendored

@@ -1,132 +0,0 @@
#!/usr/bin/make -f
# -*- makefile -*-
# Uncomment this to turn on verbose mode.
export DH_VERBOSE=1
# -pie only for static mode
export DEB_BUILD_MAINT_OPTIONS=hardening=-all
# because copy_headers.sh has a hardcoded path to build/include_directories.txt
BUILDDIR = obj-$(DEB_HOST_GNU_TYPE)
CURDIR = $(shell pwd)
DESTDIR = $(CURDIR)/debian/tmp
DEB_HOST_MULTIARCH ?= $(shell dpkg-architecture -qDEB_HOST_MULTIARCH)
ifeq ($(CCACHE_PREFIX),distcc)
THREADS_COUNT=$(shell distcc -j)
endif
ifeq ($(THREADS_COUNT),)
THREADS_COUNT=$(shell nproc || grep -c ^processor /proc/cpuinfo || sysctl -n hw.ncpu || echo 4)
endif
DEB_BUILD_OPTIONS+=parallel=$(THREADS_COUNT)
ifndef ENABLE_TESTS
CMAKE_FLAGS += -DENABLE_TESTS=0
else
# To export binaries from the deb build we do not strip them. No need to run tests in the deb build as we run them in CI
DEB_BUILD_OPTIONS+= nocheck
DEB_BUILD_OPTIONS+= nostrip
endif
ifndef MAKE_TARGET
MAKE_TARGET = clickhouse-bundle
endif
CMAKE_FLAGS += -DENABLE_UTILS=0
DEB_CC ?= $(shell which gcc-11 gcc-10 gcc-9 gcc | head -n1)
DEB_CXX ?= $(shell which g++-11 g++-10 g++-9 g++ | head -n1)
ifdef DEB_CXX
DEB_BUILD_GNU_TYPE := $(shell dpkg-architecture -qDEB_BUILD_GNU_TYPE)
DEB_HOST_GNU_TYPE := $(shell dpkg-architecture -qDEB_HOST_GNU_TYPE)
ifeq ($(DEB_BUILD_GNU_TYPE),$(DEB_HOST_GNU_TYPE))
CC := $(DEB_CC)
CXX := $(DEB_CXX)
else ifeq (clang,$(findstring clang,$(DEB_CXX)))
# If we crosscompile with clang, it knows what to do
CC := $(DEB_CC)
CXX := $(DEB_CXX)
else
CC := $(DEB_HOST_GNU_TYPE)-$(DEB_CC)
CXX := $(DEB_HOST_GNU_TYPE)-$(DEB_CXX)
endif
endif
ifdef CXX
CMAKE_FLAGS += -DCMAKE_CXX_COMPILER=`which $(CXX)`
endif
ifdef CC
CMAKE_FLAGS += -DCMAKE_C_COMPILER=`which $(CC)`
endif
ifndef DISABLE_NINJA
NINJA=$(shell which ninja)
ifneq ($(NINJA),)
CMAKE_FLAGS += -GNinja
export MAKE=$(NINJA) $(NINJA_FLAGS)
endif
endif
ifndef DH_VERBOSE
CMAKE_FLAGS += -DCMAKE_VERBOSE_MAKEFILE=0
endif
# Useful for building on low-memory systems
ifndef DISABLE_PARALLEL
DH_FLAGS += --parallel
else
THREADS_COUNT = 1
endif
%:
dh $@ $(DH_FLAGS) --buildsystem=cmake
override_dh_auto_configure:
dh_auto_configure -- $(CMAKE_FLAGS)
override_dh_auto_build:
# Fix for ninja. Do not add -O.
$(MAKE) -j$(THREADS_COUNT) -C $(BUILDDIR) $(MAKE_TARGET)
override_dh_auto_test:
ifeq (,$(filter nocheck,$(DEB_BUILD_OPTIONS)))
cd $(BUILDDIR) && ctest -j$(THREADS_COUNT) -V
endif
# Disable config.guess and config.sub update
override_dh_update_autotools_config:
override_dh_clean:
rm -rf debian/copyright debian/clickhouse-client.docs debian/clickhouse-common-static.docs
dh_clean # -X contrib
override_dh_strip:
#https://www.debian.org/doc/debian-policy/ch-source.html#debian-rules-and-deb-build-options
ifeq (,$(filter nostrip,$(DEB_BUILD_OPTIONS)))
dh_strip -pclickhouse-common-static --dbg-package=clickhouse-common-static-dbg
endif
override_dh_install:
# Making docs
cp LICENSE debian/copyright
ln -sf clickhouse-server.docs debian/clickhouse-client.docs
ln -sf clickhouse-server.docs debian/clickhouse-common-static.docs
# systemd compatibility
mkdir -p $(DESTDIR)/etc/systemd/system/
cp debian/clickhouse-server.service $(DESTDIR)/etc/systemd/system/
dh_install --list-missing --sourcedir=$(DESTDIR)
override_dh_auto_install:
env DESTDIR=$(DESTDIR) $(MAKE) -j$(THREADS_COUNT) -C $(BUILDDIR) install
override_dh_shlibdeps:
true # We depend only on libc, and dh_shlibdeps gives us a wrong (too strict) dependency.
override_dh_builddeb:
dh_builddeb -- -Z gzip # Older systems don't have "xz", so use "gzip" instead.


@@ -1 +0,0 @@
3.0 (quilt)


@@ -1,9 +0,0 @@
tar-ignore
tar-ignore="build_*/*"
tar-ignore="workspace/*"
tar-ignore="contrib/poco/openssl/*"
tar-ignore="contrib/poco/gradle/*"
tar-ignore="contrib/poco/Data/SQLite/*"
tar-ignore="contrib/poco/PDF/*"
compression-level=3
compression=gzip

6
debian/watch vendored

@@ -1,6 +0,0 @@
version=4
opts="filenamemangle=s%(?:.*?)?v?(\d[\d.]*)-stable\.tar\.gz%clickhouse-$1.tar.gz%" \
https://github.com/ClickHouse/ClickHouse/tags \
(?:.*?/)?v?(\d[\d.]*)-stable\.tar\.gz debian uupdate


@@ -5,11 +5,10 @@ toc_title: HTTP Interface
# HTTP Interface {#http-interface}
The HTTP interface lets you use ClickHouse on any platform from any programming language. We use it for working from Java and Perl, as well as shell scripts. In other departments, the HTTP interface is used from Perl, Python, and Go. The HTTP interface is more limited than the native interface, but it has better compatibility.
The HTTP interface lets you use ClickHouse on any platform from any programming language in the form of a REST API. The HTTP interface is more limited than the native interface, but it has better language support.
By default, `clickhouse-server` listens for HTTP on port 8123 (this can be changed in the config).
Sometimes, the `curl` command is not available on user operating systems. On Ubuntu or Debian, run `sudo apt install curl`. Please refer to this [documentation](https://curl.se/download.html) to install it before running the examples.
HTTPS can be enabled as well with port 8443 by default.
If you make a `GET /` request without parameters, it returns the 200 response code and the string defined in [http_server_default_response](../operations/server-configuration-parameters/settings.md#server_configuration_parameters-http_server_default_response), whose default value is “Ok.” (with a line feed at the end)
@@ -18,11 +17,12 @@ $ curl 'http://localhost:8123/'
Ok.
```
Sometimes, the `curl` command is not available on user operating systems. On Ubuntu or Debian, run `sudo apt install curl`. Please refer to this [documentation](https://curl.se/download.html) to install it before running the examples.
Web UI can be accessed here: `http://localhost:8123/play`.
![Web UI](../images/play.png)
In health-check scripts, use the `GET /ping` request. This handler always returns “Ok.” (with a line feed at the end). Available from version 18.12.13. See also `/replicas_status` to check a replica's delay.
``` bash
@@ -32,7 +32,7 @@ $ curl 'http://localhost:8123/replicas_status'
Ok.
```
Send the request as a URL query parameter, or as a POST. Or send the beginning of the query in the query parameter, and the rest in the POST (we'll explain later why this is necessary). The size of the URL is limited to 16 KB, so keep this in mind when sending large queries.
Send the request as a URL query parameter, or as a POST. Or send the beginning of the query in the query parameter, and the rest in the POST (we'll explain later why this is necessary). The size of the URL is limited to 1 MiB by default; this can be changed with the `http_max_uri_size` setting.
If successful, you receive the 200 response code and the result in the response body.
If an error occurs, you receive the 500 response code and an error description text in the response body.
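For example, here is a minimal sketch of both forms against a local server on the default port; the `SELECT 1` query and its output are purely illustrative:
``` bash
# Query passed as the URL parameter (it must be URL-encoded: SELECT%201 is "SELECT 1"):
$ curl 'http://localhost:8123/?query=SELECT%201'
1

# Query sent in the POST body:
$ echo 'SELECT 1' | curl 'http://localhost:8123/' --data-binary @-
1
```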


@@ -197,5 +197,6 @@ toc_title: Adopters
| <a href="https://domclick.ru/" class="favicon">ДомКлик</a> | Real Estate | — | — | — | [Article in Russian, October 2021](https://habr.com/ru/company/domclick/blog/585936/) |
| <a href="https://magenta-technology.ru/sistema-upravleniya-marshrutami-inkassacii-as-strela/" class="favicon">АС "Стрела"</a> | Transportation | — | — | — | [Job posting, Jan 2022](https://vk.com/topic-111905078_35689124?post=3553) |
| <a href="https://piwik.pro/" class="favicon">Piwik PRO</a> | Web Analytics | — | — | — | [Official website, Dec 2018](https://piwik.pro/blog/piwik-pro-clickhouse-faster-efficient-reports/) |
| <a href="https://www.deepglint.com/" class="favicon">Deepglint 格灵深瞳</a> | AI, Computer Vision | OLAP | — | — | [Official Website](https://www.deepglint.com/) |
[Original article](https://clickhouse.com/docs/en/introduction/adopters/) <!--hide-->


@@ -66,40 +66,40 @@ int mainEntryClickHouseCompressor(int argc, char ** argv)
using namespace DB;
namespace po = boost::program_options;
po::options_description desc = createOptionsDescription("Allowed options", getTerminalWidth());
desc.add_options()
("help,h", "produce help message")
("input", po::value<std::string>()->value_name("INPUT"), "input file")
("output", po::value<std::string>()->value_name("OUTPUT"), "output file")
("decompress,d", "decompress")
("offset-in-compressed-file", po::value<size_t>()->default_value(0ULL), "offset to the compressed block (i.e. physical file offset)")
("offset-in-decompressed-block", po::value<size_t>()->default_value(0ULL), "offset to the decompressed block (i.e. virtual offset)")
("block-size,b", po::value<unsigned>()->default_value(DBMS_DEFAULT_BUFFER_SIZE), "compress in blocks of specified size")
("hc", "use LZ4HC instead of LZ4")
("zstd", "use ZSTD instead of LZ4")
("codec", po::value<std::vector<std::string>>()->multitoken(), "use codecs combination instead of LZ4")
("level", po::value<int>(), "compression level for codecs specified via flags")
("none", "use no compression instead of LZ4")
("stat", "print block statistics of compressed data")
;
po::positional_options_description positional_desc;
positional_desc.add("input", 1);
positional_desc.add("output", 1);
po::variables_map options;
po::store(po::command_line_parser(argc, argv).options(desc).positional(positional_desc).run(), options);
if (options.count("help"))
{
std::cout << "Usage: " << argv[0] << " [options] < INPUT > OUTPUT" << std::endl;
std::cout << "Usage: " << argv[0] << " [options] INPUT OUTPUT" << std::endl;
std::cout << desc << std::endl;
return 0;
}
try
{
po::options_description desc = createOptionsDescription("Allowed options", getTerminalWidth());
desc.add_options()
("help,h", "produce help message")
("input", po::value<std::string>()->value_name("INPUT"), "input file")
("output", po::value<std::string>()->value_name("OUTPUT"), "output file")
("decompress,d", "decompress")
("offset-in-compressed-file", po::value<size_t>()->default_value(0ULL), "offset to the compressed block (i.e. physical file offset)")
("offset-in-decompressed-block", po::value<size_t>()->default_value(0ULL), "offset to the decompressed block (i.e. virtual offset)")
("block-size,b", po::value<unsigned>()->default_value(DBMS_DEFAULT_BUFFER_SIZE), "compress in blocks of specified size")
("hc", "use LZ4HC instead of LZ4")
("zstd", "use ZSTD instead of LZ4")
("codec", po::value<std::vector<std::string>>()->multitoken(), "use codecs combination instead of LZ4")
("level", po::value<int>(), "compression level for codecs specified via flags")
("none", "use no compression instead of LZ4")
("stat", "print block statistics of compressed data")
;
po::positional_options_description positional_desc;
positional_desc.add("input", 1);
positional_desc.add("output", 1);
po::variables_map options;
po::store(po::command_line_parser(argc, argv).options(desc).positional(positional_desc).run(), options);
if (options.count("help"))
{
std::cout << "Usage: " << argv[0] << " [options] < INPUT > OUTPUT" << std::endl;
std::cout << "Usage: " << argv[0] << " [options] INPUT OUTPUT" << std::endl;
std::cout << desc << std::endl;
return 0;
}
bool decompress = options.count("decompress");
bool use_lz4hc = options.count("hc");
bool use_zstd = options.count("zstd");


@@ -44,40 +44,40 @@ int mainEntryClickHouseFormat(int argc, char ** argv)
{
using namespace DB;
boost::program_options::options_description desc = createOptionsDescription("Allowed options", getTerminalWidth());
desc.add_options()
("query", po::value<std::string>(), "query to format")
("help,h", "produce help message")
("hilite", "add syntax highlight with ANSI terminal escape sequences")
("oneline", "format in single line")
("quiet,q", "just check syntax, no output on success")
("multiquery,n", "allow multiple queries in the same file")
("obfuscate", "obfuscate instead of formatting")
("backslash", "add a backslash at the end of each line of the formatted query")
("allow_settings_after_format_in_insert", "Allow SETTINGS after FORMAT, but note, that this is not always safe")
("seed", po::value<std::string>(), "seed (arbitrary string) that determines the result of obfuscation")
;
Settings cmd_settings;
for (const auto & field : cmd_settings.all())
{
if (field.getName() == "max_parser_depth" || field.getName() == "max_query_size")
cmd_settings.addProgramOption(desc, field);
}
boost::program_options::variables_map options;
boost::program_options::store(boost::program_options::parse_command_line(argc, argv, desc), options);
po::notify(options);
if (options.count("help"))
{
std::cout << "Usage: " << argv[0] << " [options] < query" << std::endl;
std::cout << desc << std::endl;
return 1;
}
try
{
boost::program_options::options_description desc = createOptionsDescription("Allowed options", getTerminalWidth());
desc.add_options()
("query", po::value<std::string>(), "query to format")
("help,h", "produce help message")
("hilite", "add syntax highlight with ANSI terminal escape sequences")
("oneline", "format in single line")
("quiet,q", "just check syntax, no output on success")
("multiquery,n", "allow multiple queries in the same file")
("obfuscate", "obfuscate instead of formatting")
("backslash", "add a backslash at the end of each line of the formatted query")
("allow_settings_after_format_in_insert", "Allow SETTINGS after FORMAT, but note, that this is not always safe")
("seed", po::value<std::string>(), "seed (arbitrary string) that determines the result of obfuscation")
;
Settings cmd_settings;
for (const auto & field : cmd_settings.all())
{
if (field.getName() == "max_parser_depth" || field.getName() == "max_query_size")
cmd_settings.addProgramOption(desc, field);
}
boost::program_options::variables_map options;
boost::program_options::store(boost::program_options::parse_command_line(argc, argv, desc), options);
po::notify(options);
if (options.count("help"))
{
std::cout << "Usage: " << argv[0] << " [options] < query" << std::endl;
std::cout << desc << std::endl;
return 1;
}
bool hilite = options.count("hilite");
bool oneline = options.count("oneline");
bool quiet = options.count("quiet");


@@ -1231,5 +1231,5 @@ try
catch (...)
{
std::cerr << DB::getCurrentExceptionMessage(true) << '\n';
throw;
return DB::getCurrentExceptionCode();
}

99
release

@@ -1,99 +0,0 @@
#!/usr/bin/env bash
# If you get a "no space left" error, you can change the location of temporary files with the BUILDPLACE environment variable.
# Version increment:
# Default release: 18.1.2 -> 18.2.0:
# ./release --version
# or
# ./release --version minor
# Bugfix release (only with small patches to previous release): 18.1.2 -> 18.1.3:
# ./release --version patch
# Do this once per year: 18.1.2 -> 19.0.0:
# ./release --version major
set -e
# Avoid dependency on locale
LC_ALL=C
CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
cd $CUR_DIR
source "./utils/release/release_lib.sh"
DEBUILD_NOSIGN_OPTIONS="-us -uc"
DEBUILD_NODEPS_OPTIONS="-d"
if [ -z "$VERSION_STRING" ] ; then
get_revision_author
fi
while [[ $1 == --* ]]
do
if [[ $1 == '--test' ]]; then
TEST='yes'
VERSION_POSTFIX+=+test
shift
elif [[ $1 == '--check-build-dependencies' ]]; then
DEBUILD_NODEPS_OPTIONS=""
shift
elif [[ $1 == '--version' ]]; then
gen_revision_author $2
exit 0
elif [[ $1 == '--rpm' ]]; then
MAKE_RPM=1
shift
elif [[ $1 == '--tgz' ]]; then
MAKE_TGZ=1
shift
else
echo "Unknown option $1"
exit 2
fi
done
# Build options
if [ -n "$SANITIZER" ]
then
if [[ "$SANITIZER" == "address" ]]; then VERSION_POSTFIX+="+asan"
elif [[ "$SANITIZER" == "thread" ]]; then VERSION_POSTFIX+="+tsan"
elif [[ "$SANITIZER" == "memory" ]]; then VERSION_POSTFIX+="+msan"
elif [[ "$SANITIZER" == "undefined" ]]; then VERSION_POSTFIX+="+ubsan"
else
echo "Unknown value of SANITIZER variable: $SANITIZER"
exit 3
fi
export DEB_CC=${DEB_CC=clang-10}
export DEB_CXX=${DEB_CXX=clang++-10}
EXTRAPACKAGES="$EXTRAPACKAGES clang-10 lld-10"
elif [[ $BUILD_TYPE == 'debug' ]]; then
CMAKE_BUILD_TYPE=Debug
VERSION_POSTFIX+="+debug"
fi
CMAKE_FLAGS=" $MALLOC_OPTS -DSANITIZE=$SANITIZER -DENABLE_CHECK_HEAVY_BUILDS=1 $CMAKE_FLAGS"
[[ -n "$CMAKE_BUILD_TYPE" ]] && CMAKE_FLAGS=" -DCMAKE_BUILD_TYPE=$CMAKE_BUILD_TYPE $CMAKE_FLAGS"
export CMAKE_FLAGS
export EXTRAPACKAGES
VERSION_STRING+=$VERSION_POSTFIX
echo -e "\nCurrent version is $VERSION_STRING"
if [ -z "$NO_BUILD" ] ; then
gen_changelog "$VERSION_STRING" "" "$AUTHOR" ""
# Build (only binary packages).
debuild --preserve-env -e PATH \
-e DEB_CC=$DEB_CC -e DEB_CXX=$DEB_CXX -e CMAKE_FLAGS="$CMAKE_FLAGS" \
-b ${DEBUILD_NOSIGN_OPTIONS} ${DEBUILD_NODEPS_OPTIONS} ${DEB_ARCH_FLAG}
fi
if [ -n "$MAKE_RPM" ]; then
make_rpm
fi
if [ -n "$MAKE_TGZ" ]; then
make_tgz
fi


@@ -163,6 +163,11 @@ bool FileSegment::isDownloader() const
return getCallerId() == downloader_id;
}
bool FileSegment::isDownloaderImpl(std::lock_guard<std::mutex> & /* segment_lock */) const
{
return getCallerId() == downloader_id;
}
FileSegment::RemoteFileReaderPtr FileSegment::getRemoteFileReader()
{
if (!isDownloader())
@@ -397,6 +402,9 @@ bool FileSegment::reserve(size_t size)
void FileSegment::setDownloaded(std::lock_guard<std::mutex> & /* segment_lock */)
{
if (is_downloaded)
return;
download_state = State::DOWNLOADED;
is_downloaded = true;
downloader_id.clear();
@@ -426,8 +434,7 @@ void FileSegment::completeBatchAndResetDownloader()
{
std::lock_guard segment_lock(mutex);
bool is_downloader = downloader_id == getCallerId();
if (!is_downloader)
if (!isDownloaderImpl(segment_lock))
{
cv.notify_all();
throw Exception(
@@ -448,7 +455,7 @@ void FileSegment::complete(State state)
std::lock_guard cache_lock(cache->mutex);
std::lock_guard segment_lock(mutex);
bool is_downloader = downloader_id == getCallerId();
bool is_downloader = isDownloaderImpl(segment_lock);
if (!is_downloader)
{
cv.notify_all();
@@ -465,6 +472,9 @@ void FileSegment::complete(State state)
"Cannot complete file segment with state: {}", stateToString(state));
}
if (state == State::DOWNLOADED)
setDownloaded(segment_lock);
download_state = state;
assertNotDetached();
@@ -475,7 +485,7 @@ void FileSegment::complete(State state)
}
catch (...)
{
if (!downloader_id.empty() && downloader_id == getCallerIdImpl())
if (!downloader_id.empty() && is_downloader)
downloader_id.clear();
cv.notify_all();
@@ -492,8 +502,12 @@ void FileSegment::complete(std::lock_guard<std::mutex> & cache_lock)
if (download_state == State::SKIP_CACHE || detached)
return;
if (download_state != State::DOWNLOADED && getDownloadedSize(segment_lock) == range().size())
if (isDownloaderImpl(segment_lock)
&& download_state != State::DOWNLOADED
&& getDownloadedSize(segment_lock) == range().size())
{
setDownloaded(segment_lock);
}
assertNotDetached();
@@ -502,7 +516,7 @@ void FileSegment::complete(std::lock_guard<std::mutex> & cache_lock)
/// Segment state can be changed from DOWNLOADING or EMPTY only if the caller is the
/// downloader or the only owner of the segment.
bool can_update_segment_state = downloader_id == getCallerIdImpl()
bool can_update_segment_state = isDownloaderImpl(segment_lock)
|| cache->isLastFileSegmentHolder(key(), offset(), cache_lock, segment_lock);
if (can_update_segment_state)
@@ -515,7 +529,7 @@ void FileSegment::complete(std::lock_guard<std::mutex> & cache_lock)
}
catch (...)
{
if (!downloader_id.empty() && downloader_id == getCallerIdImpl())
if (!downloader_id.empty() && isDownloaderImpl(segment_lock))
downloader_id.clear();
cv.notify_all();
@@ -561,7 +575,7 @@ void FileSegment::completeImpl(std::lock_guard<std::mutex> & cache_lock, std::lo
}
}
if (!downloader_id.empty() && (downloader_id == getCallerIdImpl() || is_last_holder))
if (!downloader_id.empty() && (isDownloaderImpl(segment_lock) || is_last_holder))
{
LOG_TEST(log, "Clearing downloader id: {}, current state: {}", downloader_id, stateToString(download_state));
downloader_id.clear();


@@ -154,6 +154,7 @@ private:
void setDownloaded(std::lock_guard<std::mutex> & segment_lock);
void setDownloadFailed(std::lock_guard<std::mutex> & segment_lock);
bool isDownloaderImpl(std::lock_guard<std::mutex> & segment_lock) const;
void wrapWithCacheInfo(Exception & e, const String & message, std::lock_guard<std::mutex> & segment_lock) const;


@@ -613,7 +613,10 @@ bool CachedReadBufferFromRemoteFS::nextImplStep()
{
bool need_complete_file_segment = file_segment->isDownloader();
if (need_complete_file_segment)
{
LOG_TEST(log, "Resetting downloader {} from scope exit", file_segment->getDownloader());
file_segment->completeBatchAndResetDownloader();
}
}
catch (...)
{


@@ -92,26 +92,11 @@ bool ParserDataType::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
}
else if (type_name_upper.find("INT") != std::string::npos)
{
/// Support SIGNED and UNSIGNED integer type modifiers for compatibility with MySQL
/// Support SIGNED and UNSIGNED integer type modifiers for compatibility with MySQL.
if (ParserKeyword("SIGNED").ignore(pos))
type_name_suffix = "SIGNED";
else if (ParserKeyword("UNSIGNED").ignore(pos))
type_name_suffix = "UNSIGNED";
else if (pos->type == TokenType::OpeningRoundBracket)
{
++pos;
if (pos->type != TokenType::Number)
return false;
++pos;
if (pos->type != TokenType::ClosingRoundBracket)
return false;
++pos;
if (ParserKeyword("SIGNED").ignore(pos))
type_name_suffix = "SIGNED";
else if (ParserKeyword("UNSIGNED").ignore(pos))
type_name_suffix = "UNSIGNED";
}
}
if (!type_name_suffix.empty())


@@ -1,5 +1 @@
1
CREATE TEMPORARY TABLE t3_00841\n(\n `x` UInt32\n)\nENGINE = Memory
1
CREATE TEMPORARY TABLE t4_00841\n(\n `x` Int32\n)\nENGINE = Memory
1


@@ -3,13 +3,3 @@ INSERT INTO t1_00841 VALUES (1);
SELECT * FROM t1_00841;
CREATE TEMPORARY TABLE test.t2_00841 (x UInt8); -- { serverError 442 }
CREATE TEMPORARY TABLE t3_00841 (x INT(11) UNSIGNED);
SHOW CREATE TEMPORARY TABLE t3_00841;
INSERT INTO t3_00841 VALUES (1);
SELECT * FROM t3_00841;
CREATE TEMPORARY TABLE t4_00841 (x INT(11) SIGNED);
SHOW CREATE TEMPORARY TABLE t4_00841;
INSERT INTO t4_00841 VALUES (1);
SELECT * FROM t4_00841;


@@ -1,250 +0,0 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
import subprocess
import os
import logging
import shutil
import base64
import pexpect
# Do nothing if keys are not provided
class GpgKey(object):
gnupg_dir = os.path.expanduser('~/.gnupg')
TEMPGNUPG_DIR = os.path.expanduser('~/.local/tempgnupg')
def __init__(self, secret_key_path, public_key_path):
if secret_key_path and public_key_path:
with open(secret_key_path, 'r') as sec, open(public_key_path, 'r') as pub:
self._secret_key = sec.read()
self._public_key = pub.read()
else:
self._secret_key = None
self._public_key = None
def __enter__(self):
if self._secret_key and self._public_key:
if os.path.exists(self.gnupg_dir):
shutil.move(self.gnupg_dir, self.TEMPGNUPG_DIR)
os.mkdir(self.gnupg_dir)
open(os.path.join(self.gnupg_dir, 'secring.gpg'), 'wb').write(base64.b64decode(self._secret_key))
open(os.path.join(self.gnupg_dir, 'pubring.gpg'), 'wb').write(base64.b64decode(self._public_key))
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if self._secret_key and self._public_key:
shutil.rmtree(self.gnupg_dir)
if os.path.exists(self.TEMPGNUPG_DIR):
shutil.move(self.TEMPGNUPG_DIR, self.gnupg_dir)
class DebRelease(object):
DUPLOAD_CONF_TEMPLATE = '\n\t'.join((
"$cfg{{'{title}'}} = {{",
'fqdn => "{fqdn}",',
'method => "{method}",',
'login => "{login}",',
'incoming => "{incoming}",',
'options => "{options}",',
'dinstall_runs => {dinstall_runs},\n}};',))
DUPLOAD_CONF_PATH = os.path.expanduser('~/.dupload.conf')
DUPLOAD_CONF_TMP_PATH = os.path.expanduser('~/.local/tmp_dupload.cnf')
def __init__(self, dupload_config, login, ssh_key_path):
self.__config = {}
for repo, conf in dupload_config.items():
d = {
"fqdn": conf["fqdn"],
"method": "scpb",
"login": login,
"incoming": conf["incoming"],
"dinstall_runs": 0,
"options": "-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ConnectionAttempts=3",
}
d.update(conf)
self.__config[repo] = d
print(self.__config)
self.ssh_key_path = ssh_key_path
def __enter__(self):
if os.path.exists(self.DUPLOAD_CONF_PATH):
shutil.move(self.DUPLOAD_CONF_PATH, self.DUPLOAD_CONF_TMP_PATH)
self.__dupload_conf = open(self.DUPLOAD_CONF_PATH, 'w')
self.__dupload_conf.write('package config;\n\n$default_host = undef;\n\n' + '\n\n'.join([
self.DUPLOAD_CONF_TEMPLATE.format(title=title, **values)
for title, values in self.__config.items()]))
self.__dupload_conf.write('\n')
self.__dupload_conf.close()
if self.ssh_key_path:
subprocess.check_call("ssh-add {}".format(self.ssh_key_path), shell=True)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if os.path.exists(self.DUPLOAD_CONF_TMP_PATH):
shutil.move(self.DUPLOAD_CONF_TMP_PATH, self.DUPLOAD_CONF_PATH)
else:
os.unlink(self.DUPLOAD_CONF_PATH)
class SSHConnection(object):
def __init__(self, user, host, ssh_key=None):
if ssh_key:
key_str = "-i {}".format(ssh_key)
else:
key_str = ""
self.base_cmd = "ssh {key} {user}@{host}".format(
key=key_str, user=user, host=host)
def execute(self, cmd):
logging.info("Executing remote cmd %s", cmd)
subprocess.check_call(self.base_cmd + ' "{cmd}"'.format(cmd=cmd),
shell=True)
def debsign(path, gpg_passphrase, gpg_sec_key_path, gpg_pub_key_path, gpg_user):
try:
with GpgKey(gpg_sec_key_path, gpg_pub_key_path):
cmd = ('debsign -k \'{key}\' -p"gpg --verbose --no-use-agent --batch '
'--no-tty --passphrase {passphrase}" {path}/*.changes').format(
key=gpg_user, passphrase=gpg_passphrase, path=path)
logging.info("Build debsign cmd '%s'", cmd)
subprocess.check_call(cmd, shell=True)
logging.info("debsign finished")
except Exception as ex:
logging.error("Cannot debsign packages on path %s, with user key", path)
raise ex
def rpmsign(path, gpg_passphrase, gpg_sec_key_path, gpg_pub_key_path, gpg_user):
try:
with GpgKey(gpg_sec_key_path, gpg_pub_key_path):
for package in os.listdir(path):
package_path = os.path.join(path, package)
logging.info("Signing %s", package_path)
proc = pexpect.spawn('rpm --resign -D "_signature gpg" -D "_gpg_name {username}" {package}'.format(username=gpg_user, package=package_path))
proc.expect_exact("Enter pass phrase: ")
proc.sendline(gpg_passphrase)
proc.expect(pexpect.EOF)
logging.info("Signed successfully")
except Exception as ex:
logging.error("Cannot rpmsign packages on path %s, with user key", path)
raise ex
def transfer_packages_scp(ssh_key, path, repo_user, repo_url, incoming_directory):
logging.info("Transferring packages via scp to %s", repo_url)
if ssh_key:
key_str = "-i {}".format(ssh_key)
else:
key_str = ""
subprocess.check_call('scp {key_str} {path}/* {user}@{repo}:{incoming}'.format(
path=path, user=repo_user, repo=repo_url, key_str=key_str, incoming=incoming_directory), shell=True)
logging.info("Transfer via scp finished")
def transfer_packages_dupload(ssh_key, path, repo_user, repo_url, incoming_directory):
repo_short_name = repo_url.split('.')[0]
config = {
repo_short_name: {
"fqdn": repo_url,
"incoming": incoming_directory,
}
}
with DebRelease(config, repo_user, ssh_key):
logging.info("Duploading")
subprocess.check_call("dupload -f --nomail --to {repo} {path}".format(repo=repo_short_name, path=path), shell=True)
logging.info("Dupload finished")
def clear_old_incoming_packages(ssh_connection, user):
for pkg in ('deb', 'rpm', 'tgz'):
for release_type in ('stable', 'testing', 'prestable', 'lts'):
try:
ssh_connection.execute("rm /home/{user}/incoming/clickhouse/{pkg}/{release_type}/*".format(
user=user, pkg=pkg, release_type=release_type))
except Exception:
logging.info("rm is not required")
def _get_incoming_path(repo_url, user=None, pkg_type=None, release_type=None):
if repo_url == 'repo.mirror.yandex.net':
return "/home/{user}/incoming/clickhouse/{pkg}/{release_type}".format(
user=user, pkg=pkg_type, release_type=release_type)
else:
return "/repo/{0}/mini-dinstall/incoming/".format(repo_url.split('.')[0])
def _fix_args(args):
if args.gpg_sec_key_path and not os.path.isabs(args.gpg_sec_key_path):
args.gpg_sec_key_path = os.path.join(os.getcwd(), args.gpg_sec_key_path)
if args.gpg_pub_key_path and not os.path.isabs(args.gpg_pub_key_path):
args.gpg_pub_key_path = os.path.join(os.getcwd(), args.gpg_pub_key_path)
if args.ssh_key_path and not os.path.isabs(args.ssh_key_path):
args.ssh_key_path = os.path.join(os.getcwd(), args.ssh_key_path)
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
parser = argparse.ArgumentParser(description="Program to push clickhouse packages to repository")
parser.add_argument('--deb-directory')
parser.add_argument('--rpm-directory')
parser.add_argument('--tgz-directory')
parser.add_argument('--release-type', choices=('testing', 'stable', 'prestable', 'lts'), default='testing')
parser.add_argument('--ssh-key-path')
parser.add_argument('--gpg-passphrase', required=True)
parser.add_argument('--gpg-sec-key-path')
parser.add_argument('--gpg-pub-key-path')
parser.add_argument('--gpg-key-user', default='robot-clickhouse')
parser.add_argument('--repo-url', default='repo.mirror.yandex.net')
parser.add_argument('--repo-user', default='buildfarm')
args = parser.parse_args()
if args.deb_directory is None and args.rpm_directory is None and args.tgz_directory is None:
parser.error('At least one package directory required')
_fix_args(args)
is_open_source = args.repo_url == 'repo.mirror.yandex.net'
ssh_connection = SSHConnection(args.repo_user, args.repo_url, args.ssh_key_path)
packages = []
if args.deb_directory:
debsign(args.deb_directory, args.gpg_passphrase, args.gpg_sec_key_path, args.gpg_pub_key_path, args.gpg_key_user)
packages.append((args.deb_directory, 'deb'))
if args.rpm_directory:
if not is_open_source:
raise Exception("Cannot upload .rpm package to {}".format(args.repo_url))
rpmsign(args.rpm_directory, args.gpg_passphrase, args.gpg_sec_key_path, args.gpg_pub_key_path, args.gpg_key_user)
packages.append((args.rpm_directory, 'rpm'))
if args.tgz_directory:
if not is_open_source:
raise Exception("Cannot upload .tgz package to {}".format(args.repo_url))
packages.append((args.tgz_directory, 'tgz'))
if is_open_source:
logging.info("Clearing old directory with incoming packages on buildfarm")
clear_old_incoming_packages(ssh_connection, args.repo_user)
logging.info("Incoming directory cleared")
for package_path, package_type in packages:
logging.info("Processing path '%s' with package type %s", package_path, package_type)
incoming_directory = _get_incoming_path(args.repo_url, args.repo_user, package_type, args.release_type)
if package_type == "deb":
transfer_packages_dupload(args.ssh_key_path, package_path, args.repo_user, args.repo_url, incoming_directory)
else:
transfer_packages_scp(args.ssh_key_path, package_path, args.repo_user, args.repo_url, incoming_directory)
logging.info("Running clickhouse install (it takes about (20-30 minutes)")
ssh_connection.execute("sudo /usr/sbin/ya-clickhouse-{0}-install".format(package_type))
logging.info("Clickhouse installed")
logging.info("Pushing clickhouse to repo")
ssh_connection.execute("/usr/sbin/push2publicrepo.sh clickhouse")
logging.info("Push finished")
logging.info("Package '%s' pushed", package_type)
else:
transfer_packages_dupload(args.ssh_key_path, args.deb_directory, args.repo_user, args.repo_url, _get_incoming_path(args.repo_url))


@@ -1,300 +0,0 @@
set +e
# set -x
function gen_version_string {
if [ -n "$TEST" ]; then
VERSION_STRING="$VERSION_MAJOR.$VERSION_MINOR.$VERSION_PATCH.$VERSION_REVISION"
else
VERSION_STRING="$VERSION_MAJOR.$VERSION_MINOR.$VERSION_PATCH"
fi
}
function get_version {
if [ -z "$VERSION_MAJOR" ] && [ -z "$VERSION_MINOR" ] && [ -z "$VERSION_PATCH" ]; then
BASEDIR=$(dirname "${BASH_SOURCE[0]}")/../../
VERSION_REVISION=`grep "SET(VERSION_REVISION" ${BASEDIR}/cmake/autogenerated_versions.txt | sed 's/^.*VERSION_REVISION \(.*\)$/\1/' | sed 's/[) ].*//'`
VERSION_MAJOR=`grep "SET(VERSION_MAJOR" ${BASEDIR}/cmake/autogenerated_versions.txt | sed 's/^.*VERSION_MAJOR \(.*\)/\1/' | sed 's/[) ].*//'`
VERSION_MINOR=`grep "SET(VERSION_MINOR" ${BASEDIR}/cmake/autogenerated_versions.txt | sed 's/^.*VERSION_MINOR \(.*\)/\1/' | sed 's/[) ].*//'`
VERSION_PATCH=`grep "SET(VERSION_PATCH" ${BASEDIR}/cmake/autogenerated_versions.txt | sed 's/^.*VERSION_PATCH \(.*\)/\1/' | sed 's/[) ].*//'`
fi
VERSION_PREFIX="${VERSION_PREFIX:-v}"
VERSION_POSTFIX_TAG="${VERSION_POSTFIX:--testing}"
gen_version_string
}
function get_author {
AUTHOR=$(git config --get user.name || echo ${USER})
echo $AUTHOR
}
# Generate revision number.
# set environment variables REVISION, AUTHOR
function gen_revision_author {
TYPE=$1
get_version
if [[ $STANDALONE != 'yes' ]]; then
git fetch --tags
succeeded=0
attempts=0
max_attempts=1000
while [ $succeeded -eq 0 ] && [ $attempts -le $max_attempts ]; do
attempts=$(($attempts + 1))
if [ "$TYPE" == "major" ]; then
VERSION_REVISION=$(($VERSION_REVISION + 1))
VERSION_MAJOR=$(($VERSION_MAJOR + 1))
VERSION_MINOR=1
# Version cannot be zero, otherwise it breaks CMake
VERSION_PATCH=1
elif [ "$TYPE" == "minor" ] || [ "$TYPE" == "" ]; then
VERSION_REVISION=$(($VERSION_REVISION + 1))
VERSION_MINOR=$(($VERSION_MINOR + 1))
VERSION_PATCH=1
elif [ "$TYPE" == "patch" ] || [ "$TYPE" == "bugfix" ]; then
# VERSION_REVISION not incremented in new scheme.
if [ "$VERSION_MAJOR" -eq "1" ] && [ "$VERSION_MINOR" -eq "1" ]; then
VERSION_REVISION=$(($VERSION_REVISION + 1))
fi
VERSION_PATCH=$(($VERSION_PATCH + 1))
elif [ "$TYPE" == "env" ]; then
echo "Will build revision from env variables -- $VERSION_MAJOR.$VERSION_MINOR.$VERSION_PATCH"
else
echo "Unknown version type $TYPE"
exit 1
fi
gen_version_string
git_tag_grep=`git tag | grep "$VERSION_PREFIX$VERSION_STRING$VERSION_POSTFIX_TAG"`
if [ "$git_tag_grep" == "" ]; then
succeeded=1
fi
done
if [ $succeeded -eq 0 ]; then
echo "Fail to create revision up to $VERSION_REVISION"
exit 1
fi
auto_message="Auto version update to"
git_log_grep=`git log --oneline --max-count=1 | grep "$auto_message"`
if [ "$git_log_grep" == "" ]; then
tag="$VERSION_PREFIX$VERSION_STRING$VERSION_POSTFIX_TAG"
# First tag for correct git describe
echo -e "\nTrying to create tag: $tag"
git tag -a "$tag" -m "$tag"
git_describe=`git describe`
git_hash=`git rev-parse HEAD`
VERSION_DATE=`git show -s --format=%cs $git_hash`
sed -i -e "s/SET(VERSION_REVISION [^) ]*/SET(VERSION_REVISION $VERSION_REVISION/g;" \
-e "s/SET(VERSION_DESCRIBE [^) ]*/SET(VERSION_DESCRIBE $git_describe/g;" \
-e "s/SET(VERSION_GITHASH [^) ]*/SET(VERSION_GITHASH $git_hash/g;" \
-e "s/SET(VERSION_DATE [^) ]*/SET(VERSION_DATE $VERSION_DATE/g;" \
-e "s/SET(VERSION_MAJOR [^) ]*/SET(VERSION_MAJOR $VERSION_MAJOR/g;" \
-e "s/SET(VERSION_MINOR [^) ]*/SET(VERSION_MINOR $VERSION_MINOR/g;" \
-e "s/SET(VERSION_PATCH [^) ]*/SET(VERSION_PATCH $VERSION_PATCH/g;" \
-e "s/SET(VERSION_STRING [^) ]*/SET(VERSION_STRING $VERSION_STRING/g;" \
cmake/autogenerated_versions.txt
gen_changelog "$VERSION_STRING" "" "$AUTHOR" ""
gen_dockerfiles "$VERSION_STRING"
src/Storages/System/StorageSystemContributors.sh ||:
utils/list-versions/list-versions.sh > utils/list-versions/version_date.tsv
git commit -m "$auto_message [$VERSION_STRING] [$VERSION_REVISION]" cmake/autogenerated_versions.txt debian/changelog docker/*/Dockerfile src/Storages/System/StorageSystemContributors.generated.cpp utils/list-versions/version_date.tsv
if [ -z $NO_PUSH ]; then
git push
fi
echo "Generated version: ${VERSION_STRING}, revision: ${VERSION_REVISION}."
# Second tag for correct version information in autogenerated_versions.txt inside tag
if git tag --force -a "$tag" -m "$tag"
then
if [ -z $NO_PUSH ]; then
echo -e "\nTrying to push tag to origin: $tag"
git push origin "$tag"
if [ $? -ne 0 ]
then
git tag -d "$tag"
echo "Fail to create tag"
exit 1
fi
fi
fi
else
get_version
echo reusing old version $VERSION_STRING
fi
fi
AUTHOR=$(git config --get user.name || echo ${USER})
export AUTHOR
}
function get_revision_author {
get_version
AUTHOR=$(get_author)
export AUTHOR
}
# Generate changelog from changelog.in.
function gen_changelog {
VERSION_STRING="$1"
CHDATE="$2"
AUTHOR="$3"
CHLOG="$4"
if [ -z "$VERSION_STRING" ] ; then
get_revision_author
fi
if [ -z "$CHLOG" ] ; then
CHLOG=debian/changelog
fi
if [ -z "$CHDATE" ] ; then
CHDATE=$(LC_ALL=C date -R | sed -e 's/,/\\,/g') # Replace comma to '\,'
fi
sed \
-e "s/[@]VERSION_STRING[@]/$VERSION_STRING/g" \
-e "s/[@]DATE[@]/$CHDATE/g" \
-e "s/[@]AUTHOR[@]/$AUTHOR/g" \
-e "s/[@]EMAIL[@]/$(whoami)@clickhouse.com/g" \
< $CHLOG.in > $CHLOG
}
# Change package versions that are installed for Docker images.
function gen_dockerfiles {
VERSION_STRING="$1"
ls -1 docker/*/Dockerfile | xargs sed -i -r -e 's/ARG version=.+$/ARG version='$VERSION_STRING'/'
}
function make_rpm {
[ -z "$VERSION_STRING" ] && get_version && VERSION_STRING+=${VERSION_POSTFIX}
VERSION_FULL="${VERSION_STRING}"
PACKAGE_DIR=${PACKAGE_DIR=../}
function deb_unpack {
rm -rf $PACKAGE-$VERSION_FULL
alien --verbose --generate --to-rpm --scripts ${PACKAGE_DIR}${PACKAGE}_${VERSION_FULL}_${ARCH}.deb
cd $PACKAGE-$VERSION_FULL
mv ${PACKAGE}-$VERSION_FULL-2.spec ${PACKAGE}-$VERSION_FULL-2.spec.tmp
cat ${PACKAGE}-$VERSION_FULL-2.spec.tmp \
| grep -vF '%dir "/"' \
| grep -vF '%dir "/usr/"' \
| grep -vF '%dir "/usr/bin/"' \
| grep -vF '%dir "/usr/lib/"' \
| grep -vF '%dir "/usr/lib/debug/"' \
| grep -vF '%dir "/usr/lib/.build-id/"' \
| grep -vF '%dir "/usr/share/"' \
| grep -vF '%dir "/usr/share/doc/"' \
| grep -vF '%dir "/lib/"' \
| grep -vF '%dir "/lib/systemd/"' \
| grep -vF '%dir "/lib/systemd/system/"' \
| grep -vF '%dir "/etc/"' \
| grep -vF '%dir "/etc/security/"' \
| grep -vF '%dir "/etc/security/limits.d/"' \
| grep -vF '%dir "/etc/init.d/"' \
| grep -vF '%dir "/etc/cron.d/"' \
| grep -vF '%dir "/etc/systemd/system/"' \
| grep -vF '%dir "/etc/systemd/"' \
| sed -e 's|%config |%config(noreplace) |' \
> ${PACKAGE}-$VERSION_FULL-2.spec
}
function rpm_pack {
rpmbuild --buildroot="$CUR_DIR/${PACKAGE}-$VERSION_FULL" -bb --target ${TARGET} "${PACKAGE}-$VERSION_FULL-2.spec"
cd $CUR_DIR
}
function unpack_pack {
deb_unpack
rpm_pack
}
PACKAGE=clickhouse-server
ARCH=all
TARGET=noarch
deb_unpack
mv ${PACKAGE}-$VERSION_FULL-2.spec ${PACKAGE}-$VERSION_FULL-2.spec_tmp
echo "Requires: clickhouse-common-static = $VERSION_FULL-2" >> ${PACKAGE}-$VERSION_FULL-2.spec
echo "Requires: tzdata" >> ${PACKAGE}-$VERSION_FULL-2.spec
echo "Requires: initscripts" >> ${PACKAGE}-$VERSION_FULL-2.spec
echo "Obsoletes: clickhouse-server-common < $VERSION_FULL" >> ${PACKAGE}-$VERSION_FULL-2.spec
cat ${PACKAGE}-$VERSION_FULL-2.spec_tmp >> ${PACKAGE}-$VERSION_FULL-2.spec
rpm_pack
PACKAGE=clickhouse-client
ARCH=all
TARGET=noarch
deb_unpack
mv ${PACKAGE}-$VERSION_FULL-2.spec ${PACKAGE}-$VERSION_FULL-2.spec_tmp
echo "Requires: clickhouse-common-static = $VERSION_FULL-2" >> ${PACKAGE}-$VERSION_FULL-2.spec
cat ${PACKAGE}-$VERSION_FULL-2.spec_tmp >> ${PACKAGE}-$VERSION_FULL-2.spec
rpm_pack
PACKAGE=clickhouse-common-static
ARCH=amd64
TARGET=x86_64
unpack_pack
PACKAGE=clickhouse-common-static-dbg
ARCH=amd64
TARGET=x86_64
unpack_pack
mv clickhouse-*-${VERSION_FULL}-2.*.rpm ${PACKAGE_DIR}
}
function make_tgz {
[ -z "$VERSION_STRING" ] && get_version && VERSION_STRING+=${VERSION_POSTFIX}
VERSION_FULL="${VERSION_STRING}"
PACKAGE_DIR=${PACKAGE_DIR=../}
for PACKAGE in clickhouse-server clickhouse-client clickhouse-common-static clickhouse-common-static-dbg; do
alien --verbose --scripts --generate --to-tgz ${PACKAGE_DIR}${PACKAGE}_${VERSION_FULL}_*.deb
PKGDIR="./${PACKAGE}-${VERSION_FULL}"
if [ ! -d "$PKGDIR/install" ]; then
mkdir "$PKGDIR/install"
fi
if [ ! -f "$PKGDIR/install/doinst.sh" ]; then
echo '#!/bin/sh' > "$PKGDIR/install/doinst.sh"
echo 'set -e' >> "$PKGDIR/install/doinst.sh"
fi
SCRIPT_TEXT='
SCRIPTPATH="$( cd "$(dirname "$0")" ; pwd -P )"
for filepath in `find $SCRIPTPATH/.. -type f -or -type l | grep -v "\.\./install/"`; do
destpath=${filepath##$SCRIPTPATH/..}
mkdir -p $(dirname "$destpath")
cp -r "$filepath" "$destpath"
done
'
echo "$SCRIPT_TEXT" | sed -i "2r /dev/stdin" "$PKGDIR/install/doinst.sh"
chmod +x "$PKGDIR/install/doinst.sh"
if [ -f "/usr/bin/pigz" ]; then
tar --use-compress-program=pigz -cf "${PACKAGE}-${VERSION_FULL}.tgz" "$PKGDIR"
else
tar -czf "${PACKAGE}-${VERSION_FULL}.tgz" "$PKGDIR"
fi
rm -r $PKGDIR
done
mv clickhouse-*-${VERSION_FULL}.tgz ${PACKAGE_DIR}
}