Mirror of https://github.com/ClickHouse/ClickHouse.git
Commit c9bb986330: Merge branch 'master' into replicated-mergetree-mutations

Conflicts:
	dbms/src/Storages/MergeTree/AbandonableLockInZooKeeper.h
	dbms/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.cpp
	dbms/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp
	dbms/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h
	dbms/src/Storages/StorageReplicatedMergeTree.cpp
@@ -1,3 +1,8 @@
+# ClickHouse release 1.1.54383, 2018-05-22
+
+## Bug fixes:
+* Fixed a slowdown of replication queue if a table has many replicas.
+
 # ClickHouse release 1.1.54381, 2018-05-14
 
 ## Bug fixes:
@@ -1,3 +1,7 @@
+# ClickHouse release 1.1.54383, 2018-05-22
+
+## Bug fixes:
+* Fixed a slowdown of the replication queue when there are many replicas.
 # ClickHouse release 1.1.54381, 2018-05-14
 
 ## Bug fixes:
@@ -1,4 +1,4 @@
-option (ENABLE_EMBEDDED_COMPILER "Set to TRUE to enable support for 'compile' option for query execution" 0)
+option (ENABLE_EMBEDDED_COMPILER "Set to TRUE to enable support for 'compile' option for query execution" 1)
 option (USE_INTERNAL_LLVM_LIBRARY "Use bundled or system LLVM library. Default: system library for quicker developer builds." ${APPLE})
 
 if (ENABLE_EMBEDDED_COMPILER)
@@ -55,3 +55,14 @@ if (ENABLE_EMBEDDED_COMPILER)
         message(STATUS "LLVM C++ compiler flags: ${LLVM_CXXFLAGS}")
     endif()
 endif()
+
+
+function(llvm_libs_all REQUIRED_LLVM_LIBRARIES)
+    llvm_map_components_to_libnames (result all)
+    list (REMOVE_ITEM result "LTO" "LLVM")
+    if (TERMCAP_LIBRARY)
+        list (APPEND result ${TERMCAP_LIBRARY})
+    endif ()
+    list (APPEND result ${CMAKE_DL_LIBS})
+    set (${REQUIRED_LLVM_LIBRARIES} ${result} PARENT_SCOPE)
+endfunction()
@@ -1,5 +1,7 @@
 #!/bin/bash -e
 
+# set -x
+
 # This script collects all the header files needed to compile a given translation unit
 # and copies them, preserving their paths, into the DST directory.
 # This can then be used to compile the translation unit on another server,
@@ -100,11 +100,7 @@ else ()
 endif ()
 
 if (USE_EMBEDDED_COMPILER)
-    llvm_map_components_to_libnames(REQUIRED_LLVM_LIBRARIES all)
-    if (TERMCAP_LIBRARY)
-        list(APPEND REQUIRED_LLVM_LIBRARIES ${TERMCAP_LIBRARY})
-    endif ()
-    list(APPEND REQUIRED_LLVM_LIBRARIES ${CMAKE_DL_LIBS})
+    llvm_libs_all(REQUIRED_LLVM_LIBRARIES)
 
     target_link_libraries (dbms ${REQUIRED_LLVM_LIBRARIES})
     target_include_directories (dbms BEFORE PUBLIC ${LLVM_INCLUDE_DIRS})
@@ -245,9 +241,18 @@ target_include_directories (clickhouse_common_io BEFORE PUBLIC ${DOUBLE_CONVERSI
 # also for copy_headers.sh:
 target_include_directories (clickhouse_common_io BEFORE PRIVATE ${COMMON_INCLUDE_DIR})
 
-if (USE_EMBEDDED_COMPILER)
+# TODO: fix and enable:
+if (0 AND USE_EMBEDDED_COMPILER)
     add_custom_target(copy-headers ALL env CLANG=${CMAKE_CURRENT_BINARY_DIR}/src/Server/clickhouse-clang BUILD_PATH=${ClickHouse_BINARY_DIR} DESTDIR=${ClickHouse_SOURCE_DIR} ${ClickHouse_SOURCE_DIR}/copy_headers.sh ${ClickHouse_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}/headers DEPENDS clickhouse-clang WORKING_DIRECTORY ${ClickHouse_SOURCE_DIR} SOURCES ${ClickHouse_SOURCE_DIR}/copy_headers.sh)
     install(DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/headers DESTINATION ${CMAKE_INSTALL_DATAROOTDIR}/clickhouse COMPONENT clickhouse)
 
+    if (USE_INTERNAL_LLVM_LIBRARY)
+        set(CLANG_HEADERS_DIR "${ClickHouse_SOURCE_DIR}/contrib/llvm/clang/lib/Headers")
+        set(CLANG_HEADERS_DEST "${CMAKE_CURRENT_BINARY_DIR}/headers/usr/local/lib/clang/${LLVM_VERSION}/include") # original: ${LLVM_LIBRARY_OUTPUT_INTDIR}/clang/${CLANG_VERSION}/include
+        add_custom_target(copy-headers-clang ALL ${CMAKE_COMMAND} -E make_directory ${CLANG_HEADERS_DEST} && ${CMAKE_COMMAND} -E copy_if_different ${CLANG_HEADERS_DIR}/* ${CLANG_HEADERS_DEST} )
+        add_dependencies(copy-headers copy-headers-clang)
+    endif ()
+
     add_dependencies(clickhouse-bundle copy-headers)
 endif ()
+
dbms/scripts/gen_benchmark_data/README.md (new file, 1 line)
@@ -0,0 +1 @@
+Hits table generator based on an LSTM neural network trained on real hits. You need to have weights for the model, or train the model on real hits, to generate data.
dbms/scripts/gen_benchmark_data/generate.py (new file, 22 lines)
@@ -0,0 +1,22 @@
+import argparse
+
+from model import Model
+parser = argparse.ArgumentParser(
+    formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+parser.add_argument('-n', type=int, default=100000,
+                    help='number of objects to generate')
+parser.add_argument('--output_file', type=str, default='out.tsv',
+                    help='output file name')
+parser.add_argument('--weights_path', type=str,
+                    help='path to weights')
+
+
+args = parser.parse_args()
+
+if __name__ == '__main__':
+    if not args.weights_path:
+        raise Exception('please specify path to model weights with --weights_path')
+
+    gen = Model()
+    gen.generate(args.n, args.output_file, args.weights_path)
dbms/scripts/gen_benchmark_data/model.py (new file, 147 lines)
@@ -0,0 +1,147 @@
+import numpy as np
+import os
+import pickle
+import tensorflow as tf
+
+from random import sample
+from keras.layers import Dense, Embedding
+from tqdm import tqdm
+
+RNN_NUM_UNITS = 256
+EMB_SIZE = 32
+MAX_LENGTH = 1049
+
+
+with open('tokens', 'rb') as f:
+    tokens = pickle.load(f)
+n_tokens = len(tokens)
+
+token_to_id = {c: i for i, c in enumerate(tokens)}
+
+
+def to_matrix(objects, max_len=None, pad=0, dtype='int32'):
+    max_len = max_len or max(map(len, objects))
+    matrix = np.zeros([len(objects), max_len], dtype) + pad
+
+    for i in range(len(objects)):
+        name_ix = list(map(token_to_id.get, objects[i]))
+        matrix[i, :len(name_ix)] = name_ix
+    return matrix.T
+
+
+class Model:
+    def __init__(self, learning_rate=0.0001):
+        # an embedding layer that converts character ids into embeddings
+        self.embed_x = Embedding(n_tokens, EMB_SIZE)
+        get_h_next = Dense(1024, activation='relu')
+        # a dense layer that maps current hidden state
+        # to probabilities of characters [h_t+1]->P(x_t+1|h_t+1)
+        self.get_probas = Dense(n_tokens, activation='softmax')
+
+        self.input_sequence = tf.placeholder('int32', (MAX_LENGTH, None))
+        batch_size = tf.shape(self.input_sequence)[1]
+
+        self.gru_cell_first = tf.nn.rnn_cell.GRUCell(RNN_NUM_UNITS)
+        self.lstm_cell_second = tf.nn.rnn_cell.LSTMCell(RNN_NUM_UNITS)
+
+        h_prev_first = self.gru_cell_first.zero_state(batch_size, dtype=tf.float32)
+        h_prev_second = tf.nn.rnn_cell.LSTMStateTuple(
+            tf.zeros([batch_size, RNN_NUM_UNITS]),  # initial cell state
+            tf.zeros([batch_size, RNN_NUM_UNITS])   # initial hidden state
+        )
+
+        predicted_probas = []
+        for t in range(MAX_LENGTH):
+            x_t = self.input_sequence[t]
+            # convert character id into embedding
+            x_t_emb = self.embed_x(tf.reshape(x_t, [-1, 1]))[:, 0]
+
+            out_next_first, h_next_first = self.gru_cell_first(x_t_emb, h_prev_first)
+            h_prev_first = h_next_first
+
+            out_next_second, h_next_second = self.lstm_cell_second(out_next_first, h_prev_second)
+            h_prev_second = h_next_second
+
+            probas_next = self.get_probas(out_next_second)
+            predicted_probas.append(probas_next)
+
+        predicted_probas = tf.stack(predicted_probas)
+
+        predictions_matrix = tf.reshape(predicted_probas[:-1], [-1, len(tokens)])
+        answers_matrix = tf.one_hot(tf.reshape(self.input_sequence[1:], [-1]), n_tokens)
+
+        self.loss = tf.reduce_mean(tf.reduce_sum(
+            -answers_matrix * tf.log(tf.clip_by_value(predictions_matrix, 1e-7, 1.0)),
+            reduction_indices=[1]
+        ))
+        optimizer = tf.train.AdamOptimizer(learning_rate)
+        gvs = optimizer.compute_gradients(self.loss)
+        capped_gvs = [(gr if gr is None else tf.clip_by_value(gr, -1., 1.), var) for gr, var in gvs]
+        self.optimize = optimizer.apply_gradients(capped_gvs)
+
+        self.sess = tf.Session()
+        self.sess.run(tf.global_variables_initializer())
+        self.saver = tf.train.Saver()
+
+    def train(self, train_data_path, save_dir, num_iters, batch_size=64, restore_from=False):
+        history = []
+        if restore_from:
+            with open(restore_from + '_history') as f:
+                history = pickle.load(f)
+            self.saver.restore(self.sess, restore_from)
+        with open(train_data_path, 'r') as f:
+            train_data = f.readlines()
+
+        train_data = filter(lambda a: len(a) < MAX_LENGTH, train_data)
+
+        for i in tqdm(range(num_iters)):
+            batch = to_matrix(
+                map(lambda a: '\n' + a.rstrip('\n'), sample(train_data, batch_size)),
+                max_len=MAX_LENGTH
+            )
+            loss_i, _ = self.sess.run([self.loss, self.optimize], {self.input_sequence: batch})
+            history.append(loss_i)
+            if len(history) % 2000 == 0:
+                self.saver.save(self.sess, os.path.join(save_dir, '{}_iters'.format(len(history))))
+        self.saver.save(self.sess, os.path.join(save_dir, '{}_iters'.format(len(history))))
+        with open(os.path.join(save_dir, '{}_iters_history'.format(len(history))), 'w') as f:
+            pickle.dump(history, f)
+
+    def generate(self, num_objects, output_file, weights_path):
+        self.saver.restore(self.sess, weights_path)
+        batch_size = num_objects
+        x_t = tf.placeholder('int32', (None, batch_size))
+        h_t_first = tf.Variable(tf.zeros([batch_size, RNN_NUM_UNITS]))
+        h_t_second = tf.nn.rnn_cell.LSTMStateTuple(
+            tf.Variable(tf.zeros([batch_size, RNN_NUM_UNITS])),
+            tf.Variable(tf.zeros([batch_size, RNN_NUM_UNITS]))
+        )
+
+        x_t_emb = self.embed_x(tf.reshape(x_t, [-1, 1]))[:, 0]
+        first_out_next, next_h_first = self.gru_cell_first(x_t_emb, h_t_first)
+        second_out_next, next_h_second = self.lstm_cell_second(first_out_next, h_t_second)
+        next_probs = self.get_probas(second_out_next)
+
+        x_sequence = np.zeros(shape=(1, batch_size), dtype=int) + token_to_id['\n']
+        self.sess.run(
+            [tf.assign(h_t_first, h_t_first.initial_value),
+             tf.assign(h_t_second[0], h_t_second[0].initial_value),
+             tf.assign(h_t_second[1], h_t_second[1].initial_value)]
+        )
+
+        for i in tqdm(range(MAX_LENGTH - 1)):
+            x_probs, _, _, _ = self.sess.run(
+                [next_probs,
+                 tf.assign(h_t_second[0], next_h_second[0]),
+                 tf.assign(h_t_second[1], next_h_second[1]),
+                 tf.assign(h_t_first, next_h_first)],
+                {x_t: [x_sequence[-1, :]]}
+            )
+
+            next_char = [np.random.choice(n_tokens, p=x_probs[i]) for i in range(batch_size)]
+            if sum(next_char) == 0:
+                break
+            x_sequence = np.append(x_sequence, [next_char], axis=0)
+
+        with open(output_file, 'w') as f:
+            f.writelines([''.join([tokens[ix] for ix in x_sequence.T[k]]) + '\n' for k in range(batch_size)])
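A tiny illustration (hypothetical input strings, not part of the commit) of what to_matrix() above returns: each object becomes a column of token ids, padded with zeros up to max_len, then the matrix is transposed so that row t holds the t-th character of every object, which is exactly the layout Model.train feeds into self.input_sequence:

    from model import to_matrix

    batch = to_matrix(['\nabc', '\nde'], max_len=6)
    # batch.shape == (6, 2); batch[:, 0] holds the token ids of
    # '\n', 'a', 'b', 'c' followed by two rows of zero padding.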
dbms/scripts/gen_benchmark_data/requirements.txt (new file, 3 lines)
@@ -0,0 +1,3 @@
+Keras==2.0.6
+numpy
+tensorflow-gpu==1.4.0
dbms/scripts/gen_benchmark_data/tokens (new file, 506 lines)
@@ -0,0 +1,506 @@
+(lp0
+S'\x83'
+p1
+aS'\x04'
+p2
+aS'\x87'
+p3
+aS'\x8b'
+p4
+aS'\x8f'
+p5
+aS'\x10'
+p6
+aS'\x93'
+p7
+aS'\x14'
+p8
+aS'\x97'
+p9
+aS'\x18'
+p10
+aS'\x9b'
+p11
+aS'\x1c'
+p12
+aS'\x9f'
+p13
+aS' '
+p14
+aS'\xa3'
+p15
+aS'$'
+p16
+aS'\xa7'
+p17
+aS'('
+p18
+aS'\xab'
+p19
+aS','
+p20
+aS'\xaf'
+p21
+aS'0'
+p22
+aS'\xb3'
+p23
+aS'4'
+p24
+aS'\xb7'
+p25
+aS'8'
+p26
+aS'\xbb'
+p27
+aS'<'
+p28
+aS'\xbf'
+p29
+aS'@'
+p30
+aS'\xc3'
+p31
+aS'D'
+p32
+aS'\xc7'
+p33
+aS'H'
+p34
+aS'\xcb'
+p35
+aS'L'
+p36
+aS'\xcf'
+p37
+aS'P'
+p38
+aS'\xd3'
+p39
+aS'T'
+p40
+aS'\xd7'
+p41
+aS'X'
+p42
+aS'\xdb'
+p43
+aS'\\'
+p44
+aS'\xdf'
+p45
+aS'`'
+p46
+aS'\xe3'
+p47
+aS'd'
+p48
+aS'\xe7'
+p49
+aS'h'
+p50
+aS'\xeb'
+p51
+aS'l'
+p52
+aS'\xef'
+p53
+aS'p'
+p54
+aS'\xf3'
+p55
+aS't'
+p56
+aS'\xf7'
+p57
+aS'x'
+p58
+aS'\xfb'
+p59
+aS'|'
+p60
+aS'\xff'
+p61
+aS'\x80'
+p62
+aS'\x03'
+p63
+aS'\x84'
+p64
+aS'\x07'
+p65
+aS'\x88'
+p66
+aS'\x0b'
+p67
+aS'\x8c'
+p68
+aS'\x0f'
+p69
+aS'\x90'
+p70
+aS'\x13'
+p71
+aS'\x94'
+p72
+aS'\x17'
+p73
+aS'\x98'
+p74
+aS'\x1b'
+p75
+aS'\x9c'
+p76
+aS'\x1f'
+p77
+aS'\xa0'
+p78
+aS'#'
+p79
+aS'\xa4'
+p80
+aS"'"
+p81
+aS'\xa8'
+p82
+aS'+'
+p83
+aS'\xac'
+p84
+aS'/'
+p85
+aS'\xb0'
+p86
+aS'3'
+p87
+aS'\xb4'
+p88
+aS'7'
+p89
+aS'\xb8'
+p90
+aS';'
+p91
+aS'\xbc'
+p92
+aS'?'
+p93
+aS'\xc0'
+p94
+aS'C'
+p95
+aS'\xc4'
+p96
+aS'G'
+p97
+aS'\xc8'
+p98
+aS'K'
+p99
+aS'\xcc'
+p100
+aS'O'
+p101
+aS'\xd0'
+p102
+aS'S'
+p103
+aS'\xd4'
+p104
+aS'W'
+p105
+aS'\xd8'
+p106
+aS'['
+p107
+aS'\xdc'
+p108
+aS'_'
+p109
+aS'\xe0'
+p110
+aS'c'
+p111
+aS'\xe4'
+p112
+aS'g'
+p113
+aS'\xe8'
+p114
+aS'k'
+p115
+aS'\xec'
+p116
+aS'o'
+p117
+aS'\xf0'
+p118
+aS's'
+p119
+aS'\xf4'
+p120
+aS'w'
+p121
+aS'\xf8'
+p122
+aS'{'
+p123
+aS'\xfc'
+p124
+aS'\x7f'
+p125
+aS'\x81'
+p126
+aS'\x02'
+p127
+aS'\x85'
+p128
+aS'\x06'
+p129
+aS'\x89'
+p130
+aS'\n'
+p131
+aS'\x8d'
+p132
+aS'\x0e'
+p133
+aS'\x91'
+p134
+aS'\x12'
+p135
+aS'\x95'
+p136
+aS'\x16'
+p137
+aS'\x99'
+p138
+aS'\x1a'
+p139
+aS'\x9d'
+p140
+aS'\x1e'
+p141
+aS'\xa1'
+p142
+aS'"'
+p143
+aS'\xa5'
+p144
+aS'&'
+p145
+aS'\xa9'
+p146
+aS'*'
+p147
+aS'\xad'
+p148
+aS'.'
+p149
+aS'\xb1'
+p150
+aS'2'
+p151
+aS'\xb5'
+p152
+aS'6'
+p153
+aS'\xb9'
+p154
+aS':'
+p155
+aS'\xbd'
+p156
+aS'>'
+p157
+aS'\xc1'
+p158
+aS'B'
+p159
+aS'\xc5'
+p160
+aS'F'
+p161
+aS'\xc9'
+p162
+aS'J'
+p163
+aS'\xcd'
+p164
+aS'N'
+p165
+aS'\xd1'
+p166
+aS'R'
+p167
+aS'\xd5'
+p168
+aS'V'
+p169
+aS'\xd9'
+p170
+aS'Z'
+p171
+aS'\xdd'
+p172
+aS'^'
+p173
+aS'\xe1'
+p174
+aS'b'
+p175
+aS'\xe5'
+p176
+aS'f'
+p177
+aS'\xe9'
+p178
+aS'j'
+p179
+aS'\xed'
+p180
+aS'n'
+p181
+aS'\xf1'
+p182
+aS'r'
+p183
+aS'\xf5'
+p184
+aS'v'
+p185
+aS'\xf9'
+p186
+aS'z'
+p187
+aS'\xfd'
+p188
+aS'~'
+p189
+aS'\x01'
+p190
+aS'\x82'
+p191
+aS'\x05'
+p192
+aS'\x86'
+p193
+aS'\t'
+p194
+aS'\x8a'
+p195
+aS'\x8e'
+p196
+aS'\x11'
+p197
+aS'\x92'
+p198
+aS'\x15'
+p199
+aS'\x96'
+p200
+aS'\x19'
+p201
+aS'\x9a'
+p202
+aS'\x1d'
+p203
+aS'\x9e'
+p204
+aS'!'
+p205
+aS'\xa2'
+p206
+aS'%'
+p207
+aS'\xa6'
+p208
+aS')'
+p209
+aS'\xaa'
+p210
+aS'-'
+p211
+aS'\xae'
+p212
+aS'1'
+p213
+aS'\xb2'
+p214
+aS'5'
+p215
+aS'\xb6'
+p216
+aS'9'
+p217
+aS'\xba'
+p218
+aS'='
+p219
+aS'\xbe'
+p220
+aS'A'
+p221
+aS'\xc2'
+p222
+aS'E'
+p223
+aS'\xc6'
+p224
+aS'I'
+p225
+aS'\xca'
+p226
+aS'M'
+p227
+aS'\xce'
+p228
+aS'Q'
+p229
+aS'\xd2'
+p230
+aS'U'
+p231
+aS'\xd6'
+p232
+aS'Y'
+p233
+aS'\xda'
+p234
+aS']'
+p235
+aS'\xde'
+p236
+aS'a'
+p237
+aS'\xe2'
+p238
+aS'e'
+p239
+aS'\xe6'
+p240
+aS'i'
+p241
+aS'\xea'
+p242
+aS'm'
+p243
+aS'\xee'
+p244
+aS'q'
+p245
+aS'\xf2'
+p246
+aS'u'
+p247
+aS'\xf6'
+p248
+aS'y'
+p249
+aS'\xfa'
+p250
+aS'}'
+p251
+aS'\xfe'
+p252
+a.
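The tokens file above is a protocol-0 pickle of the token list that model.py loads at import time. A hypothetical Python 2 helper (not part of the commit) showing how such a file could be rebuilt from a training corpus; model.py only requires that it unpickle to a list containing every character that occurs in the data:

    import pickle

    with open('hits.tsv', 'rb') as f:  # 'hits.tsv' is an assumed corpus file name
        chars = sorted(set(f.read()))  # unique characters in the corpus (Python 2 str)

    with open('tokens', 'wb') as f:
        pickle.dump(list(chars), f, protocol=0)  # protocol 0 gives the textual form above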
dbms/scripts/gen_benchmark_data/train.py (new file, 26 lines)
@@ -0,0 +1,26 @@
+import argparse
+
+from model import Model
+parser = argparse.ArgumentParser(
+    formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+parser.add_argument('--n_iter', type=int, default=10000,
+                    help='number of iterations')
+parser.add_argument('--save_dir', type=str, default='save',
+                    help='dir for saving weights')
+parser.add_argument('--data_path', type=str,
+                    help='path to train data')
+parser.add_argument('--learning_rate', type=float, default=0.0001,
+                    help='learning rate')
+parser.add_argument('--batch_size', type=int, default=64,
+                    help='batch size')
+parser.add_argument('--restore_from', type=str,
+                    help='path to saved training weights')
+
+args = parser.parse_args()
+
+if __name__ == '__main__':
+    if not args.data_path:
+        raise Exception('please specify path to train data with --data_path')
+
+    gen = Model(args.learning_rate)
+    gen.train(args.data_path, args.save_dir, args.n_iter, args.batch_size, args.restore_from)
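Taken together, the scripts above run in two stages. A hypothetical end-to-end session (the data file and checkpoint names are illustrative; the flags are the ones defined by the two argument parsers):

    # python train.py --data_path hits.tsv --save_dir save --n_iter 10000
    # python generate.py --weights_path save/10000_iters --output_file out.tsv -n 100000

    # Equivalently, driving the Model class directly:
    from model import Model

    gen = Model(learning_rate=0.0001)
    gen.train('hits.tsv', 'save', num_iters=10000)       # checkpoints are saved as '<n>_iters'
    gen.generate(100000, 'out.tsv', 'save/10000_iters')  # writes the generated rows to out.tsv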
@@ -17,9 +17,9 @@ using namespace DB;
 
 TEST(zkutil, zookeeper_connected)
 {
-    auto zookeeper = std::make_unique<zkutil::ZooKeeper>("localhost:2181");
     try
     {
+        auto zookeeper = std::make_unique<zkutil::ZooKeeper>("localhost:2181");
         zookeeper->exists("/");
     }
     catch (...)
@@ -130,8 +130,16 @@ bool JSONEachRowRowInputStream::read(MutableColumns & columns)
 
         read_columns[index] = true;
 
+        try
+        {
             header.getByPosition(index).type->deserializeTextJSON(*columns[index], istr);
         }
+        catch (Exception & e)
+        {
+            e.addMessage("(while read the value of key " + name_ref.toString() + ")");
+            throw;
+        }
+    }
 
     /// Fill non-visited columns with the default values.
     for (size_t i = 0; i < num_columns; ++i)
@@ -1732,6 +1732,11 @@ void Context::setFormatSchemaPath(const String & path)
     shared->format_schema_path = path;
 }
 
+Context::getSampleBlockCacheType & Context::getSampleBlockCache() const
+{
+    return getQueryContext().get_sample_block_cache;
+}
+
 std::shared_ptr<ActionLocksManager> Context::getActionLocksManager()
 {
     auto lock = getLock();
@@ -1742,7 +1747,6 @@ std::shared_ptr<ActionLocksManager> Context::getActionLocksManager()
     return shared->action_locks_manager;
 }
 
-
 SessionCleaner::~SessionCleaner()
 {
     try
@@ -10,6 +10,7 @@
 #include <common/MultiVersion.h>
 #include <Core/Types.h>
 #include <Core/NamesAndTypes.h>
+#include <Core/Block.h>
 #include <Interpreters/Settings.h>
 #include <Interpreters/ClientInfo.h>
 #include <IO/CompressionSettings.h>
@@ -398,6 +399,10 @@ public:
     /// User name and session identifier. Named sessions are local to users.
     using SessionKey = std::pair<String, String>;
 
+    using getSampleBlockCacheType = std::unordered_map<std::string, Block>;
+    mutable Context::getSampleBlockCacheType get_sample_block_cache;
+    getSampleBlockCacheType & getSampleBlockCache() const;
+
 private:
     /** Check if the current client has access to the specified database.
       * If access is denied, throw an exception.
@@ -9,7 +9,7 @@
 #include <DataTypes/getLeastSupertype.h>
 #include <Columns/ColumnConst.h>
 #include <Common/typeid_cast.h>
-
+#include <Parsers/queryToString.h>
 
 namespace DB
 {
@@ -157,7 +157,15 @@ Block InterpreterSelectWithUnionQuery::getSampleBlock(
     const ASTPtr & query_ptr,
     const Context & context)
 {
-    return InterpreterSelectWithUnionQuery(query_ptr, context).getSampleBlock();
+    auto & cache = context.getSampleBlockCache();
+    /// Using query string because query_ptr changes for every internal SELECT
+    auto key = queryToString(query_ptr);
+    if (cache.find(key) != cache.end())
+    {
+        return cache[key];
+    }
+
+    return cache[key] = InterpreterSelectWithUnionQuery(query_ptr, context).getSampleBlock();
 }
 
 
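The hunk above caches getSampleBlock() results in the Context, keyed by the query's string form, because query_ptr itself changes for every internal SELECT. A minimal, self-contained sketch of the same memoization pattern (illustrative Python, not the ClickHouse API):

    cache = {}

    def expensive_header(query):
        # stand-in for interpreting the query just to obtain its result header
        return [name.strip() for name in query.split(',')]

    def get_sample_block(query):
        if query not in cache:
            cache[query] = expensive_header(query)
        return cache[query]

    assert get_sample_block('a, b') is get_sample_block('a, b')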
@@ -109,8 +109,6 @@ static void onExceptionBeforeStart(const String & query, Context & context, time
     bool log_queries = context.getSettingsRef().log_queries;
 
     /// Log the start of query execution into the table if necessary.
-    if (log_queries)
-    {
     QueryLogElement elem;
 
     elem.type = QueryLogElement::EXCEPTION_BEFORE_START;
|
|||||||
setExceptionStackTrace(elem);
|
setExceptionStackTrace(elem);
|
||||||
logException(context, elem);
|
logException(context, elem);
|
||||||
|
|
||||||
|
if (log_queries)
|
||||||
if (auto query_log = context.getQueryLog())
|
if (auto query_log = context.getQueryLog())
|
||||||
query_log->add(elem);
|
query_log->add(elem);
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
static std::tuple<ASTPtr, BlockIO> executeQueryImpl(
|
static std::tuple<ASTPtr, BlockIO> executeQueryImpl(
|
||||||
|
@@ -8,12 +8,7 @@ add_library(clickhouse-compiler-lib
 
 target_compile_options(clickhouse-compiler-lib PRIVATE -fno-rtti -fno-exceptions -g0)
 
-llvm_map_components_to_libnames(REQUIRED_LLVM_LIBRARIES all)
-if (TERMCAP_LIBRARY)
-    list(APPEND REQUIRED_LLVM_LIBRARIES ${TERMCAP_LIBRARY})
-endif ()
-list(APPEND REQUIRED_LLVM_LIBRARIES ${CMAKE_DL_LIBS})
-
+llvm_libs_all(REQUIRED_LLVM_LIBRARIES)
 
 message(STATUS "Using LLVM ${LLVM_VERSION}: ${LLVM_INCLUDE_DIRS} : ${REQUIRED_LLVM_LIBRARIES}")
 
@@ -8,11 +8,7 @@ add_library(clickhouse-compiler-lib
 
 target_compile_options(clickhouse-compiler-lib PRIVATE -fno-rtti -fno-exceptions -g0)
 
-llvm_map_components_to_libnames(REQUIRED_LLVM_LIBRARIES all)
-if (TERMCAP_LIBRARY)
-    list(APPEND REQUIRED_LLVM_LIBRARIES ${TERMCAP_LIBRARY})
-endif ()
-list(APPEND REQUIRED_LLVM_LIBRARIES ${CMAKE_DL_LIBS})
+llvm_libs_all(REQUIRED_LLVM_LIBRARIES)
 
 message(STATUS "Using LLVM ${LLVM_VERSION}: ${LLVM_INCLUDE_DIRS} : ${REQUIRED_LLVM_LIBRARIES}")
 
@@ -8,12 +8,7 @@ add_library(clickhouse-compiler-lib
 
 target_compile_options(clickhouse-compiler-lib PRIVATE -fno-rtti -fno-exceptions -g0)
 
-llvm_map_components_to_libnames(REQUIRED_LLVM_LIBRARIES all)
-if (TERMCAP_LIBRARY)
-    list(APPEND REQUIRED_LLVM_LIBRARIES ${TERMCAP_LIBRARY})
-endif ()
-list(APPEND REQUIRED_LLVM_LIBRARIES ${CMAKE_DL_LIBS})
-
+llvm_libs_all(REQUIRED_LLVM_LIBRARIES)
 
 message(STATUS "Using LLVM ${LLVM_VERSION}: ${LLVM_INCLUDE_DIRS} : ${REQUIRED_LLVM_LIBRARIES}")
 
@@ -98,11 +98,16 @@ struct ReplicatedMergeTreeLogEntryData
 
     std::shared_ptr<ReplaceRangeEntry> replace_range_entry;
 
-    /// Part names that supposed to be added to virtual_parts and future_parts
-    Strings getVirtualPartNames() const
+    /// Returns set of parts that will appear after the entry execution
+    /// These parts are added to virtual_parts
+    Strings getNewPartNames() const
     {
-        /// TODO: Instead of new_part_name use another field for these commands
-        if (type == DROP_RANGE || type == CLEAR_COLUMN)
+        /// Clear column actually does not produce new parts
+        if (type == CLEAR_COLUMN)
+            return {};
+
+        /// It does not add a real part, it just disables merges in that range
+        if (type == DROP_RANGE)
             return {new_part_name};
 
         if (type == REPLACE_RANGE)
@@ -115,6 +120,18 @@ struct ReplicatedMergeTreeLogEntryData
         return {new_part_name};
     }
 
+    /// Returns set of parts that should be blocked during the entry execution
+    /// These parts are added to future_parts
+    Strings getBlockingPartNames() const
+    {
+        Strings res = getNewPartNames();
+
+        if (type == CLEAR_COLUMN)
+            res.emplace_back(new_part_name);
+
+        return res;
+    }
+
     /// Access under queue_mutex, see ReplicatedMergeTreeQueue.
     bool currently_executing = false;    /// Whether the action is executing now.
     /// These several fields are informational only (for viewing by the user using system tables).
@@ -134,7 +151,7 @@ struct ReplicatedMergeTreeLogEntryData
 };
 
 
-struct ReplicatedMergeTreeLogEntry : ReplicatedMergeTreeLogEntryData
+struct ReplicatedMergeTreeLogEntry : public ReplicatedMergeTreeLogEntryData, std::enable_shared_from_this<ReplicatedMergeTreeLogEntry>
 {
     using Ptr = std::shared_ptr<ReplicatedMergeTreeLogEntry>;
 
@@ -143,5 +160,7 @@ struct ReplicatedMergeTreeLogEntry : ReplicatedMergeTreeLogEntryData
     static Ptr parse(const String & s, const zkutil::Stat & stat);
 };
 
+using ReplicatedMergeTreeLogEntryPtr = std::shared_ptr<ReplicatedMergeTreeLogEntry>;
+
 
 }
@@ -97,7 +97,7 @@ void ReplicatedMergeTreeQueue::insertUnlocked(
     std::lock_guard<std::mutex> & /* target_state_lock */,
     std::lock_guard<std::mutex> & /* queue_lock */)
 {
-    for (const String & virtual_part_name : entry->getVirtualPartNames())
+    for (const String & virtual_part_name : entry->getNewPartNames())
         virtual_parts.add(virtual_part_name);
 
     /// Put 'DROP PARTITION' entries at the beginning of the queue not to make superfluous fetches of parts that will be eventually deleted
@@ -592,47 +592,45 @@ void ReplicatedMergeTreeQueue::removePartProducingOpsInRange(zkutil::ZooKeeperPt
 
 
 size_t ReplicatedMergeTreeQueue::getConflictsCountForRange(
-    const MergeTreePartInfo & range, const String & range_znode,
-    String * out_conflicts_description, std::lock_guard<std::mutex> & /* queue_lock */) const
+    const MergeTreePartInfo & range, const LogEntry & entry,
+    String * out_description, std::lock_guard<std::mutex> & /* queue_lock */) const
 {
-    std::vector<std::pair<LogEntryPtr, String>> conflicts;
+    std::vector<std::pair<String, LogEntryPtr>> conflicts;
 
-    for (auto & elem : queue)
+    for (auto & future_part_elem : future_parts)
     {
-        if (!elem->currently_executing || elem->znode_name == range_znode)
+        /// Do not check itself log entry
+        if (future_part_elem.second->znode_name == entry.znode_name)
             continue;
 
-        for (const String & new_part_name : elem->getVirtualPartNames())
+        if (!range.isDisjoint(MergeTreePartInfo::fromPartName(future_part_elem.first, format_version)))
         {
-            if (!range.isDisjoint(MergeTreePartInfo::fromPartName(new_part_name, format_version)))
-            {
-                conflicts.emplace_back(elem, new_part_name);
+            conflicts.emplace_back(future_part_elem.first, future_part_elem.second);
             continue;
         }
     }
-    }
 
-    if (out_conflicts_description)
+    if (out_description)
     {
         std::stringstream ss;
-        ss << "Can't execute command for range " << range.getPartName() << " (entry " << range_znode << "). ";
+        ss << "Can't execute command for range " << range.getPartName() << " (entry " << entry.znode_name << "). ";
         ss << "There are " << conflicts.size() << " currently executing entries blocking it: ";
         for (const auto & conflict : conflicts)
-            ss << conflict.first->typeToString() << " part " << conflict.second << ", ";
+            ss << conflict.second->typeToString() << " part " << conflict.first << ", ";
 
-        *out_conflicts_description = ss.str();
+        *out_description = ss.str();
    }
 
     return conflicts.size();
 }
 
 
-void ReplicatedMergeTreeQueue::checkThereAreNoConflictsInRange(const MergeTreePartInfo & range, const String & range_znode_name)
+void ReplicatedMergeTreeQueue::checkThereAreNoConflictsInRange(const MergeTreePartInfo & range, const LogEntry & entry)
 {
     String conflicts_description;
     std::lock_guard<std::mutex> lock(queue_mutex);
 
-    if (0 != getConflictsCountForRange(range, range_znode_name, &conflicts_description, lock))
+    if (0 != getConflictsCountForRange(range, entry, &conflicts_description, lock))
         throw Exception(conflicts_description, ErrorCodes::UNFINISHED);
 }
 
@@ -658,14 +656,14 @@ bool ReplicatedMergeTreeQueue::isNotCoveredByFuturePartsImpl(const String & new_
     auto result_part = MergeTreePartInfo::fromPartName(new_part_name, format_version);
 
     /// It can slow down when the size of `future_parts` is large. But it can not be large, since `BackgroundProcessingPool` is limited.
-    for (const auto & future_part_name : future_parts)
+    for (const auto & future_part_elem : future_parts)
     {
-        auto future_part = MergeTreePartInfo::fromPartName(future_part_name, format_version);
+        auto future_part = MergeTreePartInfo::fromPartName(future_part_elem.first, format_version);
 
         if (future_part.contains(result_part))
         {
             out_reason = "Not executing log entry for part " + new_part_name + " because it is covered by part "
-                + future_part_name + " that is currently executing";
+                + future_part_elem.first + " that is currently executing";
             return false;
         }
     }
@@ -673,7 +671,7 @@ bool ReplicatedMergeTreeQueue::isNotCoveredByFuturePartsImpl(const String & new_
     return true;
 }
 
-bool ReplicatedMergeTreeQueue::addFuturePartIfNotCoveredByThem(const String & part_name, const LogEntry & entry, String & reject_reason)
+bool ReplicatedMergeTreeQueue::addFuturePartIfNotCoveredByThem(const String & part_name, LogEntry & entry, String & reject_reason)
 {
     std::lock_guard lock(queue_mutex);
 
@@ -698,7 +696,7 @@ bool ReplicatedMergeTreeQueue::shouldExecuteLogEntry(
         || entry.type == LogEntry::GET_PART
         || entry.type == LogEntry::MUTATE_PART)
     {
-        for (const String & new_part_name : entry.getVirtualPartNames())
+        for (const String & new_part_name : entry.getBlockingPartNames())
         {
             if (!isNotCoveredByFuturePartsImpl(new_part_name, out_postpone_reason, queue_lock))
             {
@@ -765,7 +763,7 @@ bool ReplicatedMergeTreeQueue::shouldExecuteLogEntry(
         String range_name = (entry.type == LogEntry::REPLACE_RANGE) ? entry.replace_range_entry->drop_range_part_name : entry.new_part_name;
         auto range = MergeTreePartInfo::fromPartName(range_name, format_version);
 
-        if (0 != getConflictsCountForRange(range, entry.znode_name, &conflicts_description, queue_lock))
+        if (0 != getConflictsCountForRange(range, entry, &conflicts_description, queue_lock))
         {
             LOG_DEBUG(log, conflicts_description);
             return false;
@@ -799,22 +797,22 @@ Int64 ReplicatedMergeTreeQueue::getCurrentMutationVersion(const String & partiti
 }
 
 
-ReplicatedMergeTreeQueue::CurrentlyExecuting::CurrentlyExecuting(ReplicatedMergeTreeQueue::LogEntryPtr & entry, ReplicatedMergeTreeQueue & queue)
-    : entry(entry), queue(queue)
+ReplicatedMergeTreeQueue::CurrentlyExecuting::CurrentlyExecuting(const ReplicatedMergeTreeQueue::LogEntryPtr & entry_, ReplicatedMergeTreeQueue & queue)
+    : entry(entry_), queue(queue)
 {
     entry->currently_executing = true;
     ++entry->num_tries;
     entry->last_attempt_time = time(nullptr);
 
-    for (const String & new_part_name : entry->getVirtualPartNames())
+    for (const String & new_part_name : entry->getBlockingPartNames())
     {
-        if (!queue.future_parts.insert(new_part_name).second)
+        if (!queue.future_parts.emplace(new_part_name, entry).second)
             throw Exception("Tagging already tagged future part " + new_part_name + ". This is a bug.", ErrorCodes::LOGICAL_ERROR);
     }
 }
 
 
-void ReplicatedMergeTreeQueue::CurrentlyExecuting::setActualPartName(const ReplicatedMergeTreeLogEntry & entry,
+void ReplicatedMergeTreeQueue::CurrentlyExecuting::setActualPartName(ReplicatedMergeTreeQueue::LogEntry & entry,
     const String & actual_part_name, ReplicatedMergeTreeQueue & queue)
 {
     if (!entry.actual_new_part_name.empty())
@@ -826,8 +824,8 @@ void ReplicatedMergeTreeQueue::CurrentlyExecuting::setActualPartName(const Repli
     if (entry.actual_new_part_name == entry.new_part_name)
         return;
 
-    if (!queue.future_parts.insert(entry.actual_new_part_name).second)
-        throw Exception("Attaching already exsisting future part " + entry.actual_new_part_name + ". This is a bug.", ErrorCodes::LOGICAL_ERROR);
+    if (!queue.future_parts.emplace(entry.actual_new_part_name, entry.shared_from_this()).second)
+        throw Exception("Attaching already existing future part " + entry.actual_new_part_name + ". This is a bug.", ErrorCodes::LOGICAL_ERROR);
 }
 
 
@@ -838,7 +836,7 @@ ReplicatedMergeTreeQueue::CurrentlyExecuting::~CurrentlyExecuting()
     entry->currently_executing = false;
     entry->execution_complete.notify_all();
 
-    for (const String & new_part_name : entry->getVirtualPartNames())
+    for (const String & new_part_name : entry->getBlockingPartNames())
     {
         if (!queue.future_parts.erase(new_part_name))
             LOG_ERROR(queue.log, "Untagging already untagged future part " + new_part_name + ". This is a bug.");
@@ -27,13 +27,13 @@ private:
     friend class CurrentlyExecuting;
     friend class ReplicatedMergeTreeMergePredicate;
 
-    using StringSet = std::set<String>;
-
     using LogEntry = ReplicatedMergeTreeLogEntry;
     using LogEntryPtr = LogEntry::Ptr;
 
     using Queue = std::list<LogEntryPtr>;
 
+    using StringSet = std::set<String>;
+
     struct ByTime
     {
         bool operator()(const LogEntryPtr & lhs, const LogEntryPtr & rhs) const
@@ -71,7 +71,8 @@ private:
 
     /// parts that will appear as a result of actions performed right now by background threads (these actions are not in the queue).
     /// Used to not perform other actions at the same time with these parts.
-    StringSet future_parts;
+    using FuturePartsSet = std::map<String, LogEntryPtr>;
+    FuturePartsSet future_parts;
 
 
     /// Protects virtual_parts, log_pointer, mutations.
@@ -168,9 +169,9 @@ private:
         std::optional<time_t> min_unprocessed_insert_time_changed,
         std::optional<time_t> max_processed_insert_time_changed) const;
 
-    /// Returns list of currently executing entries blocking execution of a command modifying specified range
+    /// Returns list of currently executing parts blocking execution of a command modifying specified range
     size_t getConflictsCountForRange(
-        const MergeTreePartInfo & range, const String & range_znode, String * out_conflicts_description,
+        const MergeTreePartInfo & range, const LogEntry & entry, String * out_description,
         std::lock_guard<std::mutex> & queue_lock) const;
 
     /// Marks the element of the queue as running.
@@ -183,10 +184,10 @@ private:
         friend class ReplicatedMergeTreeQueue;
 
         /// Created only in the selectEntryToProcess function. It is called under mutex.
-        CurrentlyExecuting(ReplicatedMergeTreeQueue::LogEntryPtr & entry, ReplicatedMergeTreeQueue & queue);
+        CurrentlyExecuting(const ReplicatedMergeTreeQueue::LogEntryPtr & entry_, ReplicatedMergeTreeQueue & queue);
 
         /// In case of fetch, we determine actual part during the execution, so we need to update entry. It is called under queue_mutex.
-        static void setActualPartName(const ReplicatedMergeTreeLogEntry & entry, const String & actual_part_name,
+        static void setActualPartName(ReplicatedMergeTreeQueue::LogEntry & entry, const String & actual_part_name,
             ReplicatedMergeTreeQueue & queue);
     public:
         ~CurrentlyExecuting();
@@ -231,7 +232,7 @@ public:
 
     /** Throws an exception if there are currently executing entries in the range.
       */
-    void checkThereAreNoConflictsInRange(const MergeTreePartInfo & range, const String & range_znode_name);
+    void checkThereAreNoConflictsInRange(const MergeTreePartInfo & range, const LogEntry & entry);
 
     /** In the case where there are not enough parts to perform the merge in part_name
       * - move actions with merged parts to the end of the queue
@@ -269,7 +270,7 @@ public:
     /** Check that part isn't in currently generating parts and isn't covered by them and add it to future_parts.
      * Locks queue's mutex.
      */
-    bool addFuturePartIfNotCoveredByThem(const String & part_name, const LogEntry & entry, String & reject_reason);
+    bool addFuturePartIfNotCoveredByThem(const String & part_name, LogEntry & entry, String & reject_reason);
 
     /// A blocker that stops selects from the queue
     ActionBlocker actions_blocker;
@@ -69,10 +69,15 @@ ASTPtr rewriteSelectQuery(const ASTPtr & query, const std::string & database, co
 }
 
 /// insert query has database and table names as bare strings
-/// Creates a copy of query, changes the database and table names.
+/// If the query is null, it creates an insert query with the database and tables
+/// Or it creates a copy of query, changes the database and table names.
 ASTPtr rewriteInsertQuery(const ASTPtr & query, const std::string & database, const std::string & table)
 {
-    auto modified_query_ast = query->clone();
+    ASTPtr modified_query_ast = nullptr;
+    if (query == nullptr)
+        modified_query_ast = std::make_shared<ASTInsertQuery>();
+    else
+        modified_query_ast = query->clone();
 
     auto & actual_query = typeid_cast<ASTInsertQuery &>(*modified_query_ast);
     actual_query.database = database;
@@ -1034,7 +1034,7 @@ String StorageReplicatedMergeTree::getChecksumsForZooKeeper(const MergeTreeDataP
 }
 
 
-bool StorageReplicatedMergeTree::executeLogEntry(const LogEntry & entry)
+bool StorageReplicatedMergeTree::executeLogEntry(LogEntry & entry)
 {
     if (entry.type == LogEntry::DROP_RANGE)
     {
@@ -1168,7 +1168,7 @@ void StorageReplicatedMergeTree::writePartLog(
 }
 
 
-bool StorageReplicatedMergeTree::tryExecuteMerge(const StorageReplicatedMergeTree::LogEntry & entry)
+bool StorageReplicatedMergeTree::tryExecuteMerge(const LogEntry & entry)
 {
     // Log source part names just in case
     {
@@ -1426,7 +1426,7 @@ bool StorageReplicatedMergeTree::tryExecutePartMutation(const StorageReplicatedM
 }
 
 
-bool StorageReplicatedMergeTree::executeFetch(const StorageReplicatedMergeTree::LogEntry & entry)
+bool StorageReplicatedMergeTree::executeFetch(LogEntry & entry)
 {
     String replica = findReplicaHavingCoveringPart(entry, true);
 
|
|||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
void StorageReplicatedMergeTree::executeDropRange(const StorageReplicatedMergeTree::LogEntry & entry)
|
void StorageReplicatedMergeTree::executeDropRange(const LogEntry & entry)
|
||||||
{
|
{
|
||||||
auto drop_range_info = MergeTreePartInfo::fromPartName(entry.new_part_name, data.format_version);
|
auto drop_range_info = MergeTreePartInfo::fromPartName(entry.new_part_name, data.format_version);
|
||||||
queue.removePartProducingOpsInRange(getZooKeeper(), drop_range_info);
|
queue.removePartProducingOpsInRange(getZooKeeper(), drop_range_info);
|
||||||
@@ -1660,10 +1660,6 @@ void StorageReplicatedMergeTree::executeClearColumnInPartition(const LogEntry &
 
     auto entry_part_info = MergeTreePartInfo::fromPartName(entry.new_part_name, data.format_version);
 
-    /// Assume optimistic scenario, i.e. conflicts are very rare
-    /// So, if conflicts are found, throw an exception and will retry execution later
-    queue.checkThereAreNoConflictsInRange(entry_part_info, entry.znode_name);
-
     /// We don't change table structure, only data in some parts
     /// To disable reading from these parts, we will sequentially acquire write lock for each part inside alterDataPart()
     /// If we will lock the whole table here, a deadlock can occur. For example, if we use Buffer table (CLICKHOUSE-3238)
@@ -1681,6 +1677,14 @@ void StorageReplicatedMergeTree::executeClearColumnInPartition(const LogEntry &
     size_t modified_parts = 0;
     auto parts = data.getDataParts();
     auto columns_for_parts = new_columns.getAllPhysical();
 
+    /// Check there are no merges in range again
+    /// TODO: Currently, there are no guarantees that a merge covering entry_part_info will happen during the execution.
+    /// To solve this problem we could add read/write flags for each part in future_parts
+    /// and make more sophisticated checks for merges in shouldExecuteLogEntry().
+    /// But this feature will be useless when the mutation feature is implemented.
+    queue.checkThereAreNoConflictsInRange(entry_part_info, entry);
+
     for (const auto & part : parts)
     {
         if (!entry_part_info.contains(part->info))
@@ -1712,7 +1716,7 @@ void StorageReplicatedMergeTree::executeClearColumnInPartition(const LogEntry &
 }
 
 
-bool StorageReplicatedMergeTree::executeReplaceRange(const StorageReplicatedMergeTree::LogEntry & entry)
+bool StorageReplicatedMergeTree::executeReplaceRange(const LogEntry & entry)
 {
     Stopwatch watch;
     auto & entry_replace = *entry.replace_range_entry;
@@ -2454,7 +2458,7 @@ String StorageReplicatedMergeTree::findReplicaHavingPart(const String & part_nam
 }
 
 
-String StorageReplicatedMergeTree::findReplicaHavingCoveringPart(const LogEntry & entry, bool active)
+String StorageReplicatedMergeTree::findReplicaHavingCoveringPart(LogEntry & entry, bool active)
 {
     auto zookeeper = getZooKeeper();
     Strings replicas = zookeeper->getChildren(zookeeper_path + "/replicas");
@@ -360,7 +360,7 @@ private:
    /** Execute the action from the queue. Throws an exception if something is wrong.
      * Returns whether or not it succeeds. If it did not work, write it to the end of the queue.
      */
-    bool executeLogEntry(const LogEntry & entry);
+    bool executeLogEntry(LogEntry & entry);
 
     void writePartLog(
         PartLogElement::Type type, const ExecutionStatus & execution_status, UInt64 elapsed_ns,
@@ -376,7 +376,7 @@ private:
 
     bool tryExecutePartMutation(const LogEntry & entry);
 
-    bool executeFetch(const LogEntry & entry);
+    bool executeFetch(LogEntry & entry);
 
     void executeClearColumnInPartition(const LogEntry & entry);
 
@@ -430,7 +430,7 @@ private:
      * If found, returns replica name and set 'entry->actual_new_part_name' to name of found largest covering part.
      * If not found, returns empty string.
      */
-    String findReplicaHavingCoveringPart(const LogEntry & entry, bool active);
+    String findReplicaHavingCoveringPart(LogEntry & entry, bool active);
     String findReplicaHavingCoveringPart(const String & part_name, bool active, String & found_part_name);
 
     /** Download the specified part from the specified replica.
@@ -1,4 +1,3 @@
 <yandex>
-    <tcp_port_secure>59440</tcp_port_secure>
     <insert_format_max_block_size>100000</insert_format_max_block_size>
 </yandex>
@@ -17,7 +17,7 @@ CONFIG_CLIENT_DIR=${CONFIG_CLIENT_DIR=$CONFIG_DIR}
 CONFIG_SERVER_DIR=${CONFIG_SERVER_DIR=$CONFIG_DIR}
 [ ! -f "${CONFIG_CLIENT_DIR}client-test.xml" ] && CONFIG_CLIENT_DIR=${CONFIG_CLIENT_DIR:=/etc/clickhouse-client/}
 [ ! -f "${CONFIG_SERVER_DIR}server-test.xml" ] && CONFIG_SERVER_DIR=${CONFIG_SERVER_DIR:=/etc/clickhouse-server/}
-CONFIG_CLIENT=${CONFIG_CLIENT:=${CONFIG_CLIENT_DIR}client-test.xml}
+export CLICKHOUSE_CONFIG_CLIENT=${CLICKHOUSE_CONFIG_CLIENT:=${CONFIG_CLIENT_DIR}client-test.xml}
 export CLICKHOUSE_CONFIG=${CLICKHOUSE_CONFIG:=${CONFIG_SERVER_DIR}server-test.xml}
 [ -x "$CUR_DIR/clickhouse-test" ] && TEST_DIR=${TEST_DIR=$CUR_DIR/}
 [ -d "$CUR_DIR/queries" ] && QUERIES_DIR=${QUERIES_DIR=$CUR_DIR/queries}
@@ -25,8 +25,8 @@ export CLICKHOUSE_CONFIG=${CLICKHOUSE_CONFIG:=${CONFIG_SERVER_DIR}server-test.xm
 [ ! -d "$QUERIES_DIR" ] && [ -d "/usr/share/clickhouse-test/queries" ] && QUERIES_DIR=${QUERIES_DIR=/usr/share/clickhouse-test/queries}
 CLICKHOUSE_EXTRACT_CONFIG=${CLICKHOUSE_EXTRACT_CONFIG:="${BIN_DIR}${CLICKHOUSE_BINARY}-extract-from-config --config=$CLICKHOUSE_CONFIG"}
 
-PORT_RANDOM=${PORT_RANDOM=1}
-if [ "${PORT_RANDOM}" ]; then
+TEST_PORT_RANDOM=${TEST_PORT_RANDOM=1}
+if [ "${TEST_PORT_RANDOM}" ]; then
     CLICKHOUSE_PORT_BASE=${CLICKHOUSE_PORT_BASE:=$(( ( RANDOM % 50000 ) + 10000 ))}
     CLICKHOUSE_PORT_TCP=${CLICKHOUSE_PORT_TCP:=$(($CLICKHOUSE_PORT_BASE + 1))}
     CLICKHOUSE_PORT_HTTP=${CLICKHOUSE_PORT_HTTP:=$(($CLICKHOUSE_PORT_BASE + 2))}
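
Note: every port here uses the ${VAR:=default} idiom, so the random base is only a fallback and any port can be pinned from the environment. A hypothetical reproducible run (assuming the script is started from dbms/tests) could look like:

    # Pin the base port; derived ports follow the same arithmetic as above:
    # tcp = base + 1, http = base + 2, and so on.
    CLICKHOUSE_PORT_BASE=20000 ./clickhouse-test-server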
@@ -58,23 +58,35 @@ if [ "$TEST_GDB" ]; then
 fi
 
 # Start a local clickhouse server which will be used to run tests
-#PATH=$PATH:$BIN_DIR \
-$GDB ${BIN_DIR}clickhouse-server --config-file=$CLICKHOUSE_CONFIG -- --http_port=$CLICKHOUSE_PORT_HTTP --tcp_port=$CLICKHOUSE_PORT_TCP --https_port=$CLICKHOUSE_PORT_HTTPS --tcp_port_secure=$CLICKHOUSE_PORT_TCP_SECURE --interserver_http_port=$CLICKHOUSE_PORT_INTERSERVER > $LOG_DIR/stdout 2>&1 &
+# TODO: fix change shard ports:
+# --remote_servers.test_shard_localhost_secure.shard.replica.port=$CLICKHOUSE_PORT_TCP_SECURE \
+# --remote_servers.test_shard_localhost.shard.replica.port=$CLICKHOUSE_PORT_TCP \
+
+$GDB ${BIN_DIR}clickhouse-server --config-file=$CLICKHOUSE_CONFIG -- \
+    --http_port=$CLICKHOUSE_PORT_HTTP \
+    --tcp_port=$CLICKHOUSE_PORT_TCP \
+    --https_port=$CLICKHOUSE_PORT_HTTPS \
+    --tcp_port_secure=$CLICKHOUSE_PORT_TCP_SECURE \
+    --interserver_http_port=$CLICKHOUSE_PORT_INTERSERVER \
+    > $LOG_DIR/stdout 2>&1 &
 CH_PID=$!
 sleep 3
 
 
 if [ "$GDB" ]; then
     # Long symbols read
     sleep 40
 fi
 
-tail -n50 $LOG_DIR/*
+tail -n50 $LOG_DIR/*.log || true
 
 # Define needed stuff to kill test clickhouse server after tests completion
 function finish {
     kill $CH_PID || true
     wait
-    tail -n 50 $LOG_DIR/*
+    tail -n 50 $LOG_DIR/*.log || true
     if [ "$GDB" ]; then
         cat $DATA_DIR/gdb.log || true
     fi
@@ -88,7 +100,7 @@ if [ -n "$*" ]; then
 else
     TEST_RUN=${TEST_RUN=1}
     TEST_PERF=${TEST_PERF=1}
-    ${BIN_DIR}clickhouse-client --config ${CONFIG_CLIENT} --port $CLICKHOUSE_PORT_TCP -q 'SELECT * from system.build_options;'
-    [ "$TEST_RUN" ] && env PATH=$PATH:$BIN_DIR ${TEST_DIR}clickhouse-test --binary ${BIN_DIR}clickhouse --configclient $CONFIG_CLIENT --configserver $CLICKHOUSE_CONFIG --tmp $DATA_DIR/tmp --queries $QUERIES_DIR $TEST_OPT0 $TEST_OPT
+    ${BIN_DIR}clickhouse-client --config ${CLICKHOUSE_CONFIG_CLIENT} --port $CLICKHOUSE_PORT_TCP -m -n -q 'SELECT * from system.build_options; SELECT * FROM system.clusters;'
+    [ "$TEST_RUN" ] && env PATH=$PATH:$BIN_DIR ${TEST_DIR}clickhouse-test --binary ${BIN_DIR}clickhouse --configclient $CLICKHOUSE_CONFIG_CLIENT --configserver $CLICKHOUSE_CONFIG --tmp $DATA_DIR/tmp --queries $QUERIES_DIR $TEST_OPT0 $TEST_OPT
     ( [ "$TEST_PERF" ] && ${BIN_DIR}clickhouse-performance-test --port $CLICKHOUSE_PORT_TCP --r $CUR_DIR/performance --skip-tags=long $* ) || true
 fi
@@ -0,0 +1,8 @@
+<yandex>
+    <profiles>
+        <default>
+            <distributed_directory_monitor_batch_inserts>1</distributed_directory_monitor_batch_inserts>
+            <min_insert_block_size_rows>3</min_insert_block_size_rows>
+        </default>
+    </profiles>
+</yandex>
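
Note: this new profile caps insert blocks at three rows so the batching test added below produces several small batches. To confirm such a profile took effect, the value can be read back from system.settings (a sketch, assuming a server listening on the default port):

    clickhouse-client -q "SELECT name, value FROM system.settings WHERE name = 'min_insert_block_size_rows'"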
@@ -0,0 +1,7 @@
+<yandex>
+    <profiles>
+        <default>
+            <background_pool_size>0</background_pool_size>
+        </default>
+    </profiles>
+</yandex>
@@ -0,0 +1,20 @@
+<yandex>
+    <remote_servers>
+        <test_cluster>
+            <shard>
+                <replica>
+                    <host>remote</host>
+                    <port>9000</port>
+                </replica>
+            </shard>
+        </test_cluster>
+        <test_local_cluster>
+            <shard>
+                <replica>
+                    <host>localhost</host>
+                    <port>9000</port>
+                </replica>
+            </shard>
+        </test_local_cluster>
+    </remote_servers>
+</yandex>
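
Note: clusters declared under remote_servers surface in the system.clusters table, which gives a quick sanity check that the config was parsed (a sketch, default port assumed):

    clickhouse-client -q "SELECT cluster, shard_num, replica_num, host_name, port FROM system.clusters"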
@@ -0,0 +1,139 @@
+import pytest
+import time
+
+from helpers.cluster import ClickHouseCluster
+from helpers.network import PartitionManager
+from helpers.test_tools import TSV
+
+
+cluster = ClickHouseCluster(__file__)
+
+instance_test_reconnect = cluster.add_instance('instance_test_reconnect', main_configs=['configs/remote_servers.xml'])
+instance_test_inserts_batching = cluster.add_instance(
+    'instance_test_inserts_batching',
+    main_configs=['configs/remote_servers.xml'], user_configs=['configs/enable_distributed_inserts_batching.xml'])
+remote = cluster.add_instance('remote', user_configs=['configs/forbid_background_merges.xml'])
+
+instance_test_inserts_local_cluster = cluster.add_instance(
+    'instance_test_inserts_local_cluster',
+    main_configs=['configs/remote_servers.xml'])
+
+
+@pytest.fixture(scope="module")
+def started_cluster():
+    try:
+        cluster.start()
+
+        remote.query("CREATE TABLE local1 (x UInt32) ENGINE = Log")
+
+        instance_test_reconnect.query('''
+CREATE TABLE distributed (x UInt32) ENGINE = Distributed('test_cluster', 'default', 'local1')
+''')
+        instance_test_reconnect.query("CREATE TABLE local1_source (x UInt32) ENGINE = Memory")
+        instance_test_reconnect.query("CREATE MATERIALIZED VIEW local1_view to distributed AS SELECT x FROM local1_source")
+
+        remote.query("CREATE TABLE local2 (d Date, x UInt32, s String) ENGINE = MergeTree(d, x, 8192)")
+        instance_test_inserts_batching.query('''
+CREATE TABLE distributed (d Date, x UInt32) ENGINE = Distributed('test_cluster', 'default', 'local2')
+''')
+        instance_test_inserts_batching.query("CREATE TABLE local2_source (d Date, x UInt32) ENGINE = Log")
+        instance_test_inserts_batching.query("CREATE MATERIALIZED VIEW local2_view to distributed AS SELECT d,x FROM local2_source")
+
+        instance_test_inserts_local_cluster.query("CREATE TABLE local_source (d Date, x UInt32) ENGINE = Memory")
+        instance_test_inserts_local_cluster.query("CREATE MATERIALIZED VIEW local_view to distributed_on_local AS SELECT d,x FROM local_source")
+        instance_test_inserts_local_cluster.query("CREATE TABLE local (d Date, x UInt32) ENGINE = MergeTree(d, x, 8192)")
+        instance_test_inserts_local_cluster.query('''
+CREATE TABLE distributed_on_local (d Date, x UInt32) ENGINE = Distributed('test_local_cluster', 'default', 'local')
+''')
+
+        yield cluster
+
+    finally:
+        cluster.shutdown()
+
+
+def test_reconnect(started_cluster):
+    instance = instance_test_reconnect
+
+    with PartitionManager() as pm:
+        # Open a connection for insertion.
+        instance.query("INSERT INTO local1_source VALUES (1)")
+        time.sleep(0.5)
+        assert remote.query("SELECT count(*) FROM local1").strip() == '1'
+
+        # Now break the connection.
+        pm.partition_instances(instance, remote, action='REJECT --reject-with tcp-reset')
+        instance.query("INSERT INTO local1_source VALUES (2)")
+        time.sleep(0.5)
+
+        # Heal the partition and insert more data.
+        # The connection must be reestablished and after some time all data must be inserted.
+        pm.heal_all()
+        instance.query("INSERT INTO local1_source VALUES (3)")
+        time.sleep(0.5)
+
+        assert remote.query("SELECT count(*) FROM local1").strip() == '3'
+
+
+def test_inserts_batching(started_cluster):
+    instance = instance_test_inserts_batching
+
+    with PartitionManager() as pm:
+        pm.partition_instances(instance, remote)
+
+        instance.query("INSERT INTO local2_source(d, x) VALUES ('2000-01-01', 1)")
+        # Sleep a bit so that this INSERT forms a batch of its own.
+        time.sleep(0.2)
+
+        instance.query("INSERT INTO local2_source(x, d) VALUES (2, '2000-01-01')")
+
+        for i in range(3, 7):
+            instance.query("INSERT INTO local2_source(d, x) VALUES ('2000-01-01', {})".format(i))
+
+        for i in range(7, 9):
+            instance.query("INSERT INTO local2_source(x, d) VALUES ({}, '2000-01-01')".format(i))
+
+        instance.query("INSERT INTO local2_source(d, x) VALUES ('2000-01-01', 9)")
+
+        # After ALTER the structure of the saved blocks will be different
+        instance.query("DROP TABLE local2_view")
+        instance.query("ALTER TABLE distributed ADD COLUMN s String")
+
+        # Memory Engine doesn't support ALTER so we just DROP/CREATE everything
+        instance.query("DROP TABLE local2_source")
+        instance.query("CREATE TABLE local2_source (d Date, x UInt32, s String) ENGINE = Memory")
+        instance.query("CREATE MATERIALIZED VIEW local2_view to distributed AS SELECT d,x,s FROM local2_source")
+
+        for i in range(10, 13):
+            instance.query("INSERT INTO local2_source(d, x) VALUES ('2000-01-01', {})".format(i))
+
+    time.sleep(1.0)
+
+    result = remote.query("SELECT _part, groupArray(x) FROM local2 GROUP BY _part ORDER BY _part")
+
+    # Explanation: as merges are turned off on remote instance, active parts in local2 table correspond 1-to-1
+    # to inserted blocks.
+    # Batches of max 3 rows are formed as min_insert_block_size_rows = 3.
+    # Blocks:
+    # 1. Failed batch that is retried with the same contents.
+    # 2. Full batch of inserts regardless of the order of columns thanks to the view.
+    # 3. Full batch of inserts regardless of the order of columns thanks to the view.
+    # 4. Full batch of inserts after ALTER (that have different block structure).
+    # 5. What was left to insert before ALTER.
+    expected = '''\
+20000101_20000101_1_1_0 [1]
+20000101_20000101_2_2_0 [2,3,4]
+20000101_20000101_3_3_0 [5,6,7]
+20000101_20000101_4_4_0 [10,11,12]
+20000101_20000101_5_5_0 [8,9]
+'''
+    assert TSV(result) == TSV(expected)
+
+
+def test_inserts_local(started_cluster):
+    instance = instance_test_inserts_local_cluster
+    instance.query("INSERT INTO local_source VALUES ('2000-01-01', 1)")
+    time.sleep(0.5)
+    assert instance.query("SELECT count(*) FROM local").strip() == '1'
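
Note: integration tests of this kind are normally driven by pytest from the dbms/tests/integration directory; the module path below is an assumption for illustration, not part of this commit:

    # Hypothetical invocation: run only this test module, with verbose output.
    cd dbms/tests/integration
    pytest -s test_insert_into_distributed/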
@@ -19,6 +19,7 @@ DROP TABLE test.clear_column;
 
 SELECT '===Replicated case===';
 
+SYSTEM STOP MERGES;
 DROP TABLE IF EXISTS test.clear_column1;
 DROP TABLE IF EXISTS test.clear_column2;
 CREATE TABLE test.clear_column1 (d Date, i Int64) ENGINE = ReplicatedMergeTree('/clickhouse/test/tables/clear_column', '1', d, d, 8192);
@@ -57,6 +58,13 @@ SELECT sum(data_uncompressed_bytes) FROM system.columns WHERE database='test' AN
 ALTER TABLE test.clear_column1 CLEAR COLUMN s IN PARTITION '200001';
 ALTER TABLE test.clear_column1 CLEAR COLUMN s IN PARTITION '200002';
 
+-- Merges cannot be blocked after all manipulations
+SET optimize_throw_if_noop = 1;
+SYSTEM START MERGES;
+OPTIMIZE TABLE test.clear_column1 PARTITION '200001';
+OPTIMIZE TABLE test.clear_column1 PARTITION '200002';
+
+
 -- clear column in empty partition should be Ok
 ALTER TABLE test.clear_column1 CLEAR COLUMN s IN PARTITION '200012', CLEAR COLUMN i IN PARTITION '200012';
 -- Drop empty partition also Ok
@@ -11,7 +11,7 @@ INSERT INTO test.secure1 VALUES (11,12,13,14,15);
 INSERT INTO test.secure2 VALUES (21,22,23,24,25);
 INSERT INTO test.secure3 VALUES (31,32,33,34,35);
 
-SELECT sleep(1);
+SELECT 'sleep', sleep(1);
 
 SELECT * FROM test.secure1 ORDER BY a;
 SELECT * FROM test.secure2 ORDER BY a;
@@ -1,8 +1,8 @@
 1
 2
 3
-1
-0
+4
+sleep 0
 1970-01-02 2 3 4 5
 1970-01-12 12 13 14 15
 1970-01-22 22 23 24 25
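
Note: the reference change follows from sleep() semantics: sleep(1) blocks for one second and returns 0, so prefixing a string literal turns an anonymous 0 in the output into a self-describing line (a sketch, assuming a running server):

    clickhouse-client -q "SELECT 'sleep', sleep(1)"    # prints "sleep" and 0, tab-separated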
@@ -1,22 +1,46 @@
 #!/usr/bin/env bash
+
+#set -x
+
 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 . $CURDIR/../shell_config.sh
 
 # Not default server config needed
 
-tcp_port_secure=`${CLICKHOUSE_EXTRACT_CONFIG} -k tcp_port_secure 2>/dev/null`
-if [ -z ${tcp_port_secure} ]; then
+if [ -n $CLICKHOUSE_CONFIG_CLIENT ]; then
+    USE_CONFIG="--config $CLICKHOUSE_CONFIG_CLIENT"
+fi
+
+
+tcp_port_secure=`$CLICKHOUSE_EXTRACT_CONFIG -k tcp_port_secure 2>/dev/null`
+if [ -z $tcp_port_secure ]; then
     # Secure port disabled. Fake result
     cat $CURDIR/00505_secure.reference
 else
-    # Auto port detect
-    ${CLICKHOUSE_CLIENT} --secure -q "SELECT 1";
-    ${CLICKHOUSE_CLIENT} --secure --port=${CLICKHOUSE_PORT_TCP_SECURE} -q "SELECT 2";
-    ${CLICKHOUSE_CURL} -sS --insecure ${CLICKHOUSE_URL_HTTPS}?query=SELECT%203
-    ${CLICKHOUSE_CLIENT} --secure -q "SELECT 1";
-    cat $CURDIR/00505_distributed_secure.data | $CLICKHOUSE_CLIENT --secure -n -m
+    if [[ $CLICKHOUSE_CLIENT != *"--port"* ]]; then
+        CLICKHOUSE_CLIENT_SECURE=${CLICKHOUSE_CLIENT_SECURE:="$CLICKHOUSE_CLIENT $USE_CONFIG --secure --port=$CLICKHOUSE_PORT_TCP_SECURE"}
+        # Auto port detect. Can't test with ports re-defined via the command line
+        $CLICKHOUSE_CLIENT $USE_CONFIG --secure -q "SELECT 1";
+    else
+        CLICKHOUSE_CLIENT_BINARY=${CLICKHOUSE_CLIENT_BINARY:="${CLICKHOUSE_BINARY}-client"}
+        CLICKHOUSE_CLIENT_SECURE=${CLICKHOUSE_CLIENT_SECURE:="$CLICKHOUSE_CLIENT_BINARY $USE_CONFIG --secure --port=$CLICKHOUSE_PORT_TCP_SECURE"}
+        echo 1
+    fi
+
+    $CLICKHOUSE_CLIENT_SECURE -q "SELECT 2;"
+
+    $CLICKHOUSE_CURL -sS --insecure ${CLICKHOUSE_URL_HTTPS}?query=SELECT%203
+
+    $CLICKHOUSE_CLIENT_SECURE -q "SELECT 4;"
+
+    # TODO: can test only on unchanged port. Possible solutions: generate config or pass shard port via command line
+    if [[ "$CLICKHOUSE_PORT_TCP_SECURE" = "$tcp_port_secure" ]]; then
+        cat $CURDIR/00505_distributed_secure.data | $CLICKHOUSE_CLIENT_SECURE -n -m
+    else
+        tail -n 13 $CURDIR/00505_secure.reference
+    fi
 fi
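
Note: the port probe above relies on clickhouse-extract-from-config, which prints a single key from the effective server config; empty output means the key is absent and the secure branch is skipped (a sketch, config path assumed):

    clickhouse-extract-from-config --config=/etc/clickhouse-server/config.xml -k tcp_port_secure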
@@ -0,0 +1,2 @@
+Still alive
+65535
174  dbms/tests/queries/0_stateless/00632_get_sample_block_cache.sql  Normal file
File diff suppressed because one or more lines are too long
@@ -4,6 +4,8 @@ export CLICKHOUSE_CLIENT=${CLICKHOUSE_CLIENT:="${CLICKHOUSE_BINARY}-client"}
 export CLICKHOUSE_LOCAL=${CLICKHOUSE_LOCAL:="${CLICKHOUSE_BINARY}-local"}
 
 export CLICKHOUSE_CONFIG=${CLICKHOUSE_CONFIG:="/etc/clickhouse-server/config.xml"}
+export CLICKHOUSE_CONFIG_CLIENT=${CLICKHOUSE_CONFIG_CLIENT:="/etc/clickhouse-client/config.xml"}
+
 export CLICKHOUSE_EXTRACT_CONFIG=${CLICKHOUSE_EXTRACT_CONFIG:="$CLICKHOUSE_BINARY-extract-from-config --config=$CLICKHOUSE_CONFIG"}
 export CLICKHOUSE_CONFIG_GREP=${CLICKHOUSE_CONFIG_GREP:="/etc/clickhouse-server/config-preprocessed.xml"}
 
1  debian/clickhouse-client.install  vendored
@@ -2,5 +2,6 @@ usr/bin/clickhouse-client
 usr/bin/clickhouse-local
 usr/bin/clickhouse-compressor
 usr/bin/clickhouse-benchmark
+usr/bin/clickhouse-format
 etc/clickhouse-client/config.xml
 usr/bin/clickhouse-extract-from-config
12  debian/pbuilder-hooks/B90test-server  vendored
@@ -5,7 +5,7 @@ set -x
 TEST_CONNECT=${TEST_CONNECT=1}
 TEST_SSL=${TEST_SSL=1}
 PACKAGE_INSTALL=${PACKAGE_INSTALL=1}
-PORT_RANDOM=${PORT_RANDOM=1}
+TEST_PORT_RANDOM=${TEST_PORT_RANDOM=1}
 
 if [ "${PACKAGE_INSTALL}" ]; then
     for PKG in $(ls /tmp/buildd/*.deb | sed -e's,.*/,,;s,_.*,,' ); do
@@ -25,7 +25,7 @@ fi
 
 mkdir -p /etc/clickhouse-server/config.d /etc/clickhouse-client/config.d
 
-if [ "${PORT_RANDOM}" ]; then
+if [ "${TEST_PORT_RANDOM}" ]; then
     CLICKHOUSE_PORT_BASE=${CLICKHOUSE_PORT_BASE:=$(( ( RANDOM % 50000 ) + 10000 ))}
     CLICKHOUSE_PORT_TCP=${CLICKHOUSE_PORT_TCP:=$(($CLICKHOUSE_PORT_BASE + 1))}
     CLICKHOUSE_PORT_HTTP=${CLICKHOUSE_PORT_HTTP:=$(($CLICKHOUSE_PORT_BASE + 2))}
@@ -41,10 +41,10 @@ export CLICKHOUSE_PORT_TCP_SECURE=${CLICKHOUSE_PORT_TCP_SECURE:=9440}
 export CLICKHOUSE_PORT_HTTPS=${CLICKHOUSE_PORT_HTTPS:=8443}
 
 if [ "${TEST_CONNECT}" ]; then
-    [ "${PORT_RANDOM}" ] && echo "<yandex><http_port>${CLICKHOUSE_PORT_HTTP}</http_port><tcp_port>${CLICKHOUSE_PORT_TCP}</tcp_port><interserver_http_port>${CLICKHOUSE_PORT_INTERSERVER}</interserver_http_port></yandex>" > /etc/clickhouse-server/config.d/port.xml
+    [ "${TEST_PORT_RANDOM}" ] && echo "<yandex><http_port>${CLICKHOUSE_PORT_HTTP}</http_port><tcp_port>${CLICKHOUSE_PORT_TCP}</tcp_port><interserver_http_port>${CLICKHOUSE_PORT_INTERSERVER}</interserver_http_port></yandex>" > /etc/clickhouse-server/config.d/port.xml
 
     if [ "${TEST_SSL}" ]; then
-        [ "${PORT_RANDOM}" ] && echo "<yandex><https_port>${CLICKHOUSE_PORT_HTTPS}</https_port><tcp_port_secure>${CLICKHOUSE_PORT_TCP_SECURE}</tcp_port_secure></yandex>" > /etc/clickhouse-server/config.d/ssl.xml
+        [ "${TEST_PORT_RANDOM}" ] && echo "<yandex><https_port>${CLICKHOUSE_PORT_HTTPS}</https_port><tcp_port_secure>${CLICKHOUSE_PORT_TCP_SECURE}</tcp_port_secure></yandex>" > /etc/clickhouse-server/config.d/ssl.xml
         echo "<yandex><openSSL><client><verificationMode>none</verificationMode><invalidCertificateHandler><name>AcceptCertificateHandler</name></invalidCertificateHandler></client></openSSL></yandex>" > /etc/clickhouse-client/config.d/ssl.xml
         openssl dhparam -out /etc/clickhouse-server/dhparam.pem 256
         openssl req -subj "/CN=localhost" -new -newkey rsa:2048 -days 365 -nodes -x509 -keyout /etc/clickhouse-server/server.key -out /etc/clickhouse-server/server.crt
@@ -56,7 +56,7 @@ if [ "${TEST_CONNECT}" ]; then
 
     function finish {
         service clickhouse-server stop
-        tail -n 100 /var/log/clickhouse-server/*.log /var/log/stderr
+        tail -n 100 /var/log/clickhouse-server/*.log /var/log/clickhouse-server/stderr || true
         sleep 1
         killall -9 clickhouse-server || true
     }
@@ -66,7 +66,7 @@ if [ "${TEST_CONNECT}" ]; then
     sleep 3
 
     # TODO: remove me or make only on error:
-    tail -n100 /var/log/clickhouse-server/*.log /var/log/stderr
+    tail -n100 /var/log/clickhouse-server/*.log /var/log/clickhouse-server/stderr || true
 
     clickhouse-client --port $CLICKHOUSE_PORT_TCP -q "SELECT * from system.build_options;"
     clickhouse-client ${CLIENT_ADD} -q "SELECT toDateTime(1);"
2  debian/rules  vendored
@@ -99,8 +99,10 @@ override_dh_install:
     mkdir -p $(DESTDIR)/etc/systemd/system/
     cp debian/clickhouse-server.service $(DESTDIR)/etc/systemd/system/
 
+    #TODO: use from cmake:
     # In case building clickhouse-server, adding to package binary of clang, ld and header files - for dynamic compilation.
     mkdir -p $(DESTDIR)/usr/share/clickhouse/headers
+    CLANG=$(DESTDIR)/usr/bin/clickhouse-clang DESTDIR=$(DESTDIR) ./copy_headers.sh . $(DESTDIR)/usr/share/clickhouse/headers
 
     # fake metrika files when private dir is empty
     mkdir -p $(DESTDIR)/etc/clickhouse-server/metrika
@@ -1,11 +1,11 @@
-FROM ubuntu:17.10
+FROM ubuntu:18.04
 
 RUN apt-get update -y && \
-    apt-get install -y \
-    cmake pkg-config gcc-7 g++-7 \
-    liblld-5.0-dev libclang-5.0-dev \
-    libssl-dev libicu-dev libreadline-dev libmysqlclient-dev unixodbc-dev
-# For tests: bash expect python python-lxml python-termcolor curl perl sudo tzdata
+    env DEBIAN_FRONTEND=noninteractive apt-get install -y \
+    cmake ninja-build ccache pkg-config gcc g++ \
+    liblld-6.0-dev libclang-6.0-dev \
+    libssl-dev libicu-dev libreadline-dev libmysqlclient-dev unixodbc-dev \
+    bash expect python python-lxml python-termcolor python-requests curl perl sudo tzdata
 
 ADD build.sh /
 RUN chmod +x /build.sh
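
Note: with ninja and ccache baked into the image, the intended flow appears to be building the image once and reusing it across compilations; a hypothetical build of the image under the tag the Makefile below expects:

    docker build -t yandex/clickhouse-builder .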
@@ -1,5 +1,6 @@
-build:
-    docker run --network=host --rm --workdir /server --volume $(realpath ../..):/server -it yandex/clickhouse-builder
+build: image
+    mkdir -p $(HOME)/.ccache
+    docker run --network=host --rm --workdir /server --volume $(realpath ../..):/server --mount=type=bind,source=$(HOME)/.ccache,destination=/ccache -e CCACHE_DIR=/ccache -it yandex/clickhouse-builder
 
 pull:
     docker pull yandex/clickhouse-builder
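
Note: the bind mount shares $(HOME)/.ccache between host and container, so compiled objects survive container restarts. Cache effectiveness could be checked from the host, assuming ccache is installed there:

    CCACHE_DIR=$HOME/.ccache ccache -s    # show hit/miss statistics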
@@ -1,7 +1,8 @@
 #!/bin/bash
 
+#ccache -s
 mkdir -p /server/build_docker
 cd /server/build_docker
-cmake /server -D ENABLE_TESTS=0
-make -j $(nproc || grep -c ^processor /proc/cpuinfo)
-#ctest -V -j $(nproc || grep -c ^processor /proc/cpuinfo)
+cmake -G Ninja /server -DENABLE_TESTS=1
+cmake --build .
+env TEST_OPT="--skip long compile $TEST_OPT" ctest -V -j $(nproc || grep -c ^processor /proc/cpuinfo)
@@ -1,4 +1,4 @@
-FROM ubuntu:17.10
+FROM ubuntu:18.04
 
 ARG repository="deb http://repo.yandex.ru/clickhouse/deb/stable/ main/"
 ARG version=\*
@@ -9,7 +9,7 @@ RUN apt-get update && \
     apt-key adv --keyserver keyserver.ubuntu.com --recv E0C56BD4 && \
     echo $repository | tee /etc/apt/sources.list.d/clickhouse.list && \
     apt-get update && \
-    apt-get install --allow-unauthenticated -y clickhouse-client=$version locales tzdata && \
+    env DEBIAN_FRONTEND=noninteractive apt-get install --allow-unauthenticated -y clickhouse-client=$version locales tzdata && \
     rm -rf /var/lib/apt/lists/* /var/cache/debconf && \
     apt-get clean
 
@@ -1,4 +1,4 @@
-FROM ubuntu:17.10
+FROM ubuntu:18.04
 
 ARG repository="deb http://repo.yandex.ru/clickhouse/deb/stable/ main/"
 ARG version=\*
@@ -9,7 +9,7 @@ RUN apt-get update && \
     apt-key adv --keyserver keyserver.ubuntu.com --recv E0C56BD4 && \
     echo $repository | tee /etc/apt/sources.list.d/clickhouse.list && \
     apt-get update && \
-    apt-get install --allow-unauthenticated -y "clickhouse-server=$version" libgcc-7-dev && \
+    env DEBIAN_FRONTEND=noninteractive apt-get install --allow-unauthenticated -y "clickhouse-server=$version" libgcc-7-dev && \
     rm -rf /var/lib/apt/lists/* /var/cache/debconf && \
     apt-get clean
 
@@ -1,4 +1,4 @@
-FROM ubuntu:17.10
+FROM ubuntu:18.04
 
 ARG repository="deb http://repo.yandex.ru/clickhouse/deb/stable/ main/"
 ARG version=\*
@@ -9,11 +9,8 @@ RUN apt-get update && \
     apt-key adv --keyserver keyserver.ubuntu.com --recv E0C56BD4 && \
     echo $repository | tee /etc/apt/sources.list.d/clickhouse.list && \
     apt-get update && \
-    apt-get install --allow-unauthenticated -y clickhouse-test && \
+    env DEBIAN_FRONTEND=noninteractive apt-get install --allow-unauthenticated -y clickhouse-test && \
     rm -rf /var/lib/apt/lists/* /var/cache/debconf && \
     apt-get clean
 
-# clickhouse-test bug: it doesn't start without server config, remove after release 1.1.54372 :
-RUN mkdir -p /etc/clickhouse-server && echo "<y></y>" > /etc/clickhouse-server/config.xml
-
 ENTRYPOINT ["/usr/bin/clickhouse-test"]
@@ -38,5 +38,7 @@ There are libraries for working with ClickHouse for:
 - [clickhouse_ecto](https://github.com/appodeal/clickhouse_ecto)
 - Java
     - [clickhouse-client-java](https://github.com/VirtusAI/clickhouse-client-java)
+- Nim
+    - [nim-clickhouse](https://github.com/leonardoce/nim-clickhouse)
 
 We have not tested these libraries. They are listed in random order.
@@ -12,6 +12,7 @@ edit_uri: 'edit/master/docs/en'
 extra_css:
   - assets/stylesheets/custom.css
 
+
 markdown_extensions:
   - codehilite
 
@@ -222,8 +223,8 @@ pages:
     - 'Dictionary key and fields': 'dicts/external_dicts_dict_structure.md'
     - 'Internal dictionaries': 'dicts/internal_dicts.md'
 
-- 'Operation':
-#    - 'Operation': 'operations/index.md'
+- 'Operations':
+    - 'Operations': 'operations/index.md'
     - 'Access rights': 'operations/access_rights.md'
     - 'Configuration files': 'operations/configuration_files.md'
     - 'Quotas': 'operations/quotas.md'
@@ -237,7 +238,7 @@ pages:
     - 'Settings': 'operations/settings/settings.md'
     - 'Settings profiles': 'operations/settings/settings_profiles.md'
 
-- 'Utilites':
+- 'Utilities':
     - 'Introduction': 'utils/index.md'
     - 'clickhouse-copier': 'utils/clickhouse-copier.md'
     - 'clickhouse-local': 'utils/clickhouse-local.md'
@@ -24,8 +24,6 @@ theme:
   primary: 'white'
   accent: 'white'
   font: false
-#  text: Roboto
-#  code: Roboto Mono
   logo: 'images/logo.svg'
   favicon: 'assets/images/favicon.ico'
   include_search_page: false
@@ -38,3 +38,5 @@
 - [clickhouse_ecto](https://github.com/appodeal/clickhouse_ecto)
 - Java
     - [clickhouse-client-java](https://github.com/VirtusAI/clickhouse-client-java)
+- Nim
+    - [nim-clickhouse](https://github.com/leonardoce/nim-clickhouse)
6  release
@@ -53,7 +53,7 @@ do
         shift
     elif [[ $1 == '--fast' ]]; then
         # Wrong but fast pbuilder mode: create base package with all depends
-        EXTRAPACKAGES="$EXTRAPACKAGES debhelper cmake ninja-build gcc-7 g++-7 libc6-dev libmariadbclient-dev libicu-dev libltdl-dev libreadline-dev libssl-dev unixodbc-dev psmisc bash expect python python-lxml python-termcolor curl perl sudo openssl"
+        EXTRAPACKAGES="$EXTRAPACKAGES debhelper cmake ninja-build gcc-7 g++-7 libc6-dev libmariadbclient-dev libicu-dev libltdl-dev libreadline-dev libssl-dev unixodbc-dev psmisc bash expect python python-lxml python-termcolor python-requests curl perl sudo openssl"
         shift
     else
         echo "Unknown option $1"
@@ -82,9 +82,9 @@ elif [[ $BUILD_TYPE == 'debug' ]]; then
     VERSION_POSTFIX+=+$BUILD_TYPE
 fi
 
-CMAKE_FLAGS=" $LIBTCMALLOC_OPTS $CMAKE_FLAGS"
+CMAKE_FLAGS=" $LIBTCMALLOC_OPTS -DENABLE_EMBEDDED_COMPILER=1 $CMAKE_FLAGS"
 [[ -n "$CMAKE_BUILD_TYPE" ]] && CMAKE_FLAGS=" -DCMAKE_BUILD_TYPE=$CMAKE_BUILD_TYPE $CMAKE_FLAGS"
-[[ "$CMAKE_FLAGS" =~ "USE_INTERNAL_LLVM_LIBRARY" ]] || CMAKE_FLAGS=" -DUSE_INTERNAL_LLVM_LIBRARY=1 $CMAKE_FLAGS"
+#[[ "$CMAKE_FLAGS" =~ "USE_INTERNAL_LLVM_LIBRARY" ]] || CMAKE_FLAGS=" -DUSE_INTERNAL_LLVM_LIBRARY=1 $CMAKE_FLAGS"
 
 export CMAKE_FLAGS
 export EXTRAPACKAGES
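
Note: since -DENABLE_EMBEDDED_COMPILER=1 is now appended unconditionally, a packaging run picks it up without extra flags, while other options can still be injected through the environment; a hypothetical invocation:

    CMAKE_FLAGS="-DUSE_INTERNAL_LLVM_LIBRARY=1" ./release --fast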
@@ -11,7 +11,7 @@ sudo apt install -y git bash cmake gcc-7 g++-7 libicu-dev libreadline-dev libmys
 #sudo apt install -y libboost-program-options-dev libboost-system-dev libboost-filesystem-dev libboost-thread-dev zlib1g-dev liblz4-dev libdouble-conversion-dev libzstd-dev libre2-dev libsparsehash-dev librdkafka-dev libcapnp-dev libpoco-dev libsparsehash-dev libgoogle-perftools-dev libunwind-dev googletest libcctz-dev
 
 # install testing only stuff if you want:
-sudo apt install -y python python-lxml python-termcolor curl perl
+sudo apt install -y python python-lxml python-termcolor python-requests curl perl
 
 # Checkout ClickHouse sources
 git clone --recursive https://github.com/yandex/ClickHouse.git
@@ -22,7 +22,7 @@
 sudo pkg install devel/git devel/cmake shells/bash devel/icu devel/libltdl databases/unixODBC devel/google-perftools devel/libdouble-conversion archivers/zstd archivers/liblz4 devel/sparsehash devel/re2
 
 # install testing only stuff if you want:
-sudo pkg install lang/python devel/py-lxml devel/py-termcolor ftp/curl perl5
+sudo pkg install lang/python devel/py-lxml devel/py-termcolor www/py-requests ftp/curl perl5
 
 # If you want ODBC support: Check UNIXODBC option:
 # make -C /usr/ports/devel/poco config reinstall
@@ -37,6 +37,7 @@ cmake $CUR_DIR/../.. -DCMAKE_CXX_COMPILER=`which $DEB_CXX $CXX` -DCMAKE_C_COMPIL
     `# Skip tests:` \
     `# 00281 requires internal compiler` \
     `# 00428 requires sudo (not all vms allow this)` \
-    && ( [ ! ${TEST_RUN=1} ] || ( ( cd $CUR_DIR/../.. && env TEST_OPT="--skip long compile 00428 $TEST_OPT" TEST_PERF= bash -x dbms/tests/clickhouse-test-server ) || ${TEST_TRUE=false} ) )
+    `# 00385 runs infinitely (TODO: fix it)` \
+    && ( [ ! ${TEST_RUN=1} ] || ( ( cd $CUR_DIR/../.. && env TEST_OPT="--skip long compile 00428 00385 $TEST_OPT" TEST_PORT_RANDOM= TEST_PERF= bash -x dbms/tests/clickhouse-test-server ) || ${TEST_TRUE=false} ) )
 
 date
@@ -12,6 +12,7 @@ df -h
 date
 
 env TEST_RUN=${TEST_RUN=1} \
+    TEST_PORT_RANDOM= \
     `# Skip tests:` \
     `# 00281 requires internal compiler` \
     `# 00416 requires patched poco from contrib/` \
@@ -411,10 +411,8 @@ clickhouse-client
 
 <h2 id="contacts">Contacts</h2>
 <ul class="dashed">
-    <li>Subscribe to the <a href="https://yandex.com/blog/clickhouse"
-        rel="external nofollow" target="_blank">official ClickHouse blog</a>
-        and its <a href="https://yandex.ru/blog/clickhouse"
-        rel="external nofollow" target="_blank">counterpart in Russian</a>.</li>
+    <li>Subscribe to the <a href="https://clickhouse.yandex/blog/en" target="_blank">official ClickHouse blog</a>
+        and its <a href="https://clickhouse.yandex/blog/ru" target="_blank">counterpart in Russian</a>.</li>
     <li>Ask any questions on <a href="https://stackoverflow.com/questions/tagged/clickhouse"
         rel="external nofollow" target="_blank">Stack Overflow</a> or
     <a href="https://groups.google.com/group/clickhouse"