dbms: Server: Merged with master. [#METR-18844]

Alexey Arno 2016-07-07 19:44:05 +03:00
commit fdfba69a46
27 changed files with 460 additions and 39 deletions

View File

@@ -136,8 +136,8 @@ endif(WIN32)
if (UNIX AND NOT ANDROID )
add_definitions( -DPOCO_OS_FAMILY_UNIX )
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-unused-private-field -Wno-unused-local-typedef -Wno-for-loop-analysis -Wno-unknown-pragmas")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wno-unknown-pragmas")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-unused-private-field -Wno-unused-local-typedef -Wno-for-loop-analysis -Wno-unknown-pragmas -Wno-unused-variable")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wno-unknown-pragmas -Wno-unused-variable")
# Standard 'must be' defines
if (APPLE)
add_definitions( -DPOCO_HAVE_IPv6 -DPOCO_NO_STAT64)

View File

@@ -266,6 +266,9 @@ add_library (dbms
include/DB/DataStreams/CSVRowOutputStream.h
include/DB/DataStreams/CSVRowInputStream.h
include/DB/DataStreams/verbosePrintString.h
include/DB/DataStreams/SquashingTransform.h
include/DB/DataStreams/SquashingBlockInputStream.h
include/DB/DataStreams/SquashingBlockOutputStream.h
include/DB/DataTypes/IDataType.h
include/DB/DataTypes/IDataTypeDummy.h
include/DB/DataTypes/DataTypeSet.h
@@ -744,6 +747,9 @@ add_library (dbms
src/DataStreams/RemoteBlockInputStream.cpp
src/DataStreams/BlockIO.cpp
src/DataStreams/verbosePrintString.cpp
src/DataStreams/SquashingTransform.cpp
src/DataStreams/SquashingBlockInputStream.cpp
src/DataStreams/SquashingBlockOutputStream.cpp
src/DataTypes/DataTypeString.cpp
src/DataTypes/DataTypeFixedString.cpp

View File

@@ -1,4 +1,4 @@
#pragma once
/// Получить количество ядер CPU без учёта hyper-threading.
/// Get number of CPU cores without hyper-threading.
unsigned getNumberOfPhysicalCPUCores();

View File

@@ -0,0 +1,34 @@
#pragma once
#include <DB/DataStreams/IProfilingBlockInputStream.h>
#include <DB/DataStreams/SquashingTransform.h>
namespace DB
{
/** Merges consecutive blocks of a stream until a specified minimum size is reached.
*/
class SquashingBlockInputStream : public IProfilingBlockInputStream
{
public:
SquashingBlockInputStream(BlockInputStreamPtr & src, size_t min_block_size_rows, size_t min_block_size_bytes);
String getName() const override { return "Squashing"; }
String getID() const override
{
std::stringstream res;
res << "Squashing(" << children.at(0)->getID() << ")";
return res.str();
}
protected:
Block readImpl() override;
private:
SquashingTransform transform;
bool all_read = false;
};
}

View File

@@ -0,0 +1,32 @@
#pragma once
#include <DB/DataStreams/IBlockOutputStream.h>
#include <DB/DataStreams/SquashingTransform.h>
namespace DB
{
/** Merges consecutive blocks of a stream until a specified minimum size is reached.
*/
class SquashingBlockOutputStream : public IBlockOutputStream
{
public:
SquashingBlockOutputStream(BlockOutputStreamPtr & dst, size_t min_block_size_rows, size_t min_block_size_bytes);
void write(const Block & block) override;
void flush() override;
void writePrefix() override;
void writeSuffix() override;
private:
BlockOutputStreamPtr output;
SquashingTransform transform;
bool all_written = false;
void finalize();
};
}

View File

@@ -0,0 +1,52 @@
#include <DB/Core/Block.h>
namespace DB
{
/** Merges consecutively passed blocks until a specified minimum size is reached.
*
* (But if one of the input blocks already has at least the specified size,
* then it is not merged with its neighbours, even if the neighbours are small.)
*
* Used to bring blocks to an adequate size for INSERT queries,
* because storages such as Memory, StripeLog, Log and TinyLog
* store or compress data in blocks exactly as they are passed in,
* and small blocks are not efficient.
*
* The order of the data is preserved.
*/
class SquashingTransform
{
public:
/// The conditions on rows and bytes are OR-ed. If one of them is zero, the corresponding condition is ignored.
SquashingTransform(size_t min_block_size_rows, size_t min_block_size_bytes);
/// While not ready, you need to keep passing blocks to the add function.
struct Result
{
bool ready = false;
Block block;
Result(bool ready_) : ready(ready_) {}
Result(Block && block_) : ready(true), block(std::move(block_)) {}
};
/** Adds the next block and possibly returns a squashed block.
* At the end, an empty block must be passed; for that last (empty) block, a final Result with ready = true is returned.
*/
Result add(Block && block);
private:
size_t min_block_size_rows;
size_t min_block_size_bytes;
Block accumulated_block;
void append(Block && block);
bool isEnoughSize(size_t rows, size_t bytes) const;
};
}
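The contract above can be exercised in isolation. Below is a minimal self-contained sketch of the same algorithm (std::vector<int> stands in for DB::Block, so this is an illustration of the technique, not the commit's interface); it reproduces the 10/100/10 emission pattern exercised by the squashing tests later in this commit:

#include <cstddef>
#include <initializer_list>
#include <iostream>
#include <utility>
#include <vector>

using MiniBlock = std::vector<int>;   // stand-in for DB::Block

struct MiniSquash
{
    size_t min_rows;
    size_t min_bytes;
    MiniBlock accumulated;

    // Same OR-ed thresholds as SquashingTransform::isEnoughSize.
    bool isEnoughSize(size_t rows, size_t bytes) const
    {
        return (!min_rows && !min_bytes)
            || (min_rows && rows >= min_rows)
            || (min_bytes && bytes >= min_bytes);
    }

    // Returns a non-empty block when a squashed block is ready;
    // pass an empty block at end-of-stream to flush the remainder.
    MiniBlock add(MiniBlock && block)
    {
        if (block.empty())
            return std::move(accumulated);
        if (isEnoughSize(block.size(), block.size() * sizeof(int)))
        {
            if (accumulated.empty())
                return std::move(block);    // big block passes through untouched
            accumulated.swap(block);        // emit older data first, keep the big block
            return std::move(block);
        }
        if (!accumulated.empty() && isEnoughSize(accumulated.size(), accumulated.size() * sizeof(int)))
        {
            accumulated.swap(block);        // emit what we have, start accumulating anew
            return std::move(block);
        }
        accumulated.insert(accumulated.end(), block.begin(), block.end());
        if (isEnoughSize(accumulated.size(), accumulated.size() * sizeof(int)))
        {
            MiniBlock res;
            res.swap(accumulated);
            return res;
        }
        return {};                          // squashed block is not ready yet
    }
};

int main()
{
    // The 10 / 100 / 10 row sequence from the squashing test in this commit,
    // with min_rows = 100: emits 10, then 100, then (on flush) 10.
    MiniSquash squash{100, 0, {}};
    for (size_t n : {10u, 100u, 10u, 0u})   // trailing 0 = end-of-stream flush
    {
        MiniBlock out = squash.add(MiniBlock(n, 42));
        if (!out.empty())
            std::cout << "emit block of " << out.size() << " rows\n";
    }
}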

View File

@@ -39,6 +39,8 @@ inline char parseEscapeSequence(char c)
{
switch(c)
{
case 'a':
return '\a';
case 'b':
return '\b';
case 'f':
@@ -49,6 +51,8 @@ inline char parseEscapeSequence(char c)
return '\r';
case 't':
return '\t';
case 'v':
return '\v';
case '0':
return '\0';
default:

View File

@@ -39,6 +39,10 @@ struct Settings
M(SettingUInt64, max_block_size, DEFAULT_BLOCK_SIZE) \
/** The maximum block size for insertion, if we control the formation of blocks for insertion. */ \
M(SettingUInt64, max_insert_block_size, DEFAULT_INSERT_BLOCK_SIZE) \
/** Squash blocks passed to INSERT query to specified size in rows, if blocks are not big enough. */ \
M(SettingUInt64, min_insert_block_size_rows, DEFAULT_INSERT_BLOCK_SIZE) \
/** Squash blocks passed to INSERT query to specified size in bytes, if blocks are not big enough. */ \
M(SettingUInt64, min_insert_block_size_bytes, (DEFAULT_INSERT_BLOCK_SIZE * 256)) \
/** The maximum number of threads for query execution. By default, it is determined automatically. */ \
M(SettingMaxThreads, max_threads, 0) \
/** The maximum buffer size for reading from the filesystem. */ \
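For scale, a quick cross-check of the new defaults, assuming DEFAULT_INSERT_BLOCK_SIZE is 1048576 as defined in DB/Core/Defines.h (an assumption; verify against your tree):

// Assumed value of DEFAULT_INSERT_BLOCK_SIZE (see DB/Core/Defines.h).
constexpr unsigned long long ASSUMED_DEFAULT_INSERT_BLOCK_SIZE = 1048576;
// min_insert_block_size_rows defaults to 1048576 rows;
// min_insert_block_size_bytes defaults to 1048576 * 256 = 268435456 bytes (256 MiB).
static_assert(ASSUMED_DEFAULT_INSERT_BLOCK_SIZE * 256 == 268435456ULL,
              "default min_insert_block_size_bytes is 256 MiB");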

View File

@@ -1143,6 +1143,7 @@ public:
("time,t", "print query execution time to stderr in non-interactive mode (for benchmarks)")
("stacktrace", "print stack traces of exceptions")
("progress", "print progress even in non-interactive mode")
("compression", boost::program_options::value<bool>(), "enable or disable compression")
APPLY_FOR_SETTINGS(DECLARE_SETTING)
APPLY_FOR_LIMITS(DECLARE_LIMIT)
;
@@ -1265,6 +1266,8 @@ public:
config().setBool("progress", true);
if (options.count("time"))
print_time_to_stderr = true;
if (options.count("compression"))
config().setBool("compression", options["compression"].as<bool>());
}
};

View File

@@ -1,4 +1,5 @@
#include <DB/Common/getNumberOfPhysicalCPUCores.h>
#include <thread>
#if defined(__x86_64__)
@@ -7,10 +8,6 @@
namespace DB { namespace ErrorCodes { extern const int CPUID_ERROR; }}
#elif defined(__aarch64__)
#include <thread>
#endif
@@ -26,10 +23,14 @@ unsigned getNumberOfPhysicalCPUCores()
if (0 != cpu_identify(&raw_data, &data))
throw DB::Exception("Cannot cpu_identify: " + std::string(cpuid_error()), DB::ErrorCodes::CPUID_ERROR);
/// On Xen VMs, libcpuid returns wrong info (zero number of cores). Fall back to an alternative method.
if (data.num_cores == 0 || data.total_logical_cpus == 0 || data.num_logical_cpus == 0)
return std::thread::hardware_concurrency();
return data.num_cores * data.total_logical_cpus / data.num_logical_cpus;
#elif defined(__aarch64__)
/// Считаем, что на этой системе нет hyper-threading.
/// Assume there is no hyper-threading on this system.
return std::thread::hardware_concurrency();
#endif
}
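A worked instance of the formula above, with hypothetical readings for a dual-socket machine with hyper-threading (in libcpuid, num_cores and num_logical_cpus are per package, while total_logical_cpus is system-wide):

// Hypothetical readings: 2 packages, 8 physical cores each, 2 threads per core.
constexpr int num_cores = 8;            // physical cores per package
constexpr int num_logical_cpus = 16;    // logical CPUs per package
constexpr int total_logical_cpus = 32;  // logical CPUs in the whole system
// 8 * 32 / 16 = 16 physical cores across both sockets.
static_assert(num_cores * total_logical_cpus / num_logical_cpus == 16,
              "physical cores = num_cores * total_logical_cpus / num_logical_cpus");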

View File

@@ -0,0 +1,31 @@
#include <DB/DataStreams/SquashingBlockInputStream.h>
namespace DB
{
SquashingBlockInputStream::SquashingBlockInputStream(BlockInputStreamPtr & src, size_t min_block_size_rows, size_t min_block_size_bytes)
: transform(min_block_size_rows, min_block_size_bytes)
{
children.emplace_back(src);
}
Block SquashingBlockInputStream::readImpl()
{
if (all_read)
return {};
while (true)
{
Block block = children[0]->read();
if (!block)
all_read = true;
SquashingTransform::Result result = transform.add(std::move(block));
if (result.ready)
return result.block;
}
}
}

View File

@@ -0,0 +1,53 @@
#include <DB/DataStreams/SquashingBlockOutputStream.h>
namespace DB
{
SquashingBlockOutputStream::SquashingBlockOutputStream(BlockOutputStreamPtr & dst, size_t min_block_size_rows, size_t min_block_size_bytes)
: output(dst), transform(min_block_size_rows, min_block_size_bytes)
{
}
void SquashingBlockOutputStream::write(const Block & block)
{
SquashingTransform::Result result = transform.add(Block(block));
if (result.ready)
output->write(result.block);
}
void SquashingBlockOutputStream::finalize()
{
if (all_written)
return;
all_written = true;
SquashingTransform::Result result = transform.add({});
if (result.ready && result.block)
output->write(result.block);
}
void SquashingBlockOutputStream::flush()
{
finalize();
output->flush();
}
void SquashingBlockOutputStream::writePrefix()
{
output->writePrefix();
}
void SquashingBlockOutputStream::writeSuffix()
{
finalize();
output->writeSuffix();
}
}
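Note that flush() and writeSuffix() both call finalize(), and either may run first; the all_written flag makes the call idempotent, so the squashed tail is written at most once. The guard pattern in isolation, as a toy sketch rather than the real IBlockOutputStream interface:

#include <iostream>

struct TailWriter
{
    bool all_written = false;

    void finalize()
    {
        if (all_written)
            return;                          // later callers are no-ops
        all_written = true;
        std::cout << "tail block written\n"; // happens exactly once
    }
};

int main()
{
    TailWriter w;
    w.finalize();   // e.g. from flush()
    w.finalize();   // e.g. from writeSuffix(); prints nothing
}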

View File

@@ -0,0 +1,76 @@
#include <DB/DataStreams/SquashingTransform.h>
namespace DB
{
SquashingTransform::SquashingTransform(size_t min_block_size_rows, size_t min_block_size_bytes)
: min_block_size_rows(min_block_size_rows), min_block_size_bytes(min_block_size_bytes)
{
}
SquashingTransform::Result SquashingTransform::add(Block && block)
{
if (!block)
return Result(std::move(accumulated_block));
/// The block just read is already big enough.
if (isEnoughSize(block.rowsInFirstColumn(), block.bytes()))
{
/// If there is no accumulated data, return the block just read.
if (!accumulated_block)
return Result(std::move(block));
/// Return the accumulated data (it may be small) and keep the new block as the accumulated data.
accumulated_block.swap(block);
return Result(std::move(block));
}
/// The accumulated block is already big enough.
if (accumulated_block && isEnoughSize(accumulated_block.rowsInFirstColumn(), accumulated_block.bytes()))
{
/// Return the accumulated data and keep the new block as the accumulated data.
accumulated_block.swap(block);
return Result(std::move(block));
}
append(std::move(block));
if (isEnoughSize(accumulated_block.rowsInFirstColumn(), accumulated_block.bytes()))
{
Block res;
res.swap(accumulated_block);
return Result(std::move(res));
}
/// Squashed block is not ready.
return false;
}
void SquashingTransform::append(Block && block)
{
if (!accumulated_block)
{
accumulated_block = std::move(block);
return;
}
size_t columns = block.columns();
size_t rows = block.rowsInFirstColumn();
for (size_t i = 0; i < columns; ++i)
accumulated_block.unsafeGetByPosition(i).column->insertRangeFrom(
*block.unsafeGetByPosition(i).column, 0, rows);
}
bool SquashingTransform::isEnoughSize(size_t rows, size_t bytes) const
{
return (!min_block_size_rows && !min_block_size_bytes)
|| (min_block_size_rows && rows >= min_block_size_rows)
|| (min_block_size_bytes && bytes >= min_block_size_bytes);
}
}

View File

@@ -5,6 +5,7 @@
#include <DB/DataStreams/AddingDefaultBlockOutputStream.h>
#include <DB/DataStreams/PushingToViewsBlockOutputStream.h>
#include <DB/DataStreams/NullAndDoCopyBlockInputStream.h>
#include <DB/DataStreams/SquashingBlockOutputStream.h>
#include <DB/DataStreams/copyData.h>
#include <DB/Parsers/ASTInsertQuery.h>
@@ -78,14 +79,21 @@ BlockIO InterpreterInsertQuery::execute()
NamesAndTypesListPtr required_columns = std::make_shared<NamesAndTypesList>(table->getColumnsList());
/// Create a tuple of several streams into which the data will be written.
BlockOutputStreamPtr out =
std::make_shared<ProhibitColumnsBlockOutputStream>(
std::make_shared<AddingDefaultBlockOutputStream>(
std::make_shared<MaterializingBlockOutputStream>(
std::make_shared<PushingToViewsBlockOutputStream>(query.database, query.table, context, query_ptr)),
required_columns, table->column_defaults, context, static_cast<bool>(context.getSettingsRef().strict_insert_defaults)),
table->materialized_columns);
/// Create a pipeline of several streams into which the data will be written.
BlockOutputStreamPtr out;
out = std::make_shared<PushingToViewsBlockOutputStream>(query.database, query.table, context, query_ptr);
out = std::make_shared<MaterializingBlockOutputStream>(out);
out = std::make_shared<AddingDefaultBlockOutputStream>(out,
required_columns, table->column_defaults, context, static_cast<bool>(context.getSettingsRef().strict_insert_defaults));
out = std::make_shared<ProhibitColumnsBlockOutputStream>(out, table->materialized_columns);
out = std::make_shared<SquashingBlockOutputStream>(out,
context.getSettingsRef().min_insert_block_size_rows,
context.getSettingsRef().min_insert_block_size_bytes);
BlockIO res;
res.out_sample = getSampleBlock();
@@ -98,7 +106,8 @@ BlockIO InterpreterInsertQuery::execute()
else
{
InterpreterSelectQuery interpreter_select{query.select, context};
BlockInputStreamPtr in{interpreter_select.execute().in};
BlockInputStreamPtr in = interpreter_select.execute().in;
res.in = std::make_shared<NullAndDoCopyBlockInputStream>(in, out);
res.in_sample = interpreter_select.getSampleBlock();
}
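The rewritten pipeline builds its chain innermost-first, reassigning out at each step instead of nesting constructor calls; the wrapper added last (the squashing one) is the first to see every written block. The same shape in miniature, with a toy Sink interface rather than the real IBlockOutputStream:

#include <iostream>
#include <memory>
#include <string>
#include <utility>

struct Sink
{
    virtual void write(const std::string & s) = 0;
    virtual ~Sink() = default;
};
using SinkPtr = std::shared_ptr<Sink>;

struct Terminal : Sink
{
    void write(const std::string & s) override { std::cout << s << '\n'; }
};

/// Decorator: tags the data, then forwards it down the chain.
struct Tagged : Sink
{
    SinkPtr next;
    std::string tag;
    Tagged(SinkPtr next_, std::string tag_) : next(std::move(next_)), tag(std::move(tag_)) {}
    void write(const std::string & s) override { next->write(tag + s); }
};

int main()
{
    // Built innermost-first, like the INSERT pipeline above.
    SinkPtr out = std::make_shared<Terminal>();
    out = std::make_shared<Tagged>(out, "materialize:");
    out = std::make_shared<Tagged>(out, "defaults:");
    out = std::make_shared<Tagged>(out, "squash:");
    out->write("block");   // prints "materialize:defaults:squash:block"
}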

View File

@@ -785,6 +785,9 @@ QueryProcessingStage::Enum InterpreterSelectQuery::executeFetchColumns()
{
size_t max_streams = settings.max_threads;
if (max_streams == 0)
throw Exception("Logical error: zero number of streams requested", ErrorCodes::LOGICAL_ERROR);
/// If needed, request more sources than the number of threads, for a more even distribution of work across threads.
if (max_streams > 1 && !is_remote)
max_streams *= settings.max_streams_to_max_threads_ratio;

View File

@@ -626,7 +626,7 @@ void TCPHandler::initBlockOutput()
{
if (state.compression == Protocol::Compression::Enable)
state.maybe_compressed_out = std::make_shared<CompressedWriteBuffer>(
*out, query_context.getSettings().network_compression_method);
*out, query_context.getSettingsRef().network_compression_method);
else
state.maybe_compressed_out = out;

View File

@@ -18,26 +18,26 @@ namespace DB
{
/// Состояние обработки запроса.
/// State of query processing.
struct QueryState
{
/// Идентификатор запроса.
/// Identifier of the query.
String query_id;
QueryProcessingStage::Enum stage = QueryProcessingStage::Complete;
Protocol::Compression::Enum compression = Protocol::Compression::Disable;
/// Откуда читать данные для INSERT-а.
/// From where to read data for INSERT.
std::shared_ptr<ReadBuffer> maybe_compressed_in;
BlockInputStreamPtr block_in;
/// Куда писать возвращаемые данные.
/// Where to write result data.
std::shared_ptr<WriteBuffer> maybe_compressed_out;
BlockOutputStreamPtr block_out;
/// Текст запроса.
/// Query text.
String query;
/// Потоки блоков, с помощью которых выполнять запрос.
/// Block streams with which the query is executed.
BlockIO io;
/// Whether the query is cancelled

View File

@@ -57,9 +57,7 @@ INSERT INTO test.replacing VALUES ('2000-01-01', 2, 'def', 1);
INSERT INTO test.replacing VALUES ('2000-01-01', 2, 'ghi', 0);
SELECT * FROM test.replacing FINAL ORDER BY k, v, _part_index;
OPTIMIZE TABLE test.replacing;
OPTIMIZE TABLE test.replacing;
OPTIMIZE TABLE test.replacing;
OPTIMIZE TABLE test.replacing PARTITION 200001 FINAL;
SELECT _part_index, * FROM test.replacing ORDER BY k, v, _part_index;
SELECT _part_index, * FROM test.replacing FINAL ORDER BY k, v, _part_index;
@@ -91,9 +89,7 @@ INSERT INTO test.replacing VALUES ('2000-01-01', 2, 'def');
INSERT INTO test.replacing VALUES ('2000-01-01', 2, 'ghi');
SELECT * FROM test.replacing FINAL ORDER BY k, _part_index;
OPTIMIZE TABLE test.replacing;
OPTIMIZE TABLE test.replacing;
OPTIMIZE TABLE test.replacing;
OPTIMIZE TABLE test.replacing PARTITION 200001 FINAL;
SELECT _part_index, * FROM test.replacing ORDER BY k, _part_index;
SELECT _part_index, * FROM test.replacing FINAL ORDER BY k, _part_index;

View File

@@ -0,0 +1,5 @@
1000000 10
130000 76
1000000 10
120000 1
20000000

View File

@@ -0,0 +1,18 @@
DROP TABLE IF EXISTS test.numbers_squashed;
CREATE TABLE test.numbers_squashed AS system.numbers ENGINE = StripeLog;
SET max_block_size = 10000;
SET min_insert_block_size_rows = 1000000;
SET min_insert_block_size_bytes = 0;
INSERT INTO test.numbers_squashed SELECT * FROM system.numbers LIMIT 10000000;
SELECT blockSize() AS b, count() / b AS c FROM test.numbers_squashed GROUP BY blockSize() ORDER BY c DESC;
SET min_insert_block_size_bytes = 1000000;
INSERT INTO test.numbers_squashed SELECT * FROM system.numbers LIMIT 10000000;
SELECT blockSize() AS b, count() / b AS c FROM test.numbers_squashed GROUP BY blockSize() ORDER BY c DESC;
SELECT count() FROM test.numbers_squashed;
DROP TABLE test.numbers_squashed;
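The reference values for this test follow from arithmetic alone: system.numbers yields UInt64 rows (8 bytes each) in source blocks of max_block_size = 10000 rows, i.e. 80000 bytes per block. A standalone cross-check:

#include <cassert>

int main()
{
    // First INSERT: rows threshold only -> 10 squashed blocks of 1000000 rows
    // ("1000000 10" in the reference).
    assert(10000000 / 1000000 == 10);
    // Second INSERT: bytes threshold 1000000 -> 13 source blocks per squashed
    // block, since 12 * 80000 = 960000 < 1000000 <= 13 * 80000 = 1040000,
    // i.e. squashed blocks of 130000 rows.
    assert(12 * 80000 < 1000000 && 13 * 80000 >= 1000000);
    // 10000000 / 130000 = 76 full blocks plus a flushed 120000-row tail
    // ("130000 76" and "120000 1"); both INSERTs total 20000000 rows.
    assert(10000000 / 130000 == 76 && 10000000 % 130000 == 120000);
}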

View File

@@ -0,0 +1,17 @@
10 2
100 1
120
10 3
100 3
330
100 5
10 4
540
100 6
10 5
30 1
680
10 9
100 7
30 1
820

View File

@@ -0,0 +1,68 @@
DROP TABLE IF EXISTS test.numbers_squashed;
CREATE TABLE test.numbers_squashed (number UInt8) ENGINE = Memory;
SET min_insert_block_size_rows = 100;
SET min_insert_block_size_bytes = 0;
SET max_threads = 1;
INSERT INTO test.numbers_squashed
SELECT arrayJoin(range(10)) AS number
UNION ALL
SELECT arrayJoin(range(100))
UNION ALL
SELECT arrayJoin(range(10));
SELECT blockSize() AS b, count() / b AS c FROM test.numbers_squashed GROUP BY blockSize() ORDER BY c DESC, b ASC;
SELECT count() FROM test.numbers_squashed;
INSERT INTO test.numbers_squashed
SELECT arrayJoin(range(100)) AS number
UNION ALL
SELECT arrayJoin(range(10))
UNION ALL
SELECT arrayJoin(range(100));
SELECT blockSize() AS b, count() / b AS c FROM test.numbers_squashed GROUP BY blockSize() ORDER BY c DESC, b ASC;
SELECT count() FROM test.numbers_squashed;
INSERT INTO test.numbers_squashed
SELECT arrayJoin(range(10)) AS number
UNION ALL
SELECT arrayJoin(range(100))
UNION ALL
SELECT arrayJoin(range(100));
SELECT blockSize() AS b, count() / b AS c FROM test.numbers_squashed GROUP BY blockSize() ORDER BY c DESC, b ASC;
SELECT count() FROM test.numbers_squashed;
INSERT INTO test.numbers_squashed
SELECT arrayJoin(range(10)) AS number
UNION ALL
SELECT arrayJoin(range(10))
UNION ALL
SELECT arrayJoin(range(10))
UNION ALL
SELECT arrayJoin(range(100))
UNION ALL
SELECT arrayJoin(range(10));
SELECT blockSize() AS b, count() / b AS c FROM test.numbers_squashed GROUP BY blockSize() ORDER BY c DESC, b ASC;
SELECT count() FROM test.numbers_squashed;
SET min_insert_block_size_rows = 10;
INSERT INTO test.numbers_squashed
SELECT arrayJoin(range(10)) AS number
UNION ALL
SELECT arrayJoin(range(10))
UNION ALL
SELECT arrayJoin(range(10))
UNION ALL
SELECT arrayJoin(range(100))
UNION ALL
SELECT arrayJoin(range(10));
SELECT blockSize() AS b, count() / b AS c FROM test.numbers_squashed GROUP BY blockSize() ORDER BY c DESC, b ASC;
SELECT count() FROM test.numbers_squashed;
DROP TABLE test.numbers_squashed;

View File

@@ -0,0 +1 @@
07080C0A0D090B5C27223FAA

View File

@@ -0,0 +1 @@
SELECT hex('\a\b\f\n\r\t\v\\\'\"\?\xAA');
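Each escape in the literal maps to one output byte (\a=07, \b=08, \f=0C, \n=0A, \r=0D, \t=09, \v=0B, \\=5C, \'=27, \"=22, \?=3F, \xAA=AA), which concatenates to the reference line 07080C0A0D090B5C27223FAA. A standalone cross-check in plain C++, not part of the commit:

#include <cstdio>

int main()
{
    // Same escapes as the SQL literal above; none of the bytes is NUL,
    // so iterating up to the terminator visits all twelve of them.
    const char s[] = "\a\b\f\n\r\t\v\\'\"?\xAA";
    for (const char * p = s; *p; ++p)
        std::printf("%02X", static_cast<unsigned char>(*p));
    std::printf("\n");   // prints 07080C0A0D090B5C27223FAA
}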

View File

@@ -399,8 +399,10 @@
<code>
<pre>
sudo apt-key adv --keyserver keyserver.ubuntu.com --recv E0C56BD4 # optional
sudo mkdir -p /etc/apt/sources.list.d
echo "deb http://repo.yandex.ru/clickhouse/<span id="distributive">trusty</span>/ dists/stable/main/binary-amd64/" |
echo "deb http://repo.yandex.ru/clickhouse/<span id="distributive">trusty</span> stable main" |
&nbsp;&nbsp;&nbsp;&nbsp;sudo tee /etc/apt/sources.list.d/clickhouse.list
sudo apt-get update

View File

@@ -417,23 +417,26 @@ In %%/etc/apt/sources.list%% (or in a separate %%/etc/apt/sources.list.d/clickho
On Ubuntu Trusty (14.04):
%%
deb http://repo.yandex.ru/clickhouse/trusty/ dists/stable/main/binary-amd64/
deb http://repo.yandex.ru/clickhouse/trusty stable main
%%
On Ubuntu Precise (12.04):
%%
deb http://repo.yandex.ru/clickhouse/precise/ dists/stable/main/binary-amd64/
deb http://repo.yandex.ru/clickhouse/precise stable main
%%
Then run:
%%
sudo apt-key adv --keyserver keyserver.ubuntu.com --recv E0C56BD4 # optional
sudo apt-get update
sudo apt-get install clickhouse-client clickhouse-server-common
%%
You can also download and install packages manually from here: <a href="https://dist.yandex.ru/metrika/stable/amd64/">https://dist.yandex.ru/metrika/stable/amd64/</a>.
You can also download and install packages manually from here:
<a href="http://repo.yandex.ru/clickhouse/trusty/pool/main/c/clickhouse/">http://repo.yandex.ru/clickhouse/trusty/pool/main/c/clickhouse/</a>,
<a href="http://repo.yandex.ru/clickhouse/precise/pool/main/c/clickhouse/">http://repo.yandex.ru/clickhouse/precise/pool/main/c/clickhouse/</a>.
ClickHouse contains access restriction settings. They are located in the 'users.xml' file (next to 'config.xml').
By default, access is allowed from everywhere for the default user without a password. See 'user/default/networks'. For more information, see the section "Configuration files".
@@ -2810,7 +2813,7 @@ Replication is an optional feature. To use replication, set the addresses of the
</zookeeper>
%%
Use ZooKeeper version 3.4.5 or later. For example, the version in the Ubuntu Precise package is too old. You can get a newer version for Ubuntu Precise from the repository <a href="https://dist.yandex.net/metrika/stable/amd64/">https://dist.yandex.net/metrika/stable/amd64/</a>.
Use ZooKeeper version 3.4.5 or later. For example, the version in the Ubuntu Precise package is too old.
You can specify any existing ZooKeeper cluster - the system will use a directory on it for its own data (the directory is specified when creating a replicatable table).

View File

@@ -424,25 +424,27 @@ YT позволяет хранить группы столбцов по отде
On Ubuntu Trusty (14.04):
%%
deb http://repo.yandex.ru/clickhouse/trusty/ dists/stable/main/binary-amd64/
deb http://repo.yandex.ru/clickhouse/trusty stable main
%%
On Ubuntu Precise (12.04):
%%
deb http://repo.yandex.ru/clickhouse/precise/ dists/stable/main/binary-amd64/
deb http://repo.yandex.ru/clickhouse/precise stable main
%%
Then run:
%%
sudo apt-key adv --keyserver keyserver.ubuntu.com --recv E0C56BD4 # optional
sudo apt-get update
sudo apt-get install clickhouse-client clickhouse-server-common
%%
You can also download and install packages manually from here:
<a href="https://dist.yandex.ru/metrika/stable/amd64/">https://dist.yandex.ru/metrika/stable/amd64/</a>.
<a href="http://repo.yandex.ru/clickhouse/trusty/pool/main/c/clickhouse/">http://repo.yandex.ru/clickhouse/trusty/pool/main/c/clickhouse/</a>,
<a href="http://repo.yandex.ru/clickhouse/precise/pool/main/c/clickhouse/">http://repo.yandex.ru/clickhouse/precise/pool/main/c/clickhouse/</a>.
ClickHouse contains access restriction settings. They are located in the users.xml file (next to config.xml).
By default, access is allowed from everywhere for the default user, without a password. See the users/default/networks section.
@@ -2867,7 +2869,7 @@ min_bytes, max_bytes - условие на количество байт в бу
</zookeeper>
%%
Use ZooKeeper version 3.4.5 or later. For example, the version in the Ubuntu Precise package is too old. A sufficiently new version for Ubuntu Precise can be taken from the repository <a href='https://dist.yandex.net/metrika/stable/amd64/'>https://dist.yandex.net/metrika/stable/amd64/</a>.
Use ZooKeeper version 3.4.5 or later. For example, the version in the Ubuntu Precise package is too old.
You can specify any existing ZooKeeper cluster - the system will use one directory on it for its own data (the directory is specified when creating a replicated table).