Merge branch 'master' into storage-stripe-log-s3

Pavel Kovalenko 2020-02-19 13:24:23 +03:00
commit d662c1e006
30 changed files with 401 additions and 72 deletions

.gitmodules

@ -106,7 +106,7 @@
url = https://github.com/sparsehash/sparsehash-c11.git
[submodule "contrib/grpc"]
path = contrib/grpc
url = https://github.com/grpc/grpc.git
url = https://github.com/ClickHouse-Extras/grpc.git
branch = v1.25.0
[submodule "contrib/aws"]
path = contrib/aws


@ -258,6 +258,16 @@ endif ()
add_subdirectory(src/Common/ZooKeeper)
add_subdirectory(src/Common/Config)
# It's OK to avoid tracking unresolved symbols for static linkage because
# they will be resolved at link time anyway.
function(target_ignore_unresolved_symbols target)
    if (OS_DARWIN)
        target_link_libraries (${target} PRIVATE -Wl,-undefined,dynamic_lookup)
    else()
        target_link_libraries (${target} PRIVATE -Wl,--unresolved-symbols=ignore-all)
    endif()
endfunction()
set (all_modules)
macro(add_object_library name common_path)
    if (MAKE_STATIC_LIBRARIES OR NOT SPLIT_SHARED_LIBRARIES)
@ -266,7 +276,7 @@ macro(add_object_library name common_path)
        list (APPEND all_modules ${name})
        add_headers_and_sources(${name} ${common_path})
        add_library(${name} SHARED ${${name}_sources} ${${name}_headers})
        target_link_libraries (${name} PRIVATE -Wl,--unresolved-symbols=ignore-all)
        target_ignore_unresolved_symbols(${name})
    endif ()
endmacro()
@ -297,6 +307,7 @@ add_object_library(clickhouse_processors_sources src/Processors/Sources)
if (MAKE_STATIC_LIBRARIES OR NOT SPLIT_SHARED_LIBRARIES)
    add_library (dbms STATIC ${dbms_headers} ${dbms_sources})
    set (all_modules dbms)
    target_ignore_unresolved_symbols (dbms)
else()
    add_library (dbms SHARED ${dbms_headers} ${dbms_sources})
    target_link_libraries (dbms PUBLIC ${all_modules})
@ -551,13 +562,6 @@ endif()
if (USE_JEMALLOC)
    dbms_target_include_directories (SYSTEM BEFORE PRIVATE ${JEMALLOC_INCLUDE_DIR}) # used in Interpreters/AsynchronousMetrics.cpp
    target_include_directories (clickhouse_new_delete SYSTEM BEFORE PRIVATE ${JEMALLOC_INCLUDE_DIR})
    if(NOT MAKE_STATIC_LIBRARIES AND ${JEMALLOC_LIBRARIES} MATCHES "${CMAKE_STATIC_LIBRARY_SUFFIX}$")
        # mallctl in dbms/src/Interpreters/AsynchronousMetrics.cpp
        # Actually we link JEMALLOC to almost all libraries.
        # This is just a hotfix for an uninvestigated problem.
        target_link_libraries(clickhouse_interpreters PRIVATE ${JEMALLOC_LIBRARIES})
    endif()
endif ()
dbms_target_include_directories (PUBLIC ${DBMS_INCLUDE_DIR})


@ -23,7 +23,6 @@ set(CLICKHOUSE_SERVER_LINK PRIVATE clickhouse_dictionaries clickhouse_common_io
if (USE_POCO_NETSSL)
set(CLICKHOUSE_SERVER_LINK ${CLICKHOUSE_SERVER_LINK} PRIVATE ${Poco_NetSSL_LIBRARY} ${Poco_Crypto_LIBRARY})
endif ()
set(CLICKHOUSE_SERVER_INCLUDE PUBLIC ${ClickHouse_SOURCE_DIR}/libs/libdaemon/include)
clickhouse_program_add(server)


@ -65,7 +65,7 @@ public:
"You must create it manulally with appropriate value or 0 for first start.");
}
int fd = ::open(path.c_str(), O_RDWR | O_CREAT, 0666);
int fd = ::open(path.c_str(), O_RDWR | O_CREAT | O_CLOEXEC, 0666);
if (-1 == fd)
DB::throwFromErrnoWithPath("Cannot open file " + path, path, DB::ErrorCodes::CANNOT_OPEN_FILE);
@ -139,7 +139,7 @@ public:
{
bool file_exists = Poco::File(path).exists();
int fd = ::open(path.c_str(), O_RDWR | O_CREAT, 0666);
int fd = ::open(path.c_str(), O_RDWR | O_CREAT | O_CLOEXEC, 0666);
if (-1 == fd)
DB::throwFromErrnoWithPath("Cannot open file " + path, path, DB::ErrorCodes::CANNOT_OPEN_FILE);
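A recurring change in this commit adds O_CLOEXEC to the ::open() calls so that file descriptors are closed automatically when a child process is exec()ed, instead of leaking into it. A minimal sketch of the flag outside ClickHouse (Python for brevity; illustrative, not ClickHouse code):

```python
import os

# O_CLOEXEC marks the descriptor close-on-exec: it is not inherited across
# exec*(). Passing the flag atomically at open() time avoids the race of
# setting FD_CLOEXEC with fcntl() afterwards in a program that may fork.
fd = os.open("/tmp/example.txt", os.O_RDWR | os.O_CREAT | os.O_CLOEXEC, 0o666)
try:
    os.write(fd, b"counter state\n")
finally:
    os.close(fd)
```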


@ -20,9 +20,9 @@
M(ReadBufferAIOReadBytes, "") \
M(WriteBufferAIOWrite, "") \
M(WriteBufferAIOWriteBytes, "") \
M(ReadCompressedBytes, "") \
M(CompressedReadBufferBlocks, "") \
M(CompressedReadBufferBytes, "") \
M(ReadCompressedBytes, "Number of bytes (the number of bytes before decompression) read from compressed sources (files, network).") \
M(CompressedReadBufferBlocks, "Number of compressed blocks (the blocks of data that are compressed independently of each other) read from compressed sources (files, network).") \
M(CompressedReadBufferBytes, "Number of uncompressed bytes (the number of bytes after decompression) read from compressed sources (files, network).") \
M(UncompressedCacheHits, "") \
M(UncompressedCacheMisses, "") \
M(UncompressedCacheWeightLost, "") \
@ -51,9 +51,9 @@
M(QueryMaskingRulesMatch, "Number of times query masking rules were successfully matched.") \
\
M(ReplicatedPartFetches, "Number of times a data part was downloaded from replica of a ReplicatedMergeTree table.") \
M(ReplicatedPartFailedFetches, "") \
M(ReplicatedPartFailedFetches, "Number of times a data part failed to download from a replica of a ReplicatedMergeTree table.") \
M(ObsoleteReplicatedParts, "") \
M(ReplicatedPartMerges, "") \
M(ReplicatedPartMerges, "Number of times data parts of ReplicatedMergeTree tables were successfully merged.") \
M(ReplicatedPartFetchesOfMerged, "Number of times we prefer to download an already merged part from a replica of a ReplicatedMergeTree table instead of performing the merge ourselves (usually we prefer doing the merge ourselves to save network traffic). This happens when we do not have all source parts to perform the merge, or when the data part is old enough.") \
M(ReplicatedPartMutations, "") \
M(ReplicatedPartChecks, "") \


@ -48,7 +48,7 @@ StatusFile::StatusFile(const std::string & path_)
LOG_INFO(&Logger::get("StatusFile"), "Status file " << path << " already exists and is empty - probably unclean hardware restart.");
}
fd = ::open(path.c_str(), O_WRONLY | O_CREAT, 0666);
fd = ::open(path.c_str(), O_WRONLY | O_CREAT | O_CLOEXEC, 0666);
if (-1 == fd)
throwFromErrnoWithPath("Cannot open file " + path, path, ErrorCodes::CANNOT_OPEN_FILE);


@ -26,7 +26,7 @@ void MMapReadBufferFromFile::open()
{
ProfileEvents::increment(ProfileEvents::FileOpen);
fd = ::open(file_name.c_str(), O_RDONLY);
fd = ::open(file_name.c_str(), O_RDONLY | O_CLOEXEC);
if (-1 == fd)
throwFromErrnoWithPath("Cannot open file " + file_name, file_name,


@ -49,6 +49,7 @@ ReadBufferAIO::ReadBufferAIO(const std::string & filename_, size_t buffer_size_,
int open_flags = (flags_ == -1) ? O_RDONLY : flags_;
open_flags |= O_DIRECT;
open_flags |= O_CLOEXEC;
fd = ::open(filename.c_str(), open_flags);
if (fd == -1)


@ -38,7 +38,7 @@ ReadBufferFromFile::ReadBufferFromFile(
if (o_direct)
flags = flags & ~O_DIRECT;
#endif
fd = ::open(file_name.c_str(), flags == -1 ? O_RDONLY : flags);
fd = ::open(file_name.c_str(), flags == -1 ? O_RDONLY | O_CLOEXEC : flags | O_CLOEXEC);
if (-1 == fd)
throwFromErrnoWithPath("Cannot open file " + file_name, file_name,


@ -58,6 +58,7 @@ WriteBufferAIO::WriteBufferAIO(const std::string & filename_, size_t buffer_size
int open_flags = (flags_ == -1) ? (O_RDWR | O_TRUNC | O_CREAT) : flags_;
open_flags |= O_DIRECT;
open_flags |= O_CLOEXEC;
fd = ::open(filename.c_str(), open_flags, mode_);
if (fd == -1)


@ -41,7 +41,7 @@ WriteBufferFromFile::WriteBufferFromFile(
flags = flags & ~O_DIRECT;
#endif
fd = ::open(file_name.c_str(), flags == -1 ? O_WRONLY | O_TRUNC | O_CREAT : flags, mode);
fd = ::open(file_name.c_str(), flags == -1 ? O_WRONLY | O_TRUNC | O_CREAT | O_CLOEXEC : flags | O_CLOEXEC, mode);
if (-1 == fd)
throwFromErrnoWithPath("Cannot open file " + file_name, file_name,


@ -2,8 +2,8 @@
#include <Common/CurrentMetrics.h>
#include <Common/NetException.h>
#include <IO/HTTPCommon.h>
#include <Poco/File.h>
#include <ext/scope_guard.h>
#include <Poco/File.h>
#include <Poco/Net/HTTPServerResponse.h>
#include <Poco/Net/HTTPRequest.h>
@ -36,6 +36,8 @@ namespace
static constexpr auto REPLICATION_PROTOCOL_VERSION_WITHOUT_PARTS_SIZE = "0";
static constexpr auto REPLICATION_PROTOCOL_VERSION_WITH_PARTS_SIZE = "1";
static constexpr auto REPLICATION_PROTOCOL_VERSION_WITH_PARTS_SIZE_AND_TTL_INFOS = "2";
std::string getEndpointId(const std::string & node_id)
{
@ -53,10 +55,11 @@ void Service::processQuery(const Poco::Net::HTMLForm & params, ReadBuffer & /*bo
{
String client_protocol_version = params.get("client_protocol_version", REPLICATION_PROTOCOL_VERSION_WITHOUT_PARTS_SIZE);
String part_name = params.get("part");
if (client_protocol_version != REPLICATION_PROTOCOL_VERSION_WITH_PARTS_SIZE && client_protocol_version != REPLICATION_PROTOCOL_VERSION_WITHOUT_PARTS_SIZE)
if (client_protocol_version != REPLICATION_PROTOCOL_VERSION_WITH_PARTS_SIZE_AND_TTL_INFOS
&& client_protocol_version != REPLICATION_PROTOCOL_VERSION_WITH_PARTS_SIZE
&& client_protocol_version != REPLICATION_PROTOCOL_VERSION_WITHOUT_PARTS_SIZE)
throw Exception("Unsupported fetch protocol version", ErrorCodes::UNKNOWN_PROTOCOL);
const auto data_settings = data.getSettings();
@ -75,7 +78,7 @@ void Service::processQuery(const Poco::Net::HTMLForm & params, ReadBuffer & /*bo
response.setChunkedTransferEncoding(false);
return;
}
response.addCookie({"server_protocol_version", REPLICATION_PROTOCOL_VERSION_WITH_PARTS_SIZE});
response.addCookie({"server_protocol_version", REPLICATION_PROTOCOL_VERSION_WITH_PARTS_SIZE_AND_TTL_INFOS});
++total_sends;
SCOPE_EXIT({--total_sends;});
@ -103,10 +106,16 @@ void Service::processQuery(const Poco::Net::HTMLForm & params, ReadBuffer & /*bo
MergeTreeData::DataPart::Checksums data_checksums;
if (client_protocol_version == REPLICATION_PROTOCOL_VERSION_WITH_PARTS_SIZE)
if (client_protocol_version == REPLICATION_PROTOCOL_VERSION_WITH_PARTS_SIZE || client_protocol_version == REPLICATION_PROTOCOL_VERSION_WITH_PARTS_SIZE_AND_TTL_INFOS)
writeBinary(checksums.getTotalSizeOnDisk(), out);
if (client_protocol_version == REPLICATION_PROTOCOL_VERSION_WITH_PARTS_SIZE_AND_TTL_INFOS)
{
WriteBufferFromOwnString ttl_infos_buffer;
part->ttl_infos.write(ttl_infos_buffer);
writeBinary(ttl_infos_buffer.str(), out);
}
writeBinary(checksums.files.size(), out);
for (const auto & it : checksums.files)
{
@ -192,7 +201,7 @@ MergeTreeData::MutableDataPartPtr Fetcher::fetchPart(
{
{"endpoint", getEndpointId(replica_path)},
{"part", part_name},
{"client_protocol_version", REPLICATION_PROTOCOL_VERSION_WITH_PARTS_SIZE},
{"client_protocol_version", REPLICATION_PROTOCOL_VERSION_WITH_PARTS_SIZE_AND_TTL_INFOS},
{"compress", "false"}
});
@ -218,10 +227,21 @@ MergeTreeData::MutableDataPartPtr Fetcher::fetchPart(
ReservationPtr reservation;
if (server_protocol_version == REPLICATION_PROTOCOL_VERSION_WITH_PARTS_SIZE)
if (server_protocol_version == REPLICATION_PROTOCOL_VERSION_WITH_PARTS_SIZE || server_protocol_version == REPLICATION_PROTOCOL_VERSION_WITH_PARTS_SIZE_AND_TTL_INFOS)
{
size_t sum_files_size;
readBinary(sum_files_size, in);
if (server_protocol_version == REPLICATION_PROTOCOL_VERSION_WITH_PARTS_SIZE_AND_TTL_INFOS)
{
MergeTreeDataPart::TTLInfos ttl_infos;
String ttl_infos_string;
readBinary(ttl_infos_string, in);
ReadBufferFromString ttl_infos_buffer(ttl_infos_string);
assertString("ttl format version: 1\n", ttl_infos_buffer);
ttl_infos.read(ttl_infos_buffer);
reservation = data.reserveSpacePreferringTTLRules(sum_files_size, ttl_infos, std::time(nullptr));
}
else
reservation = data.reserveSpace(sum_files_size);
}
else
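The protocol bump works by plain string version negotiation: the fetcher sends its version as a query parameter, the server answers with its own in a cookie, and both sides gate every newer field on the negotiated value, so mixed-version replicas keep interoperating during a rolling upgrade. A rough sketch of the pattern (Python, names hypothetical, not the ClickHouse wire format):

```python
def write_part_header(out, client_protocol_version, sum_files_size, ttl_infos_blob):
    # Version-gated serialization: each protocol revision only appends fields,
    # and every appended field is guarded by the negotiated version.
    if client_protocol_version >= 1:   # "1" added the total size on disk
        out.write(sum_files_size.to_bytes(8, 'little'))
    if client_protocol_version >= 2:   # "2" added the serialized TTL infos
        out.write(len(ttl_infos_blob).to_bytes(8, 'little'))  # length prefix
        out.write(ttl_infos_blob)      # opaque blob, read only by new clients
```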


@ -196,14 +196,15 @@ MergeTreeData::MergeTreeData(
{
Poco::File(path).createDirectories();
Poco::File(path + "detached").createDirectory();
if (Poco::File{path + "format_version.txt"}.exists())
auto current_version_file_path = path + "format_version.txt";
if (Poco::File{current_version_file_path}.exists())
{
if (!version_file_path.empty())
{
LOG_ERROR(log, "Duplication of version file " << version_file_path << " and " << path << "format_file.txt");
LOG_ERROR(log, "Duplication of version file " << version_file_path << " and " << current_version_file_path);
throw Exception("Multiple format_version.txt file", ErrorCodes::CORRUPTED_DATA);
}
version_file_path = path + "format_version.txt";
version_file_path = current_version_file_path;
}
}
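Besides fixing the error message (it previously pointed at a non-existent format_file.txt), the rewritten block checks that, across all data paths of a multi-disk table, at most one carries format_version.txt. The logic, reduced to a sketch (Python, illustrative, not ClickHouse code):

```python
import os

def find_version_file(data_paths):
    # At most one data path may carry format_version.txt;
    # a second copy indicates corrupted table metadata.
    version_file_path = ''
    for path in data_paths:
        current_version_file_path = os.path.join(path, 'format_version.txt')
        if os.path.exists(current_version_file_path):
            if version_file_path:
                raise RuntimeError('Duplication of version file %s and %s'
                                   % (version_file_path, current_version_file_path))
            version_file_path = current_version_file_path
    return version_file_path
```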


@ -912,7 +912,7 @@ Pipes MergeTreeDataSelectExecutor::spreadMarkRangesAmongStreamsWithOrder(
RangesInDataPart part = parts.back();
parts.pop_back();
size_t & marks_in_part = sum_marks_in_parts.front();
size_t & marks_in_part = sum_marks_in_parts.back();
/// We will not take too few rows from a part.
if (marks_in_part >= min_marks_for_concurrent_read &&


@ -332,6 +332,39 @@ def test_moves_to_disk_eventually_work(started_cluster, name, engine):
node1.query("DROP TABLE IF EXISTS {}".format(name))
def test_replicated_download_ttl_info(started_cluster):
name = "test_replicated_ttl_info"
engine = "ReplicatedMergeTree('/clickhouse/test_replicated_download_ttl_info', '{replica}')"
try:
for i, node in enumerate((node1, node2), start=1):
node.query("""
CREATE TABLE {name} (
s1 String,
d1 DateTime
) ENGINE = {engine}
ORDER BY tuple()
TTL d1 TO DISK 'external'
SETTINGS storage_policy='small_jbod_with_external'
""".format(name=name, engine=engine))
node1.query("SYSTEM STOP MOVES {}".format(name))
node2.query("INSERT INTO {} (s1, d1) VALUES ('{}', toDateTime({}))".format(name, get_random_string(1024 * 1024), time.time()-100))
assert set(get_used_disks_for_table(node2, name)) == {"external"}
time.sleep(1)
assert node1.query("SELECT count() FROM {}".format(name)).splitlines() == ["1"]
assert set(get_used_disks_for_table(node1, name)) == {"external"}
finally:
for node in (node1, node2):
try:
node.query("DROP TABLE IF EXISTS {}".format(name))
except:
continue
@pytest.mark.skip(reason="Flappy test")
@pytest.mark.parametrize("name,engine,positive", [
("mt_test_merges_to_disk_do_not_work","MergeTree()",0),


@ -0,0 +1 @@
10000000


@ -0,0 +1,7 @@
DROP TABLE IF EXISTS mt_pk;
CREATE TABLE mt_pk ENGINE = MergeTree PARTITION BY d ORDER BY x
AS SELECT toDate(number % 32) AS d, number AS x FROM system.numbers LIMIT 10000010;
SELECT x FROM mt_pk ORDER BY x ASC LIMIT 10000000, 1;
DROP TABLE mt_pk;


@ -189,6 +189,10 @@ function run_tests
grep ^client-time "$test_name-raw.tsv" | cut -f2- > "$test_name-client-time.tsv"
# This may be slow, so run it in the background.
right/clickhouse local --file "$test_name-queries.tsv" --structure 'query text, run int, version UInt32, time float' --query "$(cat $script_dir/eqmed.sql)" > "$test_name-report.tsv" &
# Check that both servers are alive, to fail faster if they die.
left/clickhouse client --port 9001 --query "select 1 format Null"
right/clickhouse client --port 9002 --query "select 1 format Null"
done
unset TIMEFORMAT


@ -57,5 +57,7 @@ set -m
time ../compare.sh 0 $ref_sha $PR_TO_TEST $SHA_TO_TEST 2>&1 | ts "$(printf '%%Y-%%m-%%d %%H:%%M:%%S\t')" | tee compare.log
set +m
dmesg > dmesg.log
7z a /output/output.7z *.log *.tsv *.html *.txt *.rep
cp compare.log /output


@ -63,6 +63,7 @@ ln -s /usr/share/clickhouse-test/config/zookeeper.xml /etc/clickhouse-server/con
ln -s /usr/share/clickhouse-test/config/strings_dictionary.xml /etc/clickhouse-server/; \
ln -s /usr/share/clickhouse-test/config/decimals_dictionary.xml /etc/clickhouse-server/; \
ln -s /usr/share/clickhouse-test/config/macros.xml /etc/clickhouse-server/config.d/; \
ln -s /usr/share/clickhouse-test/config/disks.xml /etc/clickhouse-server/config.d/; \
ln -s /usr/lib/llvm-8/bin/llvm-symbolizer /usr/bin/llvm-symbolizer
service zookeeper start


@ -1,15 +1,18 @@
# ClickHouse Adopters
!!! note "Disclaimer"
!!! warning "Disclaimer"
    The following list of companies using ClickHouse and their success stories is assembled from public sources and thus might differ from current reality. We'd really appreciate it if you share the story of adopting ClickHouse in your company and [add it to the list](https://github.com/ClickHouse/ClickHouse/edit/master/docs/en/introduction/adopters.md), but please make sure you won't have any NDA issues by doing so. Providing updates with publications by other companies is also useful.
| Company | Industry | Use Case | Cluster Size | (Un)Compressed Data Size<abbr title="of single replica"><sup>*</sup></abbr> | Reference |
| --- | --- | --- | --- | --- | --- |
| [2gis](https://2gis.ru) | Maps | Monitoring | — | — | [Talk in Russian, July 2019](https://youtu.be/58sPkXfq6nw) |
| [Amadeus](https://amadeus.com/) | Travel | Analytics | — | — | [Press Release, April 2018](https://www.altinity.com/blog/2018/4/5/amadeus-technologies-launches-investment-and-insights-tool-based-on-machine-learning-and-strategy-algorithms) |
| [Appsflyer](https://www.appsflyer.com) | Mobile analytics | Main product | — | — | [Talk in Russian, July 2019](https://www.youtube.com/watch?v=M3wbRlcpBbY) |
| [Badoo](https://badoo.com) | Dating | Timeseries | — | — | [Slides in Russian, December 2019](https://presentations.clickhouse.tech/meetup38/forecast.pdf) |
| [Bloomberg](https://www.bloomberg.com/) | Finance, Media | Monitoring | 102 servers | — | [Slides, May 2018](https://www.slideshare.net/Altinity/http-analytics-for-6m-requests-per-second-using-clickhouse-by-alexander-bocharov) |
| [CERN](http://public.web.cern.ch/public/) | Research | Experiment | — | — | [Press release, April 2012](https://www.yandex.com/company/press_center/press_releases/2012/2012-04-10/) |
| [Cisco](http://cisco.com/) | Networking | Traffic analysis | — | — | [Lightning talk, October 2019](https://youtu.be/-hI1vDR2oPY?t=5057) |
| [Citadel Securities](https://www.citadelsecurities.com/) | Finance | — | — | — | [Contribution, March 2019](https://github.com/ClickHouse/ClickHouse/pull/4774) |
| [ContentSquare](https://contentsquare.com) | Web analytics | Main product | — | — | [Blog post in French, November 2018](http://souslecapot.net/2018/11/21/patrick-chatain-vp-engineering-chez-contentsquare-penser-davantage-amelioration-continue-que-revolution-constante/) |
| [Cloudflare](https://cloudflare.com) | CDN | Traffic analysis | 36 servers | — | [Blog post, May 2017](https://blog.cloudflare.com/how-cloudflare-analyzes-1m-dns-queries-per-second/), [Blog post, March 2018](https://blog.cloudflare.com/http-analytics-for-6m-requests-per-second-using-clickhouse/) |
| [Exness](https://www.exness.com) | Trading | Metrics, Logging | — | — | [Talk in Russian, May 2019](https://youtu.be/_rpU-TvSfZ8?t=3215) |
@ -22,3 +25,6 @@
| [Yandex Cloud](https://cloud.yandex.ru/services/managed-clickhouse) | Public Cloud | Main product | — | — | [Talk in Russian, December 2019](https://www.youtube.com/watch?v=pgnak9e_E0o) |
| [Yandex DataLens](https://cloud.yandex.ru/services/datalens) | Business Intelligence | Main product | — | — | [Slides in Russian, December 2019](https://presentations.clickhouse.tech/meetup38/datalens.pdf) |
| [Yandex Metrica](https://metrica.yandex.com) | Web analytics | Main product | 360 servers in one cluster, 1862 servers in one department | 66.41 PiB / 5.68 PiB | [Slides, February 2020](https://presentations.clickhouse.tech/meetup40/introduction/#13) |
[Original article](https://clickhouse.tech/docs/en/introduction/adopters/) <!--hide-->


@ -41,6 +41,7 @@ SELECT * FROM system.asynchronous_metrics LIMIT 10
- [Monitoring](monitoring.md) — Base concepts of ClickHouse monitoring.
- [system.metrics](#system_tables-metrics) — Contains instantly calculated metrics.
- [system.events](#system_tables-events) — Contains a number of events that have occurred.
- [system.metric_log](#system_tables-metric_log) — Contains a history of metric values from the `system.metrics` and `system.events` tables.
## system.clusters
@ -193,6 +194,7 @@ SELECT * FROM system.events LIMIT 5
- [system.asynchronous_metrics](#system_tables-asynchronous_metrics) — Contains periodically calculated metrics.
- [system.metrics](#system_tables-metrics) — Contains instantly calculated metrics.
- [system.metric_log](#system_tables-metric_log) — Contains a history of metric values from the `system.metrics` and `system.events` tables.
- [Monitoring](monitoring.md) — Base concepts of ClickHouse monitoring.
## system.functions
@ -273,10 +275,68 @@ SELECT * FROM system.metrics LIMIT 10
│ DistributedSend │ 0 │ Number of connections to remote servers sending data that was INSERTed into Distributed tables. Both synchronous and asynchronous mode. │
└────────────────────────────┴───────┴──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘
```
**See also**
- [system.asynchronous_metrics](#system_tables-asynchronous_metrics) — Contains periodically calculated metrics.
- [system.events](#system_tables-events) — Contains a number of events that occurred.
- [system.metric_log](#system_tables-metric_log) — Contains a history of metric values from the `system.metrics` and `system.events` tables.
- [Monitoring](monitoring.md) — Base concepts of ClickHouse monitoring.
## system.metric_log {#system_tables-metric_log}
Contains a history of metric values from the `system.metrics` and `system.events` tables, periodically flushed to disk.
To turn on metrics history collection in `system.metric_log`, create `/etc/clickhouse-server/config.d/metric_log.xml` with the following content:
```xml
<yandex>
<metric_log>
<database>system</database>
<table>metric_log</table>
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
<collect_interval_milliseconds>1000</collect_interval_milliseconds>
</metric_log>
</yandex>
```
**Example**
```sql
SELECT * FROM system.metric_log LIMIT 1 FORMAT Vertical;
```
```text
Row 1:
──────
event_date: 2020-02-18
event_time: 2020-02-18 07:15:33
milliseconds: 554
ProfileEvent_Query: 0
ProfileEvent_SelectQuery: 0
ProfileEvent_InsertQuery: 0
ProfileEvent_FileOpen: 0
ProfileEvent_Seek: 0
ProfileEvent_ReadBufferFromFileDescriptorRead: 1
ProfileEvent_ReadBufferFromFileDescriptorReadFailed: 0
ProfileEvent_ReadBufferFromFileDescriptorReadBytes: 0
ProfileEvent_WriteBufferFromFileDescriptorWrite: 1
ProfileEvent_WriteBufferFromFileDescriptorWriteFailed: 0
ProfileEvent_WriteBufferFromFileDescriptorWriteBytes: 56
...
CurrentMetric_Query: 0
CurrentMetric_Merge: 0
CurrentMetric_PartMutation: 0
CurrentMetric_ReplicatedFetch: 0
CurrentMetric_ReplicatedSend: 0
CurrentMetric_ReplicatedChecks: 0
...
```
**See also**
- [system.asynchronous_metrics](#system_tables-asynchronous_metrics) — Contains periodically calculated metrics.
- [system.events](#system_tables-events) — Contains a number of events that occurred.
- [system.metrics](#system_tables-metrics) — Contains instantly calculated metrics.
- [Monitoring](monitoring.md) — Base concepts of ClickHouse monitoring.
## system.numbers


@ -41,6 +41,7 @@ SELECT * FROM system.asynchronous_metrics LIMIT 10
- [Monitoring](monitoring.md) — Base concepts of ClickHouse monitoring.
- [system.metrics](#system_tables-metrics) — Contains instantly calculated metrics.
- [system.events](#system_tables-events) — Contains a number of events that have occurred.
- [system.metric_log](#system_tables-metric_log) — Contains a history of metric values from the `system.metrics` and `system.events` tables.
## system.clusters
@ -185,6 +186,7 @@ SELECT * FROM system.events LIMIT 5
- [system.asynchronous_metrics](#system_tables-asynchronous_metrics) — Contains periodically calculated metrics.
- [system.metrics](#system_tables-metrics) — Contains instantly calculated metrics.
- [system.metric_log](#system_tables-metric_log) — Contains a history of metric values from the `system.metrics` and `system.events` tables.
- [Monitoring](monitoring.md) — Base concepts of ClickHouse monitoring.
## system.functions
@ -270,6 +272,63 @@ SELECT * FROM system.metrics LIMIT 10
- [system.asynchronous_metrics](#system_tables-asynchronous_metrics) — Contains periodically calculated metrics.
- [system.events](#system_tables-events) — Contains a number of events that have occurred.
- [system.metric_log](#system_tables-metric_log) — Contains a history of metric values from the `system.metrics` and `system.events` tables.
- [Monitoring](monitoring.md) — Base concepts of ClickHouse monitoring.
## system.metric_log {#system_tables-metric_log}
Contains a history of metric values from the `system.metrics` and `system.events` tables, periodically flushed to disk.
To enable collecting metrics history in the `system.metric_log` table, create `/etc/clickhouse-server/config.d/metric_log.xml` with the following content:
```xml
<yandex>
<metric_log>
<database>system</database>
<table>metric_log</table>
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
<collect_interval_milliseconds>1000</collect_interval_milliseconds>
</metric_log>
</yandex>
```
**Example**
```sql
SELECT * FROM system.metric_log LIMIT 1 FORMAT Vertical;
```
```text
Row 1:
──────
event_date: 2020-02-18
event_time: 2020-02-18 07:15:33
milliseconds: 554
ProfileEvent_Query: 0
ProfileEvent_SelectQuery: 0
ProfileEvent_InsertQuery: 0
ProfileEvent_FileOpen: 0
ProfileEvent_Seek: 0
ProfileEvent_ReadBufferFromFileDescriptorRead: 1
ProfileEvent_ReadBufferFromFileDescriptorReadFailed: 0
ProfileEvent_ReadBufferFromFileDescriptorReadBytes: 0
ProfileEvent_WriteBufferFromFileDescriptorWrite: 1
ProfileEvent_WriteBufferFromFileDescriptorWriteFailed: 0
ProfileEvent_WriteBufferFromFileDescriptorWriteBytes: 56
...
CurrentMetric_Query: 0
CurrentMetric_Merge: 0
CurrentMetric_PartMutation: 0
CurrentMetric_ReplicatedFetch: 0
CurrentMetric_ReplicatedSend: 0
CurrentMetric_ReplicatedChecks: 0
...
```
**See also**
- [system.asynchronous_metrics](#system_tables-asynchronous_metrics) — Contains periodically calculated metrics.
- [system.events](#system_tables-events) — Contains a number of events that have occurred.
- [system.metrics](#system_tables-metrics) — Contains instantly calculated metrics.
- [Monitoring](monitoring.md) — Base concepts of ClickHouse monitoring.
## system.numbers


@ -98,7 +98,7 @@ def build_for_lang(lang, args):
        repo_name='ClickHouse/ClickHouse',
        repo_url='https://github.com/ClickHouse/ClickHouse/',
        edit_uri='edit/master/docs/%s' % lang,
        extra_css=['assets/stylesheets/custom.css'],
        extra_css=['assets/stylesheets/custom.css?%s' % args.rev_short],
        markdown_extensions=[
            'clickhouse',
            'admonition',
@ -115,7 +115,10 @@ def build_for_lang(lang, args):
        plugins=[],
        extra={
            'stable_releases': args.stable_releases,
            'version_prefix': args.version_prefix
            'version_prefix': args.version_prefix,
            'rev': args.rev,
            'rev_short': args.rev_short,
            'rev_url': args.rev_url
        }
    )
@ -247,6 +250,7 @@ def build_redirects(args):
def build_docs(args):
    tasks = []
    for lang in args.lang.split(','):
        if lang:
            tasks.append((lang, args,))
    util.run_function_in_parallel(build_for_lang, tasks, threads=False)
    build_redirects(args)
@ -303,6 +307,9 @@ if __name__ == '__main__':
    from github import choose_latest_releases
    args.stable_releases = choose_latest_releases() if args.enable_stable_releases else []
    args.rev = subprocess.check_output('git rev-parse HEAD', shell=True).strip()
    args.rev_short = subprocess.check_output('git rev-parse --short HEAD', shell=True).strip()
    args.rev_url = 'https://github.com/ClickHouse/ClickHouse/commit/%s' % args.rev
    logging.basicConfig(
        level=logging.DEBUG if args.verbose else logging.INFO,
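Threading the short commit hash into the templates enables cache busting: `custom.css?<rev_short>` changes with every rebuilt revision, so browsers refetch the stylesheet instead of serving a stale cached copy. The idea in isolation (helper name hypothetical):

```python
import subprocess

def asset_url(path):
    # Append the current short commit hash so the asset URL changes
    # whenever the site is rebuilt from a new commit.
    rev_short = subprocess.check_output(
        ['git', 'rev-parse', '--short', 'HEAD']).decode().strip()
    return '%s?%s' % (path, rev_short)

print(asset_url('assets/stylesheets/custom.css'))
# -> e.g. assets/stylesheets/custom.css?d662c1e
```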


@ -1,9 +1,3 @@
{% if config.extra.social %}
<div class="md-footer-social">
{% set path = "ajax/libs/font-awesome/4.7.0/css/font-awesome.min.css" %}
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/{{ path }}">
{% for social in config.extra.social %}
<a href="{{ social.link }}" class="md-footer-social__link fa fa-{{ social.type }}"></a>
{% endfor %}
</div>
{% endif %}
<div class="md-footer-social">
<span class="md-footer-copyright__highlight">Built from <a href="{{ config.extra.rev_url }}" rel="external nofollow">{{ config.extra.rev_short }}</a></span>
</div>


@ -1,4 +1,4 @@
alabaster==0.7.10
alabaster==0.7.12
Babel==2.5.1
backports-abc==0.5
beautifulsoup4==4.8.2
@ -11,14 +11,14 @@ docutils==0.16
futures==3.1.1
htmlmin==0.1.12
idna==2.6
imagesize==0.7.1
imagesize==1.2.0
Jinja2==2.11.1
jsmin==2.2.2
livereload==2.5.1
Markdown==2.6.11
MarkupSafe==1.0
mkdocs==1.0.4
Pygments==2.2.0
Pygments==2.5.2
python-slugify==1.2.6
pytz==2017.3
PyYAML==5.3
@ -30,7 +30,7 @@ snowballstemmer==1.2.1
Sphinx==1.6.5
sphinxcontrib-websupport==1.0.1
tornado==5.1
typing==3.6.2
typing==3.7.4.1
Unidecode==1.0.23
urllib3==1.25.8
gitpython==2.1.14


@ -4,10 +4,16 @@ import shutil
import cssmin
import htmlmin
import jinja2
import jsmin
def build_website(args):
    logging.info('Building website')

    env = jinja2.Environment(
        loader=jinja2.FileSystemLoader(args.output_dir)
    )

    shutil.copytree(
        args.website_dir,
        args.output_dir,
@ -21,9 +27,25 @@ def build_website(args):
        )
    )

    for root, _, filenames in os.walk(args.output_dir):
        for filename in filenames:
            path = os.path.join(root, filename)
            if not filename.endswith('.html'):
                continue
            logging.info('Processing %s', path)
            with open(path, 'rb') as f:
                content = f.read().decode('utf-8')
            template = env.from_string(content)
            content = template.render(args.__dict__)
            with open(path, 'wb') as f:
                f.write(content.encode('utf-8'))
def minify_website(args):
    if args.minify:
        logging.info('Minifying website')
        for root, _, filenames in os.walk(args.output_dir):
            for filename in filenames:
                path = os.path.join(root, filename)
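The HTML-processing loop added to `build_website` is what substitutes placeholders such as `{{ rev_short }}` in the static pages (see the HTML diff further below) with the build arguments. The mechanism in isolation, as a minimal jinja2 round trip:

```python
import jinja2

# Every {{ placeholder }} in the copied HTML is rendered against the
# build arguments; ordinary markup passes through untouched.
html = '<link href="index.css?{{ rev_short }}" media="all" rel="stylesheet" />'
print(jinja2.Template(html).render(rev_short='d662c1e'))
# -> <link href="index.css?d662c1e" media="all" rel="stylesheet" />
```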


@ -2070,6 +2070,108 @@ var results =
[0.015, 0.012, 0.012]
]
},
{
"system": "ProLiant DL380 G7, 12Gb RAM, 2x Xeon X5675 3.07GHz, 8x300GB SAS soft RAID5",
"time": "2020-02-18 00:00:00",
"result":
[
[0.041, 0.005, 0.005],
[0.084, 0.020, 0.019],
[0.403, 0.046, 0.043],
[0.190, 0.081, 0.082],
[0.192, 0.127, 0.131],
[0.388, 0.324, 0.309],
[0.078, 0.028, 0.038],
[0.055, 0.019, 0.019],
[0.677, 0.614, 0.604],
[0.808, 0.706, 0.727],
[0.282, 0.190, 0.181],
[0.312, 0.223, 0.229],
[0.997, 0.895, 0.891],
[1.167, 1.155, 1.115],
[1.155, 1.088, 1.143],
[1.119, 1.090, 1.109],
[3.451, 3.222, 3.153],
[1.743, 1.770, 1.655],
[9.346, 6.206, 6.436],
[0.352, 0.108, 0.105],
[2.985, 0.993, 0.976],
[3.594, 1.211, 1.195],
[6.626, 2.829, 2.800],
[10.086, 1.331, 1.318],
[1.072, 0.348, 0.332],
[0.535, 0.298, 0.269],
[1.046, 0.362, 0.334],
[3.487, 1.221, 1.165],
[2.718, 1.742, 1.719],
[3.200, 3.158, 3.116],
[1.346, 0.901, 0.917],
[2.336, 1.285, 1.285],
[8.876, 64.491, 123.728],
[10.200, 5.127, 4.743],
[5.196, 4.783, 4.659],
[1.628, 1.544, 1.527],
[0.476, 0.296, 0.285],
[0.172, 0.127, 0.097],
[0.170, 0.078, 0.083],
[0.670, 0.529, 0.511],
[0.181, 0.065, 0.039],
[0.123, 0.029, 0.033],
[0.045, 0.011, 0.011]
]
},
{
"system": "ProLiant DL380 G7, 12Gb RAM, 1x Xeon X5675 3.07GHz, 8x300GB SAS Soft RAID5",
"time": "2020-02-18 00:00:00",
"result":
[
[0.048, 0.005, 0.005],
[0.092, 0.026, 0.026],
[0.167, 0.067, 0.073],
[0.200, 0.117, 0.116],
[0.263, 0.185, 0.203],
[0.587, 0.586, 0.586],
[0.094, 0.043, 0.043],
[0.067, 0.025, 0.026],
[1.371, 1.299, 1.298],
[1.638, 1.546, 1.548],
[0.441, 0.341, 0.337],
[0.482, 0.405, 0.385],
[2.682, 2.680, 2.630],
[3.189, 3.207, 3.167],
[2.634, 2.525, 2.556],
[3.181, 3.200, 3.213],
[7.793, 7.714, 7.768],
[3.802, 3.819, 3.960],
[19.101, 16.177, 15.840],
[0.320, 0.153, 0.134],
[3.108, 2.188, 2.115],
[4.515, 3.139, 3.069],
[7.712, 6.856, 6.906],
[11.063, 2.630, 2.567],
[1.015, 0.739, 0.723],
[0.738, 0.644, 0.623],
[1.048, 0.717, 0.736],
[3.371, 2.905, 2.903],
[4.772, 4.539, 4.518],
[11.700, 11.656, 11.589],
[2.217, 2.083, 2.072],
[4.329, 4.153, 3.889],
[21.212, 21.887, 21.417],
[12.816, 12.501, 12.664],
[13.192, 12.624, 12.820],
[5.454, 5.447, 5.462],
[0.376, 0.280, 0.288],
[0.152, 0.097, 0.113],
[0.171, 0.093, 0.100],
[0.594, 0.484, 0.464],
[0.129, 0.043, 0.036],
[0.098, 0.027, 0.045],
[0.033, 0.025, 0.011]
]
},
];
</script>
@ -2499,6 +2601,7 @@ Results for Azure DS3v2 are from <b>Boris Granveaud</b>.<br/>
Results for AWS are from <b>Wolf Kreuzerkrieg</b>.<br/>
Results for Huawei Taishan are from <b>Peng Gao</b> in sina.com.<br/>
Results for Selectel and AMD EPYC 7402P are from <b>Andrey Dudin</b>.<br/>
Results for ProLiant are from <b>Denis Ustinov</b>.<br/>
Xeon Gold 6230 server is using 4 x SAMSUNG datacenter class SSD in RAID-10.<br/>
Results for Yandex Managed ClickHouse for "cold cache" are biased and should not be compared, because cache was not flushed for every next query.<br/>
</div>


@ -23,7 +23,7 @@
<meta name="keywords"
content="ClickHouse, DBMS, OLAP, relational, analytics, analytical, big data, open-source, SQL, web-analytics" />
<link href="index.css" media="all" rel="stylesheet" />
<link href="index.css?{{ rev_short }}" media="all" rel="stylesheet" />
</head>
<body>
<div id="navbar">
@ -47,7 +47,7 @@
<path class="orange" d="M8,3.25 h1 v1.5 h-1 z"></path>
</svg>
<img id="logo-text" src="images/clickhouse-black.svg" alt="ClickHouse" />
<img id="logo-text" src="images/clickhouse-black.svg?{{ rev_short }}" alt="ClickHouse" />
</h1>
</a>
</div>


@ -8,6 +8,10 @@
<loc>https://clickhouse.tech/benchmark.html</loc>
<changefreq>daily</changefreq>
</url>
<url>
<loc>https://clickhouse.tech/benchmark_hardware.html</loc>
<changefreq>daily</changefreq>
</url>
<url>
<loc>https://clickhouse.tech/tutorial.html</loc>
<changefreq>daily</changefreq>