#include <Storages/Distributed/DistributedBlockOutputStream.h>
#include <Storages/Distributed/DirectoryMonitor.h>
#include <Storages/StorageDistributed.h>
#include <Disks/StoragePolicy.h>

#include <Parsers/formatAST.h>
#include <Parsers/queryToString.h>

#include <IO/WriteBufferFromFile.h>
#include <Compression/CompressedWriteBuffer.h>
#include <IO/Operators.h>
#include <IO/WriteBufferFromString.h>
#include <DataStreams/NativeBlockOutputStream.h>
#include <DataStreams/RemoteBlockOutputStream.h>
#include <DataStreams/ConvertingBlockInputStream.h>
#include <DataStreams/OneBlockInputStream.h>
#include <Interpreters/InterpreterInsertQuery.h>
#include <Interpreters/createBlockSelector.h>
#include <Interpreters/ExpressionActions.h>
#include <Interpreters/Context.h>

#include <DataTypes/DataTypesNumber.h>
#include <DataTypes/DataTypeLowCardinality.h>
#include <Common/setThreadName.h>
#include <Common/ClickHouseRevision.h>
#include <Common/CurrentMetrics.h>
#include <Common/typeid_cast.h>
#include <Common/Exception.h>
#include <Common/ProfileEvents.h>
#include <Common/escapeForFileName.h>
#include <Common/CurrentThread.h>
#include <Common/createHardLink.h>
#include <common/logger_useful.h>
#include <ext/range.h>
#include <ext/scope_guard.h>

#include <Poco/DirectoryIterator.h>

#include <future>
#include <condition_variable>
#include <mutex>
2017-07-25 19:42:36 +00:00
namespace CurrentMetrics
{
extern const Metric DistributedSend ;
}

namespace ProfileEvents
{
    extern const Event DistributedSyncInsertionTimeoutExceeded;
}

namespace DB
{

namespace ErrorCodes
{
    extern const int LOGICAL_ERROR;
    extern const int TIMEOUT_EXCEEDED;
}
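
/// Write `block` into `out` `repeats` times; if the block structure does not match
/// the header of `out`, convert the columns by name first.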
static void writeBlockConvert(const BlockOutputStreamPtr & out, const Block & block, const size_t repeats)
{
    if (!blocksHaveEqualStructure(out->getHeader(), block))
    {
        ConvertingBlockInputStream convert(
            std::make_shared<OneBlockInputStream>(block),
            out->getHeader(),
            ConvertingBlockInputStream::MatchColumnsMode::Name);
        auto adopted_block = convert.read();

        for (size_t i = 0; i < repeats; ++i)
            out->write(adopted_block);
    }
    else
    {
        for (size_t i = 0; i < repeats; ++i)
            out->write(block);
    }
}


DistributedBlockOutputStream::DistributedBlockOutputStream(
    const Context & context_, StorageDistributed & storage_, const ASTPtr & query_ast_, const ClusterPtr & cluster_,
    bool insert_sync_, UInt64 insert_timeout_)
    : context(context_), storage(storage_), query_ast(query_ast_), query_string(queryToString(query_ast_)),
    cluster(cluster_), insert_sync(insert_sync_),
    insert_timeout(insert_timeout_), log(&Poco::Logger::get("DistributedBlockOutputStream"))
{
}


Block DistributedBlockOutputStream::getHeader() const
{
    return storage.getSampleBlock();
}


void DistributedBlockOutputStream::writePrefix()
{
}
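

/// Entry point for every inserted block. MATERIALIZED columns are stripped first:
/// they are filled again on the destination, so forwarding them would make the
/// block structure differ from what the receiving shard expects.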
void DistributedBlockOutputStream::write(const Block & block)
{
    Block ordinary_block{ block };

    /* They are added by the AddingDefaultBlockOutputStream,
     * and we will get different number of columns eventually */
    for (const auto & col : storage.getColumns().getMaterialized())
    {
        if (ordinary_block.has(col.name))
        {
            ordinary_block.erase(col.name);
            LOG_DEBUG(log, "{}: column {} will be removed, because it is MATERIALIZED",
                storage.getStorageID().getNameForLogs(), col.name);
        }
    }
2017-08-03 17:42:31 +00:00
if ( insert_sync )
        writeSync(ordinary_block);
    else
        writeAsync(ordinary_block);
}
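

/// Asynchronous insertion: if a sharding key is defined and there is more than
/// one shard, split the block per shard, otherwise queue the whole block as-is.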
void DistributedBlockOutputStream::writeAsync(const Block & block)
{
    if (storage.getShardingKeyExpr() && (cluster->getShardsInfo().size() > 1))
        return writeSplitAsync(block);

    writeAsyncImpl(block);
    ++inserted_blocks;
}
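

/// Human-readable per-replica write statistics; appended to exception messages
/// and to the performance log line in writeSuffix().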
std::string DistributedBlockOutputStream::getCurrentStateDescription()
{
    std::stringstream buffer;
    const auto & addresses = cluster->getShardsAddresses();

    buffer << "Insertion status:\n";
    for (auto & shard_jobs : per_shard_jobs)
        for (JobReplica & job : shard_jobs.replicas_jobs)
        {
            buffer << "Wrote " << job.blocks_written << " blocks and " << job.rows_written << " rows"
                   << " on shard " << job.shard_index << " replica " << job.replica_index
                   << ", " << addresses[job.shard_index][job.replica_index].readableString();

            /// Performance statistics
            if (job.blocks_started > 0)
            {
                buffer << " (average " << job.elapsed_time_ms / job.blocks_started << " ms per block";
                if (job.blocks_started > 1)
                    buffer << ", the slowest block " << job.max_elapsed_time_for_block_ms << " ms";
                buffer << ")";
            }
            buffer << "\n";
        }

    return buffer.str();
}
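

/// Create one writing job per destination replica: a single job for a shard with
/// internal replication, otherwise one per remote replica, plus a local job when
/// this server is itself a replica of the shard.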
void DistributedBlockOutputStream::initWritingJobs(const Block & first_block)
{
    const Settings & settings = context.getSettingsRef();
    const auto & addresses_with_failovers = cluster->getShardsAddresses();
    const auto & shards_info = cluster->getShardsInfo();
    size_t num_shards = shards_info.size();

    remote_jobs_count = 0;
    local_jobs_count = 0;
    per_shard_jobs.resize(shards_info.size());

    for (size_t shard_index : ext::range(0, shards_info.size()))
    {
        const auto & shard_info = shards_info[shard_index];
        auto & shard_jobs = per_shard_jobs[shard_index];

        /// If hasInternalReplication, then prefer the local replica (unless prefer_localhost_replica is disabled)
        if (!shard_info.hasInternalReplication() || !shard_info.isLocal() || !settings.prefer_localhost_replica)
        {
            const auto & replicas = addresses_with_failovers[shard_index];

            for (size_t replica_index : ext::range(0, replicas.size()))
            {
                if (!replicas[replica_index].is_local || !settings.prefer_localhost_replica)
                {
                    shard_jobs.replicas_jobs.emplace_back(shard_index, replica_index, false, first_block);
                    ++remote_jobs_count;

                    if (shard_info.hasInternalReplication())
                        break;
                }
            }
        }

        if (shard_info.isLocal() && settings.prefer_localhost_replica)
        {
            shard_jobs.replicas_jobs.emplace_back(shard_index, 0, true, first_block);
            ++local_jobs_count;
        }

        if (num_shards > 1)
            shard_jobs.shard_current_block_permutation.reserve(first_block.rows());
    }
}


void DistributedBlockOutputStream::waitForJobs()
{
    pool->wait();

    if (insert_timeout)
    {
        if (static_cast<UInt64>(watch.elapsedSeconds()) > insert_timeout)
        {
            ProfileEvents::increment(ProfileEvents::DistributedSyncInsertionTimeoutExceeded);
            throw Exception("Synchronous distributed insert timeout exceeded.", ErrorCodes::TIMEOUT_EXCEEDED);
        }
    }

    size_t jobs_count = remote_jobs_count + local_jobs_count;
    size_t num_finished_jobs = finished_jobs_count;
    if (num_finished_jobs < jobs_count)
        LOG_WARNING(log, "Expected {} writing jobs, but finished only {}", jobs_count, num_finished_jobs);
}
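

/// Build a ThreadPool job that writes the rows of `current_block` belonging to
/// the job's shard, either through a pooled connection (RemoteBlockOutputStream)
/// or, for a local replica, through InterpreterInsertQuery.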
ThreadPool::Job DistributedBlockOutputStream::runWritingJob(DistributedBlockOutputStream::JobReplica & job, const Block & current_block)
{
    auto thread_group = CurrentThread::getGroup();
    return [this, thread_group, &job, &current_block]()
    {
        if (thread_group)
            CurrentThread::attachToIfDetached(thread_group);
        setThreadName("DistrOutStrProc");

        ++job.blocks_started;

        SCOPE_EXIT({
            ++finished_jobs_count;

            UInt64 elapsed_time_for_block_ms = watch_current_block.elapsedMilliseconds();
            job.elapsed_time_ms += elapsed_time_for_block_ms;
            job.max_elapsed_time_for_block_ms = std::max(job.max_elapsed_time_for_block_ms, elapsed_time_for_block_ms);
        });

        const auto & shard_info = cluster->getShardsInfo()[job.shard_index];
        size_t num_shards = cluster->getShardsInfo().size();
        auto & shard_job = per_shard_jobs[job.shard_index];
        const auto & addresses = cluster->getShardsAddresses();

        /// Generate current shard block
        if (num_shards > 1)
        {
            auto & shard_permutation = shard_job.shard_current_block_permutation;
            size_t num_shard_rows = shard_permutation.size();

            for (size_t j = 0; j < current_block.columns(); ++j)
            {
                const auto & src_column = current_block.getByPosition(j).column;
                auto & dst_column = job.current_shard_block.getByPosition(j).column;

                /// Zero permutation size has special meaning in IColumn::permute
                if (num_shard_rows)
                    dst_column = src_column->permute(shard_permutation, num_shard_rows);
                else
                    dst_column = src_column->cloneEmpty();
            }
        }

        const Block & shard_block = (num_shards > 1) ? job.current_shard_block : current_block;
        const Settings & settings = context.getSettingsRef();

        if (!job.is_local_job || !settings.prefer_localhost_replica)
        {
            if (!job.stream)
            {
                auto timeouts = ConnectionTimeouts::getTCPTimeoutsWithFailover(settings);
                if (shard_info.hasInternalReplication())
                {
                    /// Skip replica_index in case of internal replication
                    if (shard_job.replicas_jobs.size() != 1)
                        throw Exception("There are several writing jobs for an automatically replicated shard", ErrorCodes::LOGICAL_ERROR);

                    /// TODO: it makes sense to rewrite skip_unavailable_shards and max_parallel_replicas here
                    auto connections = shard_info.pool->getMany(timeouts, &settings, PoolMode::GET_ONE);
                    if (connections.empty() || connections.front().isNull())
                        throw Exception("Expected exactly one connection for shard " + toString(job.shard_index), ErrorCodes::LOGICAL_ERROR);

                    job.connection_entry = std::move(connections.front());
                }
                else
                {
                    const auto & replica = addresses.at(job.shard_index).at(job.replica_index);

                    const ConnectionPoolPtr & connection_pool = shard_info.per_replica_pools.at(job.replica_index);
                    if (!connection_pool)
                        throw Exception("Connection pool for replica " + replica.readableString() + " does not exist", ErrorCodes::LOGICAL_ERROR);

                    job.connection_entry = connection_pool->get(timeouts, &settings);
                    if (job.connection_entry.isNull())
                        throw Exception("Got empty connection for replica " + replica.readableString(), ErrorCodes::LOGICAL_ERROR);
                }

                if (throttler)
                    job.connection_entry->setThrottler(throttler);

                job.stream = std::make_shared<RemoteBlockOutputStream>(*job.connection_entry, timeouts, query_string, settings, context.getClientInfo());
                job.stream->writePrefix();
            }

            CurrentMetrics::Increment metric_increment{CurrentMetrics::DistributedSend};
            job.stream->write(shard_block);
        }
        else // local
        {
            if (!job.stream)
            {
                /// Forward user settings
                job.local_context = std::make_unique<Context>(context);

                InterpreterInsertQuery interp(query_ast, *job.local_context);
                auto block_io = interp.execute();

                job.stream = block_io.out;
                job.stream->writePrefix();
            }

            writeBlockConvert(job.stream, shard_block, shard_info.getLocalNodeCount());
        }

        job.blocks_written += 1;
        job.rows_written += shard_block.rows();
    };
}
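

/// Synchronous insertion: lazily initialize the jobs and the thread pool, compute
/// the per-shard row permutation, then schedule all jobs and wait for them
/// (subject to insert_timeout).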
void DistributedBlockOutputStream::writeSync(const Block & block)
{
    const Settings & settings = context.getSettingsRef();
    const auto & shards_info = cluster->getShardsInfo();
    size_t num_shards = shards_info.size();

    if (!pool)
    {
        /// Deferred initialization. Only for sync insertion.
        initWritingJobs(block);

        pool.emplace(remote_jobs_count + local_jobs_count);

        if (!throttler && (settings.max_network_bandwidth || settings.max_network_bytes))
        {
            throttler = std::make_shared<Throttler>(settings.max_network_bandwidth, settings.max_network_bytes,
                                                    "Network bandwidth limit for a query exceeded.");
        }

        watch.restart();
    }

    watch_current_block.restart();

    if (num_shards > 1)
    {
        auto current_selector = createSelector(block);

        /// Prepare row numbers for each shard
        for (size_t shard_index : ext::range(0, num_shards))
            per_shard_jobs[shard_index].shard_current_block_permutation.resize(0);

        for (size_t i = 0; i < block.rows(); ++i)
            per_shard_jobs[current_selector[i]].shard_current_block_permutation.push_back(i);
    }

    try
    {
        /// Run jobs in parallel for each block and wait for them
        finished_jobs_count = 0;
        for (size_t shard_index : ext::range(0, shards_info.size()))
            for (JobReplica & job : per_shard_jobs[shard_index].replicas_jobs)
                pool->scheduleOrThrowOnError(runWritingJob(job, block));
    }
    catch (...)
    {
        pool->wait();
        throw;
    }

    try
    {
        waitForJobs();
    }
    catch (Exception & exception)
    {
        exception.addMessage(getCurrentStateDescription());
        throw;
    }

    inserted_blocks += 1;
    inserted_rows += block.rows();
}
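

/// For synchronous inserts, finalize every open per-replica stream in parallel
/// and log the overall performance.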
void DistributedBlockOutputStream::writeSuffix()
{
    auto log_performance = [this]()
    {
        double elapsed = watch.elapsedSeconds();
        LOG_DEBUG(log, "It took {} sec. to insert {} blocks, {} rows per second. {}", elapsed, inserted_blocks, inserted_rows / elapsed, getCurrentStateDescription());
    };

    if (insert_sync && pool)
    {
        finished_jobs_count = 0;
        try
        {
            for (auto & shard_jobs : per_shard_jobs)
            {
                for (JobReplica & job : shard_jobs.replicas_jobs)
                {
                    if (job.stream)
                    {
                        pool->scheduleOrThrowOnError([&job]()
                        {
                            job.stream->writeSuffix();
                        });
                    }
                }
            }
        }
        catch (...)
        {
            pool->wait();
            throw;
        }

        try
        {
            pool->wait();
            log_performance();
        }
        catch (Exception & exception)
        {
            log_performance();
            exception.addMessage(getCurrentStateDescription());
            throw;
        }
    }
}
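

/// Evaluate the sharding key expression on the block and map each row to a shard number.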
IColumn::Selector DistributedBlockOutputStream::createSelector(const Block & source_block) const
{
    Block current_block_with_sharding_key_expr = source_block;
    storage.getShardingKeyExpr()->execute(current_block_with_sharding_key_expr);

    const auto & key_column = current_block_with_sharding_key_expr.getByName(storage.getShardingKeyColumnName());

    return storage.createSelector(cluster, key_column);
}
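

/// Scatter the block into one sub-block per shard according to the selector.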
Blocks DistributedBlockOutputStream::splitBlock(const Block & block)
{
    auto selector = createSelector(block);

    /// Split the block into num_shards smaller blocks, using 'selector'.

    const size_t num_shards = cluster->getShardsInfo().size();
    Blocks splitted_blocks(num_shards);

    for (size_t shard_idx = 0; shard_idx < num_shards; ++shard_idx)
        splitted_blocks[shard_idx] = block.cloneEmpty();

    size_t columns_in_block = block.columns();
    for (size_t col_idx_in_block = 0; col_idx_in_block < columns_in_block; ++col_idx_in_block)
    {
        MutableColumns splitted_columns = block.getByPosition(col_idx_in_block).column->scatter(num_shards, selector);
        for (size_t shard_idx = 0; shard_idx < num_shards; ++shard_idx)
            splitted_blocks[shard_idx].getByPosition(col_idx_in_block).column = std::move(splitted_columns[shard_idx]);
    }

    return splitted_blocks;
}


void DistributedBlockOutputStream::writeSplitAsync(const Block & block)
{
    Blocks splitted_blocks = splitBlock(block);
    const size_t num_shards = splitted_blocks.size();

    for (size_t shard_idx = 0; shard_idx < num_shards; ++shard_idx)
        if (splitted_blocks[shard_idx].rows())
            writeAsyncImpl(splitted_blocks[shard_idx], shard_idx);

    ++inserted_blocks;
}
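

/// Route a block destined for one shard: insert directly into the local replica
/// when possible, and/or persist it on disk for the directory monitor to ship
/// to the remote replicas.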
void DistributedBlockOutputStream::writeAsyncImpl(const Block & block, const size_t shard_id)
{
    const auto & shard_info = cluster->getShardsInfo()[shard_id];
    const auto & settings = context.getSettingsRef();

    if (shard_info.hasInternalReplication())
    {
        if (shard_info.isLocal() && settings.prefer_localhost_replica)
            /// Prefer insert into the current instance directly
            writeToLocal(block, shard_info.getLocalNodeCount());
        else
            writeToShard(block, {shard_info.pathForInsert(settings.prefer_localhost_replica)});
    }
    else
    {
        if (shard_info.isLocal() && settings.prefer_localhost_replica)
            writeToLocal(block, shard_info.getLocalNodeCount());

        std::vector<std::string> dir_names;
        for (const auto & address : cluster->getShardsAddresses()[shard_id])
            if (!address.is_local || !settings.prefer_localhost_replica)
                dir_names.push_back(address.toFullString(settings.use_compact_format_in_distributed_parts_names));

        if (!dir_names.empty())
            writeToShard(block, dir_names);
    }
}


void DistributedBlockOutputStream::writeToLocal(const Block & block, const size_t repeats)
{
    /// Async insert does not support settings forwarding yet, whereas the sync one does
    InterpreterInsertQuery interp(query_ast, context);

    auto block_io = interp.execute();

    block_io.out->writePrefix();
    writeBlockConvert(block_io.out, block, repeats);
    block_io.out->writeSuffix();
}
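

/// Persist the block for deferred delivery: write a single .bin file (a header
/// with the server revision, query, settings and client info, followed by the
/// compressed Native block) into the first destination directory, hardlink it
/// into the others, and wake up the corresponding directory monitors.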
void DistributedBlockOutputStream::writeToShard(const Block & block, const std::vector<std::string> & dir_names)
{
    /// tmp directory is used to ensure atomicity of transactions
    /// and to keep the monitor thread from reading incomplete data
    std::string first_file_tmp_path{};

    const auto & [disk, data_path] = storage.getPath();

    auto it = dir_names.begin();
    /// on the first iteration, write the block to a temporary directory for subsequent
    /// hardlinking, to ensure the inode is not freed until we're done
    {
        const std::string path(disk + data_path + *it);
        Poco::File(path).createDirectory();

        const std::string tmp_path(path + "/tmp/");
        Poco::File(tmp_path).createDirectory();

        const std::string file_name(toString(storage.file_names_increment.get()) + ".bin");
        first_file_tmp_path = tmp_path + file_name;

        /// Write batch to temporary location
        {
            WriteBufferFromFile out{first_file_tmp_path};
            CompressedWriteBuffer compress{out};
            NativeBlockOutputStream stream{compress, ClickHouseRevision::get(), block.cloneEmpty()};

            /// Prepare the header.
            /// We wrap the header into a string for compatibility with older versions:
            /// a shard will be able to read the header partly and ignore other parts based on its version.
            WriteBufferFromOwnString header_buf;
            writeVarUInt(ClickHouseRevision::get(), header_buf);
            writeStringBinary(query_string, header_buf);
            context.getSettingsRef().serialize(header_buf);
            context.getClientInfo().write(header_buf, ClickHouseRevision::get());

            /// Add new fields here, for example:
            /// writeVarUInt(my_new_data, header_buf);

            /// Write the header.
            const StringRef header = header_buf.stringRef();
            writeVarUInt(DBMS_DISTRIBUTED_SIGNATURE_HEADER, out);
            writeStringBinary(header, out);
            writePODBinary(CityHash_v1_0_2::CityHash128(header.data, header.size), out);

            stream.writePrefix();
            stream.write(block);
            stream.writeSuffix();
        }

        // Create hardlink here to reuse the increment number
        const std::string block_file_path(path + '/' + file_name);
        createHardLink(first_file_tmp_path, block_file_path);
    }
    ++it;

    /// Make hardlinks for the remaining directories
    for (; it != dir_names.end(); ++it)
    {
        const std::string path(disk + data_path + *it);
        Poco::File(path).createDirectory();

        const std::string block_file_path(path + '/' + toString(storage.file_names_increment.get()) + ".bin");
        createHardLink(first_file_tmp_path, block_file_path);
    }

    /// remove the temporary file, enabling the OS to reclaim the inode after all threads
    /// have removed their corresponding files
    Poco::File(first_file_tmp_path).remove();

    /// Notify the directory monitors
    auto sleep_ms = context.getSettingsRef().distributed_directory_monitor_sleep_time_ms;
    for (const auto & dir_name : dir_names)
    {
        auto & directory_monitor = storage.requireDirectoryMonitor(disk, dir_name);
        directory_monitor.scheduleAfter(sleep_ms.totalMilliseconds());
    }
}

}