Add write logic and remove strange multifile read logic
parent 2d3e08fc74
commit 07e11577d3
dbms/src/IO/ReadBufferFromHDFS.cpp
@@ -15,7 +15,7 @@ namespace ErrorCodes
 struct ReadBufferFromHDFS::ReadBufferFromHDFSImpl
 {
     std::string hdfs_uri;
-    struct hdfsBuilder * builder;
+    hdfsBuilder * builder;
     hdfsFS fs;
     hdfsFile fin;
 
@@ -36,6 +36,7 @@ struct ReadBufferFromHDFS::ReadBufferFromHDFSImpl
     // set read/connect timeout, default value in libhdfs3 is about 1 hour, and too large
+    /// TODO Allow to tune from query Settings.
     hdfsBuilderConfSetStr(builder, "input.read.timeout", "60000"); // 1 min
     hdfsBuilderConfSetStr(builder, "input.write.timeout", "60000"); // 1 min
     hdfsBuilderConfSetStr(builder, "input.connect.timeout", "60000"); // 1 min
 
     hdfsBuilderSetNameNode(builder, host.c_str());
@@ -65,9 +66,8 @@ struct ReadBufferFromHDFS::ReadBufferFromHDFSImpl
     {
         int bytes_read = hdfsRead(fs, fin, start, size);
         if (bytes_read < 0)
-        {
-            throw Exception("Fail to read HDFS file: " + hdfs_uri + " " + std::string(hdfsGetLastError()), ErrorCodes::NETWORK_ERROR);
-        }
+            throw Exception("Fail to read HDFS file: " + hdfs_uri + " " + std::string(hdfsGetLastError()),
+                ErrorCodes::NETWORK_ERROR);
         return bytes_read;
     }
 };
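For context (illustrative, not part of the commit): both buffers wrap libhdfs3's C API, and the calls above are the whole story — build a config, connect, open, then loop on short reads. A minimal standalone sketch of the read-side call sequence, using only calls that appear in this diff; the whole-file loop, the buffer size, and plain std::runtime_error are assumptions of the sketch:

#include <hdfs/hdfs.h>   // libhdfs3
#include <fcntl.h>       // O_RDONLY
#include <stdexcept>
#include <string>
#include <vector>

std::vector<char> readHDFSFile(const std::string & host, int port, const std::string & path)
{
    hdfsBuilder * builder = hdfsNewBuilder();
    // Same timeouts the commit hard-codes; libhdfs3's defaults are ~1 hour.
    hdfsBuilderConfSetStr(builder, "input.read.timeout", "60000");    // 1 min
    hdfsBuilderConfSetStr(builder, "input.connect.timeout", "60000"); // 1 min
    hdfsBuilderSetNameNode(builder, host.c_str());
    hdfsBuilderSetNameNodePort(builder, port);

    hdfsFS fs = hdfsBuilderConnect(builder);
    if (!fs)
        throw std::runtime_error(std::string("Unable to connect to HDFS: ") + hdfsGetLastError());

    hdfsFile fin = hdfsOpenFile(fs, path.c_str(), O_RDONLY, 0, 0, 0);
    if (!fin)
        throw std::runtime_error(std::string("Unable to open HDFS file: ") + hdfsGetLastError());

    std::vector<char> data;
    char buf[64 * 1024];
    int n;
    // hdfsRead may return fewer bytes than requested; 0 means EOF, < 0 means error.
    while ((n = hdfsRead(fs, fin, buf, sizeof(buf))) > 0)
        data.insert(data.end(), buf, buf + n);
    if (n < 0)
        throw std::runtime_error(std::string("Fail to read HDFS file: ") + hdfsGetLastError());

    hdfsCloseFile(fs, fin);
    hdfsFreeBuilder(builder);
    return data;
}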
dbms/src/IO/WriteBufferFromHDFS.cpp (new file, 119 lines)
@@ -0,0 +1,119 @@
+#include <IO/WriteBufferFromHDFS.h>
+
+#if USE_HDFS
+#include <Poco/URI.h>
+#include <hdfs/hdfs.h>
+
+namespace DB
+{
+
+namespace ErrorCodes
+{
+    extern const int BAD_ARGUMENTS;
+    extern const int NETWORK_ERROR;
+    extern const int CANNOT_FSYNC;
+}
+
+
+struct WriteBufferFromHDFS::WriteBufferFromHDFSImpl
+{
+    std::string hdfs_uri;
+    hdfsBuilder * builder;
+    hdfsFS fs;
+    hdfsFile fout;
+
+    WriteBufferFromHDFSImpl(const std::string & hdfs_name_)
+    {
+        builder = hdfsNewBuilder();
+        hdfs_uri = hdfs_name_;
+        Poco::URI uri(hdfs_name_);
+        auto & host = uri.getHost();
+        auto port = uri.getPort();
+        auto & path = uri.getPath();
+        if (host.empty() || port == 0 || path.empty())
+        {
+            throw Exception("Illegal HDFS URI: " + hdfs_uri, ErrorCodes::BAD_ARGUMENTS);
+        }
+        // set read/connect timeout, default value in libhdfs3 is about 1 hour, and too large
+        /// TODO Allow to tune from query Settings.
+        hdfsBuilderConfSetStr(builder, "input.read.timeout", "60000"); // 1 min
+        hdfsBuilderConfSetStr(builder, "input.write.timeout", "60000"); // 1 min
+        hdfsBuilderConfSetStr(builder, "input.connect.timeout", "60000"); // 1 min
+
+        hdfsBuilderSetNameNode(builder, host.c_str());
+        hdfsBuilderSetNameNodePort(builder, port);
+        fs = hdfsBuilderConnect(builder);
+
+        if (fs == nullptr)
+        {
+            throw Exception("Unable to connect to HDFS: " + std::string(hdfsGetLastError()), ErrorCodes::NETWORK_ERROR);
+        }
+
+        fout = hdfsOpenFile(fs, path.c_str(), O_WRONLY, 0, 0, 0);
+    }
+
+    ~WriteBufferFromHDFSImpl()
+    {
+        close();
+        hdfsFreeBuilder(builder);
+    }
+
+    void close()
+    {
+        hdfsCloseFile(fs, fout);
+    }
+
+    int write(const char * start, size_t size)
+    {
+        int bytes_written = hdfsWrite(fs, fout, start, size);
+        if (bytes_written < 0)
+            throw Exception("Fail to write HDFS file: " + hdfs_uri + " " + std::string(hdfsGetLastError()),
+                ErrorCodes::NETWORK_ERROR);
+        return bytes_written;
+    }
+
+    void sync()
+    {
+        int result = hdfsSync(fs, fout);
+        if (result < 0)
+            throwFromErrno("Cannot HDFS sync: " + hdfs_uri + " " + std::string(hdfsGetLastError()),
+                ErrorCodes::CANNOT_FSYNC);
+    }
+};
+
+WriteBufferFromHDFS::WriteBufferFromHDFS(const std::string & hdfs_name_, size_t buf_size)
+    : BufferWithOwnMemory<WriteBuffer>(buf_size)
+    , impl(std::make_unique<WriteBufferFromHDFSImpl>(hdfs_name_))
+{
+}
+
+
+void WriteBufferFromHDFS::nextImpl()
+{
+    if (!offset())
+        return;
+
+    size_t bytes_written = 0;
+
+    while (bytes_written != offset())
+        bytes_written += impl->write(working_buffer.begin() + bytes_written, offset() - bytes_written);
+}
+
+
+void WriteBufferFromHDFS::sync()
+{
+    impl->sync();
+}
+
+const std::string & WriteBufferFromHDFS::getHDFSUri() const
+{
+    return impl->hdfs_uri;
+}
+
+WriteBufferFromHDFS::~WriteBufferFromHDFS()
+{
+}
+
+}
+#endif
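A note on the buffering contract (illustrative, not part of the commit): WriteBuffer accumulates bytes in working_buffer and the framework calls nextImpl() when the buffer fills or next() is invoked; the loop in nextImpl() exists because hdfsWrite() may accept fewer bytes than requested. A minimal usage sketch, assuming a reachable namenode — the URI and file name are hypothetical:

#include <IO/WriteBufferFromHDFS.h>

void writeExample()
{
    DB::WriteBufferFromHDFS out("hdfs://namenode:9000/example_file");
    out.write("hello\n", 6);  // goes into working_buffer, not yet on HDFS
    out.next();               // flushes via nextImpl(), looping over short writes
    out.sync();               // impl->sync(): hdfsSync() on the open file
}

Note that the destructor shown above does not flush outstanding data, so a caller that skips next()/sync() before destruction would appear to lose whatever is still buffered.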
dbms/src/IO/WriteBufferFromHDFS.h (new file, 33 lines)
@@ -0,0 +1,33 @@
+#pragma once
+
+#include <Common/config.h>
+
+#if USE_HDFS
+#include <IO/WriteBuffer.h>
+#include <IO/BufferWithOwnMemory.h>
+#include <string>
+#include <memory>
+
+namespace DB
+{
+/** Accepts an HDFS URI and opens the file for writing.
+  * Closes the file by itself (thus "owns" the file handle).
+  */
+class WriteBufferFromHDFS : public BufferWithOwnMemory<WriteBuffer>
+{
+    struct WriteBufferFromHDFSImpl;
+    std::unique_ptr<WriteBufferFromHDFSImpl> impl;
+public:
+    WriteBufferFromHDFS(const std::string & hdfs_name_, size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE);
+    WriteBufferFromHDFS(WriteBufferFromHDFS &&) = default;
+
+    void nextImpl() override;
+
+    ~WriteBufferFromHDFS() override;
+
+    const std::string & getHDFSUri() const;
+
+    void sync();
+};
+}
+#endif
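The header is a textbook Pimpl: WriteBufferFromHDFSImpl is only forward-declared and held through a std::unique_ptr, so <hdfs/hdfs.h> and the libhdfs3 types never leak into translation units that merely include this header. This is also why ~WriteBufferFromHDFS() is defined out-of-line (and empty) in the .cpp — std::unique_ptr needs the complete Impl type at the point where the destructor is instantiated. A generic sketch of the idiom, with illustrative names:

// widget.h — includers never see the third-party headers
#include <memory>

class Widget
{
    struct Impl;                 // defined only in widget.cpp
    std::unique_ptr<Impl> impl;  // fine with an incomplete type...
public:
    Widget();
    ~Widget();                   // ...as long as ~Widget() is defined in widget.cpp
};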
dbms/src/Storages/StorageHDFS.cpp
@@ -8,13 +8,12 @@
 #include <Interpreters/evaluateConstantExpression.h>
 #include <Parsers/ASTLiteral.h>
 #include <IO/ReadBufferFromHDFS.h>
+#include <IO/WriteBufferFromHDFS.h>
 #include <Formats/FormatFactory.h>
+#include <DataStreams/IBlockOutputStream.h>
-#include <DataStreams/UnionBlockInputStream.h>
 #include <DataStreams/IProfilingBlockInputStream.h>
 #include <DataStreams/OwningBlockInputStream.h>
 #include <Poco/Path.h>
-#include <Common/parseRemoteDescription.h>
 
 
 namespace DB
@@ -30,94 +29,101 @@ StorageHDFS::StorageHDFS(const String & uri_,
     const std::string & table_name_,
     const String & format_name_,
     const ColumnsDescription & columns_,
-    Context &)
-    : IStorage(columns_), uri(uri_), format_name(format_name_), table_name(table_name_)
+    Context & context_)
+    : IStorage(columns_)
+    , uri(uri_)
+    , format_name(format_name_)
+    , table_name(table_name_)
+    , context(context_)
 {
 }
 
 namespace
 {
-class HDFSBlockInputStream : public IProfilingBlockInputStream
-{
-public:
-    HDFSBlockInputStream(const String & uri,
-        const String & format,
-        const Block & sample_block,
-        const Context & context,
-        size_t max_block_size)
-    {
-        // Assume no query and fragment in uri, todo, add sanity check
-        String glob_file_names;
-        String url_prefix = uri.substr(0, uri.find_last_of('/'));
-        if (url_prefix.length() == uri.length())
-        {
-            glob_file_names = uri;
-            url_prefix.clear();
-        }
-        else
-        {
-            url_prefix += "/";
-            glob_file_names = uri.substr(url_prefix.length());
-        }
-
-        std::vector<String> glob_names_list = parseRemoteDescription(glob_file_names, 0, glob_file_names.length(), ',' , 100/* hard coded max files */);
-
-        BlockInputStreams inputs;
-
-        for (const auto & name : glob_names_list)
-        {
-            std::unique_ptr<ReadBuffer> read_buf = std::make_unique<ReadBufferFromHDFS>(url_prefix + name);
-
-            inputs.emplace_back(
-                std::make_shared<OwningBlockInputStream<ReadBuffer>>(
-                    FormatFactory::instance().getInput(format, *read_buf, sample_block, context, max_block_size),
-                    std::move(read_buf)));
-        }
-
-        if (inputs.size() == 0)
-            throw Exception("StorageHDFS inputs interpreter error", ErrorCodes::BAD_ARGUMENTS);
-
-        if (inputs.size() == 1)
-        {
-            reader = inputs[0];
-        }
-        else
-        {
-            reader = std::make_shared<UnionBlockInputStream>(inputs, nullptr, context.getSettingsRef().max_distributed_connections);
-        }
-    }
-
-    String getName() const override
-    {
-        return "HDFS";
-    }
-
-    Block readImpl() override
-    {
-        return reader->read();
-    }
-
-    Block getHeader() const override
-    {
-        return reader->getHeader();
-    }
-
-    void readPrefixImpl() override
-    {
-        reader->readPrefix();
-    }
-
-    void readSuffixImpl() override
-    {
-        if (auto concrete_reader = dynamic_cast<UnionBlockInputStream *>(reader.get()))
-            concrete_reader->cancel(false); // skip Union read suffix assertion
-
-        reader->readSuffix();
-    }
-
-private:
-    BlockInputStreamPtr reader;
-};
+
+class HDFSBlockInputStream : public IProfilingBlockInputStream
+{
+public:
+    HDFSBlockInputStream(const String & uri,
+        const String & format,
+        const Block & sample_block,
+        const Context & context,
+        size_t max_block_size)
+    {
+        std::unique_ptr<ReadBuffer> read_buf = std::make_unique<ReadBufferFromHDFS>(uri);
+        auto input_stream = FormatFactory::instance().getInput(format, *read_buf, sample_block, context, max_block_size);
+        reader = std::make_shared<OwningBlockInputStream<ReadBuffer>>(input_stream, std::move(read_buf));
+    }
+
+    String getName() const override
+    {
+        return "HDFS";
+    }
+
+    Block readImpl() override
+    {
+        return reader->read();
+    }
+
+    Block getHeader() const override
+    {
+        return reader->getHeader();
+    }
+
+    void readPrefixImpl() override
+    {
+        reader->readPrefix();
+    }
+
+    void readSuffixImpl() override
+    {
+        reader->readSuffix();
+    }
+
+private:
+    BlockInputStreamPtr reader;
+};
+
+class HDFSBlockOutputStream : public IBlockOutputStream
+{
+public:
+    HDFSBlockOutputStream(const String & uri,
+        const String & format,
+        const Block & sample_block_,
+        const Context & context)
+        : sample_block(sample_block_)
+    {
+        write_buf = std::make_unique<WriteBufferFromHDFS>(uri);
+        writer = FormatFactory::instance().getOutput(format, *write_buf, sample_block, context);
+    }
+
+    Block getHeader() const override
+    {
+        return sample_block;
+    }
+
+    void write(const Block & block) override
+    {
+        writer->write(block);
+    }
+
+    void writePrefix() override
+    {
+        writer->writePrefix();
+    }
+
+    void writeSuffix() override
+    {
+        writer->writeSuffix();
+        writer->flush();
+        write_buf->sync();
+    }
+
+private:
+    Block sample_block;
+    std::unique_ptr<WriteBufferFromHDFS> write_buf;
+    BlockOutputStreamPtr writer;
+};
 
 }
 
@@ -142,7 +148,7 @@ void StorageHDFS::rename(const String & /*new_path_to_db*/, const String & /*new
 
 BlockOutputStreamPtr StorageHDFS::write(const ASTPtr & /*query*/, const Settings & /*settings*/)
 {
-    throw Exception("StorageHDFS write is not supported yet", ErrorCodes::NOT_IMPLEMENTED);
+    return std::make_shared<HDFSBlockOutputStream>(uri, format_name, getSampleBlock(), context);
 }
 
 void registerStorageHDFS(StorageFactory & factory)
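How the pieces fit together (a sketch of the IBlockOutputStream protocol the diff relies on, not code from the commit): an INSERT asks the storage for an output stream via write(), then drives writePrefix / write / writeSuffix; for StorageHDFS the suffix step flushes the format writer and hdfsSync()s the file. The include paths and the driver helper itself are assumptions for that era of the codebase:

#include <Storages/IStorage.h>
#include <DataStreams/IBlockOutputStream.h>
#include <vector>

// Hypothetical driver: push blocks through whatever stream the storage returns.
void insertBlocks(DB::IStorage & storage, const DB::ASTPtr & query,
    const DB::Settings & settings, const std::vector<DB::Block> & blocks)
{
    DB::BlockOutputStreamPtr out = storage.write(query, settings);  // HDFSBlockOutputStream here
    out->writePrefix();                 // delegates to the format writer
    for (const auto & block : blocks)
        out->write(block);              // serialized into WriteBufferFromHDFS
    out->writeSuffix();                 // writer->flush() + write_buf->sync()
}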
dbms/src/Storages/StorageHDFS.h
@@ -48,6 +48,7 @@ private:
     String uri;
     String format_name;
     String table_name;
+    Context & context;
 
     Logger * log = &Logger::get("StorageHDFS");
 };
dbms/tests/integration/test_storage_hdfs/test.py
@@ -45,3 +45,14 @@ def test_read_write_table(started_cluster):
     assert hdfs_api.read_data("/simple_table_function") == data
 
     assert node1.query("select * from hdfs('hdfs://hdfs1:9000/simple_table_function', 'TSV', 'id UInt64, text String, number Float64')") == data
+
+
+def test_write_table(started_cluster):
+    hdfs_api = HDFSApi("root")
+
+    node1.query("create table OtherHDFSStorage (id UInt32, name String, weight Float64) ENGINE = HDFS('hdfs://hdfs1:9000/other_storage', 'TSV')")
+    node1.query("insert into OtherHDFSStorage values (10, 'tomas', 55.55), (11, 'jack', 32.54)")
+
+    result = "10\ttomas\t55.55\n11\tjack\t32.54\n"
+    assert hdfs_api.read_data("/other_storage") == result
+    assert node1.query("select * from OtherHDFSStorage order by id") == result