diff --git a/src/Storages/StorageS3.cpp b/src/Storages/StorageS3.cpp
index ce31308fdd7..ab48afda533 100644
--- a/src/Storages/StorageS3.cpp
+++ b/src/Storages/StorageS3.cpp
@@ -660,8 +660,14 @@ Pipe StorageS3::read(
     Block block_for_format;
     if (isColumnOriented())
     {
+        auto fetch_columns = column_names;
+        fetch_columns.erase(std::remove_if(fetch_columns.begin(), fetch_columns.end(),
+            [](const String & col){return col == "_path" || col == "_file"; }), fetch_columns.end());
+        if (fetch_columns.empty())
+            fetch_columns.push_back(ExpressionActions::getSmallestColumn(storage_snapshot->metadata->getColumns().getAllPhysical()));
+
         columns_description = ColumnsDescription{
-            storage_snapshot->getSampleBlockForColumns(column_names).getNamesAndTypesList()};
+            storage_snapshot->getSampleBlockForColumns(fetch_columns).getNamesAndTypesList()};
         block_for_format = storage_snapshot->getSampleBlockForColumns(columns_description.getNamesOfPhysical());
     }
     else
diff --git a/tests/integration/test_storage_hdfs/test.py b/tests/integration/test_storage_hdfs/test.py
index 7f340424ccf..81182d44ab2 100644
--- a/tests/integration/test_storage_hdfs/test.py
+++ b/tests/integration/test_storage_hdfs/test.py
@@ -554,6 +554,16 @@ def test_insert_select_schema_inference(started_cluster):
     assert int(result) == 1
 
 
+def test_virtual_column(started_cluster):
+    hdfs_api = started_cluster.hdfs_api
+
+    table_function = (f"hdfs('hdfs://hdfs1:9000/parquet', 'Parquet', 'a Int32, b String')")
+    node1.query(f"insert into table function {table_function} SELECT 1, 'kek'")
+
+    result = node1.query(f"SELECT _path FROM {table_function}")
+    assert result.strip() == "parquet"
+
+
 if __name__ == "__main__":
     cluster.start()
     input("Cluster created, press any key to destroy...")
diff --git a/tests/integration/test_storage_s3/test.py b/tests/integration/test_storage_s3/test.py
index dd29d0a5d6a..5a2c7722e2b 100644
--- a/tests/integration/test_storage_s3/test.py
+++ b/tests/integration/test_storage_s3/test.py
@@ -1375,3 +1375,12 @@ def test_insert_select_schema_inference(started_cluster):
         f"select * from s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test_insert_select.native')"
     )
     assert int(result) == 1
+
+
+def test_virtual_columns(started_cluster):
+    bucket = started_cluster.minio_bucket
+    instance = started_cluster.instances["dummy"]  # type: ClickHouseInstance
+    name = "test_table"
+
+    result = instance.query("SELECT _path FROM s3(s3_parquet, format='Parquet')")
+    assert result.strip() == "root/test_parquet"