#include "clickhouse_config.h"
#if USE_AWS_S3
#include <TableFunctions/TableFunctionS3Cluster.h>
#include <TableFunctions/TableFunctionFactory.h>
#include <Interpreters/parseColumnsListForTableFunction.h>
#include <Storages/StorageS3.h>
#include "registerTableFunctions.h"
#include <memory>
namespace DB
{
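
/** Picks the storage used by the s3Cluster table function. On the
  * initiator it builds a StorageS3Cluster, which distributes the S3 keys
  * matching the glob across the cluster; on worker nodes (secondary
  * queries) it builds a plain StorageS3 over the keys that node was handed.
  */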
StoragePtr TableFunctionS3Cluster::executeImpl(
    const ASTPtr & /*function*/, ContextPtr context,
    const std::string & table_name, ColumnsDescription /*cached_columns*/, bool /*is_insert_query*/) const
{
    StoragePtr storage;
    ColumnsDescription columns;
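
    /// Resolve the column list: an explicit structure argument wins,
    /// otherwise fall back to the structure hint (set e.g. for
    /// INSERT ... SELECT queries); if neither is available, the storage
    /// infers the schema from the data itself.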
    bool structure_argument_was_provided = configuration.structure != "auto";
    if (structure_argument_was_provided)
    {
        columns = parseColumnsListFromString(configuration.structure, context);
    }
    else if (!structure_hint.empty())
    {
        columns = structure_hint;
    }
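
    /// A secondary query is the per-node part of a distributed query,
    /// forwarded by the initiator to the workers.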
    if (context->getClientInfo().query_kind == ClientInfo::QueryKind::SECONDARY_QUERY)
    {
        /// On a worker node the filename won't contain globs.
        storage = std::make_shared<StorageS3>(
            configuration,
            context,
            StorageID(getDatabaseName(), table_name),
            columns,
            ConstraintsDescription{},
            /* comment */String{},
            /* format_settings */std::nullopt, /// No format_settings for S3Cluster
            /*distributed_processing=*/true);
    }
    else
    {
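        /// On the initiator: StorageS3Cluster fans the query out to the
        /// workers; structure_argument_was_provided tells it whether it must
        /// first add the inferred structure to the query it sends, so the
        /// workers don't repeat schema inference.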
        storage = std::make_shared<StorageS3Cluster>(
            cluster_name,
            configuration,
            StorageID(getDatabaseName(), table_name),
            columns,
            ConstraintsDescription{},
            context,
            structure_argument_was_provided);
    }

    storage->startup();
    return storage;
}
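
/// Illustrative invocation of the function registered below
/// (cluster name, URL, and schema here are hypothetical):
///     SELECT * FROM s3Cluster('my_cluster',
///         'https://bucket.s3.amazonaws.com/data/*.csv',
///         'CSV', 'a Int32, b String');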
void registerTableFunctionS3Cluster(TableFunctionFactory & factory)
{
    factory.registerFunction<TableFunctionS3Cluster>();
}

}
#endif