<?xml version="1.0"?>
<clickhouse>
    <!-- ZooKeeper is used to store metadata about replicas when using Replicated tables.
         Optional: if you don't use replicated tables, you can omit this section.

         See https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/replication/
    -->
    <zookeeper>
        <node index="1">
            <host>zookeeper-1</host>
            <port>2181</port>
        </node>
        <!-- <node index="2">
            <host>zookeeper-2</host>
            <port>2181</port>
        </node>
        <node index="3">
            <host>zookeeper-3</host>
            <port>2181</port>
        </node> -->
    </zookeeper>
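
    <!-- A minimal sketch, not part of the original file: Replicated*MergeTree tables usually
         refer to their ZooKeeper path and replica name via substitution macros, which can be
         declared here. The values below are placeholder assumptions and must be unique per replica.
    <macros>
        <shard>01</shard>
        <replica>clickhouse</replica>
    </macros>
    -->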

    <!-- Configuration of clusters that could be used in Distributed tables.
         https://clickhouse.com/docs/en/operations/table_engines/distributed/
    -->
    <remote_servers>
        <cluster>
            <!-- Inter-server per-cluster secret for Distributed queries.
                 Default: no secret (no authentication will be performed).

                 If set, Distributed queries will be validated on the shards, so at least:
                 - such a cluster should exist on the shard,
                 - such a cluster should have the same secret.

                 Also (and more importantly), the initial_user will be used as the current user for the query.

                 Right now the protocol is fairly simple and only takes into account:
                 - the cluster name
                 - the query

                 It would also be nice if the following were implemented:
                 - source hostname (see interserver_http_host), but then it would depend on DNS;
                   an IP address could be used instead, but then it needs to be obtained correctly on the initiator node.
                 - target hostname / IP address (same notes as for the source hostname)
                 - time-based security tokens
            -->
            <!-- <secret></secret> -->
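            <!-- For example (the value below is a placeholder, not from the original file):
                 <secret>some-long-random-string</secret>
                 The same value must be configured on every server that defines this cluster. -->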
            <shard>
                <!-- Optional. Whether to write data to just one of the replicas. Default: false (write data to all replicas). -->
                <!-- <internal_replication>false</internal_replication> -->
                <!-- Optional. Shard weight when writing data. Default: 1. -->
                <!-- <weight>1</weight> -->
                <replica>
                    <host>clickhouse</host>
                    <port>9000</port>
                    <!-- Optional. Priority of the replica for load_balancing. Default: 1 (a lower value means higher priority). -->
                    <!-- <priority>1</priority> -->
                </replica>
            </shard>
            <!-- <shard>
                <replica>
                    <host>clickhouse-2</host>
                    <port>9000</port>
                </replica>
            </shard>
            <shard>
                <replica>
                    <host>clickhouse-3</host>
                    <port>9000</port>
                </replica>
            </shard> -->
        </cluster>
    </remote_servers>
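
    <!-- Usage sketch (the database and table names are placeholders): a Distributed table
         refers to this cluster by its name here, e.g.
         ENGINE = Distributed('cluster', 'default', 'local_table'[, sharding_key])
         See the documentation link above for details. -->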
</clickhouse>