Docker 安装 Citus 单节点集群:全面指南与详细操作
Docker 安装 Citus 单节点集群:全面指南与详细操作
文章目录
- Docker 安装 Citus 单节点集群:全面指南与详细操作
- 一 服务器资源
- 二 部署图
- 三 安装部署
- 1 创建网络
- 2 运行脚本
- 1)docker-compose.cituscd1.yml
- 2)docker-compose.cituswk1.yml
- 3)docker-compose.cituswk2.yml
- 4)docker-compose.cituswk3.yml
- 四 配置访问
- 五 节点添加
- 六 示例创建表
本文详细介绍了如何使用 Docker 安装 Citus 单节点集群,并为其配置多个工作节点与协调器。通过具体的服务器资源表、部署示意图,以及 docker-compose
脚本,逐步指导读者如何创建 Docker 网络、配置 PostgreSQL 的 postgresql.conf
和 pg_hba.conf
,并完成节点的添加与管理。此外,本文还提供了完整的 SQL 命令示例,帮助用户检查节点健康状况、查看节点表信息、创建分布式表和分片管理。无论是初学者还是有经验的开发者,都可以通过本文轻松完成 Citus 集群的安装与配置。
一 服务器资源
域名解析 | 服务器IP | 端口 | 角色 | 备注 |
---|---|---|---|---|
pg-cd1 | 192.168.0.1 | 15434 | coordinator | |
pg-wk1 | 192.168.0.1 | 15432 | worker | |
pg-wk2 | 192.168.0.2 | 15432 | worker | |
pg-wk3 | 192.168.0.3 | 15432 | worker | |
二 部署图
三 安装部署
1 创建网络
docker network create --driver bridge dbnet
2 运行脚本
1)docker-compose.cituscd1.yml
# docker-compose.cituscd1.yml — Citus coordinator node (pg-cd-1)
version: "3"
services:
  pg-cd-1:
    image: citusdata/citus:12.1.3
    restart: always
    container_name: pg-cd-1
    environment:
      - TZ=Asia/Shanghai
      - POSTGRES_USER=postgres
      # NOTE(review): demo-grade password — change before any non-lab use
      - POSTGRES_PASSWORD=123456
      # PGUSER/PGPASSWORD let in-container psql/pg_isready connect without flags
      - PGUSER=postgres
      - PGPASSWORD=123456
    working_dir: /postgresql
    ports:
      # host 15434 -> container 5432 (coordinator port used by citus_set_coordinator_host)
      - "15434:5432"
    networks:
      - dbnet
    extra_hosts:
      # static hostname -> IP entries so every node resolves the others by name
      - "pg-cd1:192.168.0.1"
      - "pg-wk1:192.168.0.1"
      - "pg-wk2:192.168.0.2"
      - "pg-wk3:192.168.0.3"
    volumes:
      - ./postgresql/data:/var/lib/postgresql/data
      - ./postgresql/healthcheck-volume:/healthcheck
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U postgres"]
      interval: 30s
      timeout: 10s
      retries: 5
networks:
  dbnet:
    # created beforehand: docker network create --driver bridge dbnet
    external: true
2)docker-compose.cituswk1.yml
# docker-compose.cituswk1.yml — Citus worker node 1 (pg-wk-1)
version: "3"
services:
  pg-wk-1:
    image: citusdata/citus:12.1.3
    restart: always
    container_name: pg-wk-1
    environment:
      - TZ=Asia/Shanghai
      - POSTGRES_USER=postgres
      # NOTE(review): demo-grade password — change before any non-lab use
      - POSTGRES_PASSWORD=123456
      - PGUSER=postgres
      - PGPASSWORD=123456
    working_dir: /postgresql
    networks:
      - dbnet
    extra_hosts:
      # static hostname -> IP entries so every node resolves the others by name
      - "pg-cd1:192.168.0.1"
      - "pg-wk1:192.168.0.1"
      - "pg-wk2:192.168.0.2"
      - "pg-wk3:192.168.0.3"
    ports:
      # host 15432 -> container 5432 (worker port used by citus_add_node)
      - "15432:5432"
    volumes:
      - ./postgresql/data:/var/lib/postgresql/data
      - ./postgresql/healthcheck-volume:/healthcheck
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U postgres"]
      interval: 30s
      timeout: 10s
      retries: 5
networks:
  dbnet:
    # created beforehand: docker network create --driver bridge dbnet
    external: true
3)docker-compose.cituswk2.yml
# docker-compose.cituswk2.yml — Citus worker node 2 (pg-wk-2)
version: "3"
services:
  pg-wk-2:
    image: citusdata/citus:12.1.3
    restart: always
    container_name: pg-wk-2
    environment:
      - TZ=Asia/Shanghai
      - POSTGRES_USER=postgres
      # NOTE(review): demo-grade password — change before any non-lab use
      - POSTGRES_PASSWORD=123456
      - PGUSER=postgres
      - PGPASSWORD=123456
    working_dir: /postgresql
    networks:
      - dbnet
    extra_hosts:
      # static hostname -> IP entries so every node resolves the others by name
      - "pg-cd1:192.168.0.1"
      - "pg-wk1:192.168.0.1"
      - "pg-wk2:192.168.0.2"
      - "pg-wk3:192.168.0.3"
    ports:
      # host 15432 -> container 5432 (worker port used by citus_add_node)
      - "15432:5432"
    volumes:
      - ./postgresql/data:/var/lib/postgresql/data
      - ./postgresql/healthcheck-volume:/healthcheck
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U postgres"]
      interval: 30s
      timeout: 10s
      retries: 5
networks:
  dbnet:
    # created beforehand: docker network create --driver bridge dbnet
    external: true
4)docker-compose.cituswk3.yml
# docker-compose.cituswk3.yml — Citus worker node 3 (pg-wk-3)
version: "3"
services:
  pg-wk-3:
    image: citusdata/citus:12.1.3
    restart: always
    container_name: pg-wk-3
    environment:
      - TZ=Asia/Shanghai
      - POSTGRES_USER=postgres
      # NOTE(review): demo-grade password — change before any non-lab use
      - POSTGRES_PASSWORD=123456
      - PGUSER=postgres
      - PGPASSWORD=123456
    working_dir: /postgresql
    networks:
      - dbnet
    extra_hosts:
      # static hostname -> IP entries so every node resolves the others by name
      - "pg-cd1:192.168.0.1"
      - "pg-wk1:192.168.0.1"
      - "pg-wk2:192.168.0.2"
      - "pg-wk3:192.168.0.3"
    ports:
      # host 15432 -> container 5432 (worker port used by citus_add_node)
      - "15432:5432"
    volumes:
      - ./postgresql/data:/var/lib/postgresql/data
      - ./postgresql/healthcheck-volume:/healthcheck
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U postgres"]
      interval: 30s
      timeout: 10s
      retries: 5
networks:
  dbnet:
    # created beforehand: docker network create --driver bridge dbnet
    external: true
四 配置访问
1 修改 postgresql.conf 配置。
# Listen on all interfaces so the coordinator and workers can reach each other
# across hosts (the default is localhost only). Restart for this to take effect.
listen_addresses = '*'
2 配置postgres客户端访问,修改 pg_hba.conf 。
# Must be configured on every node; the nodes talk to each other on (re)start.
# NOTE(review): `trust` grants passwordless access from these hosts — acceptable
# for an isolated lab network, but prefer scram-sha-256 anywhere else.
host all all 192.168.0.1/32 trust
host all all 192.168.0.3/32 trust
host all all 192.168.0.2/32 trust
五 节点添加
-- List available extensions (citus should appear here).
-- NOTE: the original used `#` comments, which psql rejects; SQL uses `--`.
SELECT * FROM pg_available_extensions;

-- Register the coordinator itself, with the host/port the workers use to reach it.
SELECT citus_set_coordinator_host('pg-cd1', 15434);

-- Register the worker nodes.
SELECT citus_add_node('pg-wk1', 15432);
SELECT citus_add_node('pg-wk2', 15432);
SELECT citus_add_node('pg-wk3', 15432);

-- citus_get_active_worker_nodes() returns the active worker host names and ports.
SELECT * FROM citus_get_active_worker_nodes();

-- Check connectivity between all pairs of nodes.
SELECT * FROM citus_check_cluster_node_health();

-- Metadata for every node in the cluster.
SELECT * FROM pg_dist_node;

-- Overview of distributed/reference tables.
SELECT * FROM citus_tables;

-- citus_total_relation_size: total disk space used by all shards of a distributed table.
SELECT pg_size_pretty(citus_total_relation_size('devices'));
-- Shard placement and size per node.
-- NOTE(review): the original cast `p.placementid::regclass` is incorrect —
-- placementid is an internal surrogate id, not a relation OID, and the shard
-- relations live on the workers, so pg_total_relation_size() run on the
-- coordinator cannot measure them. The citus_shards view already reports
-- per-shard sizes collected from the workers, so query it directly.
SELECT
    table_name,
    shard_name,
    nodename,
    nodeport,
    pg_size_pretty(shard_size) AS shard_size
FROM citus_shards
ORDER BY
    table_name,
    nodename;

-- Raw shard metadata.
SELECT * FROM citus_shards;
六 示例创建表
创建分布式表
-- Fact table: one row per device event, jsonb payload in `data`.
CREATE TABLE events (
    id bigserial NOT NULL,
    device_id bigint,
    event_id bigserial,
    event_time timestamptz DEFAULT now(),
    data jsonb NOT NULL,
    PRIMARY KEY (id)
);

-- Composite index to serve per-device event lookups.
CREATE INDEX idx_dev_eve ON events (device_id, event_id);

-- Distribute the events table across shards (hash-partitioned on the primary key),
-- placed locally or on the worker nodes.
SELECT create_distributed_table('events', 'id', 'hash');

-- Seed 10,000 sample events spread over 100 device ids.
INSERT INTO events (device_id, data)
SELECT
    s % 100,
    ('{"measurement":' || random() || '}')::jsonb
FROM generate_series(1, 10000) AS s;
-- Device metadata table, colocated with events (same shard count and placement).
-- NOTE(review): events is distributed on `id`, not `device_id`, so joins between
-- events and devices on device_id are NOT shard-local despite the colocation;
-- if such joins dominate, consider distributing events on device_id instead.
CREATE TABLE devices (
    device_id bigint PRIMARY KEY,
    device_name text,
    device_type_id int
);

-- Name the index explicitly (<table>_<col>_idx) so it is greppable in migrations.
CREATE INDEX devices_device_type_id_idx ON devices (device_type_id);

-- Co-locate the devices table with the events table.
SELECT create_distributed_table('devices', 'device_id', colocate_with := 'events');

-- Insert device metadata for device ids 0..99 (matches events.device_id = s % 100).
INSERT INTO devices (device_id, device_name, device_type_id)
SELECT s, 'device-' || s, 55 FROM generate_series(0, 99) AS s;