
TiDB cluster disaster recovery based on multiple replicas

Reference: https://www.bookstack.cn/read/tidb-8.1-zh/4c922925a950e576.md

Database version

  • TiDB v8.5.0

Overall cluster architecture

IP address        OS version   Note      Region
192.168.222.151   redhat8.4    tidb8.5   1
192.168.222.152   redhat8.4    tidb8.5   1
192.168.222.153   redhat8.4    tidb8.5   2
192.168.222.154   redhat8.4    tidb8.5   2
192.168.222.155   redhat8.4    tidb8.5   3

Cluster architecture overview

The deployment spans three regions: region 1 has 2 nodes, region 2 has 2 nodes, and region 3 has 1 node.
Region 1 serves as the primary region, region 2 as the secondary region, and region 3 as the third region used only for voting.
TiKV runs with 5 replicas and PD runs 5 members.

Set up passwordless SSH login

# - Run cluster deployment, scale-out, scale-in, and other operations from the 192.168.222.151 host

# Generate an SSH key pair
ssh-keygen -t rsa -b 4096

# Distribute the public key to all target nodes
for i in {1..5};do echo $i;ssh-copy-id root@192.168.222.15$i ;done
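Before handing the hosts over to TiUP, passwordless login can be verified with a quick loop like the one below (a minimal sketch that reuses the host range above):

# Verify passwordless SSH to every target node
for i in {1..5}; do
  ssh -o BatchMode=yes root@192.168.222.15$i hostname \
    || echo "192.168.222.15$i: passwordless SSH not working"
done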

Install TiUP

# Install TiUP:
curl --proto '=https' --tlsv1.2 -sSf https://tiup-mirrors.pingcap.com/install.sh | sh

# Set up the environment variables:
source /root/.bash_profile

# Update TiUP:
tiup update --self
tiup update cluster
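As a quick sanity check after the update, the installed TiUP version and components can be listed (the --installed flag of tiup list is assumed to be available in this TiUP release):

# Confirm the TiUP version and the locally installed components
tiup --version
tiup list --installed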

Deploy the TiDB cluster

  • Initialize a configuration file template:
tiup cluster template > topology.yaml

# Create a new configuration file
[root@localhost tidb]# cat tidb-cluster.yaml 
global:
  user: "root"
  ssh_port: 22
  deploy_dir: "/data/tidb/tidbdeploy"
  data_dir: "/data/tidb/tidbdata"
server_configs:
  tikv:
    server.grpc-compression-type: gzip # gzip enables message compression between TiKV instances, reducing network traffic.
  pd:
    replication.location-labels:  ["Region","AZ"] # PD schedules replicas according to the Region and AZ labels configured on the TiKV nodes.
pd_servers:
  - host: 192.168.222.151
    name: "pd-1"
  - host: 192.168.222.152
    name: "pd-2"
  - host: 192.168.222.153
    name: "pd-3"
  - host: 192.168.222.154
    name: "pd-4"
  - host: 192.168.222.155
    name: "pd-5"
tidb_servers:
  - host: 192.168.222.151
  - host: 192.168.222.153
tikv_servers:  # Use the labels option to mark the Region and AZ where each TiKV node is located
  - host: 192.168.222.151
    config:
      server.labels: { Region: "Region1", AZ: "AZ1" }
  - host: 192.168.222.152
    config:
      server.labels: { Region: "Region1", AZ: "AZ2" }
  - host: 192.168.222.153
    config:
      server.labels: { Region: "Region2", AZ: "AZ3" }
  - host: 192.168.222.154
    config:
      server.labels: { Region: "Region2", AZ: "AZ4" }
  - host: 192.168.222.155
    config:
      server.labels: { Region: "Region3", AZ: "AZ5" }
      # Lengthen the election timeout for region 3 so that replicas in this region are not elected as leaders
      raftstore.raft-min-election-timeout-ticks: 50
      raftstore.raft-max-election-timeout-ticks: 60
monitoring_servers:
  - host: 192.168.222.152
grafana_servers:
  - host: 192.168.222.152
alertmanager_servers:
  - host: 192.168.222.152

Deploy the cluster:

# List available TiDB versions
[root@localhost tidb]# tiup list tidb
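
Before running the deploy command, TiUP can pre-check the topology file and the target hosts; a typical pre-flight check, assuming the file above is saved as tidb-cluster.yaml, looks like this:

# Check the topology and target machines for potential problems
tiup cluster check ./tidb-cluster.yaml --user root
# Optionally let TiUP fix what it can (system limits, sysctl parameters, etc.)
tiup cluster check ./tidb-cluster.yaml --apply --user root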

# Deploy
tiup cluster deploy tidb-cluster v8.5.0 tidb-cluster.yaml -u root

# Destroy the cluster (if needed)
# tiup cluster destroy tidb-cluster

# Start the cluster:
tiup cluster start tidb-cluster --init
# tiup cluster start tidb-cluster

# Change the root password (the initial random password is printed by the --init step above)
/home/data/mysql-8.0.30-el7-x86_64/bin/mysql -h 192.168.222.151 -P 4000 -u root -p'-1*+azn_47bfGm582Z'
ALTER USER 'root'@'%' IDENTIFIED BY '123456';
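
To confirm that the password change took effect, reconnect with the new password and run a trivial query (a minimal sketch; any MySQL-compatible client works, including the full client path used above):

# Reconnect with the new root password
mysql -h 192.168.222.151 -P 4000 -u root -p'123456' -e "SELECT VERSION();"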

# List deployed clusters and their versions
tiup cluster list

# Check the cluster status:
tiup cluster display tidb-cluster

[root@localhost tidb]# tiup cluster display tidb-cluster
Cluster type:       tidb
Cluster name:       tidb-cluster
Cluster version:    v8.5.0
Deploy user:        root
SSH type:           builtin
Dashboard URL:      http://192.168.222.154:2379/dashboard
Grafana URL:        http://192.168.222.152:3000
ID                     Role          Host             Ports        OS/Arch       Status  Data Dir                               Deploy Dir
--                     ----          ----             -----        -------       ------  --------                               ----------
192.168.222.152:9093   alertmanager  192.168.222.152  9093/9094    linux/x86_64  Up      /data/tidb/tidbdata/alertmanager-9093  /data/tidb/tidbdeploy/alertmanager-9093
192.168.222.152:3000   grafana       192.168.222.152  3000         linux/x86_64  Up      -                                      /data/tidb/tidbdeploy/grafana-3000
192.168.222.151:2379   pd            192.168.222.151  2379/2380    linux/x86_64  Up      /data/tidb/tidbdata/pd-2379            /data/tidb/tidbdeploy/pd-2379
192.168.222.152:2379   pd            192.168.222.152  2379/2380    linux/x86_64  Up      /data/tidb/tidbdata/pd-2379            /data/tidb/tidbdeploy/pd-2379
192.168.222.153:2379   pd            192.168.222.153  2379/2380    linux/x86_64  Up      /data/tidb/tidbdata/pd-2379            /data/tidb/tidbdeploy/pd-2379
192.168.222.154:2379   pd            192.168.222.154  2379/2380    linux/x86_64  Up|UI   /data/tidb/tidbdata/pd-2379            /data/tidb/tidbdeploy/pd-2379
192.168.222.155:2379   pd            192.168.222.155  2379/2380    linux/x86_64  Up|L    /data/tidb/tidbdata/pd-2379            /data/tidb/tidbdeploy/pd-2379
192.168.222.152:9090   prometheus    192.168.222.152  9090/12020   linux/x86_64  Up      /data/tidb/tidbdata/prometheus-9090    /data/tidb/tidbdeploy/prometheus-9090
192.168.222.151:4000   tidb          192.168.222.151  4000/10080   linux/x86_64  Up      -                                      /data/tidb/tidbdeploy/tidb-4000
192.168.222.153:4000   tidb          192.168.222.153  4000/10080   linux/x86_64  Up      -                                      /data/tidb/tidbdeploy/tidb-4000
192.168.222.151:20160  tikv          192.168.222.151  20160/20180  linux/x86_64  Up      /data/tidb/tidbdata/tikv-20160         /data/tidb/tidbdeploy/tikv-20160
192.168.222.152:20160  tikv          192.168.222.152  20160/20180  linux/x86_64  Up      /data/tidb/tidbdata/tikv-20160         /data/tidb/tidbdeploy/tikv-20160
192.168.222.153:20160  tikv          192.168.222.153  20160/20180  linux/x86_64  Up      /data/tidb/tidbdata/tikv-20160         /data/tidb/tidbdeploy/tikv-20160
192.168.222.154:20160  tikv          192.168.222.154  20160/20180  linux/x86_64  Up      /data/tidb/tidbdata/tikv-20160         /data/tidb/tidbdeploy/tikv-20160
192.168.222.155:20160  tikv          192.168.222.155  20160/20180  linux/x86_64  Up      /data/tidb/tidbdata/tikv-20160         /data/tidb/tidbdeploy/tikv-20160
Total nodes: 15

Configure the cluster replica count and Leader restrictions

tiup ctl:v8.5.0 pd config set max-replicas 5
tiup ctl:v8.5.0 pd config set label-property reject-leader Region Region3
# The following step loads some test data into the cluster (optional)
tiup bench tpcc  prepare -H 192.168.222.151 -P 4000 -p 123456 -D tpcc --warehouses 1
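
Optionally, the loaded TPC-C data can be validated and exercised with the same tiup bench tool (a sketch; the flags mirror the prepare step above):

# Verify data consistency of the loaded warehouse
tiup bench tpcc check -H 192.168.222.151 -P 4000 -p 123456 -D tpcc --warehouses 1
# Run a short workload to generate some leader traffic
tiup bench tpcc run -H 192.168.222.151 -P 4000 -p 123456 -D tpcc --warehouses 1 --time 5m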

Set the PD leader priorities

# Nodes with a higher priority value are more likely to become the leader
tiup ctl:v8.5.0 pd member leader_priority  pd-1 4
tiup ctl:v8.5.0 pd member leader_priority  pd-2 3
tiup ctl:v8.5.0 pd member leader_priority  pd-3 2
tiup ctl:v8.5.0 pd member leader_priority  pd-4 1
tiup ctl:v8.5.0 pd member leader_priority  pd-5 0

# Get member information
tiup ctl:v8.5.0 pd member
# PD instance health status
tiup ctl:v8.5.0 pd health
# PD store status
tiup ctl:v8.5.0 pd store
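
It can take a short while for the PD leader to follow the new priorities; the current PD leader can be confirmed with the leader subcommand of pd-ctl:

# Show which PD member currently holds the leader role
tiup ctl:v8.5.0 pd member leader show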

Create placement rules and pin the leader replicas of the test tables to region 1

-- Create two placement rules: the first makes region 1 the primary region and is used while the system is healthy;
-- the second makes region 2 the primary region and is applied when region 1 fails, so that region 2 takes over as primary.
CREATE PLACEMENT POLICY primary_rule_for_region1 PRIMARY_REGION="Region1" REGIONS="Region1, Region2,Region3";
CREATE PLACEMENT POLICY secondary_rule_for_region2 PRIMARY_REGION="Region2" REGIONS="Region1,Region2,Region3";
-- Apply the rule primary_rule_for_region1 created above to the corresponding user tables.
ALTER TABLE tpcc.warehouse PLACEMENT POLICY=primary_rule_for_region1;
ALTER TABLE tpcc.district PLACEMENT POLICY=primary_rule_for_region1;
-- Note: adjust the database names, table names, and placement rule name above as needed.
-- A query like the following shows the number of leaders in each region, to confirm whether leader migration has completed.
SELECT STORE_ID, address, leader_count, label FROM INFORMATION_SCHEMA.TIKV_STORE_STATUS ORDER BY store_id;

+----------+-----------------------+--------------+------------------------------------------------------------------------+
| STORE_ID | address               | leader_count | label                                                                  |
+----------+-----------------------+--------------+------------------------------------------------------------------------+
|        1 | 192.168.222.153:20160 |            0 | [{"key": "AZ", "value": "AZ3"}, {"key": "Region", "value": "Region2"}] |
|        4 | 192.168.222.154:20160 |            6 | [{"key": "AZ", "value": "AZ4"}, {"key": "Region", "value": "Region2"}] |
|        7 | 192.168.222.152:20160 |            4 | [{"key": "AZ", "value": "AZ2"}, {"key": "Region", "value": "Region1"}] |
|        8 | 192.168.222.155:20160 |            1 | [{"key": "AZ", "value": "AZ5"}, {"key": "Region", "value": "Region3"}] |
|        9 | 192.168.222.151:20160 |            1 | [{"key": "AZ", "value": "AZ1"}, {"key": "Region", "value": "Region1"}] |
+----------+-----------------------+--------------+------------------------------------------------------------------------+

Generate a SQL script that moves the leaders of all tables in non-system schemas to a specific region

SET @region_name='primary_rule_for_region1';
SELECT concat('ALTER TABLE ', table_schema, '.', table_name, ' PLACEMENT POLICY=', @region_name, ';') FROM information_schema.tables WHERE table_schema NOT IN ('METRICS_SCHEMA', 'PERFORMANCE_SCHEMA', 'INFORMATION_SCHEMA','mysql');

+------------------------------------------------------------------------------------------------+
| concat('ALTER TABLE ', table_schema, '.', table_name, ' PLACEMENT POLICY=', @region_name, ';') |
+------------------------------------------------------------------------------------------------+
| ALTER TABLE tpcc.warehouse PLACEMENT POLICY=primary_rule_for_region1;                          |
| ALTER TABLE tpcc.stock PLACEMENT POLICY=primary_rule_for_region1;                              |
| ALTER TABLE tpcc.orders PLACEMENT POLICY=primary_rule_for_region1;                             |
| ALTER TABLE tpcc.order_line PLACEMENT POLICY=primary_rule_for_region1;                         |
| ALTER TABLE tpcc.new_order PLACEMENT POLICY=primary_rule_for_region1;                          |
| ALTER TABLE tpcc.item PLACEMENT POLICY=primary_rule_for_region1;                               |
| ALTER TABLE tpcc.history PLACEMENT POLICY=primary_rule_for_region1;                            |
| ALTER TABLE tpcc.district PLACEMENT POLICY=primary_rule_for_region1;                           |
| ALTER TABLE tpcc.customer PLACEMENT POLICY=primary_rule_for_region1;                           |
| ALTER TABLE sys.schema_unused_indexes PLACEMENT POLICY=primary_rule_for_region1;               |
+------------------------------------------------------------------------------------------------+
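
To apply the generated statements in one batch, the query output can be written to a file and fed back to the cluster (a sketch; the file name apply_placement.sql is arbitrary and the password is the one set earlier). If statements against views such as sys.schema_unused_indexes fail, those schemas can simply be added to the NOT IN list.

# Generate the ALTER statements into a file, then execute the file
mysql -h 192.168.222.151 -P 4000 -u root -p'123456' -N -B \
  -e "SELECT concat('ALTER TABLE ', table_schema, '.', table_name, ' PLACEMENT POLICY=primary_rule_for_region1;') FROM information_schema.tables WHERE table_schema NOT IN ('METRICS_SCHEMA','PERFORMANCE_SCHEMA','INFORMATION_SCHEMA','mysql');" \
  > apply_placement.sql
mysql -h 192.168.222.151 -P 4000 -u root -p'123456' < apply_placement.sql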

Disaster recovery switchover

Planned switchover

Run the following commands to switch all user tables and the PD leader to region 2

-- Apply the previously created rule secondary_rule_for_region2 to the corresponding user tables.
ALTER TABLE tpcc.warehouse PLACEMENT POLICY=secondary_rule_for_region2;
ALTER TABLE tpcc.district PLACEMENT POLICY=secondary_rule_for_region2;

Run the following commands to lower the priority of the PD nodes in region 1 and raise the priority of the PD nodes in region 2.

tiup ctl:v8.5.0 pd member leader_priority pd-1 2
tiup ctl:v8.5.0 pd member leader_priority pd-2 1
tiup ctl:v8.5.0 pd member leader_priority pd-3 4
tiup ctl:v8.5.0 pd member leader_priority pd-4 3
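
If the PD leader does not move on its own after the priority change, it can also be transferred explicitly with pd-ctl and then verified:

# Explicitly transfer the PD leader to a PD member in region 2, e.g. pd-3
tiup ctl:v8.5.0 pd member leader transfer pd-3
# Confirm the current PD leader
tiup ctl:v8.5.0 pd member leader show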

SELECT STORE_ID, address, leader_count, label FROM INFORMATION_SCHEMA.TIKV_STORE_STATUS ORDER BY store_id;
+----------+-----------------------+--------------+------------------------------------------------------------------------+
| STORE_ID | address               | leader_count | label                                                                  |
+----------+-----------------------+--------------+------------------------------------------------------------------------+
|        1 | 192.168.222.153:20160 |            1 | [{"key": "AZ", "value": "AZ3"}, {"key": "Region", "value": "Region2"}] |
|        4 | 192.168.222.154:20160 |           10 | [{"key": "AZ", "value": "AZ4"}, {"key": "Region", "value": "Region2"}] |
|        7 | 192.168.222.152:20160 |            9 | [{"key": "AZ", "value": "AZ2"}, {"key": "Region", "value": "Region1"}] |
|        8 | 192.168.222.155:20160 |            1 | [{"key": "AZ", "value": "AZ5"}, {"key": "Region", "value": "Region3"}] |
|        9 | 192.168.222.151:20160 |            3 | [{"key": "AZ", "value": "AZ1"}, {"key": "Region", "value": "Region1"}] |
+----------+-----------------------+--------------+------------------------------------------------------------------------+

SET @region_name='secondary_rule_for_region2';
SELECT concat('ALTER TABLE ', table_schema, '.', table_name, ' PLACEMENT POLICY=', @region_name, ';') FROM information_schema.tables WHERE table_schema NOT IN ('METRICS_SCHEMA', 'PERFORMANCE_SCHEMA', 'INFORMATION_SCHEMA','mysql');
+------------------------------------------------------------------------------------------------+
| concat('ALTER TABLE ', table_schema, '.', table_name, ' PLACEMENT POLICY=', @region_name, ';') |
+------------------------------------------------------------------------------------------------+
| ALTER TABLE tpcc.warehouse PLACEMENT POLICY=secondary_rule_for_region2;                        |
| ALTER TABLE tpcc.stock PLACEMENT POLICY=secondary_rule_for_region2;                            |
| ALTER TABLE tpcc.orders PLACEMENT POLICY=secondary_rule_for_region2;                           |
| ALTER TABLE tpcc.order_line PLACEMENT POLICY=secondary_rule_for_region2;                       |
| ALTER TABLE tpcc.new_order PLACEMENT POLICY=secondary_rule_for_region2;                        |
| ALTER TABLE tpcc.item PLACEMENT POLICY=secondary_rule_for_region2;                             |
| ALTER TABLE tpcc.history PLACEMENT POLICY=secondary_rule_for_region2;                          |
| ALTER TABLE tpcc.district PLACEMENT POLICY=secondary_rule_for_region2;                         |
| ALTER TABLE tpcc.customer PLACEMENT POLICY=secondary_rule_for_region2;                         |
| ALTER TABLE sys.schema_unused_indexes PLACEMENT POLICY=secondary_rule_for_region2;             |
+------------------------------------------------------------------------------------------------+
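
As an additional check, the placement policy bound to a switched table and its scheduling progress can be queried over SQL (a sketch; SHOW PLACEMENT FOR TABLE reports a Scheduling_State column once the policy has been applied):

# Check the placement policy and scheduling state of a switched table
mysql -h 192.168.222.151 -P 4000 -u root -p'123456' -e "SHOW PLACEMENT FOR TABLE tpcc.warehouse\G"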

Unplanned switchover

Data recording and verification

mysql> use tpcc
mysql> show tables;
+----------------+
| Tables_in_tpcc |
+----------------+
| customer       |
| district       |
| history        |
| item           |
| new_order      |
| order_line     |
| orders         |
| stock          |
| warehouse      |
+----------------+

mysql> select count(*) from   customer;
+----------+
| count(*) |
+----------+
|    30000 |
+----------+

mysql> select count(*) from   district;
+----------+
| count(*) |
+----------+
|       10 |
+----------+

mysql> select count(*) from   history;
+----------+
| count(*) |
+----------+
|    30000 |
+----------+

mysql> select count(*) from   item;   
+----------+
| count(*) |
+----------+
|   100000 |
+----------+

mysql> select count(*) from   new_order;
+----------+
| count(*) |
+----------+
|     9000 |
+----------+

mysql> select count(*) from   order_line;
+----------+
| count(*) |
+----------+
|   300649 |
+----------+

mysql> select count(*) from   orders;  
+----------+
| count(*) |
+----------+
|    30000 |
+----------+

mysql> select count(*) from   stock;
+----------+
| count(*) |
+----------+
|   100000 |
+----------+

mysql> select count(*) from   warehouse;
+----------+
| count(*) |
+----------+
|        1 |
+----------+

Terminate the TiKV and PD nodes in region 1 with a command like the following, simulating an unplanned region failure

tiup cluster stop tidb-cluster -N 192.168.222.151:20160,192.168.222.152:20160,192.168.222.151:2379,192.168.222.152:2379
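
Because the PD nodes in region 1 are now stopped, pd-ctl should be pointed at a surviving PD endpoint with -u when inspecting the cluster, for example:

# Query store states through a surviving PD node in region 2
tiup ctl:v8.5.0 pd -u http://192.168.222.153:2379 store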

[root@localhost tidb]# sh -x status-tidb.sh 
+ /root/.tiup/bin/tiup cluster display tidb-cluster
Cluster type:       tidb
Cluster name:       tidb-cluster
Cluster version:    v8.5.0
Deploy user:        root
SSH type:           builtin
Dashboard URL:      http://192.168.222.154:2379/dashboard
Grafana URL:        http://192.168.222.152:3000
ID                     Role          Host             Ports        OS/Arch       Status        Data Dir                               Deploy Dir
--                     ----          ----             -----        -------       ------        --------                               ----------
192.168.222.152:9093   alertmanager  192.168.222.152  9093/9094    linux/x86_64  Up            /data/tidb/tidbdata/alertmanager-9093  /data/tidb/tidbdeploy/alertmanager-9093
192.168.222.152:3000   grafana       192.168.222.152  3000         linux/x86_64  Up            -                                      /data/tidb/tidbdeploy/grafana-3000
192.168.222.151:2379   pd            192.168.222.151  2379/2380    linux/x86_64  Down          /data/tidb/tidbdata/pd-2379            /data/tidb/tidbdeploy/pd-2379
192.168.222.152:2379   pd            192.168.222.152  2379/2380    linux/x86_64  Down          /data/tidb/tidbdata/pd-2379            /data/tidb/tidbdeploy/pd-2379
192.168.222.153:2379   pd            192.168.222.153  2379/2380    linux/x86_64  Up|L          /data/tidb/tidbdata/pd-2379            /data/tidb/tidbdeploy/pd-2379
192.168.222.154:2379   pd            192.168.222.154  2379/2380    linux/x86_64  Up|UI         /data/tidb/tidbdata/pd-2379            /data/tidb/tidbdeploy/pd-2379
192.168.222.155:2379   pd            192.168.222.155  2379/2380    linux/x86_64  Up            /data/tidb/tidbdata/pd-2379            /data/tidb/tidbdeploy/pd-2379
192.168.222.152:9090   prometheus    192.168.222.152  9090/12020   linux/x86_64  Up            /data/tidb/tidbdata/prometheus-9090    /data/tidb/tidbdeploy/prometheus-9090
192.168.222.151:4000   tidb          192.168.222.151  4000/10080   linux/x86_64  Up            -                                      /data/tidb/tidbdeploy/tidb-4000
192.168.222.153:4000   tidb          192.168.222.153  4000/10080   linux/x86_64  Up            -                                      /data/tidb/tidbdeploy/tidb-4000
192.168.222.151:20160  tikv          192.168.222.151  20160/20180  linux/x86_64  Disconnected  /data/tidb/tidbdata/tikv-20160         /data/tidb/tidbdeploy/tikv-20160
192.168.222.152:20160  tikv          192.168.222.152  20160/20180  linux/x86_64  Disconnected  /data/tidb/tidbdata/tikv-20160         /data/tidb/tidbdeploy/tikv-20160
192.168.222.153:20160  tikv          192.168.222.153  20160/20180  linux/x86_64  Up            /data/tidb/tidbdata/tikv-20160         /data/tidb/tidbdeploy/tikv-20160
192.168.222.154:20160  tikv          192.168.222.154  20160/20180  linux/x86_64  Up            /data/tidb/tidbdata/tikv-20160         /data/tidb/tidbdeploy/tikv-20160
192.168.222.155:20160  tikv          192.168.222.155  20160/20180  linux/x86_64  Up            /data/tidb/tidbdata/tikv-20160         /data/tidb/tidbdeploy/tikv-20160

Run statements like the following to switch the leaders of the user tables to region 2

-- Apply the previously created rule secondary_rule_for_region2 to the corresponding user tables.
ALTER TABLE tpcc.warehouse PLACEMENT POLICY=secondary_rule_for_region2;
ALTER TABLE tpcc.district PLACEMENT POLICY=secondary_rule_for_region2;

-- A query like the following shows the number of leaders in each region, to confirm whether leader migration has completed.
SELECT STORE_ID, address, leader_count, label FROM INFORMATION_SCHEMA.TIKV_STORE_STATUS ORDER BY store_id;
+----------+-----------------------+--------------+------------------------------------------------------------------------+
| STORE_ID | address               | leader_count | label                                                                  |
+----------+-----------------------+--------------+------------------------------------------------------------------------+
|        1 | 192.168.222.153:20160 |            7 | [{"key": "AZ", "value": "AZ3"}, {"key": "Region", "value": "Region2"}] |
|        4 | 192.168.222.154:20160 |           16 | [{"key": "AZ", "value": "AZ4"}, {"key": "Region", "value": "Region2"}] |
|        7 | 192.168.222.152:20160 |            0 | [{"key": "AZ", "value": "AZ2"}, {"key": "Region", "value": "Region1"}] |
|        8 | 192.168.222.155:20160 |            1 | [{"key": "AZ", "value": "AZ5"}, {"key": "Region", "value": "Region3"}] |
|        9 | 192.168.222.151:20160 |            0 | [{"key": "AZ", "value": "AZ1"}, {"key": "Region", "value": "Region1"}] |
+----------+-----------------------+--------------+------------------------------------------------------------------------+

SET @region_name='secondary_rule_for_region2';
SELECT concat('ALTER TABLE ', table_schema, '.', table_name, ' PLACEMENT POLICY=', @region_name, ';') FROM information_schema.tables WHERE table_schema NOT IN ('METRICS_SCHEMA', 'PERFORMANCE_SCHEMA', 'INFORMATION_SCHEMA','mysql');
+------------------------------------------------------------------------------------------------+
| concat('ALTER TABLE ', table_schema, '.', table_name, ' PLACEMENT POLICY=', @region_name, ';') |
+------------------------------------------------------------------------------------------------+
| ALTER TABLE tpcc.warehouse PLACEMENT POLICY=secondary_rule_for_region2;                        |
| ALTER TABLE tpcc.stock PLACEMENT POLICY=secondary_rule_for_region2;                            |
| ALTER TABLE tpcc.orders PLACEMENT POLICY=secondary_rule_for_region2;                           |
| ALTER TABLE tpcc.order_line PLACEMENT POLICY=secondary_rule_for_region2;                       |
| ALTER TABLE tpcc.new_order PLACEMENT POLICY=secondary_rule_for_region2;                        |
| ALTER TABLE tpcc.item PLACEMENT POLICY=secondary_rule_for_region2;                             |
| ALTER TABLE tpcc.history PLACEMENT POLICY=secondary_rule_for_region2;                          |
| ALTER TABLE tpcc.district PLACEMENT POLICY=secondary_rule_for_region2;                         |
| ALTER TABLE tpcc.customer PLACEMENT POLICY=secondary_rule_for_region2;                         |
| ALTER TABLE sys.schema_unused_indexes PLACEMENT POLICY=secondary_rule_for_region2;             |
+------------------------------------------------------------------------------------------------+

Data recording and verification

mysql> use tpcc
mysql> show tables;
+----------------+
| Tables_in_tpcc |
+----------------+
| customer       |
| district       |
| history        |
| item           |
| new_order      |
| order_line     |
| orders         |
| stock          |
| warehouse      |
+----------------+

mysql> select count(*) from   customer;
+----------+
| count(*) |
+----------+
|    30000 |
+----------+

mysql> select count(*) from   district;
+----------+
| count(*) |
+----------+
|       10 |
+----------+

mysql> select count(*) from   history;
+----------+
| count(*) |
+----------+
|    30000 |
+----------+

mysql> select count(*) from   item;
+----------+
| count(*) |
+----------+
|   100000 |
+----------+

mysql> select count(*) from   new_order;
+----------+
| count(*) |
+----------+
|     9000 |
+----------+

mysql> select count(*) from   order_line;
+----------+
| count(*) |
+----------+
|   300649 |
+----------+

mysql> select count(*) from   orders;
+----------+
| count(*) |
+----------+
|    30000 |
+----------+

mysql> select count(*) from   stock;
+----------+
| count(*) |
+----------+
|   100000 |
+----------+

mysql> select count(*) from   warehouse;
+----------+
| count(*) |
+----------+
|        1 |
+----------+

mysql> select * from stock limit 2\G
*************************** 1. row ***************************
      s_i_id: 1
      s_w_id: 1
  s_quantity: 19
   s_dist_01: UKIQNGEPUWTSYGZMCDQKKYJP
   s_dist_02: CHIXATXOZRCSWBOBQRQWSHRO
   s_dist_03: YBQZDPJBBXPQBXMOSZDDEBQB
   s_dist_04: CDDAJXSODFJHEAYKZVJPJEAB
   s_dist_05: VWWSQAJKPWFAGXOJJKBBGYOQ
   s_dist_06: IYBYBREIKWGBUGCDPPWFRTNJ
   s_dist_07: CYKQNODRFMOSQSMMLUFGZUFT
   s_dist_08: NJHTWYFHNSBCCEOYCXKMWNKP
   s_dist_09: YCLAGDLLJQOMDHCITZFQLEIV
   s_dist_10: HLCVXIONMBDXLQYVGWBRBYJP
       s_ytd: 0
 s_order_cnt: 0
s_remote_cnt: 0
      s_data: Gj6oRKDsiPuGNGaJk7XHr7Ts06JPfRYj5I32yFotmrNCY
*************************** 2. row ***************************
      s_i_id: 2
      s_w_id: 1
  s_quantity: 87
   s_dist_01: ITEWHXFVJGIPGNTQKWFLRQGL
   s_dist_02: JYEOCWWFSJTISVPDKJTBEWRS
   s_dist_03: ZFFUPBNESPMUJZYQSGCMWORJ
   s_dist_04: CGNQASRMKBVBECHTUUEEIRWF
   s_dist_05: TJIHTLDJTFNSQPNUJTCUOEQO
   s_dist_06: UFVRQMXCQSZFZROZLQYBASDS
   s_dist_07: RBYXTILQKJCJOCNLALQSATYB
   s_dist_08: FQKTRFNOTPPEBLKLSMLAJPRZ
   s_dist_09: MEOWFUZGTWVZPJTXWZOHFDIB
   s_dist_10: XHVAROWLRCTSLIFGGBANLUHV
       s_ytd: 0
 s_order_cnt: 0
s_remote_cnt: 0
      s_data: rqWUJSVNqGMSVWB0u5qn9zNgXyOn8nXQI7zw3KT
2 rows in set (0.01 sec)
