# # Global variables are applied to all deployments and used as the default value of
# # the deployments if a specific deployment value is missing.
global:
  user: "tidb"
  ssh_port: 22
  deploy_dir: "/ssd1/tidb-deploy"
  data_dir: "/ssd1/tidb-data"

# # Monitored variables are applied to all the machines.
# monitored:
#   node_exporter_port: 9100
#   blackbox_exporter_port: 9115

server_configs:
  tidb:
    instance.tidb_slow_log_threshold: 300
  tikv:
    readpool.storage.use-unified-pool: false
    readpool.coprocessor.use-unified-pool: true
  pd:
    replication.enable-placement-rules: true
    replication.location-labels: ["host"]
  # tiflash:
  #   logger.level: "info"

pd_servers:
  - host: 172.16.100.42

tidb_servers:
  - host: 172.16.100.42

tikv_servers:
  - host: 172.16.100.42
    port: 20160
    status_port: 20180
    config:
      server.labels: { host: "tikv_kube42" }

# tiflash_servers:
#   - host: 172.16.100.42

# monitoring_servers:
#   - host: 172.16.100.42

# grafana_servers:
#   - host: 172.16.100.42
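# Optionally, before deploying, check whether the target machine meets the requirements of this
# topology. A sketch only, assuming the file above is saved as ./topo.yaml and root SSH access with a password:
tiup cluster check ./topo.yaml --user root -p
# Adding --apply lets TiUP try to fix the failed check items automatically.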
# Run the cluster deployment command:
tiup cluster deploy <cluster-name> <version> ./topo.yaml --user root -p
# <cluster-name> sets the name of the cluster
# <version> sets the cluster version, for example v8.1.1; run `tiup list tidb` to see the TiDB versions currently available for deployment
# -p means use a password to log in to the target machines
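# For example, list the TiDB versions that TiUP can currently deploy:
tiup list tidb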
tiup cluster deploy test v4.0.14 /ssd1/tidb-deploy/topo.yaml --user tidb -p
# Cluster `test` deployed successfully, you can start it with command: `tiup cluster start test --init`
Started cluster `test` successfully
The root password of TiDB database has been changed.
The new password is: '84^j1TZ-+2b9A@S7Hs'.
Copy and record it to somewhere safe, it is only displayed once, and will not be stored.
The generated password can NOT be get and shown again.
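# Note: because the cluster above was started with `--init`, the generated root password is required
# for the first login (with a plain `tiup cluster start`, the root password stays empty).
# A hedged sketch, using the TiDB host/port from this topology:
mysql -h 172.16.100.42 -P 4000 -u root -p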
# Start the cluster:
tiup cluster start test

# Access the cluster:
# Install the MySQL client. Skip this step if it is already installed.
yum -y install mysql
# Connect to the TiDB database; the password is empty:
mysql -h 10.0.1.1 -P 4000 -u root
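# To quickly confirm the connection works, a one-off query can be run through the client.
# A sketch, assuming the TiDB node from this topology at 172.16.100.42:4000:
mysql -h 172.16.100.42 -P 4000 -u root -e "SELECT tidb_version()\G"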
# Access the Grafana monitoring for TiDB:
# Open http://172.16.100.42:3000 to view the cluster's Grafana monitoring page. The default username and password are both admin.

# Access the TiDB Dashboard:
# Open http://172.16.100.42:2379/dashboard to view the TiDB Dashboard. The default username is root and the password is empty.

cat .bashrc
PATH=$PATH:$HOME/.local/bin:$HOME/bin
export PATH
export PATH=/home/tidb/.tiup/bin:$PATH
source <(tiup completion bash)

# Enable the cluster services to start automatically on boot:
tiup cluster enable test

# Run the following command to list the clusters that are currently deployed:
tiup cluster list
# Run the following command to view the cluster topology and status:
tiup cluster display test

# Install the BR backup and restore tool:
tiup install br:v6.6.0
export AWS_ACCESS_KEY_ID="xxxxxxxxxxxxxxxx"
export AWS_SECRET_ACCESS_KEY="xxxxxxxxxxxxxxxxxxxxxx"
tiup br restore table --db bikeca --table t_bike_ca_trip --pd "172.16.100.42:2379" \
    --send-credentials-to-tikv=true \
    --log-file restore.log \
    --storage "s3://tidbbak/diiing-08-10-2024?access-key=${AWS_ACCESS_KEY_ID}&secret-access-key=${AWS_SECRET_ACCESS_KEY}" \
    --s3.endpoint "http://10.96.136.0:7480" \
    --ratelimit 64 \
    --check-requirements=false
# Refresh table statistics after the restore:
ANALYZE TABLE t_bike_ca_trip;
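# For reference, the S3 data restored above would have been produced by a matching BR backup.
# A hedged sketch only; the bucket path, endpoint, and credentials are reused from the restore command:
tiup br backup table --db bikeca --table t_bike_ca_trip --pd "172.16.100.42:2379" \
    --send-credentials-to-tikv=true \
    --log-file backup.log \
    --storage "s3://tidbbak/diiing-08-10-2024?access-key=${AWS_ACCESS_KEY_ID}&secret-access-key=${AWS_SECRET_ACCESS_KEY}" \
    --s3.endpoint "http://10.96.136.0:7480"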
Basic tuning
## Edit the configuration file
tiup cluster edit-config test

## Add the following configuration under the tidb section
server_configs:
  tidb:
    instance.tidb_slow_log_threshold: 300
    ## mem-quota-query: 10737418240    # deprecated
    performance.txn-total-size-limit: 10737418240

## Perform a rolling restart of the tidb components
tiup cluster reload test -R tidb
SET GLOBAL tidb_mem_quota_query = 10737418240;

## Check the variable value
show GLOBAL variables like 'tidb_mem_quota_query';
show session variables like 'tidb_mem_quota_query';

## To check whether the configuration has taken effect, connect to the TiDB cluster and run:
show config where type='tidb' and name ='performance.txn-total-size-limit';
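## For reference, 10737418240 bytes = 10 GiB (10 * 1024^3), so both the statement memory quota and the
## transaction size limit above are raised to 10 GiB. A sketch for running the same checks
## non-interactively with the mysql client (host/port assumed from the access example earlier):
mysql -h 172.16.100.42 -P 4000 -u root -e "show global variables like 'tidb_mem_quota_query'; show config where type='tidb' and name='performance.txn-total-size-limit';"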
## Parameter reference

user: tidb
tidb_version: v4.0.15
last_ops_ver: |-
  v1.2.3 tiup
  Go Version: go1.13
  Git Branch: release-1.2
  GitHash: df7e28a
topology:
  global:
    user: tidb
    ssh_port: 22
    ssh_type: builtin
    deploy_dir: /data/tdb
    data_dir: /data/tdb/data
    os: linux
    arch: amd64
  monitored:
    node_exporter_port: 9100
    blackbox_exporter_port: 9115
    deploy_dir: /data/tdb/monitor-9100
    data_dir: /data/tdb/data/monitor-9100
    log_dir: /data/tdb/monitor-9100/log
  server_configs:
    tidb:
      alter-primary-key: true
      binlog.enable: true
      binlog.ignore-error: false
      binlog.write-timeout: 15s
      compatible-kill-query: false
      enable-streaming: true
      host: 0.0.0.0
      lease: 45s
      log.enable-timestamp: true
      log.expensive-threshold: 10000
      log.file.max-days: 8
      log.format: text
      log.level: info
      log.query-log-max-len: 4096
      log.slow-threshold: 500
      lower-case-table-names: 2
      oom-action: log
      performance.committer-concurrency: 256
      performance.stmt-count-limit: 500000
      performance.tcp-keep-alive: true
      performance.txn-total-size-limit: 104857600
      prepared-plan-cache.enabled: true
      proxy-protocol.networks: 172.16.188.123
      run-ddl: true
      split-table: true
      store: tikv
      tikv-client.grpc-connection-count: 32
      tikv-client.max-batch-size: 256
      token-limit: 1500
    tikv:
      coprocessor.region-max-size: 384MB
      coprocessor.region-split-size: 256MB
      gRPC.grpc-concurrency: 8
      log-level: info
      raftdb.max-background-jobs: 8
      raftstore.apply-max-batch-size: 16384
      raftstore.apply-pool-size: 8
      raftstore.hibernate-regions: true
      raftstore.raft-max-inflight-msgs: 20480
      raftstore.raft-max-size-per-msg: 2MB
      raftstore.region-split-check-diff: 32MB
      raftstore.store-max-batch-size: 16384
      raftstore.store-pool-size: 8
      raftstore.sync-log: false
      readpool.coprocessor.max-tasks-per-worker-normal: 8000
      readpool.unified.max-thread-count: 32
      rocksdb.bytes-per-sync: 512MB
      rocksdb.compaction-readahead-size: 2MB
      rocksdb.defaultcf.level0-slowdown-writes-trigger: 32
      rocksdb.defaultcf.level0-stop-writes-trigger: 64
      rocksdb.defaultcf.max-write-buffer-number: 24
      rocksdb.defaultcf.write-buffer-size: 256MB
      rocksdb.lockcf.level0-slowdown-writes-trigger: 32
      rocksdb.lockcf.level0-stop-writes-trigger: 64
      rocksdb.max-background-flushes: 4
      rocksdb.max-background-jobs: 8
      rocksdb.max-sub-compactions: 4
      rocksdb.use-direct-io-for-flush-and-compaction: true
      rocksdb.wal-bytes-per-sync: 256MB
      rocksdb.writecf.level0-slowdown-writes-trigger: 32
      rocksdb.writecf.level0-stop-writes-trigger: 64
      rocksdb.writecf.max-write-buffer-number: 24
      rocksdb.writecf.write-buffer-size: 256MB
      storage.block-cache.capacity: 8GB
      storage.scheduler-concurrency: 4096000
      storage.scheduler-worker-pool-size: 8
    pd:
      auto-compaction-mod: periodic
      auto-compaction-retention: 10m
      quota-backend-bytes: 17179869184
    tiflash: {}
    tiflash-learner: {}
    pump: {}
    drainer: {}
    cdc: {}
  tidb_servers:
  - host: 172.16.188.123
    ssh_port: 22
    port: 4000
    status_port: 10080
    deploy_dir: /data/tdb/tidb-4000
    arch: amd64
    os: linux
  - host: 172.16.188.143
    ssh_port: 22
    port: 4000
    status_port: 10080
    deploy_dir: /data/tdb/tidb-4000
    arch: amd64
    os: linux
  tikv_servers:
  - host: 172.16.188.140
    ssh_port: 22
    port: 20160
    status_port: 20180
    deploy_dir: /data/tdb/tikv-20160
    data_dir: /data/tdb/data/tikv-20160
    arch: amd64
    os: linux
  - host: 172.16.188.143
    ssh_port: 22
    port: 20160
    status_port: 20180
    deploy_dir: /data/tdb/tikv-20160
    data_dir: /data/tdb/data/tikv-20160
    arch: amd64
    os: linux
  - host: 172.16.188.113
    ssh_port: 22
    port: 20160
    status_port: 20180
    deploy_dir: /data/tdb/tikv-20160
    data_dir: /data/tdb/data/tikv-20160
    arch: amd64
    os: linux
  tiflash_servers: []
  pd_servers:
  - host: 172.16.188.120
    ssh_port: 22
    name: pd-172.16.188.120-2379
    client_port: 2379
    peer_port: 2380
    deploy_dir: /data/tdb/pd-2379
    data_dir: /data/tdb/data/pd-2379
    arch: amd64
    os: linux
  - host: 172.16.188.118
    ssh_port: 22
    name: pd-172.16.188.118-2379
    client_port: 2379
    peer_port: 2380
    deploy_dir: /data/tdb/pd-2379
    data_dir: /data/tdb/data/pd-2379
    arch: amd64
    os: linux
  - host: 172.16.188.125
    ssh_port: 22
    name: pd-172.16.188.125-2379
    client_port: 2379
    peer_port: 2380
    deploy_dir: /data/tdb/pd-2379
    data_dir: /data/tdb/data/pd-2379
    arch: amd64
    os: linux
  pump_servers:
  - host: 172.16.188.143
    ssh_port: 22
    port: 8250
    deploy_dir: /data/pump
    data_dir: /data/pump/data
    log_dir: /data/pump/log
    config:
      gc: 5
    arch: amd64
    os: linux
  cdc_servers:
  - host: 172.16.188.120
    ssh_port: 22
    port: 8300
    deploy_dir: /data/tdb/cdc-8300
    arch: amd64
    os: linux
  monitoring_servers:
  - host: 172.16.188.113
    ssh_port: 22
    port: 9090
    deploy_dir: /data/tdb/prometheus-9090
    data_dir: /data/tdb/data/prometheus-9090
    arch: amd64
    os: linux
  grafana_servers:
  - host: 172.16.188.123
    ssh_port: 22
    port: 3000
    deploy_dir: /data/tdb/grafana-3000
    arch: amd64
    os: linux
  alertmanager_servers:
  - host: 172.16.188.123
    ssh_port: 22
    web_port: 9093
    cluster_port: 9094
    deploy_dir: /data/tdb/alertmanager-9093
    data_dir: /data/tdb/data/alertmanager-9093
    arch: amd64
    os: linux
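## The parameter reference above has the same layout as the cluster metadata TiUP keeps for each
## deployed cluster. Assuming the default TiUP home directory, it can be viewed directly (sketch):
cat ~/.tiup/storage/cluster/clusters/<cluster-name>/meta.yaml
## or inspected and edited safely through TiUP itself:
tiup cluster edit-config <cluster-name>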