TiDB Local Deployment with TiUP

# Download and install TiUP:
curl --proto '=https' --tlsv1.2 -sSf https://tiup-mirrors.pingcap.com/install.sh | sh

# Declare the global environment variables:
# Note: after installation, TiUP prints the absolute path of the corresponding shell profile file. Replace ${your_shell_profile} with that actual path before running the source command below.
source ${your_shell_profile}

# In this deployment (running as root):
source /root/.bash_profile

# Install the TiUP cluster component:
tiup cluster

# If TiUP cluster is already installed on this machine, update it to the latest version:
tiup update --self && tiup update cluster

# Because multiple instances are simulated on a single machine, increase the sshd session limit as the root user:
# In /etc/ssh/sshd_config, raise MaxSessions to 20.
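# For example (a sketch that assumes the stock sshd_config still contains a commented-out MaxSessions line; adjust the pattern if yours differs):
sed -i 's/^#\?MaxSessions.*/MaxSessions 20/' /etc/ssh/sshd_config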
# Restart the sshd service:
service sshd restart

# Create and start the cluster
# Edit a configuration file from the template below and save it as topo.yaml, where:
# user: "tidb": the cluster is managed internally through the tidb system user (created automatically when deploying as root); by default port 22 is used to log in to the target machine over SSH
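# This deployment logs in to the target machine as the tidb user itself (see the deploy command further below), so the user and the /ssd1 directories must already exist. A minimal sketch to create them (user name and paths taken from this deployment; adjust as needed):
useradd -m tidb
mkdir -p /ssd1/tidb-data /ssd1/tidb-deploy
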
# Set a password for the tidb system user (the deploy command below logs in over SSH as this user with -p):
passwd tidb

# Make sure the deploy and data directories are owned by the tidb user:
chown -R tidb:tidb /ssd1/tidb-data
chown -R tidb:tidb /ssd1/tidb-deploy

# replication.enable-placement-rules: set this PD parameter to ensure TiFlash runs correctly
# host: set to the IP address of the deployment host
# The configuration template is as follows:
cat /ssd1/tidb-deploy/topo.yaml
# # Global variables are applied to all deployments and used as the default value of
# # the deployments if a specific deployment value is missing.
global:
 user: "tidb"
 ssh_port: 22
 deploy_dir: "/ssd1/tidb-deploy"
 data_dir: "/ssd1/tidb-data"

# # Monitored variables are applied to all the machines.
# monitored:
#  node_exporter_port: 9100
#  blackbox_exporter_port: 9115

server_configs:
 tidb:
   instance.tidb_slow_log_threshold: 300
 tikv:
   readpool.storage.use-unified-pool: false
   readpool.coprocessor.use-unified-pool: true
 pd:
   replication.enable-placement-rules: true
   replication.location-labels: ["host"]
# tiflash:
#   logger.level: "info"

pd_servers:
 - host: 172.16.100.42

tidb_servers:
 - host: 172.16.100.42

tikv_servers:
 - host: 172.16.100.42
   port: 20160
   status_port: 20180
   config:
     server.labels: { host: "tikv_kube42" }

# tiflash_servers:
#  - host: 172.16.100.42
# 
# monitoring_servers:
#  - host: 172.16.100.42
# 
# grafana_servers:
#  - host: 172.16.100.42
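
# Optionally, check whether the target machine meets the topology's requirements before deploying (same --user/-p flags as the deploy command below; --apply attempts to fix detected issues automatically):
tiup cluster check /ssd1/tidb-deploy/topo.yaml --user tidb -p
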
# Run the cluster deployment command:
tiup cluster deploy <cluster-name> <version> ./topo.yaml --user root -p
# <cluster-name> is the cluster name
# <version> is the cluster version, e.g. v8.1.1; run tiup list tidb to see the TiDB versions currently available for deployment
# -p means log in to the target machine with a password prompt
tiup cluster deploy test v4.0.14 /ssd1/tidb-deploy/topo.yaml --user tidb -p

# Cluster `test` deployed successfully, you can start it with command: `tiup cluster start test --init`

# Example output when the cluster is started with --init:
Started cluster `test` successfully
The root password of TiDB database has been changed.
The new password is: '84^j1TZ-+2b9A@S7Hs'.
Copy and record it to somewhere safe, it is only displayed once, and will not be stored.
The generated password can NOT be get and shown again.

# Start the cluster:
tiup cluster start test
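
# Alternatively, start with --init so that TiUP generates the random root password shown in the output above:
tiup cluster start test --init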

# Access the cluster:
# Install the MySQL client (skip this step if it is already installed).
yum -y install mysql

# Connect to the TiDB database. The password is empty, unless the cluster was started with --init, in which case use the generated password shown above:
mysql -h 172.16.100.42 -P 4000 -u root
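
# Quick connectivity check (a hypothetical one-liner; adjust host and credentials as needed):
mysql -h 172.16.100.42 -P 4000 -u root -e "SELECT tidb_version();"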

# Access the TiDB Grafana monitoring (requires the monitoring_servers/grafana_servers sections in topo.yaml to be uncommented):
# Open http://172.16.100.42:3000; the default username and password are both admin.
# Access the TiDB Dashboard:
# Open http://172.16.100.42:2379/dashboard; the default username is root and the password is empty.

# PATH and bash completion for tiup in the tidb user's shell profile:
cat .bashrc
PATH=$PATH:$HOME/.local/bin:$HOME/bin
export PATH
export PATH=/home/tidb/.tiup/bin:$PATH
source <(tiup completion bash)

# Enable the cluster services to start automatically on boot
tiup cluster enable test

# List the clusters that are currently deployed:
tiup cluster list

# View the topology and status of the cluster:
tiup cluster display test
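
# If your TiUP version supports it, display can also print just the TiDB Dashboard URL:
tiup cluster display test --dashboard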

# Install the BR backup/restore tool
tiup install br:v6.6.0

# Credentials for the S3-compatible storage that holds the backup:
export AWS_ACCESS_KEY_ID="xxxxxxxxxxxxxxxx"
export AWS_SECRET_ACCESS_KEY="xxxxxxxxxxxxxxxxxxxxxx"
# Restore a single table from the backup:
tiup br restore table --db bikeca --table t_bike_ca_trip --pd "172.16.100.42:2379" \
  --send-credentials-to-tikv=true \
  --log-file restore.log \
  --storage "s3://tidbbak/diiing-08-10-2024?access-key=${AWS_ACCESS_KEY_ID}&secret-access-key=${AWS_SECRET_ACCESS_KEY}" \
  --s3.endpoint "http://10.96.136.0:7480" \
  --ratelimit 64 \
  --check-requirements=false
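
# The restore above assumes a backup taken earlier with a matching br backup command, roughly like the following sketch (same bucket, path, and endpoint; not copied from an actual run):
tiup br backup table --db bikeca --table t_bike_ca_trip --pd "172.16.100.42:2379" \
  --send-credentials-to-tikv=true \
  --log-file backup.log \
  --storage "s3://tidbbak/diiing-08-10-2024?access-key=${AWS_ACCESS_KEY_ID}&secret-access-key=${AWS_SECRET_ACCESS_KEY}" \
  --s3.endpoint "http://10.96.136.0:7480" \
  --ratelimit 64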

# Refresh the table statistics after the restore (run from a SQL client):
ANALYZE TABLE t_bike_ca_trip;
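
# Optionally verify statistics health afterwards (SHOW STATS_HEALTHY accepts a WHERE filter; column names are assumed from its output):
mysql -h 172.16.100.42 -P 4000 -u root -e "SHOW STATS_HEALTHY WHERE Db_name = 'bikeca' AND Table_name = 't_bike_ca_trip';"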

Basic Tuning

## Edit the cluster configuration file
tiup cluster edit-config test

## Add the following under the tidb section
server_configs:
  tidb:
    instance.tidb_slow_log_threshold: 300
    ## mem-quota-query: 10737418240  # deprecated; use the tidb_mem_quota_query system variable instead (see below)
    performance.txn-total-size-limit: 10737418240

## Reload (rolling restart) the tidb nodes to apply the change
tiup cluster reload test -R tidb

## Set the per-query memory quota at runtime (10 GiB):
SET GLOBAL tidb_mem_quota_query = 10737418240;

## Check the variable value
show GLOBAL variables like 'tidb_mem_quota_query';
show session variables like 'tidb_mem_quota_query';

## Verify the configuration took effect: connect to the TiDB cluster and run:
show config where type= 'tidb' and name = 'performance.txn-total-size-limit';
## Parameter reference (edit-config output from another cluster, v4.0.15, for comparison)
user: tidb
tidb_version: v4.0.15
last_ops_ver: |-
  v1.2.3 tiup
  Go Version: go1.13
  Git Branch: release-1.2
  GitHash: df7e28a
topology:
  global:
    user: tidb
    ssh_port: 22
    ssh_type: builtin
    deploy_dir: /data/tdb
    data_dir: /data/tdb/data
    os: linux
    arch: amd64
  monitored:
    node_exporter_port: 9100
    blackbox_exporter_port: 9115
    deploy_dir: /data/tdb/monitor-9100
    data_dir: /data/tdb/data/monitor-9100
    log_dir: /data/tdb/monitor-9100/log
  server_configs:
    tidb:
      alter-primary-key: true
      binlog.enable: true
      binlog.ignore-error: false
      binlog.write-timeout: 15s
      compatible-kill-query: false
      enable-streaming: true
      host: 0.0.0.0
      lease: 45s
      log.enable-timestamp: true
      log.expensive-threshold: 10000
      log.file.max-days: 8
      log.format: text
      log.level: info
      log.query-log-max-len: 4096
      log.slow-threshold: 500
      lower-case-table-names: 2
      oom-action: log
      performance.committer-concurrency: 256
      performance.stmt-count-limit: 500000
      performance.tcp-keep-alive: true
      performance.txn-total-size-limit: 104857600
      prepared-plan-cache.enabled: true
      proxy-protocol.networks: 172.16.188.123
      run-ddl: true
      split-table: true
      store: tikv
      tikv-client.grpc-connection-count: 32
      tikv-client.max-batch-size: 256
      token-limit: 1500
    tikv:
      coprocessor.region-max-size: 384MB
      coprocessor.region-split-size: 256MB
      server.grpc-concurrency: 8
      log-level: info
      raftdb.max-background-jobs: 8
      raftstore.apply-max-batch-size: 16384
      raftstore.apply-pool-size: 8
      raftstore.hibernate-regions: true
      raftstore.raft-max-inflight-msgs: 20480
      raftstore.raft-max-size-per-msg: 2MB
      raftstore.region-split-check-diff: 32MB
      raftstore.store-max-batch-size: 16384
      raftstore.store-pool-size: 8
      raftstore.sync-log: false
      readpool.coprocessor.max-tasks-per-worker-normal: 8000
      readpool.unified.max-thread-count: 32
      rocksdb.bytes-per-sync: 512MB
      rocksdb.compaction-readahead-size: 2MB
      rocksdb.defaultcf.level0-slowdown-writes-trigger: 32
      rocksdb.defaultcf.level0-stop-writes-trigger: 64
      rocksdb.defaultcf.max-write-buffer-number: 24
      rocksdb.defaultcf.write-buffer-size: 256MB
      rocksdb.lockcf.level0-slowdown-writes-trigger: 32
      rocksdb.lockcf.level0-stop-writes-trigger: 64
      rocksdb.max-background-flushes: 4
      rocksdb.max-background-jobs: 8
      rocksdb.max-sub-compactions: 4
      rocksdb.use-direct-io-for-flush-and-compaction: true
      rocksdb.wal-bytes-per-sync: 256MB
      rocksdb.writecf.level0-slowdown-writes-trigger: 32
      rocksdb.writecf.level0-stop-writes-trigger: 64
      rocksdb.writecf.max-write-buffer-number: 24
      rocksdb.writecf.write-buffer-size: 256MB
      storage.block-cache.capacity: 8GB
      storage.scheduler-concurrency: 4096000
      storage.scheduler-worker-pool-size: 8
    pd:
      auto-compaction-mode: periodic
      auto-compaction-retention: 10m
      quota-backend-bytes: 17179869184
    tiflash: {}
    tiflash-learner: {}
    pump: {}
    drainer: {}
    cdc: {}
  tidb_servers:
  - host: 172.16.188.123
    ssh_port: 22
    port: 4000
    status_port: 10080
    deploy_dir: /data/tdb/tidb-4000
    arch: amd64
    os: linux
  - host: 172.16.188.143
    ssh_port: 22
    port: 4000
    status_port: 10080
    deploy_dir: /data/tdb/tidb-4000
    arch: amd64
    os: linux
  tikv_servers:
  - host: 172.16.188.140
    ssh_port: 22
    port: 20160
    status_port: 20180
    deploy_dir: /data/tdb/tikv-20160
    data_dir: /data/tdb/data/tikv-20160
    arch: amd64
    os: linux
  - host: 172.16.188.143
    ssh_port: 22
    port: 20160
    status_port: 20180
    deploy_dir: /data/tdb/tikv-20160
    data_dir: /data/tdb/data/tikv-20160
    arch: amd64
    os: linux
  - host: 172.16.188.113
    ssh_port: 22
    port: 20160
    status_port: 20180
    deploy_dir: /data/tdb/tikv-20160
    data_dir: /data/tdb/data/tikv-20160
    arch: amd64
    os: linux
  tiflash_servers: []
  pd_servers:
  - host: 172.16.188.120
    ssh_port: 22
    name: pd-172.16.188.120-2379
    client_port: 2379
    peer_port: 2380
    deploy_dir: /data/tdb/pd-2379
    data_dir: /data/tdb/data/pd-2379
    arch: amd64
    os: linux
  - host: 172.16.188.118
    ssh_port: 22
    name: pd-172.16.188.118-2379
    client_port: 2379
    peer_port: 2380
    deploy_dir: /data/tdb/pd-2379
    data_dir: /data/tdb/data/pd-2379
    arch: amd64
    os: linux
  - host: 172.16.188.125
    ssh_port: 22
    name: pd-172.16.188.125-2379
    client_port: 2379
    peer_port: 2380
    deploy_dir: /data/tdb/pd-2379
    data_dir: /data/tdb/data/pd-2379
    arch: amd64
    os: linux
  pump_servers:
  - host: 172.16.188.143
    ssh_port: 22
    port: 8250
    deploy_dir: /data/pump
    data_dir: /data/pump/data
    log_dir: /data/pump/log
    config:
      gc: 5
    arch: amd64
    os: linux
  cdc_servers:
  - host: 172.16.188.120
    ssh_port: 22
    port: 8300
    deploy_dir: /data/tdb/cdc-8300
    arch: amd64
    os: linux
  monitoring_servers:
  - host: 172.16.188.113
    ssh_port: 22
    port: 9090
    deploy_dir: /data/tdb/prometheus-9090
    data_dir: /data/tdb/data/prometheus-9090
    arch: amd64
    os: linux
  grafana_servers:
  - host: 172.16.188.123
    ssh_port: 22
    port: 3000
    deploy_dir: /data/tdb/grafana-3000
    arch: amd64
    os: linux
  alertmanager_servers:
  - host: 172.16.188.123
    ssh_port: 22
    web_port: 9093
    cluster_port: 9094
    deploy_dir: /data/tdb/alertmanager-9093
    data_dir: /data/tdb/data/alertmanager-9093
    arch: amd64
    os: linux