[root@test soft]# tar -C /usr/local -xf go1.25.0.linux-amd64.tar.gz
[root@test ~]# cat <<-\EOF >>/root/.bash_profile
	export PATH=$PATH:/usr/local/go/bin
EOF
[root@test ~]# source /root/.bash_profile
[root@test ~]# go version
go version go1.25.0 linux/amd64
Rust 安装:
1 2 3 4 5
[root@test soft]# tar -xf rust-1.89.0-x86_64-unknown-linux-gnu.tar.tar
[root@test soft]# cd rust-1.89.0-x86_64-unknown-linux-gnu/
[root@test rust-1.89.0-x86_64-unknown-linux-gnu]# ./install.sh
[root@test ~]# rustc --version
rustc 1.89.0 (29483883e 2025-08-04)
sshpass 安装:
1 2 3 4 5
[root@test soft]# tar -xf sshpass-1.10.tar.gz
[root@test soft]# cd sshpass-1.10/
[root@test sshpass-1.10]# ./configure && make && make install
[root@test ~]# sshpass -V
sshpass 1.10
[root@test ~]# cat <<-\EOF >topo.yaml
# # Global variables are applied to all deployments and used as the default value of
# # the deployments if a specific deployment value is missing.
global:
  user: "tidb"
  ssh_port: 11122
  deploy_dir: "/data/tidb-deploy"
  data_dir: "/data/tidb-data"
# # Monitored variables are applied to all the machines.
monitored:
  node_exporter_port: 9100
  blackbox_exporter_port: 9115
+ Detect CPU Arch Name
  - Detecting node 192.168.31.79 Arch info ... Done
+ Detect CPU OS Name
  - Detecting node 192.168.31.79 OS info ... Done
+ Download necessary tools
  - Downloading check tools for linux/amd64 ... Done
+ Collect basic system information
+ Collect basic system information
  - Getting system info of 192.168.31.79:11122 ... Done
+ Check time zone
  - Checking node 192.168.31.79 ... Done
+ Check system requirements
+ Check system requirements
+ Check system requirements
+ Check system requirements
  - Checking node 192.168.31.79 ... Done
  - Checking node 192.168.31.79 ... Done
  - Checking node 192.168.31.79 ... Done
  - Checking node 192.168.31.79 ... Done
  - Checking node 192.168.31.79 ... Done
  - Checking node 192.168.31.79 ... Done
  - Checking node 192.168.31.79 ... Done
  - Checking node 192.168.31.79 ... Done
  - Checking node 192.168.31.79 ... Done
+ Cleanup check files
  - Cleanup check files on 192.168.31.79:11122 ... Done
Node           Check         Result  Message
----           -----         ------  -------
192.168.31.79  os-version    Fail    CentOS Linux 7 (Core) 7.9.2009 not supported, use version 9 or higher
192.168.31.79  cpu-cores     Pass    number of CPU cores / threads: 4
192.168.31.79  ntp           Warn    The NTPd daemon may be not start
192.168.31.79  disk          Warn    mount point /data does not have 'noatime' option set
192.168.31.79  selinux       Pass    SELinux is disabled
192.168.31.79  thp           Pass    THP is disabled
192.168.31.79  command       Pass    numactl: policy: default
192.168.31.79  cpu-governor  Warn    Unable to determine current CPU frequency governor policy
192.168.31.79  memory        Pass    memory size is 8192MB
192.168.31.79  network       Pass    network speed of ens192 is 10000MB
192.168.31.79  disk          Fail    multiple components tikv:/data/tidb-data/tikv-20160,tikv:/data/tidb-data/tikv-20161,tikv:/data/tidb-data/tikv-20162,tiflash:/data/tidb-data/tiflash-9000 are using the same partition 192.168.31.79:/data as data dir
192.168.31.79  disk          Fail    mount point /data does not have 'nodelalloc' option set
[root@test ~]# tiup cluster start lucifer --init
Starting cluster lucifer...
+ [ Serial ] - SSHKeySet: privateKey=/root/.tiup/storage/cluster/clusters/lucifer/ssh/id_rsa, publicKey=/root/.tiup/storage/cluster/clusters/lucifer/ssh/id_rsa.pub
+ [Parallel] - UserSSH: user=tidb, host=192.168.31.79
+ [Parallel] - UserSSH: user=tidb, host=192.168.31.79
+ [Parallel] - UserSSH: user=tidb, host=192.168.31.79
+ [Parallel] - UserSSH: user=tidb, host=192.168.31.79
+ [Parallel] - UserSSH: user=tidb, host=192.168.31.79
+ [Parallel] - UserSSH: user=tidb, host=192.168.31.79
+ [Parallel] - UserSSH: user=tidb, host=192.168.31.79
+ [Parallel] - UserSSH: user=tidb, host=192.168.31.79
+ [ Serial ] - StartCluster
Starting component pd
	Starting instance 192.168.31.79:2379
	Start instance 192.168.31.79:2379 success
Starting component tikv
	Starting instance 192.168.31.79:20162
	Starting instance 192.168.31.79:20160
	Starting instance 192.168.31.79:20161
	Start instance 192.168.31.79:20162 success
	Start instance 192.168.31.79:20161 success
	Start instance 192.168.31.79:20160 success
Starting component tidb
	Starting instance 192.168.31.79:4000
	Start instance 192.168.31.79:4000 success
Starting component tiflash
	Starting instance 192.168.31.79:9000
	Start instance 192.168.31.79:9000 success
Starting component prometheus
	Starting instance 192.168.31.79:9090
	Start instance 192.168.31.79:9090 success
Starting component grafana
	Starting instance 192.168.31.79:3000
	Start instance 192.168.31.79:3000 success
Starting component node_exporter
	Starting instance 192.168.31.79
	Start 192.168.31.79 success
Starting component blackbox_exporter
	Starting instance 192.168.31.79
	Start 192.168.31.79 success
+ [ Serial ] - UpdateTopology: cluster=lucifer
Started cluster `lucifer` successfully
The root password of TiDB database has been changed.
The new password is: 'm+92G0Q3eNR4^6cq*@'.
Copy and record it to somewhere safe, it is only displayed once, and will not be stored.
The generated password can NOT be get and shown again.
查看集群:
1 2 3 4
[root@test ~]# tiup cluster list
Name     User  Version  Path                                          PrivateKey
----     ----  -------  ----                                          ----------
lucifer  tidb  v8.5.3   /root/.tiup/storage/cluster/clusters/lucifer  /root/.tiup/storage/cluster/clusters/lucifer/ssh/id_rsa