1. Environment Planning
Three machines, five instances per machine: 1 mongos, 1 config server, and 3 shard servers (mongod) each.
IP: 192.168.56.107 (mongodb01)  Instances: mongos:30000  shard1:40001 (primary)  shard2:40002 (arbiter)  shard3:40003 (secondary)  config:27027
IP: 192.168.56.105 (mongodb02)  Instances: mongos:30000  shard1:40001 (secondary)  shard2:40002 (primary)  shard3:40003 (arbiter)  config:27028
IP: 192.168.56.106 (mongodb03)  Instances: mongos:30000  shard1:40001 (arbiter)  shard2:40002 (secondary)  shard3:40003 (primary)  config:27029
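Optionally, if you prefer to refer to the machines by hostname instead of IP, you could add entries like the following to /etc/hosts on each of the three machines (an optional convenience; the rest of this guide uses the IP addresses directly):
192.168.56.107 mongodb01
192.168.56.105 mongodb02
192.168.56.106 mongodb03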
2. Create the Directories
On 192.168.56.107 (mongodb01):
mkdir -p /mongodb01/{data,logs,apps,run}
mkdir -p /mongodb01/data/shard{1,2,3}
mkdir -p /mongodb01/data/config
On 192.168.56.105 (mongodb02):
mkdir -p /mongodb02/{data,logs,apps,run}
mkdir -p /mongodb02/data/shard{1,2,3}
mkdir -p /mongodb02/data/config
On 192.168.56.106 (mongodb03):
mkdir -p /mongodb03/{data,logs,apps,run}
mkdir -p /mongodb03/data/shard{1,2,3}
mkdir -p /mongodb03/data/config
3. Configure Environment Variables
As root, on mongodb01; /mongodb01/apps/bin will hold the MongoDB binaries:
echo 'export PATH=$PATH:/mongodb01/apps/bin' >> /etc/profile
source /etc/profile
As root, on mongodb02; /mongodb02/apps/bin will hold the MongoDB binaries:
echo 'export PATH=$PATH:/mongodb02/apps/bin' >> /etc/profile
source /etc/profile
As root, on mongodb03; /mongodb03/apps/bin will hold the MongoDB binaries:
echo 'export PATH=$PATH:/mongodb03/apps/bin' >> /etc/profile
source /etc/profile
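To confirm that the entry was appended and is active in the current shell (the binaries themselves are installed in step 5), for example on mongodb01:
tail -n 1 /etc/profile
echo $PATH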
4. Create the User and Set Permissions
On mongodb01:
groupadd -g 10001 mongodb
useradd -u 10001 -g mongodb mongodb
id mongodb
passwd mongodb
Enter the new password: 123
chown -R mongodb:mongodb /mongodb01
chmod -R 775 /mongodb01
On mongodb02:
groupadd -g 10001 mongodb
useradd -u 10001 -g mongodb mongodb
id mongodb
passwd mongodb
Enter the new password: 123
chown -R mongodb:mongodb /mongodb02
chmod -R 775 /mongodb02
On mongodb03:
groupadd -g 10001 mongodb
useradd -u 10001 -g mongodb mongodb
id mongodb
passwd mongodb
Enter the new password: 123
chown -R mongodb:mongodb /mongodb03
chmod -R 775 /mongodb03
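To verify ownership and permissions, for example on mongodb01:
ls -ld /mongodb01 /mongodb01/data /mongodb01/data/shard1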
5. Download and Install
Switch to the mongodb user:
su mongodb
On mongodb01, change to the apps directory:
cd /mongodb01/apps
Download the tarball:
wget https://fastdl.mongodb.org/linux/mongodb-linux-x86_64-rhel70-4.4.1.tgz
Extract it and move the binaries into place:
tar -xf mongodb-linux-x86_64-rhel70-4.4.1.tgz
mv mongodb-linux-x86_64-rhel70-4.4.1/bin ./
rm -rf mongodb-linux-x86_64-rhel70-4.4.1
On mongodb02, change to the apps directory:
cd /mongodb02/apps
Download the tarball:
wget https://fastdl.mongodb.org/linux/mongodb-linux-x86_64-rhel70-4.4.1.tgz
Extract it and move the binaries into place:
tar -xf mongodb-linux-x86_64-rhel70-4.4.1.tgz
mv mongodb-linux-x86_64-rhel70-4.4.1/bin ./
rm -rf mongodb-linux-x86_64-rhel70-4.4.1
On mongodb03, change to the apps directory:
cd /mongodb03/apps
Download the tarball:
wget https://fastdl.mongodb.org/linux/mongodb-linux-x86_64-rhel70-4.4.1.tgz
Extract it and move the binaries into place:
tar -xf mongodb-linux-x86_64-rhel70-4.4.1.tgz
mv mongodb-linux-x86_64-rhel70-4.4.1/bin ./
rm -rf mongodb-linux-x86_64-rhel70-4.4.1
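To verify the installation and the PATH set up in step 3 (the reported version should be 4.4.1):
mongod --version
mongos --version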
6. Create the Configuration Files
On mongodb01, create the conf directory and the config server configuration file:
mkdir -p /mongodb01/apps/conf/
vi /mongodb01/apps/conf/mongodb01-config.yml
echo "systemLog:
destination: file
# 注意修改路径
path: "/mongodb01/logs/mongodb01-config.log"
logAppend: true
storage:
journal:
enabled: true
#注意修改路径
dbPath: "/mongodb01/data/config"
engine: wiredTiger
wiredTiger:
engineConfig:
cacheSizeGB: 12
processManagement:
fork: true
pidFilePath: "/mongodb01/run/mongodb01-config.pid"
net:
bindIp: 0.0.0.0
### 注意修改端口
port: 27027
setParameter:
enableLocalhostAuthBypass: true
replication:
# 复制集名称
replSetName: "mgconfig"
sharding:
#作为配置服务
clusterRole: configsvr " >>/mongodb01/apps/conf/mongodb01-config.yml
On mongodb02, create the conf directory and the config server configuration file:
mkdir -p /mongodb02/apps/conf/
vi /mongodb02/apps/conf/mongodb02-config.yml
echo "systemLog:
destination: file
#注意修改路径
path: "/mongodb02/logs/mongodb02-config.log"
logAppend: true
storage:
journal:
enabled: true
#注意修改路径
dbPath: "/mongodb02/data/config"
engine: wiredTiger
wiredTiger:
engineConfig:
cacheSizeGB: 12
processManagement:
fork: true
pidFilePath: "/mongodb02/run/mongodb02-config.pid"
net:
bindIp: 0.0.0.0
#注意修改端口
port: 27028
setParameter:
enableLocalhostAuthBypass: true
replication:
#复制集名称
replSetName: "mgconfig"
sharding:
#作为配置服务
clusterRole: configsvr " >>/mongodb02/apps/conf/mongodb02-config.yml
On mongodb03, create the conf directory and the config server configuration file:
mkdir -p /mongodb03/apps/conf/
vi /mongodb03/apps/conf/mongodb03-config.yml
echo "systemLog:
destination: file
#注意修改路径
path: "/mongodb03/logs/mongodb03-config.log"
logAppend: true
storage:
journal:
enabled: true
#注意修改路径
dbPath: "/mongodb03/data/config"
engine: wiredTiger
wiredTiger:
engineConfig:
cacheSizeGB: 12
processManagement:
fork: true
pidFilePath: "/mongodb03/run/mongodb03-config.pid"
net:
bindIp: 0.0.0.0
#注意修改端口
port: 27029
setParameter:
enableLocalhostAuthBypass: true
replication:
#复制集名称
replSetName: "mgconfig"
sharding:
#作为配置服务
clusterRole: configsvr " >>/mongodb03/apps/conf/mongodb03-config.yml
On mongodb01, create the shard and router configuration files:
vi /mongodb01/apps/conf/mongodb01-shard1.yml
echo "systemLog:
destination: file
path: "/mongodb01/logs/mongodb01-shard1.log"
logAppend: true
storage:
journal:
enabled: true
dbPath: "/mongodb01/data/shard1"
processManagement:
fork: true
pidFilePath: "/mongodb01/run/mongodb01-shard1.pid"
net:
bindIp: 0.0.0.0
#注意修改端口
port: 40001
setParameter:
enableLocalhostAuthBypass: true
replication:
#复制集名称
replSetName: "shard1"
sharding:
#作为分片服务
clusterRole: shardsvr" >>/mongodb01/apps/conf/mongodb01-shard1.yml
-----
vi /mongodb01/apps/conf/mongodb01-shard2.yml
echo "systemLog:
destination: file
path: "/mongodb01/logs/mongodb01-shard2.log"
logAppend: true
storage:
journal:
enabled: true
dbPath: "/mongodb01/data/shard2"
processManagement:
fork: true
pidFilePath: "/mongodb01/run/mongodb01-shard2.pid"
net:
bindIp: 0.0.0.0
#注意修改端口
port: 40002
setParameter:
enableLocalhostAuthBypass: true
replication:
#复制集名称
replSetName: "shard2"
sharding:
#作为分片服务
clusterRole: shardsvr" >>/mongodb01/apps/conf/mongodb01-shard2.yml
-----
vi /mongodb01/apps/conf/mongodb01-shard3.yml
echo "systemLog:
destination: file
path: "/mongodb01/logs/mongodb01-shard3.log"
logAppend: true
storage:
journal:
enabled: true
dbPath: "/mongodb01/data/shard3"
processManagement:
fork: true
pidFilePath: "/mongodb01/run/mongodb01-shard3.pid"
net:
bindIp: 0.0.0.0
#注意修改端口
port: 40003
setParameter:
enableLocalhostAuthBypass: true
replication:
#复制集名称
replSetName: "shard3"
sharding:
#作为分片服务
clusterRole: shardsvr" >>/mongodb01/apps/conf/mongodb01-shard3.yml
-----
vi /mongodb01/apps/conf/mongodb01-route.yml
echo "systemLog:
destination: file
#注意修改路径
path: "/mongodb01/logs/mongodb01-route.log"
logAppend: true
processManagement:
fork: true
pidFilePath: "/mongodb01/run/mongodb01-route.pid"
net:
bindIp: 0.0.0.0
#注意修改端口
port: 30000
setParameter:
enableLocalhostAuthBypass: true
replication:
localPingThresholdMs: 15
sharding:
#关联配置服务
configDB: mgconfig/192.168.56.107:27027,192.168.56.105:27028,192.168.56.106:27029 " >>/mongodb01/apps/conf/mongodb01-route.yml
On mongodb02, create the shard and router configuration files:
vi /mongodb02/apps/conf/mongodb02-shard1.yml
echo "systemLog:
destination: file
path: "/mongodb02/logs/mongodb02-shard1.log"
logAppend: true
storage:
journal:
enabled: true
dbPath: "/mongodb02/data/shard1"
processManagement:
fork: true
pidFilePath: "/mongodb02/run/mongodb02-shard1.pid"
net:
bindIp: 0.0.0.0
#注意修改端口
port: 40001
setParameter:
enableLocalhostAuthBypass: true
replication:
#复制集名称
replSetName: "shard1"
sharding:
#作为分片服务
clusterRole: shardsvr" >>/mongodb02/apps/conf/mongodb02-shard1.yml
-----
vi /mongodb02/apps/conf/mongodb02-shard2.yml
echo "systemLog:
destination: file
path: "/mongodb02/logs/mongodb02-shard2.log"
logAppend: true
storage:
journal:
enabled: true
dbPath: "/mongodb02/data/shard2"
processManagement:
fork: true
pidFilePath: "/mongodb02/run/mongodb02-shard2.pid"
net:
bindIp: 0.0.0.0
#注意修改端口
port: 40002
setParameter:
enableLocalhostAuthBypass: true
replication:
#复制集名称
replSetName: "shard2"
sharding:
#作为分片服务
clusterRole: shardsvr" >>/mongodb02/apps/conf/mongodb02-shard2.yml
-----
vi /mongodb02/apps/conf/mongodb02-shard3.yml
echo "systemLog:
destination: file
path: "/mongodb02/logs/mongodb02-shard3.log"
logAppend: true
storage:
journal:
enabled: true
dbPath: "/mongodb02/data/shard3"
processManagement:
fork: true
pidFilePath: "/mongodb02/run/mongodb02-shard3.pid"
net:
bindIp: 0.0.0.0
#注意修改端口
port: 40003
setParameter:
enableLocalhostAuthBypass: true
replication:
#复制集名称
replSetName: "shard3"
sharding:
#作为分片服务
clusterRole: shardsvr" >>/mongodb02/apps/conf/mongodb02-shard3.yml
-----
vi /mongodb02/apps/conf/mongodb02-route.yml
echo "systemLog:
destination: file
#注意修改路径
path: "/mongodb02/logs/mongodb02-route.log"
logAppend: true
processManagement:
fork: true
pidFilePath: "/mongodb02/run/mongodb02-route.pid"
net:
bindIp: 0.0.0.0
#注意修改端口
port: 30000
setParameter:
enableLocalhostAuthBypass: true
replication:
localPingThresholdMs: 15
sharding:
#关联配置服务
configDB: mgconfig/192.168.56.107:27027,192.168.56.105:27028,192.168.56.106:27029 " >>/mongodb02/apps/conf/mongodb02-route.yml
On mongodb03, create the shard and router configuration files:
vi /mongodb03/apps/conf/mongodb03-shard1.yml
echo "systemLog:
destination: file
path: "/mongodb03/logs/mongodb03-shard1.log"
logAppend: true
storage:
journal:
enabled: true
dbPath: "/mongodb03/data/shard1"
processManagement:
fork: true
pidFilePath: "/mongodb03/run/mongodb03-shard1.pid"
net:
bindIp: 0.0.0.0
#注意修改端口
port: 40001
setParameter:
enableLocalhostAuthBypass: true
replication:
#复制集名称
replSetName: "shard1"
sharding:
#作为分片服务
clusterRole: shardsvr" >>/mongodb03/apps/conf/mongodb03-shard1.yml
-----
vi /mongodb03/apps/conf/mongodb03-shard2.yml
echo "systemLog:
destination: file
path: "/mongodb03/logs/mongodb03-shard2.log"
logAppend: true
storage:
journal:
enabled: true
dbPath: "/mongodb03/data/shard2"
processManagement:
fork: true
pidFilePath: "/mongodb03/run/mongodb03-shard2.pid"
net:
bindIp: 0.0.0.0
#注意修改端口
port: 40002
setParameter:
enableLocalhostAuthBypass: true
replication:
#复制集名称
replSetName: "shard2"
sharding:
#作为分片服务
clusterRole: shardsvr" >>/mongodb03/apps/conf/mongodb03-shard2.yml
-----
vi /mongodb03/apps/conf/mongodb03-shard3.yml
echo "systemLog:
destination: file
path: "/mongodb03/logs/mongodb03-shard3.log"
logAppend: true
storage:
journal:
enabled: true
dbPath: "/mongodb03/data/shard3"
processManagement:
fork: true
pidFilePath: "/mongodb03/run/mongodb03-shard3.pid"
net:
bindIp: 0.0.0.0
#注意修改端口
port: 40003
setParameter:
enableLocalhostAuthBypass: true
replication:
#复制集名称
replSetName: "shard3"
sharding:
#作为分片服务
clusterRole: shardsvr" >>/mongodb03/apps/conf/mongodb03-shard3.yml
-----
vi /mongodb03/apps/conf/mongodb03-route.yml
echo "systemLog:
destination: file
#注意修改路径
path: "/mongodb03/logs/mongodb03-route.log"
logAppend: true
processManagement:
fork: true
pidFilePath: "/mongodb03/run/mongodb03-route.pid"
net:
bindIp: 0.0.0.0
#注意修改端口
port: 30000
setParameter:
enableLocalhostAuthBypass: true
replication:
localPingThresholdMs: 15
sharding:
#关联配置服务
configDB: mgconfig/192.168.56.107:27027,192.168.56.105:27028,192.168.56.106:27029 " >>/mongodb03/apps/conf/mongodb03-route.yml
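Optionally, before starting anything, you can ask mongod to parse a configuration file and print the resolved settings without actually starting a server (the --outputConfig flag, available since MongoDB 4.2); for example on mongodb01:
mongod --config /mongodb01/apps/conf/mongodb01-config.yml --outputConfig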
7. Deploy the Config Server Replica Set (run on all three machines)
Start the config server process (all three machines run the same steps; only the mongodb0N directory prefix differs; only mongodb01 is shown here).
On mongodb01:
cd /mongodb01/apps/conf/
mongod --config /mongodb01/apps/conf/mongodb01-config.yml
Connect to one of the instances (the same address is used from all three machines):
mongo 192.168.56.107:27027
Initialize the replica set
(the name mgconfig must match replSetName in the config server configuration files):
config={_id:"mgconfig",members:[{_id:0,host:"192.168.56.107:27027"},{_id:1,host:"192.168.56.105:27028"},{_id:2,host:"192.168.56.106:27029"}]}
rs.initiate(config)
Check the status:
rs.status()
After the replica set is initialized, the member states may not change right away (all members may briefly show as SECONDARY); they update automatically after a few seconds.
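A quick way to check the member states from the command line, without opening an interactive shell (a small convenience using the same mongo client):
mongo 192.168.56.107:27027 --eval 'rs.status().members.forEach(function(m){ print(m.name, m.stateStr) })'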
8. Deploy the shard1 Replica Set (run on all three machines; only mongodb01 is shown here)
Start the shard1 instance on all three machines:
cd /mongodb01/apps/conf/
mongod --config /mongodb01/apps/conf/mongodb01-shard1.yml
Connect to one instance (the same address is used from all three machines):
mongo 192.168.56.107:40001
Create the replica set configuration:
use admin
config={_id:"shard1",members:[{_id:0,host:"192.168.56.107:40001",priority:2},{_id:1,host:"192.168.56.105:40001",priority:1},{_id:2,host:"192.168.56.106:40001",arbiterOnly:true},]}
(The name shard1 must match replSetName in the shard1 configuration files.)
Initialize the replica set:
rs.initiate(config)
Check the status:
rs.status()
As before, the member states may take a few seconds to update (all members may briefly show as SECONDARY).
9. Deploy the shard2 Replica Set (run on all three machines; only mongodb01 is shown here)
Start the shard2 instance on all three machines:
cd /mongodb01/apps/conf/
mongod --config /mongodb01/apps/conf/mongodb01-shard2.yml
Connect to the second node to create the replica set.
Why the second node? The planned primary for shard2 is 192.168.56.105:40002, and an arbiter cannot accept writes, so connect to 105 rather than 107 here (use this address from any of the three machines):
mongo 192.168.56.105:40002
Create the replica set configuration:
use admin
config={_id:"shard2",members:[{_id:0,host:"192.168.56.107:40002",arbiterOnly:true},{_id:1,host:"192.168.56.105:40002",priority:2}, {_id:2,host:"192.168.56.106:40002",priority:1},]}
(The name shard2 must match replSetName in the shard2 configuration files.)
Initialize the replica set:
rs.initiate(config)
Check the status:
rs.status()
As before, the member states may take a few seconds to update.
10. Deploy the shard3 Replica Set (run on all three machines; only mongodb01 is shown here)
Start the shard3 instance on all three machines:
cd /mongodb01/apps/conf/
mongod --config /mongodb01/apps/conf/mongodb01-shard3.yml
Connect to the third node to create the replica set.
Why the third node? The planned primary for shard3 is 192.168.56.106:40003 (use this address from any of the three machines):
mongo 192.168.56.106:40003
Create the replica set configuration:
use admin
config={_id:"shard3",members:[{_id:0,host:"192.168.56.107:40003",priority:1},{_id:1,host:"192.168.56.105:40003",arbiterOnly:true},
{_id:2,host:"192.168.56.106:40003",priority:2},]}
(The name shard3 must match replSetName in the shard3 configuration files.)
Initialize the replica set:
rs.initiate(config)
Check the status:
rs.status()
As before, the member states may take a few seconds to update.
11. Enable Sharding
Start the router (mongos) on mongodb01 and connect to it:
cd /mongodb01/apps/conf/
mongos --config /mongodb01/apps/conf/mongodb01-route.yml
mongo 192.168.56.107:30000
use admin
sh.addShard("shard1/192.168.56.107:40001,192.168.56.105:40001,192.168.56.106:40001")
sh.addShard("shard2/192.168.56.107:40002,192.168.56.105:40002,192.168.56.106:40002")
sh.addShard("shard3/192.168.56.107:40003,192.168.56.105:40003,192.168.56.106:40003")
Check the status:
sh.status()
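The shard list can also be read with the listShards admin command (run against mongos, in the admin database as above); it should show the three replica sets registered above:
db.adminCommand({ listShards: 1 })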
12. Service Script
Write a service script to make starting and stopping the cluster easier. The script below is for mongodb01; adjust the mongodb01 paths for the other two machines.
#!/bin/bash
# mongodb init script: start / stop / reload the cluster processes
# chkconfig: - 80 15
# description: Mongo database sharded cluster
# processname: mongod
user=mongodb
# source function library (uncomment the next line if you want the 'status' helper it provides)
#source /etc/init.d/functions
# location of the config files
config_configfile="/mongodb01/apps/conf/mongodb01-config.yml"
router_configfile="/mongodb01/apps/conf/mongodb01-route.yml"
shard1_configfile="/mongodb01/apps/conf/mongodb01-shard1.yml"
shard2_configfile="/mongodb01/apps/conf/mongodb01-shard2.yml"
shard3_configfile="/mongodb01/apps/conf/mongodb01-shard3.yml"
# options for starting/stopping the mongodb servers
start_config_options=" --config $config_configfile"
stop_config_options=" --shutdown --dbpath /mongodb01/data/config"
start_router_options=" --config $router_configfile"
start_shard1_options=" --config $shard1_configfile"
stop_shard1_options=" --shutdown --dbpath /mongodb01/data/shard1"
start_shard2_options=" --config $shard2_configfile"
stop_shard2_options=" --shutdown --dbpath /mongodb01/data/shard2"
start_shard3_options=" --config $shard3_configfile"
stop_shard3_options=" --shutdown --dbpath /mongodb01/data/shard3"
# location of mongod
mongod="/mongodb01/apps/bin/mongod"
# location of mongos
mongos="/mongodb01/apps/bin/mongos"
# lock files
config_lockfile="/mongodb01/data/config/mongod.lock"
shard1_lockfile="/mongodb01/data/shard1/mongod.lock"
shard2_lockfile="/mongodb01/data/shard2/mongod.lock"
shard3_lockfile="/mongodb01/data/shard3/mongod.lock"
# pid files
config_pidfile="/mongodb01/run/mongodb01-config.pid"
router_pidfile="/mongodb01/run/mongodb01-route.pid"
shard1_pidfile="/mongodb01/run/mongodb01-shard1.pid"
shard2_pidfile="/mongodb01/run/mongodb01-shard2.pid"
shard3_pidfile="/mongodb01/run/mongodb01-shard3.pid"
#function of start config server
function start-config(){
#print the tips
echo -n $"Starting mongod of config_server:"
$mongod $start_config_options
#get the result
RETVAL=$?
if [ $RETVAL -eq 0 ];then
touch $config_lockfile
fi
}
#function of stop config server
function stop-config(){
#print the tips
echo $"stopping mongod of config_server:"
$mongod $stop_config_options
RETVAL=$?
if [ $RETVAL -eq 0 ];then
rm -f $config_lockfile
rm -f $config_pidfile
fi
}
#function of start router server
function start-router(){
#print the tips
echo $"Starting mongod of router_server:"
$mongos $start_router_options
}
#function of stop router server
function stop-router(){
#print the tips
echo $"stopping mongod of router_server:"
kill `cat $router_pidfile`
RETVAL=$?
if [ $RETVAL -eq 0 ];then
rm -f $router_pidfile
fi
}
#function of start shard1
function start-shard1(){
#print the tips
echo $"Starting mongod of shard1_server:"
$mongod $start_shard1_options
#get the result
RETVAL=$?
if [ $RETVAL -eq 0 ];then
touch $shard1_lockfile
fi
}
#function of stop shard1
function stop-shard1(){
#print the tips
echo $"stopping mongod of shard1_server:"
$mongod $stop_shard1_options
RETVAL=$?
if [ $RETVAL -eq 0 ];then
rm -f $shard1_lockfile
rm -f $shard1_pidfile
fi
}
#function of start shard2
function start-shard2(){
#print the tips
echo -n $"Starting mongod of shard2_server:"
$mongod $start_shard2_options
#get the result
RETVAL=$?
if [ $RETVAL -eq 0 ];then
touch $shard2_lockfile
fi
}
#function of stop shard2
function stop-shard2(){
#print the tips
echo $"stopping mongod of shard2_server:"
$mongod $stop_shard2_options
RETVAL=$?
if [ $RETVAL -eq 0 ];then
rm -f $shard2_lockfile
rm -f $shard2_pidfile
fi
}
#function of start shard3
function start-shard3(){
#print the tips
echo -n $"Starting mongod of shard3_server:"
$mongod $start_shard3_options
#get the result
RETVAL=$?
if [ $RETVAL -eq 0 ];then
touch $shard3_lockfile
fi
}
#function of stop shard3
function stop-shard3(){
#print the tips
echo $"stopping mongod of shard3_server:"
$mongod $stop_shard3_options
RETVAL=$?
if [ $RETVAL -eq 0 ];then
rm -f $shard3_lockfile
rm -f $shard3_pidfile
fi
}
start(){
start-config
echo ''
start-shard1
echo ''
start-shard2
echo ''
start-shard3
echo ''
start-router
}
stop(){
stop-shard1
echo 'stop-shard1 ok'
stop-shard2
echo 'stop-shard2 ok'
stop-shard3
echo 'stop-shard3 ok'
stop-router
echo 'stop-router ok'
stop-config
echo 'stop-config ok'
}
RETVAL=0
case "$1" in
start)
start
;;
stop)
stop
;;
start-all)
start
;;
stop-all)
stop
;;
start-shard3)
start-shard3
;;
stop-shard3)
stop-shard3
;;
start-shard2)
start-shard2
;;
stop-shard2)
stop-shard2
;;
start-shard1)
start-shard1
;;
stop-shard1)
stop-shard1
;;
start-config)
start-config
;;
stop-config)
stop-config
;;
start-router)
start-router
;;
restart-config |reload-config |force-reload-config)
stop-config
start-config
;;
restart-router |reload-router |force-reload-router)
stop-router
start-router
;;
restart-shard1 |reload-shard1 |force-reload-shard1)
stop-shard1
start-shard1
;;
restart-shard2 |reload-shard2 |force-reload-shard2)
stop-shard2
start-shard2
;;
restart-shard3 |reload-shard3 |force-reload-shard3)
stop-shard3
start-shard3
;;
restart-all |reload-all |force-reload-all)
stop
start
;;
condrestart)
# restart only if the cluster was running (config server lock file present)
[ -f $config_lockfile ] && { stop; start; } || :
;;
status)
status $mongod
RETVAL=$?
;;
*)
echo "Usage: $0 {start-*|stop-*|restart-*|status|reload-*|force-reload-*|condrestart(* in {all,config,router,shard1,shard2,shard3})}"
esac
exit $RETVAL
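Assuming the script above is saved as /mongodb01/apps/mongod01.sh (the file name here is just an example), usage looks like this:
chmod +x /mongodb01/apps/mongod01.sh
/mongodb01/apps/mongod01.sh start          # start config server, shard1-3 and the router
/mongodb01/apps/mongod01.sh stop           # stop all five processes
/mongodb01/apps/mongod01.sh restart-shard1 # restart a single component
Note that the status action relies on the status helper from /etc/init.d/functions, which is commented out near the top of the script.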
13. Test the Sharding
Simulate writing data.
Insert 60,000 documents into the tyuser collection of the tydb database in a loop:
mongo 192.168.56.107:30000
use tydb
show collections
for(i=1;i<=60000;i++){db.tyuser.insert({"id":i,"name":"ty"+i})}
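The loop above inserts documents one at a time, which is easy to read but slow; an optional, faster variant with the same data uses the shell's unordered bulk API:
var bulk = db.tyuser.initializeUnorderedBulkOp()
for (var i = 1; i <= 60000; i++) { bulk.insert({"id": i, "name": "ty" + i}) }
bulk.execute()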
Enable sharding on the database:
sh.enableSharding("tydb")
Create an index on the shard key:
db.tyuser.createIndex({"id":1})
Shard the collection on the id field:
sh.shardCollection("tydb.tyuser",{"id":1})
Check the sharding status:
sh.status()
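Once the collection is sharded, you can also see how its documents and chunks are spread across the shards with:
db.tyuser.getShardDistribution()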
Start the balancer:
use admin
sh.startBalancer()
Stop the balancer:
use admin
sh.stopBalancer()
Check whether the balancer is disabled:
A return value of false means the balancer is off; you should also check whether a balancing round is still running (see below).
sh.getBalancerState()
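sh.getBalancerState() only reports whether the balancer is enabled; to confirm that no balancing round is still in progress, also check:
sh.isBalancerRunning()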