MongoDB Cluster Deployment

Chapter 1: Standalone Deployment

Section 1: YUM Installation

1.1 Configure the YUM Repository

[root@mongo ~]# vim /etc/yum.repos.d/mongodb.repo
[mongodb-org-4.2]
name=MongoDB Repository
baseurl=https://repo.mongodb.org/yum/redhat/$releasever/mongodb-org/4.2/x86_64/
gpgcheck=1
enabled=1
gpgkey=https://www.mongodb.org/static/pgp/server-4.2.asc

1.2 Install the MongoDB Database

[root@mongo ~]# yum install mongodb-org -y
Installing:
 mongodb-org                         x86_64                  4.2.3-1.el7                   mongodb-org-4.2                  5.8 k
Installing for dependencies:
 mongodb-org-mongos                  x86_64                  4.2.3-1.el7                   mongodb-org-4.2                   14 M
 mongodb-org-server                  x86_64                  4.2.3-1.el7                   mongodb-org-4.2                   25 M
 mongodb-org-shell                   x86_64                  4.2.3-1.el7                   mongodb-org-4.2                   17 M
 mongodb-org-tools                   x86_64                  4.2.3-1.el7                   mongodb-org-4.2                   62 M

1.3 Disable Automatic MongoDB Upgrades

# vim /etc/yum.conf
# Add the following line to the file
exclude=mongodb-org,mongodb-org-server,mongodb-org-shell,mongodb-org-mongos,mongodb-org-tools
Save and exit.

1.4 MongoDB Service Commands

Start:
# systemctl start mongod
Restart:
# systemctl restart mongod
Stop:
# systemctl stop mongod
Check status:
# systemctl status mongod
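Enable at boot (optional):
# systemctl enable mongod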

1.5 Stock Configuration and Service Files

[root@mongo ~]# cat /etc/mongod.conf 
# mongod.conf

# for documentation of all options, see:
#   http://docs.mongodb.org/manual/reference/configuration-options/

# where to write logging data.
systemLog:
  destination: file
  logAppend: true
  path: /var/log/mongodb/mongod.log

# Where and how to store data.
storage:
  dbPath: /var/lib/mongo
  journal:
    enabled: true
#  engine:
#  wiredTiger:

# how the process runs
processManagement:
  fork: true  # fork and run in background
  pidFilePath: /var/run/mongodb/mongod.pid  # location of pidfile
  timeZoneInfo: /usr/share/zoneinfo

# network interfaces
net:
  port: 27017
  bindIp: 127.0.0.1  # Enter 0.0.0.0,:: to bind to all IPv4 and IPv6 addresses or, alternatively, use the net.bindIpAll setting.


#security:

#operationProfiling:

#replication:

#sharding:

## Enterprise-Only Options

#auditLog:

#snmp:
[root@mongo ~]# cat /usr/lib/systemd/system/mongod.service
[Unit]
Description=MongoDB Database Server
Documentation=https://docs.mongodb.org/manual
After=network.target
[Service]
User=mongod
Group=mongod
Environment="OPTIONS=-f /etc/mongod.conf"
EnvironmentFile=-/etc/sysconfig/mongod
ExecStart=/usr/bin/mongod $OPTIONS
ExecStartPre=/usr/bin/mkdir -p /var/run/mongodb
ExecStartPre=/usr/bin/chown mongod:mongod /var/run/mongodb
ExecStartPre=/usr/bin/chmod 0755 /var/run/mongodb
PermissionsStartOnly=true
PIDFile=/var/run/mongodb/mongod.pid
Type=forking
# file size
LimitFSIZE=infinity
# cpu time
LimitCPU=infinity
# virtual memory size
LimitAS=infinity
# open files
LimitNOFILE=64000
# processes/threads
LimitNPROC=64000
# locked memory
LimitMEMLOCK=infinity
# total threads (user+kernel)
TasksMax=infinity
TasksAccounting=false
# Recommended limits for mongod as specified in
# http://docs.mongodb.org/manual/reference/ulimit/#recommended-settings

[Install]
WantedBy=multi-user.target

Section 2: Pre-compiled (Tarball) Installation

(The tarball installation is demonstrated as part of the cluster deployment in Chapter 2, Section 2.)

Chapter 2: Cluster Deployment

Section 1: Basic Configuration

1.1 Base Environment

IP Address      Hostname           OS Version        Database Version
192.168.56.131 mgs01.cjspd.local CentOS7.6_x86_64 MongoDB4.2.3
192.168.56.132 mgs02.cjspd.local CentOS7.6_x86_64 MongoDB4.2.3
192.168.56.133 mgs03.cjspd.local CentOS7.6_x86_64 MongoDB4.2.3

1.2 Service Port Plan

(Note: the walk-through below only brings up shard1 and shard2; shard3 is reserved in the plan but never deployed.)

IP Address      Hostname           Router Port  Config Server Port  Shard1 Port  Shard2 Port  Shard3 Port
192.168.56.131 mgs01.cjspd.local 27017 27018 27001 27002 27003
192.168.56.132 mgs02.cjspd.local 27017 27018 27001 27002 27003
192.168.56.133 mgs03.cjspd.local 27017 27018 27001 27002 27003

1.3 Directory Layout

1. MongoDB installation directory:
/usr/local/mongodb
2. Database files, log files, and configuration file directories:
[root@localhost ~]# tree /data
/data
└── mongodb
    ├── conf			# configuration file directory
    ├── data			# database file root directory
    │   ├── config		# config server data directory
    │   ├── shard1		# shard1 data directory
    │   ├── shard2		# shard2 data directory
    │   └── shard3		# shard3 data directory
    └── logs			# log file directory

1.4 Open Firewall Ports

# firewall-cmd --zone=public --add-port=27017/tcp --permanent
# firewall-cmd --zone=public --add-port=27018/tcp --permanent
# firewall-cmd --zone=public --add-port=27001/tcp --permanent
# firewall-cmd --zone=public --add-port=27002/tcp --permanent
# firewall-cmd --zone=public --add-port=27003/tcp --permanent
# firewall-cmd --reload
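Verify that the ports were opened:
# firewall-cmd --zone=public --list-ports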

1.5 Hostname Configuration

-- Set the hostname (run the matching command on each server)
# hostnamectl set-hostname mgs01.cjspd.local
# hostnamectl set-hostname mgs02.cjspd.local
# hostnamectl set-hostname mgs03.cjspd.local
-- Local name resolution
[root@mgs01 ~]# vim /etc/hosts
192.168.56.131 mgs01.cjspd.local
192.168.56.132 mgs02.cjspd.local
192.168.56.133 mgs03.cjspd.local
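A quick check that each node can resolve and reach the others (run on every server once the hosts file above is in place):
# for h in mgs01.cjspd.local mgs02.cjspd.local mgs03.cjspd.local; do ping -c 1 $h; done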

Section 2: Database Installation

2.1 Create the User and Group

[root@mgs01 home]# groupadd mongod
[root@mgs01 home]# useradd -g mongod mongod
[root@mgs01 home]# id mongod
uid=1001(mongod) gid=1001(mongod) groups=1001(mongod)

2.2 Extract the Software

[root@mgs01 setup]# tar -zxvf mongodb-linux-x86_64-rhel70-4.2.3.tgz -C /usr/local
[root@mgs01 setup]# cd /usr/local
[root@mgs01 local]# ll
drwxr-xr-x  3 root root 135 Mar 13 22:58 mongodb-linux-x86_64-rhel70-4.2.3
[root@mgs01 local]# ln -s /usr/local/mongodb-linux-x86_64-rhel70-4.2.3 mongodb
[root@mgs01 local]# ll
lrwxrwxrwx  1 root root  44 Mar 13 22:59 mongodb -> /usr/local/mongodb-linux-x86_64-rhel70-4.2.3
drwxr-xr-x  3 root root 135 Mar 13 22:58 mongodb-linux-x86_64-rhel70-4.2.3

2.3 Configure Environment Variables

[root@mgs01 local]# echo 'export PATH=$PATH:/usr/local/mongodb/bin' >> /etc/profile
[root@mgs01 local]# source /etc/profile
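Confirm the binaries are now on the PATH; this should report db version v4.2.3:
[root@mgs01 local]# mongod --version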

Section 3: Replica Set Deployment

3.1 Create Directories and Files

-- On server 192.168.56.131
[root@mgs01 local]# mkdir -p /data/mongodb/{conf,data,logs}
[root@mgs01 local]# mkdir -p /data/mongodb/data/{config,shard1,shard2}
[root@mgs01 local]# touch /data/mongodb/logs/{config.log,mongos.log,shard1.log,shard2.log}
[root@mgs01 local]# tree /data
/data
└── mongodb
    ├── conf
    ├── data
    │   ├── config
    │   ├── shard1
    │   └── shard2
    └── logs
        ├── config.log
        ├── mongos.log
        ├── shard1.log
        └── shard2.log
-- On server 192.168.56.132
[root@mgs02 local]# mkdir -p /data/mongodb/{conf,data,logs}
[root@mgs02 local]# mkdir -p /data/mongodb/data/{config,shard1,shard2}
[root@mgs02 local]# touch /data/mongodb/logs/{config.log,mongos.log,shard1.log,shard2.log}
[root@mgs02 local]# tree /data
/data
└── mongodb
    ├── conf
    ├── data
    │   ├── config
    │   ├── shard1
    │   └── shard2
    └── logs
        ├── config.log
        ├── mongos.log
        ├── shard1.log
        └── shard2.log
-- On server 192.168.56.133
[root@mgs03 local]# mkdir -p /data/mongodb/{conf,data,logs}
[root@mgs03 local]# mkdir -p /data/mongodb/data/{config,shard1,shard2}
[root@mgs03 local]# touch /data/mongodb/logs/{config.log,mongos.log,shard1.log,shard2.log}
[root@mgs03 local]# tree /data
/data
└── mongodb
    ├── conf
    ├── data
    │   ├── config
    │   ├── shard1
    │   └── shard2
    └── logs
        ├── config.log
        ├── mongos.log
        ├── shard1.log
        └── shard2.log
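Since all three servers need an identical layout, the commands above can also be driven from a single node (a sketch assuming passwordless SSH between the hosts):
# for h in mgs01.cjspd.local mgs02.cjspd.local mgs03.cjspd.local; do ssh $h "mkdir -p /data/mongodb/{conf,data,logs} /data/mongodb/data/{config,shard1,shard2}; touch /data/mongodb/logs/{config.log,mongos.log,shard1.log,shard2.log}"; done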

3.2 Config Server Deployment

① Create the configuration file

Perform the same operation on all three servers.

# vim /data/mongodb/conf/config.conf
dbpath=/data/mongodb/data/config
logpath=/data/mongodb/logs/config.log
port=27018
logappend=true
fork=true
maxConns=5000
# Replica set name
replSet=configs
# Run as a config server
configsvr=true
# Allow connections from any host
bind_ip=0.0.0.0
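For reference, the same settings can also be written in the YAML configuration format that mongod accepts (a sketch using the MongoDB 4.2 option names; paths match the INI-style file above):

systemLog:
  destination: file
  path: /data/mongodb/logs/config.log
  logAppend: true
storage:
  dbPath: /data/mongodb/data/config
net:
  port: 27018
  bindIp: 0.0.0.0
  maxIncomingConnections: 5000
processManagement:
  fork: true
replication:
  replSetName: configs
sharding:
  clusterRole: configsvr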

② Start the config servers

Start the config server on each of the three servers.

-- Start the MongoDB config server (192.168.56.131)
[root@mgs01 bin]# /usr/local/mongodb/bin/mongod -f /data/mongodb/conf/config.conf
or
[root@mgs01 bin]# mongod -f /data/mongodb/conf/config.conf
about to fork child process, waiting until server is ready for connections.
forked process: 2109
child process started successfully, parent exiting
[root@mgs01 bin]# ps -ef|grep mongo
root       2109      1 12 23:26 ?        00:00:01 /usr/local/mongodb/bin/mongod -f /data/mongodb/conf/config.conf
root       2156   1959  0 23:26 pts/0    00:00:00 grep --color=auto mongo
-- Start the MongoDB config server (192.168.56.132)
[root@mgs02 logs]# mongod -f /data/mongodb/conf/config.conf
about to fork child process, waiting until server is ready for connections.
forked process: 2105
child process started successfully, parent exiting
[root@mgs02 logs]# ps -ef|grep mongo
root       2105      1 20 23:27 ?        00:00:01 mongod -f /data/mongodb/conf/config.conf
root       2150   1956  0 23:27 pts/0    00:00:00 grep --color=auto mongo
-- Start the MongoDB config server (192.168.56.133)
[root@mgs03 logs]# mongod -f /data/mongodb/conf/config.conf
about to fork child process, waiting until server is ready for connections.
forked process: 2109
child process started successfully, parent exiting
[root@mgs03 logs]# ps -ef|grep mongo
root       2109      1 23 23:27 ?        00:00:01 mongod -f /data/mongodb/conf/config.conf
root       2153   1953  0 23:28 pts/0    00:00:00 grep --color=auto mongo

③ Configure the replica set

Connect to any one of the servers.

Make sure the cluster ports are reachable on every server (open them as in Section 1.4, or disable the firewall).

[root@mgs01 bin]# mongo --host 192.168.56.131 --port 27018
-- Switch database
> use admin;
switched to db admin
-- Initialize the replica set
> rs.initiate({_id:"configs",members:[{_id:0,host:"192.168.56.131:27018"},{_id:1,host:"192.168.56.132:27018"},{_id:2,host:"192.168.56.133:27018"}]})
-- "configs" in _id:"configs" is the replica set name from config.conf above; this joins the three config servers into one replica set.
-- Check the status:
configs:SECONDARY> rs.status();

3.3 Shard Server Deployment

① Create the configuration files

Perform this on each of the three servers.

---------- mgs01 - 192.168.56.131
-- Shard server shard1
# vim /data/mongodb/conf/shard1.conf
dbpath=/data/mongodb/data/shard1 # per shard: the shard1/shard2/shard3 directory
logpath=/data/mongodb/logs/shard1.log # per shard: shard1.log/shard2.log/shard3.log
port=27001 # per shard: 27001/27002/27003
logappend=true
fork=true
maxConns=5000
#storageEngine=mmapv1
shardsvr=true
replSet=shard1 # per shard: shard1/shard2/shard3
bind_ip=0.0.0.0
-- Shard server shard2
# vim /data/mongodb/conf/shard2.conf
dbpath=/data/mongodb/data/shard2 # per shard: the shard1/shard2/shard3 directory
logpath=/data/mongodb/logs/shard2.log # per shard: shard1.log/shard2.log/shard3.log
port=27002 # per shard: 27001/27002/27003
logappend=true
fork=true
maxConns=5000
#storageEngine=mmapv1
shardsvr=true
replSet=shard2 # per shard: shard1/shard2/shard3
bind_ip=0.0.0.0
---------- mgs02 - 192.168.56.132
-- Shard server shard1
# vim /data/mongodb/conf/shard1.conf
dbpath=/data/mongodb/data/shard1 # per shard: the shard1/shard2/shard3 directory
logpath=/data/mongodb/logs/shard1.log # per shard: shard1.log/shard2.log/shard3.log
port=27001 # per shard: 27001/27002/27003
logappend=true
fork=true
maxConns=5000
#storageEngine=mmapv1
shardsvr=true
replSet=shard1 # per shard: shard1/shard2/shard3
bind_ip=0.0.0.0
-- Shard server shard2
# vim /data/mongodb/conf/shard2.conf
dbpath=/data/mongodb/data/shard2 # per shard: the shard1/shard2/shard3 directory
logpath=/data/mongodb/logs/shard2.log # per shard: shard1.log/shard2.log/shard3.log
port=27002 # per shard: 27001/27002/27003
logappend=true
fork=true
maxConns=5000
#storageEngine=mmapv1
shardsvr=true
replSet=shard2 # per shard: shard1/shard2/shard3
bind_ip=0.0.0.0
---------- mgs03 - 192.168.56.133
-- Shard server shard1
# vim /data/mongodb/conf/shard1.conf
dbpath=/data/mongodb/data/shard1 # per shard: the shard1/shard2/shard3 directory
logpath=/data/mongodb/logs/shard1.log # per shard: shard1.log/shard2.log/shard3.log
port=27001 # per shard: 27001/27002/27003
logappend=true
fork=true
maxConns=5000
#storageEngine=mmapv1
shardsvr=true
replSet=shard1 # per shard: shard1/shard2/shard3
bind_ip=0.0.0.0
-- Shard server shard2
# vim /data/mongodb/conf/shard2.conf
dbpath=/data/mongodb/data/shard2 # per shard: the shard1/shard2/shard3 directory
logpath=/data/mongodb/logs/shard2.log # per shard: shard1.log/shard2.log/shard3.log
port=27002 # per shard: 27001/27002/27003
logappend=true
fork=true
maxConns=5000
#storageEngine=mmapv1
shardsvr=true
replSet=shard2 # per shard: shard1/shard2/shard3
bind_ip=0.0.0.0

② Start the shard services

--------192.168.56.131
[root@mgs01 conf]# mongod -f /data/mongodb/conf/shard1.conf
[root@mgs01 conf]# mongod -f /data/mongodb/conf/shard2.conf
--------192.168.56.132
[root@mgs02 conf]# mongod -f /data/mongodb/conf/shard1.conf
[root@mgs02 conf]# mongod -f /data/mongodb/conf/shard2.conf
--------192.168.56.133
[root@mgs03 conf]# mongod -f /data/mongodb/conf/shard1.conf
[root@mgs03 conf]# mongod -f /data/mongodb/conf/shard2.conf

③ Configure the replica sets

Connect to any one of the servers.

---- shard1 replica set
[root@mgs01 data]# mongo --host 192.168.56.131 --port 27001
-- Switch database
> use admin;
-- Initialize the replica set
> rs.initiate({_id:"shard1",members:[{_id:0,host:"192.168.56.131:27001"},{_id:1,host:"192.168.56.132:27001"},{_id:2,host:"192.168.56.133:27001"}]})
---- shard2 replica set
[root@mgs01 data]# mongo --host 192.168.56.132 --port 27002
> rs.initiate({_id:"shard2",members:[{_id:0,host:"192.168.56.131:27002"},{_id:1,host:"192.168.56.132:27002"},{_id:2,host:"192.168.56.133:27002"}]})

3.4 Router (mongos) Servers

Router service deployment (perform the same operation on all three servers).

① Create the configuration file

# vim /data/mongodb/conf/mongos.conf
logpath=/data/mongodb/logs/mongos.log
logappend = true
port = 27017
fork = true
configdb = configs/192.168.56.131:27018,192.168.56.132:27018,192.168.56.133:27018
maxConns=20000
bind_ip=0.0.0.0

② Start the mongos service

[root@mgs01 data]# mongos -f /data/mongodb/conf/mongos.conf 

③ Add the shards

Connect with the mongo shell:

[root@mgs01 data]# mongo --host 192.168.56.131 --port 27017
-- Switch database:
mongos> use admin
switched to db admin
-- Add a shard; this only needs to be run on one machine:
mongos> sh.addShard("shard1/192.168.56.131:27001,192.168.56.132:27001,192.168.56.133:27001");
{
        "shardAdded" : "shard1",
        "ok" : 1,
        "operationTime" : Timestamp(1584116955, 6),
        "$clusterTime" : {
                "clusterTime" : Timestamp(1584116955, 6),
                "signature" : {
                        "hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
                        "keyId" : NumberLong(0)
                }
        }
}
Add the other shard:
mongos> sh.addShard("shard2/192.168.56.131:27002,192.168.56.132:27002,192.168.56.133:27002");
{
        "shardAdded" : "shard2",
        "ok" : 1,
        "operationTime" : Timestamp(1584275052, 1),
        "$clusterTime" : {
                "clusterTime" : Timestamp(1584275052, 1),
                "signature" : {
                        "hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
                        "keyId" : NumberLong(0)
                }
        }
}
-- Check the cluster status
mongos> sh.status();

Section 4: System Testing

4.1 Sharding Test

1) Log in to the router instance

[root@mgs01 local]# mongo --port 27017

2) Enable sharding

mongos> use admin
switched to db admin
-- Enable sharding for the cjspd database
mongos> db.runCommand({"enablesharding":"cjspd"})
{
        "ok" : 1,
        "operationTime" : Timestamp(1584337660, 4),
        "$clusterTime" : {
                "clusterTime" : Timestamp(1584337660, 4),
                "signature" : {
                        "hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
                        "keyId" : NumberLong(0)
                }
        }
}
-- Shard the person collection in the cjspd database with a hashed key on _id
mongos> db.runCommand({"shardcollection":"cjspd.person","key":{_id:"hashed"}})
{
        "collectionsharded" : "cjspd.person",
        "collectionUUID" : UUID("6410f33a-2f93-44e0-b6a0-7f38754fb54d"),
        "ok" : 1,
        "operationTime" : Timestamp(1584337734, 26),
        "$clusterTime" : {
                "clusterTime" : Timestamp(1584337734, 26),
                "signature" : {
                        "hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
                        "keyId" : NumberLong(0)
                }
        }
}

3) Insert data

mongos> use cjspd
switched to db cjspd
mongos>  for(var i=0;i<1000;i++){db.person.insert({name:"chenh"+i});}
WriteResult({ "nInserted" : 1 })
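Before inspecting the individual shards, a quick sanity check through mongos; the loop above inserted 1000 documents, so the count should be 1000:

mongos> db.person.count()
1000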

4) Verify the data was sharded

**** Check how documents were distributed across shards
[root@mgs01 ~]# mongo --port 27001
-- Query on the PRIMARY
shard1:PRIMARY> use cjspd
switched to db cjspd
shard1:PRIMARY> db.person.find()
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d3c"), "name" : "chenh0" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d3d"), "name" : "chenh1" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d46"), "name" : "chenh10" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d47"), "name" : "chenh11" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d49"), "name" : "chenh13" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d4c"), "name" : "chenh16" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d4e"), "name" : "chenh18" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d4f"), "name" : "chenh19" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d50"), "name" : "chenh20" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d51"), "name" : "chenh21" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d52"), "name" : "chenh22" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d54"), "name" : "chenh24" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d56"), "name" : "chenh26" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d57"), "name" : "chenh27" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d58"), "name" : "chenh28" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d59"), "name" : "chenh29" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d5a"), "name" : "chenh30" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d5c"), "name" : "chenh32" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d5e"), "name" : "chenh34" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d60"), "name" : "chenh36" }
[root@mgs01 ~]# mongo --port 27002
shard2:PRIMARY> use cjspd
switched to db cjspd
shard2:PRIMARY> db.person.find()
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d3e"), "name" : "chenh2" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d3f"), "name" : "chenh3" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d40"), "name" : "chenh4" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d41"), "name" : "chenh5" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d42"), "name" : "chenh6" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d43"), "name" : "chenh7" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d44"), "name" : "chenh8" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d45"), "name" : "chenh9" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d48"), "name" : "chenh12" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d4a"), "name" : "chenh14" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d4b"), "name" : "chenh15" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d4d"), "name" : "chenh17" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d53"), "name" : "chenh23" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d55"), "name" : "chenh25" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d5b"), "name" : "chenh31" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d5d"), "name" : "chenh33" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d5f"), "name" : "chenh35" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d64"), "name" : "chenh40" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d65"), "name" : "chenh41" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d68"), "name" : "chenh44" }

5) Verify replica set synchronization

****** Replica set check (shard1)
-- Query on a SECONDARY
[root@mgs02 ~]# mongo --port 27001
shard1:SECONDARY> use cjspd
switched to db cjspd
shard1:SECONDARY> rs.slaveOk();
shard1:SECONDARY> db.person.find()
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d3c"), "name" : "chenh0" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d3d"), "name" : "chenh1" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d46"), "name" : "chenh10" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d47"), "name" : "chenh11" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d49"), "name" : "chenh13" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d4c"), "name" : "chenh16" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d4e"), "name" : "chenh18" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d4f"), "name" : "chenh19" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d51"), "name" : "chenh21" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d50"), "name" : "chenh20" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d52"), "name" : "chenh22" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d56"), "name" : "chenh26" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d54"), "name" : "chenh24" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d59"), "name" : "chenh29" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d57"), "name" : "chenh27" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d58"), "name" : "chenh28" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d5a"), "name" : "chenh30" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d5c"), "name" : "chenh32" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d5e"), "name" : "chenh34" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d60"), "name" : "chenh36" }
[root@mgs03 ~]# mongo --port 27001
shard1:SECONDARY> use cjspd
switched to db cjspd
shard1:SECONDARY> rs.slaveOk();
shard1:SECONDARY> db.person.find()
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d3c"), "name" : "chenh0" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d3d"), "name" : "chenh1" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d46"), "name" : "chenh10" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d47"), "name" : "chenh11" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d49"), "name" : "chenh13" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d4c"), "name" : "chenh16" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d4e"), "name" : "chenh18" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d4f"), "name" : "chenh19" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d50"), "name" : "chenh20" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d51"), "name" : "chenh21" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d52"), "name" : "chenh22" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d54"), "name" : "chenh24" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d56"), "name" : "chenh26" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d58"), "name" : "chenh28" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d57"), "name" : "chenh27" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d59"), "name" : "chenh29" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d5a"), "name" : "chenh30" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d5c"), "name" : "chenh32" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d5e"), "name" : "chenh34" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d60"), "name" : "chenh36" }
****** Replica set check (shard2)
-- Query on a SECONDARY
[root@mgs02 ~]# mongo --port 27002
shard2:SECONDARY> use cjspd
switched to db cjspd
shard2:SECONDARY> rs.slaveOk();
shard2:SECONDARY> db.person.find()
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d3e"), "name" : "chenh2" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d40"), "name" : "chenh4" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d3f"), "name" : "chenh3" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d41"), "name" : "chenh5" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d42"), "name" : "chenh6" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d43"), "name" : "chenh7" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d44"), "name" : "chenh8" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d45"), "name" : "chenh9" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d48"), "name" : "chenh12" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d4a"), "name" : "chenh14" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d4b"), "name" : "chenh15" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d4d"), "name" : "chenh17" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d53"), "name" : "chenh23" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d55"), "name" : "chenh25" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d5b"), "name" : "chenh31" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d5d"), "name" : "chenh33" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d5f"), "name" : "chenh35" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d64"), "name" : "chenh40" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d65"), "name" : "chenh41" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d68"), "name" : "chenh44" }
-- Query on a SECONDARY
[root@mgs03 ~]# mongo --port 27002
shard2:SECONDARY> use cjspd
switched to db cjspd
shard2:SECONDARY> rs.slaveOk();
shard2:SECONDARY> db.person.find()
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d3e"), "name" : "chenh2" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d40"), "name" : "chenh4" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d3f"), "name" : "chenh3" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d41"), "name" : "chenh5" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d43"), "name" : "chenh7" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d42"), "name" : "chenh6" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d45"), "name" : "chenh9" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d44"), "name" : "chenh8" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d48"), "name" : "chenh12" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d4a"), "name" : "chenh14" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d4b"), "name" : "chenh15" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d4d"), "name" : "chenh17" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d53"), "name" : "chenh23" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d55"), "name" : "chenh25" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d5b"), "name" : "chenh31" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d5d"), "name" : "chenh33" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d5f"), "name" : "chenh35" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d64"), "name" : "chenh40" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d65"), "name" : "chenh41" }
{ "_id" : ObjectId("5e6f137fd87da0f642fe9d68"), "name" : "chenh44" }

4.2 Ad-hoc Commands

1) Set the shard chunk size

use config
db.settings.save({"_id":"chunksize","value":1}) # a 1 MB chunk size makes the experiment easy; otherwise a huge amount of data would be needed

2) Simulate writing data

use calon
for(i=1;i<=50000;i++){db.user.insert({"id":i,"name":"jack"+i})} # write 50,000 documents into the user collection of the calon database

3) Enable sharding on the database

sh.enableSharding("calon")

4) Create an index and shard the collection

db.user.createIndex({"id":1}) # index on "id"
sh.shardCollection("calon.user",{"id":1}) # shard the user collection on "id"
sh.status() # check the sharding status

4.3 Failure Simulation

1) Config server failure simulation

-- State before the simulated failure
[root@mgs01 local]# mongo --port 27018
configs:PRIMARY> rs.status();
        "myState" : 1,
                        "_id" : 0,
                        "name" : "192.168.56.131:27018",
                        "health" : 1,
                        "state" : 1,
                        "stateStr" : "PRIMARY",
                        
                        "_id" : 1,
                        "name" : "192.168.56.132:27018",
                        "health" : 1,
                        "state" : 2,
                        "stateStr" : "SECONDARY",     
                        
                        "_id" : 2,
                        "name" : "192.168.56.133:27018",
                        "health" : 1,
                        "state" : 2,
                        "stateStr" : "SECONDARY",
-- Shut down the PRIMARY node
[root@mgs01 local]# mongod -f /data/mongodb/conf/config.conf --shutdown
killing process with pid: 4589
-- Check the result
[root@localhost local]# mongo --port 27018
configs:PRIMARY> rs.status()
        "myState" : 1,
        
                        "_id" : 0,
                        "name" : "192.168.56.131:27018",
                        "health" : 0,
                        "state" : 8,
                        
                        "_id" : 1,
                        "name" : "192.168.56.132:27018",
                        "health" : 1,
                        "state" : 1,
                        "stateStr" : "PRIMARY",
                        
                        "name" : "192.168.56.133:27018",
                        "health" : 1,
                        "state" : 2,
                        "stateStr" : "SECONDARY",
-- After the service is restarted, the node rejoins as SECONDARY
                        "_id" : 0,
                        "name" : "192.168.56.131:27018",
                        "health" : 1,
                        "state" : 2,
                        "stateStr" : "SECONDARY",

2) Shard server failure simulation

-- State before the simulated failure
[root@mgs01 local]# mongo --port 27001
shard1:PRIMARY> rs.status();
        "myState" : 1,
                        "_id" : 0,
                        "name" : "192.168.56.131:27001",
                        "health" : 1,
                        "state" : 1,
                        "stateStr" : "PRIMARY",
                        
                        "_id" : 1,
                        "name" : "192.168.56.132:27001",
                        "health" : 1,
                        "state" : 2,
                        "stateStr" : "SECONDARY",

                        "_id" : 2,
                        "name" : "192.168.56.133:27001",
                        "health" : 1,
                        "state" : 2,
                        "stateStr" : "SECONDARY",
-- Shut down the PRIMARY node
[root@mgs01 ~]# mongod -f /data/mongodb/conf/shard1.conf --shutdown
killing process with pid: 4705
-- Check the result
[root@localhost local]# mongo --port 27001
shard1:PRIMARY> rs.status()
                        "_id" : 0,
                        "name" : "192.168.56.131:27001",
                        "health" : 0,
                        "state" : 8,

                        "_id" : 1,
                        "name" : "192.168.56.132:27001",
                        "health" : 1,
                        "state" : 1,
                        "stateStr" : "PRIMARY",
                        
                        "_id" : 2,
                        "name" : "192.168.56.133:27001",
                        "health" : 1,
                        "state" : 2,
                        "stateStr" : "SECONDARY",

                        "name" : "192.168.56.131:27001",
                        "health" : 1,
                        "state" : 2,
                        "stateStr" : "SECONDARY",
-- After restarting the service, the node comes back as SECONDARY
[root@mgs01 ~]# mongod -f /data/mongodb/conf/shard1.conf
about to fork child process, waiting until server is ready for connections.
forked process: 25826
child process started successfully, parent exiting

                        "_id" : 0,
                        "name" : "192.168.56.131:27001",
                        "health" : 1,
                        "state" : 2,
                        "stateStr" : "SECONDARY",

Section 5: Configure systemd Services

5.1 Router (mongos) Service

# vim /lib/systemd/system/mongos.service
[Unit]
Description=MongoDB Database Router Service (mongos)
After=network.target remote-fs.target nss-lookup.target
[Service]
Type=forking
# PIDFile=/var/run/mongodb/mongod.pid
ExecStart=/usr/local/mongodb/bin/mongos --config /data/mongodb/conf/mongos.conf
ExecStop=/bin/kill $MAINPID
PrivateTmp=false

[Install]
WantedBy=multi-user.target
-- Manage the service (reload systemd after creating the unit file)
# systemctl daemon-reload
# systemctl start mongos
# systemctl stop mongos

5.2 Config Server Service
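The original leaves this subsection empty. A minimal unit file following the same pattern as the mongos service above (a sketch; the unit name mongo-config.service is chosen here for illustration, and paths assume Section 3):

# vim /lib/systemd/system/mongo-config.service
[Unit]
Description=MongoDB Config Server
After=network.target remote-fs.target nss-lookup.target
[Service]
Type=forking
ExecStart=/usr/local/mongodb/bin/mongod --config /data/mongodb/conf/config.conf
ExecStop=/usr/local/mongodb/bin/mongod --config /data/mongodb/conf/config.conf --shutdown
PrivateTmp=false

[Install]
WantedBy=multi-user.target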

5.3 Shard Service
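Also left empty in the original; one unit per shard replica works the same way (a sketch; mongo-shard1.service is an illustrative name, and a shard2 unit would point at shard2.conf):

# vim /lib/systemd/system/mongo-shard1.service
[Unit]
Description=MongoDB Shard Server shard1
After=network.target remote-fs.target nss-lookup.target
[Service]
Type=forking
ExecStart=/usr/local/mongodb/bin/mongod --config /data/mongodb/conf/shard1.conf
ExecStop=/usr/local/mongodb/bin/mongod --config /data/mongodb/conf/shard1.conf --shutdown
PrivateTmp=false

[Install]
WantedBy=multi-user.target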

At this point, the MongoDB distributed cluster is fully deployed.

Section 6: Reference Configuration

6.1 Configuration Files

config.conf

# vim /data/mongodb/conf/config.conf
dbpath=/data/mongodb/data/config
logpath=/data/mongodb/logs/config.log
port=27018
logappend=true
fork=true
maxConns=5000
# Replica set name
replSet=configs
# Run as a config server
configsvr=true
# Allow connections from any host
bind_ip=0.0.0.0

mongos.conf

# vim /data/mongodb/conf/mongos.conf
logpath=/data/mongodb/logs/mongos.log
logappend = true
port = 27017
fork = true
configdb = configs/192.168.56.131:27018,192.168.56.132:27018,192.168.56.133:27018
maxConns=20000
bind_ip=0.0.0.0

shard1.conf

# vim /data/mongodb/conf/shard1.conf
dbpath=/data/mongodb/data/shard1 # per shard: the shard1/shard2/shard3 directory
logpath=/data/mongodb/logs/shard1.log # per shard: shard1.log/shard2.log/shard3.log
port=27001 # per shard: 27001/27002/27003
logappend=true
fork=true
maxConns=5000
#storageEngine=mmapv1 # MMAPv1 was removed in MongoDB 4.2; keep this commented out (WiredTiger is the default)
shardsvr=true
replSet=shard1 # per shard: shard1/shard2/shard3
bind_ip=0.0.0.0

shard2.conf

# vim /data/mongodb/conf/shard2.conf
dbpath=/data/mongodb/data/shard2 # per shard: the shard1/shard2/shard3 directory
logpath=/data/mongodb/logs/shard2.log # per shard: shard1.log/shard2.log/shard3.log
port=27002 # per shard: 27001/27002/27003
logappend=true
fork=true
maxConns=5000
#storageEngine=mmapv1 # MMAPv1 was removed in MongoDB 4.2; keep this commented out (WiredTiger is the default)
shardsvr=true
replSet=shard2 # per shard: shard1/shard2/shard3
bind_ip=0.0.0.0

6.2 Installation Commands

Add shards

mongos> sh.addShard("192.168.200.A:40000") # add a shard
{ "shardAdded" : "shard0000", "ok" : 1 }
mongos> sh.addShard("192.168.200.B:40000") # add a shard
{ "shardAdded" : "shard0001", "ok" : 1 }
mongos> sh.addShard("192.168.200.C:40000") # add a shard
{ "shardAdded" : "shard0002", "ok" : 1 }

Chapter 3: Database Administration

Section 1: Shard Management

1. Add shards

sh.addShard("IP:Port")

mongos> sh.addShard("192.168.200.A:40000") # add a shard
{ "shardAdded" : "shard0000", "ok" : 1 }
mongos> sh.addShard("192.168.200.B:40000") # add a shard
{ "shardAdded" : "shard0001", "ok" : 1 }
mongos> sh.addShard("192.168.200.C:40000") # add a shard
{ "shardAdded" : "shard0002", "ok" : 1 }
mongos> sh.addShard("shard1/192.168.56.131:27001,192.168.56.132:27001,192.168.56.133:27001");

2. Enable sharding

sh.enableSharding("<database>") and sh.shardCollection("<database>.<collection>",{"key":1})

mongos> sh.enableSharding("dba")
{ "ok" : 1 }

mongos> sh.shardCollection("dba.account",{"name":1})
{ "collectionsharded" : "dba.account", "ok" : 1 }

3. Check sharding status

mongos> sh.status()
--- Sharding Status ---...
  shards:
    {  "_id" : "shard0000",  "host" : "192.168.200.A:40000" }
    {  "_id" : "shard0001",  "host" : "192.168.200.B:40000" }
    {  "_id" : "shard0002",  "host" : "192.168.200.C:40000" }
...
  databases:  # databases
    {  "_id" : "admin",  "partitioned" : false,  "primary" : "config" }
    {  "_id" : "test",  "partitioned" : false,  "primary" : "shard0000" }
    {  "_id" : "dba",  "partitioned" : true,  "primary" : "shard0000" } # sharding enabled
        dba.account   # sharded collection under dba
            shard key: { "name" : 1 }
            chunks:   # chunk info
                shard0000    1
            { "name" : { "$minKey" : 1 } } -->> { "name" : { "$maxKey" : 1 } } on : shard0000 Timestamp(1, 0)  # range of the data in the chunk

4. Shard metadata

The queries below run against the config database (use config). Shard information is stored in config.shards:

mongos> db.shards.find()
{ "_id" : "shard0000", "host" : "192.168.200.51:40000" }
{ "_id" : "shard0001", "host" : "192.168.200.52:40000" }
{ "_id" : "shard0002", "host" : "192.168.200.53:40000" }
{ "_id" : "mablevi", "host" : "mablevi/192.168.200.53:50000,192.168.200.53:50001,192.168.200.53:50002" } # a replica-set shard; not every member needs to be listed

All databases in the sharded cluster: config.databases

mongos> db.databases.find()
{ "_id" : "admin", "partitioned" : false, "primary" : "config" }
{ "_id" : "test", "partitioned" : false, "primary" : "shard0001" }
{ "_id" : "dba", "partitioned" : true, "primary" : "shard0000" }  # sharded
{ "_id" : "abc", "partitioned" : true, "primary" : "shard0000" }  # sharded

Sharded collection info: config.collections

mongos> db.collections.findOne()
{
    "_id" : "dba.account",
    "lastmod" : ISODate("2015-07-14T15:12:29.706Z"),
    "dropped" : false,
    "key" : {          # shard key
        "name" : 1
    },
    "unique" : false,  # whether the shard key is unique
    "lastmodEpoch" : ObjectId("55a526dd511d36716224fb77")
}

mongos router info: config.mongos; shows the status of every mongos

mongos> db.mongos.findOne()
{
    "_id" : "mongo2:30000",
    "ping" : ISODate("2015-07-27T03:13:06.178Z"),
    "up" : 323671,      # uptime in seconds
    "waiting" : true,
    "mongoVersion" : "3.0.4" # version
}

Balancer lock info: config.locks records all cluster-wide locks; it shows which mongos is acting as the balancer.

mongos> db.locks.findOne()
{
    "_id" : "balancer",   # the balancer lock
    "state" : 1,          # 0 = inactive; 1 = trying to acquire the lock; 2 = balancing in progress
    "who" : "mongo1:30000:1436888525:1804289383:Balancer:846930886", # which mongos is acting as the balancer
    "ts" : ObjectId("55b5a2d5fdd9a605a039f951"),
    "process" : "mongo1:30000:1436888525:1804289383",
    "when" : ISODate("2015-07-27T03:17:41.159Z"),
    "why" : "doing balance round" # the lock was taken for balancing
}

All chunk info is recorded in config.chunks (also viewable via sh.status()):

mongos> db.chunks.find().pretty()
{
    "_id" : "dba.account-name_MinKey",   # identifier
    "lastmod" : Timestamp(2, 0),         # chunk version
    "lastmodEpoch" : ObjectId("55a526dd511d36716224fb77"), # chunk version epoch
    "ns" : "dba.account",    # namespace (collection)
    "min" : {                # data range of the chunk
        "name" : { "$minKey" : 1 }
    },
    "max" : {
        "name" : "9XXqCaBhfhPIXLq"
    },
    "shard" : "mablevi"      # owning shard
}
{
    "_id" : "dba.account-name_\"9XXqCaBhfhPIXLq\"",
    "lastmod" : Timestamp(4, 0),
    "lastmodEpoch" : ObjectId("55a526dd511d36716224fb77"),
    "ns" : "dba.account",
    "min" : {
        "name" : "9XXqCaBhfhPIXLq"
    },
    "max" : {
        "name" : "RWINvgjYYQmbZds"
    },
    "shard" : "shard0002"
}

All sharding operations are recorded in config.changelog: splits and migrations.

Split:

{
    "_id" : "mongo1-2015-07-14T15:12:40-55a526e8f0432675a473009c",
    "server" : "mongo1",
    "clientAddr" : "192.168.200.52:53257",
    "time" : ISODate("2015-07-14T15:12:40.375Z"),
    "what" : "multi-split",  # a split
    "ns" : "dba.account",
    "details" : {            # details of the split
        "before" : {
            "min" : {        # range before the split
                "name" : { "$minKey" : 1 }
            },
            "max" : {
                "name" : { "$maxKey" : 1 }
            }
        },
        "number" : 1,   # the first chunk
        "of" : 3,       # split into 3 chunks
        "chunk" : {
            "min" : {
                "name" : { "$minKey" : 1 }
            },
            "max" : {
                "name" : "9XXqCaBhfhPIXLq"
            },
            "lastmod" : Timestamp(1, 1), # version
            "lastmodEpoch" : ObjectId("55a526dd511d36716224fb77")
        }
    }
}
{
    "_id" : "mongo1-2015-07-14T15:12:40-55a526e8f0432675a473009d",
    "server" : "mongo1",
    "clientAddr" : "192.168.200.52:53257",
    "time" : ISODate("2015-07-14T15:12:40.378Z"),
    "what" : "multi-split",
    "ns" : "dba.account",
    "details" : {
        "before" : {
            "min" : {
                "name" : { "$minKey" : 1 }
            },
            "max" : {
                "name" : { "$maxKey" : 1 }
            }
        },
        "number" : 2,
        "of" : 3,
        "chunk" : {
            "min" : {
                "name" : "9XXqCaBhfhPIXLq"
            },
            "max" : {
                "name" : "okmjUUZuuKgftDC"
            },
            "lastmod" : Timestamp(1, 2), # version
            "lastmodEpoch" : ObjectId("55a526dd511d36716224fb77")
        }
    }
}
{
    "_id" : "mongo1-2015-07-14T15:12:40-55a526e8f0432675a473009e",
    "server" : "mongo1",
    "clientAddr" : "192.168.200.52:53257",
    "time" : ISODate("2015-07-14T15:12:40.381Z"),
    "what" : "multi-split",
    "ns" : "dba.account",
    "details" : {
        "before" : {
            "min" : {
                "name" : { "$minKey" : 1 }
            },
            "max" : {
                "name" : { "$maxKey" : 1 }
            }
        },
        "number" : 3,
        "of" : 3,
        "chunk" : {
            "min" : {
                "name" : "okmjUUZuuKgftDC"
            },
            "max" : {
                "name" : { "$maxKey" : 1 }
            },
            "lastmod" : Timestamp(1, 3),  # version
            "lastmodEpoch" : ObjectId("55a526dd511d36716224fb77")
        }
    }
}

Migration: four documents

{
    "_id" : "mongo1-2015-07-14T15:12:41-55a526e9f0432675a47300a1",
    "server" : "mongo1",
    "clientAddr" : "192.168.200.52:53257",
    "time" : ISODate("2015-07-14T15:12:41.406Z"),
    "what" : "moveChunk.start",  # migration start
    "ns" : "dba.account",
    "details" : {
        "min" : {
            "name" : { "$minKey" : 1 }
        },
        "max" : {
            "name" : "9XXqCaBhfhPIXLq"
        },
        "from" : "shard0000",
        "to" : "mablevi"
    }
}
{
    "_id" : "mongo3-2015-07-14T15:12:42-55a526ead5bee637c12aadd4",
    "server" : "mongo3",
    "clientAddr" : ":27017",
    "time" : ISODate("2015-07-14T15:12:42.419Z"),
    "what" : "moveChunk.to",   # recorded by the destination (to) shard
    "ns" : "dba.account",
    "details" : {
        "min" : {
            "name" : { "$minKey" : 1 }
        },
        "max" : {
            "name" : "9XXqCaBhfhPIXLq"
        },
        "step 1 of 5" : 327,  # durations in ms: validate the command parameters
        "step 2 of 5" : 301,  # acquire the distributed lock
        "step 3 of 5" : 1,    # connect to the other shard
        "step 4 of 5" : 0,    # copy the data; each shard talks directly to the other shard and to the config servers
        "step 5 of 5" : 407,  # confirm completion with the other shard and the config servers
        "note" : "success"
    }
}
{
    "_id" : "mongo1-2015-07-14T15:12:42-55a526eaf0432675a47300a2",
    "server" : "mongo1",
    "clientAddr" : "192.168.200.52:53257",
    "time" : ISODate("2015-07-14T15:12:42.854Z"),
    "what" : "moveChunk.commit",  # commit the migration
    "ns" : "dba.account",
    "details" : {
        "min" : {
            "name" : { "$minKey" : 1 }
        },
        "max" : {
            "name" : "9XXqCaBhfhPIXLq"
        },
        "from" : "shard0000",
        "to" : "mablevi",
        "cloned" : NumberLong(1),
        "clonedBytes" : NumberLong(94),
        "catchup" : NumberLong(0),
        "steady" : NumberLong(0)
    }
}
{
    "_id" : "mongo1-2015-07-14T15:12:43-55a526ebf0432675a47300a3",
    "server" : "mongo1",
    "clientAddr" : "192.168.200.52:53257",
    "time" : ISODate("2015-07-14T15:12:43.258Z"),
    "what" : "moveChunk.from",  # recorded by the source (from) shard
    "ns" : "dba.account",
    "details" : {
        "min" : {
            "name" : { "$minKey" : 1 }
        },
        "max" : {
            "name" : "9XXqCaBhfhPIXLq"
        },    # step durations in ms
        "step 1 of 6" : 0,      # migrate indexes
        "step 2 of 6" : 613,    # delete data in the chunk range, cleaning up residual documents
        "step 3 of 6" : 2,      # copy the documents to the destination shard
        "step 4 of 6" : 1029,   # apply operations that arrived during the copy on the destination shard
        "step 5 of 6" : 415,    # wait for the destination shard to replicate the newly migrated data
        "step 6 of 6" : 0,      # update the metadata to complete the migration
        "to" : "mablevi",
        "from" : "shard0000",
        "note" : "success"
    }
}

Shard tags: config.tags, managed with sh.addShardTag

mongos> db.tags.findOne()
{
    "_id" : {
        "ns" : "abc.number",
        "min" : {
            "num" : 0
        }
    },
    "ns" : "abc.number",
    "min" : {
        "num" : 0
    },
    "max" : {
        "num" : 20
    },
    "tag" : "AAA"
}

Shard settings: config.settings stores the chunk size and the balancer on/off switch

mongos> db.settings.find()
{ "_id" : "chunksize", "value" : 32 }
{ "_id" : "balancer", "stopped" : false }
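The balancer can also be inspected and toggled from the mongos shell with the standard helpers:

mongos> sh.getBalancerState()   # true if the balancer is enabled
mongos> sh.stopBalancer()       # disable balancing
mongos> sh.startBalancer()      # re-enable balancing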

Network connection counts: db.adminCommand({"connPoolStats":1})

mongos> db.adminCommand({"connPoolStats":1})
{
    "hosts" : {
        "192.168.200.51:20000::0" : {
            "available" : 1,
            "created" : 1
        },
        "192.168.200.51:20000::30" : {
            "available" : 1,
            "created" : 1
        },
        "192.168.200.51:20000,192.168.200.51:21000,192.168.200.51:22000::0" : {
            "available" : 1,
            "created" : 1
        },
        "192.168.200.51:20000,192.168.200.51:21000,192.168.200.51:22000::30" : {
            "available" : 3,
            "created" : 422
        },
        "192.168.200.51:21000::0" : {
            "available" : 1,
            "created" : 1
        },
        "192.168.200.51:21000::30" : {
            "available" : 1,
            "created" : 1
        },
        "192.168.200.51:22000::0" : {
            "available" : 1,
            "created" : 1
        },
        "192.168.200.51:22000::30" : {
            "available" : 1,
            "created" : 1
        },
        "192.168.200.51:40000::0" : {
            "available" : 2,
            "created" : 2
        },
        "192.168.200.52:40000::0" : {
            "available" : 1,
            "created" : 4
        },
        "192.168.200.53:40000::0" : {
            "available" : 1,
            "created" : 2
        },
        "192.168.200.53:50000::5" : {
            "available" : 1,
            "created" : 2
        },
        "192.168.200.53:50001::0" : {
            "available" : 1,
            "created" : 1
        },
        "192.168.200.53:50001::5" : {
            "available" : 1,
            "created" : 2
        },
        "192.168.200.53:50002::0" : {
            "available" : 1,
            "created" : 1
        },
        "192.168.200.53:50002::5" : {
            "available" : 1,
            "created" : 2
        },
        "mablevi/192.168.200.53:50000,192.168.200.53:50001,192.168.200.53:50002::0" : {
            "available" : 2,
            "created" : 3
        }
    },
    "replicaSets" : {
        "mablevi" : {
            "hosts" : [
                {
                    "addr" : "192.168.200.53:50000",
                    "ok" : true,
                    "ismaster" : true,
                    "hidden" : false,
                    "secondary" : false,
                    "pingTimeMillis" : 1
                },
                {
                    "addr" : "192.168.200.53:50001",
                    "ok" : true,
                    "ismaster" : false,
                    "hidden" : false,
                    "secondary" : true,
                    "pingTimeMillis" : 1
                },
                {
                    "addr" : "192.168.200.53:50002",
                    "ok" : true,
                    "ismaster" : false,
                    "hidden" : false,
                    "secondary" : true,
                    "pingTimeMillis" : 1
                }
            ]
        }
    },
    "createdByType" : {
        "master" : 22,
        "set" : 3,
        "sync" : 423
    },
    "totalAvailable" : 21,   # total available connections
    "totalCreated" : 448,    # total connections created
    "numDBClientConnection" : 40,
    "numAScopedConnection" : 1,
    "ok" : 1
}

Chunk Split Management

mongos tracks how much data has been inserted into each chunk. Once a threshold is reached, it checks whether the chunk needs to be split and, if so, updates that chunk's metadata on the config servers. The process (a manual-split sketch follows the list):

① A client issues a request; mongos checks the current chunk against its split threshold.

② When the threshold is reached, mongos sends a split request to the shard.

③ The shard computes the split point(s) and sends them back to mongos.

④ mongos chooses a split point and sends this information to the config servers.
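Besides automatic splitting, chunks can be split by hand from mongos, for example on the dba.account collection used earlier (the split value "M" is an arbitrary illustration):

mongos> sh.splitAt("dba.account", { "name" : "M" })    # split the owning chunk exactly at name "M"
mongos> sh.splitFind("dba.account", { "name" : "M" })  # split the chunk containing the match at its median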
