MongoDB复制集部署:
1.配置复制集
创建数据文件和日志文件存储路径:
[root@localhost ~]# mkdir -p /data/mongodb{1,2,3,4} /data/logs/mongodb
[root@localhost ~]# touch /data/logs/mongodb/mongodb{1,2,3,4}.log
[root@localhost ~]# chmod 777 /data/logs/mongodb/mongodb*
2.编辑4个MongoDB实例的配置文件,配置replSet参数都设置为kgcrs,复制3份,操作如下:
vim /usr/local/mongodb/bin/mongodb1.conf
port=27017
dbpath=/data/mongodb1
logpath=/data/logs/mongodb/mongodb1.log
logappend=true
fork=true
maxConns=5000
storageEngine=mmapv1
httpinterface=true
replSet=kgcrs
[root@localhost ~]# cp /usr/local/mongodb/bin/mongodb1.conf /usr/local/mongodb/bin/mongodb2.conf
[root@localhost ~]# cp /usr/local/mongodb/bin/mongodb1.conf /usr/local/mongodb/bin/mongodb3.conf
[root@localhost ~]# cp /usr/local/mongodb/bin/mongodb1.conf /usr/local/mongodb/bin/mongodb4.conf
修改各个配置文件的端口,数据存储路径和日志存放路径
3.编写启动停止MongoDB脚本,启动4个MongoDB节点实例
脚本如下:
[root@localhost ~]# vim mongodb.sh
#!/bin/bash
# Control script for the local MongoDB replica-set member instances.
# Usage: mongodb.sh INSTANCE_NAME(e.g. mongodb1) ACTION(start|stop|restart)
# Each instance's settings live in /usr/local/mongodb/bin/<INSTANCE>.conf.

readonly CONF_DIR=/usr/local/mongodb/bin

# Print usage to stderr.
usage() {
  echo "Usage : $0 INSTANCE_NAME(Example:mongodb1) ACTION(start|stop|restart)" >&2
}

# Dispatch ACTION for the named instance.
# $1 - instance name (config file basename, e.g. mongodb1)
# $2 - start | stop | restart
# Returns mongod's exit status, or non-zero on a bad action / missing config.
main() {
  local instance=${1:-}
  local action=${2:-}
  local conf="${CONF_DIR}/${instance}.conf"

  # Validate the action first so an unknown verb always shows usage.
  case "$action" in
    start|stop|restart) ;;
    *)
      usage
      return 1
      ;;
  esac

  # Never hand mongod an empty or bogus -f path.
  if [[ ! -f "$conf" ]]; then
    echo "Error: config file not found: $conf" >&2
    return 1
  fi

  case "$action" in
    start)
      mongod -f "$conf"
      ;;
    stop)
      mongod -f "$conf" --shutdown
      ;;
    restart)
      # --shutdown stops the running instance before we fork a new one.
      mongod -f "$conf" --shutdown
      mongod -f "$conf"
      ;;
  esac
}

main "$@"
[root@localhost ~]# chmod +x mongodb.sh
[root@localhost ~]# ./mongodb.sh mongodb1 start
about to fork child process, waiting until server is ready for connections.
forked process: 4698
child process started successfully, parent exiting
[root@localhost ~]# ./mongodb.sh mongodb2 start
about to fork child process, waiting until server is ready for connections.
forked process: 4726
child process started successfully, parent exiting
[root@localhost ~]# ./mongodb.sh mongodb3 start
about to fork child process, waiting until server is ready for connections.
forked process: 4753
child process started successfully, parent exiting
[root@localhost ~]# ./mongodb.sh mongodb4 start
about to fork child process, waiting until server is ready for connections.
forked process: 4780
child process started successfully, parent exiting
[root@localhost ~]# ps -aux | grep mongod
root 4698 2.7 5.8 1643528 109736 ? Sl 23:24 0:00 mongod -f /usr/local/mongodb/bin/mongodb1.conf
root 4726 10.3 4.7 1479580 88616 ? Sl 23:24 0:01 mongod -f /usr/local/mongodb/bin/mongodb2.conf
root 4753 4.4 3.2 1151996 60996 ? Sl 23:24 0:00 mongod -f /usr/local/mongodb/bin/mongodb3.conf
root 4780 6.6 2.9 1151992 54344 ? Sl 23:24 0:00 mongod -f /usr/local/mongodb/bin/mongodb4.conf
root 4805 0.0 0.0 112648 960 pts/0 S+ 23:24 0:00 grep --color=auto mongod
[root@localhost ~]#
4.初始化配置复制集
[root@localhost ~]# mongo
>
> show dbs
2020-07-10T23:29:52.190+0800 E QUERY [thread1] Error: listDatabases failed:{
"ok" : 0,
"errmsg" : "not master and slaveOk=false",
"code" : 13435,
"codeName" : "NotMasterNoSlaveOk"
} :
_getErrorWithCode@src/mongo/shell/utils.js:25:13
Mongo.prototype.getDBs@src/mongo/shell/mongo.js:62:1
shellHelper.show@src/mongo/shell/utils.js:814:19
shellHelper@src/mongo/shell/utils.js:704:15
@(shellhelp2):1:1
>
> rs.status()
{
"info" : "run rs.initiate(...) if not yet done for the set",
"ok" : 0,
"errmsg" : "no replset config has been received",
"code" : 94,
"codeName" : "NotYetInitialized"
}
> cfg={"_id":"kgcrs","members":[{"_id":"0","host":"192.168.1.101:27017"},{"id":1,"host":"192.168.1.101:27018"},{"id":2,"host":"192.168.1.101:27019"},{"id":3,"host":"192.168.1.101:27020"}];
... }
2020-07-10T23:33:51.985+0800 E QUERY [thread1] SyntaxError: missing } after property list @(shell):1:185
>
>
> cfg={"_id":"kgcrs","members":[{"_id":0,"host":"192.168.1.101:27017"},{"_id":2,"host":"192.168.1.101:27019"},{"_id":3,"host":"192.168.1.101:27020"}]}
{
"_id" : "kgcrs",
"members" : [
{
"_id" : 0,
"host" : "192.168.1.101:27017"
},
{
"_id" : 2,
"host" : "192.168.1.101:27019"
},
{
"_id" : 3,
"host" : "192.168.1.101:27020"
}
]
}
>
> rs.initiate(cfg)
{ "ok" : 1 }
kgcrs:OTHER>
kgcrs:PRIMARY>
这里先通过rs.status()命令来查看复制集的状态信息,提示复制集还未配置,按着定义cfg初始化参数,最后通过rs.initiate(cfg)命令启动复制集
5.查看复制集状态
kgcrs:PRIMARY> rs.status()
{
"set" : "kgcrs",
"date" : ISODate("2020-07-10T15:43:53.841Z"),
"myState" : 1,
"term" : NumberLong(1),
"syncingTo" : "",
"syncSourceHost" : "",
"syncSourceId" : -1,
"heartbeatIntervalMillis" : NumberLong(2000),
"optimes" : {
"lastCommittedOpTime" : {
"ts" : Timestamp(1594395831, 1),
"t" : NumberLong(1)
},
"appliedOpTime" : {
"ts" : Timestamp(1594395831, 1),
"t" : NumberLong(1)
},
"durableOpTime" : {
"ts" : Timestamp(1594395831, 1),
"t" : NumberLong(1)
}
},
"members" : [
{
"_id" : 0,
"name" : "192.168.1.101:27017",
"health" : 1,
"state" : 1,
"stateStr" : "PRIMARY",
"uptime" : 1171,
"optime" : {
"ts" : Timestamp(1594395831, 1),
"t" : NumberLong(1)
},
"optimeDate" : ISODate("2020-07-10T15:43:51Z"),
"syncingTo" : "",
"syncSourceHost" : "",
"syncSourceId" : -1,
"infoMessage" : "",
"electionTime" : Timestamp(1594395640, 1),
"electionDate" : ISODate("2020-07-10T15:40:40Z"),
"configVersion" : 1,
"self" : true,
"lastHeartbeatMessage" : ""
},
{
"_id" : 2,
"name" : "192.168.1.101:27019",
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY",
"uptime" : 204,
"optime" : {
"ts" : Timestamp(1594395831, 1),
"t" : NumberLong(1)
},
"optimeDurable" : {
"ts" : Timestamp(1594395831, 1),
"t" : NumberLong(1)
},
"optimeDate" : ISODate("2020-07-10T15:43:51Z"),
"optimeDurableDate" : ISODate("2020-07-10T15:43:51Z"),
"lastHeartbeat" : ISODate("2020-07-10T15:43:52.367Z"),
"lastHeartbeatRecv" : ISODate("2020-07-10T15:43:53.649Z"),
"pingMs" : NumberLong(0),
"lastHeartbeatMessage" : "",
"syncingTo" : "192.168.1.101:27017",
"syncSourceHost" : "192.168.1.101:27017",
"syncSourceId" : 0,
"infoMessage" : "",
"configVersion" : 1
},
{
"_id" : 3,
"name" : "192.168.1.101:27020",
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY",
"uptime" : 204,
"optime" : {
"ts" : Timestamp(1594395831, 1),
"t" : NumberLong(1)
},
"optimeDurable" : {
"ts" : Timestamp(1594395831, 1),
"t" : NumberLong(1)
},
"optimeDate" : ISODate("2020-07-10T15:43:51Z"),
"optimeDurableDate" : ISODate("2020-07-10T15:43:51Z"),
"lastHeartbeat" : ISODate("2020-07-10T15:43:52.367Z"),
"lastHeartbeatRecv" : ISODate("2020-07-10T15:43:52.071Z"),
"pingMs" : NumberLong(0),
"lastHeartbeatMessage" : "",
"syncingTo" : "192.168.1.101:27019",
"syncSourceHost" : "192.168.1.101:27019",
"syncSourceId" : 2,
"infoMessage" : "",
"configVersion" : 1
}
],
"ok" : 1
}
kgcrs:PRIMARY>
其中,health为1代表健康,0代表宕机,state为1代表主节点,为2代表从节点
注意:
在复制集初始化配置时要保证从节点上没有数据
6.增加和删除节点:
复制集启动完了之后,可以通过rs.add()和rs.remove()命令添加和删除节点
kgcrs:PRIMARY> rs.add("192.168.1.101:27018")
{ "ok" : 1 }
kgcrs:PRIMARY> rs.remove("192.168.1.101:27020");
{ "ok" : 1 }
kgcrs:PRIMARY> rs.status()
MongoDB复制集切换:
1.模拟故障:
[root@localhost ~]# ps -aux | grep mongod
root 3413 0.4 2.1 5930868 39876 ? Sl 18:41 0:01 mongod --config /usr/local/mongodb/bin/mongodb1.conf
root 3435 0.4 2.1 5866320 40068 ? Sl 18:41 0:01 mongod --config /usr/local/mongodb/bin/mongodb2.conf
root 3456 0.4 2.1 5903200 40136 ? Sl 18:41 0:01 mongod --config /usr/local/mongodb/bin/mongodb3.conf
root 3477 0.4 3.0 5867348 56704 ? Sl 18:41 0:01 mongod --config /usr/local/mongodb/bin/mongodb4.conf
root 3779 0.0 0.0 112652 960 pts/0 S+ 18:47 0:00 grep --color=auto mongod
[root@localhost ~]# kill -2 3413
[root@localhost ~]# ps -aux | grep mongod
root 3435 0.4 2.1 5866320 40100 ? Sl 18:41 0:01 mongod --config /usr/local/mongodb/bin/mongodb2.conf
root 3456 0.4 2.1 5903200 40140 ? Sl 18:41 0:01 mongod --config /usr/local/mongodb/bin/mongodb3.conf
root 3477 0.4 2.9 5867348 55508 ? Sl 18:41 0:01 mongod --config /usr/local/mongodb/bin/mongodb4.conf
root 3789 0.0 0.0 112652 960 pts/0 S+ 18:47 0:00 grep --color=auto mongod
关闭实例1(mongodb1):
可以查看到主节点切换到192.168.1.101:27019即实例3(mongodb3)
"_id" : 2,
"name" : "192.168.1.101:27019",
"health" : 1,
"state" : 1,
"stateStr" : "PRIMARY",
2.手动进行主从切换:
再次启动实例1(mongodb1)
[root@localhost ~]#
[root@localhost ~]# mongod -f /usr/local/mongodb/bin/mongodb1.conf
about to fork child process, waiting until server is ready for connections.
forked process: 3829
child process started successfully, parent exiting
[root@localhost ~]#
[root@localhost ~]# ps -aux | grep mongod
root 3435 0.5 2.1 5866320 40708 ? Sl 18:41 0:02 mongod --config /usr/local/mongodb/bin/mongodb2.conf
root 3456 0.5 3.0 5944168 57640 ? Sl 18:41 0:02 mongod --config /usr/local/mongodb/bin/mongodb3.conf
root 3477 0.4 2.9 5867348 55800 ? Sl 18:41 0:02 mongod --config /usr/local/mongodb/bin/mongodb4.conf
root 3829 1.0 3.8 5866912 71356 ? Sl 18:49 0:00 mongod -f /usr/local/mongodb/bin/mongodb1.conf
root 3912 0.0 0.0 112652 960 pts/0 S+ 18:49 0:00 grep --color=auto mongod
登录实例3(mongodb3)
[root@localhost ~]# mongo --port 27019
kgcrs:PRIMARY> rs.freeze(30);
{
"ok" : 0,
"errmsg" : "cannot freeze node when primary or running for election. state: Primary",
"code" : 95,
"codeName" : "NotSecondary"
}
kgcrs:PRIMARY> rs.stepDown(60,30);
输入以上两条命令后发现主节点转移为实例2(mongodb2):
"_id" : 1,
"name" : "192.168.1.101:27018",
"health" : 1,
"state" : 1,
"stateStr" : "PRIMARY",
注:
rs.freeze(30)为暂停30秒不参加选举
rs.stepDown(60,30):告诉主节点交出主节点位置,然后维持从节点状态不少于60秒,同时等待30秒以使主节点和从节点日志同步
MongoDB复制选举原理:
复制原理:
复制是基于oplog实现的,oplog相当于MySQL数据库的二进制日志,只记录发生改变的操作。复制就是将主节点的oplog日志同步并应用到其它从节点的过程
选举原理:
节点类型分为标准节点(host),被动节点(passive),仲裁节点(arbiter)
1.只有标准节点可能被选举为活跃节点(primary),有选举权。被动节点有完整副本,不可能成为活跃节点,有选举权。仲裁节点不复制数据,不可能成为活跃节点,只有选举权
2.标准节点和被动节点的区别:priority值高者是标准节点,低者则为被动节点。
3.选举规则是票数高者获胜,priority是优先权为0~1000的值,相当于额外增加0~1000的票数。选举结果:票数高者获胜,若票数相同,数据新者获胜:
如图所示:
验证复制集的选举原理:
1.查看oplog日志:
[root@localhost ~]# mongo --port 27018
kgcrs:PRIMARY>
kgcrs:PRIMARY> show dbs;
admin 0.078GB
local 2.077GB
kgcrs:PRIMARY> use kgc;
switched to db kgc
kgcrs:PRIMARY> db.t1.insert({"id":1,"name":"Tome"})
WriteResult({ "nInserted" : 1 })
kgcrs:PRIMARY> db.t1.insert({"id":2,"name":"Jerry"})
WriteResult({ "nInserted" : 1 })
kgcrs:PRIMARY> db.t1.find()
{ "_id" : ObjectId("5f09a1951ad8248fd8a53d49"), "id" : 1, "name" : "Tome" }
{ "_id" : ObjectId("5f09a1b51ad8248fd8a53d4a"), "id" : 2, "name" : "Jerry" }
kgcrs:PRIMARY> db.t1.update({"id":2},{"$set":{"name":"Bob"}})
WriteResult({ "nMatched" : 1, "nUpserted" : 0, "nModified" : 1 })
kgcrs:PRIMARY> use local
switched to db local
kgcrs:PRIMARY> use kgc
switched to db kgc
kgcrs:PRIMARY> db.t1.remove({"id":2})
WriteResult({ "nRemoved" : 1 })
kgcrs:PRIMARY> use local
switched to db local
kgcrs:PRIMARY> show collections
me
oplog.rs
replset.election
replset.minvalid
startup_log
system.indexes
system.replset
kgcrs:PRIMARY> db.oplog.rs.find()
{ "ts" : Timestamp(1594464303, 1), "h" : NumberLong("7752197714819355477"), "v" : 2, "op" : "n", "ns" : "", "o" : { "msg" : "initiating set" } }
{ "ts" : Timestamp(1594464315, 1), "t" : NumberLong(1), "h" : NumberLong("2460754299542242563"), "v" : 2, "op" : "n", "ns" : "", "o" : { "msg" : "new primary" } }
{ "ts" : Timestamp(1594464325, 1), "t" : NumberLong(1), "h" : NumberLong("-5883241644124023360"), "v" : 2, "op" : "n", "ns" : "", "o" : { "msg" : "periodic noop" } }
其中每个文档都代表主节点上执行的一个操作,oplog会包含所有对数据有修改的操作(查询操作不会记录)
2.配置复制集的优先级:
kgcrs:PRIMARY> cfg={"_id":"kgcrs","members":[{"_id":0,"host":"192.168.1.101:27017","priority":100},{"_id":1,"host":"192.168.1.101:27018","priority":100},{"_id":2,"host":"192.168.1.101:27019","priority":0},{"_id":3,"host":"192.168.1.101:27020","arbiterOnly":true}]}
{
"_id" : "kgcrs",
"members" : [
{
"_id" : 0,
"host" : "192.168.1.101:27017",
"priority" : 100
},
{
"_id" : 1,
"host" : "192.168.1.101:27018",
"priority" : 100
},
{
"_id" : 2,
"host" : "192.168.1.101:27019",
"priority" : 0
},
{
"_id" : 3,
"host" : "192.168.1.101:27020",
"arbiterOnly" : true
}
]
}
kgcrs:PRIMARY>
kgcrs:PRIMARY>
kgcrs:PRIMARY> rs.reconfig(cfg)
{ "ok" : 1 }
kgcrs:PRIMARY> rs.isMaster()
{
"hosts" : [
"192.168.1.101:27017",
"192.168.1.101:27018"
],
"passives" : [
"192.168.1.101:27019"
],
"arbiters" : [
"192.168.1.101:27020"
],
"setName" : "kgcrs",
"setVersion" : 4,
"ismaster" : true,
"secondary" : false,
"primary" : "192.168.1.101:27018",
"me" : "192.168.1.101:27018",
"electionId" : ObjectId("7fffffff0000000000000003"),
"lastWrite" : {
"opTime" : {
"ts" : Timestamp(1594467801, 1),
"t" : NumberLong(3)
},
"lastWriteDate" : ISODate("2020-07-11T11:43:21Z")
},
"maxBsonObjectSize" : 16777216,
"maxMessageSizeBytes" : 48000000,
"maxWriteBatchSize" : 1000,
"localTime" : ISODate("2020-07-11T11:43:26.967Z"),
"maxWireVersion" : 5,
"minWireVersion" : 0,
"readOnly" : false,
"ok" : 1
}
kgcrs:PRIMARY>
其中,hosts包括标准节点,passive包含被动节点,arbiters包含仲裁节点
3.模拟节点故障
关闭主节点实例即mongodb2:
[root@localhost ~]# ps -aux | grep mongod
root 3435 0.5 3.2 6115928 59860 ? Sl 18:41 0:20 mongod --config /usr/local/mongodb/bin/mongodb2.conf
[root@localhost ~]# kill -2 3435
查看当前master:
"_id" : 0,
"name" : "192.168.1.101:27017",
"health" : 1,
"state" : 1,
"stateStr" : "PRIMARY",
4.模拟所有标准节点都出现故障
[root@localhost ~]# ps -axu | grep mongod
root 3456 0.5 3.1 6109036 59308 ? Sl 18:41 0:22 mongod --config /usr/local/mongodb/bin/mongodb3.conf
root 3829 0.5 3.9 6066608 73388 ? Sl 18:49 0:20 mongod -f /usr/local/mongodb/bin/mongodb1.conf
root 43939 0.4 3.8 5504688 72616 ? Sl 19:49 0:01 mongod -f /usr/local/mongodb/bin/mongodb4.conf
root 44022 0.0 0.0 112656 956 pts/0 S+ 19:54 0:00 grep --color=auto mongod
[root@localhost ~]# kill -2 3829
[root@localhost ~]# ps -axu | grep mongod
root 3456 0.5 3.1 6109036 59604 ? Sl 18:41 0:23 mongod --config /usr/local/mongodb/bin/mongodb3.conf
root 43939 0.4 3.9 5504688 72992 ? Sl 19:49 0:01 mongod -f /usr/local/mongodb/bin/mongodb4.conf
root 44032 0.0 0.0 112656 960 pts/0 S+ 19:54 0:00 grep --color=auto mongod
[root@localhost ~]#
[root@localhost ~]# mongo --port 27019
kgcrs:SECONDARY>
kgcrs:SECONDARY> rs.status()
"_id" : 2,
"name" : "192.168.1.101:27019",
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY",
结论:
如果所有标准节点都出现故障,被动节点也不能成为主节点。
MongoDB复制集管理:
1.配置允许在从节点读取数据
默认MongoDB复制集的从节点不能读取数据,可以使用rs.slaveOk()命令允许能够在从节点读取数据
[root@localhost ~]# ps -aux | grep mongod
root 3456 0.5 3.2 6109036 59764 ? Sl 18:41 0:24 mongod --config /usr/local/mongodb/bin/mongodb3.conf
root 43939 0.4 3.9 5504688 73048 ? Sl 19:49 0:02 mongod -f /usr/local/mongodb/bin/mongodb4.conf
root 44074 0.0 0.0 112656 956 pts/0 S+ 19:58 0:00 grep --color=auto mongod
[root@localhost ~]# mongod -f /usr/local/mongodb/bin/mongodb1.conf
about to fork child process, waiting until server is ready for connections.
forked process: 44091
child process started successfully, parent exiting
[root@localhost ~]# mongod -f /usr/local/mongodb/bin/mongodb2.conf
about to fork child process, waiting until server is ready for connections.
forked process: 44158
child process started successfully, parent exiting
[root@localhost ~]# !ps
ps -aux | grep mongod
root 3456 0.5 3.2 6109036 59844 ? Sl 18:41 0:24 mongod --config /usr/local/mongodb/bin/mongodb3.conf
root 43939 0.4 3.9 5505716 73056 ? Sl 19:49 0:02 mongod -f /usr/local/mongodb/bin/mongodb4.conf
root 44091 1.8 4.5 6009536 85704 ? Sl 19:59 0:00 mongod -f /usr/local/mongodb/bin/mongodb1.conf
root 44158 3.6 4.5 6009536 85180 ? Sl 19:59 0:00 mongod -f /usr/local/mongodb/bin/mongodb2.conf
root 44229 0.0 0.0 112656 960 pts/0 S+ 19:59 0:00 grep --color=auto mongod
[root@localhost ~]# mongo
kgcrs:PRIMARY> rs.status()
kgcrs:PRIMARY> rs.help()
kgcrs:PRIMARY> use kgc
switched to db kgc
kgcrs:PRIMARY> show collections
system.indexes
t1
kgcrs:PRIMARY> exit
bye
[root@localhost ~]# mongo --port 27018
kgcrs:SECONDARY> show dbs;
2020-07-11T20:00:41.101+0800 E QUERY [thread1] Error: listDatabases failed:{
"ok" : 0,
"errmsg" : "not master and slaveOk=false",
"code" : 13435,
"codeName" : "NotMasterNoSlaveOk"
} :
_getErrorWithCode@src/mongo/shell/utils.js:25:13
Mongo.prototype.getDBs@src/mongo/shell/mongo.js:62:1
shellHelper.show@src/mongo/shell/utils.js:814:19
shellHelper@src/mongo/shell/utils.js:704:15
@(shellhelp2):1:1
kgcrs:SECONDARY> rs.slaveOk()
kgcrs:SECONDARY> show dbs;
admin 0.078GB
kgc 0.078GB
local 2.077GB
2.查看复制状态信息
可以使用rs.printReplicationInfo()和rs.printSlaveReplicationInfo()命令来查看复制集状态
kgcrs:SECONDARY> rs.status()
kgcrs:SECONDARY> rs.printSlaveReplicationInfo()
source: 192.168.1.101:27018
syncedTo: Sat Jul 11 2020 20:05:19 GMT+0800 (CST)
0 secs (0 hrs) behind the primary
source: 192.168.1.101:27019
syncedTo: Sat Jul 11 2020 20:05:19 GMT+0800 (CST)
0 secs (0 hrs) behind the primary
kgcrs:SECONDARY> rs.printReplicationInfo()
configured oplog size: 1229.484375MB
log length start to end: 4866secs (1.35hrs)
oplog first event time: Sat Jul 11 2020 18:45:03 GMT+0800 (CST)
oplog last event time: Sat Jul 11 2020 20:06:09 GMT+0800 (CST)
now: Sat Jul 11 2020 20:06:12 GMT+0800 (CST)
kgcrs:SECONDARY> use local
switched to db local
kgcrs:SECONDARY> show collections
me
oplog.rs
replset.election
replset.minvalid
startup_log
system.indexes
system.replset
kgcrs:SECONDARY> db.system.replset.findOne()
3.更改oplog大小
oplog是operation log的简写,存储在local数据库中。oplog中新操作会自动替换旧的操作,以保证oplog不会超过预设的大小。默认情况下,64位实例的oplog大小为可用磁盘空间的5%
kgcrs:PRIMARY> use local
kgcrs:PRIMARY> rs.printReplicationInfo()
kgcrs:PRIMARY> db.oplog.rs.stats()
{
"ns" : "local.oplog.rs",
"size" : 48676,
"count" : 505,
"avgObjSize" : 96,
"numExtents" : 1,
"storageSize" : 1512243200,
"lastExtentSize" : 1512243200,
"paddingFactor" : 1,
"paddingFactorNote" : "paddingFactor is unused and unmaintained in 3.0. It remains hard coded to 1.0 for compatibility only.",
"userFlags" : 1,
"capped" : true,
"max" : NumberLong("9223372036854775807"),
"maxSize" : 1512243200,
"nindexes" : 0,
"totalIndexSize" : 0,
"indexSizes" : {
},
"ok" : 1
}
kgcrs:PRIMARY> db.runCommand({"convertToCapped":"oplog.rs","size":"1024000000"})
4.部署认证的复制:
[root@localhost ~]# mongo
kgcrs:PRIMARY> use admin
switched to db admin
kgcrs:PRIMARY> db.createUser({"user":"root","pwd":"123","roles":["root"]})
Successfully added user: { "user" : "root", "roles" : [ "root" ] }
kgcrs:PRIMARY> exit
bye
[root@localhost ~]# vim /usr/local/mongodb/bin/mongodb1.conf
添加以下内容:
clusterAuthMode=keyFile
keyFile=/usr/local/mongodb/bin/kgcrskey1
[root@localhost ~]# vim /usr/local/mongodb/bin/mongodb2.conf
[root@localhost ~]# vim /usr/local/mongodb/bin/mongodb3.conf
[root@localhost ~]# vim /usr/local/mongodb/bin/mongodb4.conf
[root@localhost ~]# cd /usr/local/mongodb/bin/
[root@localhost bin]# echo "kgcrs key" > kgcrskey1 # 生成4个实例的密钥文件
[root@localhost bin]# echo "kgcrs key" > kgcrskey2
[root@localhost bin]# echo "kgcrs key" > kgcrskey3
[root@localhost bin]# echo "kgcrs key" > kgcrskey4
[root@localhost bin]# chmod 600 kgcrskey{1..4}
[root@localhost ~]# ./mongodb.sh mongodb1 restart # 重启四个实例
[root@localhost ~]# ./mongodb.sh mongodb2 restart
[root@localhost ~]# ./mongodb.sh mongodb3 restart
[root@localhost ~]# ./mongodb.sh mongodb4 restart
[root@localhost ~]# mongo
kgcrs:PRIMARY> show dbs
2020-07-11T20:34:54.075+0800 E QUERY [thread1] Error: listDatabases failed:{
"ok" : 0,
"errmsg" : "not authorized on admin to execute command { listDatabases: 1.0 }",
"code" : 13,
"codeName" : "Unauthorized"
} :
_getErrorWithCode@src/mongo/shell/utils.js:25:13
Mongo.prototype.getDBs@src/mongo/shell/mongo.js:62:1
shellHelper.show@src/mongo/shell/utils.js:814:19
shellHelper@src/mongo/shell/utils.js:704:15
@(shellhelp2):1:1
kgcrs:PRIMARY> rs.status()
{
"ok" : 0,
"errmsg" : "not authorized on admin to execute command { replSetGetStatus: 1.0 }",
"code" : 13,
"codeName" : "Unauthorized"
}
kgcrs:PRIMARY> use admin
switched to db admin
kgcrs:PRIMARY> db.auth("root","123")
1
kgcrs:PRIMARY> rs.status()
{
"set" : "kgcrs",
"date" : ISODate("2020-07-11T12:35:25.636Z"),
"myState" : 1,
"term" : NumberLong(9),
"syncingTo" : "",
"syncSourceHost" : "",
"syncSourceId" : -1,
"heartbeatIntervalMillis" : NumberLong(2000),
"optimes" : {
"lastCommittedOpTime" : {
"ts" : Timestamp(1594470917, 1),
"t" : NumberLong(9)
},
"appliedOpTime" : {
"ts" : Timestamp(1594470917, 1),
"t" : NumberLong(9)
},
"durableOpTime" : {
"ts" : Timestamp(1594470917, 1),
"t" : NumberLong(9)
}
},
"members" : [
...
]
}
kgcrs:PRIMARY> show dbs;
admin 0.078GB
kgc 0.078GB
local 2.077GB
kgcrs:PRIMARY> exit
bye
相关文章
- 08-19Docker环境下的前后端分离项目部署与运维(十二)使用Portainer管理Docker
- 08-19混合云下的 Kubernetes 多集群管理与应用部署
- 08-19部署K2 Blackpearl流程时出错(与基础事务管理器的通信失败或Communication with the underlying transaction manager has failed.
- 08-19Linux-MongoDB复制集的部署与管理
- 08-19自部署CI与云CI的环境管理
- 08-19打印服务器的配置与管理(1) 网络打印机的安装与部署
- 08-19Python多环境扩展管理--pyenv的部署与使用
- 08-19Server Core 的部署与管理
- 08-19持续交付流水线的敏捷利器:环境配置管理与应用部署自动化
- 08-19mtools 是由MongoDB 官方工程师实现的一套工具集,可以很快速的日志查询分析、统计功能,此外还支持本地集群部署管理.