1. About the sharding strategy and chunk splits
mongos> sh.status()
--- Sharding Status ---
sharding version: {
"_id" : 1,
"minCompatibleVersion" : 5,
"currentVersion" : 6,
"clusterId" : ObjectId("5850e59fce8b5f7ab7cfad34")
}
shards:
active mongoses:
"3.4.0" : 1
autosplit:
Currently enabled: yes
balancer:
Currently enabled: yes
Currently running: no
Balancer lock taken at Wed Dec 14 2016 06:24:31 GMT+0000 (UTC) by ConfigServer:Balancer
Failed balancer rounds in last 5 attempts: 0
Migration Results for the last 24 hours:
No recent migrations
databases:
mongos> use admin
switched to db admin
mongos> sh.addShard("shard1/mongo01-jp:27027")
{ "shardAdded" : "shard1", "ok" : 1 }
mongos> sh.addShard("shard2/mongo02-jp:27028")
{ "shardAdded" : "shard2", "ok" : 1 }
mongos> sh.addShard("shard3/mongo03-jp:27029")
{ "shardAdded" : "shard3", "ok" : 1 }
mongos> sh.status()
--- Sharding Status ---
sharding version: {
"_id" : 1,
"minCompatibleVersion" : 5,
"currentVersion" : 6,
"clusterId" : ObjectId("5850e59fce8b5f7ab7cfad34")
}
shards:
{ "_id" : "shard1", "host" : "shard1/mongo01-jp:27027,mongo02-jp:27027", "state" : 1 }
{ "_id" : "shard2", "host" : "shard2/mongo02-jp:27028,mongo03-jp:27028", "state" : 1 }
{ "_id" : "shard3", "host" : "shard3/mongo01-jp:27029,mongo03-jp:27029", "state" : 1 }
active mongoses:
"3.4.0" : 1
autosplit:
Currently enabled: yes
balancer:
Currently enabled: yes
Currently running: no
Balancer lock taken at Wed Dec 14 2016 06:24:31 GMT+0000 (UTC) by ConfigServer:Balancer
Failed balancer rounds in last 5 attempts: 0
Migration Results for the last 24 hours:
No recent migrations
databases:
mongos> use config
switched to db config
mongos> db.chunks.find()
mongos> db.settings.find()
mongos> db.settings.save({"_id":"chunksize","value":NumberLong(5)})
WriteResult({ "nMatched" : 0, "nUpserted" : 1, "nModified" : 0, "_id" : "chunksize" })
mongos> db.settings.find()
{ "_id" : "chunksize", "value" : NumberLong(5) }
Note: the default chunk size is 64MB, and it can be set manually. Smaller chunks spread data more evenly and are easier to migrate, but they split more often and increase mongos routing overhead (each chunk holds less data, so a single query may touch more chunks). Larger chunks are harder to migrate but split less often, keep the metadata smaller, and make mongos routing simpler; if chunks grow too large, however, data can end up unevenly distributed. Personally I find 64MB on the small side and would suggest raising it to 256MB. Split operations are triggered only by inserts or updates.
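Following the same settings pattern shown above, raising the chunk size to the suggested 256MB would look like this (run against the config database):
mongos> db.settings.save({ "_id" : "chunksize", "value" : NumberLong(256) })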
mongos> use admin
switched to db admin
mongos> sh.enableSharding("test")
{ "ok" : 1 }
mongos> sh.shardCollection("test.user",{uid:1}) 基于uid range 分片
{ "collectionsharded" : "test.user", "ok" : 1 }
mongos> use test
switched to db test
mongos> db.user.ensureIndex({uid:1})    // on an empty collection the system creates this index automatically
{
"raw" : {
"shard1/mongo01-jp:27027,mongo02-jp:27027" : {
"createdCollectionAutomatically" : true,
"numIndexesBefore" : 1,
"numIndexesAfter" : 2,
"ok" : 1,
"$gleStats" : {
"lastOpTime" : {
"ts" : Timestamp(1481700282, 2),
"t" : NumberLong(1)
},
"electionId" : ObjectId("7fffffff0000000000000001")
}
},
"shard2/mongo02-jp:27028,mongo03-jp:27028" : {
"createdCollectionAutomatically" : true,
"numIndexesBefore" : 1,
"numIndexesAfter" : 2,
"ok" : 1,
"$gleStats" : {
"lastOpTime" : {
"ts" : Timestamp(1481700282, 2),
"t" : NumberLong(1)
},
"electionId" : ObjectId("7fffffff0000000000000001")
}
},
"shard3/mongo01-jp:27029,mongo03-jp:27029" : {
"createdCollectionAutomatically" : false,
"numIndexesBefore" : 2,
"numIndexesAfter" : 2,
"note" : "all indexes already exist",
"ok" : 1,
"$gleStats" : {
"lastOpTime" : {
"ts" : Timestamp(1481700282, 1),
"t" : NumberLong(1)
},
"electionId" : ObjectId("7fffffff0000000000000001")
}
}
},
"ok" : 1
}
mongos> for (i=1;i<=100000;i++) db.user.insert({uid:'user'+i,age:(i%15),address:'#'+i+',nongda south road,beijing',preferbooks:['book'+i,'hello world']})
WriteResult({ "nInserted" : 1 })
mongos> sh.status()
--- Sharding Status ---
sharding version: {
"_id" : 1,
"minCompatibleVersion" : 5,
"currentVersion" : 6,
"clusterId" : ObjectId("5850e59fce8b5f7ab7cfad34")
}
shards:
{ "_id" : "shard1", "host" : "shard1/mongo01-jp:27027,mongo02-jp:27027", "state" : 1 }
{ "_id" : "shard2", "host" : "shard2/mongo02-jp:27028,mongo03-jp:27028", "state" : 1 }
{ "_id" : "shard3", "host" : "shard3/mongo01-jp:27029,mongo03-jp:27029", "state" : 1 }
active mongoses:
"3.4.0" : 1
autosplit:
Currently enabled: yes
balancer:
Currently enabled: yes
Currently running: no
Balancer lock taken at Wed Dec 14 2016 06:24:31 GMT+0000 (UTC) by ConfigServer:Balancer
Failed balancer rounds in last 5 attempts: 0
Migration Results for the last 24 hours:
8 : Success
databases:
{ "_id" : "test", "primary" : "shard3", "partitioned" : true }
test.user
shard key: { "uid" : 1 }
unique: false
balancing: true
chunks:
shard1 5
shard2 4
shard3 4
{ "uid" : { "$minKey" : 1 } } -->> { "uid" : "user10" } on : shard3 Timestamp(9, 1)
{ "uid" : "user10" } -->> { "uid" : "user17659" } on : shard1 Timestamp(4, 1)
{ "uid" : "user17659" } -->> { "uid" : "user2" } on : shard1 Timestamp(3, 4)
{ "uid" : "user2" } -->> { "uid" : "user2832" } on : shard1 Timestamp(6, 0)
{ "uid" : "user2832" } -->> { "uid" : "user32174" } on : shard3 Timestamp(7, 0)
{ "uid" : "user32174" } -->> { "uid" : "user36003" } on : shard1 Timestamp(8, 0)
{ "uid" : "user36003" } -->> { "uid" : "user5211" } on : shard2 Timestamp(8, 1)
{ "uid" : "user5211" } -->> { "uid" : "user5974" } on : shard2 Timestamp(6, 4)
{ "uid" : "user5974" } -->> { "uid" : "user6713" } on : shard2 Timestamp(4, 7)
{ "uid" : "user6713" } -->> { "uid" : "user8" } on : shard2 Timestamp(4, 4)
{ "uid" : "user8" } -->> { "uid" : "user87659" } on : shard3 Timestamp(8, 2)
{ "uid" : "user87659" } -->> { "uid" : "user9999" } on : shard3 Timestamp(8, 3)
{ "uid" : "user9999" } -->> { "uid" : { "$maxKey" : 1 } } on : shard1 Timestamp(9, 0)
mongos> use config
switched to db config
mongos> db.settings.find()
{ "_id" : "chunksize", "value" : NumberLong(5) }
mongos> db.shards.find()
{ "_id" : "shard1", "host" : "shard1/mongo01-jp:27027,mongo02-jp:27027", "state" : 1 }
{ "_id" : "shard2", "host" : "shard2/mongo02-jp:27028,mongo03-jp:27028", "state" : 1 }
{ "_id" : "shard3", "host" : "shard3/mongo01-jp:27029,mongo03-jp:27029", "state" : 1 }
mongos> db.databases.find()
{ "_id" : "test", "primary" : "shard3", "partitioned" : true }
Note: every database has a primary shard; its collections that are not sharded live entirely on that shard.
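The primary shard of a database can be changed with the movePrimary command; shard1 below is only an illustrative target, and since the command copies all of the database's unsharded data it is best run during a quiet period:
mongos> use admin
mongos> db.adminCommand({ movePrimary: "test", to: "shard1" })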
mongos> db.chunks.find()
{ "_id" : "test.user-uid_MinKey", "lastmod" : Timestamp(9, 1), "lastmodEpoch" : ObjectId("5850f399db3a37e0bc576a2b"), "ns" : "test.user", "min" : { "uid" : { "$minKey" : 1 } }, "max" : { "uid" : "user10" }, "shard" : "shard3" }
{ "_id" : "test.user-uid_\"user2\"", "lastmod" : Timestamp(6, 0), "lastmodEpoch" : ObjectId("5850f399db3a37e0bc576a2b"), "ns" : "test.user", "min" : { "uid" : "user2" }, "max" : { "uid" : "user2832" }, "shard" : "shard1" }
{ "_id" : "test.user-uid_\"user8\"", "lastmod" : Timestamp(8, 2), "lastmodEpoch" : ObjectId("5850f399db3a37e0bc576a2b"), "ns" : "test.user", "min" : { "uid" : "user8" }, "max" : { "uid" : "user87659" }, "shard" : "shard3" }
{ "_id" : "test.user-uid_\"user10\"", "lastmod" : Timestamp(4, 1), "lastmodEpoch" : ObjectId("5850f399db3a37e0bc576a2b"), "ns" : "test.user", "min" : { "uid" : "user10" }, "max" : { "uid" : "user17659" }, "shard" : "shard1" }
{ "_id" : "test.user-uid_\"user17659\"", "lastmod" : Timestamp(3, 4), "lastmodEpoch" : ObjectId("5850f399db3a37e0bc576a2b"), "ns" : "test.user", "min" : { "uid" : "user17659" }, "max" : { "uid" : "user2" }, "shard" : "shard1" }
{ "_id" : "test.user-uid_\"user2832\"", "lastmod" : Timestamp(7, 0), "lastmodEpoch" : ObjectId("5850f399db3a37e0bc576a2b"), "ns" : "test.user", "min" : { "uid" : "user2832" }, "max" : { "uid" : "user32174" }, "shard" : "shard3" }
{ "_id" : "test.user-uid_\"user6713\"", "lastmod" : Timestamp(4, 4), "lastmodEpoch" : ObjectId("5850f399db3a37e0bc576a2b"), "ns" : "test.user", "min" : { "uid" : "user6713" }, "max" : { "uid" : "user8" }, "shard" : "shard2" }
{ "_id" : "test.user-uid_\"user32174\"", "lastmod" : Timestamp(8, 0), "lastmodEpoch" : ObjectId("5850f399db3a37e0bc576a2b"), "ns" : "test.user", "min" : { "uid" : "user32174" }, "max" : { "uid" : "user36003" }, "shard" : "shard1" }
{ "_id" : "test.user-uid_\"user5974\"", "lastmod" : Timestamp(4, 7), "lastmodEpoch" : ObjectId("5850f399db3a37e0bc576a2b"), "ns" : "test.user", "min" : { "uid" : "user5974" }, "max" : { "uid" : "user6713" }, "shard" : "shard2" }
{ "_id" : "test.user-uid_\"user36003\"", "lastmod" : Timestamp(8, 1), "lastmodEpoch" : ObjectId("5850f399db3a37e0bc576a2b"), "ns" : "test.user", "min" : { "uid" : "user36003" }, "max" : { "uid" : "user5211" }, "shard" : "shard2" }
{ "_id" : "test.user-uid_\"user5211\"", "lastmod" : Timestamp(6, 4), "lastmodEpoch" : ObjectId("5850f399db3a37e0bc576a2b"), "ns" : "test.user", "min" : { "uid" : "user5211" }, "max" : { "uid" : "user5974" }, "shard" : "shard2" }
{ "_id" : "test.user-uid_\"user87659\"", "lastmod" : Timestamp(8, 3), "lastmodEpoch" : ObjectId("5850f399db3a37e0bc576a2b"), "ns" : "test.user", "min" : { "uid" : "user87659" }, "max" : { "uid" : "user9999" }, "shard" : "shard3" }
{ "_id" : "test.user-uid_\"user9999\"", "lastmod" : Timestamp(9, 0), "lastmodEpoch" : ObjectId("5850f399db3a37e0bc576a2b"), "ns" : "test.user", "min" : { "uid" : "user9999" }, "max" : { "uid" : { "$maxKey" : 1 } }, "shard" : "shard1" }
mongos> use admin
switched to db admin
mongos> sh.shardCollection("test.user2",{"_id":"hashed"}) 基于hash 分片
{ "collectionsharded" : "test.user2", "ok" : 1 }
mongos> use test
switched to db test
mongos> for (i=1;i<=100000;i++) db.user2.insert({uid:'user'+i,age:(i%15),address:'#'+i+',shangdi south road,beijing',preferbooks:['book'+i,'hello world']})
WriteResult({ "nInserted" : 1 })
mongos> sh.status()
--- Sharding Status ---
sharding version: {
"_id" : 1,
"minCompatibleVersion" : 5,
"currentVersion" : 6,
"clusterId" : ObjectId("5850e59fce8b5f7ab7cfad34")
}
shards:
{ "_id" : "shard1", "host" : "shard1/mongo01-jp:27027,mongo02-jp:27027", "state" : 1 }
{ "_id" : "shard2", "host" : "shard2/mongo02-jp:27028,mongo03-jp:27028", "state" : 1 }
{ "_id" : "shard3", "host" : "shard3/mongo01-jp:27029,mongo03-jp:27029", "state" : 1 }
active mongoses:
"3.4.0" : 1
autosplit:
Currently enabled: yes
balancer:
Currently enabled: yes
Currently running: no
Balancer lock taken at Wed Dec 14 2016 06:24:31 GMT+0000 (UTC) by ConfigServer:Balancer
Failed balancer rounds in last 5 attempts: 0
Migration Results for the last 24 hours:
11 : Success
databases:
{ "_id" : "test", "primary" : "shard3", "partitioned" : true }
test.user
shard key: { "uid" : 1 }
unique: false
balancing: true
chunks:
shard1 5
shard2 4
shard3 4
{ "uid" : { "$minKey" : 1 } } -->> { "uid" : "user10" } on : shard3 Timestamp(9, 1)
{ "uid" : "user10" } -->> { "uid" : "user17659" } on : shard1 Timestamp(4, 1)
{ "uid" : "user17659" } -->> { "uid" : "user2" } on : shard1 Timestamp(3, 4)
{ "uid" : "user2" } -->> { "uid" : "user2832" } on : shard1 Timestamp(6, 0)
{ "uid" : "user2832" } -->> { "uid" : "user32174" } on : shard3 Timestamp(7, 0)
{ "uid" : "user32174" } -->> { "uid" : "user36003" } on : shard1 Timestamp(8, 0)
{ "uid" : "user36003" } -->> { "uid" : "user5211" } on : shard2 Timestamp(8, 1)
{ "uid" : "user5211" } -->> { "uid" : "user5974" } on : shard2 Timestamp(6, 4)
{ "uid" : "user5974" } -->> { "uid" : "user6713" } on : shard2 Timestamp(4, 7)
{ "uid" : "user6713" } -->> { "uid" : "user8" } on : shard2 Timestamp(4, 4)
{ "uid" : "user8" } -->> { "uid" : "user87659" } on : shard3 Timestamp(8, 2)
{ "uid" : "user87659" } -->> { "uid" : "user9999" } on : shard3 Timestamp(8, 3)
{ "uid" : "user9999" } -->> { "uid" : { "$maxKey" : 1 } } on : shard1 Timestamp(9, 0)
test.user2
shard key: { "_id" : "hashed" }
unique: false
balancing: true
chunks:
shard1 3
shard2 3
shard3 2
{ "_id" : { "$minKey" : 1 } } -->> { "_id" : NumberLong("-7785268750491956891") } on : shard2 Timestamp(4, 0)
{ "_id" : NumberLong("-7785268750491956891") } -->> { "_id" : NumberLong("-6358150028456512613") } on : shard1 Timestamp(4, 1)
{ "_id" : NumberLong("-6358150028456512613") } -->> { "_id" : NumberLong("-6148914691236517204") } on : shard1 Timestamp(3, 10)
{ "_id" : NumberLong("-6148914691236517204") } -->> { "_id" : NumberLong("-3074457345618258602") } on : shard1 Timestamp(3, 3)
{ "_id" : NumberLong("-3074457345618258602") } -->> { "_id" : NumberLong(0) } on : shard2 Timestamp(3, 4)
{ "_id" : NumberLong(0) } -->> { "_id" : NumberLong("3074457345618258602") } on : shard2 Timestamp(3, 5)
{ "_id" : NumberLong("3074457345618258602") } -->> { "_id" : NumberLong("6148914691236517204") } on : shard3 Timestamp(3, 6)
{ "_id" : NumberLong("6148914691236517204") } -->> { "_id" : { "$maxKey" : 1 } } on : shard3 Timestamp(3, 7)
2. About balancing
Balancing: when one shard holds more chunks than the others, i.e. the cluster is unbalanced, chunks are migrated automatically until balance is restored; balancing does not block user data operations. The balancer is enabled by default. Before 3.4, any mongos instance in the cluster could run the balancing thread: the config database (on the config servers) holds a locks collection, and an active mongos tried to acquire the "lock" by modifying a document; whichever mongos acquired it did the balancing. From MongoDB 3.4 on, the balancer runs on the primary of the config server replica set, which is why the output above shows the lock held by ConfigServer:Balancer. Note that local system time affects the lock mechanism, so the clocks of all mongos instances (and of all shards and config servers in the cluster) must be kept in sync (e.g. with ntpd).
The balancer migrates chunks from the shard holding the most chunks to the shard holding the fewest, one chunk at a time, until the cluster is roughly balanced (the gap between the most- and least-loaded shards stays within the threshold). Migration costs disk space: chunks that have been migrated away are not deleted immediately but archived into a dedicated directory, and this "archive" feature is enabled by default. Migration also consumes network bandwidth, which may hurt performance and the IO throughput available to user operations. It is therefore sensible that only one chunk migrates at a time and that balancing starts only once the gap reaches the threshold; alternatively, specify a time window so the balancer migrates chunks only within that period.
threshold: this value currently cannot be changed. With fewer than 20 chunks in total, threshold = 2; with 80 or more, threshold = 8; otherwise it is 4. Once a balancing round starts, it stops only when the chunks are evenly distributed.
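Expressed as code, the rule above is simply (a plain JavaScript sketch of the documented thresholds, not an actual API):
function migrationThreshold(totalChunks) {
    if (totalChunks < 20) return 2;    // fewer than 20 chunks
    if (totalChunks < 80) return 4;    // 20 to 79 chunks
    return 8;                          // 80 chunks or more
}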
By default, mongodb will happily use up all available disk space, so its disk consumption needs to be watched. When adding a shard to the cluster, however, you can specify the maximum disk space that shard may use (max size); once the shard's usage reaches that limit the balancer stops migrating chunks to it, though the shard continues to accept writes.
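maxSize is given in MB when the shard is added, or patched onto an existing shard's document in config.shards. A sketch, where shard4/mongo04-jp:27030 is a hypothetical new member and 102400 caps usage at 100GB:
mongos> db.adminCommand({ addShard: "shard4/mongo04-jp:27030", maxSize: 102400 })
mongos> use config
mongos> db.shards.update({ _id: "shard3" }, { $set: { maxSize: 102400 } })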
The balancer controls chunk distribution and migration, and only one balancer is active in the whole cluster (on a mongos before 3.4; on the config server primary from 3.4 on). Use "sh.getBalancerState()" or "sh.status()" to check whether the balancer is enabled, and "sh.getBalancerHost()" to see where it runs.
1) sh.setBalancerState(false) disables the balancer; setting it back to true enables it again.
2) sh.startBalancer() and sh.stopBalancer() likewise start and stop the balancer.
3) db.locks.find({_id:"balancer"}) shows who holds the balancer lock.
4) The balancer's active time window is set by modifying its document in the settings collection. start and stop use the "HH:MM" format, with no date; make sure the balancer state is true when changing activeWindow.
mongos> sh.getBalancerState()
true
mongos> sh.setBalancerState(false)
{ "ok" : 1 }
mongos> sh.getBalancerState()
false
mongos> sh.setBalancerState(true)
{ "ok" : 1 }
mongos> use config
switched to db config
mongos> db.locks.find({_id:"balancer"})
{ "_id" : "balancer", "state" : 2, "ts" : ObjectId("5850de85ce8b5f7ab7cfacdd"), "who" : "ConfigServer:Balancer", "process" : "ConfigServer", "when" : ISODate("2016-12-14T06:24:31.625Z"), "why" : "CSRS Balancer" }
mongos> db.settings.find()
{ "_id" : "chunksize", "value" : NumberLong(5) }
{ "_id" : "balancer", "stopped" : false, "mode" : "full" }
mongos> db.settings.update(
... { _id:"balancer"},
... { $set: { activeWindow: { start:"23:00",stop:"6:00"} } },
... { upsert: true }
... )
WriteResult({ "nMatched" : 1, "nUpserted" : 0, "nModified" : 1 })
mongos> db.settings.find()
{ "_id" : "chunksize", "value" : NumberLong(5) }
{ "_id" : "balancer", "stopped" : false, "mode" : "full", "activeWindow" : { "start" : "23:00", "stop" : "6:00" } }
The _secondaryThrottle and _waitForDelete parameters:
_secondaryThrottle throttles migration writes against secondaries. It defaults to true, with semantics equivalent to write concern {w:2}: while a chunk migrates (its documents are copied), the destination shard waits until at least one secondary has replicated each document before the next one is copied. It can be turned off (equivalent to {w:1}), or combined with an explicit write concern.
_waitForDelete controls whether the balancer waits for the source shard to delete the documents of a successfully migrated chunk before moving on to the next chunk; it defaults to false, i.e. no waiting.
mongos> use config
switched to db config
mongos> db.settings.update (
... { "_id":"balancer"},
... { $set: {"_secondaryThrottle":false, "writeConncern":{"w":"majority"} } },
... { upsert: true}
... )
WriteResult({ "nMatched" : 1, "nUpserted" : 0, "nModified" : 1 })
mongos> db.settings.update(
... {"_id":"balancer"},
... {$set:{"_waitForDelete":true } },
... {upsert:true}
... )
WriteResult({ "nMatched" : 0, "nUpserted" : 1, "nModified" : 0, "_id" : "balancer" })
mongos>
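As a final sanity check, read the balancer document back; it should now carry _secondaryThrottle, writeConcern and _waitForDelete alongside the activeWindow set earlier:
mongos> db.settings.find({ "_id" : "balancer" })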