MongoDB Replica Set解决了容错和单点故障问题,但单台机器的存储和承受能力有限,Sharding就是为了海量存储和动态扩容而产生的。这才有了Replica Set+Sharding高可用架构。
Sharding Cluster主要包括如下三部分:分片服务器(Shard Server,本例为两个Replica Set:rep1和rep2)、配置服务器(Config Server)和路由服务器(mongos)。
# master.conf — primary data member of replica set "rep1" (shard 1)
dbpath=/data/mongo_cluster/master
logpath=/data/mongo_cluster/master/logs/master.log
pidfilepath=/data/mongo_cluster/master/master.pid
# append to the existing log file on restart instead of truncating it
logappend=true
# replica set name; all three rep1 members (ports 10000-10002) share it
replSet=rep1
bind_ip=192.168.15.130
port=10000
# run mongod as a background daemon
fork=true
# enable write-ahead journaling for crash recovery
journal=true
# declare this mongod a shard member of the sharded cluster
shardsvr=true
# slave.conf — secondary data member of replica set "rep1" (shard 1)
dbpath=/data/mongo_cluster/slave
logpath=/data/mongo_cluster/slave/logs/slave.log
pidfilepath=/data/mongo_cluster/slave/slave.pid
# append to the existing log file on restart instead of truncating it
logappend=true
# must match the master's replSet so it joins the same set
replSet=rep1
bind_ip=192.168.15.130
port=10001
# run mongod as a background daemon
fork=true
# enable write-ahead journaling for crash recovery
journal=true
# declare this mongod a shard member of the sharded cluster
shardsvr=true
# arbiter.conf — arbiter member of replica set "rep1"
# NOTE(review): an arbiter only votes in elections and stores no user data;
# shardsvr=true here appears unnecessary — confirm it is intended.
dbpath=/data/mongo_cluster/arbiter
logpath=/data/mongo_cluster/arbiter/logs/arbiter.log
pidfilepath=/data/mongo_cluster/arbiter/arbiter.pid
# append to the existing log file on restart instead of truncating it
logappend=true
# must match the other rep1 members so it joins the same set
replSet=rep1
bind_ip=192.168.15.130
port=10002
# run mongod as a background daemon
fork=true
# enable write-ahead journaling for crash recovery
journal=true
shardsvr=true
# master.conf — primary data member of replica set "rep2" (shard 2)
dbpath=/data/mongo_cluster2/master
logpath=/data/mongo_cluster2/master/logs/master.log
pidfilepath=/data/mongo_cluster2/master/master.pid
# append to the existing log file on restart instead of truncating it
logappend=true
# second shard uses its own replica set name; members on ports 10004-10006
replSet=rep2
bind_ip=192.168.15.130
port=10004
# run mongod as a background daemon
fork=true
# enable write-ahead journaling for crash recovery
journal=true
# declare this mongod a shard member of the sharded cluster
shardsvr=true
# slave.conf — secondary data member of replica set "rep2" (shard 2)
dbpath=/data/mongo_cluster2/slave
logpath=/data/mongo_cluster2/slave/logs/slave.log
pidfilepath=/data/mongo_cluster2/slave/slave.pid
# append to the existing log file on restart instead of truncating it
logappend=true
# must match the rep2 master's replSet so it joins the same set
replSet=rep2
bind_ip=192.168.15.130
port=10005
# run mongod as a background daemon
fork=true
# enable write-ahead journaling for crash recovery
journal=true
# declare this mongod a shard member of the sharded cluster
shardsvr=true
# arbiter.conf — arbiter member of replica set "rep2"
# NOTE(review): an arbiter only votes in elections and stores no user data;
# shardsvr=true here appears unnecessary — confirm it is intended.
dbpath=/data/mongo_cluster2/arbiter
logpath=/data/mongo_cluster2/arbiter/logs/arbiter.log
pidfilepath=/data/mongo_cluster2/arbiter/arbiter.pid
# append to the existing log file on restart instead of truncating it
logappend=true
# must match the other rep2 members so it joins the same set
replSet=rep2
bind_ip=192.168.15.130
port=10006
# run mongod as a background daemon
fork=true
# enable write-ahead journaling for crash recovery
journal=true
shardsvr=true
# configsvr.conf — config server; stores the sharded cluster's metadata
# NOTE(review): a single config server is a single point of failure —
# production deployments normally run three; confirm this is only for testing.
dbpath=/data/mongo_config_server
logpath=/data/mongo_config_server/logs/configsvr.log
pidfilepath=/data/mongo_config_server/configsvr.pid
# append to the existing log file on restart instead of truncating it
logappend=true
bind_ip=192.168.15.130
port=10007
# run mongod as a background daemon
fork=true
# enable write-ahead journaling for crash recovery
journal=true
# run this mongod in config-server mode
configsvr=true
# mongos.conf — query router; clients connect here instead of to the shards
# (mongos is stateless, so it has no dbpath)
logpath=/data/mongos/logs/mongos.log
pidfilepath=/data/mongos/mongos.pid
# append to the existing log file on restart instead of truncating it
logappend=true
bind_ip=192.168.15.130
port=10008
# run mongos as a background daemon
fork=true
# address of the config server defined in configsvr.conf above
configdb=192.168.15.130:10007
# Start the shard members (rep1: master/slave/arbiter, then rep2)
/usr/local/mongodb3.0.5/bin/mongod -f /data/mongo_cluster/master/master.conf
/usr/local/mongodb3.0.5/bin/mongod -f /data/mongo_cluster/slave/slave.conf
/usr/local/mongodb3.0.5/bin/mongod -f /data/mongo_cluster/arbiter/arbiter.conf
/usr/local/mongodb3.0.5/bin/mongod -f /data/mongo_cluster2/master/master.conf
/usr/local/mongodb3.0.5/bin/mongod -f /data/mongo_cluster2/slave/slave.conf
/usr/local/mongodb3.0.5/bin/mongod -f /data/mongo_cluster2/arbiter/arbiter.conf
# Start the config server (must be up before mongos starts)
/usr/local/mongodb3.0.5/bin/mongod -f /data/mongo_config_server/configsvr.conf
# Start the mongos router
/usr/local/mongodb3.0.5/bin/mongos -f /data/mongos/mongos.conf
# Open a shell connected to mongos
/usr/local/mongodb3.0.5/bin/mongo 192.168.15.130:10008
# Register each replica set with mongos as a shard
# (naming any one member is enough; mongos discovers the rest of the set)
mongos> db.runCommand( { addshard : "rep1/192.168.15.130:10001"});
{ "shardAdded" : "rep1", "ok" : 1 }
mongos> db.runCommand( { addshard : "rep2/192.168.15.130:10004"});
{ "shardAdded" : "rep2", "ok" : 1 }
# List the registered shards to verify the configuration
mongos> db.runCommand( { listshards : 1 } );
# Enable sharding for the "testdb" database
db.runCommand( { enablesharding :"testdb"});
# Shard the collection "testdb.table1" using {id: 1} as the shard key
db.runCommand( { shardcollection : "testdb.table1",key : {id: 1} } )
# Test it
> use testdb;
# Insert test documents
> for (var i = 1; i <= 1000; i++) db.table1.save({id:i,"test1":"testval1"});
# Inspect how the documents were distributed across the shards
> db.table1.stats();