
MongoDB clone (db.copyDatabase) clones only half of the database

I have a database with about 4 GB of data, but when I try to dump it with mongodump it only dumps about 1.93 GB of it. When I try to clone it with db.copyDatabase(), it copies the same amount of data as mongodump.
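Roughly how the dump and the clone were run (the database name, host, and output path below are placeholders, not the real ones):

// dump, from the OS shell:
//   mongodump --db some-db --out /backup/some-db
// clone, from the mongo shell on the destination server:
db.copyDatabase("some-db", "some-db", "source-host.example.com")
// logical data size of the copy, for comparison with the source:
db.getSiblingDB("some-db").stats().dataSize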

Does MongoDB have some limit that would cause this?

Thanks for your help, and sorry for my bad English (I hope you can understand me).

EDIT
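The output below is a full validate of the persons collection, produced with something along these lines ('some-db' is a placeholder name, as elsewhere in this post):

// in the mongo shell, against the source database
use some-db
db.persons.validate(true)    // or validate({ full: true })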

{ 
     "ns" : "some-db.persons", 
     "firstExtent" : "0:31000 ns:some-db.persons", 
     "lastExtent" : "4:b50d000 ns:some-db.persons", 
     "extentCount" : 13, 
     "extents" : [ 
       { 
         "loc" : "0:31000", 
         "xnext" : "0:42000", 
         "xprev" : "null", 
         "nsdiag" : "some-db.persons", 
         "size" : 32768, 
         "firstRecord" : "0:310b0", 
         "lastRecord" : "0:38e3c" 
       }, 
       { 
         "loc" : "0:42000", 
         "xnext" : "0:62000", 
         "xprev" : "0:31000", 
         "nsdiag" : "some-db.persons", 
         "size" : 131072, 
         "firstRecord" : "0:420b0", 
         "lastRecord" : "0:61e64" 
       }, 
       { 
         "loc" : "0:62000", 
         "xnext" : "0:e2000", 
         "xprev" : "0:42000", 
         "nsdiag" : "some-db.persons", 
         "size" : 524288, 
         "firstRecord" : "0:620b0", 
         "lastRecord" : "0:e0f0c" 
       }, 
       { 
         "loc" : "0:e2000", 
         "xnext" : "0:372000", 
         "xprev" : "0:62000", 
         "nsdiag" : "some-db.persons", 
         "size" : 2097152, 
         "firstRecord" : "0:e20b0", 
         "lastRecord" : "0:2e1d94" 
       }, 
       { 
         "loc" : "0:372000", 
         "xnext" : "0:dd6000", 
         "xprev" : "0:e2000", 
         "nsdiag" : "some-db.persons", 
         "size" : 8388608, 
         "firstRecord" : "0:3720b0", 
         "lastRecord" : "0:b71e80" 
       }, 
       { 
         "loc" : "0:dd6000", 
         "xnext" : "0:2233000", 
         "xprev" : "0:372000", 
         "nsdiag" : "some-db.persons", 
         "size" : 11325440, 
         "firstRecord" : "0:dd60b0", 
         "lastRecord" : "0:18a2db8" 
       }, 
       { 
         "loc" : "0:2233000", 
         "xnext" : "1:2000", 
         "xprev" : "0:dd6000", 
         "nsdiag" : "some-db.persons", 
         "size" : 15290368, 
         "firstRecord" : "0:22330b0", 
         "lastRecord" : "0:30c7e20" 
       }, 
       { 
         "loc" : "1:2000", 
         "xnext" : "1:223d000", 
         "xprev" : "0:2233000", 
         "nsdiag" : "some-db.persons", 
         "size" : 20643840, 
         "firstRecord" : "1:20b0", 
         "lastRecord" : "1:13b1e9c" 
       }, 
       { 
         "loc" : "1:223d000", 
         "xnext" : "2:2000", 
         "xprev" : "1:2000", 
         "nsdiag" : "some-db.persons", 
         "size" : 27869184, 
         "firstRecord" : "1:223d0b0", 
         "lastRecord" : "1:3cd0e7c" 
       }, 
       { 
         "loc" : "2:2000", 
         "xnext" : "2:73ba000", 
         "xprev" : "1:223d000", 
         "nsdiag" : "some-db.persons", 
         "size" : 37625856, 
         "firstRecord" : "2:20b0", 
         "lastRecord" : "2:23e3dd8" 
       }, 
       { 
         "loc" : "2:73ba000", 
         "xnext" : "3:4bd7000", 
         "xprev" : "2:2000", 
         "nsdiag" : "some-db.persons", 
         "size" : 50798592, 
         "firstRecord" : "2:73ba0b0", 
         "lastRecord" : "2:a42be78" 
       }, 
       { 
         "loc" : "3:4bd7000", 
         "xnext" : "4:b50d000", 
         "xprev" : "2:73ba000", 
         "nsdiag" : "some-db.persons", 
         "size" : 68579328, 
         "firstRecord" : "3:4bd70b0", 
         "lastRecord" : "3:8d3dda4" 
       }, 
       { 
         "loc" : "4:b50d000", 
         "xnext" : "null", 
         "xprev" : "3:4bd7000", 
         "nsdiag" : "some-db.persons", 
         "size" : 92581888, 
         "firstRecord" : "4:b50d0b0", 
         "lastRecord" : "4:ba17118" 
       } 
     ], 
     "datasize" : 244286608, 
     "nrecords" : 268872, 
     "lastExtentSize" : 92581888, 
     "padding" : 1, 
     "firstExtentDetails" : { 
       "loc" : "0:31000", 
       "xnext" : "0:42000", 
       "xprev" : "null", 
       "nsdiag" : "some-db.persons", 
       "size" : 32768, 
       "firstRecord" : "0:310b0", 
       "lastRecord" : "0:38e3c" 
     }, 
     "lastExtentDetails" : { 
       "loc" : "4:b50d000", 
       "xnext" : "null", 
       "xprev" : "3:4bd7000", 
       "nsdiag" : "some-db.persons", 
       "size" : 92581888, 
       "firstRecord" : "4:b50d0b0", 
       "lastRecord" : "4:ba17118" 
     }, 
     "objectsFound" : 218678, 
     "invalidObjects" : 0, 
     "bytesWithHeaders" : 207046812, 
     "bytesWithoutHeaders" : 203547964, 
     "deletedCount" : 6, 
     "deletedSize" : 87297536, 
     "nIndexes" : 1, 
     "keysPerIndex" : { 
       "some-db.persons.$_id_" : 268872 
     }, 
     "valid" : true, 
     "errors" : [ ], 
     "ok" : 1 
} 

ANSWER


The data is probably all there; MongoDB allocates extra disk space over time, and that preallocated space is not included in a copy. Check the document counts in your collections to verify.
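For example, you can compare the two databases collection by collection from the mongo shell (the database names below are placeholders):

// print a document count for every collection in the source and in the copy
["some-db", "some-db-copy"].forEach(function (name) {
    var d = db.getSiblingDB(name);
    d.getCollectionNames().forEach(function (coll) {
        print(name + "." + coll + ": " + d.getCollection(coll).count());
    });
});

If the counts match, the size difference is only preallocated file space and padding, not missing documents.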


OK, I checked it. In the first collection I have all 96,279 documents; the second collection contains only 70,596 of its 268,872 documents. I have an idea, I will try to copy only the second collection. – user1607808 2013-05-04 14:06:55


Well, I tried it, but it still copies only 70,596 documents. – user1607808 2013-05-04 14:15:14


@user1607808 Hmm... have you run validate on the source collections to check them for problems? 'db.mycoll.validate()' or 'db.mycoll.validate({full: true})' – JohnnyHK 2013-05-04 15:24:50
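In shell terms, that suggestion amounts to something like the following, run against the source collection ('some-db' is still a placeholder):

// full validation of one collection; the fields printed here also appear in the EDIT output above
var res = db.getSiblingDB("some-db").persons.validate(true);
print("valid:          " + res.valid);
print("nrecords:       " + res.nrecords);
print("objectsFound:   " + res.objectsFound);
print("invalidObjects: " + res.invalidObjects);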