@@ -61,6 +61,18 @@ func readIndexEntryAtOffset(indexFile *os.File, offset int64) (bytes []byte, err
func verifyNeedleIntegrity ( datFile backend . BackendStorageFile , v needle . Version , offset int64 , key NeedleId , size Size ) ( lastAppendAtNs uint64 , err error ) {
func verifyNeedleIntegrity ( datFile backend . BackendStorageFile , v needle . Version , offset int64 , key NeedleId , size Size ) ( lastAppendAtNs uint64 , err error ) {
n := new ( needle . Needle )
n := new ( needle . Needle )
// case: a node with 8g total memory and volumeLimitSize=2048 stores 10 files of 2.2g or more each; restarting the volume server then hits an out-of-memory error
// fix: when the needle size exceeds 10M, skip reading the whole blob and directly return the last append (modify) timestamp
if size > 10 * 1024 * 1024 {
bytes , err := needle . ReadNeedleBlob ( datFile , offset + int64 ( size ) , 0 , v ) ;
if err == nil {
if v == needle . Version3 {
tsOffset := NeedleHeaderSize + 0 + needle . NeedleChecksumSize
n . AppendAtNs = util . BytesToUint64 ( bytes [ tsOffset : tsOffset + TimestampSize ] )
}
}
return n . AppendAtNs , err
}
if err = n . ReadData ( datFile , offset , size , v ) ; err != nil {
if err = n . ReadData ( datFile , offset , size , v ) ; err != nil {
return n . AppendAtNs , fmt . Errorf ( "read data [%d,%d) : %v" , offset , offset + int64 ( size ) , err )
return n . AppendAtNs , fmt . Errorf ( "read data [%d,%d) : %v" , offset , offset + int64 ( size ) , err )
}
}