
volume: add special handling for .dat larger than 32GB

Chris Lu committed 4 years ago
commit 06c15ab35c (pull/1569/head)
  1. weed/storage/needle/needle_read_write.go (+7)
  2. weed/storage/volume_read_write.go (+4)

weed/storage/needle/needle_read_write.go (+7)

@@ -24,6 +24,8 @@ const (
 	TtlBytesLength = 2
 )
 
+var ErrorSizeMismatch = errors.New("size mismatch")
+
 func (n *Needle) DiskSize(version Version) int64 {
 	return GetActualSize(n.Size, version)
 }
@@ -168,6 +170,11 @@ func ReadNeedleBlob(r backend.BackendStorageFile, offset int64, size Size, versi
 func (n *Needle) ReadBytes(bytes []byte, offset int64, size Size, version Version) (err error) {
 	n.ParseNeedleHeader(bytes)
 	if n.Size != size {
+		// cookie is not always passed in for this API. Use size to do preliminary checking.
+		if OffsetSize == 4 && offset < int64(MaxPossibleVolumeSize) {
+			glog.Errorf("entry not found1: offset %d found id %x size %d, expected size %d", offset, n.Id, n.Size, size)
+			return ErrorSizeMismatch
+		}
 		return fmt.Errorf("entry not found: offset %d found id %x size %d, expected size %d", offset, n.Id, n.Size, size)
 	}
 	switch version {

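Why the preliminary check in ReadBytes matters: with 4-byte offsets the index records needle positions in 8-byte units, so it can only address roughly 32GiB of .dat file (the limit named in the commit title). An entry written past that boundary ends up with a wrapped, truncated offset, and a read at the recorded position lands on an unrelated needle whose size does not match. Below is a minimal sketch of that wraparound arithmetic; the constant names and values are assumptions for illustration, not the SeaweedFS definitions.

package main

import "fmt"

// Assumed layout, mirroring the 32GB limit in the commit title: offsets are
// recorded in 8-byte units, so a 4-byte offset tops out at 4Gi * 8 = 32GiB.
const (
	needlePaddingSize     = 8
	maxPossibleVolumeSize = 4 * 1024 * 1024 * 1024 * needlePaddingSize // 32GiB
)

// recordedOffset simulates what a wrapped 4-byte index entry would hold for a
// needle that physically lives at actualOffset in a .dat file beyond 32GiB.
func recordedOffset(actualOffset int64) int64 {
	return actualOffset % maxPossibleVolumeSize // the high bits are lost
}

func main() {
	actual := int64(maxPossibleVolumeSize) + 4096 // just past the boundary
	stored := recordedOffset(actual)
	fmt.Printf("actual=%d stored=%d\n", actual, stored)
	// A read at `stored` finds some other needle, so the size check fails;
	// adding maxPossibleVolumeSize back points at the real entry again.
	fmt.Printf("retry at=%d\n", stored+maxPossibleVolumeSize)
}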
weed/storage/volume_read_write.go (+4)

@@ -17,6 +17,7 @@ import (
 
 var ErrorNotFound = errors.New("not found")
 var ErrorDeleted = errors.New("already deleted")
+var ErrorSizeMismatch = errors.New("size mismatch")
 
 // isFileUnchanged checks whether this needle to write is same as last one.
 // It requires serialized access in the same volume.
@@ -274,6 +275,9 @@ func (v *Volume) readNeedle(n *needle.Needle, readOption *ReadOption) (int, erro
 		return 0, nil
 	}
 	err := n.ReadData(v.DataBackend, nv.Offset.ToAcutalOffset(), readSize, v.Version())
+	if err == needle.ErrorSizeMismatch && OffsetSize == 4 {
+		err = n.ReadData(v.DataBackend, nv.Offset.ToAcutalOffset()+int64(MaxPossibleVolumeSize), readSize, v.Version())
+	}
 	if err != nil {
 		return 0, err
 	}

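On the read path, readNeedle turns that sentinel error into a second attempt one volume-span higher, where the data behind a wrapped 4-byte offset would actually sit. A hedged sketch of the same retry pattern follows; the reader callback and names here are hypothetical stand-ins, not the SeaweedFS Needle.ReadData API.

package main

import (
	"errors"
	"fmt"
)

// Stand-ins for needle.ErrorSizeMismatch and MaxPossibleVolumeSize; the names
// and values are assumptions for this sketch, not the library's definitions.
var errSizeMismatch = errors.New("size mismatch")

const maxPossibleVolumeSize = int64(4 * 1024 * 1024 * 1024 * 8) // 32GiB

// readFn is a hypothetical stand-in for reading a needle from the backend file.
type readFn func(offset int64) error

// readWithWrapRetry mirrors the commit's pattern: read at the recorded offset
// first and, on a size mismatch with 4-byte offsets, retry exactly one
// volume-span higher, where a wrapped offset's real data would live.
func readWithWrapRetry(read readFn, offset int64, offsetSize int) error {
	err := read(offset)
	if errors.Is(err, errSizeMismatch) && offsetSize == 4 {
		err = read(offset + maxPossibleVolumeSize)
	}
	return err
}

func main() {
	// Simulated backend: the needle really sits just past the 32GiB boundary.
	realOffset := maxPossibleVolumeSize + 4096
	read := func(offset int64) error {
		if offset != realOffset {
			return errSizeMismatch
		}
		return nil
	}
	fmt.Println(readWithWrapRetry(read, 4096, 4)) // prints <nil>: retry succeeds
}

The retry is gated on OffsetSize == 4, presumably because builds with larger offsets can address far beyond 32GiB and the wrap does not occur at that boundary.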