Browse Source

refactoring

pull/1153/head
Chris Lu 5 years ago
parent
commit
10bd3c6b4b
  1. 2
      weed/server/volume_grpc_tier.go
  2. 2
      weed/storage/backend/backend.go
  3. 6
      weed/storage/backend/disk_file.go
  4. 6
      weed/storage/backend/memory_map/memory_map_backend.go
  5. 3
      weed/storage/backend/s3_backend/s3_backend.go
  6. 2
      weed/storage/needle/needle_read_write.go
  7. 2
      weed/storage/volume.go
  8. 6
      weed/storage/volume_read_write.go
  9. 8
      weed/storage/volume_super_block.go

2
weed/server/volume_grpc_tier.go

@@ -64,7 +64,7 @@ func (vs *VolumeServer) VolumeTierCopyDatToRemote(req *volume_server_pb.VolumeTi
// copy the data file
key, size, err := backendStorage.CopyFile(diskFile.File, fn)
if err != nil {
return fmt.Errorf("backend %s copy file %s: %v", req.DestinationBackendName, diskFile.String(), err)
return fmt.Errorf("backend %s copy file %s: %v", req.DestinationBackendName, diskFile.Name(), err)
}
// save the remote file to volume tier info

2
weed/storage/backend/backend.go

@@ -18,7 +18,7 @@ type BackendStorageFile interface {
Truncate(off int64) error
io.Closer
GetStat() (datSize int64, modTime time.Time, err error)
String() string
Name() string
}
type BackendStorage interface {

6
weed/storage/backend/disk_file.go

@@ -45,10 +45,6 @@ func (df *DiskFile) GetStat() (datSize int64, modTime time.Time, err error) {
return 0, time.Time{}, err
}
func (df *DiskFile) String() string {
func (df *DiskFile) Name() string {
return df.fullFilePath
}
func (df *DiskFile) Instantiate(src *os.File) error {
panic("should not implement Instantiate for DiskFile")
}

6
weed/storage/backend/memory_map/memory_map_backend.go

@@ -55,10 +55,6 @@ func (mmf *MemoryMappedFile) GetStat() (datSize int64, modTime time.Time, err er
return 0, time.Time{}, err
}
func (mmf *MemoryMappedFile) String() string {
func (mmf *MemoryMappedFile) Name() string {
return mmf.mm.File.Name()
}
func (mmf *MemoryMappedFile) Instantiate(src *os.File) error {
panic("should not implement Instantiate for MemoryMappedFile")
}

3
weed/storage/backend/s3_backend/s3_backend.go

@@ -153,7 +153,6 @@ func (s3backendStorageFile S3BackendStorageFile) GetStat() (datSize int64, modTi
return
}
func (s3backendStorageFile S3BackendStorageFile) String() string {
func (s3backendStorageFile S3BackendStorageFile) Name() string {
return s3backendStorageFile.key
}

2
weed/storage/needle/needle_read_write.go

@@ -131,7 +131,7 @@ func (n *Needle) Append(w backend.BackendStorageFile, version Version) (offset u
defer func(w backend.BackendStorageFile, off int64) {
if err != nil {
if te := w.Truncate(end); te != nil {
glog.V(0).Infof("Failed to truncate %s back to %d with error: %v", w.String(), end, te)
glog.V(0).Infof("Failed to truncate %s back to %d with error: %v", w.Name(), end, te)
}
}
}(w, end)

2
weed/storage/volume.go

@@ -83,7 +83,7 @@ func (v *Volume) FileStat() (datSize uint64, idxSize uint64, modTime time.Time)
if e == nil {
return uint64(datFileSize), v.nm.IndexFileSize(), modTime
}
glog.V(0).Infof("Failed to read file size %s %v", v.DataBackend.String(), e)
glog.V(0).Infof("Failed to read file size %s %v", v.DataBackend.Name(), e)
return // -1 causes integer overflow and the volume to become unwritable.
}

6
weed/storage/volume_read_write.go

@@ -59,7 +59,7 @@ func (v *Volume) Destroy() (err error) {
func (v *Volume) writeNeedle(n *needle.Needle) (offset uint64, size uint32, isUnchanged bool, err error) {
glog.V(4).Infof("writing needle %s", needle.NewFileIdFromNeedle(v.Id, n).String())
if v.readOnly {
err = fmt.Errorf("%s is read-only", v.DataBackend.String())
err = fmt.Errorf("%s is read-only", v.DataBackend.Name())
return
}
v.dataFileAccessLock.Lock()
@@ -112,7 +112,7 @@ func (v *Volume) writeNeedle(n *needle.Needle) (offset uint64, size uint32, isUn
func (v *Volume) deleteNeedle(n *needle.Needle) (uint32, error) {
glog.V(4).Infof("delete needle %s", needle.NewFileIdFromNeedle(v.Id, n).String())
if v.readOnly {
return 0, fmt.Errorf("%s is read-only", v.DataBackend.String())
return 0, fmt.Errorf("%s is read-only", v.DataBackend.Name())
}
v.dataFileAccessLock.Lock()
defer v.dataFileAccessLock.Unlock()
@@ -202,7 +202,7 @@ func ScanVolumeFileFrom(version needle.Version, datBackend backend.BackendStorag
if e == io.EOF {
return nil
}
return fmt.Errorf("cannot read %s at offset %d: %v", datBackend.String(), offset, e)
return fmt.Errorf("cannot read %s at offset %d: %v", datBackend.Name(), offset, e)
}
for n != nil {
var needleBody []byte

8
weed/storage/volume_super_block.go

@@ -78,7 +78,7 @@ func (v *Volume) maybeWriteSuperBlock() error {
datSize, _, e := v.DataBackend.GetStat()
if e != nil {
glog.V(0).Infof("failed to stat datafile %s: %v", v.DataBackend.String(), e)
glog.V(0).Infof("failed to stat datafile %s: %v", v.DataBackend.Name(), e)
return e
}
if datSize == 0 {
@@ -87,7 +87,7 @@ func (v *Volume) maybeWriteSuperBlock() error {
if e != nil && os.IsPermission(e) {
//read-only, but zero length - recreate it!
var dataFile *os.File
if dataFile, e = os.Create(v.DataBackend.String()); e == nil {
if dataFile, e = os.Create(v.DataBackend.Name()); e == nil {
v.DataBackend = backend.NewDiskFile(dataFile)
if _, e = v.DataBackend.WriteAt(v.SuperBlock.Bytes(), 0); e == nil {
v.readOnly = false
@@ -108,7 +108,7 @@ func ReadSuperBlock(datBackend backend.BackendStorageFile) (superBlock SuperBloc
header := make([]byte, _SuperBlockSize)
if _, e := datBackend.ReadAt(header, 0); e != nil {
err = fmt.Errorf("cannot read volume %s super block: %v", datBackend.String(), e)
err = fmt.Errorf("cannot read volume %s super block: %v", datBackend.Name(), e)
return
}
@@ -127,7 +127,7 @@ func ReadSuperBlock(datBackend backend.BackendStorageFile) (superBlock SuperBloc
superBlock.Extra = &master_pb.SuperBlockExtra{}
err = proto.Unmarshal(extraData, superBlock.Extra)
if err != nil {
err = fmt.Errorf("cannot read volume %s super block extra: %v", datBackend.String(), err)
err = fmt.Errorf("cannot read volume %s super block extra: %v", datBackend.Name(), err)
return
}
}

Loading…
Cancel
Save