
go fmt

pull/2252/head
Chris Lu, 3 years ago
commit c5f38c365d
12 changed files (changed lines in parentheses):

  1. weed/command/filer_backup.go (4)
  2. weed/command/filer_meta_backup.go (2)
  3. weed/command/filer_remote_sync.go (6)
  4. weed/command/filer_sync.go (4)
  5. weed/command/imports.go (3)
  6. weed/filer/filer_remote_storage.go (2)
  7. weed/filer/filer_remote_storage_test.go (2)
  8. weed/filer/read_remote.go (2)
  9. weed/remote_storage/s3/s3_storage_client.go (10)
  10. weed/server/master_server.go (4)
  11. weed/server/volume_grpc_remote.go (10)
  12. weed/shell/command_volume_balance_test.go (2)

weed/command/filer_backup.go (4 lines changed)

@@ -107,12 +107,12 @@ func doFilerBackup(grpcDialOption grpc.DialOption, backupOption *FilerBackupOpti
     processEventFn := genProcessFunction(sourcePath, targetPath, dataSink, debug)
-    processEventFnWithOffset := pb.AddOffsetFunc(processEventFn, 3 * time.Second, func(counter int64, lastTsNs int64) error {
+    processEventFnWithOffset := pb.AddOffsetFunc(processEventFn, 3*time.Second, func(counter int64, lastTsNs int64) error {
         glog.V(0).Infof("backup %s progressed to %v %0.2f/sec", sourceFiler, time.Unix(0, lastTsNs), float64(counter)/float64(3))
         return setOffset(grpcDialOption, sourceFiler, BackupKeyPrefix, int32(sinkId), lastTsNs)
     })
-    return pb.FollowMetadata(sourceFiler, grpcDialOption, "backup_" + dataSink.GetName(),
+    return pb.FollowMetadata(sourceFiler, grpcDialOption, "backup_"+dataSink.GetName(),
         sourcePath, startFrom.UnixNano(), 0, processEventFnWithOffset, false)
 }
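Both edits in this hunk are ordinary gofmt output: inside a call that takes several arguments, gofmt drops the blanks around binary operators such as * and +, while the same expressions keep their blanks at statement level. A small runnable illustration; the notify helper is made up for the example and is not SeaweedFS code:

package main

import (
    "fmt"
    "time"
)

// notify stands in for multi-argument calls like pb.AddOffsetFunc above.
func notify(prefix string, interval time.Duration) {
    fmt.Println(prefix, interval)
}

func main() {
    interval := 3 * time.Second // statement level: gofmt keeps the blanks
    fmt.Println(interval)

    // As arguments of a multi-argument call, gofmt tightens the same
    // expressions, which is exactly the change in the two lines above.
    notify("backup_"+"local", 3*time.Second)
}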

weed/command/filer_meta_backup.go (2 lines changed)

@@ -189,7 +189,7 @@ func (metaBackup *FilerMetaBackupOptions) streamMetadataBackup() error {
         return nil
     }
-    processEventFnWithOffset := pb.AddOffsetFunc(eachEntryFunc, 3 * time.Second, func(counter int64, lastTsNs int64) error {
+    processEventFnWithOffset := pb.AddOffsetFunc(eachEntryFunc, 3*time.Second, func(counter int64, lastTsNs int64) error {
         lastTime := time.Unix(0, lastTsNs)
         glog.V(0).Infof("meta backup %s progressed to %v %0.2f/sec", *metaBackup.filerAddress, lastTime, float64(counter)/float64(3))
         return metaBackup.setOffset(lastTime)
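For context, the wrapper returned by pb.AddOffsetFunc batches offset persistence: events are counted, and the %0.2f/sec figure logged above is simply that counter divided by the 3-second flush window. A rough sketch of the pattern with simplified types, not SeaweedFS's actual signatures:

package pbsketch

import "time"

// ProcessFunc is a simplified stand-in for the per-event callback type.
type ProcessFunc func(tsNs int64) error

// AddOffsetFunc (sketch) runs fn for every event and calls saveOffset at most
// once per flushInterval with the event count and the latest timestamp seen.
func AddOffsetFunc(fn ProcessFunc, flushInterval time.Duration,
    saveOffset func(counter int64, lastTsNs int64) error) ProcessFunc {

    var counter, lastTsNs int64
    lastFlush := time.Now()

    return func(tsNs int64) error {
        if err := fn(tsNs); err != nil {
            return err
        }
        counter++
        lastTsNs = tsNs
        if time.Since(lastFlush) >= flushInterval {
            if err := saveOffset(counter, lastTsNs); err != nil {
                return err
            }
            counter = 0
            lastFlush = time.Now()
        }
        return nil
    }
}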

weed/command/filer_remote_sync.go (6 lines changed)

@@ -252,9 +252,9 @@ func updateLocalEntry(filerClient filer_pb.FilerClient, dir string, entry *filer
     entry.RemoteEntry = remoteEntry
     return filerClient.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
         _, err := client.UpdateEntry(context.Background(), &filer_pb.UpdateEntryRequest{
-            Directory: dir,
-            Entry: entry,
+            Directory: dir,
+            Entry:     entry,
         })
         return err
     })
-}
+}
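Only whitespace changes here: gofmt pads the values in a run of composite-literal fields so they start in the same column, the same rule behind the const block in master_server.go and the struct literals in volume_grpc_remote.go further down. A self-contained illustration with stand-in types, not the filer_pb definitions:

package main

import "fmt"

// Entry and UpdateEntryRequest are stand-ins for the filer_pb types.
type Entry struct{ Name string }

type UpdateEntryRequest struct {
    Directory string
    Entry     *Entry
}

func main() {
    // gofmt rewrites `Entry: entry,` as `Entry:     entry,` so that both
    // values in this run of fields begin in the same column.
    req := &UpdateEntryRequest{
        Directory: "/buckets/b1",
        Entry:     &Entry{Name: "file.txt"},
    }
    fmt.Println(req.Directory, req.Entry.Name)
}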

weed/command/filer_sync.go (4 lines changed)

@@ -165,12 +165,12 @@ func doSubscribeFilerMetaChanges(grpcDialOption grpc.DialOption, sourceFiler, so
         return persistEventFn(resp)
     }
-    processEventFnWithOffset := pb.AddOffsetFunc(processEventFn, 3 * time.Second, func(counter int64, lastTsNs int64) error {
+    processEventFnWithOffset := pb.AddOffsetFunc(processEventFn, 3*time.Second, func(counter int64, lastTsNs int64) error {
         glog.V(0).Infof("sync %s to %s progressed to %v %0.2f/sec", sourceFiler, targetFiler, time.Unix(0, lastTsNs), float64(counter)/float64(3))
         return setOffset(grpcDialOption, targetFiler, SyncKeyPrefix, sourceFilerSignature, lastTsNs)
     })
-    return pb.FollowMetadata(sourceFiler, grpcDialOption, "syncTo_" + targetFiler,
+    return pb.FollowMetadata(sourceFiler, grpcDialOption, "syncTo_"+targetFiler,
         sourcePath, sourceFilerOffsetTsNs, targetFilerSignature, processEventFnWithOffset, false)
 }

weed/command/imports.go (3 lines changed)

@@ -27,5 +27,4 @@ import (
     _ "github.com/chrislusf/seaweedfs/weed/filer/redis"
     _ "github.com/chrislusf/seaweedfs/weed/filer/redis2"
     _ "github.com/chrislusf/seaweedfs/weed/filer/sqlite"
-)
+)
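These blank imports exist purely for their init side effects: each filer store package registers itself when imported, and remote_storage.GetRemoteStorage in volume_grpc_remote.go below resolves clients from a registry filled the same way. A generic sketch of the registration pattern; the Register and Store names are illustrative, not the actual SeaweedFS API:

// Package store sketches the driver-registration pattern that blank imports
// rely on; names here are illustrative only.
package store

type Store interface {
    Name() string
}

var registry = map[string]func() Store{}

// Register is called from each driver package's init function.
func Register(name string, factory func() Store) {
    registry[name] = factory
}

// A driver package would register itself like this:
//
//    func init() {
//        store.Register("sqlite", func() store.Store { return &SqliteStore{} })
//    }
//
// and the program only needs a blank import to make it available:
//
//    import _ "github.com/chrislusf/seaweedfs/weed/filer/sqlite"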

weed/filer/filer_remote_storage.go (2 lines changed)

@@ -160,8 +160,6 @@ func RemoveRemoteStorageMapping(oldContent []byte, dir string) (newContent []byt
     return
 }
 func ReadMountMappings(grpcDialOption grpc.DialOption, filerAddress string) (mappings *filer_pb.RemoteStorageMapping, readErr error) {
-    var oldContent []byte
     if readErr = pb.WithFilerClient(filerAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {

weed/filer/filer_remote_storage_test.go (2 lines changed)

@@ -31,4 +31,4 @@ func TestFilerRemoteStorage_FindRemoteStorageClient(t *testing.T) {
     _, _, found4 := rs.FindRemoteStorageClient("/a/b/cc")
     assert.Equal(t, false, found4, "should not find storage client")
-}
+}

weed/filer/read_remote.go (2 lines changed)

@@ -9,7 +9,7 @@ func (entry *Entry) IsInRemoteOnly() bool {
     return len(entry.Chunks) == 0 && entry.Remote != nil && entry.Remote.Size > 0
 }
-func (f *Filer) ReadRemote(entry *Entry, offset int64, size int64) (data[]byte, err error) {
+func (f *Filer) ReadRemote(entry *Entry, offset int64, size int64) (data []byte, err error) {
     client, _, found := f.RemoteStorage.GetRemoteStorageClient(entry.Remote.StorageName)
     if !found {
         return nil, fmt.Errorf("remote storage %v not found", entry.Remote.StorageName)
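The visible fix is the spacing in the named result list: data []byte, err error declares named return values, written as name, a single blank, then the type. A tiny hypothetical example of the same form, unrelated to the Filer type:

package example

// readAll uses the same named-result form as ReadRemote above: data and err
// are named results, each declared as `name type`.
func readAll(size int64) (data []byte, err error) {
    data = make([]byte, size)
    return data, err
}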

weed/remote_storage/s3/s3_storage_client.go (10 lines changed)

@@ -174,13 +174,13 @@ func toTagging(attributes map[string][]byte) *s3.Tagging {
 func (s *s3RemoteStorageClient) readFileRemoteEntry(loc *filer_pb.RemoteStorageLocation) (*filer_pb.RemoteEntry, error) {
     resp, err := s.conn.HeadObject(&s3.HeadObjectInput{
-        Bucket: aws.String(loc.Bucket),
-        Key: aws.String(loc.Path[1:]),
+        Bucket: aws.String(loc.Bucket),
+        Key:    aws.String(loc.Path[1:]),
     })
     if err != nil {
         return nil, err
     }
     return &filer_pb.RemoteEntry{
         LastModifiedAt: resp.LastModified.Unix(),
         Size: *resp.ContentLength,

@@ -200,8 +200,8 @@ func (s *s3RemoteStorageClient) UpdateFileMetadata(loc *filer_pb.RemoteStorageLo
         })
     } else {
         _, err = s.conn.DeleteObjectTagging(&s3.DeleteObjectTaggingInput{
-            Bucket: aws.String(loc.Bucket),
-            Key: aws.String(loc.Path[1:]),
+            Bucket: aws.String(loc.Bucket),
+            Key:    aws.String(loc.Path[1:]),
         })
     }
     return
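Behaviour is unchanged here: readFileRemoteEntry still issues a HeadObject request and copies the object's size and modification time into a filer_pb.RemoteEntry. A standalone sketch of that lookup against the aws-sdk-go v1 API; the region, bucket, and key are placeholders rather than values taken from the remote storage configuration:

package main

import (
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/s3"
)

func main() {
    // Placeholder region; real code would build the session from the
    // configured remote storage credentials and endpoint.
    sess, err := session.NewSession(&aws.Config{Region: aws.String("us-east-1")})
    if err != nil {
        log.Fatal(err)
    }
    conn := s3.New(sess)

    resp, err := conn.HeadObject(&s3.HeadObjectInput{
        Bucket: aws.String("example-bucket"),
        Key:    aws.String("path/to/object"), // no leading "/", hence loc.Path[1:] above
    })
    if err != nil {
        log.Fatal(err)
    }

    // The same two fields the SeaweedFS code copies into filer_pb.RemoteEntry.
    fmt.Println("size:", *resp.ContentLength)
    fmt.Println("modified:", resp.LastModified.Unix())
}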

weed/server/master_server.go (4 lines changed)

@@ -26,8 +26,8 @@ import (
 )
 const (
-    SequencerType = "master.sequencer.type"
-    SequencerEtcdUrls = "master.sequencer.sequencer_etcd_urls"
+    SequencerType        = "master.sequencer.type"
+    SequencerEtcdUrls    = "master.sequencer.sequencer_etcd_urls"
     SequencerSnowflakeId = "master.sequencer.sequencer_snowflake_id"
 )

weed/server/volume_grpc_remote.go (10 lines changed)

@@ -18,16 +18,16 @@ func (vs *VolumeServer) FetchAndWriteNeedle(ctx context.Context, req *volume_ser
     }
     remoteConf := &filer_pb.RemoteConf{
-        Type: req.RemoteType,
-        Name: req.RemoteName,
+        Type:        req.RemoteType,
+        Name:        req.RemoteName,
         S3AccessKey: req.S3AccessKey,
         S3SecretKey: req.S3SecretKey,
-        S3Region: req.S3Region,
-        S3Endpoint: req.S3Endpoint,
+        S3Region:    req.S3Region,
+        S3Endpoint:  req.S3Endpoint,
     }
     client, getClientErr := remote_storage.GetRemoteStorage(remoteConf)
-    if getClientErr != nil {
+    if getClientErr != nil {
         return nil, fmt.Errorf("get remote client: %v", getClientErr)
     }

weed/shell/command_volume_balance_test.go (2 lines changed)

@@ -187,7 +187,7 @@ func TestBalance(t *testing.T) {
 func TestVolumeSelection(t *testing.T) {
     topologyInfo := parseOutput(topoData)
-    vids, err := collectVolumeIdsForTierChange(nil, topologyInfo, 1000, types.ToDiskType("hdd"), "", 20.0, 0);
+    vids, err := collectVolumeIdsForTierChange(nil, topologyInfo, 1000, types.ToDiskType("hdd"), "", 20.0, 0)
     if err != nil {
         t.Errorf("collectVolumeIdsForTierChange: %v", err)
     }
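The change here is gofmt stripping the redundant trailing semicolon from the collectVolumeIdsForTierChange call; Go only needs explicit semicolons inside for clauses and when several statements share a line. A tiny example of what gofmt keeps versus what it removes:

package main

import "fmt"

func main() {
    total := 0
    for i := 0; i < 3; i++ { // semicolons in the for clause are required and kept
        total += i // a trailing semicolon here would be dropped by gofmt
    }
    fmt.Println(total)
}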
