
filer.backup: backup small files if the file is saved in filer (saveToFilerLimit > 0)

fix https://github.com/seaweedfs/seaweedfs/issues/3468

Branch: pull/3472/head
Author: chrislu, 2 years ago
Commit: 11f99836c3

Changed files:

  1. weed/replication/sink/azuresink/azure_sink.go (4 lines changed)
  2. weed/replication/sink/b2sink/b2_sink.go (5 lines changed)
  3. weed/replication/sink/gcssink/gcs_sink.go (4 lines changed)
  4. weed/replication/sink/localsink/local_sink.go (4 lines changed)
  5. weed/replication/sink/s3sink/s3_sink.go (13 lines changed)
  6. weed/replication/sink/s3sink/s3_write.go (5 lines changed)
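
For context, when the filer runs with saveToFilerLimit > 0, sufficiently small files are stored inline in the entry's Content field rather than as chunks on volume servers, so the replication sinks see entries whose chunk views are empty. The sketch below is a simplified, self-contained illustration of the branch this commit adds to the azure/b2/gcs/local sinks; the Entry type, replicate function, and callbacks are stand-ins for illustration, not the real filer_pb.Entry or sink code.

package main

import "fmt"

// Entry stands in for the two filer_pb.Entry fields that matter here.
type Entry struct {
    Content []byte   // inline bytes, set when the file was saved in the filer itself
    Chunks  []string // placeholder for the chunk list of larger files
}

// replicate sketches the new branch: inline content is written directly,
// everything else is still streamed from the chunk views.
func replicate(entry *Entry, writeFunc func([]byte) error, copyChunks func() error) error {
    if len(entry.Content) > 0 {
        return writeFunc(entry.Content)
    }
    return copyChunks()
}

func main() {
    small := &Entry{Content: []byte("hello")} // file kept inline in the filer
    err := replicate(small,
        func(b []byte) error { fmt.Printf("wrote %d inline bytes\n", len(b)); return nil },
        func() error { fmt.Println("copied chunk views"); return nil },
    )
    fmt.Println("err:", err)
}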

weed/replication/sink/azuresink/azure_sink.go (4 lines changed)

@@ -119,6 +119,10 @@ func (g *AzureSink) CreateEntry(key string, entry *filer_pb.Entry, signatures []
         return writeErr
     }
 
+    if len(entry.Content) > 0 {
+        return writeFunc(entry.Content)
+    }
+
     if err := repl_util.CopyFromChunkViews(chunkViews, g.filerSource, writeFunc); err != nil {
         return err
     }

weed/replication/sink/b2sink/b2_sink.go (5 lines changed)

@@ -101,13 +101,16 @@ func (g *B2Sink) CreateEntry(key string, entry *filer_pb.Entry, signatures []int
     targetObject := bucket.Object(key)
 
     writer := targetObject.NewWriter(context.Background())
+    defer writer.Close()
 
     writeFunc := func(data []byte) error {
         _, writeErr := writer.Write(data)
         return writeErr
     }
 
-    defer writer.Close()
+    if len(entry.Content) > 0 {
+        return writeFunc(entry.Content)
+    }
 
     if err := repl_util.CopyFromChunkViews(chunkViews, g.filerSource, writeFunc); err != nil {
         return err

weed/replication/sink/gcssink/gcs_sink.go (4 lines changed)

@@ -107,6 +107,10 @@ func (g *GcsSink) CreateEntry(key string, entry *filer_pb.Entry, signatures []in
         return writeErr
     }
 
+    if len(entry.Content) > 0 {
+        return writeFunc(entry.Content)
+    }
+
     if err := repl_util.CopyFromChunkViews(chunkViews, g.filerSource, writeFunc); err != nil {
         return err
     }

weed/replication/sink/localsink/local_sink.go (4 lines changed)

@@ -101,6 +101,10 @@ func (localsink *LocalSink) CreateEntry(key string, entry *filer_pb.Entry, signa
         return writeErr
     }
 
+    if len(entry.Content) > 0 {
+        return writeFunc(entry.Content)
+    }
+
     if err := repl_util.CopyFromChunkViews(chunkViews, localsink.filerSource, writeFunc); err != nil {
         return err
     }

weed/replication/sink/s3sink/s3_sink.go (13 lines changed)

@@ -1,6 +1,7 @@
 package S3Sink
 
 import (
+    "bytes"
     "context"
     "fmt"
     "strings"

@@ -121,6 +122,7 @@ func (s3sink *S3Sink) CreateEntry(key string, entry *filer_pb.Entry, signatures
     }
 
     totalSize := filer.FileSize(entry)
     chunkViews := filer.ViewFromChunks(s3sink.filerSource.LookupFileId, entry.Chunks, 0, int64(totalSize))
 
     parts := make([]*s3.CompletedPart, len(chunkViews))

@@ -141,6 +143,17 @@ func (s3sink *S3Sink) CreateEntry(key string, entry *filer_pb.Entry, signatures
     }
     wg.Wait()
 
+    // for small files
+    if len(entry.Content) > 0 {
+        parts = make([]*s3.CompletedPart, 1)
+        if part, uploadErr := s3sink.doUploadPart(key, uploadId, 1, bytes.NewReader(entry.Content)); uploadErr != nil {
+            err = uploadErr
+            glog.Errorf("uploadPart: %v", uploadErr)
+        } else {
+            parts[0] = part
+        }
+    }
+
     if err != nil {
         s3sink.abortMultipartUpload(key, uploadId)
         return fmt.Errorf("uploadPart: %v", err)
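
The S3 sink cannot use the plain writeFunc pattern above because it replicates through a multipart upload, so the inline content is instead uploaded as a single part (part 1) via the new doUploadPart helper split out in s3_write.go below; bytes.NewReader is what adapts entry.Content, a plain []byte, to the io.ReadSeeker that the AWS SDK's UploadPartInput.Body expects. A minimal sketch of that adaptation with a stand-in upload function (not the real s3sink.doUploadPart):

package main

import (
    "bytes"
    "fmt"
    "io"
)

// uploadPart stands in for s3sink.doUploadPart; the real method wraps the AWS
// SDK's UploadPart call, whose Body field is an io.ReadSeeker so the payload
// can be re-read (for example on retries).
func uploadPart(partId int, body io.ReadSeeker) error {
    n, err := io.Copy(io.Discard, body) // pretend to send the bytes
    if err != nil {
        return err
    }
    fmt.Printf("uploaded part %d (%d bytes)\n", partId, n)
    return nil
}

func main() {
    content := []byte("small file kept inline in the filer entry")
    // bytes.NewReader turns the inline []byte into an io.ReadSeeker, matching
    // how the commit feeds entry.Content to doUploadPart as the only part.
    if err := uploadPart(1, bytes.NewReader(content)); err != nil {
        fmt.Println("upload failed:", err)
    }
}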

weed/replication/sink/s3sink/s3_write.go (5 lines changed)

@@ -116,6 +116,11 @@ func (s3sink *S3Sink) uploadPart(key, uploadId string, partId int, chunk *filer.
         return nil, fmt.Errorf("[%s] uploadPart %s %d read: %v", s3sink.bucket, key, partId, err)
     }
 
+    return s3sink.doUploadPart(key, uploadId, partId, readSeeker)
+}
+
+func (s3sink *S3Sink) doUploadPart(key, uploadId string, partId int, readSeeker io.ReadSeeker) (*s3.CompletedPart, error) {
     input := &s3.UploadPartInput{
         Body:   readSeeker,
         Bucket: aws.String(s3sink.bucket),
