|
@@ -127,6 +127,7 @@ func (s3sink *S3Sink) CreateEntry(key string, entry *filer_pb.Entry, signatures
 
 	parts := make([]*s3.CompletedPart, len(chunkViews))
 
+	if len(parts) > 0 {
 		var wg sync.WaitGroup
 		for chunkIndex, chunk := range chunkViews {
 			partId := chunkIndex + 1
@@ -142,14 +143,13 @@ func (s3sink *S3Sink) CreateEntry(key string, entry *filer_pb.Entry, signatures
 			}(chunk, chunkIndex)
 		}
 		wg.Wait()
-
+	} else if len(entry.Content) > 0 {
 		// for small files
-	if len(entry.Content) > 0 {
-		parts = make([]*s3.CompletedPart, 1)
 		if part, uploadErr := s3sink.doUploadPart(key, uploadId, 1, bytes.NewReader(entry.Content)); uploadErr != nil {
 			err = uploadErr
 			glog.Errorf("uploadPart: %v", uploadErr)
 		} else {
+			parts = make([]*s3.CompletedPart, 1)
 			parts[0] = part
 		}
 	}
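The effect of the change, read straight off the diff: the chunk-based multipart loop and the inline-content path are now mutually exclusive branches of one `if`/`else if`, and the one-element `parts` slice for a small file is only allocated after its single part uploads successfully, so a failed upload no longer leaves a `[nil]` entry in `parts`. Below is a minimal, self-contained sketch of the resulting control flow; `buildParts`, `uploadPart`, and `completedPart` are hypothetical stand-ins for the real `S3Sink` methods and `*s3.CompletedPart`, not the actual implementation (the sketch also guards the shared `err` with a mutex, which the excerpt above does not show).

```go
package main

import (
	"fmt"
	"sync"
)

type completedPart struct{ partNumber int }

// uploadPart is a hypothetical stand-in for the real per-part S3 upload call.
func uploadPart(partId int) (*completedPart, error) {
	return &completedPart{partNumber: partId}, nil
}

func buildParts(chunkCount int, inlineContent []byte) ([]*completedPart, error) {
	parts := make([]*completedPart, chunkCount)
	var (
		mu  sync.Mutex // guards err across the upload goroutines
		err error
	)

	if len(parts) > 0 {
		// Chunked file: upload every chunk concurrently as its own part.
		var wg sync.WaitGroup
		for chunkIndex := range parts {
			wg.Add(1)
			go func(index, partId int) {
				defer wg.Done()
				if part, uploadErr := uploadPart(partId); uploadErr != nil {
					mu.Lock()
					err = uploadErr
					mu.Unlock()
				} else {
					parts[index] = part
				}
			}(chunkIndex, chunkIndex+1)
		}
		wg.Wait()
	} else if len(inlineContent) > 0 {
		// Small file stored inline in the entry: a single part, with the
		// one-element slice allocated only once the upload succeeds, so a
		// failure leaves parts empty rather than containing a nil part.
		if part, uploadErr := uploadPart(1); uploadErr != nil {
			err = uploadErr
		} else {
			parts = make([]*completedPart, 1)
			parts[0] = part
		}
	}
	return parts, err
}

func main() {
	parts, err := buildParts(0, []byte("inline content"))
	fmt.Printf("parts=%d err=%v\n", len(parts), err) // parts=1 err=<nil>
}
```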
|
|