package s3api

import (
	"fmt"
	"path/filepath"
	"strconv"
	"strings"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
	"github.com/chrislusf/seaweedfs/weed/filer2"
	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"github.com/satori/go.uuid"
)

// InitiateMultipartUploadResult is the response body for CreateMultipartUpload.
type InitiateMultipartUploadResult struct {
	s3.CreateMultipartUploadOutput
}

// createMultipartUpload generates a new upload id, creates a folder for it under
// the bucket's uploads directory, and records the object key in the folder
// entry's extended attributes so it can be reported by listMultipartUploads.
func (s3a *S3ApiServer) createMultipartUpload(input *s3.CreateMultipartUploadInput) (output *InitiateMultipartUploadResult, code ErrorCode) {
	uploadId, _ := uuid.NewV4()
	uploadIdString := uploadId.String()

	if err := s3a.mkdir(s3a.genUploadsFolder(*input.Bucket), uploadIdString, func(entry *filer_pb.Entry) {
		if entry.Extended == nil {
			entry.Extended = make(map[string][]byte)
		}
		entry.Extended["key"] = []byte(*input.Key)
	}); err != nil {
		glog.Errorf("NewMultipartUpload error: %v", err)
		return nil, ErrInternalError
	}

	output = &InitiateMultipartUploadResult{
		s3.CreateMultipartUploadOutput{
			Bucket:   input.Bucket,
			Key:      input.Key,
			UploadId: aws.String(uploadIdString),
		},
	}

	return
}

// CompleteMultipartUploadResult is the response body for CompleteMultipartUpload.
type CompleteMultipartUploadResult struct {
	s3.CompleteMultipartUploadOutput
}

// completeMultipartUpload concatenates the chunks of all uploaded ".part" entries
// into one final object entry under the bucket, then removes the upload folder.
func (s3a *S3ApiServer) completeMultipartUpload(input *s3.CompleteMultipartUploadInput) (output *CompleteMultipartUploadResult, code ErrorCode) {

	uploadDirectory := s3a.genUploadsFolder(*input.Bucket) + "/" + *input.UploadId

	entries, err := s3a.list(uploadDirectory, "", "", false, 0)
	if err != nil {
		glog.Errorf("completeMultipartUpload %s %s error: %v", *input.Bucket, *input.UploadId, err)
		return nil, ErrNoSuchUpload
	}

	var finalParts []*filer_pb.FileChunk
	var offset int64
	for _, entry := range entries {
		if strings.HasSuffix(entry.Name, ".part") && !entry.IsDirectory {
			for _, chunk := range entry.Chunks {
				// re-base each part's chunks onto the running offset of the final object
				p := &filer_pb.FileChunk{
					FileId: chunk.FileId,
					Offset: offset,
					Size:   chunk.Size,
					Mtime:  chunk.Mtime,
					ETag:   chunk.ETag,
				}
				finalParts = append(finalParts, p)
				offset += int64(chunk.Size)
			}
		}
	}

	entryName := filepath.Base(*input.Key)
	dirName := filepath.Dir(*input.Key)
	if dirName == "." {
		dirName = ""
	}
	if strings.HasPrefix(dirName, "/") {
		dirName = dirName[1:]
	}
	dirName = fmt.Sprintf("%s/%s/%s", s3a.option.BucketsPath, *input.Bucket, dirName)

	err = s3a.mkFile(dirName, entryName, finalParts)
	if err != nil {
		glog.Errorf("completeMultipartUpload %s/%s error: %v", dirName, entryName, err)
		return nil, ErrInternalError
	}

	output = &CompleteMultipartUploadResult{
		s3.CompleteMultipartUploadOutput{
			Bucket: input.Bucket,
			ETag:   aws.String("\"" + filer2.ETag(finalParts) + "\""),
			Key:    input.Key,
		},
	}

	// cleanup is best effort: a failure here is only logged, since the upload already completed
	if err = s3a.rm(s3a.genUploadsFolder(*input.Bucket), *input.UploadId, true, false, true); err != nil {
		glog.V(1).Infof("completeMultipartUpload cleanup %s upload %s: %v", *input.Bucket, *input.UploadId, err)
	}

	return
}

// abortMultipartUpload deletes the upload folder, and any parts already
// uploaded into it, for the given upload id.
func (s3a *S3ApiServer) abortMultipartUpload(input *s3.AbortMultipartUploadInput) (output *s3.AbortMultipartUploadOutput, code ErrorCode) {

	exists, err := s3a.exists(s3a.genUploadsFolder(*input.Bucket), *input.UploadId, true)
	if err != nil {
		glog.V(1).Infof("bucket %s abort upload %s: %v", *input.Bucket, *input.UploadId, err)
		return nil, ErrNoSuchUpload
	}
	if exists {
		err = s3a.rm(s3a.genUploadsFolder(*input.Bucket), *input.UploadId, true, true, true)
	}
	if err != nil {
		glog.V(1).Infof("bucket %s remove upload %s: %v", *input.Bucket, *input.UploadId, err)
		return nil, ErrInternalError
	}

	return &s3.AbortMultipartUploadOutput{}, ErrNone
}

// ListMultipartUploadsResult is the response body for ListMultipartUploads.
type ListMultipartUploadsResult struct {
	s3.ListMultipartUploadsOutput
}

// listMultipartUploads lists the in-progress uploads under the bucket's uploads
// folder; the prefix, key marker, and max uploads parameters are passed through
// to the underlying filer listing.
func (s3a *S3ApiServer) listMultipartUploads(input *s3.ListMultipartUploadsInput) (output *ListMultipartUploadsResult, code ErrorCode) {
	output = &ListMultipartUploadsResult{
		s3.ListMultipartUploadsOutput{
			Bucket:       input.Bucket,
			Delimiter:    input.Delimiter,
			EncodingType: input.EncodingType,
			KeyMarker:    input.KeyMarker,
			MaxUploads:   input.MaxUploads,
			Prefix:       input.Prefix,
		},
	}

	entries, err := s3a.list(s3a.genUploadsFolder(*input.Bucket), *input.Prefix, *input.KeyMarker, true, int(*input.MaxUploads))
	if err != nil {
		glog.Errorf("listMultipartUploads %s error: %v", *input.Bucket, err)
		return
	}

	for _, entry := range entries {
		if entry.Extended != nil {
			// the object key was stored in the entry's extended attributes at creation time
			key := entry.Extended["key"]
			output.Uploads = append(output.Uploads, &s3.MultipartUpload{
				Key:      aws.String(string(key)),
				UploadId: aws.String(entry.Name),
			})
		}
	}

	return
}

// ListPartsResult is the response body for ListParts.
type ListPartsResult struct {
	s3.ListPartsOutput
}

// listObjectParts lists the parts already uploaded for a given upload id by
// scanning the ".part" files in the upload folder.
func (s3a *S3ApiServer) listObjectParts(input *s3.ListPartsInput) (output *ListPartsResult, code ErrorCode) {
	output = &ListPartsResult{
		s3.ListPartsOutput{
			Bucket:           input.Bucket,
			Key:              input.Key,
			UploadId:         input.UploadId,
			MaxParts:         input.MaxParts,         // the maximum number of parts to return
			PartNumberMarker: input.PartNumberMarker, // parts start after this number, exclusive
		},
	}

	entries, err := s3a.list(s3a.genUploadsFolder(*input.Bucket)+"/"+*input.UploadId,
		"", fmt.Sprintf("%04d.part", *input.PartNumberMarker), false, int(*input.MaxParts))
	if err != nil {
		glog.Errorf("listObjectParts %s %s error: %v", *input.Bucket, *input.UploadId, err)
		return nil, ErrNoSuchUpload
	}

	for _, entry := range entries {
		if strings.HasSuffix(entry.Name, ".part") && !entry.IsDirectory {
			// part files are named "%04d.part"; recover the part number from the file name
			partNumberString := entry.Name[:len(entry.Name)-len(".part")]
			partNumber, err := strconv.Atoi(partNumberString)
			if err != nil {
				glog.Errorf("listObjectParts %s %s parse %s: %v", *input.Bucket, *input.UploadId, entry.Name, err)
				continue
			}
			output.Parts = append(output.Parts, &s3.Part{
				PartNumber:   aws.Int64(int64(partNumber)),
				LastModified: aws.Time(time.Unix(entry.Attributes.Mtime, 0)),
				Size:         aws.Int64(int64(filer2.TotalSize(entry.Chunks))),
				ETag:         aws.String("\"" + filer2.ETag(entry.Chunks) + "\""),
			})
		}
	}

	return
}