package s3api

import (
    "context"
    "encoding/xml"
    "fmt"
    "io"
    "net/http"
    "net/url"
    "strconv"
    "strings"

    "github.com/aws/aws-sdk-go/service/s3"
    "github.com/seaweedfs/seaweedfs/weed/glog"
    "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
    "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants"
    "github.com/seaweedfs/seaweedfs/weed/s3api/s3err"
)
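
// OptionalString wraps a string together with a flag recording whether the value
// was explicitly set, so the XML encoder can distinguish an empty ContinuationToken
// from an absent one.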
type OptionalString struct {
    string
    set bool
}

func (o OptionalString) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error {
    if !o.set {
        return nil
    }
    return e.EncodeElement(o.string, startElement)
}
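
// ListBucketResultV2 is the XML response body for the ListObjectsV2 API.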
type ListBucketResultV2 struct {
    XMLName               xml.Name       `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListBucketResult"`
    Name                  string         `xml:"Name"`
    Prefix                string         `xml:"Prefix"`
    MaxKeys               int            `xml:"MaxKeys"`
    Delimiter             string         `xml:"Delimiter,omitempty"`
    IsTruncated           bool           `xml:"IsTruncated"`
    Contents              []ListEntry    `xml:"Contents,omitempty"`
    CommonPrefixes        []PrefixEntry  `xml:"CommonPrefixes,omitempty"`
    ContinuationToken     OptionalString `xml:"ContinuationToken,omitempty"`
    NextContinuationToken string         `xml:"NextContinuationToken,omitempty"`
    EncodingType          string         `xml:"EncodingType,omitempty"`
    KeyCount              int            `xml:"KeyCount"`
    StartAfter            string         `xml:"StartAfter,omitempty"`
}
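
// ListObjectsV2Handler serves the ListObjectsV2 API. When no continuation-token is
// supplied, listing starts from the start-after key instead.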
func (s3a *S3ApiServer) ListObjectsV2Handler(w http.ResponseWriter, r *http.Request) {

    // https://docs.aws.amazon.com/AmazonS3/latest/API/v2-RESTBucketGET.html

    // collect parameters
    bucket, _ := s3_constants.GetBucketAndObject(r)
    glog.V(3).Infof("ListObjectsV2Handler %s", bucket)

    originalPrefix, startAfter, delimiter, continuationToken, encodingTypeUrl, fetchOwner, maxKeys := getListObjectsV2Args(r.URL.Query())

    if maxKeys < 0 {
        s3err.WriteErrorResponse(w, r, s3err.ErrInvalidMaxKeys)
        return
    }

    marker := continuationToken.string
    if !continuationToken.set {
        marker = startAfter
    }

    response, err := s3a.listFilerEntries(bucket, originalPrefix, maxKeys, marker, delimiter, encodingTypeUrl, fetchOwner)
    if err != nil {
        s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
        return
    }

    if len(response.Contents) == 0 {
        if exists, existErr := s3a.exists(s3a.option.BucketsPath, bucket, true); existErr == nil && !exists {
            s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchBucket)
            return
        }
    }

    responseV2 := &ListBucketResultV2{
        XMLName:               response.XMLName,
        Name:                  response.Name,
        CommonPrefixes:        response.CommonPrefixes,
        Contents:              response.Contents,
        ContinuationToken:     continuationToken,
        Delimiter:             response.Delimiter,
        IsTruncated:           response.IsTruncated,
        KeyCount:              len(response.Contents) + len(response.CommonPrefixes),
        MaxKeys:               response.MaxKeys,
        NextContinuationToken: response.NextMarker,
        Prefix:                response.Prefix,
        StartAfter:            startAfter,
    }
    if encodingTypeUrl {
        responseV2.EncodingType = s3.EncodingTypeUrl
    }

    writeSuccessResponseXML(w, r, responseV2)
}
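
// ListObjectsV1Handler serves the original ListObjects (V1) API.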
func (s3a *S3ApiServer) ListObjectsV1Handler(w http.ResponseWriter, r *http.Request) {

    // https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGET.html

    // collect parameters
    bucket, _ := s3_constants.GetBucketAndObject(r)
    glog.V(3).Infof("ListObjectsV1Handler %s", bucket)

    originalPrefix, marker, delimiter, encodingTypeUrl, maxKeys := getListObjectsV1Args(r.URL.Query())

    if maxKeys < 0 {
        s3err.WriteErrorResponse(w, r, s3err.ErrInvalidMaxKeys)
        return
    }

    response, err := s3a.listFilerEntries(bucket, originalPrefix, maxKeys, marker, delimiter, encodingTypeUrl, true)
    if err != nil {
        s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
        return
    }

    if len(response.Contents) == 0 {
        if exists, existErr := s3a.exists(s3a.option.BucketsPath, bucket, true); existErr == nil && !exists {
            s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchBucket)
            return
        }
    }

    writeSuccessResponseXML(w, r, response)
}
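
// listFilerEntries translates an S3 listing request into filer directory listings.
// It collects object entries into Contents, groups keys by delimiter into
// CommonPrefixes, and keeps listing until maxKeys results have been gathered or the
// directory is exhausted.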
func (s3a *S3ApiServer) listFilerEntries(bucket string, originalPrefix string, maxKeys int, originalMarker string, delimiter string, encodingTypeUrl bool, fetchOwner bool) (response ListBucketResult, err error) {
    // convert full path prefix into directory name and prefix for entry name
    requestDir, prefix, marker := normalizePrefixMarker(originalPrefix, originalMarker)
    bucketPrefix := fmt.Sprintf("%s/%s/", s3a.option.BucketsPath, bucket)
    reqDir := bucketPrefix[:len(bucketPrefix)-1]
    if requestDir != "" {
        reqDir = fmt.Sprintf("%s%s", bucketPrefix, requestDir)
    }

    var contents []ListEntry
    var commonPrefixes []PrefixEntry
    var doErr error
    var nextMarker string
    cursor := &ListingCursor{
        maxKeys:               maxKeys,
        prefixEndsOnDelimiter: strings.HasSuffix(originalPrefix, "/") && len(originalMarker) == 0,
    }

    // TODO: remove the force disable
    if s3a.option.AllowListRecursive && prefix == "force_disable" {
        err = s3a.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {
            glog.V(0).Infof("doListFilerRecursiveEntries reqDir: %s, prefix: %s, delimiter: %s, cursor: %+v", reqDir, prefix, delimiter, cursor)
            nextMarker, doErr = s3a.doListFilerRecursiveEntries(client, reqDir, prefix, cursor, marker, delimiter, false,
                func(dir string, entry *filer_pb.Entry) {
                    dirName, entryName, prefixName := entryUrlEncode(dir, entry.Name, encodingTypeUrl)
                    if entry.IsDirectory {
                        if delimiter == "/" { // A response can contain CommonPrefixes only if you specify a delimiter.
                            commonPrefixes = append(commonPrefixes, PrefixEntry{
                                Prefix: fmt.Sprintf("%s/%s/", dirName, prefixName)[len(bucketPrefix):],
                            })
                        }
                        return
                    }
                    contents = append(contents, newListEntry(entry, dirName, entryName, bucketPrefix, fetchOwner, entry.IsDirectoryKeyObject()))
                    cursor.maxKeys--
                },
            )
            return doErr
        })
        response = ListBucketResult{
            Name:           bucket,
            Prefix:         originalPrefix,
            Marker:         originalMarker,
            NextMarker:     nextMarker,
            MaxKeys:        maxKeys,
            Delimiter:      delimiter,
            IsTruncated:    cursor.isTruncated,
            Contents:       contents,
            CommonPrefixes: commonPrefixes,
        }
        if encodingTypeUrl {
            response.EncodingType = s3.EncodingTypeUrl
        }
        return
    }
    // check filer
    err = s3a.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {
        for {
            empty := true
            nextMarker, doErr = s3a.doListFilerEntries(client, reqDir, prefix, cursor, marker, delimiter, false, func(dir string, entry *filer_pb.Entry) {
                empty = false
                glog.V(5).Infof("doListFilerEntries dir: %s entry: %+v", dir, entry)
                dirName, entryName, prefixName := entryUrlEncode(dir, entry.Name, encodingTypeUrl)
                if entry.IsDirectory {
                    if entry.IsDirectoryKeyObject() {
                        contents = append(contents, newListEntry(entry, dirName, entryName, bucketPrefix, fetchOwner, true))
                        cursor.maxKeys--
                        // https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html
                    } else if delimiter == "/" { // A response can contain CommonPrefixes only if you specify a delimiter.
                        commonPrefixes = append(commonPrefixes, PrefixEntry{
                            Prefix: fmt.Sprintf("%s/%s/", dirName, prefixName)[len(bucketPrefix):],
                        })
                        // All of the keys (up to 1,000) rolled up into a common prefix count as a single return when calculating the number of returns.
                        cursor.maxKeys--
                    }
                } else {
                    var delimiterFound bool
                    if delimiter != "" {
                        // keys that contain the same string between the prefix and the first occurrence of the delimiter are grouped together as a commonPrefix.
                        // extract the string between the prefix and the delimiter and add it to the commonPrefixes if it's unique.
                        undelimitedPath := fmt.Sprintf("%s/%s", dir, entry.Name)[len(bucketPrefix):]
                        // take into account a prefix if supplied while delimiting.
                        undelimitedPath = strings.TrimPrefix(undelimitedPath, originalPrefix)
                        delimitedPath := strings.SplitN(undelimitedPath, delimiter, 2)
                        if len(delimitedPath) == 2 {
                            // S3 clients expect the delimited prefix to contain the delimiter and prefix.
                            delimitedPrefix := originalPrefix + delimitedPath[0] + delimiter
                            for i := range commonPrefixes {
                                if commonPrefixes[i].Prefix == delimitedPrefix {
                                    delimiterFound = true
                                    break
                                }
                            }
                            if !delimiterFound {
                                commonPrefixes = append(commonPrefixes, PrefixEntry{
                                    Prefix: delimitedPrefix,
                                })
                                cursor.maxKeys--
                                delimiterFound = true
                            }
                        }
                    }
                    if !delimiterFound {
                        contents = append(contents, newListEntry(entry, dirName, entryName, bucketPrefix, fetchOwner, false))
                        cursor.maxKeys--
                    }
                }
            })
            if doErr != nil {
                return doErr
            }

            if cursor.isTruncated {
                if requestDir != "" {
                    nextMarker = requestDir + "/" + nextMarker
                }
                break
            } else if empty || strings.HasSuffix(originalPrefix, "/") {
                nextMarker = ""
                break
            } else {
                // start next loop
                marker = nextMarker
            }
        }

        response = ListBucketResult{
            Name:           bucket,
            Prefix:         originalPrefix,
            Marker:         originalMarker,
            NextMarker:     nextMarker,
            MaxKeys:        maxKeys,
            Delimiter:      delimiter,
            IsTruncated:    cursor.isTruncated,
            Contents:       contents,
            CommonPrefixes: commonPrefixes,
        }

        if encodingTypeUrl {
            response.EncodingType = s3.EncodingTypeUrl
        }

        return nil
    })
    return
}
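
// ListingCursor carries the remaining key budget and truncation state across listing
// calls; prefixEndsOnDelimiter records that the request prefix ended with "/", which
// needs special handling for the first matching entry.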
type ListingCursor struct {
    maxKeys               int
    isTruncated           bool
    prefixEndsOnDelimiter bool
}
// the prefix and marker may be in different directories
// normalizePrefixMarker ensures the prefix and marker both start from the same directory
func normalizePrefixMarker(prefix, marker string) (alignedDir, alignedPrefix, alignedMarker string) {
    // alignedDir should not end with "/"
    // alignedDir, alignedPrefix, alignedMarker should only have "/" in the middle
    if len(marker) == 0 {
        prefix = strings.Trim(prefix, "/")
    } else {
        prefix = strings.TrimLeft(prefix, "/")
    }
    marker = strings.TrimLeft(marker, "/")
    if prefix == "" {
        return "", "", marker
    }
    if marker == "" {
        alignedDir, alignedPrefix = toDirAndName(prefix)
        return
    }
    if !strings.HasPrefix(marker, prefix) {
        // something wrong
        return "", prefix, marker
    }
    if strings.HasPrefix(marker, prefix+"/") {
        alignedDir = prefix
        alignedPrefix = ""
        alignedMarker = marker[len(alignedDir)+1:]
        return
    }
    alignedDir, alignedPrefix = toDirAndName(prefix)
    if alignedDir != "" {
        alignedMarker = marker[len(alignedDir)+1:]
    } else {
        alignedMarker = marker
    }
    return
}
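
// toDirAndName splits a path at its last "/" into parent directory and entry name.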
func toDirAndName(dirAndName string) (dir, name string) {
    sepIndex := strings.LastIndex(dirAndName, "/")
    if sepIndex >= 0 {
        dir, name = dirAndName[0:sepIndex], dirAndName[sepIndex+1:]
    } else {
        name = dirAndName
    }
    return
}
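
// toParentAndDescendants splits a path at its first "/" into the top-level directory
// and the remaining descendant path.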
func toParentAndDescendants(dirAndName string) (dir, name string) {
    sepIndex := strings.Index(dirAndName, "/")
    if sepIndex >= 0 {
        dir, name = dirAndName[0:sepIndex], dirAndName[sepIndex+1:]
    } else {
        name = dirAndName
    }
    return
}
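
// doListFilerRecursiveEntries issues a single recursive ListEntries call to the filer
// and invokes eachEntryFn for every streamed entry.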
func (s3a *S3ApiServer) doListFilerRecursiveEntries(client filer_pb.SeaweedFilerClient, dir, prefix string, cursor *ListingCursor, marker, delimiter string, inclusiveStartFrom bool, eachEntryFn func(dir string, entry *filer_pb.Entry)) (nextMarker string, err error) {
    if prefix == "/" && delimiter == "/" {
        return
    }
    request := &filer_pb.ListEntriesRequest{
        Directory:          dir,
        Prefix:             prefix,
        Limit:              uint32(cursor.maxKeys + 2),
        StartFromFileName:  marker,
        InclusiveStartFrom: inclusiveStartFrom,
        Recursive:          true,
    }
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()
    stream, listErr := client.ListEntries(ctx, request)
    if listErr != nil {
        return "", fmt.Errorf("list entries %+v: %v", request, listErr)
    }
    for {
        resp, recvErr := stream.Recv()
        if recvErr != nil {
            if recvErr == io.EOF {
                break
            } else {
                return "", fmt.Errorf("iterating entries %+v: %v", request, recvErr)
            }
        }
        eachEntryFn(dir, resp.Entry)
    }
    return
}
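
// doListFilerEntries lists a single directory level from the filer, recursing into
// subdirectories when no "/" delimiter applies, and marks the cursor as truncated
// once cursor.maxKeys is exhausted.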
func (s3a *S3ApiServer) doListFilerEntries(client filer_pb.SeaweedFilerClient, dir, prefix string, cursor *ListingCursor, marker, delimiter string, inclusiveStartFrom bool, eachEntryFn func(dir string, entry *filer_pb.Entry)) (nextMarker string, err error) {
    // invariants
    //   prefix and marker should be under dir, marker may contain "/"
    //   maxKeys should be updated for each recursion
    // glog.V(4).Infof("doListFilerEntries dir: %s, prefix: %s, marker %s, maxKeys: %d, prefixEndsOnDelimiter: %+v", dir, prefix, marker, cursor.maxKeys, cursor.prefixEndsOnDelimiter)
    if prefix == "/" && delimiter == "/" {
        return
    }
    if cursor.maxKeys <= 0 {
        return
    }

    if strings.Contains(marker, "/") {
        subDir, subMarker := toParentAndDescendants(marker)
        // println("doListFilerEntries dir", dir+"/"+subDir, "subMarker", subMarker)
        subNextMarker, subErr := s3a.doListFilerEntries(client, dir+"/"+subDir, "", cursor, subMarker, delimiter, false, eachEntryFn)
        if subErr != nil {
            err = subErr
            return
        }
        nextMarker = subDir + "/" + subNextMarker
        // finished processing this subdirectory
        marker = subDir
    }
    if cursor.isTruncated {
        return
    }

    // now marker is also a direct child of dir
    request := &filer_pb.ListEntriesRequest{
        Directory:          dir,
        Prefix:             prefix,
        Limit:              uint32(cursor.maxKeys + 2), // the bucket root directory needs to skip the additional s3_constants.MultipartUploadsFolder folder
        StartFromFileName:  marker,
        InclusiveStartFrom: inclusiveStartFrom,
    }
    if cursor.prefixEndsOnDelimiter {
        request.Limit = uint32(1)
    }

    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    stream, listErr := client.ListEntries(ctx, request)
    if listErr != nil {
        err = fmt.Errorf("list entries %+v: %v", request, listErr)
        return
    }

    for {
        resp, recvErr := stream.Recv()
        if recvErr != nil {
            if recvErr == io.EOF {
                break
            } else {
                err = fmt.Errorf("iterating entries %+v: %v", request, recvErr)
                return
            }
        }
        if cursor.maxKeys <= 0 {
            cursor.isTruncated = true
            continue
        }
        entry := resp.Entry
        nextMarker = entry.Name
        if cursor.prefixEndsOnDelimiter {
            if entry.Name == prefix && entry.IsDirectory {
                if delimiter != "/" {
                    cursor.prefixEndsOnDelimiter = false
                }
            } else {
                continue
            }
        }
        if entry.IsDirectory {
            // glog.V(4).Infof("List Dir Entries %s, file: %s, maxKeys %d", dir, entry.Name, cursor.maxKeys)
            if entry.Name == s3_constants.MultipartUploadsFolder { // FIXME: no need to apply this to all directories; this extra entry also affects maxKeys
                continue
            }
            if delimiter != "/" || cursor.prefixEndsOnDelimiter {
                if cursor.prefixEndsOnDelimiter {
                    cursor.prefixEndsOnDelimiter = false
                    if entry.IsDirectoryKeyObject() {
                        eachEntryFn(dir, entry)
                    }
                } else {
                    eachEntryFn(dir, entry)
                }
                subNextMarker, subErr := s3a.doListFilerEntries(client, dir+"/"+entry.Name, "", cursor, "", delimiter, false, eachEntryFn)
                if subErr != nil {
                    err = fmt.Errorf("doListFilerEntries2: %v", subErr)
                    return
                }
                // println("doListFilerEntries2 dir", dir+"/"+entry.Name, "subNextMarker", subNextMarker)
                nextMarker = entry.Name + "/" + subNextMarker
                if cursor.isTruncated {
                    return
                }
                // println("doListFilerEntries2 nextMarker", nextMarker)
            } else {
                var isEmpty bool
                if !s3a.option.AllowEmptyFolder && entry.IsOlderDir() {
                    //if isEmpty, err = s3a.ensureDirectoryAllEmpty(client, dir, entry.Name); err != nil {
                    //	glog.Errorf("check empty folder %s: %v", dir, err)
                    //}
                }
                if !isEmpty {
                    eachEntryFn(dir, entry)
                }
            }
        } else {
            eachEntryFn(dir, entry)
            // glog.V(4).Infof("List File Entries %s, file: %s, maxKeys %d", dir, entry.Name, cursor.maxKeys)
        }
        if cursor.prefixEndsOnDelimiter {
            cursor.prefixEndsOnDelimiter = false
        }
    }
    return
}
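
// getListObjectsV2Args parses the ListObjectsV2 query parameters; max-keys defaults
// to maxObjectListSizeLimit when not supplied.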
func getListObjectsV2Args(values url.Values) (prefix, startAfter, delimiter string, token OptionalString, encodingTypeUrl bool, fetchOwner bool, maxkeys int) {
    prefix = values.Get("prefix")
    token = OptionalString{set: values.Has("continuation-token"), string: values.Get("continuation-token")}
    startAfter = values.Get("start-after")
    delimiter = values.Get("delimiter")
    encodingTypeUrl = values.Get("encoding-type") == s3.EncodingTypeUrl
    if values.Get("max-keys") != "" {
        maxkeys, _ = strconv.Atoi(values.Get("max-keys"))
    } else {
        maxkeys = maxObjectListSizeLimit
    }
    fetchOwner = values.Get("fetch-owner") == "true"
    return
}
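
// getListObjectsV1Args parses the ListObjects (V1) query parameters; max-keys defaults
// to maxObjectListSizeLimit when not supplied.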
func getListObjectsV1Args(values url.Values) (prefix, marker, delimiter string, encodingTypeUrl bool, maxkeys int) {
    prefix = values.Get("prefix")
    marker = values.Get("marker")
    delimiter = values.Get("delimiter")
    encodingTypeUrl = values.Get("encoding-type") == "url"
    if values.Get("max-keys") != "" {
        maxkeys, _ = strconv.Atoi(values.Get("max-keys"))
    } else {
        maxkeys = maxObjectListSizeLimit
    }
    return
}
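
// ensureDirectoryAllEmpty reports whether the directory subtree under parentDir/name
// contains no files, deleting folders found to be empty along the way. Its only call
// site in this file, in doListFilerEntries, is currently commented out.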
func (s3a *S3ApiServer) ensureDirectoryAllEmpty(filerClient filer_pb.SeaweedFilerClient, parentDir, name string) (isEmpty bool, err error) {
    // println("+ ensureDirectoryAllEmpty", dir, name)
    glog.V(4).Infof("+ isEmpty %s/%s", parentDir, name)
    defer glog.V(4).Infof("- isEmpty %s/%s %v", parentDir, name, isEmpty)
    var fileCounter int
    var subDirs []string
    currentDir := parentDir + "/" + name
    var startFrom string
    var isExhausted bool
    var foundEntry bool
    for fileCounter == 0 && !isExhausted && err == nil {
        err = filer_pb.SeaweedList(filerClient, currentDir, "", func(entry *filer_pb.Entry, isLast bool) error {
            foundEntry = true
            if entry.IsOlderDir() {
                subDirs = append(subDirs, entry.Name)
            } else {
                fileCounter++
            }
            startFrom = entry.Name
            isExhausted = isExhausted || isLast
            glog.V(4).Infof(" * %s/%s isLast: %t", currentDir, startFrom, isLast)
            return nil
        }, startFrom, false, 8)
        if !foundEntry {
            break
        }
    }

    if err != nil {
        return false, err
    }

    if fileCounter > 0 {
        return false, nil
    }

    for _, subDir := range subDirs {
        isSubEmpty, subErr := s3a.ensureDirectoryAllEmpty(filerClient, currentDir, subDir)
        if subErr != nil {
            return false, subErr
        }
        if !isSubEmpty {
            return false, nil
        }
    }

    glog.V(1).Infof("deleting empty folder %s", currentDir)
    if err = doDeleteEntry(filerClient, parentDir, name, true, false); err != nil {
        return
    }

    return true, nil
}