package s3api

import (
	"context"
	"encoding/xml"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"sort"
	"strconv"
	"strings"

	"github.com/aws/aws-sdk-go/service/s3"
	"github.com/seaweedfs/seaweedfs/weed/glog"
	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
	"github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants"
	"github.com/seaweedfs/seaweedfs/weed/s3api/s3err"
)
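
// OptionalString is a string that also records whether it was explicitly set,
// so that XML marshalling can distinguish "absent" from "present but empty".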
type OptionalString struct {
	string
	set bool
}
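
// MarshalXML writes the element only when the value was explicitly set;
// unset values are omitted from the XML output entirely.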
func (o OptionalString) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error {
	if !o.set {
		return nil
	}
	return e.EncodeElement(o.string, startElement)
}
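
// ListBucketResultV2 is the XML response body for ListObjectsV2 (GET Bucket, list type 2).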
type ListBucketResultV2 struct {
	XMLName               xml.Name       `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListBucketResult"`
	Name                  string         `xml:"Name"`
	Prefix                string         `xml:"Prefix"`
	MaxKeys               int            `xml:"MaxKeys"`
	Delimiter             string         `xml:"Delimiter,omitempty"`
	IsTruncated           bool           `xml:"IsTruncated"`
	Contents              []ListEntry    `xml:"Contents,omitempty"`
	CommonPrefixes        []PrefixEntry  `xml:"CommonPrefixes,omitempty"`
	ContinuationToken     OptionalString `xml:"ContinuationToken,omitempty"`
	NextContinuationToken string         `xml:"NextContinuationToken,omitempty"`
	EncodingType          string         `xml:"EncodingType,omitempty"`
	KeyCount              int            `xml:"KeyCount"`
	StartAfter            string         `xml:"StartAfter,omitempty"`
}
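
// ListObjectsV2Handler serves the ListObjectsV2 API: it parses the query
// parameters, lists matching entries from the filer, and writes the
// ListBucketResultV2 XML response.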
func (s3a *S3ApiServer) ListObjectsV2Handler(w http.ResponseWriter, r *http.Request) {
	// https://docs.aws.amazon.com/AmazonS3/latest/API/v2-RESTBucketGET.html

	// collect parameters
	bucket, _ := s3_constants.GetBucketAndObject(r)
	glog.V(3).Infof("ListObjectsV2Handler %s", bucket)

	originalPrefix, startAfter, delimiter, continuationToken, encodingTypeUrl, fetchOwner, maxKeys := getListObjectsV2Args(r.URL.Query())

	if maxKeys < 0 {
		s3err.WriteErrorResponse(w, r, s3err.ErrInvalidMaxKeys)
		return
	}

	marker := continuationToken.string
	if !continuationToken.set {
		marker = startAfter
	}

	response, err := s3a.listFilerEntries(bucket, originalPrefix, maxKeys, marker, delimiter, encodingTypeUrl, fetchOwner)
	if err != nil {
		s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
		return
	}

	if len(response.Contents) == 0 {
		if exists, existErr := s3a.exists(s3a.option.BucketsPath, bucket, true); existErr == nil && !exists {
			s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchBucket)
			return
		}
	}

	responseV2 := &ListBucketResultV2{
		XMLName:               response.XMLName,
		Name:                  response.Name,
		CommonPrefixes:        response.CommonPrefixes,
		Contents:              response.Contents,
		ContinuationToken:     continuationToken,
		Delimiter:             response.Delimiter,
		IsTruncated:           response.IsTruncated,
		KeyCount:              len(response.Contents) + len(response.CommonPrefixes),
		MaxKeys:               response.MaxKeys,
		NextContinuationToken: response.NextMarker,
		Prefix:                response.Prefix,
		StartAfter:            startAfter,
	}
	if encodingTypeUrl {
		responseV2.EncodingType = s3.EncodingTypeUrl
	}

	writeSuccessResponseXML(w, r, responseV2)
}
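
// ListObjectsV1Handler serves the legacy ListObjects (version 1) API using the
// same filer-backed listing as ListObjectsV2.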
func (s3a *S3ApiServer) ListObjectsV1Handler(w http.ResponseWriter, r *http.Request) {
	// https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGET.html

	// collect parameters
	bucket, _ := s3_constants.GetBucketAndObject(r)
	glog.V(3).Infof("ListObjectsV1Handler %s", bucket)

	originalPrefix, marker, delimiter, encodingTypeUrl, maxKeys := getListObjectsV1Args(r.URL.Query())

	if maxKeys < 0 {
		s3err.WriteErrorResponse(w, r, s3err.ErrInvalidMaxKeys)
		return
	}

	response, err := s3a.listFilerEntries(bucket, originalPrefix, maxKeys, marker, delimiter, encodingTypeUrl, true)
	if err != nil {
		s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
		return
	}

	if len(response.Contents) == 0 {
		if exists, existErr := s3a.exists(s3a.option.BucketsPath, bucket, true); existErr == nil && !exists {
			s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchBucket)
			return
		}
	}

	writeSuccessResponseXML(w, r, response)
}
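
// listFilerEntries performs the actual listing against the filer: it normalizes
// the prefix and marker into a starting directory, walks entries (recursively
// when supported), groups keys into CommonPrefixes when a delimiter is given,
// and assembles the ListBucketResult.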
func (s3a *S3ApiServer) listFilerEntries(bucket string, originalPrefix string, maxKeys int, originalMarker string, delimiter string, encodingTypeUrl bool, fetchOwner bool) (response ListBucketResult, err error) {
	// convert full path prefix into directory name and prefix for entry name
	requestDir, prefix, marker := normalizePrefixMarker(originalPrefix, originalMarker)
	bucketPrefix := fmt.Sprintf("%s/%s/", s3a.option.BucketsPath, bucket)
	reqDir := bucketPrefix[:len(bucketPrefix)-1]
	if requestDir != "" {
		reqDir = fmt.Sprintf("%s%s", bucketPrefix, requestDir)
	}

	var contents []ListEntry
	var commonPrefixes []PrefixEntry
	var doErr error
	var nextMarker string
	cursor := &ListingCursor{
		maxKeys:               maxKeys,
		prefixEndsOnDelimiter: strings.HasSuffix(originalPrefix, "/") && len(originalMarker) == 0,
	}

	// TODO: remove force disable
	if s3a.option.AllowListRecursive && prefix != "" && (delimiter == "" || delimiter == "/") {
		err = s3a.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {
			glog.V(0).Infof("doListFilerRecursiveEntries reqDir: %s, prefix: %s, delimiter: %s, cursor: %+v", reqDir, prefix, delimiter, cursor)
			nextMarker, doErr = s3a.doListFilerRecursiveEntries(client, reqDir, prefix, cursor, marker, delimiter, false,
				func(dir string, entry *filer_pb.Entry) {
					glog.V(5).Infof("doListFilerRecursiveEntries dir %s, shortDir %s, entry: %+v, cursor: %+v", dir, dir[len(bucketPrefix):], entry, cursor)
					if cursor.isTruncated {
						return
					}
					dirName, entryName, prefixName := entryUrlEncode(dir, entry.Name, encodingTypeUrl)
					isCommonDir := strings.Index(dir[len(bucketPrefix):], "/") != -1
					if cursor.prefixEndsOnDelimiter && !isCommonDir && entry.Name == prefix {
						return
					}
					if delimiter == "/" {
						if entry.IsDirectory {
							commonPrefixes = append(commonPrefixes, PrefixEntry{
								Prefix: fmt.Sprintf("%s/%s/", dirName, prefixName)[len(bucketPrefix):],
							})
							cursor.Decrease()
							return
						} else if isCommonDir {
							return
						}
					}
					contents = append(contents, newListEntry(entry, dirName, entryName, bucketPrefix, fetchOwner, entry.IsDirectoryKeyObject()))
					cursor.Decrease()
				},
			)
			return nil
		})
		response = ListBucketResult{
			Name:           bucket,
			Prefix:         originalPrefix,
			Marker:         originalMarker,
			NextMarker:     nextMarker,
			MaxKeys:        maxKeys,
			Delimiter:      delimiter,
			IsTruncated:    cursor.isTruncated,
			Contents:       contents,
			CommonPrefixes: commonPrefixes,
		}
		if encodingTypeUrl {
			response.EncodingType = s3.EncodingTypeUrl
		}
		return
	}

	// check filer
	err = s3a.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {
		for {
			empty := true
			nextMarker, doErr = s3a.doListFilerEntries(client, reqDir, prefix, cursor, marker, delimiter, false, func(dir string, entry *filer_pb.Entry) {
				empty = false
				glog.V(5).Infof("doListFilerEntries dir: %s entry: %+v", dir, entry)
				dirName, entryName, prefixName := entryUrlEncode(dir, entry.Name, encodingTypeUrl)
				if entry.IsDirectory {
					if entry.IsDirectoryKeyObject() {
						contents = append(contents, newListEntry(entry, dirName, entryName, bucketPrefix, fetchOwner, true))
						cursor.maxKeys--
						// https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html
					} else if delimiter == "/" { // A response can contain CommonPrefixes only if you specify a delimiter.
						commonPrefixes = append(commonPrefixes, PrefixEntry{
							Prefix: fmt.Sprintf("%s/%s/", dirName, prefixName)[len(bucketPrefix):],
						})
						// All of the keys (up to 1,000) rolled up into a common prefix count as a single return when calculating the number of returns.
						cursor.maxKeys--
					}
				} else {
					var delimiterFound bool
					if delimiter != "" {
						// keys that contain the same string between the prefix and the first occurrence of the delimiter are grouped together as a commonPrefix.
						// extract the string between the prefix and the delimiter and add it to the commonPrefixes if it's unique.
						undelimitedPath := fmt.Sprintf("%s/%s", dir, entry.Name)[len(bucketPrefix):]
						// take into account a prefix if supplied while delimiting.
						undelimitedPath = strings.TrimPrefix(undelimitedPath, originalPrefix)
						delimitedPath := strings.SplitN(undelimitedPath, delimiter, 2)
						if len(delimitedPath) == 2 {
							// S3 clients expect the delimited prefix to contain the delimiter and prefix.
							delimitedPrefix := originalPrefix + delimitedPath[0] + delimiter
							for i := range commonPrefixes {
								if commonPrefixes[i].Prefix == delimitedPrefix {
									delimiterFound = true
									break
								}
							}
							if !delimiterFound {
								commonPrefixes = append(commonPrefixes, PrefixEntry{
									Prefix: delimitedPrefix,
								})
								cursor.maxKeys--
								delimiterFound = true
							}
						}
					}
					if !delimiterFound {
						contents = append(contents, newListEntry(entry, dirName, entryName, bucketPrefix, fetchOwner, false))
						cursor.maxKeys--
					}
				}
			})
			if doErr != nil {
				return doErr
			}

			if cursor.isTruncated {
				if requestDir != "" {
					nextMarker = requestDir + "/" + nextMarker
				}
				break
			} else if empty || strings.HasSuffix(originalPrefix, "/") {
				nextMarker = ""
				break
			} else {
				// start next loop
				marker = nextMarker
			}
		}

		response = ListBucketResult{
			Name:           bucket,
			Prefix:         originalPrefix,
			Marker:         originalMarker,
			NextMarker:     nextMarker,
			MaxKeys:        maxKeys,
			Delimiter:      delimiter,
			IsTruncated:    cursor.isTruncated,
			Contents:       contents,
			CommonPrefixes: commonPrefixes,
		}

		if encodingTypeUrl {
			sort.Slice(response.CommonPrefixes, func(i, j int) bool {
				return response.CommonPrefixes[i].Prefix < response.CommonPrefixes[j].Prefix
			})
			response.EncodingType = s3.EncodingTypeUrl
		}

		return nil
	})

	return
}
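
// ListingCursor tracks the remaining number of keys to return and whether the
// listing has been truncated.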
type ListingCursor struct {
	maxKeys               int
	isTruncated           bool
	prefixEndsOnDelimiter bool
}
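
// Decrease consumes one key from the remaining budget and marks the listing as
// truncated once the budget reaches zero.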
func (l *ListingCursor) Decrease() {
	l.maxKeys--
	if l.maxKeys == 0 {
		l.isTruncated = true
	}
}

// The prefix and marker may be in different directories.
// normalizePrefixMarker ensures the prefix and marker both start from the same directory.
func normalizePrefixMarker(prefix, marker string) (alignedDir, alignedPrefix, alignedMarker string) {
	// alignedDir should not end with "/"
	// alignedDir, alignedPrefix, alignedMarker should only have "/" in the middle
	if len(marker) == 0 {
		prefix = strings.Trim(prefix, "/")
	} else {
		prefix = strings.TrimLeft(prefix, "/")
	}
	marker = strings.TrimLeft(marker, "/")
	if prefix == "" {
		return "", "", marker
	}
	if marker == "" {
		alignedDir, alignedPrefix = toDirAndName(prefix)
		return
	}
	if !strings.HasPrefix(marker, prefix) {
		// the marker is not under the prefix: something is wrong
		return "", prefix, marker
	}
	if strings.HasPrefix(marker, prefix+"/") {
		alignedDir = prefix
		alignedPrefix = ""
		alignedMarker = marker[len(alignedDir)+1:]
		return
	}
	alignedDir, alignedPrefix = toDirAndName(prefix)
	if alignedDir != "" {
		alignedMarker = marker[len(alignedDir)+1:]
	} else {
		alignedMarker = marker
	}
	return
}
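
// toDirAndName splits a path at its last "/" into the parent directory and the
// entry name; a path without "/" is treated as a bare name.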
func toDirAndName(dirAndName string) (dir, name string) {
	sepIndex := strings.LastIndex(dirAndName, "/")
	if sepIndex >= 0 {
		dir, name = dirAndName[0:sepIndex], dirAndName[sepIndex+1:]
	} else {
		name = dirAndName
	}
	return
}
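
// toParentAndDescendants splits a path at its first "/" into the top-level
// directory and the remaining descendant path.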
func toParentAndDescendants(dirAndName string) (dir, name string) {
	sepIndex := strings.Index(dirAndName, "/")
	if sepIndex >= 0 {
		dir, name = dirAndName[0:sepIndex], dirAndName[sepIndex+1:]
	} else {
		name = dirAndName
	}
	return
}
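
// doListFilerRecursiveEntries streams entries from the filer in a single
// recursive ListEntries call and invokes eachEntryFn for every entry received.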
func (s3a *S3ApiServer) doListFilerRecursiveEntries(client filer_pb.SeaweedFilerClient, dir, prefix string, cursor *ListingCursor, marker, delimiter string, inclusiveStartFrom bool, eachEntryFn func(dir string, entry *filer_pb.Entry)) (nextMarker string, err error) {
	if prefix == "/" && delimiter == "/" {
		return
	}
	request := &filer_pb.ListEntriesRequest{
		Directory:          dir,
		Prefix:             prefix,
		Limit:              uint32(cursor.maxKeys),
		StartFromFileName:  marker,
		InclusiveStartFrom: inclusiveStartFrom,
		Recursive:          true,
	}
	if cursor.prefixEndsOnDelimiter {
		request.Limit += 1
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	stream, listErr := client.ListEntries(ctx, request)
	if listErr != nil {
		return "", fmt.Errorf("list entries %+v: %v", request, listErr)
	}
	for {
		resp, recvErr := stream.Recv()
		if recvErr != nil {
			if recvErr == io.EOF {
				break
			} else {
				return "", fmt.Errorf("iterating entries %+v: %v", request, recvErr)
			}
		}
		eachEntryFn(resp.Dir, resp.Entry)
	}
	return
}
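
// doListFilerEntries lists the direct children of dir from the filer, descending
// into subdirectories itself when the delimiter does not group them, and invokes
// eachEntryFn for every entry that should be considered for the response.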
func (s3a *S3ApiServer) doListFilerEntries(client filer_pb.SeaweedFilerClient, dir, prefix string, cursor *ListingCursor, marker, delimiter string, inclusiveStartFrom bool, eachEntryFn func(dir string, entry *filer_pb.Entry)) (nextMarker string, err error) {
	// invariants
	//   prefix and marker should be under dir, marker may contain "/"
	//   maxKeys should be updated for each recursion
	// glog.V(4).Infof("doListFilerEntries dir: %s, prefix: %s, marker %s, maxKeys: %d, prefixEndsOnDelimiter: %+v", dir, prefix, marker, cursor.maxKeys, cursor.prefixEndsOnDelimiter)

	if prefix == "/" && delimiter == "/" {
		return
	}
	if cursor.maxKeys <= 0 {
		return
	}

	if strings.Contains(marker, "/") {
		subDir, subMarker := toParentAndDescendants(marker)
		// println("doListFilerEntries dir", dir+"/"+subDir, "subMarker", subMarker)
		subNextMarker, subErr := s3a.doListFilerEntries(client, dir+"/"+subDir, "", cursor, subMarker, delimiter, false, eachEntryFn)
		if subErr != nil {
			err = subErr
			return
		}
		nextMarker = subDir + "/" + subNextMarker
		// finished processing this subdirectory
		marker = subDir
	}
	if cursor.isTruncated {
		return
	}

	// now marker is also a direct child of dir
	request := &filer_pb.ListEntriesRequest{
		Directory:          dir,
		Prefix:             prefix,
		Limit:              uint32(cursor.maxKeys + 2), // bucket root directory needs to skip the additional s3_constants.MultipartUploadsFolder folder
		StartFromFileName:  marker,
		InclusiveStartFrom: inclusiveStartFrom,
	}
	if cursor.prefixEndsOnDelimiter {
		request.Limit = uint32(1)
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	stream, listErr := client.ListEntries(ctx, request)
	if listErr != nil {
		err = fmt.Errorf("list entries %+v: %v", request, listErr)
		return
	}

	for {
		resp, recvErr := stream.Recv()
		if recvErr != nil {
			if recvErr == io.EOF {
				break
			} else {
				err = fmt.Errorf("iterating entries %+v: %v", request, recvErr)
				return
			}
		}
		if cursor.maxKeys <= 0 {
			cursor.isTruncated = true
			continue
		}
		entry := resp.Entry
		nextMarker = entry.Name
		if cursor.prefixEndsOnDelimiter {
			if entry.Name == prefix && entry.IsDirectory {
				if delimiter != "/" {
					cursor.prefixEndsOnDelimiter = false
				}
			} else {
				continue
			}
		}
		if entry.IsDirectory {
			// glog.V(4).Infof("List Dir Entries %s, file: %s, maxKeys %d", dir, entry.Name, cursor.maxKeys)
			if entry.Name == s3_constants.MultipartUploadsFolder { // FIXME no need to apply to all directories. this extra also affects maxKeys
				continue
			}
			if delimiter != "/" || cursor.prefixEndsOnDelimiter {
				if cursor.prefixEndsOnDelimiter {
					cursor.prefixEndsOnDelimiter = false
					if entry.IsDirectoryKeyObject() {
						eachEntryFn(dir, entry)
					}
				} else {
					eachEntryFn(dir, entry)
				}
				subNextMarker, subErr := s3a.doListFilerEntries(client, dir+"/"+entry.Name, "", cursor, "", delimiter, false, eachEntryFn)
				if subErr != nil {
					err = fmt.Errorf("doListFilerEntries2: %v", subErr)
					return
				}
				// println("doListFilerEntries2 dir", dir+"/"+entry.Name, "subNextMarker", subNextMarker)
				nextMarker = entry.Name + "/" + subNextMarker
				if cursor.isTruncated {
					return
				}
				// println("doListFilerEntries2 nextMarker", nextMarker)
			} else {
				var isEmpty bool
				if !s3a.option.AllowEmptyFolder && entry.IsOlderDir() {
					//if isEmpty, err = s3a.ensureDirectoryAllEmpty(client, dir, entry.Name); err != nil {
					//	glog.Errorf("check empty folder %s: %v", dir, err)
					//}
				}
				if !isEmpty {
					eachEntryFn(dir, entry)
				}
			}
		} else {
			eachEntryFn(dir, entry)
			// glog.V(4).Infof("List File Entries %s, file: %s, maxKeys %d", dir, entry.Name, cursor.maxKeys)
		}
		if cursor.prefixEndsOnDelimiter {
			cursor.prefixEndsOnDelimiter = false
		}
	}
	return
}
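
// getListObjectsV2Args extracts the ListObjectsV2 query parameters, falling back
// to maxObjectListSizeLimit when max-keys is not supplied.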
func getListObjectsV2Args(values url.Values) (prefix, startAfter, delimiter string, token OptionalString, encodingTypeUrl bool, fetchOwner bool, maxkeys int) {
	prefix = values.Get("prefix")
	token = OptionalString{set: values.Has("continuation-token"), string: values.Get("continuation-token")}
	startAfter = values.Get("start-after")
	delimiter = values.Get("delimiter")
	encodingTypeUrl = values.Get("encoding-type") == s3.EncodingTypeUrl
	if values.Get("max-keys") != "" {
		maxkeys, _ = strconv.Atoi(values.Get("max-keys"))
	} else {
		maxkeys = maxObjectListSizeLimit
	}
	fetchOwner = values.Get("fetch-owner") == "true"
	return
}
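
// getListObjectsV1Args extracts the ListObjects (version 1) query parameters,
// falling back to maxObjectListSizeLimit when max-keys is not supplied.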
func getListObjectsV1Args(values url.Values) (prefix, marker, delimiter string, encodingTypeUrl bool, maxkeys int) {
	prefix = values.Get("prefix")
	marker = values.Get("marker")
	delimiter = values.Get("delimiter")
	encodingTypeUrl = values.Get("encoding-type") == "url"
	if values.Get("max-keys") != "" {
		maxkeys, _ = strconv.Atoi(values.Get("max-keys"))
	} else {
		maxkeys = maxObjectListSizeLimit
	}
	return
}
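
// ensureDirectoryAllEmpty reports whether parentDir/name contains no files at any
// depth; when the directory tree is empty it is deleted from the filer.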
func (s3a *S3ApiServer) ensureDirectoryAllEmpty(filerClient filer_pb.SeaweedFilerClient, parentDir, name string) (isEmpty bool, err error) {
	// println("+ ensureDirectoryAllEmpty", dir, name)
	glog.V(4).Infof("+ isEmpty %s/%s", parentDir, name)
	defer glog.V(4).Infof("- isEmpty %s/%s %v", parentDir, name, isEmpty)

	var fileCounter int
	var subDirs []string
	currentDir := parentDir + "/" + name
	var startFrom string
	var isExhausted bool
	var foundEntry bool
	for fileCounter == 0 && !isExhausted && err == nil {
		err = filer_pb.SeaweedList(filerClient, currentDir, "", func(entry *filer_pb.Entry, isLast bool) error {
			foundEntry = true
			if entry.IsOlderDir() {
				subDirs = append(subDirs, entry.Name)
			} else {
				fileCounter++
			}
			startFrom = entry.Name
			isExhausted = isExhausted || isLast
			glog.V(4).Infof("  * %s/%s isLast: %t", currentDir, startFrom, isLast)
			return nil
		}, startFrom, false, 8)
		if !foundEntry {
			break
		}
	}

	if err != nil {
		return false, err
	}

	if fileCounter > 0 {
		return false, nil
	}

	for _, subDir := range subDirs {
		isSubEmpty, subErr := s3a.ensureDirectoryAllEmpty(filerClient, currentDir, subDir)
		if subErr != nil {
			return false, subErr
		}
		if !isSubEmpty {
			return false, nil
		}
	}

	glog.V(1).Infof("deleting empty folder %s", currentDir)
	if err = doDeleteEntry(filerClient, parentDir, name, true, false); err != nil {
		return
	}

	return true, nil
}