diff --git a/.github/workflows/s3tests.yml b/.github/workflows/s3tests.yml
index d72fa3a3a..866f2d888 100644
--- a/.github/workflows/s3tests.yml
+++ b/.github/workflows/s3tests.yml
@@ -194,4 +194,7 @@ jobs:
             s3tests_boto3/functional/test_s3.py::test_ranged_request_skip_leading_bytes_response_code \
             s3tests_boto3/functional/test_s3.py::test_ranged_request_return_trailing_bytes_response_code \
             s3tests_boto3/functional/test_s3.py::test_copy_object_ifmatch_good \
-            s3tests_boto3/functional/test_s3.py::test_copy_object_ifnonematch_failed
+            s3tests_boto3/functional/test_s3.py::test_copy_object_ifnonematch_failed \
+            s3tests_boto3/functional/test_s3.py::test_lifecycle_set \
+            s3tests_boto3/functional/test_s3.py::test_lifecycle_get \
+            s3tests_boto3/functional/test_s3.py::test_lifecycle_set_filter
diff --git a/k8s/charts/seaweedfs/Chart.yaml b/k8s/charts/seaweedfs/Chart.yaml
index 7e6db9c26..de35cd51b 100644
--- a/k8s/charts/seaweedfs/Chart.yaml
+++ b/k8s/charts/seaweedfs/Chart.yaml
@@ -2,4 +2,4 @@ apiVersion: v1
 description: SeaweedFS
 name: seaweedfs
 appVersion: "3.65"
-version: 3.65.0
+version: 3.65.1
diff --git a/k8s/charts/seaweedfs/templates/filler-ingress.yaml b/k8s/charts/seaweedfs/templates/filer-ingress.yaml
similarity index 93%
rename from k8s/charts/seaweedfs/templates/filler-ingress.yaml
rename to k8s/charts/seaweedfs/templates/filer-ingress.yaml
index e5cc9a275..7a7c98860 100644
--- a/k8s/charts/seaweedfs/templates/filler-ingress.yaml
+++ b/k8s/charts/seaweedfs/templates/filer-ingress.yaml
@@ -11,9 +11,9 @@ kind: Ingress
 metadata:
   name: ingress-{{ template "seaweedfs.name" . }}-filer
   namespace: {{ .Release.Namespace }}
-  {{- if .Values.filer.ingress.annotations }}
+  {{- with .Values.filer.ingress.annotations }}
   annotations:
-    {{ tpl .Values.filer.ingress.annotations . | nindent 4 | trim }}
+    {{- toYaml . | nindent 4 }}
   {{- end }}
   labels:
     app.kubernetes.io/name: {{ template "seaweedfs.name" . }}
diff --git a/k8s/charts/seaweedfs/templates/master-ingress.yaml b/k8s/charts/seaweedfs/templates/master-ingress.yaml
index 7bac95809..62d7f7a50 100644
--- a/k8s/charts/seaweedfs/templates/master-ingress.yaml
+++ b/k8s/charts/seaweedfs/templates/master-ingress.yaml
@@ -11,9 +11,9 @@ kind: Ingress
 metadata:
   name: ingress-{{ template "seaweedfs.name" . }}-master
   namespace: {{ .Release.Namespace }}
-  {{- if .Values.master.ingress.annotations }}
+  {{- with .Values.master.ingress.annotations }}
   annotations:
-    {{ tpl .Values.master.ingress.annotations . | nindent 4 | trim }}
+    {{- toYaml . | nindent 4 }}
   {{- end }}
   labels:
     app.kubernetes.io/name: {{ template "seaweedfs.name" . }}
diff --git a/k8s/charts/seaweedfs/templates/s3-ingress.yaml b/k8s/charts/seaweedfs/templates/s3-ingress.yaml
index 08d58f662..7b279793b 100644
--- a/k8s/charts/seaweedfs/templates/s3-ingress.yaml
+++ b/k8s/charts/seaweedfs/templates/s3-ingress.yaml
@@ -10,9 +10,9 @@ kind: Ingress
 metadata:
   name: ingress-{{ template "seaweedfs.name" . }}-s3
   namespace: {{ .Release.Namespace }}
-  {{- if .Values.s3.ingress.annotations }}
+  {{- with .Values.s3.ingress.annotations }}
   annotations:
-    {{- tpl .Values.s3.ingress.annotations . | nindent 4 }}
+    {{- toYaml . | nindent 4 }}
   {{- end }}
   labels:
     app.kubernetes.io/name: {{ template "seaweedfs.name" . }}
diff --git a/k8s/charts/seaweedfs/values.yaml b/k8s/charts/seaweedfs/values.yaml
index 26009a28b..c382371ca 100644
--- a/k8s/charts/seaweedfs/values.yaml
+++ b/k8s/charts/seaweedfs/values.yaml
@@ -171,7 +171,7 @@ master:
     className: "nginx"
     # host: false for "*" hostname
     host: "master.seaweedfs.local"
-    annotations: |
+    annotations:
       nginx.ingress.kubernetes.io/auth-type: "basic"
       nginx.ingress.kubernetes.io/auth-secret: "default/ingress-basic-auth-secret"
       nginx.ingress.kubernetes.io/auth-realm: 'Authentication Required - SW-Master'
@@ -540,7 +540,7 @@ filer:
     className: "nginx"
     # host: false for "*" hostname
    host: "seaweedfs.cluster.local"
-    annotations: |
+    annotations:
       nginx.ingress.kubernetes.io/backend-protocol: GRPC
       nginx.ingress.kubernetes.io/auth-type: "basic"
       nginx.ingress.kubernetes.io/auth-secret: "default/ingress-basic-auth-secret"
@@ -746,7 +746,7 @@ s3:
     # host: false for "*" hostname
     host: "seaweedfs.cluster.local"
     # additional ingress annotations for the s3 endpoint
-    annotations: ""
+    annotations: {}
     tls: []

 certificates:
diff --git a/weed/filer/filer_delete_entry.go b/weed/filer/filer_delete_entry.go
index 4107886e8..66b7290ce 100644
--- a/weed/filer/filer_delete_entry.go
+++ b/weed/filer/filer_delete_entry.go
@@ -38,7 +38,7 @@ func (f *Filer) DeleteEntryMetaAndData(ctx context.Context, p util.FullPath, isR
 			return nil
 		})
 		if err != nil {
-			glog.V(0).Infof("delete directory %s: %v", p, err)
+			glog.V(2).Infof("delete directory %s: %v", p, err)
 			return fmt.Errorf("delete directory %s: %v", p, err)
 		}
 	}
@@ -76,7 +76,7 @@ func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry
 	}
 	if lastFileName == "" && !isRecursive && len(entries) > 0 {
 		// only for first iteration in the loop
-		glog.V(0).Infof("deleting a folder %s has children: %+v ...", entry.FullPath, entries[0].Name())
+		glog.V(2).Infof("deleting a folder %s has children: %+v ...", entry.FullPath, entries[0].Name())
 		return fmt.Errorf("%s: %s", MsgFailDelNonEmptyFolder, entry.FullPath)
 	}
diff --git a/weed/s3api/s3api_bucket_handlers.go b/weed/s3api/s3api_bucket_handlers.go
index 04e1e00a4..151bdaca5 100644
--- a/weed/s3api/s3api_bucket_handlers.go
+++ b/weed/s3api/s3api_bucket_handlers.go
@@ -1,6 +1,7 @@
 package s3api

 import (
+	"bytes"
 	"context"
 	"encoding/xml"
 	"errors"
@@ -10,6 +11,7 @@ import (
 	"github.com/seaweedfs/seaweedfs/weed/util"
 	"math"
 	"net/http"
+	"strings"
 	"time"

 	"github.com/seaweedfs/seaweedfs/weed/filer"
@@ -325,38 +327,155 @@ func (s3a *S3ApiServer) GetBucketLifecycleConfigurationHandler(w http.ResponseWr
 		s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchLifecycleConfiguration)
 		return
 	}
+
 	response := Lifecycle{}
-	for prefix, internalTtl := range ttls {
+	for locationPrefix, internalTtl := range ttls {
 		ttl, _ := needle.ReadTTL(internalTtl)
 		days := int(ttl.Minutes() / 60 / 24)
 		if days == 0 {
 			continue
 		}
+		prefix, found := strings.CutPrefix(locationPrefix, fmt.Sprintf("%s/%s/", s3a.option.BucketsPath, bucket))
+		if !found {
+			continue
+		}
 		response.Rules = append(response.Rules, Rule{
-			Status: Enabled, Filter: Filter{
-				Prefix: Prefix{string: prefix, set: true},
-				set:    true,
-			},
+			ID:         prefix,
+			Status:     Enabled,
+			Prefix:     Prefix{val: prefix, set: true},
 			Expiration: Expiration{Days: days, set: true},
 		})
 	}
+
 	writeSuccessResponseXML(w, r, response)
 }
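For reviewers, a minimal standalone sketch of the `strings.CutPrefix` step added in `GetBucketLifecycleConfigurationHandler`: a filer location prefix such as `/buckets/mybucket/logs/` is mapped back to the S3 rule prefix `logs/`, and TTL entries from other buckets are skipped. Paths below are made up for illustration.

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	bucketsPath, bucket := "/buckets", "mybucket" // illustrative values
	locationPrefix := "/buckets/mybucket/logs/"

	// Trim "<bucketsPath>/<bucket>/" to recover the rule prefix.
	prefix, found := strings.CutPrefix(locationPrefix, fmt.Sprintf("%s/%s/", bucketsPath, bucket))
	if !found {
		fmt.Println("TTL entry belongs to another bucket; skipped")
		return
	}
	fmt.Println(prefix) // "logs/"
}
```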
 // PutBucketLifecycleConfigurationHandler Put Bucket Lifecycle configuration
 // https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html
 func (s3a *S3ApiServer) PutBucketLifecycleConfigurationHandler(w http.ResponseWriter, r *http.Request) {
+	// collect parameters
+	bucket, _ := s3_constants.GetBucketAndObject(r)
+	glog.V(3).Infof("PutBucketLifecycleConfigurationHandler %s", bucket)
+
-	s3err.WriteErrorResponse(w, r, s3err.ErrNotImplemented)
+	if err := s3a.checkBucket(r, bucket); err != s3err.ErrNone {
+		s3err.WriteErrorResponse(w, r, err)
+		return
+	}
+
+	lifeCycleConfig := Lifecycle{}
+	if err := xmlDecoder(r.Body, &lifeCycleConfig, r.ContentLength); err != nil {
+		glog.Warningf("PutBucketLifecycleConfigurationHandler xml decode: %s", err)
+		s3err.WriteErrorResponse(w, r, s3err.ErrMalformedXML)
+		return
+	}
+
+	fc, err := filer.ReadFilerConf(s3a.option.Filer, s3a.option.GrpcDialOption, nil)
+	if err != nil {
+		glog.Errorf("PutBucketLifecycleConfigurationHandler read filer config: %s", err)
+		s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
+		return
+	}
+	collectionName := s3a.getCollectionName(bucket)
+	collectionTtls := fc.GetCollectionTtls(collectionName)
+	changed := false
+
+	for _, rule := range lifeCycleConfig.Rules {
+		if rule.Status != Enabled {
+			continue
+		}
+		var rulePrefix string
+		switch {
+		case rule.Filter.Prefix.set:
+			rulePrefix = rule.Filter.Prefix.val
+		case rule.Prefix.set:
+			rulePrefix = rule.Prefix.val
+		case !rule.Expiration.Date.IsZero() || rule.Transition.Days > 0 || !rule.Transition.Date.IsZero():
+			s3err.WriteErrorResponse(w, r, s3err.ErrNotImplemented)
+			return
+		}
+
+		if rule.Expiration.Days == 0 {
+			continue
+		}
+
+		locConf := &filer_pb.FilerConf_PathConf{
+			LocationPrefix: fmt.Sprintf("%s/%s/%s", s3a.option.BucketsPath, bucket, rulePrefix),
+			Collection:     collectionName,
+			Ttl:            fmt.Sprintf("%dd", rule.Expiration.Days),
+		}
+		if ttl, ok := collectionTtls[locConf.LocationPrefix]; ok && ttl == locConf.Ttl {
+			continue
+		}
+		if err := fc.AddLocationConf(locConf); err != nil {
+			glog.Errorf("PutBucketLifecycleConfigurationHandler add location config: %s", err)
+			s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
+			return
+		}
+		changed = true
+	}
+
+	if changed {
+		var buf bytes.Buffer
+		if err := fc.ToText(&buf); err != nil {
+			glog.Errorf("PutBucketLifecycleConfigurationHandler save config to text: %s", err)
+			s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
+		}
+		if err := s3a.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {
+			return filer.SaveInsideFiler(client, filer.DirectoryEtcSeaweedFS, filer.FilerConfName, buf.Bytes())
+		}); err != nil {
+			glog.Errorf("PutBucketLifecycleConfigurationHandler save config inside filer: %s", err)
+			s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
+			return
+		}
+	}
+
+	writeSuccessResponseEmpty(w, r)
 }
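A sketch of the kind of request body this handler now accepts: one enabled rule that expires objects under `logs/` after 7 days. The structs below are client-side stand-ins mirroring the AWS LifecycleConfiguration schema, not SeaweedFS's internal `Lifecycle` type.

```go
package main

import (
	"encoding/xml"
	"fmt"
)

type lifecycleConfiguration struct {
	XMLName xml.Name `xml:"LifecycleConfiguration"`
	Rules   []rule   `xml:"Rule"`
}

type rule struct {
	ID         string     `xml:"ID"`
	Prefix     string     `xml:"Prefix"`
	Status     string     `xml:"Status"`
	Expiration expiration `xml:"Expiration"`
}

type expiration struct {
	Days int `xml:"Days"`
}

func main() {
	// Build the XML payload a client would PUT to /<bucket>?lifecycle.
	body, _ := xml.MarshalIndent(lifecycleConfiguration{
		Rules: []rule{{ID: "logs", Prefix: "logs/", Status: "Enabled", Expiration: expiration{Days: 7}}},
	}, "", "  ")
	fmt.Println(string(body))
}
```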
 // DeleteBucketLifecycleHandler Delete Bucket Lifecycle
 // https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html
 func (s3a *S3ApiServer) DeleteBucketLifecycleHandler(w http.ResponseWriter, r *http.Request) {
+	// collect parameters
+	bucket, _ := s3_constants.GetBucketAndObject(r)
+	glog.V(3).Infof("DeleteBucketLifecycleHandler %s", bucket)
+
-	s3err.WriteEmptyResponse(w, r, http.StatusNoContent)
+	if err := s3a.checkBucket(r, bucket); err != s3err.ErrNone {
+		s3err.WriteErrorResponse(w, r, err)
+		return
+	}
+
+	fc, err := filer.ReadFilerConf(s3a.option.Filer, s3a.option.GrpcDialOption, nil)
+	if err != nil {
+		glog.Errorf("DeleteBucketLifecycleHandler read filer config: %s", err)
+		s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
+		return
+	}
+	collectionTtls := fc.GetCollectionTtls(s3a.getCollectionName(bucket))
+	changed := false
+	for prefix, ttl := range collectionTtls {
+		bucketPrefix := fmt.Sprintf("%s/%s/", s3a.option.BucketsPath, bucket)
+		if strings.HasPrefix(prefix, bucketPrefix) && strings.HasSuffix(ttl, "d") {
+			fc.DeleteLocationConf(prefix)
+			changed = true
+		}
+	}
+
+	if changed {
+		var buf bytes.Buffer
+		if err := fc.ToText(&buf); err != nil {
+			glog.Errorf("DeleteBucketLifecycleHandler save config to text: %s", err)
+			s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
+		}
+		if err := s3a.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {
+			return filer.SaveInsideFiler(client, filer.DirectoryEtcSeaweedFS, filer.FilerConfName, buf.Bytes())
+		}); err != nil {
+			glog.Errorf("DeleteBucketLifecycleHandler save config inside filer: %s", err)
+			s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
+			return
+		}
+	}
+
+	s3err.WriteEmptyResponse(w, r, http.StatusNoContent)
 }

 // GetBucketLocationHandler Get bucket location
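A small sketch of the deletion filter above: only TTL entries that live under this bucket's path and were written in day units (e.g. "7d", the format the PUT handler emits) are treated as lifecycle rules and removed; other TTLs survive. The map contents are invented for the example.

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	bucketsPath, bucket := "/buckets", "mybucket" // illustrative values
	collectionTtls := map[string]string{
		"/buckets/mybucket/logs/": "7d",  // day-based TTL under this bucket: delete
		"/buckets/mybucket/raw/":  "10m", // non-day TTL configured elsewhere: keep
		"/buckets/other/tmp/":     "3d",  // different bucket: keep
	}

	bucketPrefix := fmt.Sprintf("%s/%s/", bucketsPath, bucket)
	for prefix, ttl := range collectionTtls {
		if strings.HasPrefix(prefix, bucketPrefix) && strings.HasSuffix(ttl, "d") {
			fmt.Println("would delete location conf:", prefix)
		}
	}
}
```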
diff --git a/weed/s3api/s3api_object_handlers.go b/weed/s3api/s3api_object_handlers.go
index dd3a16bf8..1d58af8bc 100644
--- a/weed/s3api/s3api_object_handlers.go
+++ b/weed/s3api/s3api_object_handlers.go
@@ -2,9 +2,6 @@ package s3api

 import (
 	"bytes"
-	"crypto/md5"
-	"encoding/json"
-	"encoding/xml"
 	"fmt"
 	"io"
 	"net/http"
@@ -13,25 +10,13 @@ import (
 	"time"

 	"github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants"
-	"github.com/seaweedfs/seaweedfs/weed/security"
-	"github.com/seaweedfs/seaweedfs/weed/util/mem"
-	"golang.org/x/exp/slices"
-
-	"github.com/pquerna/cachecontrol/cacheobject"
-
 	"github.com/seaweedfs/seaweedfs/weed/filer"
-
 	"github.com/seaweedfs/seaweedfs/weed/s3api/s3err"
+	"github.com/seaweedfs/seaweedfs/weed/util/mem"

 	"github.com/seaweedfs/seaweedfs/weed/glog"
-	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
-	weed_server "github.com/seaweedfs/seaweedfs/weed/server"
 	"github.com/seaweedfs/seaweedfs/weed/util"
 )

-const (
-	deleteMultipleObjectsLimit = 1000
-)
-
 func mimeDetect(r *http.Request, dataReader io.Reader) io.ReadCloser {
 	mimeBuffer := make([]byte, 512)
 	size, _ := dataReader.Read(mimeBuffer)
@@ -42,92 +27,6 @@ func mimeDetect(r *http.Request, dataReader io.Reader) io.ReadCloser {
 	return io.NopCloser(dataReader)
 }

-func (s3a *S3ApiServer) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
-
-	// http://docs.aws.amazon.com/AmazonS3/latest/dev/UploadingObjects.html
-
-	bucket, object := s3_constants.GetBucketAndObject(r)
-	glog.V(3).Infof("PutObjectHandler %s %s", bucket, object)
-
-	_, err := validateContentMd5(r.Header)
-	if err != nil {
-		s3err.WriteErrorResponse(w, r, s3err.ErrInvalidDigest)
-		return
-	}
-
-	if r.Header.Get("Cache-Control") != "" {
-		if _, err = cacheobject.ParseRequestCacheControl(r.Header.Get("Cache-Control")); err != nil {
-			s3err.WriteErrorResponse(w, r, s3err.ErrInvalidDigest)
-			return
-		}
-	}
-
-	if r.Header.Get("Expires") != "" {
-		if _, err = time.Parse(http.TimeFormat, r.Header.Get("Expires")); err != nil {
-			s3err.WriteErrorResponse(w, r, s3err.ErrMalformedDate)
-			return
-		}
-	}
-
-	dataReader := r.Body
-	rAuthType := getRequestAuthType(r)
-	if s3a.iam.isEnabled() {
-		var s3ErrCode s3err.ErrorCode
-		switch rAuthType {
-		case authTypeStreamingSigned:
-			dataReader, s3ErrCode = s3a.iam.newSignV4ChunkedReader(r)
-		case authTypeSignedV2, authTypePresignedV2:
-			_, s3ErrCode = s3a.iam.isReqAuthenticatedV2(r)
-		case authTypePresigned, authTypeSigned:
-			_, s3ErrCode = s3a.iam.reqSignatureV4Verify(r)
-		}
-		if s3ErrCode != s3err.ErrNone {
-			s3err.WriteErrorResponse(w, r, s3ErrCode)
-			return
-		}
-	} else {
-		if authTypeStreamingSigned == rAuthType {
-			s3err.WriteErrorResponse(w, r, s3err.ErrAuthNotSetup)
-			return
-		}
-	}
-	defer dataReader.Close()
-
-	objectContentType := r.Header.Get("Content-Type")
-	if strings.HasSuffix(object, "/") && r.ContentLength <= 1024 {
-		if err := s3a.mkdir(
-			s3a.option.BucketsPath, bucket+strings.TrimSuffix(object, "/"),
-			func(entry *filer_pb.Entry) {
-				if objectContentType == "" {
-					objectContentType = s3_constants.FolderMimeType
-				}
-				if r.ContentLength > 0 {
-					entry.Content, _ = io.ReadAll(r.Body)
-				}
-				entry.Attributes.Mime = objectContentType
-			}); err != nil {
-			s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
-			return
-		}
-	} else {
-		uploadUrl := s3a.toFilerUrl(bucket, object)
-		if objectContentType == "" {
-			dataReader = mimeDetect(r, dataReader)
-		}
-
-		etag, errCode := s3a.putToFiler(r, uploadUrl, dataReader, "", bucket)
-
-		if errCode != s3err.ErrNone {
-			s3err.WriteErrorResponse(w, r, errCode)
-			return
-		}
-
-		setEtag(w, etag)
-	}
-
-	writeSuccessResponseEmpty(w, r)
-}
-
 func urlEscapeObject(object string) string {
 	t := urlPathEscape(removeDuplicateSlashes(object))
 	if strings.HasPrefix(t, "/") {
@@ -196,162 +95,6 @@ func (s3a *S3ApiServer) HeadObjectHandler(w http.ResponseWriter, r *http.Request
 	s3a.proxyToFiler(w, r, destUrl, false, passThroughResponse)
 }

-func (s3a *S3ApiServer) DeleteObjectHandler(w http.ResponseWriter, r *http.Request) {
-
-	bucket, object := s3_constants.GetBucketAndObject(r)
-	glog.V(3).Infof("DeleteObjectHandler %s %s", bucket, object)
-
-	destUrl := s3a.toFilerUrl(bucket, object)
-
-	s3a.proxyToFiler(w, r, destUrl, true, func(proxyResponse *http.Response, w http.ResponseWriter) (statusCode int) {
-		statusCode = http.StatusNoContent
-		for k, v := range proxyResponse.Header {
-			w.Header()[k] = v
-		}
-		w.WriteHeader(statusCode)
-		return statusCode
-	})
-}
-
-// / ObjectIdentifier carries key name for the object to delete.
-type ObjectIdentifier struct {
-	ObjectName string `xml:"Key"`
-}
-
-// DeleteObjectsRequest - xml carrying the object key names which needs to be deleted.
-type DeleteObjectsRequest struct {
-	// Element to enable quiet mode for the request
-	Quiet bool
-	// List of objects to be deleted
-	Objects []ObjectIdentifier `xml:"Object"`
-}
-
-// DeleteError structure.
-type DeleteError struct {
-	Code    string
-	Message string
-	Key     string
-}
-
-// DeleteObjectsResponse container for multiple object deletes.
-type DeleteObjectsResponse struct {
-	XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ DeleteResult" json:"-"`
-
-	// Collection of all deleted objects
-	DeletedObjects []ObjectIdentifier `xml:"Deleted,omitempty"`
-
-	// Collection of errors deleting certain objects.
-	Errors []DeleteError `xml:"Error,omitempty"`
-}
-
-// DeleteMultipleObjectsHandler - Delete multiple objects
-func (s3a *S3ApiServer) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *http.Request) {
-
-	bucket, _ := s3_constants.GetBucketAndObject(r)
-	glog.V(3).Infof("DeleteMultipleObjectsHandler %s", bucket)
-
-	deleteXMLBytes, err := io.ReadAll(r.Body)
-	if err != nil {
-		s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
-		return
-	}
-
-	deleteObjects := &DeleteObjectsRequest{}
-	if err := xml.Unmarshal(deleteXMLBytes, deleteObjects); err != nil {
-		s3err.WriteErrorResponse(w, r, s3err.ErrMalformedXML)
-		return
-	}
-
-	if len(deleteObjects.Objects) > deleteMultipleObjectsLimit {
-		s3err.WriteErrorResponse(w, r, s3err.ErrInvalidMaxDeleteObjects)
-		return
-	}
-
-	var deletedObjects []ObjectIdentifier
-	var deleteErrors []DeleteError
-	var auditLog *s3err.AccessLog
-
-	directoriesWithDeletion := make(map[string]int)
-
-	if s3err.Logger != nil {
-		auditLog = s3err.GetAccessLog(r, http.StatusNoContent, s3err.ErrNone)
-	}
-	s3a.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {
-
-		// delete file entries
-		for _, object := range deleteObjects.Objects {
-			if object.ObjectName == "" {
-				continue
-			}
-			lastSeparator := strings.LastIndex(object.ObjectName, "/")
-			parentDirectoryPath, entryName, isDeleteData, isRecursive := "", object.ObjectName, true, false
-			if lastSeparator > 0 && lastSeparator+1 < len(object.ObjectName) {
-				entryName = object.ObjectName[lastSeparator+1:]
-				parentDirectoryPath = "/" + object.ObjectName[:lastSeparator]
-			}
-			parentDirectoryPath = fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, bucket, parentDirectoryPath)
-
-			err := doDeleteEntry(client, parentDirectoryPath, entryName, isDeleteData, isRecursive)
-			if err == nil {
-				directoriesWithDeletion[parentDirectoryPath]++
-				deletedObjects = append(deletedObjects, object)
-			} else if strings.Contains(err.Error(), filer.MsgFailDelNonEmptyFolder) {
-				deletedObjects = append(deletedObjects, object)
-			} else {
-				delete(directoriesWithDeletion, parentDirectoryPath)
-				deleteErrors = append(deleteErrors, DeleteError{
-					Code:    "",
-					Message: err.Error(),
-					Key:     object.ObjectName,
-				})
-			}
-			if auditLog != nil {
-				auditLog.Key = entryName
-				s3err.PostAccessLog(*auditLog)
-			}
-		}
-
-		// purge empty folders, only checking folders with deletions
-		for len(directoriesWithDeletion) > 0 {
-			directoriesWithDeletion = s3a.doDeleteEmptyDirectories(client, directoriesWithDeletion)
-		}
-
-		return nil
-	})
-
-	deleteResp := DeleteObjectsResponse{}
-	if !deleteObjects.Quiet {
-		deleteResp.DeletedObjects = deletedObjects
-	}
-	deleteResp.Errors = deleteErrors
-
-	writeSuccessResponseXML(w, r, deleteResp)
-
-}
-
-func (s3a *S3ApiServer) doDeleteEmptyDirectories(client filer_pb.SeaweedFilerClient, directoriesWithDeletion map[string]int) (newDirectoriesWithDeletion map[string]int) {
-	var allDirs []string
-	for dir := range directoriesWithDeletion {
-		allDirs = append(allDirs, dir)
-	}
-	slices.SortFunc(allDirs, func(a, b string) int {
-		return len(b) - len(a)
-	})
-	newDirectoriesWithDeletion = make(map[string]int)
-	for _, dir := range allDirs {
-		parentDir, dirName := util.FullPath(dir).DirAndName()
-		if parentDir == s3a.option.BucketsPath {
-			continue
-		}
-		if err := doDeleteEntry(client, parentDir, dirName, false, false); err != nil {
-			glog.V(4).Infof("directory %s has %d deletion but still not empty: %v", dir, directoriesWithDeletion[dir], err)
-		} else {
-			newDirectoriesWithDeletion[parentDir]++
-		}
-	}
-	return
-}
-
 func (s3a *S3ApiServer) proxyToFiler(w http.ResponseWriter, r *http.Request, destUrl string, isWrite bool, responseFn func(proxyResponse *http.Response, w http.ResponseWriter) (statusCode int)) {

 	glog.V(3).Infof("s3 proxying %s to %s", r.Method, destUrl)
@@ -477,104 +220,3 @@ func passThroughResponse(proxyResponse *http.Response, w http.ResponseWriter) (s
 	}
 	return statusCode
 }
-
-func (s3a *S3ApiServer) putToFiler(r *http.Request, uploadUrl string, dataReader io.Reader, destination string, bucket string) (etag string, code s3err.ErrorCode) {
-
-	hash := md5.New()
-	var body = io.TeeReader(dataReader, hash)
-
-	proxyReq, err := http.NewRequest("PUT", uploadUrl, body)
-
-	if err != nil {
-		glog.Errorf("NewRequest %s: %v", uploadUrl, err)
-		return "", s3err.ErrInternalError
-	}
-
-	proxyReq.Header.Set("X-Forwarded-For", r.RemoteAddr)
-	if destination != "" {
-		proxyReq.Header.Set(s3_constants.SeaweedStorageDestinationHeader, destination)
-	}
-
-	if s3a.option.FilerGroup != "" {
-		query := proxyReq.URL.Query()
-		query.Add("collection", s3a.getCollectionName(bucket))
-		proxyReq.URL.RawQuery = query.Encode()
-	}
-
-	for header, values := range r.Header {
-		for _, value := range values {
-			proxyReq.Header.Add(header, value)
-		}
-	}
-	// ensure that the Authorization header is overriding any previous
-	// Authorization header which might be already present in proxyReq
-	s3a.maybeAddFilerJwtAuthorization(proxyReq, true)
-	resp, postErr := s3a.client.Do(proxyReq)
-
-	if postErr != nil {
-		glog.Errorf("post to filer: %v", postErr)
-		return "", s3err.ErrInternalError
-	}
-	defer resp.Body.Close()
-
-	etag = fmt.Sprintf("%x", hash.Sum(nil))
-
-	resp_body, ra_err := io.ReadAll(resp.Body)
-	if ra_err != nil {
-		glog.Errorf("upload to filer response read %d: %v", resp.StatusCode, ra_err)
-		return etag, s3err.ErrInternalError
-	}
-	var ret weed_server.FilerPostResult
-	unmarshal_err := json.Unmarshal(resp_body, &ret)
-	if unmarshal_err != nil {
-		glog.Errorf("failing to read upload to %s : %v", uploadUrl, string(resp_body))
-		return "", s3err.ErrInternalError
-	}
-	if ret.Error != "" {
-		glog.Errorf("upload to filer error: %v", ret.Error)
-		return "", filerErrorToS3Error(ret.Error)
-	}
-
-	return etag, s3err.ErrNone
-}
-
-func setEtag(w http.ResponseWriter, etag string) {
-	if etag != "" {
-		if strings.HasPrefix(etag, "\"") {
-			w.Header()["ETag"] = []string{etag}
-		} else {
-			w.Header()["ETag"] = []string{"\"" + etag + "\""}
-		}
-	}
-}
-
-func filerErrorToS3Error(errString string) s3err.ErrorCode {
-	switch {
-	case strings.HasPrefix(errString, "existing ") && strings.HasSuffix(errString, "is a directory"):
-		return s3err.ErrExistingObjectIsDirectory
-	case strings.HasSuffix(errString, "is a file"):
-		return s3err.ErrExistingObjectIsFile
-	default:
-		return s3err.ErrInternalError
-	}
-}
-
-func (s3a *S3ApiServer) maybeAddFilerJwtAuthorization(r *http.Request, isWrite bool) {
-	encodedJwt := s3a.maybeGetFilerJwtAuthorizationToken(isWrite)
-
-	if encodedJwt == "" {
-		return
-	}
-
-	r.Header.Set("Authorization", "BEARER "+string(encodedJwt))
-}
-
-func (s3a *S3ApiServer) maybeGetFilerJwtAuthorizationToken(isWrite bool) string {
-	var encodedJwt security.EncodedJwt
-	if isWrite {
-		encodedJwt = security.GenJwtForFilerServer(s3a.filerGuard.SigningKey, s3a.filerGuard.ExpiresAfterSec)
-	} else {
-		encodedJwt = security.GenJwtForFilerServer(s3a.filerGuard.ReadSigningKey, s3a.filerGuard.ReadExpiresAfterSec)
-	}
-	return string(encodedJwt)
-}
diff --git a/weed/s3api/s3api_object_copy_handlers.go b/weed/s3api/s3api_object_handlers_copy.go
similarity index 100%
rename from weed/s3api/s3api_object_copy_handlers.go
rename to weed/s3api/s3api_object_handlers_copy.go
diff --git a/weed/s3api/s3api_object_copy_handlers_test.go b/weed/s3api/s3api_object_handlers_copy_test.go
similarity index 100%
rename from weed/s3api/s3api_object_copy_handlers_test.go
rename to weed/s3api/s3api_object_handlers_copy_test.go
diff --git a/weed/s3api/s3api_object_handlers_delete.go b/weed/s3api/s3api_object_handlers_delete.go
new file mode 100644
index 000000000..197522d0c
--- /dev/null
+++ b/weed/s3api/s3api_object_handlers_delete.go
@@ -0,0 +1,199 @@
+package s3api
+
+import (
+	"encoding/xml"
+	"fmt"
+	"github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants"
+	"golang.org/x/exp/slices"
+	"io"
+	"net/http"
+	"strings"
+
+	"github.com/seaweedfs/seaweedfs/weed/filer"
+
+	"github.com/seaweedfs/seaweedfs/weed/s3api/s3err"
+
+	"github.com/seaweedfs/seaweedfs/weed/glog"
+	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
+	"github.com/seaweedfs/seaweedfs/weed/util"
+)
+
+const (
+	deleteMultipleObjectsLimit = 1000
+)
+
+func (s3a *S3ApiServer) DeleteObjectHandler(w http.ResponseWriter, r *http.Request) {
+
+	bucket, object := s3_constants.GetBucketAndObject(r)
+	glog.V(3).Infof("DeleteObjectHandler %s %s", bucket, object)
+
+	object = urlPathEscape(removeDuplicateSlashes(object))
+
+	s3a.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {
+
+		err := doDeleteEntry(client, s3a.option.BucketsPath+"/"+bucket, object, true, false)
+		if err != nil {
+			// skip deletion error, usually the file is not found
+			return nil
+		}
+
+		if s3a.option.AllowEmptyFolder {
+			return nil
+		}
+
+		directoriesWithDeletion := make(map[string]int)
+		lastSeparator := strings.LastIndex(object, "/")
+		if lastSeparator > 0 {
+			parentDirectoryPath := fmt.Sprintf("%s/%s", s3a.option.BucketsPath, bucket)
+			directoriesWithDeletion[parentDirectoryPath]++
+
+			// purge empty folders, only checking folders with deletions
+			for len(directoriesWithDeletion) > 0 {
+				directoriesWithDeletion = s3a.doDeleteEmptyDirectories(client, directoriesWithDeletion)
+			}
+		}
+
+		return nil
+	})
+
+	w.WriteHeader(http.StatusNoContent)
+}
+
+// / ObjectIdentifier carries key name for the object to delete.
+type ObjectIdentifier struct {
+	ObjectName string `xml:"Key"`
+}
+
+// DeleteObjectsRequest - xml carrying the object key names which needs to be deleted.
+type DeleteObjectsRequest struct {
+	// Element to enable quiet mode for the request
+	Quiet bool
+	// List of objects to be deleted
+	Objects []ObjectIdentifier `xml:"Object"`
+}
+
+// DeleteError structure.
+type DeleteError struct {
+	Code    string
+	Message string
+	Key     string
+}
+
+// DeleteObjectsResponse container for multiple object deletes.
+type DeleteObjectsResponse struct {
+	XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ DeleteResult" json:"-"`
+
+	// Collection of all deleted objects
+	DeletedObjects []ObjectIdentifier `xml:"Deleted,omitempty"`
+
+	// Collection of errors deleting certain objects.
+	Errors []DeleteError `xml:"Error,omitempty"`
+}
+
+// DeleteMultipleObjectsHandler - Delete multiple objects
+func (s3a *S3ApiServer) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *http.Request) {
+
+	bucket, _ := s3_constants.GetBucketAndObject(r)
+	glog.V(3).Infof("DeleteMultipleObjectsHandler %s", bucket)
+
+	deleteXMLBytes, err := io.ReadAll(r.Body)
+	if err != nil {
+		s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
+		return
+	}
+
+	deleteObjects := &DeleteObjectsRequest{}
+	if err := xml.Unmarshal(deleteXMLBytes, deleteObjects); err != nil {
+		s3err.WriteErrorResponse(w, r, s3err.ErrMalformedXML)
+		return
+	}
+
+	if len(deleteObjects.Objects) > deleteMultipleObjectsLimit {
+		s3err.WriteErrorResponse(w, r, s3err.ErrInvalidMaxDeleteObjects)
+		return
+	}
+
+	var deletedObjects []ObjectIdentifier
+	var deleteErrors []DeleteError
+	var auditLog *s3err.AccessLog
+
+	directoriesWithDeletion := make(map[string]int)
+
+	if s3err.Logger != nil {
+		auditLog = s3err.GetAccessLog(r, http.StatusNoContent, s3err.ErrNone)
+	}
+	s3a.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {
+
+		// delete file entries
+		for _, object := range deleteObjects.Objects {
+			if object.ObjectName == "" {
+				continue
+			}
+			lastSeparator := strings.LastIndex(object.ObjectName, "/")
+			parentDirectoryPath, entryName, isDeleteData, isRecursive := "", object.ObjectName, true, false
+			if lastSeparator > 0 && lastSeparator+1 < len(object.ObjectName) {
+				entryName = object.ObjectName[lastSeparator+1:]
+				parentDirectoryPath = "/" + object.ObjectName[:lastSeparator]
+			}
+			parentDirectoryPath = fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, bucket, parentDirectoryPath)
+
+			err := doDeleteEntry(client, parentDirectoryPath, entryName, isDeleteData, isRecursive)
+			if err == nil {
+				directoriesWithDeletion[parentDirectoryPath]++
+				deletedObjects = append(deletedObjects, object)
+			} else if strings.Contains(err.Error(), filer.MsgFailDelNonEmptyFolder) {
+				deletedObjects = append(deletedObjects, object)
+			} else {
+				delete(directoriesWithDeletion, parentDirectoryPath)
+				deleteErrors = append(deleteErrors, DeleteError{
+					Code:    "",
+					Message: err.Error(),
+					Key:     object.ObjectName,
+				})
+			}
+			if auditLog != nil {
+				auditLog.Key = entryName
+				s3err.PostAccessLog(*auditLog)
+			}
+		}
+
+		// purge empty folders, only checking folders with deletions
+		for len(directoriesWithDeletion) > 0 {
+			directoriesWithDeletion = s3a.doDeleteEmptyDirectories(client, directoriesWithDeletion)
+		}
+
+		return nil
+	})
+
+	deleteResp := DeleteObjectsResponse{}
+	if !deleteObjects.Quiet {
+		deleteResp.DeletedObjects = deletedObjects
+	}
+	deleteResp.Errors = deleteErrors
+
+	writeSuccessResponseXML(w, r, deleteResp)
+
+}
+
+func (s3a *S3ApiServer) doDeleteEmptyDirectories(client filer_pb.SeaweedFilerClient, directoriesWithDeletion map[string]int) (newDirectoriesWithDeletion map[string]int) {
+	var allDirs []string
+	for dir := range directoriesWithDeletion {
+		allDirs = append(allDirs, dir)
+	}
+	slices.SortFunc(allDirs, func(a, b string) int {
+		return len(b) - len(a)
+	})
+	newDirectoriesWithDeletion = make(map[string]int)
+	for _, dir := range allDirs {
+		parentDir, dirName := util.FullPath(dir).DirAndName()
+		if parentDir == s3a.option.BucketsPath {
+			continue
+		}
+		if err := doDeleteEntry(client, parentDir, dirName, false, false); err != nil {
+			glog.V(4).Infof("directory %s has %d deletion but still not empty: %v", dir, directoriesWithDeletion[dir], err)
+		} else {
+			newDirectoriesWithDeletion[parentDir]++
+		}
+	}
+	return
+}
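For context, a sketch of the multi-delete request body that `DeleteMultipleObjectsHandler` unmarshals. The structs below are local stand-ins mirroring `DeleteObjectsRequest`/`ObjectIdentifier` from the new file; keys are invented.

```go
package main

import (
	"encoding/xml"
	"fmt"
)

type deleteRequest struct {
	XMLName xml.Name           `xml:"Delete"`
	Quiet   bool               `xml:"Quiet"`
	Objects []objectIdentifier `xml:"Object"`
}

type objectIdentifier struct {
	Key string `xml:"Key"`
}

func main() {
	// Payload a client would send with POST /<bucket>?delete.
	body, _ := xml.MarshalIndent(deleteRequest{
		Quiet:   false,
		Objects: []objectIdentifier{{Key: "logs/a.txt"}, {Key: "logs/b.txt"}},
	}, "", "  ")
	fmt.Println(string(body))
}
```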
diff --git a/weed/s3api/s3api_objects_list_handlers.go b/weed/s3api/s3api_object_handlers_list.go
similarity index 99%
rename from weed/s3api/s3api_objects_list_handlers.go
rename to weed/s3api/s3api_object_handlers_list.go
index b00e4630d..38e7f6fef 100644
--- a/weed/s3api/s3api_objects_list_handlers.go
+++ b/weed/s3api/s3api_object_handlers_list.go
@@ -415,9 +415,9 @@ func (s3a *S3ApiServer) doListFilerEntries(client filer_pb.SeaweedFilerClient, d
 			} else {
 				var isEmpty bool
 				if !s3a.option.AllowEmptyFolder && entry.IsOlderDir() {
-					if isEmpty, err = s3a.ensureDirectoryAllEmpty(client, dir, entry.Name); err != nil {
-						glog.Errorf("check empty folder %s: %v", dir, err)
-					}
+					//if isEmpty, err = s3a.ensureDirectoryAllEmpty(client, dir, entry.Name); err != nil {
+					//	glog.Errorf("check empty folder %s: %v", dir, err)
+					//}
 				}
 				if !isEmpty {
 					eachEntryFn(dir, entry)
diff --git a/weed/s3api/s3api_objects_list_handlers_test.go b/weed/s3api/s3api_object_handlers_list_test.go
similarity index 100%
rename from weed/s3api/s3api_objects_list_handlers_test.go
rename to weed/s3api/s3api_object_handlers_list_test.go
diff --git a/weed/s3api/s3api_object_multipart_handlers.go b/weed/s3api/s3api_object_handlers_multipart.go
similarity index 100%
rename from weed/s3api/s3api_object_multipart_handlers.go
rename to weed/s3api/s3api_object_handlers_multipart.go
diff --git a/weed/s3api/s3api_object_handlers_put.go b/weed/s3api/s3api_object_handlers_put.go
new file mode 100644
index 000000000..49d385afc
--- /dev/null
+++ b/weed/s3api/s3api_object_handlers_put.go
@@ -0,0 +1,207 @@
+package s3api
+
+import (
+	"crypto/md5"
+	"encoding/json"
+	"fmt"
+	"io"
+	"net/http"
+	"strings"
+	"time"
+
+	"github.com/pquerna/cachecontrol/cacheobject"
+	"github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants"
+	"github.com/seaweedfs/seaweedfs/weed/s3api/s3err"
+	"github.com/seaweedfs/seaweedfs/weed/security"
+
+	"github.com/seaweedfs/seaweedfs/weed/glog"
+	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
+	weed_server "github.com/seaweedfs/seaweedfs/weed/server"
+)
+
+func (s3a *S3ApiServer) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
+
+	// http://docs.aws.amazon.com/AmazonS3/latest/dev/UploadingObjects.html
+
+	bucket, object := s3_constants.GetBucketAndObject(r)
+	glog.V(3).Infof("PutObjectHandler %s %s", bucket, object)
+
+	_, err := validateContentMd5(r.Header)
+	if err != nil {
+		s3err.WriteErrorResponse(w, r, s3err.ErrInvalidDigest)
+		return
+	}
+
+	if r.Header.Get("Cache-Control") != "" {
+		if _, err = cacheobject.ParseRequestCacheControl(r.Header.Get("Cache-Control")); err != nil {
+			s3err.WriteErrorResponse(w, r, s3err.ErrInvalidDigest)
+			return
+		}
+	}
+
+	if r.Header.Get("Expires") != "" {
+		if _, err = time.Parse(http.TimeFormat, r.Header.Get("Expires")); err != nil {
+			s3err.WriteErrorResponse(w, r, s3err.ErrMalformedDate)
+			return
+		}
+	}
+
+	dataReader := r.Body
+	rAuthType := getRequestAuthType(r)
+	if s3a.iam.isEnabled() {
+		var s3ErrCode s3err.ErrorCode
+		switch rAuthType {
+		case authTypeStreamingSigned:
+			dataReader, s3ErrCode = s3a.iam.newSignV4ChunkedReader(r)
+		case authTypeSignedV2, authTypePresignedV2:
+			_, s3ErrCode = s3a.iam.isReqAuthenticatedV2(r)
+		case authTypePresigned, authTypeSigned:
+			_, s3ErrCode = s3a.iam.reqSignatureV4Verify(r)
+		}
+		if s3ErrCode != s3err.ErrNone {
+			s3err.WriteErrorResponse(w, r, s3ErrCode)
+			return
+		}
+	} else {
+		if authTypeStreamingSigned == rAuthType {
+			s3err.WriteErrorResponse(w, r, s3err.ErrAuthNotSetup)
+			return
+		}
+	}
+	defer dataReader.Close()
+
+	objectContentType := r.Header.Get("Content-Type")
+	if strings.HasSuffix(object, "/") && r.ContentLength <= 1024 {
+		if err := s3a.mkdir(
+			s3a.option.BucketsPath, bucket+strings.TrimSuffix(object, "/"),
+			func(entry *filer_pb.Entry) {
+				if objectContentType == "" {
+					objectContentType = s3_constants.FolderMimeType
+				}
+				if r.ContentLength > 0 {
+					entry.Content, _ = io.ReadAll(r.Body)
+				}
+				entry.Attributes.Mime = objectContentType
+			}); err != nil {
+			s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
+			return
+		}
+	} else {
+		uploadUrl := s3a.toFilerUrl(bucket, object)
+		if objectContentType == "" {
+			dataReader = mimeDetect(r, dataReader)
+		}
+
+		etag, errCode := s3a.putToFiler(r, uploadUrl, dataReader, "", bucket)
+
+		if errCode != s3err.ErrNone {
+			s3err.WriteErrorResponse(w, r, errCode)
+			return
+		}
+
+		setEtag(w, etag)
+	}
+
+	writeSuccessResponseEmpty(w, r)
+}
+
+func (s3a *S3ApiServer) putToFiler(r *http.Request, uploadUrl string, dataReader io.Reader, destination string, bucket string) (etag string, code s3err.ErrorCode) {
+
+	hash := md5.New()
+	var body = io.TeeReader(dataReader, hash)
+
+	proxyReq, err := http.NewRequest("PUT", uploadUrl, body)
+
+	if err != nil {
+		glog.Errorf("NewRequest %s: %v", uploadUrl, err)
+		return "", s3err.ErrInternalError
+	}
+
+	proxyReq.Header.Set("X-Forwarded-For", r.RemoteAddr)
+	if destination != "" {
+		proxyReq.Header.Set(s3_constants.SeaweedStorageDestinationHeader, destination)
+	}
+
+	if s3a.option.FilerGroup != "" {
+		query := proxyReq.URL.Query()
+		query.Add("collection", s3a.getCollectionName(bucket))
+		proxyReq.URL.RawQuery = query.Encode()
+	}
+
+	for header, values := range r.Header {
+		for _, value := range values {
+			proxyReq.Header.Add(header, value)
+		}
+	}
+	// ensure that the Authorization header is overriding any previous
+	// Authorization header which might be already present in proxyReq
+	s3a.maybeAddFilerJwtAuthorization(proxyReq, true)
+	resp, postErr := s3a.client.Do(proxyReq)
+
+	if postErr != nil {
+		glog.Errorf("post to filer: %v", postErr)
+		return "", s3err.ErrInternalError
+	}
+	defer resp.Body.Close()
+
+	etag = fmt.Sprintf("%x", hash.Sum(nil))
+
+	resp_body, ra_err := io.ReadAll(resp.Body)
+	if ra_err != nil {
+		glog.Errorf("upload to filer response read %d: %v", resp.StatusCode, ra_err)
+		return etag, s3err.ErrInternalError
+	}
+	var ret weed_server.FilerPostResult
+	unmarshal_err := json.Unmarshal(resp_body, &ret)
+	if unmarshal_err != nil {
+		glog.Errorf("failing to read upload to %s : %v", uploadUrl, string(resp_body))
+		return "", s3err.ErrInternalError
+	}
+	if ret.Error != "" {
+		glog.Errorf("upload to filer error: %v", ret.Error)
+		return "", filerErrorToS3Error(ret.Error)
+	}
+
+	return etag, s3err.ErrNone
+}
+
+func setEtag(w http.ResponseWriter, etag string) {
+	if etag != "" {
+		if strings.HasPrefix(etag, "\"") {
+			w.Header()["ETag"] = []string{etag}
+		} else {
+			w.Header()["ETag"] = []string{"\"" + etag + "\""}
+		}
+	}
+}
+
+func filerErrorToS3Error(errString string) s3err.ErrorCode {
+	switch {
+	case strings.HasPrefix(errString, "existing ") && strings.HasSuffix(errString, "is a directory"):
+		return s3err.ErrExistingObjectIsDirectory
+	case strings.HasSuffix(errString, "is a file"):
+		return s3err.ErrExistingObjectIsFile
+	default:
+		return s3err.ErrInternalError
+	}
+}
+
+func (s3a *S3ApiServer) maybeAddFilerJwtAuthorization(r *http.Request, isWrite bool) {
+	encodedJwt := s3a.maybeGetFilerJwtAuthorizationToken(isWrite)
+
+	if encodedJwt == "" {
+		return
+	}
+
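A minimal standalone sketch of the streaming-ETag trick used in `putToFiler` above: the request body is tee'd through an MD5 hash on its way to the filer, so the ETag is available the moment the upload finishes without buffering the whole object in memory. The `io.Copy` to `io.Discard` stands in for the proxied PUT.

```go
package main

import (
	"crypto/md5"
	"fmt"
	"io"
	"strings"
)

func main() {
	body := strings.NewReader("hello world") // stand-in for r.Body

	hash := md5.New()
	tee := io.TeeReader(body, hash) // every byte read also feeds the hash

	n, _ := io.Copy(io.Discard, tee) // stand-in for the proxied upload

	fmt.Printf("uploaded %d bytes, etag %x\n", n, hash.Sum(nil))
}
```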
r.Header.Set("Authorization", "BEARER "+string(encodedJwt)) +} + +func (s3a *S3ApiServer) maybeGetFilerJwtAuthorizationToken(isWrite bool) string { + var encodedJwt security.EncodedJwt + if isWrite { + encodedJwt = security.GenJwtForFilerServer(s3a.filerGuard.SigningKey, s3a.filerGuard.ExpiresAfterSec) + } else { + encodedJwt = security.GenJwtForFilerServer(s3a.filerGuard.ReadSigningKey, s3a.filerGuard.ReadExpiresAfterSec) + } + return string(encodedJwt) +} diff --git a/weed/s3api/s3api_object_skip_handlers.go b/weed/s3api/s3api_object_handlers_skip.go similarity index 100% rename from weed/s3api/s3api_object_skip_handlers.go rename to weed/s3api/s3api_object_handlers_skip.go diff --git a/weed/s3api/s3api_object_tagging_handlers.go b/weed/s3api/s3api_object_handlers_tagging.go similarity index 100% rename from weed/s3api/s3api_object_tagging_handlers.go rename to weed/s3api/s3api_object_handlers_tagging.go diff --git a/weed/s3api/s3api_policy.go b/weed/s3api/s3api_policy.go index 6e2c8cfa2..dab2e3f02 100644 --- a/weed/s3api/s3api_policy.go +++ b/weed/s3api/s3api_policy.go @@ -47,8 +47,14 @@ type Filter struct { // Prefix holds the prefix xml tag in and type Prefix struct { - string - set bool + XMLName xml.Name `xml:"Prefix"` + set bool + + val string +} + +func (p Prefix) String() string { + return p.val } // MarshalXML encodes Prefix field into an XML form. @@ -56,11 +62,21 @@ func (p Prefix) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error if !p.set { return nil } - return e.EncodeElement(p.string, startElement) + return e.EncodeElement(p.val, startElement) +} + +func (p *Prefix) UnmarshalXML(d *xml.Decoder, startElement xml.StartElement) error { + prefix := "" + _ = d.DecodeElement(&prefix, &startElement) + *p = Prefix{set: true, val: prefix} + return nil } // MarshalXML encodes Filter field into an XML form. 
 func (f Filter) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+	if !f.set {
+		return nil
+	}
 	if err := e.EncodeToken(start); err != nil {
 		return err
 	}
diff --git a/weed/storage/needle/needle_parse_upload.go b/weed/storage/needle/needle_parse_upload.go
index e00729811..b512c16a2 100644
--- a/weed/storage/needle/needle_parse_upload.go
+++ b/weed/storage/needle/needle_parse_upload.go
@@ -44,7 +44,15 @@ func ParseUpload(r *http.Request, sizeLimit int64, bytesBuffer *bytes.Buffer) (p
 	}

 	if r.Method == "POST" {
-		e = parseMultipart(r, sizeLimit, pu)
+		contentType := r.Header.Get("Content-Type")
+
+		// If content-type is explicitly set, upload the file without parsing form-data
+		if contentType != "" && !strings.Contains(contentType, "form-data") {
+			e = parseRawPost(r, sizeLimit, pu)
+		} else {
+			e = parseMultipart(r, sizeLimit, pu)
+		}
+
 	} else {
 		e = parsePut(r, sizeLimit, pu)
 	}
@@ -205,3 +213,65 @@ func parseMultipart(r *http.Request, sizeLimit int64, pu *ParsedUpload) (e error

 	return
 }
+
+func parseRawPost(r *http.Request, sizeLimit int64, pu *ParsedUpload) (e error) {
+
+	defer func() {
+		if e != nil && r.Body != nil {
+			io.Copy(io.Discard, r.Body)
+			r.Body.Close()
+		}
+	}()
+
+	pu.FileName = r.Header.Get("Content-Disposition")
+
+	if pu.FileName != "" && strings.Contains(pu.FileName, "filename=") {
+		parts := strings.Split(pu.FileName, "filename=")
+		parts = strings.Split(parts[1], "\"")
+
+		pu.FileName = parts[1]
+	} else {
+		pu.FileName = ""
+	}
+
+	if pu.FileName != "" {
+		pu.FileName = path.Base(pu.FileName)
+	} else {
+		pu.FileName = path.Base(r.URL.Path)
+	}
+
+	var dataSize int64
+	dataSize, e = pu.bytesBuffer.ReadFrom(io.LimitReader(r.Body, sizeLimit+1))
+
+	if e != nil {
+		glog.V(0).Infoln("Reading Content [ERROR]", e)
+		return
+	}
+	if dataSize == sizeLimit+1 {
+		e = fmt.Errorf("file over the limited %d bytes", sizeLimit)
+		return
+	}
+	pu.Data = pu.bytesBuffer.Bytes()
+	pu.IsChunkedFile, _ = strconv.ParseBool(r.FormValue("cm"))
+
+	if !pu.IsChunkedFile {
+
+		dotIndex := strings.LastIndex(pu.FileName, ".")
+		ext, mtype := "", ""
+		if dotIndex > 0 {
+			ext = strings.ToLower(pu.FileName[dotIndex:])
+			mtype = mime.TypeByExtension(ext)
+		}
+		contentType := r.Header.Get("Content-Type")
+
+		if contentType != "" && contentType != "application/octet-stream" && mtype != contentType {
+			pu.MimeType = contentType // only return mime type if not deducible
+			mtype = contentType
+		}
+
+	}
+	pu.IsGzipped = r.Header.Get("Content-Encoding") == "gzip"
+	// pu.IsZstd = r.Header.Get("Content-Encoding") == "zstd"
+
+	return
+}
diff --git a/weed/storage/volume_checking.go b/weed/storage/volume_checking.go
index 4d51e081f..0c5f154e8 100644
--- a/weed/storage/volume_checking.go
+++ b/weed/storage/volume_checking.go
@@ -109,6 +109,9 @@ func verifyNeedleIntegrity(datFile backend.BackendStorageFile, v needle.Version,
 		return 0, fmt.Errorf("verifyNeedleIntegrity check %s entry offset %d size %d: %v", datFile.Name(), offset, size, err)
 	}
 	n.AppendAtNs = util.BytesToUint64(bytes)
+	if n.HasTtl() {
+		return n.AppendAtNs, nil
+	}
 	fileTailOffset := offset + needle.GetActualSize(size, v)
 	fileSize, _, err := datFile.GetStat()
 	if err != nil {
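Finally, a sketch of the kind of raw (non multipart/form-data) volume-server upload the new `parseRawPost` path accepts: an explicit Content-Type routes the body straight into the needle instead of through the multipart parser, and Content-Disposition optionally carries the file name. The host and fid below are placeholders.

```go
package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	body := bytes.NewReader([]byte("hello world"))

	// POST to a volume server with an assigned fid (placeholder values).
	req, err := http.NewRequest("POST", "http://localhost:8080/3,01637037d6", body)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "text/plain") // non form-data: triggers parseRawPost
	req.Header.Set("Content-Disposition", `inline; filename="hello.txt"`)

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
```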