Browse Source
lifecycle worker: NoncurrentVersionExpiration support (#8810)
* lifecycle worker: add NoncurrentVersionExpiration support Add version-aware scanning to the rule-based execution path. When the walker encounters a .versions directory, processVersionsDirectory(): - Lists all version entries (v_<versionId>) - Sorts by version timestamp (newest first) - Walks non-current versions with ShouldExpireNoncurrentVersion() which handles both NoncurrentDays and NewerNoncurrentVersions - Extracts successor time from version IDs (both old/new format) - Skips delete markers in noncurrent version counting - Falls back to entry Mtime when version ID timestamp is unavailable Helper functions: - sortVersionsByTimestamp: insertion sort by version ID timestamp - getEntryVersionTimestamp: extracts timestamp with Mtime fallback * lifecycle worker: address review feedback for noncurrent versions - Use sentinel errLimitReached in versions directory handler - Set NoncurrentIndex on ObjectInfo for proper NewerNoncurrentVersions evaluation * lifecycle worker: fail closed on XML parse error, guard zero Mtime - Fail closed when lifecycle XML exists but fails to parse, instead of falling back to TTL which could apply broader rules - Guard Mtime > 0 before using time.Unix(mtime, 0) to avoid mapping unset Mtime to 1970, which would misorder versions and cause premature expiration * lifecycle worker: count delete markers toward NoncurrentIndex Noncurrent delete markers should count toward the NewerNoncurrentVersions retention threshold so data versions get the correct position index. Previously, skipping delete markers without incrementing the index could retain too many versions after delete/recreate cycles. * lifecycle worker: fix version ordering, error propagation, and fail-closed scope 1. Use full version ID comparison (CompareVersionIds) for sorting .versions entries, not just decoded timestamps. 
Two versions with the same timestamp prefix but different random suffixes were previously misordered, potentially treating the newest version as noncurrent and deleting it. 2. Propagate .versions listing failures to the caller instead of swallowing them with (nil, 0). Transient filer errors on a .versions directory now surface in the job result. 3. Narrow the fail-closed path to only malformed lifecycle XML (errMalformedLifecycleXML). Transient filer LookupEntry errors now fall back to TTL with a warning, matching the original intent of "fail closed on bad config, not on network blips." * lifecycle worker: only skip .uploads at bucket root * lifecycle worker: sort.Slice, mixed-format test, XML presence tracking - Replace manual insertion sort with sort.Slice in sortVersionsByVersionId - Add TestCompareVersionIdsMixedFormats covering old/new format ordering - Distinguish "no lifecycle XML" (nil) from "XML present but no effective rules" (non-nil empty slice) so buckets with all-disabled rules don't incorrectly fall back to filer.conf TTL expiration * lifecycle worker: guard nil Attributes, use TrimSuffix in test - Guard entry.Attributes != nil before accessing GetFileSize() and Mtime in both listExpiredObjectsByRules and processVersionsDirectory - Use strings.TrimPrefix/TrimSuffix in TestVersionsDirectoryNaming to match the production code pattern * lifecycle worker: skip TTL scan when XML present, fix test assertions - When lifecycle XML is present but has no effective rules, skip object scanning entirely instead of falling back to TTL path - Test sort output against concrete expected names instead of re-using the same comparator as the sort itself --------- Co-authored-by: Copilot <copilot@github.com>pull/4306/merge
committed by
GitHub
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
4 changed files with 359 additions and 17 deletions
-
183weed/plugin/worker/lifecycle/execution.go
-
24weed/plugin/worker/lifecycle/rules.go
-
112weed/plugin/worker/lifecycle/version_test.go
-
57weed/s3api/s3lifecycle/version_time.go
@@ -0,0 +1,112 @@
|||
package lifecycle |
|||
|
|||
import ( |
|||
"fmt" |
|||
"math" |
|||
"strings" |
|||
"testing" |
|||
"time" |
|||
|
|||
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" |
|||
"github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" |
|||
"github.com/seaweedfs/seaweedfs/weed/s3api/s3lifecycle" |
|||
) |
|||
|
|||
// makeVersionId creates a new-format version ID from a timestamp.
|
|||
func makeVersionId(t time.Time) string { |
|||
inverted := math.MaxInt64 - t.UnixNano() |
|||
return fmt.Sprintf("%016x", inverted) + "0000000000000000" |
|||
} |
|||
|
|||
func TestSortVersionsByVersionId(t *testing.T) { |
|||
t1 := time.Date(2026, 1, 1, 0, 0, 0, 0, time.UTC) |
|||
t2 := time.Date(2026, 2, 1, 0, 0, 0, 0, time.UTC) |
|||
t3 := time.Date(2026, 3, 1, 0, 0, 0, 0, time.UTC) |
|||
|
|||
vid1 := makeVersionId(t1) |
|||
vid2 := makeVersionId(t2) |
|||
vid3 := makeVersionId(t3) |
|||
|
|||
entries := []*filer_pb.Entry{ |
|||
{Name: "v_" + vid1}, |
|||
{Name: "v_" + vid3}, |
|||
{Name: "v_" + vid2}, |
|||
} |
|||
|
|||
sortVersionsByVersionId(entries) |
|||
|
|||
// Should be sorted newest first: t3, t2, t1.
|
|||
expected := []string{"v_" + vid3, "v_" + vid2, "v_" + vid1} |
|||
for i, want := range expected { |
|||
if entries[i].Name != want { |
|||
t.Errorf("entries[%d].Name = %s, want %s", i, entries[i].Name, want) |
|||
} |
|||
} |
|||
} |
|||
|
|||
func TestSortVersionsByVersionId_SameTimestampDifferentSuffix(t *testing.T) { |
|||
// Two versions with the same timestamp prefix but different random suffix.
|
|||
// The sort must still produce a deterministic order.
|
|||
base := makeVersionId(time.Date(2026, 6, 1, 0, 0, 0, 0, time.UTC)) |
|||
vid1 := base[:16] + "aaaaaaaaaaaaaaaa" |
|||
vid2 := base[:16] + "bbbbbbbbbbbbbbbb" |
|||
|
|||
entries := []*filer_pb.Entry{ |
|||
{Name: "v_" + vid2}, |
|||
{Name: "v_" + vid1}, |
|||
} |
|||
|
|||
sortVersionsByVersionId(entries) |
|||
|
|||
// New format: smaller hex = newer. vid1 ("aaa...") < vid2 ("bbb...") so vid1 is newer.
|
|||
if strings.TrimPrefix(entries[0].Name, "v_") != vid1 { |
|||
t.Errorf("expected vid1 (newer) first, got %s", entries[0].Name) |
|||
} |
|||
} |
|||
|
|||
func TestCompareVersionIdsMixedFormats(t *testing.T) { |
|||
// Old format: raw nanosecond timestamp (below threshold ~0x17...).
|
|||
// New format: inverted timestamp (above threshold ~0x68...).
|
|||
oldTs := time.Date(2023, 6, 15, 12, 0, 0, 0, time.UTC) |
|||
newTs := time.Date(2026, 3, 1, 0, 0, 0, 0, time.UTC) |
|||
|
|||
oldFormatId := fmt.Sprintf("%016x", oldTs.UnixNano()) + "abcdef0123456789" |
|||
newFormatId := makeVersionId(newTs) // uses inverted timestamp
|
|||
|
|||
// newTs is more recent, so newFormatId should sort as "newer".
|
|||
cmp := s3lifecycle.CompareVersionIds(newFormatId, oldFormatId) |
|||
if cmp >= 0 { |
|||
t.Errorf("expected new-format ID (2026) to be newer than old-format ID (2023), got cmp=%d", cmp) |
|||
} |
|||
|
|||
// Reverse comparison.
|
|||
cmp2 := s3lifecycle.CompareVersionIds(oldFormatId, newFormatId) |
|||
if cmp2 <= 0 { |
|||
t.Errorf("expected old-format ID (2023) to be older than new-format ID (2026), got cmp=%d", cmp2) |
|||
} |
|||
|
|||
// Sort a mixed slice: should be newest-first.
|
|||
entries := []*filer_pb.Entry{ |
|||
{Name: "v_" + oldFormatId}, |
|||
{Name: "v_" + newFormatId}, |
|||
} |
|||
sortVersionsByVersionId(entries) |
|||
|
|||
if strings.TrimPrefix(entries[0].Name, "v_") != newFormatId { |
|||
t.Errorf("expected new-format (newer) entry first after sort") |
|||
} |
|||
} |
|||
|
|||
func TestVersionsDirectoryNaming(t *testing.T) { |
|||
if s3_constants.VersionsFolder != ".versions" { |
|||
t.Fatalf("unexpected VersionsFolder constant: %q", s3_constants.VersionsFolder) |
|||
} |
|||
|
|||
versionsDir := "/buckets/mybucket/path/to/key.versions" |
|||
bucketPath := "/buckets/mybucket" |
|||
relDir := strings.TrimPrefix(versionsDir, bucketPath+"/") |
|||
objKey := strings.TrimSuffix(relDir, s3_constants.VersionsFolder) |
|||
if objKey != "path/to/key" { |
|||
t.Errorf("expected 'path/to/key', got %q", objKey) |
|||
} |
|||
} |
|||
Write
Preview
Loading…
Cancel
Save
Reference in new issue