Browse Source
s3api: persist lifecycle TTL rules and enforce on assign
fix-8303-s3-lifecycle-ttl-assign
s3api: persist lifecycle TTL rules and enforce on assign
fix-8303-s3-lifecycle-ttl-assign
5 changed files with 217 additions and 3 deletions
-
5weed/s3api/s3_constants/extend_key.go
-
18weed/s3api/s3api_bucket_handlers.go
-
124weed/s3api/s3api_bucket_lifecycle_ttl.go
-
68weed/s3api/s3api_bucket_lifecycle_ttl_test.go
-
5weed/s3api/s3api_object_handlers_put.go
@ -0,0 +1,124 @@ |
|||||
|
package s3api |
||||
|
|
||||
|
import ( |
||||
|
"encoding/json" |
||||
|
"fmt" |
||||
|
"sort" |
||||
|
"strings" |
||||
|
|
||||
|
"github.com/seaweedfs/seaweedfs/weed/glog" |
||||
|
"github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" |
||||
|
"github.com/seaweedfs/seaweedfs/weed/s3api/s3err" |
||||
|
) |
||||
|
|
||||
|
// bucketLifecycleTTLRule is one persisted lifecycle TTL entry for a bucket:
// objects whose key starts with Prefix expire after TtlSec seconds.
// An empty Prefix acts as a catch-all that matches every object in the bucket.
// The JSON tags define the on-disk format stored in the bucket entry's
// extended attributes under s3_constants.ExtBucketLifecycleTTLRulesKey.
type bucketLifecycleTTLRule struct {
	Prefix string `json:"prefix"`
	TtlSec int32  `json:"ttlSec"`
}
||||
|
|
||||
|
// normalizeLifecycleRulePrefix canonicalizes a lifecycle rule prefix (the
// same normalization is applied to object keys before matching): surrounding
// whitespace and all leading slashes are removed, and the degenerate value
// "." collapses to the empty prefix, which means "match everything".
func normalizeLifecycleRulePrefix(prefix string) string {
	prefix = strings.TrimSpace(prefix)
	// Strip every leading slash, not just the first one, so inputs such as
	// "//logs/" normalize to "logs/" and can match normalized object keys.
	// This also collapses a bare "/" (or "//", ...) to the empty prefix.
	prefix = strings.TrimLeft(prefix, "/")
	if prefix == "." {
		return ""
	}
	return prefix
}
||||
|
|
||||
|
func encodeBucketLifecycleTTLRules(rules []bucketLifecycleTTLRule) ([]byte, error) { |
||||
|
if len(rules) == 0 { |
||||
|
return nil, nil |
||||
|
} |
||||
|
sorted := make([]bucketLifecycleTTLRule, 0, len(rules)) |
||||
|
for _, rule := range rules { |
||||
|
if rule.TtlSec <= 0 { |
||||
|
continue |
||||
|
} |
||||
|
sorted = append(sorted, bucketLifecycleTTLRule{ |
||||
|
Prefix: normalizeLifecycleRulePrefix(rule.Prefix), |
||||
|
TtlSec: rule.TtlSec, |
||||
|
}) |
||||
|
} |
||||
|
if len(sorted) == 0 { |
||||
|
return nil, nil |
||||
|
} |
||||
|
sort.Slice(sorted, func(i, j int) bool { |
||||
|
return sorted[i].Prefix < sorted[j].Prefix |
||||
|
}) |
||||
|
return json.Marshal(sorted) |
||||
|
} |
||||
|
|
||||
|
func decodeBucketLifecycleTTLRules(extended map[string][]byte) []bucketLifecycleTTLRule { |
||||
|
if len(extended) == 0 { |
||||
|
return nil |
||||
|
} |
||||
|
serialized := extended[s3_constants.ExtBucketLifecycleTTLRulesKey] |
||||
|
if len(serialized) == 0 { |
||||
|
return nil |
||||
|
} |
||||
|
var rules []bucketLifecycleTTLRule |
||||
|
if err := json.Unmarshal(serialized, &rules); err != nil { |
||||
|
glog.Warningf("decode bucket lifecycle ttl rules: %v", err) |
||||
|
return nil |
||||
|
} |
||||
|
return rules |
||||
|
} |
||||
|
|
||||
|
func matchBucketLifecycleTTLSeconds(rules []bucketLifecycleTTLRule, objectKey string) int32 { |
||||
|
if len(rules) == 0 { |
||||
|
return 0 |
||||
|
} |
||||
|
objectKey = normalizeLifecycleRulePrefix(objectKey) |
||||
|
var bestPrefix string |
||||
|
var ttlSeconds int32 |
||||
|
for _, rule := range rules { |
||||
|
prefix := normalizeLifecycleRulePrefix(rule.Prefix) |
||||
|
if prefix != "" && !strings.HasPrefix(objectKey, prefix) { |
||||
|
continue |
||||
|
} |
||||
|
if len(prefix) >= len(bestPrefix) { |
||||
|
bestPrefix = prefix |
||||
|
ttlSeconds = rule.TtlSec |
||||
|
} |
||||
|
} |
||||
|
return ttlSeconds |
||||
|
} |
||||
|
|
||||
|
func (s3a *S3ApiServer) resolveBucketLifecycleTTLSeconds(bucket, filePath string) int32 { |
||||
|
config, errCode := s3a.getBucketConfig(bucket) |
||||
|
if errCode != s3err.ErrNone || config == nil || config.Entry == nil { |
||||
|
return 0 |
||||
|
} |
||||
|
bucketPrefix := s3a.bucketDir(bucket) + "/" |
||||
|
objectKey := strings.TrimPrefix(filePath, bucketPrefix) |
||||
|
if objectKey == filePath { |
||||
|
return 0 |
||||
|
} |
||||
|
rules := decodeBucketLifecycleTTLRules(config.Entry.Extended) |
||||
|
return matchBucketLifecycleTTLSeconds(rules, objectKey) |
||||
|
} |
||||
|
|
||||
|
func (s3a *S3ApiServer) persistBucketLifecycleTTLRules(bucket string, rules []bucketLifecycleTTLRule) error { |
||||
|
serialized, err := encodeBucketLifecycleTTLRules(rules) |
||||
|
if err != nil { |
||||
|
return fmt.Errorf("encode lifecycle ttl rules: %w", err) |
||||
|
} |
||||
|
errCode := s3a.updateBucketConfig(bucket, func(config *BucketConfig) error { |
||||
|
if config.Entry == nil { |
||||
|
return fmt.Errorf("bucket %s has no entry", bucket) |
||||
|
} |
||||
|
if config.Entry.Extended == nil { |
||||
|
config.Entry.Extended = make(map[string][]byte) |
||||
|
} |
||||
|
if len(serialized) == 0 { |
||||
|
delete(config.Entry.Extended, s3_constants.ExtBucketLifecycleTTLRulesKey) |
||||
|
} else { |
||||
|
config.Entry.Extended[s3_constants.ExtBucketLifecycleTTLRulesKey] = serialized |
||||
|
} |
||||
|
return nil |
||||
|
}) |
||||
|
if errCode != s3err.ErrNone { |
||||
|
return fmt.Errorf("persist lifecycle ttl rules: %v", errCode) |
||||
|
} |
||||
|
return nil |
||||
|
} |
||||
@ -0,0 +1,68 @@ |
|||||
|
package s3api |
||||
|
|
||||
|
import ( |
||||
|
"encoding/json" |
||||
|
"testing" |
||||
|
|
||||
|
"github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" |
||||
|
) |
||||
|
|
||||
|
func TestMatchBucketLifecycleTTLSeconds(t *testing.T) { |
||||
|
rules := []bucketLifecycleTTLRule{ |
||||
|
{Prefix: "", TtlSec: 3600}, |
||||
|
{Prefix: "logs/", TtlSec: 7200}, |
||||
|
{Prefix: "logs/archive/", TtlSec: 10800}, |
||||
|
} |
||||
|
|
||||
|
if got := matchBucketLifecycleTTLSeconds(rules, "logs/file.txt"); got != 7200 { |
||||
|
t.Fatalf("expected 7200 for logs/file.txt, got %d", got) |
||||
|
} |
||||
|
if got := matchBucketLifecycleTTLSeconds(rules, "logs/archive/file.txt"); got != 10800 { |
||||
|
t.Fatalf("expected 10800 for logs/archive/file.txt, got %d", got) |
||||
|
} |
||||
|
if got := matchBucketLifecycleTTLSeconds(rules, "other/file.txt"); got != 3600 { |
||||
|
t.Fatalf("expected 3600 for other/file.txt, got %d", got) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func TestDecodeBucketLifecycleTTLRules(t *testing.T) { |
||||
|
extended := map[string][]byte{ |
||||
|
s3_constants.ExtBucketLifecycleTTLRulesKey: []byte(`[{"prefix":"logs/","ttlSec":86400}]`), |
||||
|
} |
||||
|
rules := decodeBucketLifecycleTTLRules(extended) |
||||
|
if len(rules) != 1 { |
||||
|
t.Fatalf("expected 1 rule, got %d", len(rules)) |
||||
|
} |
||||
|
if rules[0].Prefix != "logs/" || rules[0].TtlSec != 86400 { |
||||
|
t.Fatalf("unexpected rule: %+v", rules[0]) |
||||
|
} |
||||
|
|
||||
|
extended[s3_constants.ExtBucketLifecycleTTLRulesKey] = []byte(`{invalid json`) |
||||
|
if got := decodeBucketLifecycleTTLRules(extended); got != nil { |
||||
|
t.Fatalf("expected nil rules for invalid JSON, got %+v", got) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func TestEncodeBucketLifecycleTTLRules(t *testing.T) { |
||||
|
serialized, err := encodeBucketLifecycleTTLRules([]bucketLifecycleTTLRule{ |
||||
|
{Prefix: "/logs/", TtlSec: 86400}, |
||||
|
{Prefix: "tmp/", TtlSec: 0}, |
||||
|
{Prefix: "", TtlSec: 3600}, |
||||
|
}) |
||||
|
if err != nil { |
||||
|
t.Fatalf("encode failed: %v", err) |
||||
|
} |
||||
|
var decoded []bucketLifecycleTTLRule |
||||
|
if err := json.Unmarshal(serialized, &decoded); err != nil { |
||||
|
t.Fatalf("unmarshal failed: %v", err) |
||||
|
} |
||||
|
if len(decoded) != 2 { |
||||
|
t.Fatalf("expected 2 persisted rules, got %d", len(decoded)) |
||||
|
} |
||||
|
if decoded[0].Prefix != "" || decoded[0].TtlSec != 3600 { |
||||
|
t.Fatalf("unexpected first rule: %+v", decoded[0]) |
||||
|
} |
||||
|
if decoded[1].Prefix != "logs/" || decoded[1].TtlSec != 86400 { |
||||
|
t.Fatalf("unexpected second rule: %+v", decoded[1]) |
||||
|
} |
||||
|
} |
||||
Write
Preview
Loading…
Cancel
Save
Reference in new issue