Browse Source
Worker set its working directory (#8461)
Worker set its working directory (#8461)
* set working directory * consolidate to worker directory * working directory * correct directory name * refactoring to use wildcard matcher * simplify * cleaning ec working directory * fix reference * clean * adjust test
committed by
GitHub
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
23 changed files with 559 additions and 815 deletions
-
30weed/command/mini.go
-
6weed/command/plugin_worker_test.go
-
2weed/command/worker.go
-
30weed/command/worker_runtime.go
-
13weed/plugin/worker/erasure_coding_handler.go
-
147weed/plugin/worker/vacuum_handler.go
-
168weed/plugin/worker/volume_metrics.go
-
6weed/s3api/auth_credentials.go
-
6weed/s3api/auth_credentials_test.go
-
7weed/s3api/policy_engine/conditions.go
-
3weed/s3api/policy_engine/engine_test.go
-
23weed/s3api/policy_engine/types.go
-
469weed/s3api/policy_engine/wildcard_matcher_test.go
-
11weed/s3api/s3tables/permissions.go
-
39weed/util/wildcard/filter.go
-
116weed/util/wildcard/wildcard_matcher.go
-
211weed/util/wildcard/wildcard_matcher_test.go
-
27weed/worker/tasks/erasure_coding/detection.go
-
18weed/worker/tasks/erasure_coding/ec_task.go
-
8weed/worker/tasks/vacuum/detection.go
-
11weed/worker/types/base/task.go
-
15weed/worker/types/task.go
-
8weed/worker/worker.go
@ -0,0 +1,168 @@ |
|||
package pluginworker |
|||
|
|||
import ( |
|||
"context" |
|||
"fmt" |
|||
"sort" |
|||
"strings" |
|||
"time" |
|||
|
|||
"github.com/seaweedfs/seaweedfs/weed/admin/topology" |
|||
"github.com/seaweedfs/seaweedfs/weed/glog" |
|||
"github.com/seaweedfs/seaweedfs/weed/pb" |
|||
"github.com/seaweedfs/seaweedfs/weed/pb/master_pb" |
|||
"github.com/seaweedfs/seaweedfs/weed/util/wildcard" |
|||
workertypes "github.com/seaweedfs/seaweedfs/weed/worker/types" |
|||
"google.golang.org/grpc" |
|||
) |
|||
|
|||
func collectVolumeMetricsFromMasters( |
|||
ctx context.Context, |
|||
masterAddresses []string, |
|||
collectionFilter string, |
|||
grpcDialOption grpc.DialOption, |
|||
) ([]*workertypes.VolumeHealthMetrics, *topology.ActiveTopology, error) { |
|||
if grpcDialOption == nil { |
|||
return nil, nil, fmt.Errorf("grpc dial option is not configured") |
|||
} |
|||
if len(masterAddresses) == 0 { |
|||
return nil, nil, fmt.Errorf("no master addresses provided in cluster context") |
|||
} |
|||
|
|||
for _, masterAddress := range masterAddresses { |
|||
response, err := fetchVolumeList(ctx, masterAddress, grpcDialOption) |
|||
if err != nil { |
|||
glog.Warningf("Plugin worker failed master volume list at %s: %v", masterAddress, err) |
|||
continue |
|||
} |
|||
|
|||
metrics, activeTopology, buildErr := buildVolumeMetrics(response, collectionFilter) |
|||
if buildErr != nil { |
|||
glog.Warningf("Plugin worker failed to build metrics from master %s: %v", masterAddress, buildErr) |
|||
continue |
|||
} |
|||
return metrics, activeTopology, nil |
|||
} |
|||
|
|||
return nil, nil, fmt.Errorf("failed to load topology from all provided masters") |
|||
} |
|||
|
|||
func fetchVolumeList(ctx context.Context, address string, grpcDialOption grpc.DialOption) (*master_pb.VolumeListResponse, error) { |
|||
var lastErr error |
|||
for _, candidate := range masterAddressCandidates(address) { |
|||
if ctx.Err() != nil { |
|||
return nil, ctx.Err() |
|||
} |
|||
|
|||
dialCtx, cancelDial := context.WithTimeout(ctx, 5*time.Second) |
|||
conn, err := pb.GrpcDial(dialCtx, candidate, false, grpcDialOption) |
|||
cancelDial() |
|||
if err != nil { |
|||
lastErr = err |
|||
continue |
|||
} |
|||
|
|||
client := master_pb.NewSeaweedClient(conn) |
|||
callCtx, cancelCall := context.WithTimeout(ctx, 10*time.Second) |
|||
response, callErr := client.VolumeList(callCtx, &master_pb.VolumeListRequest{}) |
|||
cancelCall() |
|||
_ = conn.Close() |
|||
|
|||
if callErr == nil { |
|||
return response, nil |
|||
} |
|||
lastErr = callErr |
|||
} |
|||
|
|||
if lastErr == nil { |
|||
lastErr = fmt.Errorf("no valid master address candidate") |
|||
} |
|||
return nil, lastErr |
|||
} |
|||
|
|||
// buildVolumeMetrics converts a master VolumeList response into per-volume
// health metrics and an ActiveTopology snapshot.
//
// collectionFilter is a comma-separated wildcard list; volumes whose
// collection matches none of the compiled patterns are skipped (an empty
// filter matches everything — see wildcard.MatchesAnyWildcard).
// Returns an error when the response carries no topology or the topology
// update fails.
func buildVolumeMetrics(
	response *master_pb.VolumeListResponse,
	collectionFilter string,
) ([]*workertypes.VolumeHealthMetrics, *topology.ActiveTopology, error) {
	if response == nil || response.TopologyInfo == nil {
		return nil, nil, fmt.Errorf("volume list response has no topology info")
	}

	activeTopology := topology.NewActiveTopology(10)
	if err := activeTopology.UpdateTopology(response.TopologyInfo); err != nil {
		return nil, nil, err
	}

	// Compile the filter once; the master reports the volume size limit in MB.
	patterns := wildcard.CompileWildcardMatchers(collectionFilter)
	volumeSizeLimitBytes := uint64(response.VolumeSizeLimitMb) * 1024 * 1024
	now := time.Now()
	metrics := make([]*workertypes.VolumeHealthMetrics, 0, 256)

	// Walk the full topology tree: data center -> rack -> node -> disk -> volume.
	for _, dc := range response.TopologyInfo.DataCenterInfos {
		for _, rack := range dc.RackInfos {
			for _, node := range rack.DataNodeInfos {
				for diskType, diskInfo := range node.DiskInfos {
					for _, volume := range diskInfo.VolumeInfos {
						if !wildcard.MatchesAnyWildcard(patterns, volume.Collection) {
							continue
						}

						metric := &workertypes.VolumeHealthMetrics{
							VolumeID:      volume.Id,
							Server:        node.Id,
							ServerAddress: string(pb.NewServerAddressFromDataNode(node)),
							DiskType:      diskType,
							DiskId:        volume.DiskId,
							DataCenter:    dc.Id,
							Rack:          rack.Id,
							Collection:    volume.Collection,
							Size:          volume.Size,
							DeletedBytes:  volume.DeletedByteCount,
							LastModified:  time.Unix(volume.ModifiedAtSecond, 0),
							// Placeholder; recomputed below by counting replicas.
							ReplicaCount: 1,
							// NOTE(review): ReplicaPlacement looks like the encoded
							// placement byte, not a plain count — confirm int() of it
							// is really the expected replica total.
							ExpectedReplicas: int(volume.ReplicaPlacement),
							IsReadOnly:       volume.ReadOnly,
						}
						if metric.Size > 0 {
							metric.GarbageRatio = float64(metric.DeletedBytes) / float64(metric.Size)
						}
						if volumeSizeLimitBytes > 0 {
							metric.FullnessRatio = float64(metric.Size) / float64(volumeSizeLimitBytes)
						}
						metric.Age = now.Sub(metric.LastModified)
						metrics = append(metrics, metric)
					}
				}
			}
		}
	}

	// A volume appears once per replica in the topology walk above; count
	// occurrences and write the total back onto every replica's metric.
	replicaCounts := make(map[uint32]int)
	for _, metric := range metrics {
		replicaCounts[metric.VolumeID]++
	}
	for _, metric := range metrics {
		metric.ReplicaCount = replicaCounts[metric.VolumeID]
	}

	return metrics, activeTopology, nil
}
|||
|
|||
func masterAddressCandidates(address string) []string { |
|||
trimmed := strings.TrimSpace(address) |
|||
if trimmed == "" { |
|||
return nil |
|||
} |
|||
candidateSet := map[string]struct{}{ |
|||
trimmed: {}, |
|||
} |
|||
converted := pb.ServerToGrpcAddress(trimmed) |
|||
candidateSet[converted] = struct{}{} |
|||
|
|||
candidates := make([]string, 0, len(candidateSet)) |
|||
for candidate := range candidateSet { |
|||
candidates = append(candidates, candidate) |
|||
} |
|||
sort.Strings(candidates) |
|||
return candidates |
|||
} |
|||
@ -1,469 +0,0 @@ |
|||
package policy_engine |
|||
|
|||
import ( |
|||
"testing" |
|||
) |
|||
|
|||
func TestMatchesWildcard(t *testing.T) { |
|||
tests := []struct { |
|||
name string |
|||
pattern string |
|||
str string |
|||
expected bool |
|||
}{ |
|||
// Basic functionality tests
|
|||
{ |
|||
name: "Exact match", |
|||
pattern: "test", |
|||
str: "test", |
|||
expected: true, |
|||
}, |
|||
{ |
|||
name: "Single wildcard", |
|||
pattern: "*", |
|||
str: "anything", |
|||
expected: true, |
|||
}, |
|||
{ |
|||
name: "Empty string with wildcard", |
|||
pattern: "*", |
|||
str: "", |
|||
expected: true, |
|||
}, |
|||
|
|||
// Star (*) wildcard tests
|
|||
{ |
|||
name: "Prefix wildcard", |
|||
pattern: "test*", |
|||
str: "test123", |
|||
expected: true, |
|||
}, |
|||
{ |
|||
name: "Suffix wildcard", |
|||
pattern: "*test", |
|||
str: "123test", |
|||
expected: true, |
|||
}, |
|||
{ |
|||
name: "Middle wildcard", |
|||
pattern: "test*123", |
|||
str: "testABC123", |
|||
expected: true, |
|||
}, |
|||
{ |
|||
name: "Multiple wildcards", |
|||
pattern: "test*abc*123", |
|||
str: "testXYZabcDEF123", |
|||
expected: true, |
|||
}, |
|||
{ |
|||
name: "No match", |
|||
pattern: "test*", |
|||
str: "other", |
|||
expected: false, |
|||
}, |
|||
|
|||
// Question mark (?) wildcard tests
|
|||
{ |
|||
name: "Single question mark", |
|||
pattern: "test?", |
|||
str: "test1", |
|||
expected: true, |
|||
}, |
|||
{ |
|||
name: "Multiple question marks", |
|||
pattern: "test??", |
|||
str: "test12", |
|||
expected: true, |
|||
}, |
|||
{ |
|||
name: "Question mark no match", |
|||
pattern: "test?", |
|||
str: "test12", |
|||
expected: false, |
|||
}, |
|||
{ |
|||
name: "Mixed wildcards", |
|||
pattern: "test*abc?def", |
|||
str: "testXYZabc1def", |
|||
expected: true, |
|||
}, |
|||
|
|||
// Edge cases
|
|||
{ |
|||
name: "Empty pattern", |
|||
pattern: "", |
|||
str: "", |
|||
expected: true, |
|||
}, |
|||
{ |
|||
name: "Empty pattern with string", |
|||
pattern: "", |
|||
str: "test", |
|||
expected: false, |
|||
}, |
|||
{ |
|||
name: "Pattern with string empty", |
|||
pattern: "test", |
|||
str: "", |
|||
expected: false, |
|||
}, |
|||
|
|||
// Special characters
|
|||
{ |
|||
name: "Pattern with regex special chars", |
|||
pattern: "test[abc]", |
|||
str: "test[abc]", |
|||
expected: true, |
|||
}, |
|||
{ |
|||
name: "Pattern with dots", |
|||
pattern: "test.txt", |
|||
str: "test.txt", |
|||
expected: true, |
|||
}, |
|||
{ |
|||
name: "Pattern with dots and wildcard", |
|||
pattern: "*.txt", |
|||
str: "test.txt", |
|||
expected: true, |
|||
}, |
|||
} |
|||
|
|||
for _, tt := range tests { |
|||
t.Run(tt.name, func(t *testing.T) { |
|||
result := MatchesWildcard(tt.pattern, tt.str) |
|||
if result != tt.expected { |
|||
t.Errorf("Pattern %s against %s: expected %v, got %v", tt.pattern, tt.str, tt.expected, result) |
|||
} |
|||
}) |
|||
} |
|||
} |
|||
|
|||
func TestWildcardMatcher(t *testing.T) { |
|||
tests := []struct { |
|||
name string |
|||
pattern string |
|||
strings []string |
|||
expected []bool |
|||
}{ |
|||
{ |
|||
name: "Simple star pattern", |
|||
pattern: "test*", |
|||
strings: []string{"test", "test123", "testing", "other"}, |
|||
expected: []bool{true, true, true, false}, |
|||
}, |
|||
{ |
|||
name: "Question mark pattern", |
|||
pattern: "test?", |
|||
strings: []string{"test1", "test2", "test", "test12"}, |
|||
expected: []bool{true, true, false, false}, |
|||
}, |
|||
{ |
|||
name: "Mixed pattern", |
|||
pattern: "*.txt", |
|||
strings: []string{"file.txt", "test.txt", "file.doc", "txt"}, |
|||
expected: []bool{true, true, false, false}, |
|||
}, |
|||
} |
|||
|
|||
for _, tt := range tests { |
|||
t.Run(tt.name, func(t *testing.T) { |
|||
matcher, err := NewWildcardMatcher(tt.pattern) |
|||
if err != nil { |
|||
t.Fatalf("Failed to create matcher: %v", err) |
|||
} |
|||
|
|||
for i, str := range tt.strings { |
|||
result := matcher.Match(str) |
|||
if result != tt.expected[i] { |
|||
t.Errorf("Pattern %s against %s: expected %v, got %v", tt.pattern, str, tt.expected[i], result) |
|||
} |
|||
} |
|||
}) |
|||
} |
|||
} |
|||
|
|||
func TestCompileWildcardPattern(t *testing.T) { |
|||
tests := []struct { |
|||
name string |
|||
pattern string |
|||
input string |
|||
want bool |
|||
}{ |
|||
{"Star wildcard", "s3:Get*", "s3:GetObject", true}, |
|||
{"Question mark wildcard", "s3:Get?bject", "s3:GetObject", true}, |
|||
{"Mixed wildcards", "s3:*Object*", "s3:GetObjectAcl", true}, |
|||
} |
|||
|
|||
for _, tt := range tests { |
|||
t.Run(tt.name, func(t *testing.T) { |
|||
regex, err := CompileWildcardPattern(tt.pattern) |
|||
if err != nil { |
|||
t.Errorf("CompileWildcardPattern() error = %v", err) |
|||
return |
|||
} |
|||
got := regex.MatchString(tt.input) |
|||
if got != tt.want { |
|||
t.Errorf("CompileWildcardPattern() = %v, want %v", got, tt.want) |
|||
} |
|||
}) |
|||
} |
|||
} |
|||
|
|||
// BenchmarkWildcardMatchingPerformance demonstrates the performance benefits of caching
|
|||
func BenchmarkWildcardMatchingPerformance(b *testing.B) { |
|||
patterns := []string{ |
|||
"s3:Get*", |
|||
"s3:Put*", |
|||
"s3:Delete*", |
|||
"s3:List*", |
|||
"arn:aws:s3:::bucket/*", |
|||
"arn:aws:s3:::bucket/prefix*", |
|||
"user:*", |
|||
"user:admin-*", |
|||
} |
|||
|
|||
inputs := []string{ |
|||
"s3:GetObject", |
|||
"s3:PutObject", |
|||
"s3:DeleteObject", |
|||
"s3:ListBucket", |
|||
"arn:aws:s3:::bucket/file.txt", |
|||
"arn:aws:s3:::bucket/prefix/file.txt", |
|||
"user:admin", |
|||
"user:admin-john", |
|||
} |
|||
|
|||
b.Run("WithoutCache", func(b *testing.B) { |
|||
for i := 0; i < b.N; i++ { |
|||
for _, pattern := range patterns { |
|||
for _, input := range inputs { |
|||
MatchesWildcard(pattern, input) |
|||
} |
|||
} |
|||
} |
|||
}) |
|||
|
|||
b.Run("WithCache", func(b *testing.B) { |
|||
for i := 0; i < b.N; i++ { |
|||
for _, pattern := range patterns { |
|||
for _, input := range inputs { |
|||
FastMatchesWildcard(pattern, input) |
|||
} |
|||
} |
|||
} |
|||
}) |
|||
} |
|||
|
|||
// BenchmarkWildcardMatcherReuse demonstrates the performance benefits of reusing WildcardMatcher instances
|
|||
func BenchmarkWildcardMatcherReuse(b *testing.B) { |
|||
pattern := "s3:Get*" |
|||
input := "s3:GetObject" |
|||
|
|||
b.Run("NewMatcherEveryTime", func(b *testing.B) { |
|||
for i := 0; i < b.N; i++ { |
|||
matcher, _ := NewWildcardMatcher(pattern) |
|||
matcher.Match(input) |
|||
} |
|||
}) |
|||
|
|||
b.Run("CachedMatcher", func(b *testing.B) { |
|||
for i := 0; i < b.N; i++ { |
|||
matcher, _ := GetCachedWildcardMatcher(pattern) |
|||
matcher.Match(input) |
|||
} |
|||
}) |
|||
} |
|||
|
|||
// TestWildcardMatcherCaching verifies that caching works correctly
|
|||
func TestWildcardMatcherCaching(t *testing.T) { |
|||
pattern := "s3:Get*" |
|||
|
|||
// Get the first matcher
|
|||
matcher1, err := GetCachedWildcardMatcher(pattern) |
|||
if err != nil { |
|||
t.Fatalf("Failed to get cached matcher: %v", err) |
|||
} |
|||
|
|||
// Get the second matcher - should be the same instance
|
|||
matcher2, err := GetCachedWildcardMatcher(pattern) |
|||
if err != nil { |
|||
t.Fatalf("Failed to get cached matcher: %v", err) |
|||
} |
|||
|
|||
// Check that they're the same instance (same pointer)
|
|||
if matcher1 != matcher2 { |
|||
t.Errorf("Expected same matcher instance, got different instances") |
|||
} |
|||
|
|||
// Test that both matchers work correctly
|
|||
testInput := "s3:GetObject" |
|||
if !matcher1.Match(testInput) { |
|||
t.Errorf("First matcher failed to match %s", testInput) |
|||
} |
|||
if !matcher2.Match(testInput) { |
|||
t.Errorf("Second matcher failed to match %s", testInput) |
|||
} |
|||
} |
|||
|
|||
// TestFastMatchesWildcard verifies that the fast matching function works correctly
|
|||
func TestFastMatchesWildcard(t *testing.T) { |
|||
tests := []struct { |
|||
pattern string |
|||
input string |
|||
want bool |
|||
}{ |
|||
{"s3:Get*", "s3:GetObject", true}, |
|||
{"s3:Put*", "s3:GetObject", false}, |
|||
{"arn:aws:s3:::bucket/*", "arn:aws:s3:::bucket/file.txt", true}, |
|||
{"user:admin-*", "user:admin-john", true}, |
|||
{"user:admin-*", "user:guest-john", false}, |
|||
} |
|||
|
|||
for _, tt := range tests { |
|||
t.Run(tt.pattern+"_"+tt.input, func(t *testing.T) { |
|||
got := FastMatchesWildcard(tt.pattern, tt.input) |
|||
if got != tt.want { |
|||
t.Errorf("FastMatchesWildcard(%q, %q) = %v, want %v", tt.pattern, tt.input, got, tt.want) |
|||
} |
|||
}) |
|||
} |
|||
} |
|||
|
|||
// TestWildcardMatcherCacheBounding tests the bounded cache functionality:
// the cache must never grow past maxSize, and inserting beyond the limit
// evicts an existing entry. Reaches into unexported cache internals
// (maxSize, mu, matchers), so it must live in the same package.
func TestWildcardMatcherCacheBounding(t *testing.T) {
	// Clear cache before test
	wildcardMatcherCache.ClearCache()

	// Get original max size
	originalMaxSize := wildcardMatcherCache.maxSize

	// Set a small max size for testing (restored by the deferred cleanup
	// below so other tests see the original bound).
	wildcardMatcherCache.maxSize = 3
	defer func() {
		wildcardMatcherCache.maxSize = originalMaxSize
		wildcardMatcherCache.ClearCache()
	}()

	// Add patterns up to max size
	patterns := []string{"pattern1", "pattern2", "pattern3"}
	for _, pattern := range patterns {
		_, err := GetCachedWildcardMatcher(pattern)
		if err != nil {
			t.Fatalf("Failed to get cached matcher for %s: %v", pattern, err)
		}
	}

	// Verify cache size
	size, maxSize := wildcardMatcherCache.GetCacheStats()
	if size != 3 {
		t.Errorf("Expected cache size 3, got %d", size)
	}
	if maxSize != 3 {
		t.Errorf("Expected max size 3, got %d", maxSize)
	}

	// Add another pattern, should evict the least recently used
	_, err := GetCachedWildcardMatcher("pattern4")
	if err != nil {
		t.Fatalf("Failed to get cached matcher for pattern4: %v", err)
	}

	// Cache should still be at max size
	size, _ = wildcardMatcherCache.GetCacheStats()
	if size != 3 {
		t.Errorf("Expected cache size 3 after eviction, got %d", size)
	}

	// The first pattern should have been evicted; inspect the map directly
	// under the cache's read lock.
	wildcardMatcherCache.mu.RLock()
	if _, exists := wildcardMatcherCache.matchers["pattern1"]; exists {
		t.Errorf("Expected pattern1 to be evicted, but it still exists")
	}
	if _, exists := wildcardMatcherCache.matchers["pattern4"]; !exists {
		t.Errorf("Expected pattern4 to be in cache, but it doesn't exist")
	}
	wildcardMatcherCache.mu.RUnlock()
}
|||
|
|||
// TestWildcardMatcherCacheLRU tests the LRU eviction policy: a recently
// re-accessed entry must survive eviction while the least recently used
// entry is dropped. Reaches into unexported cache internals, so it must
// live in the same package.
func TestWildcardMatcherCacheLRU(t *testing.T) {
	// Clear cache before test
	wildcardMatcherCache.ClearCache()

	// Get original max size
	originalMaxSize := wildcardMatcherCache.maxSize

	// Set a small max size for testing (restored by the deferred cleanup).
	wildcardMatcherCache.maxSize = 3
	defer func() {
		wildcardMatcherCache.maxSize = originalMaxSize
		wildcardMatcherCache.ClearCache()
	}()

	// Add patterns to fill cache
	patterns := []string{"pattern1", "pattern2", "pattern3"}
	for _, pattern := range patterns {
		_, err := GetCachedWildcardMatcher(pattern)
		if err != nil {
			t.Fatalf("Failed to get cached matcher for %s: %v", pattern, err)
		}
	}

	// Access pattern1 to make it most recently used
	_, err := GetCachedWildcardMatcher("pattern1")
	if err != nil {
		t.Fatalf("Failed to access pattern1: %v", err)
	}

	// Add another pattern, should evict pattern2 (now least recently used)
	_, err = GetCachedWildcardMatcher("pattern4")
	if err != nil {
		t.Fatalf("Failed to get cached matcher for pattern4: %v", err)
	}

	// pattern1 should still be in cache (was accessed recently)
	// pattern2 should be evicted (was least recently used)
	wildcardMatcherCache.mu.RLock()
	if _, exists := wildcardMatcherCache.matchers["pattern1"]; !exists {
		t.Errorf("Expected pattern1 to remain in cache (most recently used)")
	}
	if _, exists := wildcardMatcherCache.matchers["pattern2"]; exists {
		t.Errorf("Expected pattern2 to be evicted (least recently used)")
	}
	if _, exists := wildcardMatcherCache.matchers["pattern3"]; !exists {
		t.Errorf("Expected pattern3 to remain in cache")
	}
	if _, exists := wildcardMatcherCache.matchers["pattern4"]; !exists {
		t.Errorf("Expected pattern4 to be in cache")
	}
	wildcardMatcherCache.mu.RUnlock()
}
|||
|
|||
// TestWildcardMatcherCacheClear tests the cache clearing functionality
|
|||
func TestWildcardMatcherCacheClear(t *testing.T) { |
|||
// Add some patterns to cache
|
|||
patterns := []string{"pattern1", "pattern2", "pattern3"} |
|||
for _, pattern := range patterns { |
|||
_, err := GetCachedWildcardMatcher(pattern) |
|||
if err != nil { |
|||
t.Fatalf("Failed to get cached matcher for %s: %v", pattern, err) |
|||
} |
|||
} |
|||
|
|||
// Verify cache has patterns
|
|||
size, _ := wildcardMatcherCache.GetCacheStats() |
|||
if size == 0 { |
|||
t.Errorf("Expected cache to have patterns before clearing") |
|||
} |
|||
|
|||
// Clear cache
|
|||
wildcardMatcherCache.ClearCache() |
|||
|
|||
// Verify cache is empty
|
|||
size, _ = wildcardMatcherCache.GetCacheStats() |
|||
if size != 0 { |
|||
t.Errorf("Expected cache to be empty after clearing, got size %d", size) |
|||
} |
|||
} |
|||
@ -0,0 +1,39 @@ |
|||
package wildcard |
|||
|
|||
import "strings" |
|||
|
|||
// CompileWildcardMatchers parses comma-separated wildcard patterns and compiles them.
|
|||
// Empty tokens are ignored. Invalid patterns are skipped.
|
|||
func CompileWildcardMatchers(filter string) []*WildcardMatcher { |
|||
parts := strings.Split(filter, ",") |
|||
matchers := make([]*WildcardMatcher, 0, len(parts)) |
|||
for _, part := range parts { |
|||
trimmed := strings.TrimSpace(part) |
|||
if trimmed == "" { |
|||
continue |
|||
} |
|||
matcher, err := NewWildcardMatcher(trimmed) |
|||
if err != nil { |
|||
continue |
|||
} |
|||
matchers = append(matchers, matcher) |
|||
} |
|||
if len(matchers) == 0 { |
|||
return nil |
|||
} |
|||
return matchers |
|||
} |
|||
|
|||
// MatchesAnyWildcard returns true when no matcher is provided,
|
|||
// or when any matcher matches the given value.
|
|||
func MatchesAnyWildcard(matchers []*WildcardMatcher, value string) bool { |
|||
if len(matchers) == 0 { |
|||
return true |
|||
} |
|||
for _, matcher := range matchers { |
|||
if matcher != nil && matcher.Match(value) { |
|||
return true |
|||
} |
|||
} |
|||
return false |
|||
} |
|||
@ -0,0 +1,211 @@ |
|||
package wildcard |
|||
|
|||
import "testing" |
|||
|
|||
func TestMatchesWildcard(t *testing.T) { |
|||
tests := []struct { |
|||
name string |
|||
pattern string |
|||
str string |
|||
expected bool |
|||
}{ |
|||
{"Exact match", "test", "test", true}, |
|||
{"Single wildcard", "*", "anything", true}, |
|||
{"Empty string with wildcard", "*", "", true}, |
|||
{"Prefix wildcard", "test*", "test123", true}, |
|||
{"Suffix wildcard", "*test", "123test", true}, |
|||
{"Middle wildcard", "test*123", "testABC123", true}, |
|||
{"Multiple wildcards", "test*abc*123", "testXYZabcDEF123", true}, |
|||
{"No match", "test*", "other", false}, |
|||
{"Single question mark", "test?", "test1", true}, |
|||
{"Multiple question marks", "test??", "test12", true}, |
|||
{"Question mark no match", "test?", "test12", false}, |
|||
{"Mixed wildcards", "test*abc?def", "testXYZabc1def", true}, |
|||
{"Empty pattern", "", "", true}, |
|||
{"Empty pattern with string", "", "test", false}, |
|||
{"Pattern with string empty", "test", "", false}, |
|||
{"Pattern with regex special chars", "test[abc]", "test[abc]", true}, |
|||
{"Pattern with dots", "test.txt", "test.txt", true}, |
|||
{"Pattern with dots and wildcard", "*.txt", "test.txt", true}, |
|||
} |
|||
for _, tt := range tests { |
|||
t.Run(tt.name, func(t *testing.T) { |
|||
got := MatchesWildcard(tt.pattern, tt.str) |
|||
if got != tt.expected { |
|||
t.Errorf("MatchesWildcard(%q, %q) = %v, want %v", tt.pattern, tt.str, got, tt.expected) |
|||
} |
|||
}) |
|||
} |
|||
} |
|||
|
|||
func TestWildcardMatcherMatch(t *testing.T) { |
|||
tests := []struct { |
|||
name string |
|||
pattern string |
|||
inputs []string |
|||
expected []bool |
|||
}{ |
|||
{"Simple star", "test*", []string{"test", "test123", "testing", "other"}, []bool{true, true, true, false}}, |
|||
{"Question mark", "test?", []string{"test1", "test2", "test", "test12"}, []bool{true, true, false, false}}, |
|||
{"Extension filter", "*.txt", []string{"file.txt", "test.txt", "file.doc", "txt"}, []bool{true, true, false, false}}, |
|||
} |
|||
for _, tt := range tests { |
|||
t.Run(tt.name, func(t *testing.T) { |
|||
m, err := NewWildcardMatcher(tt.pattern) |
|||
if err != nil { |
|||
t.Fatalf("NewWildcardMatcher: %v", err) |
|||
} |
|||
for i, s := range tt.inputs { |
|||
got := m.Match(s) |
|||
if got != tt.expected[i] { |
|||
t.Errorf("Match(%q) = %v, want %v", s, got, tt.expected[i]) |
|||
} |
|||
} |
|||
}) |
|||
} |
|||
} |
|||
|
|||
func TestCompileWildcardPattern(t *testing.T) { |
|||
tests := []struct { |
|||
pattern string |
|||
input string |
|||
want bool |
|||
}{ |
|||
{"s3:Get*", "s3:GetObject", true}, |
|||
{"s3:Get?bject", "s3:GetObject", true}, |
|||
{"s3:*Object*", "s3:GetObjectAcl", true}, |
|||
} |
|||
for _, tt := range tests { |
|||
t.Run(tt.pattern, func(t *testing.T) { |
|||
re, err := CompileWildcardPattern(tt.pattern) |
|||
if err != nil { |
|||
t.Fatalf("CompileWildcardPattern: %v", err) |
|||
} |
|||
if got := re.MatchString(tt.input); got != tt.want { |
|||
t.Errorf("got %v, want %v", got, tt.want) |
|||
} |
|||
}) |
|||
} |
|||
} |
|||
|
|||
func TestFastMatchesWildcard(t *testing.T) { |
|||
tests := []struct { |
|||
pattern string |
|||
input string |
|||
want bool |
|||
}{ |
|||
{"s3:Get*", "s3:GetObject", true}, |
|||
{"s3:Put*", "s3:GetObject", false}, |
|||
{"arn:aws:s3:::bucket/*", "arn:aws:s3:::bucket/file.txt", true}, |
|||
{"user:admin-*", "user:admin-john", true}, |
|||
{"user:admin-*", "user:guest-john", false}, |
|||
} |
|||
for _, tt := range tests { |
|||
t.Run(tt.pattern+"_"+tt.input, func(t *testing.T) { |
|||
got := FastMatchesWildcard(tt.pattern, tt.input) |
|||
if got != tt.want { |
|||
t.Errorf("FastMatchesWildcard(%q, %q) = %v, want %v", tt.pattern, tt.input, got, tt.want) |
|||
} |
|||
}) |
|||
} |
|||
} |
|||
|
|||
func TestWildcardMatcherCaching(t *testing.T) { |
|||
m1, err := GetCachedWildcardMatcher("s3:Get*") |
|||
if err != nil { |
|||
t.Fatal(err) |
|||
} |
|||
m2, err := GetCachedWildcardMatcher("s3:Get*") |
|||
if err != nil { |
|||
t.Fatal(err) |
|||
} |
|||
if m1 != m2 { |
|||
t.Error("expected same cached instance") |
|||
} |
|||
if !m1.Match("s3:GetObject") { |
|||
t.Error("expected match") |
|||
} |
|||
} |
|||
|
|||
func TestWildcardMatcherCacheBounding(t *testing.T) { |
|||
wildcardMatcherCache.ClearCache() |
|||
orig := wildcardMatcherCache.maxSize |
|||
wildcardMatcherCache.maxSize = 3 |
|||
defer func() { |
|||
wildcardMatcherCache.maxSize = orig |
|||
wildcardMatcherCache.ClearCache() |
|||
}() |
|||
for _, p := range []string{"p1", "p2", "p3"} { |
|||
GetCachedWildcardMatcher(p) |
|||
} |
|||
size, maxSize := wildcardMatcherCache.GetCacheStats() |
|||
if size != 3 { |
|||
t.Errorf("expected size 3, got %d", size) |
|||
} |
|||
if maxSize != 3 { |
|||
t.Errorf("expected maxSize 3, got %d", maxSize) |
|||
} |
|||
GetCachedWildcardMatcher("p4") |
|||
size, _ = wildcardMatcherCache.GetCacheStats() |
|||
if size != 3 { |
|||
t.Errorf("expected size 3 after eviction, got %d", size) |
|||
} |
|||
wildcardMatcherCache.mu.RLock() |
|||
defer wildcardMatcherCache.mu.RUnlock() |
|||
if _, ok := wildcardMatcherCache.matchers["p1"]; ok { |
|||
t.Error("p1 should have been evicted (LRU)") |
|||
} |
|||
if _, ok := wildcardMatcherCache.matchers["p4"]; !ok { |
|||
t.Error("p4 should be in cache") |
|||
} |
|||
} |
|||
|
|||
func TestWildcardMatcherCacheLRU(t *testing.T) { |
|||
wildcardMatcherCache.ClearCache() |
|||
orig := wildcardMatcherCache.maxSize |
|||
wildcardMatcherCache.maxSize = 3 |
|||
defer func() { |
|||
wildcardMatcherCache.maxSize = orig |
|||
wildcardMatcherCache.ClearCache() |
|||
}() |
|||
for _, p := range []string{"p1", "p2", "p3"} { |
|||
GetCachedWildcardMatcher(p) |
|||
} |
|||
GetCachedWildcardMatcher("p1") // access p1 to make it most-recently used
|
|||
GetCachedWildcardMatcher("p4") // should evict p2 (now LRU)
|
|||
wildcardMatcherCache.mu.RLock() |
|||
defer wildcardMatcherCache.mu.RUnlock() |
|||
if _, ok := wildcardMatcherCache.matchers["p2"]; ok { |
|||
t.Error("p2 should be evicted (least recently used)") |
|||
} |
|||
if _, ok := wildcardMatcherCache.matchers["p1"]; !ok { |
|||
t.Error("p1 should remain (recently accessed)") |
|||
} |
|||
if _, ok := wildcardMatcherCache.matchers["p3"]; !ok { |
|||
t.Error("p3 should remain") |
|||
} |
|||
if _, ok := wildcardMatcherCache.matchers["p4"]; !ok { |
|||
t.Error("p4 should be in cache") |
|||
} |
|||
} |
|||
|
|||
func TestWildcardMatcherCacheClear(t *testing.T) { |
|||
GetCachedWildcardMatcher("test") |
|||
wildcardMatcherCache.ClearCache() |
|||
size, _ := wildcardMatcherCache.GetCacheStats() |
|||
if size != 0 { |
|||
t.Errorf("expected 0 after clear, got %d", size) |
|||
} |
|||
} |
|||
|
|||
func BenchmarkMatchesWildcard(b *testing.B) { |
|||
for i := 0; i < b.N; i++ { |
|||
MatchesWildcard("s3:Get*", "s3:GetObject") |
|||
} |
|||
} |
|||
|
|||
func BenchmarkFastMatchesWildcard(b *testing.B) { |
|||
for i := 0; i < b.N; i++ { |
|||
FastMatchesWildcard("s3:Get*", "s3:GetObject") |
|||
} |
|||
} |
|||
Write
Preview
Loading…
Cancel
Save
Reference in new issue