
chore: execute goimports to format the code (#7983)

* chore: execute goimports to format the code

Signed-off-by: promalert <promalert@outlook.com>

* goimports -w .

---------

Signed-off-by: promalert <promalert@outlook.com>
Co-authored-by: Chris Lu <chris.lu@gmail.com>
promalert committed 1 week ago (committed by GitHub)
commit 9012069bd7
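For context, goimports applies gofmt formatting and additionally arranges each file's import block into groups: standard-library packages first, then a blank line, then third-party packages, with each group sorted. Nearly every hunk below is that regrouping; the rest are gofmt-style whitespace and alignment fixes. A minimal before/after sketch on a hypothetical file (not one taken from this commit):

@@ -3,5 +3,6 @@ package main
 import (
 	"flag"
-	"github.com/seaweedfs/seaweedfs/weed/glog"
 	"time"
+
+	"github.com/seaweedfs/seaweedfs/weed/glog"
 )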
Changed files (changed-line count in parentheses):

1. other/mq_client_example/agent_pub_record/agent_pub_record.go (7)
2. other/mq_client_example/agent_sub_record/agent_sub_record.go (5)
3. seaweedfs-rdma-sidecar/pkg/rdma/client.go (4)
4. seaweedfs-rdma-sidecar/pkg/seaweedfs/client.go (1)
5. telemetry/proto/telemetry.pb.go (5)
6. test/fuse_integration/framework.go (1)
7. test/fuse_integration/minimal_test.go (2)
8. test/kafka/integration/rebalancing_test.go (10)
9. test/kafka/integration/schema_end_to_end_test.go (5)
10. test/kafka/kafka-client-loadtest/internal/consumer/consumer_stalling_test.go (28)
11. test/kafka/kafka-client-loadtest/internal/schema/pb/loadtest.pb.go (5)
12. test/kafka/loadtest/resume_million_test.go (1)
13. test/kafka/unit/gateway_test.go (16)
14. test/s3/etag/s3_etag_test.go (11)
15. test/s3/iam/s3_iam_integration_test.go (6)
16. test/s3/s3client/s3client.go (3)
17. test/s3/sse/github_7562_copy_test.go (1)
18. test/s3/versioning/s3_versioning_multipart_test.go (1)
19. test/s3/versioning/s3_versioning_pagination_stress_test.go (1)
20. test/sftp/basic_test.go (5)
21. test/sftp/framework.go (1)
22. unmaintained/load_test/load_test_meta_tail/load_test_meta_tail.go (7)
23. unmaintained/repeated_vacuum/repeated_vacuum.go (5)
24. unmaintained/s3/presigned_put/presigned_put.go (7)
25. unmaintained/stress_filer_upload/bench_filer_upload/bench_filer_upload.go (3)
26. unmaintained/stress_filer_upload/stress_filer_upload_actual/stress_filer_upload.go (3)
27. unmaintained/volume_tailer/volume_tailer.go (2)
28. weed/cluster/cluster_test.go (3)
29. weed/cluster/group_members.go (3)
30. weed/cluster/lock_manager/distributed_lock_manager.go (3)
31. weed/cluster/lock_manager/lock_manager.go (5)
32. weed/command/autocomplete.go (7)
33. weed/command/benchmark.go (7)
34. weed/command/filer_meta_tail.go (4)
35. weed/command/filer_meta_tail_elastic.go (3)
36. weed/command/filer_remote_gateway.go (5)
37. weed/command/filer_remote_gateway_buckets.go (11)
38. weed/command/filer_remote_sync.go (3)
39. weed/command/filer_sync_jobs.go (5)
40. weed/command/mini.go (9)
41. weed/command/mount_linux.go (3)
42. weed/command/sftp.go (2)
43. weed/command/shell.go (1)
44. weed/command/update.go (2)
45. weed/command/version.go (3)
46. weed/credential/filer_etc/filer_etc_identity.go (24)
47. weed/credential/filer_etc/filer_etc_policy.go (14)
48. weed/credential/filer_etc/filer_etc_store.go (2)
49. weed/filer/abstract_sql/abstract_sql_store_kv.go (3)
50. weed/filer/arangodb/arangodb_store_bucket.go (3)
51. weed/filer/cassandra/cassandra_store_kv.go (1)
52. weed/filer/cassandra2/cassandra_store_kv.go (1)
53. weed/filer/configuration.go (5)
54. weed/filer/empty_folder_cleanup/cleanup_queue.go (3)
55. weed/filer/empty_folder_cleanup/cleanup_queue_test.go (3)
56. weed/filer/empty_folder_cleanup/empty_folder_cleaner.go (1)
57. weed/filer/empty_folder_cleanup/empty_folder_cleaner_test.go (1)
58. weed/filer/entry.go (2)
59. weed/filer/etcd/etcd_store_kv.go (1)
60. weed/filer/etcd/etcd_store_test.go (3)
61. weed/filer/filechunk_section_test.go (3)
62. weed/filer/filechunks.go (2)
63. weed/filer/filechunks2_test.go (2)
64. weed/filer/filechunks_read.go (3)
65. weed/filer/filechunks_read_test.go (3)
66. weed/filer/filerstore_wrapper.go (6)
67. weed/filer/hbase/hbase_store_kv.go (3)
68. weed/filer/interval_list_test.go (3)
69. weed/filer/leveldb/leveldb_store_kv.go (1)
70. weed/filer/leveldb/leveldb_store_test.go (2)
71. weed/filer/leveldb2/leveldb2_store_test.go (2)
72. weed/filer/leveldb3/leveldb3_store_bucket.go (3)
73. weed/filer/leveldb3/leveldb3_store_test.go (2)
74. weed/filer/mysql/mysql_store.go (5)
75. weed/filer/read_remote.go (1)
76. weed/filer/reader_cache_test.go (4)
77. weed/filer/redis2/redis_sentinel_store.go (3)
78. weed/filer/redis3/kv_directory_children_test.go (5)
79. weed/filer/redis3/skiplist_element_store.go (1)
80. weed/filer/redis_lua/redis_sentinel_store.go (3)
81. weed/filer/redis_lua/stored_procedure/init.go (1)
82. weed/filer/remote_mapping.go (1)
83. weed/filer/remote_storage.go (5)
84. weed/filer/remote_storage_test.go (3)
85. weed/filer/ydb/ydb_store_kv.go (1)
86. weed/filer/ydb/ydb_store_test.go (3)
87. weed/filer/ydb/ydb_types.go (1)
88. weed/glog/glog_file.go (3)
89. weed/iam/sts/constants.go (20)
90. weed/iam/sts/sts_service_test.go (2)
91. weed/iam/utils/arn_utils_test.go (8)
92. weed/images/orientation_test.go (3)
93. weed/images/resizing_test.go (3)
94. weed/mount/inode_to_path_test.go (3)
95. weed/mount/locked_entry.go (3)
96. weed/mount/meta_cache/meta_cache.go (4)
97. weed/mount/meta_cache/meta_cache_subscribe.go (3)
98. weed/mount/page_writer/chunk_interval_list_test.go (3)
99. weed/mount/weedfs_forget.go (1)
100. weed/mount/weedfs_grpc_server.go (1)

other/mq_client_example/agent_pub_record/agent_pub_record.go (7)

@@ -3,13 +3,14 @@ package main
import (
"flag"
"fmt"
"github.com/seaweedfs/seaweedfs/other/mq_client_example/example"
"github.com/seaweedfs/seaweedfs/weed/mq/client/agent_client"
"github.com/seaweedfs/seaweedfs/weed/mq/schema"
"log"
"sync"
"sync/atomic"
"time"
"github.com/seaweedfs/seaweedfs/other/mq_client_example/example"
"github.com/seaweedfs/seaweedfs/weed/mq/client/agent_client"
"github.com/seaweedfs/seaweedfs/weed/mq/schema"
)
var (

other/mq_client_example/agent_sub_record/agent_sub_record.go (5)

@@ -3,13 +3,14 @@ package main
import (
"flag"
"fmt"
"log"
"time"
"github.com/seaweedfs/seaweedfs/other/mq_client_example/example"
"github.com/seaweedfs/seaweedfs/weed/mq/client/agent_client"
"github.com/seaweedfs/seaweedfs/weed/mq/topic"
"github.com/seaweedfs/seaweedfs/weed/pb/schema_pb"
"github.com/seaweedfs/seaweedfs/weed/util"
"log"
"time"
)
var (

seaweedfs-rdma-sidecar/pkg/rdma/client.go (4)

@@ -419,7 +419,7 @@ func (c *Client) Read(ctx context.Context, req *ReadRequest) (*ReadResponse, err
}).Info("✅ RDMA read completed successfully")
// MOCK DATA IMPLEMENTATION - FOR DEVELOPMENT/TESTING ONLY
//
//
// This section generates placeholder data for the mock RDMA implementation.
// In a production RDMA implementation, this should be replaced with:
//
@@ -472,7 +472,7 @@ func (c *Client) ReadFileRange(ctx context.Context, fileID string, offset, size
if err != nil {
return nil, fmt.Errorf("invalid file ID %s: %w", fileID, err)
}
req := &ReadRequest{
VolumeID: volumeID,
NeedleID: needleID,

seaweedfs-rdma-sidecar/pkg/seaweedfs/client.go (1)

@@ -8,6 +8,7 @@ import (
"net/http"
"os"
"path/filepath"
"strings"
"time"
"seaweedfs-rdma-sidecar/pkg/rdma"

telemetry/proto/telemetry.pb.go (5)

@@ -7,10 +7,11 @@
package proto
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
)
const (

test/fuse_integration/framework.go (1)

@@ -3,6 +3,7 @@ package fuse_test
import (
"fmt"
"io/fs"
"net"
"os"
"os/exec"
"path/filepath"

test/fuse_integration/minimal_test.go (2)

@@ -3,5 +3,5 @@ package fuse_test
import "testing"
func TestMinimal(t *testing.T) {
t.Log("minimal test")
t.Log("minimal test")
}

test/kafka/integration/rebalancing_test.go (10)

@@ -152,7 +152,7 @@ func testTwoConsumersRebalance(t *testing.T, addr, topicName, groupID string) {
// Wait for rebalancing to occur - both consumers should get new assignments
var rebalancedAssignment1, rebalancedAssignment2 []int32
// Consumer1 should get a rebalance assignment
select {
case partitions := <-handler1.assignments:
@@ -372,7 +372,7 @@ func testMultipleConsumersJoin(t *testing.T, addr, topicName, groupID string) {
t.Errorf("Partition %d assigned to multiple consumers", partition)
}
}
// Each consumer should get exactly 1 partition (4 partitions / 4 consumers)
if len(assignment) != 1 {
t.Errorf("Consumer%d should get exactly 1 partition, got %d", i, len(assignment))
@@ -408,7 +408,7 @@ func (h *RebalanceTestHandler) Setup(session sarama.ConsumerGroupSession) error
h.readyOnce.Do(func() {
close(h.ready)
})
// Send partition assignment
partitions := make([]int32, 0)
for topic, partitionList := range session.Claims() {
@@ -417,13 +417,13 @@ func (h *RebalanceTestHandler) Setup(session sarama.ConsumerGroupSession) error
partitions = append(partitions, partition)
}
}
select {
case h.assignments <- partitions:
default:
// Channel might be full, that's ok
}
return nil
}

test/kafka/integration/schema_end_to_end_test.go (5)

@@ -86,7 +86,7 @@ func TestSchemaEndToEnd_AvroRoundTrip(t *testing.T) {
// Verify all fields
assert.Equal(t, int32(12345), decodedMap["id"])
assert.Equal(t, "Alice Johnson", decodedMap["name"])
// Verify union fields
emailUnion, ok := decodedMap["email"].(map[string]interface{})
require.True(t, ok, "Email should be a union")
@@ -126,7 +126,7 @@ func TestSchemaEndToEnd_ProtobufRoundTrip(t *testing.T) {
require.Equal(t, uint32(2), envelope.SchemaID, "Schema ID should match")
// Note: ParseConfluentEnvelope defaults to FormatAvro; format detection requires schema registry
require.Equal(t, schema.FormatAvro, envelope.Format, "Format defaults to Avro without schema registry lookup")
// For Protobuf with indexes, we need to use the specialized parser
protobufEnvelope, ok := schema.ParseConfluentProtobufEnvelopeWithIndexCount(confluentMsg, 1)
require.True(t, ok, "Message should be a valid Protobuf envelope")
@@ -269,7 +269,6 @@ func createMockSchemaRegistryForE2E(t *testing.T) *httptest.Server {
}))
}
func getUserAvroSchemaForE2E() string {
return `{
"type": "record",

test/kafka/kafka-client-loadtest/internal/consumer/consumer_stalling_test.go (28)

@@ -5,7 +5,7 @@ import (
)
// TestConsumerStallingPattern is a REPRODUCER for the consumer stalling bug.
//
//
// This test simulates the exact pattern that causes consumers to stall:
// 1. Consumer reads messages in batches
// 2. Consumer commits offset after each batch
@@ -24,7 +24,7 @@ import (
// If the test PASSES, it means consumer successfully fetches all messages (bug fixed)
func TestConsumerStallingPattern(t *testing.T) {
t.Skip("REPRODUCER TEST: Requires running load test infrastructure. See comments for setup.")
// This test documents the exact stalling pattern:
// - Consumers consume messages 0-163, commit offset 163
// - Next iteration: fetch offset 164+
@@ -36,7 +36,7 @@ func TestConsumerStallingPattern(t *testing.T) {
// 2. Empty fetch doesn't mean "end of partition" (could be transient)
// 3. Consumer retries on empty fetch instead of giving up
// 4. Logging shows why fetch stopped
t.Logf("=== CONSUMER STALLING REPRODUCER ===")
t.Logf("")
t.Logf("Setup Steps:")
@@ -72,27 +72,27 @@ func TestConsumerStallingPattern(t *testing.T) {
// This is a UNIT reproducer that can run standalone
func TestOffsetPlusOneCalculation(t *testing.T) {
testCases := []struct {
name string
committedOffset int64
name string
committedOffset int64
expectedNextOffset int64
}{
{"Offset 0", 0, 1},
{"Offset 99", 99, 100},
{"Offset 163", 163, 164}, // The exact stalling point!
{"Offset 163", 163, 164}, // The exact stalling point!
{"Offset 999", 999, 1000},
{"Large offset", 10000, 10001},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
// This is the critical calculation
nextOffset := tc.committedOffset + 1
if nextOffset != tc.expectedNextOffset {
t.Fatalf("OFFSET MATH BUG: committed=%d, next=%d (expected %d)",
tc.committedOffset, nextOffset, tc.expectedNextOffset)
}
t.Logf("✓ offset %d → next fetch at %d", tc.committedOffset, nextOffset)
})
}
@@ -105,18 +105,18 @@ func TestEmptyFetchShouldNotStopConsumer(t *testing.T) {
// Scenario: Consumer committed offset 163, then fetches 164+
committedOffset := int64(163)
nextFetchOffset := committedOffset + 1
// First attempt: get empty (transient - data might not be available yet)
// WRONG behavior (bug): Consumer sees 0 bytes and stops
// wrongConsumerLogic := (firstFetchResult == 0) // gives up!
// CORRECT behavior: Consumer should retry
correctConsumerLogic := true // continues retrying
correctConsumerLogic := true // continues retrying
if !correctConsumerLogic {
t.Fatalf("Consumer incorrectly gave up after empty fetch at offset %d", nextFetchOffset)
}
t.Logf("✓ Empty fetch doesn't stop consumer, continues retrying")
})
}
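Taken together, the comments above describe a consume loop that treats an empty fetch as transient rather than as end-of-partition. A minimal sketch of that loop in Go, with hypothetical fetch and commit callbacks standing in for the real load-test client (none of this is from the repository):

package consumer

import "time"

// Message stands in for a fetched record (hypothetical type).
type Message struct{ Offset int64 }

// consumeFrom resumes one past the committed offset and keeps
// retrying on empty fetches; it gives up only after maxEmpty
// consecutive empty responses.
func consumeFrom(committed int64, maxEmpty int,
	fetch func(offset int64) ([]Message, error),
	commit func(offset int64) error) error {

	nextOffset := committed + 1 // e.g. committed 163 -> next fetch at 164
	for empty := 0; empty < maxEmpty; {
		batch, err := fetch(nextOffset)
		if err != nil {
			return err
		}
		if len(batch) == 0 {
			empty++ // data may simply not be available yet; retry
			time.Sleep(100 * time.Millisecond)
			continue
		}
		empty = 0
		last := batch[len(batch)-1].Offset
		if err := commit(last); err != nil { // commit after each batch
			return err
		}
		nextOffset = last + 1 // the offset-plus-one step tested above
	}
	return nil
}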

test/kafka/kafka-client-loadtest/internal/schema/pb/loadtest.pb.go (5)

@@ -7,11 +7,12 @@
package pb
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
)
const (

test/kafka/loadtest/resume_million_test.go (1)

@@ -205,4 +205,3 @@ func TestResumeMillionRecords_Fixed(t *testing.T) {
glog.Infof("🏆 MILLION RECORD KAFKA INTEGRATION TEST COMPLETED SUCCESSFULLY!")
}

test/kafka/unit/gateway_test.go (16)

@@ -16,7 +16,7 @@ func TestGatewayBasicFunctionality(t *testing.T) {
defer gateway.CleanupAndClose()
addr := gateway.StartAndWait()
// Give the gateway a bit more time to be fully ready
time.Sleep(200 * time.Millisecond)
@@ -32,17 +32,17 @@ func TestGatewayBasicFunctionality(t *testing.T) {
func testGatewayAcceptsConnections(t *testing.T, addr string) {
// Test basic TCP connection to gateway
t.Logf("Testing connection to gateway at %s", addr)
conn, err := net.DialTimeout("tcp", addr, 5*time.Second)
if err != nil {
t.Fatalf("Failed to connect to gateway: %v", err)
}
defer conn.Close()
// Test that we can establish a connection and the gateway is listening
// We don't need to send a full Kafka request for this basic test
t.Logf("Successfully connected to gateway at %s", addr)
// Optional: Test that we can write some data without error
testData := []byte("test")
conn.SetWriteDeadline(time.Now().Add(1 * time.Second))
@@ -57,19 +57,19 @@ func testGatewayRefusesAfterClose(t *testing.T, gateway *testutil.GatewayTestSer
// Get the address from the gateway's listener
host, port := gateway.GetListenerAddr()
addr := fmt.Sprintf("%s:%d", host, port)
// Close the gateway
gateway.CleanupAndClose()
t.Log("Testing that gateway refuses connections after close")
// Attempt to connect - should fail
conn, err := net.DialTimeout("tcp", addr, 2*time.Second)
if err == nil {
conn.Close()
t.Fatal("Expected connection to fail after gateway close, but it succeeded")
}
// Verify it's a connection refused error
if !strings.Contains(err.Error(), "connection refused") && !strings.Contains(err.Error(), "connect: connection refused") {
t.Logf("Connection failed as expected with error: %v", err)

test/s3/etag/s3_etag_test.go (11)

@@ -63,11 +63,11 @@ const (
autoChunkSize = 8 * 1024 * 1024
// Test sizes
smallFileSize = 1 * 1024 // 1KB - single chunk
mediumFileSize = 256 * 1024 // 256KB - single chunk (at threshold)
largeFileSize = 10 * 1024 * 1024 // 10MB - triggers auto-chunking (2 chunks)
xlFileSize = 25 * 1024 * 1024 // 25MB - triggers auto-chunking (4 chunks)
multipartSize = 5 * 1024 * 1024 // 5MB per part for multipart uploads
smallFileSize = 1 * 1024 // 1KB - single chunk
mediumFileSize = 256 * 1024 // 256KB - single chunk (at threshold)
largeFileSize = 10 * 1024 * 1024 // 10MB - triggers auto-chunking (2 chunks)
xlFileSize = 25 * 1024 * 1024 // 25MB - triggers auto-chunking (4 chunks)
multipartSize = 5 * 1024 * 1024 // 5MB per part for multipart uploads
)
// ETag format patterns
@@ -540,4 +540,3 @@ func TestMultipleLargeFileUploads(t *testing.T) {
assert.NoError(t, err, "File %d ETag should be valid hex", i)
}
}

test/s3/iam/s3_iam_integration_test.go (6)

@@ -443,7 +443,7 @@ func TestS3IAMBucketPolicyIntegration(t *testing.T) {
require.NoError(t, err)
assert.Equal(t, testObjectData, string(data))
result.Body.Close()
// Clean up bucket policy after this test
_, err = adminClient.DeleteBucketPolicy(&s3.DeleteBucketPolicyInput{
Bucket: aws.String(bucketName),
@@ -481,7 +481,7 @@ func TestS3IAMBucketPolicyIntegration(t *testing.T) {
assert.Contains(t, *policyResult.Policy, "Deny")
// NOTE: Enforcement test is commented out due to known architectural limitation:
//
//
// KNOWN LIMITATION: DeleteObject uses the coarse-grained ACTION_WRITE constant,
// which convertActionToS3Format maps to "s3:PutObject" (not "s3:DeleteObject").
// This means the policy engine evaluates the deny policy against "s3:PutObject",
@@ -499,7 +499,7 @@ func TestS3IAMBucketPolicyIntegration(t *testing.T) {
// awsErr, ok := err.(awserr.Error)
// require.True(t, ok, "Error should be an awserr.Error")
// assert.Equal(t, "AccessDenied", awsErr.Code(), "Expected AccessDenied error code")
// Clean up bucket policy after this test
_, err = adminClient.DeleteBucketPolicy(&s3.DeleteBucketPolicyInput{
Bucket: aws.String(bucketName),

test/s3/s3client/s3client.go (3)

@@ -2,12 +2,13 @@ package main
import (
"context"
"time"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/aws/retry"
"github.com/aws/aws-sdk-go-v2/config"
"github.com/aws/aws-sdk-go-v2/credentials"
"github.com/aws/aws-sdk-go-v2/service/s3"
"time"
)
func main() {

test/s3/sse/github_7562_copy_test.go (1)

@@ -502,4 +502,3 @@ func TestGitHub7562LargeFile(t *testing.T) {
t.Log("Large file test passed!")
}

test/s3/versioning/s3_versioning_multipart_test.go (1)

@@ -518,4 +518,3 @@ func TestMultipartUploadDeleteMarkerListBehavior(t *testing.T) {
t.Logf("Object restored after delete marker removal, ETag=%s", multipartETag)
}

test/s3/versioning/s3_versioning_pagination_stress_test.go (1)

@@ -319,4 +319,3 @@ func listAllVersions(t *testing.T, client *s3.Client, bucketName, objectKey stri
t.Logf("Total: %d versions in %d pages", len(allVersions), pageCount)
return allVersions
}

test/sftp/basic_test.go (5)

@@ -540,11 +540,11 @@ func TestPathEdgeCases(t *testing.T) {
// Therefore, we cannot trigger the server-side path traversal block with this client.
// Instead, we verify that the file is created successfully within the jail (contained).
// The server-side protection logic is verified in unit tests (sftpd/sftp_server_test.go).
file, err := sftpClient.Create(traversalPath)
require.NoError(t, err, "creation should succeed because client sanitizes path")
file.Close()
// Clean up
err = sftpClient.Remove(traversalPath)
require.NoError(t, err)
@@ -649,4 +649,3 @@ func TestFileContent(t *testing.T) {
sftpClient.Remove(filename)
})
}

test/sftp/framework.go (1)

@@ -420,4 +420,3 @@ func findTestDataPath() string {
return "./testdata"
}

unmaintained/load_test/load_test_meta_tail/load_test_meta_tail.go (7)

@@ -4,6 +4,10 @@ import (
"context"
"flag"
"fmt"
"strconv"
"strings"
"time"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/pb"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
@@ -11,9 +15,6 @@ import (
util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
"strconv"
"strings"
"time"
)
var (

unmaintained/repeated_vacuum/repeated_vacuum.go (5)

@@ -4,17 +4,16 @@ import (
"context"
"flag"
"fmt"
"github.com/seaweedfs/seaweedfs/weed/pb"
"log"
"math/rand"
"time"
"google.golang.org/grpc"
"github.com/seaweedfs/seaweedfs/weed/operation"
"github.com/seaweedfs/seaweedfs/weed/pb"
"github.com/seaweedfs/seaweedfs/weed/security"
"github.com/seaweedfs/seaweedfs/weed/util"
util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
"google.golang.org/grpc"
)
var (

unmaintained/s3/presigned_put/presigned_put.go (7)

@@ -4,13 +4,14 @@ import (
"crypto/md5"
"encoding/base64"
"fmt"
"net/http"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
"net/http"
"strings"
"time"
)
// Downloads an item from an S3 Bucket in the region configured in the shared config

unmaintained/stress_filer_upload/bench_filer_upload/bench_filer_upload.go (3)

@@ -4,7 +4,6 @@ import (
"bytes"
"flag"
"fmt"
util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
"io"
"log"
"math/rand"
@@ -14,6 +13,8 @@ import (
"strings"
"sync"
"time"
util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
)
var (

unmaintained/stress_filer_upload/stress_filer_upload_actual/stress_filer_upload.go (3)

@@ -4,7 +4,6 @@ import (
"bytes"
"flag"
"fmt"
util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
"io"
"log"
"math/rand"
@@ -15,6 +14,8 @@ import (
"strings"
"sync"
"time"
util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
)
var (

unmaintained/volume_tailer/volume_tailer.go (2)

@@ -3,11 +3,11 @@ package main
import (
"context"
"flag"
"github.com/seaweedfs/seaweedfs/weed/pb"
"log"
"time"
"github.com/seaweedfs/seaweedfs/weed/operation"
"github.com/seaweedfs/seaweedfs/weed/pb"
"github.com/seaweedfs/seaweedfs/weed/security"
"github.com/seaweedfs/seaweedfs/weed/storage/needle"
util2 "github.com/seaweedfs/seaweedfs/weed/util"

weed/cluster/cluster_test.go (3)

@@ -1,10 +1,11 @@
package cluster
import (
"github.com/seaweedfs/seaweedfs/weed/pb"
"strconv"
"sync"
"testing"
"github.com/seaweedfs/seaweedfs/weed/pb"
)
func TestConcurrentAddRemoveNodes(t *testing.T) {

weed/cluster/group_members.go (3)

@@ -1,8 +1,9 @@
package cluster
import (
"github.com/seaweedfs/seaweedfs/weed/pb"
"time"
"github.com/seaweedfs/seaweedfs/weed/pb"
)
type GroupMembers struct {

weed/cluster/lock_manager/distributed_lock_manager.go (3)

@@ -2,9 +2,10 @@ package lock_manager
import (
"fmt"
"time"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/pb"
"time"
)
const RenewInterval = time.Second * 3

weed/cluster/lock_manager/lock_manager.go (5)

@@ -2,10 +2,11 @@ package lock_manager
import (
"fmt"
"github.com/google/uuid"
"github.com/seaweedfs/seaweedfs/weed/glog"
"sync"
"time"
"github.com/google/uuid"
"github.com/seaweedfs/seaweedfs/weed/glog"
)
var LockErrorNonEmptyTokenOnNewLock = fmt.Errorf("lock: non-empty token on a new lock")

weed/command/autocomplete.go (7)

@@ -2,12 +2,13 @@ package command
import (
"fmt"
"github.com/posener/complete"
completeinstall "github.com/posener/complete/cmd/install"
flag "github.com/seaweedfs/seaweedfs/weed/util/fla9"
"os"
"path/filepath"
"runtime"
"github.com/posener/complete"
completeinstall "github.com/posener/complete/cmd/install"
flag "github.com/seaweedfs/seaweedfs/weed/util/fla9"
)
func AutocompleteMain(commands []*Command) bool {

weed/command/benchmark.go (7)

@@ -4,8 +4,6 @@ import (
"bufio"
"context"
"fmt"
"github.com/seaweedfs/seaweedfs/weed/pb"
"github.com/seaweedfs/seaweedfs/weed/util/version"
"io"
"math"
"math/rand"
@@ -16,14 +14,15 @@ import (
"sync"
"time"
"google.golang.org/grpc"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/operation"
"github.com/seaweedfs/seaweedfs/weed/pb"
"github.com/seaweedfs/seaweedfs/weed/security"
"github.com/seaweedfs/seaweedfs/weed/util"
util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
"github.com/seaweedfs/seaweedfs/weed/util/version"
"github.com/seaweedfs/seaweedfs/weed/wdclient"
"google.golang.org/grpc"
)
type BenchmarkOptions struct {

weed/command/filer_meta_tail.go (4)

@@ -2,13 +2,13 @@ package command
import (
"fmt"
"github.com/seaweedfs/seaweedfs/weed/filer"
"github.com/seaweedfs/seaweedfs/weed/pb"
"os"
"path/filepath"
"strings"
"time"
"github.com/seaweedfs/seaweedfs/weed/filer"
"github.com/seaweedfs/seaweedfs/weed/pb"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/security"
"github.com/seaweedfs/seaweedfs/weed/util"

weed/command/filer_meta_tail_elastic.go (3)

@@ -5,11 +5,12 @@ package command
import (
"context"
"strings"
jsoniter "github.com/json-iterator/go"
elastic "github.com/olivere/elastic/v7"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/util"
"strings"
)
type EsDocument struct {

weed/command/filer_remote_gateway.go (5)

@@ -3,6 +3,9 @@ package command
import (
"context"
"fmt"
"os"
"time"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/pb"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
@@ -11,8 +14,6 @@ import (
"github.com/seaweedfs/seaweedfs/weed/security"
"github.com/seaweedfs/seaweedfs/weed/util"
"google.golang.org/grpc"
"os"
"time"
)
type RemoteGatewayOptions struct {

weed/command/filer_remote_gateway_buckets.go (11)

@@ -3,6 +3,12 @@ package command
import (
"context"
"fmt"
"math"
"math/rand"
"path/filepath"
"strings"
"time"
"github.com/seaweedfs/seaweedfs/weed/filer"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/pb"
@@ -12,11 +18,6 @@ import (
"github.com/seaweedfs/seaweedfs/weed/replication/source"
"github.com/seaweedfs/seaweedfs/weed/util"
"google.golang.org/protobuf/proto"
"math"
"math/rand"
"path/filepath"
"strings"
"time"
)
func (option *RemoteGatewayOptions) followBucketUpdatesAndUploadToRemote(filerSource *source.FilerSource) error {

weed/command/filer_remote_sync.go (3)

@@ -2,6 +2,8 @@ package command
import (
"fmt"
"time"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/pb"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
@@ -9,7 +11,6 @@ import (
"github.com/seaweedfs/seaweedfs/weed/security"
"github.com/seaweedfs/seaweedfs/weed/util"
"google.golang.org/grpc"
"time"
)
type RemoteSyncOptions struct {

weed/command/filer_sync_jobs.go (5)

@@ -1,12 +1,13 @@
package command
import (
"sync"
"sync/atomic"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/pb"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/util"
"sync"
"sync/atomic"
)
type MetadataProcessor struct {

weed/command/mini.go (9)

@@ -1105,7 +1105,6 @@ func startMiniWorker() {
glog.Infof("Maintenance worker %s started successfully", workerInstance.ID())
}
const credentialsInstructionTemplate = `
To create S3 credentials, you have two options:
@@ -1166,10 +1165,10 @@ func printWelcomeMessage() {
fmt.Fprintf(&sb, credentialsInstructionTemplate, *miniIp, *miniAdminOptions.port)
} else {
sb.WriteString("\n To create S3 credentials, use environment variables:\n\n")
sb.WriteString(" export AWS_ACCESS_KEY_ID=your-access-key\\n")
sb.WriteString(" export AWS_SECRET_ACCESS_KEY=your-secret-key\\n")
sb.WriteString(" weed mini -dir=/data\\n")
sb.WriteString(" This will create initial credentials for the 'mini' user.\\n")
sb.WriteString(" export AWS_ACCESS_KEY_ID=your-access-key\\n")
sb.WriteString(" export AWS_SECRET_ACCESS_KEY=your-secret-key\\n")
sb.WriteString(" weed mini -dir=/data\\n")
sb.WriteString(" This will create initial credentials for the 'mini' user.\\n")
}
fmt.Print(sb.String())

weed/command/mount_linux.go (3)

@@ -3,10 +3,11 @@ package command
import (
"bufio"
"fmt"
"github.com/seaweedfs/seaweedfs/weed/glog"
"io"
"os"
"strings"
"github.com/seaweedfs/seaweedfs/weed/glog"
)
const (

weed/command/sftp.go (2)

@@ -3,7 +3,6 @@ package command
import (
"context"
"fmt"
"github.com/seaweedfs/seaweedfs/weed/util/version"
"net"
"os"
"runtime"
@@ -17,6 +16,7 @@ import (
stats_collect "github.com/seaweedfs/seaweedfs/weed/stats"
"github.com/seaweedfs/seaweedfs/weed/util"
"github.com/seaweedfs/seaweedfs/weed/util/grace"
"github.com/seaweedfs/seaweedfs/weed/util/version"
)
var (

weed/command/shell.go (1)

@@ -2,6 +2,7 @@ package command
import (
"fmt"
"github.com/seaweedfs/seaweedfs/weed/pb"
"github.com/seaweedfs/seaweedfs/weed/security"

weed/command/update.go (2)

@@ -10,7 +10,6 @@ import (
"encoding/hex"
"encoding/json"
"fmt"
swv "github.com/seaweedfs/seaweedfs/weed/util/version"
"io"
"net/http"
"os"
@@ -22,6 +21,7 @@ import (
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/util"
util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
swv "github.com/seaweedfs/seaweedfs/weed/util/version"
"golang.org/x/net/context/ctxhttp"
)

weed/command/version.go (3)

@@ -2,8 +2,9 @@ package command
import (
"fmt"
"github.com/seaweedfs/seaweedfs/weed/util/version"
"runtime"
"github.com/seaweedfs/seaweedfs/weed/util/version"
)
var cmdVersion = &Command{

weed/credential/filer_etc/filer_etc_identity.go (24)

@@ -15,7 +15,7 @@ import (
func (store *FilerEtcStore) LoadConfiguration(ctx context.Context) (*iam_pb.S3ApiConfiguration, error) {
s3cfg := &iam_pb.S3ApiConfiguration{}
glog.V(1).Infof("Loading IAM configuration from %s/%s (using current active filer)",
glog.V(1).Infof("Loading IAM configuration from %s/%s (using current active filer)",
filer.IamConfigDirectory, filer.IamIdentityFile)
err := store.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
@@ -25,31 +25,31 @@ func (store *FilerEtcStore) LoadConfiguration(ctx context.Context) (*iam_pb.S3Ap
content, err := filer.ReadInsideFiler(client, filer.IamConfigDirectory, filer.IamIdentityFile)
if err != nil {
if err == filer_pb.ErrNotFound {
glog.V(1).Infof("IAM identity file not found at %s/%s, no credentials loaded",
glog.V(1).Infof("IAM identity file not found at %s/%s, no credentials loaded",
filer.IamConfigDirectory, filer.IamIdentityFile)
return nil
}
glog.Errorf("Failed to read IAM identity file from %s/%s: %v",
glog.Errorf("Failed to read IAM identity file from %s/%s: %v",
filer.IamConfigDirectory, filer.IamIdentityFile, err)
return err
}
if len(content) == 0 {
glog.V(1).Infof("IAM identity file at %s/%s is empty",
glog.V(1).Infof("IAM identity file at %s/%s is empty",
filer.IamConfigDirectory, filer.IamIdentityFile)
return nil
}
glog.V(2).Infof("Read %d bytes from %s/%s",
glog.V(2).Infof("Read %d bytes from %s/%s",
len(content), filer.IamConfigDirectory, filer.IamIdentityFile)
if err := filer.ParseS3ConfigurationFromBytes(content, s3cfg); err != nil {
glog.Errorf("Failed to parse IAM configuration from %s/%s: %v",
glog.Errorf("Failed to parse IAM configuration from %s/%s: %v",
filer.IamConfigDirectory, filer.IamIdentityFile, err)
return err
}
glog.V(1).Infof("Successfully parsed IAM configuration with %d identities and %d accounts",
glog.V(1).Infof("Successfully parsed IAM configuration with %d identities and %d accounts",
len(s3cfg.Identities), len(s3cfg.Accounts))
return nil
})
@@ -63,7 +63,7 @@ func (store *FilerEtcStore) LoadConfiguration(ctx context.Context) (*iam_pb.S3Ap
for _, identity := range s3cfg.Identities {
credCount := len(identity.Credentials)
actionCount := len(identity.Actions)
glog.V(2).Infof(" Identity: %s (credentials: %d, actions: %d)",
glog.V(2).Infof(" Identity: %s (credentials: %d, actions: %d)",
identity.Name, credCount, actionCount)
for _, cred := range identity.Credentials {
glog.V(3).Infof(" Access Key: %s", cred.AccessKey)

weed/credential/filer_etc/filer_etc_policy.go (14)

@@ -24,14 +24,14 @@ func (store *FilerEtcStore) GetPolicies(ctx context.Context) (map[string]policy_
store.mu.RLock()
configured := store.filerAddressFunc != nil
store.mu.RUnlock()
if !configured {
glog.V(1).Infof("Filer client not configured for policy retrieval, returning empty policies")
// Return empty policies if filer client is not configured
return policiesCollection.Policies, nil
}
glog.V(2).Infof("Loading IAM policies from %s/%s (using current active filer)",
glog.V(2).Infof("Loading IAM policies from %s/%s (using current active filer)",
filer.IamConfigDirectory, filer.IamPoliciesFile)
err := store.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
@@ -41,27 +41,27 @@ func (store *FilerEtcStore) GetPolicies(ctx context.Context) (map[string]policy_
content, err := filer.ReadInsideFiler(client, filer.IamConfigDirectory, filer.IamPoliciesFile)
if err != nil {
if err == filer_pb.ErrNotFound {
glog.V(1).Infof("Policies file not found at %s/%s, returning empty policies",
glog.V(1).Infof("Policies file not found at %s/%s, returning empty policies",
filer.IamConfigDirectory, filer.IamPoliciesFile)
// If file doesn't exist, return empty collection
return nil
}
glog.Errorf("Failed to read IAM policies file from %s/%s: %v",
glog.Errorf("Failed to read IAM policies file from %s/%s: %v",
filer.IamConfigDirectory, filer.IamPoliciesFile, err)
return err
}
if len(content) == 0 {
glog.V(2).Infof("IAM policies file at %s/%s is empty",
glog.V(2).Infof("IAM policies file at %s/%s is empty",
filer.IamConfigDirectory, filer.IamPoliciesFile)
return nil
}
glog.V(2).Infof("Read %d bytes from %s/%s",
glog.V(2).Infof("Read %d bytes from %s/%s",
len(content), filer.IamConfigDirectory, filer.IamPoliciesFile)
if err := json.Unmarshal(content, policiesCollection); err != nil {
glog.Errorf("Failed to parse IAM policies from %s/%s: %v",
glog.Errorf("Failed to parse IAM policies from %s/%s: %v",
filer.IamConfigDirectory, filer.IamPoliciesFile, err)
return err
}

weed/credential/filer_etc/filer_etc_store.go (2)

@@ -64,7 +64,7 @@ func (store *FilerEtcStore) withFilerClient(fn func(client filer_pb.SeaweedFiler
filerAddress := store.filerAddressFunc()
dialOption := store.grpcDialOption
store.mu.RUnlock()
if filerAddress == "" {
return fmt.Errorf("filer_etc: no filer discovered yet - please ensure a filer is running and accessible")
}

weed/filer/abstract_sql/abstract_sql_store_kv.go (3)

@@ -5,10 +5,11 @@ import (
"database/sql"
"encoding/base64"
"fmt"
"strings"
"github.com/seaweedfs/seaweedfs/weed/filer"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/util"
"strings"
)
func (store *AbstractSqlStore) KvPut(ctx context.Context, key []byte, value []byte) (err error) {

weed/filer/arangodb/arangodb_store_bucket.go (3)

@@ -2,11 +2,10 @@ package arangodb
import (
"context"
"github.com/arangodb/go-driver"
"time"
"github.com/arangodb/go-driver"
"github.com/seaweedfs/seaweedfs/weed/filer"
"github.com/seaweedfs/seaweedfs/weed/glog"
)

weed/filer/cassandra/cassandra_store_kv.go (1)

@@ -4,6 +4,7 @@ import (
"context"
"encoding/base64"
"fmt"
"github.com/gocql/gocql"
"github.com/seaweedfs/seaweedfs/weed/filer"
)

weed/filer/cassandra2/cassandra_store_kv.go (1)

@@ -4,6 +4,7 @@ import (
"context"
"encoding/base64"
"fmt"
"github.com/gocql/gocql"
"github.com/seaweedfs/seaweedfs/weed/filer"
"github.com/seaweedfs/seaweedfs/weed/util"

weed/filer/configuration.go (5)

@@ -1,11 +1,12 @@
package filer
import (
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/util"
"os"
"reflect"
"strings"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/util"
)
var (

weed/filer/empty_folder_cleanup/cleanup_queue.go (3)

@@ -203,6 +203,3 @@ func (q *CleanupQueue) OldestAge() time.Duration {
item := front.Value.(*queueItem)
return time.Since(item.queueTime)
}

weed/filer/empty_folder_cleanup/cleanup_queue_test.go (3)

@@ -367,6 +367,3 @@ func TestCleanupQueue_Concurrent(t *testing.T) {
// Just verify no panic occurred and queue is in consistent state
_ = q.Len()
}

weed/filer/empty_folder_cleanup/empty_folder_cleaner.go (1)

@@ -433,4 +433,3 @@ func (efc *EmptyFolderCleaner) GetCachedFolderCount(folder string) (int, bool) {
}
return 0, false
}

weed/filer/empty_folder_cleanup/empty_folder_cleaner_test.go (1)

@@ -566,4 +566,3 @@ func TestEmptyFolderCleaner_queueFIFOOrder(t *testing.T) {
cleaner.Stop()
}

weed/filer/entry.go (2)

@@ -1,11 +1,11 @@
package filer
import (
"github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants"
"os"
"time"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants"
"github.com/seaweedfs/seaweedfs/weed/util"
)

weed/filer/etcd/etcd_store_kv.go (1)

@@ -3,6 +3,7 @@ package etcd
import (
"context"
"fmt"
"github.com/seaweedfs/seaweedfs/weed/filer"
)

weed/filer/etcd/etcd_store_test.go (3)

@@ -1,8 +1,9 @@
package etcd
import (
"github.com/seaweedfs/seaweedfs/weed/filer/store_test"
"testing"
"github.com/seaweedfs/seaweedfs/weed/filer/store_test"
)
func TestStore(t *testing.T) {

weed/filer/filechunk_section_test.go (3)

@@ -1,8 +1,9 @@
package filer
import (
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"testing"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
)
func Test_removeGarbageChunks(t *testing.T) {

weed/filer/filechunks.go (2)

@@ -4,11 +4,11 @@ import (
"bytes"
"context"
"fmt"
"github.com/seaweedfs/seaweedfs/weed/wdclient"
"math"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/util"
"github.com/seaweedfs/seaweedfs/weed/wdclient"
)
func TotalSize(chunks []*filer_pb.FileChunk) (size uint64) {

weed/filer/filechunks2_test.go (2)

@@ -2,13 +2,13 @@ package filer
import (
"context"
"github.com/stretchr/testify/assert"
"log"
"slices"
"testing"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/stretchr/testify/assert"
)
func TestDoMinusChunks(t *testing.T) {

weed/filer/filechunks_read.go (3)

@@ -2,8 +2,9 @@ package filer
import (
"container/list"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"slices"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
)
func readResolvedChunks(chunks []*filer_pb.FileChunk, startOffset int64, stopOffset int64) (visibles *IntervalList[*VisibleInterval]) {

weed/filer/filechunks_read_test.go (3)

@@ -2,10 +2,11 @@ package filer
import (
"fmt"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"math"
"math/rand"
"testing"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
)
func TestReadResolvedChunks(t *testing.T) {

weed/filer/filerstore_wrapper.go (6)

@@ -32,9 +32,9 @@ type VirtualFilerStore interface {
}
type FilerStoreWrapper struct {
defaultStore FilerStore
pathToStore ptrie.Trie[string]
storeIdToStore map[string]FilerStore
defaultStore FilerStore
pathToStore ptrie.Trie[string]
storeIdToStore map[string]FilerStore
hasPathSpecificStore bool // fast check to skip MatchPrefix when no path-specific stores
}

weed/filer/hbase/hbase_store_kv.go (3)

@@ -2,9 +2,10 @@ package hbase
import (
"context"
"time"
"github.com/seaweedfs/seaweedfs/weed/filer"
"github.com/tsuna/gohbase/hrpc"
"time"
)
const (

weed/filer/interval_list_test.go (3)

@@ -2,8 +2,9 @@ package filer
import (
"fmt"
"github.com/stretchr/testify/assert"
"testing"
"github.com/stretchr/testify/assert"
)
type IntervalInt int

weed/filer/leveldb/leveldb_store_kv.go (1)

@@ -3,6 +3,7 @@ package leveldb
import (
"context"
"fmt"
"github.com/seaweedfs/seaweedfs/weed/filer"
"github.com/syndtr/goleveldb/leveldb"
)

weed/filer/leveldb/leveldb_store_test.go (2)

@@ -3,12 +3,12 @@ package leveldb
import (
"context"
"fmt"
"github.com/seaweedfs/seaweedfs/weed/pb"
"os"
"testing"
"time"
"github.com/seaweedfs/seaweedfs/weed/filer"
"github.com/seaweedfs/seaweedfs/weed/pb"
"github.com/seaweedfs/seaweedfs/weed/util"
)

weed/filer/leveldb2/leveldb2_store_test.go (2)

@@ -2,10 +2,10 @@ package leveldb
import (
"context"
"github.com/seaweedfs/seaweedfs/weed/pb"
"testing"
"github.com/seaweedfs/seaweedfs/weed/filer"
"github.com/seaweedfs/seaweedfs/weed/pb"
"github.com/seaweedfs/seaweedfs/weed/util"
)

weed/filer/leveldb3/leveldb3_store_bucket.go (3)

@@ -1,8 +1,9 @@
package leveldb
import (
"github.com/seaweedfs/seaweedfs/weed/filer"
"os"
"github.com/seaweedfs/seaweedfs/weed/filer"
)
var _ filer.BucketAware = (*LevelDB3Store)(nil)

weed/filer/leveldb3/leveldb3_store_test.go (2)

@@ -2,10 +2,10 @@ package leveldb
import (
"context"
"github.com/seaweedfs/seaweedfs/weed/pb"
"testing"
"github.com/seaweedfs/seaweedfs/weed/filer"
"github.com/seaweedfs/seaweedfs/weed/pb"
"github.com/seaweedfs/seaweedfs/weed/util"
)

weed/filer/mysql/mysql_store.go (5)

@@ -5,14 +5,13 @@ import (
"crypto/x509"
"database/sql"
"fmt"
"github.com/go-sql-driver/mysql"
"os"
"strings"
"time"
"github.com/seaweedfs/seaweedfs/weed/filer"
"github.com/go-sql-driver/mysql"
_ "github.com/go-sql-driver/mysql"
"github.com/seaweedfs/seaweedfs/weed/filer"
"github.com/seaweedfs/seaweedfs/weed/filer/abstract_sql"
"github.com/seaweedfs/seaweedfs/weed/util"
)

weed/filer/read_remote.go (1)

@@ -2,6 +2,7 @@ package filer
import (
"context"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/pb/remote_pb"
"github.com/seaweedfs/seaweedfs/weed/util"

weed/filer/reader_cache_test.go (4)

@@ -311,7 +311,7 @@ func TestSingleChunkCacherDoneSignal(t *testing.T) {
// TestSingleChunkCacherLookupError tests handling of lookup errors
func TestSingleChunkCacherLookupError(t *testing.T) {
cache := newMockChunkCacheForReaderCache()
// Lookup function that returns an error
lookupFn := func(ctx context.Context, fileId string) ([]string, error) {
return nil, fmt.Errorf("lookup failed for %s", fileId)
@@ -322,7 +322,7 @@ func TestSingleChunkCacherLookupError(t *testing.T) {
buffer := make([]byte, 100)
_, err := rc.ReadChunkAt(context.Background(), buffer, "error-test", nil, false, 0, 100, true)
if err == nil {
t.Error("Expected an error, got nil")
}

weed/filer/redis2/redis_sentinel_store.go (3)

@@ -1,10 +1,11 @@
package redis2
import (
"time"
"github.com/redis/go-redis/v9"
"github.com/seaweedfs/seaweedfs/weed/filer"
"github.com/seaweedfs/seaweedfs/weed/util"
"time"
)
func init() {

weed/filer/redis3/kv_directory_children_test.go (5)

@@ -3,11 +3,12 @@ package redis3
import (
"context"
"fmt"
"github.com/redis/go-redis/v9"
"github.com/stvp/tempredis"
"strconv"
"testing"
"time"
"github.com/redis/go-redis/v9"
"github.com/stvp/tempredis"
)
var names = []string{

weed/filer/redis3/skiplist_element_store.go (1)

@@ -3,6 +3,7 @@ package redis3
import (
"context"
"fmt"
"github.com/redis/go-redis/v9"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/util/skiplist"

weed/filer/redis_lua/redis_sentinel_store.go (3)

@@ -1,10 +1,11 @@
package redis_lua
import (
"time"
"github.com/redis/go-redis/v9"
"github.com/seaweedfs/seaweedfs/weed/filer"
"github.com/seaweedfs/seaweedfs/weed/util"
"time"
)
func init() {

weed/filer/redis_lua/stored_procedure/init.go (1)

@@ -2,6 +2,7 @@ package stored_procedure
import (
_ "embed"
"github.com/redis/go-redis/v9"
)

weed/filer/remote_mapping.go (1)

@@ -2,6 +2,7 @@ package filer
import (
"fmt"
"github.com/seaweedfs/seaweedfs/weed/pb"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/pb/remote_pb"

weed/filer/remote_storage.go (5)

@@ -3,14 +3,15 @@ package filer
import (
"context"
"fmt"
"math"
"strings"
"github.com/seaweedfs/seaweedfs/weed/pb"
"github.com/seaweedfs/seaweedfs/weed/pb/remote_pb"
"github.com/seaweedfs/seaweedfs/weed/remote_storage"
"github.com/seaweedfs/seaweedfs/weed/util"
"google.golang.org/grpc"
"google.golang.org/protobuf/proto"
"math"
"strings"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"

weed/filer/remote_storage_test.go (3)

@@ -1,9 +1,10 @@
package filer
import (
"testing"
"github.com/seaweedfs/seaweedfs/weed/pb/remote_pb"
"github.com/stretchr/testify/assert"
"testing"
)
func TestFilerRemoteStorage_FindRemoteStorageClient(t *testing.T) {

weed/filer/ydb/ydb_store_kv.go (1)

@@ -6,6 +6,7 @@ package ydb
import (
"context"
"fmt"
"github.com/seaweedfs/seaweedfs/weed/filer"
"github.com/seaweedfs/seaweedfs/weed/filer/abstract_sql"
"github.com/seaweedfs/seaweedfs/weed/util"

weed/filer/ydb/ydb_store_test.go (3)

@@ -4,8 +4,9 @@
package ydb
import (
"github.com/seaweedfs/seaweedfs/weed/filer/store_test"
"testing"
"github.com/seaweedfs/seaweedfs/weed/filer/store_test"
)
func TestStore(t *testing.T) {

weed/filer/ydb/ydb_types.go (1)

@@ -5,6 +5,7 @@ package ydb
import (
"fmt"
"github.com/ydb-platform/ydb-go-sdk/v3/table"
"github.com/ydb-platform/ydb-go-sdk/v3/table/options"
"github.com/ydb-platform/ydb-go-sdk/v3/table/types"

weed/glog/glog_file.go (3)

@@ -21,7 +21,6 @@ package glog
import (
"errors"
"fmt"
flag "github.com/seaweedfs/seaweedfs/weed/util/fla9"
"os"
"os/user"
"path/filepath"
@@ -29,6 +28,8 @@ import (
"strings"
"sync"
"time"
flag "github.com/seaweedfs/seaweedfs/weed/util/fla9"
)
// MaxSize is the maximum size of a log file in bytes.

weed/iam/sts/constants.go (20)

@@ -42,16 +42,16 @@ const (
// Configuration Field Names
const (
ConfigFieldFilerAddress = "filerAddress"
ConfigFieldBasePath = "basePath"
ConfigFieldIssuer = "issuer"
ConfigFieldClientID = "clientId"
ConfigFieldClientSecret = "clientSecret"
ConfigFieldJWKSUri = "jwksUri"
ConfigFieldScopes = "scopes"
ConfigFieldUserInfoUri = "userInfoUri"
ConfigFieldRedirectUri = "redirectUri"
ConfigFieldTLSCACert = "tlsCaCert"
ConfigFieldFilerAddress = "filerAddress"
ConfigFieldBasePath = "basePath"
ConfigFieldIssuer = "issuer"
ConfigFieldClientID = "clientId"
ConfigFieldClientSecret = "clientSecret"
ConfigFieldJWKSUri = "jwksUri"
ConfigFieldScopes = "scopes"
ConfigFieldUserInfoUri = "userInfoUri"
ConfigFieldRedirectUri = "redirectUri"
ConfigFieldTLSCACert = "tlsCaCert"
ConfigFieldTLSInsecureSkipVerify = "tlsInsecureSkipVerify"
)

weed/iam/sts/sts_service_test.go (2)

@@ -457,7 +457,7 @@ func TestSessionDurationCappedByTokenExpiration(t *testing.T) {
service := NewSTSService()
config := &STSConfig{
TokenDuration: FlexibleDuration{time.Hour}, // Default: 1 hour
TokenDuration: FlexibleDuration{time.Hour}, // Default: 1 hour
MaxSessionLength: FlexibleDuration{time.Hour * 12},
Issuer: "test-sts",
SigningKey: []byte("test-signing-key-32-characters-long"),

weed/iam/utils/arn_utils_test.go (8)

@@ -453,10 +453,10 @@ func TestSecurityMaliciousSTSUserARNs(t *testing.T) {
// should be accepted as a valid role name "role/name".
func TestEdgeCaseMultipleRoleMarkers(t *testing.T) {
testCases := []struct {
name string
arn string
expected string
useSTS bool
name string
arn string
expected string
useSTS bool
}{
{
name: "legacy_format_role_in_path",

weed/images/orientation_test.go (3)

@@ -1,9 +1,10 @@
package images
import (
"github.com/seaweedfs/seaweedfs/weed/util"
"os"
"testing"
"github.com/seaweedfs/seaweedfs/weed/util"
)
func TestXYZ(t *testing.T) {

weed/images/resizing_test.go (3)

@@ -2,9 +2,10 @@ package images
import (
"bytes"
"github.com/seaweedfs/seaweedfs/weed/util"
"os"
"testing"
"github.com/seaweedfs/seaweedfs/weed/util"
)
func TestResizing(t *testing.T) {

weed/mount/inode_to_path_test.go (3)

@@ -1,8 +1,9 @@
package mount
import (
"github.com/seaweedfs/seaweedfs/weed/util"
"testing"
"github.com/seaweedfs/seaweedfs/weed/util"
)
func TestInodeEntry_removeOnePath(t *testing.T) {

weed/mount/locked_entry.go (3)

@@ -1,8 +1,9 @@
package mount
import (
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"sync"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
)
type LockedEntry struct {

weed/mount/meta_cache/meta_cache.go (4)

@@ -19,8 +19,8 @@ import (
// e.g. fill fileId field for chunks
type MetaCache struct {
root util.FullPath
localStore filer.VirtualFilerStore
root util.FullPath
localStore filer.VirtualFilerStore
leveldbStore *leveldb.LevelDBStore // direct reference for batch operations
sync.RWMutex
uidGidMapper *UidGidMapper

3
weed/mount/meta_cache/meta_cache_subscribe.go

@ -2,12 +2,13 @@ package meta_cache
import (
"context"
"strings"
"github.com/seaweedfs/seaweedfs/weed/filer"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/pb"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/util"
"strings"
)
type MetadataFollower struct {

weed/mount/page_writer/chunk_interval_list_test.go (3)

@@ -1,8 +1,9 @@
package page_writer
import (
"github.com/stretchr/testify/assert"
"testing"
"github.com/stretchr/testify/assert"
)
func Test_PageChunkWrittenIntervalList(t *testing.T) {

weed/mount/weedfs_forget.go (1)

@@ -2,6 +2,7 @@ package mount
import (
"context"
"github.com/seaweedfs/seaweedfs/weed/util"
)

weed/mount/weedfs_grpc_server.go (1)

@@ -3,6 +3,7 @@ package mount
import (
"context"
"fmt"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/pb/mount_pb"
)

Some files were not shown because too many files changed in this diff
